Bug Summary

File: dev/pci/if_iwm.c
Warning: line 5812, column 2
Value stored to 'seq' is never read

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name if_iwm.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/if_iwm.c
1/* $OpenBSD: if_iwm.c,v 1.389 2022/01/09 05:42:50 jsg Exp $ */
2
3/*
4 * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
5 * Author: Stefan Sperling <stsp@openbsd.org>
6 * Copyright (c) 2014 Fixup Software Ltd.
7 * Copyright (c) 2017 Stefan Sperling <stsp@openbsd.org>
8 *
9 * Permission to use, copy, modify, and distribute this software for any
10 * purpose with or without fee is hereby granted, provided that the above
11 * copyright notice and this permission notice appear in all copies.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*-
23 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
24 * which were used as the reference documentation for this implementation.
25 *
26 ***********************************************************************
27 *
28 * This file is provided under a dual BSD/GPLv2 license. When using or
29 * redistributing this file, you may do so under either license.
30 *
31 * GPL LICENSE SUMMARY
32 *
33 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
34 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
35 * Copyright(c) 2016 Intel Deutschland GmbH
36 *
37 * This program is free software; you can redistribute it and/or modify
38 * it under the terms of version 2 of the GNU General Public License as
39 * published by the Free Software Foundation.
40 *
41 * This program is distributed in the hope that it will be useful, but
42 * WITHOUT ANY WARRANTY; without even the implied warranty of
43 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
44 * General Public License for more details.
45 *
46 * You should have received a copy of the GNU General Public License
47 * along with this program; if not, write to the Free Software
48 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
49 * USA
50 *
51 * The full GNU General Public License is included in this distribution
52 * in the file called COPYING.
53 *
54 * Contact Information:
55 * Intel Linux Wireless <ilw@linux.intel.com>
56 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
57 *
58 *
59 * BSD LICENSE
60 *
61 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
62 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
63 * Copyright(c) 2016 Intel Deutschland GmbH
64 * All rights reserved.
65 *
66 * Redistribution and use in source and binary forms, with or without
67 * modification, are permitted provided that the following conditions
68 * are met:
69 *
70 * * Redistributions of source code must retain the above copyright
71 * notice, this list of conditions and the following disclaimer.
72 * * Redistributions in binary form must reproduce the above copyright
73 * notice, this list of conditions and the following disclaimer in
74 * the documentation and/or other materials provided with the
75 * distribution.
76 * * Neither the name Intel Corporation nor the names of its
77 * contributors may be used to endorse or promote products derived
78 * from this software without specific prior written permission.
79 *
80 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
81 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
82 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
83 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
84 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
85 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
86 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
87 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
88 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
89 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
90 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
91 */
92
93/*-
94 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
95 *
96 * Permission to use, copy, modify, and distribute this software for any
97 * purpose with or without fee is hereby granted, provided that the above
98 * copyright notice and this permission notice appear in all copies.
99 *
100 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
101 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
102 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
103 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
104 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
105 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
106 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
107 */
108
109#include "bpfilter.h"
110
111#include <sys/param.h>
112#include <sys/conf.h>
113#include <sys/kernel.h>
114#include <sys/malloc.h>
115#include <sys/mbuf.h>
116#include <sys/mutex.h>
117#include <sys/proc.h>
118#include <sys/rwlock.h>
119#include <sys/socket.h>
120#include <sys/sockio.h>
121#include <sys/systm.h>
122#include <sys/endian.h>
123
124#include <sys/refcnt.h>
125#include <sys/task.h>
126#include <machine/bus.h>
127#include <machine/intr.h>
128
129#include <dev/pci/pcireg.h>
130#include <dev/pci/pcivar.h>
131#include <dev/pci/pcidevs.h>
132
133#if NBPFILTER > 0
134#include <net/bpf.h>
135#endif
136#include <net/if.h>
137#include <net/if_dl.h>
138#include <net/if_media.h>
139
140#include <netinet/in.h>
141#include <netinet/if_ether.h>
142
143#include <net80211/ieee80211_var.h>
144#include <net80211/ieee80211_amrr.h>
145#include <net80211/ieee80211_ra.h>
146#include <net80211/ieee80211_radiotap.h>
147#include <net80211/ieee80211_priv.h> /* for SEQ_LT */
148#undef DPRINTF /* defined in ieee80211_priv.h */
149
150#define DEVNAME(_s) ((_s)->sc_dev.dv_xname)
151
152#define IC2IFP(_ic_) (&(_ic_)->ic_if)
153
154#define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
155#define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
156
157#ifdef IWM_DEBUG
158#define DPRINTF(x) do { if (iwm_debug > 0) printf x; } while (0)
159#define DPRINTFN(n, x) do { if (iwm_debug >= (n)) printf x; } while (0)
160int iwm_debug = 1;
161#else
162#define DPRINTF(x) do { ; } while (0)
163#define DPRINTFN(n, x) do { ; } while (0)
164#endif
165
166#include <dev/pci/if_iwmreg.h>
167#include <dev/pci/if_iwmvar.h>
168
169const uint8_t iwm_nvm_channels[] = {
170 /* 2.4 GHz */
171 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
172 /* 5 GHz */
173 36, 40, 44 , 48, 52, 56, 60, 64,
174 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
175 149, 153, 157, 161, 165
176};
177
178const uint8_t iwm_nvm_channels_8000[] = {
179 /* 2.4 GHz */
180 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
181 /* 5 GHz */
182 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
183 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
184 149, 153, 157, 161, 165, 169, 173, 177, 181
185};
186
187#define IWM_NUM_2GHZ_CHANNELS 14
188
189const struct iwm_rate {
190 uint16_t rate;
191 uint8_t plcp;
192 uint8_t ht_plcp;
193} iwm_rates[] = {
194 /* Legacy */ /* HT */
195 { 2, IWM_RATE_1M_PLCP, IWM_RATE_HT_SISO_MCS_INV_PLCP },
196 { 4, IWM_RATE_2M_PLCP, IWM_RATE_HT_SISO_MCS_INV_PLCP },
197 { 11, IWM_RATE_5M_PLCP, IWM_RATE_HT_SISO_MCS_INV_PLCP },
198 { 22, IWM_RATE_11M_PLCP, IWM_RATE_HT_SISO_MCS_INV_PLCP },
199 { 12, IWM_RATE_6M_PLCP, IWM_RATE_HT_SISO_MCS_0_PLCP },
200 { 18, IWM_RATE_9M_PLCP, IWM_RATE_HT_SISO_MCS_INV_PLCP },
201 { 24, IWM_RATE_12M_PLCP, IWM_RATE_HT_SISO_MCS_1_PLCP },
202 { 26, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_8_PLCP },
203 { 36, IWM_RATE_18M_PLCP, IWM_RATE_HT_SISO_MCS_2_PLCP },
204 { 48, IWM_RATE_24M_PLCP, IWM_RATE_HT_SISO_MCS_3_PLCP },
205 { 52, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_9_PLCP },
206 { 72, IWM_RATE_36M_PLCP, IWM_RATE_HT_SISO_MCS_4_PLCP },
207 { 78, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_10_PLCP },
208 { 96, IWM_RATE_48M_PLCP, IWM_RATE_HT_SISO_MCS_5_PLCP },
209 { 104, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_11_PLCP },
210 { 108, IWM_RATE_54M_PLCP, IWM_RATE_HT_SISO_MCS_6_PLCP },
211 { 128, IWM_RATE_INVM_PLCP, IWM_RATE_HT_SISO_MCS_7_PLCP },
212 { 156, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_12_PLCP },
213 { 208, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_13_PLCP },
214 { 234, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_14_PLCP },
215 { 260, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_15_PLCP },
216};
217#define IWM_RIDX_CCK 0
218#define IWM_RIDX_OFDM 4
219#define IWM_RIDX_MAX (nitems(iwm_rates)-1)
220#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
221#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
222#define IWM_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
223
224/* Convert an MCS index into an iwm_rates[] index. */
225const int iwm_mcs2ridx[] = {
226 IWM_RATE_MCS_0_INDEX,
227 IWM_RATE_MCS_1_INDEX,
228 IWM_RATE_MCS_2_INDEX,
229 IWM_RATE_MCS_3_INDEX,
230 IWM_RATE_MCS_4_INDEX,
231 IWM_RATE_MCS_5_INDEX,
232 IWM_RATE_MCS_6_INDEX,
233 IWM_RATE_MCS_7_INDEX,
234 IWM_RATE_MCS_8_INDEX,
235 IWM_RATE_MCS_9_INDEX,
236 IWM_RATE_MCS_10_INDEX,
237 IWM_RATE_MCS_11_INDEX,
238 IWM_RATE_MCS_12_INDEX,
239 IWM_RATE_MCS_13_INDEX,
240 IWM_RATE_MCS_14_INDEX,
241 IWM_RATE_MCS_15_INDEX,
242};
243
244struct iwm_nvm_section {
245 uint16_t length;
246 uint8_t *data;
247};
248
249int iwm_is_mimo_ht_plcp(uint8_t);
250int iwm_is_mimo_mcs(int);
251int iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
252int iwm_firmware_store_section(struct iwm_softc *, enum iwm_ucode_type,
253 uint8_t *, size_t);
254int iwm_set_default_calib(struct iwm_softc *, const void *);
255void iwm_fw_info_free(struct iwm_fw_info *);
256void iwm_fw_version_str(char *, size_t, uint32_t, uint32_t, uint32_t);
257int iwm_read_firmware(struct iwm_softc *);
258uint32_t iwm_read_prph_unlocked(struct iwm_softc *, uint32_t);
259uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
260void iwm_write_prph_unlocked(struct iwm_softc *, uint32_t, uint32_t);
261void iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
262int iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
263int iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
264int iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
265int iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
266int iwm_nic_lock(struct iwm_softc *);
267void iwm_nic_assert_locked(struct iwm_softc *);
268void iwm_nic_unlock(struct iwm_softc *);
269int iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
270 uint32_t);
271int iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
272int iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
273int iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *, bus_size_t,
274 bus_size_t);
275void iwm_dma_contig_free(struct iwm_dma_info *);
276int iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
277void iwm_disable_rx_dma(struct iwm_softc *);
278void iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
279void iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
280int iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *, int);
281void iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
282void iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
283void iwm_enable_rfkill_int(struct iwm_softc *);
284int iwm_check_rfkill(struct iwm_softc *);
285void iwm_enable_interrupts(struct iwm_softc *);
286void iwm_enable_fwload_interrupt(struct iwm_softc *);
287void iwm_restore_interrupts(struct iwm_softc *);
288void iwm_disable_interrupts(struct iwm_softc *);
289void iwm_ict_reset(struct iwm_softc *);
290int iwm_set_hw_ready(struct iwm_softc *);
291int iwm_prepare_card_hw(struct iwm_softc *);
292void iwm_apm_config(struct iwm_softc *);
293int iwm_apm_init(struct iwm_softc *);
294void iwm_apm_stop(struct iwm_softc *);
295int iwm_allow_mcast(struct iwm_softc *);
296void iwm_init_msix_hw(struct iwm_softc *);
297void iwm_conf_msix_hw(struct iwm_softc *, int);
298int iwm_clear_persistence_bit(struct iwm_softc *);
299int iwm_start_hw(struct iwm_softc *);
300void iwm_stop_device(struct iwm_softc *);
301void iwm_nic_config(struct iwm_softc *);
302int iwm_nic_rx_init(struct iwm_softc *);
303int iwm_nic_rx_legacy_init(struct iwm_softc *);
304int iwm_nic_rx_mq_init(struct iwm_softc *);
305int iwm_nic_tx_init(struct iwm_softc *);
306int iwm_nic_init(struct iwm_softc *);
307int iwm_enable_ac_txq(struct iwm_softc *, int, int);
308int iwm_enable_txq(struct iwm_softc *, int, int, int, int, uint8_t,
309 uint16_t);
310int iwm_disable_txq(struct iwm_softc *, int, int, uint8_t);
311int iwm_post_alive(struct iwm_softc *);
312struct iwm_phy_db_entry *iwm_phy_db_get_section(struct iwm_softc *, uint16_t,
313 uint16_t);
314int iwm_phy_db_set_section(struct iwm_softc *,
315 struct iwm_calib_res_notif_phy_db *);
316int iwm_is_valid_channel(uint16_t);
317uint8_t iwm_ch_id_to_ch_index(uint16_t);
318uint16_t iwm_channel_id_to_papd(uint16_t);
319uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
320int iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t, uint8_t **,
321 uint16_t *, uint16_t);
322int iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t, void *);
323int iwm_phy_db_send_all_channel_groups(struct iwm_softc *, uint16_t,
324 uint8_t);
325int iwm_send_phy_db_data(struct iwm_softc *);
326void iwm_protect_session(struct iwm_softc *, struct iwm_node *, uint32_t,
327 uint32_t);
328void iwm_unprotect_session(struct iwm_softc *, struct iwm_node *);
329int iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t, uint16_t,
330 uint8_t *, uint16_t *);
331int iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
332 uint16_t *, size_t);
333void iwm_init_channel_map(struct iwm_softc *, const uint16_t * const,
334 const uint8_t *nvm_channels, int nchan);
335int iwm_mimo_enabled(struct iwm_softc *);
336void iwm_setup_ht_rates(struct iwm_softc *);
337void iwm_mac_ctxt_task(void *);
338void iwm_phy_ctxt_task(void *);
339void iwm_updateprot(struct ieee80211com *);
340void iwm_updateslot(struct ieee80211com *);
341void iwm_updateedca(struct ieee80211com *);
342void iwm_updatechan(struct ieee80211com *);
343void iwm_init_reorder_buffer(struct iwm_reorder_buffer *, uint16_t,
344 uint16_t);
345void iwm_clear_reorder_buffer(struct iwm_softc *, struct iwm_rxba_data *);
346int iwm_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *,
347 uint8_t);
348void iwm_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *,
349 uint8_t);
350void iwm_rx_ba_session_expired(void *);
351void iwm_reorder_timer_expired(void *);
352int iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *, uint8_t,
353 uint16_t, uint16_t, int, int);
354int iwm_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
355 uint8_t);
356void iwm_ampdu_tx_stop(struct ieee80211com *, struct ieee80211_node *,
357 uint8_t);
358void iwm_ba_task(void *);
359
360int iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
361 const uint16_t *, const uint16_t *,
362 const uint16_t *, const uint16_t *,
363 const uint16_t *, int);
364void iwm_set_hw_address_8000(struct iwm_softc *, struct iwm_nvm_data *,
365 const uint16_t *, const uint16_t *);
366int iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
367int iwm_nvm_init(struct iwm_softc *);
368int iwm_firmware_load_sect(struct iwm_softc *, uint32_t, const uint8_t *,
369 uint32_t);
370int iwm_firmware_load_chunk(struct iwm_softc *, uint32_t, const uint8_t *,
371 uint32_t);
372int iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
373int iwm_load_cpu_sections_8000(struct iwm_softc *, struct iwm_fw_sects *,
374 int , int *);
375int iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
376int iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
377int iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
378int iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
379int iwm_send_phy_cfg_cmd(struct iwm_softc *);
380int iwm_load_ucode_wait_alive(struct iwm_softc *, enum iwm_ucode_type);
381int iwm_send_dqa_cmd(struct iwm_softc *);
382int iwm_run_init_mvm_ucode(struct iwm_softc *, int);
383int iwm_config_ltr(struct iwm_softc *);
384int iwm_rx_addbuf(struct iwm_softc *, int, int);
385int iwm_get_signal_strength(struct iwm_softc *, struct iwm_rx_phy_info *);
386int iwm_rxmq_get_signal_strength(struct iwm_softc *, struct iwm_rx_mpdu_desc *);
387void iwm_rx_rx_phy_cmd(struct iwm_softc *, struct iwm_rx_packet *,
388 struct iwm_rx_data *);
389int iwm_get_noise(const struct iwm_statistics_rx_non_phy *);
390int iwm_rx_hwdecrypt(struct iwm_softc *, struct mbuf *, uint32_t,
391 struct ieee80211_rxinfo *);
392int iwm_ccmp_decap(struct iwm_softc *, struct mbuf *,
393 struct ieee80211_node *, struct ieee80211_rxinfo *);
394void iwm_rx_frame(struct iwm_softc *, struct mbuf *, int, uint32_t, int, int,
395 uint32_t, struct ieee80211_rxinfo *, struct mbuf_list *);
396void iwm_ht_single_rate_control(struct iwm_softc *, struct ieee80211_node *,
397 int, uint8_t, int);
398void iwm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *,
399 struct iwm_node *, int, int);
400void iwm_txd_done(struct iwm_softc *, struct iwm_tx_data *);
401void iwm_txq_advance(struct iwm_softc *, struct iwm_tx_ring *, int);
402void iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
403 struct iwm_rx_data *);
404void iwm_clear_oactive(struct iwm_softc *, struct iwm_tx_ring *);
405void iwm_ampdu_rate_control(struct iwm_softc *, struct ieee80211_node *,
406 struct iwm_tx_ring *, int, uint16_t, uint16_t);
407void iwm_rx_compressed_ba(struct iwm_softc *, struct iwm_rx_packet *);
408void iwm_rx_bmiss(struct iwm_softc *, struct iwm_rx_packet *,
409 struct iwm_rx_data *);
410int iwm_binding_cmd(struct iwm_softc *, struct iwm_node *, uint32_t);
411int iwm_phy_ctxt_cmd_uhb(struct iwm_softc *, struct iwm_phy_ctxt *, uint8_t,
412 uint8_t, uint32_t, uint32_t, uint8_t);
413void iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *,
414 struct iwm_phy_context_cmd *, uint32_t, uint32_t);
415void iwm_phy_ctxt_cmd_data(struct iwm_softc *, struct iwm_phy_context_cmd *,
416 struct ieee80211_channel *, uint8_t, uint8_t, uint8_t);
417int iwm_phy_ctxt_cmd(struct iwm_softc *, struct iwm_phy_ctxt *, uint8_t,
418 uint8_t, uint32_t, uint32_t, uint8_t);
419int iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
420int iwm_send_cmd_pdu(struct iwm_softc *, uint32_t, uint32_t, uint16_t,
421 const void *);
422int iwm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
423 uint32_t *);
424int iwm_send_cmd_pdu_status(struct iwm_softc *, uint32_t, uint16_t,
425 const void *, uint32_t *);
426void iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
427void iwm_cmd_done(struct iwm_softc *, int, int, int);
428void iwm_update_sched(struct iwm_softc *, int, int, uint8_t, uint16_t);
429void iwm_reset_sched(struct iwm_softc *, int, int, uint8_t);
430const struct iwm_rate *iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
431 struct ieee80211_frame *, struct iwm_tx_cmd *);
432int iwm_tx(struct iwm_softc *, struct mbuf *, struct ieee80211_node *, int);
433int iwm_flush_tx_path(struct iwm_softc *, int);
434int iwm_wait_tx_queues_empty(struct iwm_softc *);
435void iwm_led_enable(struct iwm_softc *);
436void iwm_led_disable(struct iwm_softc *);
437int iwm_led_is_enabled(struct iwm_softc *);
438void iwm_led_blink_timeout(void *);
439void iwm_led_blink_start(struct iwm_softc *);
440void iwm_led_blink_stop(struct iwm_softc *);
441int iwm_beacon_filter_send_cmd(struct iwm_softc *,
442 struct iwm_beacon_filter_cmd *);
443void iwm_beacon_filter_set_cqm_params(struct iwm_softc *, struct iwm_node *,
444 struct iwm_beacon_filter_cmd *);
445int iwm_update_beacon_abort(struct iwm_softc *, struct iwm_node *, int);
446void iwm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
447 struct iwm_mac_power_cmd *);
448int iwm_power_mac_update_mode(struct iwm_softc *, struct iwm_node *);
449int iwm_power_update_device(struct iwm_softc *);
450int iwm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
451int iwm_disable_beacon_filter(struct iwm_softc *);
452int iwm_add_sta_cmd(struct iwm_softc *, struct iwm_node *, int);
453int iwm_add_aux_sta(struct iwm_softc *);
454int iwm_drain_sta(struct iwm_softc *sc, struct iwm_node *, int);
455int iwm_flush_sta(struct iwm_softc *, struct iwm_node *);
456int iwm_rm_sta_cmd(struct iwm_softc *, struct iwm_node *);
457uint16_t iwm_scan_rx_chain(struct iwm_softc *);
458uint32_t iwm_scan_rate_n_flags(struct iwm_softc *, int, int);
459uint8_t iwm_lmac_scan_fill_channels(struct iwm_softc *,
460 struct iwm_scan_channel_cfg_lmac *, int, int);
461int iwm_fill_probe_req(struct iwm_softc *, struct iwm_scan_probe_req *);
462int iwm_lmac_scan(struct iwm_softc *, int);
463int iwm_config_umac_scan(struct iwm_softc *);
464int iwm_umac_scan(struct iwm_softc *, int);
465void iwm_mcc_update(struct iwm_softc *, struct iwm_mcc_chub_notif *);
466uint8_t iwm_ridx2rate(struct ieee80211_rateset *, int);
467int iwm_rval2ridx(int);
468void iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *, int *);
469void iwm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
470 struct iwm_mac_ctx_cmd *, uint32_t);
471void iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
472 struct iwm_mac_data_sta *, int);
473int iwm_mac_ctxt_cmd(struct iwm_softc *, struct iwm_node *, uint32_t, int);
474int iwm_update_quotas(struct iwm_softc *, struct iwm_node *, int);
475void iwm_add_task(struct iwm_softc *, struct taskq *, struct task *);
476void iwm_del_task(struct iwm_softc *, struct taskq *, struct task *);
477int iwm_scan(struct iwm_softc *);
478int iwm_bgscan(struct ieee80211com *);
479void iwm_bgscan_done(struct ieee80211com *,
480 struct ieee80211_node_switch_bss_arg *, size_t);
481void iwm_bgscan_done_task(void *);
482int iwm_umac_scan_abort(struct iwm_softc *);
483int iwm_lmac_scan_abort(struct iwm_softc *);
484int iwm_scan_abort(struct iwm_softc *);
485int iwm_phy_ctxt_update(struct iwm_softc *, struct iwm_phy_ctxt *,
486 struct ieee80211_channel *, uint8_t, uint8_t, uint32_t, uint8_t);
487int iwm_auth(struct iwm_softc *);
488int iwm_deauth(struct iwm_softc *);
489int iwm_run(struct iwm_softc *);
490int iwm_run_stop(struct iwm_softc *);
491struct ieee80211_node *iwm_node_alloc(struct ieee80211com *);
492int iwm_set_key_v1(struct ieee80211com *, struct ieee80211_node *,
493 struct ieee80211_key *);
494int iwm_set_key(struct ieee80211com *, struct ieee80211_node *,
495 struct ieee80211_key *);
496void iwm_delete_key_v1(struct ieee80211com *,
497 struct ieee80211_node *, struct ieee80211_key *);
498void iwm_delete_key(struct ieee80211com *,
499 struct ieee80211_node *, struct ieee80211_key *);
500void iwm_calib_timeout(void *);
501void iwm_setrates(struct iwm_node *, int);
502int iwm_media_change(struct ifnet *);
503void iwm_newstate_task(void *);
504int iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
505void iwm_endscan(struct iwm_softc *);
506void iwm_fill_sf_command(struct iwm_softc *, struct iwm_sf_cfg_cmd *,
507 struct ieee80211_node *);
508int iwm_sf_config(struct iwm_softc *, int);
509int iwm_send_bt_init_conf(struct iwm_softc *);
510int iwm_send_soc_conf(struct iwm_softc *);
511int iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
512int iwm_send_temp_report_ths_cmd(struct iwm_softc *);
513void iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
514void iwm_free_fw_paging(struct iwm_softc *);
515int iwm_save_fw_paging(struct iwm_softc *, const struct iwm_fw_sects *);
516int iwm_send_paging_cmd(struct iwm_softc *, const struct iwm_fw_sects *);
517int iwm_init_hw(struct iwm_softc *);
518int iwm_init(struct ifnet *);
519void iwm_start(struct ifnet *);
520void iwm_stop(struct ifnet *);
521void iwm_watchdog(struct ifnet *);
522int iwm_ioctl(struct ifnet *, u_long, caddr_t);
523const char *iwm_desc_lookup(uint32_t);
524void iwm_nic_error(struct iwm_softc *);
525void iwm_dump_driver_status(struct iwm_softc *);
526void iwm_nic_umac_error(struct iwm_softc *);
527void iwm_rx_mpdu(struct iwm_softc *, struct mbuf *, void *, size_t,
528 struct mbuf_list *);
529void iwm_flip_address(uint8_t *);
530int iwm_detect_duplicate(struct iwm_softc *, struct mbuf *,
531 struct iwm_rx_mpdu_desc *, struct ieee80211_rxinfo *);
532int iwm_is_sn_less(uint16_t, uint16_t, uint16_t);
533void iwm_release_frames(struct iwm_softc *, struct ieee80211_node *,
534 struct iwm_rxba_data *, struct iwm_reorder_buffer *, uint16_t,
535 struct mbuf_list *);
536int iwm_oldsn_workaround(struct iwm_softc *, struct ieee80211_node *,
537 int, struct iwm_reorder_buffer *, uint32_t, uint32_t);
538int iwm_rx_reorder(struct iwm_softc *, struct mbuf *, int,
539 struct iwm_rx_mpdu_desc *, int, int, uint32_t,
540 struct ieee80211_rxinfo *, struct mbuf_list *);
541void iwm_rx_mpdu_mq(struct iwm_softc *, struct mbuf *, void *, size_t,
542 struct mbuf_list *);
543int iwm_rx_pkt_valid(struct iwm_rx_packet *);
544void iwm_rx_pkt(struct iwm_softc *, struct iwm_rx_data *,
545 struct mbuf_list *);
546void iwm_notif_intr(struct iwm_softc *);
547int iwm_intr(void *);
548int iwm_intr_msix(void *);
549int iwm_match(struct device *, void *, void *);
550int iwm_preinit(struct iwm_softc *);
551void iwm_attach_hook(struct device *);
552void iwm_attach(struct device *, struct device *, void *);
553void iwm_init_task(void *);
554int iwm_activate(struct device *, int);
555void iwm_resume(struct iwm_softc *);
556int iwm_wakeup(struct iwm_softc *);
557
558#if NBPFILTER > 0
559void iwm_radiotap_attach(struct iwm_softc *);
560#endif
561
562uint8_t
563iwm_lookup_cmd_ver(struct iwm_softc *sc, uint8_t grp, uint8_t cmd)
564{
565 const struct iwm_fw_cmd_version *entry;
566 int i;
567
568 for (i = 0; i < sc->n_cmd_versions; i++) {
569 entry = &sc->cmd_versions[i];
570 if (entry->group == grp && entry->cmd == cmd)
571 return entry->cmd_ver;
572 }
573
574 return IWM_FW_CMD_VER_UNKNOWN;
575}
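
A minimal usage sketch (not part of if_iwm.c; the group/command names and the version threshold are illustrative assumptions): callers compare the returned version against a cutoff and treat IWM_FW_CMD_VER_UNKNOWN as "oldest format":

    uint8_t cmdver = iwm_lookup_cmd_ver(sc, IWM_LONG_GROUP, IWM_SCAN_REQ_UMAC);
    if (cmdver == IWM_FW_CMD_VER_UNKNOWN || cmdver < 12) {
        /* fall back to the older command layout */
    }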
576
577int
578iwm_is_mimo_ht_plcp(uint8_t ht_plcp)
579{
580 return (ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP &&
581 (ht_plcp & IWM_RATE_HT_MCS_NSS_MSK));
582}
583
584int
585iwm_is_mimo_mcs(int mcs)
586{
587 int ridx = iwm_mcs2ridx[mcs];
588 return iwm_is_mimo_ht_plcp(iwm_rates[ridx].ht_plcp);
589
590}
591
592int
593iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
594{
595 struct iwm_fw_cscheme_list *l = (void *)data;
596
597 if (dlen < sizeof(*l) ||
598 dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
599 return EINVAL;
600
601 /* we don't actually store anything for now, always use s/w crypto */
602
603 return 0;
604}
605
606int
607iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type,
608 uint8_t *data, size_t dlen)
609{
610 struct iwm_fw_sects *fws;
611 struct iwm_fw_onesect *fwone;
612
613 if (type >= IWM_UCODE_TYPE_MAX)
614 return EINVAL;
615 if (dlen < sizeof(uint32_t))
616 return EINVAL;
617
618 fws = &sc->sc_fw.fw_sects[type];
619 if (fws->fw_count >= IWM_UCODE_SECT_MAX)
620 return EINVAL;
621
622 fwone = &fws->fw_sect[fws->fw_count];
623
624 /* first 32 bits are the device load offset */
625 memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
626
627 /* rest is data */
628 fwone->fws_data = data + sizeof(uint32_t);
629 fwone->fws_len = dlen - sizeof(uint32_t);
630
631 fws->fw_count++;
632 fws->fw_totlen += fwone->fws_len;
633
634 return 0;
635}
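
Each section TLV handed to this function has a simple layout: a 4-byte little-endian device load offset followed by the image bytes. A sketch of the split performed above:

    /*
     * data: [ fws_devoff (4 bytes) | section image (dlen - 4 bytes) ]
     *                                ^ fwone->fws_data, fwone->fws_len
     */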
636
637#define IWM_DEFAULT_SCAN_CHANNELS 40
638/* Newer firmware might support more channels. Raise this value if needed. */
639#define IWM_MAX_SCAN_CHANNELS 52 /* as of 8265-34 firmware image */
640
641struct iwm_tlv_calib_data {
642 uint32_t ucode_type;
643 struct iwm_tlv_calib_ctrl calib;
644} __packed;
645
646int
647iwm_set_default_calib(struct iwm_softc *sc, const void *data)
648{
649 const struct iwm_tlv_calib_data *def_calib = data;
650 uint32_t ucode_type = le32toh(def_calib->ucode_type);
651
652 if (ucode_type >= IWM_UCODE_TYPE_MAX)
653 return EINVAL;
654
655 sc->sc_default_calib[ucode_type].flow_trigger =
656 def_calib->calib.flow_trigger;
657 sc->sc_default_calib[ucode_type].event_trigger =
658 def_calib->calib.event_trigger;
659
660 return 0;
661}
662
663void
664iwm_fw_info_free(struct iwm_fw_info *fw)
665{
666 free(fw->fw_rawdata, M_DEVBUF, fw->fw_rawsize);
667 fw->fw_rawdata = NULL;
668 fw->fw_rawsize = 0;
669 /* don't touch fw->fw_status */
670 memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
671}
672
673void
674iwm_fw_version_str(char *buf, size_t bufsize,
675 uint32_t major, uint32_t minor, uint32_t api)
676{
677 /*
678 * Starting with major version 35 the Linux driver prints the minor
679 * version in hexadecimal.
680 */
681 if (major >= 35)
682 snprintf(buf, bufsize, "%u.%08x.%u", major, minor, api);
683 else
684 snprintf(buf, bufsize, "%u.%u.%u", major, minor, api);
685}
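
For illustration (version numbers invented), the two branches format like this:

    char buf[32];
    iwm_fw_version_str(buf, sizeof(buf), 22, 361476, 0);     /* "22.361476.0" */
    iwm_fw_version_str(buf, sizeof(buf), 36, 0xca7b901d, 0); /* "36.ca7b901d.0" */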
686
687int
688iwm_read_firmware(struct iwm_softc *sc)
689{
690 struct iwm_fw_info *fw = &sc->sc_fw;
691 struct iwm_tlv_ucode_header *uhdr;
692 struct iwm_ucode_tlv tlv;
693 uint32_t tlv_type;
694 uint8_t *data;
695 uint32_t usniffer_img;
696 uint32_t paging_mem_size;
697 int err;
698 size_t len;
699
700 if (fw->fw_status == IWM_FW_STATUS_DONE)
701 return 0;
702
703 while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
704 tsleep_nsec(&sc->sc_fw, 0, "iwmfwp", INFSLP);
705 fw->fw_status = IWM_FW_STATUS_INPROGRESS;
706
707 if (fw->fw_rawdata != NULL)
708 iwm_fw_info_free(fw);
709
710 err = loadfirmware(sc->sc_fwname,
711 (u_char **)&fw->fw_rawdata, &fw->fw_rawsize);
712 if (err) {
713 printf("%s: could not read firmware %s (error %d)\n",
714 DEVNAME(sc), sc->sc_fwname, err);
715 goto out;
716 }
717
718 sc->sc_capaflags = 0;
719 sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
720 memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
721 memset(sc->sc_ucode_api, 0, sizeof(sc->sc_ucode_api));
722 sc->n_cmd_versions = 0;
723
724 uhdr = (void *)fw->fw_rawdata;
725 if (*(uint32_t *)fw->fw_rawdata != 0
726 || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
727 printf("%s: invalid firmware %s\n",
728 DEVNAME(sc), sc->sc_fwname);
729 err = EINVAL;
730 goto out;
731 }
732
733 iwm_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
734 IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
735 IWM_UCODE_MINOR(le32toh(uhdr->ver)),
736 IWM_UCODE_API(le32toh(uhdr->ver)));
737
738 data = uhdr->data;
739 len = fw->fw_rawsize - sizeof(*uhdr);
740
741 while (len >= sizeof(tlv)) {
742 size_t tlv_len;
743 void *tlv_data;
744
745 memcpy(&tlv, data, sizeof(tlv));
746 tlv_len = le32toh(tlv.length);
747 tlv_type = le32toh(tlv.type);
748
749 len -= sizeof(tlv);
750 data += sizeof(tlv);
751 tlv_data = data;
752
753 if (len < tlv_len) {
754 printf("%s: firmware too short: %zu bytes\n",
755 DEVNAME(sc), len);
756 err = EINVAL;
757 goto parse_out;
758 }
759
760 switch (tlv_type) {
761 case IWM_UCODE_TLV_PROBE_MAX_LEN:
762 if (tlv_len < sizeof(uint32_t)) {
763 err = EINVAL;
764 goto parse_out;
765 }
766 sc->sc_capa_max_probe_len
767 = le32toh(*(uint32_t *)tlv_data);
768 if (sc->sc_capa_max_probe_len >
769 IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
770 err = EINVAL;
771 goto parse_out;
772 }
773 break;
774 case IWM_UCODE_TLV_PAN:
775 if (tlv_len) {
776 err = EINVAL;
777 goto parse_out;
778 }
779 sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
780 break;
781 case IWM_UCODE_TLV_FLAGS:
782 if (tlv_len < sizeof(uint32_t)) {
783 err = EINVAL;
784 goto parse_out;
785 }
786 /*
787 * Apparently there can be many flags, but Linux driver
788 * parses only the first one, and so do we.
789 *
790 * XXX: why does this override IWM_UCODE_TLV_PAN?
791 * Intentional or a bug? Observations from
792 * current firmware file:
793 * 1) TLV_PAN is parsed first
794 * 2) TLV_FLAGS contains TLV_FLAGS_PAN
795 * ==> this resets TLV_PAN to itself... hnnnk
796 */
797 sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
798 break;
799 case IWM_UCODE_TLV_CSCHEME:
800 err = iwm_store_cscheme(sc, tlv_data, tlv_len);
801 if (err)
802 goto parse_out;
803 break;
804 case IWM_UCODE_TLV_NUM_OF_CPU: {
805 uint32_t num_cpu;
806 if (tlv_len != sizeof(uint32_t)) {
807 err = EINVAL;
808 goto parse_out;
809 }
810 num_cpu = le32toh(*(uint32_t *)tlv_data);
811 if (num_cpu < 1 || num_cpu > 2) {
812 err = EINVAL;
813 goto parse_out;
814 }
815 break;
816 }
817 case IWM_UCODE_TLV_SEC_RT:
818 err = iwm_firmware_store_section(sc,
819 IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
820 if (err)
821 goto parse_out;
822 break;
823 case IWM_UCODE_TLV_SEC_INIT:
824 err = iwm_firmware_store_section(sc,
825 IWM_UCODE_TYPE_INIT, tlv_data, tlv_len);
826 if (err)
827 goto parse_out;
828 break;
829 case IWM_UCODE_TLV_SEC_WOWLAN:
830 err = iwm_firmware_store_section(sc,
831 IWM_UCODE_TYPE_WOW, tlv_data, tlv_len);
832 if (err)
833 goto parse_out;
834 break;
835 case IWM_UCODE_TLV_DEF_CALIB:
836 if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
837 err = EINVAL;
838 goto parse_out;
839 }
840 err = iwm_set_default_calib(sc, tlv_data);
841 if (err)
842 goto parse_out;
843 break;
844 case IWM_UCODE_TLV_PHY_SKU:
845 if (tlv_len != sizeof(uint32_t)) {
846 err = EINVAL;
847 goto parse_out;
848 }
849 sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
850 break;
851
852 case IWM_UCODE_TLV_API_CHANGES_SET: {
853 struct iwm_ucode_api *api;
854 int idx, i;
855 if (tlv_len != sizeof(*api)) {
856 err = EINVAL;
857 goto parse_out;
858 }
859 api = (struct iwm_ucode_api *)tlv_data;
860 idx = le32toh(api->api_index);
861 if (idx >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
862 err = EINVAL;
863 goto parse_out;
864 }
865 for (i = 0; i < 32; i++) {
866 if ((le32toh(api->api_flags) & (1 << i)) == 0)
867 continue;
868 setbit(sc->sc_ucode_api, i + (32 * idx));
869 }
870 break;
871 }
872
873 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
874 struct iwm_ucode_capa *capa;
875 int idx, i;
876 if (tlv_len != sizeof(*capa)) {
877 err = EINVAL;
878 goto parse_out;
879 }
880 capa = (struct iwm_ucode_capa *)tlv_data;
881 idx = le32toh(capa->api_index);
882 if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
883 goto parse_out;
884 }
885 for (i = 0; i < 32; i++) {
886 if ((le32toh(capa->api_capa) & (1 << i)) == 0)
887 continue;
888 setbit(sc->sc_enabled_capa, i + (32 * idx));
889 }
890 break;
891 }
892
893 case IWM_UCODE_TLV_CMD_VERSIONS:
894 if (tlv_len % sizeof(struct iwm_fw_cmd_version)) {
895 tlv_len /= sizeof(struct iwm_fw_cmd_version);
896 tlv_len *= sizeof(struct iwm_fw_cmd_version);
897 }
898 if (sc->n_cmd_versions != 0) {
899 err = EINVAL;
900 goto parse_out;
901 }
902 if (tlv_len > sizeof(sc->cmd_versions)) {
903 err = EINVAL;
904 goto parse_out;
905 }
906 memcpy(&sc->cmd_versions[0], tlv_data, tlv_len);
907 sc->n_cmd_versions = tlv_len / sizeof(struct iwm_fw_cmd_version);
908 break;
909
910 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
911 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
912 /* ignore, not used by current driver */
913 break;
914
915 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
916 err = iwm_firmware_store_section(sc,
917 IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
918 tlv_len);
919 if (err)
920 goto parse_out;
921 break;
922
923 case IWM_UCODE_TLV_PAGING:
924 if (tlv_len != sizeof(uint32_t)) {
925 err = EINVAL;
926 goto parse_out;
927 }
928 paging_mem_size = le32toh(*(const uint32_t *)tlv_data);
929
930 DPRINTF(("%s: Paging: paging enabled (size = %u bytes)\n",
931 DEVNAME(sc), paging_mem_size));
932 if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
933 printf("%s: Driver only supports up to %u"
934 " bytes for paging image (%u requested)\n",
935 DEVNAME(sc), IWM_MAX_PAGING_IMAGE_SIZE,
936 paging_mem_size);
937 err = EINVAL;
938 goto out;
939 }
940 if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
941 printf("%s: Paging: image isn't multiple of %u\n",
942 DEVNAME(sc), IWM_FW_PAGING_SIZE);
943 err = EINVAL;
944 goto out;
945 }
946
947 fw->fw_sects[IWM_UCODE_TYPE_REGULAR].paging_mem_size =
948 paging_mem_size;
949 usniffer_img = IWM_UCODE_TYPE_REGULAR_USNIFFER;
950 fw->fw_sects[usniffer_img].paging_mem_size =
951 paging_mem_size;
952 break;
953
954 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
955 if (tlv_len != sizeof(uint32_t)) {
956 err = EINVAL;
957 goto parse_out;
958 }
959 sc->sc_capa_n_scan_channels =
960 le32toh(*(uint32_t *)tlv_data);
961 if (sc->sc_capa_n_scan_channels > IWM_MAX_SCAN_CHANNELS) {
962 err = ERANGE;
963 goto parse_out;
964 }
965 break;
966
967 case IWM_UCODE_TLV_FW_VERSION:
968 if (tlv_len != sizeof(uint32_t) * 3) {
969 err = EINVAL;
970 goto parse_out;
971 }
972
973 iwm_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
974 le32toh(((uint32_t *)tlv_data)[0]),
975 le32toh(((uint32_t *)tlv_data)[1]),
976 le32toh(((uint32_t *)tlv_data)[2]));
977 break;
978
979 case IWM_UCODE_TLV_FW_DBG_DEST:
980 case IWM_UCODE_TLV_FW_DBG_CONF:
981 case IWM_UCODE_TLV_UMAC_DEBUG_ADDRS:
982 case IWM_UCODE_TLV_LMAC_DEBUG_ADDRS:
983 case IWM_UCODE_TLV_TYPE_DEBUG_INFO:
984 case IWM_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
985 case IWM_UCODE_TLV_TYPE_HCMD:
986 case IWM_UCODE_TLV_TYPE_REGIONS:
987 case IWM_UCODE_TLV_TYPE_TRIGGERS:
988 break;
989
990 case IWM_UCODE_TLV_HW_TYPE:
991 break;
992
993 case IWM_UCODE_TLV_FW_MEM_SEG:
994 break;
995
996 /* undocumented TLVs found in iwm-9000-43 image */
997 case 0x1000003:
998 case 0x1000004:
999 break;
1000
1001 default:
1002 err = EINVAL;
1003 goto parse_out;
1004 }
1005
1006 len -= roundup(tlv_len, 4);
1007 data += roundup(tlv_len, 4);
1008 }
1009
1010 KASSERT(err == 0);
1011
1012 parse_out:
1013 if (err) {
1014 printf("%s: firmware parse error %d, "
1015 "section type %d\n", DEVNAME(sc), err, tlv_type);
1016 }
1017
1018 out:
1019 if (err) {
1020 fw->fw_status = IWM_FW_STATUS_NONE;
1021 if (fw->fw_rawdata != NULL)
1022 iwm_fw_info_free(fw);
1023 } else
1024 fw->fw_status = IWM_FW_STATUS_DONE;
1025 wakeup(&sc->sc_fw);
1026
1027 return err;
1028}
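
The loop above walks TLV records that follow struct iwm_tlv_ucode_header; a sketch of the record layout it assumes, with each record padded to a 4-byte boundary:

    /*
     * data -> +----------+------------+-------------------+---------+
     *         | tlv.type | tlv.length | payload (tlv_len) | pad 0-3 |
     *         +----------+------------+-------------------+---------+
     * next record = data + sizeof(tlv) + roundup(tlv_len, 4)
     */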
1029
1030uint32_t
1031iwm_read_prph_unlocked(struct iwm_softc *sc, uint32_t addr)
1032{
1033 IWM_WRITE(sc,
1034 IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
1035 IWM_BARRIER_READ_WRITE(sc);
1036 return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
1037}
1038
1039uint32_t
1040iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
1041{
1042 iwm_nic_assert_locked(sc);
1043 return iwm_read_prph_unlocked(sc, addr);
1044}
1045
1046void
1047iwm_write_prph_unlocked(struct iwm_softc *sc, uint32_t addr, uint32_t val)
1048{
1049 IWM_WRITE(sc,
1050 IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
1051 IWM_BARRIER_WRITE(sc);
1052 IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
1053}
1054
1055void
1056iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
1057{
1058 iwm_nic_assert_locked(sc);
1059 iwm_write_prph_unlocked(sc, addr, val);
1060}
1061
1062void
1063iwm_write_prph64(struct iwm_softc *sc, uint64_t addr, uint64_t val)
1064{
1065 iwm_write_prph(sc, (uint32_t)addr, val & 0xffffffff);
1066 iwm_write_prph(sc, (uint32_t)addr + 4, val >> 32);
1067}
1068
1069int
1070iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
1071{
1072 int offs, err = 0;
1073 uint32_t *vals = buf;
1074
1075 if (iwm_nic_lock(sc)) {
1076 IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
1077 for (offs = 0; offs < dwords; offs++)
1078 vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
1079 iwm_nic_unlock(sc);
1080 } else {
1081 err = EBUSY;
1082 }
1083 return err;
1084}
1085
1086int
1087iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
1088{
1089 int offs;
1090 const uint32_t *vals = buf;
1091
1092 if (iwm_nic_lock(sc)) {
1093 IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
1094 /* WADDR auto-increments */
1095 for (offs = 0; offs < dwords; offs++) {
1096 uint32_t val = vals ? vals[offs] : 0;
1097 IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
1098 }
1099 iwm_nic_unlock(sc);
1100 } else {
1101 return EBUSY;
1102 }
1103 return 0;
1104}
1105
1106int
1107iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
1108{
1109 return iwm_write_mem(sc, addr, &val, 1);
1110}
1111
1112int
1113iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
1114 int timo)
1115{
1116 for (;;) {
1117 if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
1118 return 1;
1119 }
1120 if (timo < 10) {
1121 return 0;
1122 }
1123 timo -= 10;
1124 DELAY(10);
1125 }
1126}
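
The timeout is expressed in microseconds and consumed in 10-microsecond steps. A hedged usage sketch, reusing CSR names that appear elsewhere in this driver:

    /* wait up to 100 usec for the bus master to report disabled */
    if (!iwm_poll_bit(sc, IWM_CSR_RESET,
        IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
        IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
        printf("%s: timeout waiting for master\n", DEVNAME(sc));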
1127
1128int
1129iwm_nic_lock(struct iwm_softc *sc)
1130{
1131 if (sc->sc_nic_locks > 0) {
1132 iwm_nic_assert_locked(sc);
1133 sc->sc_nic_locks++;
1134 return 1; /* already locked */
1135 }
1136
1137 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
1138 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1139
1140 if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000)
1141 DELAY(2);
1142
1143 if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1144 IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
1145 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
1146 | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
1147 sc->sc_nic_locks++;
1148 return 1;
1149 }
1150
1151 printf("%s: acquiring device failed\n", DEVNAME(sc));
1152 return 0;
1153}
1154
1155void
1156iwm_nic_assert_locked(struct iwm_softc *sc)
1157{
1158 if (sc->sc_nic_locks <= 0)
1159 panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
1160}
1161
1162void
1163iwm_nic_unlock(struct iwm_softc *sc)
1164{
1165 if (sc->sc_nic_locks > 0) {
1166 if (--sc->sc_nic_locks == 0)
1167 IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1168 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1169 } else
1170 printf("%s: NIC already unlocked\n", DEVNAME(sc));
1171}
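
Because the lock is reference-counted, every successful iwm_nic_lock() must be balanced by exactly one iwm_nic_unlock(). The pattern used throughout this file (iwm_read_mem() above is a real instance):

    if (iwm_nic_lock(sc)) {
        /* ... access CSR/periphery registers ... */
        iwm_nic_unlock(sc);
    } else
        err = EBUSY;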
1172
1173int
1174iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
1175 uint32_t mask)
1176{
1177 uint32_t val;
1178
1179 if (iwm_nic_lock(sc)) {
1180 val = iwm_read_prph(sc, reg) & mask;
1181 val |= bits;
1182 iwm_write_prph(sc, reg, val);
1183 iwm_nic_unlock(sc);
1184 return 0;
1185 }
1186 return EBUSY;
1187}
1188
1189int
1190iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
1191{
1192 return iwm_set_bits_mask_prph(sc, reg, bits, ~0);
1193}
1194
1195int
1196iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
1197{
1198 return iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
1199}
1200
1201int
1202iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
1203 bus_size_t size, bus_size_t alignment)
1204{
1205 int nsegs, err;
1206 caddr_t va;
1207
1208 dma->tag = tag;
1209 dma->size = size;
1210
1211 err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
1212 &dma->map);
1213 if (err)
1214 goto fail;
1215
1216 err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
1217 BUS_DMA_NOWAIT);
1218 if (err)
1219 goto fail;
1220
1221 err = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
1222 BUS_DMA_NOWAIT);
1223 if (err)
1224 goto fail;
1225 dma->vaddr = va;
1226
1227 err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
1228 BUS_DMA_NOWAIT);
1229 if (err)
1230 goto fail;
1231
1232 memset(dma->vaddr, 0, size);
1233 bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
1234 dma->paddr = dma->map->dm_segs[0].ds_addr;
1235
1236 return 0;
1237
1238fail: iwm_dma_contig_free(dma);
1239 return err;
1240}
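
A short sketch of the alloc/free pairing this helper supports (the size and alignment here are illustrative; the ring setup code below passes hardware-mandated alignments such as 256):

    struct iwm_dma_info dma;
    if (iwm_dma_contig_alloc(sc->sc_dmat, &dma, 4096, 256) == 0) {
        /* the device sees dma.paddr; the driver writes via dma.vaddr */
        iwm_dma_contig_free(&dma);
    }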
1241
1242void
1243iwm_dma_contig_free(struct iwm_dma_info *dma)
1244{
1245 if (dma->map != NULL) {
1246 if (dma->vaddr != NULL) {
1247 bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
1248 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1249 bus_dmamap_unload(dma->tag, dma->map);
1250 bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
1251 bus_dmamem_free(dma->tag, &dma->seg, 1);
1252 dma->vaddr = NULL;
1253 }
1254 bus_dmamap_destroy(dma->tag, dma->map);
1255 dma->map = NULL;
1256 }
1257}
1258
1259int
1260iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1261{
1262 bus_size_t size;
1263 size_t descsz;
1264 int count, i, err;
1265
1266 ring->cur = 0;
1267
1268 if (sc->sc_mqrx_supported) {
1269 count = IWM_RX_MQ_RING_COUNT;
1270 descsz = sizeof(uint64_t);
1271 } else {
1272 count = IWM_RX_RING_COUNT;
1273 descsz = sizeof(uint32_t);
1274 }
1275
1276 /* Allocate RX descriptors (256-byte aligned). */
1277 size = count * descsz;
1278 err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size, 256);
1279 if (err) {
1280 printf("%s: could not allocate RX ring DMA memory\n",
1281 DEVNAME(sc));
1282 goto fail;
1283 }
1284 ring->desc = ring->free_desc_dma.vaddr;
1285
1286 /* Allocate RX status area (16-byte aligned). */
1287 err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1288 sizeof(*ring->stat), 16);
1289 if (err) {
1290 printf("%s: could not allocate RX status DMA memory\n",
1291 DEVNAME(sc));
1292 goto fail;
1293 }
1294 ring->stat = ring->stat_dma.vaddr;
1295
1296 if (sc->sc_mqrx_supported) {
1297 size = count * sizeof(uint32_t);
1298 err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
1299 size, 256);
1300 if (err) {
1301 printf("%s: could not allocate RX ring DMA memory\n",
1302 DEVNAME(sc));
1303 goto fail;
1304 }
1305 }
1306
1307 for (i = 0; i < count; i++) {
1308 struct iwm_rx_data *data = &ring->data[i];
1309
1310 memset(data, 0, sizeof(*data));
1311 err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
1312 IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1313 &data->map);
1314 if (err) {
1315 printf("%s: could not create RX buf DMA map\n",
1316 DEVNAME(sc));
1317 goto fail;
1318 }
1319
1320 err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i);
1321 if (err)
1322 goto fail;
1323 }
1324 return 0;
1325
1326fail: iwm_free_rx_ring(sc, ring);
1327 return err;
1328}
1329
1330void
1331iwm_disable_rx_dma(struct iwm_softc *sc)
1332{
1333 int ntries;
1334
1335 if (iwm_nic_lock(sc)) {
1336 if (sc->sc_mqrx_supported) {
1337 iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
1338 for (ntries = 0; ntries < 1000; ntries++) {
1339 if (iwm_read_prph(sc, IWM_RFH_GEN_STATUS) &
1340 IWM_RXF_DMA_IDLE)
1341 break;
1342 DELAY(10);
1343 }
1344 } else {
1345 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1346 for (ntries = 0; ntries < 1000; ntries++) {
1347 if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
1348 IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
1349 break;
1350 DELAY(10);
1351 }
1352 }
1353 iwm_nic_unlock(sc);
1354 }
1355}
1356
1357void
1358iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1359{
1360 ring->cur = 0;
1361 bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1362 ring->stat_dma.size, BUS_DMASYNC_PREWRITE);
1363 memset(ring->stat, 0, sizeof(*ring->stat));
1364 bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1365 ring->stat_dma.size, BUS_DMASYNC_POSTWRITE);
1366
1367}
1368
1369void
1370iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1371{
1372 int count, i;
1373
1374 iwm_dma_contig_free(&ring->free_desc_dma);
1375 iwm_dma_contig_free(&ring->stat_dma);
1376 iwm_dma_contig_free(&ring->used_desc_dma);
1377
1378 if (sc->sc_mqrx_supported)
1379         count = IWM_RX_MQ_RING_COUNT;
1380     else
1381         count = IWM_RX_RING_COUNT;
1382
1383     for (i = 0; i < count; i++) {
1384         struct iwm_rx_data *data = &ring->data[i];
1385
1386         if (data->m != NULL) {
1387             bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1388                 data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1389             bus_dmamap_unload(sc->sc_dmat, data->map);
1390             m_freem(data->m);
1391             data->m = NULL;
1392         }
1393         if (data->map != NULL)
1394             bus_dmamap_destroy(sc->sc_dmat, data->map);
1395 }
1396}
1397
1398int
1399iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1400{
1401 bus_addr_t paddr;
1402 bus_size_t size;
1403 int i, err;
1404
1405 ring->qid = qid;
1406 ring->queued = 0;
1407 ring->cur = 0;
1408 ring->tail = 0;
1409
1410 /* Allocate TX descriptors (256-byte aligned). */
1411     size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1412     err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1413     if (err) {
1414         printf("%s: could not allocate TX ring DMA memory\n",
1415             DEVNAME(sc));
1416 goto fail;
1417 }
1418 ring->desc = ring->desc_dma.vaddr;
1419
1420 /*
1421 * There is no need to allocate DMA buffers for unused rings.
1422 * 7k/8k/9k hardware supports up to 31 Tx rings which is more
1423 * than we currently need.
1424 *
1425 * In DQA mode we use 1 command queue + 4 DQA mgmt/data queues.
1426      * The command queue is queue 0 (sc->txq[0]), and the 4 mgmt/data frame
1427      * queues are sc->txq[IWM_DQA_MIN_MGMT_QUEUE + ac], i.e. sc->txq[5:8],
1428 * in order to provide one queue per EDCA category.
1429 * Tx aggregation requires additional queues, one queue per TID for
1430 * which aggregation is enabled. We map TID 0-7 to sc->txq[10:17].
1431 *
1432 * In non-DQA mode, we use rings 0 through 9 (0-3 are EDCA, 9 is cmd),
1433 * and Tx aggregation is not supported.
1434 *
1435 * Unfortunately, we cannot tell if DQA will be used until the
1436 * firmware gets loaded later, so just allocate sufficient rings
1437 * in order to satisfy both cases.
1438 */
1439     if (qid > IWM_LAST_AGG_TX_QUEUE)
1440         return 0;
1441
1442     size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1443     err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1444     if (err) {
1445         printf("%s: could not allocate cmd DMA memory\n", DEVNAME(sc));
1446         goto fail;
1447     }
1448     ring->cmd = ring->cmd_dma.vaddr;
1449
1450     paddr = ring->cmd_dma.paddr;
1451     for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1452         struct iwm_tx_data *data = &ring->data[i];
1453         size_t mapsize;
1454
1455         data->cmd_paddr = paddr;
1456         data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1457             + offsetof(struct iwm_tx_cmd, scratch);
1458         paddr += sizeof(struct iwm_device_cmd);
1459
1460         /* FW commands may require more mapped space than packets. */
1461         if (qid == IWM_CMD_QUEUE || qid == IWM_DQA_CMD_QUEUE)
1462             mapsize = (sizeof(struct iwm_cmd_header) +
1463                 IWM_MAX_CMD_PAYLOAD_SIZE);
1464         else
1465             mapsize = MCLBYTES;
1466         err = bus_dmamap_create(sc->sc_dmat, mapsize,
1467             IWM_NUM_OF_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT,
1468             &data->map);
1469         if (err) {
1470             printf("%s: could not create TX buf DMA map\n",
1471                 DEVNAME(sc));
1472             goto fail;
1473         }
1474     }
1475     KASSERT(paddr == ring->cmd_dma.paddr + size);
1476     return 0;
1477
1478 fail:    iwm_free_tx_ring(sc, ring);
1479     return err;
1480 }
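The queue layout described in the comment above can be computed directly. An illustrative sketch, assuming the values implied by that comment and by the expansions (IWM_DQA_MIN_MGMT_QUEUE == 5, first aggregation queue == 10, consistent with IWM_LAST_AGG_TX_QUEUE expanding to 10 + 8 - 1):

/* Sketch of the DQA queue numbering; the constants are assumptions
 * taken from the comment in iwm_alloc_tx_ring(), not from if_iwmreg.h. */
static int
dqa_edca_queue(int ac)      /* ac: EDCA category 0..3 */
{
    return 5 + ac;          /* assumed IWM_DQA_MIN_MGMT_QUEUE == 5 -> txq[5:8] */
}

static int
dqa_agg_queue(int tid)      /* tid: 0..7, only when aggregation is enabled */
{
    return 10 + tid;        /* assumed first agg queue == 10 -> txq[10:17] */
}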
1481
1482void
1483iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1484{
1485 int i;
1486
1487     for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1488         struct iwm_tx_data *data = &ring->data[i];
1489
1490         if (data->m != NULL) {
1491             bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1492                 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1493             bus_dmamap_unload(sc->sc_dmat, data->map);
1494             m_freem(data->m);
1495             data->m = NULL;
1496         }
1497     }
1498     /* Clear TX descriptors. */
1499     memset(ring->desc, 0, ring->desc_dma.size);
1500     bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
1501         ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
1502     sc->qfullmsk &= ~(1 << ring->qid);
1503     sc->qenablemsk &= ~(1 << ring->qid);
1504     /* 7000 family NICs are locked while commands are in progress. */
1505     if (ring->qid == sc->cmdqid && ring->queued > 0) {
1506         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
1507             iwm_nic_unlock(sc);
1508 }
1509 ring->queued = 0;
1510 ring->cur = 0;
1511 ring->tail = 0;
1512}
1513
1514void
1515iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1516{
1517 int i;
1518
1519 iwm_dma_contig_free(&ring->desc_dma);
1520 iwm_dma_contig_free(&ring->cmd_dma);
1521
1522     for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1523         struct iwm_tx_data *data = &ring->data[i];
1524
1525         if (data->m != NULL) {
1526             bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1527                 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1528             bus_dmamap_unload(sc->sc_dmat, data->map);
1529             m_freem(data->m);
1530             data->m = NULL;
1531         }
1532         if (data->map != NULL)
1533             bus_dmamap_destroy(sc->sc_dmat, data->map);
1534     }
1535}
1536
1537void
1538iwm_enable_rfkill_int(struct iwm_softc *sc)
1539{
1540 if (!sc->sc_msix) {
1541         sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
1542         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1543     } else {
1544         IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1545             sc->sc_fh_init_mask);
1546         IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1547             ~IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL);
1548         sc->sc_hw_mask = IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL;
1549     }
1550
1551     if (sc->sc_device_family >= IWM_DEVICE_FAMILY_9000)
1552         IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
1553             IWM_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
1554}
1555
1556int
1557iwm_check_rfkill(struct iwm_softc *sc)
1558{
1559 uint32_t v;
1560 int rv;
1561
1562 /*
1563 * "documentation" is not really helpful here:
1564 * 27: HW_RF_KILL_SW
1565 * Indicates state of (platform's) hardware RF-Kill switch
1566 *
1567 * But apparently when it's off, it's on ...
1568 */
1569     v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
1570     rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1571     if (rv) {
1572         sc->sc_flags |= IWM_FLAG_RFKILL;
1573     } else {
1574         sc->sc_flags &= ~IWM_FLAG_RFKILL;
1575     }
1576
1577 return rv;
1578}
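The "when it's off, it's on" remark above can be made concrete: bit 27 of IWM_CSR_GP_CNTRL (0x08000000 per the expansion) reads 1 when the platform switch allows the radio, so RF-kill is asserted when the bit is clear. A minimal sketch:

#include <stdint.h>

#define HW_RF_KILL_SW 0x08000000U  /* bit 27 of IWM_CSR_GP_CNTRL */

/* Returns nonzero when RF-kill is engaged, mirroring iwm_check_rfkill(). */
static int
rfkill_engaged(uint32_t gp_cntrl)
{
    return (gp_cntrl & HW_RF_KILL_SW) == 0;  /* bit clear -> radio blocked */
}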
1579
1580void
1581iwm_enable_interrupts(struct iwm_softc *sc)
1582{
1583     if (!sc->sc_msix) {
1584         sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1585         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1586     } else {
1587         /*
1588          * fh/hw_mask keep all the unmasked causes.
1589          * Unlike MSI, with MSI-X a cause is enabled when its bit is unset.
1590          */
1591         sc->sc_hw_mask = sc->sc_hw_init_mask;
1592         sc->sc_fh_mask = sc->sc_fh_init_mask;
1593         IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1594             ~sc->sc_fh_mask);
1595         IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1596             ~sc->sc_hw_mask);
1597     }
1598}
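Because an MSI-X cause is enabled by clearing its mask bit, the driver tracks the set of unmasked causes in sc_fh_mask/sc_hw_mask and writes the complement to the hardware. A sketch of that inversion:

#include <stdint.h>

/* Value written to an MSI-X *_INT_MASK_AD register: causes present in
 * 'enabled_causes' get their mask bit cleared (enabled), everything
 * else stays masked. E.g. enabling only the RF-kill cause writes
 * ~IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL. */
static uint32_t
msix_mask_value(uint32_t enabled_causes)
{
    return ~enabled_causes;
}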
1599
1600void
1601iwm_enable_fwload_interrupt(struct iwm_softc *sc)
1602{
1603 if (!sc->sc_msix) {
1604         sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
1605         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1606     } else {
1607         IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1608             sc->sc_hw_init_mask);
1609         IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1610             ~IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
1611 sc->sc_fh_mask = IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM;
1612 }
1613}
1614
1615void
1616iwm_restore_interrupts(struct iwm_softc *sc)
1617{
1618     IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1619}
1620
1621void
1622iwm_disable_interrupts(struct iwm_softc *sc)
1623{
1624 if (!sc->sc_msix) {
1625         IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1626
1627         /* acknowledge all interrupts */
1628         IWM_WRITE(sc, IWM_CSR_INT, ~0);
1629         IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1630     } else {
1631         IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1632             sc->sc_fh_init_mask);
1633         IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1634             sc->sc_hw_init_mask);
1635 }
1636}
1637
1638void
1639iwm_ict_reset(struct iwm_softc *sc)
1640{
1641 iwm_disable_interrupts(sc);
1642
1643     memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1644     sc->ict_cur = 0;
1645
1646     /* Set physical address of ICT (4KB aligned). */
1647     IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1648         IWM_CSR_DRAM_INT_TBL_ENABLE
1649         | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1650         | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1651         | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1652
1653     /* Switch to ICT interrupt mode in driver. */
1654     sc->sc_flags |= IWM_FLAG_USE_ICT;
1655
1656     IWM_WRITE(sc, IWM_CSR_INT, ~0);
1657 iwm_enable_interrupts(sc);
1658}
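The ICT register packs three control bits with the table's physical address shifted right by IWM_ICT_PADDR_SHIFT (12); since the table is 4 KB aligned, the low 12 bits carry no information. A sketch of the composition, with bit values taken from the expansion above:

#include <stdint.h>

#define DRAM_INT_TBL_ENABLE         (1U << 31)
#define DRAM_INIT_TBL_WRAP_CHECK    (1U << 27)
#define DRAM_INIT_TBL_WRITE_POINTER (1U << 28)
#define ICT_PADDR_SHIFT             12  /* table is 4KB aligned */

static uint32_t
ict_tbl_reg(uint64_t paddr)
{
    /* paddr must be 4KB aligned, so no address bits are lost here. */
    return DRAM_INT_TBL_ENABLE | DRAM_INIT_TBL_WRAP_CHECK |
        DRAM_INIT_TBL_WRITE_POINTER | (uint32_t)(paddr >> ICT_PADDR_SHIFT);
}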
1659
1660#define IWM_HW_READY_TIMEOUT 50
1661int
1662iwm_set_hw_ready(struct iwm_softc *sc)
1663{
1664 int ready;
1665
1666     IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1667         IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
1668
1669     ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
1670         IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1671         IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1672         IWM_HW_READY_TIMEOUT);
1673     if (ready)
1674         IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
1675             IWM_CSR_MBOX_SET_REG_OS_ALIVE);
1676
1677 return ready;
1678}
1679#undef IWM_HW_READY_TIMEOUT
1680
1681int
1682iwm_prepare_card_hw(struct iwm_softc *sc)
1683{
1684 int t = 0;
1685 int ntries;
1686
1687 if (iwm_set_hw_ready(sc))
1688 return 0;
1689
1690     IWM_SETBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1691         IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1692     DELAY(1000);
1693
1694     for (ntries = 0; ntries < 10; ntries++) {
1695         /* If HW is not ready, prepare the conditions to check again */
1696         IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1697             IWM_CSR_HW_IF_CONFIG_REG_PREPARE);
1698
1699         do {
1700             if (iwm_set_hw_ready(sc))
1701                 return 0;
1702             DELAY(200);
1703             t += 200;
1704         } while (t < 150000);
1705         DELAY(25000);
1706     }
1707
1708     return ETIMEDOUT;
1709}
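Note that 't' is declared outside the outer loop and never reset, so the 150 ms polling budget (150000 us in 200 us steps) is shared across all ten tries; after the first try exhausts it, each later iteration polls only once. A back-of-the-envelope sketch of the worst-case wait:

#include <stdio.h>

/* Rough worst-case wait for iwm_prepare_card_hw(), in microseconds. */
int
main(void)
{
    int t_us = 1000;            /* initial DELAY(1000) */
    t_us += 150000;             /* shared polling budget, 200us steps */
    t_us += 10 * 25000;         /* DELAY(25000) after each of 10 tries */
    printf("~%d ms worst case\n", t_us / 1000);  /* ~401 ms */
    return 0;
}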
1710
1711void
1712iwm_apm_config(struct iwm_softc *sc)
1713{
1714 pcireg_t lctl, cap;
1715
1716 /*
1717 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
1718 * Check if BIOS (or OS) enabled L1-ASPM on this device.
1719 * If so (likely), disable L0S, so device moves directly L0->L1;
1720 * costs negligible amount of power savings.
1721 * If not (unlikely), enable L0S, so there is at least some
1722 * power savings, even without L1.
1723 */
1724     lctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1725         sc->sc_cap_off + PCI_PCIE_LCSR);
1726     if (lctl & PCI_PCIE_LCSR_ASPM_L1) {
1727         IWM_SETBITS(sc, IWM_CSR_GIO_REG,
1728             IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1729     } else {
1730         IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
1731             IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1732     }
1733
1734     cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1735         sc->sc_cap_off + PCI_PCIE_DCSR2);
1736     sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
1737     DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
1738         DEVNAME(sc),
1739         (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
1740         sc->sc_ltr_enabled ? "En" : "Dis"));
1741}
1742
1743/*
1744 * Start up NIC's basic functionality after it has been reset
1745 * e.g. after platform boot or shutdown.
1746 * NOTE: This does not load uCode nor start the embedded processor
1747 */
1748int
1749iwm_apm_init(struct iwm_softc *sc)
1750{
1751 int err = 0;
1752
1753 /* Disable L0S exit timer (platform NMI workaround) */
1754     if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000)
1755         IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1756             IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1757
1758     /*
1759      * Disable L0s without affecting L1;
1760      * don't wait for ICH L0s (ICH bug W/A)
1761      */
1762     IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1763         IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1764
1765     /* Set FH wait threshold to maximum (HW error during stress W/A) */
1766     IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
1767
1768 /*
1769 * Enable HAP INTA (interrupt from management bus) to
1770 * wake device's PCI Express link L1a -> L0s
1771 */
1772     IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1773         IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1774
1775 iwm_apm_config(sc);
1776
1777 #if 0 /* not for 7k/8k */
1778     /* Configure analog phase-lock-loop before activating to D0A */
1779     if (trans->cfg->base_params->pll_cfg_val)
1780         IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
1781             trans->cfg->base_params->pll_cfg_val);
1782 #endif
1783
1784 /*
1785 * Set "initialization complete" bit to move adapter from
1786 * D0U* --> D0A* (powered-up active) state.
1787 */
1788     IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1789
1790     /*
1791      * Wait for clock stabilization; once stabilized, access to
1792      * device-internal resources is supported, e.g. iwm_write_prph()
1793      * and accesses to uCode SRAM.
1794      */
1795     if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1796         IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1797         IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
1798         printf("%s: timeout waiting for clock stabilization\n",
1799             DEVNAME(sc));
1800         err = ETIMEDOUT;
1801 goto out;
1802 }
1803
1804 if (sc->host_interrupt_operation_mode) {
1805 /*
1806          * This is a bit of an abuse: this workaround is needed for
1807          * the 7260 / 3160 only, so we key it off
1808          * host_interrupt_operation_mode even though it is unrelated.
1809          *
1810          * Enable the oscillator to count wake up time for L1 exit. This
1811          * consumes slightly more power (100uA), but allows us to be sure
1812          * that we wake up from L1 on time.
1813          *
1814          * This looks weird: read the same register twice, discard the
1815          * value, set a bit, and yet again read that same register
1816          * just to discard the value. But that's the way the hardware
1817          * seems to like it.
1818          */
1819         if (iwm_nic_lock(sc)) {
1820             iwm_read_prph(sc, IWM_OSC_CLK);
1821             iwm_read_prph(sc, IWM_OSC_CLK);
1822             iwm_nic_unlock(sc);
1823         }
1824         err = iwm_set_bits_prph(sc, IWM_OSC_CLK,
1825             IWM_OSC_CLK_FORCE_CONTROL);
1826         if (err)
1827             goto out;
1828         if (iwm_nic_lock(sc)) {
1829             iwm_read_prph(sc, IWM_OSC_CLK);
1830             iwm_read_prph(sc, IWM_OSC_CLK);
1831             iwm_nic_unlock(sc);
1832 }
1833 }
1834
1835 /*
1836 * Enable DMA clock and wait for it to stabilize.
1837 *
1838 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1839 * do not disable clocks. This preserves any hardware bits already
1840 * set by default in "CLK_CTRL_REG" after reset.
1841 */
1842     if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1843         if (iwm_nic_lock(sc)) {
1844             iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
1845                 IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1846             iwm_nic_unlock(sc);
1847         }
1848         DELAY(20);
1849
1850         /* Disable L1-Active */
1851         err = iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1852             IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1853         if (err)
1854             goto out;
1855
1856         /* Clear the interrupt in APMG if the NIC is in RFKILL */
1857         if (iwm_nic_lock(sc)) {
1858             iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
1859                 IWM_APMG_RTC_INT_STT_RFKILL);
1860             iwm_nic_unlock(sc);
1861 }
1862 }
1863 out:
1864 if (err)
1865         printf("%s: apm init error %d\n", DEVNAME(sc), err);
1866 return err;
1867}
1868
1869void
1870iwm_apm_stop(struct iwm_softc *sc)
1871{
1872     IWM_SETBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1873         IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1874     IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1875         IWM_CSR_HW_IF_CONFIG_REG_PREPARE |
1876         IWM_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
1877     DELAY(1000);
1878     IWM_CLRBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1879         IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1880     DELAY(5000);
1881
1882     /* stop device's busmaster DMA activity */
1883     IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);
1884
1885     if (!iwm_poll_bit(sc, IWM_CSR_RESET,
1886         IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
1887         IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
1888         printf("%s: timeout waiting for master\n", DEVNAME(sc));
1889
1890     /*
1891      * Clear "initialization complete" bit to move adapter from
1892      * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
1893      */
1894     IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1895         IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1896}
1897
1898void
1899iwm_init_msix_hw(struct iwm_softc *sc)
1900{
1901 iwm_conf_msix_hw(sc, 0);
1902
1903 if (!sc->sc_msix)
1904 return;
1905
1906     sc->sc_fh_init_mask = ~IWM_READ(sc, IWM_CSR_MSIX_FH_INT_MASK_AD);
1907     sc->sc_fh_mask = sc->sc_fh_init_mask;
1908     sc->sc_hw_init_mask = ~IWM_READ(sc, IWM_CSR_MSIX_HW_INT_MASK_AD);
1909     sc->sc_hw_mask = sc->sc_hw_init_mask;
1910}
1911
1912void
1913iwm_conf_msix_hw(struct iwm_softc *sc, int stopped)
1914{
1915 int vector = 0;
1916
1917 if (!sc->sc_msix) {
1918 /* Newer chips default to MSIX. */
1919 if (sc->sc_mqrx_supported && !stopped && iwm_nic_lock(sc)) {
1920             iwm_write_prph(sc, IWM_UREG_CHICK,
1921                 IWM_UREG_CHICK_MSI_ENABLE);
1922             iwm_nic_unlock(sc);
1923         }
1924         return;
1925     }
1926
1927     if (!stopped && iwm_nic_lock(sc)) {
1928         iwm_write_prph(sc, IWM_UREG_CHICK, IWM_UREG_CHICK_MSIX_ENABLE);
1929         iwm_nic_unlock(sc);
1930     }
1931
1932     /* Disable all interrupts */
1933     IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD, ~0);
1934     IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD, ~0);
1935
1936     /* Map fallback-queue (command/mgmt) to a single vector */
1937     IWM_WRITE_1(sc, IWM_CSR_MSIX_RX_IVAR(0),
1938         vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1939     /* Map RSS queue (data) to the same vector */
1940     IWM_WRITE_1(sc, IWM_CSR_MSIX_RX_IVAR(1),
1941         vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1942
1943     /* Enable the RX queue cause interrupts */
1944     IWM_CLRBITS(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1945         IWM_MSIX_FH_INT_CAUSES_Q0 | IWM_MSIX_FH_INT_CAUSES_Q1);
1946
1947     /* Map non-RX causes to the same vector */
1948     IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
1949         vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1950     IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
1951         vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1952     IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_S2D),
1953         vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1954     IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_FH_ERR),
1955         vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1956     IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_ALIVE),
1957         vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1958     IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_WAKEUP),
1959         vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1960     IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_IML),
1961         vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1962     IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_CT_KILL),
1963         vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1964     IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_RF_KILL),
1965         vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1966     IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_PERIODIC),
1967         vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1968     IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_SW_ERR),
1969         vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1970     IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_SCD),
1971         vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1972     IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_FH_TX),
1973         vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1974     IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_HW_ERR),
1975         vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1976     IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_HAP),
1977         vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1978
1979     /* Enable interrupts for the non-RX causes */
1980     IWM_CLRBITS(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1981         IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
1982         IWM_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
1983         IWM_MSIX_FH_INT_CAUSES_S2D |
1984         IWM_MSIX_FH_INT_CAUSES_FH_ERR);
1985     IWM_CLRBITS(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1986         IWM_MSIX_HW_INT_CAUSES_REG_ALIVE |
1987         IWM_MSIX_HW_INT_CAUSES_REG_WAKEUP |
1988         IWM_MSIX_HW_INT_CAUSES_REG_IML |
1989         IWM_MSIX_HW_INT_CAUSES_REG_CT_KILL |
1990         IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL |
1991         IWM_MSIX_HW_INT_CAUSES_REG_PERIODIC |
1992         IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR |
1993         IWM_MSIX_HW_INT_CAUSES_REG_SCD |
1994         IWM_MSIX_HW_INT_CAUSES_REG_FH_TX |
1995         IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR |
1996         IWM_MSIX_HW_INT_CAUSES_REG_HAP);
1997}
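Every cause above is routed to the same vector with the non-auto-clear flag (1 << 7 per the expansions), so each IVAR byte ends up identical. A sketch of the byte that gets written:

#include <stdint.h>

#define MSIX_NON_AUTO_CLEAR_CAUSE (1 << 7)

/* Byte written to each IWM_CSR_MSIX_IVAR / IWM_CSR_MSIX_RX_IVAR slot;
 * with vector 0, as used here, this is simply 0x80. */
static uint8_t
ivar_entry(uint8_t vector)
{
    return vector | MSIX_NON_AUTO_CLEAR_CAUSE;
}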
1998
1999int
2000iwm_clear_persistence_bit(struct iwm_softc *sc)
2001{
2002 uint32_t hpm, wprot;
2003
2004     hpm = iwm_read_prph_unlocked(sc, IWM_HPM_DEBUG);
2005     if (hpm != 0xa5a5a5a0 && (hpm & IWM_HPM_PERSISTENCE_BIT)) {
2006         wprot = iwm_read_prph_unlocked(sc, IWM_PREG_PRPH_WPROT_9000);
2007         if (wprot & IWM_PREG_WFPM_ACCESS) {
2008             printf("%s: cannot clear persistence bit\n",
2009                 DEVNAME(sc));
2010             return EPERM;
2011         }
2012         iwm_write_prph_unlocked(sc, IWM_HPM_DEBUG,
2013             hpm & ~IWM_HPM_PERSISTENCE_BIT);
2014 }
2015
2016 return 0;
2017}
2018
2019int
2020iwm_start_hw(struct iwm_softc *sc)
2021{
2022 int err;
2023
2024 err = iwm_prepare_card_hw(sc);
2025 if (err)
2026 return err;
2027
2028     if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000) {
2029         err = iwm_clear_persistence_bit(sc);
2030         if (err)
2031             return err;
2032     }
2033
2034     /* Reset the entire device */
2035     IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
2036     DELAY(5000);
2037
2038 err = iwm_apm_init(sc);
2039 if (err)
2040 return err;
2041
2042 iwm_init_msix_hw(sc);
2043
2044 iwm_enable_rfkill_int(sc);
2045 iwm_check_rfkill(sc);
2046
2047 return 0;
2048}
2049
2050
2051void
2052iwm_stop_device(struct iwm_softc *sc)
2053{
2054 int chnl, ntries;
2055 int qid;
2056
2057 iwm_disable_interrupts(sc);
2058     sc->sc_flags &= ~IWM_FLAG_USE_ICT;
2059
2060 /* Stop all DMA channels. */
2061 if (iwm_nic_lock(sc)) {
2062 /* Deactivate TX scheduler. */
2063         iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
2064
2065         for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
2066             IWM_WRITE(sc,
2067                 IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
2068             for (ntries = 0; ntries < 200; ntries++) {
2069                 uint32_t r;
2070
2071                 r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
2072                 if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
2073                     chnl))
2074                     break;
2075                 DELAY(20);
2076 }
2077 }
2078 iwm_nic_unlock(sc);
2079 }
2080 iwm_disable_rx_dma(sc);
2081
2082 iwm_reset_rx_ring(sc, &sc->rxq);
2083
2084     for (qid = 0; qid < nitems(sc->txq); qid++)
2085 iwm_reset_tx_ring(sc, &sc->txq[qid]);
2086
2087     if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2088         if (iwm_nic_lock(sc)) {
2089             /* Power-down device's busmaster DMA clocks */
2090             iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
2091                 IWM_APMG_CLK_VAL_DMA_CLK_RQT);
2092             iwm_nic_unlock(sc);
2093         }
2094         DELAY(5);
2095 }
2096
2097 /* Make sure (redundant) we've released our request to stay awake */
2098     IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
2099         IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2100     if (sc->sc_nic_locks > 0)
2101         printf("%s: %d active NIC locks forcefully cleared\n",
2102             DEVNAME(sc), sc->sc_nic_locks);
2103 sc->sc_nic_locks = 0;
2104
2105 /* Stop the device, and put it in low power state */
2106 iwm_apm_stop(sc);
2107
2108 /* Reset the on-board processor. */
2109     IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
2110     DELAY(5000);
2111
2112 /*
2113 * Upon stop, the IVAR table gets erased, so msi-x won't
2114 * work. This causes a bug in RF-KILL flows, since the interrupt
2116      * that enables the radio won't fire on the correct irq, and the
2116 * driver won't be able to handle the interrupt.
2117 * Configure the IVAR table again after reset.
2118 */
2119 iwm_conf_msix_hw(sc, 1);
2120
2121 /*
2122 * Upon stop, the APM issues an interrupt if HW RF kill is set.
2123 * Clear the interrupt again.
2124 */
2125 iwm_disable_interrupts(sc);
2126
2127 /* Even though we stop the HW we still want the RF kill interrupt. */
2128 iwm_enable_rfkill_int(sc);
2129 iwm_check_rfkill(sc);
2130
2131 iwm_prepare_card_hw(sc);
2132}
2133
2134void
2135iwm_nic_config(struct iwm_softc *sc)
2136{
2137 uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
2138 uint32_t mask, val, reg_val = 0;
2139
2140     radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
2141         IWM_FW_PHY_CFG_RADIO_TYPE_POS;
2142     radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
2143         IWM_FW_PHY_CFG_RADIO_STEP_POS;
2144     radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
2145         IWM_FW_PHY_CFG_RADIO_DASH_POS;
2146
2147     reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
2148         IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
2149     reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
2150         IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
2151
2152     /* radio configuration */
2153     reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
2154     reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
2155     reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
2156
2157     mask = IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
2158         IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
2159         IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
2160         IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
2161         IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
2162         IWM_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
2163         IWM_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
2164
2165     val = IWM_READ(sc, IWM_CSR_HW_IF_CONFIG_REG);
2166     val &= ~mask;
2167     val |= reg_val;
2168     IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, val);
2169
2170     /*
2171      * W/A : NIC is stuck in a reset state after Early PCIe power off
2172      * (PCIe power is lost before PERST# is asserted), causing ME FW
2173      * to lose ownership and not be able to obtain it back.
2174      */
2175     if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2176         iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
2177             IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
2178             ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
2179}
2180
2181int
2182iwm_nic_rx_init(struct iwm_softc *sc)
2183{
2184 if (sc->sc_mqrx_supported)
2185 return iwm_nic_rx_mq_init(sc);
2186 else
2187 return iwm_nic_rx_legacy_init(sc);
2188}
2189
2190int
2191iwm_nic_rx_mq_init(struct iwm_softc *sc)
2192{
2193 int enabled;
2194
2195 if (!iwm_nic_lock(sc))
2196         return EBUSY;
2197
2198     /* Stop RX DMA. */
2199     iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
2200     /* Disable RX used and free queue operation. */
2201     iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, 0);
2202
2203     iwm_write_prph64(sc, IWM_RFH_Q0_FRBDCB_BA_LSB,
2204         sc->rxq.free_desc_dma.paddr);
2205     iwm_write_prph64(sc, IWM_RFH_Q0_URBDCB_BA_LSB,
2206         sc->rxq.used_desc_dma.paddr);
2207     iwm_write_prph64(sc, IWM_RFH_Q0_URBD_STTS_WPTR_LSB,
2208         sc->rxq.stat_dma.paddr);
2209     iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_WIDX, 0);
2210     iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_RIDX, 0);
2211     iwm_write_prph(sc, IWM_RFH_Q0_URBDCB_WIDX, 0);
2212
2213     /* We configure only queue 0 for now. */
2214     enabled = ((1 << 0) << 16) | (1 << 0);
2215
2216     /* Enable RX DMA, 4KB buffer size. */
2217     iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG,
2218         IWM_RFH_DMA_EN_ENABLE_VAL |
2219         IWM_RFH_RXF_DMA_RB_SIZE_4K |
2220         IWM_RFH_RXF_DMA_MIN_RB_4_8 |
2221         IWM_RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
2222         IWM_RFH_RXF_DMA_RBDCB_SIZE_512);
2223
2224     /* Enable RX DMA snooping. */
2225     iwm_write_prph(sc, IWM_RFH_GEN_CFG,
2226         IWM_RFH_GEN_CFG_RFH_DMA_SNOOP |
2227         IWM_RFH_GEN_CFG_SERVICE_DMA_SNOOP |
2228         (sc->sc_integrated ? IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
2229         IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_128));
2230
2231     /* Enable the configured queue(s). */
2232     iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, enabled);
2233
2234     iwm_nic_unlock(sc);
2235
2236     IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
2237
2238     IWM_WRITE(sc, IWM_RFH_Q0_FRBDCB_WIDX_TRG, 8);
2239
2240 return 0;
2241}
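The IWM_RFH_RXF_DMA_CFG value above ORs together the enable bit, the 4 KB receive-buffer size, the minimum-RB and drop-too-large policies, and a 512-entry descriptor circular buffer; with the field values from the expansions it reduces to a single constant. A sketch:

#include <stdint.h>

/* Field encodings as seen in the macro expansions above. */
#define RFH_DMA_EN_ENABLE_VAL      (1U << 31)
#define RFH_RXF_DMA_RB_SIZE_4K     (0x4 << 16)
#define RFH_RXF_DMA_MIN_RB_4_8     (3 << 24)
#define RFH_RXF_DMA_DROP_TOO_LARGE 0x04000000
#define RFH_RXF_DMA_RBDCB_SIZE_512 (0x9 << 20)

static uint32_t
rfh_dma_cfg(void)
{
    return RFH_DMA_EN_ENABLE_VAL | RFH_RXF_DMA_RB_SIZE_4K |
        RFH_RXF_DMA_MIN_RB_4_8 | RFH_RXF_DMA_DROP_TOO_LARGE |
        RFH_RXF_DMA_RBDCB_SIZE_512;  /* 0x87940000 */
}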
2242
2243int
2244iwm_nic_rx_legacy_init(struct iwm_softc *sc)
2245{
2246     memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
2247
2248     iwm_disable_rx_dma(sc);
2249
2250     if (!iwm_nic_lock(sc))
2251         return EBUSY;
2252
2253     /* reset and flush pointers */
2254     IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
2255     IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
2256     IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
2257     IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
2258
2259     /* Set physical address of RX ring (256-byte aligned). */
2260     IWM_WRITE(sc,
2261         IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.free_desc_dma.paddr >> 8);
2262
2263     /* Set physical address of RX status (16-byte aligned). */
2264     IWM_WRITE(sc,
2265         IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
2266
2267     /* Enable RX. */
2268     IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
2269         IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
2270         IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | /* HW bug */
2271         IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
2272         (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
2273         IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
2274         IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
2275
2276     IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
2277
2278     /* W/A for interrupt coalescing bug in 7260 and 3160 */
2279     if (sc->host_interrupt_operation_mode)
2280         IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
2281
2282     iwm_nic_unlock(sc);
2283
2284     /*
2285      * This value should initially be 0 (before preparing any RBs),
2286      * and should be 8 after preparing the first 8 RBs (for example).
2287      */
2288     IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
2289
2290 return 0;
2291}
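In the legacy path the ring geometry is encoded in the same config register: per the expansion, IWM_RX_QUEUE_SIZE_LOG is 8 (a 2^8 = 256-entry RBD circular buffer, matching IWM_RX_RING_COUNT) and the RBDCB size field sits at bit 20. A sketch of the field:

#include <stdint.h>

#define RX_QUEUE_SIZE_LOG 8   /* 2^8 == 256 RBDs, i.e. IWM_RX_RING_COUNT */
#define RBDCB_SIZE_POS    20  /* IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS */

static uint32_t
rbdcb_size_field(void)
{
    return (uint32_t)RX_QUEUE_SIZE_LOG << RBDCB_SIZE_POS;  /* 0x00800000 */
}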
2292
2293int
2294iwm_nic_tx_init(struct iwm_softc *sc)
2295{
2296 int qid, err;
2297
2298 if (!iwm_nic_lock(sc))
2299         return EBUSY;
2300
2301     /* Deactivate TX scheduler. */
2302     iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
2303
2304     /* Set physical address of "keep warm" page (16-byte aligned). */
2305     IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
2306
2307     for (qid = 0; qid < nitems(sc->txq); qid++) {
2308         struct iwm_tx_ring *txq = &sc->txq[qid];
2309
2310         /* Set physical address of TX ring (256-byte aligned). */
2311         IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
2312             txq->desc_dma.paddr >> 8);
2313     }
2314
2315     err = iwm_set_bits_prph(sc, IWM_SCD_GP_CTRL,
2316         IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE |
2317         IWM_SCD_GP_CTRL_ENABLE_31_QUEUES);
2318
2319 iwm_nic_unlock(sc);
2320
2321 return err;
2322}
2323
2324int
2325iwm_nic_init(struct iwm_softc *sc)
2326{
2327 int err;
2328
2329 iwm_apm_init(sc);
2330 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2331 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
2332 IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
2333 ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
2334
2335 iwm_nic_config(sc);
2336
2337 err = iwm_nic_rx_init(sc);
2338 if (err)
2339 return err;
2340
2341 err = iwm_nic_tx_init(sc);
2342 if (err)
2343 return err;
2344
2345 IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
2346
2347 return 0;
2348}
2349
2350/* Map a TID to an ieee80211_edca_ac category. */
2351 const uint8_t iwm_tid_to_ac[IWM_MAX_TID_COUNT] = {
2352 EDCA_AC_BE,
2353 EDCA_AC_BK,
2354 EDCA_AC_BK,
2355 EDCA_AC_BE,
2356 EDCA_AC_VI,
2357 EDCA_AC_VI,
2358 EDCA_AC_VO,
2359 EDCA_AC_VO,
2360};
2361
2362/* Map ieee80211_edca_ac categories to firmware Tx FIFO. */
2363const uint8_t iwm_ac_to_tx_fifo[] = {
2364 IWM_TX_FIFO_BE,
2365 IWM_TX_FIFO_BK,
2366 IWM_TX_FIFO_VI,
2367 IWM_TX_FIFO_VO,
2368};
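/*
 * Example of chaining the two tables: TID 6 maps to EDCA_AC_VO, which
 * selects IWM_TX_FIFO_VO, i.e.
 *
 *	fifo = iwm_ac_to_tx_fifo[iwm_tid_to_ac[tid]];
 *
 * which is exactly the two-step lookup iwm_sta_tx_agg() performs below.
 */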
2369
2370int
2371iwm_enable_ac_txq(struct iwm_softc *sc, int qid, int fifo)
2372{
2373 int err;
2374 iwm_nic_assert_locked(sc);
2375
2376 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
2377
2378 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
2379 (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
2380 | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
2381
2382 err = iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
2383 if (err) {
2384 return err;
2385 }
2386
2387 iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
2388
2389 iwm_write_mem32(sc,
2390 sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
2391
2392 /* Set scheduler window size and frame limit. */
2393 iwm_write_mem32(sc,
2394 sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
2395 sizeof(uint32_t),
2396 ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
2397 IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
2398 ((IWM_FRAME_LIMIT
2399 << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
2400 IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
2401
2402 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
2403 (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
2404 (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
2405 (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
2406 IWM_SCD_QUEUE_STTS_REG_MSK);
2407
2408 if (qid == sc->cmdqid)
2409 iwm_write_prph(sc, IWM_SCD_EN_CTRL,
2410 iwm_read_prph(sc, IWM_SCD_EN_CTRL) | (1 << qid));
2411
2412 return 0;
2413}
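/*
 * Worked example of the REG2 packing above (IWM_FRAME_LIMIT is 64 and
 * the masks place the window size in bits 0-6 and the frame limit in
 * bits 16-22): the second scheduler context word comes out as
 * (64 << 16) | 64 == 0x00400040.
 */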
2414
2415int
2416iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo,
2417 int aggregate, uint8_t tid, uint16_t ssn)
2418{
2419 struct iwm_tx_ring *ring = &sc->txq[qid];
2420 struct iwm_scd_txq_cfg_cmd cmd;
2421 int err, idx, scd_bug;
2422
2423 iwm_nic_assert_locked(sc);
2424
2425 /*
2426 * If we need to move the SCD write pointer by steps of
2427 * 0x40, 0x80 or 0xc0, it gets stuck.
2428 * This is really ugly, but this is the easiest way out for
2429 * this sad hardware issue.
2430 * This bug has been fixed on devices 9000 and up.
2431 */
2432 scd_bug = !sc->sc_mqrx_supported &&
2433 !((ssn - ring->cur) & 0x3f) &&
2434 (ssn != ring->cur);
2435 if (scd_bug)
2436 ssn = (ssn + 1) & 0xfff;
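/*
 * Example: with ring->cur == 0 and ssn == 0x40, (ssn - ring->cur) &
 * 0x3f is 0 while ssn != ring->cur, so the workaround bumps ssn to
 * 0x41 and the write pointer no longer moves by a stuck multiple of
 * 0x40. Sequence numbers are 12 bits wide, hence the & 0xfff wrap.
 */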
2437
2438 idx = IWM_AGG_SSN_TO_TXQ_IDX(ssn);
2439 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | idx);
2440 ring->cur = idx;
2441 ring->tail = idx;
2442
2443 memset(&cmd, 0, sizeof(cmd));
2444 cmd.tid = tid;
2445 cmd.scd_queue = qid;
2446 cmd.enable = 1;
2447 cmd.sta_id = sta_id;
2448 cmd.tx_fifo = fifo;
2449 cmd.aggregate = aggregate;
2450 cmd.ssn = htole16(ssn);
2451 cmd.window = IWM_FRAME_LIMIT;
2452
2453 err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0,
2454 sizeof(cmd), &cmd);
2455 if (err)
2456 return err;
2457
2458 sc->qenablemsk |= (1 << qid);
2459 return 0;
2460}
2461
2462int
2463iwm_disable_txq(struct iwm_softc *sc, int sta_id, int qid, uint8_t tid)
2464{
2465 struct iwm_scd_txq_cfg_cmd cmd;
2466 int err;
2467
2468 memset(&cmd, 0, sizeof(cmd));
2469 cmd.tid = tid;
2470 cmd.scd_queue = qid;
2471 cmd.enable = 0;
2472 cmd.sta_id = sta_id;
2473
2474 err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
2475 if (err)
2476 return err;
2477
2478 sc->qenablemsk &= ~(1 << qid);
2479 return 0;
2480}
2481
2482int
2483iwm_post_alive(struct iwm_softc *sc)
2484{
2485 int nwords;
2486 int err, chnl;
2487 uint32_t base;
2488
2489 if (!iwm_nic_lock(sc))
2490 return EBUSY;
2491
2492 base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
2493
2494 iwm_ict_reset(sc);
2495
2496 iwm_nic_unlock(sc);
2497
2498 /* Clear TX scheduler state in SRAM. */
2499 nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
2500 IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
2501 / sizeof(uint32_t);
2502 err = iwm_write_mem(sc,
2503 sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
2504 NULL, nwords);
2505 if (err)
2506 return err;
2507
2508 if (!iwm_nic_lock(sc))
2509 return EBUSY;
2510
2511 /* Set physical address of TX scheduler rings (1KB aligned). */
2512 iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
2513
2514 iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
2515
2516 /* enable command channel */
2517 err = iwm_enable_ac_txq(sc, sc->cmdqid, IWM_TX_FIFO_CMD);
2518 if (err) {
2519 iwm_nic_unlock(sc);
2520 return err;
2521 }
2522
2523 /* Activate TX scheduler. */
2524 iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
2525
2526 /* Enable DMA channels. */
2527 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
2528 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
2529 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2530 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
2531 }
2532
2533 IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
2534 IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
2535
2536 iwm_nic_unlock(sc);
2537
2538 /* Enable L1-Active */
2539 if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000) {
2540 err = iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
2541 IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
2542 }
2543
2544 return err;
2545}
2546
2547struct iwm_phy_db_entry *
2548iwm_phy_db_get_section(struct iwm_softc *sc, uint16_t type, uint16_t chg_id)
2549{
2550 struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2551
2552 if (type >= IWM_PHY_DB_MAX)
2553 return NULL;
2554
2555 switch (type) {
2556 case IWM_PHY_DB_CFG:
2557 return &phy_db->cfg;
2558 case IWM_PHY_DB_CALIB_NCH:
2559 return &phy_db->calib_nch;
2560 case IWM_PHY_DB_CALIB_CHG_PAPD:
2561 if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
2562 return NULL;
2563 return &phy_db->calib_ch_group_papd[chg_id];
2564 case IWM_PHY_DB_CALIB_CHG_TXP:
2565 if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
2566 return NULL;
2567 return &phy_db->calib_ch_group_txp[chg_id];
2568 default:
2569 return NULL;
2570 }
2571 return NULL;
2572}
2573
2574int
2575iwm_phy_db_set_section(struct iwm_softc *sc,
2576 struct iwm_calib_res_notif_phy_db *phy_db_notif)
2577{
2578 uint16_t type = le16toh(phy_db_notif->type);
2579 uint16_t size = le16toh(phy_db_notif->length);
2580 struct iwm_phy_db_entry *entry;
2581 uint16_t chg_id = 0;
2582
2583 if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
2584 type == IWM_PHY_DB_CALIB_CHG_TXP)
2585 chg_id = le16toh(*(uint16_t *)phy_db_notif->data);
2586
2587 entry = iwm_phy_db_get_section(sc, type, chg_id);
2588 if (!entry)
2589 return EINVAL;
2590
2591 if (entry->data)
2592 free(entry->data, M_DEVBUF, entry->size);
2593 entry->data = malloc(size, M_DEVBUF, M_NOWAIT);
2594 if (!entry->data) {
2595 entry->size = 0;
2596 return ENOMEM;
2597 }
2598 memcpy(entry->data, phy_db_notif->data, size);
2599 entry->size = size;
2600
2601 return 0;
2602}
2603
2604int
2605iwm_is_valid_channel(uint16_t ch_id)
2606{
2607 if (ch_id <= 14 ||
2608 (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
2609 (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
2610 (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
2611 return 1;
2612 return 0;
2613}
2614
2615uint8_t
2616iwm_ch_id_to_ch_index(uint16_t ch_id)
2617{
2618 if (!iwm_is_valid_channel(ch_id))
2619 return 0xff;
2620
2621 if (ch_id <= 14)
2622 return ch_id - 1;
2623 if (ch_id <= 64)
2624 return (ch_id + 20) / 4;
2625 if (ch_id <= 140)
2626 return (ch_id - 12) / 4;
2627 return (ch_id - 13) / 4;
2628}
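/*
 * Example mappings implied by the arithmetic above: 2 GHz channels
 * 1-14 take indices 0-13; 5 GHz channel 36 maps to (36 + 20) / 4 = 14,
 * channel 100 to (100 - 12) / 4 = 22 and channel 149 to
 * (149 - 13) / 4 = 34, yielding one contiguous index space across
 * both bands.
 */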
2629
2630
2631uint16_t
2632iwm_channel_id_to_papd(uint16_t ch_id)
2633{
2634 if (!iwm_is_valid_channel(ch_id))
2635 return 0xff;
2636
2637 if (1 <= ch_id && ch_id <= 14)
2638 return 0;
2639 if (36 <= ch_id && ch_id <= 64)
2640 return 1;
2641 if (100 <= ch_id && ch_id <= 140)
2642 return 2;
2643 return 3;
2644}
2645
2646uint16_t
2647iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
2648{
2649 struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2650 struct iwm_phy_db_chg_txp *txp_chg;
2651 int i;
2652 uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
2653
2654 if (ch_index == 0xff)
2655 return 0xff;
2656
2657 for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
2658 txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
2659 if (!txp_chg)
2660 return 0xff;
2661 /*
2662 * Looking for the first channel group the max channel
2663 * of which is higher than the requested channel.
2664 */
2665 if (le16toh(txp_chg->max_channel_idx) >= ch_index)
2666 return i;
2667 }
2668 return 0xff;
2669}
2670
2671int
2672iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data,
2673 uint16_t *size, uint16_t ch_id)
2674{
2675 struct iwm_phy_db_entry *entry;
2676 uint16_t ch_group_id = 0;
2677
2678 if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
2679 ch_group_id = iwm_channel_id_to_papd(ch_id);
2680 else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
2681 ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
2682
2683 entry = iwm_phy_db_get_section(sc, type, ch_group_id);
2684 if (!entry)
2685 return EINVAL;
2686
2687 *data = entry->data;
2688 *size = entry->size;
2689
2690 return 0;
2691}
2692
2693int
2694iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length,
2695 void *data)
2696{
2697 struct iwm_phy_db_cmd phy_db_cmd;
2698 struct iwm_host_cmd cmd = {
2699 .id = IWM_PHY_DB_CMD,
2700 .flags = IWM_CMD_ASYNC,
2701 };
2702
2703 phy_db_cmd.type = le16toh(type);
2704 phy_db_cmd.length = le16toh(length);
2705
2706 cmd.data[0] = &phy_db_cmd;
2707 cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
2708 cmd.data[1] = data;
2709 cmd.len[1] = length;
2710
2711 return iwm_send_cmd(sc, &cmd);
2712}
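/*
 * The command goes out as two fragments: data[0]/len[0] carry the
 * fixed struct iwm_phy_db_cmd header and data[1]/len[1] the
 * variable-length calibration payload, so the firmware receives a
 * single IWM_PHY_DB_CMD without the driver staging a combined copy.
 */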
2713
2714int
2715iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc, uint16_t type,
2716 uint8_t max_ch_groups)
2717{
2718 uint16_t i;
2719 int err;
2720 struct iwm_phy_db_entry *entry;
2721
2722 for (i = 0; i < max_ch_groups; i++) {
2723 entry = iwm_phy_db_get_section(sc, type, i);
2724 if (!entry)
2725 return EINVAL;
2726
2727 if (!entry->size)
2728 continue;
2729
2730 err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
2731 if (err)
2732 return err;
2733
2734 DELAY(1000);
2735 }
2736
2737 return 0;
2738}
2739
2740int
2741iwm_send_phy_db_data(struct iwm_softc *sc)
2742{
2743 uint8_t *data = NULL;
2744 uint16_t size = 0;
2745 int err;
2746
2747 err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
2748 if (err)
2749 return err;
2750
2751 err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
2752 if (err)
2753 return err;
2754
2755 err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
2756 &data, &size, 0);
2757 if (err)
2758 return err;
2759
2760 err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
2761 if (err)
2762 return err;
2763
2764 err = iwm_phy_db_send_all_channel_groups(sc,
2765 IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
2766 if (err)
2767 return err;
2768
2769 err = iwm_phy_db_send_all_channel_groups(sc,
2770 IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
2771 if (err)
2772 return err;
2773
2774 return 0;
2775}
2776
2777/*
2778 * For the high priority TE use a time event type that has similar priority to
2779 * the FW's action scan priority.
2780 */
2781 #define IWM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
2782 #define IWM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
2783
2784int
2785iwm_send_time_event_cmd(struct iwm_softc *sc,
2786 const struct iwm_time_event_cmd *cmd)
2787{
2788 struct iwm_rx_packet *pkt;
2789 struct iwm_time_event_resp *resp;
2790 struct iwm_host_cmd hcmd = {
2791 .id = IWM_TIME_EVENT_CMD,
2792 .flags = IWM_CMD_WANT_RESP,
2793 .resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2794 };
2795 uint32_t resp_len;
2796 int err;
2797
2798 hcmd.data[0] = cmd;
2799 hcmd.len[0] = sizeof(*cmd);
2800 err = iwm_send_cmd(sc, &hcmd);
2801 if (err)
2802 return err;
2803
2804 pkt = hcmd.resp_pkt;
2805 if (!pkt || (pkt->hdr.flags & IWM_CMD_FAILED_MSK)) {
2806 err = EIO;
2807 goto out;
2808 }
2809
2810 resp_len = iwm_rx_packet_payload_len(pkt);
2811 if (resp_len != sizeof(*resp)) {
2812 err = EIO;
2813 goto out;
2814 }
2815
2816 resp = (void *)pkt->data;
2817 if (le32toh(resp->status) == 0)
2818 sc->sc_time_event_uid = le32toh(resp->unique_id);
2819 else
2820 err = EIO;
2821out:
2822 iwm_free_resp(sc, &hcmd);
2823 return err;
2824}
2825
2826void
2827iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
2828 uint32_t duration, uint32_t max_delay)
2829{
2830 struct iwm_time_event_cmd time_cmd;
2831
2832 /* Do nothing if a time event is already scheduled. */
2833 if (sc->sc_flags & IWM_FLAG_TE_ACTIVE)
2834 return;
2835
2836 memset(&time_cmd, 0, sizeof(time_cmd));
2837
2838 time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
2839 time_cmd.id_and_color =
2840 htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2841 time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);
2842
2843 time_cmd.apply_time = htole32(0);
2844
2845 time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
2846 time_cmd.max_delay = htole32(max_delay);
2847 /* TODO: why do we need to interval = bi if it is not periodic? */
2848 time_cmd.interval = htole32(1);
2849 time_cmd.duration = htole32(duration);
2850 time_cmd.repeat = 1;
2851 time_cmd.policy
2852 = htole16(IWM_TE_V2_NOTIF_HOST_EVENT_START |
2853 IWM_TE_V2_NOTIF_HOST_EVENT_END |
2854 IWM_T2_V2_START_IMMEDIATELY);
2855
2856 if (iwm_send_time_event_cmd(sc, &time_cmd) == 0)
2857 sc->sc_flags |= IWM_FLAG_TE_ACTIVE;
2858
2859 DELAY(100);
2860}
2861
2862void
2863iwm_unprotect_session(struct iwm_softc *sc, struct iwm_node *in)
2864{
2865 struct iwm_time_event_cmd time_cmd;
2866
2867 /* Do nothing if the time event has already ended. */
2868 if ((sc->sc_flags & IWM_FLAG_TE_ACTIVE) == 0)
2869 return;
2870
2871 memset(&time_cmd, 0, sizeof(time_cmd));
2872
2873 time_cmd.action = htole32(IWM_FW_CTXT_ACTION_REMOVE);
2874 time_cmd.id_and_color =
2875 htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2876 time_cmd.id = htole32(sc->sc_time_event_uid);
2877
2878 if (iwm_send_time_event_cmd(sc, &time_cmd) == 0)
2879 sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
2880
2881 DELAY(100);
2882}
2883
2884/*
2885 * NVM read access and content parsing. We do not support
2886 * external NVM or writing NVM.
2887 */
2888
2889/* list of NVM sections we are allowed/need to read */
2890const int iwm_nvm_to_read[] = {
2891 IWM_NVM_SECTION_TYPE_HW,
2892 IWM_NVM_SECTION_TYPE_SW,
2893 IWM_NVM_SECTION_TYPE_REGULATORY,
2894 IWM_NVM_SECTION_TYPE_CALIBRATION,
2895 IWM_NVM_SECTION_TYPE_PRODUCTION,
2896 IWM_NVM_SECTION_TYPE_REGULATORY_SDP,
2897 IWM_NVM_SECTION_TYPE_HW_8000,
2898 IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
2899 IWM_NVM_SECTION_TYPE_PHY_SKU,
2900};
2901
2902 #define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
2903
2904 #define IWM_NVM_WRITE_OPCODE 1
2905 #define IWM_NVM_READ_OPCODE 0
2906
2907int
2908iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset,
2909 uint16_t length, uint8_t *data, uint16_t *len)
2910{
2911
2912 struct iwm_nvm_access_cmd nvm_access_cmd = {
2913 .offset = htole16(offset),
2914 .length = htole16(length),
2915 .type = htole16(section),
2916 .op_code = IWM_NVM_READ_OPCODE,
2917 };
2918 struct iwm_nvm_access_resp *nvm_resp;
2919 struct iwm_rx_packet *pkt;
2920 struct iwm_host_cmd cmd = {
2921 .id = IWM_NVM_ACCESS_CMD,
2922 .flags = (IWM_CMD_WANT_RESP | IWM_CMD_SEND_IN_RFKILL),
2923 .resp_pkt_len = IWM_CMD_RESP_MAX,
2924 .data = { &nvm_access_cmd, },
2925 };
2926 int err, offset_read;
2927 size_t bytes_read;
2928 uint8_t *resp_data;
2929
2930 cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
2931
2932 err = iwm_send_cmd(sc, &cmd);
2933 if (err)
2934 return err;
2935
2936 pkt = cmd.resp_pkt;
2937 if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
2938 err = EIO;
2939 goto exit;
2940 }
2941
2942 /* Extract NVM response */
2943 nvm_resp = (void *)pkt->data;
2944 if (nvm_resp == NULL)
2945 return EIO;
2946
2947 err = le16toh(nvm_resp->status);
2948 bytes_read = le16toh(nvm_resp->length);
2949 offset_read = le16toh(nvm_resp->offset);
2950 resp_data = nvm_resp->data;
2951 if (err) {
2952 err = EINVAL;
2953 goto exit;
2954 }
2955
2956 if (offset_read != offset) {
2957 err = EINVAL;
2958 goto exit;
2959 }
2960
2961 if (bytes_read > length) {
2962 err = EINVAL;
2963 goto exit;
2964 }
2965
2966 memcpy(data + offset, resp_data, bytes_read);
2967 *len = bytes_read;
2968
2969 exit:
2970 iwm_free_resp(sc, &cmd);
2971 return err;
2972}
2973
2974/*
2975 * Reads an NVM section completely.
2976 * NICs prior to the 7000 family don't have a real NVM, but just read
2977 * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
2978 * by uCode, we need to manually check in this case that we don't
2979 * overflow and try to read more than the EEPROM size.
2980 */
2981int
2982iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data,
2983 uint16_t *len, size_t max_len)
2984{
2985 uint16_t chunklen, seglen;
2986 int err = 0;
2987
2988 chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
2989 *len = 0;
2990
2991 /* Read NVM chunks until exhausted (reading less than requested) */
2992 while (seglen == chunklen && *len < max_len) {
2993 err = iwm_nvm_read_chunk(sc,
2994 section, *len, chunklen, data, &seglen);
2995 if (err)
2996 return err;
2997
2998 *len += seglen;
2999 }
3000
3001 return err;
3002}
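/*
 * The loop ends on the first short read: with the default 2 KB chunk,
 * a 5000-byte section reads back 2048, 2048 and then 904 bytes, and
 * seglen != chunklen stops the loop with *len == 5000. A section that
 * is an exact multiple of the chunk size presumably terminates on a
 * following zero-length read, or at the max_len bound.
 */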
3003
3004uint8_t
3005iwm_fw_valid_tx_ant(struct iwm_softc *sc)
3006{
3007 uint8_t tx_ant;
3008
3009 tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
3010 >> IWM_FW_PHY_CFG_TX_CHAIN_POS);
3011
3012 if (sc->sc_nvm.valid_tx_ant)
3013 tx_ant &= sc->sc_nvm.valid_tx_ant;
3014
3015 return tx_ant;
3016}
3017
3018uint8_t
3019iwm_fw_valid_rx_ant(struct iwm_softc *sc)
3020{
3021 uint8_t rx_ant;
3022
3023 rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN)
3024 >> IWM_FW_PHY_CFG_RX_CHAIN_POS);
3025
3026 if (sc->sc_nvm.valid_rx_ant)
3027 rx_ant &= sc->sc_nvm.valid_rx_ant;
3028
3029 return rx_ant;
3030}
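/*
 * The result is a bitmask with one bit per antenna chain (bit 0 =
 * antenna A, bit 1 = B, bit 2 = C, matching the IWM_ANT_* constants):
 * e.g. 0x3 means chains A and B are usable, which is what the
 * IWM_ANT_AB test in iwm_setup_ht_rates() below checks before
 * advertising two-stream MCS 8-15.
 */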
3031
3032void
3033iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags,
3034 const uint8_t *nvm_channels, int nchan)
3035{
3036 struct ieee80211com *ic = &sc->sc_ic;
3037 struct iwm_nvm_data *data = &sc->sc_nvm;
3038 int ch_idx;
3039 struct ieee80211_channel *channel;
3040 uint16_t ch_flags;
3041 int is_5ghz;
3042 int flags, hw_value;
3043
3044 for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
3045 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
3046
3047 if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
3048 !data->sku_cap_band_52GHz_enable)
3049 ch_flags &= ~IWM_NVM_CHANNEL_VALID;
3050
3051 if (!(ch_flags & IWM_NVM_CHANNEL_VALID))
3052 continue;
3053
3054 hw_value = nvm_channels[ch_idx];
3055 channel = &ic->ic_channels[hw_value];
3056
3057 is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
3058 if (!is_5ghz) {
3059 flags = IEEE80211_CHAN_2GHZ;
3060 channel->ic_flags
3061 = IEEE80211_CHAN_CCK
3062 | IEEE80211_CHAN_OFDM
3063 | IEEE80211_CHAN_DYN
3064 | IEEE80211_CHAN_2GHZ;
3065 } else {
3066 flags = IEEE80211_CHAN_5GHZ;
3067 channel->ic_flags =
3068 IEEE80211_CHAN_A;
3069 }
3070 channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
3071
3072 if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
3073 channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
3074
3075 if (data->sku_cap_11n_enable) {
3076 channel->ic_flags |= IEEE80211_CHAN_HT;
3077 if (ch_flags & IWM_NVM_CHANNEL_40MHZ)
3078 channel->ic_flags |= IEEE80211_CHAN_40MHZ;
3079 }
3080 }
3081}
3082
3083int
3084iwm_mimo_enabled(struct iwm_softc *sc)
3085{
3086 struct ieee80211com *ic = &sc->sc_ic;
3087
3088 return !sc->sc_nvm.sku_cap_mimo_disable &&
3089 (ic->ic_userflags & IEEE80211_F_NOMIMO) == 0;
3090}
3091
3092void
3093iwm_setup_ht_rates(struct iwm_softc *sc)
3094{
3095 struct ieee80211com *ic = &sc->sc_ic;
3096 uint8_t rx_ant;
3097
3098 /* TX is supported with the same MCS as RX. */
3099 ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
3100
3101 memset(ic->ic_sup_mcs, 0, sizeof(ic->ic_sup_mcs));
3102 ic->ic_sup_mcs[0] = 0xff; /* MCS 0-7 */
3103
3104 if (!iwm_mimo_enabled(sc))
3105 return;
3106
3107 rx_ant = iwm_fw_valid_rx_ant(sc);
3108 if ((rx_ant & IWM_ANT_AB) == IWM_ANT_AB ||
3109 (rx_ant & IWM_ANT_BC) == IWM_ANT_BC)
3110 ic->ic_sup_mcs[1] = 0xff; /* MCS 8-15 */
3111}
3112
3113void
3114iwm_init_reorder_buffer(struct iwm_reorder_buffer *reorder_buf,
3115 uint16_t ssn, uint16_t buf_size)
3116{
3117 reorder_buf->head_sn = ssn;
3118 reorder_buf->num_stored = 0;
3119 reorder_buf->buf_size = buf_size;
3120 reorder_buf->last_amsdu = 0;
3121 reorder_buf->last_sub_index = 0;
3122 reorder_buf->removed = 0;
3123 reorder_buf->valid = 0;
3124 reorder_buf->consec_oldsn_drops = 0;
3125 reorder_buf->consec_oldsn_ampdu_gp2 = 0;
3126 reorder_buf->consec_oldsn_prev_drop = 0;
3127}
3128
3129void
3130iwm_clear_reorder_buffer(struct iwm_softc *sc, struct iwm_rxba_data *rxba)
3131{
3132 int i;
3133 struct iwm_reorder_buffer *reorder_buf = &rxba->reorder_buf;
3134 struct iwm_reorder_buf_entry *entry;
3135
3136 for (i = 0; i < reorder_buf->buf_size; i++) {
3137 entry = &rxba->entries[i];
3138 ml_purge(&entry->frames);
3139 timerclear(&entry->reorder_time);
3140 }
3141
3142 reorder_buf->removed = 1;
3143 timeout_del(&reorder_buf->reorder_timer);
3144 timerclear(&rxba->last_rx);
3145 timeout_del(&rxba->session_timer);
3146 rxba->baid = IWM_RX_REORDER_DATA_INVALID_BAID;
3147}
3148
3149 #define RX_REORDER_BUF_TIMEOUT_MQ_USEC (100000ULL)
3150
3151void
3152iwm_rx_ba_session_expired(void *arg)
3153{
3154 struct iwm_rxba_data *rxba = arg;
3155 struct iwm_softc *sc = rxba->sc;
3156 struct ieee80211com *ic = &sc->sc_ic;
3157 struct ieee80211_node *ni = ic->ic_bss;
3158 struct timeval now, timeout, expiry;
3159 int s;
3160
3161 s = splnet();
3162 if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0 &&
3163 ic->ic_state == IEEE80211_S_RUN &&
3164 rxba->baid != IWM_RX_REORDER_DATA_INVALID_BAID) {
3165 getmicrouptime(&now);
3166 USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
3167 timeradd(&rxba->last_rx, &timeout, &expiry);
3168 if (timercmp(&now, &expiry, <)) {
3169 timeout_add_usec(&rxba->session_timer, rxba->timeout);
3170 } else {
3171 ic->ic_stats.is_ht_rx_ba_timeout++;
3172 ieee80211_delba_request(ic, ni,
3173 IEEE80211_REASON_TIMEOUT, 0, rxba->tid);
3174 }
3175 }
3176 splx(s);
3177}
3178
3179void
3180iwm_reorder_timer_expired(void *arg)
3181{
3182 struct mbuf_list ml = MBUF_LIST_INITIALIZER();
3183 struct iwm_reorder_buffer *buf = arg;
3184 struct iwm_rxba_data *rxba = iwm_rxba_data_from_reorder_buf(buf);
3185 struct iwm_reorder_buf_entry *entries = &rxba->entries[0];
3186 struct iwm_softc *sc = rxba->sc;
3187 struct ieee80211com *ic = &sc->sc_ic;
3188 struct ieee80211_node *ni = ic->ic_bss;
3189 int i, s;
3190 uint16_t sn = 0, index = 0;
3191 int expired = 0;
3192 int cont = 0;
3193 struct timeval now, timeout, expiry;
3194
3195 if (!buf->num_stored || buf->removed)
3196 return;
3197
3198 s = splnet();
3199 getmicrouptime(&now);
3200 USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
3201
3202 for (i = 0; i < buf->buf_size ; i++) {
3203 index = (buf->head_sn + i) % buf->buf_size;
3204
3205 if (ml_empty(&entries[index].frames)) {
3206 /*
3207 * If there is a hole and the next frame didn't expire
3208 * we want to break and not advance SN.
3209 */
3210 cont = 0;
3211 continue;
3212 }
3213 timeradd(&entries[index].reorder_time, &timeout, &expiry);
3214 if (!cont && timercmp(&now, &expiry, <))
3215 break;
3216
3217 expired = 1;
3218 /* continue until next hole after this expired frame */
3219 cont = 1;
3220 sn = (buf->head_sn + (i + 1)) & 0xfff;
3221 }
3222
3223 if (expired) {
3224 /* SN is set to the last expired frame + 1 */
3225 iwm_release_frames(sc, ni, rxba, buf, sn, &ml);
3226 if_input(&sc->sc_ic.ic_if, &ml);
3227 ic->ic_stats.is_ht_rx_ba_window_gap_timeout++;
3228 } else {
3229 /*
3230 * If no frame expired and there are stored frames, index is now
3231 * pointing to the first unexpired frame - modify reorder timeout
3232 * accordingly.
3233 */
3234 timeout_add_usec(&buf->reorder_timer,
3235 RX_REORDER_BUF_TIMEOUT_MQ_USEC);
3236 }
3237
3238 splx(s);
3239}
3240
3241 #define IWM_MAX_RX_BA_SESSIONS 16
3242
3243int
3244iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
3245 uint16_t ssn, uint16_t winsize, int timeout_val, int start)
3246{
3247 struct ieee80211com *ic = &sc->sc_ic;
3248 struct iwm_add_sta_cmd cmd;
3249 struct iwm_node *in = (void *)ni;
3250 int err, s;
3251 uint32_t status;
3252 size_t cmdsize;
3253 struct iwm_rxba_data *rxba = NULL;
3254 uint8_t baid = 0;
3255
3256 s = splnet();
3257
3258 if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS) {
3259 ieee80211_addba_req_refuse(ic, ni, tid);
3260 splx(s);
3261 return 0;
3262 }
3263
3264 memset(&cmd, 0, sizeof(cmd));
3265
3266 cmd.sta_id = IWM_STATION_ID;
3267 cmd.mac_id_n_color
3268 = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
3269 cmd.add_modify = IWM_STA_MODE_MODIFY;
3270
3271 if (start) {
3272 cmd.add_immediate_ba_tid = (uint8_t)tid;
3273 cmd.add_immediate_ba_ssn = ssn;
3274 cmd.rx_ba_window = winsize;
3275 } else {
3276 cmd.remove_immediate_ba_tid = (uint8_t)tid;
3277 }
3278 cmd.modify_mask = start ? IWM_STA_MODIFY_ADD_BA_TID :
3279 IWM_STA_MODIFY_REMOVE_BA_TID;
3280
3281 status = IWM_ADD_STA_SUCCESS;
3282 if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
3283 cmdsize = sizeof(cmd);
3284 else
3285 cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
3286 err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd,
3287 &status);
3288 if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
3289 err = EIO;
3290 if (err) {
3291 if (start)
3292 ieee80211_addba_req_refuse(ic, ni, tid);
3293 splx(s);
3294 return err;
3295 }
3296
3297 if (sc->sc_mqrx_supported) {
3298 /* Deaggregation is done in hardware. */
3299 if (start) {
3300 if (!(status & IWM_ADD_STA_BAID_VALID_MASK)) {
3301 ieee80211_addba_req_refuse(ic, ni, tid);
3302 splx(s);
3303 return EIO;
3304 }
3305 baid = (status & IWM_ADD_STA_BAID_MASK) >>
3306 IWM_ADD_STA_BAID_SHIFT;
3307 if (baid == IWM_RX_REORDER_DATA_INVALID_BAID ||
3308 baid >= nitems(sc->sc_rxba_data)) {
3309 ieee80211_addba_req_refuse(ic, ni, tid);
3310 splx(s);
3311 return EIO;
3312 }
3313 rxba = &sc->sc_rxba_data[baid];
3314 if (rxba->baid != IWM_RX_REORDER_DATA_INVALID_BAID) {
3315 ieee80211_addba_req_refuse(ic, ni, tid);
3316 splx(s);
3317 return 0;
3318 }
3319 rxba->sta_id = IWM_STATION_ID;
3320 rxba->tid = tid;
3321 rxba->baid = baid;
3322 rxba->timeout = timeout_val;
3323 getmicrouptime(&rxba->last_rx);
3324 iwm_init_reorder_buffer(&rxba->reorder_buf, ssn,
3325 winsize);
3326 if (timeout_val != 0) {
3327 struct ieee80211_rx_ba *ba;
3328 timeout_add_usec(&rxba->session_timer,
3329 timeout_val);
3330 /* XXX disable net80211's BA timeout handler */
3331 ba = &ni->ni_rx_ba[tid];
3332 ba->ba_timeout_val = 0;
3333 }
3334 } else {
3335 int i;
3336 for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
3337 rxba = &sc->sc_rxba_data[i];
3338 if (rxba->baid ==
3339 IWM_RX_REORDER_DATA_INVALID_BAID)
3340 continue;
3341 if (rxba->tid != tid)
3342 continue;
3343 iwm_clear_reorder_buffer(sc, rxba);
3344 break;
3345 }
3346 }
3347 }
3348
3349 if (start) {
3350 sc->sc_rx_ba_sessions++;
3351 ieee80211_addba_req_accept(ic, ni, tid);
3352 } else if (sc->sc_rx_ba_sessions > 0)
3353 sc->sc_rx_ba_sessions--;
3354
3355 splx(s);
3356 return 0;
3357}
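/*
 * Example of the BAID decoding above: for a firmware status word of
 * 0x8501, IWM_ADD_STA_BAID_VALID_MASK is set, the low status byte is
 * IWM_ADD_STA_SUCCESS, and (0x8501 & IWM_ADD_STA_BAID_MASK) >>
 * IWM_ADD_STA_BAID_SHIFT yields BAID 5, selecting sc->sc_rxba_data[5]
 * for this block ack session (mask and shift being 0x7F00 and 8).
 */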
3358
3359void
3360iwm_mac_ctxt_task(void *arg)
3361{
3362 struct iwm_softc *sc = arg;
3363 struct ieee80211com *ic = &sc->sc_ic;
3364 struct iwm_node *in = (void *)ic->ic_bss;
3365 int err, s = splnet();
3366
3367 if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) ||
3368 ic->ic_state != IEEE80211_S_RUN) {
3369 refcnt_rele_wake(&sc->task_refs);
3370 splx(s);
3371 return;
3372 }
3373
3374 err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
3375 if (err)
3376 printf("%s: failed to update MAC\n", DEVNAME(sc));
3377
3378 refcnt_rele_wake(&sc->task_refs);
3379 splx(s);
3380}
3381
3382void
3383iwm_updateprot(struct ieee80211com *ic)
3384{
3385 struct iwm_softc *sc = ic->ic_softc;
3386
3387 if (ic->ic_state == IEEE80211_S_RUN &&
3388 !task_pending(&sc->newstate_task))
3389 iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3390}
3391
3392void
3393iwm_updateslot(struct ieee80211com *ic)
3394{
3395 struct iwm_softc *sc = ic->ic_softc;
3396
3397 if (ic->ic_state == IEEE80211_S_RUN &&
3398 !task_pending(&sc->newstate_task))
3399 iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3400}
3401
3402void
3403iwm_updateedca(struct ieee80211com *ic)
3404{
3405 struct iwm_softc *sc = ic->ic_softc;
3406
3407 if (ic->ic_state == IEEE80211_S_RUN &&
3408 !task_pending(&sc->newstate_task))
3409 iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3410}
3411
3412void
3413iwm_phy_ctxt_task(void *arg)
3414{
3415 struct iwm_softc *sc = arg;
3416 struct ieee80211com *ic = &sc->sc_ic;
3417 struct iwm_node *in = (void *)ic->ic_bss;
3418 struct ieee80211_node *ni = &in->in_ni;
3419 uint8_t chains, sco;
3420 int err, s = splnet();
3421
3422 if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) ||
3423 ic->ic_state != IEEE80211_S_RUN ||
3424 in->in_phyctxt == NULL) {
3425 refcnt_rele_wake(&sc->task_refs);
3426 splx(s);
3427 return;
3428 }
3429
3430 chains = iwm_mimo_enabled(sc) ? 2 : 1;
3431 if (ieee80211_node_supports_ht_chan40(ni))
3432 sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
3433 else
3434 sco = IEEE80211_HTOP0_SCO_SCN;
3435 if (in->in_phyctxt->sco != sco) {
3436 err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
3437 in->in_phyctxt->channel, chains, chains, 0, sco);
3438 if (err)
3439 printf("%s: failed to update PHY\n", DEVNAME(sc));
3440 iwm_setrates(in, 0);
3441 }
3442
3443 refcnt_rele_wake(&sc->task_refs);
3444 splx(s);
3445}
3446
3447void
3448iwm_updatechan(struct ieee80211com *ic)
3449{
3450 struct iwm_softc *sc = ic->ic_softc;
3451
3452 if (ic->ic_state == IEEE80211_S_RUN &&
3453 !task_pending(&sc->newstate_task))
3454 iwm_add_task(sc, systq, &sc->phy_ctxt_task);
3455}
3456
3457int
3458iwm_sta_tx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
3459 uint16_t ssn, uint16_t winsize, int start)
3460{
3461 struct iwm_add_sta_cmd cmd;
3462 struct ieee80211com *ic = &sc->sc_ic;
3463 struct iwm_node *in = (void *)ni;
3464 int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
3465 struct iwm_tx_ring *ring;
3466 enum ieee80211_edca_ac ac;
3467 int fifo;
3468 uint32_t status;
3469 int err;
3470 size_t cmdsize;
3471
3472 /* Ensure we can map this TID to an aggregation queue. */
3473 if (tid >= IWM_MAX_TID_COUNT || qid > IWM_LAST_AGG_TX_QUEUE)
3474 return ENOSPC;
3475
3476 if (start) {
3477 if ((sc->tx_ba_queue_mask & (1 << qid)) != 0)
3478 return 0;
3479 } else {
3480 if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
3481 return 0;
3482 }
3483
3484 ring = &sc->txq[qid];
3485 ac = iwm_tid_to_ac[tid];
3486 fifo = iwm_ac_to_tx_fifo[ac];
3487
3488 memset(&cmd, 0, sizeof(cmd));
3489
3490 cmd.sta_id = IWM_STATION_ID;
3491 cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
3492 in->in_color));
3493 cmd.add_modify = IWM_STA_MODE_MODIFY;
3494
3495 if (start) {
3496 /* Enable Tx aggregation for this queue. */
3497 in->tid_disable_ampdu &= ~(1 << tid);
3498 in->tfd_queue_msk |= (1 << qid);
3499 } else {
3500 in->tid_disable_ampdu |= (1 << tid);
3501 /*
3502 * Queue remains enabled in the TFD queue mask
3503 * until we leave RUN state.
3504 */
3505 err = iwm_flush_sta(sc, in);
3506 if (err)
3507 return err;
3508 }
3509
3510 cmd.tfd_queue_msk |= htole32(in->tfd_queue_msk);
3511 cmd.tid_disable_tx = htole16(in->tid_disable_ampdu);
3512 cmd.modify_mask = (IWM_STA_MODIFY_QUEUES |
3513 IWM_STA_MODIFY_TID_DISABLE_TX);
3514
3515 if (start && (sc->qenablemsk & (1 << qid)) == 0) {
3516 if (!iwm_nic_lock(sc)) {
3517 if (start)
3518 ieee80211_addba_resp_refuse(ic, ni, tid,
3519 IEEE80211_STATUS_UNSPECIFIED);
3520 return EBUSY;
3521 }
3522 err = iwm_enable_txq(sc, IWM_STATION_ID, qid, fifo, 1, tid,
3523 ssn);
3524 iwm_nic_unlock(sc);
3525 if (err) {
3526 printf("%s: could not enable Tx queue %d (error %d)\n",
3527 DEVNAME(sc), qid, err);
3528 if (start)
3529 ieee80211_addba_resp_refuse(ic, ni, tid,
3530 IEEE80211_STATUS_UNSPECIFIED);
3531 return err;
3532 }
3533 /*
3534 * If iwm_enable_txq() employed the SCD hardware bug
3535 * workaround we must skip the frame with seqnum SSN.
3536 */
3537 if (ring->cur != IWM_AGG_SSN_TO_TXQ_IDX(ssn)) {
3538 ssn = (ssn + 1) & 0xfff;
3539 KASSERT(ring->cur == IWM_AGG_SSN_TO_TXQ_IDX(ssn));
3540 ieee80211_output_ba_move_window(ic, ni, tid, ssn);
3541 ni->ni_qos_txseqs[tid] = ssn;
3542 }
3543 }
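/*
 * Example: IWM_AGG_SSN_TO_TXQ_IDX() masks the 12-bit SSN onto the
 * 256-entry aggregation ring, so an SSN of 0x123 starts the queue at
 * index 0x23. When the SCD workaround in iwm_enable_txq() bumped the
 * SSN, the block above moves net80211's BA window so both sides agree
 * on the first sequence number to use.
 */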
3544
3545 if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
3546 cmdsize = sizeof(cmd);
3547 else
3548 cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
3549
3550 status = 0;
3551 err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd, &status);
3552 if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
3553 err = EIO;
3554 if (err) {
3555 printf("%s: could not update sta (error %d)\n",
3556 DEVNAME(sc), err);
3557 if (start)
3558 ieee80211_addba_resp_refuse(ic, ni, tid,
3559 IEEE80211_STATUS_UNSPECIFIED);
3560 return err;
3561 }
3562
3563 if (start) {
3564 sc->tx_ba_queue_mask |= (1 << qid);
3565 ieee80211_addba_resp_accept(ic, ni, tid);
3566 } else {
3567 sc->tx_ba_queue_mask &= ~(1 << qid);
3568
3569 /*
3570 * Clear pending frames but keep the queue enabled.
3571 * Firmware panics if we disable the queue here.
3572 */
3573 iwm_txq_advance(sc, ring, ring->cur);
3574 iwm_clear_oactive(sc, ring);
3575 }
3576
3577 return 0;
3578}
3579
3580void
3581iwm_ba_task(void *arg)
3582{
3583 struct iwm_softc *sc = arg;
3584 struct ieee80211com *ic = &sc->sc_ic;
3585 struct ieee80211_node *ni = ic->ic_bss;
3586 int s = splnet();
3587 int tid, err = 0;
3588
3589 if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) ||
3590 ic->ic_state != IEEE80211_S_RUN) {
3591 refcnt_rele_wake(&sc->task_refs);
3592 splx(s);
3593 return;
3594 }
3595
3596 for (tid = 0; tid < IWM_MAX_TID_COUNT && !err; tid++) {
3597 if (sc->sc_flags & IWM_FLAG_SHUTDOWN)
3598 break;
3599 if (sc->ba_rx.start_tidmask & (1 << tid)) {
3600 struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
3601 err = iwm_sta_rx_agg(sc, ni, tid, ba->ba_winstart,
3602 ba->ba_winsize, ba->ba_timeout_val, 1);
3603 sc->ba_rx.start_tidmask &= ~(1 << tid);
3604 } else if (sc->ba_rx.stop_tidmask & (1 << tid)) {
3605 err = iwm_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0);
3606 sc->ba_rx.stop_tidmask &= ~(1 << tid);
3607 }
3608 }
3609
3610 for (tid = 0; tid < IWM_MAX_TID_COUNT && !err; tid++) {
3611 if (sc->sc_flags & IWM_FLAG_SHUTDOWN)
3612 break;
3613 if (sc->ba_tx.start_tidmask & (1 << tid)) {
3614 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
3615 err = iwm_sta_tx_agg(sc, ni, tid, ba->ba_winstart,
3616 ba->ba_winsize, 1);
3617 sc->ba_tx.start_tidmask &= ~(1 << tid);
3618 } else if (sc->ba_tx.stop_tidmask & (1 << tid)) {
3619 err = iwm_sta_tx_agg(sc, ni, tid, 0, 0, 0);
3620 sc->ba_tx.stop_tidmask &= ~(1 << tid);
3621 }
3622 }
3623
3624 /*
3625 * We "recover" from failure to start or stop a BA session
3626 * by resetting the device.
3627 */
3628 if (err && (sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
3629 task_add(systq, &sc->init_task);
3630
3631 refcnt_rele_wake(&sc->task_refs);
3632 splx(s);
3633}
3634
3635/*
3636 * This function is called by upper layer when an ADDBA request is received
3637 * from another STA and before the ADDBA response is sent.
3638 */
3639int
3640iwm_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3641 uint8_t tid)
3642{
3643 struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3644
3645 if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS ||
3646 tid > IWM_MAX_TID_COUNT)
3647 return ENOSPC;
3648
3649 if (sc->ba_rx.start_tidmask & (1 << tid))
3650 return EBUSY;
3651
3652 sc->ba_rx.start_tidmask |= (1 << tid);
3653 iwm_add_task(sc, systq, &sc->ba_task);
3654
3655 return EBUSY;
3656}
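/*
 * Returning EBUSY here appears intentional rather than an error:
 * the ADDBA decision is made asynchronously in iwm_ba_task(), which
 * calls ieee80211_addba_req_accept() or _refuse() once the firmware
 * has answered, so net80211 must not send an ADDBA response yet.
 */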
3657
3658/*
3659 * This function is called by upper layer on teardown of an HT-immediate
3660 * Block Ack agreement (eg. upon receipt of a DELBA frame).
3661 */
3662void
3663iwm_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
3664 uint8_t tid)
3665{
3666 struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3667
3668 if (tid > IWM_MAX_TID_COUNT || sc->ba_rx.stop_tidmask & (1 << tid))
3669 return;
3670
3671 sc->ba_rx.stop_tidmask |= (1 << tid);
3672 iwm_add_task(sc, systq, &sc->ba_task);
3673}
3674
3675int
3676iwm_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3677 uint8_t tid)
3678{
3679 struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3680 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
3681 int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
3682
3683 /* We only implement Tx aggregation with DQA-capable firmware. */
3684 if (!isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
3685 return ENOTSUP;
3686
3687 /* Ensure we can map this TID to an aggregation queue. */
3688 if (tid >= IWM_MAX_TID_COUNT)
3689 return EINVAL;
3690
3691 /* We only support a fixed Tx aggregation window size, for now. */
3692 if (ba->ba_winsize != IWM_FRAME_LIMIT)
3693 return ENOTSUP;
3694
3695 /* Is firmware already using Tx aggregation on this queue? */
3696 if ((sc->tx_ba_queue_mask & (1 << qid)) != 0)
3697 return ENOSPC;
3698
3699 /* Are we already processing an ADDBA request? */
3700 if (sc->ba_tx.start_tidmask & (1 << tid))
3701 return EBUSY;
3702
3703 sc->ba_tx.start_tidmask |= (1 << tid);
3704 iwm_add_task(sc, systq, &sc->ba_task);
3705
3706 return EBUSY;
3707}
3708
3709void
3710iwm_ampdu_tx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
3711 uint8_t tid)
3712{
3713 struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3714 int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
3715
3716 if (tid > IWM_MAX_TID_COUNT || sc->ba_tx.stop_tidmask & (1 << tid))
3717 return;
3718
3719 /* Is firmware currently using Tx aggregation on this queue? */
3720 if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
3721 return;
3722
3723 sc->ba_tx.stop_tidmask |= (1 << tid);
3724 iwm_add_task(sc, systq, &sc->ba_task);
3725}
3726
3727void
3728iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
3729 const uint16_t *mac_override, const uint16_t *nvm_hw)
3730{
3731 const uint8_t *hw_addr;
3732
3733 if (mac_override) {
3734 static const uint8_t reserved_mac[] = {
3735 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3736 };
3737
3738 hw_addr = (const uint8_t *)(mac_override +
3739 IWM_MAC_ADDRESS_OVERRIDE_8000);
3740
3741 /*
3742 * Store the MAC address from MAO section.
3743 * No byte swapping is required in MAO section
3744 */
3745 memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN);
3746
3747 /*
3748 * Force the use of the OTP MAC address in case of reserved MAC
3749 * address in the NVM, or if address is given but invalid.
3750 */
3751 if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 &&
3752 (memcmp(etherbroadcastaddr, data->hw_addr,
3753 sizeof(etherbroadcastaddr)) != 0) &&
3754 (memcmp(etheranyaddr, data->hw_addr,
3755 sizeof(etheranyaddr)) != 0) &&
3756 !ETHER_IS_MULTICAST(data->hw_addr))
3757 return;
3758 }
3759
3760 if (nvm_hw) {
3761 /* Read the mac address from WFMP registers. */
3762 uint32_t mac_addr0, mac_addr1;
3763
3764 if (!iwm_nic_lock(sc))
3765 goto out;
3766 mac_addr0 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
3767 mac_addr1 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
3768 iwm_nic_unlock(sc);
3769
3770 hw_addr = (const uint8_t *)&mac_addr0;
3771 data->hw_addr[0] = hw_addr[3];
3772 data->hw_addr[1] = hw_addr[2];
3773 data->hw_addr[2] = hw_addr[1];
3774 data->hw_addr[3] = hw_addr[0];
3775
3776 hw_addr = (const uint8_t *)&mac_addr1;
3777 data->hw_addr[4] = hw_addr[1];
3778 data->hw_addr[5] = hw_addr[0];
3779
3780 return;
3781 }
3782out:
3783 printf("%s: mac address not found\n", DEVNAME(sc));
3784 memset(data->hw_addr, 0, sizeof(data->hw_addr));
3785}
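/*
 * Sketch of the WFMP decoding above: each PRPH word is kept little
 * endian and the address is taken MSB-first from it, so with
 * mac_addr0 bytes {b0,b1,b2,b3} in memory the address begins
 * b3:b2:b1:b0, followed by bytes 1 and 0 of mac_addr1.
 */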
3786
3787int
3788iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw,
3789 const uint16_t *nvm_sw, const uint16_t *nvm_calib,
3790 const uint16_t *mac_override, const uint16_t *phy_sku,
3791 const uint16_t *regulatory, int n_regulatory)
3792{
3793 struct iwm_nvm_data *data = &sc->sc_nvm;
3794 uint8_t hw_addr[ETHER_ADDR_LEN];
3795 uint32_t sku;
3796 uint16_t lar_config;
3797
3798 data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
3799
3800 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3801 uint16_t radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
3802 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
3803 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
3804 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
3805 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
3806
3807 sku = le16_to_cpup(nvm_sw + IWM_SKU);
3808 } else {
3809 uint32_t radio_cfg =
3810 le32_to_cpup((uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
3811 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
3812 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
3813 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
3814 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
3815 data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
3816 data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
3817
3818 sku = le32_to_cpup((uint32_t *)(phy_sku + IWM_SKU_8000));
3819 }
3820
3821 data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
3822 data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
3823 data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
3824 data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;
3825
3826 if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
3827 uint16_t lar_offset = data->nvm_version < 0xE39 ?
3828 IWM_NVM_LAR_OFFSET_8000_OLD :
3829 IWM_NVM_LAR_OFFSET_8000;
3830
3831 lar_config = le16_to_cpup(regulatory + lar_offset);
3832 data->lar_enabled = !!(lar_config &
3833 IWM_NVM_LAR_ENABLED_8000);
3834 data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS_8000);
3835 } else
3836 data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
3837
3838
3839 /* The byte order is little endian 16 bit, meaning 214365 */
3840 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3841 memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
3842 data->hw_addr[0] = hw_addr[1];
3843 data->hw_addr[1] = hw_addr[0];
3844 data->hw_addr[2] = hw_addr[3];
3845 data->hw_addr[3] = hw_addr[2];
3846 data->hw_addr[4] = hw_addr[5];
3847 data->hw_addr[5] = hw_addr[4];
3848 } else
3849 iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
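/*
 * Worked example of the "214365" byte order above: an address stored
 * in the NVM as 23:01:67:45:ab:89 comes out of the pairwise swap as
 * 01:23:45:67:89:ab.
 */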
3850
3851 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3852 if (sc->nvm_type == IWM_NVM_SDP) {
3853 iwm_init_channel_map(sc, regulatory, iwm_nvm_channels,
3854 MIN(n_regulatory, nitems(iwm_nvm_channels)));
3855 } else {
3856 iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS],
3857 iwm_nvm_channels, nitems(iwm_nvm_channels));
3858 }
3859 } else
3860 iwm_init_channel_map(sc, &regulatory[IWM_NVM_CHANNELS_8000],
3861 iwm_nvm_channels_8000,
3862 MIN(n_regulatory, nitems(iwm_nvm_channels_8000)));
3863
3864 data->calib_version = 255; /* TODO:
3865 this value will prevent some checks from
3866 failing, we need to check if this
3867 field is still needed, and if it is,
3868 where it is in the NVM */
3869
3870 return 0;
3871}
3872
3873int
3874iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
3875{
3876 const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL;
3877 const uint16_t *regulatory = NULL;
3878 int n_regulatory = 0;
3879
3880 /* Checking for required sections */
3881 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3882 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3883 !sections[IWM_NVM_SECTION_TYPE_HW].data) {
3884 return ENOENT;
3885 }
3886
3887 hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
3888
3889 if (sc->nvm_type == IWM_NVM_SDP) {
3890 if (!sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data)
3891 return ENOENT;
3892 regulatory = (const uint16_t *)
3893 sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data;
3894 n_regulatory =
3895 sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].length;
3896 }
3897 } else if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
3898 /* SW and REGULATORY sections are mandatory */
3899 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3900 !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
3901 return ENOENT;
3902 }
3903 /* MAC_OVERRIDE or at least HW section must exist */
3904 if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
3905 !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
3906 return ENOENT;
3907 }
3908
3909 /* PHY_SKU section is mandatory in B0 */
3910 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
3911 return ENOENT;
3912 }
3913
3914 regulatory = (const uint16_t *)
3915 sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
3916 n_regulatory = sections[IWM_NVM_SECTION_TYPE_REGULATORY].length;
3917 hw = (const uint16_t *)
3918 sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
3919 mac_override =
3920 (const uint16_t *)
3921 sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
3922 phy_sku = (const uint16_t *)
3923 sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
3924 } else {
3925 panic("unknown device family %d", sc->sc_device_family);
3926 }
3927
3928 sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
3929 calib = (const uint16_t *)
3930 sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
3931
3932 /* XXX should pass in the length of every section */
3933 return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
3934 phy_sku, regulatory, n_regulatory);
3935}
3936
3937int
3938iwm_nvm_init(struct iwm_softc *sc)
3939{
3940 struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
3941 int i, section, err;
3942 uint16_t len;
3943 uint8_t *buf;
3944 const size_t bufsz = sc->sc_nvm_max_section_size;
3945
3946 memset(nvm_sections, 0, sizeof(nvm_sections));
3947
3948 buf = malloc(bufsz, M_DEVBUF, M_WAIT);
3949 if (buf == NULL)
3950 return ENOMEM;
3951
3952 for (i = 0; i < nitems(iwm_nvm_to_read); i++) {
3953 section = iwm_nvm_to_read[i];
3954 KASSERT(section <= nitems(nvm_sections));
3955
3956 err = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
3957 if (err) {
3958 err = 0;
3959 continue;
3960 }
3961 nvm_sections[section].data = malloc(len, M_DEVBUF, M_WAIT);
3962 if (nvm_sections[section].data == NULL) {
3963 err = ENOMEM;
3964 break;
3965 }
3966 memcpy(nvm_sections[section].data, buf, len);
3967 nvm_sections[section].length = len;
3968 }
3969 free(buf, M_DEVBUF, bufsz);
3970 if (err == 0)
3971 err = iwm_parse_nvm_sections(sc, nvm_sections);
3972
3973 for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
3974 if (nvm_sections[i].data != NULL)
3975 free(nvm_sections[i].data, M_DEVBUF,
3976 nvm_sections[i].length);
3977 }
3978
3979 return err;
3980}
3981
3982int
3983iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
3984 const uint8_t *section, uint32_t byte_cnt)
3985{
3986 int err = EINVAL;
3987 uint32_t chunk_sz, offset;
3988
3989 chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
3990
3991 for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
3992 uint32_t addr, len;
3993 const uint8_t *data;
3994
3995 addr = dst_addr + offset;
3996 len = MIN(chunk_sz, byte_cnt - offset);
3997 data = section + offset;
3998
3999 err = iwm_firmware_load_chunk(sc, addr, data, len);
4000 if (err)
4001 break;
4002 }
4003
4004 return err;
4005}
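/*
 * Example: assuming IWM_FH_MEM_TB_MAX_LENGTH is 0x20000 (131072
 * bytes), a 150000-byte section is loaded as one 131072-byte chunk
 * followed by one 18928-byte chunk, each DMA'd through the service
 * channel by iwm_firmware_load_chunk() below.
 */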
4006
4007int
4008iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
4009 const uint8_t *chunk, uint32_t byte_cnt)
4010{
4011 struct iwm_dma_info *dma = &sc->fw_dma;
4012 int err;
4013
4014 /* Copy firmware chunk into pre-allocated DMA-safe memory. */
4015 memcpy(dma->vaddr, chunk, byte_cnt);
4016 bus_dmamap_sync(sc->sc_dmat,
4017 dma->map, 0, byte_cnt, BUS_DMASYNC_PREWRITE);
4018
4019 if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
4020 dst_addr <= IWM_FW_MEM_EXTENDED_END) {
4021 err = iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
4022 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
4023 if (err)
4024 return err;
4025 }
4026
4027 sc->sc_fw_chunk_done = 0;
4028
4029 if (!iwm_nic_lock(sc))
4030 return EBUSY;
4031
4032 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
4033 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
4034 IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
4035 dst_addr);
4036 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
4037 dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
4038 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
4039 (iwm_get_dma_hi_addr(dma->paddr)
4040 << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
4041 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
4042 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
4043 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
4044 IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
4045 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
4046 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
4047 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
4048 IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
4049
4050 iwm_nic_unlock(sc);
4051
4052 /* Wait for this segment to load. */
4053 err = 0;
4054 while (!sc->sc_fw_chunk_done) {
4055 err = tsleep_nsec(&sc->sc_fw, 0, "iwmfw", SEC_TO_NSEC(1));
4056 if (err)
4057 break;
4058 }
4059
4060 if (!sc->sc_fw_chunk_done)
4061 printf("%s: fw chunk addr 0x%x len %d failed to load\n",
4062 DEVNAME(sc)((sc)->sc_dev.dv_xname), dst_addr, byte_cnt);
4063
4064 if (dst_addr >= IWM_FW_MEM_EXTENDED_START0x40000 &&
4065 dst_addr <= IWM_FW_MEM_EXTENDED_END0x57FFF) {
4066 int err2 = iwm_clear_bits_prph(sc, IWM_LMPM_CHICK0xa01ff8,
4067 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE0x01);
4068 if (!err)
4069 err = err2;
4070 }
4071
4072 return err;
4073}
4074
4075 int
4076 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4077 {
4078         struct iwm_fw_sects *fws;
4079         int err, i;
4080         void *data;
4081         uint32_t dlen;
4082         uint32_t offset;
4083 
4084         fws = &sc->sc_fw.fw_sects[ucode_type];
4085         for (i = 0; i < fws->fw_count; i++) {
4086                 data = fws->fw_sect[i].fws_data;
4087                 dlen = fws->fw_sect[i].fws_len;
4088                 offset = fws->fw_sect[i].fws_devoff;
4089                 if (dlen > sc->sc_fwdmasegsz) {
4090                         err = EFBIG;
4091                 } else
4092                         err = iwm_firmware_load_sect(sc, offset, data, dlen);
4093                 if (err) {
4094                         printf("%s: could not load firmware chunk %u of %u\n",
4095                             DEVNAME(sc), i, fws->fw_count);
4096                         return err;
4097                 }
4098         }
4099 
4100         iwm_enable_interrupts(sc);
4101 
4102         IWM_WRITE(sc, IWM_CSR_RESET, 0);
4103 
4104         return 0;
4105 }
4106
4107 int
4108 iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
4109     int cpu, int *first_ucode_section)
4110 {
4111         int shift_param;
4112         int i, err = 0, sec_num = 0x1;
4113         uint32_t val, last_read_idx = 0;
4114         void *data;
4115         uint32_t dlen;
4116         uint32_t offset;
4117 
4118         if (cpu == 1) {
4119                 shift_param = 0;
4120                 *first_ucode_section = 0;
4121         } else {
4122                 shift_param = 16;
4123                 (*first_ucode_section)++;
4124         }
4125 
4126         for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
4127                 last_read_idx = i;
4128                 data = fws->fw_sect[i].fws_data;
4129                 dlen = fws->fw_sect[i].fws_len;
4130                 offset = fws->fw_sect[i].fws_devoff;
4131 
4132                 /*
4133                  * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
4134                  * CPU1 sections from the CPU2 sections.
4135                  * The PAGING_SEPARATOR_SECTION delimiter separates the
4136                  * non-paged CPU2 sections from the CPU2 paging section.
4137                  */
4138                 if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
4139                     offset == IWM_PAGING_SEPARATOR_SECTION)
4140                         break;
4141 
4142                 if (dlen > sc->sc_fwdmasegsz) {
4143                         err = EFBIG;
4144                 } else
4145                         err = iwm_firmware_load_sect(sc, offset, data, dlen);
4146                 if (err) {
4147                         printf("%s: could not load firmware chunk %d "
4148                             "(error %d)\n", DEVNAME(sc), i, err);
4149                         return err;
4150                 }
4151 
4152                 /* Notify the ucode of the loaded section number and status */
4153                 if (iwm_nic_lock(sc)) {
4154                         val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
4155                         val = val | (sec_num << shift_param);
4156                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
4157                         sec_num = (sec_num << 1) | 0x1;
4158                         iwm_nic_unlock(sc);
4159                 } else {
4160                         err = EBUSY;
4161                         printf("%s: could not load firmware chunk %d "
4162                             "(error %d)\n", DEVNAME(sc), i, err);
4163                         return err;
4164                 }
4165         }
4166 
4167         *first_ucode_section = last_read_idx;
4168 
4169         if (iwm_nic_lock(sc)) {
4170                 if (cpu == 1)
4171                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
4172                 else
4173                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
4174                 iwm_nic_unlock(sc);
4175         } else {
4176                 err = EBUSY;
4177                 printf("%s: could not finalize firmware loading (error %d)\n",
4178                     DEVNAME(sc), err);
4179                 return err;
4180         }
4181 
4182         return 0;
4183 }
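
The `sec_num = (sec_num << 1) | 0x1` step above accumulates a growing mask of ones in IWM_FH_UCODE_LOAD_STATUS, one bit per loaded section, shifted into the upper 16 bits for CPU2. A minimal demo of that progression (all names hypothetical; the register read/modify/write is reduced to a plain variable):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        uint32_t val = 0, sec_num = 0x1;
        int shift_param = 0;    /* would be 16 for CPU2 */
        int i;

        for (i = 0; i < 4; i++) {
                val |= (sec_num << shift_param);
                sec_num = (sec_num << 1) | 0x1;
                printf("after section %d: status=0x%x\n", i, val);
        }
        /* prints 0x1, 0x3, 0x7, 0xf */
        return 0;
}
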
4184
4185 int
4186 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4187 {
4188         struct iwm_fw_sects *fws;
4189         int err = 0;
4190         int first_ucode_section;
4191 
4192         fws = &sc->sc_fw.fw_sects[ucode_type];
4193 
4194         /* Configure the ucode to be ready to receive the secured image. */
4195         /* Release the CPU reset. */
4196         if (iwm_nic_lock(sc)) {
4197                 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
4198                     IWM_RELEASE_CPU_RESET_BIT);
4199                 iwm_nic_unlock(sc);
4200         }
4201 
4202         /* Load the secured sections of CPU1 into the firmware. */
4203         err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
4204         if (err)
4205                 return err;
4206 
4207         /* Load the sections of CPU2 into the firmware. */
4208         err = iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
4209         if (err)
4210                 return err;
4211 
4212         iwm_enable_interrupts(sc);
4213         return 0;
4214 }
4215
4216 int
4217 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4218 {
4219         int err;
4220 
4221         splassert(IPL_NET);
4222 
4223         sc->sc_uc.uc_intr = 0;
4224         sc->sc_uc.uc_ok = 0;
4225 
4226         if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000)
4227                 err = iwm_load_firmware_8000(sc, ucode_type);
4228         else
4229                 err = iwm_load_firmware_7000(sc, ucode_type);
4230 
4231         if (err)
4232                 return err;
4233 
4234         /* wait for the firmware to load */
4235         err = tsleep_nsec(&sc->sc_uc, 0, "iwmuc", SEC_TO_NSEC(1));
4236         if (err || !sc->sc_uc.uc_ok)
4237                 printf("%s: could not load firmware\n", DEVNAME(sc));
4238 
4239         return err;
4240 }
4241
4242 int
4243 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4244 {
4245         int err;
4246 
4247         IWM_WRITE(sc, IWM_CSR_INT, ~0);
4248 
4249         err = iwm_nic_init(sc);
4250         if (err) {
4251                 printf("%s: unable to init nic\n", DEVNAME(sc));
4252                 return err;
4253         }
4254 
4255         /* make sure rfkill handshake bits are cleared */
4256         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
4257         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
4258             IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
4259 
4260         /* clear (again), then enable firmware load interrupt */
4261         IWM_WRITE(sc, IWM_CSR_INT, ~0);
4262         iwm_enable_fwload_interrupt(sc);
4263 
4264         /* really make sure rfkill handshake bits are cleared */
4265         /* maybe we should write a few times more? just to make sure */
4266         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
4267         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
4268 
4269         return iwm_load_firmware(sc, ucode_type);
4270 }
4271
4272 int
4273 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
4274 {
4275         struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
4276                 .valid = htole32(valid_tx_ant),
4277         };
4278 
4279         return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
4280             0, sizeof(tx_ant_cmd), &tx_ant_cmd);
4281 }
4282
4283 int
4284 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
4285 {
4286         struct iwm_phy_cfg_cmd phy_cfg_cmd;
4287         enum iwm_ucode_type ucode_type = sc->sc_uc_current;
4288 
4289         phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config |
4290             sc->sc_extra_phy_config);
4291         phy_cfg_cmd.calib_control.event_trigger =
4292             sc->sc_default_calib[ucode_type].event_trigger;
4293         phy_cfg_cmd.calib_control.flow_trigger =
4294             sc->sc_default_calib[ucode_type].flow_trigger;
4295 
4296         return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, 0,
4297             sizeof(phy_cfg_cmd), &phy_cfg_cmd);
4298 }
4299
4300 int
4301 iwm_send_dqa_cmd(struct iwm_softc *sc)
4302 {
4303         struct iwm_dqa_enable_cmd dqa_cmd = {
4304                 .cmd_queue = htole32(IWM_DQA_CMD_QUEUE),
4305         };
4306         uint32_t cmd_id;
4307 
4308         cmd_id = iwm_cmd_id(IWM_DQA_ENABLE_CMD, IWM_DATA_PATH_GROUP, 0);
4309         return iwm_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
4310 }
4311
4312 int
4313 iwm_load_ucode_wait_alive(struct iwm_softc *sc,
4314     enum iwm_ucode_type ucode_type)
4315 {
4316         enum iwm_ucode_type old_type = sc->sc_uc_current;
4317         struct iwm_fw_sects *fw = &sc->sc_fw.fw_sects[ucode_type];
4318         int err;
4319 
4320         err = iwm_read_firmware(sc);
4321         if (err)
4322                 return err;
4323 
4324         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
4325                 sc->cmdqid = IWM_DQA_CMD_QUEUE;
4326         else
4327                 sc->cmdqid = IWM_CMD_QUEUE;
4328 
4329         sc->sc_uc_current = ucode_type;
4330         err = iwm_start_fw(sc, ucode_type);
4331         if (err) {
4332                 sc->sc_uc_current = old_type;
4333                 return err;
4334         }
4335 
4336         err = iwm_post_alive(sc);
4337         if (err)
4338                 return err;
4339 
4340         /*
4341          * Configure and operate the firmware paging mechanism.
4342          * The driver configures the paging flow only once; the CPU2
4343          * paging image is included in the IWM_UCODE_INIT image.
4344          */
4345         if (fw->paging_mem_size) {
4346                 err = iwm_save_fw_paging(sc, fw);
4347                 if (err) {
4348                         printf("%s: failed to save the FW paging image\n",
4349                             DEVNAME(sc));
4350                         return err;
4351                 }
4352 
4353                 err = iwm_send_paging_cmd(sc, fw);
4354                 if (err) {
4355                         printf("%s: failed to send the paging cmd\n",
4356                             DEVNAME(sc));
4357                         iwm_free_fw_paging(sc);
4358                         return err;
4359                 }
4360         }
4361 
4362         return 0;
4363 }
4364
4365 int
4366 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
4367 {
4368         const int wait_flags = (IWM_INIT_COMPLETE | IWM_CALIB_COMPLETE);
4369         int err, s;
4370 
4371         if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
4372                 printf("%s: radio is disabled by hardware switch\n",
4373                     DEVNAME(sc));
4374                 return EPERM;
4375         }
4376 
4377         s = splnet();
4378         sc->sc_init_complete = 0;
4379         err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT);
4380         if (err) {
4381                 printf("%s: failed to load init firmware\n", DEVNAME(sc));
4382                 splx(s);
4383                 return err;
4384         }
4385 
4386         if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000) {
4387                 err = iwm_send_bt_init_conf(sc);
4388                 if (err) {
4389                         printf("%s: could not init bt coex (error %d)\n",
4390                             DEVNAME(sc), err);
4391                         splx(s);
4392                         return err;
4393                 }
4394         }
4395 
4396         if (justnvm) {
4397                 err = iwm_nvm_init(sc);
4398                 if (err) {
4399                         printf("%s: failed to read nvm\n", DEVNAME(sc));
4400                         splx(s);
4401                         return err;
4402                 }
4403 
4404                 if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
4405                         IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
4406                             sc->sc_nvm.hw_addr);
4407 
4408                 splx(s);
4409                 return 0;
4410         }
4411 
4412         err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
4413         if (err) {
4414                 splx(s);
4415                 return err;
4416         }
4417 
4418         /* Send TX valid antennas before triggering calibrations */
4419         err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
4420         if (err) {
4421                 splx(s);
4422                 return err;
4423         }
4424 
4425         /*
4426          * Send the PHY configuration command to the init uCode to
4427          * start the init image's internal calibrations.
4428          */
4429         err = iwm_send_phy_cfg_cmd(sc);
4430         if (err) {
4431                 splx(s);
4432                 return err;
4433         }
4434 
4435         /*
4436          * Nothing to do but wait for the init complete and phy DB
4437          * notifications from the firmware.
4438          */
4439         while ((sc->sc_init_complete & wait_flags) != wait_flags) {
4440                 err = tsleep_nsec(&sc->sc_init_complete, 0, "iwminit",
4441                     SEC_TO_NSEC(2));
4442                 if (err)
4443                         break;
4444         }
4445 
4446         splx(s);
4447         return err;
4448 }
4449
4450 int
4451 iwm_config_ltr(struct iwm_softc *sc)
4452 {
4453         struct iwm_ltr_config_cmd cmd = {
4454                 .flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE),
4455         };
4456 
4457         if (!sc->sc_ltr_enabled)
4458                 return 0;
4459 
4460         return iwm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd);
4461 }
4462
4463 int
4464 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
4465 {
4466         struct iwm_rx_ring *ring = &sc->rxq;
4467         struct iwm_rx_data *data = &ring->data[idx];
4468         struct mbuf *m;
4469         int err;
4470         int fatal = 0;
4471 
4472         m = m_gethdr(M_DONTWAIT, MT_DATA);
4473         if (m == NULL)
4474                 return ENOBUFS;
4475 
4476         if (size <= MCLBYTES) {
4477                 MCLGET(m, M_DONTWAIT);
4478         } else {
4479                 MCLGETL(m, M_DONTWAIT, IWM_RBUF_SIZE);
4480         }
4481         if ((m->m_flags & M_EXT) == 0) {
4482                 m_freem(m);
4483                 return ENOBUFS;
4484         }
4485 
4486         if (data->m != NULL) {
4487                 bus_dmamap_unload(sc->sc_dmat, data->map);
4488                 fatal = 1;
4489         }
4490 
4491         m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4492         err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4493             BUS_DMA_READ|BUS_DMA_NOWAIT);
4494         if (err) {
4495                 /* XXX */
4496                 if (fatal)
4497                         panic("iwm: could not load RX mbuf");
4498                 m_freem(m);
4499                 return err;
4500         }
4501         data->m = m;
4502         bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
4503 
4504         /* Update RX descriptor. */
4505         if (sc->sc_mqrx_supported) {
4506                 ((uint64_t *)ring->desc)[idx] =
4507                     htole64(data->map->dm_segs[0].ds_addr);
4508                 bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
4509                     idx * sizeof(uint64_t), sizeof(uint64_t),
4510                     BUS_DMASYNC_PREWRITE);
4511         } else {
4512                 ((uint32_t *)ring->desc)[idx] =
4513                     htole32(data->map->dm_segs[0].ds_addr >> 8);
4514                 bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
4515                     idx * sizeof(uint32_t), sizeof(uint32_t),
4516                     BUS_DMASYNC_PREWRITE);
4517         }
4518 
4519         return 0;
4520 }
4521
4522 /*
4523  * RSSI values are reported by the FW as positive values - need to negate
4524  * them to obtain their dBm. Account for missing antennas by replacing 0
4525  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
4526  */
4527 int
4528 iwm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
4529 {
4530         int energy_a, energy_b, energy_c, max_energy;
4531         uint32_t val;
4532 
4533         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
4534         energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
4535             IWM_RX_INFO_ENERGY_ANT_A_POS;
4536         energy_a = energy_a ? -energy_a : -256;
4537         energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
4538             IWM_RX_INFO_ENERGY_ANT_B_POS;
4539         energy_b = energy_b ? -energy_b : -256;
4540         energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
4541             IWM_RX_INFO_ENERGY_ANT_C_POS;
4542         energy_c = energy_c ? -energy_c : -256;
4543         max_energy = MAX(energy_a, energy_b);
4544         max_energy = MAX(max_energy, energy_c);
4545 
4546         return max_energy;
4547 }
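
Each antenna's energy occupies one byte of the packed word; zero marks a missing antenna and becomes -256 dBm, so MAX() later picks a real antenna whenever one reported. A userland sketch of the decode (decode_energy is a hypothetical helper; the shifts mirror the A/B/C field positions):

#include <stdint.h>
#include <stdio.h>

#define MAX(a, b) (((a) > (b)) ? (a) : (b))

/* Decode one byte-wide energy field and negate it, as the driver does. */
static int
decode_energy(uint32_t val, int shift)
{
        int e = (val >> shift) & 0xff;
        return e ? -e : -256;
}

int
main(void)
{
        uint32_t val = 0x00003a2d;      /* ant A = 0x2d, ant B = 0x3a, ant C absent */
        int a = decode_energy(val, 0);
        int b = decode_energy(val, 8);
        int c = decode_energy(val, 16);
        printf("rssi = %d dBm\n", MAX(MAX(a, b), c));   /* -45 dBm */
        return 0;
}
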
4548
4549 int
4550 iwm_rxmq_get_signal_strength(struct iwm_softc *sc,
4551     struct iwm_rx_mpdu_desc *desc)
4552 {
4553         int energy_a, energy_b;
4554 
4555         energy_a = desc->v1.energy_a;
4556         energy_b = desc->v1.energy_b;
4557         energy_a = energy_a ? -energy_a : -256;
4558         energy_b = energy_b ? -energy_b : -256;
4559         return MAX(energy_a, energy_b);
4560 }
4561
4562 void
4563 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
4564     struct iwm_rx_data *data)
4565 {
4566         struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
4567 
4568         bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
4569             sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
4570 
4571         memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
4572 }
4573
4574 /*
4575  * Retrieve the average noise (in dBm) among receivers.
4576  */
4577 int
4578 iwm_get_noise(const struct iwm_statistics_rx_non_phy *stats)
4579 {
4580         int i, total, nbant, noise;
4581 
4582         total = nbant = noise = 0;
4583         for (i = 0; i < 3; i++) {
4584                 noise = letoh32(stats->beacon_silence_rssi[i]) & 0xff;
4585                 if (noise) {
4586                         total += noise;
4587                         nbant++;
4588                 }
4589         }
4590 
4591         /* There should be at least one antenna but check anyway. */
4592         return (nbant == 0) ? -127 : (total / nbant) - 107;
4593 }
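
Only antennas reporting a nonzero beacon-silence value participate in the average, and 107 is the fixed offset subtracted to land in dBm; with no antennas the function falls back to -127 dBm. A self-contained sketch of the same arithmetic on host-endian inputs (get_noise here is a hypothetical stand-in):

#include <stdint.h>
#include <stdio.h>

/* Same averaging as iwm_get_noise(), on host-endian inputs. */
static int
get_noise(const uint32_t rssi[3])
{
        int i, total = 0, nbant = 0;

        for (i = 0; i < 3; i++) {
                int noise = rssi[i] & 0xff;
                if (noise) {
                        total += noise;
                        nbant++;
                }
        }
        return (nbant == 0) ? -127 : (total / nbant) - 107;
}

int
main(void)
{
        uint32_t rssi[3] = { 20, 24, 0 };       /* third antenna missing */
        printf("noise = %d dBm\n", get_noise(rssi));    /* (44/2) - 107 = -85 */
        return 0;
}
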
4594
4595 int
4596 iwm_ccmp_decap(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
4597     struct ieee80211_rxinfo *rxi)
4598 {
4599         struct ieee80211com *ic = &sc->sc_ic;
4600         struct ieee80211_key *k = &ni->ni_pairwise_key;
4601         struct ieee80211_frame *wh;
4602         uint64_t pn, *prsc;
4603         uint8_t *ivp;
4604         uint8_t tid;
4605         int hdrlen, hasqos;
4606 
4607         wh = mtod(m, struct ieee80211_frame *);
4608         hdrlen = ieee80211_get_hdrlen(wh);
4609         ivp = (uint8_t *)wh + hdrlen;
4610 
4611         /* Check that ExtIV bit is set. */
4612         if (!(ivp[3] & IEEE80211_WEP_EXTIV))
4613                 return 1;
4614 
4615         hasqos = ieee80211_has_qos(wh);
4616         tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
4617         prsc = &k->k_rsc[tid];
4618 
4619         /* Extract the 48-bit PN from the CCMP header. */
4620         pn = (uint64_t)ivp[0] |
4621             (uint64_t)ivp[1] << 8 |
4622             (uint64_t)ivp[4] << 16 |
4623             (uint64_t)ivp[5] << 24 |
4624             (uint64_t)ivp[6] << 32 |
4625             (uint64_t)ivp[7] << 40;
4626         if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) {
4627                 if (pn < *prsc) {
4628                         ic->ic_stats.is_ccmp_replays++;
4629                         return 1;
4630                 }
4631         } else if (pn <= *prsc) {
4632                 ic->ic_stats.is_ccmp_replays++;
4633                 return 1;
4634         }
4635         /* Last seen packet number is updated in ieee80211_inputm(). */
4636 
4637         /*
4638          * Some firmware versions strip the MIC, and some don't. It is not
4639          * clear which of the capability flags could tell us what to expect.
4640          * For now, keep things simple and just leave the MIC in place if
4641          * it is present.
4642          *
4643          * The IV will be stripped by ieee80211_inputm().
4644          */
4645         return 0;
4646 }
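
The CCMP header stores PN0/PN1 in bytes 0-1 and PN2-PN5 in bytes 4-7; byte 2 is reserved and byte 3 carries the key ID and the ExtIV flag, which is why bytes 2-3 are skipped in the assembly above. A sketch of the same extraction (ccmp_pn is a hypothetical helper; the sample header is made up):

#include <stdint.h>
#include <stdio.h>

/* Rebuild the 48-bit CCMP packet number from an 8-byte CCMP header. */
static uint64_t
ccmp_pn(const uint8_t *ivp)
{
        return (uint64_t)ivp[0] |
            (uint64_t)ivp[1] << 8 |
            (uint64_t)ivp[4] << 16 |
            (uint64_t)ivp[5] << 24 |
            (uint64_t)ivp[6] << 32 |
            (uint64_t)ivp[7] << 40;
}

int
main(void)
{
        /* PN = 0x010203040506, key ID 0, ExtIV bit (0x20) set in byte 3. */
        uint8_t hdr[8] = { 0x06, 0x05, 0x00, 0x20, 0x04, 0x03, 0x02, 0x01 };
        printf("pn = 0x%012llx\n", (unsigned long long)ccmp_pn(hdr));
        return 0;
}
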
4647
4648 int
4649 iwm_rx_hwdecrypt(struct iwm_softc *sc, struct mbuf *m, uint32_t rx_pkt_status,
4650     struct ieee80211_rxinfo *rxi)
4651 {
4652         struct ieee80211com *ic = &sc->sc_ic;
4653         struct ifnet *ifp = IC2IFP(ic);
4654         struct ieee80211_frame *wh;
4655         struct ieee80211_node *ni;
4656         int ret = 0;
4657         uint8_t type, subtype;
4658 
4659         wh = mtod(m, struct ieee80211_frame *);
4660 
4661         type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4662         if (type == IEEE80211_FC0_TYPE_CTL)
4663                 return 0;
4664 
4665         subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4666         if (ieee80211_has_qos(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA))
4667                 return 0;
4668 
4669         if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4670             !(wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
4671                 return 0;
4672 
4673         ni = ieee80211_find_rxnode(ic, wh);
4674         /* Handle hardware decryption. */
4675         if ((ni->ni_flags & IEEE80211_NODE_RXPROT) &&
4676             ni->ni_pairwise_key.k_cipher == IEEE80211_CIPHER_CCMP) {
4677                 if ((rx_pkt_status & IWM_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
4678                     IWM_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4679                         ic->ic_stats.is_ccmp_dec_errs++;
4680                         ret = 1;
4681                         goto out;
4682                 }
4683                 /* Check whether decryption was successful or not. */
4684                 if ((rx_pkt_status &
4685                     (IWM_RX_MPDU_RES_STATUS_DEC_DONE |
4686                     IWM_RX_MPDU_RES_STATUS_MIC_OK)) !=
4687                     (IWM_RX_MPDU_RES_STATUS_DEC_DONE |
4688                     IWM_RX_MPDU_RES_STATUS_MIC_OK)) {
4689                         ic->ic_stats.is_ccmp_dec_errs++;
4690                         ret = 1;
4691                         goto out;
4692                 }
4693                 rxi->rxi_flags |= IEEE80211_RXI_HWDEC;
4694         }
4695 out:
4696         if (ret)
4697                 ifp->if_ierrors++;
4698         ieee80211_release_node(ic, ni);
4699         return ret;
4700 }
4701
4702 void
4703 iwm_rx_frame(struct iwm_softc *sc, struct mbuf *m, int chanidx,
4704     uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
4705     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
4706     struct mbuf_list *ml)
4707 {
4708         struct ieee80211com *ic = &sc->sc_ic;
4709         struct ifnet *ifp = IC2IFP(ic);
4710         struct ieee80211_frame *wh;
4711         struct ieee80211_node *ni;
4712         struct ieee80211_channel *bss_chan;
4713         uint8_t saved_bssid[IEEE80211_ADDR_LEN] = { 0 };
4714 
4715         if (chanidx < 0 || chanidx >= nitems(ic->ic_channels))
4716                 chanidx = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
4717 
4718         wh = mtod(m, struct ieee80211_frame *);
4719         ni = ieee80211_find_rxnode(ic, wh);
4720         if (ni == ic->ic_bss) {
4721                 /*
4722                  * We may switch ic_bss's channel during scans.
4723                  * Record the current channel so we can restore it later.
4724                  */
4725                 bss_chan = ni->ni_chan;
4726                 IEEE80211_ADDR_COPY(&saved_bssid, ni->ni_macaddr);
4727         }
4728         ni->ni_chan = &ic->ic_channels[chanidx];
4729 
4730         if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) &&
4731             iwm_ccmp_decap(sc, m, ni, rxi) != 0) {
4732                 ifp->if_ierrors++;
4733                 m_freem(m);
4734                 ieee80211_release_node(ic, ni);
4735                 return;
4736         }
4737 
4738 #if NBPFILTER > 0
4739         if (sc->sc_drvbpf != NULL) {
4740                 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
4741                 uint16_t chan_flags;
4742 
4743                 tap->wr_flags = 0;
4744                 if (is_shortpre)
4745                         tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
4746                 tap->wr_chan_freq =
4747                     htole16(ic->ic_channels[chanidx].ic_freq);
4748                 chan_flags = ic->ic_channels[chanidx].ic_flags;
4749                 if (ic->ic_curmode != IEEE80211_MODE_11N)
4750                         chan_flags &= ~IEEE80211_CHAN_HT;
4751                 tap->wr_chan_flags = htole16(chan_flags);
4752                 tap->wr_dbm_antsignal = (int8_t)rxi->rxi_rssi;
4753                 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
4754                 tap->wr_tsft = device_timestamp;
4755                 if (rate_n_flags & IWM_RATE_MCS_HT_MSK) {
4756                         uint8_t mcs = (rate_n_flags &
4757                             (IWM_RATE_HT_MCS_RATE_CODE_MSK |
4758                             IWM_RATE_HT_MCS_NSS_MSK));
4759                         tap->wr_rate = (0x80 | mcs);
4760                 } else {
4761                         uint8_t rate = (rate_n_flags &
4762                             IWM_RATE_LEGACY_RATE_MSK);
4763                         switch (rate) {
4764                         /* CCK rates. */
4765                         case 10: tap->wr_rate = 2; break;
4766                         case 20: tap->wr_rate = 4; break;
4767                         case 55: tap->wr_rate = 11; break;
4768                         case 110: tap->wr_rate = 22; break;
4769                         /* OFDM rates. */
4770                         case 0xd: tap->wr_rate = 12; break;
4771                         case 0xf: tap->wr_rate = 18; break;
4772                         case 0x5: tap->wr_rate = 24; break;
4773                         case 0x7: tap->wr_rate = 36; break;
4774                         case 0x9: tap->wr_rate = 48; break;
4775                         case 0xb: tap->wr_rate = 72; break;
4776                         case 0x1: tap->wr_rate = 96; break;
4777                         case 0x3: tap->wr_rate = 108; break;
4778                         /* Unknown rate: should not happen. */
4779                         default: tap->wr_rate = 0;
4780                         }
4781                 }
4782 
4783                 bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
4784                     m, BPF_DIRECTION_IN);
4785         }
4786 #endif
4787         ieee80211_inputm(IC2IFP(ic), m, ni, rxi, ml);
4788         /*
4789          * ieee80211_inputm() might have changed our BSS.
4790          * Restore ic_bss's channel if we are still in the same BSS.
4791          */
4792         if (ni == ic->ic_bss && IEEE80211_ADDR_EQ(saved_bssid, ni->ni_macaddr))
4793                 ni->ni_chan = bss_chan;
4794         ieee80211_release_node(ic, ni);
4795 }
4796
4797 void
4798 iwm_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, void *pktdata,
4799     size_t maxlen, struct mbuf_list *ml)
4800 {
4801         struct ieee80211com *ic = &sc->sc_ic;
4802         struct ieee80211_rxinfo rxi;
4803         struct iwm_rx_phy_info *phy_info;
4804         struct iwm_rx_mpdu_res_start *rx_res;
4805         int device_timestamp;
4806         uint16_t phy_flags;
4807         uint32_t len;
4808         uint32_t rx_pkt_status;
4809         int rssi, chanidx, rate_n_flags;
4810 
4811         memset(&rxi, 0, sizeof(rxi));
4812 
4813         phy_info = &sc->sc_last_phy_info;
4814         rx_res = (struct iwm_rx_mpdu_res_start *)pktdata;
4815         len = le16toh(rx_res->byte_count);
4816         if (ic->ic_opmode == IEEE80211_M_MONITOR) {
4817                 /* Allow control frames in monitor mode. */
4818                 if (len < sizeof(struct ieee80211_frame_cts)) {
4819                         ic->ic_stats.is_rx_tooshort++;
4820                         IC2IFP(ic)->if_ierrors++;
4821                         m_freem(m);
4822                         return;
4823                 }
4824         } else if (len < sizeof(struct ieee80211_frame)) {
4825                 ic->ic_stats.is_rx_tooshort++;
4826                 IC2IFP(ic)->if_ierrors++;
4827                 m_freem(m);
4828                 return;
4829         }
4830         if (len > maxlen - sizeof(*rx_res)) {
4831                 IC2IFP(ic)->if_ierrors++;
4832                 m_freem(m);
4833                 return;
4834         }
4835 
4836         if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
4837                 m_freem(m);
4838                 return;
4839         }
4840 
4841         rx_pkt_status = le32toh(*(uint32_t *)(pktdata + sizeof(*rx_res) + len));
4842         if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
4843             !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
4844                 m_freem(m);
4845                 return; /* drop */
4846         }
4847 
4848         m->m_data = pktdata + sizeof(*rx_res);
4849         m->m_pkthdr.len = m->m_len = len;
4850 
4851         if (iwm_rx_hwdecrypt(sc, m, rx_pkt_status, &rxi)) {
4852                 m_freem(m);
4853                 return;
4854         }
4855 
4856         chanidx = letoh32(phy_info->channel);
4857         device_timestamp = le32toh(phy_info->system_timestamp);
4858         phy_flags = letoh16(phy_info->phy_flags);
4859         rate_n_flags = le32toh(phy_info->rate_n_flags);
4860 
4861         rssi = iwm_get_signal_strength(sc, phy_info);
4862         rssi = (0 - IWM_MIN_DBM) + rssi;        /* normalize */
4863         rssi = MIN(rssi, ic->ic_max_rssi);      /* clip to max. 100% */
4864 
4865         rxi.rxi_rssi = rssi;
4866         rxi.rxi_tstamp = device_timestamp;
4867 
4868         iwm_rx_frame(sc, m, chanidx, rx_pkt_status,
4869             (phy_flags & IWM_PHY_INFO_FLAG_SHPREAMBLE),
4870             rate_n_flags, device_timestamp, &rxi, ml);
4871 }
4872
4873 void
4874 iwm_flip_address(uint8_t *addr)
4875 {
4876         int i;
4877         uint8_t mac_addr[ETHER_ADDR_LEN];
4878 
4879         for (i = 0; i < ETHER_ADDR_LEN; i++)
4880                 mac_addr[i] = addr[ETHER_ADDR_LEN - i - 1];
4881         IEEE80211_ADDR_COPY(addr, mac_addr);
4882 }
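
A byte-wise reversal: the hardware hands back addr3/addr4 of de-aggregated A-MSDU subframes in reversed byte order, and this helper restores them in place. A trivial userland demonstration (flip_address mirrors the driver's logic; the sample address is made up):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETHER_ADDR_LEN 6

static void
flip_address(uint8_t *addr)
{
        uint8_t tmp[ETHER_ADDR_LEN];
        int i;

        for (i = 0; i < ETHER_ADDR_LEN; i++)
                tmp[i] = addr[ETHER_ADDR_LEN - i - 1];
        memcpy(addr, tmp, ETHER_ADDR_LEN);
}

int
main(void)
{
        uint8_t a[ETHER_ADDR_LEN] = { 0x66, 0x55, 0x44, 0x33, 0x22, 0x11 };
        flip_address(a);
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
            a[0], a[1], a[2], a[3], a[4], a[5]);        /* 11:22:33:44:55:66 */
        return 0;
}
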
4883
4884 /*
4885  * Drop duplicate 802.11 retransmissions
4886  * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
4887  * and handle pseudo-duplicate frames which result from deaggregation
4888  * of A-MSDU frames in hardware.
4889  */
4890 int
4891 iwm_detect_duplicate(struct iwm_softc *sc, struct mbuf *m,
4892     struct iwm_rx_mpdu_desc *desc, struct ieee80211_rxinfo *rxi)
4893 {
4894         struct ieee80211com *ic = &sc->sc_ic;
4895         struct iwm_node *in = (void *)ic->ic_bss;
4896         struct iwm_rxq_dup_data *dup_data = &in->dup_data;
4897         uint8_t tid = IWM_MAX_TID_COUNT, subframe_idx;
4898         struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4899         uint8_t type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4900         uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4901         int hasqos = ieee80211_has_qos(wh);
4902         uint16_t seq;
4903 
4904         if (type == IEEE80211_FC0_TYPE_CTL ||
4905             (hasqos && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) ||
4906             IEEE80211_IS_MULTICAST(wh->i_addr1))
4907                 return 0;
4908 
4909         if (hasqos) {
4910                 tid = (ieee80211_get_qos(wh) & IEEE80211_QOS_TID);
4911                 if (tid > IWM_MAX_TID_COUNT)
4912                         tid = IWM_MAX_TID_COUNT;
4913         }
4914 
4915         /* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
4916         subframe_idx = desc->amsdu_info &
4917             IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
4918 
4919         seq = letoh16(*(u_int16_t *)wh->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
4920         if ((wh->i_fc[1] & IEEE80211_FC1_RETRY) &&
4921             dup_data->last_seq[tid] == seq &&
4922             dup_data->last_sub_frame[tid] >= subframe_idx)
4923                 return 1;
4924 
4925         /*
4926          * Allow the same frame sequence number for all A-MSDU subframes
4927          * following the first subframe.
4928          * Otherwise these subframes would be discarded as replays.
4929          */
4930         if (dup_data->last_seq[tid] == seq &&
4931             subframe_idx > dup_data->last_sub_frame[tid] &&
4932             (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU)) {
4933                 rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
4934         }
4935 
4936         dup_data->last_seq[tid] = seq;
4937         dup_data->last_sub_frame[tid] = subframe_idx;
4938 
4939         return 0;
4940 }
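
The drop condition above requires all three of: Retry bit set, matching sequence number, and a non-advancing A-MSDU subframe index for the TID. This sketch reduces the per-TID state to a single slot to show just the predicate (struct dup_state and is_duplicate are hypothetical names):

#include <stdint.h>
#include <stdio.h>

struct dup_state {
        uint16_t last_seq;
        uint8_t last_sub_frame;
};

/* 1 = drop as duplicate, 0 = accept; mirrors the per-TID checks above. */
static int
is_duplicate(struct dup_state *st, int retry, uint16_t seq, uint8_t subframe)
{
        if (retry && st->last_seq == seq && st->last_sub_frame >= subframe)
                return 1;
        st->last_seq = seq;
        st->last_sub_frame = subframe;
        return 0;
}

int
main(void)
{
        struct dup_state st = { 0xffff, 0 };
        printf("%d\n", is_duplicate(&st, 0, 100, 0));   /* 0: first copy */
        printf("%d\n", is_duplicate(&st, 1, 100, 0));   /* 1: retransmission */
        printf("%d\n", is_duplicate(&st, 1, 101, 0));   /* 0: new sequence number */
        return 0;
}
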
4941
4942 /*
4943  * Returns true if sn2 - buffer_size < sn1 < sn2.
4944  * To be used only in order to compare reorder buffer head with NSSN.
4945  * We fully trust NSSN unless it is behind us due to reorder timeout.
4946  * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
4947  */
4948 int
4949 iwm_is_sn_less(uint16_t sn1, uint16_t sn2, uint16_t buffer_size)
4950 {
4951         return SEQ_LT(sn1, sn2) && !SEQ_LT(sn1, sn2 - buffer_size);
4952 }
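
SEQ_LT() works in the 12-bit sequence space: a difference above 2048 is treated as wraparound, and iwm_is_sn_less() additionally requires sn1 to lie within buffer_size of sn2. A runnable sketch (the SEQ_LT definition below mirrors the net80211 macro seen expanded in this listing):

#include <stdint.h>
#include <stdio.h>

/* 12-bit serial-number comparison, as in net80211's SEQ_LT(). */
#define SEQ_LT(a, b) \
        ((((uint16_t)(a) - (uint16_t)(b)) & 0xfff) > 2048)

static int
is_sn_less(uint16_t sn1, uint16_t sn2, uint16_t buffer_size)
{
        return SEQ_LT(sn1, sn2) && !SEQ_LT(sn1, sn2 - buffer_size);
}

int
main(void)
{
        printf("%d\n", SEQ_LT(10, 20));                 /* 1 */
        printf("%d\n", SEQ_LT(4090, 5));                /* 1: wrapped around */
        printf("%d\n", is_sn_less(100, 110, 64));       /* 1: within window */
        printf("%d\n", is_sn_less(10, 110, 64));        /* 0: too far behind */
        return 0;
}
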
4953
4954 void
4955 iwm_release_frames(struct iwm_softc *sc, struct ieee80211_node *ni,
4956     struct iwm_rxba_data *rxba, struct iwm_reorder_buffer *reorder_buf,
4957     uint16_t nssn, struct mbuf_list *ml)
4958 {
4959         struct iwm_reorder_buf_entry *entries = &rxba->entries[0];
4960         uint16_t ssn = reorder_buf->head_sn;
4961 
4962         /* ignore nssn smaller than head sn - this can happen due to timeout */
4963         if (iwm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
4964                 goto set_timer;
4965 
4966         while (iwm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
4967                 int index = ssn % reorder_buf->buf_size;
4968                 struct mbuf *m;
4969                 int chanidx, is_shortpre;
4970                 uint32_t rx_pkt_status, rate_n_flags, device_timestamp;
4971                 struct ieee80211_rxinfo *rxi;
4972 
4973                 /* This data is the same for all A-MSDU subframes. */
4974                 chanidx = entries[index].chanidx;
4975                 rx_pkt_status = entries[index].rx_pkt_status;
4976                 is_shortpre = entries[index].is_shortpre;
4977                 rate_n_flags = entries[index].rate_n_flags;
4978                 device_timestamp = entries[index].device_timestamp;
4979                 rxi = &entries[index].rxi;
4980 
4981                 /*
4982                  * Empty the list. Will have more than one frame for A-MSDU.
4983                  * Empty list is valid as well since nssn indicates frames were
4984                  * received.
4985                  */
4986                 while ((m = ml_dequeue(&entries[index].frames)) != NULL) {
4987                         iwm_rx_frame(sc, m, chanidx, rx_pkt_status, is_shortpre,
4988                             rate_n_flags, device_timestamp, rxi, ml);
4989                         reorder_buf->num_stored--;
4990 
4991                         /*
4992                          * Allow the same frame sequence number and CCMP PN for
4993                          * all A-MSDU subframes following the first subframe.
4994                          * Otherwise they would be discarded as replays.
4995                          */
4996                         rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
4997                         rxi->rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
4998                 }
4999 
5000                 ssn = (ssn + 1) & 0xfff;
5001         }
5002         reorder_buf->head_sn = nssn;
5003 
5004 set_timer:
5005         if (reorder_buf->num_stored && !reorder_buf->removed) {
5006                 timeout_add_usec(&reorder_buf->reorder_timer,
5007                     RX_REORDER_BUF_TIMEOUT_MQ_USEC);
5008         } else
5009                 timeout_del(&reorder_buf->reorder_timer);
5010 }
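
Frames live at slot `ssn % buf_size` and are delivered in sequence order until the head catches up with the NSSN; empty slots are simply skipped, since the NSSN proves those frames were received. A simplified sketch with integers standing in for mbuf lists, using a plain `ssn != nssn` loop bound where the driver uses iwm_is_sn_less() for wraparound safety (all names hypothetical):

#include <stdint.h>
#include <stdio.h>

#define BUF_SIZE 8

/* Release slots from head_sn up to (but not including) nssn. */
static uint16_t
release_frames(int stored[BUF_SIZE], uint16_t head_sn, uint16_t nssn)
{
        uint16_t ssn = head_sn;

        while (ssn != nssn) {
                int index = ssn % BUF_SIZE;
                if (stored[index]) {
                        printf("deliver sn %u (slot %d)\n", ssn, index);
                        stored[index] = 0;
                }
                ssn = (ssn + 1) & 0xfff;        /* 12-bit sequence space */
        }
        return nssn;    /* new head_sn */
}

int
main(void)
{
        int stored[BUF_SIZE] = { 0 };
        stored[100 % BUF_SIZE] = 1;     /* sn 100 buffered */
        stored[102 % BUF_SIZE] = 1;     /* sn 102 buffered, sn 101 still missing */
        uint16_t head = release_frames(stored, 100, 103);
        printf("new head_sn = %u\n", head);
        return 0;
}
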
5011
5012 int
5013 iwm_oldsn_workaround(struct iwm_softc *sc, struct ieee80211_node *ni, int tid,
5014     struct iwm_reorder_buffer *buffer, uint32_t reorder_data, uint32_t gp2)
5015 {
5016         struct ieee80211com *ic = &sc->sc_ic;
5017 
5018         if (gp2 != buffer->consec_oldsn_ampdu_gp2) {
5019                 /* we have a new (A-)MPDU ... */
5020 
5021                 /*
5022                  * reset counter to 0 if we didn't have any oldsn in
5023                  * the last A-MPDU (as detected by GP2 being identical)
5024                  */
5025                 if (!buffer->consec_oldsn_prev_drop)
5026                         buffer->consec_oldsn_drops = 0;
5027 
5028                 /* either way, update our tracking state */
5029                 buffer->consec_oldsn_ampdu_gp2 = gp2;
5030         } else if (buffer->consec_oldsn_prev_drop) {
5031                 /*
5032                  * tracking state didn't change, and we had an old SN
5033                  * indication before - do nothing in this case, we
5034                  * already noted this one down and are waiting for the
5035                  * next A-MPDU (by GP2)
5036                  */
5037                 return 0;
5038         }
5039 
5040         /* return unless this MPDU has old SN */
5041         if (!(reorder_data & IWM_RX_MPDU_REORDER_BA_OLD_SN))
5042                 return 0;
5043 
5044         /* update state */
5045         buffer->consec_oldsn_prev_drop = 1;
5046         buffer->consec_oldsn_drops++;
5047 
5048         /* if limit is reached, send del BA and reset state */
5049         if (buffer->consec_oldsn_drops == IWM_AMPDU_CONSEC_DROPS_DELBA) {
5050                 ieee80211_delba_request(ic, ni, IEEE80211_REASON_UNSPECIFIED,
5051                     0, tid);
5052                 buffer->consec_oldsn_prev_drop = 0;
5053                 buffer->consec_oldsn_drops = 0;
5054                 return 1;
5055         }
5056 
5057         return 0;
5058 }
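
The workaround counts consecutive A-MPDUs (told apart by their GP2 timestamp) whose frames carry the old-SN flag, and requests a delBA once IWM_AMPDU_CONSEC_DROPS_DELBA (10) is reached. A compact model of the same transitions (struct oldsn_state and oldsn_workaround are hypothetical names; the state handling mirrors the function above):

#include <stdint.h>
#include <stdio.h>

#define CONSEC_DROPS_DELBA 10   /* mirrors IWM_AMPDU_CONSEC_DROPS_DELBA */

struct oldsn_state {
        uint32_t ampdu_gp2;     /* GP2 of the last A-MPDU seen */
        int prev_drop;          /* last A-MPDU had an old-SN frame */
        int drops;              /* consecutive old-SN A-MPDUs */
};

/* Returns 1 when the BA session should be torn down. */
static int
oldsn_workaround(struct oldsn_state *st, int old_sn, uint32_t gp2)
{
        if (gp2 != st->ampdu_gp2) {     /* new A-MPDU */
                if (!st->prev_drop)
                        st->drops = 0;
                st->ampdu_gp2 = gp2;
        } else if (st->prev_drop)
                return 0;       /* already counted this A-MPDU */

        if (!old_sn)
                return 0;

        st->prev_drop = 1;
        if (++st->drops == CONSEC_DROPS_DELBA) {
                st->prev_drop = 0;
                st->drops = 0;
                return 1;
        }
        return 0;
}

int
main(void)
{
        struct oldsn_state st = { 0, 0, 0 };
        int i, delba = 0;

        for (i = 1; i <= CONSEC_DROPS_DELBA; i++)
                delba = oldsn_workaround(&st, 1, i);    /* each A-MPDU has old SN */
        printf("delba = %d\n", delba);  /* 1 after 10 consecutive drops */
        return 0;
}
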
5059
5060 /*
5061  * Handle re-ordering of frames which were de-aggregated in hardware.
5062  * Returns 1 if the MPDU was consumed (buffered or dropped).
5063  * Returns 0 if the MPDU should be passed to upper layer.
5064  */
5065 int
5066 iwm_rx_reorder(struct iwm_softc *sc, struct mbuf *m, int chanidx,
5067     struct iwm_rx_mpdu_desc *desc, int is_shortpre, int rate_n_flags,
5068     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
5069     struct mbuf_list *ml)
5070 {
5071         struct ieee80211com *ic = &sc->sc_ic;
5072         struct ieee80211_frame *wh;
5073         struct ieee80211_node *ni;
5074         struct iwm_rxba_data *rxba;
5075         struct iwm_reorder_buffer *buffer;
5076         uint32_t reorder_data = le32toh(desc->reorder_data);
5077         int is_amsdu = (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU);
5078         int last_subframe =
5079             (desc->amsdu_info & IWM_RX_MPDU_AMSDU_LAST_SUBFRAME);
5080         uint8_t tid;
5081         uint8_t subframe_idx = (desc->amsdu_info &
5082             IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
5083         struct iwm_reorder_buf_entry *entries;
5084         int index;
5085         uint16_t nssn, sn;
5086         uint8_t baid, type, subtype;
5087         int hasqos;
5088 
5089         wh = mtod(m, struct ieee80211_frame *);
5090         hasqos = ieee80211_has_qos(wh);
5091         tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
5092 
5093         type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5094         subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
5095 
5096         /*
5097          * We are only interested in Block Ack requests and unicast QoS data.
5098          */
5099         if (IEEE80211_IS_MULTICAST(wh->i_addr1))
5100                 return 0;
5101         if (hasqos) {
5102                 if (subtype & IEEE80211_FC0_SUBTYPE_NODATA)
5103                         return 0;
5104         } else {
5105                 if (type != IEEE80211_FC0_TYPE_CTL ||
5106                     subtype != IEEE80211_FC0_SUBTYPE_BAR)
5107                         return 0;
5108         }
5109 
5110         baid = (reorder_data & IWM_RX_MPDU_REORDER_BAID_MASK) >>
5111             IWM_RX_MPDU_REORDER_BAID_SHIFT;
5112         if (baid == IWM_RX_REORDER_DATA_INVALID_BAID ||
5113             baid >= nitems(sc->sc_rxba_data))
5114                 return 0;
5115 
5116         rxba = &sc->sc_rxba_data[baid];
5117         if (rxba->baid == IWM_RX_REORDER_DATA_INVALID_BAID ||
5118             tid != rxba->tid || rxba->sta_id != IWM_STATION_ID)
5119                 return 0;
5120 
5121         if (rxba->timeout != 0)
5122                 getmicrouptime(&rxba->last_rx);
5123 
5124         /* Bypass A-MPDU re-ordering in net80211. */
5125         rxi->rxi_flags |= IEEE80211_RXI_AMPDU_DONE;
5126 
5127         nssn = reorder_data & IWM_RX_MPDU_REORDER_NSSN_MASK;
5128         sn = (reorder_data & IWM_RX_MPDU_REORDER_SN_MASK) >>
5129             IWM_RX_MPDU_REORDER_SN_SHIFT;
5130 
5131         buffer = &rxba->reorder_buf;
5132         entries = &rxba->entries[0];
5133 
5134         if (!buffer->valid) {
5135                 if (reorder_data & IWM_RX_MPDU_REORDER_BA_OLD_SN)
5136                         return 0;
5137                 buffer->valid = 1;
5138         }
5139 
5140         ni = ieee80211_find_rxnode(ic, wh);
5141         if (type == IEEE80211_FC0_TYPE_CTL &&
5142             subtype == IEEE80211_FC0_SUBTYPE_BAR) {
5143                 iwm_release_frames(sc, ni, rxba, buffer, nssn, ml);
5144                 goto drop;
5145         }
5146 
5147         /*
5148          * If there was a significant jump in the nssn - adjust.
5149          * If the SN is smaller than the NSSN it might need to first go into
5150          * the reorder buffer, in which case we just release up to it and the
5151          * rest of the function will take care of storing it and releasing up to
5152          * the nssn.
5153          */
5154         if (!iwm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
5155             buffer->buf_size) ||
5156             !SEQ_LT(sn, buffer->head_sn + buffer->buf_size)) {
5157                 uint16_t min_sn = SEQ_LT(sn, nssn) ? sn : nssn;
5158                 ic->ic_stats.is_ht_rx_frame_above_ba_winend++;
5159                 iwm_release_frames(sc, ni, rxba, buffer, min_sn, ml);
5160         }
5161 
5162         if (iwm_oldsn_workaround(sc, ni, tid, buffer, reorder_data,
5163             device_timestamp)) {
5164                 /* BA session will be torn down. */
5165                 ic->ic_stats.is_ht_rx_ba_window_jump++;
5166                 goto drop;
5167 
5168         }
5169 
5170         /* drop any outdated packets */
5171         if (SEQ_LT(sn, buffer->head_sn)) {
5172                 ic->ic_stats.is_ht_rx_frame_below_ba_winstart++;
5173                 goto drop;
5174         }
5175 
5176         /* release immediately if allowed by nssn and no stored frames */
5177         if (!buffer->num_stored && SEQ_LT(sn, nssn)) {
5178                 if (iwm_is_sn_less(buffer->head_sn, nssn, buffer->buf_size) &&
5179                     (!is_amsdu || last_subframe))
5180                         buffer->head_sn = nssn;
5181                 ieee80211_release_node(ic, ni);
5182                 return 0;
5183         }
5184 
5185         /*
5186          * release immediately if there are no stored frames, and the sn is
5187          * equal to the head.
5188          * This can happen due to reorder timer, where NSSN is behind head_sn.
5189          * When we released everything, and we got the next frame in the
5190          * sequence, according to the NSSN we can't release immediately,
5191          * while technically there is no hole and we can move forward.
5192          */
5193         if (!buffer->num_stored && sn == buffer->head_sn) {
5194                 if (!is_amsdu || last_subframe)
5195                         buffer->head_sn = (buffer->head_sn + 1) & 0xfff;
5196                 ieee80211_release_node(ic, ni);
5197                 return 0;
5198         }
5199 
5200         index = sn % buffer->buf_size;
5201 
5202         /*
5203          * Check if we already stored this frame
5204          * As AMSDU is either received or not as whole, logic is simple:
5205          * If we have frames in that position in the buffer and the last frame
5206          * originated from AMSDU had a different SN then it is a retransmission.
5207          * If it is the same SN then if the subframe index is incrementing it
5208          * is the same AMSDU - otherwise it is a retransmission.
5209          */
5210         if (!ml_empty(&entries[index].frames)) {
5211                 if (!is_amsdu) {
5212                         ic->ic_stats.is_ht_rx_ba_no_buf++;
5213                         goto drop;
5214                 } else if (sn != buffer->last_amsdu ||
5215                     buffer->last_sub_index >= subframe_idx) {
5216                         ic->ic_stats.is_ht_rx_ba_no_buf++;
5217                         goto drop;
5218                 }
5219         } else {
5220                 /* This data is the same for all A-MSDU subframes. */
5221                 entries[index].chanidx = chanidx;
5222                 entries[index].is_shortpre = is_shortpre;
5223                 entries[index].rate_n_flags = rate_n_flags;
5224                 entries[index].device_timestamp = device_timestamp;
5225                 memcpy(&entries[index].rxi, rxi, sizeof(entries[index].rxi));
5226         }
5227 
5228         /* put in reorder buffer */
5229         ml_enqueue(&entries[index].frames, m);
5230         buffer->num_stored++;
5231         getmicrouptime(&entries[index].reorder_time);
5232 
5233         if (is_amsdu) {
5234                 buffer->last_amsdu = sn;
5235                 buffer->last_sub_index = subframe_idx;
5236         }
5237 
5238         /*
5239          * We cannot trust NSSN for AMSDU sub-frames that are not the last.
5240          * The reason is that NSSN advances on the first sub-frame, and may
5241          * cause the reorder buffer to advance before all the sub-frames arrive.
5242          * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with
5243          * SN 1. NSSN for first sub frame will be 3 with the result of driver
5244          * releasing SN 0,1, 2. When sub-frame 1 arrives - reorder buffer is
5245          * already ahead and it will be dropped.
5246          * If the last sub-frame is not on this queue - we will get frame
5247          * release notification with up to date NSSN.
5248          */
5249         if (!is_amsdu || last_subframe)
5250                 iwm_release_frames(sc, ni, rxba, buffer, nssn, ml);
5251 
5252         ieee80211_release_node(ic, ni);
5253         return 1;
5254 
5255 drop:
5256         m_freem(m);
5257         ieee80211_release_node(ic, ni);
5258         return 1;
5259 }
5260
5261 void
5262 iwm_rx_mpdu_mq(struct iwm_softc *sc, struct mbuf *m, void *pktdata,
5263     size_t maxlen, struct mbuf_list *ml)
5264 {
5265         struct ieee80211com *ic = &sc->sc_ic;
5266         struct ieee80211_rxinfo rxi;
5267         struct iwm_rx_mpdu_desc *desc;
5268         uint32_t len, hdrlen, rate_n_flags, device_timestamp;
5269         int rssi;
5270         uint8_t chanidx;
5271         uint16_t phy_info;
5272 
5273         memset(&rxi, 0, sizeof(rxi));
5274 
5275         desc = (struct iwm_rx_mpdu_desc *)pktdata;
5276 
5277         if (!(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_CRC_OK)) ||
5278             !(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
5279                 m_freem(m);
5280                 return; /* drop */
5281         }
5282 
5283         len = le16toh(desc->mpdu_len);
5284         if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5285                 /* Allow control frames in monitor mode. */
5286                 if (len < sizeof(struct ieee80211_frame_cts)) {
5287                         ic->ic_stats.is_rx_tooshort++;
5288                         IC2IFP(ic)->if_ierrors++;
5289                         m_freem(m);
5290                         return;
5291                 }
5292         } else if (len < sizeof(struct ieee80211_frame)) {
5293                 ic->ic_stats.is_rx_tooshort++;
5294                 IC2IFP(ic)->if_ierrors++;
5295                 m_freem(m);
5296                 return;
5297         }
5298         if (len > maxlen - sizeof(*desc)) {
5299                 IC2IFP(ic)->if_ierrors++;
5300                 m_freem(m);
5301                 return;
5302         }
5303 
5304         m->m_data = pktdata + sizeof(*desc);
5305         m->m_pkthdr.len = m->m_len = len;
5306 
5307         /* Account for padding following the frame header. */
5308         if (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_PAD) {
5309                 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
5310                 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5311                 if (type == IEEE80211_FC0_TYPE_CTL) {
5312                         switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
5313                         case IEEE80211_FC0_SUBTYPE_CTS:
5314                                 hdrlen = sizeof(struct ieee80211_frame_cts);
5315                                 break;
5316                         case IEEE80211_FC0_SUBTYPE_ACK:
5317                                 hdrlen = sizeof(struct ieee80211_frame_ack);
5318                                 break;
5319                         default:
5320                                 hdrlen = sizeof(struct ieee80211_frame_min);
5321                                 break;
5322                         }
5323                 } else
5324                         hdrlen = ieee80211_get_hdrlen(wh);
5325 
5326                 if ((le16toh(desc->status) &
5327                     IWM_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
5328                     IWM_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
5329                         /* Padding is inserted after the IV. */
5330                         hdrlen += IEEE80211_CCMP_HDRLEN;
5331                 }
5332 
5333                 memmove(m->m_data + 2, m->m_data, hdrlen);
5334                 m_adj(m, 2);
5335         }
5336 
5337         /*
5338          * Hardware de-aggregates A-MSDUs and copies the same MAC header
5339          * in place for each subframe. But it leaves the 'A-MSDU present'
5340          * bit set in the frame header. We need to clear this bit ourselves.
5341          *
5342          * And we must allow the same CCMP PN for subframes following the
5343          * first subframe. Otherwise they would be discarded as replays.
5344          */
5345         if (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU) {
5346                 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
5347                 uint8_t subframe_idx = (desc->amsdu_info &
5348                     IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
5349                 if (subframe_idx > 0)
5350                         rxi.rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
5351                 if (ieee80211_has_qos(wh) && ieee80211_has_addr4(wh) &&
5352                     m->m_len >= sizeof(struct ieee80211_qosframe_addr4)) {
5353                         struct ieee80211_qosframe_addr4 *qwh4 = mtod(m,
5354                             struct ieee80211_qosframe_addr4 *);
5355                         qwh4->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
5356 
5357                         /* HW reverses addr3 and addr4. */
5358                         iwm_flip_address(qwh4->i_addr3);
5359                         iwm_flip_address(qwh4->i_addr4);
5360                 } else if (ieee80211_has_qos(wh) &&
5361                     m->m_len >= sizeof(struct ieee80211_qosframe)) {
5362                         struct ieee80211_qosframe *qwh = mtod(m,
5363                             struct ieee80211_qosframe *);
5364                         qwh->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
5365 
5366                         /* HW reverses addr3. */
5367                         iwm_flip_address(qwh->i_addr3);
5368                 }
5369         }
5370 
5371         /*
5372          * Verify decryption before duplicate detection. The latter uses
5373          * the TID supplied in QoS frame headers and this TID is implicitly
5374          * verified as part of the CCMP nonce.
5375          */
5376         if (iwm_rx_hwdecrypt(sc, m, le16toh(desc->status), &rxi)) {
5377                 m_freem(m);
5378                 return;
5379         }
5380 
5381         if (iwm_detect_duplicate(sc, m, desc, &rxi)) {
5382                 m_freem(m);
5383                 return;
5384         }
5385 
5386         phy_info = le16toh(desc->phy_info);
5387         rate_n_flags = le32toh(desc->v1.rate_n_flags);
5388         chanidx = desc->v1.channel;
5389         device_timestamp = desc->v1.gp2_on_air_rise;
5390 
5391         rssi = iwm_rxmq_get_signal_strength(sc, desc);
5392         rssi = (0 - IWM_MIN_DBM) + rssi;        /* normalize */
5393         rssi = MIN(rssi, ic->ic_max_rssi);      /* clip to max. 100% */
5394 
5395         rxi.rxi_rssi = rssi;
5396         rxi.rxi_tstamp = le64toh(desc->v1.tsf_on_air_rise);
5397 
5398         if (iwm_rx_reorder(sc, m, chanidx, desc,
5399             (phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE),
5400             rate_n_flags, device_timestamp, &rxi, ml))
5401                 return;
5402 
5403         iwm_rx_frame(sc, m, chanidx, le16toh(desc->status),
5404             (phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE),
5405             rate_n_flags, device_timestamp, &rxi, ml);
5406 }
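
When IWM_RX_MPDU_MFLG2_PAD is set, the hardware has inserted 2 bytes of padding between the 802.11 header (or header plus CCMP IV) and the payload; the memmove()/m_adj() pair above slides the header over the pad. The same fix-up on a flat buffer (strip_pad is a hypothetical helper; the 4-byte "header" is made up):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Remove 2 bytes of pad inserted after a header of hdrlen bytes:
 * shift the header up over the pad and advance the start pointer.
 */
static uint8_t *
strip_pad(uint8_t *buf, size_t hdrlen)
{
        memmove(buf + 2, buf, hdrlen);
        return buf + 2; /* corresponds to m_adj(m, 2) */
}

int
main(void)
{
        /* 4-byte "header", 2-byte pad, then payload. */
        uint8_t frame[] = { 'H', 'D', 'R', '!', 0, 0, 'p', 'a', 'y' };
        uint8_t *p = strip_pad(frame, 4);
        printf("%.7s\n", p);    /* HDR!pay */
        return 0;
}
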
5407
5408 void
5409 iwm_ra_choose(struct iwm_softc *sc, struct ieee80211_node *ni)
5410 {
5411         struct ieee80211com *ic = &sc->sc_ic;
5412         struct iwm_node *in = (void *)ni;
5413         int old_txmcs = ni->ni_txmcs;
5414 
5415         ieee80211_ra_choose(&in->in_rn, ic, ni);
5416 
5417         /*
5418          * If RA has chosen a new TX rate we must update
5419          * the firmware's LQ rate table.
5420          */
5421         if (ni->ni_txmcs != old_txmcs)
5422                 iwm_setrates(in, 1);
5423 }
5424
5425 void
5426 iwm_ht_single_rate_control(struct iwm_softc *sc, struct ieee80211_node *ni,
5427     int txmcs, uint8_t failure_frame, int txfail)
5428 {
5429         struct ieee80211com *ic = &sc->sc_ic;
5430         struct iwm_node *in = (void *)ni;
5431 
5432         /* Ignore Tx reports which don't match our last LQ command. */
5433         if (txmcs != ni->ni_txmcs) {
5434                 if (++in->lq_rate_mismatch > 15) {
5435                         /* Try to sync firmware with the driver... */
5436                         iwm_setrates(in, 1);
5437                         in->lq_rate_mismatch = 0;
5438                 }
5439         } else {
5440                 int mcs = txmcs;
5441                 const struct ieee80211_ht_rateset *rs =
5442                     ieee80211_ra_get_ht_rateset(txmcs,
5443                     ieee80211_node_supports_ht_chan40(ni),
5444                     ieee80211_ra_use_ht_sgi(ni));
5445                 unsigned int retries = 0, i;
5446 
5447                 in->lq_rate_mismatch = 0;
5448 
5449                 for (i = 0; i < failure_frame; i++) {
5450                         if (mcs > rs->min_mcs) {
5451                                 ieee80211_ra_add_stats_ht(&in->in_rn,
5452                                     ic, ni, mcs, 1, 1);
5453                                 mcs--;
5454                         } else
5455                                 retries++;
5456                 }
5457 
5458                 if (txfail && failure_frame == 0) {
5459                         ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5460                             txmcs, 1, 1);
5461                 } else {
5462                         ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5463                             mcs, retries + 1, retries);
5464                 }
5465 
5466                 iwm_ra_choose(sc, ni);
5467         }
5468 }
5469
5470 void
5471 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
5472     struct iwm_node *in, int txmcs, int txrate)
5473 {
5474         struct ieee80211com *ic = &sc->sc_ic;
5475         struct ieee80211_node *ni = &in->in_ni;
5476         struct ifnet *ifp = IC2IFP(ic);
5477         struct iwm_tx_resp *tx_resp = (void *)pkt->data;
5478         int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
5479         int txfail;
5480 
5481         KASSERT(tx_resp->frame_count == 1);
5482 
5483         txfail = (status != IWM_TX_STATUS_SUCCESS &&
5484             status != IWM_TX_STATUS_DIRECT_DONE);
5485 
5486         /*
5487          * Update rate control statistics.
5488          * Only report frames which were actually queued with the currently
5489          * selected Tx rate. Because Tx queues are relatively long we may
5490          * encounter previously selected rates here during Tx bursts.
5491          * Providing feedback based on such frames can lead to suboptimal
5492          * Tx rate control decisions.
5493          */
5494         if ((ni->ni_flags & IEEE80211_NODE_HT) == 0) {
5495                 if (txrate != ni->ni_txrate) {
5496                         if (++in->lq_rate_mismatch > 15) {
5497                                 /* Try to sync firmware with the driver... */
5498                                 iwm_setrates(in, 1);
5499                                 in->lq_rate_mismatch = 0;
5500                         }
5501                 } else {
5502                         in->lq_rate_mismatch = 0;
5503 
5504                         in->in_amn.amn_txcnt++;
5505                         if (txfail)
5506                                 in->in_amn.amn_retrycnt++;
5507                         if (tx_resp->failure_frame > 0)
5508                                 in->in_amn.amn_retrycnt++;
5509                 }
5510         } else if (ic->ic_fixed_mcs == -1 && ic->ic_state == IEEE80211_S_RUN &&
5511             (le32toh(tx_resp->initial_rate) & IWM_RATE_MCS_HT_MSK)) {
5512                 int txmcs = le32toh(tx_resp->initial_rate) &
5513                     (IWM_RATE_HT_MCS_RATE_CODE_MSK | IWM_RATE_HT_MCS_NSS_MSK);
5514                 iwm_ht_single_rate_control(sc, ni, txmcs,
5515                     tx_resp->failure_frame, txfail);
5516         }
5517 
5518         if (txfail)
5519                 ifp->if_oerrors++;
5520 }
5521
5522 void
5523 iwm_txd_done(struct iwm_softc *sc, struct iwm_tx_data *txd)
5524 {
5525         struct ieee80211com *ic = &sc->sc_ic;
5526 
5527         bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
5528             BUS_DMASYNC_POSTWRITE);
5529         bus_dmamap_unload(sc->sc_dmat, txd->map);
5530         m_freem(txd->m);
5531         txd->m = NULL;
5532 
5533         KASSERT(txd->in);
5534         ieee80211_release_node(ic, &txd->in->in_ni);
5535         txd->in = NULL;
5536         txd->ampdu_nframes = 0;
5537         txd->ampdu_txmcs = 0;
5538 }
5539
5540 void
5541 iwm_txq_advance(struct iwm_softc *sc, struct iwm_tx_ring *ring, int idx)
5542 {
5543         struct iwm_tx_data *txd;
5544 
5545         while (ring->tail != idx) {
5546                 txd = &ring->data[ring->tail];
5547                 if (txd->m != NULL) {
5548                         if (ring->qid < IWM_FIRST_AGG_TX_QUEUE)
5549                                 DPRINTF(("%s: missed Tx completion: tail=%d "
5550                                     "idx=%d\n", __func__, ring->tail, idx));
5551                         iwm_reset_sched(sc, ring->qid, ring->tail, IWM_STATION_ID);
5552                         iwm_txd_done(sc, txd);
5553                         ring->queued--;
5554                 }
5555                 ring->tail = (ring->tail + 1) % IWM_TX_RING_COUNT;
5556         }
5557 
5558         wakeup(ring);
5559 }
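
The ring tail chases the completion index modulo the ring size, reclaiming any slot that still holds a frame; this also covers completions that wrap past the end of the ring. A toy model (txq_advance here uses a busy[] flag array in place of the driver's descriptor state; RING_COUNT mirrors IWM_TX_RING_COUNT):

#include <stdio.h>

#define RING_COUNT 256  /* mirrors IWM_TX_RING_COUNT */

/* Advance tail to idx, reclaiming any slots that still hold a frame. */
static int
txq_advance(int busy[RING_COUNT], int tail, int idx)
{
        while (tail != idx) {
                if (busy[tail]) {
                        printf("reclaim slot %d\n", tail);
                        busy[tail] = 0;
                }
                tail = (tail + 1) % RING_COUNT;
        }
        return tail;
}

int
main(void)
{
        int busy[RING_COUNT] = { 0 };

        busy[254] = busy[255] = busy[0] = 1;
        /* A completion at slot 1 reclaims 254, 255 and 0 across the wrap. */
        printf("new tail = %d\n", txq_advance(busy, 254, 1));
        return 0;
}
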
5560
5561void
5562iwm_ampdu_tx_done(struct iwm_softc *sc, struct iwm_cmd_header *cmd_hdr,
5563 struct iwm_node *in, struct iwm_tx_ring *txq, uint32_t initial_rate,
5564 uint8_t nframes, uint8_t failure_frame, uint16_t ssn, int status,
5565 struct iwm_agg_tx_status *agg_status)
5566{
5567 struct ieee80211com *ic = &sc->sc_ic;
5568 int tid = cmd_hdr->qid - IWM_FIRST_AGG_TX_QUEUE;
5569 struct iwm_tx_data *txdata = &txq->data[cmd_hdr->idx];
5570 struct ieee80211_node *ni = &in->in_ni;
5571 struct ieee80211_tx_ba *ba;
5572 int txfail = (status != IWM_TX_STATUS_SUCCESS &&
5573 status != IWM_TX_STATUS_DIRECT_DONE);
5574 uint16_t seq;
5575
5576 if (ic->ic_state != IEEE80211_S_RUN)
5577 return;
5578
5579 if (nframes > 1) {
5580 int i;
5581 /*
5582 * Collect information about this A-MPDU.
5583 */
5584
5585 for (i = 0; i < nframes; i++) {
5586 uint8_t qid = agg_status[i].qid;
5587 uint8_t idx = agg_status[i].idx;
5588 uint16_t txstatus = (le16toh(agg_status[i].status) &
5589 IWM_AGG_TX_STATE_STATUS_MSK);
5590
5591 if (txstatus != IWM_AGG_TX_STATE_TRANSMITTED)
5592 continue;
5593
5594 if (qid != cmd_hdr->qid)
5595 continue;
5596
5597 txdata = &txq->data[idx];
5598 if (txdata->m == NULL)
5599 continue;
5600
5601 /* The Tx rate was the same for all subframes. */
5602 txdata->ampdu_txmcs = initial_rate &
5603 (IWM_RATE_HT_MCS_RATE_CODE_MSK |
5604 IWM_RATE_HT_MCS_NSS_MSK);
5605 txdata->ampdu_nframes = nframes;
5606 }
5607 return;
5608 }
5609
5610 ba = &ni->ni_tx_ba[tid];
5611 if (ba->ba_state != IEEE80211_BA_AGREED)
5612 return;
5613 if (SEQ_LT(ssn, ba->ba_winstart))
5614 return;
5615
5616 /* This was a final single-frame Tx attempt for frame SSN-1. */
5617 seq = (ssn - 1) & 0xfff;
5618
5619 /*
5620 * Skip rate control if our Tx rate is fixed.
5621 * Don't report frames to MiRA which were sent at a different
5622 * Tx rate than ni->ni_txmcs.
5623 */
5624 if (ic->ic_fixed_mcs == -1) {
5625 if (txdata->ampdu_nframes > 1) {
5626 /*
5627 * This frame was once part of an A-MPDU.
5628 * Report one failed A-MPDU Tx attempt.
5629 * The firmware might have made several such
5630 * attempts but we don't keep track of this.
5631 */
5632 ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5633 txdata->ampdu_txmcs, 1, 1);
5634 }
5635
5636 /* Report the final single-frame Tx attempt. */
5637 if (initial_rate & IWM_RATE_HT_MCS_RATE_CODE_MSK) {
5638 int txmcs = initial_rate &
5639 (IWM_RATE_HT_MCS_RATE_CODE_MSK |
5640 IWM_RATE_HT_MCS_NSS_MSK);
5641 iwm_ht_single_rate_control(sc, ni, txmcs,
5642 failure_frame, txfail);
5643 }
5644 }
5645
5646 if (txfail)
5647 ieee80211_tx_compressed_bar(ic, ni, tid, ssn);
5648
5649 /*
5650 * SSN corresponds to the first (perhaps not yet transmitted) frame
5651 * in firmware's BA window. Firmware is not going to retransmit any
5652 * frames before its BA window so mark them all as done.
5653 */
5654 ieee80211_output_ba_move_window(ic, ni, tid, ssn);
5655 iwm_txq_advance(sc, txq, IWM_AGG_SSN_TO_TXQ_IDX(ssn));
5656 iwm_clear_oactive(sc, txq);
5657}
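
The SEQ_LT() check above compares 12-bit sequence numbers modulo 4096: subtract in 16-bit arithmetic, mask to 12 bits, and treat a difference greater than 2048 as "a is behind b". A self-contained sketch of the same comparison with a wraparound case (the function name is illustrative, not the net80211 macro itself):

    #include <assert.h>
    #include <stdint.h>

    /* Is 'a' older than 'b' in the 12-bit sequence space? */
    static inline int
    seq_lt(uint16_t a, uint16_t b)
    {
    	return (((uint16_t)(a - b)) & 0xfff) > 2048;
    }

    int
    main(void)
    {
    	assert(seq_lt(10, 20));		/* plainly behind */
    	assert(!seq_lt(20, 10));
    	assert(seq_lt(4090, 5));	/* behind across the 4095 -> 0 wrap */
    	return 0;
    }
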
5658
5659void
5660iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
5661 struct iwm_rx_data *data)
5662{
5663 struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
5664 int idx = cmd_hdr->idx;
5665 int qid = cmd_hdr->qid;
5666 struct iwm_tx_ring *ring = &sc->txq[qid];
5667 struct iwm_tx_data *txd;
5668 struct iwm_tx_resp *tx_resp = (void *)pkt->data;
5669 uint32_t ssn;
5670 uint32_t len = iwm_rx_packet_len(pkt);
5671
5672 bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
5673 BUS_DMASYNC_POSTREAD);
5674
5675 /* Sanity checks. */
5676 if (sizeof(*tx_resp) > len)
5677 return;
5678 if (qid < IWM_FIRST_AGG_TX_QUEUE && tx_resp->frame_count > 1)
5679 return;
5680 if (qid > IWM_LAST_AGG_TX_QUEUE)
5681 return;
5682 if (sizeof(*tx_resp) + sizeof(ssn) +
5683 tx_resp->frame_count * sizeof(tx_resp->status) > len)
5684 return;
5685
5686 sc->sc_tx_timer[qid] = 0;
5687
5688 txd = &ring->data[idx];
5689 if (txd->m == NULL)
5690 return;
5691
5692 memcpy(&ssn, &tx_resp->status + tx_resp->frame_count, sizeof(ssn));
5693 ssn = le32toh(ssn) & 0xfff;
5694 if (qid >= IWM_FIRST_AGG_TX_QUEUE) {
5695 int status;
5696 status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
5697 iwm_ampdu_tx_done(sc, cmd_hdr, txd->in, ring,
5698 le32toh(tx_resp->initial_rate), tx_resp->frame_count,
5699 tx_resp->failure_frame, ssn, status, &tx_resp->status);
5700 } else {
5701 /*
5702 * Even though this is not an agg queue, we must only free
5703 * frames before the firmware's starting sequence number.
5704 */
5705 iwm_rx_tx_cmd_single(sc, pkt, txd->in, txd->txmcs, txd->txrate);
5706 iwm_txq_advance(sc, ring, IWM_AGG_SSN_TO_TXQ_IDX(ssn));
5707 iwm_clear_oactive(sc, ring);
5708 }
5709}
5710
5711void
5712iwm_clear_oactive(struct iwm_softc *sc, struct iwm_tx_ring *ring)
5713{
5714 struct ieee80211com *ic = &sc->sc_ic;
5715 struct ifnet *ifp = IC2IFP(ic);
5716
5717 if (ring->queued < IWM_TX_RING_LOMARK) {
5718 sc->qfullmsk &= ~(1 << ring->qid);
5719 if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
5720 ifq_clr_oactive(&ifp->if_snd);
5721 /*
5722 * Well, we're in interrupt context, but then again
5723 * I guess net80211 does all sorts of stunts in
5724 * interrupt context, so maybe this is no biggie.
5725 */
5726 (*ifp->if_start)(ifp);
5727 }
5728 }
5729}
5730
5731void
5732iwm_ampdu_rate_control(struct iwm_softc *sc, struct ieee80211_node *ni,
5733 struct iwm_tx_ring *txq, int tid, uint16_t seq, uint16_t ssn)
5734{
5735 struct ieee80211com *ic = &sc->sc_ic;
5736 struct iwm_node *in = (void *)ni;
5737 int idx, end_idx;
5738
5739 /*
5740 * Update Tx rate statistics for A-MPDUs before firmware's BA window.
5741 */
5742 idx = IWM_AGG_SSN_TO_TXQ_IDX(seq);
5743 end_idx = IWM_AGG_SSN_TO_TXQ_IDX(ssn);
5744 while (idx != end_idx) {
5745 struct iwm_tx_data *txdata = &txq->data[idx];
5746 if (txdata->m != NULL && txdata->ampdu_nframes > 1) {
5747 /*
5748 * We can assume that this subframe has been ACKed
5749 * because ACK failures come as single frames and
5750 * before failing an A-MPDU subframe the firmware
5751 * sends it as a single frame at least once.
5752 */
5753 ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5754 txdata->ampdu_txmcs, 1, 0);
5755
5756 /* Report this frame only once. */
5757 txdata->ampdu_nframes = 0;
5758 }
5759
5760 idx = (idx + 1) % IWM_TX_RING_COUNT;
5761 }
5762
5763 iwm_ra_choose(sc, ni);
5764}
5765
5766void
5767iwm_rx_compressed_ba(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
5768{
5769 struct iwm_ba_notif *ban = (void *)pkt->data;
5770 struct ieee80211com *ic = &sc->sc_ic;
5771 struct ieee80211_node *ni = ic->ic_bss;
5772 struct iwm_node *in = (void *)ni;
5773 struct ieee80211_tx_ba *ba;
5774 struct iwm_tx_ring *ring;
5775 uint16_t seq, ssn;
5776 int qid;
5777
5778 if (ic->ic_state != IEEE80211_S_RUN)
5779 return;
5780
5781 if (iwm_rx_packet_payload_len(pkt) < sizeof(*ban))
5782 return;
5783
5784 if (ban->sta_id != IWM_STATION_ID ||
5785 !IEEE80211_ADDR_EQ(in->in_macaddr, ban->sta_addr))
5786 return;
5787
5788 qid = le16toh(ban->scd_flow);
5789 if (qid < IWM_FIRST_AGG_TX_QUEUE || qid > IWM_LAST_AGG_TX_QUEUE)
5790 return;
5791
5792 /* Protect against a firmware bug where the queue/TID are off. */
5793 if (qid != IWM_FIRST_AGG_TX_QUEUE + ban->tid)
5794 return;
5795
5796 sc->sc_tx_timer[qid] = 0;
5797
5798 ba = &ni->ni_tx_ba[ban->tid];
5799 if (ba->ba_state != IEEE80211_BA_AGREED)
5800 return;
5801
5802 ring = &sc->txq[qid];
5803
5804 /*
5805 * The first bit in ban->bitmap corresponds to the sequence number
5806 * stored in the sequence control field ban->seq_ctl.
5807 * Multiple BA notifications in a row may be using this number, with
5808 * additional bits being set in ban->bitmap. It is unclear how the
5809 * firmware decides to shift this window forward.
5810 * We rely on ba->ba_winstart instead.
5811 */
5812 seq = le16toh(ban->seq_ctl) >> IEEE80211_SEQ_SEQ_SHIFT;
Value stored to 'seq' is never read
5813
5814 /*
5815 * The firmware's new BA window starting sequence number
5816 * corresponds to the first hole in ban->scd_ssn, implying
5817 * that all frames between 'seq' and 'ssn' (non-inclusive)
5818 * have been acked.
5819 */
5820 ssn = le16toh(ban->scd_ssn);
5821
5822 if (SEQ_LT(ssn, ba->ba_winstart))
5823 return;
5824
5825 /* Skip rate control if our Tx rate is fixed. */
5826 if (ic->ic_fixed_mcs == -1)
5827 iwm_ampdu_rate_control(sc, ni, ring, ban->tid,
5828 ba->ba_winstart, ssn);
5829
5830 /*
5831 * SSN corresponds to the first (perhaps not yet transmitted) frame
5832 * in firmware's BA window. Firmware is not going to retransmit any
5833 * frames before its BA window so mark them all as done.
5834 */
5835 ieee80211_output_ba_move_window(ic, ni, ban->tid, ssn);
5836 iwm_txq_advance(sc, ring, IWM_AGG_SSN_TO_TXQ_IDX(ssn));
5837 iwm_clear_oactive(sc, ring);
5838}
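
This function contains the dead store flagged in the bug summary: line 5812 decodes ban->seq_ctl into seq but, as the comment above it explains, the driver deliberately trusts ba->ba_winstart instead, so the value is never read. Assuming the intent is to keep the explanation while dropping the unused computation, a minimal fix would remove the seq variable and its assignment and fold the detail into the comment, e.g. (a sketch, not necessarily the committed upstream fix):

    /*
     * ban->seq_ctl names the sequence number covered by the first
     * bit in ban->bitmap, but the firmware's rules for moving that
     * window forward are unclear, so we rely on ba->ba_winstart
     * instead and do not decode ban->seq_ctl at all.
     */
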
5839
5840void
5841iwm_rx_bmiss(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
5842 struct iwm_rx_data *data)
5843{
5844 struct ieee80211com *ic = &sc->sc_ic;
5845 struct iwm_missed_beacons_notif *mbn = (void *)pkt->data;
5846 uint32_t missed;
5847
5848 if ((ic->ic_opmode != IEEE80211_M_STA) ||
5849 (ic->ic_state != IEEE80211_S_RUN))
5850 return;
5851
5852 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
5853 sizeof(*mbn), BUS_DMASYNC_POSTREAD);
5854
5855 missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
5856 if (missed > ic->ic_bmissthres && ic->ic_mgt_timer == 0) {
5857 if (ic->ic_if.if_flags & IFF_DEBUG)
5858 printf("%s: receiving no beacons from %s; checking if "
5859 "this AP is still responding to probe requests\n",
5860 DEVNAME(sc), ether_sprintf(ic->ic_bss->ni_macaddr));
5861 /*
5862 * Rather than go directly to scan state, try to send a
5863 * directed probe request first. If that fails then the
5864 * state machine will drop us into scanning after timing
5865 * out waiting for a probe response.
5866 */
5867 IEEE80211_SEND_MGMT(ic, ic->ic_bss,
5868 IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
5869 }
5870
5871}
5872
5873int
5874iwm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
5875{
5876 struct iwm_binding_cmd cmd;
5877 struct iwm_phy_ctxt *phyctxt = in->in_phyctxt;
5878 uint32_t mac_id = IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
5879 int i, err, active = (sc->sc_flags & IWM_FLAG_BINDING_ACTIVE);
5880 uint32_t status;
5881 size_t len;
5882
5883 if (action == IWM_FW_CTXT_ACTION_ADD && active)
5884 panic("binding already added");
5885 if (action == IWM_FW_CTXT_ACTION_REMOVE && !active)
5886 panic("binding already removed");
5887
5888 if (phyctxt == NULL) /* XXX race with iwm_stop() */
5889 return EINVAL;
5890
5891 memset(&cmd, 0, sizeof(cmd));
5892
5893 cmd.id_and_color
5894 = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
5895 cmd.action = htole32(action);
5896 cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
5897
5898 cmd.macs[0] = htole32(mac_id);
5899 for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
5900 cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
5901
5902 if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel) ||
5903 !isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_CDB_SUPPORT))
5904 cmd.lmac_id = htole32(IWM_LMAC_24G_INDEX);
5905 else
5906 cmd.lmac_id = htole32(IWM_LMAC_5G_INDEX);
5907
5908 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT))
5909 len = sizeof(cmd);
5910 else
5911 len = sizeof(struct iwm_binding_cmd_v1);
5912 status = 0;
5913 err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD, len, &cmd,
5914 &status);
5915 if (err == 0 && status != 0)
5916 err = EIO;
5917
5918 return err;
5919}
5920
5921void
5922iwm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
5923 struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
5924{
5925 memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
5926
5927 cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
5928 ctxt->color));
5929 cmd->action = htole32(action);
5930 cmd->apply_time = htole32(apply_time);
5931}
5932
5933void
5934iwm_phy_ctxt_cmd_data(struct iwm_softc *sc, struct iwm_phy_context_cmd *cmd,
5935 struct ieee80211_channel *chan, uint8_t chains_static,
5936 uint8_t chains_dynamic, uint8_t sco)
5937{
5938 struct ieee80211com *ic = &sc->sc_ic;
5939 uint8_t active_cnt, idle_cnt;
5940
5941 cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
5942 IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
5943 cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
5944 if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
5945 if (sco == IEEE80211_HTOP0_SCO_SCA) {
5946 /* secondary chan above -> control chan below */
5947 cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
5948 cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE40;
5949 } else if (sco == IEEE80211_HTOP0_SCO_SCB) {
5950 /* secondary chan below -> control chan above */
5951 cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_ABOVE;
5952 cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE40;
5953 } else {
5954 cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
5955 cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
5956 }
5957 } else {
5958 cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
5959 cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
5960 }
5961
5962 /* Set the Rx chains. */
5963 idle_cnt = chains_static;
5964 active_cnt = chains_dynamic;
5965
5966 cmd->rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
5967 IWM_PHY_RX_CHAIN_VALID_POS);
5968 cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
5969 cmd->rxchain_info |= htole32(active_cnt <<
5970 IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
5971
5972 cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
5973}
5974
5975int
5976iwm_phy_ctxt_cmd_uhb(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
5977 uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
5978 uint32_t apply_time, uint8_t sco)
5979{
5980 struct ieee80211com *ic = &sc->sc_ic;
5981 struct iwm_phy_context_cmd_uhb cmd;
5982 uint8_t active_cnt, idle_cnt;
5983 struct ieee80211_channel *chan = ctxt->channel;
5984
5985 memset(&cmd, 0, sizeof(cmd));
5986 cmd.id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
5987 ctxt->color));
5988 cmd.action = htole32(action);
5989 cmd.apply_time = htole32(apply_time);
5990
5991 cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
5992 IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
5993 cmd.ci.channel = htole32(ieee80211_chan2ieee(ic, chan));
5994 if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
5995 if (sco == IEEE80211_HTOP0_SCO_SCA) {
5996 /* secondary chan above -> control chan below */
5997 cmd.ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
5998 cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE40;
5999 } else if (sco == IEEE80211_HTOP0_SCO_SCB) {
6000 /* secondary chan below -> control chan above */
6001 cmd.ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_ABOVE;
6002 cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE40;
6003 } else {
6004 cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
6005 cmd.ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6006 }
6007 } else {
6008 cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
6009 cmd.ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6010 }
6011
6012 idle_cnt = chains_static;
6013 active_cnt = chains_dynamic;
6014 cmd.rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
6015 IWM_PHY_RX_CHAIN_VALID_POS);
6016 cmd.rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
6017 cmd.rxchain_info |= htole32(active_cnt <<
6018 IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
6019 cmd.txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
6020
6021 return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
6022}
6023
6024int
6025iwm_phy_ctxt_cmd(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
6026 uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
6027 uint32_t apply_time, uint8_t sco)
6028{
6029 struct iwm_phy_context_cmd cmd;
6030
6031 /*
6032 * Intel increased the size of the fw_channel_info struct and neglected
6033 * to bump the phy_context_cmd struct, which contains an fw_channel_info
6034 * member in the middle.
6035 * To keep things simple we use a separate function to handle the larger
6036 * variant of the phy context command.
6037 */
6038 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS))
6039 return iwm_phy_ctxt_cmd_uhb(sc, ctxt, chains_static,
6040 chains_dynamic, action, apply_time, sco);
6041
6042 iwm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
6043
6044 iwm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
6045 chains_static, chains_dynamic, sco);
6046
6047 return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0,
6048 sizeof(struct iwm_phy_context_cmd), &cmd);
6049}
6050
6051int
6052iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
6053{
6054 struct iwm_tx_ring *ring = &sc->txq[sc->cmdqid];
6055 struct iwm_tfd *desc;
6056 struct iwm_tx_data *txdata;
6057 struct iwm_device_cmd *cmd;
6058 struct mbuf *m;
6059 bus_addr_t paddr;
6060 uint32_t addr_lo;
6061 int err = 0, i, paylen, off, s;
6062 int idx, code, async, group_id;
6063 size_t hdrlen, datasz;
6064 uint8_t *data;
6065 int generation = sc->sc_generation;
6066
6067 code = hcmd->id;
6068 async = hcmd->flags & IWM_CMD_ASYNC;
6069 idx = ring->cur;
6070
6071 for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
6072 paylen += hcmd->len[i];
6073 }
6074
6075 /* If this command waits for a response, allocate response buffer. */
6076 hcmd->resp_pkt = NULL;
6077 if (hcmd->flags & IWM_CMD_WANT_RESP) {
6078 uint8_t *resp_buf;
6079 KASSERT(!async);
6080 KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwm_rx_packet));
6081 KASSERT(hcmd->resp_pkt_len <= IWM_CMD_RESP_MAX);
6082 if (sc->sc_cmd_resp_pkt[idx] != NULL)
6083 return ENOSPC;
6084 resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
6085 M_NOWAIT | M_ZERO);
6086 if (resp_buf == NULL)
6087 return ENOMEM;
6088 sc->sc_cmd_resp_pkt[idx] = resp_buf;
6089 sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
6090 } else {
6091 sc->sc_cmd_resp_pkt[idx] = NULL;
6092 }
6093
6094 s = splnet();
6095
6096 desc = &ring->desc[idx];
6097 txdata = &ring->data[idx];
6098
6099 group_id = iwm_cmd_groupid(code);
6100 if (group_id != 0) {
6101 hdrlen = sizeof(cmd->hdr_wide);
6102 datasz = sizeof(cmd->data_wide);
6103 } else {
6104 hdrlen = sizeof(cmd->hdr);
6105 datasz = sizeof(cmd->data);
6106 }
6107
6108 if (paylen > datasz) {
6109 /* Command is too large to fit in pre-allocated space. */
6110 size_t totlen = hdrlen + paylen;
6111 if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
6112 printf("%s: firmware command too long (%zd bytes)\n",
6113 DEVNAME(sc), totlen);
6114 err = EINVAL;
6115 goto out;
6116 }
6117 m = MCLGETL(NULL, M_DONTWAIT, totlen);
6118 if (m == NULL) {
6119 printf("%s: could not get fw cmd mbuf (%zd bytes)\n",
6120 DEVNAME(sc), totlen);
6121 err = ENOMEM;
6122 goto out;
6123 }
6124 cmd = mtod(m, struct iwm_device_cmd *);
6125 err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
6126 totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
6127 if (err) {
6128 printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
6129 DEVNAME(sc), totlen);
6130 m_freem(m);
6131 goto out;
6132 }
6133 txdata->m = m; /* mbuf will be freed in iwm_cmd_done() */
6134 paddr = txdata->map->dm_segs[0].ds_addr;
6135 } else {
6136 cmd = &ring->cmd[idx];
6137 paddr = txdata->cmd_paddr;
6138 }
6139
6140 if (group_id != 0) {
6141 cmd->hdr_wide.opcode = iwm_cmd_opcode(code);
6142 cmd->hdr_wide.group_id = group_id;
6143 cmd->hdr_wide.qid = ring->qid;
6144 cmd->hdr_wide.idx = idx;
6145 cmd->hdr_wide.length = htole16(paylen);
6146 cmd->hdr_wide.version = iwm_cmd_version(code);
6147 data = cmd->data_wide;
6148 } else {
6149 cmd->hdr.code = code;
6150 cmd->hdr.flags = 0;
6151 cmd->hdr.qid = ring->qid;
6152 cmd->hdr.idx = idx;
6153 data = cmd->data;
6154 }
6155
6156 for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
6157 if (hcmd->len[i] == 0)
6158 continue;
6159 memcpy(data + off, hcmd->data[i], hcmd->len[i]);
6160 off += hcmd->len[i];
6161 }
6162 KASSERT(off == paylen);
6163
6164 /* lo field is not aligned */
6165 addr_lo = htole32((uint32_t)paddr);
6166 memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
6167 desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(paddr)
6168 | ((hdrlen + paylen) << 4));
6169 desc->num_tbs = 1;
6170
6171 if (paylen > datasz) {
6172 bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
6173 hdrlen + paylen, BUS_DMASYNC_PREWRITE);
6174 } else {
6175 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
6176 (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
6177 hdrlen + paylen, BUS_DMASYNC_PREWRITE);
6178 }
6179 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
6180 (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
6181 sizeof (*desc), BUS_DMASYNC_PREWRITE);
6182
6183 /*
6184 * Wake up the NIC to make sure that the firmware will see the host
6185 * command - we will let the NIC sleep once all the host commands
6186 * returned. This needs to be done only on 7000 family NICs.
6187 */
6188 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
6189 if (ring->queued == 0 && !iwm_nic_lock(sc)) {
6190 err = EBUSY;
6191 goto out;
6192 }
6193 }
6194
6195 iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
6196
6197 /* Kick command ring. */
6198 ring->queued++;
6199 ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
6200 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
6201
6202 if (!async) {
6203 err = tsleep_nsec(desc, PCATCH, "iwmcmd", SEC_TO_NSEC(1));
6204 if (err == 0) {
6205 /* if hardware is no longer up, return error */
6206 if (generation != sc->sc_generation) {
6207 err = ENXIO;
6208 goto out;
6209 }
6210
6211 /* Response buffer will be freed in iwm_free_resp(). */
6212 hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
6213 sc->sc_cmd_resp_pkt[idx] = NULL;
6214 } else if (generation == sc->sc_generation) {
6215 free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
6216 sc->sc_cmd_resp_len[idx]);
6217 sc->sc_cmd_resp_pkt[idx] = NULL;
6218 }
6219 }
6220 out:
6221 splx(s);
6222
6223 return err;
6224}
6225
6226int
6227iwm_send_cmd_pdu(struct iwm_softc *sc, uint32_t id, uint32_t flags,
6228 uint16_t len, const void *data)
6229{
6230 struct iwm_host_cmd cmd = {
6231 .id = id,
6232 .len = { len, },
6233 .data = { data, },
6234 .flags = flags,
6235 };
6236
6237 return iwm_send_cmd(sc, &cmd);
6238}
6239
6240int
6241iwm_send_cmd_status(struct iwm_softc *sc, struct iwm_host_cmd *cmd,
6242 uint32_t *status)
6243{
6244 struct iwm_rx_packet *pkt;
6245 struct iwm_cmd_response *resp;
6246 int err, resp_len;
6247
6248 KASSERT((cmd->flags & IWM_CMD_WANT_RESP) == 0);
6249 cmd->flags |= IWM_CMD_WANT_RESP;
6250 cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
6251
6252 err = iwm_send_cmd(sc, cmd);
6253 if (err)
6254 return err;
6255
6256 pkt = cmd->resp_pkt;
6257 if (pkt == NULL || (pkt->hdr.flags & IWM_CMD_FAILED_MSK))
6258 return EIO;
6259
6260 resp_len = iwm_rx_packet_payload_len(pkt);
6261 if (resp_len != sizeof(*resp)) {
6262 iwm_free_resp(sc, cmd);
6263 return EIO;
6264 }
6265
6266 resp = (void *)pkt->data;
6267 *status = le32toh(resp->status);
6268 iwm_free_resp(sc, cmd);
6269 return err;
6270}
6271
6272int
6273iwm_send_cmd_pdu_status(struct iwm_softc *sc, uint32_t id, uint16_t len,
6274 const void *data, uint32_t *status)
6275{
6276 struct iwm_host_cmd cmd = {
6277 .id = id,
6278 .len = { len, },
6279 .data = { data, },
6280 };
6281
6282 return iwm_send_cmd_status(sc, &cmd, status);
6283}
6284
6285void
6286iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
6287{
6288 KASSERT((hcmd->flags & (IWM_CMD_WANT_RESP)) == IWM_CMD_WANT_RESP);
6289 free(hcmd->resp_pkt, M_DEVBUF, hcmd->resp_pkt_len);
6290 hcmd->resp_pkt = NULL;
6291}
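
iwm_send_cmd_status() above is the canonical consumer of the IWM_CMD_WANT_RESP contract: set the flag and resp_pkt_len before sending, treat IWM_CMD_FAILED_MSK as EIO, and pair every successful synchronous send with iwm_free_resp(). A condensed sketch of the calling pattern (the bcmd payload is a hypothetical stand-in):

    struct iwm_host_cmd cmd = {
    	.id = IWM_BINDING_CONTEXT_CMD,
    	.flags = IWM_CMD_WANT_RESP,	/* synchronous, reply wanted */
    	.resp_pkt_len = IWM_CMD_RESP_MAX,
    	.len = { sizeof(bcmd), },
    	.data = { &bcmd, },
    };

    if (iwm_send_cmd(sc, &cmd) == 0 && cmd.resp_pkt != NULL) {
    	/* ... inspect cmd.resp_pkt ... */
    	iwm_free_resp(sc, &cmd);	/* frees resp_pkt exactly once */
    }
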
6292
6293void
6294iwm_cmd_done(struct iwm_softc *sc, int qid, int idx, int code)
6295{
6296 struct iwm_tx_ring *ring = &sc->txq[sc->cmdqid];
6297 struct iwm_tx_data *data;
6298
6299 if (qid != sc->cmdqid) {
6300 return; /* Not a command ack. */
6301 }
6302
6303 data = &ring->data[idx];
6304
6305 if (data->m != NULL) {
6306 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
6307 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
6308 bus_dmamap_unload(sc->sc_dmat, data->map);
6309 m_freem(data->m);
6310 data->m = NULL;
6311 }
6312 wakeup(&ring->desc[idx]);
6313
6314 if (ring->queued == 0) {
6315 DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
6316 DEVNAME(sc), code));
6317 } else if (--ring->queued == 0) {
6318 /*
6319 * 7000 family NICs are locked while commands are in progress.
6320 * All commands are now done so we may unlock the NIC again.
6321 */
6322 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
6323 iwm_nic_unlock(sc);
6324 }
6325}
6326
6327void
6328iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
6329 uint16_t len)
6330{
6331 struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
6332 uint16_t val;
6333
6334 scd_bc_tbl = sc->sched_dma.vaddr;
6335
6336 len += IWM_TX_CRC_SIZE + IWM_TX_DELIMITER_SIZE;
6337 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
6338 len = roundup(len, 4) / 4;
6339
6340 val = htole16(sta_id << 12 | len);
6341
6342 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6343 0, sc->sched_dma.size, BUS_DMASYNC_PREWRITE);
6344
6345 /* Update TX scheduler. */
6346 scd_bc_tbl[qid].tfd_offset[idx] = val;
6347 if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP)
6348 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = val;
6349 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6350 0, sc->sched_dma.size, BUS_DMASYNC_POSTWRITE);
6351}
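
The byte-count table entry packs the station ID into the top four bits and the length into the low twelve. Worked through for sta_id 0 and a 60-byte frame: len becomes 60 + IWM_TX_CRC_SIZE (4) + IWM_TX_DELIMITER_SIZE (4) = 68, and with IWM_UCODE_TLV_FLAGS_DW_BC_TABLE set it is converted to a dword count, roundup(68, 4) / 4 = 17, so val = htole16(0 << 12 | 17). A standalone check of that arithmetic (a sketch of the same math, not driver code):

    #include <assert.h>
    #include <stdint.h>

    int
    main(void)
    {
    	uint16_t len = 60 + 4 + 4;	/* frame + CRC + delimiter */
    	len = (len + 4 - 1) / 4;	/* roundup(len, 4) / 4 */
    	assert(len == 17);
    	assert((uint16_t)(0 << 12 | len) == 17);	/* sta_id 0 */
    	return 0;
    }
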
6352
6353void
6354iwm_reset_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id)
6355{
6356 struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
6357 uint16_t val;
6358
6359 scd_bc_tbl = sc->sched_dma.vaddr;
6360
6361 val = htole16(1 | (sta_id << 12));
6362
6363 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6364 0, sc->sched_dma.size, BUS_DMASYNC_PREWRITE);
6365
6366 /* Update TX scheduler. */
6367 scd_bc_tbl[qid].tfd_offset[idx] = val;
6368 if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP)
6369 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = val;
6370
6371 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6372 0, sc->sched_dma.size, BUS_DMASYNC_POSTWRITE);
6373}
6374
6375/*
6376 * Fill in various bit for management frames, and leave them
6377 * unfilled for data frames (firmware takes care of that).
6378 * Return the selected TX rate.
6379 */
6380const struct iwm_rate *
6381iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
6382 struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
6383{
6384 struct ieee80211com *ic = &sc->sc_ic;
6385 struct ieee80211_node *ni = &in->in_ni;
6386 const struct iwm_rate *rinfo;
6387 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
6388 int min_ridx = iwm_rval2ridx(ieee80211_min_basic_rate(ic));
6389 int ridx, rate_flags;
6390
6391 tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
6392 tx->data_retry_limit = IWM_LOW_RETRY_LIMIT;
6393
6394 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
6395 type != IEEE80211_FC0_TYPE_DATA) {
6396 /* for non-data, use the lowest supported rate */
6397 ridx = min_ridx;
6398 tx->data_retry_limit = IWM_MGMT_DFAULT_RETRY_LIMIT;
6399 } else if (ic->ic_fixed_mcs != -1) {
6400 ridx = sc->sc_fixed_ridx;
6401 } else if (ic->ic_fixed_rate != -1) {
6402 ridx = sc->sc_fixed_ridx;
6403 } else {
6404 int i;
6405 /* Use firmware rateset retry table. */
6406 tx->initial_rate_index = 0;
6407 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
6408 if (ni->ni_flags & IEEE80211_NODE_HT) {
6409 ridx = iwm_mcs2ridx[ni->ni_txmcs];
6410 return &iwm_rates[ridx];
6411 }
6412 ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
6413 IWM_RIDX_OFDM : IWM_RIDX_CCK;
6414 for (i = 0; i < ni->ni_rates.rs_nrates; i++) {
6415 if (iwm_rates[i].rate == (ni->ni_txrate &
6416 IEEE80211_RATE_VAL)) {
6417 ridx = i;
6418 break;
6419 }
6420 }
6421 return &iwm_rates[ridx];
6422 }
6423
6424 rinfo = &iwm_rates[ridx];
6425 if (iwm_is_mimo_ht_plcp(rinfo->ht_plcp))
6426 rate_flags = IWM_RATE_MCS_ANT_AB_MSK;
6427 else if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000)
6428 rate_flags = IWM_RATE_MCS_ANT_B_MSK;
6429 else
6430 rate_flags = IWM_RATE_MCS_ANT_A_MSK;
6431 if (IWM_RIDX_IS_CCK(ridx))
6432 rate_flags |= IWM_RATE_MCS_CCK_MSK;
6433 if ((ni->ni_flags & IEEE80211_NODE_HT) &&
6434 type == IEEE80211_FC0_TYPE_DATA &&
6435 rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
6436 uint8_t sco;
6437 if (ieee80211_node_supports_ht_chan40(ni))
6438 sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
6439 else
6440 sco = IEEE80211_HTOP0_SCO_SCN;
6441 rate_flags |= IWM_RATE_MCS_HT_MSK;
6442 if ((sco == IEEE80211_HTOP0_SCO_SCA ||
6443 sco == IEEE80211_HTOP0_SCO_SCB) &&
6444 in->in_phyctxt != NULL && in->in_phyctxt->sco == sco) {
6445 rate_flags |= IWM_RATE_MCS_CHAN_WIDTH_40;
6446 if (ieee80211_node_supports_ht_sgi40(ni))
6447 rate_flags |= IWM_RATE_MCS_SGI_MSK;
6448 } else if (ieee80211_node_supports_ht_sgi20(ni))
6449 rate_flags |= IWM_RATE_MCS_SGI_MSK;
6450 tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
6451 } else
6452 tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
6453
6454 return rinfo;
6455}
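
As a concrete reading of the flag assembly above: a SISO HT data frame sent on antenna A at 20 MHz with short guard interval would OR together the antenna mask (1 << 14), the HT flag (1 << 8), and the SGI flag (1 << 13) with the MCS PLCP code. A standalone sketch with the shift values copied from the macro expansions above (the PLCP value 7 is an assumed example for MCS 7, not taken from the driver's rate table):

    #include <assert.h>
    #include <stdint.h>

    #define RATE_MCS_HT_MSK		(1 << 8)
    #define RATE_MCS_SGI_MSK	(1 << 13)
    #define RATE_MCS_ANT_A_MSK	(1 << 14)

    int
    main(void)
    {
    	uint32_t ht_plcp = 7;	/* assumed PLCP code for SISO MCS 7 */
    	uint32_t rate_n_flags = RATE_MCS_ANT_A_MSK | RATE_MCS_HT_MSK |
    	    RATE_MCS_SGI_MSK | ht_plcp;
    	assert(rate_n_flags == 0x6107);
    	return 0;
    }
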
6456
6457 #define TB0_SIZE 16
6458int
6459iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
6460{
6461 struct ieee80211com *ic = &sc->sc_ic;
6462 struct iwm_node *in = (void *)ni;
6463 struct iwm_tx_ring *ring;
6464 struct iwm_tx_data *data;
6465 struct iwm_tfd *desc;
6466 struct iwm_device_cmd *cmd;
6467 struct iwm_tx_cmd *tx;
6468 struct ieee80211_frame *wh;
6469 struct ieee80211_key *k = NULL;
6470 const struct iwm_rate *rinfo;
6471 uint8_t *ivp;
6472 uint32_t flags;
6473 u_int hdrlen;
6474 bus_dma_segment_t *seg;
6475 uint8_t tid, type, subtype;
6476 int i, totlen, err, pad;
6477 int qid, hasqos;
6478
6479 wh = mtod(m, struct ieee80211_frame *);
6480 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
6481 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
6482 if (type == IEEE80211_FC0_TYPE_CTL)
6483 hdrlen = sizeof(struct ieee80211_frame_min);
6484 else
6485 hdrlen = ieee80211_get_hdrlen(wh);
6486
6487 hasqos = ieee80211_has_qos(wh);
6488 if (type == IEEE80211_FC0_TYPE_DATA)
6489 tid = IWM_TID_NON_QOS;
6490 else
6491 tid = IWM_MAX_TID_COUNT;
6492
6493 /*
6494 * Map EDCA categories to Tx data queues.
6495 *
6496 * We use static data queue assignments even in DQA mode. We do not
6497 * need to share Tx queues between stations because we only implement
6498 * client mode; the firmware's station table contains only one entry
6499 * which represents our access point.
6500 */
6501 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
6502 qid = IWM_DQA_MIN_MGMT_QUEUE + ac;
6503 else
6504 qid = ac;
6505
6506 /* If possible, put this frame on an aggregation queue. */
6507 if (hasqos) {
6508 struct ieee80211_tx_ba *ba;
6509 uint16_t qos = ieee80211_get_qos(wh);
6510 int qostid = qos & IEEE80211_QOS_TID;
6511 int agg_qid = IWM_FIRST_AGG_TX_QUEUE + qostid;
6512
6513 ba = &ni->ni_tx_ba[qostid];
6514 if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
6515 type == IEEE80211_FC0_TYPE_DATA &&
6516 subtype != IEEE80211_FC0_SUBTYPE_NODATA &&
6517 (sc->tx_ba_queue_mask & (1 << agg_qid)) &&
6518 ba->ba_state == IEEE80211_BA_AGREED) {
6519 qid = agg_qid;
6520 tid = qostid;
6521 ac = ieee80211_up_to_ac(ic, qostid);
6522 }
6523 }
6524
6525 ring = &sc->txq[qid];
6526 desc = &ring->desc[ring->cur];
6527 memset(desc, 0, sizeof(*desc));
6528 data = &ring->data[ring->cur];
6529
6530 cmd = &ring->cmd[ring->cur];
6531 cmd->hdr.code = IWM_TX_CMD;
6532 cmd->hdr.flags = 0;
6533 cmd->hdr.qid = ring->qid;
6534 cmd->hdr.idx = ring->cur;
6535
6536 tx = (void *)cmd->data;
6537 memset(tx, 0, sizeof(*tx));
6538
6539 rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
6540
6541 #if NBPFILTER > 0
6542 if (sc->sc_drvbpf != NULL) {
6543 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
6544 uint16_t chan_flags;
6545
6546 tap->wt_flags = 0;
6547 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
6548 chan_flags = ni->ni_chan->ic_flags;
6549 if (ic->ic_curmode != IEEE80211_MODE_11N)
6550 chan_flags &= ~IEEE80211_CHAN_HT;
6551 tap->wt_chan_flags = htole16(chan_flags);
6552 if ((ni->ni_flags & IEEE80211_NODE_HT) &&
6553 !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
6554 type == IEEE80211_FC0_TYPE_DATA &&
6555 rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
6556 tap->wt_rate = (0x80 | rinfo->ht_plcp);
6557 } else
6558 tap->wt_rate = rinfo->rate;
6559 if ((ic->ic_flags & IEEE80211_F_WEPON) &&
6560 (wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
6561 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
6562
6563 bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
6564 m, BPF_DIRECTION_OUT);
6565 }
6566#endif
6567 totlen = m->m_pkthdr.len;
6568
6569 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
6570 k = ieee80211_get_txkey(ic, wh, ni);
6571 if ((k->k_flags & IEEE80211_KEY_GROUP) ||
6572 (k->k_cipher != IEEE80211_CIPHER_CCMP)) {
6573 if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
6574 return ENOBUFS;
6575 /* 802.11 header may have moved. */
6576 wh = mtod(m, struct ieee80211_frame *);
6577 totlen = m->m_pkthdr.len;
6578 k = NULL; /* skip hardware crypto below */
6579 } else {
6580 /* HW appends CCMP MIC */
6581 totlen += IEEE80211_CCMP_HDRLEN;
6582 }
6583 }
6584
6585 flags = 0;
6586 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
6587 flags |= IWM_TX_CMD_FLG_ACK;
6588 }
6589
6590 if (type == IEEE80211_FC0_TYPE_DATA &&
6591 !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
6592 (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold ||
6593 (ic->ic_flags & IEEE80211_F_USEPROT)))
6594 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
6595
6596 tx->sta_id = IWM_STATION_ID;
6597
6598 if (type == IEEE80211_FC0_TYPE_MGT) {
6599 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
6600 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
6601 tx->pm_frame_timeout = htole16(3);
6602 else
6603 tx->pm_frame_timeout = htole16(2);
6604 } else {
6605 if (type == IEEE80211_FC0_TYPE_CTL &&
6606 subtype == IEEE80211_FC0_SUBTYPE_BAR) {
6607 struct ieee80211_frame_min *mwh;
6608 uint8_t *barfrm;
6609 uint16_t ctl;
6610 mwh = mtod(m, struct ieee80211_frame_min *);
6611 barfrm = (uint8_t *)&mwh[1];
6612 ctl = LE_READ_2(barfrm);
6613 tid = (ctl & IEEE80211_BA_TID_INFO_MASK) >>
6614 IEEE80211_BA_TID_INFO_SHIFT;
6615 flags |= IWM_TX_CMD_FLG_ACK | IWM_TX_CMD_FLG_BAR;
6616 tx->data_retry_limit = IWM_BAR_DFAULT_RETRY_LIMIT;
6617 }
6618
6619 tx->pm_frame_timeout = htole16(0);
6620 }
6621
6622 if (hdrlen & 3) {
6623 /* First segment length must be a multiple of 4. */
6624 flags |= IWM_TX_CMD_FLG_MH_PAD;
6625 tx->offload_assist |= htole16(IWM_TX_CMD_OFFLD_PAD);
6626 pad = 4 - (hdrlen & 3);
6627 } else
6628 pad = 0;
6629
6630 tx->len = htole16(totlen);
6631 tx->tid_tspec = tid;
6632 tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
6633
6634 /* Set physical address of "scratch area". */
6635 tx->dram_lsb_ptr = htole32(data->scratch_paddr);
6636 tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
6637
6638 /* Copy 802.11 header in TX command. */
6639 memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
6640
6641 if (k != NULL && k->k_cipher == IEEE80211_CIPHER_CCMP) {
6642 /* Trim 802.11 header and prepend CCMP IV. */
6643 m_adj(m, hdrlen - IEEE80211_CCMP_HDRLEN);
6644 ivp = mtod(m, u_int8_t *);
6645 k->k_tsc++; /* increment the 48-bit PN */
6646 ivp[0] = k->k_tsc; /* PN0 */
6647 ivp[1] = k->k_tsc >> 8; /* PN1 */
6648 ivp[2] = 0; /* Rsvd */
6649 ivp[3] = k->k_id << 6 | IEEE80211_WEP_EXTIV;
6650 ivp[4] = k->k_tsc >> 16; /* PN2 */
6651 ivp[5] = k->k_tsc >> 24; /* PN3 */
6652 ivp[6] = k->k_tsc >> 32; /* PN4 */
6653 ivp[7] = k->k_tsc >> 40; /* PN5 */
6654
6655 tx->sec_ctl = IWM_TX_CMD_SEC_CCM;
6656 memcpy(tx->key, k->k_key, MIN(sizeof(tx->key), k->k_len));
6657 /* TX scheduler includes CCMP MIC length. */
6658 totlen += IEEE80211_CCMP_MICLEN;
6659 } else {
6660 /* Trim 802.11 header. */
6661 m_adj(m, hdrlen);
6662 tx->sec_ctl = 0;
6663 }
6664
6665 flags |= IWM_TX_CMD_FLG_BT_DIS;
6666 if (!hasqos)
6667 flags |= IWM_TX_CMD_FLG_SEQ_CTL;
6668
6669 tx->tx_flags |= htole32(flags);
6670
6671 err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
6672 BUS_DMA_NOWAIT | BUS_DMA_WRITE);
6673 if (err && err != EFBIG) {
6674 printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
6675 m_freem(m);
6676 return err;
6677 }
6678 if (err) {
6679 /* Too many DMA segments, linearize mbuf. */
6680 if (m_defrag(m, M_DONTWAIT)) {
6681 m_freem(m);
6682 return ENOBUFS;
6683 }
6684 err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
6685 BUS_DMA_NOWAIT | BUS_DMA_WRITE);
6686 if (err) {
6687 printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc),
6688 err);
6689 m_freem(m);
6690 return err;
6691 }
6692 }
6693 data->m = m;
6694 data->in = in;
6695 data->txmcs = ni->ni_txmcs;
6696 data->txrate = ni->ni_txrate;
6697 data->ampdu_txmcs = ni->ni_txmcs; /* updated upon Tx interrupt */
6698
6699 /* Fill TX descriptor. */
6700 desc->num_tbs = 2 + data->map->dm_nsegs;
6701
6702 desc->tbs[0].lo = htole32(data->cmd_paddr);
6703 desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
6704 (TB0_SIZE << 4));
6705 desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
6706 desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
6707 ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
6708 + hdrlen + pad - TB0_SIZE) << 4));
6709
6710 /* Other DMA segments are for data payload. */
6711 seg = data->map->dm_segs;
6712 for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
6713 desc->tbs[i+2].lo = htole32(seg->ds_addr);
6714 desc->tbs[i+2].hi_n_len = \
6715 htole16(iwm_get_dma_hi_addr(seg->ds_addr)
6716 | ((seg->ds_len) << 4));
6717 }
6718
6719 bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
6720 BUS_DMASYNC_PREWRITE);
6721 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
6722 (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
6723 sizeof (*cmd), BUS_DMASYNC_PREWRITE);
6724 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
6725 (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
6726 sizeof (*desc), BUS_DMASYNC_PREWRITE);
6727
6728 iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, totlen);
6729
6730 /* Kick TX ring. */
6731 ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
6732 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
6733
6734 /* Mark TX ring as full if we reach a certain threshold. */
6735 if (++ring->queued > IWM_TX_RING_HIMARK) {
6736 sc->qfullmsk |= 1 << ring->qid;
6737 }
6738
6739 if (ic->ic_if.if_flags & IFF_UP)
6740 sc->sc_tx_timer[ring->qid] = 15;
6741
6742 return 0;
6743}
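
Each transfer-buffer entry in the TFD above packs a 36-bit DMA address and a 12-bit byte count: the low 32 address bits go into tbs[i].lo, address bits 32-35 into the low nibble of hi_n_len, and the length into the upper 12 bits (hence the << 4). A standalone sketch of that packing (the helper mirrors what iwm_get_dma_hi_addr() is used for here; names are illustrative):

    #include <assert.h>
    #include <stdint.h>

    /* Address bits 32-35, as iwm_get_dma_hi_addr() is used above. */
    static inline uint16_t
    dma_hi_addr(uint64_t paddr)
    {
    	return (paddr >> 32) & 0xf;
    }

    int
    main(void)
    {
    	uint64_t paddr = 0x123456789ULL;	/* 36-bit DMA address */
    	uint16_t len = 48;			/* bytes in this buffer */
    	uint32_t lo = (uint32_t)paddr;
    	uint16_t hi_n_len = dma_hi_addr(paddr) | (len << 4);

    	assert(lo == 0x23456789);
    	assert(hi_n_len == 0x301);	/* 0x1 | (48 << 4) */
    	return 0;
    }
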
6744
6745int
6746iwm_flush_tx_path(struct iwm_softc *sc, int tfd_queue_msk)
6747{
6748 struct iwm_tx_path_flush_cmd flush_cmd = {
6749 .sta_id = htole32(IWM_STATION_ID),
6750 .tid_mask = htole16(0xffff),
6751 };
6752 int err;
6753
6754 err = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, 0,
6755 sizeof(flush_cmd), &flush_cmd);
6756 if (err)
6757 printf("%s: Flushing tx queue failed: %d\n", DEVNAME(sc), err);
6758 return err;
6759}
6760
6761 #define IWM_FLUSH_WAIT_MS 2000
6762
6763int
6764iwm_wait_tx_queues_empty(struct iwm_softc *sc)
6765{
6766 int i, err;
6767
6768 for (i = 0; i < IWM_MAX_QUEUES; i++) {
6769 struct iwm_tx_ring *ring = &sc->txq[i];
6770
6771 if (i == sc->cmdqid)
6772 continue;
6773
6774 while (ring->queued > 0) {
6775 err = tsleep_nsec(ring, 0, "iwmflush",
6776 MSEC_TO_NSEC(IWM_FLUSH_WAIT_MS));
6777 if (err)
6778 return err;
6779 }
6780 }
6781
6782 return 0;
6783}
6784
6785void
6786iwm_led_enable(struct iwm_softc *sc)
6787{
6788 IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_ON);
6789}
6790
6791void
6792iwm_led_disable(struct iwm_softc *sc)
6793{
6794 IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_OFF);
6795}
6796
6797int
6798iwm_led_is_enabled(struct iwm_softc *sc)
6799{
6800 return (IWM_READ(sc, IWM_CSR_LED_REG) == IWM_CSR_LED_REG_TURN_ON);
6801}
6802
6803 #define IWM_LED_BLINK_TIMEOUT_MSEC 200
6804
6805void
6806iwm_led_blink_timeout(void *arg)
6807{
6808 struct iwm_softc *sc = arg;
6809
6810 if (iwm_led_is_enabled(sc))
6811 iwm_led_disable(sc);
6812 else
6813 iwm_led_enable(sc);
6814
6815 timeout_add_msec(&sc->sc_led_blink_to, IWM_LED_BLINK_TIMEOUT_MSEC);
6816}
6817
6818void
6819iwm_led_blink_start(struct iwm_softc *sc)
6820{
6821 timeout_add_msec(&sc->sc_led_blink_to, IWM_LED_BLINK_TIMEOUT_MSEC);
6822 iwm_led_enable(sc);
6823}
6824
6825void
6826iwm_led_blink_stop(struct iwm_softc *sc)
6827{
6828 timeout_del(&sc->sc_led_blink_to);
6829 iwm_led_disable(sc);
6830}
6831
6832 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC 25
6833
6834int
6835iwm_beacon_filter_send_cmd(struct iwm_softc *sc,
6836 struct iwm_beacon_filter_cmd *cmd)
6837{
6838 return iwm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
6839 0, sizeof(struct iwm_beacon_filter_cmd), cmd);
6840}
6841
6842void
6843iwm_beacon_filter_set_cqm_params(struct iwm_softc *sc, struct iwm_node *in,
6844 struct iwm_beacon_filter_cmd *cmd)
6845{
6846 cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
6847}
6848
6849int
6850iwm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in, int enable)
6851{
6852 struct iwm_beacon_filter_cmd cmd = {
6853 IWM_BF_CMD_CONFIG_DEFAULTS,
6854 .bf_enable_beacon_filter = htole32(1),
6855 .ba_enable_beacon_abort = htole32(enable),
6856 };
6857
6858 if (!sc->sc_bf.bf_enabled)
6859 return 0;
6860
6861 sc->sc_bf.ba_enabled = enable;
6862 iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
6863 return iwm_beacon_filter_send_cmd(sc, &cmd);
6864}
6865
6866void
6867iwm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
6868 struct iwm_mac_power_cmd *cmd)
6869{
6870 struct ieee80211com *ic = &sc->sc_ic;
6871 struct ieee80211_node *ni = &in->in_ni;
6872 int dtim_period, dtim_msec, keep_alive;
6873
6874 cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
6875 in->in_color));
6876 if (ni->ni_dtimperiod)
6877 dtim_period = ni->ni_dtimperiod;
6878 else
6879 dtim_period = 1;
6880
6881 /*
6882 * Regardless of power management state the driver must set
6883 * keep alive period. FW will use it for sending keep alive NDPs
6884 * immediately after association. Check that keep alive period
6885 * is at least 3 * DTIM.
6886 */
6887 dtim_msec = dtim_period * ni->ni_intval;
6888 keep_alive = MAX(3 * dtim_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
6889 keep_alive = roundup(keep_alive, 1000) / 1000;
6890 cmd->keep_alive_seconds = htole16(keep_alive);
6891
6892 if (ic->ic_opmode != IEEE80211_M_MONITOR)
6893 cmd->flags = htole16(IWM_POWER_FLAGS_POWER_SAVE_ENA_MSK);
6894}
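
Worked through with typical association parameters: a DTIM period of 2 and a beacon interval of 100 give dtim_msec = 200, so keep_alive = MAX(600, 25000) = 25000 and roundup(25000, 1000) / 1000 = 25 seconds; only a DTIM interval above roughly 8.3 seconds would push the keep-alive past the 25-second floor. (Strictly, ni_intval is in time units of 1.024 ms, which this computation approximates as milliseconds.) A standalone check of the arithmetic (sketch):

    #include <assert.h>

    int
    main(void)
    {
    	int dtim_period = 2, intval = 100;	/* beacon interval */
    	int dtim_msec = dtim_period * intval;
    	int keep_alive = (3 * dtim_msec > 25000) ?
    	    3 * dtim_msec : 25000;		/* MAX() */
    	keep_alive = (keep_alive + 999) / 1000;	/* roundup(x,1000)/1000 */
    	assert(keep_alive == 25);
    	return 0;
    }
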
6895
6896int
6897iwm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
6898{
6899 int err;
6900 int ba_enable;
6901 struct iwm_mac_power_cmd cmd;
6902
6903 memset(&cmd, 0, sizeof(cmd));
6904
6905 iwm_power_build_cmd(sc, in, &cmd);
6906
6907 err = iwm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE, 0,
6908 sizeof(cmd), &cmd);
6909 if (err != 0)
6910 return err;
6911
6912 ba_enable = !!(cmd.flags &
6913 htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
6914 return iwm_update_beacon_abort(sc, in, ba_enable);
6915}
6916
6917int
6918iwm_power_update_device(struct iwm_softc *sc)
6919{
6920 struct iwm_device_power_cmd cmd = { };
6921 struct ieee80211com *ic = &sc->sc_ic;
6922
6923 if (ic->ic_opmode != IEEE80211_M_MONITOR)
6924 cmd.flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
6925
6926 return iwm_send_cmd_pdu(sc,
6927 IWM_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
6928}
6929
6930int
6931iwm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
6932{
6933 struct iwm_beacon_filter_cmd cmd = {
6934 IWM_BF_CMD_CONFIG_DEFAULTS,
6935 .bf_enable_beacon_filter = htole32(1),
6936 };
6937 int err;
6938
6939 iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
6940 err = iwm_beacon_filter_send_cmd(sc, &cmd);
6941
6942 if (err == 0)
6943 sc->sc_bf.bf_enabled = 1;
6944
6945 return err;
6946}
6947
6948int
6949iwm_disable_beacon_filter(struct iwm_softc *sc)
6950{
6951 struct iwm_beacon_filter_cmd cmd;
6952 int err;
6953
6954 memset(&cmd, 0, sizeof(cmd));
6955
6956 err = iwm_beacon_filter_send_cmd(sc, &cmd);
6957 if (err == 0)
6958 sc->sc_bf.bf_enabled = 0;
6959
6960 return err;
6961}
6962
6963int
6964iwm_add_sta_cmd(struct iwm_softc *sc, struct iwm_node *in, int update)
6965{
6966 struct iwm_add_sta_cmd add_sta_cmd;
6967 int err;
6968 uint32_t status;
6969 size_t cmdsize;
6970 struct ieee80211com *ic = &sc->sc_ic;
6971
6972 if (!update && (sc->sc_flags & IWM_FLAG_STA_ACTIVE))
6973 panic("STA already added");
6974
6975 memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
6976
6977 if (ic->ic_opmode == IEEE80211_M_MONITOR)
6978 add_sta_cmd.sta_id = IWM_MONITOR_STA_ID;
6979 else
6980 add_sta_cmd.sta_id = IWM_STATION_ID;
6981 if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE)) {
6982 if (ic->ic_opmode == IEEE80211_M_MONITOR)
6983 add_sta_cmd.station_type = IWM_STA_GENERAL_PURPOSE;
6984 else
6985 add_sta_cmd.station_type = IWM_STA_LINK;
6986 }
6987 add_sta_cmd.mac_id_n_color
6988 = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color))((__uint32_t)(((in->in_id << (0)) | (in->in_color
<< (8)))))
;
6989 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6990 int qid;
6991 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, etheranyaddr)__builtin_memcpy((&add_sta_cmd.addr), (etheranyaddr), (6)
)
;
6992 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)((sc->sc_enabled_capa)[(12)>>3] & (1<<((12
)&(8 -1))))
)
6993 qid = IWM_DQA_INJECT_MONITOR_QUEUE2;
6994 else
6995 qid = IWM_AUX_QUEUE15;
6996 in->tfd_queue_msk |= (1 << qid);
6997 } else {
6998 int ac;
6999 for (ac = 0; ac < EDCA_NUM_AC4; ac++) {
7000 int qid = ac;
7001 if (isset(sc->sc_enabled_capa,((sc->sc_enabled_capa)[(12)>>3] & (1<<((12
)&(8 -1))))
7002 IWM_UCODE_TLV_CAPA_DQA_SUPPORT)((sc->sc_enabled_capa)[(12)>>3] & (1<<((12
)&(8 -1))))
)
7003 qid += IWM_DQA_MIN_MGMT_QUEUE5;
7004 in->tfd_queue_msk |= (1 << qid);
7005 }
7006 }
7007 if (!update) {
7008 if (ic->ic_opmode == IEEE80211_M_MONITOR)
7009 IEEE80211_ADDR_COPY(&add_sta_cmd.addr,__builtin_memcpy((&add_sta_cmd.addr), (etherbroadcastaddr
), (6))
7010 etherbroadcastaddr)__builtin_memcpy((&add_sta_cmd.addr), (etherbroadcastaddr
), (6))
;
7011 else
7012 IEEE80211_ADDR_COPY(&add_sta_cmd.addr,__builtin_memcpy((&add_sta_cmd.addr), (in->in_macaddr)
, (6))
7013 in->in_macaddr)__builtin_memcpy((&add_sta_cmd.addr), (in->in_macaddr)
, (6))
;
7014 }
7015 add_sta_cmd.add_modify = update ? 1 : 0;
7016 add_sta_cmd.station_flags_msk
7017 |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK)((__uint32_t)((3 << 26) | (3 << 28)));
7018 if (update) {
7019 add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_QUEUES(1 << 7) |
7020 IWM_STA_MODIFY_TID_DISABLE_TX(1 << 1));
7021 }
7022 add_sta_cmd.tid_disable_tx = htole16(in->tid_disable_ampdu)((__uint16_t)(in->tid_disable_ampdu));
7023 add_sta_cmd.tfd_queue_msk = htole32(in->tfd_queue_msk)((__uint32_t)(in->tfd_queue_msk));
7024
7025 if (in->in_ni.ni_flags & IEEE80211_NODE_HT0x0400) {
7026 add_sta_cmd.station_flags_msk
7027 |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_MSK |((__uint32_t)((7 << 19) | (7 << 23)))
7028 IWM_STA_FLG_AGG_MPDU_DENS_MSK)((__uint32_t)((7 << 19) | (7 << 23)));
7029
7030 if (iwm_mimo_enabled(sc)) {
7031 if (in->in_ni.ni_rxmcs[1] != 0) {
7032 add_sta_cmd.station_flags |=
7033 htole32(IWM_STA_FLG_MIMO_EN_MIMO2)((__uint32_t)((1 << 28)));
7034 }
7035 if (in->in_ni.ni_rxmcs[2] != 0) {
7036 add_sta_cmd.station_flags |=
7037 htole32(IWM_STA_FLG_MIMO_EN_MIMO3)((__uint32_t)((2 << 28)));
7038 }
7039 }
7040
7041 if (ieee80211_node_supports_ht_chan40(&in->in_ni)) {
7042 add_sta_cmd.station_flags |= htole32(((__uint32_t)((1 << 26)))
7043 IWM_STA_FLG_FAT_EN_40MHZ)((__uint32_t)((1 << 26)));
7044 }
7045
7046 add_sta_cmd.station_flags
7047 |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_64K)((__uint32_t)((3 << 19)));
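		/*
		 * Translate the A-MPDU start spacing setting held in
		 * ic->ic_ampdu_params into the firmware's MPDU density
		 * codes (2/4/8/16 microseconds); any other value leaves
		 * the zeroed default, i.e. no extra density restriction.
		 */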
		switch (ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) {
		case IEEE80211_AMPDU_PARAM_SS_2:
			add_sta_cmd.station_flags
			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_2US);
			break;
		case IEEE80211_AMPDU_PARAM_SS_4:
			add_sta_cmd.station_flags
			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_4US);
			break;
		case IEEE80211_AMPDU_PARAM_SS_8:
			add_sta_cmd.station_flags
			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_8US);
			break;
		case IEEE80211_AMPDU_PARAM_SS_16:
			add_sta_cmd.station_flags
			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_16US);
			break;
		default:
			break;
		}
	}

	status = IWM_ADD_STA_SUCCESS;
	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
		cmdsize = sizeof(add_sta_cmd);
	else
		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize,
	    &add_sta_cmd, &status);
	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
		err = EIO;

	return err;
}

int
iwm_add_aux_sta(struct iwm_softc *sc)
{
	struct iwm_add_sta_cmd cmd;
	int err, qid;
	uint32_t status;
	size_t cmdsize;

	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)) {
		qid = IWM_DQA_AUX_QUEUE;
		err = iwm_enable_txq(sc, IWM_AUX_STA_ID, qid,
		    IWM_TX_FIFO_MCAST, 0, IWM_MAX_TID_COUNT, 0);
	} else {
		qid = IWM_AUX_QUEUE;
		err = iwm_enable_ac_txq(sc, qid, IWM_TX_FIFO_MCAST);
	}
	if (err)
		return err;

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = IWM_AUX_STA_ID;
	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
		cmd.station_type = IWM_STA_AUX_ACTIVITY;
	cmd.mac_id_n_color =
	    htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_MAC_INDEX_AUX, 0));
	cmd.tfd_queue_msk = htole32(1 << qid);
	cmd.tid_disable_tx = htole16(0xffff);

	status = IWM_ADD_STA_SUCCESS;
	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
		cmdsize = sizeof(cmd);
	else
		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd,
	    &status);
	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
		err = EIO;

	return err;
}

int
iwm_drain_sta(struct iwm_softc *sc, struct iwm_node* in, int drain)
{
	struct iwm_add_sta_cmd cmd;
	int err;
	uint32_t status;
	size_t cmdsize;

	memset(&cmd, 0, sizeof(cmd));
	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
	    in->in_color));
	cmd.sta_id = IWM_STATION_ID;
	cmd.add_modify = IWM_STA_MODE_MODIFY;
	cmd.station_flags = drain ? htole32(IWM_STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = htole32(IWM_STA_FLG_DRAIN_FLOW);

	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
		cmdsize = sizeof(cmd);
	else
		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);

	status = IWM_ADD_STA_SUCCESS;
	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA,
	    cmdsize, &cmd, &status);
	if (err) {
		printf("%s: could not update sta (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	switch (status & IWM_ADD_STA_STATUS_MASK) {
	case IWM_ADD_STA_SUCCESS:
		break;
	default:
		err = EIO;
		printf("%s: Couldn't %s draining for station\n",
		    DEVNAME(sc), drain ? "enable" : "disable");
		break;
	}

	return err;
}

int
iwm_flush_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	int err;

	sc->sc_flags |= IWM_FLAG_TXFLUSH;

	err = iwm_drain_sta(sc, in, 1);
	if (err)
		goto done;

	err = iwm_flush_tx_path(sc, in->tfd_queue_msk);
	if (err) {
		printf("%s: could not flush Tx path (error %d)\n",
		    DEVNAME(sc), err);
		goto done;
	}

	/*
	 * Flushing Tx rings may fail if the AP has disappeared.
	 * We can rely on iwm_newstate_task() to reset everything and begin
	 * scanning again if we are left with outstanding frames on queues.
	 */
	err = iwm_wait_tx_queues_empty(sc);
	if (err)
		goto done;

	err = iwm_drain_sta(sc, in, 0);
done:
	sc->sc_flags &= ~IWM_FLAG_TXFLUSH;
	return err;
}

int
iwm_rm_sta_cmd(struct iwm_softc *sc, struct iwm_node *in)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_rm_sta_cmd rm_sta_cmd;
	int err;

	if ((sc->sc_flags & IWM_FLAG_STA_ACTIVE) == 0)
		panic("sta already removed");

	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		rm_sta_cmd.sta_id = IWM_MONITOR_STA_ID;
	else
		rm_sta_cmd.sta_id = IWM_STATION_ID;

	err = iwm_send_cmd_pdu(sc, IWM_REMOVE_STA, 0, sizeof(rm_sta_cmd),
	    &rm_sta_cmd);

	return err;
}

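/*
 * Build the RX chain selection word used by the scan commands below.
 * Worked example, using the IWM_PHY_RX_CHAIN_*_POS shift positions
 * (1, 4, 7 and 0): with rx_ant = 0x3 (antennas A and B), the result is
 * (0x3 << 1) | (0x3 << 4) | (0x3 << 7) | (0x1 << 0) == 0x1b7.
 */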
uint16_t
iwm_scan_rx_chain(struct iwm_softc *sc)
{
	uint16_t rx_chain;
	uint8_t rx_ant;

	rx_ant = iwm_fw_valid_rx_ant(sc);
	rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
	rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
	return htole16(rx_chain);
}

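/*
 * Select the rate and Tx antenna for scan probe requests. The loop
 * below round-robins sc_scan_last_antenna across the antennas the
 * firmware reports as valid, so successive scans spread probe
 * transmissions over the available antennas; 2 GHz channels use
 * 1 Mbit/s CCK unless CCK is disallowed, everything else 6 Mbit/s OFDM.
 */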
uint32_t
iwm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
{
	uint32_t tx_ant;
	int i, ind;

	for (i = 0, ind = sc->sc_scan_last_antenna;
	    i < IWM_RATE_MCS_ANT_NUM; i++) {
		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
		if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
			sc->sc_scan_last_antenna = ind;
			break;
		}
	}
	tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;

	if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
		return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
		    tx_ant);
	else
		return htole32(IWM_RATE_6M_PLCP | tx_ant);
}

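/*
 * Fill the scan request's channel array from net80211's channel list.
 * channel_num is the IEEE channel number obtained through
 * ieee80211_mhz2ieee(); for example, 2412 MHz maps to channel 1 and
 * 5180 MHz to channel 36.
 */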
uint8_t
iwm_lmac_scan_fill_channels(struct iwm_softc *sc,
    struct iwm_scan_channel_cfg_lmac *chan, int n_ssids, int bgscan)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *c;
	uint8_t nchan;

	for (nchan = 0, c = &ic->ic_channels[1];
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels;
	    c++) {
		if (c->ic_flags == 0)
			continue;

		chan->channel_num = htole16(ieee80211_mhz2ieee(c->ic_freq, 0));
		chan->iter_count = htole16(1);
		chan->iter_interval = 0;
		chan->flags = htole32(IWM_UNIFIED_SCAN_CHANNEL_PARTIAL);
		/*
		 * Firmware may become unresponsive when asked to send
		 * a directed probe request on a passive channel.
		 */
		if (n_ssids != 0 && !bgscan &&
		    (c->ic_flags & IEEE80211_CHAN_PASSIVE) == 0)
			chan->flags |= htole32(1 << 1); /* select SSID 0 */
		chan++;
		nchan++;
	}

	return nchan;
}

uint8_t
iwm_umac_scan_fill_channels(struct iwm_softc *sc,
    struct iwm_scan_channel_cfg_umac *chan, int n_ssids, int bgscan)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *c;
	uint8_t nchan;

	for (nchan = 0, c = &ic->ic_channels[1];
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels;
	    c++) {
		if (c->ic_flags == 0)
			continue;

		chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
		chan->iter_count = 1;
		chan->iter_interval = htole16(0);
		/*
		 * Firmware may become unresponsive when asked to send
		 * a directed probe request on a passive channel.
		 */
		if (n_ssids != 0 && !bgscan &&
		    (c->ic_flags & IEEE80211_CHAN_PASSIVE) == 0)
			chan->flags = htole32(1 << 0); /* select SSID 0 */
		chan++;
		nchan++;
	}

	return nchan;
}

int
iwm_fill_probe_req_v1(struct iwm_softc *sc, struct iwm_scan_probe_req_v1 *preq1)
{
	struct iwm_scan_probe_req preq2;
	int err, i;

	err = iwm_fill_probe_req(sc, &preq2);
	if (err)
		return err;

	preq1->mac_header = preq2.mac_header;
	for (i = 0; i < nitems(preq1->band_data); i++)
		preq1->band_data[i] = preq2.band_data[i];
	preq1->common_data = preq2.common_data;
	memcpy(preq1->buf, preq2.buf, sizeof(preq1->buf));
	return 0;
}

int
iwm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
	struct ieee80211_rateset *rs;
	size_t remain = sizeof(preq->buf);
	uint8_t *frm, *pos;

	memset(preq, 0, sizeof(*preq));
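	/*
	 * remain tracks the free space left in preq->buf; every element
	 * appended below is bounds-checked against it before being added.
	 */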
	if (remain < sizeof(*wh) + 2)
		return ENOBUFS;

	/*
	 * Build a probe request frame. Most of the following code is a
	 * copy & paste of what is done in net80211.
	 */
	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */

	frm = (uint8_t *)(wh + 1);

	*frm++ = IEEE80211_ELEMID_SSID;
	*frm++ = 0;
	/* hardware inserts SSID */

	/* Tell firmware where the MAC header and SSID IE are. */
	preq->mac_header.offset = 0;
	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
	remain -= frm - (uint8_t *)wh;

	/* Fill in 2GHz IEs and tell firmware where they are. */
	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
		if (remain < 4 + rs->rs_nrates)
			return ENOBUFS;
	} else if (remain < 2 + rs->rs_nrates)
		return ENOBUFS;
	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
	pos = frm;
	frm = ieee80211_add_rates(frm, rs);
	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
		frm = ieee80211_add_xrates(frm, rs);
	remain -= frm - pos;

	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
		if (remain < 3)
			return ENOBUFS;
		*frm++ = IEEE80211_ELEMID_DSPARMS;
		*frm++ = 1;
		*frm++ = 0;
		remain -= 3;
	}
	preq->band_data[0].len = htole16(frm - pos);

	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
		/* Fill in 5GHz IEs. */
		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
			if (remain < 4 + rs->rs_nrates)
				return ENOBUFS;
		} else if (remain < 2 + rs->rs_nrates)
			return ENOBUFS;
		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
		pos = frm;
		frm = ieee80211_add_rates(frm, rs);
		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
			frm = ieee80211_add_xrates(frm, rs);
		preq->band_data[1].len = htole16(frm - pos);
		remain -= frm - pos;
	}

	/* Send 11n IEs on both 2GHz and 5GHz bands. */
	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
	pos = frm;
	if (ic->ic_flags & IEEE80211_F_HTON) {
		if (remain < 28)
			return ENOBUFS;
		frm = ieee80211_add_htcaps(frm, ic);
		/* XXX add WME info? */
	}
	preq->common_data.len = htole16(frm - pos);

	return 0;
}

int
iwm_lmac_scan(struct iwm_softc *sc, int bgscan)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_host_cmd hcmd = {
		.id = IWM_SCAN_OFFLOAD_REQUEST_CMD,
		.len = { 0, },
		.data = { NULL, },
		.flags = 0,
	};
	struct iwm_scan_req_lmac *req;
	struct iwm_scan_probe_req_v1 *preq;
	size_t req_len;
	int err, async = bgscan;

	req_len = sizeof(struct iwm_scan_req_lmac) +
	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
	    sc->sc_capa_n_scan_channels) + sizeof(struct iwm_scan_probe_req_v1);
	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
		return ENOMEM;
	req = malloc(req_len, M_DEVBUF,
	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
	if (req == NULL)
		return ENOMEM;

	hcmd.len[0] = (uint16_t)req_len;
	hcmd.data[0] = (void *)req;
	hcmd.flags |= async ? IWM_CMD_ASYNC : 0;

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	req->active_dwell = 10;
	req->passive_dwell = 110;
	req->fragmented_dwell = 44;
	req->extended_dwell = 90;
	if (bgscan) {
		req->max_out_time = htole32(120);
		req->suspend_time = htole32(120);
	} else {
		req->max_out_time = htole32(0);
		req->suspend_time = htole32(0);
	}
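	/*
	 * A note on the values above (an interpretation, following how
	 * iwlwifi uses these fields): max_out_time bounds how long the
	 * device may stay away from the operating channel, and
	 * suspend_time is the pause between scan iterations. Both only
	 * matter for background scans while associated; a full scan
	 * leaves them at zero.
	 */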
	req->scan_prio = htole32(IWM_SCAN_PRIORITY_HIGH);
	req->rx_chain_select = iwm_scan_rx_chain(sc);
	req->iter_num = htole32(1);
	req->delay = 0;

	req->scan_flags = htole32(IWM_LMAC_SCAN_FLAG_PASS_ALL |
	    IWM_LMAC_SCAN_FLAG_ITER_COMPLETE |
	    IWM_LMAC_SCAN_FLAG_EXTENDED_DWELL);
	if (ic->ic_des_esslen == 0)
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PASSIVE);
	else
		req->scan_flags |=
		    htole32(IWM_LMAC_SCAN_FLAG_PRE_CONNECTION);
	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT) &&
	    isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAGS_RRM_ENABLED);

	req->flags = htole32(IWM_PHY_BAND_24);
	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
		req->flags |= htole32(IWM_PHY_BAND_5);
	req->filter_flags =
	    htole32(IWM_MAC_FILTER_ACCEPT_GRP | IWM_MAC_FILTER_IN_BEACON);

	/* Tx flags 2 GHz. */
	req->tx_cmd[0].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	req->tx_cmd[0].rate_n_flags =
	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
	req->tx_cmd[0].sta_id = IWM_AUX_STA_ID;

	/* Tx flags 5 GHz. */
	req->tx_cmd[1].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	req->tx_cmd[1].rate_n_flags =
	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
	req->tx_cmd[1].sta_id = IWM_AUX_STA_ID;

	/* Check if we're doing an active directed scan. */
	if (ic->ic_des_esslen != 0) {
		req->direct_scan[0].id = IEEE80211_ELEMID_SSID;
		req->direct_scan[0].len = ic->ic_des_esslen;
		memcpy(req->direct_scan[0].ssid, ic->ic_des_essid,
		    ic->ic_des_esslen);
	}

	req->n_channels = iwm_lmac_scan_fill_channels(sc,
	    (struct iwm_scan_channel_cfg_lmac *)req->data,
	    ic->ic_des_esslen != 0, bgscan);

	preq = (struct iwm_scan_probe_req_v1 *)(req->data +
	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
	    sc->sc_capa_n_scan_channels));
	err = iwm_fill_probe_req_v1(sc, preq);
	if (err) {
		free(req, M_DEVBUF, req_len);
		return err;
	}

	/* Specify the scan plan: We'll do one iteration. */
	req->schedule[0].iterations = 1;
	req->schedule[0].full_scan_mul = 1;

	/* Disable EBS. */
	req->channel_opt[0].non_ebs_ratio = 1;
	req->channel_opt[1].non_ebs_ratio = 1;

	err = iwm_send_cmd(sc, &hcmd);
	free(req, M_DEVBUF, req_len);
	return err;
}

int
iwm_config_umac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_scan_config *scan_config;
	int err, nchan;
	size_t cmd_size;
	struct ieee80211_channel *c;
	struct iwm_host_cmd hcmd = {
		.id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_LONG_GROUP, 0),
		.flags = 0,
	};
	static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
	    IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
	    IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
	    IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
	    IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
	    IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
	    IWM_SCAN_CONFIG_RATE_54M);

	cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;

	scan_config = malloc(cmd_size, M_DEVBUF, M_WAIT | M_CANFAIL | M_ZERO);
	if (scan_config == NULL)
		return ENOMEM;

	scan_config->tx_chains = htole32(iwm_fw_valid_tx_ant(sc));
	scan_config->rx_chains = htole32(iwm_fw_valid_rx_ant(sc));
	scan_config->legacy_rates = htole32(rates |
	    IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	scan_config->dwell_active = 10;
	scan_config->dwell_passive = 110;
	scan_config->dwell_fragmented = 44;
	scan_config->dwell_extended = 90;
	scan_config->out_of_channel_time = htole32(0);
	scan_config->suspend_time = htole32(0);

	IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);

	scan_config->bcast_sta_id = IWM_AUX_STA_ID;
	scan_config->channel_flags = 0;

	for (c = &ic->ic_channels[1], nchan = 0;
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels; c++) {
		if (c->ic_flags == 0)
			continue;
		scan_config->channel_array[nchan++] =
		    ieee80211_mhz2ieee(c->ic_freq, 0);
	}

	scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
	    IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
	    IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
	    IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
	    IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
	    IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
	    IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
	    IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
	    IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
	    IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
	    IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);

	hcmd.data[0] = scan_config;
	hcmd.len[0] = cmd_size;

	err = iwm_send_cmd(sc, &hcmd);
	free(scan_config, M_DEVBUF, cmd_size);
	return err;
}

int
iwm_umac_scan_size(struct iwm_softc *sc)
{
	int base_size = IWM_SCAN_REQ_UMAC_SIZE_V1;
	int tail_size;

	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
		base_size = IWM_SCAN_REQ_UMAC_SIZE_V8;
	else if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
		base_size = IWM_SCAN_REQ_UMAC_SIZE_V7;
	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
		tail_size = sizeof(struct iwm_scan_req_umac_tail_v2);
	else
		tail_size = sizeof(struct iwm_scan_req_umac_tail_v1);

	return base_size + sizeof(struct iwm_scan_channel_cfg_umac) *
	    sc->sc_capa_n_scan_channels + tail_size;
}

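/*
 * The UMAC scan request layout varies with the firmware's API version
 * (v1, v7 with adaptive dwell, v8 with adaptive dwell v2). The two
 * helpers below hide that difference and return pointers to the
 * channel parameters and the channel/probe data within the right
 * variant of struct iwm_scan_req_umac.
 */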
struct iwm_scan_umac_chan_param *
iwm_get_scan_req_umac_chan_param(struct iwm_softc *sc,
    struct iwm_scan_req_umac *req)
{
	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
		return &req->v8.channel;

	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
		return &req->v7.channel;

	return &req->v1.channel;
}

void *
iwm_get_scan_req_umac_data(struct iwm_softc *sc, struct iwm_scan_req_umac *req)
{
	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
		return (void *)&req->v8.data;

	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
		return (void *)&req->v7.data;

	return (void *)&req->v1.data;
}

/* adaptive dwell max budget time [TU] for full scan */
#define IWM_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN		300
/* adaptive dwell max budget time [TU] for directed scan */
#define IWM_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN	100
/* adaptive dwell default high band APs number */
#define IWM_SCAN_ADWELL_DEFAULT_HB_N_APS		8
/* adaptive dwell default low band APs number */
#define IWM_SCAN_ADWELL_DEFAULT_LB_N_APS		2
/* adaptive dwell default APs number in social channels (1, 6, 11) */
#define IWM_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL		10

int
iwm_umac_scan(struct iwm_softc *sc, int bgscan)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_host_cmd hcmd = {
		.id = iwm_cmd_id(IWM_SCAN_REQ_UMAC, IWM_LONG_GROUP, 0),
		.len = { 0, },
		.data = { NULL, },
		.flags = 0,
	};
	struct iwm_scan_req_umac *req;
	void *cmd_data, *tail_data;
	struct iwm_scan_req_umac_tail_v2 *tail;
	struct iwm_scan_req_umac_tail_v1 *tailv1;
	struct iwm_scan_umac_chan_param *chanparam;
	size_t req_len;
	int err, async = bgscan;

	req_len = iwm_umac_scan_size(sc);
	if ((req_len < IWM_SCAN_REQ_UMAC_SIZE_V1 +
	    sizeof(struct iwm_scan_req_umac_tail_v1)) ||
	    req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
		return ERANGE;
	req = malloc(req_len, M_DEVBUF,
	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
	if (req == NULL)
		return ENOMEM;

	hcmd.len[0] = (uint16_t)req_len;
	hcmd.data[0] = (void *)req;
	hcmd.flags |= async ? IWM_CMD_ASYNC : 0;

	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
		req->v7.adwell_default_n_aps_social =
		    IWM_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
		req->v7.adwell_default_n_aps =
		    IWM_SCAN_ADWELL_DEFAULT_LB_N_APS;

		if (ic->ic_des_esslen != 0)
			req->v7.adwell_max_budget =
			    htole16(IWM_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
		else
			req->v7.adwell_max_budget =
			    htole16(IWM_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);

		req->v7.scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
		req->v7.max_out_time[IWM_SCAN_LB_LMAC_IDX] = 0;
		req->v7.suspend_time[IWM_SCAN_LB_LMAC_IDX] = 0;

		if (isset(sc->sc_ucode_api,
		    IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
			req->v8.active_dwell[IWM_SCAN_LB_LMAC_IDX] = 10;
			req->v8.passive_dwell[IWM_SCAN_LB_LMAC_IDX] = 110;
		} else {
			req->v7.active_dwell = 10;
			req->v7.passive_dwell = 110;
			req->v7.fragmented_dwell = 44;
		}
	} else {
		/* These timings correspond to iwlwifi's UNASSOC scan. */
		req->v1.active_dwell = 10;
		req->v1.passive_dwell = 110;
		req->v1.fragmented_dwell = 44;
		req->v1.extended_dwell = 90;

		req->v1.scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
	}

	if (bgscan) {
		const uint32_t timeout = htole32(120);
		if (isset(sc->sc_ucode_api,
		    IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
			req->v8.max_out_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
			req->v8.suspend_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
		} else if (isset(sc->sc_ucode_api,
		    IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
			req->v7.max_out_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
			req->v7.suspend_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
		} else {
			req->v1.max_out_time = timeout;
			req->v1.suspend_time = timeout;
		}
	}

	req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);

	cmd_data = iwm_get_scan_req_umac_data(sc, req);
	chanparam = iwm_get_scan_req_umac_chan_param(sc, req);
	chanparam->count = iwm_umac_scan_fill_channels(sc,
	    (struct iwm_scan_channel_cfg_umac *)cmd_data,
	    ic->ic_des_esslen != 0, bgscan);
	chanparam->flags = 0;

	tail_data = cmd_data + sizeof(struct iwm_scan_channel_cfg_umac) *
	    sc->sc_capa_n_scan_channels;
	tail = tail_data;
	/* tail v1 layout differs in preq and direct_scan member fields. */
	tailv1 = tail_data;

	req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
	    IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE);
	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
		req->v8.general_flags2 =
		    IWM_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER;
	}

	/*
	 * Check if we're doing an active directed scan.
	 * 9k devices may randomly lock up (no interrupts) after association
	 * following active scans. Use passive scan only for now on 9k.
	 */
	if (sc->sc_device_family != IWM_DEVICE_FAMILY_9000 &&
	    ic->ic_des_esslen != 0) {
		if (isset(sc->sc_ucode_api,
		    IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
			tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
			tail->direct_scan[0].len = ic->ic_des_esslen;
			memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
			    ic->ic_des_esslen);
		} else {
			tailv1->direct_scan[0].id = IEEE80211_ELEMID_SSID;
			tailv1->direct_scan[0].len = ic->ic_des_esslen;
			memcpy(tailv1->direct_scan[0].ssid, ic->ic_des_essid,
			    ic->ic_des_esslen);
		}
		req->general_flags |=
		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
	} else
		req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);

	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT) &&
	    isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
		req->general_flags |=
		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);

	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
		req->general_flags |=
		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL);
	} else {
		req->general_flags |=
		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);
	}

	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
		err = iwm_fill_probe_req(sc, &tail->preq);
	else
		err = iwm_fill_probe_req_v1(sc, &tailv1->preq);
	if (err) {
		free(req, M_DEVBUF, req_len);
		return err;
	}

	/* Specify the scan plan: We'll do one iteration. */
	tail->schedule[0].interval = 0;
	tail->schedule[0].iter_count = 1;

	err = iwm_send_cmd(sc, &hcmd);
	free(req, M_DEVBUF, req_len);
	return err;
}

void
iwm_mcc_update(struct iwm_softc *sc, struct iwm_mcc_chub_notif *notif)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	char alpha2[3];

	snprintf(alpha2, sizeof(alpha2), "%c%c",
	    (le16toh(notif->mcc) & 0xff00) >> 8, le16toh(notif->mcc) & 0xff);
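	/*
	 * The MCC ("mobile country code") is two ASCII characters packed
	 * into a 16-bit word; 0x5553, for example, decodes to "US".
	 */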

	if (ifp->if_flags & IFF_DEBUG) {
		printf("%s: firmware has detected regulatory domain '%s' "
		    "(0x%x)\n", DEVNAME(sc), alpha2, le16toh(notif->mcc));
	}

	/* TODO: Schedule a task to send MCC_UPDATE_CMD? */
}

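/*
 * Map a firmware rate index to the matching entry of a net80211 rate
 * set. The comparison below assumes iwm_rates[].rate and
 * rs->rs_rates[] are expressed in the same units (net80211's
 * 500 kbit/s steps).
 */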
uint8_t
iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
{
	int i;
	uint8_t rval;

	for (i = 0; i < rs->rs_nrates; i++) {
		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
		if (rval == iwm_rates[ridx].rate)
			return rs->rs_rates[i];
	}

	return 0;
}

int
iwm_rval2ridx(int rval)
{
	int ridx;

	for (ridx = 0; ridx < nitems(iwm_rates); ridx++) {
		if (iwm_rates[ridx].plcp == IWM_RATE_INVM_PLCP)
			continue;
		if (rval == iwm_rates[ridx].rate)
			break;
	}

	return ridx;
}

void
iwm_ack_rates(struct iwm_softc *sc, struct iwm_node *in, int *cck_rates,
    int *ofdm_rates)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int lowest_present_ofdm = -1;
	int lowest_present_cck = -1;
	uint8_t cck = 0;
	uint8_t ofdm = 0;
	int i;

	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
		for (i = IWM_FIRST_CCK_RATE; i < IWM_FIRST_OFDM_RATE; i++) {
			if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
				continue;
			cck |= (1 << i);
			if (lowest_present_cck == -1 || lowest_present_cck > i)
				lowest_present_cck = i;
		}
	}
	for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
		if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
			continue;
		ofdm |= (1 << (i - IWM_FIRST_OFDM_RATE));
		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
			lowest_present_ofdm = i;
	}

	/*
	 * Now we've got the basic rates as bitmaps in the ofdm and cck
	 * variables. This isn't sufficient though, as there might not
	 * be all the right rates in the bitmap. E.g. if the only basic
	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
	 *
	 * [...] a STA responding to a received frame shall transmit
	 * its Control Response frame [...] at the highest rate in the
	 * BSSBasicRateSet parameter that is less than or equal to the
	 * rate of the immediately previous frame in the frame exchange
	 * sequence ([...]) and that is of the same modulation class
	 * ([...]) as the received frame. If no rate contained in the
	 * BSSBasicRateSet parameter meets these conditions, then the
	 * control frame sent in response to a received frame shall be
	 * transmitted at the highest mandatory rate of the PHY that is
	 * less than or equal to the rate of the received frame, and
	 * that is of the same modulation class as the received frame.
	 *
	 * As a consequence, we need to add all mandatory rates that are
	 * lower than all of the basic rates to these bitmaps.
	 */
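	/*
	 * Concretely: if the lowest basic OFDM rate is 36 Mbps, the
	 * checks below add the mandatory 24 and 12 Mbps rates, and
	 * 6 Mbps is added unconditionally since it is either already
	 * present or needed as the lowest mandatory OFDM rate.
	 */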
	if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
	if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
	/* 6M already there or needed so always add */
	ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;

	/*
	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
	 * Note, however:
	 *  - if no CCK rates are basic, it must be ERP since there must
	 *    be some basic rates at all, so they're OFDM => ERP PHY
	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
	 *  - if 5.5M is basic, 1M and 2M are mandatory
	 *  - if 2M is basic, 1M is mandatory
	 *  - if 1M is basic, that's the only valid ACK rate.
	 * As a consequence, it's not as complicated as it sounds, just add
	 * any lower rates to the ACK rate bitmap.
	 */
	if (IWM_RATE_11M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_5M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_2M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
	/* 1M already there or needed so always add */
	cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;

	*cck_rates = cck;
	*ofdm_rates = ofdm;
}

void
iwm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_mac_ctx_cmd *cmd, uint32_t action)
{
#define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
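	/* e.g. IWM_EXP2(4) == 15: an ECWmin of 4 yields a CWmin of 15 slots */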
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	int cck_ack_rates, ofdm_ack_rates;
	int i;

	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
	    in->in_color));
	cmd->action = htole32(action);

	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		cmd->mac_type = htole32(IWM_FW_MAC_TYPE_LISTENER);
	else if (ic->ic_opmode == IEEE80211_M_STA)
		cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
	else
		panic("unsupported operating mode %d", ic->ic_opmode);
	cmd->tsf_id = htole32(IWM_TSF_ID_A);

	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
		return;
	}

	IEEE80211_ADDR_COPY(cmd->bssid_addr, in->in_macaddr);
	iwm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
	cmd->cck_rates = htole32(cck_ack_rates);
	cmd->ofdm_rates = htole32(ofdm_ack_rates);

	cmd->cck_short_preamble
	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
	    ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
	cmd->short_slot
	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
	    ? IWM_MAC_FLG_SHORT_SLOT : 0);

	for (i = 0; i < EDCA_NUM_AC; i++) {
		struct ieee80211_edca_ac_params *ac = &ic->ic_edca_ac[i];
		int txf = iwm_ac_to_tx_fifo[i];

		cmd->ac[txf].cw_min = htole16(IWM_EXP2(ac->ac_ecwmin));
		cmd->ac[txf].cw_max = htole16(IWM_EXP2(ac->ac_ecwmax));
		cmd->ac[txf].aifsn = ac->ac_aifsn;
		cmd->ac[txf].fifos_mask = (1 << txf);
		cmd->ac[txf].edca_txop = htole16(ac->ac_txoplimit * 32);
	}
	if (ni->ni_flags & IEEE80211_NODE_QOS)
		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_UPDATE_EDCA);

	if (ni->ni_flags & IEEE80211_NODE_HT) {
		enum ieee80211_htprot htprot =
		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
		switch (htprot) {
		case IEEE80211_HTPROT_NONE:
			break;
		case IEEE80211_HTPROT_NONMEMBER:
		case IEEE80211_HTPROT_NONHT_MIXED:
			cmd->protection_flags |=
			    htole32(IWM_MAC_PROT_FLG_HT_PROT |
			    IWM_MAC_PROT_FLG_FAT_PROT);
			break;
		case IEEE80211_HTPROT_20MHZ:
			if (in->in_phyctxt &&
			    (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
			    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)) {
				cmd->protection_flags |=
				    htole32(IWM_MAC_PROT_FLG_HT_PROT |
				    IWM_MAC_PROT_FLG_FAT_PROT);
			}
			break;
		default:
			break;
		}

		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_TGN);
	}
	if (ic->ic_flags & IEEE80211_F_USEPROT)
		cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);

	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
#undef IWM_EXP2
}

void
iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_mac_data_sta *sta, int assoc)
{
	struct ieee80211_node *ni = &in->in_ni;
	uint32_t dtim_off;
	uint64_t tsf;

	dtim_off = ni->ni_dtimcount * ni->ni_intval * IEEE80211_DUR_TU;
	memcpy(&tsf, ni->ni_tstamp, sizeof(tsf));
	tsf = letoh64(tsf);

	sta->is_assoc = htole32(assoc);
	sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
	sta->dtim_tsf = htole64(tsf + dtim_off);
	sta->bi = htole32(ni->ni_intval);
	sta->bi_reciprocal = htole32(iwm_reciprocal(ni->ni_intval));
	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtimperiod);
	sta->dtim_reciprocal = htole32(iwm_reciprocal(sta->dtim_interval));
	sta->listen_interval = htole32(10);
	sta->assoc_id = htole32(ni->ni_associd);
	sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
}

int
iwm_mac_ctxt_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action,
    int assoc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	struct iwm_mac_ctx_cmd cmd;
	int active = (sc->sc_flags & IWM_FLAG_MAC_ACTIVE);

	if (action == IWM_FW_CTXT_ACTION_ADD && active)
		panic("MAC already added");
	if (action == IWM_FW_CTXT_ACTION_REMOVE && !active)
		panic("MAC already removed");

	memset(&cmd, 0, sizeof(cmd));

	iwm_mac_ctxt_cmd_common(sc, in, &cmd, action);

	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_PROMISC |
		    IWM_MAC_FILTER_IN_CONTROL_AND_MGMT |
		    IWM_MAC_FILTER_ACCEPT_GRP |
		    IWM_MAC_FILTER_IN_BEACON |
		    IWM_MAC_FILTER_IN_PROBE_REQUEST |
		    IWM_MAC_FILTER_IN_CRC32);
	} else if (!assoc || !ni->ni_associd || !ni->ni_dtimperiod)
		/*
		 * Allow beacons to pass through as long as we are not
		 * associated or we do not have dtim period information.
		 */
		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
	else
		iwm_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);

	return iwm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
}

int
iwm_update_quotas(struct iwm_softc *sc, struct iwm_node *in, int running)
{
	struct iwm_time_quota_cmd_v1 cmd;
	int i, idx, num_active_macs, quota, quota_rem;
	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
	int n_ifs[IWM_MAX_BINDINGS] = {0, };
	uint16_t id;

	memset(&cmd, 0, sizeof(cmd));

	/* currently, PHY ID == binding ID */
	if (in && in->in_phyctxt) {
		id = in->in_phyctxt->id;
		KASSERT(id < IWM_MAX_BINDINGS);
		colors[id] = in->in_phyctxt->color;
		if (running)
			n_ifs[id] = 1;
	}

	/*
	 * The FW's scheduling session consists of
	 * IWM_MAX_QUOTA fragments. Divide these fragments
	 * equally between all the bindings that require quota
	 */
	num_active_macs = 0;
	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
		num_active_macs += n_ifs[i];
	}

	quota = 0;
	quota_rem = 0;
	if (num_active_macs) {
		quota = IWM_MAX_QUOTA / num_active_macs;
		quota_rem = IWM_MAX_QUOTA % num_active_macs;
	}
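	/*
	 * Example: with IWM_MAX_QUOTA == 128 and a single active MAC,
	 * that binding receives all 128 fragments; with two, each would
	 * get 64, and any remainder is given to the first binding below.
	 */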

	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
		if (colors[i] < 0)
			continue;

		cmd.quotas[idx].id_and_color =
		    htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));

		if (n_ifs[i] <= 0) {
			cmd.quotas[idx].quota = htole32(0);
			cmd.quotas[idx].max_duration = htole32(0);
		} else {
			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
			cmd.quotas[idx].max_duration = htole32(0);
		}
		idx++;
	}

	/* Give the remainder of the session to the first binding */
	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);

	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_QUOTA_LOW_LATENCY)) {
		struct iwm_time_quota_cmd cmd_v2;

		memset(&cmd_v2, 0, sizeof(cmd_v2));
		for (i = 0; i < IWM_MAX_BINDINGS; i++) {
			cmd_v2.quotas[i].id_and_color =
			    cmd.quotas[i].id_and_color;
			cmd_v2.quotas[i].quota = cmd.quotas[i].quota;
			cmd_v2.quotas[i].max_duration =
			    cmd.quotas[i].max_duration;
		}
		return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0,
		    sizeof(cmd_v2), &cmd_v2);
	}

	return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0, sizeof(cmd), &cmd);
}

void
iwm_add_task(struct iwm_softc *sc, struct taskq *taskq, struct task *task)
{
	int s = splnet();

	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
		splx(s);
		return;
	}

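	/*
	 * Take a reference for the task before scheduling it; if
	 * task_add() reports the task was already queued, the extra
	 * reference is dropped again. The matching release happens in
	 * the task function itself (see iwm_bgscan_done_task below).
	 */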
8210 refcnt_take(&sc->task_refs);
8211 if (!task_add(taskq, task))
8212 refcnt_rele_wake(&sc->task_refs);
8213 splx(s)spllower(s);
8214}
8215
8216void
8217iwm_del_task(struct iwm_softc *sc, struct taskq *taskq, struct task *task)
8218{
8219 if (task_del(taskq, task))
8220 refcnt_rele(&sc->task_refs);
8221}
8222
8223int
8224iwm_scan(struct iwm_softc *sc)
8225{
8226 struct ieee80211com *ic = &sc->sc_ic;
8227 struct ifnet *ifp = IC2IFP(ic)(&(ic)->ic_ac.ac_if);
8228 int err;
8229
8230 if (sc->sc_flags & IWM_FLAG_BGSCAN0x200) {
8231 err = iwm_scan_abort(sc);
8232 if (err) {
8233 printf("%s: could not abort background scan\n",
8234 DEVNAME(sc)((sc)->sc_dev.dv_xname));
8235 return err;
8236 }
8237 }
8238
8239 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)((sc->sc_enabled_capa)[(2)>>3] & (1<<((2)&
(8 -1))))
)
8240 err = iwm_umac_scan(sc, 0);
8241 else
8242 err = iwm_lmac_scan(sc, 0);
8243 if (err) {
8244 printf("%s: could not initiate scan\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
8245 return err;
8246 }
8247
8248 /*
8249 * The current mode might have been fixed during association.
8250 * Ensure all channels get scanned.
8251 */
8252 if (IFM_MODE(ic->ic_media.ifm_cur->ifm_media)((ic->ic_media.ifm_cur->ifm_media) & 0x000000ff00000000ULL
)
== IFM_AUTO0ULL)
8253 ieee80211_setmode(ic, IEEE80211_MODE_AUTO);
8254
8255 sc->sc_flags |= IWM_FLAG_SCANNING0x04;
8256 if (ifp->if_flags & IFF_DEBUG0x4)
8257 printf("%s: %s -> %s\n", ifp->if_xname,
8258 ieee80211_state_name[ic->ic_state],
8259 ieee80211_state_name[IEEE80211_S_SCAN]);
8260 if ((sc->sc_flags & IWM_FLAG_BGSCAN0x200) == 0) {
8261 ieee80211_set_link_state(ic, LINK_STATE_DOWN2);
8262 ieee80211_node_cleanup(ic, ic->ic_bss);
8263 }
8264 ic->ic_state = IEEE80211_S_SCAN;
8265 iwm_led_blink_start(sc);
8266 wakeup(&ic->ic_state); /* wake iwm_init() */
8267
8268 return 0;
8269}
8270
8271int
8272iwm_bgscan(struct ieee80211com *ic)
8273{
8274 struct iwm_softc *sc = IC2IFP(ic)(&(ic)->ic_ac.ac_if)->if_softc;
8275 int err;
8276
8277 if (sc->sc_flags & IWM_FLAG_SCANNING0x04)
8278 return 0;
8279
8280 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)((sc->sc_enabled_capa)[(2)>>3] & (1<<((2)&
(8 -1))))
)
8281 err = iwm_umac_scan(sc, 1);
8282 else
8283 err = iwm_lmac_scan(sc, 1);
8284 if (err) {
8285 printf("%s: could not initiate scan\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
8286 return err;
8287 }
8288
8289 sc->sc_flags |= IWM_FLAG_BGSCAN0x200;
8290 return 0;
8291}
8292
8293void
8294iwm_bgscan_done(struct ieee80211com *ic,
8295 struct ieee80211_node_switch_bss_arg *arg, size_t arg_size)
8296{
8297 struct iwm_softc *sc = ic->ic_softcic_ac.ac_if.if_softc;
8298
8299 free(sc->bgscan_unref_arg, M_DEVBUF2, sc->bgscan_unref_arg_size);
8300 sc->bgscan_unref_arg = arg;
8301 sc->bgscan_unref_arg_size = arg_size;
8302 iwm_add_task(sc, sc->sc_nswq, &sc->bgscan_done_task);
8303}
8304
8305void
8306iwm_bgscan_done_task(void *arg)
8307{
8308 struct iwm_softc *sc = arg;
8309 struct ieee80211com *ic = &sc->sc_ic;
8310 struct iwm_node *in = (void *)ic->ic_bss;
8311 struct ieee80211_node *ni = &in->in_ni;
8312 int tid, err = 0, s = splnet()splraise(0x7);
8313
8314 if ((sc->sc_flags & IWM_FLAG_SHUTDOWN0x100) ||
8315 (ic->ic_flags & IEEE80211_F_BGSCAN0x08000000) == 0 ||
8316 ic->ic_state != IEEE80211_S_RUN) {
8317 err = ENXIO6;
8318 goto done;
8319 }
8320
8321 for (tid = 0; tid < IWM_MAX_TID_COUNT8; tid++) {
8322 int qid = IWM_FIRST_AGG_TX_QUEUE10 + tid;
8323
8324 if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
8325 continue;
8326
8327 err = iwm_sta_tx_agg(sc, ni, tid, 0, 0, 0);
8328 if (err)
8329 goto done;
8330 err = iwm_disable_txq(sc, IWM_STATION_ID0, qid, tid);
8331 if (err)
8332 goto done;
8333 in->tfd_queue_msk &= ~(1 << qid);
8334#if 0 /* disabled for now; we are going to DEAUTH soon anyway */
8335 IEEE80211_SEND_ACTION(ic, ni, IEEE80211_CATEG_BA,((*(ic)->ic_send_mgmt)(ic, ni, 0xd0, (IEEE80211_CATEG_BA) <<
16 | (2), IEEE80211_REASON_AUTH_LEAVE << 16 | 0x01 <<
8 | tid))
8336 IEEE80211_ACTION_DELBA,((*(ic)->ic_send_mgmt)(ic, ni, 0xd0, (IEEE80211_CATEG_BA) <<
16 | (2), IEEE80211_REASON_AUTH_LEAVE << 16 | 0x01 <<
8 | tid))
8337 IEEE80211_REASON_AUTH_LEAVE << 16 |((*(ic)->ic_send_mgmt)(ic, ni, 0xd0, (IEEE80211_CATEG_BA) <<
16 | (2), IEEE80211_REASON_AUTH_LEAVE << 16 | 0x01 <<
8 | tid))
8338 IEEE80211_FC1_DIR_TODS << 8 | tid)((*(ic)->ic_send_mgmt)(ic, ni, 0xd0, (IEEE80211_CATEG_BA) <<
16 | (2), IEEE80211_REASON_AUTH_LEAVE << 16 | 0x01 <<
8 | tid))
;
8339#endif
8340 ieee80211_node_tx_ba_clear(ni, tid);
8341 }
8342
8343 err = iwm_flush_sta(sc, in);
8344 if (err)
8345 goto done;
8346
8347 /*
8348 * Tx queues have been flushed and Tx agg has been stopped.
8349 * Allow roaming to proceed.
8350 */
8351 ni->ni_unref_arg = sc->bgscan_unref_arg;
8352 ni->ni_unref_arg_size = sc->bgscan_unref_arg_size;
8353 sc->bgscan_unref_arg = NULL((void *)0);
8354 sc->bgscan_unref_arg_size = 0;
8355 ieee80211_node_tx_stopped(ic, &in->in_ni);
8356done:
8357 if (err) {
8358 free(sc->bgscan_unref_arg, M_DEVBUF2, sc->bgscan_unref_arg_size);
8359 sc->bgscan_unref_arg = NULL((void *)0);
8360 sc->bgscan_unref_arg_size = 0;
8361 if ((sc->sc_flags & IWM_FLAG_SHUTDOWN0x100) == 0)
8362 task_add(systq, &sc->init_task);
8363 }
8364 refcnt_rele_wake(&sc->task_refs);
8365 splx(s)spllower(s);
8366}
8367
8368int
8369iwm_umac_scan_abort(struct iwm_softc *sc)
8370{
8371 struct iwm_umac_scan_abort cmd = { 0 };
8372
8373 return iwm_send_cmd_pdu(sc,
8374 IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_ABORT_UMAC)((0x1 << 8) | 0xe),
8375 0, sizeof(cmd), &cmd);
8376}
8377
8378int
8379iwm_lmac_scan_abort(struct iwm_softc *sc)
8380{
8381 struct iwm_host_cmd cmd = {
8382 .id = IWM_SCAN_OFFLOAD_ABORT_CMD0x52,
8383 };
8384 int err, status;
8385
8386 err = iwm_send_cmd_status(sc, &cmd, &status);
8387 if (err)
8388 return err;
8389
8390 if (status != IWM_CAN_ABORT_STATUS1) {
8391 /*
8392 * The scan abort will return 1 for success or
8393 * 2 for "failure". A failure condition can be
8394 * due to simply not being in an active scan which
8395 * can occur if we send the scan abort before the
8396 * microcode has notified us that a scan is completed.
8397 */
8398 return EBUSY16;
8399 }
8400
8401 return 0;
8402}
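
/*
 * Illustrative sketch of the status handling described above (example
 * only, not part of the driver; the helper name is hypothetical):
 */
#if 0	/* example */
static int
scan_abort_status_to_errno(int status)
{
	/* 1: the firmware accepted the abort request. */
	if (status == IWM_CAN_ABORT_STATUS)
		return 0;
	/* 2 (or anything else): no scan was active to abort. */
	return EBUSY;
}
#endif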
8403
8404int
8405iwm_scan_abort(struct iwm_softc *sc)
8406{
8407 int err;
8408
8409 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)((sc->sc_enabled_capa)[(2)>>3] & (1<<((2)&(8 -1)))))
8410 err = iwm_umac_scan_abort(sc);
8411 else
8412 err = iwm_lmac_scan_abort(sc);
8413
8414 if (err == 0)
8415 sc->sc_flags &= ~(IWM_FLAG_SCANNING0x04 | IWM_FLAG_BGSCAN0x200);
8416 return err;
8417}
8418
8419int
8420iwm_phy_ctxt_update(struct iwm_softc *sc, struct iwm_phy_ctxt *phyctxt,
8421 struct ieee80211_channel *chan, uint8_t chains_static,
8422 uint8_t chains_dynamic, uint32_t apply_time, uint8_t sco)
8423{
8424 uint16_t band_flags = (IEEE80211_CHAN_2GHZ0x0080 | IEEE80211_CHAN_5GHZ0x0100);
8425 int err;
8426
8427 if (isset(sc->sc_enabled_capa,
8428 IWM_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)((sc->sc_enabled_capa)[(39)>>3] & (1<<((39)&(8 -1)))) &&
8429 (phyctxt->channel->ic_flags & band_flags) !=
8430 (chan->ic_flags & band_flags)) {
8431 err = iwm_phy_ctxt_cmd(sc, phyctxt, chains_static,
8432 chains_dynamic, IWM_FW_CTXT_ACTION_REMOVE3, apply_time, sco);
8433 if (err) {
8434 printf("%s: could not remove PHY context "
8435 "(error %d)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
8436 return err;
8437 }
8438 phyctxt->channel = chan;
8439 err = iwm_phy_ctxt_cmd(sc, phyctxt, chains_static,
8440 chains_dynamic, IWM_FW_CTXT_ACTION_ADD1, apply_time, sco);
8441 if (err) {
8442 printf("%s: could not add PHY context "
8443 "(error %d)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
8444 return err;
8445 }
8446 } else {
8447 phyctxt->channel = chan;
8448 err = iwm_phy_ctxt_cmd(sc, phyctxt, chains_static,
8449 chains_dynamic, IWM_FW_CTXT_ACTION_MODIFY2, apply_time, sco);
8450 if (err) {
8451 printf("%s: could not update PHY context (error %d)\n",
8452 DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
8453 return err;
8454 }
8455 }
8456
8457 phyctxt->sco = sco;
8458 return 0;
8459}
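
/*
 * Note (inferred from the logic above): with CDB-capable firmware a
 * MODIFY apparently cannot move a PHY context across the 2GHz/5GHz
 * band boundary, which is why the code falls back to a REMOVE
 * followed by an ADD in that case.
 */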
8460
8461int
8462iwm_auth(struct iwm_softc *sc)
8463{
8464 struct ieee80211com *ic = &sc->sc_ic;
8465 struct iwm_node *in = (void *)ic->ic_bss;
8466 uint32_t duration;
8467 int generation = sc->sc_generation, err;
8468
8469 splassert(IPL_NET)do { if (splassert_ctl > 0) { splassert_check(0x7, __func__); } } while (0);
8470
8471 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8472 err = iwm_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8473 ic->ic_ibss_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN0);
8474 if (err)
8475 return err;
8476 } else {
8477 err = iwm_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8478 in->in_ni.ni_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN0);
8479 if (err)
8480 return err;
8481 }
8482 in->in_phyctxt = &sc->sc_phyctxt[0];
8483 IEEE80211_ADDR_COPY(in->in_macaddr, in->in_ni.ni_macaddr)__builtin_memcpy((in->in_macaddr), (in->in_ni.ni_macaddr), (6));
8484 iwm_setrates(in, 0);
8485
8486 err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD1, 0);
8487 if (err) {
8488 printf("%s: could not add MAC context (error %d)\n",
8489 DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
8490 return err;
8491 }
8492 sc->sc_flags |= IWM_FLAG_MAC_ACTIVE0x08;
8493
8494 err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD1);
8495 if (err) {
8496 printf("%s: could not add binding (error %d)\n",
8497 DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
8498 goto rm_mac_ctxt;
8499 }
8500 sc->sc_flags |= IWM_FLAG_BINDING_ACTIVE0x10;
8501
8502 in->tid_disable_ampdu = 0xffff;
8503 err = iwm_add_sta_cmd(sc, in, 0);
8504 if (err) {
8505 printf("%s: could not add sta (error %d)\n",
8506 DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
8507 goto rm_binding;
8508 }
8509 sc->sc_flags |= IWM_FLAG_STA_ACTIVE0x20;
8510
8511 if (ic->ic_opmode == IEEE80211_M_MONITOR)
8512 return 0;
8513
8514 /*
8515 * Prevent the FW from wandering off channel during association
8516 * by "protecting" the session with a time event.
8517 */
8518 if (in->in_ni.ni_intval)
8519 duration = in->in_ni.ni_intval * 2;
8520 else
8521 duration = IEEE80211_DUR_TU1024;
8522 iwm_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);
8523
8524 return 0;
8525
8526rm_binding:
8527 if (generation == sc->sc_generation) {
8528 iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE3);
8529 sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE0x10;
8530 }
8531rm_mac_ctxt:
8532 if (generation == sc->sc_generation) {
8533 iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE3, 0);
8534 sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE0x08;
8535 }
8536 return err;
8537}
8538
8539int
8540iwm_deauth(struct iwm_softc *sc)
8541{
8542 struct ieee80211com *ic = &sc->sc_ic;
8543 struct iwm_node *in = (void *)ic->ic_bss;
8544 int err;
8545
8546 splassert(IPL_NET)do { if (splassert_ctl > 0) { splassert_check(0x7, __func__); } } while (0);
8547
8548 iwm_unprotect_session(sc, in);
8549
8550 if (sc->sc_flags & IWM_FLAG_STA_ACTIVE0x20) {
8551 err = iwm_flush_sta(sc, in);
8552 if (err)
8553 return err;
8554 err = iwm_rm_sta_cmd(sc, in);
8555 if (err) {
8556 printf("%s: could not remove STA (error %d)\n",
8557 DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
8558 return err;
8559 }
8560 in->tid_disable_ampdu = 0xffff;
8561 sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE0x20;
8562 sc->sc_rx_ba_sessions = 0;
8563 sc->ba_rx.start_tidmask = 0;
8564 sc->ba_rx.stop_tidmask = 0;
8565 sc->tx_ba_queue_mask = 0;
8566 sc->ba_tx.start_tidmask = 0;
8567 sc->ba_tx.stop_tidmask = 0;
8568 }
8569
8570 if (sc->sc_flags & IWM_FLAG_BINDING_ACTIVE0x10) {
8571 err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE3);
8572 if (err) {
8573 printf("%s: could not remove binding (error %d)\n",
8574 DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
8575 return err;
8576 }
8577 sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE0x10;
8578 }
8579
8580 if (sc->sc_flags & IWM_FLAG_MAC_ACTIVE0x08) {
8581 err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE3, 0);
8582 if (err) {
8583 printf("%s: could not remove MAC context (error %d)\n",
8584 DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
8585 return err;
8586 }
8587 sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE0x08;
8588 }
8589
8590 /* Move unused PHY context to a default channel. */
8591 err = iwm_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8592 &ic->ic_channels[1], 1, 1, 0, IEEE80211_HTOP0_SCO_SCN0);
8593 if (err)
8594 return err;
8595
8596 return 0;
8597}
8598
8599int
8600iwm_run(struct iwm_softc *sc)
8601{
8602 struct ieee80211com *ic = &sc->sc_ic;
8603 struct iwm_node *in = (void *)ic->ic_bss;
8604 struct ieee80211_node *ni = &in->in_ni;
8605 int err;
8606
8607 splassert(IPL_NET)do { if (splassert_ctl > 0) { splassert_check(0x7, __func__); } } while (0);
8608
8609 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8610 /* Add a MAC context and a sniffing STA. */
8611 err = iwm_auth(sc);
8612 if (err)
8613 return err;
8614 }
8615
8616 /* Configure Rx chains for MIMO and configure 40 MHz channel. */
8617 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8618 uint8_t chains = iwm_mimo_enabled(sc) ? 2 : 1;
8619 err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
8620 in->in_phyctxt->channel, chains, chains,
8621 0, IEEE80211_HTOP0_SCO_SCN0);
8622 if (err) {
8623 printf("%s: failed to update PHY\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
8624 return err;
8625 }
8626 } else if (ni->ni_flags & IEEE80211_NODE_HT0x0400) {
8627 uint8_t chains = iwm_mimo_enabled(sc) ? 2 : 1;
8628 uint8_t sco;
8629 if (ieee80211_node_supports_ht_chan40(ni))
8630 sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK0x03);
8631 else
8632 sco = IEEE80211_HTOP0_SCO_SCN0;
8633 err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
8634 in->in_phyctxt->channel, chains, chains,
8635 0, sco);
8636 if (err) {
8637 printf("%s: failed to update PHY\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
8638 return err;
8639 }
8640 }
8641
8642 /* Update STA again, for HT-related settings such as MIMO. */
8643 err = iwm_add_sta_cmd(sc, in, 1);
8644 if (err) {
8645 printf("%s: could not update STA (error %d)\n",
8646 DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
8647 return err;
8648 }
8649
8650 /* We have now been assigned an associd by the AP. */
8651 err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY2, 1);
8652 if (err) {
8653 printf("%s: failed to update MAC\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
8654 return err;
8655 }
8656
8657 err = iwm_sf_config(sc, IWM_SF_FULL_ON1);
8658 if (err) {
8659 printf("%s: could not set sf full on (error %d)\n",
8660 DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
8661 return err;
8662 }
8663
8664 err = iwm_allow_mcast(sc);
8665 if (err) {
8666 printf("%s: could not allow mcast (error %d)\n",
8667 DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
8668 return err;
8669 }
8670
8671 err = iwm_power_update_device(sc);
8672 if (err) {
8673 printf("%s: could not send power command (error %d)\n",
8674 DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
8675 return err;
8676 }
8677#ifdef notyet
8678 /*
8679 * Disabled for now. Default beacon filter settings
8680 * prevent net80211 from getting ERP and HT protection
8681 * updates from beacons.
8682 */
8683 err = iwm_enable_beacon_filter(sc, in);
8684 if (err) {
8685 printf("%s: could not enable beacon filter\n",
8686 DEVNAME(sc)((sc)->sc_dev.dv_xname));
8687 return err;
8688 }
8689#endif
8690 err = iwm_power_mac_update_mode(sc, in);
8691 if (err) {
8692 printf("%s: could not update MAC power (error %d)\n",
8693 DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
8694 return err;
8695 }
8696
8697 if (!isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DYNAMIC_QUOTA)((sc->sc_enabled_capa)[(44)>>3] & (1<<((44)&(8 -1))))) {
8698 err = iwm_update_quotas(sc, in, 1);
8699 if (err) {
8700 printf("%s: could not update quotas (error %d)\n",
8701 DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
8702 return err;
8703 }
8704 }
8705
8706 ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
8707 ieee80211_ra_node_init(&in->in_rn);
8708
8709 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8710 iwm_led_blink_start(sc);
8711 return 0;
8712 }
8713
8714 /* Start at lowest available bit-rate, AMRR will raise. */
8715 in->in_ni.ni_txrate = 0;
8716 in->in_ni.ni_txmcs = 0;
8717 iwm_setrates(in, 0);
8718
8719 timeout_add_msec(&sc->sc_calib_to, 500);
8720 iwm_led_enable(sc);
8721
8722 return 0;
8723}
8724
8725int
8726iwm_run_stop(struct iwm_softc *sc)
8727{
8728 struct ieee80211com *ic = &sc->sc_ic;
8729 struct iwm_node *in = (void *)ic->ic_bss;
8730 struct ieee80211_node *ni = &in->in_ni;
8731 int err, i, tid;
8732
8733 splassert(IPL_NET)do { if (splassert_ctl > 0) { splassert_check(0x7, __func__); } } while (0);
8734
8735 /*
8736 * Stop Tx/Rx BA sessions now. We cannot rely on the BA task
8737 * for this when moving out of RUN state since it runs in a
8738 * separate thread.
8739 * Note that in->in_ni (struct ieee80211_node) already represents
8740 * our new access point in case we are roaming between APs.
8741 * This means we cannot rely on struct ieee80211_node to tell
8742 * us which BA sessions exist.
8743 */
8744 for (i = 0; i < nitems(sc->sc_rxba_data)(sizeof((sc->sc_rxba_data)) / sizeof((sc->sc_rxba_data)[0])); i++) {
8745 struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
8746 if (rxba->baid == IWM_RX_REORDER_DATA_INVALID_BAID0x7f)
8747 continue;
8748 err = iwm_sta_rx_agg(sc, ni, rxba->tid, 0, 0, 0, 0);
8749 if (err)
8750 return err;
8751 iwm_clear_reorder_buffer(sc, rxba);
8752 if (sc->sc_rx_ba_sessions > 0)
8753 sc->sc_rx_ba_sessions--;
8754 }
8755 for (tid = 0; tid < IWM_MAX_TID_COUNT8; tid++) {
8756 int qid = IWM_FIRST_AGG_TX_QUEUE10 + tid;
8757 if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
8758 continue;
8759 err = iwm_sta_tx_agg(sc, ni, tid, 0, 0, 0);
8760 if (err)
8761 return err;
8762 err = iwm_disable_txq(sc, IWM_STATION_ID0, qid, tid);
8763 if (err)
8764 return err;
8765 in->tfd_queue_msk &= ~(1 << qid);
8766 }
8767 ieee80211_ba_del(ni);
8768
8769 if (ic->ic_opmode == IEEE80211_M_MONITOR)
8770 iwm_led_blink_stop(sc);
8771
8772 err = iwm_sf_config(sc, IWM_SF_INIT_OFF3);
8773 if (err)
8774 return err;
8775
8776 iwm_disable_beacon_filter(sc);
8777
8778 if (!isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DYNAMIC_QUOTA)((sc->sc_enabled_capa)[(44)>>3] & (1<<((44)&(8 -1))))) {
8779 err = iwm_update_quotas(sc, in, 0);
8780 if (err) {
8781 printf("%s: could not update quotas (error %d)\n",
8782 DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
8783 return err;
8784 }
8785 }
8786
8787 /* Mark station as disassociated. */
8788 err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY2, 0);
8789 if (err) {
8790 printf("%s: failed to update MAC\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
8791 return err;
8792 }
8793
8794 /* Reset Tx chains in case MIMO or 40 MHz channels were enabled. */
8795 if (in->in_ni.ni_flags & IEEE80211_NODE_HT0x0400) {
8796 err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
8797 in->in_phyctxt->channel, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN0);
8798 if (err) {
8799 printf("%s: failed to update PHY\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
8800 return err;
8801 }
8802 }
8803
8804 return 0;
8805}
8806
8807struct ieee80211_node *
8808iwm_node_alloc(struct ieee80211com *ic)
8809{
8810 return malloc(sizeof (struct iwm_node), M_DEVBUF2, M_NOWAIT0x0002 | M_ZERO0x0008);
8811}
8812
8813int
8814iwm_set_key_v1(struct ieee80211com *ic, struct ieee80211_node *ni,
8815 struct ieee80211_key *k)
8816{
8817 struct iwm_softc *sc = ic->ic_softcic_ac.ac_if.if_softc;
8818 struct iwm_add_sta_key_cmd_v1 cmd;
8819
8820 memset(&cmd, 0, sizeof(cmd))__builtin_memset((&cmd), (0), (sizeof(cmd)));
8821
8822 cmd.common.key_flags = htole16(IWM_STA_KEY_FLG_CCM |
8823 IWM_STA_KEY_FLG_WEP_KEY_MAP |
8824 ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
8825 IWM_STA_KEY_FLG_KEYID_MSK))((__uint16_t)((2 << 0) | (1 << 3) | ((k->k_id << 8) & (3 << 8))));
8826 if (k->k_flags & IEEE80211_KEY_GROUP0x00000001)
8827 cmd.common.key_flags |= htole16(IWM_STA_KEY_MULTICAST)((__uint16_t)((1 << 14)));
8828
8829 memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len))__builtin_memcpy((cmd.common.key), (k->k_key), ((((sizeof(cmd.common.key))<(k->k_len))?(sizeof(cmd.common.key)):(k->k_len))));
8830 cmd.common.key_offset = 0;
8831 cmd.common.sta_id = IWM_STATION_ID0;
8832
8833 return iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY0x17, IWM_CMD_ASYNC,
8834 sizeof(cmd), &cmd);
8835}
8836
8837int
8838iwm_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
8839 struct ieee80211_key *k)
8840{
8841 struct iwm_softc *sc = ic->ic_softcic_ac.ac_if.if_softc;
8842 struct iwm_add_sta_key_cmd cmd;
8843
8844 if ((k->k_flags & IEEE80211_KEY_GROUP0x00000001) ||
8845 k->k_cipher != IEEE80211_CIPHER_CCMP) {
8846 /* Fall back to software crypto for other ciphers. */
8847 return (ieee80211_set_key(ic, ni, k));
8848 }
8849
8850 if (!isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_TKIP_MIC_KEYS)((sc->sc_ucode_api)[(29)>>3] & (1<<((29)&(8 -1)))))
8851 return iwm_set_key_v1(ic, ni, k);
8852
8853 memset(&cmd, 0, sizeof(cmd))__builtin_memset((&cmd), (0), (sizeof(cmd)));
8854
8855 cmd.common.key_flags = htole16(IWM_STA_KEY_FLG_CCM |
8856 IWM_STA_KEY_FLG_WEP_KEY_MAP |
8857 ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
8858 IWM_STA_KEY_FLG_KEYID_MSK))((__uint16_t)((2 << 0) | (1 << 3) | ((k->k_id << 8) & (3 << 8))));
8859 if (k->k_flags & IEEE80211_KEY_GROUP0x00000001)
8860 cmd.common.key_flags |= htole16(IWM_STA_KEY_MULTICAST)((__uint16_t)((1 << 14)));
8861
8862 memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len))__builtin_memcpy((cmd.common.key), (k->k_key), ((((sizeof(cmd.common.key))<(k->k_len))?(sizeof(cmd.common.key)):(k->k_len))));
8863 cmd.common.key_offset = 0;
8864 cmd.common.sta_id = IWM_STATION_ID0;
8865
8866 cmd.transmit_seq_cnt = htole64(k->k_tsc)((__uint64_t)(k->k_tsc));
8867
8868 return iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY0x17, IWM_CMD_ASYNC,
8869 sizeof(cmd), &cmd);
8870}
8871
8872void
8873iwm_delete_key_v1(struct ieee80211com *ic, struct ieee80211_node *ni,
8874 struct ieee80211_key *k)
8875{
8876 struct iwm_softc *sc = ic->ic_softcic_ac.ac_if.if_softc;
8877 struct iwm_add_sta_key_cmd_v1 cmd;
8878
8879 memset(&cmd, 0, sizeof(cmd))__builtin_memset((&cmd), (0), (sizeof(cmd)));
8880
8881 cmd.common.key_flags = htole16(IWM_STA_KEY_NOT_VALID |
8882 IWM_STA_KEY_FLG_NO_ENC | IWM_STA_KEY_FLG_WEP_KEY_MAP |
8883 ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
8884 IWM_STA_KEY_FLG_KEYID_MSK))((__uint16_t)((1 << 11) | (0 << 0) | (1 << 3) | ((k->k_id << 8) & (3 << 8))));
8885 memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len))__builtin_memcpy((cmd.common.key), (k->k_key), ((((sizeof(cmd.common.key))<(k->k_len))?(sizeof(cmd.common.key)):(k->k_len))));
8886 cmd.common.key_offset = 0;
8887 cmd.common.sta_id = IWM_STATION_ID0;
8888
8889 iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY0x17, IWM_CMD_ASYNC, sizeof(cmd), &cmd);
8890}
8891
8892void
8893iwm_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
8894 struct ieee80211_key *k)
8895{
8896 struct iwm_softc *sc = ic->ic_softcic_ac.ac_if.if_softc;
8897 struct iwm_add_sta_key_cmd cmd;
8898
8899 if ((k->k_flags & IEEE80211_KEY_GROUP0x00000001) ||
8900 (k->k_cipher != IEEE80211_CIPHER_CCMP)) {
8901 /* Fall back to software crypto for other ciphers. */
8902 ieee80211_delete_key(ic, ni, k);
8903 return;
8904 }
8905
8906 if (!isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_TKIP_MIC_KEYS)((sc->sc_ucode_api)[(29)>>3] & (1<<((29)&(8 -1)))))
8907 return iwm_delete_key_v1(ic, ni, k);
8908
8909 memset(&cmd, 0, sizeof(cmd))__builtin_memset((&cmd), (0), (sizeof(cmd)));
8910
8911 cmd.common.key_flags = htole16(IWM_STA_KEY_NOT_VALID |
8912 IWM_STA_KEY_FLG_NO_ENC | IWM_STA_KEY_FLG_WEP_KEY_MAP |
8913 ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
8914 IWM_STA_KEY_FLG_KEYID_MSK))((__uint16_t)((1 << 11) | (0 << 0) | (1 << 3) | ((k->k_id << 8) & (3 << 8))));
8915 memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len))__builtin_memcpy((cmd.common.key), (k->k_key), ((((sizeof(cmd.common.key))<(k->k_len))?(sizeof(cmd.common.key)):(k->k_len))));
8916 cmd.common.key_offset = 0;
8917 cmd.common.sta_id = IWM_STATION_ID0;
8918
8919 iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY0x17, IWM_CMD_ASYNC, sizeof(cmd), &cmd);
8920}
8921
8922void
8923iwm_calib_timeout(void *arg)
8924{
8925 struct iwm_softc *sc = arg;
8926 struct ieee80211com *ic = &sc->sc_ic;
8927 struct iwm_node *in = (void *)ic->ic_bss;
8928 struct ieee80211_node *ni = &in->in_ni;
8929 int s;
8930
8931 s = splnet()splraise(0x7);
8932 if ((ic->ic_fixed_rate == -1 || ic->ic_fixed_mcs == -1) &&
8933 (ni->ni_flags & IEEE80211_NODE_HT0x0400) == 0 &&
8934 ic->ic_opmode == IEEE80211_M_STA && ic->ic_bss) {
8935 int old_txrate = ni->ni_txrate;
8936 ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
8937 /*
8938 * If AMRR has chosen a new TX rate we must update
8939 * the firmware's LQ rate table.
8940 * ni_txrate may change again before the task runs so
8941 * cache the chosen rate in the iwm_node structure.
8942 */
8943 if (ni->ni_txrate != old_txrate)
8944 iwm_setrates(in, 1);
8945 }
8946
8947 splx(s)spllower(s);
8948
8949 timeout_add_msec(&sc->sc_calib_to, 500);
8950}
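
/*
 * Because the timeout re-arms itself above, this handler runs as a
 * periodic 500 ms tick, giving AMRR regular samples with which to
 * adjust the legacy Tx rate while associated.
 */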
8951
8952void
8953iwm_setrates(struct iwm_node *in, int async)
8954{
8955 struct ieee80211_node *ni = &in->in_ni;
8956 struct ieee80211com *ic = ni->ni_ic;
8957 struct iwm_softc *sc = IC2IFP(ic)(&(ic)->ic_ac.ac_if)->if_softc;
8958 struct iwm_lq_cmd lqcmd;
8959 struct ieee80211_rateset *rs = &ni->ni_rates;
8960 int i, ridx, ridx_min, ridx_max, j, mimo, tab = 0;
8961 struct iwm_host_cmd cmd = {
8962 .id = IWM_LQ_CMD0x4e,
8963 .len = { sizeof(lqcmd), },
8964 };
8965
8966 cmd.flags = async ? IWM_CMD_ASYNC : 0;
8967
8968 memset(&lqcmd, 0, sizeof(lqcmd))__builtin_memset((&lqcmd), (0), (sizeof(lqcmd)));
8969 lqcmd.sta_id = IWM_STATION_ID0;
8970
8971 if (ic->ic_flags & IEEE80211_F_USEPROT0x00100000)
8972 lqcmd.flags |= IWM_LQ_FLAG_USE_RTS_MSK(1 << 0);
8973
8974 /*
8975 * Fill the LQ rate selection table with legacy and/or HT rates
8976 * in descending order, i.e. with the node's current TX rate first.
8977 * In cases where throughput of an HT rate corresponds to a legacy
8978 * rate it makes no sense to add both. We rely on the fact that
8979 * iwm_rates is laid out such that equivalent HT/legacy rates share
8980 * the same IWM_RATE_*_INDEX value. Also, rates not applicable to
8981 * legacy/HT are assumed to be marked with an 'invalid' PLCP value.
8982 */
8983 j = 0;
8984 ridx_min = iwm_rval2ridx(ieee80211_min_basic_rate(ic));
8985 mimo = iwm_is_mimo_mcs(ni->ni_txmcs);
8986 ridx_max = (mimo ? IWM_RIDX_MAX((sizeof((iwm_rates)) / sizeof((iwm_rates)[0]))-1) : IWM_LAST_HT_SISO_RATE);
8987 for (ridx = ridx_max; ridx >= ridx_min; ridx--) {
8988 uint8_t plcp = iwm_rates[ridx].plcp;
8989 uint8_t ht_plcp = iwm_rates[ridx].ht_plcp;
8990
8991 if (j >= nitems(lqcmd.rs_table)(sizeof((lqcmd.rs_table)) / sizeof((lqcmd.rs_table)[0])))
8992 break;
8993 tab = 0;
8994 if (ni->ni_flags & IEEE80211_NODE_HT0x0400) {
8995 if (ht_plcp == IWM_RATE_HT_SISO_MCS_INV_PLCP0x20)
8996 continue;
8997 /* Do not mix SISO and MIMO HT rates. */
8998 if ((mimo && !iwm_is_mimo_ht_plcp(ht_plcp)) ||
8999 (!mimo && iwm_is_mimo_ht_plcp(ht_plcp)))
9000 continue;
9001 for (i = ni->ni_txmcs; i >= 0; i--) {
9002 if (isclr(ni->ni_rxmcs, i)(((ni->ni_rxmcs)[(i)>>3] & (1<<((i)&(8 -1)))) == 0))
9003 continue;
9004 if (ridx != iwm_mcs2ridx[i])
9005 continue;
9006 tab = ht_plcp;
9007 tab |= IWM_RATE_MCS_HT_MSK(1 << 8);
9008 /* First two Tx attempts may use 40MHz/SGI. */
9009 if (j > 1)
9010 break;
9011 if (in->in_phyctxt->sco ==
9012 IEEE80211_HTOP0_SCO_SCA1 ||
9013 in->in_phyctxt->sco ==
9014 IEEE80211_HTOP0_SCO_SCB3) {
9015 tab |= IWM_RATE_MCS_CHAN_WIDTH_40(1 << 11);
9016 tab |= IWM_RATE_MCS_RTS_REQUIRED_MSK(1 << 30);
9017 }
9018 if (ieee80211_ra_use_ht_sgi(ni))
9019 tab |= IWM_RATE_MCS_SGI_MSK(1 << 13);
9020 break;
9021 }
9022 } else if (plcp != IWM_RATE_INVM_PLCP0xff) {
9023 for (i = ni->ni_txrate; i >= 0; i--) {
9024 if (iwm_rates[ridx].rate == (rs->rs_rates[i] &
9025 IEEE80211_RATE_VAL0x7f)) {
9026 tab = plcp;
9027 break;
9028 }
9029 }
9030 }
9031
9032 if (tab == 0)
9033 continue;
9034
9035 if (iwm_is_mimo_ht_plcp(ht_plcp))
9036 tab |= IWM_RATE_MCS_ANT_AB_MSK((1 << 14) | (2 << 14));
9037 else if (sc->sc_device_family == IWM_DEVICE_FAMILY_90003)
9038 tab |= IWM_RATE_MCS_ANT_B_MSK(2 << 14);
9039 else
9040 tab |= IWM_RATE_MCS_ANT_A_MSK(1 << 14);
9041
9042 if (IWM_RIDX_IS_CCK(ridx)((ridx) < 4))
9043 tab |= IWM_RATE_MCS_CCK_MSK(1 << 9);
9044 lqcmd.rs_table[j++] = htole32(tab)((__uint32_t)(tab));
9045 }
9046
9047 lqcmd.mimo_delim = (mimo ? j : 0);
9048
9049 /* Fill the rest with the lowest possible rate */
9050 while (j < nitems(lqcmd.rs_table)(sizeof((lqcmd.rs_table)) / sizeof((lqcmd.rs_table)[0]))) {
9051 tab = iwm_rates[ridx_min].plcp;
9052 if (IWM_RIDX_IS_CCK(ridx_min)((ridx_min) < 4))
9053 tab |= IWM_RATE_MCS_CCK_MSK(1 << 9);
9054 if (sc->sc_device_family == IWM_DEVICE_FAMILY_90003)
9055 tab |= IWM_RATE_MCS_ANT_B_MSK(2 << 14);
9056 else
9057 tab |= IWM_RATE_MCS_ANT_A_MSK(1 << 14);
9058 lqcmd.rs_table[j++] = htole32(tab)((__uint32_t)(tab));
9059 }
9060
9061 if (sc->sc_device_family == IWM_DEVICE_FAMILY_90003)
9062 lqcmd.single_stream_ant_msk = IWM_ANT_B(1 << 1);
9063 else
9064 lqcmd.single_stream_ant_msk = IWM_ANT_A(1 << 0);
9065 lqcmd.dual_stream_ant_msk = IWM_ANT_AB((1 << 0) | (1 << 1));
9066
9067 lqcmd.agg_time_limit = htole16(4000)((__uint16_t)(4000)); /* 4ms */
9068 lqcmd.agg_disable_start_th = 3;
9069 lqcmd.agg_frame_cnt_limit = 0x3f;
9070
9071 cmd.data[0] = &lqcmd;
9072 iwm_send_cmd(sc, &cmd);
9073}
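
/*
 * Minimal sketch of the descending-fill strategy used above (example
 * only; the table length and rate indices are hypothetical and do not
 * correspond to the firmware's actual LQ command layout):
 */
#if 0	/* example */
static void
fill_rates_descending(uint32_t *table, int nentries, int cur_ridx, int min_ridx)
{
	int j = 0, ridx;

	/* Current (highest) rate first, then fall back step by step. */
	for (ridx = cur_ridx; ridx >= min_ridx && j < nentries; ridx--)
		table[j++] = ridx;

	/* Pad the remaining slots with the lowest usable rate. */
	while (j < nentries)
		table[j++] = min_ridx;
}
#endif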
9074
9075int
9076iwm_media_change(struct ifnet *ifp)
9077{
9078 struct iwm_softc *sc = ifp->if_softc;
9079 struct ieee80211com *ic = &sc->sc_ic;
9080 uint8_t rate, ridx;
9081 int err;
9082
9083 err = ieee80211_media_change(ifp);
9084 if (err != ENETRESET52)
9085 return err;
9086
9087 if (ic->ic_fixed_mcs != -1)
9088 sc->sc_fixed_ridx = iwm_mcs2ridx[ic->ic_fixed_mcs];
9089 else if (ic->ic_fixed_rate != -1) {
9090 rate = ic->ic_sup_rates[ic->ic_curmode].
9091 rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL0x7f;
9092 /* Map 802.11 rate to HW rate index. */
9093 for (ridx = 0; ridx <= IWM_RIDX_MAX((sizeof((iwm_rates)) / sizeof((iwm_rates)[0]))-1); ridx++)
9094 if (iwm_rates[ridx].rate == rate)
9095 break;
9096 sc->sc_fixed_ridx = ridx;
9097 }
9098
9099 if ((ifp->if_flags & (IFF_UP0x1 | IFF_RUNNING0x40)) ==
9100 (IFF_UP0x1 | IFF_RUNNING0x40)) {
9101 iwm_stop(ifp);
9102 err = iwm_init(ifp);
9103 }
9104 return err;
9105}
9106
9107void
9108iwm_newstate_task(void *psc)
9109{
9110 struct iwm_softc *sc = (struct iwm_softc *)psc;
9111 struct ieee80211com *ic = &sc->sc_ic;
9112 enum ieee80211_state nstate = sc->ns_nstate;
9113 enum ieee80211_state ostate = ic->ic_state;
9114 int arg = sc->ns_arg;
9115 int err = 0, s = splnet()splraise(0x7);
9116
9117 if (sc->sc_flags & IWM_FLAG_SHUTDOWN0x100) {
9118 /* iwm_stop() is waiting for us. */
9119 refcnt_rele_wake(&sc->task_refs);
9120 splx(s)spllower(s);
9121 return;
9122 }
9123
9124 if (ostate == IEEE80211_S_SCAN) {
9125 if (nstate == ostate) {
9126 if (sc->sc_flags & IWM_FLAG_SCANNING0x04) {
9127 refcnt_rele_wake(&sc->task_refs);
9128 splx(s)spllower(s);
9129 return;
9130 }
9131 /* Firmware is no longer scanning. Do another scan. */
9132 goto next_scan;
9133 } else
9134 iwm_led_blink_stop(sc);
9135 }
9136
9137 if (nstate <= ostate) {
9138 switch (ostate) {
9139 case IEEE80211_S_RUN:
9140 err = iwm_run_stop(sc);
9141 if (err)
9142 goto out;
9143 /* FALLTHROUGH */
9144 case IEEE80211_S_ASSOC:
9145 case IEEE80211_S_AUTH:
9146 if (nstate <= IEEE80211_S_AUTH) {
9147 err = iwm_deauth(sc);
9148 if (err)
9149 goto out;
9150 }
9151 /* FALLTHROUGH */
9152 case IEEE80211_S_SCAN:
9153 case IEEE80211_S_INIT:
9154 break;
9155 }
9156
9157 /* Die now if iwm_stop() was called while we were sleeping. */
9158 if (sc->sc_flags & IWM_FLAG_SHUTDOWN0x100) {
9159 refcnt_rele_wake(&sc->task_refs);
9160 splx(s)spllower(s);
9161 return;
9162 }
9163 }
9164
9165 switch (nstate) {
9166 case IEEE80211_S_INIT:
9167 break;
9168
9169 case IEEE80211_S_SCAN:
9170next_scan:
9171 err = iwm_scan(sc);
9172 if (err)
9173 break;
9174 refcnt_rele_wake(&sc->task_refs);
9175 splx(s)spllower(s);
9176 return;
9177
9178 case IEEE80211_S_AUTH:
9179 err = iwm_auth(sc);
9180 break;
9181
9182 case IEEE80211_S_ASSOC:
9183 break;
9184
9185 case IEEE80211_S_RUN:
9186 err = iwm_run(sc);
9187 break;
9188 }
9189
9190out:
9191 if ((sc->sc_flags & IWM_FLAG_SHUTDOWN0x100) == 0) {
9192 if (err)
9193 task_add(systq, &sc->init_task);
9194 else
9195 sc->sc_newstate(ic, nstate, arg);
9196 }
9197 refcnt_rele_wake(&sc->task_refs);
9198 splx(s)spllower(s);
9199}
9200
9201int
9202iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
9203{
9204 struct ifnet *ifp = IC2IFP(ic)(&(ic)->ic_ac.ac_if);
9205 struct iwm_softc *sc = ifp->if_softc;
9206
9207 /*
9208 * Prevent attempts to transition towards the same state, except
9209 * for SCAN -> SCAN, which triggers another scan iteration,
9210 * and AUTH -> AUTH, which is needed to support
9211 * band-steering.
9212 */
9213 if (sc->ns_nstate == nstate && nstate != IEEE80211_S_SCAN &&
9214 nstate != IEEE80211_S_AUTH)
9215 return 0;
9216
9217 if (ic->ic_state == IEEE80211_S_RUN) {
9218 timeout_del(&sc->sc_calib_to);
9219 iwm_del_task(sc, systq, &sc->ba_task);
9220 iwm_del_task(sc, systq, &sc->mac_ctxt_task);
9221 iwm_del_task(sc, systq, &sc->phy_ctxt_task);
9222 iwm_del_task(sc, systq, &sc->bgscan_done_task);
9223 }
9224
9225 sc->ns_nstate = nstate;
9226 sc->ns_arg = arg;
9227
9228 iwm_add_task(sc, sc->sc_nswq, &sc->newstate_task);
9229
9230 return 0;
9231}
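
/*
 * The actual state change is deferred to iwm_newstate_task() above,
 * presumably so the driver can sleep while talking to the firmware;
 * tasks left over from the RUN state are cancelled first so they
 * cannot race with the transition.
 */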
9232
9233void
9234iwm_endscan(struct iwm_softc *sc)
9235{
9236 struct ieee80211com *ic = &sc->sc_ic;
9237
9238 if ((sc->sc_flags & (IWM_FLAG_SCANNING0x04 | IWM_FLAG_BGSCAN0x200)) == 0)
9239 return;
9240
9241 sc->sc_flags &= ~(IWM_FLAG_SCANNING0x04 | IWM_FLAG_BGSCAN0x200);
9242 ieee80211_end_scan(&ic->ic_ific_ac.ac_if);
9243}
9244
9245/*
9246 * Aging and idle timeouts for the different possible scenarios
9247 * in default configuration
9248 */
9249static const uint32_t
9250iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO5][IWM_SF_NUM_TIMEOUT_TYPES2] = {
9251 {
9252 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF)((__uint32_t)(400)),
9253 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)((__uint32_t)(160))
9254 },
9255 {
9256 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF)((__uint32_t)(400)),
9257 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)((__uint32_t)(160))
9258 },
9259 {
9260 htole32(IWM_SF_MCAST_AGING_TIMER_DEF)((__uint32_t)(400)),
9261 htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)((__uint32_t)(160))
9262 },
9263 {
9264 htole32(IWM_SF_BA_AGING_TIMER_DEF)((__uint32_t)(400)),
9265 htole32(IWM_SF_BA_IDLE_TIMER_DEF)((__uint32_t)(160))
9266 },
9267 {
9268 htole32(IWM_SF_TX_RE_AGING_TIMER_DEF)((__uint32_t)(400)),
9269 htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)((__uint32_t)(160))
9270 },
9271};
9272
9273/*
9274 * Aging and idle timeouts for the different possible scenarios
9275 * in single BSS MAC configuration.
9276 */
9277static const uint32_t
9278iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO5][IWM_SF_NUM_TIMEOUT_TYPES2] = {
9279 {
9280 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER)((__uint32_t)(2016)),
9281 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)((__uint32_t)(320))
9282 },
9283 {
9284 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER)((__uint32_t)(2016)),
9285 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)((__uint32_t)(320))
9286 },
9287 {
9288 htole32(IWM_SF_MCAST_AGING_TIMER)((__uint32_t)(10016)),
9289 htole32(IWM_SF_MCAST_IDLE_TIMER)((__uint32_t)(2016))
9290 },
9291 {
9292 htole32(IWM_SF_BA_AGING_TIMER)((__uint32_t)(2016)),
9293 htole32(IWM_SF_BA_IDLE_TIMER)((__uint32_t)(320))
9294 },
9295 {
9296 htole32(IWM_SF_TX_RE_AGING_TIMER)((__uint32_t)(2016)),
9297 htole32(IWM_SF_TX_RE_IDLE_TIMER)((__uint32_t)(320))
9298 },
9299};
9300
9301void
9302iwm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
9303 struct ieee80211_node *ni)
9304{
9305 int i, j, watermark;
9306
9307 sf_cmd->watermark[IWM_SF_LONG_DELAY_ON0] = htole32(IWM_SF_W_MARK_SCAN)((__uint32_t)(4096));
9308
9309 /*
9310 * If we are in the association flow, check the antenna configuration
9311 * capabilities of the AP station and choose the watermark accordingly.
9312 */
9313 if (ni) {
9314 if (ni->ni_flags & IEEE80211_NODE_HT0x0400) {
9315 if (ni->ni_rxmcs[1] != 0)
9316 watermark = IWM_SF_W_MARK_MIMO28192;
9317 else
9318 watermark = IWM_SF_W_MARK_SISO4096;
9319 } else {
9320 watermark = IWM_SF_W_MARK_LEGACY4096;
9321 }
9322 /* default watermark value for unassociated mode. */
9323 } else {
9324 watermark = IWM_SF_W_MARK_MIMO28192;
9325 }
9326 sf_cmd->watermark[IWM_SF_FULL_ON1] = htole32(watermark)((__uint32_t)(watermark));
9327
9328 for (i = 0; i < IWM_SF_NUM_SCENARIO5; i++) {
9329 for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES2; j++) {
9330 sf_cmd->long_delay_timeouts[i][j] =
9331 htole32(IWM_SF_LONG_DELAY_AGING_TIMER)((__uint32_t)(1000000));
9332 }
9333 }
9334
9335 if (ni) {
9336 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
9337 sizeof(iwm_sf_full_timeout))__builtin_memcpy((sf_cmd->full_on_timeouts), (iwm_sf_full_timeout), (sizeof(iwm_sf_full_timeout)));
9338 } else {
9339 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
9340 sizeof(iwm_sf_full_timeout_def))__builtin_memcpy((sf_cmd->full_on_timeouts), (iwm_sf_full_timeout_def), (sizeof(iwm_sf_full_timeout_def)));
9341 }
9342
9343}
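
/*
 * Watermark selection above, in short: a MIMO-capable AP (ni_rxmcs[1]
 * nonzero, i.e. it advertises a second spatial stream) gets the larger
 * IWM_SF_W_MARK_MIMO2 value, SISO HT and legacy APs get the smaller
 * marks, and the MIMO2 value also serves as the safe default while no
 * AP is known.
 */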
9344
9345int
9346iwm_sf_config(struct iwm_softc *sc, int new_state)
9347{
9348 struct ieee80211com *ic = &sc->sc_ic;
9349 struct iwm_sf_cfg_cmd sf_cmd = {
9350 .state = htole32(new_state)((__uint32_t)(new_state)),
9351 };
9352 int err = 0;
9353
9354#if 0 /* only used for models with sdio interface, in iwlwifi */
9355 if (sc->sc_device_family == IWM_DEVICE_FAMILY_80002)
9356 sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF)((__uint32_t)((1 << 16)));
9357#endif
9358
9359 switch (new_state) {
9360 case IWM_SF_UNINIT2:
9361 case IWM_SF_INIT_OFF3:
9362 iwm_fill_sf_command(sc, &sf_cmd, NULL((void *)0));
9363 break;
9364 case IWM_SF_FULL_ON1:
9365 iwm_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
9366 break;
9367 default:
9368 return EINVAL22;
9369 }
9370
9371 err = iwm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD0xd1, IWM_CMD_ASYNC,
9372 sizeof(sf_cmd), &sf_cmd);
9373 return err;
9374}
9375
9376int
9377iwm_send_bt_init_conf(struct iwm_softc *sc)
9378{
9379 struct iwm_bt_coex_cmd bt_cmd;
9380
9381 bt_cmd.mode = htole32(IWM_BT_COEX_WIFI)((__uint32_t)(0x3));
9382 bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET)((__uint32_t)((1 << 4)));
9383
9384 return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG0x9b, 0, sizeof(bt_cmd),
9385 &bt_cmd);
9386}
9387
9388int
9389iwm_send_soc_conf(struct iwm_softc *sc)
9390{
9391 struct iwm_soc_configuration_cmd cmd;
9392 int err;
9393 uint32_t cmd_id, flags = 0;
9394
9395 memset(&cmd, 0, sizeof(cmd))__builtin_memset((&cmd), (0), (sizeof(cmd)));
9396
9397 /*
9398 * In VER_1 of this command, the discrete value is considered
9399 * an integer; in VER_2, it's a bitmask. Since we have only 2
9400 * values in VER_1, this is backwards-compatible with VER_2,
9401 * as long as we don't set any other flag bits.
9402 */
9403 if (!sc->sc_integrated) { /* VER_1 */
9404 flags = IWM_SOC_CONFIG_CMD_FLAGS_DISCRETE(1 << 0);
9405 } else { /* VER_2 */
9406 uint8_t scan_cmd_ver;
9407 if (sc->sc_ltr_delay != IWM_SOC_FLAGS_LTR_APPLY_DELAY_NONE0)
9408 flags |= (sc->sc_ltr_delay &
9409 IWM_SOC_FLAGS_LTR_APPLY_DELAY_MASK0xc);
9410 scan_cmd_ver = iwm_lookup_cmd_ver(sc, IWM_LONG_GROUP0x1,
9411 IWM_SCAN_REQ_UMAC0xd);
9412 if (scan_cmd_ver != IWM_FW_CMD_VER_UNKNOWN99 &&
9413 scan_cmd_ver >= 2 && sc->sc_low_latency_xtal)
9414 flags |= IWM_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY(1 << 1);
9415 }
9416 cmd.flags = htole32(flags)((__uint32_t)(flags));
9417
9418 cmd.latency = htole32(sc->sc_xtal_latency)((__uint32_t)(sc->sc_xtal_latency));
9419
9420 cmd_id = iwm_cmd_id(IWM_SOC_CONFIGURATION_CMD0x01, IWM_SYSTEM_GROUP0x2, 0);
9421 err = iwm_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
9422 if (err)
9423 printf("%s: failed to set soc latency: %d\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
9424 return err;
9425}
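
/*
 * Concretely: VER_1 only ever sends 0 or 1 in this field, and VER_2
 * defines bit 0 as the "discrete" flag, so those two encodings mean
 * the same thing under both interpretations as long as no other flag
 * bit is set.
 */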
9426
9427int
9428iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
9429{
9430 struct iwm_mcc_update_cmd mcc_cmd;
9431 struct iwm_host_cmd hcmd = {
9432 .id = IWM_MCC_UPDATE_CMD0xc8,
9433 .flags = IWM_CMD_WANT_RESP,
9434 .resp_pkt_len = IWM_CMD_RESP_MAX(1 << 12),
9435 .data = { &mcc_cmd },
9436 };
9437 struct iwm_rx_packet *pkt;
9438 size_t resp_len;
9439 int err;
9440 int resp_v3 = isset(sc->sc_enabled_capa,
9441 IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V3)((sc->sc_enabled_capa)[(73)>>3] & (1<<((73)&(8 -1))));
9442
9443 if (sc->sc_device_family == IWM_DEVICE_FAMILY_80002 &&
9444 !sc->sc_nvm.lar_enabled) {
9445 return 0;
9446 }
9447
9448 memset(&mcc_cmd, 0, sizeof(mcc_cmd))__builtin_memset((&mcc_cmd), (0), (sizeof(mcc_cmd)));
9449 mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1])((__uint16_t)(alpha2[0] << 8 | alpha2[1]));
9450 if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE)((sc->sc_ucode_api)[(9)>>3] & (1<<((9)&(8 -1)))) ||
9451 isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC)((sc->sc_enabled_capa)[(29)>>3] & (1<<((29)&(8 -1)))))
9452 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT0x10;
9453 else
9454 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW0;
9455
9456 if (resp_v3) { /* same size as resp_v2 */
9457 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
9458 } else {
9459 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
9460 }
9461
9462 err = iwm_send_cmd(sc, &hcmd);
9463 if (err)
9464 return err;
9465
9466 pkt = hcmd.resp_pkt;
9467 if (!pkt || (pkt->hdr.flags & IWM_CMD_FAILED_MSK0x40)) {
9468 err = EIO5;
9469 goto out;
9470 }
9471
9472 if (resp_v3) {
9473 struct iwm_mcc_update_resp_v3 *resp;
9474 resp_len = iwm_rx_packet_payload_len(pkt);
9475 if (resp_len < sizeof(*resp)) {
9476 err = EIO5;
9477 goto out;
9478 }
9479
9480 resp = (void *)pkt->data;
9481 if (resp_len != sizeof(*resp) +
9482 resp->n_channels * sizeof(resp->channels[0])) {
9483 err = EIO5;
9484 goto out;
9485 }
9486 } else {
9487 struct iwm_mcc_update_resp_v1 *resp_v1;
9488 resp_len = iwm_rx_packet_payload_len(pkt);
9489 if (resp_len < sizeof(*resp_v1)) {
9490 err = EIO5;
9491 goto out;
9492 }
9493
9494 resp_v1 = (void *)pkt->data;
9495 if (resp_len != sizeof(*resp_v1) +
9496 resp_v1->n_channels * sizeof(resp_v1->channels[0])) {
9497 err = EIO5;
9498 goto out;
9499 }
9500 }
9501out:
9502 iwm_free_resp(sc, &hcmd);
9503 return err;
9504}
9505
9506int
9507iwm_send_temp_report_ths_cmd(struct iwm_softc *sc)
9508{
9509 struct iwm_temp_report_ths_cmd cmd;
9510 int err;
9511
9512 /*
9513 * In order to give responsibility for critical-temperature-kill
9514 * and TX backoff to FW we need to send an empty temperature
9515 * reporting command at init time.
9516 */
9517 memset(&cmd, 0, sizeof(cmd))__builtin_memset((&cmd), (0), (sizeof(cmd)));
9518
9519 err = iwm_send_cmd_pdu(sc,
9520 IWM_WIDE_ID(IWM_PHY_OPS_GROUP, IWM_TEMP_REPORTING_THRESHOLDS_CMD)((0x4 << 8) | 0x04),
9521 0, sizeof(cmd), &cmd);
9522 if (err)
9523 printf("%s: TEMP_REPORT_THS_CMD command failed (error %d)\n",
9524 DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
9525
9526 return err;
9527}
9528
9529void
9530iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
9531{
9532 struct iwm_host_cmd cmd = {
9533 .id = IWM_REPLY_THERMAL_MNG_BACKOFF0x7e,
9534 .len = { sizeof(uint32_t), },
9535 .data = { &backoff, },
9536 };
9537
9538 iwm_send_cmd(sc, &cmd);
9539}
9540
9541void
9542iwm_free_fw_paging(struct iwm_softc *sc)
9543{
9544 int i;
9545
9546 if (sc->fw_paging_db[0].fw_paging_block.vaddr == NULL((void *)0))
9547 return;
9548
9549 for (i = 0; i < IWM_NUM_OF_FW_PAGING_BLOCKS33; i++) {
9550 iwm_dma_contig_free(&sc->fw_paging_db[i].fw_paging_block);
9551 }
9552
9553 memset(sc->fw_paging_db, 0, sizeof(sc->fw_paging_db))__builtin_memset((sc->fw_paging_db), (0), (sizeof(sc->fw_paging_db)));
9554}
9555
9556int
9557iwm_fill_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
9558{
9559 int sec_idx, idx;
9560 uint32_t offset = 0;
9561
9562 /*
9563 * Find the paging image's start point:
9564 * if CPU2 exists and is in paging format, the image looks like this:
9565 * CPU1 sections (2 or more)
9566 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
9567 * CPU2 sections (not paged)
9568 * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged
9569 * CPU2 sections from the CPU2 paging section
9570 * CPU2 paging CSS
9571 * CPU2 paging image (including instruction and data)
9572 */
9573 for (sec_idx = 0; sec_idx < IWM_UCODE_SECT_MAX16; sec_idx++) {
9574 if (image->fw_sect[sec_idx].fws_devoff ==
9575 IWM_PAGING_SEPARATOR_SECTION0xAAAABBBB) {
9576 sec_idx++;
9577 break;
9578 }
9579 }
9580
9581 /*
9582 * If paging is enabled there should be at least 2 more sections left
9583 * (one for CSS and one for Paging data)
9584 */
9585 if (sec_idx >= nitems(image->fw_sect)(sizeof((image->fw_sect)) / sizeof((image->fw_sect)[0])) - 1) {
9586 printf("%s: Paging: Missing CSS and/or paging sections\n",
9587 DEVNAME(sc)((sc)->sc_dev.dv_xname));
9588 iwm_free_fw_paging(sc);
9589 return EINVAL22;
9590 }
9591
9592 /* copy the CSS block to the dram */
9593 DPRINTF(("%s: Paging: load paging CSS to FW, sec = %d\n",do { ; } while (0)
9594 DEVNAME(sc), sec_idx))do { ; } while (0);
9595
9596 memcpy(sc->fw_paging_db[0].fw_paging_block.vaddr,
9597 image->fw_sect[sec_idx].fws_data,
9598 sc->fw_paging_db[0].fw_paging_size)__builtin_memcpy((sc->fw_paging_db[0].fw_paging_block.vaddr), (image->fw_sect[sec_idx].fws_data), (sc->fw_paging_db[0].fw_paging_size));
9599
9600 DPRINTF(("%s: Paging: copied %d CSS bytes to first block\n",do { ; } while (0)
9601 DEVNAME(sc), sc->fw_paging_db[0].fw_paging_size))do { ; } while (0);
9602
9603 sec_idx++;
9604
9605 /*
9606 * Copy the paging blocks to DRAM.
9607 * The loop index starts from 1 since the CSS block was already
9608 * copied to DRAM at index 0.
9609 * The loop stops at num_of_paging_blk since the last block may not be full; it is copied separately below.
9610 */
9611 for (idx = 1; idx < sc->num_of_paging_blk; idx++) {
9612 memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
9613 (const char *)image->fw_sect[sec_idx].fws_data + offset,
9614 sc->fw_paging_db[idx].fw_paging_size)__builtin_memcpy((sc->fw_paging_db[idx].fw_paging_block.vaddr), ((const char *)image->fw_sect[sec_idx].fws_data + offset), (sc->fw_paging_db[idx].fw_paging_size));
9615
9616 DPRINTF(("%s: Paging: copied %d paging bytes to block %d\n",do { ; } while (0)
9617 DEVNAME(sc), sc->fw_paging_db[idx].fw_paging_size, idx))do { ; } while (0);
9618
9619 offset += sc->fw_paging_db[idx].fw_paging_size;
9620 }
9621
9622 /* copy the last paging block */
9623 if (sc->num_of_pages_in_last_blk > 0) {
9624 memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
9625 (const char *)image->fw_sect[sec_idx].fws_data + offset,
9626 IWM_FW_PAGING_SIZE * sc->num_of_pages_in_last_blk)__builtin_memcpy((sc->fw_paging_db[idx].fw_paging_block.vaddr), ((const char *)image->fw_sect[sec_idx].fws_data + offset), ((1 << 12) * sc->num_of_pages_in_last_blk));
9627
9628 DPRINTF(("%s: Paging: copied %d pages in the last block %d\n",do { ; } while (0)
9629 DEVNAME(sc), sc->num_of_pages_in_last_blk, idx))do { ; } while (0);
9630 }
9631
9632 return 0;
9633}
9634
9635int
9636iwm_alloc_fw_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
9637{
9638 int blk_idx = 0;
9639 int error, num_of_pages;
9640
9641 if (sc->fw_paging_db[0].fw_paging_block.vaddr != NULL((void *)0)) {
9642 int i;
9643 /* Device got reset, and we setup firmware paging again */
9644 bus_dmamap_sync(sc->sc_dmat,
9645 sc->fw_paging_db[0].fw_paging_block.map,
9646 0, IWM_FW_PAGING_SIZE,
9647 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->fw_paging_db[0].fw_paging_block.map), (0), ((1 << 12)), (0x08 | 0x02));
9648 for (i = 1; i < sc->num_of_paging_blk + 1; i++) {
9649 bus_dmamap_sync(sc->sc_dmat,
9650 sc->fw_paging_db[i].fw_paging_block.map,
9651 0, IWM_PAGING_BLOCK_SIZE,
9652 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->fw_paging_db[i].fw_paging_block.map), (0), (((1 << 3) * (1 << 12))), (0x08 | 0x02));
9653 }
9654 return 0;
9655 }
9656
9657 /* ensure IWM_PAGING_BLOCK_SIZE is 2 raised to IWM_BLOCK_2_EXP_SIZE */
9658#if (1 << IWM_BLOCK_2_EXP_SIZE(12 + 3)) != IWM_PAGING_BLOCK_SIZE((1 << 3) * (1 << 12))
9659#error IWM_BLOCK_2_EXP_SIZE(12 + 3) must be power of 2 of IWM_PAGING_BLOCK_SIZE((1 << 3) * (1 << 12))
9660#endif
9661
9662 num_of_pages = image->paging_mem_size / IWM_FW_PAGING_SIZE(1 << 12);
9663 sc->num_of_paging_blk =
9664 ((num_of_pages - 1) / IWM_NUM_OF_PAGE_PER_GROUP(1 << 3)) + 1;
9665
9666 sc->num_of_pages_in_last_blk =
9667 num_of_pages -
9668 IWM_NUM_OF_PAGE_PER_GROUP(1 << 3) * (sc->num_of_paging_blk - 1);
9669
9670 DPRINTF(("%s: Paging: allocating mem for %d paging blocks, each block"do { ; } while (0)
9671 " holds 8 pages, last block holds %d pages\n", DEVNAME(sc),do { ; } while (0)
9672 sc->num_of_paging_blk,do { ; } while (0)
9673 sc->num_of_pages_in_last_blk))do { ; } while (0);
9674
9675 /* allocate block of 4Kbytes for paging CSS */
9676 error = iwm_dma_contig_alloc(sc->sc_dmat,
9677 &sc->fw_paging_db[blk_idx].fw_paging_block, IWM_FW_PAGING_SIZE(1 << 12),
9678 4096);
9679 if (error) {
9680 /* free all the previous pages since we failed */
9681 iwm_free_fw_paging(sc);
9682 return ENOMEM12;
9683 }
9684
9685 sc->fw_paging_db[blk_idx].fw_paging_size = IWM_FW_PAGING_SIZE(1 << 12);
9686
9687 DPRINTF(("%s: Paging: allocated 4K(CSS) bytes for firmware paging.\n",do { ; } while (0)
9688 DEVNAME(sc)))do { ; } while (0);
9689
9690 /*
9691 * Allocate blocks in DRAM.
9692 * Since the CSS occupies fw_paging_db[0], the loop starts at index 1.
9693 */
9694 for (blk_idx = 1; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
9695 /* allocate block of IWM_PAGING_BLOCK_SIZE (32K) */
9696 /* XXX Use iwm_dma_contig_alloc for allocating */
9697 error = iwm_dma_contig_alloc(sc->sc_dmat,
9698 &sc->fw_paging_db[blk_idx].fw_paging_block,
9699 IWM_PAGING_BLOCK_SIZE((1 << 3) * (1 << 12)), 4096);
9700 if (error) {
9701 /* free all the previous pages since we failed */
9702 iwm_free_fw_paging(sc);
9703 return ENOMEM12;
9704 }
9705
9706 sc->fw_paging_db[blk_idx].fw_paging_size =
9707 IWM_PAGING_BLOCK_SIZE((1 << 3) * (1 << 12));
9708
9709 DPRINTF((do { ; } while (0)
9710 "%s: Paging: allocated 32K bytes for firmware paging.\n",do { ; } while (0)
9711 DEVNAME(sc)))do { ; } while (0);
9712 }
9713
9714 return 0;
9715}
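
/*
 * Worked example of the block math above (numbers are hypothetical):
 * with paging_mem_size = 90112 bytes and 4KB pages, num_of_pages = 22;
 * at 8 pages per group, num_of_paging_blk = ((22 - 1) / 8) + 1 = 3 and
 * num_of_pages_in_last_blk = 22 - 8 * (3 - 1) = 6.
 */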
9716
9717int
9718iwm_save_fw_paging(struct iwm_softc *sc, const struct iwm_fw_sects *fw)
9719{
9720 int ret;
9721
9722 ret = iwm_alloc_fw_paging_mem(sc, fw);
9723 if (ret)
9724 return ret;
9725
9726 return iwm_fill_paging_mem(sc, fw);
9727}
9728
9729/* send paging cmd to FW in case CPU2 has paging image */
9730int
9731iwm_send_paging_cmd(struct iwm_softc *sc, const struct iwm_fw_sects *fw)
9732{
9733 int blk_idx;
9734 uint32_t dev_phy_addr;
9735 struct iwm_fw_paging_cmd fw_paging_cmd = {
9736 .flags =
9737 htole32(IWM_PAGING_CMD_IS_SECURED |
9738 IWM_PAGING_CMD_IS_ENABLED |
9739 (sc->num_of_pages_in_last_blk <<
9740 IWM_PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS))((__uint32_t)((1 << 9) | (1 << 8) | (sc->num_of_pages_in_last_blk << 0))),
9741 .block_size = htole32(IWM_BLOCK_2_EXP_SIZE)((__uint32_t)((12 + 3))),
9742 .block_num = htole32(sc->num_of_paging_blk)((__uint32_t)(sc->num_of_paging_blk)),
9743 };
9744
9745 /* loop over all paging blocks + the CSS block */
9746 for (blk_idx = 0; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
9747 dev_phy_addr = htole32(
9748 sc->fw_paging_db[blk_idx].fw_paging_block.paddr >>
9749 IWM_PAGE_2_EXP_SIZE)((__uint32_t)(sc->fw_paging_db[blk_idx].fw_paging_block.paddr >> 12));
9750 fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
9751 bus_dmamap_sync(sc->sc_dmat,
9752 sc->fw_paging_db[blk_idx].fw_paging_block.map, 0,
9753 blk_idx == 0 ? IWM_FW_PAGING_SIZE : IWM_PAGING_BLOCK_SIZE,
9754 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (sc->fw_paging_db[blk_idx].fw_paging_block.map), (0), (blk_idx == 0 ? (1 << 12) : ((1 << 3) * (1 << 12))), (0x04 | 0x01));
9755 }
9756
9757 return iwm_send_cmd_pdu(sc, iwm_cmd_id(IWM_FW_PAGING_BLOCK_CMD0x4f,
9758 IWM_LONG_GROUP0x1, 0),
9759 0, sizeof(fw_paging_cmd), &fw_paging_cmd);
9760}
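
/*
 * The device is handed 4KB page frame numbers (paddr >> 12) rather
 * than full physical addresses, which is why the paging blocks above
 * are allocated with 4096-byte alignment.
 */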
9761
9762int
9763iwm_init_hw(struct iwm_softc *sc)
9764{
9765 struct ieee80211com *ic = &sc->sc_ic;
9766 int err, i, ac, qid, s;
9767
9768 err = iwm_run_init_mvm_ucode(sc, 0);
9769 if (err)
9770 return err;
9771
9772 /* Should stop and start HW since INIT image just loaded. */
9773 iwm_stop_device(sc);
9774 err = iwm_start_hw(sc);
9775 if (err) {
9776 printf("%s: could not initialize hardware\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
9777 return err;
9778 }
9779
9780 /* Restart, this time with the regular firmware */
9781 s = splnet()splraise(0x7);
9782 err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
9783 if (err) {
9784 printf("%s: could not load firmware\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
9785 splx(s)spllower(s);
9786 return err;
9787 }
9788
9789 if (!iwm_nic_lock(sc)) {
9790 splx(s)spllower(s);
9791 return EBUSY16;
9792 }
9793
9794 err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
9795 if (err) {
9796 printf("%s: could not init tx ant config (error %d)\n",
9797 DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
9798 goto err;
9799 }
9800
9801 err = iwm_send_phy_db_data(sc);
9802 if (err) {
9803 printf("%s: could not init phy db (error %d)\n",
9804 DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
9805 goto err;
9806 }
9807
9808 err = iwm_send_phy_cfg_cmd(sc);
9809 if (err) {
9810 printf("%s: could not send phy config (error %d)\n",
9811 DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
9812 goto err;
9813 }
9814
9815 err = iwm_send_bt_init_conf(sc);
9816 if (err) {
9817 printf("%s: could not init bt coex (error %d)\n",
9818 DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
9819 goto err;
9820 }
9821
9822 if (isset(sc->sc_enabled_capa,
9823 IWM_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT)((sc->sc_enabled_capa)[(37)>>3] & (1<<((37)&(8 -1))))) {
9824 err = iwm_send_soc_conf(sc);
9825 if (err)
9826 goto err;
9827 }
9828
9829 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)((sc->sc_enabled_capa)[(12)>>3] & (1<<((12)&(8 -1))))) {
9830 err = iwm_send_dqa_cmd(sc);
9831 if (err)
9832 goto err;
9833 }
9834
9835 /* Add auxiliary station for scanning */
9836 err = iwm_add_aux_sta(sc);
9837 if (err) {
9838 printf("%s: could not add aux station (error %d)\n",
9839 DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
9840 goto err;
9841 }
9842
9843 for (i = 0; i < IWM_NUM_PHY_CTX3; i++) {
9844 /*
9845 * The channel used here isn't relevant as it's
9846 * going to be overwritten in the other flows.
9847 * For now use the first channel we have.
9848 */
9849 sc->sc_phyctxt[i].id = i;
9850 sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
9851 err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
9852 IWM_FW_CTXT_ACTION_ADD1, 0, IEEE80211_HTOP0_SCO_SCN0);
9853 if (err) {
9854 printf("%s: could not add phy context %d (error %d)\n",
9855 DEVNAME(sc)((sc)->sc_dev.dv_xname), i, err);
9856 goto err;
9857 }
9858 }
9859
9860 /* Initialize tx backoffs to the minimum. */
9861 if (sc->sc_device_family == IWM_DEVICE_FAMILY_70001)
9862 iwm_tt_tx_backoff(sc, 0);
9863
9864
9865 err = iwm_config_ltr(sc);
9866 if (err) {
9867 printf("%s: PCIe LTR configuration failed (error %d)\n",
9868 DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
9869 }
9870
9871 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_CT_KILL_BY_FW)((sc->sc_enabled_capa)[(74)>>3] & (1<<((74)&(8 -1))))) {
9872 err = iwm_send_temp_report_ths_cmd(sc);
9873 if (err)
9874 goto err;
9875 }
9876
9877 err = iwm_power_update_device(sc);
9878 if (err) {
9879 printf("%s: could not send power command (error %d)\n",
9880 DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
9881 goto err;
9882 }
9883
9884 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)((sc->sc_enabled_capa)[(1)>>3] & (1<<((1)&(8 -1))))) {
9885 err = iwm_send_update_mcc_cmd(sc, "ZZ");
9886 if (err) {
9887 printf("%s: could not init LAR (error %d)\n",
9888 DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
9889 goto err;
9890 }
9891 }
9892
9893 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)((sc->sc_enabled_capa)[(2)>>3] & (1<<((2)&(8 -1))))) {
9894 err = iwm_config_umac_scan(sc);
9895 if (err) {
9896 printf("%s: could not configure scan (error %d)\n",
9897 DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
9898 goto err;
9899 }
9900 }
9901
9902 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
9903 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)((sc->sc_enabled_capa)[(12)>>3] & (1<<((12)&(8 -1)))))
9904 qid = IWM_DQA_INJECT_MONITOR_QUEUE2;
9905 else
9906 qid = IWM_AUX_QUEUE15;
9907 err = iwm_enable_txq(sc, IWM_MONITOR_STA_ID2, qid,
9908 iwm_ac_to_tx_fifo[EDCA_AC_BE], 0, IWM_MAX_TID_COUNT8, 0);
9909 if (err) {
9910 printf("%s: could not enable monitor inject Tx queue "
9911 "(error %d)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
9912 goto err;
9913 }
9914 } else {
9915 for (ac = 0; ac < EDCA_NUM_AC4; ac++) {
9916 if (isset(sc->sc_enabled_capa,
9917 IWM_UCODE_TLV_CAPA_DQA_SUPPORT)((sc->sc_enabled_capa)[(12)>>3] & (1<<((12)&(8 -1)))))
9918 qid = ac + IWM_DQA_MIN_MGMT_QUEUE5;
9919 else
9920 qid = ac;
9921 err = iwm_enable_txq(sc, IWM_STATION_ID0, qid,
9922 iwm_ac_to_tx_fifo[ac], 0, IWM_TID_NON_QOS0, 0);
9923 if (err) {
9924 printf("%s: could not enable Tx queue %d "
9925 "(error %d)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), ac, err);
9926 goto err;
9927 }
9928 }
9929 }
9930
9931 err = iwm_disable_beacon_filter(sc);
9932 if (err) {
9933 printf("%s: could not disable beacon filter (error %d)\n",
9934 DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
9935 goto err;
9936 }
9937
9938err:
9939 iwm_nic_unlock(sc);
9940 splx(s)spllower(s);
9941 return err;
9942}
9943
9944/* Allow multicast from our BSSID. */
9945int
9946iwm_allow_mcast(struct iwm_softc *sc)
9947{
9948 struct ieee80211com *ic = &sc->sc_ic;
9949 struct iwm_node *in = (void *)ic->ic_bss;
9950 struct iwm_mcast_filter_cmd *cmd;
9951 size_t size;
9952 int err;
9953
9954 size = roundup(sizeof(*cmd), 4)((((sizeof(*cmd))+((4)-1))/(4))*(4));
9955 cmd = malloc(size, M_DEVBUF2, M_NOWAIT0x0002 | M_ZERO0x0008);
9956 if (cmd == NULL((void *)0))
9957 return ENOMEM12;
9958 cmd->filter_own = 1;
9959 cmd->port_id = 0;
9960 cmd->count = 0;
9961 cmd->pass_all = 1;
9962 IEEE80211_ADDR_COPY(cmd->bssid, in->in_macaddr)__builtin_memcpy((cmd->bssid), (in->in_macaddr), (6));
9963
9964 err = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD0xd0,
9965 0, size, cmd);
9966 free(cmd, M_DEVBUF2, size);
9967 return err;
9968}
9969
9970 int
9971 iwm_init(struct ifnet *ifp)
9972 {
9973 	struct iwm_softc *sc = ifp->if_softc;
9974 	struct ieee80211com *ic = &sc->sc_ic;
9975 	int err, generation;
9976 
9977 	rw_assert_wrlock(&sc->ioctl_rwl);
9978 
9979 	generation = ++sc->sc_generation;
9980 
9981 	KASSERT(sc->task_refs.refs == 0);
9982 	refcnt_init(&sc->task_refs);
9983 
9984 	err = iwm_preinit(sc);
9985 	if (err)
9986 		return err;
9987 
9988 	err = iwm_start_hw(sc);
9989 	if (err) {
9990 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
9991 		return err;
9992 	}
9993 
9994 	err = iwm_init_hw(sc);
9995 	if (err) {
9996 		if (generation == sc->sc_generation)
9997 			iwm_stop(ifp);
9998 		return err;
9999 	}
10000 
10001 	if (sc->sc_nvm.sku_cap_11n_enable)
10002 		iwm_setup_ht_rates(sc);
10003 
10004 	ifq_clr_oactive(&ifp->if_snd);
10005 	ifp->if_flags |= IFF_RUNNING;
10006 
10007 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
10008 		ic->ic_bss->ni_chan = ic->ic_ibss_chan;
10009 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
10010 		return 0;
10011 	}
10012 
10013 	ieee80211_begin_scan(ifp);
10014 
10015 	/*
10016 	 * ieee80211_begin_scan() ends up scheduling iwm_newstate_task().
10017 	 * Wait until the transition to SCAN state has completed.
10018 	 */
10019 	do {
10020 		err = tsleep_nsec(&ic->ic_state, PCATCH, "iwminit",
10021 		    SEC_TO_NSEC(1));
10022 		if (generation != sc->sc_generation)
10023 			return ENXIO;
10024 		if (err) {
10025 			iwm_stop(ifp);
10026 			return err;
10027 		}
10028 	} while (ic->ic_state != IEEE80211_S_SCAN);
10029 
10030 	return 0;
10031 }
10032
10033 void
10034 iwm_start(struct ifnet *ifp)
10035 {
10036 	struct iwm_softc *sc = ifp->if_softc;
10037 	struct ieee80211com *ic = &sc->sc_ic;
10038 	struct ieee80211_node *ni;
10039 	struct ether_header *eh;
10040 	struct mbuf *m;
10041 	int ac = EDCA_AC_BE; /* XXX */
10042 
10043 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
10044 		return;
10045 
10046 	for (;;) {
10047 		/* why isn't this done per-queue? */
10048 		if (sc->qfullmsk != 0) {
10049 			ifq_set_oactive(&ifp->if_snd);
10050 			break;
10051 		}
10052 
10053 		/* Don't queue additional frames while flushing Tx queues. */
10054 		if (sc->sc_flags & IWM_FLAG_TXFLUSH)
10055 			break;
10056 
10057 		/* need to send management frames even if we're not RUNning */
10058 		m = mq_dequeue(&ic->ic_mgtq);
10059 		if (m) {
10060 			ni = m->m_pkthdr.ph_cookie;
10061 			goto sendit;
10062 		}
10063 
10064 		if (ic->ic_state != IEEE80211_S_RUN ||
10065 		    (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
10066 			break;
10067 
10068 		m = ifq_dequeue(&ifp->if_snd);
10069 		if (!m)
10070 			break;
10071 		if (m->m_len < sizeof (*eh) &&
10072 		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
10073 			ifp->if_oerrors++;
10074 			continue;
10075 		}
10076 #if NBPFILTER > 0
10077 		if (ifp->if_bpf != NULL)
10078 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
10079 #endif
10080 		if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
10081 			ifp->if_oerrors++;
10082 			continue;
10083 		}
10084 
10085  sendit:
10086 #if NBPFILTER > 0
10087 		if (ic->ic_rawbpf != NULL)
10088 			bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
10089 #endif
10090 		if (iwm_tx(sc, m, ni, ac) != 0) {
10091 			ieee80211_release_node(ic, ni);
10092 			ifp->if_oerrors++;
10093 			continue;
10094 		}
10095 
10096 		if (ifp->if_flags & IFF_UP)
10097 			ifp->if_timer = 1;
10098 	}
10099 
10100 	return;
10101 }
10102
10103 void
10104 iwm_stop(struct ifnet *ifp)
10105 {
10106 	struct iwm_softc *sc = ifp->if_softc;
10107 	struct ieee80211com *ic = &sc->sc_ic;
10108 	struct iwm_node *in = (void *)ic->ic_bss;
10109 	int i, s = splnet();
10110 
10111 	rw_assert_wrlock(&sc->ioctl_rwl);
10112 
10113 	sc->sc_flags |= IWM_FLAG_SHUTDOWN; /* Disallow new tasks. */
10114 
10115 	/* Cancel scheduled tasks and let any stale tasks finish up. */
10116 	task_del(systq, &sc->init_task);
10117 	iwm_del_task(sc, sc->sc_nswq, &sc->newstate_task);
10118 	iwm_del_task(sc, systq, &sc->ba_task);
10119 	iwm_del_task(sc, systq, &sc->mac_ctxt_task);
10120 	iwm_del_task(sc, systq, &sc->phy_ctxt_task);
10121 	iwm_del_task(sc, systq, &sc->bgscan_done_task);
10122 	KASSERT(sc->task_refs.refs >= 1);
10123 	refcnt_finalize(&sc->task_refs, "iwmstop");
10124 
10125 	iwm_stop_device(sc);
10126 
10127 	free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
10128 	sc->bgscan_unref_arg = NULL;
10129 	sc->bgscan_unref_arg_size = 0;
10130 
10131 	/* Reset soft state. */
10132 
10133 	sc->sc_generation++;
10134 	for (i = 0; i < nitems(sc->sc_cmd_resp_pkt); i++) {
10135 		free(sc->sc_cmd_resp_pkt[i], M_DEVBUF, sc->sc_cmd_resp_len[i]);
10136 		sc->sc_cmd_resp_pkt[i] = NULL;
10137 		sc->sc_cmd_resp_len[i] = 0;
10138 	}
10139 	ifp->if_flags &= ~IFF_RUNNING;
10140 	ifq_clr_oactive(&ifp->if_snd);
10141 
10142 	in->in_phyctxt = NULL;
10143 	in->tid_disable_ampdu = 0xffff;
10144 	in->tfd_queue_msk = 0;
10145 	IEEE80211_ADDR_COPY(in->in_macaddr, etheranyaddr);
10146 
10147 	sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
10148 	sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
10149 	sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
10150 	sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
10151 	sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
10152 	sc->sc_flags &= ~IWM_FLAG_HW_ERR;
10153 	sc->sc_flags &= ~IWM_FLAG_SHUTDOWN;
10154 	sc->sc_flags &= ~IWM_FLAG_TXFLUSH;
10155 
10156 	sc->sc_rx_ba_sessions = 0;
10157 	sc->ba_rx.start_tidmask = 0;
10158 	sc->ba_rx.stop_tidmask = 0;
10159 	sc->tx_ba_queue_mask = 0;
10160 	sc->ba_tx.start_tidmask = 0;
10161 	sc->ba_tx.stop_tidmask = 0;
10162 
10163 	sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
10164 	sc->ns_nstate = IEEE80211_S_INIT;
10165 
10166 	timeout_del(&sc->sc_calib_to); /* XXX refcount? */
10167 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
10168 		struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
10169 		iwm_clear_reorder_buffer(sc, rxba);
10170 	}
10171 	iwm_led_blink_stop(sc);
10172 	memset(sc->sc_tx_timer, 0, sizeof(sc->sc_tx_timer));
10173 	ifp->if_timer = 0;
10174 
10175 	splx(s);
10176 }
10177
10178 void
10179 iwm_watchdog(struct ifnet *ifp)
10180 {
10181 	struct iwm_softc *sc = ifp->if_softc;
10182 	int i;
10183 
10184 	ifp->if_timer = 0;
10185 
10186 	/*
10187 	 * We maintain a separate timer for each Tx queue because
10188 	 * Tx aggregation queues can get "stuck" while other queues
10189 	 * keep working. The Linux driver uses a similar workaround.
10190 	 */
10191 	for (i = 0; i < nitems(sc->sc_tx_timer); i++) {
10192 		if (sc->sc_tx_timer[i] > 0) {
10193 			if (--sc->sc_tx_timer[i] == 0) {
10194 				printf("%s: device timeout\n", DEVNAME(sc));
10195 				if (ifp->if_flags & IFF_DEBUG) {
10196 					iwm_nic_error(sc);
10197 					iwm_dump_driver_status(sc);
10198 				}
10199 				if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
10200 					task_add(systq, &sc->init_task);
10201 				ifp->if_oerrors++;
10202 				return;
10203 			}
10204 			ifp->if_timer = 1;
10205 		}
10206 	}
10207 
10208 	ieee80211_watchdog(ifp);
10209 }
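/*
 * Editor's note -- illustrative sketch, not driver code: each slot in
 * sc_tx_timer[] is a per-queue countdown, presumably armed by the Tx
 * path when a frame is queued (iwm_tx() is not shown in this section).
 * iwm_watchdog() runs roughly once per second while ifp->if_timer is
 * nonzero, so a slot reaching zero means a frame sat on that queue for
 * the whole countdown.  Reduced to its core:
 */
#if 0
void
example_watchdog_tick(int *timers, int ntimers)
{
	int i;

	for (i = 0; i < ntimers; i++) {
		if (timers[i] > 0 && --timers[i] == 0)
			printf("queue %d timed out\n", i);	/* then recover */
	}
}
#endif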
10210
10211 int
10212 iwm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
10213 {
10214 	struct iwm_softc *sc = ifp->if_softc;
10215 	int s, err = 0, generation = sc->sc_generation;
10216 
10217 	/*
10218 	 * Prevent processes from entering this function while another
10219 	 * process is tsleep'ing in it.
10220 	 */
10221 	err = rw_enter(&sc->ioctl_rwl, RW_WRITE | RW_INTR);
10222 	if (err == 0 && generation != sc->sc_generation) {
10223 		rw_exit(&sc->ioctl_rwl);
10224 		return ENXIO;
10225 	}
10226 	if (err)
10227 		return err;
10228 	s = splnet();
10229 
10230 	switch (cmd) {
10231 	case SIOCSIFADDR:
10232 		ifp->if_flags |= IFF_UP;
10233 		/* FALLTHROUGH */
10234 	case SIOCSIFFLAGS:
10235 		if (ifp->if_flags & IFF_UP) {
10236 			if (!(ifp->if_flags & IFF_RUNNING)) {
10237 				/* Force reload of firmware image from disk. */
10238 				sc->sc_fw.fw_status = IWM_FW_STATUS_NONE;
10239 				err = iwm_init(ifp);
10240 			}
10241 		} else {
10242 			if (ifp->if_flags & IFF_RUNNING)
10243 				iwm_stop(ifp);
10244 		}
10245 		break;
10246 
10247 	default:
10248 		err = ieee80211_ioctl(ifp, cmd, data);
10249 	}
10250 
10251 	if (err == ENETRESET) {
10252 		err = 0;
10253 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
10254 		    (IFF_UP | IFF_RUNNING)) {
10255 			iwm_stop(ifp);
10256 			err = iwm_init(ifp);
10257 		}
10258 	}
10259 
10260 	splx(s);
10261 	rw_exit(&sc->ioctl_rwl);
10262 
10263 	return err;
10264 }
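/*
 * Editor's note -- illustrative sketch, not driver code: ENETRESET is
 * the conventional return from ieee80211_ioctl() meaning "configuration
 * changed; re-init the interface rather than fail".  The handling above,
 * in isolation (example_stop/example_init are hypothetical stand-ins):
 */
#if 0
int
example_handle_enetreset(struct ifnet *ifp, int err)
{
	if (err == ENETRESET) {
		err = 0;
		/* Restart only if the interface is both UP and RUNNING. */
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING)) {
			example_stop(ifp);
			err = example_init(ifp);
		}
	}
	return err;
}
#endif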
10265
10266 /*
10267  * Note: This structure is read from the device with IO accesses,
10268  * and the reading already does the endian conversion. As it is
10269  * read with uint32_t-sized accesses, any members with a different size
10270  * need to be ordered correctly though!
10271  */
10272 struct iwm_error_event_table {
10273 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
10274 	uint32_t error_id;	/* type of error */
10275 	uint32_t trm_hw_status0;	/* TRM HW status */
10276 	uint32_t trm_hw_status1;	/* TRM HW status */
10277 	uint32_t blink2;	/* branch link */
10278 	uint32_t ilink1;	/* interrupt link */
10279 	uint32_t ilink2;	/* interrupt link */
10280 	uint32_t data1;		/* error-specific data */
10281 	uint32_t data2;		/* error-specific data */
10282 	uint32_t data3;		/* error-specific data */
10283 	uint32_t bcon_time;	/* beacon timer */
10284 	uint32_t tsf_low;	/* network timestamp function timer */
10285 	uint32_t tsf_hi;	/* network timestamp function timer */
10286 	uint32_t gp1;		/* GP1 timer register */
10287 	uint32_t gp2;		/* GP2 timer register */
10288 	uint32_t fw_rev_type;	/* firmware revision type */
10289 	uint32_t major;		/* uCode version major */
10290 	uint32_t minor;		/* uCode version minor */
10291 	uint32_t hw_ver;	/* HW Silicon version */
10292 	uint32_t brd_ver;	/* HW board version */
10293 	uint32_t log_pc;	/* log program counter */
10294 	uint32_t frame_ptr;	/* frame pointer */
10295 	uint32_t stack_ptr;	/* stack pointer */
10296 	uint32_t hcmd;		/* last host command header */
10297 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
10298 				 * rxtx_flag */
10299 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
10300 				 * host_flag */
10301 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
10302 				 * enc_flag */
10303 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
10304 				 * time_flag */
10305 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
10306 				 * wico interrupt */
10307 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
10308 	uint32_t wait_event;	/* wait event() caller address */
10309 	uint32_t l2p_control;	/* L2pControlField */
10310 	uint32_t l2p_duration;	/* L2pDurationField */
10311 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
10312 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
10313 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
10314 				 * (LMPM_PMG_SEL) */
10315 	uint32_t u_timestamp;	/* indicate when the date and time of the
10316 				 * compilation */
10317 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
10318 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
10319
10320 /*
10321  * UMAC error struct - relevant starting from family 8000 chip.
10322  * Note: This structure is read from the device with IO accesses,
10323  * and the reading already does the endian conversion. As it is
10324  * read with u32-sized accesses, any members with a different size
10325  * need to be ordered correctly though!
10326  */
10327 struct iwm_umac_error_event_table {
10328 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
10329 	uint32_t error_id;	/* type of error */
10330 	uint32_t blink1;	/* branch link */
10331 	uint32_t blink2;	/* branch link */
10332 	uint32_t ilink1;	/* interrupt link */
10333 	uint32_t ilink2;	/* interrupt link */
10334 	uint32_t data1;		/* error-specific data */
10335 	uint32_t data2;		/* error-specific data */
10336 	uint32_t data3;		/* error-specific data */
10337 	uint32_t umac_major;
10338 	uint32_t umac_minor;
10339 	uint32_t frame_pointer;	/* core register 27 */
10340 	uint32_t stack_pointer;	/* core register 28 */
10341 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
10342 	uint32_t nic_isr_pref;	/* ISR status register */
10343 } __packed;
10344 
10345 #define ERROR_START_OFFSET	(1 * sizeof(uint32_t))
10346 #define ERROR_ELEM_SIZE		(7 * sizeof(uint32_t))
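/*
 * Editor's note, not driver code: with these definitions the guard used
 * below, ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE, reduces to
 * 4 <= 28 * table.valid, i.e.:
 *
 *	table.valid == 0:  4 <= 0   -> false, skip the log-dump banner
 *	table.valid == 1:  4 <= 28  -> true, print it
 *
 * so it is effectively a nonzero test on `valid'.
 */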
10347
10348 void
10349 iwm_nic_umac_error(struct iwm_softc *sc)
10350 {
10351 	struct iwm_umac_error_event_table table;
10352 	uint32_t base;
10353 
10354 	base = sc->sc_uc.uc_umac_error_event_table;
10355 
10356 	if (base < 0x800000) {
10357 		printf("%s: Invalid error log pointer 0x%08x\n",
10358 		    DEVNAME(sc), base);
10359 		return;
10360 	}
10361 
10362 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
10363 		printf("%s: reading errlog failed\n", DEVNAME(sc));
10364 		return;
10365 	}
10366 
10367 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
10368 		printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
10369 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
10370 		    sc->sc_flags, table.valid);
10371 	}
10372 
10373 	printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
10374 	    iwm_desc_lookup(table.error_id));
10375 	printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
10376 	printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
10377 	printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
10378 	printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
10379 	printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
10380 	printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
10381 	printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
10382 	printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
10383 	printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
10384 	printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
10385 	    table.frame_pointer);
10386 	printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
10387 	    table.stack_pointer);
10388 	printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
10389 	printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
10390 	    table.nic_isr_pref);
10391 }
10392
10393 #define IWM_FW_SYSASSERT_CPU_MASK 0xf0000000
10394 static struct {
10395 	const char *name;
10396 	uint8_t num;
10397 } advanced_lookup[] = {
10398 	{ "NMI_INTERRUPT_WDG", 0x34 },
10399 	{ "SYSASSERT", 0x35 },
10400 	{ "UCODE_VERSION_MISMATCH", 0x37 },
10401 	{ "BAD_COMMAND", 0x38 },
10402 	{ "BAD_COMMAND", 0x39 },
10403 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
10404 	{ "FATAL_ERROR", 0x3D },
10405 	{ "NMI_TRM_HW_ERR", 0x46 },
10406 	{ "NMI_INTERRUPT_TRM", 0x4C },
10407 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
10408 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
10409 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
10410 	{ "NMI_INTERRUPT_HOST", 0x66 },
10411 	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
10412 	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
10413 	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
10414 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
10415 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
10416 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
10417 	{ "ADVANCED_SYSASSERT", 0 },
10418 };
10419 
10420 const char *
10421 iwm_desc_lookup(uint32_t num)
10422 {
10423 	int i;
10424 
10425 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
10426 		if (advanced_lookup[i].num ==
10427 		    (num & ~IWM_FW_SYSASSERT_CPU_MASK))
10428 			return advanced_lookup[i].name;
10429 
10430 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
10431 	return advanced_lookup[i].name;
10432 }
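/*
 * Editor's worked example, not driver code: the CPU mask strips the
 * firmware's CPU tag (top nibble) before the table scan, so
 *
 *	iwm_desc_lookup(0x35)        -> "SYSASSERT"
 *	iwm_desc_lookup(0x10000035)  -> "SYSASSERT"
 *	                                (0x10000035 & ~0xf0000000 == 0x35)
 *	iwm_desc_lookup(0x99)        -> "ADVANCED_SYSASSERT"
 *	                                (no match; the final catch-all entry)
 */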
10433
10434 /*
10435  * Support for dumping the error log seemed like a good idea ...
10436  * but it's mostly hex junk and the only sensible thing is the
10437  * hw/ucode revision (which we know anyway). Since it's here,
10438  * I'll just leave it in, just in case e.g. the Intel guys want to
10439  * help us decipher some "ADVANCED_SYSASSERT" later.
10440  */
10441 void
10442 iwm_nic_error(struct iwm_softc *sc)
10443 {
10444 	struct iwm_error_event_table table;
10445 	uint32_t base;
10446 
10447 	printf("%s: dumping device error log\n", DEVNAME(sc));
10448 	base = sc->sc_uc.uc_error_event_table;
10449 	if (base < 0x800000) {
10450 		printf("%s: Invalid error log pointer 0x%08x\n",
10451 		    DEVNAME(sc), base);
10452 		return;
10453 	}
10454 
10455 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
10456 		printf("%s: reading errlog failed\n", DEVNAME(sc));
10457 		return;
10458 	}
10459 
10460 	if (!table.valid) {
10461 		printf("%s: errlog not found, skipping\n", DEVNAME(sc));
10462 		return;
10463 	}
10464 
10465 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
10466 		printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
10467 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
10468 		    sc->sc_flags, table.valid);
10469 	}
10470 
10471 	printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
10472 	    iwm_desc_lookup(table.error_id));
10473 	printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
10474 	    table.trm_hw_status0);
10475 	printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
10476 	    table.trm_hw_status1);
10477 	printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
10478 	printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
10479 	printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
10480 	printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
10481 	printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
10482 	printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
10483 	printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
10484 	printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
10485 	printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
10486 	printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
10487 	printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
10488 	printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
10489 	    table.fw_rev_type);
10490 	printf("%s: %08X | uCode version major\n", DEVNAME(sc),
10491 	    table.major);
10492 	printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
10493 	    table.minor);
10494 	printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
10495 	printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
10496 	printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
10497 	printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
10498 	printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
10499 	printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
10500 	printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
10501 	printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
10502 	printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
10503 	printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
10504 	printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
10505 	printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
10506 	printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
10507 	printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
10508 	printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
10509 	printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
10510 	printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
10511 
10512 	if (sc->sc_uc.uc_umac_error_event_table)
10513 		iwm_nic_umac_error(sc);
10514 }
10515
10516 void
10517 iwm_dump_driver_status(struct iwm_softc *sc)
10518 {
10519 	int i;
10520 
10521 	printf("driver status:\n");
10522 	for (i = 0; i < IWM_MAX_QUEUES; i++) {
10523 		struct iwm_tx_ring *ring = &sc->txq[i];
10524 		printf(" tx ring %2d: qid=%-2d cur=%-3d "
10525 		    "queued=%-3d\n",
10526 		    i, ring->qid, ring->cur, ring->queued);
10527 	}
10528 	printf(" rx ring: cur=%d\n", sc->rxq.cur);
10529 	printf(" 802.11 state %s\n",
10530 	    ieee80211_state_name[sc->sc_ic.ic_state]);
10531 }
10532
10533 #define SYNC_RESP_STRUCT(_var_, _pkt_)					\
10534 do {									\
10535 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
10536 	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
10537 	_var_ = (void *)((_pkt_)+1);					\
10538 } while (/*CONSTCOND*/0)
10539 
10540 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
10541 do {									\
10542 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
10543 	    sizeof(len), BUS_DMASYNC_POSTREAD);				\
10544 	_ptr_ = (void *)((_pkt_)+1);					\
10545 } while (/*CONSTCOND*/0)
10546 
10547 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % count);
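/*
 * Editor's note -- illustrative sketch, not driver code: ADVANCE_RXQ
 * (like SYNC_RESP_PTR's use of `len') quietly captures a local variable,
 * here `count', from the expansion site.  The advance itself is plain
 * modular arithmetic; both ring sizes used below (256 and 512) are
 * powers of two, so a mask would be equivalent:
 */
#if 0
static inline unsigned int
example_ring_advance(unsigned int cur, unsigned int count)
{
	/* (cur + 1) % count == (cur + 1) & (count - 1) for count == 2^n;
	 * e.g. 255 -> 0 when count == 256. */
	return (cur + 1) % count;
}
#endif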
10548
10549 int
10550 iwm_rx_pkt_valid(struct iwm_rx_packet *pkt)
10551 {
10552 	int qid, idx, code;
10553 
10554 	qid = pkt->hdr.qid & ~0x80;
10555 	idx = pkt->hdr.idx;
10556 	code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
10557 
10558 	return (!(qid == 0 && idx == 0 && code == 0) &&
10559 	    pkt->len_n_flags != htole32(IWM_FH_RSCSR_FRAME_INVALID));
10560 }
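/*
 * Editor's note, not driver code: read as a predicate, a slot is valid
 * unless it is all-zero in (qid, idx, code) -- an unwritten slot -- or
 * carries the firmware's explicit invalid-frame marker
 * (IWM_FH_RSCSR_FRAME_INVALID, 0x55550000 per the expansion in this
 * build).  Either condition alone is enough to reject the packet.
 */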
10561
10562 void
10563 iwm_rx_pkt(struct iwm_softc *sc, struct iwm_rx_data *data, struct mbuf_list *ml)
10564 {
10565 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
10566 	struct iwm_rx_packet *pkt, *nextpkt;
10567 	uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
10568 	struct mbuf *m0, *m;
10569 	const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
10570 	int qid, idx, code, handled = 1;
10571 
10572 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
10573 	    BUS_DMASYNC_POSTREAD);
10574 
10575 	m0 = data->m;
10576 	while (m0 && offset + minsz < IWM_RBUF_SIZE) {
10577 		pkt = (struct iwm_rx_packet *)(m0->m_data + offset);
10578 		qid = pkt->hdr.qid;
10579 		idx = pkt->hdr.idx;
10580 
10581 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
10582 
10583 		if (!iwm_rx_pkt_valid(pkt))
10584 			break;
10585 
10586 		len = sizeof(pkt->len_n_flags) + iwm_rx_packet_len(pkt);
10587 		if (len < minsz || len > (IWM_RBUF_SIZE - offset))
10588 			break;
10589 
10590 		if (code == IWM_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
10591 			/* Take mbuf m0 off the RX ring. */
10592 			if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur)) {
10593 				ifp->if_ierrors++;
10594 				break;
10595 			}
10596 			KASSERT(data->m != m0);
10597 		}
10598 
10599 		switch (code) {
10600 		case IWM_REPLY_RX_PHY_CMD:
10601 			iwm_rx_rx_phy_cmd(sc, pkt, data);
10602 			break;
10603 
10604 		case IWM_REPLY_RX_MPDU_CMD: {
10605 			size_t maxlen = IWM_RBUF_SIZE - offset - minsz;
10606 			nextoff = offset +
10607 			    roundup(len, IWM_FH_RSCSR_FRAME_ALIGN);
10608 			nextpkt = (struct iwm_rx_packet *)
10609 			    (m0->m_data + nextoff);
10610 			if (nextoff + minsz >= IWM_RBUF_SIZE ||
10611 			    !iwm_rx_pkt_valid(nextpkt)) {
10612 				/* No need to copy last frame in buffer. */
10613 				if (offset > 0)
10614 					m_adj(m0, offset);
10615 				if (sc->sc_mqrx_supported)
10616 					iwm_rx_mpdu_mq(sc, m0, pkt->data,
10617 					    maxlen, ml);
10618 				else
10619 					iwm_rx_mpdu(sc, m0, pkt->data,
10620 					    maxlen, ml);
10621 				m0 = NULL; /* stack owns m0 now; abort loop */
10622 			} else {
10623 				/*
10624 				 * Create an mbuf which points to the current
10625 				 * packet. Always copy from offset zero to
10626 				 * preserve m_pkthdr.
10627 				 */
10628 				m = m_copym(m0, 0, M_COPYALL, M_DONTWAIT);
10629 				if (m == NULL) {
10630 					ifp->if_ierrors++;
10631 					m_freem(m0);
10632 					m0 = NULL;
10633 					break;
10634 				}
10635 				m_adj(m, offset);
10636 				if (sc->sc_mqrx_supported)
10637 					iwm_rx_mpdu_mq(sc, m, pkt->data,
10638 					    maxlen, ml);
10639 				else
10640 					iwm_rx_mpdu(sc, m, pkt->data,
10641 					    maxlen, ml);
10642 			}
10643 			break;
10644 		}
10645 
10646 		case IWM_TX_CMD:
10647 			iwm_rx_tx_cmd(sc, pkt, data);
10648 			break;
10649 
10650 		case IWM_BA_NOTIF:
10651 			iwm_rx_compressed_ba(sc, pkt);
10652 			break;
10653 
10654 		case IWM_MISSED_BEACONS_NOTIFICATION:
10655 			iwm_rx_bmiss(sc, pkt, data);
10656 			break;
10657 
10658 		case IWM_MFUART_LOAD_NOTIFICATION:
10659 			break;
10660 
10661 		case IWM_ALIVE: {
10662 			struct iwm_alive_resp_v1 *resp1;
10663 			struct iwm_alive_resp_v2 *resp2;
10664 			struct iwm_alive_resp_v3 *resp3;
10665 
10666 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
10667 				SYNC_RESP_STRUCT(resp1, pkt);
10668 				sc->sc_uc.uc_error_event_table
10669 				    = le32toh(resp1->error_event_table_ptr);
10670 				sc->sc_uc.uc_log_event_table
10671 				    = le32toh(resp1->log_event_table_ptr);
10672 				sc->sched_base = le32toh(resp1->scd_base_ptr);
10673 				if (resp1->status == IWM_ALIVE_STATUS_OK)
10674 					sc->sc_uc.uc_ok = 1;
10675 				else
10676 					sc->sc_uc.uc_ok = 0;
10677 			}
10678 
10679 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
10680 				SYNC_RESP_STRUCT(resp2, pkt);
10681 				sc->sc_uc.uc_error_event_table
10682 				    = le32toh(resp2->error_event_table_ptr);
10683 				sc->sc_uc.uc_log_event_table
10684 				    = le32toh(resp2->log_event_table_ptr);
10685 				sc->sched_base = le32toh(resp2->scd_base_ptr);
10686 				sc->sc_uc.uc_umac_error_event_table
10687 				    = le32toh(resp2->error_info_addr);
10688 				if (resp2->status == IWM_ALIVE_STATUS_OK)
10689 					sc->sc_uc.uc_ok = 1;
10690 				else
10691 					sc->sc_uc.uc_ok = 0;
10692 			}
10693 
10694 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
10695 				SYNC_RESP_STRUCT(resp3, pkt);
10696 				sc->sc_uc.uc_error_event_table
10697 				    = le32toh(resp3->error_event_table_ptr);
10698 				sc->sc_uc.uc_log_event_table
10699 				    = le32toh(resp3->log_event_table_ptr);
10700 				sc->sched_base = le32toh(resp3->scd_base_ptr);
10701 				sc->sc_uc.uc_umac_error_event_table
10702 				    = le32toh(resp3->error_info_addr);
10703 				if (resp3->status == IWM_ALIVE_STATUS_OK)
10704 					sc->sc_uc.uc_ok = 1;
10705 				else
10706 					sc->sc_uc.uc_ok = 0;
10707 			}
10708 
10709 			sc->sc_uc.uc_intr = 1;
10710 			wakeup(&sc->sc_uc);
10711 			break;
10712 		}
10713 
10714 		case IWM_CALIB_RES_NOTIF_PHY_DB: {
10715 			struct iwm_calib_res_notif_phy_db *phy_db_notif;
10716 			SYNC_RESP_STRUCT(phy_db_notif, pkt);
10717 			iwm_phy_db_set_section(sc, phy_db_notif);
10718 			sc->sc_init_complete |= IWM_CALIB_COMPLETE;
10719 			wakeup(&sc->sc_init_complete);
10720 			break;
10721 		}
10722 
10723 		case IWM_STATISTICS_NOTIFICATION: {
10724 			struct iwm_notif_statistics *stats;
10725 			SYNC_RESP_STRUCT(stats, pkt);
10726 			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
10727 			sc->sc_noise = iwm_get_noise(&stats->rx.general);
10728 			break;
10729 		}
10730 
10731 		case IWM_MCC_CHUB_UPDATE_CMD: {
10732 			struct iwm_mcc_chub_notif *notif;
10733 			SYNC_RESP_STRUCT(notif, pkt);
10734 			iwm_mcc_update(sc, notif);
10735 			break;
10736 		}
10737 
10738 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
10739 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
10740 		    IWM_DTS_MEASUREMENT_NOTIF_WIDE):
10741 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
10742 		    IWM_TEMP_REPORTING_THRESHOLDS_CMD):
10743 			break;
10744 
10745 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
10746 		    IWM_CT_KILL_NOTIFICATION): {
10747 			struct iwm_ct_kill_notif *notif;
10748 			SYNC_RESP_STRUCT(notif, pkt);
10749 			printf("%s: device at critical temperature (%u degC), "
10750 			    "stopping device\n",
10751 			    DEVNAME(sc), le16toh(notif->temperature));
10752 			sc->sc_flags |= IWM_FLAG_HW_ERR;
10753 			task_add(systq, &sc->init_task);
10754 			break;
10755 		}
10756 
10757 		case IWM_ADD_STA_KEY:
10758 		case IWM_PHY_CONFIGURATION_CMD:
10759 		case IWM_TX_ANT_CONFIGURATION_CMD:
10760 		case IWM_ADD_STA:
10761 		case IWM_MAC_CONTEXT_CMD:
10762 		case IWM_REPLY_SF_CFG_CMD:
10763 		case IWM_POWER_TABLE_CMD:
10764 		case IWM_LTR_CONFIG:
10765 		case IWM_PHY_CONTEXT_CMD:
10766 		case IWM_BINDING_CONTEXT_CMD:
10767 		case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_CFG_CMD):
10768 		case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_REQ_UMAC):
10769 		case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
10770 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
10771 		case IWM_SCAN_OFFLOAD_ABORT_CMD:
10772 		case IWM_REPLY_BEACON_FILTERING_CMD:
10773 		case IWM_MAC_PM_POWER_TABLE:
10774 		case IWM_TIME_QUOTA_CMD:
10775 		case IWM_REMOVE_STA:
10776 		case IWM_TXPATH_FLUSH:
10777 		case IWM_LQ_CMD:
10778 		case IWM_WIDE_ID(IWM_LONG_GROUP,
10779 		    IWM_FW_PAGING_BLOCK_CMD):
10780 		case IWM_BT_CONFIG:
10781 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
10782 		case IWM_NVM_ACCESS_CMD:
10783 		case IWM_MCC_UPDATE_CMD:
10784 		case IWM_TIME_EVENT_CMD: {
10785 			size_t pkt_len;
10786 
10787 			if (sc->sc_cmd_resp_pkt[idx] == NULL)
10788 				break;
10789 
10790 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
10791 			    sizeof(*pkt), BUS_DMASYNC_POSTREAD);
10792 
10793 			pkt_len = sizeof(pkt->len_n_flags) +
10794 			    iwm_rx_packet_len(pkt);
10795 
10796 			if ((pkt->hdr.flags & IWM_CMD_FAILED_MSK) ||
10797 			    pkt_len < sizeof(*pkt) ||
10798 			    pkt_len > sc->sc_cmd_resp_len[idx]) {
10799 				free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
10800 				    sc->sc_cmd_resp_len[idx]);
10801 				sc->sc_cmd_resp_pkt[idx] = NULL;
10802 				break;
10803 			}
10804 
10805 			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
10806 			    pkt_len - sizeof(*pkt), BUS_DMASYNC_POSTREAD);
10807 			memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
10808 			break;
10809 		}
10810 
10811 		/* ignore */
10812 		case IWM_PHY_DB_CMD:
10813 			break;
10814 
10815 		case IWM_INIT_COMPLETE_NOTIF:
10816 			sc->sc_init_complete |= IWM_INIT_COMPLETE;
10817 			wakeup(&sc->sc_init_complete);
10818 			break;
10819 
10820 		case IWM_SCAN_OFFLOAD_COMPLETE: {
10821 			struct iwm_periodic_scan_complete *notif;
10822 			SYNC_RESP_STRUCT(notif, pkt);
10823 			break;
10824 		}
10825 
10826 		case IWM_SCAN_ITERATION_COMPLETE: {
10827 			struct iwm_lmac_scan_complete_notif *notif;
10828 			SYNC_RESP_STRUCT(notif, pkt);
10829 			iwm_endscan(sc);
10830 			break;
10831 		}
10832 
10833 		case IWM_SCAN_COMPLETE_UMAC: {
10834 			struct iwm_umac_scan_complete *notif;
10835 			SYNC_RESP_STRUCT(notif, pkt);
10836 			iwm_endscan(sc);
10837 			break;
10838 		}
10839 
10840 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
10841 			struct iwm_umac_scan_iter_complete_notif *notif;
10842 			SYNC_RESP_STRUCT(notif, pkt);
10843 			iwm_endscan(sc);
10844 			break;
10845 		}
10846 
10847 		case IWM_REPLY_ERROR: {
10848 			struct iwm_error_resp *resp;
10849 			SYNC_RESP_STRUCT(resp, pkt);
10850 			printf("%s: firmware error 0x%x, cmd 0x%x\n",
10851 			    DEVNAME(sc), le32toh(resp->error_type),
10852 			    resp->cmd_id);
10853 			break;
10854 		}
10855 
10856 		case IWM_TIME_EVENT_NOTIFICATION: {
10857 			struct iwm_time_event_notif *notif;
10858 			uint32_t action;
10859 			SYNC_RESP_STRUCT(notif, pkt);
10860 
10861 			if (sc->sc_time_event_uid != le32toh(notif->unique_id))
10862 				break;
10863 			action = le32toh(notif->action);
10864 			if (action & IWM_TE_V2_NOTIF_HOST_EVENT_END)
10865 				sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
10866 			break;
10867 		}
10868 
10869 		case IWM_WIDE_ID(IWM_SYSTEM_GROUP,
10870 		    IWM_FSEQ_VER_MISMATCH_NOTIFICATION):
10871 			break;
10872 
10873 		/*
10874 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
10875 		 * messages. Just ignore them for now.
10876 		 */
10877 		case IWM_DEBUG_LOG_MSG:
10878 			break;
10879 
10880 		case IWM_MCAST_FILTER_CMD:
10881 			break;
10882 
10883 		case IWM_SCD_QUEUE_CFG: {
10884 			struct iwm_scd_txq_cfg_rsp *rsp;
10885 			SYNC_RESP_STRUCT(rsp, pkt);
10886 
10887 			break;
10888 		}
10889 
10890 		case IWM_WIDE_ID(IWM_DATA_PATH_GROUP, IWM_DQA_ENABLE_CMD):
10891 			break;
10892 
10893 		case IWM_WIDE_ID(IWM_SYSTEM_GROUP, IWM_SOC_CONFIGURATION_CMD):
10894 			break;
10895 
10896 		default:
10897 			handled = 0;
10898 			printf("%s: unhandled firmware response 0x%x/0x%x "
10899 			    "rx ring %d[%d]\n",
10900 			    DEVNAME(sc), code, pkt->len_n_flags,
10901 			    (qid & ~0x80), idx);
10902 			break;
10903 		}
10904 
10905 		/*
10906 		 * uCode sets bit 0x80 when it originates the notification,
10907 		 * i.e. when the notification is not a direct response to a
10908 		 * command sent by the driver.
10909 		 * For example, uCode issues IWM_REPLY_RX when it sends a
10910 		 * received frame to the driver.
10911 		 */
10912 		if (handled && !(qid & (1 << 7))) {
10913 			iwm_cmd_done(sc, qid, idx, code);
10914 		}
10915 
10916 		offset += roundup(len, IWM_FH_RSCSR_FRAME_ALIGN);
10917 	}
10918 
10919 	if (m0 && m0 != data->m)
10920 		m_freem(m0);
10921 }
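/*
 * Editor's note, not driver code: shape of the loop above.  One RX
 * buffer (IWM_RBUF_SIZE, 4096 bytes in this build) can hold several
 * firmware packets back to back, each padded out to
 * IWM_FH_RSCSR_FRAME_ALIGN (0x40); e.g. a packet with len == 100
 * advances `offset' by roundup(100, 0x40) == 128.  Only the final MPDU
 * in the buffer may keep m0 itself (zero-copy hand-off to the stack);
 * earlier MPDUs are passed up as m_copym() copies so the walk can
 * continue behind them.
 */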
10922
10923 void
10924 iwm_notif_intr(struct iwm_softc *sc)
10925 {
10926 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
10927 	uint32_t wreg;
10928 	uint16_t hw;
10929 	int count;
10930 
10931 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
10932 	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
10933 
10934 	if (sc->sc_mqrx_supported) {
10935 		count = IWM_RX_MQ_RING_COUNT;
10936 		wreg = IWM_RFH_Q0_FRBDCB_WIDX_TRG;
10937 	} else {
10938 		count = IWM_RX_RING_COUNT;
10939 		wreg = IWM_FH_RSCSR_CHNL0_WPTR;
10940 	}
10941 
10942 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
10943 	hw &= (count - 1);
10944 	while (sc->rxq.cur != hw) {
10945 		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
10946 		iwm_rx_pkt(sc, data, &ml);
10947 		ADVANCE_RXQ(sc);
10948 	}
10949 	if_input(&sc->sc_ic.ic_if, &ml);
10950 
10951 	/*
10952 	 * Tell the firmware what we have processed.
10953 	 * Seems like the hardware gets upset unless we align the write by 8??
10954 	 */
10955 	hw = (hw == 0) ? count - 1 : hw - 1;
10956 	IWM_WRITE(sc, wreg, hw & ~7);
10957 }
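/*
 * Editor's worked example, not driver code: the final write-pointer
 * update rounds down to a multiple of 8, per the comment above.  With
 * count == 256:
 *
 *	hw == 0   ->  write (255 & ~7) == 248
 *	hw == 13  ->  write (12 & ~7)  == 8
 *	hw == 16  ->  write (15 & ~7)  == 8
 *
 * so up to 7 already-processed slots are handed back to the hardware
 * only by a later, larger update.
 */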
10958
10959 int
10960 iwm_intr(void *arg)
10961 {
10962 	struct iwm_softc *sc = arg;
10963 	struct ieee80211com *ic = &sc->sc_ic;
10964 	struct ifnet *ifp = IC2IFP(ic);
10965 	int handled = 0;
10966 	int rv = 0;
10967 	uint32_t r1, r2;
10968 
10969 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
10970 
10971 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
10972 		uint32_t *ict = sc->ict_dma.vaddr;
10973 		int tmp;
10974 
10975 		tmp = htole32(ict[sc->ict_cur]);
10976 		if (!tmp)
10977 			goto out_ena;
10978 
10979 		/*
10980 		 * ok, there was something. keep plowing until we have all.
10981 		 */
10982 		r1 = r2 = 0;
10983 		while (tmp) {
10984 			r1 |= tmp;
10985 			ict[sc->ict_cur] = 0;
10986 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
10987 			tmp = htole32(ict[sc->ict_cur]);
10988 		}
10989 
10990 		/* this is where the fun begins. don't ask */
10991 		if (r1 == 0xffffffff)
10992 			r1 = 0;
10993 
10994 		/*
10995 		 * Workaround for hardware bug where bits are falsely cleared
10996 		 * when using interrupt coalescing. Bit 15 should be set if
10997 		 * bits 18 and 19 are set.
10998 		 */
10999 		if (r1 & 0xc0000)
11000 			r1 |= 0x8000;
11001 
11002 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
11003 	} else {
11004 		r1 = IWM_READ(sc, IWM_CSR_INT);
11005 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
11006 	}
11007 	if (r1 == 0 && r2 == 0) {
11008 		goto out_ena;
11009 	}
11010 	if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
11011 		goto out;
11012 
11013 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
11014 
11015 	/* ignored */
11016 	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
11017 
11018 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
11019 		handled |= IWM_CSR_INT_BIT_RF_KILL;
11020 		iwm_check_rfkill(sc);
11021 		task_add(systq, &sc->init_task);
11022 		rv = 1;
11023 		goto out_ena;
11024 	}
11025 
11026 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
11027 		if (ifp->if_flags & IFF_DEBUG) {
11028 			iwm_nic_error(sc);
11029 			iwm_dump_driver_status(sc);
11030 		}
11031 		printf("%s: fatal firmware error\n", DEVNAME(sc));
11032 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
11033 			task_add(systq, &sc->init_task);
11034 		rv = 1;
11035 		goto out;
11036 
11037 	}
11038 
11039 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
11040 		handled |= IWM_CSR_INT_BIT_HW_ERR;
11041 		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
11042 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
11043 			sc->sc_flags |= IWM_FLAG_HW_ERR;
11044 			task_add(systq, &sc->init_task);
11045 		}
11046 		rv = 1;
11047 		goto out;
11048 	}
11049 
11050 	/* firmware chunk loaded */
11051 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
11052 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
11053 		handled |= IWM_CSR_INT_BIT_FH_TX;
11054 
11055 		sc->sc_fw_chunk_done = 1;
11056 		wakeup(&sc->sc_fw);
11057 	}
11058 
11059 	if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX |
11060 	    IWM_CSR_INT_BIT_RX_PERIODIC)) {
11061 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) {
11062 			handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
11063 			IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
11064 		}
11065 		if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
11066 			handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
11067 			IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
11068 		}
11069 
11070 		/* Disable periodic interrupt; we use it as just a one-shot. */
11071 		IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
11072 
11073 		/*
11074 		 * Enable periodic interrupt in 8 msec only if we received
11075 		 * real RX interrupt (instead of just periodic int), to catch
11076 		 * any dangling Rx interrupt. If it was just the periodic
11077 		 * interrupt, there was no dangling Rx activity, and no need
11078 		 * to extend the periodic interrupt; one-shot is enough.
11079 		 */
11080 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX))
11081 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
11082 			    IWM_CSR_INT_PERIODIC_ENA);
11083 
11084 		iwm_notif_intr(sc);
11085 	}
11086 
11087 	rv = 1;
11088 
11089  out_ena:
11090 	iwm_restore_interrupts(sc);
11091  out:
11092 	return rv;
11093 }
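/*
 * Editor's worked example, not driver code: ICT entries pack the 32
 * CSR_INT bits into 16 -- judging by the reassembly above, the low byte
 * corresponds to CSR bits 0-7 and the high byte to CSR bits 24-31.
 * The line
 *
 *	r1 = (0xff & r1) | ((0xff00 & r1) << 16);
 *
 * undoes that packing: an accumulated ICT value of 0x8080 becomes
 * 0x80000080, i.e. bit 7 stays put and bit 15 moves up to bit 31
 * (IWM_CSR_INT_BIT_FH_RX).
 */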
11094
11095 int
11096 iwm_intr_msix(void *arg)
11097 {
11098 	struct iwm_softc *sc = arg;
11099 	struct ieee80211com *ic = &sc->sc_ic;
11100 	struct ifnet *ifp = IC2IFP(ic);
11101 	uint32_t inta_fh, inta_hw;
11102 	int vector = 0;
11103 
11104 	inta_fh = IWM_READ(sc, IWM_CSR_MSIX_FH_INT_CAUSES_AD);
11105 	inta_hw = IWM_READ(sc, IWM_CSR_MSIX_HW_INT_CAUSES_AD);
11106 	IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
11107 	IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
11108 	inta_fh &= sc->sc_fh_mask;
11109 	inta_hw &= sc->sc_hw_mask;
11110 
11111 	if (inta_fh & IWM_MSIX_FH_INT_CAUSES_Q0 ||
11112 	    inta_fh & IWM_MSIX_FH_INT_CAUSES_Q1) {
11113 		iwm_notif_intr(sc);
11114 	}
11115 
11116 	/* firmware chunk loaded */
11117 	if (inta_fh & IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
11118 		sc->sc_fw_chunk_done = 1;
11119 		wakeup(&sc->sc_fw);
11120 	}
11121 
11122 	if ((inta_fh & IWM_MSIX_FH_INT_CAUSES_FH_ERR) ||
11123 	    (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
11124 	    (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
11125 		if (ifp->if_flags & IFF_DEBUG) {
11126 			iwm_nic_error(sc);
11127 			iwm_dump_driver_status(sc);
11128 		}
11129 		printf("%s: fatal firmware error\n", DEVNAME(sc));
11130 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
11131 			task_add(systq, &sc->init_task);
11132 		return 1;
11133 	}
11134 
11135 	if (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
11136 		iwm_check_rfkill(sc);
11137 		task_add(systq, &sc->init_task);
11138 	}
11139 
11140 	if (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
11141 		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
11142 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
11143 			sc->sc_flags |= IWM_FLAG_HW_ERR;
11144 			task_add(systq, &sc->init_task);
11145 		}
11146 		return 1;
11147 	}
11148 
11149 	/*
11150 	 * Before sending the interrupt the HW disables it to prevent
11151 	 * a nested interrupt. This is done by writing 1 to the corresponding
11152 	 * bit in the mask register. After handling the interrupt, it should be
11153 	 * re-enabled by clearing this bit. This register is defined as a
11154 	 * write-1-clear (W1C) register, meaning that the bit is cleared by
11155 	 * writing 1 to it.
11156 	 */
11157 	IWM_WRITE(sc, IWM_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
11158 	return 1;
11159 }
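/*
 * Editor's note -- illustrative, not driver code: W1C ("write 1 to
 * clear") semantics as described in the comment above.  The automask
 * register latches a 1 for each auto-masked vector; writing a 1 back
 * clears that latch and re-enables the vector, while writing 0 is a
 * no-op.  With vector == 0,
 *
 *	IWM_WRITE(sc, IWM_CSR_MSIX_AUTOMASK_ST_AD, 1 << 0);
 *
 * clears bit 0 only and leaves any other masked vectors untouched.
 */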
11160
11161 typedef void *iwm_match_t;
11162 
11163 static const struct pci_matchid iwm_devices[] = {
11164 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3160_1 },
11165 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3160_2 },
11166 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3165_1 },
11167 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3165_2 },
11168 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3168_1 },
11169 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7260_1 },
11170 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7260_2 },
11171 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7265_1 },
11172 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7265_2 },
11173 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8260_1 },
11174 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8260_2 },
11175 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8265_1 },
11176 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9260_1 },
11177 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9560_1 },
11178 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9560_2 },
11179 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9560_3 },
11180 };
11181 
11182 int
11183 iwm_match(struct device *parent, iwm_match_t match __unused, void *aux)
11184 {
11185 	return pci_matchbyid((struct pci_attach_args *)aux, iwm_devices,
11186 	    nitems(iwm_devices));
11187 }
11188
11189 int
11190 iwm_preinit(struct iwm_softc *sc)
11191 {
11192 	struct ieee80211com *ic = &sc->sc_ic;
11193 	struct ifnet *ifp = IC2IFP(ic);
11194 	int err;
11195 	static int attached;
11196 
11197 	err = iwm_prepare_card_hw(sc);
11198 	if (err) {
11199 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
11200 		return err;
11201 	}
11202 
11203 	if (attached) {
11204 		/* Update MAC in case the upper layers changed it. */
11205 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
11206 		    ((struct arpcom *)ifp)->ac_enaddr);
11207 		return 0;
11208 	}
11209 
11210 	err = iwm_start_hw(sc);
11211 	if (err) {
11212 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
11213 		return err;
11214 	}
11215 
11216 	err = iwm_run_init_mvm_ucode(sc, 1);
11217 	iwm_stop_device(sc);
11218 	if (err)
11219 		return err;
11220 
11221 	/* Print version info and MAC address on first successful fw load. */
11222 	attached = 1;
11223 	printf("%s: hw rev 0x%x, fw ver %s, address %s\n",
11224 	    DEVNAME(sc), sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
11225 	    sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
11226 
11227 	if (sc->sc_nvm.sku_cap_11n_enable)
11228 		iwm_setup_ht_rates(sc);
11229 
11230 	/* not all hardware can do 5GHz band */
11231 	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
11232 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
11233 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
11234 
11235 	/* Configure channel information obtained from firmware. */
11236 	ieee80211_channel_init(ifp);
11237 
11238 	/* Configure MAC address. */
11239 	err = if_setlladdr(ifp, ic->ic_myaddr);
11240 	if (err)
11241 		printf("%s: could not set MAC address (error %d)\n",
11242 		    DEVNAME(sc), err);
11243 
11244 	ieee80211_media_init(ifp, iwm_media_change, ieee80211_media_status);
11245 
11246 	return 0;
11247 }
11248
11249 void
11250 iwm_attach_hook(struct device *self)
11251 {
11252 	struct iwm_softc *sc = (void *)self;
11253 
11254 	KASSERT(!cold);
11255 
11256 	iwm_preinit(sc);
11257 }
11258
11259void
11260iwm_attach(struct device *parent, struct device *self, void *aux)
11261{
11262 struct iwm_softc *sc = (void *)self;
11263 struct pci_attach_args *pa = aux;
11264 pci_intr_handle_t ih;
11265 pcireg_t reg, memtype;
11266 struct ieee80211com *ic = &sc->sc_ic;
11267 struct ifnet *ifp = &ic->ic_ific_ac.ac_if;
11268 const char *intrstr;
11269 int err;
11270 int txq_i, i, j;
11271
11272 sc->sc_pct = pa->pa_pc;
11273 sc->sc_pcitag = pa->pa_tag;
11274 sc->sc_dmat = pa->pa_dmat;
11275
11276 rw_init(&sc->ioctl_rwl, "iwmioctl")_rw_init_flags(&sc->ioctl_rwl, "iwmioctl", 0, ((void *
)0))
;
11277
11278 err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
11279 PCI_CAP_PCIEXPRESS0x10, &sc->sc_cap_off, NULL((void *)0));
11280 if (err == 0) {
11281 printf("%s: PCIe capability structure not found!\n",
11282 DEVNAME(sc)((sc)->sc_dev.dv_xname));
11283 return;
11284 }
11285
11286 /*
11287 * We disable the RETRY_TIMEOUT register (0x41) to keep
11288 * PCI Tx retries from interfering with C3 CPU state.
11289 */
11290 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
11291 pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
11292
11293 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START0x10);
11294 err = pci_mapreg_map(pa, PCI_MAPREG_START0x10, memtype, 0,
11295 &sc->sc_st, &sc->sc_sh, NULL((void *)0), &sc->sc_sz, 0);
11296 if (err) {
11297 printf("%s: can't map mem space\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
11298 return;
11299 }
11300
11301 if (pci_intr_map_msix(pa, 0, &ih) == 0) {
11302 sc->sc_msix = 1;
11303 } else if (pci_intr_map_msi(pa, &ih)) {
11304 if (pci_intr_map(pa, &ih)) {
11305 printf("%s: can't map interrupt\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
11306 return;
11307 }
11308 /* Hardware bug workaround. */
11309 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
11310 PCI_COMMAND_STATUS_REG0x04);
11311 if (reg & PCI_COMMAND_INTERRUPT_DISABLE0x00000400)
11312 reg &= ~PCI_COMMAND_INTERRUPT_DISABLE0x00000400;
11313 pci_conf_write(sc->sc_pct, sc->sc_pcitag,
11314 PCI_COMMAND_STATUS_REG0x04, reg);
11315 }
11316
11317 intrstr = pci_intr_string(sc->sc_pct, ih);
11318 if (sc->sc_msix)
11319 sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET0x7,
11320 iwm_intr_msix, sc, DEVNAME(sc)((sc)->sc_dev.dv_xname));
11321 else
11322 sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET0x7,
11323 iwm_intr, sc, DEVNAME(sc)((sc)->sc_dev.dv_xname));
11324
11325 if (sc->sc_ih == NULL((void *)0)) {
11326 printf("\n");
11327 printf("%s: can't establish interrupt", DEVNAME(sc)((sc)->sc_dev.dv_xname));
11328 if (intrstr != NULL((void *)0))
11329 printf(" at %s", intrstr);
11330 printf("\n");
11331 return;
11332 }
11333 printf(", %s\n", intrstr);
11334
	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_INTEL_WL_3160_1:
	case PCI_PRODUCT_INTEL_WL_3160_2:
		sc->sc_fwname = "iwm-3160-17";
		sc->host_interrupt_operation_mode = 1;
		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
		sc->sc_nvm_max_section_size = 16384;
		sc->nvm_type = IWM_NVM;
		break;
	case PCI_PRODUCT_INTEL_WL_3165_1:
	case PCI_PRODUCT_INTEL_WL_3165_2:
		sc->sc_fwname = "iwm-7265D-29";
		sc->host_interrupt_operation_mode = 0;
		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
		sc->sc_nvm_max_section_size = 16384;
		sc->nvm_type = IWM_NVM;
		break;
	case PCI_PRODUCT_INTEL_WL_3168_1:
		sc->sc_fwname = "iwm-3168-29";
		sc->host_interrupt_operation_mode = 0;
		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
		sc->sc_nvm_max_section_size = 16384;
		sc->nvm_type = IWM_NVM_SDP;
		break;
	case PCI_PRODUCT_INTEL_WL_7260_1:
	case PCI_PRODUCT_INTEL_WL_7260_2:
		sc->sc_fwname = "iwm-7260-17";
		sc->host_interrupt_operation_mode = 1;
		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
		sc->sc_nvm_max_section_size = 16384;
		sc->nvm_type = IWM_NVM;
		break;
	case PCI_PRODUCT_INTEL_WL_7265_1:
	case PCI_PRODUCT_INTEL_WL_7265_2:
		sc->sc_fwname = "iwm-7265-17";
		sc->host_interrupt_operation_mode = 0;
		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
		sc->sc_nvm_max_section_size = 16384;
		sc->nvm_type = IWM_NVM;
		break;
	case PCI_PRODUCT_INTEL_WL_8260_1:
	case PCI_PRODUCT_INTEL_WL_8260_2:
		sc->sc_fwname = "iwm-8000C-36";
		sc->host_interrupt_operation_mode = 0;
		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
		sc->sc_nvm_max_section_size = 32768;
		sc->nvm_type = IWM_NVM_EXT;
		break;
	case PCI_PRODUCT_INTEL_WL_8265_1:
		sc->sc_fwname = "iwm-8265-36";
		sc->host_interrupt_operation_mode = 0;
		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
		sc->sc_nvm_max_section_size = 32768;
		sc->nvm_type = IWM_NVM_EXT;
		break;
	case PCI_PRODUCT_INTEL_WL_9260_1:
		sc->sc_fwname = "iwm-9260-46";
		sc->host_interrupt_operation_mode = 0;
		sc->sc_device_family = IWM_DEVICE_FAMILY_9000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
		sc->sc_nvm_max_section_size = 32768;
		sc->sc_mqrx_supported = 1;
		break;
	case PCI_PRODUCT_INTEL_WL_9560_1:
	case PCI_PRODUCT_INTEL_WL_9560_2:
	case PCI_PRODUCT_INTEL_WL_9560_3:
		sc->sc_fwname = "iwm-9000-46";
		sc->host_interrupt_operation_mode = 0;
		sc->sc_device_family = IWM_DEVICE_FAMILY_9000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
		sc->sc_nvm_max_section_size = 32768;
		sc->sc_mqrx_supported = 1;
		sc->sc_integrated = 1;
		if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_INTEL_WL_9560_3) {
			sc->sc_xtal_latency = 670;
			sc->sc_extra_phy_config = IWM_FW_PHY_CFG_SHARED_CLK;
		} else
			sc->sc_xtal_latency = 650;
		break;
	default:
		printf("%s: unknown adapter type\n", DEVNAME(sc));
		return;
	}

	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
	 * changed, and now the revision step also includes bits 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible, we'll store it
	 * in the old format.
	 */
	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
		uint32_t hw_step;

		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
		    (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

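		/*
		 * Worked example: IWM_CSR_HW_REV_STEP(x) extracts bits 2-3
		 * of x, so with sc_hw_rev = 0x0003 the new-format step in
		 * bits 0-1 moves into the legacy step field:
		 * (0x0003 & 0xfff0) |
		 *     (IWM_CSR_HW_REV_STEP(0x0003 << 2) << 2)
		 * == 0x0 | (0x3 << 2) == 0x000c.
		 */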
		if (iwm_prepare_card_hw(sc) != 0) {
			printf("%s: could not initialize hardware\n",
			    DEVNAME(sc));
			return;
		}

		/*
		 * In order to recognize C step the driver should read the
		 * chip version id located at the AUX bus MISC address.
		 */
		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(2);

		err = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
		    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
		    25000);
		if (!err) {
			printf("%s: Failed to wake up the nic\n", DEVNAME(sc));
			return;
		}

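		/*
		 * hw_step below ends up holding bits 24-27 of the AUX MISC
		 * register (IWM_HW_STEP_LOCATION_BITS is 24); a value of 3
		 * identifies C-step silicon, which is then recorded in the
		 * legacy step field of sc_hw_rev.
		 */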
		if (iwm_nic_lock(sc)) {
			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
			hw_step |= IWM_ENABLE_WFPM;
			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
			if (hw_step == 0x3)
				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
				    (IWM_SILICON_C_STEP << 2);
			iwm_nic_unlock(sc);
		} else {
			printf("%s: Failed to lock the nic\n", DEVNAME(sc));
			return;
		}
	}

	/*
	 * Allocate DMA memory for firmware transfers.
	 * Must be aligned on a 16-byte boundary.
	 */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    sc->sc_fwdmasegsz, 16);
	if (err) {
		printf("%s: could not allocate memory for firmware\n",
		    DEVNAME(sc));
		return;
	}
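	/*
	 * Note: sc_fwdmasegsz was set per device family above (192 KB for
	 * the 7000 family, 320 KB for 8000 and later), presumably sized to
	 * the largest firmware chunk handed to the device in one transfer.
	 */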

	/* Allocate "Keep Warm" page, used internally by the card. */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
	if (err) {
		printf("%s: could not allocate keep warm page\n", DEVNAME(sc));
		goto fail1;
	}

	/* Allocate interrupt cause table (ICT). */
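	/*
	 * The ICT is a 4 KB, 4 KB-aligned table through which the device
	 * can post interrupt causes to host memory, letting the legacy
	 * interrupt handler read causes from DMA memory rather than from
	 * device registers.
	 */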
	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1 << IWM_ICT_PADDR_SHIFT);
	if (err) {
		printf("%s: could not allocate ICT table\n", DEVNAME(sc));
		goto fail2;
	}

	/* TX scheduler rings must be aligned on a 1KB boundary. */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
	if (err) {
		printf("%s: could not allocate TX scheduler rings\n",
		    DEVNAME(sc));
		goto fail3;
	}

	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		err = iwm_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
		if (err) {
			printf("%s: could not allocate TX ring %d\n",
			    DEVNAME(sc), txq_i);
			goto fail4;
		}
	}

	err = iwm_alloc_rx_ring(sc, &sc->rxq);
	if (err) {
		printf("%s: could not allocate RX ring\n", DEVNAME(sc));
		goto fail4;
	}

	sc->sc_nswq = taskq_create("iwmns", 1, IPL_NET, 0);
	if (sc->sc_nswq == NULL)
		goto fail4;

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
	ic->ic_state = IEEE80211_S_INIT;

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_QOS | IEEE80211_C_TX_AMPDU | /* A-MPDU */
	    IEEE80211_C_WEP |		/* WEP */
	    IEEE80211_C_RSN |		/* WPA/RSN */
	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
	    IEEE80211_C_MONITOR |	/* monitor mode supported */
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */

	ic->ic_htcaps = IEEE80211_HTCAP_SGI20 | IEEE80211_HTCAP_SGI40;
	ic->ic_htcaps |= IEEE80211_HTCAP_CBW20_40;
	ic->ic_htcaps |=
	    (IEEE80211_HTCAP_SMPS_DIS << IEEE80211_HTCAP_SMPS_SHIFT);
	ic->ic_htxcaps = 0;
	ic->ic_txbfcaps = 0;
	ic->ic_aselcaps = 0;
	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
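	/*
	 * In the HT A-MPDU parameters field, 0x3 is the maximum RX A-MPDU
	 * length exponent (2^(13 + 3) - 1 = 65535 bytes, hence "64k"),
	 * while IEEE80211_AMPDU_PARAM_SS_4 requests a 4-usec minimum MPDU
	 * start spacing.
	 */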

	ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;

	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
		sc->sc_phyctxt[i].sco = IEEE80211_HTOP0_SCO_SCN;
	}

	sc->sc_amrr.amrr_min_success_threshold = 1;
	sc->sc_amrr.amrr_max_success_threshold = 15;

	/* IBSS channel undefined for now. */
	ic->ic_ibss_chan = &ic->ic_channels[1];

	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
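	/*
	 * IWM_MAX_DBM is -33 and IWM_MIN_DBM is -100, so the RSSI range
	 * advertised to net80211 is -33 - (-100) = 67.
	 */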

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = iwm_ioctl;
	ifp->if_start = iwm_start;
	ifp->if_watchdog = iwm_watchdog;
	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);

	if_attach(ifp);
	ieee80211_ifattach(ifp);
	ieee80211_media_init(ifp, iwm_media_change, ieee80211_media_status);

#if NBPFILTER > 0
	iwm_radiotap_attach(sc);
#endif
	timeout_set(&sc->sc_calib_to, iwm_calib_timeout, sc);
	timeout_set(&sc->sc_led_blink_to, iwm_led_blink_timeout, sc);
	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
		struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
		rxba->baid = IWM_RX_REORDER_DATA_INVALID_BAID;
		rxba->sc = sc;
		timeout_set(&rxba->session_timer, iwm_rx_ba_session_expired,
		    rxba);
		timeout_set(&rxba->reorder_buf.reorder_timer,
		    iwm_reorder_timer_expired, &rxba->reorder_buf);
		for (j = 0; j < nitems(rxba->entries); j++)
			ml_init(&rxba->entries[j].frames);
	}
	task_set(&sc->init_task, iwm_init_task, sc);
	task_set(&sc->newstate_task, iwm_newstate_task, sc);
	task_set(&sc->ba_task, iwm_ba_task, sc);
	task_set(&sc->mac_ctxt_task, iwm_mac_ctxt_task, sc);
	task_set(&sc->phy_ctxt_task, iwm_phy_ctxt_task, sc);
	task_set(&sc->bgscan_done_task, iwm_bgscan_done_task, sc);

	ic->ic_node_alloc = iwm_node_alloc;
	ic->ic_bgscan_start = iwm_bgscan;
	ic->ic_bgscan_done = iwm_bgscan_done;
	ic->ic_set_key = iwm_set_key;
	ic->ic_delete_key = iwm_delete_key;

	/* Override 802.11 state transition machine. */
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = iwm_newstate;
	ic->ic_updateprot = iwm_updateprot;
	ic->ic_updateslot = iwm_updateslot;
	ic->ic_updateedca = iwm_updateedca;
	ic->ic_ampdu_rx_start = iwm_ampdu_rx_start;
	ic->ic_ampdu_rx_stop = iwm_ampdu_rx_stop;
	ic->ic_ampdu_tx_start = iwm_ampdu_tx_start;
	ic->ic_ampdu_tx_stop = iwm_ampdu_tx_stop;
	/*
	 * We cannot read the MAC address without loading the
	 * firmware from disk. Postpone until mountroot is done.
	 */
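	/*
	 * config_mountroot(9) defers iwm_attach_hook until the root
	 * filesystem is mounted, at which point the firmware file can be
	 * loaded and attachment completed.
	 */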
	config_mountroot(self, iwm_attach_hook);

	return;

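	/*
	 * Error unwinding mirrors the allocation order above: fail4 frees
	 * the TX rings, RX ring and scheduler memory, fail3 the ICT table,
	 * fail2 the keep warm page, and fail1 the firmware DMA memory.
	 */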
fail4:	while (--txq_i >= 0)
		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
	iwm_free_rx_ring(sc, &sc->rxq);
	iwm_dma_contig_free(&sc->sched_dma);
fail3:	if (sc->ict_dma.vaddr != NULL)
		iwm_dma_contig_free(&sc->ict_dma);

fail2:	iwm_dma_contig_free(&sc->kw_dma);
fail1:	iwm_dma_contig_free(&sc->fw_dma);
	return;
}

#if NBPFILTER > 0
void
iwm_radiotap_attach(struct iwm_softc *sc)
{
	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);

	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);

	sc->sc_txtap_len = sizeof sc->sc_txtapu;
	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
	sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
}
#endif

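/*
 * iwm_init_task restarts the interface after a firmware or hardware
 * error. The generation check appears to guard against the device being
 * stopped or detached between task scheduling and execution; a fatal
 * condition (hardware error or rfkill asserted) suppresses the re-init.
 */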
void
iwm_init_task(void *arg1)
{
	struct iwm_softc *sc = arg1;
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int s = splnet();
	int generation = sc->sc_generation;
	int fatal = (sc->sc_flags & (IWM_FLAG_HW_ERR | IWM_FLAG_RFKILL));

	rw_enter_write(&sc->ioctl_rwl);
	if (generation != sc->sc_generation) {
		rw_exit(&sc->ioctl_rwl);
		splx(s);
		return;
	}

	if (ifp->if_flags & IFF_RUNNING)
		iwm_stop(ifp);
	else
		sc->sc_flags &= ~IWM_FLAG_HW_ERR;

	if (!fatal && (ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
		iwm_init(ifp);

	rw_exit(&sc->ioctl_rwl);
	splx(s);
}

void
iwm_resume(struct iwm_softc *sc)
{
	pcireg_t reg;

	/*
	 * We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	if (!sc->sc_msix) {
		/* Hardware bug workaround. */
		reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
		    PCI_COMMAND_STATUS_REG);
		if (reg & PCI_COMMAND_INTERRUPT_DISABLE)
			reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
		    PCI_COMMAND_STATUS_REG, reg);
	}

	iwm_disable_interrupts(sc);
}

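/*
 * iwm_wakeup re-initializes the hardware on resume: once iwm_init_hw()
 * succeeds the interface is marked running again, and the driver either
 * enters RUN state directly (monitor mode) or begins a scan.
 */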
int
iwm_wakeup(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int err;

	refcnt_init(&sc->task_refs);

	err = iwm_start_hw(sc);
	if (err)
		return err;

	err = iwm_init_hw(sc);
	if (err)
		return err;

	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_flags |= IFF_RUNNING;

	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
	else
		ieee80211_begin_scan(ifp);

	return 0;
}

int
iwm_activate(struct device *self, int act)
{
	struct iwm_softc *sc = (struct iwm_softc *)self;
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int err = 0;

	switch (act) {
	case DVACT_QUIESCE:
		if (ifp->if_flags & IFF_RUNNING) {
			rw_enter_write(&sc->ioctl_rwl);
			iwm_stop(ifp);
			rw_exit(&sc->ioctl_rwl);
		}
		break;
	case DVACT_RESUME:
		iwm_resume(sc);
		break;
	case DVACT_WAKEUP:
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP) {
			err = iwm_wakeup(sc);
			if (err)
				printf("%s: could not initialize hardware\n",
				    DEVNAME(sc));
		}
		break;
	}

	return 0;
}

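/*
 * Autoconf glue: iwm_ca wires iwm_match, iwm_attach and iwm_activate
 * into the kernel's autoconfiguration framework; the NULL slot is the
 * (unimplemented) detach routine.
 */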
struct cfdriver iwm_cd = {
	NULL, "iwm", DV_IFNET
};

struct cfattach iwm_ca = {
	sizeof(struct iwm_softc), iwm_match, iwm_attach,
	NULL, iwm_activate
};