Bug Summary

File: dev/pci/if_iwx.c
Warning: line 9068, column 4
Value stored to 'handled' is never read

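The analyzer is reporting a dead store: a value is written to 'handled' but no
path reads the variable afterwards. A minimal sketch of the pattern
(hypothetical names; an illustration of the diagnostic, not the code at line
9068):

    extern void do_work(void);

    int
    handle_notification(int code)
    {
            int handled = 0;

            switch (code) {
            case 1:
                    do_work();
                    handled = 1;    /* value stored here is never read */
                    break;
            }
            return 0;               /* 'handled' is not consulted */
    }

Such stores are harmless at runtime but usually point at leftover bookkeeping;
the fix is either to act on the flag or to drop the assignment.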
Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name if_iwx.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/if_iwx.c
1/* $OpenBSD: if_iwx.c,v 1.133 2022/01/09 05:42:52 jsg Exp $ */
2
3/*
4 * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
5 * Author: Stefan Sperling <stsp@openbsd.org>
6 * Copyright (c) 2014 Fixup Software Ltd.
7 * Copyright (c) 2017, 2019, 2020 Stefan Sperling <stsp@openbsd.org>
8 *
9 * Permission to use, copy, modify, and distribute this software for any
10 * purpose with or without fee is hereby granted, provided that the above
11 * copyright notice and this permission notice appear in all copies.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*-
23 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
24 * which were used as the reference documentation for this implementation.
25 *
26 ******************************************************************************
27 *
28 * This file is provided under a dual BSD/GPLv2 license. When using or
29 * redistributing this file, you may do so under either license.
30 *
31 * GPL LICENSE SUMMARY
32 *
33 * Copyright(c) 2017 Intel Deutschland GmbH
34 * Copyright(c) 2018 - 2019 Intel Corporation
35 *
36 * This program is free software; you can redistribute it and/or modify
37 * it under the terms of version 2 of the GNU General Public License as
38 * published by the Free Software Foundation.
39 *
40 * This program is distributed in the hope that it will be useful, but
41 * WITHOUT ANY WARRANTY; without even the implied warranty of
42 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
43 * General Public License for more details.
44 *
45 * BSD LICENSE
46 *
47 * Copyright(c) 2017 Intel Deutschland GmbH
48 * Copyright(c) 2018 - 2019 Intel Corporation
49 * All rights reserved.
50 *
51 * Redistribution and use in source and binary forms, with or without
52 * modification, are permitted provided that the following conditions
53 * are met:
54 *
55 * * Redistributions of source code must retain the above copyright
56 * notice, this list of conditions and the following disclaimer.
57 * * Redistributions in binary form must reproduce the above copyright
58 * notice, this list of conditions and the following disclaimer in
59 * the documentation and/or other materials provided with the
60 * distribution.
61 * * Neither the name Intel Corporation nor the names of its
62 * contributors may be used to endorse or promote products derived
63 * from this software without specific prior written permission.
64 *
65 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
66 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
67 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
68 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
69 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
70 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
71 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
72 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
73 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
74 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
75 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
76 *
77 *****************************************************************************
78 */
79
80/*-
81 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
82 *
83 * Permission to use, copy, modify, and distribute this software for any
84 * purpose with or without fee is hereby granted, provided that the above
85 * copyright notice and this permission notice appear in all copies.
86 *
87 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
88 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
89 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
90 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
91 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
92 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
93 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
94 */
95
96#include "bpfilter.h"
97
98#include <sys/param.h>
99#include <sys/conf.h>
100#include <sys/kernel.h>
101#include <sys/malloc.h>
102#include <sys/mbuf.h>
103#include <sys/mutex.h>
104#include <sys/proc.h>
105#include <sys/rwlock.h>
106#include <sys/socket.h>
107#include <sys/sockio.h>
108#include <sys/systm.h>
109#include <sys/endian.h>
110
111#include <sys/refcnt.h>
112#include <sys/task.h>
113#include <machine/bus.h>
114#include <machine/intr.h>
115
116#include <dev/pci/pcireg.h>
117#include <dev/pci/pcivar.h>
118#include <dev/pci/pcidevs.h>
119
120#if NBPFILTER > 0
121#include <net/bpf.h>
122#endif
123#include <net/if.h>
124#include <net/if_dl.h>
125#include <net/if_media.h>
126
127#include <netinet/in.h>
128#include <netinet/if_ether.h>
129
130#include <net80211/ieee80211_var.h>
131#include <net80211/ieee80211_radiotap.h>
132#include <net80211/ieee80211_priv.h> /* for SEQ_LT */
133#undef DPRINTF /* defined in ieee80211_priv.h */
134
135#define DEVNAME(_s) ((_s)->sc_dev.dv_xname)
136
137#define IC2IFP(_ic_) (&(_ic_)->ic_if)
138
139#define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
140#define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
141
142#ifdef IWX_DEBUG
143#define DPRINTF(x) do { if (iwx_debug > 0) printf x; } while (0)
144#define DPRINTFN(n, x) do { if (iwx_debug >= (n)) printf x; } while (0)
145int iwx_debug = 1;
146#else
147#define DPRINTF(x) do { ; } while (0)
148#define DPRINTFN(n, x) do { ; } while (0)
149#endif
150
151#include <dev/pci/if_iwxreg.h>
152#include <dev/pci/if_iwxvar.h>
153
154const uint8_t iwx_nvm_channels_8000[] = {
155 /* 2.4 GHz */
156 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
157 /* 5 GHz */
158 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
159 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
160 149, 153, 157, 161, 165, 169, 173, 177, 181
161};
162
163static const uint8_t iwx_nvm_channels_uhb[] = {
164 /* 2.4 GHz */
165 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
166 /* 5 GHz */
167 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
168 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
169 149, 153, 157, 161, 165, 169, 173, 177, 181,
170 /* 6-7 GHz */
171 1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 65, 69,
172 73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129,
173 133, 137, 141, 145, 149, 153, 157, 161, 165, 169, 173, 177, 181, 185,
174 189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233
175};
176
177#define IWX_NUM_2GHZ_CHANNELS 14
178
179const struct iwx_rate {
180 uint16_t rate;
181 uint8_t plcp;
182 uint8_t ht_plcp;
183} iwx_rates[] = {
184 /* Legacy */ /* HT */
185 { 2, IWX_RATE_1M_PLCP, IWX_RATE_HT_SISO_MCS_INV_PLCP },
186 { 4, IWX_RATE_2M_PLCP, IWX_RATE_HT_SISO_MCS_INV_PLCP },
187 { 11, IWX_RATE_5M_PLCP, IWX_RATE_HT_SISO_MCS_INV_PLCP },
188 { 22, IWX_RATE_11M_PLCP, IWX_RATE_HT_SISO_MCS_INV_PLCP },
189 { 12, IWX_RATE_6M_PLCP, IWX_RATE_HT_SISO_MCS_0_PLCP },
190 { 18, IWX_RATE_9M_PLCP, IWX_RATE_HT_SISO_MCS_INV_PLCP },
191 { 24, IWX_RATE_12M_PLCP, IWX_RATE_HT_SISO_MCS_1_PLCP },
192 { 26, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_8_PLCP },
193 { 36, IWX_RATE_18M_PLCP, IWX_RATE_HT_SISO_MCS_2_PLCP },
194 { 48, IWX_RATE_24M_PLCP, IWX_RATE_HT_SISO_MCS_3_PLCP },
195 { 52, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_9_PLCP },
196 { 72, IWX_RATE_36M_PLCP, IWX_RATE_HT_SISO_MCS_4_PLCP },
197 { 78, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_10_PLCP },
198 { 96, IWX_RATE_48M_PLCP, IWX_RATE_HT_SISO_MCS_5_PLCP },
199 { 104, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_11_PLCP },
200 { 108, IWX_RATE_54M_PLCP, IWX_RATE_HT_SISO_MCS_6_PLCP },
201 { 128, IWX_RATE_INVM_PLCP, IWX_RATE_HT_SISO_MCS_7_PLCP },
202 { 156, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_12_PLCP },
203 { 208, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_13_PLCP },
204 { 234, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_14_PLCP },
205 { 260, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_15_PLCP },
206};
207#define IWX_RIDX_CCK 0
208#define IWX_RIDX_OFDM 4
209#define IWX_RIDX_MAX (nitems(iwx_rates)-1)
210#define IWX_RIDX_IS_CCK(_i_) ((_i_) < IWX_RIDX_OFDM)
211#define IWX_RIDX_IS_OFDM(_i_) ((_i_) >= IWX_RIDX_OFDM)
212#define IWX_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
213
214/* Convert an MCS index into an iwx_rates[] index. */
215const int iwx_mcs2ridx[] = {
216 IWX_RATE_MCS_0_INDEX,
217 IWX_RATE_MCS_1_INDEX,
218 IWX_RATE_MCS_2_INDEX,
219 IWX_RATE_MCS_3_INDEX,
220 IWX_RATE_MCS_4_INDEX,
221 IWX_RATE_MCS_5_INDEX,
222 IWX_RATE_MCS_6_INDEX,
223 IWX_RATE_MCS_7_INDEX,
224 IWX_RATE_MCS_8_INDEX,
225 IWX_RATE_MCS_9_INDEX,
226 IWX_RATE_MCS_10_INDEX,
227 IWX_RATE_MCS_11_INDEX,
228 IWX_RATE_MCS_12_INDEX,
229 IWX_RATE_MCS_13_INDEX,
230 IWX_RATE_MCS_14_INDEX,
231 IWX_RATE_MCS_15_INDEX,
232};
233
234uint8_t iwx_lookup_cmd_ver(struct iwx_softc *, uint8_t, uint8_t);
235uint8_t iwx_lookup_notif_ver(struct iwx_softc *, uint8_t, uint8_t);
236int iwx_is_mimo_ht_plcp(uint8_t);
237int iwx_is_mimo_mcs(int);
238int iwx_store_cscheme(struct iwx_softc *, uint8_t *, size_t);
239int iwx_alloc_fw_monitor_block(struct iwx_softc *, uint8_t, uint8_t);
240int iwx_alloc_fw_monitor(struct iwx_softc *, uint8_t);
241int iwx_apply_debug_destination(struct iwx_softc *);
242int iwx_ctxt_info_init(struct iwx_softc *, const struct iwx_fw_sects *);
243void iwx_ctxt_info_free_fw_img(struct iwx_softc *);
244void iwx_ctxt_info_free_paging(struct iwx_softc *);
245int iwx_init_fw_sec(struct iwx_softc *, const struct iwx_fw_sects *,
246 struct iwx_context_info_dram *);
247void iwx_fw_version_str(char *, size_t, uint32_t, uint32_t, uint32_t);
248int iwx_firmware_store_section(struct iwx_softc *, enum iwx_ucode_type,
249 uint8_t *, size_t);
250int iwx_set_default_calib(struct iwx_softc *, const void *);
251void iwx_fw_info_free(struct iwx_fw_info *);
252int iwx_read_firmware(struct iwx_softc *);
253uint32_t iwx_read_prph_unlocked(struct iwx_softc *, uint32_t);
254uint32_t iwx_read_prph(struct iwx_softc *, uint32_t);
255void iwx_write_prph_unlocked(struct iwx_softc *, uint32_t, uint32_t);
256void iwx_write_prph(struct iwx_softc *, uint32_t, uint32_t);
257int iwx_read_mem(struct iwx_softc *, uint32_t, void *, int);
258int iwx_write_mem(struct iwx_softc *, uint32_t, const void *, int);
259int iwx_write_mem32(struct iwx_softc *, uint32_t, uint32_t);
260int iwx_poll_bit(struct iwx_softc *, int, uint32_t, uint32_t, int);
261int iwx_nic_lock(struct iwx_softc *);
262void iwx_nic_assert_locked(struct iwx_softc *);
263void iwx_nic_unlock(struct iwx_softc *);
264int iwx_set_bits_mask_prph(struct iwx_softc *, uint32_t, uint32_t,
265 uint32_t);
266int iwx_set_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
267int iwx_clear_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
268int iwx_dma_contig_alloc(bus_dma_tag_t, struct iwx_dma_info *, bus_size_t,
269 bus_size_t);
270void iwx_dma_contig_free(struct iwx_dma_info *);
271int iwx_alloc_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
272void iwx_disable_rx_dma(struct iwx_softc *);
273void iwx_reset_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
274void iwx_free_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
275int iwx_alloc_tx_ring(struct iwx_softc *, struct iwx_tx_ring *, int);
276void iwx_reset_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
277void iwx_free_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
278void iwx_enable_rfkill_int(struct iwx_softc *);
279int iwx_check_rfkill(struct iwx_softc *);
280void iwx_enable_interrupts(struct iwx_softc *);
281void iwx_enable_fwload_interrupt(struct iwx_softc *);
282void iwx_restore_interrupts(struct iwx_softc *);
283void iwx_disable_interrupts(struct iwx_softc *);
284void iwx_ict_reset(struct iwx_softc *);
285int iwx_set_hw_ready(struct iwx_softc *);
286int iwx_prepare_card_hw(struct iwx_softc *);
287int iwx_force_power_gating(struct iwx_softc *);
288void iwx_apm_config(struct iwx_softc *);
289int iwx_apm_init(struct iwx_softc *);
290void iwx_apm_stop(struct iwx_softc *);
291int iwx_allow_mcast(struct iwx_softc *);
292void iwx_init_msix_hw(struct iwx_softc *);
293void iwx_conf_msix_hw(struct iwx_softc *, int);
294int iwx_clear_persistence_bit(struct iwx_softc *);
295int iwx_start_hw(struct iwx_softc *);
296void iwx_stop_device(struct iwx_softc *);
297void iwx_nic_config(struct iwx_softc *);
298int iwx_nic_rx_init(struct iwx_softc *);
299int iwx_nic_init(struct iwx_softc *);
300int iwx_enable_txq(struct iwx_softc *, int, int, int, int);
301int iwx_disable_txq(struct iwx_softc *sc, int, int, uint8_t);
302void iwx_post_alive(struct iwx_softc *);
303int iwx_schedule_session_protection(struct iwx_softc *, struct iwx_node *,
304 uint32_t);
305void iwx_init_channel_map(struct iwx_softc *, uint16_t *, uint32_t *, int);
306void iwx_setup_ht_rates(struct iwx_softc *);
307int iwx_mimo_enabled(struct iwx_softc *);
308void iwx_mac_ctxt_task(void *);
309void iwx_phy_ctxt_task(void *);
310void iwx_updatechan(struct ieee80211com *);
311void iwx_updateprot(struct ieee80211com *);
312void iwx_updateslot(struct ieee80211com *);
313void iwx_updateedca(struct ieee80211com *);
314void iwx_init_reorder_buffer(struct iwx_reorder_buffer *, uint16_t,
315 uint16_t);
316void iwx_clear_reorder_buffer(struct iwx_softc *, struct iwx_rxba_data *);
317int iwx_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *,
318 uint8_t);
319void iwx_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *,
320 uint8_t);
321int iwx_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
322 uint8_t);
323void iwx_rx_ba_session_expired(void *);
324void iwx_rx_bar_frame_release(struct iwx_softc *, struct iwx_rx_packet *,
325 struct mbuf_list *);
326void iwx_reorder_timer_expired(void *);
327void iwx_sta_rx_agg(struct iwx_softc *, struct ieee80211_node *, uint8_t,
328 uint16_t, uint16_t, int, int);
329void iwx_sta_tx_agg_start(struct iwx_softc *, struct ieee80211_node *,
330 uint8_t);
331void iwx_ba_task(void *);
332
333int iwx_set_mac_addr_from_csr(struct iwx_softc *, struct iwx_nvm_data *);
334int iwx_is_valid_mac_addr(const uint8_t *);
335int iwx_nvm_get(struct iwx_softc *);
336int iwx_load_firmware(struct iwx_softc *);
337int iwx_start_fw(struct iwx_softc *);
338int iwx_send_tx_ant_cfg(struct iwx_softc *, uint8_t);
339int iwx_send_phy_cfg_cmd(struct iwx_softc *);
340int iwx_load_ucode_wait_alive(struct iwx_softc *);
341int iwx_send_dqa_cmd(struct iwx_softc *);
342int iwx_run_init_mvm_ucode(struct iwx_softc *, int);
343int iwx_config_ltr(struct iwx_softc *);
344void iwx_update_rx_desc(struct iwx_softc *, struct iwx_rx_ring *, int);
345int iwx_rx_addbuf(struct iwx_softc *, int, int);
346int iwx_rxmq_get_signal_strength(struct iwx_softc *, struct iwx_rx_mpdu_desc *);
347void iwx_rx_rx_phy_cmd(struct iwx_softc *, struct iwx_rx_packet *,
348 struct iwx_rx_data *);
349int iwx_get_noise(const struct iwx_statistics_rx_non_phy *);
350int iwx_rx_hwdecrypt(struct iwx_softc *, struct mbuf *, uint32_t,
351 struct ieee80211_rxinfo *);
352int iwx_ccmp_decap(struct iwx_softc *, struct mbuf *,
353 struct ieee80211_node *, struct ieee80211_rxinfo *);
354void iwx_rx_frame(struct iwx_softc *, struct mbuf *, int, uint32_t, int, int,
355 uint32_t, struct ieee80211_rxinfo *, struct mbuf_list *);
356void iwx_clear_tx_desc(struct iwx_softc *, struct iwx_tx_ring *, int);
357void iwx_txd_done(struct iwx_softc *, struct iwx_tx_data *);
358void iwx_txq_advance(struct iwx_softc *, struct iwx_tx_ring *, int);
359void iwx_rx_tx_cmd(struct iwx_softc *, struct iwx_rx_packet *,
360 struct iwx_rx_data *);
361void iwx_clear_oactive(struct iwx_softc *, struct iwx_tx_ring *);
362void iwx_rx_bmiss(struct iwx_softc *, struct iwx_rx_packet *,
363 struct iwx_rx_data *);
364int iwx_binding_cmd(struct iwx_softc *, struct iwx_node *, uint32_t);
365int iwx_phy_ctxt_cmd_uhb_v3(struct iwx_softc *, struct iwx_phy_ctxt *, uint8_t,
366 uint8_t, uint32_t, uint8_t);
367int iwx_phy_ctxt_cmd_v3(struct iwx_softc *, struct iwx_phy_ctxt *, uint8_t,
368 uint8_t, uint32_t, uint8_t);
369int iwx_phy_ctxt_cmd(struct iwx_softc *, struct iwx_phy_ctxt *, uint8_t,
370 uint8_t, uint32_t, uint32_t, uint8_t);
371int iwx_send_cmd(struct iwx_softc *, struct iwx_host_cmd *);
372int iwx_send_cmd_pdu(struct iwx_softc *, uint32_t, uint32_t, uint16_t,
373 const void *);
374int iwx_send_cmd_status(struct iwx_softc *, struct iwx_host_cmd *,
375 uint32_t *);
376int iwx_send_cmd_pdu_status(struct iwx_softc *, uint32_t, uint16_t,
377 const void *, uint32_t *);
378void iwx_free_resp(struct iwx_softc *, struct iwx_host_cmd *);
379void iwx_cmd_done(struct iwx_softc *, int, int, int);
380const struct iwx_rate *iwx_tx_fill_cmd(struct iwx_softc *, struct iwx_node *,
381 struct ieee80211_frame *, struct iwx_tx_cmd_gen2 *);
382void iwx_tx_update_byte_tbl(struct iwx_tx_ring *, int, uint16_t, uint16_t);
383int iwx_tx(struct iwx_softc *, struct mbuf *, struct ieee80211_node *);
384int iwx_flush_sta_tids(struct iwx_softc *, int, uint16_t);
385int iwx_wait_tx_queues_empty(struct iwx_softc *);
386int iwx_drain_sta(struct iwx_softc *sc, struct iwx_node *, int);
387int iwx_flush_sta(struct iwx_softc *, struct iwx_node *);
388int iwx_beacon_filter_send_cmd(struct iwx_softc *,
389 struct iwx_beacon_filter_cmd *);
390int iwx_update_beacon_abort(struct iwx_softc *, struct iwx_node *, int);
391void iwx_power_build_cmd(struct iwx_softc *, struct iwx_node *,
392 struct iwx_mac_power_cmd *);
393int iwx_power_mac_update_mode(struct iwx_softc *, struct iwx_node *);
394int iwx_power_update_device(struct iwx_softc *);
395int iwx_enable_beacon_filter(struct iwx_softc *, struct iwx_node *);
396int iwx_disable_beacon_filter(struct iwx_softc *);
397int iwx_add_sta_cmd(struct iwx_softc *, struct iwx_node *, int);
398int iwx_rm_sta_cmd(struct iwx_softc *, struct iwx_node *);
399int iwx_rm_sta(struct iwx_softc *, struct iwx_node *);
400int iwx_fill_probe_req(struct iwx_softc *, struct iwx_scan_probe_req *);
401int iwx_config_umac_scan_reduced(struct iwx_softc *);
402uint16_t iwx_scan_umac_flags_v2(struct iwx_softc *, int);
403void iwx_scan_umac_dwell_v10(struct iwx_softc *,
404 struct iwx_scan_general_params_v10 *, int);
405void iwx_scan_umac_fill_general_p_v10(struct iwx_softc *,
406 struct iwx_scan_general_params_v10 *, uint16_t, int);
407void iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *,
408 struct iwx_scan_channel_params_v6 *, uint32_t, int, int);
409int iwx_umac_scan_v14(struct iwx_softc *, int);
410void iwx_mcc_update(struct iwx_softc *, struct iwx_mcc_chub_notif *);
411uint8_t iwx_ridx2rate(struct ieee80211_rateset *, int);
412int iwx_rval2ridx(int);
413void iwx_ack_rates(struct iwx_softc *, struct iwx_node *, int *, int *);
414void iwx_mac_ctxt_cmd_common(struct iwx_softc *, struct iwx_node *,
415 struct iwx_mac_ctx_cmd *, uint32_t);
416void iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *, struct iwx_node *,
417 struct iwx_mac_data_sta *, int);
418int iwx_mac_ctxt_cmd(struct iwx_softc *, struct iwx_node *, uint32_t, int);
419int iwx_clear_statistics(struct iwx_softc *);
420void iwx_add_task(struct iwx_softc *, struct taskq *, struct task *);
421void iwx_del_task(struct iwx_softc *, struct taskq *, struct task *);
422int iwx_scan(struct iwx_softc *);
423int iwx_bgscan(struct ieee80211com *);
424void iwx_bgscan_done(struct ieee80211com *,
425 struct ieee80211_node_switch_bss_arg *, size_t);
426void iwx_bgscan_done_task(void *);
427int iwx_umac_scan_abort(struct iwx_softc *);
428int iwx_scan_abort(struct iwx_softc *);
429int iwx_enable_mgmt_queue(struct iwx_softc *);
430int iwx_rs_rval2idx(uint8_t);
431uint16_t iwx_rs_ht_rates(struct iwx_softc *, struct ieee80211_node *, int);
432int iwx_rs_init(struct iwx_softc *, struct iwx_node *);
433int iwx_enable_data_tx_queues(struct iwx_softc *);
434int iwx_phy_ctxt_update(struct iwx_softc *, struct iwx_phy_ctxt *,
435 struct ieee80211_channel *, uint8_t, uint8_t, uint32_t, uint8_t);
436int iwx_auth(struct iwx_softc *);
437int iwx_deauth(struct iwx_softc *);
438int iwx_run(struct iwx_softc *);
439int iwx_run_stop(struct iwx_softc *);
440struct ieee80211_node *iwx_node_alloc(struct ieee80211com *);
441int iwx_set_key(struct ieee80211com *, struct ieee80211_node *,
442 struct ieee80211_key *);
443void iwx_setkey_task(void *);
444void iwx_delete_key(struct ieee80211com *,
445 struct ieee80211_node *, struct ieee80211_key *);
446int iwx_media_change(struct ifnet *);
447void iwx_newstate_task(void *);
448int iwx_newstate(struct ieee80211com *, enum ieee80211_state, int);
449void iwx_endscan(struct iwx_softc *);
450void iwx_fill_sf_command(struct iwx_softc *, struct iwx_sf_cfg_cmd *,
451 struct ieee80211_node *);
452int iwx_sf_config(struct iwx_softc *, int);
453int iwx_send_bt_init_conf(struct iwx_softc *);
454int iwx_send_soc_conf(struct iwx_softc *);
455int iwx_send_update_mcc_cmd(struct iwx_softc *, const char *);
456int iwx_send_temp_report_ths_cmd(struct iwx_softc *);
457int iwx_init_hw(struct iwx_softc *);
458int iwx_init(struct ifnet *);
459void iwx_start(struct ifnet *);
460void iwx_stop(struct ifnet *);
461void iwx_watchdog(struct ifnet *);
462int iwx_ioctl(struct ifnet *, u_long, caddr_t);
463const char *iwx_desc_lookup(uint32_t);
464void iwx_nic_error(struct iwx_softc *);
465void iwx_dump_driver_status(struct iwx_softc *);
466void iwx_nic_umac_error(struct iwx_softc *);
467int iwx_detect_duplicate(struct iwx_softc *, struct mbuf *,
468 struct iwx_rx_mpdu_desc *, struct ieee80211_rxinfo *);
469int iwx_is_sn_less(uint16_t, uint16_t, uint16_t);
470void iwx_release_frames(struct iwx_softc *, struct ieee80211_node *,
471 struct iwx_rxba_data *, struct iwx_reorder_buffer *, uint16_t,
472 struct mbuf_list *);
473int iwx_oldsn_workaround(struct iwx_softc *, struct ieee80211_node *,
474 int, struct iwx_reorder_buffer *, uint32_t, uint32_t);
475int iwx_rx_reorder(struct iwx_softc *, struct mbuf *, int,
476 struct iwx_rx_mpdu_desc *, int, int, uint32_t,
477 struct ieee80211_rxinfo *, struct mbuf_list *);
478void iwx_rx_mpdu_mq(struct iwx_softc *, struct mbuf *, void *, size_t,
479 struct mbuf_list *);
480int iwx_rx_pkt_valid(struct iwx_rx_packet *);
481void iwx_rx_pkt(struct iwx_softc *, struct iwx_rx_data *,
482 struct mbuf_list *);
483void iwx_notif_intr(struct iwx_softc *);
484int iwx_intr(void *);
485int iwx_intr_msix(void *);
486int iwx_match(struct device *, void *, void *);
487int iwx_preinit(struct iwx_softc *);
488void iwx_attach_hook(struct device *);
489void iwx_attach(struct device *, struct device *, void *);
490void iwx_init_task(void *);
491int iwx_activate(struct device *, int);
492void iwx_resume(struct iwx_softc *);
493int iwx_wakeup(struct iwx_softc *);
494
495#if NBPFILTER > 0
496void iwx_radiotap_attach(struct iwx_softc *);
497#endif
498
499uint8_t
500iwx_lookup_cmd_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
501{
502 const struct iwx_fw_cmd_version *entry;
503 int i;
504
505 for (i = 0; i < sc->n_cmd_versions; i++) {
506 entry = &sc->cmd_versions[i];
507 if (entry->group == grp && entry->cmd == cmd)
508 return entry->cmd_ver;
509 }
510
511 return IWX_FW_CMD_VER_UNKNOWN;
512}
513
514uint8_t
515iwx_lookup_notif_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
516{
517 const struct iwx_fw_cmd_version *entry;
518 int i;
519
520 for (i = 0; i < sc->n_cmd_versions; i++) {
521 entry = &sc->cmd_versions[i];
522 if (entry->group == grp && entry->cmd == cmd)
523 return entry->notif_ver;
524 }
525
526 return IWX_FW_CMD_VER_UNKNOWN;
527}
528
529int
530iwx_is_mimo_ht_plcp(uint8_t ht_plcp)
531{
532 return (ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP &&
533 (ht_plcp & IWX_RATE_HT_MCS_NSS_MSK));
534}
535
536int
537iwx_is_mimo_mcs(int mcs)
538{
539 int ridx = iwx_mcs2ridx[mcs];
540 return iwx_is_mimo_ht_plcp(iwx_rates[ridx].ht_plcp);
541
542}
543
544int
545iwx_store_cscheme(struct iwx_softc *sc, uint8_t *data, size_t dlen)
546{
547 struct iwx_fw_cscheme_list *l = (void *)data;
548
549 if (dlen < sizeof(*l) ||
550 dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
551 return EINVAL;
552
553 /* we don't actually store anything for now, always use s/w crypto */
554
555 return 0;
556}
557
558int
559iwx_ctxt_info_alloc_dma(struct iwx_softc *sc,
560 const struct iwx_fw_onesect *sec, struct iwx_dma_info *dram)
561{
562 int err = iwx_dma_contig_alloc(sc->sc_dmat, dram, sec->fws_len, 0);
563 if (err) {
564 printf("%s: could not allocate context info DMA memory\n",
565 DEVNAME(sc));
566 return err;
567 }
568
569 memcpy(dram->vaddr, sec->fws_data, sec->fws_len);
570
571 return 0;
572}
573
574void iwx_ctxt_info_free_paging(struct iwx_softc *sc)
575{
576 struct iwx_self_init_dram *dram = &sc->init_dram;
577 int i;
578
579 if (!dram->paging)
580 return;
581
582 /* free paging */
583 for (i = 0; i < dram->paging_cnt; i++)
584 iwx_dma_contig_free(&dram->paging[i]);
585
586 free(dram->paging, M_DEVBUF, dram->paging_cnt * sizeof(*dram->paging));
587 dram->paging_cnt = 0;
588 dram->paging = NULL;
589}
590
591int
592iwx_get_num_sections(const struct iwx_fw_sects *fws, int start)
593{
594 int i = 0;
595
596 while (start < fws->fw_count &&
597 fws->fw_sect[start].fws_devoff != IWX_CPU1_CPU2_SEPARATOR_SECTION &&
598 fws->fw_sect[start].fws_devoff != IWX_PAGING_SEPARATOR_SECTION) {
599 start++;
600 i++;
601 }
602
603 return i;
604}
605
606int
607iwx_init_fw_sec(struct iwx_softc *sc, const struct iwx_fw_sects *fws,
608 struct iwx_context_info_dram *ctxt_dram)
609{
610 struct iwx_self_init_dram *dram = &sc->init_dram;
611 int i, ret, fw_cnt = 0;
612
613 KASSERT(dram->paging == NULL);
614
615 dram->lmac_cnt = iwx_get_num_sections(fws, 0);
616 /* add 1 due to separator */
617 dram->umac_cnt = iwx_get_num_sections(fws, dram->lmac_cnt + 1);
618 /* add 2 due to separators */
619 dram->paging_cnt = iwx_get_num_sections(fws,
620 dram->lmac_cnt + dram->umac_cnt + 2);
621
622 dram->fw = mallocarray(dram->umac_cnt + dram->lmac_cnt,
623 sizeof(*dram->fw), M_DEVBUF, M_ZERO | M_NOWAIT);
624 if (!dram->fw) {
625 printf("%s: could not allocate memory for firmware sections\n",
626 DEVNAME(sc));
627 return ENOMEM;
628 }
629
630 dram->paging = mallocarray(dram->paging_cnt, sizeof(*dram->paging),
631 M_DEVBUF, M_ZERO | M_NOWAIT);
632 if (!dram->paging) {
633 printf("%s: could not allocate memory for firmware paging\n",
634 DEVNAME(sc));
635 return ENOMEM;
636 }
637
638 /* initialize lmac sections */
639 for (i = 0; i < dram->lmac_cnt; i++) {
640 ret = iwx_ctxt_info_alloc_dma(sc, &fws->fw_sect[i],
641 &dram->fw[fw_cnt]);
642 if (ret)
643 return ret;
644 ctxt_dram->lmac_img[i] =
645 htole64(dram->fw[fw_cnt].paddr);
646 DPRINTF(("%s: firmware LMAC section %d at 0x%llx size %lld\n", __func__, i,
647 (unsigned long long)dram->fw[fw_cnt].paddr,
648 (unsigned long long)dram->fw[fw_cnt].size));
649 fw_cnt++;
650 }
651
652 /* initialize umac sections */
653 for (i = 0; i < dram->umac_cnt; i++) {
654 /* access FW with +1 to make up for lmac separator */
655 ret = iwx_ctxt_info_alloc_dma(sc,
656 &fws->fw_sect[fw_cnt + 1], &dram->fw[fw_cnt]);
657 if (ret)
658 return ret;
659 ctxt_dram->umac_img[i] =
660 htole64(dram->fw[fw_cnt].paddr);
661 DPRINTF(("%s: firmware UMAC section %d at 0x%llx size %lld\n", __func__, i,
662 (unsigned long long)dram->fw[fw_cnt].paddr,
663 (unsigned long long)dram->fw[fw_cnt].size));
664 fw_cnt++;
665 }
666
667 /*
668 * Initialize paging.
669 * Paging memory isn't stored in dram->fw as the umac and lmac - it is
670 * stored separately.
671 * This is since the timing of its release is different -
672 * while fw memory can be released on alive, the paging memory can be
673 * freed only when the device goes down.
674 * Given that, the logic here in accessing the fw image is a bit
675 * different - fw_cnt isn't changing so loop counter is added to it.
676 */
677 for (i = 0; i < dram->paging_cnt; i++) {
678 /* access FW with +2 to make up for lmac & umac separators */
679 int fw_idx = fw_cnt + i + 2;
680
681 ret = iwx_ctxt_info_alloc_dma(sc,
682 &fws->fw_sect[fw_idx], &dram->paging[i]);
683 if (ret)
684 return ret;
685
686 ctxt_dram->virtual_img[i] = htole64(dram->paging[i].paddr);
687 DPRINTF(("%s: firmware paging section %d at 0x%llx size %lld\n", __func__, i,
688 (unsigned long long)dram->paging[i].paddr,
689 (unsigned long long)dram->paging[i].size));
690 }
691
692 return 0;
693}
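A worked example of the section layout assumed above (illustrative counts, not
taken from a real image): with lmac_cnt = 1, umac_cnt = 2 and paging_cnt = 3,
fws->fw_sect holds the lmac image at index 0, a separator at 1, umac images at
2-3, another separator at 4, and paging images at 5-7. This is why the umac
loop reads fw_sect[fw_cnt + 1] and the paging loop computes
fw_idx = fw_cnt + i + 2.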
694
695void
696iwx_fw_version_str(char *buf, size_t bufsize,
697 uint32_t major, uint32_t minor, uint32_t api)
698{
699 /*
700 * Starting with major version 35 the Linux driver prints the minor
701 * version in hexadecimal.
702 */
703 if (major >= 35)
704 snprintf(buf, bufsize, "%u.%08x.%u", major, minor, api);
705 else
706 snprintf(buf, bufsize, "%u.%u.%u", major, minor, api);
707}
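For instance, given the two formats above, major 46, minor 0x3e1f, api 0
renders as "46.00003e1f.0", while a pre-35 version such as major 34, minor 3,
api 1 renders as "34.3.1" (example values chosen for illustration).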
708
709int
710iwx_alloc_fw_monitor_block(struct iwx_softc *sc, uint8_t max_power,
711 uint8_t min_power)
712{
713 struct iwx_dma_info *fw_mon = &sc->fw_mon;
714 uint32_t size = 0;
715 uint8_t power;
716 int err;
717
718 if (fw_mon->size)
719 return 0;
720
721 for (power = max_power; power >= min_power; power--) {
722 size = (1 << power);
723
724 err = iwx_dma_contig_alloc(sc->sc_dmat, fw_mon, size, 0);
725 if (err)
726 continue;
727
728 DPRINTF(("%s: allocated 0x%08x bytes for firmware monitor.\n",
729 DEVNAME(sc), size));
730 break;
731 }
732
733 if (err) {
734 fw_mon->size = 0;
735 return err;
736 }
737
738 if (power != max_power)
739 DPRINTF(("%s: Sorry - debug buffer is only %luK while you requested %luK\n",
740 DEVNAME(sc), (unsigned long)(1 << (power - 10)),
741 (unsigned long)(1 << (max_power - 10))));
742
743 return 0;
744}
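Here power is a size exponent: the loop above tries progressively smaller
contiguous DMA buffers, from 1 << max_power bytes down to 1 << min_power,
halving on each failed allocation. With the defaults used by
iwx_alloc_fw_monitor() below (26 and 11) that means starting at 64 MB and
giving up below 2 KB; the fallback message reports sizes in KB via
1 << (power - 10).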
745
746int
747iwx_alloc_fw_monitor(struct iwx_softc *sc, uint8_t max_power)
748{
749 if (!max_power) {
750 /* default max_power is maximum */
751 max_power = 26;
752 } else {
753 max_power += 11;
754 }
755
756 if (max_power > 26) {
757 DPRINTF(("%s: External buffer size for monitor is too big %d, "
758 "check the FW TLV\n", DEVNAME(sc), max_power));
759 return 0;
760 }
761
762 if (sc->fw_mon.size)
763 return 0;
764
765 return iwx_alloc_fw_monitor_block(sc, max_power, 11);
766}
767
768int
769iwx_apply_debug_destination(struct iwx_softc *sc)
770{
771 struct iwx_fw_dbg_dest_tlv_v1 *dest_v1;
772 int i, err;
773 uint8_t mon_mode, size_power, base_shift, end_shift;
774 uint32_t base_reg, end_reg;
775
776 dest_v1 = sc->sc_fw.dbg_dest_tlv_v1;
777 mon_mode = dest_v1->monitor_mode;
778 size_power = dest_v1->size_power;
779 base_reg = le32toh(dest_v1->base_reg);
780 end_reg = le32toh(dest_v1->end_reg);
781 base_shift = dest_v1->base_shift;
782 end_shift = dest_v1->end_shift;
783
784 DPRINTF(("%s: applying debug destination %d\n", DEVNAME(sc), mon_mode));
785
786 if (mon_mode == EXTERNAL_MODE) {
787 err = iwx_alloc_fw_monitor(sc, size_power);
788 if (err)
789 return err;
790 }
791
792 if (!iwx_nic_lock(sc))
793 return EBUSY;
794
795 for (i = 0; i < sc->sc_fw.n_dest_reg; i++) {
796 uint32_t addr, val;
797 uint8_t op;
798
799 addr = le32toh(dest_v1->reg_ops[i].addr);
800 val = le32toh(dest_v1->reg_ops[i].val);
801 op = dest_v1->reg_ops[i].op;
802
803 DPRINTF(("%s: op=%u addr=%u val=%u\n", __func__, op, addr, val));
804 switch (op) {
805 case CSR_ASSIGN:
806 IWX_WRITE(sc, addr, val);
807 break;
808 case CSR_SETBIT:
809 IWX_SETBITS(sc, addr, (1 << val));
810 break;
811 case CSR_CLEARBIT:
812 IWX_CLRBITS(sc, addr, (1 << val));
813 break;
814 case PRPH_ASSIGN:
815 iwx_write_prph(sc, addr, val);
816 break;
817 case PRPH_SETBIT:
818 err = iwx_set_bits_prph(sc, addr, (1 << val));
819 if (err)
820 return err;
821 break;
822 case PRPH_CLEARBIT:
823 err = iwx_clear_bits_prph(sc, addr, (1 << val));
824 if (err)
825 return err;
826 break;
827 case PRPH_BLOCKBIT:
828 if (iwx_read_prph(sc, addr) & (1 << val))
829 goto monitor;
830 break;
831 default:
832 DPRINTF(("%s: FW debug - unknown OP %d\n",
833 DEVNAME(sc), op));
834 break;
835 }
836 }
837
838monitor:
839 if (mon_mode == EXTERNAL_MODE && sc->fw_mon.size) {
840 iwx_write_prph(sc, le32toh(base_reg),
841 sc->fw_mon.paddr >> base_shift);
842 iwx_write_prph(sc, end_reg,
843 (sc->fw_mon.paddr + sc->fw_mon.size - 256)
844 >> end_shift);
845 }
846
847 iwx_nic_unlock(sc);
848 return 0;
849}
850
851int
852iwx_ctxt_info_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
853{
854 struct iwx_context_info *ctxt_info;
855 struct iwx_context_info_rbd_cfg *rx_cfg;
856 uint32_t control_flags = 0, rb_size;
857 uint64_t paddr;
858 int err;
859
860 ctxt_info = sc->ctxt_info_dma.vaddr;
861
862 ctxt_info->version.version = 0;
863 ctxt_info->version.mac_id =
864 htole16((uint16_t)IWX_READ(sc, IWX_CSR_HW_REV));
865 /* size is in DWs */
866 ctxt_info->version.size = htole16(sizeof(*ctxt_info) / 4);
867
868 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_22560)
869 rb_size = IWX_CTXT_INFO_RB_SIZE_2K;
870 else
871 rb_size = IWX_CTXT_INFO_RB_SIZE_4K;
872
873 KASSERT(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) < 0xF);
874 control_flags = IWX_CTXT_INFO_TFD_FORMAT_LONG |
875 (IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) <<
876 IWX_CTXT_INFO_RB_CB_SIZE_POS) |
877 (rb_size << IWX_CTXT_INFO_RB_SIZE_POS);
878 ctxt_info->control.control_flags = htole32(control_flags);
879
880 /* initialize RX default queue */
881 rx_cfg = &ctxt_info->rbd_cfg;
882 rx_cfg->free_rbd_addr = htole64(sc->rxq.free_desc_dma.paddr);
883 rx_cfg->used_rbd_addr = htole64(sc->rxq.used_desc_dma.paddr);
884 rx_cfg->status_wr_ptr = htole64(sc->rxq.stat_dma.paddr);
885
886 /* initialize TX command queue */
887 ctxt_info->hcmd_cfg.cmd_queue_addr =
888 htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
889 ctxt_info->hcmd_cfg.cmd_queue_size =
890 IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);
891
892 /* allocate ucode sections in dram and set addresses */
893 err = iwx_init_fw_sec(sc, fws, &ctxt_info->dram);
894 if (err) {
895 iwx_ctxt_info_free_fw_img(sc);
896 return err;
897 }
898
899 /* Configure debug, if exists */
900 if (sc->sc_fw.dbg_dest_tlv_v1) {
901 err = iwx_apply_debug_destination(sc);
902 if (err) {
903 iwx_ctxt_info_free_fw_img(sc);
904 return err;
905 }
906 }
907
908 /*
909 * Write the context info DMA base address. The device expects a
910 * 64-bit address but a simple bus_space_write_8 to this register
911 * won't work on some devices, such as the AX201.
912 */
913 paddr = sc->ctxt_info_dma.paddr;
914 IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA, paddr & 0xffffffff);
915 IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA + 4, paddr >> 32);
916
917 /* kick FW self load */
918 if (!iwx_nic_lock(sc)) {
919 iwx_ctxt_info_free_fw_img(sc);
920 return EBUSY;
921 }
922 iwx_write_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
923 iwx_nic_unlock(sc);
924
925 /* Context info will be released upon alive or failure to get one */
926
927 return 0;
928}
929
930void
931iwx_ctxt_info_free_fw_img(struct iwx_softc *sc)
932{
933 struct iwx_self_init_dram *dram = &sc->init_dram;
934 int i;
935
936 if (!dram->fw)
937 return;
938
939 for (i = 0; i < dram->lmac_cnt + dram->umac_cnt; i++)
940 iwx_dma_contig_free(&dram->fw[i]);
941
942 free(dram->fw, M_DEVBUF,
943 (dram->lmac_cnt + dram->umac_cnt) * sizeof(*dram->fw));
944 dram->lmac_cnt = 0;
945 dram->umac_cnt = 0;
946 dram->fw = NULL;
947}
948
949int
950iwx_firmware_store_section(struct iwx_softc *sc, enum iwx_ucode_type type,
951 uint8_t *data, size_t dlen)
952{
953 struct iwx_fw_sects *fws;
954 struct iwx_fw_onesect *fwone;
955
956 if (type >= IWX_UCODE_TYPE_MAX)
957 return EINVAL;
958 if (dlen < sizeof(uint32_t))
959 return EINVAL;
960
961 fws = &sc->sc_fw.fw_sects[type];
962 DPRINTF(("%s: ucode type %d section %d\n", DEVNAME(sc), type, fws->fw_count));
963 if (fws->fw_count >= IWX_UCODE_SECT_MAX)
964 return EINVAL;
965
966 fwone = &fws->fw_sect[fws->fw_count];
967
968 /* first 32bit are device load offset */
969 memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
970
971 /* rest is data */
972 fwone->fws_data = data + sizeof(uint32_t);
973 fwone->fws_len = dlen - sizeof(uint32_t);
974
975 fws->fw_count++;
976 fws->fw_totlen += fwone->fws_len;
977
978 return 0;
979}
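As the comments above note, each firmware-section TLV payload is a 4-byte
little-endian device load offset followed by the image bytes; for example
(illustrative size), a 1028-byte section payload yields fws_devoff from bytes
0-3 and fws_len = 1024 bytes of section data.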
980
981#define IWX_DEFAULT_SCAN_CHANNELS 40
982/* Newer firmware might support more channels. Raise this value if needed. */
983#define IWX_MAX_SCAN_CHANNELS 67 /* as of iwx-cc-a0-62 firmware */
984
985struct iwx_tlv_calib_data {
986 uint32_t ucode_type;
987 struct iwx_tlv_calib_ctrl calib;
988} __packed;
989
990int
991iwx_set_default_calib(struct iwx_softc *sc, const void *data)
992{
993 const struct iwx_tlv_calib_data *def_calib = data;
994 uint32_t ucode_type = le32toh(def_calib->ucode_type);
995
996 if (ucode_type >= IWX_UCODE_TYPE_MAX)
997 return EINVAL;
998
999 sc->sc_default_calib[ucode_type].flow_trigger =
1000 def_calib->calib.flow_trigger;
1001 sc->sc_default_calib[ucode_type].event_trigger =
1002 def_calib->calib.event_trigger;
1003
1004 return 0;
1005}
1006
1007void
1008iwx_fw_info_free(struct iwx_fw_info *fw)
1009{
1010 free(fw->fw_rawdata, M_DEVBUF, fw->fw_rawsize);
1011 fw->fw_rawdata = NULL;
1012 fw->fw_rawsize = 0;
1013 /* don't touch fw->fw_status */
1014 memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
1015}
1016
1017#define IWX_FW_ADDR_CACHE_CONTROL 0xC0000000
1018
1019int
1020iwx_read_firmware(struct iwx_softc *sc)
1021{
1022 struct iwx_fw_info *fw = &sc->sc_fw;
1023 struct iwx_tlv_ucode_header *uhdr;
1024 struct iwx_ucode_tlv tlv;
1025 uint32_t tlv_type;
1026 uint8_t *data;
1027 int err;
1028 size_t len;
1029
1030 if (fw->fw_status == IWX_FW_STATUS_DONE)
1031 return 0;
1032
1033 while (fw->fw_status == IWX_FW_STATUS_INPROGRESS)
1034 tsleep_nsec(&sc->sc_fw, 0, "iwxfwp", INFSLP);
1035 fw->fw_status = IWX_FW_STATUS_INPROGRESS;
1036
1037 if (fw->fw_rawdata != NULL)
1038 iwx_fw_info_free(fw);
1039
1040 err = loadfirmware(sc->sc_fwname,
1041 (u_char **)&fw->fw_rawdata, &fw->fw_rawsize);
1042 if (err) {
1043 printf("%s: could not read firmware %s (error %d)\n",
1044 DEVNAME(sc), sc->sc_fwname, err);
1045 goto out;
1046 }
1047
1048 sc->sc_capaflags = 0;
1049 sc->sc_capa_n_scan_channels = IWX_DEFAULT_SCAN_CHANNELS;
1050 memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
1051 memset(sc->sc_ucode_api, 0, sizeof(sc->sc_ucode_api));
1052 sc->n_cmd_versions = 0;
1053
1054 uhdr = (void *)fw->fw_rawdata;
1055 if (*(uint32_t *)fw->fw_rawdata != 0
1056 || le32toh(uhdr->magic) != IWX_TLV_UCODE_MAGIC) {
1057 printf("%s: invalid firmware %s\n",
1058 DEVNAME(sc), sc->sc_fwname);
1059 err = EINVAL;
1060 goto out;
1061 }
1062
1063 iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
1064 IWX_UCODE_MAJOR(le32toh(uhdr->ver)),
1065 IWX_UCODE_MINOR(le32toh(uhdr->ver)),
1066 IWX_UCODE_API(le32toh(uhdr->ver)));
1067
1068 data = uhdr->data;
1069 len = fw->fw_rawsize - sizeof(*uhdr);
1070
1071 while (len >= sizeof(tlv)) {
1072 size_t tlv_len;
1073 void *tlv_data;
1074
1075 memcpy(&tlv, data, sizeof(tlv));
1076 tlv_len = le32toh(tlv.length);
1077 tlv_type = le32toh(tlv.type);
1078
1079 len -= sizeof(tlv);
1080 data += sizeof(tlv);
1081 tlv_data = data;
1082
1083 if (len < tlv_len) {
1084 printf("%s: firmware too short: %zu bytes\n",
1085 DEVNAME(sc), len);
1086 err = EINVAL;
1087 goto parse_out;
1088 }
1089
1090 switch (tlv_type) {
1091 case IWX_UCODE_TLV_PROBE_MAX_LEN:
1092 if (tlv_len < sizeof(uint32_t)) {
1093 err = EINVAL;
1094 goto parse_out;
1095 }
1096 sc->sc_capa_max_probe_len
1097 = le32toh(*(uint32_t *)tlv_data);
1098 if (sc->sc_capa_max_probe_len >
1099 IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
1100 err = EINVAL;
1101 goto parse_out;
1102 }
1103 break;
1104 case IWX_UCODE_TLV_PAN:
1105 if (tlv_len) {
1106 err = EINVAL;
1107 goto parse_out;
1108 }
1109 sc->sc_capaflags |= IWX_UCODE_TLV_FLAGS_PAN;
1110 break;
1111 case IWX_UCODE_TLV_FLAGS:
1112 if (tlv_len < sizeof(uint32_t)) {
1113 err = EINVAL;
1114 goto parse_out;
1115 }
1116 /*
1117 * Apparently there can be many flags, but Linux driver
1118 * parses only the first one, and so do we.
1119 *
1120 * XXX: why does this override IWX_UCODE_TLV_PAN?
1121 * Intentional or a bug? Observations from
1122 * current firmware file:
1123 * 1) TLV_PAN is parsed first
1124 * 2) TLV_FLAGS contains TLV_FLAGS_PAN
1125 * ==> this resets TLV_PAN to itself... hnnnk
1126 */
1127 sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
1128 break;
1129 case IWX_UCODE_TLV_CSCHEME:
1130 err = iwx_store_cscheme(sc, tlv_data, tlv_len);
1131 if (err)
1132 goto parse_out;
1133 break;
1134 case IWX_UCODE_TLV_NUM_OF_CPU: {
1135 uint32_t num_cpu;
1136 if (tlv_len != sizeof(uint32_t)) {
1137 err = EINVAL;
1138 goto parse_out;
1139 }
1140 num_cpu = le32toh(*(uint32_t *)tlv_data);
1141 if (num_cpu < 1 || num_cpu > 2) {
1142 err = EINVAL;
1143 goto parse_out;
1144 }
1145 break;
1146 }
1147 case IWX_UCODE_TLV_SEC_RT:
1148 err = iwx_firmware_store_section(sc,
1149 IWX_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
1150 if (err)
1151 goto parse_out;
1152 break;
1153 case IWX_UCODE_TLV_SEC_INIT:
1154 err = iwx_firmware_store_section(sc,
1155 IWX_UCODE_TYPE_INIT, tlv_data, tlv_len);
1156 if (err)
1157 goto parse_out;
1158 break;
1159 case IWX_UCODE_TLV_SEC_WOWLAN:
1160 err = iwx_firmware_store_section(sc,
1161 IWX_UCODE_TYPE_WOW, tlv_data, tlv_len);
1162 if (err)
1163 goto parse_out;
1164 break;
1165 case IWX_UCODE_TLV_DEF_CALIB:
1166 if (tlv_len != sizeof(struct iwx_tlv_calib_data)) {
1167 err = EINVAL;
1168 goto parse_out;
1169 }
1170 err = iwx_set_default_calib(sc, tlv_data);
1171 if (err)
1172 goto parse_out;
1173 break;
1174 case IWX_UCODE_TLV_PHY_SKU:
1175 if (tlv_len != sizeof(uint32_t)) {
1176 err = EINVAL;
1177 goto parse_out;
1178 }
1179 sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
1180 break;
1181
1182 case IWX_UCODE_TLV_API_CHANGES_SET: {
1183 struct iwx_ucode_api *api;
1184 int idx, i;
1185 if (tlv_len != sizeof(*api)) {
1186 err = EINVAL;
1187 goto parse_out;
1188 }
1189 api = (struct iwx_ucode_api *)tlv_data;
1190 idx = le32toh(api->api_index);
1191 if (idx >= howmany(IWX_NUM_UCODE_TLV_API, 32)) {
1192 err = EINVAL;
1193 goto parse_out;
1194 }
1195 for (i = 0; i < 32; i++) {
1196 if ((le32toh(api->api_flags) & (1 << i)) == 0)
1197 continue;
1198 setbit(sc->sc_ucode_api, i + (32 * idx));
1199 }
1200 break;
1201 }
1202
1203 case IWX_UCODE_TLV_ENABLED_CAPABILITIES: {
1204 struct iwx_ucode_capa *capa;
1205 int idx, i;
1206 if (tlv_len != sizeof(*capa)) {
1207 err = EINVAL;
1208 goto parse_out;
1209 }
1210 capa = (struct iwx_ucode_capa *)tlv_data;
1211 idx = le32toh(capa->api_index);
1212 if (idx >= howmany(IWX_NUM_UCODE_TLV_CAPA, 32)) {
1213 goto parse_out;
1214 }
1215 for (i = 0; i < 32; i++) {
1216 if ((le32toh(capa->api_capa) & (1 << i)) == 0)
1217 continue;
1218 setbit(sc->sc_enabled_capa, i + (32 * idx));
1219 }
1220 break;
1221 }
1222
1223 case IWX_UCODE_TLV_SDIO_ADMA_ADDR:
1224 case IWX_UCODE_TLV_FW_GSCAN_CAPA:
1225 /* ignore, not used by current driver */
1226 break;
1227
1228 case IWX_UCODE_TLV_SEC_RT_USNIFFER:
1229 err = iwx_firmware_store_section(sc,
1230 IWX_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
1231 tlv_len);
1232 if (err)
1233 goto parse_out;
1234 break;
1235
1236 case IWX_UCODE_TLV_PAGING:
1237 if (tlv_len != sizeof(uint32_t)) {
1238 err = EINVAL;
1239 goto parse_out;
1240 }
1241 break;
1242
1243 case IWX_UCODE_TLV_N_SCAN_CHANNELS:
1244 if (tlv_len != sizeof(uint32_t)) {
1245 err = EINVAL;
1246 goto parse_out;
1247 }
1248 sc->sc_capa_n_scan_channels =
1249 le32toh(*(uint32_t *)tlv_data);
1250 if (sc->sc_capa_n_scan_channels > IWX_MAX_SCAN_CHANNELS) {
1251 err = ERANGE;
1252 goto parse_out;
1253 }
1254 break;
1255
1256 case IWX_UCODE_TLV_FW_VERSION:
1257 if (tlv_len != sizeof(uint32_t) * 3) {
1258 err = EINVAL;
1259 goto parse_out;
1260 }
1261
1262 iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
1263 le32toh(((uint32_t *)tlv_data)[0]),
1264 le32toh(((uint32_t *)tlv_data)[1]),
1265 le32toh(((uint32_t *)tlv_data)[2]));
1266 break;
1267
1268 case IWX_UCODE_TLV_FW_DBG_DEST: {
1269 struct iwx_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;
1270
1271 fw->dbg_dest_ver = (uint8_t *)tlv_data;
1272 if (*fw->dbg_dest_ver != 0) {
1273 err = EINVAL;
1274 goto parse_out;
1275 }
1276
1277 if (fw->dbg_dest_tlv_init)
1278 break;
1279 fw->dbg_dest_tlv_init = true;
1280
1281 dest_v1 = (void *)tlv_data;
1282 fw->dbg_dest_tlv_v1 = dest_v1;
1283 fw->n_dest_reg = tlv_len -
1284 offsetof(struct iwx_fw_dbg_dest_tlv_v1, reg_ops);
1285 fw->n_dest_reg /= sizeof(dest_v1->reg_ops[0]);
1286 DPRINTF(("%s: found debug dest; n_dest_reg=%d\n", __func__, fw->n_dest_reg));
1287 break;
1288 }
1289
1290 case IWX_UCODE_TLV_FW_DBG_CONF: {
1291 struct iwx_fw_dbg_conf_tlv *conf = (void *)tlv_data;
1292
1293 if (!fw->dbg_dest_tlv_init ||
1294 conf->id >= nitems(fw->dbg_conf_tlv) ||
1295 fw->dbg_conf_tlv[conf->id] != NULL)
1296 break;
1297
1298 DPRINTF(("Found debug configuration: %d\n", conf->id));
1299 fw->dbg_conf_tlv[conf->id] = conf;
1300 fw->dbg_conf_tlv_len[conf->id] = tlv_len;
1301 break;
1302 }
1303
1304 case IWX_UCODE_TLV_UMAC_DEBUG_ADDRS: {
1305 struct iwx_umac_debug_addrs *dbg_ptrs =
1306 (void *)tlv_data;
1307
1308 if (tlv_len != sizeof(*dbg_ptrs)) {
1309 err = EINVAL;
1310 goto parse_out;
1311 }
1312 if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
1313 break;
1314 sc->sc_uc.uc_umac_error_event_table =
1315 le32toh(dbg_ptrs->error_info_addr) &
1316 ~IWX_FW_ADDR_CACHE_CONTROL;
1317 sc->sc_uc.error_event_table_tlv_status |=
1318 IWX_ERROR_EVENT_TABLE_UMAC;
1319 break;
1320 }
1321
1322 case IWX_UCODE_TLV_LMAC_DEBUG_ADDRS: {
1323 struct iwx_lmac_debug_addrs *dbg_ptrs =
1324 (void *)tlv_data;
1325
1326 if (tlv_len != sizeof(*dbg_ptrs)) {
1327 err = EINVAL;
1328 goto parse_out;
1329 }
1330 if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
1331 break;
1332 sc->sc_uc.uc_lmac_error_event_table[0] =
1333 le32toh(dbg_ptrs->error_event_table_ptr) &
1334 ~IWX_FW_ADDR_CACHE_CONTROL;
1335 sc->sc_uc.error_event_table_tlv_status |=
1336 IWX_ERROR_EVENT_TABLE_LMAC1;
1337 break;
1338 }
1339
1340 case IWX_UCODE_TLV_FW_MEM_SEG:
1341 break;
1342
1343 case IWX_UCODE_TLV_CMD_VERSIONS:
1344 if (tlv_len % sizeof(struct iwx_fw_cmd_version)) {
1345 tlv_len /= sizeof(struct iwx_fw_cmd_version);
1346 tlv_len *= sizeof(struct iwx_fw_cmd_version);
1347 }
1348 if (sc->n_cmd_versions != 0) {
1349 err = EINVAL;
1350 goto parse_out;
1351 }
1352 if (tlv_len > sizeof(sc->cmd_versions)) {
1353 err = EINVAL;
1354 goto parse_out;
1355 }
1356 memcpy(&sc->cmd_versions[0], tlv_data, tlv_len);
1357 sc->n_cmd_versions = tlv_len / sizeof(struct iwx_fw_cmd_version);
1358 break;
1359
1360 case IWX_UCODE_TLV_FW_RECOVERY_INFO:
1361 break;
1362
1363 case IWX_UCODE_TLV_FW_FSEQ_VERSION:
1364 case IWX_UCODE_TLV_PHY_INTEGRATION_VERSION:
1365 case IWX_UCODE_TLV_FW_NUM_STATIONS:
1366 break;
1367
1368 /* undocumented TLVs found in iwx-cc-a0-46 image */
1369 case 58:
1370 case 0x1000003:
1371 case 0x1000004:
1372 break;
1373
1374 /* undocumented TLVs found in iwx-cc-a0-48 image */
1375 case 0x1000000:
1376 case 0x1000002:
1377 break;
1378
1379 case IWX_UCODE_TLV_TYPE_DEBUG_INFO:
1380 case IWX_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
1381 case IWX_UCODE_TLV_TYPE_HCMD:
1382 case IWX_UCODE_TLV_TYPE_REGIONS:
1383 case IWX_UCODE_TLV_TYPE_TRIGGERS:
1384 case IWX_UCODE_TLV_TYPE_CONF_SET:
1385 break;
1386
1387 /* undocumented TLV found in iwx-cc-a0-67 image */
1388 case 0x100000b:
1389 break;
1390
1391 default:
1392 err = EINVAL;
1393 goto parse_out;
1394 }
1395
1396 len -= roundup(tlv_len, 4);
1397 data += roundup(tlv_len, 4);
1398 }
1399
1400 KASSERT(err == 0);
1401
1402 parse_out:
1403 if (err) {
1404 printf("%s: firmware parse error %d, "
1405 "section type %d\n", DEVNAME(sc), err, tlv_type);
1406 }
1407
1408 out:
1409 if (err) {
1410 fw->fw_status = IWX_FW_STATUS_NONE;
1411 if (fw->fw_rawdata != NULL)
1412 iwx_fw_info_free(fw);
1413 } else
1414 fw->fw_status = IWX_FW_STATUS_DONE;
1415 wakeup(&sc->sc_fw);
1416
1417 return err;
1418}
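A small arithmetic check of the parsing loop above: TLVs are 4-byte aligned,
so a TLV with tlv_len = 5 advances the cursor by sizeof(tlv) +
roundup(5, 4) = 8 + 8 = 16 bytes; the length field itself records the
unpadded payload size.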
1419
1420uint32_t
1421iwx_read_prph_unlocked(struct iwx_softc *sc, uint32_t addr)
1422{
1423 IWX_WRITE(sc,
1424 IWX_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
1425 IWX_BARRIER_READ_WRITE(sc);
1426 return IWX_READ(sc, IWX_HBUS_TARG_PRPH_RDAT);
1427}
1428
1429uint32_t
1430iwx_read_prph(struct iwx_softc *sc, uint32_t addr)
1431{
1432 iwx_nic_assert_locked(sc);
1433 return iwx_read_prph_unlocked(sc, addr);
1434}
1435
1436void
1437iwx_write_prph_unlocked(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1438{
1439 IWX_WRITE(sc,
1440 IWX_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
1441 IWX_BARRIER_WRITE(sc);
1442 IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WDAT, val);
1443}
1444
1445void
1446iwx_write_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1447{
1448 iwx_nic_assert_locked(sc);
1449 iwx_write_prph_unlocked(sc, addr, val);
1450}
1451
1452void
1453iwx_write_prph64(struct iwx_softc *sc, uint64_t addr, uint64_t val)
1454{
1455 iwx_write_prph(sc, (uint32_t)addr, val & 0xffffffff);
1456 iwx_write_prph(sc, (uint32_t)addr + 4, val >> 32);
1457}
1458
1459int
1460iwx_read_mem(struct iwx_softc *sc, uint32_t addr, void *buf, int dwords)
1461{
1462 int offs, err = 0;
1463 uint32_t *vals = buf;
1464
1465 if (iwx_nic_lock(sc)) {
1466 IWX_WRITE(sc, IWX_HBUS_TARG_MEM_RADDR, addr);
1467 for (offs = 0; offs < dwords; offs++)
1468 vals[offs] = le32toh(IWX_READ(sc, IWX_HBUS_TARG_MEM_RDAT));
1469 iwx_nic_unlock(sc);
1470 } else {
1471 err = EBUSY;
1472 }
1473 return err;
1474}
1475
1476int
1477iwx_write_mem(struct iwx_softc *sc, uint32_t addr, const void *buf, int dwords)
1478{
1479 int offs;
1480 const uint32_t *vals = buf;
1481
1482 if (iwx_nic_lock(sc)) {
1483 IWX_WRITE(sc, IWX_HBUS_TARG_MEM_WADDR, addr);
1484 /* WADDR auto-increments */
1485 for (offs = 0; offs < dwords; offs++) {
1486 uint32_t val = vals ? vals[offs] : 0;
1487 IWX_WRITE(sc, IWX_HBUS_TARG_MEM_WDAT, val);
1488 }
1489 iwx_nic_unlock(sc);
1490 } else {
1491 return EBUSY;
1492 }
1493 return 0;
1494}
1495
1496int
1497iwx_write_mem32(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1498{
1499 return iwx_write_mem(sc, addr, &val, 1);
1500}
1501
1502int
1503iwx_poll_bit(struct iwx_softc *sc, int reg, uint32_t bits, uint32_t mask,
1504 int timo)
1505{
1506 for (;;) {
1507 if ((IWX_READ(sc, reg) & mask) == (bits & mask)) {
1508 return 1;
1509 }
1510 if (timo < 10) {
1511 return 0;
1512 }
1513 timo -= 10;
1514 DELAY(10);
1515 }
1516}
1517
1518int
1519iwx_nic_lock(struct iwx_softc *sc)
1520{
1521 if (sc->sc_nic_locks > 0) {
1522 iwx_nic_assert_locked(sc);
1523 sc->sc_nic_locks++;
1524 return 1; /* already locked */
1525 }
1526
1527 IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
1528 IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1529
1530 DELAY(2);
1531
1532 if (iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
1533 IWX_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
1534 IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
1535 | IWX_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
1536 sc->sc_nic_locks++;
1537 return 1;
1538 }
1539
1540 printf("%s: acquiring device failed\n", DEVNAME(sc));
1541 return 0;
1542}
1543
1544void
1545iwx_nic_assert_locked(struct iwx_softc *sc)
1546{
1547 if (sc->sc_nic_locks <= 0)
1548 panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
1549}
1550
1551void
1552iwx_nic_unlock(struct iwx_softc *sc)
1553{
1554 if (sc->sc_nic_locks > 0) {
1555 if (--sc->sc_nic_locks == 0)
1556 IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
1557 IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1558 } else
1559 printf("%s: NIC already unlocked\n", DEVNAME(sc));
1560}
1561
1562int
1563iwx_set_bits_mask_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits,
1564 uint32_t mask)
1565{
1566 uint32_t val;
1567
1568 if (iwx_nic_lock(sc)) {
1569 val = iwx_read_prph(sc, reg) & mask;
1570 val |= bits;
1571 iwx_write_prph(sc, reg, val);
1572 iwx_nic_unlock(sc);
1573 return 0;
1574 }
1575 return EBUSY;
1576}
1577
1578int
1579iwx_set_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
1580{
1581 return iwx_set_bits_mask_prph(sc, reg, bits, ~0);
1582}
1583
1584int
1585iwx_clear_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
1586{
1587 return iwx_set_bits_mask_prph(sc, reg, 0, ~bits);
1588}
1589
1590int
1591iwx_dma_contig_alloc(bus_dma_tag_t tag, struct iwx_dma_info *dma,
1592 bus_size_t size, bus_size_t alignment)
1593{
1594 int nsegs, err;
1595 caddr_t va;
1596
1597 dma->tag = tag;
1598 dma->size = size;
1599
1600 err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
1601 &dma->map);
1602 if (err)
1603 goto fail;
1604
1605 err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
1606 BUS_DMA_NOWAIT);
1607 if (err)
1608 goto fail;
1609
1610 err = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
1611 BUS_DMA_NOWAIT);
1612 if (err)
1613 goto fail;
1614 dma->vaddr = va;
1615
1616 err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
1617 BUS_DMA_NOWAIT);
1618 if (err)
1619 goto fail;
1620
1621 memset(dma->vaddr, 0, size);
1622 bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
1623 dma->paddr = dma->map->dm_segs[0].ds_addr;
1624
1625 return 0;
1626
1627fail: iwx_dma_contig_free(dma);
1628 return err;
1629}
1630
1631void
1632iwx_dma_contig_free(struct iwx_dma_info *dma)
1633{
1634 if (dma->map != NULL) {
1635 if (dma->vaddr != NULL) {
1636 bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
1637 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1638 bus_dmamap_unload(dma->tag, dma->map);
1639 bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
1640 bus_dmamem_free(dma->tag, &dma->seg, 1);
1641 dma->vaddr = NULL;
1642 }
1643 bus_dmamap_destroy(dma->tag, dma->map);
1644 dma->map = NULL;
1645 }
1646}
1647
1648int
1649iwx_alloc_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1650{
1651 bus_size_t size;
1652 int i, err;
1653
1654 ring->cur = 0;
1655
1656 /* Allocate RX descriptors (256-byte aligned). */
1657 size = IWX_RX_MQ_RING_COUNT * sizeof(uint64_t);
1658 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size, 256);
1659 if (err) {
1660 printf("%s: could not allocate RX ring DMA memory\n",
1661 DEVNAME(sc));
1662 goto fail;
1663 }
1664 ring->desc = ring->free_desc_dma.vaddr;
1665
1666 /* Allocate RX status area (16-byte aligned). */
1667 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1668 sizeof(*ring->stat), 16);
1669 if (err) {
1670 printf("%s: could not allocate RX status DMA memory\n",
1671 DEVNAME(sc));
1672 goto fail;
1673 }
1674 ring->stat = ring->stat_dma.vaddr;
1675
1676 size = IWX_RX_MQ_RING_COUNT * sizeof(uint32_t);
1677 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
1678 size, 256);
1679 if (err) {
1680 printf("%s: could not allocate RX ring DMA memory\n",
1681 DEVNAME(sc));
1682 goto fail;
1683 }
1684
1685 for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
1686 struct iwx_rx_data *data = &ring->data[i];
1687
1688 memset(data, 0, sizeof(*data));
1689 err = bus_dmamap_create(sc->sc_dmat, IWX_RBUF_SIZE, 1,
1690 IWX_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1691 &data->map);
1692 if (err) {
1693 printf("%s: could not create RX buf DMA map\n",
1694 DEVNAME(sc));
1695 goto fail;
1696 }
1697
1698 err = iwx_rx_addbuf(sc, IWX_RBUF_SIZE, i);
1699 if (err)
1700 goto fail;
1701 }
1702 return 0;
1703
1704fail: iwx_free_rx_ring(sc, ring);
1705 return err;
1706}
1707
1708void
1709iwx_disable_rx_dma(struct iwx_softc *sc)
1710{
1711 int ntries;
1712
1713 if (iwx_nic_lock(sc)) {
1714 iwx_write_prph(sc, IWX_RFH_RXF_DMA_CFG, 0);
1715 for (ntries = 0; ntries < 1000; ntries++) {
1716 if (iwx_read_prph(sc, IWX_RFH_GEN_STATUS) &
1717 IWX_RXF_DMA_IDLE)
1718 break;
1719 DELAY(10);
1720 }
1721 iwx_nic_unlock(sc);
1722 }
1723}
1724
1725void
1726iwx_reset_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1727{
1728 ring->cur = 0;
1729 bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1730 ring->stat_dma.size, BUS_DMASYNC_PREWRITE);
1731 memset(ring->stat, 0, sizeof(*ring->stat));
1732 bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1733 ring->stat_dma.size, BUS_DMASYNC_POSTWRITE);
1734
1735}
1736
1737void
1738iwx_free_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1739{
1740 int i;
1741
1742 iwx_dma_contig_free(&ring->free_desc_dma);
1743 iwx_dma_contig_free(&ring->stat_dma);
1744 iwx_dma_contig_free(&ring->used_desc_dma);
1745
1746 for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
1747 struct iwx_rx_data *data = &ring->data[i];
1748
1749 if (data->m != NULL) {
1750 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1751 data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1752 bus_dmamap_unload(sc->sc_dmat, data->map);
1753 m_freem(data->m);
1754 data->m = NULL;
1755 }
1756 if (data->map != NULL)
1757 bus_dmamap_destroy(sc->sc_dmat, data->map);
1758 }
1759}
1760
1761int
1762iwx_alloc_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring, int qid)
1763{
1764 bus_addr_t paddr;
1765 bus_size_t size;
1766 int i, err;
1767
1768 ring->qid = qid;
1769 ring->queued = 0;
1770 ring->cur = 0;
1771 ring->tail = 0;
1772
1773 /* Allocate TX descriptors (256-byte aligned). */
1774 size = IWX_TX_RING_COUNT * sizeof(struct iwx_tfh_tfd);
1775 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1776 if (err) {
1777 printf("%s: could not allocate TX ring DMA memory\n",
1778 DEVNAME(sc));
1779 goto fail;
1780 }
1781 ring->desc = ring->desc_dma.vaddr;
1782
1783 /*
1784 * The hardware supports up to 512 Tx rings which is more
1785 * than we currently need.
1786 *
1787 * In DQA mode we use 1 command queue + 1 default queue for
1788 * management, control, and non-QoS data frames.
1789 * The command queue is sc->txq[0], our default queue is sc->txq[1].
1790 *
1791 * Tx aggregation requires additional queues, one queue per TID for
1792 * which aggregation is enabled. We map TID 0-7 to sc->txq[2:9].
1793 * Firmware may assign its own internal IDs for these queues
1794 * depending on which TID gets aggregation enabled first.
1795 * The driver maintains a table mapping driver-side queue IDs
1796 * to firmware-side queue IDs.
1797 */
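 /*
  * Editorial sketch (inferred from the comment above, not part of the
  * scanned source) of the resulting driver-side layout:
  *
  *	sc->txq[0]	command queue (IWX_DQA_CMD_QUEUE)
  *	sc->txq[1]	default queue (mgmt/ctrl/non-QoS data)
  *	sc->txq[2..9]	per-TID aggregation queues for TIDs 0-7
  */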
1798
1799 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->bc_tbl,
1800 sizeof(struct iwx_agn_scd_bc_tbl), 0);
1801 if (err) {
1802 printf("%s: could not allocate byte count table DMA memory\n",
1803 DEVNAME(sc));
1804 goto fail;
1805 }
1806
1807 size = IWX_TX_RING_COUNT * sizeof(struct iwx_device_cmd);
1808 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size,
1809 IWX_FIRST_TB_SIZE_ALIGN);
1810 if (err) {
1811 printf("%s: could not allocate cmd DMA memory\n", DEVNAME(sc));
1812 goto fail;
1813 }
1814 ring->cmd = ring->cmd_dma.vaddr;
1815
1816 paddr = ring->cmd_dma.paddr;
1817 for (i = 0; i < IWX_TX_RING_COUNT; i++) {
1818 struct iwx_tx_data *data = &ring->data[i];
1819 size_t mapsize;
1820
1821 data->cmd_paddr = paddr;
1822 paddr += sizeof(struct iwx_device_cmd);
1823
1824 /* FW commands may require more mapped space than packets. */
1825 if (qid == IWX_DQA_CMD_QUEUE)
1826 mapsize = (sizeof(struct iwx_cmd_header) +
1827 IWX_MAX_CMD_PAYLOAD_SIZE);
1828 else
1829 mapsize = MCLBYTES;
1830 err = bus_dmamap_create(sc->sc_dmat, mapsize,
1831 IWX_TFH_NUM_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT,
1832 &data->map);
1833 if (err) {
1834 printf("%s: could not create TX buf DMA map\n",
1835 DEVNAME(sc));
1836 goto fail;
1837 }
1838 }
1839 KASSERT(paddr == ring->cmd_dma.paddr + size);
1840 return 0;
1841
1842fail: iwx_free_tx_ring(sc, ring);
1843 return err;
1844}
1845
1846void
1847iwx_reset_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
1848{
1849 int i;
1850
1851 for (i = 0; i < IWX_TX_RING_COUNT; i++) {
1852 struct iwx_tx_data *data = &ring->data[i];
1853
1854 if (data->m != NULL) {
1855 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1856 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1857 bus_dmamap_unload(sc->sc_dmat, data->map);
1858 m_freem(data->m);
1859 data->m = NULL;
1860 }
1861 }
1862
1863 /* Clear byte count table. */
1864 memset(ring->bc_tbl.vaddr, 0, ring->bc_tbl.size);
1865
1866 /* Clear TX descriptors. */
1867 memset(ring->desc, 0, ring->desc_dma.size);
1868 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
1869 ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
1870 sc->qfullmsk &= ~(1 << ring->qid);
1871 sc->qenablemsk &= ~(1 << ring->qid);
1872 for (i = 0; i < nitems(sc->aggqid); i++) {
1873 if (sc->aggqid[i] == ring->qid) {
1874 sc->aggqid[i] = 0;
1875 break;
1876 }
1877 }
1878 ring->queued = 0;
1879 ring->cur = 0;
1880 ring->tail = 0;
1881 ring->tid = 0;
1882}
1883
1884void
1885iwx_free_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
1886{
1887 int i;
1888
1889 iwx_dma_contig_free(&ring->desc_dma);
1890 iwx_dma_contig_free(&ring->cmd_dma);
1891 iwx_dma_contig_free(&ring->bc_tbl);
1892
1893 for (i = 0; i < IWX_TX_RING_COUNT; i++) {
1894 struct iwx_tx_data *data = &ring->data[i];
1895
1896 if (data->m != NULL) {
1897 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1898 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1899 bus_dmamap_unload(sc->sc_dmat, data->map);
1900 m_freem(data->m);
1901 data->m = NULL;
1902 }
1903 if (data->map != NULL)
1904 bus_dmamap_destroy(sc->sc_dmat, data->map);
1905 }
1906}
1907
1908void
1909iwx_enable_rfkill_int(struct iwx_softc *sc)
1910{
1911 if (!sc->sc_msix) {
1912 sc->sc_intmask = IWX_CSR_INT_BIT_RF_KILL;
1913 IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
1914 } else {
1915 IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
1916 sc->sc_fh_init_mask);
1917 IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
1918 ~IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL);
1919 sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL;
1920 }
1921
1922 IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
1923 IWX_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
1924}
1925
1926int
1927iwx_check_rfkill(struct iwx_softc *sc)
1928{
1929 uint32_t v;
1930 int rv;
1931
1932 /*
1933 * "documentation" is not really helpful here:
1934 * 27: HW_RF_KILL_SW
1935 * Indicates state of (platform's) hardware RF-Kill switch
1936 *
1937 * But apparently when it's off, it's on ...
1938 */
1939 v = IWX_READ(sc, IWX_CSR_GP_CNTRL);
1940 rv = (v & IWX_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1941 if (rv) {
1942 sc->sc_flags |= IWX_FLAG_RFKILL;
1943 } else {
1944 sc->sc_flags &= ~IWX_FLAG_RFKILL;
1945 }
1946
1947 return rv;
1948}
1949
1950void
1951iwx_enable_interrupts(struct iwx_softc *sc)
1952{
1953 if (!sc->sc_msix) {
1954 sc->sc_intmask = IWX_CSR_INI_SET_MASK;
1955 IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
1956 } else {
1957 /*
1958 * fh/hw_mask keeps all the unmasked causes.
1959 * Unlike msi, in msix cause is enabled when it is unset.
1960 */
1961 sc->sc_hw_mask = sc->sc_hw_init_mask;
1962 sc->sc_fh_mask = sc->sc_fh_init_mask;
1963 IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
1964 ~sc->sc_fh_mask);
1965 IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
1966 ~sc->sc_hw_mask);
1967 }
1968}
1969
1970void
1971iwx_enable_fwload_interrupt(struct iwx_softc *sc)
1972{
1973 if (!sc->sc_msix) {
1974 sc->sc_intmask = IWX_CSR_INT_BIT_ALIVE | IWX_CSR_INT_BIT_FH_RX;
1975 IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
1976 } else {
1977 IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
1978 ~IWX_MSIX_HW_INT_CAUSES_REG_ALIVE);
1979 sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_ALIVE;
1980 /*
1981 * Leave all the FH causes enabled to get the ALIVE
1982 * notification.
1983 */
1984 IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
1985 ~sc->sc_fh_init_mask);
1986 sc->sc_fh_mask = sc->sc_fh_init_mask;
1987 }
1988}
1989
1990void
1991iwx_restore_interrupts(struct iwx_softc *sc)
1992{
1993 IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
1994}
1995
1996void
1997iwx_disable_interrupts(struct iwx_softc *sc)
1998{
1999 if (!sc->sc_msix) {
2000 IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
2001
2002 /* acknowledge all interrupts */
2003 IWX_WRITE(sc, IWX_CSR_INT, ~0);
2004 IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
2005 } else {
2006 IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2007 sc->sc_fh_init_mask);
2008 IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2009 sc->sc_hw_init_mask);
2010 }
2011}
2012
2013void
2014iwx_ict_reset(struct iwx_softc *sc)
2015{
2016 iwx_disable_interrupts(sc);
2017
2018 memset(sc->ict_dma.vaddr, 0, IWX_ICT_SIZE);
2019 sc->ict_cur = 0;
2020
2021 /* Set physical address of ICT (4KB aligned). */
2022 IWX_WRITE(sc, IWX_CSR_DRAM_INT_TBL_REG,
2023 IWX_CSR_DRAM_INT_TBL_ENABLE
2024 | IWX_CSR_DRAM_INIT_TBL_WRAP_CHECK
2025 | IWX_CSR_DRAM_INIT_TBL_WRITE_POINTER
2026 | sc->ict_dma.paddr >> IWX_ICT_PADDR_SHIFT);
2027
2028 /* Switch to ICT interrupt mode in driver. */
2029 sc->sc_flags |= IWX_FLAG_USE_ICT;
2030
2031 IWX_WRITE(sc, IWX_CSR_INT, ~0);
2032 iwx_enable_interrupts(sc);
2033}
2034
2035#define IWX_HW_READY_TIMEOUT 50
2036int
2037iwx_set_hw_ready(struct iwx_softc *sc)
2038{
2039 int ready;
2040
2041 IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2042 IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
2043
2044 ready = iwx_poll_bit(sc, IWX_CSR_HW_IF_CONFIG_REG,
2045 IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
2046 IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
2047 IWX_HW_READY_TIMEOUT);
2048 if (ready)
2049 IWX_SETBITS(sc, IWX_CSR_MBOX_SET_REG,
2050 IWX_CSR_MBOX_SET_REG_OS_ALIVE);
2051
2052 return ready;
2053}
2054#undef IWX_HW_READY_TIMEOUT
2055
2056int
2057iwx_prepare_card_hw(struct iwx_softc *sc)
2058{
2059 int t = 0;
2060 int ntries;
2061
2062 if (iwx_set_hw_ready(sc))
2063 return 0;
2064
2065 IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2066 IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2067 DELAY(1000);
2068
2069 for (ntries = 0; ntries < 10; ntries++) {
2070 /* If HW is not ready, prepare the conditions to check again */
2071 IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2072 IWX_CSR_HW_IF_CONFIG_REG_PREPARE);
2073
2074 do {
2075 if (iwx_set_hw_ready(sc))
2076 return 0;
2077 DELAY(200);
2078 t += 200;
2079 } while (t < 150000);
2080 DELAY(25000);
2081 }
2082
2083 return ETIMEDOUT;
2084}
2085
2086int
2087iwx_force_power_gating(struct iwx_softc *sc)
2088{
2089 int err;
2090
2091 err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
2092 IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
2093 if (err)
2094 return err;
2095 DELAY(20);
2096 err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
2097 IWX_HPM_HIPM_GEN_CFG_CR_PG_EN |
2098 IWX_HPM_HIPM_GEN_CFG_CR_SLP_EN);
2099 if (err)
2100 return err;
2101 DELAY(20);
2102 err = iwx_clear_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
2103 IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
2104 return err;
2105}
2106
2107void
2108iwx_apm_config(struct iwx_softc *sc)
2109{
2110 pcireg_t lctl, cap;
2111
2112 /*
2113 * L0S states have been found to be unstable with our devices
2114 * and in newer hardware they are not officially supported at
2115 * all, so we must always set the L0S_DISABLED bit.
2116 */
2117 IWX_SETBITS(sc, IWX_CSR_GIO_REG, IWX_CSR_GIO_REG_VAL_L0S_DISABLED);
2118
2119 lctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
2120 sc->sc_cap_off + PCI_PCIE_LCSR);
2121 sc->sc_pm_support = !(lctl & PCI_PCIE_LCSR_ASPM_L0S);
2122 cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
2123 sc->sc_cap_off + PCI_PCIE_DCSR2);
2124 sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
2125 DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
2126 DEVNAME(sc),
2127 (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
2128 sc->sc_ltr_enabled ? "En" : "Dis"));
2129}
2130
2131/*
2132 * Start up NIC's basic functionality after it has been reset
2133 * e.g. after platform boot or shutdown.
2134 * NOTE: This does not load uCode nor start the embedded processor
2135 */
2136int
2137iwx_apm_init(struct iwx_softc *sc)
2138{
2139 int err = 0;
2140
2141 /*
2142 * Disable L0s without affecting L1;
2143 * don't wait for ICH L0s (ICH bug W/A)
2144 */
2145 IWX_SETBITS(sc, IWX_CSR_GIO_CHICKEN_BITS,
2146 IWX_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
2147
2148 /* Set FH wait threshold to maximum (HW error during stress W/A) */
2149 IWX_SETBITS(sc, IWX_CSR_DBG_HPET_MEM_REG, IWX_CSR_DBG_HPET_MEM_REG_VAL);
2150
2151 /*
2152 * Enable HAP INTA (interrupt from management bus) to
2153 * wake device's PCI Express link L1a -> L0s
2154 */
2155 IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2156 IWX_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
2157
2158 iwx_apm_config(sc);
2159
2160 /*
2161 * Set "initialization complete" bit to move adapter from
2162 * D0U* --> D0A* (powered-up active) state.
2163 */
2164 IWX_SETBITS(sc, IWX_CSR_GP_CNTRL, IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2165
2166 /*
2167 * Wait for clock stabilization; once stabilized, access to
2168 * device-internal resources is supported, e.g. iwx_write_prph()
2169 * and accesses to uCode SRAM.
2170 */
2171 if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
2172 IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
2173 IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
2174 printf("%s: timeout waiting for clock stabilization\n",
2175 DEVNAME(sc));
2176 err = ETIMEDOUT;
2177 goto out;
2178 }
2179 out:
2180 if (err)
2181 printf("%s: apm init error %d\n", DEVNAME(sc), err);
2182 return err;
2183}
2184
2185void
2186iwx_apm_stop(struct iwx_softc *sc)
2187{
2188 IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2189 IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2190 IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2191 IWX_CSR_HW_IF_CONFIG_REG_PREPARE |
2192 IWX_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
2193 DELAY(1000);
2194 IWX_CLRBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2195 IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2196 DELAY(5000);
2197
2198 /* stop device's busmaster DMA activity */
2199 IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_STOP_MASTER);
2200
2201 if (!iwx_poll_bit(sc, IWX_CSR_RESET,
2202 IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED,
2203 IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
2204 printf("%s: timeout waiting for master\n", DEVNAME(sc));
2205
2206 /*
2207 * Clear "initialization complete" bit to move adapter from
2208 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
2209 */
2210 IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
2211 IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2212}
2213
2214void
2215iwx_init_msix_hw(struct iwx_softc *sc)
2216{
2217 iwx_conf_msix_hw(sc, 0);
2218
2219 if (!sc->sc_msix)
2220 return;
2221
2222 sc->sc_fh_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_FH_INT_MASK_AD);
2223 sc->sc_fh_mask = sc->sc_fh_init_mask;
2224 sc->sc_hw_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_HW_INT_MASK_AD);
2225 sc->sc_hw_mask = sc->sc_hw_init_mask;
2226}
2227
2228void
2229iwx_conf_msix_hw(struct iwx_softc *sc, int stopped)
2230{
2231 int vector = 0;
2232
2233 if (!sc->sc_msix) {
2234 /* Newer chips default to MSIX. */
2235 if (!stopped && iwx_nic_lock(sc)) {
2236 iwx_write_prph(sc, IWX_UREG_CHICK,
2237 IWX_UREG_CHICK_MSI_ENABLE);
2238 iwx_nic_unlock(sc);
2239 }
2240 return;
2241 }
2242
2243 if (!stopped && iwx_nic_lock(sc)) {
2244 iwx_write_prph(sc, IWX_UREG_CHICK, IWX_UREG_CHICK_MSIX_ENABLE);
2245 iwx_nic_unlock(sc);
2246 }
2247
2248 /* Disable all interrupts */
2249 IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, ~0);
2250 IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, ~0);
2251
2252 /* Map fallback-queue (command/mgmt) to a single vector */
2253 IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(0),
2254 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2255 /* Map RSS queue (data) to the same vector */
2256 IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(1),
2257 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2258
2259 /* Enable the RX queues cause interrupts */
2260 IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2261 IWX_MSIX_FH_INT_CAUSES_Q0 | IWX_MSIX_FH_INT_CAUSES_Q1);
2262
2263 /* Map non-RX causes to the same vector */
2264 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
2265 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2266 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
2267 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2268 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_S2D),
2269 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2270 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_FH_ERR),
2271 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2272 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_ALIVE),
2273 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2274 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_WAKEUP),
2275 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2276 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_IML),
2277 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2278 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_CT_KILL),
2279 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2280 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RF_KILL),
2281 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2282 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_PERIODIC),
2283 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2284 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SW_ERR),
2285 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2286 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SCD),
2287 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2288 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_FH_TX),
2289 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2290 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HW_ERR),
2291 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2292 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HAP),
2293 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2294
2295 /* Enable non-RX causes interrupts */
2296 IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2297 IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
2298 IWX_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
2299 IWX_MSIX_FH_INT_CAUSES_S2D |
2300 IWX_MSIX_FH_INT_CAUSES_FH_ERR);
2301 IWX_CLRBITS(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2302 IWX_MSIX_HW_INT_CAUSES_REG_ALIVE |
2303 IWX_MSIX_HW_INT_CAUSES_REG_WAKEUP |
2304 IWX_MSIX_HW_INT_CAUSES_REG_IML |
2305 IWX_MSIX_HW_INT_CAUSES_REG_CT_KILL |
2306 IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL |
2307 IWX_MSIX_HW_INT_CAUSES_REG_PERIODIC |
2308 IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR |
2309 IWX_MSIX_HW_INT_CAUSES_REG_SCD |
2310 IWX_MSIX_HW_INT_CAUSES_REG_FH_TX |
2311 IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR |
2312 IWX_MSIX_HW_INT_CAUSES_REG_HAP);
2313}
2314
2315int
2316iwx_clear_persistence_bit(struct iwx_softc *sc)
2317{
2318 uint32_t hpm, wprot;
2319
2320 hpm = iwx_read_prph_unlocked(sc, IWX_HPM_DEBUG);
2321 if (hpm != 0xa5a5a5a0 && (hpm & IWX_PERSISTENCE_BIT)) {
2322 wprot = iwx_read_prph_unlocked(sc, IWX_PREG_PRPH_WPROT_22000);
2323 if (wprot & IWX_PREG_WFPM_ACCESS) {
2324 printf("%s: cannot clear persistence bit\n",
2325 DEVNAME(sc));
2326 return EPERM;
2327 }
2328 iwx_write_prph_unlocked(sc, IWX_HPM_DEBUG,
2329 hpm & ~IWX_PERSISTENCE_BIT);
2330 }
2331
2332 return 0;
2333}
2334
2335int
2336iwx_start_hw(struct iwx_softc *sc)
2337{
2338 int err;
2339
2340 err = iwx_prepare_card_hw(sc);
2341 if (err)
2342 return err;
2343
2344 err = iwx_clear_persistence_bit(sc);
2345 if (err)
2346 return err;
2347
2348 /* Reset the entire device */
2349 IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2350 DELAY(5000);
2351
2352 if (sc->sc_integrated) {
2353 IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
2354 IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2355 DELAY(20);
2356 if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
2357 IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
2358 IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
2359 printf("%s: timeout waiting for clock stabilization\n",
2360 DEVNAME(sc));
2361 return ETIMEDOUT;
2362 }
2363
2364 err = iwx_force_power_gating(sc);
2365 if (err)
2366 return err;
2367
2368 /* Reset the entire device */
2369 IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2370 DELAY(5000);
2371 }
2372
2373 err = iwx_apm_init(sc);
2374 if (err)
2375 return err;
2376
2377 iwx_init_msix_hw(sc);
2378
2379 iwx_enable_rfkill_int(sc);
2380 iwx_check_rfkill(sc);
2381
2382 return 0;
2383}
2384
2385void
2386iwx_stop_device(struct iwx_softc *sc)
2387{
2388 struct ieee80211com *ic = &sc->sc_ic;
2389 struct ieee80211_node *ni = ic->ic_bss;
2390 int i;
2391
2392 iwx_disable_interrupts(sc);
2393 sc->sc_flags &= ~IWX_FLAG_USE_ICT;
2394
2395 iwx_disable_rx_dma(sc);
2396 iwx_reset_rx_ring(sc, &sc->rxq);
2397 for (i = 0; i < nitems(sc->txq); i++)
2398 iwx_reset_tx_ring(sc, &sc->txq[i]);
2399 for (i = 0; i < IEEE80211_NUM_TID; i++) {
2400 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
2401 if (ba->ba_state != IEEE80211_BA_AGREED)
2402 continue;
2403 ieee80211_delba_request(ic, ni, 0, 1, i);
2404 }
2405
2406 /* Make sure (redundant) we've released our request to stay awake */
2407 IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
2408 IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2409 if (sc->sc_nic_locks > 0)
2410 printf("%s: %d active NIC locks forcefully cleared\n",
2411 DEVNAME(sc), sc->sc_nic_locks);
2412 sc->sc_nic_locks = 0;
2413
2414 /* Stop the device, and put it in low power state */
2415 iwx_apm_stop(sc);
2416
2417 /* Reset the on-board processor. */
2418 IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2419 DELAY(5000);
2420
2421 /*
2422 * Upon stop, the IVAR table gets erased, so msi-x won't
2423 * work. This causes a bug in RF-KILL flows, since the interrupt
2424 * that enables radio won't fire on the correct irq, and the
2425 * driver won't be able to handle the interrupt.
2426 * Configure the IVAR table again after reset.
2427 */
2428 iwx_conf_msix_hw(sc, 1);
2429
2430 /*
2431 * Upon stop, the APM issues an interrupt if HW RF kill is set.
2432 * Clear the interrupt again.
2433 */
2434 iwx_disable_interrupts(sc);
2435
2436 /* Even though we stop the HW we still want the RF kill interrupt. */
2437 iwx_enable_rfkill_int(sc);
2438 iwx_check_rfkill(sc);
2439
2440 iwx_prepare_card_hw(sc);
2441
2442 iwx_ctxt_info_free_paging(sc);
2443}
2444
2445void
2446iwx_nic_config(struct iwx_softc *sc)
2447{
2448 uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
2449 uint32_t mask, val, reg_val = 0;
2450
2451 radio_cfg_type = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_TYPE) >>
2452 IWX_FW_PHY_CFG_RADIO_TYPE_POS;
2453 radio_cfg_step = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_STEP) >>
2454 IWX_FW_PHY_CFG_RADIO_STEP_POS;
2455 radio_cfg_dash = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_DASH) >>
2456 IWX_FW_PHY_CFG_RADIO_DASH_POS;
2457
2458 reg_val |= IWX_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
2459 IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
2460 reg_val |= IWX_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
2461 IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
2462
2463 /* radio configuration */
2464 reg_val |= radio_cfg_type << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
2465 reg_val |= radio_cfg_step << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
2466 reg_val |= radio_cfg_dash << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
2467
2468 mask = IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
2469 IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
2470 IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
2471 IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
2472 IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
2473 IWX_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
2474 IWX_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
2475
2476 val = IWX_READ(sc, IWX_CSR_HW_IF_CONFIG_REG)(((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x000)))));
2477 val &= ~mask;
2478 val |= reg_val;
2479 IWX_WRITE(sc, IWX_CSR_HW_IF_CONFIG_REG, val)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x000))), (
(val))))
;
2480}
2481
2482int
2483iwx_nic_rx_init(struct iwx_softc *sc)
2484{
2485 IWX_WRITE_1(sc, IWX_CSR_INT_COALESCING, IWX_HOST_INT_TIMEOUT_DEF);
2486
2487 /*
2488 * We don't configure the RFH; the firmware will do that.
2489 * Rx descriptors are set when firmware sends an ALIVE interrupt.
2490 */
2491 return 0;
2492}
2493
2494int
2495iwx_nic_init(struct iwx_softc *sc)
2496{
2497 int err;
2498
2499 iwx_apm_init(sc);
2500 iwx_nic_config(sc);
2501
2502 err = iwx_nic_rx_init(sc);
2503 if (err)
2504 return err;
2505
2506 IWX_SETBITS(sc, IWX_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
2507
2508 return 0;
2509}
2510
2511/* Map a TID to an ieee80211_edca_ac category. */
2512const uint8_t iwx_tid_to_ac[IWX_MAX_TID_COUNT] = {
2513 EDCA_AC_BE,
2514 EDCA_AC_BK,
2515 EDCA_AC_BK,
2516 EDCA_AC_BE,
2517 EDCA_AC_VI,
2518 EDCA_AC_VI,
2519 EDCA_AC_VO,
2520 EDCA_AC_VO,
2521};
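/* This follows the standard 802.11 UP-to-AC mapping: TIDs 1-2 map to background, TIDs 0 and 3 to best-effort, TIDs 4-5 to video, and TIDs 6-7 to voice. */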
2522
2523/* Map ieee80211_edca_ac categories to firmware Tx FIFO. */
2524const uint8_t iwx_ac_to_tx_fifo[] = {
2525 IWX_GEN2_EDCA_TX_FIFO_BE,
2526 IWX_GEN2_EDCA_TX_FIFO_BK,
2527 IWX_GEN2_EDCA_TX_FIFO_VI,
2528 IWX_GEN2_EDCA_TX_FIFO_VO,
2529};
2530
2531int
2532iwx_enable_txq(struct iwx_softc *sc, int sta_id, int qid, int tid,
2533 int num_slots)
2534{
2535 struct iwx_tx_queue_cfg_cmd cmd;
2536 struct iwx_rx_packet *pkt;
2537 struct iwx_tx_queue_cfg_rsp *resp;
2538 struct iwx_host_cmd hcmd = {
2539 .id = IWX_SCD_QUEUE_CFG,
2540 .flags = IWX_CMD_WANT_RESP,
2541 .resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2542 };
2543 struct iwx_tx_ring *ring = &sc->txq[qid];
2544 int err, fwqid;
2545 uint32_t wr_idx;
2546 size_t resp_len;
2547
2548 iwx_reset_tx_ring(sc, ring);
2549
2550 memset(&cmd, 0, sizeof(cmd));
2551 cmd.sta_id = sta_id;
2552 cmd.tid = tid;
2553 cmd.flags = htole16(IWX_TX_QUEUE_CFG_ENABLE_QUEUE);
2554 cmd.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
2555 cmd.byte_cnt_addr = htole64(ring->bc_tbl.paddr);
2556 cmd.tfdq_addr = htole64(ring->desc_dma.paddr);
2557
2558 hcmd.data[0] = &cmd;
2559 hcmd.len[0] = sizeof(cmd);
2560
2561 err = iwx_send_cmd(sc, &hcmd);
2562 if (err)
2563 return err;
2564
2565 pkt = hcmd.resp_pkt;
2566 if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
2567 DPRINTF(("SCD_QUEUE_CFG command failed\n"));
2568 err = EIO;
2569 goto out;
2570 }
2571
2572 resp_len = iwx_rx_packet_payload_len(pkt);
2573 if (resp_len != sizeof(*resp)) {
2574 DPRINTF(("SCD_QUEUE_CFG returned %zu bytes, expected %zu bytes\n", resp_len, sizeof(*resp)));
2575 err = EIO;
2576 goto out;
2577 }
2578
2579 resp = (void *)pkt->data;
2580 fwqid = le16toh(resp->queue_number);
2581 wr_idx = le16toh(resp->write_pointer);
2582
2583 /* Unlike iwlwifi, we do not support dynamic queue ID assignment. */
2584 if (fwqid != qid) {
2585 DPRINTF(("requested qid %d but %d was assigned\n", qid, fwqid));
2586 err = EIO;
2587 goto out;
2588 }
2589
2590 if (wr_idx != ring->cur) {
2591 DPRINTF(("fw write index is %d but ring is %d\n", wr_idx, ring->cur));
2592 err = EIO;
2593 goto out;
2594 }
2595
2596 sc->qenablemsk |= (1 << qid);
2597 ring->tid = tid;
2598out:
2599 iwx_free_resp(sc, &hcmd);
2600 return err;
2601}
2602
2603int
2604iwx_disable_txq(struct iwx_softc *sc, int sta_id, int qid, uint8_t tid)
2605{
2606 struct iwx_tx_queue_cfg_cmd cmd;
2607 struct iwx_rx_packet *pkt;
2608 struct iwx_tx_queue_cfg_rsp *resp;
2609 struct iwx_host_cmd hcmd = {
2610 .id = IWX_SCD_QUEUE_CFG,
2611 .flags = IWX_CMD_WANT_RESP,
2612 .resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2613 };
2614 struct iwx_tx_ring *ring = &sc->txq[qid];
2615 int err;
2616
2617 memset(&cmd, 0, sizeof(cmd));
2618 cmd.sta_id = sta_id;
2619 cmd.tid = tid;
2620 cmd.flags = htole16(0); /* clear "queue enabled" flag */
2621 cmd.cb_size = htole32(0);
2622 cmd.byte_cnt_addr = htole64(0);
2623 cmd.tfdq_addr = htole64(0);
2624
2625 hcmd.data[0] = &cmd;
2626 hcmd.len[0] = sizeof(cmd);
2627
2628 err = iwx_send_cmd(sc, &hcmd);
2629 if (err)
2630 return err;
2631
2632 pkt = hcmd.resp_pkt;
2633 if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
2634 DPRINTF(("SCD_QUEUE_CFG command failed\n"));
2635 err = EIO;
2636 goto out;
2637 }
2638
2639 sc->qenablemsk &= ~(1 << qid);
2640 iwx_reset_tx_ring(sc, ring);
2641out:
2642 iwx_free_resp(sc, &hcmd);
2643 return err;
2644}
2645
2646void
2647iwx_post_alive(struct iwx_softc *sc)
2648{
2649 iwx_ict_reset(sc);
2650}
2651
2652/*
2653 * For the high priority TE use a time event type that has similar priority to
2654 * the FW's action scan priority.
2655 */
2656#define IWX_ROC_TE_TYPE_NORMAL IWX_TE_P2P_DEVICE_DISCOVERABLE
2657#define IWX_ROC_TE_TYPE_MGMT_TX IWX_TE_P2P_CLIENT_ASSOC
2658
2659int
2660iwx_send_time_event_cmd(struct iwx_softc *sc,
2661 const struct iwx_time_event_cmd *cmd)
2662{
2663 struct iwx_rx_packet *pkt;
2664 struct iwx_time_event_resp *resp;
2665 struct iwx_host_cmd hcmd = {
2666 .id = IWX_TIME_EVENT_CMD,
2667 .flags = IWX_CMD_WANT_RESP,
2668 .resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2669 };
2670 uint32_t resp_len;
2671 int err;
2672
2673 hcmd.data[0] = cmd;
2674 hcmd.len[0] = sizeof(*cmd);
2675 err = iwx_send_cmd(sc, &hcmd);
2676 if (err)
2677 return err;
2678
2679 pkt = hcmd.resp_pkt;
2680 if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
2681 err = EIO;
2682 goto out;
2683 }
2684
2685 resp_len = iwx_rx_packet_payload_len(pkt);
2686 if (resp_len != sizeof(*resp)) {
2687 err = EIO;
2688 goto out;
2689 }
2690
2691 resp = (void *)pkt->data;
2692 if (le32toh(resp->status) == 0)
2693 sc->sc_time_event_uid = le32toh(resp->unique_id);
2694 else
2695 err = EIO;
2696out:
2697 iwx_free_resp(sc, &hcmd);
2698 return err;
2699}
2700
2701int
2702iwx_schedule_session_protection(struct iwx_softc *sc, struct iwx_node *in,
2703 uint32_t duration)
2704{
2705 struct iwx_session_prot_cmd cmd = {
2706 .id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
2707 in->in_color)),
2708 .action = htole32(IWX_FW_CTXT_ACTION_ADD),
2709 .conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
2710 .duration_tu = htole32(duration * IEEE80211_DUR_TU),
2711 };
2712 uint32_t cmd_id;
2713
2714 cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
2715 return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
2716}
2717
2718/*
2719 * NVM read access and content parsing. We do not support
2720 * external NVM or writing NVM.
2721 */
2722
2723uint8_t
2724iwx_fw_valid_tx_ant(struct iwx_softc *sc)
2725{
2726 uint8_t tx_ant;
2727
2728 tx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_TX_CHAIN)
2729 >> IWX_FW_PHY_CFG_TX_CHAIN_POS);
2730
2731 if (sc->sc_nvm.valid_tx_ant)
2732 tx_ant &= sc->sc_nvm.valid_tx_ant;
2733
2734 return tx_ant;
2735}
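/* The firmware's advertised antenna mask is intersected with the NVM's valid-antenna mask, so only chains both layers agree on are used. */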
2736
2737uint8_t
2738iwx_fw_valid_rx_ant(struct iwx_softc *sc)
2739{
2740 uint8_t rx_ant;
2741
2742 rx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RX_CHAIN)
2743 >> IWX_FW_PHY_CFG_RX_CHAIN_POS);
2744
2745 if (sc->sc_nvm.valid_rx_ant)
2746 rx_ant &= sc->sc_nvm.valid_rx_ant;
2747
2748 return rx_ant;
2749}
2750
2751void
2752iwx_init_channel_map(struct iwx_softc *sc, uint16_t *channel_profile_v3,
2753 uint32_t *channel_profile_v4, int nchan_profile)
2754{
2755 struct ieee80211com *ic = &sc->sc_ic;
2756 struct iwx_nvm_data *data = &sc->sc_nvm;
2757 int ch_idx;
2758 struct ieee80211_channel *channel;
2759 uint32_t ch_flags;
2760 int is_5ghz;
2761 int flags, hw_value;
2762 int nchan;
2763 const uint8_t *nvm_channels;
2764
2765 if (sc->sc_uhb_supported) {
2766 nchan = nitems(iwx_nvm_channels_uhb);
2767 nvm_channels = iwx_nvm_channels_uhb;
2768 } else {
2769 nchan = nitems(iwx_nvm_channels_8000);
2770 nvm_channels = iwx_nvm_channels_8000;
2771 }
2772
2773 for (ch_idx = 0; ch_idx < nchan && ch_idx < nchan_profile; ch_idx++) {
2774 if (channel_profile_v4)
2775 ch_flags = le32_to_cpup(channel_profile_v4 + ch_idx);
2776 else
2777 ch_flags = le16_to_cpup(channel_profile_v3 + ch_idx);
2778
2779 is_5ghz = ch_idx >= IWX_NUM_2GHZ_CHANNELS;
2780 if (is_5ghz && !data->sku_cap_band_52GHz_enable)
2781 ch_flags &= ~IWX_NVM_CHANNEL_VALID;
2782
2783 hw_value = nvm_channels[ch_idx];
2784 channel = &ic->ic_channels[hw_value];
2785
2786 if (!(ch_flags & IWX_NVM_CHANNEL_VALID)) {
2787 channel->ic_freq = 0;
2788 channel->ic_flags = 0;
2789 continue;
2790 }
2791
2792 if (!is_5ghz) {
2793 flags = IEEE80211_CHAN_2GHZ;
2794 channel->ic_flags
2795 = IEEE80211_CHAN_CCK
2796 | IEEE80211_CHAN_OFDM
2797 | IEEE80211_CHAN_DYN
2798 | IEEE80211_CHAN_2GHZ;
2799 } else {
2800 flags = IEEE80211_CHAN_5GHZ;
2801 channel->ic_flags =
2802 IEEE80211_CHAN_A;
2803 }
2804 channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
2805
2806 if (!(ch_flags & IWX_NVM_CHANNEL_ACTIVE))
2807 channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
2808
2809 if (data->sku_cap_11n_enable) {
2810 channel->ic_flags |= IEEE80211_CHAN_HT;
2811 if (ch_flags & IWX_NVM_CHANNEL_40MHZ)
2812 channel->ic_flags |= IEEE80211_CHAN_40MHZ;
2813 }
2814 }
2815}
2816
2817int
2818iwx_mimo_enabled(struct iwx_softc *sc)
2819{
2820 struct ieee80211com *ic = &sc->sc_ic;
2821
2822 return !sc->sc_nvm.sku_cap_mimo_disable &&
2823 (ic->ic_userflags & IEEE80211_F_NOMIMO) == 0;
2824}
2825
2826void
2827iwx_setup_ht_rates(struct iwx_softc *sc)
2828{
2829 struct ieee80211com *ic = &sc->sc_ic;
2830 uint8_t rx_ant;
2831
2832 /* TX is supported with the same MCS as RX. */
2833 ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
2834
2835 memset(ic->ic_sup_mcs, 0, sizeof(ic->ic_sup_mcs));
2836 ic->ic_sup_mcs[0] = 0xff; /* MCS 0-7 */
2837
2838 if (!iwx_mimo_enabled(sc))
2839 return;
2840
2841 rx_ant = iwx_fw_valid_rx_ant(sc);
2842 if ((rx_ant & IWX_ANT_AB) == IWX_ANT_AB ||
2843 (rx_ant & IWX_ANT_BC) == IWX_ANT_BC)
2844 ic->ic_sup_mcs[1] = 0xff; /* MCS 8-15 */
2845}
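/* MCS 8-15 are two-spatial-stream rates, so they are only advertised when two receive chains (antenna pair A+B or B+C) are usable. */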
2846
2847void
2848iwx_init_reorder_buffer(struct iwx_reorder_buffer *reorder_buf,
2849 uint16_t ssn, uint16_t buf_size)
2850{
2851 reorder_buf->head_sn = ssn;
2852 reorder_buf->num_stored = 0;
2853 reorder_buf->buf_size = buf_size;
2854 reorder_buf->last_amsdu = 0;
2855 reorder_buf->last_sub_index = 0;
2856 reorder_buf->removed = 0;
2857 reorder_buf->valid = 0;
2858 reorder_buf->consec_oldsn_drops = 0;
2859 reorder_buf->consec_oldsn_ampdu_gp2 = 0;
2860 reorder_buf->consec_oldsn_prev_drop = 0;
2861}
2862
2863void
2864iwx_clear_reorder_buffer(struct iwx_softc *sc, struct iwx_rxba_data *rxba)
2865{
2866 int i;
2867 struct iwx_reorder_buffer *reorder_buf = &rxba->reorder_buf;
2868 struct iwx_reorder_buf_entry *entry;
2869
2870 for (i = 0; i < reorder_buf->buf_size; i++) {
2871 entry = &rxba->entries[i];
2872 ml_purge(&entry->frames);
2873 timerclear(&entry->reorder_time);
2874 }
2875
2876 reorder_buf->removed = 1;
2877 timeout_del(&reorder_buf->reorder_timer);
2878 timerclear(&rxba->last_rx);
2879 timeout_del(&rxba->session_timer);
2880 rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
2881}
2882
2883#define RX_REORDER_BUF_TIMEOUT_MQ_USEC (100000ULL)
2884
2885void
2886iwx_rx_ba_session_expired(void *arg)
2887{
2888 struct iwx_rxba_data *rxba = arg;
2889 struct iwx_softc *sc = rxba->sc;
2890 struct ieee80211com *ic = &sc->sc_ic;
2891 struct ieee80211_node *ni = ic->ic_bss;
2892 struct timeval now, timeout, expiry;
2893 int s;
2894
2895 s = splnet();
2896 if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0 &&
2897 ic->ic_state == IEEE80211_S_RUN &&
2898 rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) {
2899 getmicrouptime(&now);
2900 USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
2901 timeradd(&rxba->last_rx, &timeout, &expiry);
2902 if (timercmp(&now, &expiry, <)) {
2903 timeout_add_usec(&rxba->session_timer, rxba->timeout);
2904 } else {
2905 ic->ic_stats.is_ht_rx_ba_timeout++;
2906 ieee80211_delba_request(ic, ni,
2907 IEEE80211_REASON_TIMEOUT, 0, rxba->tid);
2908 }
2909 }
2910 splx(s);
2911}
2912
2913void
2914iwx_rx_bar_frame_release(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
2915 struct mbuf_list *ml)
2916{
2917 struct ieee80211com *ic = &sc->sc_ic;
2918 struct ieee80211_node *ni = ic->ic_bss;
2919 struct iwx_bar_frame_release *release = (void *)pkt->data;
2920 struct iwx_reorder_buffer *buf;
2921 struct iwx_rxba_data *rxba;
2922 unsigned int baid, nssn, sta_id, tid;
2923
2924 if (iwx_rx_packet_payload_len(pkt) < sizeof(*release))
2925 return;
2926
2927 baid = (le32toh(release->ba_info) & IWX_BAR_FRAME_RELEASE_BAID_MASK) >>
2928 IWX_BAR_FRAME_RELEASE_BAID_SHIFT;
2929 if (baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
2930 baid >= nitems(sc->sc_rxba_data))
2931 return;
2932
2933 rxba = &sc->sc_rxba_data[baid];
2934 if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID)
2935 return;
2936
2937 tid = le32toh(release->sta_tid) & IWX_BAR_FRAME_RELEASE_TID_MASK;
2938 sta_id = (le32toh(release->sta_tid) &
2939 IWX_BAR_FRAME_RELEASE_STA_MASK) >> IWX_BAR_FRAME_RELEASE_STA_SHIFT;
2940 if (tid != rxba->tid || rxba->sta_id != IWX_STATION_ID)
2941 return;
2942
2943 nssn = le32toh(release->ba_info) & IWX_BAR_FRAME_RELEASE_NSSN_MASK;
2944 buf = &rxba->reorder_buf;
2945 iwx_release_frames(sc, ni, rxba, buf, nssn, ml);
2946}
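/* The NSSN carried in the BAR frame is the transmitter's new window start; iwx_release_frames() flushes all buffered frames below it. */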
2947
2948void
2949iwx_reorder_timer_expired(void *arg)
2950{
2951 struct mbuf_list ml = MBUF_LIST_INITIALIZER();
2952 struct iwx_reorder_buffer *buf = arg;
2953 struct iwx_rxba_data *rxba = iwx_rxba_data_from_reorder_buf(buf);
2954 struct iwx_reorder_buf_entry *entries = &rxba->entries[0];
2955 struct iwx_softc *sc = rxba->sc;
2956 struct ieee80211com *ic = &sc->sc_ic;
2957 struct ieee80211_node *ni = ic->ic_bss;
2958 int i, s;
2959 uint16_t sn = 0, index = 0;
2960 int expired = 0;
2961 int cont = 0;
2962 struct timeval now, timeout, expiry;
2963
2964 if (!buf->num_stored || buf->removed)
2965 return;
2966
2967 s = splnet();
2968 getmicrouptime(&now);
2969 USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
2970
2971 for (i = 0; i < buf->buf_size ; i++) {
2972 index = (buf->head_sn + i) % buf->buf_size;
2973
2974 if (ml_empty(&entries[index].frames)) {
2975 /*
2976 * If there is a hole and the next frame didn't expire
2977 * we want to break and not advance SN.
2978 */
2979 cont = 0;
2980 continue;
2981 }
2982 timeradd(&entries[index].reorder_time, &timeout, &expiry);
2983 if (!cont && timercmp(&now, &expiry, <))
2984 break;
2985
2986 expired = 1;
2987 /* continue until next hole after this expired frame */
2988 cont = 1;
2989 sn = (buf->head_sn + (i + 1)) & 0xfff;
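/* 802.11 sequence numbers are 12 bits wide, hence the & 0xfff wrap. */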
2990 }
2991
2992 if (expired) {
2993 /* SN is set to the last expired frame + 1 */
2994 iwx_release_frames(sc, ni, rxba, buf, sn, &ml);
2995 if_input(&sc->sc_ic.ic_if, &ml);
2996 ic->ic_stats.is_ht_rx_ba_window_gap_timeout++;
2997 } else {
2998 /*
2999 * If no frame expired and there are stored frames, index is now
3000 * pointing to the first unexpired frame - modify reorder timeout
3001 * accordingly.
3002 */
3003 timeout_add_usec(&buf->reorder_timer,
3004 RX_REORDER_BUF_TIMEOUT_MQ_USEC);
3005 }
3006
3007 splx(s);
3008}
3009
3010#define IWX_MAX_RX_BA_SESSIONS 16
3011
3012void
3013iwx_sta_rx_agg(struct iwx_softc *sc, struct ieee80211_node *ni, uint8_t tid,
3014 uint16_t ssn, uint16_t winsize, int timeout_val, int start)
3015{
3016 struct ieee80211com *ic = &sc->sc_ic;
3017 struct iwx_add_sta_cmd cmd;
3018 struct iwx_node *in = (void *)ni;
3019 int err, s;
3020 uint32_t status;
3021 struct iwx_rxba_data *rxba = NULL;
3022 uint8_t baid = 0;
3023
3024 s = splnet();
3025
3026 if (start && sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS) {
3027 ieee80211_addba_req_refuse(ic, ni, tid);
3028 splx(s);
3029 return;
3030 }
3031
3032 memset(&cmd, 0, sizeof(cmd));
3033
3034 cmd.sta_id = IWX_STATION_ID;
3035 cmd.mac_id_n_color
3036 = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
3037 cmd.add_modify = IWX_STA_MODE_MODIFY;
3038
3039 if (start) {
3040 cmd.add_immediate_ba_tid = (uint8_t)tid;
3041 cmd.add_immediate_ba_ssn = htole16(ssn);
3042 cmd.rx_ba_window = htole16(winsize);
3043 } else {
3044 cmd.remove_immediate_ba_tid = (uint8_t)tid;
3045 }
3046 cmd.modify_mask = start ? IWX_STA_MODIFY_ADD_BA_TID :
3047 IWX_STA_MODIFY_REMOVE_BA_TID;
3048
3049 status = IWX_ADD_STA_SUCCESS;
3050 err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(cmd), &cmd,
3051 &status);
3052
3053 if (err || (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS) {
3054 if (start)
3055 ieee80211_addba_req_refuse(ic, ni, tid);
3056 splx(s);
3057 return;
3058 }
3059
3060 /* Deaggregation is done in hardware. */
3061 if (start) {
3062 if (!(status & IWX_ADD_STA_BAID_VALID_MASK)) {
3063 ieee80211_addba_req_refuse(ic, ni, tid);
3064 splx(s);
3065 return;
3066 }
3067 baid = (status & IWX_ADD_STA_BAID_MASK) >>
3068 IWX_ADD_STA_BAID_SHIFT;
3069 if (baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
3070 baid >= nitems(sc->sc_rxba_data)) {
3071 ieee80211_addba_req_refuse(ic, ni, tid);
3072 splx(s);
3073 return;
3074 }
3075 rxba = &sc->sc_rxba_data[baid];
3076 if (rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) {
3077 ieee80211_addba_req_refuse(ic, ni, tid);
3078 splx(s);
3079 return;
3080 }
3081 rxba->sta_id = IWX_STATION_ID;
3082 rxba->tid = tid;
3083 rxba->baid = baid;
3084 rxba->timeout = timeout_val;
3085 getmicrouptime(&rxba->last_rx);
3086 iwx_init_reorder_buffer(&rxba->reorder_buf, ssn,
3087 winsize);
3088 if (timeout_val != 0) {
3089 struct ieee80211_rx_ba *ba;
3090 timeout_add_usec(&rxba->session_timer,
3091 timeout_val);
3092 /* XXX disable net80211's BA timeout handler */
3093 ba = &ni->ni_rx_ba[tid];
3094 ba->ba_timeout_val = 0;
3095 }
3096 } else {
3097 int i;
3098 for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
3099 rxba = &sc->sc_rxba_data[i];
3100 if (rxba->baid ==
3101 IWX_RX_REORDER_DATA_INVALID_BAID)
3102 continue;
3103 if (rxba->tid != tid)
3104 continue;
3105 iwx_clear_reorder_buffer(sc, rxba);
3106 break;
3107 }
3108 }
3109
3110 if (start) {
3111 sc->sc_rx_ba_sessions++;
3112 ieee80211_addba_req_accept(ic, ni, tid);
3113 } else if (sc->sc_rx_ba_sessions > 0)
3114 sc->sc_rx_ba_sessions--;
3115
3116 splx(s);
3117}
3118
3119void
3120iwx_mac_ctxt_task(void *arg)
3121{
3122 struct iwx_softc *sc = arg;
3123 struct ieee80211com *ic = &sc->sc_ic;
3124 struct iwx_node *in = (void *)ic->ic_bss;
3125 int err, s = splnet();
3126
3127 if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) ||
3128 ic->ic_state != IEEE80211_S_RUN) {
3129 refcnt_rele_wake(&sc->task_refs);
3130 splx(s);
3131 return;
3132 }
3133
3134 err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
3135 if (err)
3136 printf("%s: failed to update MAC\n", DEVNAME(sc));
3137
3138 refcnt_rele_wake(&sc->task_refs);
3139 splx(s);
3140}
3141
3142void
3143iwx_phy_ctxt_task(void *arg)
3144{
3145 struct iwx_softc *sc = arg;
3146 struct ieee80211com *ic = &sc->sc_ic;
3147 struct iwx_node *in = (void *)ic->ic_bss;
3148 struct ieee80211_node *ni = &in->in_ni;
3149 uint8_t chains, sco;
3150 int err, s = splnet();
3151
3152 if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) ||
3153 ic->ic_state != IEEE80211_S_RUN ||
3154 in->in_phyctxt == NULL) {
3155 refcnt_rele_wake(&sc->task_refs);
3156 splx(s);
3157 return;
3158 }
3159
3160 chains = iwx_mimo_enabled(sc) ? 2 : 1;
3161 if (ieee80211_node_supports_ht_chan40(ni))
3162 sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
3163 else
3164 sco = IEEE80211_HTOP0_SCO_SCN;
3165 if (in->in_phyctxt->sco != sco) {
3166 err = iwx_phy_ctxt_update(sc, in->in_phyctxt,
3167 in->in_phyctxt->channel, chains, chains, 0, sco);
3168 if (err)
3169 printf("%s: failed to update PHY\n", DEVNAME(sc));
3170 }
3171
3172 refcnt_rele_wake(&sc->task_refs);
3173 splx(s);
3174}
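/* The ic_update* callbacks below merely schedule the MAC/PHY context tasks; the corresponding firmware commands are issued later from task context. */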
3175
3176void
3177iwx_updatechan(struct ieee80211com *ic)
3178{
3179 struct iwx_softc *sc = ic->ic_softc;
3180
3181 if (ic->ic_state == IEEE80211_S_RUN &&
3182 !task_pending(&sc->newstate_task))
3183 iwx_add_task(sc, systq, &sc->phy_ctxt_task);
3184}
3185
3186void
3187iwx_updateprot(struct ieee80211com *ic)
3188{
3189 struct iwx_softc *sc = ic->ic_softc;
3190
3191 if (ic->ic_state == IEEE80211_S_RUN &&
3192 !task_pending(&sc->newstate_task))
3193 iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3194}
3195
3196void
3197iwx_updateslot(struct ieee80211com *ic)
3198{
3199 struct iwx_softc *sc = ic->ic_softc;
3200
3201 if (ic->ic_state == IEEE80211_S_RUN &&
3202 !task_pending(&sc->newstate_task))
3203 iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3204}
3205
3206void
3207iwx_updateedca(struct ieee80211com *ic)
3208{
3209 struct iwx_softc *sc = ic->ic_softc;
3210
3211 if (ic->ic_state == IEEE80211_S_RUN &&
3212 !task_pending(&sc->newstate_task))
3213 iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3214}
3215
3216void
3217iwx_sta_tx_agg_start(struct iwx_softc *sc, struct ieee80211_node *ni,
3218 uint8_t tid)
3219{
3220 struct ieee80211com *ic = &sc->sc_ic;
3221 struct ieee80211_tx_ba *ba;
3222 int err, qid;
3223 struct iwx_tx_ring *ring;
3224
3225 /* Ensure we can map this TID to an aggregation queue. */
3226 if (tid >= IWX_MAX_TID_COUNT)
3227 return;
3228
3229 ba = &ni->ni_tx_ba[tid];
3230 if (ba->ba_state != IEEE80211_BA_REQUESTED)
3231 return;
3232
3233 qid = sc->aggqid[tid];
3234 if (qid == 0) {
3235 /* Firmware should pick the next unused Tx queue. */
3236 qid = fls(sc->qenablemsk);
3237 }
3238
3239 /*
3240 * Simply enable the queue.
3241 * Firmware handles Tx Ba session setup and teardown.
3242 */
3243 if ((sc->qenablemsk & (1 << qid)) == 0) {
3244 if (!iwx_nic_lock(sc)) {
3245 ieee80211_addba_resp_refuse(ic, ni, tid,
3246 IEEE80211_STATUS_UNSPECIFIED);
3247 return;
3248 }
3249 err = iwx_enable_txq(sc, IWX_STATION_ID, qid, tid,
3250 IWX_TX_RING_COUNT);
3251 iwx_nic_unlock(sc);
3252 if (err) {
3253 printf("%s: could not enable Tx queue %d "
3254 "(error %d)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), qid, err);
3255 ieee80211_addba_resp_refuse(ic, ni, tid,
3256 IEEE80211_STATUS_UNSPECIFIED);
3257 return;
3258 }
3259
3260 ba->ba_winstart = 0;
3261 } else
3262 ba->ba_winstart = ni->ni_qos_txseqs[tid];
3263
3264 ba->ba_winend = (ba->ba_winstart + ba->ba_winsize - 1) & 0xfff;
3265
3266 ring = &sc->txq[qid];
3267 ba->ba_timeout_val = 0;
3268 ieee80211_addba_resp_accept(ic, ni, tid);
3269 sc->aggqid[tid] = qid;
3270}
3271
3272void
3273iwx_ba_task(void *arg)
3274{
3275 struct iwx_softc *sc = arg;
3276 struct ieee80211com *ic = &sc->sc_ic;
3277 struct ieee80211_node *ni = ic->ic_bss;
3278 int s = splnet();
3279 int tid;
3280
3281 for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
3282 if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
3283 break;
3284 if (sc->ba_rx.start_tidmask & (1 << tid)) {
3285 struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
3286 iwx_sta_rx_agg(sc, ni, tid, ba->ba_winstart,
3287 ba->ba_winsize, ba->ba_timeout_val, 1);
3288 sc->ba_rx.start_tidmask &= ~(1 << tid);
3289 } else if (sc->ba_rx.stop_tidmask & (1 << tid)) {
3290 iwx_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0);
3291 sc->ba_rx.stop_tidmask &= ~(1 << tid);
3292 }
3293 }
3294
3295 for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
3296 if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
3297 break;
3298 if (sc->ba_tx.start_tidmask & (1 << tid)) {
3299 iwx_sta_tx_agg_start(sc, ni, tid);
3300 sc->ba_tx.start_tidmask &= ~(1 << tid);
3301 }
3302 }
3303
3304 refcnt_rele_wake(&sc->task_refs);
3305 splx(s);
3306}
3307
3308/*
3309 * This function is called by upper layer when an ADDBA request is received
3310 * from another STA and before the ADDBA response is sent.
3311 */
3312int
3313iwx_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3314 uint8_t tid)
3315{
3316 struct iwx_softc *sc = IC2IFP(ic)->if_softc;
3317
3318 if (sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS ||
3319 tid >= IWX_MAX_TID_COUNT)
3320 return ENOSPC;
3321
3322 if (sc->ba_rx.start_tidmask & (1 << tid))
3323 return EBUSY;
3324
3325 sc->ba_rx.start_tidmask |= (1 << tid);
3326 iwx_add_task(sc, systq, &sc->ba_task);
3327
3328 return EBUSY;
3329}
3330
3331/*
3332 * This function is called by upper layer on teardown of an HT-immediate
3333 * Block Ack agreement (eg. upon receipt of a DELBA frame).
3334 */
3335void
3336iwx_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
3337 uint8_t tid)
3338{
3339 struct iwx_softc *sc = IC2IFP(ic)->if_softc;
3340
3341 if (tid >= IWX_MAX_TID_COUNT || sc->ba_rx.stop_tidmask & (1 << tid))
3342 return;
3343
3344 sc->ba_rx.stop_tidmask = (1 << tid);
3345 iwx_add_task(sc, systq, &sc->ba_task);
3346}
3347
3348int
3349iwx_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3350 uint8_t tid)
3351{
3352 struct iwx_softc *sc = IC2IFP(ic)->if_softc;
3353 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
3354
3355 /*
3356 * Require a firmware version which uses an internal AUX queue.
3357 * The value of IWX_FIRST_AGG_TX_QUEUE would be incorrect otherwise.
3358 */
3359 if (sc->first_data_qid != IWX_DQA_CMD_QUEUE + 1)
3360 return ENOTSUP;
3361
3362 /* Ensure we can map this TID to an aggregation queue. */
3363 if (tid >= IWX_MAX_TID_COUNT)
3364 return EINVAL;
3365
3366 /* We only support a fixed Tx aggregation window size, for now. */
3367 if (ba->ba_winsize != IWX_FRAME_LIMIT)
3368 return ENOTSUP;
3369
3370 /* Is firmware already using an agg queue with this TID? */
3371 if (sc->aggqid[tid] != 0)
3372 return ENOSPC;
3373
3374 /* Are we already processing an ADDBA request? */
3375 if (sc->ba_tx.start_tidmask & (1 << tid))
3376 return EBUSY;
3377
3378 sc->ba_tx.start_tidmask |= (1 << tid);
3379 iwx_add_task(sc, systq, &sc->ba_task);
3380
3381 return EBUSY;
3382}
3383
3384/* Read the mac address from WFMP registers. */
3385int
3386iwx_set_mac_addr_from_csr(struct iwx_softc *sc, struct iwx_nvm_data *data)
3387{
3388 const uint8_t *hw_addr;
3389 uint32_t mac_addr0, mac_addr1;
3390
3391 if (!iwx_nic_lock(sc))
3392 return EBUSY;
3393
3394 mac_addr0 = htole32(iwx_read_prph(sc, IWX_WFMP_MAC_ADDR_0));
3395 mac_addr1 = htole32(iwx_read_prph(sc, IWX_WFMP_MAC_ADDR_1));
3396
3397 hw_addr = (const uint8_t *)&mac_addr0;
3398 data->hw_addr[0] = hw_addr[3];
3399 data->hw_addr[1] = hw_addr[2];
3400 data->hw_addr[2] = hw_addr[1];
3401 data->hw_addr[3] = hw_addr[0];
3402
3403 hw_addr = (const uint8_t *)&mac_addr1;
3404 data->hw_addr[4] = hw_addr[1];
3405 data->hw_addr[5] = hw_addr[0];
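/* The WFMP registers apparently store the address bytes in reversed order within each 32-bit word, hence the byte-swapped assembly above. */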
3406
3407 iwx_nic_unlock(sc);
3408 return 0;
3409}
3410
3411int
3412iwx_is_valid_mac_addr(const uint8_t *addr)
3413{
3414 static const uint8_t reserved_mac[] = {
3415 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3416 };
3417
3418 return (memcmp(reserved_mac, addr, ETHER_ADDR_LEN) != 0 &&
3419 memcmp(etherbroadcastaddr, addr, sizeof(etherbroadcastaddr)) != 0 &&
3420 memcmp(etheranyaddr, addr, sizeof(etheranyaddr)) != 0 &&
3421 !ETHER_IS_MULTICAST(addr));
3422}
3423
3424int
3425iwx_nvm_get(struct iwx_softc *sc)
3426{
3427 struct iwx_nvm_get_info cmd = {};
3428 struct iwx_nvm_data *nvm = &sc->sc_nvm;
3429 struct iwx_host_cmd hcmd = {
3430 .flags = IWX_CMD_WANT_RESP | IWX_CMD_SEND_IN_RFKILL,
3431 .data = { &cmd, },
3432 .len = { sizeof(cmd) },
3433 .id = IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
3434 IWX_NVM_GET_INFO)
3435 };
3436 int err;
3437 uint32_t mac_flags;
3438 /*
3439 * All the values in iwx_nvm_get_info_rsp v4 are the same as
3440 * in v3, except for the channel profile part of the
3441 * regulatory. So we can just access the new struct, with the
3442 * exception of the latter.
3443 */
3444 struct iwx_nvm_get_info_rsp *rsp;
3445 struct iwx_nvm_get_info_rsp_v3 *rsp_v3;
3446 int v4 = isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REGULATORY_NVM_INFO);
3447 size_t resp_len = v4 ? sizeof(*rsp) : sizeof(*rsp_v3);
3448
3449 hcmd.resp_pkt_len = sizeof(struct iwx_rx_packet) + resp_len;
3450 err = iwx_send_cmd(sc, &hcmd);
3451 if (err)
3452 return err;
3453
3454 if (iwx_rx_packet_payload_len(hcmd.resp_pkt) != resp_len) {
3455 err = EIO;
3456 goto out;
3457 }
3458
3459 memset(nvm, 0, sizeof(*nvm));
3460
3461 iwx_set_mac_addr_from_csr(sc, nvm);
3462 if (!iwx_is_valid_mac_addr(nvm->hw_addr)) {
3463 printf("%s: no valid mac address was found\n", DEVNAME(sc));
3464 err = EINVAL;
3465 goto out;
3466 }
3467
3468 rsp = (void *)hcmd.resp_pkt->data;
3469
3470 /* Initialize general data */
3471 nvm->nvm_version = le16toh(rsp->general.nvm_version);
3472 nvm->n_hw_addrs = rsp->general.n_hw_addrs;
3473
3474 /* Initialize MAC sku data */
3475 mac_flags = le32toh(rsp->mac_sku.mac_sku_flags);
3476 nvm->sku_cap_11ac_enable =
3477 !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
3478 nvm->sku_cap_11n_enable =
3479 !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
3480 nvm->sku_cap_11ax_enable =
3481 !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
3482 nvm->sku_cap_band_24GHz_enable =
3483 !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
3484 nvm->sku_cap_band_52GHz_enable =
3485 !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
3486 nvm->sku_cap_mimo_disable =
3487 !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_MIMO_DISABLED);
3488
3489 /* Initialize PHY sku data */
3490 nvm->valid_tx_ant = (uint8_t)le32toh(rsp->phy_sku.tx_chains);
3491 nvm->valid_rx_ant = (uint8_t)le32toh(rsp->phy_sku.rx_chains);
3492
3493 if (le32toh(rsp->regulatory.lar_enabled) &&
3494 isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_SUPPORT)) {
3495 nvm->lar_enabled = 1;
3496 }
3497
3498 if (v4) {
3499 iwx_init_channel_map(sc, NULL,
3500 rsp->regulatory.channel_profile, IWX_NUM_CHANNELS);
3501 } else {
3502 rsp_v3 = (void *)rsp;
3503 iwx_init_channel_map(sc, rsp_v3->regulatory.channel_profile,
3504 NULL, IWX_NUM_CHANNELS_V1);
3505 }
3506out:
3507 iwx_free_resp(sc, &hcmd);
3508 return err;
3509}
3510
3511int
3512iwx_load_firmware(struct iwx_softc *sc)
3513{
3514 struct iwx_fw_sects *fws;
3515 int err;
3516
3517 splassert(IPL_NET);
3518
3519 sc->sc_uc.uc_intr = 0;
3520 sc->sc_uc.uc_ok = 0;
3521
3522 fws = &sc->sc_fw.fw_sects[IWX_UCODE_TYPE_REGULAR];
3523 err = iwx_ctxt_info_init(sc, fws);
3524 if (err) {
3525 printf("%s: could not init context info\n", DEVNAME(sc));
3526 return err;
3527 }
3528
3529 /* wait for the firmware to load */
3530 err = tsleep_nsec(&sc->sc_uc, 0, "iwxuc", SEC_TO_NSEC(1));
3531 if (err || !sc->sc_uc.uc_ok) {
3532 printf("%s: could not load firmware, %d\n", DEVNAME(sc), err);
3533 iwx_ctxt_info_free_paging(sc);
3534 }
3535
3536 iwx_ctxt_info_free_fw_img(sc);
3537
3538 if (!sc->sc_uc.uc_ok)
3539 return EINVAL;
3540
3541 return err;
3542}
3543
3544int
3545iwx_start_fw(struct iwx_softc *sc)
3546{
3547 int err;
3548
3549 IWX_WRITE(sc, IWX_CSR_INT, ~0);
3550
3551 iwx_disable_interrupts(sc);
3552
3553 /* make sure rfkill handshake bits are cleared */
3554 IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR, IWX_CSR_UCODE_SW_BIT_RFKILL);
3555 IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR,
3556 IWX_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
3557
3558 /* clear (again), then enable firmware load interrupt */
3559 IWX_WRITE(sc, IWX_CSR_INT, ~0);
3560
3561 err = iwx_nic_init(sc);
3562 if (err) {
3563 printf("%s: unable to init nic\n", DEVNAME(sc));
3564 return err;
3565 }
3566
3567 iwx_enable_fwload_interrupt(sc);
3568
3569 return iwx_load_firmware(sc);
3570}
3571
3572int
3573iwx_send_tx_ant_cfg(struct iwx_softc *sc, uint8_t valid_tx_ant)
3574{
3575 struct iwx_tx_ant_cfg_cmd tx_ant_cmd = {
3576 .valid = htole32(valid_tx_ant),
3577 };
3578
3579 return iwx_send_cmd_pdu(sc, IWX_TX_ANT_CONFIGURATION_CMD,
3580 0, sizeof(tx_ant_cmd), &tx_ant_cmd);
3581}
3582
3583int
3584iwx_send_phy_cfg_cmd(struct iwx_softc *sc)
3585{
3586 struct iwx_phy_cfg_cmd phy_cfg_cmd;
3587
3588 phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
3589 phy_cfg_cmd.calib_control.event_trigger =
3590 sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].event_trigger;
3591 phy_cfg_cmd.calib_control.flow_trigger =
3592 sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].flow_trigger;
3593
3594 return iwx_send_cmd_pdu(sc, IWX_PHY_CONFIGURATION_CMD, 0,
3595 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
3596}
3597
3598int
3599iwx_send_dqa_cmd(struct iwx_softc *sc)
3600{
3601 struct iwx_dqa_enable_cmd dqa_cmd = {
3602 .cmd_queue = htole32(IWX_DQA_CMD_QUEUE),
3603 };
3604 uint32_t cmd_id;
3605
3606 cmd_id = iwx_cmd_id(IWX_DQA_ENABLE_CMD, IWX_DATA_PATH_GROUP, 0);
3607 return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
3608}
3609
3610int
3611iwx_load_ucode_wait_alive(struct iwx_softc *sc)
3612{
3613 int err;
3614
3615 err = iwx_read_firmware(sc);
3616 if (err)
3617 return err;
3618
3619 err = iwx_start_fw(sc);
3620 if (err)
3621 return err;
3622
3623 iwx_post_alive(sc);
3624
3625 return 0;
3626}
3627
3628int
3629iwx_run_init_mvm_ucode(struct iwx_softc *sc, int readnvm)
3630{
3631 const int wait_flags = IWX_INIT_COMPLETE;
3632 struct iwx_nvm_access_complete_cmd nvm_complete = {};
3633 struct iwx_init_extended_cfg_cmd init_cfg = {
3634 .init_flags = htole32(IWX_INIT_NVM),
3635 };
3636 int err, s;
3637
3638 if ((sc->sc_flags & IWX_FLAG_RFKILL) && !readnvm) {
3639 printf("%s: radio is disabled by hardware switch\n",
3640 DEVNAME(sc));
3641 return EPERM;
3642 }
3643
3644 s = splnet();
3645 sc->sc_init_complete = 0;
3646 err = iwx_load_ucode_wait_alive(sc);
3647 if (err) {
3648 printf("%s: failed to load init firmware\n", DEVNAME(sc));
3649 splx(s);
3650 return err;
3651 }
3652
3653 /*
3654 * Send init config command to mark that we are sending NVM
3655 * access commands
3656 */
3657 err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_SYSTEM_GROUP,
3658 IWX_INIT_EXTENDED_CFG_CMD), 0, sizeof(init_cfg), &init_cfg);
3659 if (err) {
3660 splx(s);
3661 return err;
3662 }
3663
3664 err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
3665 IWX_NVM_ACCESS_COMPLETE), 0, sizeof(nvm_complete), &nvm_complete);
3666 if (err) {
3667 splx(s);
3668 return err;
3669 }
3670
3671 /* Wait for the init complete notification from the firmware. */
3672 while ((sc->sc_init_complete & wait_flags) != wait_flags) {
3673 err = tsleep_nsec(&sc->sc_init_complete, 0, "iwxinit",
3674 SEC_TO_NSEC(2));
3675 if (err) {
3676 splx(s);
3677 return err;
3678 }
3679 }
3680 splx(s);
3681 if (readnvm) {
3682 err = iwx_nvm_get(sc);
3683 if (err) {
3684 printf("%s: failed to read nvm\n", DEVNAME(sc));
3685 return err;
3686 }
3687 if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
3688 IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
3689 sc->sc_nvm.hw_addr);
3690
3691 }
3692 return 0;
3693}
3694
3695int
3696iwx_config_ltr(struct iwx_softc *sc)
3697{
3698 struct iwx_ltr_config_cmd cmd = {
3699 .flags = htole32(IWX_LTR_CFG_FLAG_FEATURE_ENABLE),
3700 };
3701
3702 if (!sc->sc_ltr_enabled)
3703 return 0;
3704
3705 return iwx_send_cmd_pdu(sc, IWX_LTR_CONFIG, 0, sizeof(cmd), &cmd);
3706}
3707
3708void
3709iwx_update_rx_desc(struct iwx_softc *sc, struct iwx_rx_ring *ring, int idx)
3710{
3711 struct iwx_rx_data *data = &ring->data[idx];
3712
3713 ((uint64_t *)ring->desc)[idx] =
3714 htole64(data->map->dm_segs[0].ds_addr | (idx & 0x0fff));
3715 bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
3716 idx * sizeof(uint64_t), sizeof(uint64_t),
3717 BUS_DMASYNC_PREWRITE);
3718}
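/* Each free-ring slot packs the buffer's DMA address together with a 12-bit index; the index presumably occupies low address bits which are zero thanks to buffer alignment. */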
3719
3720int
3721iwx_rx_addbuf(struct iwx_softc *sc, int size, int idx)
3722{
3723 struct iwx_rx_ring *ring = &sc->rxq;
3724 struct iwx_rx_data *data = &ring->data[idx];
3725 struct mbuf *m;
3726 int err;
3727 int fatal = 0;
3728
3729 m = m_gethdr(M_DONTWAIT, MT_DATA);
3730 if (m == NULL)
3731 return ENOBUFS;
3732
3733 if (size <= MCLBYTES) {
3734 MCLGET(m, M_DONTWAIT);
3735 } else {
3736 MCLGETL(m, M_DONTWAIT, IWX_RBUF_SIZE);
3737 }
3738 if ((m->m_flags & M_EXT) == 0) {
3739 m_freem(m);
3740 return ENOBUFS;
3741 }
3742
3743 if (data->m != NULL) {
3744 bus_dmamap_unload(sc->sc_dmat, data->map);
3745 fatal = 1;
3746 }
3747
3748 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3749 err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
3750 BUS_DMA_READ|BUS_DMA_NOWAIT);
3751 if (err) {
3752 /* XXX */
3753 if (fatal)
3754 panic("%s: could not load RX mbuf", DEVNAME(sc));
3755 m_freem(m);
3756 return err;
3757 }
3758 data->m = m;
3759 bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
3760
3761 /* Update RX descriptor. */
3762 iwx_update_rx_desc(sc, ring, idx);
3763
3764 return 0;
3765}
3766
3767int
3768iwx_rxmq_get_signal_strength(struct iwx_softc *sc,
3769 struct iwx_rx_mpdu_desc *desc)
3770{
3771 int energy_a, energy_b;
3772
3773 energy_a = desc->v1.energy_a;
3774 energy_b = desc->v1.energy_b;
3775 energy_a = energy_a ? -energy_a : -256;
3776 energy_b = energy_b ? -energy_b : -256;
3777 return MAX(energy_a, energy_b);
3778}
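/* An energy reading of zero presumably indicates an idle chain and is clamped to the -256 floor; otherwise the value is the negated signal strength in dBm. */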
3779
3780void
3781iwx_rx_rx_phy_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
3782 struct iwx_rx_data *data)
3783{
3784 struct iwx_rx_phy_info *phy_info = (void *)pkt->data;
3785
3786 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
3787 sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
3788
3789 memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3790}
3791
3792/*
3793 * Retrieve the average noise (in dBm) among receivers.
3794 */
3795int
3796iwx_get_noise(const struct iwx_statistics_rx_non_phy *stats)
3797{
3798 int i, total, nbant, noise;
3799
3800 total = nbant = noise = 0;
3801 for (i = 0; i < 3; i++) {
3802 noise = letoh32(stats->beacon_silence_rssi[i]) & 0xff;
3803 if (noise) {
3804 total += noise;
3805 nbant++;
3806 }
3807 }
3808
3809 /* There should be at least one antenna but check anyway. */
3810 return (nbant == 0) ? -127 : (total / nbant) - 107;
3811}
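/* The per-antenna beacon silence RSSI is averaged and shifted by a fixed -107 offset, which appears to be the usual calibration constant for this hardware family. */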
3812
3813int
3814iwx_ccmp_decap(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
3815 struct ieee80211_rxinfo *rxi)
3816{
3817 struct ieee80211com *ic = &sc->sc_ic;
3818 struct ieee80211_key *k;
3819 struct ieee80211_frame *wh;
3820 uint64_t pn, *prsc;
3821 uint8_t *ivp;
3822 uint8_t tid;
3823 int hdrlen, hasqos;
3824
3825 wh = mtod(m, struct ieee80211_frame *);
3826 hdrlen = ieee80211_get_hdrlen(wh);
3827 ivp = (uint8_t *)wh + hdrlen;
3828
3829 /* find key for decryption */
3830 k = ieee80211_get_rxkey(ic, m, ni);
3831 if (k == NULL || k->k_cipher != IEEE80211_CIPHER_CCMP)
3832 return 1;
3833
3834 /* Check that the ExtIV bit is set. */
3835 if (!(ivp[3] & IEEE80211_WEP_EXTIV))
3836 return 1;
3837
3838 hasqos = ieee80211_has_qos(wh);
3839 tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
3840 prsc = &k->k_rsc[tid];
3841
3842 /* Extract the 48-bit PN from the CCMP header. */
3843 pn = (uint64_t)ivp[0] |
3844 (uint64_t)ivp[1] << 8 |
3845 (uint64_t)ivp[4] << 16 |
3846 (uint64_t)ivp[5] << 24 |
3847 (uint64_t)ivp[6] << 32 |
3848 (uint64_t)ivp[7] << 40;
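/* CCMP header layout is PN0 PN1 <reserved> <ExtIV/KeyID> PN2 PN3 PN4 PN5, which is why ivp[2] and ivp[3] are skipped when reassembling the 48-bit PN. */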
3849 if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) {
3850 if (pn < *prsc) {
3851 ic->ic_stats.is_ccmp_replays++;
3852 return 1;
3853 }
3854 } else if (pn <= *prsc) {
3855 ic->ic_stats.is_ccmp_replays++;
3856 return 1;
3857 }
3858 /* Last seen packet number is updated in ieee80211_inputm(). */
3859
3860 /*
3861 * Some firmware versions strip the MIC, and some don't. It is not
3862 * clear which of the capability flags could tell us what to expect.
3863 * For now, keep things simple and just leave the MIC in place if
3864 * it is present.
3865 *
3866 * The IV will be stripped by ieee80211_inputm().
3867 */
3868 return 0;
3869}
3870
3871int
3872iwx_rx_hwdecrypt(struct iwx_softc *sc, struct mbuf *m, uint32_t rx_pkt_status,
3873 struct ieee80211_rxinfo *rxi)
3874{
3875 struct ieee80211com *ic = &sc->sc_ic;
3876 struct ifnet *ifp = IC2IFP(ic);
3877 struct ieee80211_frame *wh;
3878 struct ieee80211_node *ni;
3879 int ret = 0;
3880 uint8_t type, subtype;
3881
3882 wh = mtod(m, struct ieee80211_frame *);
3883
3884 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3885 if (type == IEEE80211_FC0_TYPE_CTL)
3886 return 0;
3887
3888 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3889 if (ieee80211_has_qos(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA))
3890 return 0;
3891
3892 ni = ieee80211_find_rxnode(ic, wh);
3893 /* Handle hardware decryption. */
3894 if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL)
3895 && (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) &&
3896 (ni->ni_flags & IEEE80211_NODE_RXPROT) &&
3897 ((!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
3898 ni->ni_rsncipher == IEEE80211_CIPHER_CCMP) ||
3899 (IEEE80211_IS_MULTICAST(wh->i_addr1) &&
3900 ni->ni_rsngroupcipher == IEEE80211_CIPHER_CCMP))) {
3901 if ((rx_pkt_status & IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
3902 IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
3903 ic->ic_stats.is_ccmp_dec_errs++;
3904 ret = 1;
3905 goto out;
3906 }
3907 /* Check whether decryption was successful or not. */
3908 if ((rx_pkt_status &
3909 (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
3910 IWX_RX_MPDU_RES_STATUS_MIC_OK)) !=
3911 (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
3912 IWX_RX_MPDU_RES_STATUS_MIC_OK)) {
3913 ic->ic_stats.is_ccmp_dec_errs++;
3914 ret = 1;
3915 goto out;
3916 }
3917 rxi->rxi_flags |= IEEE80211_RXI_HWDEC;
3918 }
3919out:
3920 if (ret)
3921 ifp->if_ierrors++;
3922 ieee80211_release_node(ic, ni);
3923 return ret;
3924}
3925
3926void
3927iwx_rx_frame(struct iwx_softc *sc, struct mbuf *m, int chanidx,
3928 uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
3929 uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
3930 struct mbuf_list *ml)
3931{
3932 struct ieee80211com *ic = &sc->sc_ic;
3933 struct ifnet *ifp = IC2IFP(ic);
3934 struct ieee80211_frame *wh;
3935 struct ieee80211_node *ni;
3936 struct ieee80211_channel *bss_chan;
3937 uint8_t saved_bssid[IEEE80211_ADDR_LEN] = { 0 };
3938
3939 if (chanidx < 0 || chanidx >= nitems(ic->ic_channels))
3940 chanidx = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
3941
3942 wh = mtod(m, struct ieee80211_frame *);
3943 ni = ieee80211_find_rxnode(ic, wh);
3944 if (ni == ic->ic_bss) {
3945 /*
3946 * We may switch ic_bss's channel during scans.
3947 * Record the current channel so we can restore it later.
3948 */
3949 bss_chan = ni->ni_chan;
3950 IEEE80211_ADDR_COPY(&saved_bssid, ni->ni_macaddr);
3951 }
3952 ni->ni_chan = &ic->ic_channels[chanidx];
3953
3954 if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) &&
3955 iwx_ccmp_decap(sc, m, ni, rxi) != 0) {
3956 ifp->if_ierrors++;
3957 m_freem(m);
3958 ieee80211_release_node(ic, ni);
3959 return;
3960 }
3961
3962#if NBPFILTER > 0
3963 if (sc->sc_drvbpf != NULL) {
3964 struct iwx_rx_radiotap_header *tap = &sc->sc_rxtap;
3965 uint16_t chan_flags;
3966
3967 tap->wr_flags = 0;
3968 if (is_shortpre)
3969 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3970 tap->wr_chan_freq =
3971 htole16(ic->ic_channels[chanidx].ic_freq);
3972 chan_flags = ic->ic_channels[chanidx].ic_flags;
3973 if (ic->ic_curmode != IEEE80211_MODE_11N)
3974 chan_flags &= ~IEEE80211_CHAN_HT;
3975 tap->wr_chan_flags = htole16(chan_flags);
3976 tap->wr_dbm_antsignal = (int8_t)rxi->rxi_rssi;
3977 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3978 tap->wr_tsft = device_timestamp;
3979 if (rate_n_flags & IWX_RATE_MCS_HT_MSK) {
3980 uint8_t mcs = (rate_n_flags &
3981 (IWX_RATE_HT_MCS_RATE_CODE_MSK |
3982 IWX_RATE_HT_MCS_NSS_MSK));
3983 tap->wr_rate = (0x80 | mcs);
3984 } else {
3985 uint8_t rate = (rate_n_flags &
3986 IWX_RATE_LEGACY_RATE_MSK);
3987 switch (rate) {
3988 /* CCK rates. */
3989 case 10: tap->wr_rate = 2; break;
3990 case 20: tap->wr_rate = 4; break;
3991 case 55: tap->wr_rate = 11; break;
3992 case 110: tap->wr_rate = 22; break;
3993 /* OFDM rates. */
3994 case 0xd: tap->wr_rate = 12; break;
3995 case 0xf: tap->wr_rate = 18; break;
3996 case 0x5: tap->wr_rate = 24; break;
3997 case 0x7: tap->wr_rate = 36; break;
3998 case 0x9: tap->wr_rate = 48; break;
3999 case 0xb: tap->wr_rate = 72; break;
4000 case 0x1: tap->wr_rate = 96; break;
4001 case 0x3: tap->wr_rate = 108; break;
4002 /* Unknown rate: should not happen. */
4003 default: tap->wr_rate = 0;
4004 }
4005 }
4006
4007 bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
4008 m, BPF_DIRECTION_IN);
4009 }
4010#endif
4011 ieee80211_inputm(IC2IFP(ic), m, ni, rxi, ml);
4012 /*
4013 * ieee80211_inputm() might have changed our BSS.
4014 * Restore ic_bss's channel if we are still in the same BSS.
4015 */
4016 if (ni == ic->ic_bss && IEEE80211_ADDR_EQ(saved_bssid, ni->ni_macaddr))
4017 ni->ni_chan = bss_chan;
4018 ieee80211_release_node(ic, ni);
4019}
4020
4021/*
4022 * Drop duplicate 802.11 retransmissions
4023 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
4024 * and handle pseudo-duplicate frames which result from deaggregation
4025 * of A-MSDU frames in hardware.
4026 */
4027int
4028iwx_detect_duplicate(struct iwx_softc *sc, struct mbuf *m,
4029 struct iwx_rx_mpdu_desc *desc, struct ieee80211_rxinfo *rxi)
4030{
4031 struct ieee80211com *ic = &sc->sc_ic;
4032 struct iwx_node *in = (void *)ic->ic_bss;
4033 struct iwx_rxq_dup_data *dup_data = &in->dup_data;
4034 uint8_t tid = IWX_MAX_TID_COUNT, subframe_idx;
4035 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4036 uint8_t type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4037 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4038 int hasqos = ieee80211_has_qos(wh);
4039 uint16_t seq;
4040
4041 if (type == IEEE80211_FC0_TYPE_CTL ||
4042 (hasqos && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) ||
4043 IEEE80211_IS_MULTICAST(wh->i_addr1))
4044 return 0;
4045
4046 if (hasqos) {
4047 tid = (ieee80211_get_qos(wh) & IEEE80211_QOS_TID);
4048 if (tid > IWX_MAX_TID_COUNT)
4049 tid = IWX_MAX_TID_COUNT;
4050 }
4051
4052 /* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
4053 subframe_idx = desc->amsdu_info &
4054 IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
4055
4056 seq = letoh16(*(u_int16_t *)wh->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
4057 if ((wh->i_fc[1] & IEEE80211_FC1_RETRY) &&
4058 dup_data->last_seq[tid] == seq &&
4059 dup_data->last_sub_frame[tid] >= subframe_idx)
4060 return 1;
4061
4062 /*
4063 * Allow the same frame sequence number for all A-MSDU subframes
4064 * following the first subframe.
4065 * Otherwise these subframes would be discarded as replays.
4066 */
4067 if (dup_data->last_seq[tid] == seq &&
4068 subframe_idx > dup_data->last_sub_frame[tid] &&
4069 (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU)) {
4070 rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
4071 }
4072
4073 dup_data->last_seq[tid] = seq;
4074 dup_data->last_sub_frame[tid] = subframe_idx;
4075
4076 return 0;
4077}
4078
4079/*
4080 * Returns true if sn2 - buffer_size < sn1 < sn2.
4081 * To be used only in order to compare reorder buffer head with NSSN.
4082 * We fully trust NSSN unless it is behind us due to reorder timeout.
4083 * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
4084 */
4085int
4086iwx_is_sn_less(uint16_t sn1, uint16_t sn2, uint16_t buffer_size)
4087{
4088 return SEQ_LT(sn1, sn2) && !SEQ_LT(sn1, sn2 - buffer_size);
4089}
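/* Worked example: with buffer_size 64, head_sn 4090 and nssn 10, SEQ_LT(4090, 10) holds because ((4090 - 10) & 0xfff) == 4080 > 2048, so the head is treated as behind the NSSN across the 12-bit wrap. */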
4090
4091void
4092iwx_release_frames(struct iwx_softc *sc, struct ieee80211_node *ni,
4093 struct iwx_rxba_data *rxba, struct iwx_reorder_buffer *reorder_buf,
4094 uint16_t nssn, struct mbuf_list *ml)
4095{
4096 struct iwx_reorder_buf_entry *entries = &rxba->entries[0];
4097 uint16_t ssn = reorder_buf->head_sn;
4098
4099 /* ignore nssn smaller than head sn - this can happen due to timeout */
4100 if (iwx_is_sn_less(nssn, ssn, reorder_buf->buf_size))
4101 goto set_timer;
4102
4103 while (iwx_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
4104 int index = ssn % reorder_buf->buf_size;
4105 struct mbuf *m;
4106 int chanidx, is_shortpre;
4107 uint32_t rx_pkt_status, rate_n_flags, device_timestamp;
4108 struct ieee80211_rxinfo *rxi;
4109
4110 /* This data is the same for all A-MSDU subframes. */
4111 chanidx = entries[index].chanidx;
4112 rx_pkt_status = entries[index].rx_pkt_status;
4113 is_shortpre = entries[index].is_shortpre;
4114 rate_n_flags = entries[index].rate_n_flags;
4115 device_timestamp = entries[index].device_timestamp;
4116 rxi = &entries[index].rxi;
4117
4118 /*
4119 * Empty the list. Will have more than one frame for A-MSDU.
4120 * Empty list is valid as well since nssn indicates frames were
4121 * received.
4122 */
4123 while ((m = ml_dequeue(&entries[index].frames)) != NULL) {
4124 iwx_rx_frame(sc, m, chanidx, rx_pkt_status, is_shortpre,
4125 rate_n_flags, device_timestamp, rxi, ml);
4126 reorder_buf->num_stored--;
4127
4128 /*
4129 * Allow the same frame sequence number and CCMP PN for
4130 * all A-MSDU subframes following the first subframe.
4131 * Otherwise they would be discarded as replays.
4132 */
4133 rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
4134 rxi->rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
4135 }
4136
4137 ssn = (ssn + 1) & 0xfff;
4138 }
4139 reorder_buf->head_sn = nssn;
4140
4141set_timer:
4142 if (reorder_buf->num_stored && !reorder_buf->removed) {
4143 timeout_add_usec(&reorder_buf->reorder_timer,
4144 RX_REORDER_BUF_TIMEOUT_MQ_USEC);
4145 } else
4146 timeout_del(&reorder_buf->reorder_timer);
4147}
4148
4149int
4150iwx_oldsn_workaround(struct iwx_softc *sc, struct ieee80211_node *ni, int tid,
4151 struct iwx_reorder_buffer *buffer, uint32_t reorder_data, uint32_t gp2)
4152{
4153 struct ieee80211com *ic = &sc->sc_ic;
4154
4155 if (gp2 != buffer->consec_oldsn_ampdu_gp2) {
4156 /* we have a new (A-)MPDU ... */
4157
4158 /*
4159 * reset counter to 0 if we didn't have any oldsn in
4160 * the last A-MPDU (as detected by GP2 being identical)
4161 */
4162 if (!buffer->consec_oldsn_prev_drop)
4163 buffer->consec_oldsn_drops = 0;
4164
4165 /* either way, update our tracking state */
4166 buffer->consec_oldsn_ampdu_gp2 = gp2;
4167 } else if (buffer->consec_oldsn_prev_drop) {
4168 /*
4169 * tracking state didn't change, and we had an old SN
4170 * indication before - do nothing in this case, we
4171 * already noted this one down and are waiting for the
4172 * next A-MPDU (by GP2)
4173 */
4174 return 0;
4175 }
4176
4177 /* return unless this MPDU has old SN */
4178 if (!(reorder_data & IWX_RX_MPDU_REORDER_BA_OLD_SN0x80000000))
4179 return 0;
4180
4181 /* update state */
4182 buffer->consec_oldsn_prev_drop = 1;
4183 buffer->consec_oldsn_drops++;
4184
4185 /* if limit is reached, send del BA and reset state */
4186 if (buffer->consec_oldsn_drops == IWX_AMPDU_CONSEC_DROPS_DELBA10) {
4187 ieee80211_delba_request(ic, ni, IEEE80211_REASON_UNSPECIFIED,
4188 0, tid);
4189 buffer->consec_oldsn_prev_drop = 0;
4190 buffer->consec_oldsn_drops = 0;
4191 return 1;
4192 }
4193
4194 return 0;
4195}
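
Condensed, the workaround above is a small state machine: the GP2 timestamp distinguishes one A-MPDU from the next, at most one old-SN event is counted per A-MPDU, and the tenth consecutive event tears the BA session down. A standalone model of that logic (a sketch mirroring the code above, not driver code):

#include <stdint.h>
#include <stdio.h>

#define CONSEC_DROPS_DELBA 10	/* mirrors IWX_AMPDU_CONSEC_DROPS_DELBA */

struct oldsn_state {
	uint32_t ampdu_gp2;	/* GP2 of the last A-MPDU seen */
	int prev_drop;		/* last A-MPDU contained an old SN */
	int drops;		/* consecutive old-SN A-MPDUs */
};

/* Returns 1 when delba should be sent; old_sn stands in for the
 * IWX_RX_MPDU_REORDER_BA_OLD_SN bit in reorder_data. */
static int
oldsn_step(struct oldsn_state *st, int old_sn, uint32_t gp2)
{
	if (gp2 != st->ampdu_gp2) {
		if (!st->prev_drop)
			st->drops = 0;
		st->ampdu_gp2 = gp2;
	} else if (st->prev_drop)
		return 0;	/* already counted this A-MPDU */
	if (!old_sn)
		return 0;
	st->prev_drop = 1;
	if (++st->drops == CONSEC_DROPS_DELBA) {
		st->prev_drop = 0;
		st->drops = 0;
		return 1;
	}
	return 0;
}

int
main(void)
{
	struct oldsn_state st = { 0, 0, 0 };
	uint32_t gp2;

	for (gp2 = 1; gp2 <= CONSEC_DROPS_DELBA; gp2++)
		if (oldsn_step(&st, 1, gp2))
			printf("delba after A-MPDU %u\n", gp2);	/* fires at 10 */
	return 0;
}
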
4196
4197/*
4198 * Handle re-ordering of frames which were de-aggregated in hardware.
4199 * Returns 1 if the MPDU was consumed (buffered or dropped).
4200 * Returns 0 if the MPDU should be passed to upper layer.
4201 */
4202int
4203iwx_rx_reorder(struct iwx_softc *sc, struct mbuf *m, int chanidx,
4204 struct iwx_rx_mpdu_desc *desc, int is_shortpre, int rate_n_flags,
4205 uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
4206 struct mbuf_list *ml)
4207{
4208 struct ieee80211com *ic = &sc->sc_ic;
4209 struct ieee80211_frame *wh;
4210 struct ieee80211_node *ni;
4211 struct iwx_rxba_data *rxba;
4212 struct iwx_reorder_buffer *buffer;
4213 uint32_t reorder_data = le32toh(desc->reorder_data)((__uint32_t)(desc->reorder_data));
4214 int is_amsdu = (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU0x40);
4215 int last_subframe =
4216 (desc->amsdu_info & IWX_RX_MPDU_AMSDU_LAST_SUBFRAME0x80);
4217 uint8_t tid;
4218 uint8_t subframe_idx = (desc->amsdu_info &
4219 IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK0x7f);
4220 struct iwx_reorder_buf_entry *entries;
4221 int index;
4222 uint16_t nssn, sn;
4223 uint8_t baid, type, subtype;
4224 int hasqos;
4225
4226 wh = mtod(m, struct ieee80211_frame *)((struct ieee80211_frame *)((m)->m_hdr.mh_data));
4227 hasqos = ieee80211_has_qos(wh);
4228 tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID0x000f : 0;
4229
4230 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK0x0c;
4231 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK0xf0;
4232
4233 /*
4234 * We are only interested in Block Ack requests and unicast QoS data.
4235 */
4236 if (IEEE80211_IS_MULTICAST(wh->i_addr1)(*(wh->i_addr1) & 0x01))
4237 return 0;
4238 if (hasqos) {
4239 if (subtype & IEEE80211_FC0_SUBTYPE_NODATA0x40)
4240 return 0;
4241 } else {
4242 if (type != IEEE80211_FC0_TYPE_CTL0x04 ||
4243 subtype != IEEE80211_FC0_SUBTYPE_BAR0x80)
4244 return 0;
4245 }
4246
4247 baid = (reorder_data & IWX_RX_MPDU_REORDER_BAID_MASK0x7f000000) >>
4248 IWX_RX_MPDU_REORDER_BAID_SHIFT24;
4249 if (baid == IWX_RX_REORDER_DATA_INVALID_BAID0x7f ||
4250 baid >= nitems(sc->sc_rxba_data)(sizeof((sc->sc_rxba_data)) / sizeof((sc->sc_rxba_data)[0])))
4251 return 0;
4252
4253 rxba = &sc->sc_rxba_data[baid];
4254 if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID0x7f ||
4255 tid != rxba->tid || rxba->sta_id != IWX_STATION_ID0)
4256 return 0;
4257
4258 if (rxba->timeout != 0)
4259 getmicrouptime(&rxba->last_rx);
4260
4261 /* Bypass A-MPDU re-ordering in net80211. */
4262 rxi->rxi_flags |= IEEE80211_RXI_AMPDU_DONE0x00000002;
4263
4264 nssn = reorder_data & IWX_RX_MPDU_REORDER_NSSN_MASK0x00000fff;
4265 sn = (reorder_data & IWX_RX_MPDU_REORDER_SN_MASK0x00fff000) >>
4266 IWX_RX_MPDU_REORDER_SN_SHIFT12;
4267
4268 buffer = &rxba->reorder_buf;
4269 entries = &rxba->entries[0];
4270
4271 if (!buffer->valid) {
4272 if (reorder_data & IWX_RX_MPDU_REORDER_BA_OLD_SN0x80000000)
4273 return 0;
4274 buffer->valid = 1;
4275 }
4276
4277 ni = ieee80211_find_rxnode(ic, wh);
4278 if (type == IEEE80211_FC0_TYPE_CTL0x04 &&
4279 subtype == IEEE80211_FC0_SUBTYPE_BAR0x80) {
4280 iwx_release_frames(sc, ni, rxba, buffer, nssn, ml);
4281 goto drop;
4282 }
4283
4284 /*
4285 * If there was a significant jump in the nssn - adjust.
4286 * If the SN is smaller than the NSSN it might need to first go into
4287 * the reorder buffer, in which case we just release up to it and the
4288 * rest of the function will take care of storing it and releasing up to
4289 * the nssn.
4290 */
4291 if (!iwx_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
4292 buffer->buf_size) ||
4293 !SEQ_LT(sn, buffer->head_sn + buffer->buf_size)((((u_int16_t)(sn) - (u_int16_t)(buffer->head_sn + buffer->buf_size)) & 0xfff) > 2048)) {
4294 uint16_t min_sn = SEQ_LT(sn, nssn)((((u_int16_t)(sn) - (u_int16_t)(nssn)) & 0xfff) > 2048) ? sn : nssn;
4295 ic->ic_stats.is_ht_rx_frame_above_ba_winend++;
4296 iwx_release_frames(sc, ni, rxba, buffer, min_sn, ml);
4297 }
4298
4299 if (iwx_oldsn_workaround(sc, ni, tid, buffer, reorder_data,
4300 device_timestamp)) {
4301 /* BA session will be torn down. */
4302 ic->ic_stats.is_ht_rx_ba_window_jump++;
4303 goto drop;
4304
4305 }
4306
4307 /* drop any outdated packets */
4308 if (SEQ_LT(sn, buffer->head_sn)((((u_int16_t)(sn) - (u_int16_t)(buffer->head_sn)) & 0xfff) > 2048)) {
4309 ic->ic_stats.is_ht_rx_frame_below_ba_winstart++;
4310 goto drop;
4311 }
4312
4313 /* release immediately if allowed by nssn and no stored frames */
4314 if (!buffer->num_stored && SEQ_LT(sn, nssn)((((u_int16_t)(sn) - (u_int16_t)(nssn)) & 0xfff) > 2048)) {
4315 if (iwx_is_sn_less(buffer->head_sn, nssn, buffer->buf_size) &&
4316 (!is_amsdu || last_subframe))
4317 buffer->head_sn = nssn;
4318 ieee80211_release_node(ic, ni);
4319 return 0;
4320 }
4321
4322 /*
4323 * Release immediately if there are no stored frames and the SN is
4324 * equal to the head. This can happen due to the reorder timer, when
4325 * NSSN is behind head_sn: after everything has been released and the
4326 * next frame in sequence arrives, the NSSN says we cannot release it
4327 * immediately, while technically there is no hole and we can move
4328 * forward.
4329 */
4330 if (!buffer->num_stored && sn == buffer->head_sn) {
4331 if (!is_amsdu || last_subframe)
4332 buffer->head_sn = (buffer->head_sn + 1) & 0xfff;
4333 ieee80211_release_node(ic, ni);
4334 return 0;
4335 }
4336
4337 index = sn % buffer->buf_size;
4338
4339 /*
4340 * Check if we have already stored this frame.
4341 * Since an A-MSDU is either received in full or not at all, the logic
4342 * is simple: if frames are stored at this buffer position and the last
4343 * stored A-MSDU frame had a different SN, this is a retransmission.
4344 * If the SN is the same, it is the same A-MSDU only if the subframe
4345 * index is incrementing - otherwise it is a retransmission.
4346 */
4347 if (!ml_empty(&entries[index].frames)((&entries[index].frames)->ml_len == 0)) {
4348 if (!is_amsdu) {
4349 ic->ic_stats.is_ht_rx_ba_no_buf++;
4350 goto drop;
4351 } else if (sn != buffer->last_amsdu ||
4352 buffer->last_sub_index >= subframe_idx) {
4353 ic->ic_stats.is_ht_rx_ba_no_buf++;
4354 goto drop;
4355 }
4356 } else {
4357 /* This data is the same for all A-MSDU subframes. */
4358 entries[index].chanidx = chanidx;
4359 entries[index].is_shortpre = is_shortpre;
4360 entries[index].rate_n_flags = rate_n_flags;
4361 entries[index].device_timestamp = device_timestamp;
4362 memcpy(&entries[index].rxi, rxi, sizeof(entries[index].rxi))__builtin_memcpy((&entries[index].rxi), (rxi), (sizeof(entries[index].rxi)));
4363 }
4364
4365 /* put in reorder buffer */
4366 ml_enqueue(&entries[index].frames, m);
4367 buffer->num_stored++;
4368 getmicrouptime(&entries[index].reorder_time);
4369
4370 if (is_amsdu) {
4371 buffer->last_amsdu = sn;
4372 buffer->last_sub_index = subframe_idx;
4373 }
4374
4375 /*
4376 * We cannot trust NSSN for A-MSDU subframes that are not the last.
4377 * The reason is that NSSN advances on the first subframe, and may
4378 * cause the reorder buffer to advance before all subframes arrive.
4379 * Example: the reorder buffer contains SN 0 & 2, and we receive an
4380 * A-MSDU with SN 1. NSSN for the first subframe will be 3, causing
4381 * the driver to release SN 0, 1, and 2. When subframe 1 arrives, the
4382 * reorder buffer is already ahead of it and it will be dropped.
4383 * If the last subframe is not on this queue, we will get a frame
4384 * release notification with an up-to-date NSSN.
4385 */
4386 if (!is_amsdu || last_subframe)
4387 iwx_release_frames(sc, ni, rxba, buffer, nssn, ml);
4388
4389 ieee80211_release_node(ic, ni);
4390 return 1;
4391
4392drop:
4393 m_freem(m);
4394 ieee80211_release_node(ic, ni);
4395 return 1;
4396}
4397
4398void
4399iwx_rx_mpdu_mq(struct iwx_softc *sc, struct mbuf *m, void *pktdata,
4400 size_t maxlen, struct mbuf_list *ml)
4401{
4402 struct ieee80211com *ic = &sc->sc_ic;
4403 struct ieee80211_rxinfo rxi;
4404 struct iwx_rx_mpdu_desc *desc;
4405 uint32_t len, hdrlen, rate_n_flags, device_timestamp;
4406 int rssi;
4407 uint8_t chanidx;
4408 uint16_t phy_info;
4409
4410 desc = (struct iwx_rx_mpdu_desc *)pktdata;
4411
4412 if (!(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_CRC_OK)((__uint16_t)((1 << 0)))) ||
4413 !(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_OVERRUN_OK)((__uint16_t)((1 << 1))))) {
4414 m_freem(m);
4415 return; /* drop */
4416 }
4417
4418 len = le16toh(desc->mpdu_len)((__uint16_t)(desc->mpdu_len));
4419 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
4420 /* Allow control frames in monitor mode. */
4421 if (len < sizeof(struct ieee80211_frame_cts)) {
4422 ic->ic_stats.is_rx_tooshort++;
4423 IC2IFP(ic)(&(ic)->ic_ac.ac_if)->if_ierrorsif_data.ifi_ierrors++;
4424 m_freem(m);
4425 return;
4426 }
4427 } else if (len < sizeof(struct ieee80211_frame)) {
4428 ic->ic_stats.is_rx_tooshort++;
4429 IC2IFP(ic)(&(ic)->ic_ac.ac_if)->if_ierrorsif_data.ifi_ierrors++;
4430 m_freem(m);
4431 return;
4432 }
4433 if (len > maxlen - sizeof(*desc)) {
4434 IC2IFP(ic)(&(ic)->ic_ac.ac_if)->if_ierrorsif_data.ifi_ierrors++;
4435 m_freem(m);
4436 return;
4437 }
4438
4439 m->m_datam_hdr.mh_data = pktdata + sizeof(*desc);
4440 m->m_pkthdrM_dat.MH.MH_pkthdr.len = m->m_lenm_hdr.mh_len = len;
4441
4442 /* Account for padding following the frame header. */
4443 if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_PAD0x20) {
4444 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *)((struct ieee80211_frame *)((m)->m_hdr.mh_data));
4445 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK0x0c;
4446 if (type == IEEE80211_FC0_TYPE_CTL0x04) {
4447 switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK0xf0) {
4448 case IEEE80211_FC0_SUBTYPE_CTS0xc0:
4449 hdrlen = sizeof(struct ieee80211_frame_cts);
4450 break;
4451 case IEEE80211_FC0_SUBTYPE_ACK0xd0:
4452 hdrlen = sizeof(struct ieee80211_frame_ack);
4453 break;
4454 default:
4455 hdrlen = sizeof(struct ieee80211_frame_min);
4456 break;
4457 }
4458 } else
4459 hdrlen = ieee80211_get_hdrlen(wh);
4460
4461 if ((le16toh(desc->status)((__uint16_t)(desc->status)) &
4462 IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK(7 << 8)) ==
4463 IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC(2 << 8)) {
4464 /* Padding is inserted after the IV. */
4465 hdrlen += IEEE80211_CCMP_HDRLEN8;
4466 }
4467
4468 memmove(m->m_data + 2, m->m_data, hdrlen)__builtin_memmove((m->m_hdr.mh_data + 2), (m->m_hdr.mh_data), (hdrlen));
4469 m_adj(m, 2);
4470 }
4471
4472 memset(&rxi, 0, sizeof(rxi))__builtin_memset((&rxi), (0), (sizeof(rxi)));
4473
4474 /*
4475 * Hardware de-aggregates A-MSDUs and copies the same MAC header
4476 * in place for each subframe. But it leaves the 'A-MSDU present'
4477 * bit set in the frame header. We need to clear this bit ourselves.
4478 * (XXX This workaround is not required on AX200/AX201 devices that
4479 * have been tested by me, but it's unclear when this problem was
4480 * fixed in the hardware. It definitely affects the 9k generation.
4481 * Leaving this in place for now since some 9k/AX200 hybrids seem
4482 * to exist that we may eventually add support for.)
4483 *
4484 * And we must allow the same CCMP PN for subframes following the
4485 * first subframe. Otherwise they would be discarded as replays.
4486 */
4487 if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU0x40) {
4488 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *)((struct ieee80211_frame *)((m)->m_hdr.mh_data));
4489 uint8_t subframe_idx = (desc->amsdu_info &
4490 IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK0x7f);
4491 if (subframe_idx > 0)
4492 rxi.rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN0x00000004;
4493 if (ieee80211_has_qos(wh) && ieee80211_has_addr4(wh) &&
4494 m->m_lenm_hdr.mh_len >= sizeof(struct ieee80211_qosframe_addr4)) {
4495 struct ieee80211_qosframe_addr4 *qwh4 = mtod(m,
4496 struct ieee80211_qosframe_addr4 *)((struct ieee80211_qosframe_addr4 *)((m)->m_hdr.mh_data));
4497 qwh4->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU)((__uint16_t)(~0x0080));
4498 } else if (ieee80211_has_qos(wh) &&
4499 m->m_lenm_hdr.mh_len >= sizeof(struct ieee80211_qosframe)) {
4500 struct ieee80211_qosframe *qwh = mtod(m,
4501 struct ieee80211_qosframe *)((struct ieee80211_qosframe *)((m)->m_hdr.mh_data));
4502 qwh->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU)((__uint16_t)(~0x0080));
4503 }
4504 }
4505
4506 /*
4507 * Verify decryption before duplicate detection. The latter uses
4508 * the TID supplied in QoS frame headers and this TID is implicitly
4509 * verified as part of the CCMP nonce.
4510 */
4511 if (iwx_rx_hwdecrypt(sc, m, le16toh(desc->status)((__uint16_t)(desc->status)), &rxi)) {
4512 m_freem(m);
4513 return;
4514 }
4515
4516 if (iwx_detect_duplicate(sc, m, desc, &rxi)) {
4517 m_freem(m);
4518 return;
4519 }
4520
4521 phy_info = le16toh(desc->phy_info)((__uint16_t)(desc->phy_info));
4522 rate_n_flags = le32toh(desc->v1.rate_n_flags)((__uint32_t)(desc->v1.rate_n_flags));
4523 chanidx = desc->v1.channel;
4524 device_timestamp = desc->v1.gp2_on_air_rise;
4525
4526 rssi = iwx_rxmq_get_signal_strength(sc, desc);
4527 rssi = (0 - IWX_MIN_DBM-100) + rssi; /* normalize */
4528 rssi = MIN(rssi, ic->ic_max_rssi)(((rssi)<(ic->ic_max_rssi))?(rssi):(ic->ic_max_rssi)); /* clip to max. 100% */
4529
4530 rxi.rxi_rssi = rssi;
4531 rxi.rxi_tstamp = le64toh(desc->v1.tsf_on_air_rise)((__uint64_t)(desc->v1.tsf_on_air_rise));
4532
4533 if (iwx_rx_reorder(sc, m, chanidx, desc,
4534 (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE(1 << 7)),
4535 rate_n_flags, device_timestamp, &rxi, ml))
4536 return;
4537
4538 iwx_rx_frame(sc, m, chanidx, le16toh(desc->status)((__uint16_t)(desc->status)),
4539 (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE(1 << 7)),
4540 rate_n_flags, device_timestamp, &rxi, ml);
4541}
4542
4543void
4544iwx_clear_tx_desc(struct iwx_softc *sc, struct iwx_tx_ring *ring, int idx)
4545{
4546 struct iwx_tfh_tfd *desc = &ring->desc[idx];
4547 uint8_t num_tbs = le16toh(desc->num_tbs)((__uint16_t)(desc->num_tbs)) & 0x1f;
4548 int i;
4549
4550 /* First TB is never cleared - it is bidirectional DMA data. */
4551 for (i = 1; i < num_tbs; i++) {
4552 struct iwx_tfh_tb *tb = &desc->tbs[i];
4553 memset(tb, 0, sizeof(*tb))__builtin_memset((tb), (0), (sizeof(*tb)));
4554 }
4555 desc->num_tbs = 0;
4556
4557 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
4558 (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
4559 sizeof(*desc), BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (ring->desc_dma.map), ((char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr), (sizeof(*desc)), (0x04));
4560}
4561
4562void
4563iwx_txd_done(struct iwx_softc *sc, struct iwx_tx_data *txd)
4564{
4565 struct ieee80211com *ic = &sc->sc_ic;
4566
4567 bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
4568 BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (txd->map), (0), (txd->map->dm_mapsize), (0x08));
4569 bus_dmamap_unload(sc->sc_dmat, txd->map)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (txd->map));
4570 m_freem(txd->m);
4571 txd->m = NULL((void *)0);
4572
4573 KASSERT(txd->in)((txd->in) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/pci/if_iwx.c", 4573, "txd->in"));
4574 ieee80211_release_node(ic, &txd->in->in_ni);
4575 txd->in = NULL((void *)0);
4576}
4577
4578void
4579iwx_txq_advance(struct iwx_softc *sc, struct iwx_tx_ring *ring, int idx)
4580{
4581 struct iwx_tx_data *txd;
4582
4583 while (ring->tail != idx) {
4584 txd = &ring->data[ring->tail];
4585 if (txd->m != NULL((void *)0)) {
4586 iwx_clear_tx_desc(sc, ring, ring->tail);
4587 iwx_tx_update_byte_tbl(ring, ring->tail, 0, 0);
4588 iwx_txd_done(sc, txd);
4589 ring->queued--;
4590 }
4591 ring->tail = (ring->tail + 1) % IWX_TX_RING_COUNT(256);
4592 }
4593}
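
Since the completion index reported by firmware (see iwx_rx_tx_cmd() below) is only 8 bits wide, the tail-to-index distance above is plain modular arithmetic on the 256-entry ring. A quick standalone check, with hypothetical values, of how many descriptors one advance reclaims:

#include <stdio.h>

#define TX_RING_COUNT 256	/* mirrors IWX_TX_RING_COUNT */

/* Modular distance from the current tail to the completion index,
 * i.e. how many entries iwx_txq_advance() walks and frees. */
static int
reclaimed(int tail, int idx)
{
	return (idx - tail + TX_RING_COUNT) % TX_RING_COUNT;
}

int
main(void)
{
	printf("%d\n", reclaimed(10, 13));	/* 3: no wrap */
	printf("%d\n", reclaimed(250, 4));	/* 10: wraps past entry 255 */
	return 0;
}
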
4594
4595void
4596iwx_rx_tx_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
4597 struct iwx_rx_data *data)
4598{
4599 struct ieee80211com *ic = &sc->sc_ic;
4600 struct ifnet *ifp = IC2IFP(ic)(&(ic)->ic_ac.ac_if);
4601 struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
4602 int qid = cmd_hdr->qid, status, txfail;
4603 struct iwx_tx_ring *ring = &sc->txq[qid];
4604 struct iwx_tx_resp *tx_resp = (void *)pkt->data;
4605 uint32_t ssn;
4606 uint32_t len = iwx_rx_packet_len(pkt);
4607
4608 bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWX_RBUF_SIZE,
4609 BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (data->map), (0), (4096), (0x02));
4610
4611 /* Sanity checks. */
4612 if (sizeof(*tx_resp) > len)
4613 return;
4614 if (qid < IWX_FIRST_AGG_TX_QUEUE(1 + 1) && tx_resp->frame_count > 1)
4615 return;
4616 if (qid >= IWX_FIRST_AGG_TX_QUEUE(1 + 1) && sizeof(*tx_resp) + sizeof(ssn) +
4617 tx_resp->frame_count * sizeof(tx_resp->status) > len)
4618 return;
4619
4620 sc->sc_tx_timer[qid] = 0;
4621
4622 if (tx_resp->frame_count > 1) /* A-MPDU */
4623 return;
4624
4625 status = le16toh(tx_resp->status.status)((__uint16_t)(tx_resp->status.status)) & IWX_TX_STATUS_MSK0x000000ff;
4626 txfail = (status != IWX_TX_STATUS_SUCCESS0x01 &&
4627 status != IWX_TX_STATUS_DIRECT_DONE0x02);
4628
4629 if (txfail)
4630 ifp->if_oerrorsif_data.ifi_oerrors++;
4631
4632 /*
4633 * On hardware supported by iwx(4) the SSN counter is only
4634 * 8 bit and corresponds to a Tx ring index rather than a
4635 * sequence number. Frames up to this index (non-inclusive)
4636 * can now be freed.
4637 */
4638 memcpy(&ssn, &tx_resp->status + tx_resp->frame_count, sizeof(ssn))__builtin_memcpy((&ssn), (&tx_resp->status + tx_resp->frame_count), (sizeof(ssn)));
4639 ssn = le32toh(ssn)((__uint32_t)(ssn)) & 0xff;
4640 iwx_txq_advance(sc, ring, ssn);
4641 iwx_clear_oactive(sc, ring);
4642}
4643
4644void
4645iwx_clear_oactive(struct iwx_softc *sc, struct iwx_tx_ring *ring)
4646{
4647 struct ieee80211com *ic = &sc->sc_ic;
4648 struct ifnet *ifp = IC2IFP(ic)(&(ic)->ic_ac.ac_if);
4649
4650 if (ring->queued < IWX_TX_RING_LOMARK192) {
4651 sc->qfullmsk &= ~(1 << ring->qid);
4652 if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
4653 ifq_clr_oactive(&ifp->if_snd);
4654 /*
4655 * Well, we're in interrupt context, but then again
4656 * I guess net80211 does all sorts of stunts in
4657 * interrupt context, so maybe this is no biggie.
4658 */
4659 (*ifp->if_start)(ifp);
4660 }
4661 }
4662}
4663
4664void
4665iwx_rx_compressed_ba(struct iwx_softc *sc, struct iwx_rx_packet *pkt)
4666{
4667 struct iwx_compressed_ba_notif *ba_res = (void *)pkt->data;
4668 struct ieee80211com *ic = &sc->sc_ic;
4669 struct ieee80211_node *ni;
4670 struct ieee80211_tx_ba *ba;
4671 struct iwx_node *in;
4672 struct iwx_tx_ring *ring;
4673 uint16_t i, tfd_cnt, ra_tid_cnt, idx;
4674 int qid;
4675
4676 if (ic->ic_state != IEEE80211_S_RUN)
4677 return;
4678
4679 if (iwx_rx_packet_payload_len(pkt) < sizeof(*ba_res))
4680 return;
4681
4682 if (ba_res->sta_id != IWX_STATION_ID0)
4683 return;
4684
4685 ni = ic->ic_bss;
4686 in = (void *)ni;
4687
4688 tfd_cnt = le16toh(ba_res->tfd_cnt)((__uint16_t)(ba_res->tfd_cnt));
4689 ra_tid_cnt = le16toh(ba_res->ra_tid_cnt)((__uint16_t)(ba_res->ra_tid_cnt));
4690 if (!tfd_cnt || iwx_rx_packet_payload_len(pkt) < (sizeof(*ba_res) +
4691 sizeof(ba_res->ra_tid[0]) * ra_tid_cnt +
4692 sizeof(ba_res->tfd[0]) * tfd_cnt))
4693 return;
4694
4695 for (i = 0; i < tfd_cnt; i++) {
4696 struct iwx_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i];
4697 uint8_t tid;
4698
4699 tid = ba_tfd->tid;
4700 if (tid >= nitems(sc->aggqid)(sizeof((sc->aggqid)) / sizeof((sc->aggqid)[0])))
4701 continue;
4702
4703 qid = sc->aggqid[tid];
4704 if (qid != htole16(ba_tfd->q_num)((__uint16_t)(ba_tfd->q_num)))
4705 continue;
4706
4707 ring = &sc->txq[qid];
4708
4709 ba = &ni->ni_tx_ba[tid];
4710 if (ba->ba_state != IEEE80211_BA_AGREED2)
4711 continue;
4712
4713 idx = le16toh(ba_tfd->tfd_index)((__uint16_t)(ba_tfd->tfd_index));
4714 if (idx >= IWX_TX_RING_COUNT(256))
4715 continue;
4716 sc->sc_tx_timer[qid] = 0;
4717 iwx_txq_advance(sc, ring, idx);
4718 iwx_clear_oactive(sc, ring);
4719 }
4720}
4721
4722void
4723iwx_rx_bmiss(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
4724 struct iwx_rx_data *data)
4725{
4726 struct ieee80211com *ic = &sc->sc_ic;
4727 struct iwx_missed_beacons_notif *mbn = (void *)pkt->data;
4728 uint32_t missed;
4729
4730 if ((ic->ic_opmode != IEEE80211_M_STA) ||
4731 (ic->ic_state != IEEE80211_S_RUN))
4732 return;
4733
4734 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
4735 sizeof(*mbn), BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (data->map), (sizeof(*pkt)), (sizeof(*mbn)), (0x02));
4736
4737 missed = le32toh(mbn->consec_missed_beacons_since_last_rx)((__uint32_t)(mbn->consec_missed_beacons_since_last_rx));
4738 if (missed > ic->ic_bmissthres && ic->ic_mgt_timer == 0) {
4739 if (ic->ic_ific_ac.ac_if.if_flags & IFF_DEBUG0x4)
4740 printf("%s: receiving no beacons from %s; checking if "
4741 "this AP is still responding to probe requests\n",
4742 DEVNAME(sc)((sc)->sc_dev.dv_xname), ether_sprintf(ic->ic_bss->ni_macaddr));
4743 /*
4744 * Rather than go directly to scan state, try to send a
4745 * directed probe request first. If that fails then the
4746 * state machine will drop us into scanning after timing
4747 * out waiting for a probe response.
4748 */
4749 IEEE80211_SEND_MGMT(ic, ic->ic_bss,
4750 IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0)((*(ic)->ic_send_mgmt)(ic, ic->ic_bss, 0x40, 0, 0));
4751 }
4752
4753}
4754
4755int
4756iwx_binding_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action)
4757{
4758 struct iwx_binding_cmd cmd;
4759 struct iwx_phy_ctxt *phyctxt = in->in_phyctxt;
4760 uint32_t mac_id = IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color)((in->in_id << (0)) | (in->in_color << (8)));
4761 int i, err, active = (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE0x10);
4762 uint32_t status;
4763
4764 if (action == IWX_FW_CTXT_ACTION_ADD1 && active)
4765 panic("binding already added");
4766 if (action == IWX_FW_CTXT_ACTION_REMOVE3 && !active)
4767 panic("binding already removed");
4768
4769 if (phyctxt == NULL((void *)0)) /* XXX race with iwx_stop() */
4770 return EINVAL22;
4771
4772 memset(&cmd, 0, sizeof(cmd))__builtin_memset((&cmd), (0), (sizeof(cmd)));
4773
4774 cmd.id_and_color
4775 = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color))((__uint32_t)(((phyctxt->id << (0)) | (phyctxt->color << (8)))));
4776 cmd.action = htole32(action)((__uint32_t)(action));
4777 cmd.phy = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color))((__uint32_t)(((phyctxt->id << (0)) | (phyctxt->color << (8)))));
4778
4779 cmd.macs[0] = htole32(mac_id)((__uint32_t)(mac_id));
4780 for (i = 1; i < IWX_MAX_MACS_IN_BINDING(3); i++)
4781 cmd.macs[i] = htole32(IWX_FW_CTXT_INVALID)((__uint32_t)((0xffffffff)));
4782
4783 if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel)(((phyctxt->channel)->ic_flags & 0x0080) != 0) ||
4784 !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT)((sc->sc_enabled_capa)[(40)>>3] & (1<<((40)&(8 -1)))))
4785 cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX)((__uint32_t)(0));
4786 else
4787 cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX)((__uint32_t)(1));
4788
4789 status = 0;
4790 err = iwx_send_cmd_pdu_status(sc, IWX_BINDING_CONTEXT_CMD0x2b, sizeof(cmd),
4791 &cmd, &status);
4792 if (err == 0 && status != 0)
4793 err = EIO5;
4794
4795 return err;
4796}
4797
4798int
4799iwx_phy_ctxt_cmd_uhb_v3(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
4800 uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco)
4801{
4802 struct ieee80211com *ic = &sc->sc_ic;
4803 struct iwx_phy_context_cmd_uhb cmd;
4804 uint8_t active_cnt, idle_cnt;
4805 struct ieee80211_channel *chan = ctxt->channel;
4806
4807 memset(&cmd, 0, sizeof(cmd))__builtin_memset((&cmd), (0), (sizeof(cmd)));
4808 cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
4809 ctxt->color))((__uint32_t)(((ctxt->id << (0)) | (ctxt->color << (8)))));
4810 cmd.action = htole32(action)((__uint32_t)(action));
4811
4812 if (IEEE80211_IS_CHAN_2GHZ(ctxt->channel)(((ctxt->channel)->ic_flags & 0x0080) != 0) ||
4813 !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT)((sc->sc_enabled_capa)[(40)>>3] & (1<<((40)&(8 -1)))))
4814 cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX)((__uint32_t)(0));
4815 else
4816 cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX)((__uint32_t)(1));
4817
4818 cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan)(((chan)->ic_flags & 0x0080) != 0) ?
4819 IWX_PHY_BAND_24(1) : IWX_PHY_BAND_5(0);
4820 cmd.ci.channel = htole32(ieee80211_chan2ieee(ic, chan))((__uint32_t)(ieee80211_chan2ieee(ic, chan)));
4821 if (chan->ic_flags & IEEE80211_CHAN_40MHZ0x8000) {
4822 if (sco == IEEE80211_HTOP0_SCO_SCA1) {
4823 /* secondary chan above -> control chan below */
4824 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW(0x0);
4825 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40(0x1);
4826 } else if (sco == IEEE80211_HTOP0_SCO_SCB3) {
4827 /* secondary chan below -> control chan above */
4828 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE(0x4);
4829 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40(0x1);
4830 } else {
4831 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20(0x0);
4832 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW(0x0);
4833 }
4834 } else {
4835 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20(0x0);
4836 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW(0x0);
4837 }
4838
4839 idle_cnt = chains_static;
4840 active_cnt = chains_dynamic;
4841 cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
4842 IWX_PHY_RX_CHAIN_VALID_POS)((__uint32_t)(iwx_fw_valid_rx_ant(sc) << (1)));
4843 cmd.rxchain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS)((__uint32_t)(idle_cnt << (10)));
4844 cmd.rxchain_info |= htole32(active_cnt <<
4845 IWX_PHY_RX_CHAIN_MIMO_CNT_POS)((__uint32_t)(active_cnt << (12)));
4846
4847 return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD0x8, 0, sizeof(cmd), &cmd);
4848}
4849
4850int
4851iwx_phy_ctxt_cmd_v3(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
4852 uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco)
4853{
4854 struct ieee80211com *ic = &sc->sc_ic;
4855 struct iwx_phy_context_cmd cmd;
4856 uint8_t active_cnt, idle_cnt;
4857 struct ieee80211_channel *chan = ctxt->channel;
4858
4859 memset(&cmd, 0, sizeof(cmd))__builtin_memset((&cmd), (0), (sizeof(cmd)));
4860 cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
4861 ctxt->color))((__uint32_t)(((ctxt->id << (0)) | (ctxt->color << (8)))));
4862 cmd.action = htole32(action)((__uint32_t)(action));
4863
4864 if (IEEE80211_IS_CHAN_2GHZ(ctxt->channel)(((ctxt->channel)->ic_flags & 0x0080) != 0) ||
4865 !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT)((sc->sc_enabled_capa)[(40)>>3] & (1<<((40)&(8 -1)))))
4866 cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX)((__uint32_t)(0));
4867 else
4868 cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX)((__uint32_t)(1));
4869
4870 cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan)(((chan)->ic_flags & 0x0080) != 0) ?
4871 IWX_PHY_BAND_24(1) : IWX_PHY_BAND_5(0);
4872 cmd.ci.channel = ieee80211_chan2ieee(ic, chan);
4873 if (chan->ic_flags & IEEE80211_CHAN_40MHZ0x8000) {
4874 if (sco == IEEE80211_HTOP0_SCO_SCA1) {
4875 /* secondary chan above -> control chan below */
4876 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW(0x0);
4877 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40(0x1);
4878 } else if (sco == IEEE80211_HTOP0_SCO_SCB3) {
4879 /* secondary chan below -> control chan above */
4880 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE(0x4);
4881 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40(0x1);
4882 } else {
4883 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20(0x0);
4884 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW(0x0);
4885 }
4886 } else {
4887 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20(0x0);
4888 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW(0x0);
4889 }
4890
4891 idle_cnt = chains_static;
4892 active_cnt = chains_dynamic;
4893 cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
4894 IWX_PHY_RX_CHAIN_VALID_POS)((__uint32_t)(iwx_fw_valid_rx_ant(sc) << (1)));
4895 cmd.rxchain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS)((__uint32_t)(idle_cnt << (10)));
4896 cmd.rxchain_info |= htole32(active_cnt <<
4897 IWX_PHY_RX_CHAIN_MIMO_CNT_POS)((__uint32_t)(active_cnt << (12)));
4898
4899 return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD0x8, 0, sizeof(cmd), &cmd);
4900}
4901
4902int
4903iwx_phy_ctxt_cmd(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
4904 uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
4905 uint32_t apply_time, uint8_t sco)
4906{
4907 int cmdver;
4908
4909 cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP0x1, IWX_PHY_CONTEXT_CMD0x8);
4910 if (cmdver != 3) {
4911 printf("%s: firmware does not support phy-context-cmd v3\n",
4912 DEVNAME(sc)((sc)->sc_dev.dv_xname));
4913 return ENOTSUP91;
4914 }
4915
4916 /*
4917 * Intel increased the size of the fw_channel_info struct and neglected
4918 * to bump the phy_context_cmd struct, which contains an fw_channel_info
4919 * member in the middle.
4920 * To keep things simple we use a separate function to handle the larger
4921 * variant of the phy context command.
4922 */
4923 if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS)((sc->sc_enabled_capa)[(48)>>3] & (1<<((48)&(8 -1))))) {
4924 return iwx_phy_ctxt_cmd_uhb_v3(sc, ctxt, chains_static,
4925 chains_dynamic, action, sco);
4926 }
4927
4928 return iwx_phy_ctxt_cmd_v3(sc, ctxt, chains_static, chains_dynamic,
4929 action, sco);
4930}
4931
4932int
4933iwx_send_cmd(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
4934{
4935 struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE0];
4936 struct iwx_tfh_tfd *desc;
4937 struct iwx_tx_data *txdata;
4938 struct iwx_device_cmd *cmd;
4939 struct mbuf *m;
4940 bus_addr_t paddr;
4941 uint64_t addr;
4942 int err = 0, i, paylen, off, s;
4943 int idx, code, async, group_id;
4944 size_t hdrlen, datasz;
4945 uint8_t *data;
4946 int generation = sc->sc_generation;
4947
4948 code = hcmd->id;
4949 async = hcmd->flags & IWX_CMD_ASYNC;
4950 idx = ring->cur;
4951
4952 for (i = 0, paylen = 0; i < nitems(hcmd->len)(sizeof((hcmd->len)) / sizeof((hcmd->len)[0])); i++) {
4953 paylen += hcmd->len[i];
4954 }
4955
4956 /* If this command waits for a response, allocate response buffer. */
4957 hcmd->resp_pkt = NULL((void *)0);
4958 if (hcmd->flags & IWX_CMD_WANT_RESP) {
4959 uint8_t *resp_buf;
4960 KASSERT(!async)((!async) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/pci/if_iwx.c", 4960, "!async"));
4961 KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwx_rx_packet))((hcmd->resp_pkt_len >= sizeof(struct iwx_rx_packet)) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/pci/if_iwx.c", 4961, "hcmd->resp_pkt_len >= sizeof(struct iwx_rx_packet)"));
4962 KASSERT(hcmd->resp_pkt_len <= IWX_CMD_RESP_MAX)((hcmd->resp_pkt_len <= (1 << 12)) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/pci/if_iwx.c", 4962, "hcmd->resp_pkt_len <= IWX_CMD_RESP_MAX"));
4963 if (sc->sc_cmd_resp_pkt[idx] != NULL((void *)0))
4964 return ENOSPC28;
4965 resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF2,
4966 M_NOWAIT0x0002 | M_ZERO0x0008);
4967 if (resp_buf == NULL((void *)0))
4968 return ENOMEM12;
4969 sc->sc_cmd_resp_pkt[idx] = resp_buf;
4970 sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
4971 } else {
4972 sc->sc_cmd_resp_pkt[idx] = NULL((void *)0);
4973 }
4974
4975 s = splnet()splraise(0x7);
4976
4977 desc = &ring->desc[idx];
4978 txdata = &ring->data[idx];
4979
4980 /*
4981 * XXX Intel inside (tm)
4982 * Firmware API versions >= 50 reject old-style commands in
4983 * group 0 with a "BAD_COMMAND" firmware error. We must pretend
4984 * that such commands were in the LONG_GROUP instead in order
4985 * for firmware to accept them.
4986 */
4987 if (iwx_cmd_groupid(code) == 0) {
4988 code = IWX_WIDE_ID(IWX_LONG_GROUP, code)((0x1 << 8) | code);
4989 txdata->flags |= IWX_TXDATA_FLAG_CMD_IS_NARROW0x01;
4990 } else
4991 txdata->flags &= ~IWX_TXDATA_FLAG_CMD_IS_NARROW0x01;
4992
4993 group_id = iwx_cmd_groupid(code);
4994
4995 hdrlen = sizeof(cmd->hdr_wide);
4996 datasz = sizeof(cmd->data_wide);
4997
4998 if (paylen > datasz) {
4999 /* Command is too large to fit in pre-allocated space. */
5000 size_t totlen = hdrlen + paylen;
5001 if (paylen > IWX_MAX_CMD_PAYLOAD_SIZE(4096 - sizeof(struct iwx_cmd_header_wide))) {
5002 printf("%s: firmware command too long (%zd bytes)\n",
5003 DEVNAME(sc)((sc)->sc_dev.dv_xname), totlen);
5004 err = EINVAL22;
5005 goto out;
5006 }
5007 m = MCLGETL(NULL, M_DONTWAIT, totlen)m_clget((((void *)0)), (0x0002), (totlen));
5008 if (m == NULL((void *)0)) {
5009 printf("%s: could not get fw cmd mbuf (%zd bytes)\n",
5010 DEVNAME(sc)((sc)->sc_dev.dv_xname), totlen);
5011 err = ENOMEM12;
5012 goto out;
5013 }
5014 cmd = mtod(m, struct iwx_device_cmd *)((struct iwx_device_cmd *)((m)->m_hdr.mh_data));
5015 err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
5016 totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE)(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (txdata->map), (cmd), (totlen), (((void *)0)), (0x0001 | 0x0400));
5017 if (err) {
5018 printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
5019 DEVNAME(sc)((sc)->sc_dev.dv_xname), totlen);
5020 m_freem(m);
5021 goto out;
5022 }
5023 txdata->m = m; /* mbuf will be freed in iwx_cmd_done() */
5024 paddr = txdata->map->dm_segs[0].ds_addr;
5025 } else {
5026 cmd = &ring->cmd[idx];
5027 paddr = txdata->cmd_paddr;
5028 }
5029
5030 memset(cmd, 0, sizeof(*cmd))__builtin_memset((cmd), (0), (sizeof(*cmd)));
5031 cmd->hdr_wide.opcode = iwx_cmd_opcode(code);
5032 cmd->hdr_wide.group_id = group_id;
5033 cmd->hdr_wide.qid = ring->qid;
5034 cmd->hdr_wide.idx = idx;
5035 cmd->hdr_wide.length = htole16(paylen)((__uint16_t)(paylen));
5036 cmd->hdr_wide.version = iwx_cmd_version(code);
5037 data = cmd->data_wide;
5038
5039 for (i = 0, off = 0; i < nitems(hcmd->data)(sizeof((hcmd->data)) / sizeof((hcmd->data)[0])); i++) {
5040 if (hcmd->len[i] == 0)
5041 continue;
5042 memcpy(data + off, hcmd->data[i], hcmd->len[i])__builtin_memcpy((data + off), (hcmd->data[i]), (hcmd->len[i]));
5043 off += hcmd->len[i];
5044 }
5045 KASSERT(off == paylen)((off == paylen) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/pci/if_iwx.c", 5045, "off == paylen"));
5046
5047 desc->tbs[0].tb_len = htole16(MIN(hdrlen + paylen, IWX_FIRST_TB_SIZE))((__uint16_t)((((hdrlen + paylen)<(20))?(hdrlen + paylen):(20))));
5048 addr = htole64(paddr)((__uint64_t)(paddr));
5049 memcpy(&desc->tbs[0].addr, &addr, sizeof(addr))__builtin_memcpy((&desc->tbs[0].addr), (&addr), (sizeof(addr)));
5050 if (hdrlen + paylen > IWX_FIRST_TB_SIZE20) {
5051 desc->tbs[1].tb_len = htole16(hdrlen + paylen -
5052 IWX_FIRST_TB_SIZE)((__uint16_t)(hdrlen + paylen - 20));
5053 addr = htole64(paddr + IWX_FIRST_TB_SIZE)((__uint64_t)(paddr + 20));
5054 memcpy(&desc->tbs[1].addr, &addr, sizeof(addr))__builtin_memcpy((&desc->tbs[1].addr), (&addr), (sizeof(addr)));
5055 desc->num_tbs = htole16(2)((__uint16_t)(2));
5056 } else
5057 desc->num_tbs = htole16(1)((__uint16_t)(1));
5058
5059 if (paylen > datasz) {
5060 bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
5061 hdrlen + paylen, BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (txdata->map), (0), (hdrlen + paylen), (0x04));
5062 } else {
5063 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
5064 (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
5065 hdrlen + paylen, BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (ring->cmd_dma.map), ((char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr), (hdrlen + paylen), (0x04));
5066 }
5067 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
5068 (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
5069 sizeof (*desc), BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (ring->desc_dma.map), ((char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr), (sizeof (*desc)), (0x04));
5070 /* Kick command ring. */
5071 DPRINTF(("%s: sending command 0x%x\n", __func__, code))do { ; } while (0);
5072 ring->queued++;
5073 ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT(256);
5074 IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x400)+0x060))), ((ring->qid << 16 | ring->cur))));
5075
5076 if (!async) {
5077 err = tsleep_nsec(desc, PCATCH0x100, "iwxcmd", SEC_TO_NSEC(1));
5078 if (err == 0) {
5079 /* if hardware is no longer up, return error */
5080 if (generation != sc->sc_generation) {
5081 err = ENXIO6;
5082 goto out;
5083 }
5084
5085 /* Response buffer will be freed in iwx_free_resp(). */
5086 hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
5087 sc->sc_cmd_resp_pkt[idx] = NULL((void *)0);
5088 } else if (generation == sc->sc_generation) {
5089 free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF2,
5090 sc->sc_cmd_resp_len[idx]);
5091 sc->sc_cmd_resp_pkt[idx] = NULL((void *)0);
5092 }
5093 }
5094 out:
5095 splx(s)spllower(s);
5096
5097 return err;
5098}
5099
5100int
5101iwx_send_cmd_pdu(struct iwx_softc *sc, uint32_t id, uint32_t flags,
5102 uint16_t len, const void *data)
5103{
5104 struct iwx_host_cmd cmd = {
5105 .id = id,
5106 .len = { len, },
5107 .data = { data, },
5108 .flags = flags,
5109 };
5110
5111 return iwx_send_cmd(sc, &cmd);
5112}
5113
5114int
5115iwx_send_cmd_status(struct iwx_softc *sc, struct iwx_host_cmd *cmd,
5116 uint32_t *status)
5117{
5118 struct iwx_rx_packet *pkt;
5119 struct iwx_cmd_response *resp;
5120 int err, resp_len;
5121
5122 KASSERT((cmd->flags & IWX_CMD_WANT_RESP) == 0)(((cmd->flags & IWX_CMD_WANT_RESP) == 0) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/pci/if_iwx.c", 5122, "(cmd->flags & IWX_CMD_WANT_RESP) == 0"));
5123 cmd->flags |= IWX_CMD_WANT_RESP;
5124 cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
5125
5126 err = iwx_send_cmd(sc, cmd);
5127 if (err)
5128 return err;
5129
5130 pkt = cmd->resp_pkt;
5131 if (pkt == NULL((void *)0) || (pkt->hdr.flags & IWX_CMD_FAILED_MSK0x40))
5132 return EIO5;
5133
5134 resp_len = iwx_rx_packet_payload_len(pkt);
5135 if (resp_len != sizeof(*resp)) {
5136 iwx_free_resp(sc, cmd);
5137 return EIO5;
5138 }
5139
5140 resp = (void *)pkt->data;
5141 *status = le32toh(resp->status)((__uint32_t)(resp->status));
5142 iwx_free_resp(sc, cmd);
5143 return err;
5144}
5145
5146int
5147iwx_send_cmd_pdu_status(struct iwx_softc *sc, uint32_t id, uint16_t len,
5148 const void *data, uint32_t *status)
5149{
5150 struct iwx_host_cmd cmd = {
5151 .id = id,
5152 .len = { len, },
5153 .data = { data, },
5154 };
5155
5156 return iwx_send_cmd_status(sc, &cmd, status);
5157}
5158
5159void
5160iwx_free_resp(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
5161{
5162 KASSERT((hcmd->flags & (IWX_CMD_WANT_RESP)) == IWX_CMD_WANT_RESP)(((hcmd->flags & (IWX_CMD_WANT_RESP)) == IWX_CMD_WANT_RESP) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/pci/if_iwx.c", 5162, "(hcmd->flags & (IWX_CMD_WANT_RESP)) == IWX_CMD_WANT_RESP"));
5163 free(hcmd->resp_pkt, M_DEVBUF2, hcmd->resp_pkt_len);
5164 hcmd->resp_pkt = NULL((void *)0);
5165}
5166
5167void
5168iwx_cmd_done(struct iwx_softc *sc, int qid, int idx, int code)
5169{
5170 struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE0];
5171 struct iwx_tx_data *data;
5172
5173 if (qid != IWX_DQA_CMD_QUEUE0) {
5174 return; /* Not a command ack. */
5175 }
5176
5177 data = &ring->data[idx];
5178
5179 if (data->m != NULL((void *)0)) {
5180 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
5181 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (data->map), (0), (data->map->dm_mapsize), (0x08));
5182 bus_dmamap_unload(sc->sc_dmat, data->map)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (data->map));
5183 m_freem(data->m);
5184 data->m = NULL((void *)0);
5185 }
5186 wakeup(&ring->desc[idx]);
5187
5188 DPRINTF(("%s: command 0x%x done\n", __func__, code))do { ; } while (0);
5189 if (ring->queued == 0) {
5190 DPRINTF(("%s: unexpected firmware response to command 0x%x\n",do { ; } while (0)
5191 DEVNAME(sc), code))do { ; } while (0);
5192 } else if (ring->queued > 0)
5193 ring->queued--;
5194}
5195
5196/*
5197 * Fill in various bits for management frames, and leave them
5198 * unfilled for data frames (firmware takes care of that).
5199 * Return the selected TX rate.
5200 */
5201const struct iwx_rate *
5202iwx_tx_fill_cmd(struct iwx_softc *sc, struct iwx_node *in,
5203 struct ieee80211_frame *wh, struct iwx_tx_cmd_gen2 *tx)
5204{
5205 struct ieee80211com *ic = &sc->sc_ic;
5206 struct ieee80211_node *ni = &in->in_ni;
5207 struct ieee80211_rateset *rs = &ni->ni_rates;
5208 const struct iwx_rate *rinfo;
5209 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK0x0c;
5210 int min_ridx = iwx_rval2ridx(ieee80211_min_basic_rate(ic));
5211 int ridx, rate_flags;
5212 uint32_t flags = 0;
5213
5214 if (IEEE80211_IS_MULTICAST(wh->i_addr1)(*(wh->i_addr1) & 0x01) ||
5215 type != IEEE80211_FC0_TYPE_DATA0x08) {
5216 /* for non-data, use the lowest supported rate */
5217 ridx = min_ridx;
5218 flags |= IWX_TX_FLAGS_CMD_RATE(1 << 0);
5219 } else if (ic->ic_fixed_mcs != -1) {
5220 ridx = sc->sc_fixed_ridx;
5221 flags |= IWX_TX_FLAGS_CMD_RATE(1 << 0);
5222 } else if (ic->ic_fixed_rate != -1) {
5223 ridx = sc->sc_fixed_ridx;
5224 flags |= IWX_TX_FLAGS_CMD_RATE(1 << 0);
5225 } else if (ni->ni_flags & IEEE80211_NODE_HT0x0400) {
5226 ridx = iwx_mcs2ridx[ni->ni_txmcs];
5227 } else {
5228 uint8_t rval;
5229 rval = (rs->rs_rates[ni->ni_txrate] & IEEE80211_RATE_VAL0x7f);
5230 ridx = iwx_rval2ridx(rval);
5231 if (ridx < min_ridx)
5232 ridx = min_ridx;
5233 }
5234
5235 if ((ic->ic_flags & IEEE80211_F_RSNON0x00200000) &&
5236 ni->ni_rsn_supp_state == RSNA_SUPP_PTKNEGOTIATING)
5237 flags |= IWX_TX_FLAGS_HIGH_PRI(1 << 2);
5238 tx->flags = htole32(flags)((__uint32_t)(flags));
5239
5240 rinfo = &iwx_rates[ridx];
5241 if (iwx_is_mimo_ht_plcp(rinfo->ht_plcp))
5242 rate_flags = IWX_RATE_MCS_ANT_AB_MSK((1 << 14) | (2 << 14));
5243 else
5244 rate_flags = IWX_RATE_MCS_ANT_A_MSK(1 << 14);
5245 if (IWX_RIDX_IS_CCK(ridx)((ridx) < 4))
5246 rate_flags |= IWX_RATE_MCS_CCK_MSK(1 << 9);
5247 if ((ni->ni_flags & IEEE80211_NODE_HT0x0400) &&
5248 type == IEEE80211_FC0_TYPE_DATA0x08 &&
5249 rinfo->ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP0x20) {
5250 uint8_t sco;
5251 if (ieee80211_node_supports_ht_chan40(ni))
5252 sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK0x03);
5253 else
5254 sco = IEEE80211_HTOP0_SCO_SCN0;
5255 rate_flags |= IWX_RATE_MCS_HT_MSK(1 << 8);
5256 if ((sco == IEEE80211_HTOP0_SCO_SCA1 ||
5257 sco == IEEE80211_HTOP0_SCO_SCB3) &&
5258 in->in_phyctxt != NULL((void *)0) && in->in_phyctxt->sco == sco) {
5259 rate_flags |= IWX_RATE_MCS_CHAN_WIDTH_40(1 << 11);
5260 if (ieee80211_node_supports_ht_sgi40(ni))
5261 rate_flags |= IWX_RATE_MCS_SGI_MSK(1 << 13);
5262 } else if (ieee80211_node_supports_ht_sgi20(ni))
5263 rate_flags |= IWX_RATE_MCS_SGI_MSK(1 << 13);
5264 tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp)((__uint32_t)(rate_flags | rinfo->ht_plcp));
5265 } else
5266 tx->rate_n_flags = htole32(rate_flags | rinfo->plcp)((__uint32_t)(rate_flags | rinfo->plcp));
5267
5268 return rinfo;
5269}
5270
5271void
5272iwx_tx_update_byte_tbl(struct iwx_tx_ring *txq, int idx, uint16_t byte_cnt,
5273 uint16_t num_tbs)
5274{
5275 uint8_t filled_tfd_size, num_fetch_chunks;
5276 uint16_t len = byte_cnt;
5277 uint16_t bc_ent;
5278 struct iwx_agn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.vaddr;
5279
5280 filled_tfd_size = offsetof(struct iwx_tfh_tfd, tbs)__builtin_offsetof(struct iwx_tfh_tfd, tbs) +
5281 num_tbs * sizeof(struct iwx_tfh_tb);
5282 /*
5283 * filled_tfd_size contains the number of filled bytes in the TFD.
5284 * Dividing it by 64 gives the number of chunks to fetch to
5285 * SRAM: 0 for one chunk, 1 for two, and so on.
5286 * If, for example, the TFD contains only 3 TBs then 32 bytes
5287 * of the TFD are used, and only one chunk of 64 bytes should
5288 * be fetched.
5289 */
5290 num_fetch_chunks = howmany(filled_tfd_size, 64)(((filled_tfd_size) + ((64) - 1)) / (64)) - 1;
5291
5292 /* Before AX210, the HW expects the byte count in dwords (DW). */
5293 len = howmany(len, 4)(((len) + ((4) - 1)) / (4));
5294 bc_ent = htole16(len | (num_fetch_chunks << 12))((__uint16_t)(len | (num_fetch_chunks << 12)));
5295 scd_bc_tbl->tfd_offset[idx] = bc_ent;
5296}
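
To make the chunk arithmetic above concrete, a worked standalone example follows. It assumes the TFD layout implied by the comment (a 2-byte header and 10-byte TB entries, so 3 TBs occupy 32 bytes); the exact sizes come from struct iwx_tfh_tfd and are only assumed here:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint16_t byte_cnt = 1500;			/* frame length in bytes */
	uint16_t num_tbs = 3;
	/* Assumed layout: 2-byte TFD header, 10 bytes per TB. */
	uint8_t filled_tfd_size = 2 + num_tbs * 10;	/* 32 bytes */
	uint8_t num_fetch_chunks = (filled_tfd_size + 63) / 64 - 1;
	uint16_t len = (byte_cnt + 3) / 4;		/* bytes -> dwords */
	uint16_t bc_ent = len | (num_fetch_chunks << 12);

	/* chunks=0 (one 64-byte fetch), len=375, bc_ent=0x0177 */
	printf("chunks=%u len=%u bc_ent=0x%04x\n",
	    num_fetch_chunks, len, bc_ent);
	return 0;
}
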
5297
5298int
5299iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
5300{
5301 struct ieee80211com *ic = &sc->sc_ic;
5302 struct iwx_node *in = (void *)ni;
5303 struct iwx_tx_ring *ring;
5304 struct iwx_tx_data *data;
5305 struct iwx_tfh_tfd *desc;
5306 struct iwx_device_cmd *cmd;
5307 struct iwx_tx_cmd_gen2 *tx;
5308 struct ieee80211_frame *wh;
5309 struct ieee80211_key *k = NULL((void *)0);
5310 const struct iwx_rate *rinfo;
5311 uint64_t paddr;
5312 u_int hdrlen;
5313 bus_dma_segment_t *seg;
5314 uint16_t num_tbs;
5315 uint8_t type, subtype;
5316 int i, totlen, err, pad, qid;
5317
5318 wh = mtod(m, struct ieee80211_frame *)((struct ieee80211_frame *)((m)->m_hdr.mh_data));
5319 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK0x0c;
5320 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK0xf0;
5321 if (type == IEEE80211_FC0_TYPE_CTL0x04)
5322 hdrlen = sizeof(struct ieee80211_frame_min);
5323 else
5324 hdrlen = ieee80211_get_hdrlen(wh);
5325
5326 qid = sc->first_data_qid;
5327
5328 /* Put QoS frames on the data queue which maps to their TID. */
5329 if (ieee80211_has_qos(wh)) {
5330 struct ieee80211_tx_ba *ba;
5331 uint16_t qos = ieee80211_get_qos(wh);
5332 uint8_t tid = qos & IEEE80211_QOS_TID0x000f;
5333
5334 ba = &ni->ni_tx_ba[tid];
5335 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)(*(wh->i_addr1) & 0x01) &&
5336 type == IEEE80211_FC0_TYPE_DATA0x08 &&
5337 subtype != IEEE80211_FC0_SUBTYPE_NODATA0x40 &&
5338 sc->aggqid[tid] != 0 &&
5339 ba->ba_state == IEEE80211_BA_AGREED2) {
5340 qid = sc->aggqid[tid];
5341 }
5342 }
5343
5344 ring = &sc->txq[qid];
5345 desc = &ring->desc[ring->cur];
5346 memset(desc, 0, sizeof(*desc))__builtin_memset((desc), (0), (sizeof(*desc)));
5347 data = &ring->data[ring->cur];
5348
5349 cmd = &ring->cmd[ring->cur];
5350 cmd->hdr.code = IWX_TX_CMD0x1c;
5351 cmd->hdr.flags = 0;
5352 cmd->hdr.qid = ring->qid;
5353 cmd->hdr.idx = ring->cur;
5354
5355 tx = (void *)cmd->data;
5356 memset(tx, 0, sizeof(*tx))__builtin_memset((tx), (0), (sizeof(*tx)));
5357
5358 rinfo = iwx_tx_fill_cmd(sc, in, wh, tx);
5359
5360#if NBPFILTER1 > 0
5361 if (sc->sc_drvbpf != NULL((void *)0)) {
5362 struct iwx_tx_radiotap_header *tap = &sc->sc_txtapsc_txtapu.th;
5363 uint16_t chan_flags;
5364
5365 tap->wt_flags = 0;
5366 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq)((__uint16_t)(ni->ni_chan->ic_freq));
5367 chan_flags = ni->ni_chan->ic_flags;
5368 if (ic->ic_curmode != IEEE80211_MODE_11N)
5369 chan_flags &= ~IEEE80211_CHAN_HT0x2000;
5370 tap->wt_chan_flags = htole16(chan_flags)((__uint16_t)(chan_flags));
5371 if ((ni->ni_flags & IEEE80211_NODE_HT0x0400) &&
5372 !IEEE80211_IS_MULTICAST(wh->i_addr1)(*(wh->i_addr1) & 0x01) &&
5373 type == IEEE80211_FC0_TYPE_DATA0x08 &&
5374 rinfo->ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP0x20) {
5375 tap->wt_rate = (0x80 | rinfo->ht_plcp);
5376 } else
5377 tap->wt_rate = rinfo->rate;
5378 if ((ic->ic_flags & IEEE80211_F_WEPON0x00000100) &&
5379 (wh->i_fc[1] & IEEE80211_FC1_PROTECTED0x40))
5380 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP0x04;
5381
5382 bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
5383 m, BPF_DIRECTION_OUT(1 << 1));
5384 }
5385#endif
5386
5387 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED0x40) {
5388 k = ieee80211_get_txkey(ic, wh, ni);
5389 if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
5390 if ((m = ieee80211_encrypt(ic, m, k)) == NULL((void *)0))
5391 return ENOBUFS55;
5392 /* 802.11 header may have moved. */
5393 wh = mtod(m, struct ieee80211_frame *)((struct ieee80211_frame *)((m)->m_hdr.mh_data));
5394 tx->flags |= htole32(IWX_TX_FLAGS_ENCRYPT_DIS)((__uint32_t)((1 << 1)));
5395 } else {
5396 k->k_tsc++;
5397 /* Hardware increments PN internally and adds IV. */
5398 }
5399 } else
5400 tx->flags |= htole32(IWX_TX_FLAGS_ENCRYPT_DIS)((__uint32_t)((1 << 1)));
5401
5402 totlen = m->m_pkthdrM_dat.MH.MH_pkthdr.len;
5403
5404 if (hdrlen & 3) {
5405 /* First segment length must be a multiple of 4. */
5406 pad = 4 - (hdrlen & 3);
5407 tx->offload_assist |= htole16(IWX_TX_CMD_OFFLD_PAD)((__uint16_t)((1 << 13)));
5408 } else
5409 pad = 0;
5410
5411 tx->len = htole16(totlen)((__uint16_t)(totlen));
5412
5413 /* Copy 802.11 header in TX command. */
5414 memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen)__builtin_memcpy((((uint8_t *)tx) + sizeof(*tx)), (wh), (hdrlen));
5415
5416 /* Trim 802.11 header. */
5417 m_adj(m, hdrlen);
5418
5419 err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
5420 BUS_DMA_NOWAIT | BUS_DMA_WRITE)(*(sc->sc_dmat)->_dmamap_load_mbuf)((sc->sc_dmat), (data->map), (m), (0x0001 | 0x0400));
5421 if (err && err != EFBIG27) {
5422 printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
5423 m_freem(m);
5424 return err;
5425 }
5426 if (err) {
5427 /* Too many DMA segments, linearize mbuf. */
5428 if (m_defrag(m, M_DONTWAIT0x0002)) {
5429 m_freem(m);
5430 return ENOBUFS55;
5431 }
5432 err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
5433 BUS_DMA_NOWAIT | BUS_DMA_WRITE)(*(sc->sc_dmat)->_dmamap_load_mbuf)((sc->sc_dmat), (data->map), (m), (0x0001 | 0x0400));
5434 if (err) {
5435 printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname),
5436 err);
5437 m_freem(m);
5438 return err;
5439 }
5440 }
5441 data->m = m;
5442 data->in = in;
5443
5444 /* Fill TX descriptor. */
5445 num_tbs = 2 + data->map->dm_nsegs;
5446 desc->num_tbs = htole16(num_tbs)((__uint16_t)(num_tbs));
5447
5448 desc->tbs[0].tb_len = htole16(IWX_FIRST_TB_SIZE)((__uint16_t)(20));
5449 paddr = htole64(data->cmd_paddr)((__uint64_t)(data->cmd_paddr));
5450 memcpy(&desc->tbs[0].addr, &paddr, sizeof(paddr))__builtin_memcpy((&desc->tbs[0].addr), (&paddr), (sizeof(paddr)));
5451 if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[0].tb_len)((__uint32_t)(desc->tbs[0].tb_len))) >> 32)
5452 DPRINTF(("%s: TB0 crosses 32bit boundary\n", __func__))do { ; } while (0);
5453 desc->tbs[1].tb_len = htole16(sizeof(struct iwx_cmd_header) +
5454 sizeof(*tx) + hdrlen + pad - IWX_FIRST_TB_SIZE)((__uint16_t)(sizeof(struct iwx_cmd_header) + sizeof(*tx) + hdrlen + pad - 20));
5455 paddr = htole64(data->cmd_paddr + IWX_FIRST_TB_SIZE)((__uint64_t)(data->cmd_paddr + 20));
5456 memcpy(&desc->tbs[1].addr, &paddr, sizeof(paddr))__builtin_memcpy((&desc->tbs[1].addr), (&paddr), (sizeof(paddr)));
5457
5458 if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[1].tb_len)((__uint32_t)(desc->tbs[1].tb_len))) >> 32)
5459 DPRINTF(("%s: TB1 crosses 32bit boundary\n", __func__))do { ; } while (0);
5460
5461 /* Other DMA segments are for data payload. */
5462 seg = data->map->dm_segs;
5463 for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
5464 desc->tbs[i + 2].tb_len = htole16(seg->ds_len)((__uint16_t)(seg->ds_len));
5465 paddr = htole64(seg->ds_addr)((__uint64_t)(seg->ds_addr));
5466 memcpy(&desc->tbs[i + 2].addr, &paddr, sizeof(paddr))__builtin_memcpy((&desc->tbs[i + 2].addr), (&paddr), (sizeof(paddr)));
5467 if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[i + 2].tb_len)((__uint32_t)(desc->tbs[i + 2].tb_len))) >> 32)
5468 DPRINTF(("%s: TB%d crosses 32bit boundary\n", __func__, i + 2))do { ; } while (0);
5469 }
5470
5471 bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
5472 BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (data->map), (0), (data->map->dm_mapsize), (0x04));
5473 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
5474 (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
5475 sizeof (*cmd), BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (ring->cmd_dma.map), ((char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr), (sizeof (*cmd)), (0x04));
5476 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
5477 (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
5478 sizeof (*desc), BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (ring->desc_dma.map), ((char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr), (sizeof (*desc)), (0x04));
5479
5480 iwx_tx_update_byte_tbl(ring, ring->cur, totlen, num_tbs);
5481
5482 /* Kick TX ring. */
5483 ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT(256);
5484 IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x400)+0x060))), ((ring->qid << 16 | ring->cur))));
5485
5486 /* Mark TX ring as full if we reach a certain threshold. */
5487 if (++ring->queued > IWX_TX_RING_HIMARK224) {
5488 sc->qfullmsk |= 1 << ring->qid;
5489 }
5490
5491 if (ic->ic_ific_ac.ac_if.if_flags & IFF_UP0x1)
5492 sc->sc_tx_timer[ring->qid] = 15;
5493
5494 return 0;
5495}
5496
5497int
5498iwx_flush_sta_tids(struct iwx_softc *sc, int sta_id, uint16_t tids)
5499{
5500 struct iwx_rx_packet *pkt;
5501 struct iwx_tx_path_flush_cmd_rsp *resp;
5502 struct iwx_tx_path_flush_cmd flush_cmd = {
5503 .sta_id = htole32(sta_id)((__uint32_t)(sta_id)),
5504 .tid_mask = htole16(tids)((__uint16_t)(tids)),
5505 };
5506 struct iwx_host_cmd hcmd = {
5507 .id = IWX_TXPATH_FLUSH,
5508 .len = { sizeof(flush_cmd), },
5509 .data = { &flush_cmd, },
5510 .flags = IWX_CMD_WANT_RESP,
5511 .resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
5512 };
5513 int err, resp_len, i, num_flushed_queues;
5514
5515 err = iwx_send_cmd(sc, &hcmd);
5516 if (err)
5517 return err;
5518
5519 pkt = hcmd.resp_pkt;
5520 if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
5521 err = EIO;
5522 goto out;
5523 }
5524
5525 resp_len = iwx_rx_packet_payload_len(pkt);
5526 /* Some firmware versions don't provide a response. */
5527 if (resp_len == 0)
5528 goto out;
5529 else if (resp_len != sizeof(*resp)) {
5530 err = EIO;
5531 goto out;
5532 }
5533
5534 resp = (void *)pkt->data;
5535
5536 if (le16toh(resp->sta_id) != sta_id) {
5537 err = EIO;
5538 goto out;
5539 }
5540
5541 num_flushed_queues = le16toh(resp->num_flushed_queues);
5542 if (num_flushed_queues > IWX_TX_FLUSH_QUEUE_RSP) {
5543 err = EIO;
5544 goto out;
5545 }
5546
5547 for (i = 0; i < num_flushed_queues; i++) {
5548 struct iwx_flush_queue_info *queue_info = &resp->queues[i];
5549 uint16_t tid = le16toh(queue_info->tid);
5550 uint16_t read_after = le16toh(queue_info->read_after_flush);
5551 uint16_t qid = le16toh(queue_info->queue_num);
5552 struct iwx_tx_ring *txq;
5553
5554 if (qid >= nitems(sc->txq))
5555 continue;
5556
5557 txq = &sc->txq[qid];
5558 if (tid != txq->tid)
5559 continue;
5560
5561 iwx_txq_advance(sc, txq, read_after);
5562 }
5563out:
5564 iwx_free_resp(sc, &hcmd);
5565 return err;
5566}
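/*
 * Illustrative sketch, not part of if_iwx.c: the tid_mask argument of
 * iwx_flush_sta_tids() is a 16-bit bitmask with one bit per TID, so the
 * 0xffff passed from iwx_flush_sta() below selects all TIDs. The helper
 * here is hypothetical and only demonstrates the encoding.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t
tid_mask(const int *tids, int ntids)
{
	uint16_t mask = 0;
	int i;

	for (i = 0; i < ntids; i++)
		if (tids[i] >= 0 && tids[i] < 16)
			mask |= (uint16_t)(1 << tids[i]);
	return mask;
}

int
main(void)
{
	int tids[] = { 0, 5 };

	printf("mask=0x%04x all=0x%04x\n", tid_mask(tids, 2), 0xffff);
	return 0;	/* prints mask=0x0021 all=0xffff */
}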
5567
5568 #define IWX_FLUSH_WAIT_MS 2000
5569
5570int
5571iwx_wait_tx_queues_empty(struct iwx_softc *sc)
5572{
5573 int i, err;
5574
5575 for (i = 0; i < nitems(sc->txq); i++) {
5576 struct iwx_tx_ring *ring = &sc->txq[i];
5577
5578 if (i == IWX_DQA_CMD_QUEUE)
5579 continue;
5580
5581 while (ring->queued > 0) {
5582 err = tsleep_nsec(ring, 0, "iwxflush",
5583 MSEC_TO_NSEC(IWX_FLUSH_WAIT_MS));
5584 if (err)
5585 return err;
5586 }
5587 }
5588
5589 return 0;
5590}
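/*
 * Illustrative sketch, not part of if_iwx.c: iwx_wait_tx_queues_empty()
 * relies on the kernel's tsleep_nsec()/wakeup() pairing; the loop sleeps
 * on the ring's address until the TX-completion path wakes that channel
 * as ring->queued drains, or until the 2000 ms timeout expires (in which
 * case tsleep_nsec() returns an error and the wait is abandoned). A rough
 * userland analogue using a condition variable:
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static int queued = 2;		/* stand-in for ring->queued */

static void *
completion_path(void *arg)	/* plays the role of TX completions */
{
	pthread_mutex_lock(&mtx);
	while (queued > 0) {
		queued--;			/* one frame completed */
		pthread_cond_signal(&cv);	/* analogue of wakeup(ring) */
	}
	pthread_mutex_unlock(&mtx);
	return arg;
}

int
main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, completion_path, NULL);
	pthread_mutex_lock(&mtx);
	while (queued > 0)			/* like "while (ring->queued > 0)" */
		pthread_cond_wait(&cv, &mtx);	/* analogue of tsleep_nsec() */
	pthread_mutex_unlock(&mtx);
	pthread_join(t, NULL);
	printf("queues empty\n");
	return 0;
}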
5591
5592int
5593iwx_drain_sta(struct iwx_softc *sc, struct iwx_node* in, int drain)
5594{
5595 struct iwx_add_sta_cmd cmd;
5596 int err;
5597 uint32_t status;
5598
5599 memset(&cmd, 0, sizeof(cmd));
5600 cmd.mac_id_n_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
5601     in->in_color));
5602 cmd.sta_id = IWX_STATION_ID;
5603 cmd.add_modify = IWX_STA_MODE_MODIFY;
5604 cmd.station_flags = drain ? htole32(IWX_STA_FLG_DRAIN_FLOW) : 0;
5605 cmd.station_flags_msk = htole32(IWX_STA_FLG_DRAIN_FLOW);
5606
5607 status = IWX_ADD_STA_SUCCESS;
5608 err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA,
5609 sizeof(cmd), &cmd, &status);
5610 if (err) {
5611 printf("%s: could not update sta (error %d)\n",
5612 DEVNAME(sc), err);
5613 return err;
5614 }
5615
5616 switch (status & IWX_ADD_STA_STATUS_MASK) {
5617 case IWX_ADD_STA_SUCCESS:
5618 break;
5619 default:
5620 err = EIO;
5621 printf("%s: Couldn't %s draining for station\n",
5622 DEVNAME(sc), drain ? "enable" : "disable");
5623 break;
5624 }
5625
5626 return err;
5627}
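/*
 * Illustrative sketch, not part of if_iwx.c: iwx_drain_sta() uses the
 * usual flags/mask update scheme, where station_flags_msk selects which
 * bits the firmware may change and station_flags supplies their new
 * values, leaving unrelated station flags intact. apply_masked() below
 * is hypothetical and only demonstrates that read-modify-write pattern.
 */
#include <stdint.h>
#include <stdio.h>

#define STA_FLG_DRAIN_FLOW	(1U << 12)	/* mirrors IWX_STA_FLG_DRAIN_FLOW */

static uint32_t
apply_masked(uint32_t current, uint32_t flags, uint32_t msk)
{
	return (current & ~msk) | (flags & msk);
}

int
main(void)
{
	uint32_t sta = 0x00000030;	/* unrelated flags already set */

	sta = apply_masked(sta, STA_FLG_DRAIN_FLOW, STA_FLG_DRAIN_FLOW);
	printf("drain on:  0x%08x\n", sta);	/* 0x00001030 */
	sta = apply_masked(sta, 0, STA_FLG_DRAIN_FLOW);
	printf("drain off: 0x%08x\n", sta);	/* 0x00000030 */
	return 0;
}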
5628
5629int
5630iwx_flush_sta(struct iwx_softc *sc, struct iwx_node *in)
5631{
5632 int err;
5633
5634 splassert(IPL_NET);
5635
5636 sc->sc_flags |= IWX_FLAG_TXFLUSH;
5637
5638 err = iwx_drain_sta(sc, in, 1);
5639 if (err)
5640 goto done;
5641
5642 err = iwx_flush_sta_tids(sc, IWX_STATION_ID, 0xffff);
5643 if (err) {
5644 printf("%s: could not flush Tx path (error %d)\n",
5645 DEVNAME(sc), err);
5646 goto done;
5647 }
5648
5649 err = iwx_wait_tx_queues_empty(sc);
5650 if (err) {
5651 printf("%s: Could not empty Tx queues (error %d)\n",
5652 DEVNAME(sc), err);
5653 goto done;
5654 }
5655
5656 err = iwx_drain_sta(sc, in, 0);
5657done:
5658 sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
5659 return err;
5660}
5661
5662 #define IWX_POWER_KEEP_ALIVE_PERIOD_SEC 25
5663
5664int
5665iwx_beacon_filter_send_cmd(struct iwx_softc *sc,
5666 struct iwx_beacon_filter_cmd *cmd)
5667{
5668 return iwx_send_cmd_pdu(sc, IWX_REPLY_BEACON_FILTERING_CMD,
5669 0, sizeof(struct iwx_beacon_filter_cmd), cmd);
5670}
5671
5672int
5673iwx_update_beacon_abort(struct iwx_softc *sc, struct iwx_node *in, int enable)
5674{
5675 struct iwx_beacon_filter_cmd cmd = {
5676 IWX_BF_CMD_CONFIG_DEFAULTS,
5677 .bf_enable_beacon_filter = htole32(1),
5678 .ba_enable_beacon_abort = htole32(enable),
5679 };
5680
5681 if (!sc->sc_bf.bf_enabled)
5682 return 0;
5683
5684 sc->sc_bf.ba_enabled = enable;
5685 return iwx_beacon_filter_send_cmd(sc, &cmd);
5686}
5687
5688void
5689iwx_power_build_cmd(struct iwx_softc *sc, struct iwx_node *in,
5690 struct iwx_mac_power_cmd *cmd)
5691{
5692 struct ieee80211com *ic = &sc->sc_ic;
5693 struct ieee80211_node *ni = &in->in_ni;
5694 int dtim_period, dtim_msec, keep_alive;
5695
5696 cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
5697     in->in_color));
5698 if (ni->ni_dtimperiod)
5699 dtim_period = ni->ni_dtimperiod;
5700 else
5701 dtim_period = 1;
5702
5703 /*
5704 * Regardless of the power management state, the driver must set the
5705 * keep-alive period. The firmware uses it to send keep-alive NDPs
5706 * immediately after association. Ensure the keep-alive period is
5707 * at least 3 * DTIM.
5708 */
5709 dtim_msec = dtim_period * ni->ni_intval;
5710 keep_alive = MAX(3 * dtim_msec, 1000 * IWX_POWER_KEEP_ALIVE_PERIOD_SEC);
5711 keep_alive = roundup(keep_alive, 1000) / 1000;
5712 cmd->keep_alive_seconds = htole16(keep_alive);
5713
5714 if (ic->ic_opmode != IEEE80211_M_MONITOR)
5715 cmd->flags = htole16(IWX_POWER_FLAGS_POWER_SAVE_ENA_MSK);
5716}
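/*
 * Illustrative sketch, not part of if_iwx.c: a worked example of the
 * keep-alive computation above, treating the beacon interval as
 * milliseconds the way the driver does. With DTIM period 1 and a 100 ms
 * beacon interval, 3 * DTIM is only 300 ms, so the 25-second floor wins.
 */
#include <stdio.h>

#define KEEP_ALIVE_SEC	25	/* mirrors IWX_POWER_KEEP_ALIVE_PERIOD_SEC */
#define MAX(a, b)	(((a) > (b)) ? (a) : (b))
#define ROUNDUP(x, y)	((((x) + ((y) - 1)) / (y)) * (y))

int
main(void)
{
	int dtim_period = 1, intval = 100;	/* illustrative values */
	int dtim_msec = dtim_period * intval;
	int keep_alive = MAX(3 * dtim_msec, 1000 * KEEP_ALIVE_SEC);

	keep_alive = ROUNDUP(keep_alive, 1000) / 1000;
	printf("keep_alive = %d seconds\n", keep_alive);	/* prints 25 */
	return 0;
}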
5717
5718int
5719iwx_power_mac_update_mode(struct iwx_softc *sc, struct iwx_node *in)
5720{
5721 int err;
5722 int ba_enable;
5723 struct iwx_mac_power_cmd cmd;
5724
5725 memset(&cmd, 0, sizeof(cmd));
5726
5727 iwx_power_build_cmd(sc, in, &cmd);
5728
5729 err = iwx_send_cmd_pdu(sc, IWX_MAC_PM_POWER_TABLE, 0,
5730 sizeof(cmd), &cmd);
5731 if (err != 0)
5732 return err;
5733
5734 ba_enable = !!(cmd.flags &
5735 htole16(IWX_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
5736 return iwx_update_beacon_abort(sc, in, ba_enable);
5737}
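/*
 * Illustrative sketch, not part of if_iwx.c: the "!!" above collapses the
 * masked flag test to exactly 0 or 1 before it is passed to
 * iwx_update_beacon_abort() as an enable argument.
 */
#include <stdio.h>

int
main(void)
{
	unsigned flags = 1U << 1;	/* power-management bit set */

	printf("%d %d\n", !!(flags & (1U << 1)), !!(flags & (1U << 0)));
	return 0;	/* prints "1 0": any nonzero result becomes 1 */
}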
5738
5739int
5740iwx_power_update_device(struct iwx_softc *sc)
5741{
5742 struct iwx_device_power_cmd cmd = { };
5743 struct ieee80211com *ic = &sc->sc_ic;
5744
5745 if (ic->ic_opmode != IEEE80211_M_MONITOR)
5746 cmd.flags = htole16(IWX_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
5747
5748 return iwx_send_cmd_pdu(sc,
5749 IWX_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
5750}
5751
5752int
5753iwx_enable_beacon_filter(struct iwx_softc *sc, struct iwx_node *in)
5754{
5755 struct iwx_beacon_filter_cmd cmd = {
5756 IWX_BF_CMD_CONFIG_DEFAULTS,
5757 .bf_enable_beacon_filter = htole32(1),
5758 .ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled),
5759 };
5760 int err;
5761
5762 err = iwx_beacon_filter_send_cmd(sc, &cmd);
5763 if (err == 0)
5764 sc->sc_bf.bf_enabled = 1;
5765
5766 return err;
5767}
5768
5769int
5770iwx_disable_beacon_filter(struct iwx_softc *sc)
5771{
5772 struct iwx_beacon_filter_cmd cmd;
5773 int err;
5774
5775 memset(&cmd, 0, sizeof(cmd));
5776
5777 err = iwx_beacon_filter_send_cmd(sc, &cmd);
5778 if (err == 0)
5779 sc->sc_bf.bf_enabled = 0;
5780
5781 return err;
5782}
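/*
 * Illustrative sketch, not part of if_iwx.c: iwx_disable_beacon_filter()
 * sends an all-zero iwx_beacon_filter_cmd, so bf_enable_beacon_filter is
 * 0, whereas iwx_enable_beacon_filter() above sets it to htole32(1) on
 * top of IWX_BF_CMD_CONFIG_DEFAULTS. The struct below is a hypothetical
 * stand-in showing the effect of the memset().
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct bf_cmd {		/* stand-in for struct iwx_beacon_filter_cmd */
	uint32_t bf_enable_beacon_filter;
	uint32_t ba_enable_beacon_abort;
};

int
main(void)
{
	struct bf_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	printf("filter=%u abort=%u\n", cmd.bf_enable_beacon_filter,
	    cmd.ba_enable_beacon_abort);	/* prints filter=0 abort=0 */
	return 0;
}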
5783
5784int
5785iwx_add_sta_cmd(struct iwx_softc *sc, struct iwx_node *in, int update)
5786{
5787 struct iwx_add_sta_cmd add_sta_cmd;
5788 int err;
5789 uint32_t status;