Bug Summary

File: dev/pci/if_iwx.c
Warning: line 10241, column 3
Value stored to 'handled' is never read
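
For reference, a minimal sketch of the pattern this checker (deadcode.DeadStores)
flags; the code below is hypothetical and not taken from the driver:

	static int example_event_pending;	/* hypothetical stand-in */

	int
	example_intr(void *arg)
	{
		int handled = 0;

		if (example_event_pending)
			handled = 1;	/* flagged: this stored value is never read */

		return 1;		/* 'handled' is never consulted */
	}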

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name if_iwx.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant 
-fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/dev/pci/if_iwx.c
1/* $OpenBSD: if_iwx.c,v 1.180 2023/12/30 16:55:44 stsp Exp $ */
2
3/*
4 * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
5 * Author: Stefan Sperling <stsp@openbsd.org>
6 * Copyright (c) 2014 Fixup Software Ltd.
7 * Copyright (c) 2017, 2019, 2020 Stefan Sperling <stsp@openbsd.org>
8 *
9 * Permission to use, copy, modify, and distribute this software for any
10 * purpose with or without fee is hereby granted, provided that the above
11 * copyright notice and this permission notice appear in all copies.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*-
23 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
24 * which were used as the reference documentation for this implementation.
25 *
26 ******************************************************************************
27 *
28 * This file is provided under a dual BSD/GPLv2 license. When using or
29 * redistributing this file, you may do so under either license.
30 *
31 * GPL LICENSE SUMMARY
32 *
33 * Copyright(c) 2017 Intel Deutschland GmbH
34 * Copyright(c) 2018 - 2019 Intel Corporation
35 *
36 * This program is free software; you can redistribute it and/or modify
37 * it under the terms of version 2 of the GNU General Public License as
38 * published by the Free Software Foundation.
39 *
40 * This program is distributed in the hope that it will be useful, but
41 * WITHOUT ANY WARRANTY; without even the implied warranty of
42 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
43 * General Public License for more details.
44 *
45 * BSD LICENSE
46 *
47 * Copyright(c) 2017 Intel Deutschland GmbH
48 * Copyright(c) 2018 - 2019 Intel Corporation
49 * All rights reserved.
50 *
51 * Redistribution and use in source and binary forms, with or without
52 * modification, are permitted provided that the following conditions
53 * are met:
54 *
55 * * Redistributions of source code must retain the above copyright
56 * notice, this list of conditions and the following disclaimer.
57 * * Redistributions in binary form must reproduce the above copyright
58 * notice, this list of conditions and the following disclaimer in
59 * the documentation and/or other materials provided with the
60 * distribution.
61 * * Neither the name Intel Corporation nor the names of its
62 * contributors may be used to endorse or promote products derived
63 * from this software without specific prior written permission.
64 *
65 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
66 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
67 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
68 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
69 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
70 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
71 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
72 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
73 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
74 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
75 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
76 *
77 *****************************************************************************
78 */
79
80/*-
81 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
82 *
83 * Permission to use, copy, modify, and distribute this software for any
84 * purpose with or without fee is hereby granted, provided that the above
85 * copyright notice and this permission notice appear in all copies.
86 *
87 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
88 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
89 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
90 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
91 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
92 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
93 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
94 */
95
96#include "bpfilter.h"
97
98#include <sys/param.h>
99#include <sys/conf.h>
100#include <sys/kernel.h>
101#include <sys/malloc.h>
102#include <sys/mbuf.h>
103#include <sys/mutex.h>
104#include <sys/proc.h>
105#include <sys/rwlock.h>
106#include <sys/socket.h>
107#include <sys/sockio.h>
108#include <sys/systm.h>
109#include <sys/endian.h>
110
111#include <sys/refcnt.h>
112#include <sys/task.h>
113#include <machine/bus.h>
114#include <machine/intr.h>
115
116#include <dev/pci/pcireg.h>
117#include <dev/pci/pcivar.h>
118#include <dev/pci/pcidevs.h>
119
120#if NBPFILTER > 0
121#include <net/bpf.h>
122#endif
123#include <net/if.h>
124#include <net/if_dl.h>
125#include <net/if_media.h>
126
127#include <netinet/in.h>
128#include <netinet/if_ether.h>
129
130#include <net80211/ieee80211_var.h>
131#include <net80211/ieee80211_radiotap.h>
132#include <net80211/ieee80211_priv.h> /* for SEQ_LT */
133#undef DPRINTF /* defined in ieee80211_priv.h */
134
135#define DEVNAME(_s) ((_s)->sc_dev.dv_xname)
136
137#define IC2IFP(_ic_) (&(_ic_)->ic_if)
138
139#define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
140#define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
141
142#ifdef IWX_DEBUG
143#define DPRINTF(x) do { if (iwx_debug > 0) printf x; } while (0)
144#define DPRINTFN(n, x) do { if (iwx_debug >= (n)) printf x; } while (0)
145int iwx_debug = 1;
146#else
147#define DPRINTF(x) do { ; } while (0)
148#define DPRINTFN(n, x) do { ; } while (0)
149#endif
150
151#include <dev/pci/if_iwxreg.h>
152#include <dev/pci/if_iwxvar.h>
153
154const uint8_t iwx_nvm_channels_8000[] = {
155 /* 2.4 GHz */
156 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
157 /* 5 GHz */
158 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
159 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
160 149, 153, 157, 161, 165, 169, 173, 177, 181
161};
162
163static const uint8_t iwx_nvm_channels_uhb[] = {
164 /* 2.4 GHz */
165 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
166 /* 5 GHz */
167 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
168 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
169 149, 153, 157, 161, 165, 169, 173, 177, 181,
170 /* 6-7 GHz */
171 1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 65, 69,
172 73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129,
173 133, 137, 141, 145, 149, 153, 157, 161, 165, 169, 173, 177, 181, 185,
174 189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233
175};
176
177#define IWX_NUM_2GHZ_CHANNELS 14
178#define IWX_NUM_5GHZ_CHANNELS 37
179
180const struct iwx_rate {
181 uint16_t rate;
182 uint8_t plcp;
183 uint8_t ht_plcp;
184} iwx_rates[] = {
185 /* Legacy */ /* HT */
186 { 2, IWX_RATE_1M_PLCP, IWX_RATE_HT_SISO_MCS_INV_PLCP },
187 { 4, IWX_RATE_2M_PLCP, IWX_RATE_HT_SISO_MCS_INV_PLCP },
188 { 11, IWX_RATE_5M_PLCP, IWX_RATE_HT_SISO_MCS_INV_PLCP },
189 { 22, IWX_RATE_11M_PLCP, IWX_RATE_HT_SISO_MCS_INV_PLCP },
190 { 12, IWX_RATE_6M_PLCP, IWX_RATE_HT_SISO_MCS_0_PLCP },
191 { 18, IWX_RATE_9M_PLCP, IWX_RATE_HT_SISO_MCS_INV_PLCP },
192 { 24, IWX_RATE_12M_PLCP, IWX_RATE_HT_SISO_MCS_1_PLCP },
193 { 26, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_8_PLCP },
194 { 36, IWX_RATE_18M_PLCP, IWX_RATE_HT_SISO_MCS_2_PLCP },
195 { 48, IWX_RATE_24M_PLCP, IWX_RATE_HT_SISO_MCS_3_PLCP },
196 { 52, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_9_PLCP },
197 { 72, IWX_RATE_36M_PLCP, IWX_RATE_HT_SISO_MCS_4_PLCP },
198 { 78, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_10_PLCP },
199 { 96, IWX_RATE_48M_PLCP, IWX_RATE_HT_SISO_MCS_5_PLCP },
200 { 104, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_11_PLCP },
201 { 108, IWX_RATE_54M_PLCP, IWX_RATE_HT_SISO_MCS_6_PLCP },
202 { 128, IWX_RATE_INVM_PLCP, IWX_RATE_HT_SISO_MCS_7_PLCP },
203 { 156, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_12_PLCP },
204 { 208, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_13_PLCP },
205 { 234, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_14_PLCP },
206 { 260, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_15_PLCP },
207};
208#define IWX_RIDX_CCK 0
209#define IWX_RIDX_OFDM 4
210#define IWX_RIDX_MAX (nitems(iwx_rates)-1)
211#define IWX_RIDX_IS_CCK(_i_) ((_i_) < IWX_RIDX_OFDM)
212#define IWX_RIDX_IS_OFDM(_i_) ((_i_) >= IWX_RIDX_OFDM)
213#define IWX_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
214
215/* Convert an MCS index into an iwx_rates[] index. */
216const int iwx_mcs2ridx[] = {
217 IWX_RATE_MCS_0_INDEX,
218 IWX_RATE_MCS_1_INDEX,
219 IWX_RATE_MCS_2_INDEX,
220 IWX_RATE_MCS_3_INDEX,
221 IWX_RATE_MCS_4_INDEX,
222 IWX_RATE_MCS_5_INDEX,
223 IWX_RATE_MCS_6_INDEX,
224 IWX_RATE_MCS_7_INDEX,
225 IWX_RATE_MCS_8_INDEX,
226 IWX_RATE_MCS_9_INDEX,
227 IWX_RATE_MCS_10_INDEX,
228 IWX_RATE_MCS_11_INDEX,
229 IWX_RATE_MCS_12_INDEX,
230 IWX_RATE_MCS_13_INDEX,
231 IWX_RATE_MCS_14_INDEX,
232 IWX_RATE_MCS_15_INDEX,
233};
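
For example, MCS 0 shares its iwx_rates[] entry with the 6 Mbit/s legacy OFDM
rate above (the entry pairing IWX_RATE_6M_PLCP with IWX_RATE_HT_SISO_MCS_0_PLCP),
so iwx_mcs2ridx[0] resolves to that slot.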
234
235uint8_t iwx_lookup_cmd_ver(struct iwx_softc *, uint8_t, uint8_t);
236uint8_t iwx_lookup_notif_ver(struct iwx_softc *, uint8_t, uint8_t);
237int iwx_is_mimo_ht_plcp(uint8_t);
238int iwx_store_cscheme(struct iwx_softc *, uint8_t *, size_t);
239int iwx_alloc_fw_monitor_block(struct iwx_softc *, uint8_t, uint8_t);
240int iwx_alloc_fw_monitor(struct iwx_softc *, uint8_t);
241int iwx_apply_debug_destination(struct iwx_softc *);
242void iwx_set_ltr(struct iwx_softc *);
243int iwx_ctxt_info_init(struct iwx_softc *, const struct iwx_fw_sects *);
244int iwx_ctxt_info_gen3_init(struct iwx_softc *,
245 const struct iwx_fw_sects *);
246void iwx_ctxt_info_free_fw_img(struct iwx_softc *);
247void iwx_ctxt_info_free_paging(struct iwx_softc *);
248int iwx_init_fw_sec(struct iwx_softc *, const struct iwx_fw_sects *,
249 struct iwx_context_info_dram *);
250void iwx_fw_version_str(char *, size_t, uint32_t, uint32_t, uint32_t);
251int iwx_firmware_store_section(struct iwx_softc *, enum iwx_ucode_type,
252 uint8_t *, size_t);
253int iwx_set_default_calib(struct iwx_softc *, const void *);
254void iwx_fw_info_free(struct iwx_fw_info *);
255int iwx_read_firmware(struct iwx_softc *);
256uint32_t iwx_prph_addr_mask(struct iwx_softc *);
257uint32_t iwx_read_prph_unlocked(struct iwx_softc *, uint32_t);
258uint32_t iwx_read_prph(struct iwx_softc *, uint32_t);
259void iwx_write_prph_unlocked(struct iwx_softc *, uint32_t, uint32_t);
260void iwx_write_prph(struct iwx_softc *, uint32_t, uint32_t);
261uint32_t iwx_read_umac_prph_unlocked(struct iwx_softc *, uint32_t);
262uint32_t iwx_read_umac_prph(struct iwx_softc *, uint32_t);
263void iwx_write_umac_prph_unlocked(struct iwx_softc *, uint32_t, uint32_t);
264void iwx_write_umac_prph(struct iwx_softc *, uint32_t, uint32_t);
265int iwx_read_mem(struct iwx_softc *, uint32_t, void *, int);
266int iwx_write_mem(struct iwx_softc *, uint32_t, const void *, int);
267int iwx_write_mem32(struct iwx_softc *, uint32_t, uint32_t);
268int iwx_poll_bit(struct iwx_softc *, int, uint32_t, uint32_t, int);
269int iwx_nic_lock(struct iwx_softc *);
270void iwx_nic_assert_locked(struct iwx_softc *);
271void iwx_nic_unlock(struct iwx_softc *);
272int iwx_set_bits_mask_prph(struct iwx_softc *, uint32_t, uint32_t,
273 uint32_t);
274int iwx_set_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
275int iwx_clear_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
276int iwx_dma_contig_alloc(bus_dma_tag_t, struct iwx_dma_info *, bus_size_t,
277 bus_size_t);
278void iwx_dma_contig_free(struct iwx_dma_info *);
279int iwx_alloc_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
280void iwx_disable_rx_dma(struct iwx_softc *);
281void iwx_reset_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
282void iwx_free_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
283int iwx_alloc_tx_ring(struct iwx_softc *, struct iwx_tx_ring *, int);
284void iwx_reset_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
285void iwx_free_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
286void iwx_enable_rfkill_int(struct iwx_softc *);
287int iwx_check_rfkill(struct iwx_softc *);
288void iwx_enable_interrupts(struct iwx_softc *);
289void iwx_enable_fwload_interrupt(struct iwx_softc *);
290void iwx_restore_interrupts(struct iwx_softc *);
291void iwx_disable_interrupts(struct iwx_softc *);
292void iwx_ict_reset(struct iwx_softc *);
293int iwx_set_hw_ready(struct iwx_softc *);
294int iwx_prepare_card_hw(struct iwx_softc *);
295int iwx_force_power_gating(struct iwx_softc *);
296void iwx_apm_config(struct iwx_softc *);
297int iwx_apm_init(struct iwx_softc *);
298void iwx_apm_stop(struct iwx_softc *);
299int iwx_allow_mcast(struct iwx_softc *);
300void iwx_init_msix_hw(struct iwx_softc *);
301void iwx_conf_msix_hw(struct iwx_softc *, int);
302int iwx_clear_persistence_bit(struct iwx_softc *);
303int iwx_start_hw(struct iwx_softc *);
304void iwx_stop_device(struct iwx_softc *);
305void iwx_nic_config(struct iwx_softc *);
306int iwx_nic_rx_init(struct iwx_softc *);
307int iwx_nic_init(struct iwx_softc *);
308int iwx_enable_txq(struct iwx_softc *, int, int, int, int);
309int iwx_disable_txq(struct iwx_softc *sc, int, int, uint8_t);
310void iwx_post_alive(struct iwx_softc *);
311int iwx_schedule_session_protection(struct iwx_softc *, struct iwx_node *,
312 uint32_t);
313void iwx_unprotect_session(struct iwx_softc *, struct iwx_node *);
314void iwx_init_channel_map(struct iwx_softc *, uint16_t *, uint32_t *, int);
315void iwx_setup_ht_rates(struct iwx_softc *);
316void iwx_setup_vht_rates(struct iwx_softc *);
317int iwx_mimo_enabled(struct iwx_softc *);
318void iwx_mac_ctxt_task(void *);
319void iwx_phy_ctxt_task(void *);
320void iwx_updatechan(struct ieee80211com *);
321void iwx_updateprot(struct ieee80211com *);
322void iwx_updateslot(struct ieee80211com *);
323void iwx_updateedca(struct ieee80211com *);
324void iwx_updatedtim(struct ieee80211com *);
325void iwx_init_reorder_buffer(struct iwx_reorder_buffer *, uint16_t,
326 uint16_t);
327void iwx_clear_reorder_buffer(struct iwx_softc *, struct iwx_rxba_data *);
328int iwx_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *,
329 uint8_t);
330void iwx_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *,
331 uint8_t);
332int iwx_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
333 uint8_t);
334void iwx_rx_ba_session_expired(void *);
335void iwx_rx_bar_frame_release(struct iwx_softc *, struct iwx_rx_packet *,
336 struct mbuf_list *);
337void iwx_reorder_timer_expired(void *);
338void iwx_sta_rx_agg(struct iwx_softc *, struct ieee80211_node *, uint8_t,
339 uint16_t, uint16_t, int, int);
340void iwx_sta_tx_agg_start(struct iwx_softc *, struct ieee80211_node *,
341 uint8_t);
342void iwx_ba_task(void *);
343
344void iwx_set_mac_addr_from_csr(struct iwx_softc *, struct iwx_nvm_data *);
345int iwx_is_valid_mac_addr(const uint8_t *);
346void iwx_flip_hw_address(uint32_t, uint32_t, uint8_t *);
347int iwx_nvm_get(struct iwx_softc *);
348int iwx_load_firmware(struct iwx_softc *);
349int iwx_start_fw(struct iwx_softc *);
350int iwx_pnvm_handle_section(struct iwx_softc *, const uint8_t *, size_t);
351int iwx_pnvm_parse(struct iwx_softc *, const uint8_t *, size_t);
352void iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc *);
353int iwx_load_pnvm(struct iwx_softc *);
354int iwx_send_tx_ant_cfg(struct iwx_softc *, uint8_t);
355int iwx_send_phy_cfg_cmd(struct iwx_softc *);
356int iwx_load_ucode_wait_alive(struct iwx_softc *);
357int iwx_send_dqa_cmd(struct iwx_softc *);
358int iwx_run_init_mvm_ucode(struct iwx_softc *, int);
359int iwx_config_ltr(struct iwx_softc *);
360void iwx_update_rx_desc(struct iwx_softc *, struct iwx_rx_ring *, int);
361int iwx_rx_addbuf(struct iwx_softc *, int, int);
362int iwx_rxmq_get_signal_strength(struct iwx_softc *, struct iwx_rx_mpdu_desc *);
363void iwx_rx_rx_phy_cmd(struct iwx_softc *, struct iwx_rx_packet *,
364 struct iwx_rx_data *);
365int iwx_get_noise(const struct iwx_statistics_rx_non_phy *);
366int iwx_rx_hwdecrypt(struct iwx_softc *, struct mbuf *, uint32_t,
367 struct ieee80211_rxinfo *);
368int iwx_ccmp_decap(struct iwx_softc *, struct mbuf *,
369 struct ieee80211_node *, struct ieee80211_rxinfo *);
370void iwx_rx_frame(struct iwx_softc *, struct mbuf *, int, uint32_t, int, int,
371 uint32_t, struct ieee80211_rxinfo *, struct mbuf_list *);
372void iwx_clear_tx_desc(struct iwx_softc *, struct iwx_tx_ring *, int);
373void iwx_txd_done(struct iwx_softc *, struct iwx_tx_data *);
374void iwx_txq_advance(struct iwx_softc *, struct iwx_tx_ring *, uint16_t);
375void iwx_rx_tx_cmd(struct iwx_softc *, struct iwx_rx_packet *,
376 struct iwx_rx_data *);
377void iwx_clear_oactive(struct iwx_softc *, struct iwx_tx_ring *);
378void iwx_rx_bmiss(struct iwx_softc *, struct iwx_rx_packet *,
379 struct iwx_rx_data *);
380int iwx_binding_cmd(struct iwx_softc *, struct iwx_node *, uint32_t);
381uint8_t iwx_get_vht_ctrl_pos(struct ieee80211com *, struct ieee80211_channel *);
382int iwx_phy_ctxt_cmd_uhb_v3_v4(struct iwx_softc *, struct iwx_phy_ctxt *,
383 uint8_t, uint8_t, uint32_t, uint8_t, uint8_t, int);
384int iwx_phy_ctxt_cmd_v3_v4(struct iwx_softc *, struct iwx_phy_ctxt *,
385 uint8_t, uint8_t, uint32_t, uint8_t, uint8_t, int);
386int iwx_phy_ctxt_cmd(struct iwx_softc *, struct iwx_phy_ctxt *, uint8_t,
387 uint8_t, uint32_t, uint32_t, uint8_t, uint8_t);
388int iwx_send_cmd(struct iwx_softc *, struct iwx_host_cmd *);
389int iwx_send_cmd_pdu(struct iwx_softc *, uint32_t, uint32_t, uint16_t,
390 const void *);
391int iwx_send_cmd_status(struct iwx_softc *, struct iwx_host_cmd *,
392 uint32_t *);
393int iwx_send_cmd_pdu_status(struct iwx_softc *, uint32_t, uint16_t,
394 const void *, uint32_t *);
395void iwx_free_resp(struct iwx_softc *, struct iwx_host_cmd *);
396void iwx_cmd_done(struct iwx_softc *, int, int, int);
397uint32_t iwx_fw_rateidx_ofdm(uint8_t);
398uint32_t iwx_fw_rateidx_cck(uint8_t);
399const struct iwx_rate *iwx_tx_fill_cmd(struct iwx_softc *, struct iwx_node *,
400 struct ieee80211_frame *, uint16_t *, uint32_t *);
401void iwx_tx_update_byte_tbl(struct iwx_softc *, struct iwx_tx_ring *, int,
402 uint16_t, uint16_t);
403int iwx_tx(struct iwx_softc *, struct mbuf *, struct ieee80211_node *);
404int iwx_flush_sta_tids(struct iwx_softc *, int, uint16_t);
405int iwx_drain_sta(struct iwx_softc *sc, struct iwx_node *, int);
406int iwx_flush_sta(struct iwx_softc *, struct iwx_node *);
407int iwx_beacon_filter_send_cmd(struct iwx_softc *,
408 struct iwx_beacon_filter_cmd *);
409int iwx_update_beacon_abort(struct iwx_softc *, struct iwx_node *, int);
410void iwx_power_build_cmd(struct iwx_softc *, struct iwx_node *,
411 struct iwx_mac_power_cmd *);
412int iwx_power_mac_update_mode(struct iwx_softc *, struct iwx_node *);
413int iwx_power_update_device(struct iwx_softc *);
414int iwx_enable_beacon_filter(struct iwx_softc *, struct iwx_node *);
415int iwx_disable_beacon_filter(struct iwx_softc *);
416int iwx_add_sta_cmd(struct iwx_softc *, struct iwx_node *, int);
417int iwx_rm_sta_cmd(struct iwx_softc *, struct iwx_node *);
418int iwx_rm_sta(struct iwx_softc *, struct iwx_node *);
419int iwx_fill_probe_req(struct iwx_softc *, struct iwx_scan_probe_req *);
420int iwx_config_umac_scan_reduced(struct iwx_softc *);
421uint16_t iwx_scan_umac_flags_v2(struct iwx_softc *, int);
422void iwx_scan_umac_dwell_v10(struct iwx_softc *,
423 struct iwx_scan_general_params_v10 *, int);
424void iwx_scan_umac_fill_general_p_v10(struct iwx_softc *,
425 struct iwx_scan_general_params_v10 *, uint16_t, int);
426void iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *,
427 struct iwx_scan_channel_params_v6 *, uint32_t, int);
428int iwx_umac_scan_v14(struct iwx_softc *, int);
429void iwx_mcc_update(struct iwx_softc *, struct iwx_mcc_chub_notif *);
430uint8_t iwx_ridx2rate(struct ieee80211_rateset *, int);
431int iwx_rval2ridx(int);
432void iwx_ack_rates(struct iwx_softc *, struct iwx_node *, int *, int *);
433void iwx_mac_ctxt_cmd_common(struct iwx_softc *, struct iwx_node *,
434 struct iwx_mac_ctx_cmd *, uint32_t);
435void iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *, struct iwx_node *,
436 struct iwx_mac_data_sta *, int);
437int iwx_mac_ctxt_cmd(struct iwx_softc *, struct iwx_node *, uint32_t, int);
438int iwx_clear_statistics(struct iwx_softc *);
439void iwx_add_task(struct iwx_softc *, struct taskq *, struct task *);
440void iwx_del_task(struct iwx_softc *, struct taskq *, struct task *);
441int iwx_scan(struct iwx_softc *);
442int iwx_bgscan(struct ieee80211com *);
443void iwx_bgscan_done(struct ieee80211com *,
444 struct ieee80211_node_switch_bss_arg *, size_t);
445void iwx_bgscan_done_task(void *);
446int iwx_umac_scan_abort(struct iwx_softc *);
447int iwx_scan_abort(struct iwx_softc *);
448int iwx_enable_mgmt_queue(struct iwx_softc *);
449int iwx_disable_mgmt_queue(struct iwx_softc *);
450int iwx_rs_rval2idx(uint8_t);
451uint16_t iwx_rs_ht_rates(struct iwx_softc *, struct ieee80211_node *, int);
452uint16_t iwx_rs_vht_rates(struct iwx_softc *, struct ieee80211_node *, int);
453int iwx_rs_init_v3(struct iwx_softc *, struct iwx_node *);
454int iwx_rs_init_v4(struct iwx_softc *, struct iwx_node *);
455int iwx_rs_init(struct iwx_softc *, struct iwx_node *);
456int iwx_enable_data_tx_queues(struct iwx_softc *);
457int iwx_phy_send_rlc(struct iwx_softc *, struct iwx_phy_ctxt *,
458 uint8_t, uint8_t);
459int iwx_phy_ctxt_update(struct iwx_softc *, struct iwx_phy_ctxt *,
460 struct ieee80211_channel *, uint8_t, uint8_t, uint32_t, uint8_t,
461 uint8_t);
462int iwx_auth(struct iwx_softc *);
463int iwx_deauth(struct iwx_softc *);
464int iwx_run(struct iwx_softc *);
465int iwx_run_stop(struct iwx_softc *);
466struct ieee80211_node *iwx_node_alloc(struct ieee80211com *);
467int iwx_set_key(struct ieee80211com *, struct ieee80211_node *,
468 struct ieee80211_key *);
469void iwx_setkey_task(void *);
470void iwx_delete_key(struct ieee80211com *,
471 struct ieee80211_node *, struct ieee80211_key *);
472int iwx_media_change(struct ifnet *);
473void iwx_newstate_task(void *);
474int iwx_newstate(struct ieee80211com *, enum ieee80211_state, int);
475void iwx_endscan(struct iwx_softc *);
476void iwx_fill_sf_command(struct iwx_softc *, struct iwx_sf_cfg_cmd *,
477 struct ieee80211_node *);
478int iwx_sf_config(struct iwx_softc *, int);
479int iwx_send_bt_init_conf(struct iwx_softc *);
480int iwx_send_soc_conf(struct iwx_softc *);
481int iwx_send_update_mcc_cmd(struct iwx_softc *, const char *);
482int iwx_send_temp_report_ths_cmd(struct iwx_softc *);
483int iwx_init_hw(struct iwx_softc *);
484int iwx_init(struct ifnet *);
485void iwx_start(struct ifnet *);
486void iwx_stop(struct ifnet *);
487void iwx_watchdog(struct ifnet *);
488int iwx_ioctl(struct ifnet *, u_long, caddr_t);
489const char *iwx_desc_lookup(uint32_t);
490void iwx_nic_error(struct iwx_softc *);
491void iwx_dump_driver_status(struct iwx_softc *);
492void iwx_nic_umac_error(struct iwx_softc *);
493int iwx_detect_duplicate(struct iwx_softc *, struct mbuf *,
494 struct iwx_rx_mpdu_desc *, struct ieee80211_rxinfo *);
495int iwx_is_sn_less(uint16_t, uint16_t, uint16_t);
496void iwx_release_frames(struct iwx_softc *, struct ieee80211_node *,
497 struct iwx_rxba_data *, struct iwx_reorder_buffer *, uint16_t,
498 struct mbuf_list *);
499int iwx_oldsn_workaround(struct iwx_softc *, struct ieee80211_node *,
500 int, struct iwx_reorder_buffer *, uint32_t, uint32_t);
501int iwx_rx_reorder(struct iwx_softc *, struct mbuf *, int,
502 struct iwx_rx_mpdu_desc *, int, int, uint32_t,
503 struct ieee80211_rxinfo *, struct mbuf_list *);
504void iwx_rx_mpdu_mq(struct iwx_softc *, struct mbuf *, void *, size_t,
505 struct mbuf_list *);
506int iwx_rx_pkt_valid(struct iwx_rx_packet *);
507void iwx_rx_pkt(struct iwx_softc *, struct iwx_rx_data *,
508 struct mbuf_list *);
509void iwx_notif_intr(struct iwx_softc *);
510int iwx_intr(void *);
511int iwx_intr_msix(void *);
512int iwx_match(struct device *, void *, void *);
513int iwx_preinit(struct iwx_softc *);
514void iwx_attach_hook(struct device *);
515const struct iwx_device_cfg *iwx_find_device_cfg(struct iwx_softc *);
516void iwx_attach(struct device *, struct device *, void *);
517void iwx_init_task(void *);
518int iwx_activate(struct device *, int);
519void iwx_resume(struct iwx_softc *);
520int iwx_wakeup(struct iwx_softc *);
521
522#if NBPFILTER > 0
523void iwx_radiotap_attach(struct iwx_softc *);
524#endif
525
526uint8_t
527iwx_lookup_cmd_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
528{
529 const struct iwx_fw_cmd_version *entry;
530 int i;
531
532 for (i = 0; i < sc->n_cmd_versions; i++) {
533 entry = &sc->cmd_versions[i];
534 if (entry->group == grp && entry->cmd == cmd)
535 return entry->cmd_ver;
536 }
537
538 return IWX_FW_CMD_VER_UNKNOWN;
539}
540
541uint8_t
542iwx_lookup_notif_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
543{
544 const struct iwx_fw_cmd_version *entry;
545 int i;
546
547 for (i = 0; i < sc->n_cmd_versions; i++) {
548 entry = &sc->cmd_versions[i];
549 if (entry->group == grp && entry->cmd == cmd)
550 return entry->notif_ver;
551 }
552
553 return IWX_FW_CMD_VER_UNKNOWN;
554}
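
Both lookups are linear scans over the command-version table the firmware
provides via TLVs, falling back to IWX_FW_CMD_VER_UNKNOWN (99) when no entry
matches. A usage sketch, with hypothetical group/command arguments:

	uint8_t ver = iwx_lookup_cmd_ver(sc, grp, cmd);
	if (ver == IWX_FW_CMD_VER_UNKNOWN || ver < 2) {
		/* fall back to the older command layout */
	}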
555
556int
557iwx_is_mimo_ht_plcp(uint8_t ht_plcp)
558{
559 switch (ht_plcp) {
560 case IWX_RATE_HT_MIMO2_MCS_8_PLCP:
561 case IWX_RATE_HT_MIMO2_MCS_9_PLCP:
562 case IWX_RATE_HT_MIMO2_MCS_10_PLCP:
563 case IWX_RATE_HT_MIMO2_MCS_11_PLCP:
564 case IWX_RATE_HT_MIMO2_MCS_12_PLCP:
565 case IWX_RATE_HT_MIMO2_MCS_13_PLCP:
566 case IWX_RATE_HT_MIMO2_MCS_14_PLCP:
567 case IWX_RATE_HT_MIMO2_MCS_15_PLCP:
568 return 1;
569 default:
570 break;
571 }
572
573 return 0;
574}
575
576int
577iwx_store_cscheme(struct iwx_softc *sc, uint8_t *data, size_t dlen)
578{
579 struct iwx_fw_cscheme_list *l = (void *)data;
580
581 if (dlen < sizeof(*l) ||
582 dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
583 return EINVAL;
584
585 /* we don't actually store anything for now, always use s/w crypto */
586
587 return 0;
588}
589
590int
591iwx_ctxt_info_alloc_dma(struct iwx_softc *sc,
592 const struct iwx_fw_onesect *sec, struct iwx_dma_info *dram)
593{
594 int err = iwx_dma_contig_alloc(sc->sc_dmat, dram, sec->fws_len, 0);
595 if (err) {
596 printf("%s: could not allocate context info DMA memory\n",
597 DEVNAME(sc));
598 return err;
599 }
600
601 memcpy(dram->vaddr, sec->fws_data, sec->fws_len);
602
603 return 0;
604}
605
606void iwx_ctxt_info_free_paging(struct iwx_softc *sc)
607{
608 struct iwx_self_init_dram *dram = &sc->init_dram;
609 int i;
610
611 if (!dram->paging)
612 return;
613
614 /* free paging*/
615 for (i = 0; i < dram->paging_cnt; i++)
616 iwx_dma_contig_free(&dram->paging[i]);
617
618 free(dram->paging, M_DEVBUF, dram->paging_cnt * sizeof(*dram->paging));
619 dram->paging_cnt = 0;
620 dram->paging = NULL;
621}
622
623int
624iwx_get_num_sections(const struct iwx_fw_sects *fws, int start)
625{
626 int i = 0;
627
628 while (start < fws->fw_count &&
629 fws->fw_sect[start].fws_devoff != IWX_CPU1_CPU2_SEPARATOR_SECTION &&
630 fws->fw_sect[start].fws_devoff != IWX_PAGING_SEPARATOR_SECTION) {
631 start++;
632 i++;
633 }
634
635 return i;
636}
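
Judging from the separator checks here and the "+1"/"+2" index offsets used
in iwx_init_fw_sec() below, the firmware section array is laid out roughly as:

	[ lmac sections | CPU1_CPU2 separator | umac sections |
	  paging separator | paging sections ]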
637
638int
639iwx_init_fw_sec(struct iwx_softc *sc, const struct iwx_fw_sects *fws,
640 struct iwx_context_info_dram *ctxt_dram)
641{
642 struct iwx_self_init_dram *dram = &sc->init_dram;
643 int i, ret, fw_cnt = 0;
644
645 KASSERT(dram->paging == NULL);
646
647 dram->lmac_cnt = iwx_get_num_sections(fws, 0);
648 /* add 1 due to separator */
649 dram->umac_cnt = iwx_get_num_sections(fws, dram->lmac_cnt + 1);
650 /* add 2 due to separators */
651 dram->paging_cnt = iwx_get_num_sections(fws,
652 dram->lmac_cnt + dram->umac_cnt + 2);
653
654 dram->fw = mallocarray(dram->umac_cnt + dram->lmac_cnt,
655 sizeof(*dram->fw), M_DEVBUF, M_ZERO | M_NOWAIT);
656 if (!dram->fw) {
657 printf("%s: could not allocate memory for firmware sections\n",
658 DEVNAME(sc)((sc)->sc_dev.dv_xname));
659 return ENOMEM;
660 }
661
662 dram->paging = mallocarray(dram->paging_cnt, sizeof(*dram->paging),
663 M_DEVBUF, M_ZERO | M_NOWAIT);
664 if (!dram->paging) {
665 printf("%s: could not allocate memory for firmware paging\n",
666 DEVNAME(sc)((sc)->sc_dev.dv_xname));
667 return ENOMEM;
668 }
669
670 /* initialize lmac sections */
671 for (i = 0; i < dram->lmac_cnt; i++) {
672 ret = iwx_ctxt_info_alloc_dma(sc, &fws->fw_sect[i],
673 &dram->fw[fw_cnt]);
674 if (ret)
675 return ret;
676 ctxt_dram->lmac_img[i] =
677 htole64(dram->fw[fw_cnt].paddr);
678 DPRINTF(("%s: firmware LMAC section %d at 0x%llx size %lld\n", __func__, i,
679 (unsigned long long)dram->fw[fw_cnt].paddr,
680 (unsigned long long)dram->fw[fw_cnt].size));
681 fw_cnt++;
682 }
683
684 /* initialize umac sections */
685 for (i = 0; i < dram->umac_cnt; i++) {
686 /* access FW with +1 to make up for lmac separator */
687 ret = iwx_ctxt_info_alloc_dma(sc,
688 &fws->fw_sect[fw_cnt + 1], &dram->fw[fw_cnt]);
689 if (ret)
690 return ret;
691 ctxt_dram->umac_img[i] =
692 htole64(dram->fw[fw_cnt].paddr);
693 DPRINTF(("%s: firmware UMAC section %d at 0x%llx size %lld\n", __func__, i,
694 (unsigned long long)dram->fw[fw_cnt].paddr,
695 (unsigned long long)dram->fw[fw_cnt].size));
696 fw_cnt++;
697 }
698
699 /*
700 * Initialize paging.
701 * Unlike the umac and lmac sections, paging memory is not stored
702 * in dram->fw; it is kept separately.
703 * This is because the timing of its release is different:
704 * fw memory can be released once the firmware is alive, whereas
705 * paging memory can be freed only when the device goes down.
706 * Given that, the logic here in accessing the fw image is a bit
707 * different - fw_cnt isn't changing, so the loop counter is added to it.
708 */
709 for (i = 0; i < dram->paging_cnt; i++) {
710 /* access FW with +2 to make up for lmac & umac separators */
711 int fw_idx = fw_cnt + i + 2;
712
713 ret = iwx_ctxt_info_alloc_dma(sc,
714 &fws->fw_sect[fw_idx], &dram->paging[i]);
715 if (ret)
716 return ret;
717
718 ctxt_dram->virtual_img[i] = htole64(dram->paging[i].paddr);
719 DPRINTF(("%s: firmware paging section %d at 0x%llx size %lld\n", __func__, i,
720 (unsigned long long)dram->paging[i].paddr,
721 (unsigned long long)dram->paging[i].size));
722 }
723
724 return 0;
725}
726
727void
728iwx_fw_version_str(char *buf, size_t bufsize,
729 uint32_t major, uint32_t minor, uint32_t api)
730{
731 /*
732 * Starting with major version 35 the Linux driver prints the minor
733 * version in hexadecimal.
734 */
735 if (major >= 35)
736 snprintf(buf, bufsize, "%u.%08x.%u", major, minor, api);
737 else
738 snprintf(buf, bufsize, "%u.%u.%u", major, minor, api);
739}
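
A quick illustration with made-up version words:

	char buf[32];
	iwx_fw_version_str(buf, sizeof(buf), 36, 0x30, 68);	/* "36.00000030.68" */
	iwx_fw_version_str(buf, sizeof(buf), 34, 48, 68);	/* "34.48.68" */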
740
741int
742iwx_alloc_fw_monitor_block(struct iwx_softc *sc, uint8_t max_power,
743 uint8_t min_power)
744{
745 struct iwx_dma_info *fw_mon = &sc->fw_mon;
746 uint32_t size = 0;
747 uint8_t power;
748 int err;
749
750 if (fw_mon->size)
751 return 0;
752
753 for (power = max_power; power >= min_power; power--) {
754 size = (1 << power);
755
756 err = iwx_dma_contig_alloc(sc->sc_dmat, fw_mon, size, 0);
757 if (err)
758 continue;
759
760 DPRINTF(("%s: allocated 0x%08x bytes for firmware monitor.\n",do { ; } while (0)
761 DEVNAME(sc), size))do { ; } while (0);
762 break;
763 }
764
765 if (err) {
766 fw_mon->size = 0;
767 return err;
768 }
769
770 if (power != max_power)
771 DPRINTF(("%s: Sorry - debug buffer is only %luK while you requested %luK\n",do { ; } while (0)
772 DEVNAME(sc), (unsigned long)(1 << (power - 10)),do { ; } while (0)
773 (unsigned long)(1 << (max_power - 10))))do { ; } while (0);
774
775 return 0;
776}
777
778int
779iwx_alloc_fw_monitor(struct iwx_softc *sc, uint8_t max_power)
780{
781 if (!max_power) {
782 /* default max_power is maximum */
783 max_power = 26;
784 } else {
785 max_power += 11;
786 }
787
788 if (max_power > 26) {
789 DPRINTF(("%s: External buffer size for monitor is too big %d, "do { ; } while (0)
790 "check the FW TLV\n", DEVNAME(sc), max_power))do { ; } while (0);
791 return 0;
792 }
793
794 if (sc->fw_mon.size)
795 return 0;
796
797 return iwx_alloc_fw_monitor_block(sc, max_power, 11);
798}
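
In other words, the block allocator above tries power-of-two buffers from
1 << max_power down to 1 << min_power bytes; with the bounds used here
(power 11 through 26) that covers 2 KB up to 64 MB, halving after each
failed DMA allocation.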
799
800int
801iwx_apply_debug_destination(struct iwx_softc *sc)
802{
803 struct iwx_fw_dbg_dest_tlv_v1 *dest_v1;
804 int i, err;
805 uint8_t mon_mode, size_power, base_shift, end_shift;
806 uint32_t base_reg, end_reg;
807
808 dest_v1 = sc->sc_fw.dbg_dest_tlv_v1;
809 mon_mode = dest_v1->monitor_mode;
810 size_power = dest_v1->size_power;
811 base_reg = le32toh(dest_v1->base_reg);
812 end_reg = le32toh(dest_v1->end_reg);
813 base_shift = dest_v1->base_shift;
814 end_shift = dest_v1->end_shift;
815
816 DPRINTF(("%s: applying debug destination %d\n", DEVNAME(sc), mon_mode))do { ; } while (0);
817
818 if (mon_mode == EXTERNAL_MODE) {
819 err = iwx_alloc_fw_monitor(sc, size_power);
820 if (err)
821 return err;
822 }
823
824 if (!iwx_nic_lock(sc))
825 return EBUSY;
826
827 for (i = 0; i < sc->sc_fw.n_dest_reg; i++) {
828 uint32_t addr, val;
829 uint8_t op;
830
831 addr = le32toh(dest_v1->reg_ops[i].addr);
832 val = le32toh(dest_v1->reg_ops[i].val);
833 op = dest_v1->reg_ops[i].op;
834
835 DPRINTF(("%s: op=%u addr=%u val=%u\n", __func__, op, addr, val))do { ; } while (0);
836 switch (op) {
837 case CSR_ASSIGN:
838 IWX_WRITE(sc, addr, val);
839 break;
840 case CSR_SETBIT:
841 IWX_SETBITS(sc, addr, (1 << val));
842 break;
843 case CSR_CLEARBIT:
844 IWX_CLRBITS(sc, addr, (1 << val));
845 break;
846 case PRPH_ASSIGN:
847 iwx_write_prph(sc, addr, val);
848 break;
849 case PRPH_SETBIT:
850 err = iwx_set_bits_prph(sc, addr, (1 << val));
851 if (err)
852 return err;
853 break;
854 case PRPH_CLEARBIT:
855 err = iwx_clear_bits_prph(sc, addr, (1 << val));
856 if (err)
857 return err;
858 break;
859 case PRPH_BLOCKBIT:
860 if (iwx_read_prph(sc, addr) & (1 << val))
861 goto monitor;
862 break;
863 default:
864 DPRINTF(("%s: FW debug - unknown OP %d\n",do { ; } while (0)
865 DEVNAME(sc), op))do { ; } while (0);
866 break;
867 }
868 }
869
870monitor:
871 if (mon_mode == EXTERNAL_MODE && sc->fw_mon.size) {
872 iwx_write_prph(sc, le32toh(base_reg),
873 sc->fw_mon.paddr >> base_shift);
874 iwx_write_prph(sc, end_reg,
875 (sc->fw_mon.paddr + sc->fw_mon.size - 256)
876 >> end_shift);
877 }
878
879 iwx_nic_unlock(sc);
880 return 0;
881}
882
883void
884iwx_set_ltr(struct iwx_softc *sc)
885{
886 uint32_t ltr_val = IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
887 ((IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC <<
888 IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_SHIFT) &
889 IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_MASK) |
890 ((250 << IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_SHIFT) &
891 IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_MASK) |
892 IWX_CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
893 ((IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC <<
894 IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_SHIFT) &
895 IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_MASK) |
896 (250 & IWX_CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
897
898 /*
899 * To workaround hardware latency issues during the boot process,
900 * initialize the LTR to ~250 usec (see ltr_val above).
901 * The firmware initializes this again later (to a smaller value).
902 */
903 if (!sc->sc_integrated) {
904 IWX_WRITE(sc, IWX_CSR_LTR_LONG_VAL_AD, ltr_val);
905 } else if (sc->sc_integrated &&
906 sc->sc_device_family == IWX_DEVICE_FAMILY_22000) {
907 iwx_write_prph(sc, IWX_HPM_MAC_LTR_CSR,
908 IWX_HPM_MAC_LRT_ENABLE_ALL);
909 iwx_write_prph(sc, IWX_HPM_UMAC_LTR, ltr_val);
910 }
911}
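
Working the fields through with the register constants (scale code 2 selects
microsecond units; both value fields carry 250), the composed value is:

	  0x80000000	NO_SNOOP_REQ
	| 0x02000000	(2 << 24) & 0x1c000000, no-snoop scale
	| 0x00fa0000	(250 << 16) & 0x03ff0000, no-snoop value
	| 0x00008000	SNOOP_REQ
	| 0x00000200	(2 << 8) & 0x00001c00, snoop scale
	| 0x000000fa	250 & 0x000003ff, snoop value
	= 0x82fa82fa	i.e. ~250 usec tolerance for both request types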
912
913int
914iwx_ctxt_info_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
915{
916 struct iwx_context_info *ctxt_info;
917 struct iwx_context_info_rbd_cfg *rx_cfg;
918 uint32_t control_flags = 0;
919 uint64_t paddr;
920 int err;
921
922 ctxt_info = sc->ctxt_info_dma.vaddr;
923 memset(ctxt_info, 0, sizeof(*ctxt_info));
924
925 ctxt_info->version.version = 0;
926 ctxt_info->version.mac_id =
927 htole16((uint16_t)IWX_READ(sc, IWX_CSR_HW_REV));
928 /* size is in DWs */
929 ctxt_info->version.size = htole16(sizeof(*ctxt_info) / 4);
930
931 KASSERT(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) < 0xF);
932 control_flags = IWX_CTXT_INFO_TFD_FORMAT_LONG |
933 (IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) <<
934 IWX_CTXT_INFO_RB_CB_SIZE_POS) |
935 (IWX_CTXT_INFO_RB_SIZE_4K << IWX_CTXT_INFO_RB_SIZE_POS);
936 ctxt_info->control.control_flags = htole32(control_flags);
937
938 /* initialize RX default queue */
939 rx_cfg = &ctxt_info->rbd_cfg;
940 rx_cfg->free_rbd_addr = htole64(sc->rxq.free_desc_dma.paddr);
941 rx_cfg->used_rbd_addr = htole64(sc->rxq.used_desc_dma.paddr);
942 rx_cfg->status_wr_ptr = htole64(sc->rxq.stat_dma.paddr);
943
944 /* initialize TX command queue */
945 ctxt_info->hcmd_cfg.cmd_queue_addr =
946 htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
947 ctxt_info->hcmd_cfg.cmd_queue_size =
948 IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);
949
950 /* allocate ucode sections in dram and set addresses */
951 err = iwx_init_fw_sec(sc, fws, &ctxt_info->dram);
952 if (err) {
953 iwx_ctxt_info_free_fw_img(sc);
954 return err;
955 }
956
957 /* Configure debug, if exists */
958 if (sc->sc_fw.dbg_dest_tlv_v1) {
959 err = iwx_apply_debug_destination(sc);
960 if (err) {
961 iwx_ctxt_info_free_fw_img(sc);
962 return err;
963 }
964 }
965
966 /*
967 * Write the context info DMA base address. The device expects a
968 * 64-bit address but a simple bus_space_write_8 to this register
969 * won't work on some devices, such as the AX201.
970 */
971 paddr = sc->ctxt_info_dma.paddr;
972 IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA, paddr & 0xffffffff);
973 IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA + 4, paddr >> 32);
974
975 /* kick FW self load */
976 if (!iwx_nic_lock(sc)) {
977 iwx_ctxt_info_free_fw_img(sc);
978 return EBUSY;
979 }
980
981 iwx_set_ltr(sc);
982 iwx_write_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
983 iwx_nic_unlock(sc);
984
985 /* Context info will be released upon alive or failure to get one */
986
987 return 0;
988}
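
Note that the queue-size fields above are log2-encoded: per the macro
definitions, IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) evaluates to
fls(512) - 1 = 9 and IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT) to
fls(256) - 1 - 3 = 5, which is why the KASSERT only has to check that
the RX value fits in four bits.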
989
990int
991iwx_ctxt_info_gen3_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
992{
993 struct iwx_context_info_gen3 *ctxt_info_gen3;
994 struct iwx_prph_scratch *prph_scratch;
995 struct iwx_prph_scratch_ctrl_cfg *prph_sc_ctrl;
996 uint16_t cb_size;
997 uint32_t control_flags, scratch_size;
998 uint64_t paddr;
999 int err;
1000
1001 if (sc->sc_fw.iml == NULL || sc->sc_fw.iml_len == 0) {
1002 printf("%s: no image loader found in firmware file\n",
1003 DEVNAME(sc));
1004 iwx_ctxt_info_free_fw_img(sc);
1005 return EINVAL;
1006 }
1007
1008 err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->iml_dma,
1009 sc->sc_fw.iml_len, 0);
1010 if (err) {
1011 printf("%s: could not allocate DMA memory for "
1012 "firmware image loader\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
1013 iwx_ctxt_info_free_fw_img(sc);
1014 return ENOMEM12;
1015 }
1016
1017 prph_scratch = sc->prph_scratch_dma.vaddr;
1018 memset(prph_scratch, 0, sizeof(*prph_scratch));
1019 prph_sc_ctrl = &prph_scratch->ctrl_cfg;
1020 prph_sc_ctrl->version.version = 0;
1021 prph_sc_ctrl->version.mac_id = htole16(IWX_READ(sc, IWX_CSR_HW_REV));
1022 prph_sc_ctrl->version.size = htole16(sizeof(*prph_scratch) / 4);
1023
1024 control_flags = IWX_PRPH_SCRATCH_RB_SIZE_4K |
1025 IWX_PRPH_SCRATCH_MTR_MODE |
1026 (IWX_PRPH_MTR_FORMAT_256B & IWX_PRPH_SCRATCH_MTR_FORMAT);
1027 if (sc->sc_imr_enabled)
1028 control_flags |= IWX_PRPH_SCRATCH_IMR_DEBUG_EN;
1029 prph_sc_ctrl->control.control_flags = htole32(control_flags);
1030
1031 /* initialize RX default queue */
1032 prph_sc_ctrl->rbd_cfg.free_rbd_addr =
1033 htole64(sc->rxq.free_desc_dma.paddr);
1034
1035 /* allocate ucode sections in dram and set addresses */
1036 err = iwx_init_fw_sec(sc, fws, &prph_scratch->dram);
1037 if (err) {
1038 iwx_dma_contig_free(&sc->iml_dma);
1039 iwx_ctxt_info_free_fw_img(sc);
1040 return err;
1041 }
1042
1043 ctxt_info_gen3 = sc->ctxt_info_dma.vaddr;
1044 memset(ctxt_info_gen3, 0, sizeof(*ctxt_info_gen3));
1045 ctxt_info_gen3->prph_info_base_addr = htole64(sc->prph_info_dma.paddr);
1046 ctxt_info_gen3->prph_scratch_base_addr =
1047 htole64(sc->prph_scratch_dma.paddr);
1048 scratch_size = sizeof(*prph_scratch);
1049 ctxt_info_gen3->prph_scratch_size = htole32(scratch_size);
1050 ctxt_info_gen3->cr_head_idx_arr_base_addr =
1051 htole64(sc->rxq.stat_dma.paddr);
1052 ctxt_info_gen3->tr_tail_idx_arr_base_addr =
1053 htole64(sc->prph_info_dma.paddr + PAGE_SIZE / 2);
1054 ctxt_info_gen3->cr_tail_idx_arr_base_addr =
1055 htole64(sc->prph_info_dma.paddr + 3 * PAGE_SIZE / 4);
1056 ctxt_info_gen3->mtr_base_addr =
1057 htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
1058 ctxt_info_gen3->mcr_base_addr = htole64(sc->rxq.used_desc_dma.paddr);
1059 cb_size = IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);
1060 ctxt_info_gen3->mtr_size = htole16(cb_size);
1061 cb_size = IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE);
1062 ctxt_info_gen3->mcr_size = htole16(cb_size);
1063
1064 memcpy(sc->iml_dma.vaddr, sc->sc_fw.iml, sc->sc_fw.iml_len);
1065
1066 paddr = sc->ctxt_info_dma.paddr;
1067 IWX_WRITE(sc, IWX_CSR_CTXT_INFO_ADDR, paddr & 0xffffffff);
1068 IWX_WRITE(sc, IWX_CSR_CTXT_INFO_ADDR + 4, paddr >> 32);
1069
1070 paddr = sc->iml_dma.paddr;
1071 IWX_WRITE(sc, IWX_CSR_IML_DATA_ADDR, paddr & 0xffffffff);
1072 IWX_WRITE(sc, IWX_CSR_IML_DATA_ADDR + 4, paddr >> 32);
1073 IWX_WRITE(sc, IWX_CSR_IML_SIZE_ADDR, sc->sc_fw.iml_len);
1074
1075 IWX_SETBITS(sc, IWX_CSR_CTXT_INFO_BOOT_CTRL,
1076 IWX_CSR_AUTO_FUNC_BOOT_ENA);
1077
1078 /* kick FW self load */
1079 if (!iwx_nic_lock(sc)) {
1080 iwx_dma_contig_free(&sc->iml_dma);
1081 iwx_ctxt_info_free_fw_img(sc);
1082 return EBUSY;
1083 }
1084 iwx_set_ltr(sc);
1085 iwx_write_umac_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
1086 iwx_nic_unlock(sc);
1087
1088 /* Context info will be released upon alive or failure to get one */
1089 return 0;
1090}
1091
1092void
1093iwx_ctxt_info_free_fw_img(struct iwx_softc *sc)
1094{
1095 struct iwx_self_init_dram *dram = &sc->init_dram;
1096 int i;
1097
1098 if (!dram->fw)
1099 return;
1100
1101 for (i = 0; i < dram->lmac_cnt + dram->umac_cnt; i++)
1102 iwx_dma_contig_free(&dram->fw[i]);
1103
1104 free(dram->fw, M_DEVBUF,
1105 (dram->lmac_cnt + dram->umac_cnt) * sizeof(*dram->fw));
1106 dram->lmac_cnt = 0;
1107 dram->umac_cnt = 0;
1108 dram->fw = NULL;
1109}
1110
1111int
1112iwx_firmware_store_section(struct iwx_softc *sc, enum iwx_ucode_type type,
1113 uint8_t *data, size_t dlen)
1114{
1115 struct iwx_fw_sects *fws;
1116 struct iwx_fw_onesect *fwone;
1117
1118 if (type >= IWX_UCODE_TYPE_MAX)
1119 return EINVAL;
1120 if (dlen < sizeof(uint32_t))
1121 return EINVAL;
1122
1123 fws = &sc->sc_fw.fw_sects[type];
1124 DPRINTF(("%s: ucode type %d section %d\n", DEVNAME(sc), type, fws->fw_count))do { ; } while (0);
1125 if (fws->fw_count >= IWX_UCODE_SECT_MAX57)
1126 return EINVAL22;
1127
1128 fwone = &fws->fw_sect[fws->fw_count];
1129
1130 /* first 32bit are device load offset */
1131 memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
1132
1133 /* rest is data */
1134 fwone->fws_data = data + sizeof(uint32_t);
1135 fwone->fws_len = dlen - sizeof(uint32_t);
1136
1137 fws->fw_count++;
1138 fws->fw_totlen += fwone->fws_len;
1139
1140 return 0;
1141}
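
Each stored section is thus a thin view into the TLV payload, whose layout
per the code above is:

	bytes 0..3: fws_devoff, the 32-bit device load offset
	bytes 4.. : fws_len bytes of section data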
1142
1143#define IWX_DEFAULT_SCAN_CHANNELS 40
1144/* Newer firmware might support more channels. Raise this value if needed. */
1145#define IWX_MAX_SCAN_CHANNELS 67 /* as of iwx-cc-a0-62 firmware */
1146
1147struct iwx_tlv_calib_data {
1148 uint32_t ucode_type;
1149 struct iwx_tlv_calib_ctrl calib;
1150} __packed;
1151
1152int
1153iwx_set_default_calib(struct iwx_softc *sc, const void *data)
1154{
1155 const struct iwx_tlv_calib_data *def_calib = data;
1156 uint32_t ucode_type = le32toh(def_calib->ucode_type);
1157
1158 if (ucode_type >= IWX_UCODE_TYPE_MAX)
1159 return EINVAL;
1160
1161 sc->sc_default_calib[ucode_type].flow_trigger =
1162 def_calib->calib.flow_trigger;
1163 sc->sc_default_calib[ucode_type].event_trigger =
1164 def_calib->calib.event_trigger;
1165
1166 return 0;
1167}
1168
1169void
1170iwx_fw_info_free(struct iwx_fw_info *fw)
1171{
1172 free(fw->fw_rawdata, M_DEVBUF, fw->fw_rawsize);
1173 fw->fw_rawdata = NULL;
1174 fw->fw_rawsize = 0;
1175 /* don't touch fw->fw_status */
1176 memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
1177 free(fw->iml, M_DEVBUF, fw->iml_len);
1178 fw->iml = NULL;
1179 fw->iml_len = 0;
1180}
1181
1182#define IWX_FW_ADDR_CACHE_CONTROL 0xC0000000
1183
1184int
1185iwx_read_firmware(struct iwx_softc *sc)
1186{
1187 struct ieee80211com *ic = &sc->sc_ic;
1188 struct iwx_fw_info *fw = &sc->sc_fw;
1189 struct iwx_tlv_ucode_header *uhdr;
1190 struct iwx_ucode_tlv tlv;
1191 uint32_t tlv_type;
1192 uint8_t *data;
1193 int err;
1194 size_t len;
1195
1196 if (fw->fw_status == IWX_FW_STATUS_DONE)
1197 return 0;
1198
1199 while (fw->fw_status == IWX_FW_STATUS_INPROGRESS)
1200 tsleep_nsec(&sc->sc_fw, 0, "iwxfwp", INFSLP);
1201 fw->fw_status = IWX_FW_STATUS_INPROGRESS;
1202
1203 if (fw->fw_rawdata != NULL)
1204 iwx_fw_info_free(fw);
1205
1206 err = loadfirmware(sc->sc_fwname,
1207 (u_char **)&fw->fw_rawdata, &fw->fw_rawsize);
1208 if (err) {
1209 printf("%s: could not read firmware %s (error %d)\n",
1210 DEVNAME(sc), sc->sc_fwname, err);
1211 goto out;
1212 }
1213
1214 if (ic->ic_if.if_flags & IFF_DEBUG)
1215 printf("%s: using firmware %s\n", DEVNAME(sc), sc->sc_fwname);
1216
1217 sc->sc_capaflags = 0;
1218 sc->sc_capa_n_scan_channels = IWX_DEFAULT_SCAN_CHANNELS;
1219 memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
1220 memset(sc->sc_ucode_api, 0, sizeof(sc->sc_ucode_api));
1221 sc->n_cmd_versions = 0;
1222
1223 uhdr = (void *)fw->fw_rawdata;
1224 if (*(uint32_t *)fw->fw_rawdata != 0
1225 || le32toh(uhdr->magic) != IWX_TLV_UCODE_MAGIC) {
1226 printf("%s: invalid firmware %s\n",
1227 DEVNAME(sc), sc->sc_fwname);
1228 err = EINVAL;
1229 goto out;
1230 }
1231
1232 iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
1233 IWX_UCODE_MAJOR(le32toh(uhdr->ver)),
1234 IWX_UCODE_MINOR(le32toh(uhdr->ver)),
1235 IWX_UCODE_API(le32toh(uhdr->ver)));
1236
1237 data = uhdr->data;
1238 len = fw->fw_rawsize - sizeof(*uhdr);
1239
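The loop below walks the rest of the file as a sequence of TLV records:
each record starts with a struct iwx_ucode_tlv header carrying a 32-bit
little-endian type and length, followed by 'length' bytes of payload.
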
1240 while (len >= sizeof(tlv)) {
1241 size_t tlv_len;
1242 void *tlv_data;
1243
1244 memcpy(&tlv, data, sizeof(tlv));
1245 tlv_len = le32toh(tlv.length);
1246 tlv_type = le32toh(tlv.type);
1247
1248 len -= sizeof(tlv);
1249 data += sizeof(tlv);
1250 tlv_data = data;
1251
1252 if (len < tlv_len) {
1253 printf("%s: firmware too short: %zu bytes\n",
1254 DEVNAME(sc), len);
1255 err = EINVAL;
1256 goto parse_out;
1257 }
1258
1259 switch (tlv_type) {
1260 case IWX_UCODE_TLV_PROBE_MAX_LEN:
1261 if (tlv_len < sizeof(uint32_t)) {
1262 err = EINVAL;
1263 goto parse_out;
1264 }
1265 sc->sc_capa_max_probe_len
1266 = le32toh(*(uint32_t *)tlv_data);
1267 if (sc->sc_capa_max_probe_len >
1268 IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
1269 err = EINVAL;
1270 goto parse_out;
1271 }
1272 break;
1273 case IWX_UCODE_TLV_PAN:
1274 if (tlv_len) {
1275 err = EINVAL;
1276 goto parse_out;
1277 }
1278 sc->sc_capaflags |= IWX_UCODE_TLV_FLAGS_PAN;
1279 break;
1280 case IWX_UCODE_TLV_FLAGS:
1281 if (tlv_len < sizeof(uint32_t)) {
1282 err = EINVAL;
1283 goto parse_out;
1284 }
1285 /*
1286 * Apparently there can be many flags, but Linux driver
1287 * parses only the first one, and so do we.
1288 *
1289 * XXX: why does this override IWX_UCODE_TLV_PAN?
1290 * Intentional or a bug? Observations from
1291 * current firmware file:
1292 * 1) TLV_PAN is parsed first
1293 * 2) TLV_FLAGS contains TLV_FLAGS_PAN
1294 * ==> this resets TLV_PAN to itself... hnnnk
1295 */
1296 sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data)((__uint32_t)(*(uint32_t *)tlv_data));
1297 break;
1298 case IWX_UCODE_TLV_CSCHEME28:
1299 err = iwx_store_cscheme(sc, tlv_data, tlv_len);
1300 if (err)
1301 goto parse_out;
1302 break;
1303 case IWX_UCODE_TLV_NUM_OF_CPU27: {
1304 uint32_t num_cpu;
1305 if (tlv_len != sizeof(uint32_t)) {
1306 err = EINVAL22;
1307 goto parse_out;
1308 }
1309 num_cpu = le32toh(*(uint32_t *)tlv_data)((__uint32_t)(*(uint32_t *)tlv_data));
1310 if (num_cpu < 1 || num_cpu > 2) {
1311 err = EINVAL22;
1312 goto parse_out;
1313 }
1314 break;
1315 }
1316 case IWX_UCODE_TLV_SEC_RT19:
1317 err = iwx_firmware_store_section(sc,
1318 IWX_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
1319 if (err)
1320 goto parse_out;
1321 break;
1322 case IWX_UCODE_TLV_SEC_INIT20:
1323 err = iwx_firmware_store_section(sc,
1324 IWX_UCODE_TYPE_INIT, tlv_data, tlv_len);
1325 if (err)
1326 goto parse_out;
1327 break;
1328 case IWX_UCODE_TLV_SEC_WOWLAN21:
1329 err = iwx_firmware_store_section(sc,
1330 IWX_UCODE_TYPE_WOW, tlv_data, tlv_len);
1331 if (err)
1332 goto parse_out;
1333 break;
1334 case IWX_UCODE_TLV_DEF_CALIB22:
1335 if (tlv_len != sizeof(struct iwx_tlv_calib_data)) {
1336 err = EINVAL22;
1337 goto parse_out;
1338 }
1339 err = iwx_set_default_calib(sc, tlv_data);
1340 if (err)
1341 goto parse_out;
1342 break;
1343 case IWX_UCODE_TLV_PHY_SKU23:
1344 if (tlv_len != sizeof(uint32_t)) {
1345 err = EINVAL22;
1346 goto parse_out;
1347 }
1348 sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data)((__uint32_t)(*(uint32_t *)tlv_data));
1349 break;
1350
1351 case IWX_UCODE_TLV_API_CHANGES_SET29: {
1352 struct iwx_ucode_api *api;
1353 int idx, i;
1354 if (tlv_len != sizeof(*api)) {
1355 err = EINVAL22;
1356 goto parse_out;
1357 }
1358 api = (struct iwx_ucode_api *)tlv_data;
1359 idx = le32toh(api->api_index)((__uint32_t)(api->api_index));
1360 if (idx >= howmany(IWX_NUM_UCODE_TLV_API, 32)(((128) + ((32) - 1)) / (32))) {
1361 err = EINVAL22;
1362 goto parse_out;
1363 }
1364 for (i = 0; i < 32; i++) {
1365 if ((le32toh(api->api_flags)((__uint32_t)(api->api_flags)) & (1 << i)) == 0)
1366 continue;
1367 setbit(sc->sc_ucode_api, i + (32 * idx))((sc->sc_ucode_api)[(i + (32 * idx))>>3] |= 1<<
((i + (32 * idx))&(8 -1)))
;
1368 }
1369 break;
1370 }
1371
1372 case IWX_UCODE_TLV_ENABLED_CAPABILITIES30: {
1373 struct iwx_ucode_capa *capa;
1374 int idx, i;
1375 if (tlv_len != sizeof(*capa)) {
1376 err = EINVAL22;
1377 goto parse_out;
1378 }
1379 capa = (struct iwx_ucode_capa *)tlv_data;
1380 idx = le32toh(capa->api_index)((__uint32_t)(capa->api_index));
1381 if (idx >= howmany(IWX_NUM_UCODE_TLV_CAPA, 32)(((128) + ((32) - 1)) / (32))) {
1382 goto parse_out;
1383 }
1384 for (i = 0; i < 32; i++) {
1385 if ((le32toh(capa->api_capa)((__uint32_t)(capa->api_capa)) & (1 << i)) == 0)
1386 continue;
1387 setbit(sc->sc_enabled_capa, i + (32 * idx))((sc->sc_enabled_capa)[(i + (32 * idx))>>3] |= 1<<
((i + (32 * idx))&(8 -1)))
;
1388 }
1389 break;
1390 }
1391
1392 case IWX_UCODE_TLV_SDIO_ADMA_ADDR35:
1393 case IWX_UCODE_TLV_FW_GSCAN_CAPA50:
1394 /* ignore, not used by current driver */
1395 break;
1396
1397 case IWX_UCODE_TLV_SEC_RT_USNIFFER34:
1398 err = iwx_firmware_store_section(sc,
1399 IWX_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
1400 tlv_len);
1401 if (err)
1402 goto parse_out;
1403 break;
1404
1405 case IWX_UCODE_TLV_PAGING32:
1406 if (tlv_len != sizeof(uint32_t)) {
1407 err = EINVAL22;
1408 goto parse_out;
1409 }
1410 break;
1411
1412 case IWX_UCODE_TLV_N_SCAN_CHANNELS31:
1413 if (tlv_len != sizeof(uint32_t)) {
1414 err = EINVAL22;
1415 goto parse_out;
1416 }
1417 sc->sc_capa_n_scan_channels =
1418 le32toh(*(uint32_t *)tlv_data)((__uint32_t)(*(uint32_t *)tlv_data));
1419 if (sc->sc_capa_n_scan_channels > IWX_MAX_SCAN_CHANNELS67) {
1420 err = ERANGE34;
1421 goto parse_out;
1422 }
1423 break;
1424
1425 case IWX_UCODE_TLV_FW_VERSION36:
1426 if (tlv_len != sizeof(uint32_t) * 3) {
1427 err = EINVAL22;
1428 goto parse_out;
1429 }
1430
1431 iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
1432 le32toh(((uint32_t *)tlv_data)[0])((__uint32_t)(((uint32_t *)tlv_data)[0])),
1433 le32toh(((uint32_t *)tlv_data)[1])((__uint32_t)(((uint32_t *)tlv_data)[1])),
1434 le32toh(((uint32_t *)tlv_data)[2])((__uint32_t)(((uint32_t *)tlv_data)[2])));
1435 break;
1436
1437 case IWX_UCODE_TLV_FW_DBG_DEST38: {
1438 struct iwx_fw_dbg_dest_tlv_v1 *dest_v1 = NULL((void *)0);
1439
1440 fw->dbg_dest_ver = (uint8_t *)tlv_data;
1441 if (*fw->dbg_dest_ver != 0) {
1442 err = EINVAL22;
1443 goto parse_out;
1444 }
1445
1446 if (fw->dbg_dest_tlv_init)
1447 break;
1448 fw->dbg_dest_tlv_init = true1;
1449
1450 dest_v1 = (void *)tlv_data;
1451 fw->dbg_dest_tlv_v1 = dest_v1;
1452 fw->n_dest_reg = tlv_len -
1453 offsetof(struct iwx_fw_dbg_dest_tlv_v1, reg_ops)__builtin_offsetof(struct iwx_fw_dbg_dest_tlv_v1, reg_ops);
1454 fw->n_dest_reg /= sizeof(dest_v1->reg_ops[0]);
1455 DPRINTF(("%s: found debug dest; n_dest_reg=%d\n", __func__, fw->n_dest_reg))do { ; } while (0);
1456 break;
1457 }
1458
1459 case IWX_UCODE_TLV_FW_DBG_CONF39: {
1460 struct iwx_fw_dbg_conf_tlv *conf = (void *)tlv_data;
1461
1462 if (!fw->dbg_dest_tlv_init ||
1463 conf->id >= nitems(fw->dbg_conf_tlv)(sizeof((fw->dbg_conf_tlv)) / sizeof((fw->dbg_conf_tlv)
[0]))
||
1464 fw->dbg_conf_tlv[conf->id] != NULL((void *)0))
1465 break;
1466
1467 DPRINTF(("Found debug configuration: %d\n", conf->id))do { ; } while (0);
1468 fw->dbg_conf_tlv[conf->id] = conf;
1469 fw->dbg_conf_tlv_len[conf->id] = tlv_len;
1470 break;
1471 }
1472
1473 case IWX_UCODE_TLV_UMAC_DEBUG_ADDRS54: {
1474 struct iwx_umac_debug_addrs *dbg_ptrs =
1475 (void *)tlv_data;
1476
1477 if (tlv_len != sizeof(*dbg_ptrs)) {
1478 err = EINVAL22;
1479 goto parse_out;
1480 }
1481 if (sc->sc_device_family < IWX_DEVICE_FAMILY_220001)
1482 break;
1483 sc->sc_uc.uc_umac_error_event_table =
1484 le32toh(dbg_ptrs->error_info_addr)((__uint32_t)(dbg_ptrs->error_info_addr)) &
1485 ~IWX_FW_ADDR_CACHE_CONTROL0xC0000000;
1486 sc->sc_uc.error_event_table_tlv_status |=
1487 IWX_ERROR_EVENT_TABLE_UMAC(1 << 2);
1488 break;
1489 }
1490
1491 case IWX_UCODE_TLV_LMAC_DEBUG_ADDRS55: {
1492 struct iwx_lmac_debug_addrs *dbg_ptrs =
1493 (void *)tlv_data;
1494
1495 if (tlv_len != sizeof(*dbg_ptrs)) {
1496 err = EINVAL22;
1497 goto parse_out;
1498 }
1499 if (sc->sc_device_family < IWX_DEVICE_FAMILY_220001)
1500 break;
1501 sc->sc_uc.uc_lmac_error_event_table[0] =
1502 le32toh(dbg_ptrs->error_event_table_ptr)((__uint32_t)(dbg_ptrs->error_event_table_ptr)) &
1503 ~IWX_FW_ADDR_CACHE_CONTROL0xC0000000;
1504 sc->sc_uc.error_event_table_tlv_status |=
1505 IWX_ERROR_EVENT_TABLE_LMAC1(1 << 0);
1506 break;
1507 }
1508
1509 case IWX_UCODE_TLV_FW_MEM_SEG51:
1510 break;
1511
1512 case IWX_UCODE_TLV_IML52:
1513 if (sc->sc_fw.iml != NULL((void *)0)) {
1514 free(fw->iml, M_DEVBUF2, fw->iml_len);
1515 fw->iml_len = 0;
1516 }
1517 sc->sc_fw.iml = malloc(tlv_len, M_DEVBUF2,
1518 M_WAIT0x0001 | M_CANFAIL0x0004 | M_ZERO0x0008);
1519 if (sc->sc_fw.iml == NULL((void *)0)) {
1520 err = ENOMEM12;
1521 goto parse_out;
1522 }
1523 memcpy(sc->sc_fw.iml, tlv_data, tlv_len)__builtin_memcpy((sc->sc_fw.iml), (tlv_data), (tlv_len));
1524 sc->sc_fw.iml_len = tlv_len;
1525 break;
1526
1527 case IWX_UCODE_TLV_CMD_VERSIONS48:
1528 if (tlv_len % sizeof(struct iwx_fw_cmd_version)) {
1529 tlv_len /= sizeof(struct iwx_fw_cmd_version);
1530 tlv_len *= sizeof(struct iwx_fw_cmd_version);
1531 }
1532 if (sc->n_cmd_versions != 0) {
1533 err = EINVAL22;
1534 goto parse_out;
1535 }
1536 if (tlv_len > sizeof(sc->cmd_versions)) {
1537 err = EINVAL22;
1538 goto parse_out;
1539 }
1540 memcpy(&sc->cmd_versions[0], tlv_data, tlv_len)__builtin_memcpy((&sc->cmd_versions[0]), (tlv_data), (
tlv_len))
;
1541 sc->n_cmd_versions = tlv_len / sizeof(struct iwx_fw_cmd_version);
1542 break;
1543
1544 case IWX_UCODE_TLV_FW_RECOVERY_INFO57:
1545 break;
1546
1547 case IWX_UCODE_TLV_FW_FSEQ_VERSION60:
1548 case IWX_UCODE_TLV_PHY_INTEGRATION_VERSION61:
1549 case IWX_UCODE_TLV_FW_NUM_STATIONS(0x100 + 0):
1550 case IWX_UCODE_TLV_FW_NUM_BEACONS(0x100 + 2):
1551 break;
1552
1553 /* undocumented TLVs found in iwx-cc-a0-46 image */
1554 case 58:
1555 case 0x1000003:
1556 case 0x1000004:
1557 break;
1558
1559 /* undocumented TLVs found in iwx-cc-a0-48 image */
1560 case 0x1000000:
1561 case 0x1000002:
1562 break;
1563
1564 case IWX_UCODE_TLV_TYPE_DEBUG_INFO(0x1000005 + 0):
1565 case IWX_UCODE_TLV_TYPE_BUFFER_ALLOCATION(0x1000005 + 1):
1566 case IWX_UCODE_TLV_TYPE_HCMD(0x1000005 + 2):
1567 case IWX_UCODE_TLV_TYPE_REGIONS(0x1000005 + 3):
1568 case IWX_UCODE_TLV_TYPE_TRIGGERS(0x1000005 + 4):
1569 case IWX_UCODE_TLV_TYPE_CONF_SET(0x1000005 + 5):
1570 case IWX_UCODE_TLV_SEC_TABLE_ADDR66:
1571 case IWX_UCODE_TLV_D3_KEK_KCK_ADDR67:
1572 case IWX_UCODE_TLV_CURRENT_PC68:
1573 break;
1574
1575 /* undocumented TLV found in iwx-cc-a0-67 image */
1576 case 0x100000b:
1577 break;
1578
1579 /* undocumented TLV found in iwx-ty-a0-gf-a0-73 image */
1580 case 0x101:
1581 break;
1582
1583 /* undocumented TLV found in iwx-ty-a0-gf-a0-77 image */
1584 case 0x100000c:
1585 break;
1586
1587 default:
1588 err = EINVAL22;
1589 goto parse_out;
1590 }
1591
1592 /*
1593 * Check for size_t overflow and ignore missing padding at
1594 * end of firmware file.
1595 */
1596 if (roundup(tlv_len, 4) > len)
1597 break;
1598
1599 len -= roundup(tlv_len, 4);
1600 data += roundup(tlv_len, 4);
1601 }
1602
1603 KASSERT(err == 0);
1604
1605 parse_out:
1606 if (err) {
1607 printf("%s: firmware parse error %d, "
1608 "section type %d\n", DEVNAME(sc), err, tlv_type);
1609 }
1610
1611 out:
1612 if (err) {
1613 fw->fw_status = IWX_FW_STATUS_NONE;
1614 if (fw->fw_rawdata != NULL)
1615 iwx_fw_info_free(fw);
1616 } else
1617 fw->fw_status = IWX_FW_STATUS_DONE;
1618 wakeup(&sc->sc_fw);
1619
1620 return err;
1621}
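
The parser above is a conventional TLV walk: read an 8-byte header, bounds-check the payload against the remaining length, then advance by the payload length rounded up to 4 bytes, tolerating missing padding at the very end of the image. A minimal userland sketch of the same walk, assuming a little-endian host and a hypothetical tlv_hdr struct in place of the driver's firmware types:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical on-disk TLV header; the driver's firmware structures
 * play this role. Fields are little-endian in the firmware file. */
struct tlv_hdr {
	uint32_t type;
	uint32_t length;
};

#define ROUNDUP4(x) ((((x) + 3) / 4) * 4)

/* Walk a TLV buffer the way the parser above does. Returns 0 on
 * success, -1 if a payload claims more bytes than remain. */
static int
walk_tlvs(const uint8_t *data, size_t len)
{
	struct tlv_hdr tlv;
	size_t padded;

	while (len >= sizeof(tlv)) {
		memcpy(&tlv, data, sizeof(tlv));	/* header may be unaligned */
		len -= sizeof(tlv);
		data += sizeof(tlv);
		if (len < tlv.length)
			return -1;			/* truncated image */
		printf("TLV type 0x%x, %u payload bytes\n", tlv.type, tlv.length);
		padded = ROUNDUP4((size_t)tlv.length);
		/* Missing padding at the very end of the image is tolerated. */
		if (padded > len)
			break;
		len -= padded;
		data += padded;
	}
	return 0;
}

int
main(void)
{
	/* One TLV: type 6, one payload byte, three pad bytes. */
	uint8_t buf[12] = { 6, 0, 0, 0, 1, 0, 0, 0, 0xab, 0, 0, 0 };
	return walk_tlvs(buf, sizeof(buf));
}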
1622
1623uint32_t
1624iwx_prph_addr_mask(struct iwx_softc *sc)
1625{
1626 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1627 return 0x00ffffff;
1628 else
1629 return 0x000fffff;
1630}
1631
1632uint32_t
1633iwx_read_prph_unlocked(struct iwx_softc *sc, uint32_t addr)
1634{
1635 uint32_t mask = iwx_prph_addr_mask(sc);
1636 IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_RADDR, ((addr & mask) | (3 << 24)));
1637 IWX_BARRIER_READ_WRITE(sc);
1638 return IWX_READ(sc, IWX_HBUS_TARG_PRPH_RDAT);
1639}
1640
1641uint32_t
1642iwx_read_prph(struct iwx_softc *sc, uint32_t addr)
1643{
1644 iwx_nic_assert_locked(sc);
1645 return iwx_read_prph_unlocked(sc, addr);
1646}
1647
1648void
1649iwx_write_prph_unlocked(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1650{
1651 uint32_t mask = iwx_prph_addr_mask(sc);
1652 IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WADDR, ((addr & mask) | (3 << 24)));
1653 IWX_BARRIER_WRITE(sc);
1654 IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WDAT, val);
1655}
1656
1657void
1658iwx_write_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1659{
1660 iwx_nic_assert_locked(sc);
1661 iwx_write_prph_unlocked(sc, addr, val);
1662}
1663
1664void
1665iwx_write_prph64(struct iwx_softc *sc, uint64_t addr, uint64_t val)
1666{
1667 iwx_write_prph(sc, (uint32_t)addr, val & 0xffffffff);
1668 iwx_write_prph(sc, (uint32_t)addr + 4, val >> 32);
1669}
1670
1671uint32_t
1672iwx_read_umac_prph_unlocked(struct iwx_softc *sc, uint32_t addr)
1673{
1674 return iwx_read_prph_unlocked(sc, addr + sc->sc_umac_prph_offset);
1675}
1676
1677uint32_t
1678iwx_read_umac_prph(struct iwx_softc *sc, uint32_t addr)
1679{
1680 return iwx_read_prph(sc, addr + sc->sc_umac_prph_offset);
1681}
1682
1683void
1684iwx_write_umac_prph_unlocked(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1685{
1686 iwx_write_prph_unlocked(sc, addr + sc->sc_umac_prph_offset, val);
1687}
1688
1689void
1690iwx_write_umac_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1691{
1692 iwx_write_prph(sc, addr + sc->sc_umac_prph_offset, val);
1693}
1694
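
These accessors implement the chip's indirect periphery-register protocol: post the target address, masked to 20 or 24 bits depending on device family, together with access code 3 in bits 25:24, then read or write the data register. A toy model with a simulated register file; the mmio_* helpers and simplified register names are stand-ins for illustration, not driver API:

#include <stdint.h>
#include <stdio.h>

/* Toy register file standing in for the device's CSR window. */
static uint32_t csr[0x500 / 4];

static void mmio_write32(uint32_t reg, uint32_t val) { csr[reg / 4] = val; }
static uint32_t mmio_read32(uint32_t reg) { return csr[reg / 4]; }

#define HBUS_TARG_PRPH_RADDR 0x448 /* (0x400)+0x048 per the listing */
#define HBUS_TARG_PRPH_RDAT  0x450 /* (0x400)+0x050 */

/* Indirect periphery read as done above: post (addr & mask) plus the
 * access code 3 in bits 25:24 to RADDR, barrier, then read RDAT. */
static uint32_t
prph_read(uint32_t addr, uint32_t addr_mask)
{
	mmio_write32(HBUS_TARG_PRPH_RADDR, (addr & addr_mask) | (3U << 24));
	/* a read/write bus barrier sits here on real hardware */
	return mmio_read32(HBUS_TARG_PRPH_RDAT);
}

int
main(void)
{
	/* AX210 and later decode 24 address bits; older parts only 20. */
	printf("0x%08x\n", prph_read(0xa03458, 0x00ffffff));
	return 0;
}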
1695int
1696iwx_read_mem(struct iwx_softc *sc, uint32_t addr, void *buf, int dwords)
1697{
1698 int offs, err = 0;
1699 uint32_t *vals = buf;
1700
1701 if (iwx_nic_lock(sc)) {
1702 IWX_WRITE(sc, IWX_HBUS_TARG_MEM_RADDR, addr);
1703 for (offs = 0; offs < dwords; offs++)
1704 vals[offs] = le32toh(IWX_READ(sc, IWX_HBUS_TARG_MEM_RDAT));
1705 iwx_nic_unlock(sc);
1706 } else {
1707 err = EBUSY;
1708 }
1709 return err;
1710}
1711
1712int
1713iwx_write_mem(struct iwx_softc *sc, uint32_t addr, const void *buf, int dwords)
1714{
1715 int offs;
1716 const uint32_t *vals = buf;
1717
1718 if (iwx_nic_lock(sc)) {
1719 IWX_WRITE(sc, IWX_HBUS_TARG_MEM_WADDR, addr);
1720 /* WADDR auto-increments */
1721 for (offs = 0; offs < dwords; offs++) {
1722 uint32_t val = vals ? vals[offs] : 0;
1723 IWX_WRITE(sc, IWX_HBUS_TARG_MEM_WDAT, val);
1724 }
1725 iwx_nic_unlock(sc);
1726 } else {
1727 return EBUSY;
1728 }
1729 return 0;
1730}
1731
1732int
1733iwx_write_mem32(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1734{
1735 return iwx_write_mem(sc, addr, &val, 1);
1736}
1737
1738int
1739iwx_poll_bit(struct iwx_softc *sc, int reg, uint32_t bits, uint32_t mask,
1740 int timo)
1741{
1742 for (;;) {
1743 if ((IWX_READ(sc, reg) & mask) == (bits & mask)) {
1744 return 1;
1745 }
1746 if (timo < 10) {
1747 return 0;
1748 }
1749 timo -= 10;
1750 DELAY(10);
1751 }
1752}
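
iwx_poll_bit() burns its timeout budget in 10-microsecond steps and, because the register is sampled before the budget check, always samples at least once even when timo is 0. A standalone sketch of the same shape; delay_us() and read_reg() are stubs, not driver calls:

#include <stdint.h>
#include <stdio.h>

static void delay_us(int us) { (void)us; }	/* stand-in for DELAY() */

static uint32_t fake_reg;
static uint32_t read_reg(void) { return fake_reg; }

/* Same structure as iwx_poll_bit(): check first, then spend the budget
 * in 10 us steps until fewer than 10 us remain. */
static int
poll_bit(uint32_t bits, uint32_t mask, int timo_us)
{
	for (;;) {
		if ((read_reg() & mask) == (bits & mask))
			return 1;
		if (timo_us < 10)
			return 0;
		timo_us -= 10;
		delay_us(10);
	}
}

int
main(void)
{
	fake_reg = 0x1;
	printf("%d\n", poll_bit(0x1, 0x1, 100));	/* prints 1 */
	return 0;
}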
1753
1754int
1755iwx_nic_lock(struct iwx_softc *sc)
1756{
1757 if (sc->sc_nic_locks > 0) {
1758 iwx_nic_assert_locked(sc);
1759 sc->sc_nic_locks++;
1760 return 1; /* already locked */
1761 }
1762
1763 IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
1764 IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1765
1766 DELAY(2);
1767
1768 if (iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
1769 IWX_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
1770 IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
1771 | IWX_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
1772 sc->sc_nic_locks++;
1773 return 1;
1774 }
1775
1776 printf("%s: acquiring device failed\n", DEVNAME(sc));
1777 return 0;
1778}
1779
1780void
1781iwx_nic_assert_locked(struct iwx_softc *sc)
1782{
1783 if (sc->sc_nic_locks <= 0)
1784 panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
1785}
1786
1787void
1788iwx_nic_unlock(struct iwx_softc *sc)
1789{
1790 if (sc->sc_nic_locks > 0) {
1791 if (--sc->sc_nic_locks == 0)
1792 IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
1793 IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1794 } else
1795 printf("%s: NIC already unlocked\n", DEVNAME(sc));
1796}
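
The NIC lock is a plain recursion counter: the MAC_ACCESS_REQ handshake with the hardware happens only on the 0 to 1 and 1 to 0 transitions, and nested acquisitions just count. A toy single-threaded model of that discipline; it omits the failure path the real iwx_nic_lock() has on first acquisition:

#include <stdio.h>

static int nic_locks;

static int
grab(void)
{
	if (nic_locks++ == 0)
		printf("set MAC_ACCESS_REQ, poll for MAC_ACCESS_EN\n");
	return 1;
}

static void
release(void)
{
	if (nic_locks <= 0) {
		printf("NIC already unlocked\n");
		return;
	}
	if (--nic_locks == 0)
		printf("clear MAC_ACCESS_REQ\n");
}

int
main(void)
{
	grab(); grab();		/* nested: counted, no second handshake */
	release(); release();	/* bit cleared only on the last release */
	return 0;
}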
1797
1798int
1799iwx_set_bits_mask_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits,
1800 uint32_t mask)
1801{
1802 uint32_t val;
1803
1804 if (iwx_nic_lock(sc)) {
1805 val = iwx_read_prph(sc, reg) & mask;
1806 val |= bits;
1807 iwx_write_prph(sc, reg, val);
1808 iwx_nic_unlock(sc);
1809 return 0;
1810 }
1811 return EBUSY;
1812}
1813
1814int
1815iwx_set_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
1816{
1817 return iwx_set_bits_mask_prph(sc, reg, bits, ~0);
1818}
1819
1820int
1821iwx_clear_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
1822{
1823 return iwx_set_bits_mask_prph(sc, reg, 0, ~bits);
1824}
1825
1826int
1827iwx_dma_contig_alloc(bus_dma_tag_t tag, struct iwx_dma_info *dma,
1828 bus_size_t size, bus_size_t alignment)
1829{
1830 int nsegs, err;
1831 caddr_t va;
1832
1833 dma->tag = tag;
1834 dma->size = size;
1835
1836 err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
1837 &dma->map);
1838 if (err)
1839 goto fail;
1840
1841 err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
1842 BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1843 if (err)
1844 goto fail;
1845
1846 if (nsegs > 1) {
1847 err = ENOMEM;
1848 goto fail;
1849 }
1850
1851 err = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
1852 BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
1853 if (err)
1854 goto fail;
1855 dma->vaddr = va;
1856
1857 err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
1858 BUS_DMA_NOWAIT);
1859 if (err)
1860 goto fail;
1861
1862 bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
1863 dma->paddr = dma->map->dm_segs[0].ds_addr;
1864
1865 return 0;
1866
1867fail: iwx_dma_contig_free(dma);
1868 return err;
1869}
1870
1871void
1872iwx_dma_contig_free(struct iwx_dma_info *dma)
1873{
1874 if (dma->map != NULL) {
1875 if (dma->vaddr != NULL) {
1876 bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
1877 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1878 bus_dmamap_unload(dma->tag, dma->map);
1879 bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
1880 bus_dmamem_free(dma->tag, &dma->seg, 1);
1881 dma->vaddr = NULL;
1882 }
1883 bus_dmamap_destroy(dma->tag, dma->map);
1884 dma->map = NULL;
1885 }
1886}
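
iwx_dma_contig_alloc() is the canonical bus_dma(9) five-step sequence: create the map, allocate contiguous segments, map them into kernel VA, load the map to obtain the bus address, then sync; any failure unwinds through iwx_dma_contig_free(), which tolerates partial state. A loose userland analogue, with posix_memalign() standing in for the first three steps (load and sync have no userland equivalent):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Userland sketch of the allocation dance:
 * 1. bus_dmamap_create  -> reserve a mapping object
 * 2. bus_dmamem_alloc   -> physically contiguous, aligned pages
 * 3. bus_dmamem_map     -> kernel virtual address for them
 * 4. bus_dmamap_load    -> device-visible (bus) address
 * 5. bus_dmamap_sync    -> publish initial contents to the device */
static void *
contig_alloc(size_t size, size_t align)
{
	void *va = NULL;

	if (posix_memalign(&va, align, size) != 0)	/* steps 1-3, roughly */
		return NULL;
	memset(va, 0, size);				/* BUS_DMA_ZERO */
	/* A real driver would record dma->paddr from the loaded
	 * segment here (steps 4-5). */
	return va;
}

int
main(void)
{
	void *ring = contig_alloc(512 * sizeof(uint64_t), 256);
	printf("%s\n", ring ? "allocated" : "failed");
	free(ring);
	return 0;
}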
1887
1888int
1889iwx_alloc_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1890{
1891 bus_size_t size;
1892 int i, err;
1893
1894 ring->cur = 0;
1895
1896 /* Allocate RX descriptors (256-byte aligned). */
1897 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1898 size = sizeof(struct iwx_rx_transfer_desc);
1899 else
1900 size = sizeof(uint64_t);
1901 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma,
1902 size * IWX_RX_MQ_RING_COUNT, 256);
1903 if (err) {
1904 printf("%s: could not allocate RX ring DMA memory\n",
1905 DEVNAME(sc));
1906 goto fail;
1907 }
1908 ring->desc = ring->free_desc_dma.vaddr;
1909
1910 /* Allocate RX status area (16-byte aligned). */
1911 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1912 size = sizeof(uint16_t);
1913 else
1914 size = sizeof(*ring->stat);
1915 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma, size, 16);
1916 if (err) {
1917 printf("%s: could not allocate RX status DMA memory\n",
1918 DEVNAME(sc));
1919 goto fail;
1920 }
1921 ring->stat = ring->stat_dma.vaddr;
1922
1923 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1924 size = sizeof(struct iwx_rx_completion_desc);
1925 else
1926 size = sizeof(uint32_t);
1927 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
1928 size * IWX_RX_MQ_RING_COUNT, 256);
1929 if (err) {
1930 printf("%s: could not allocate RX ring DMA memory\n",
1931 DEVNAME(sc));
1932 goto fail;
1933 }
1934
1935 for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
1936 struct iwx_rx_data *data = &ring->data[i];
1937
1938 memset(data, 0, sizeof(*data));
1939 err = bus_dmamap_create(sc->sc_dmat, IWX_RBUF_SIZE, 1,
1940 IWX_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1941 &data->map);
1942 if (err) {
1943 printf("%s: could not create RX buf DMA map\n",
1944 DEVNAME(sc));
1945 goto fail;
1946 }
1947
1948 err = iwx_rx_addbuf(sc, IWX_RBUF_SIZE, i);
1949 if (err)
1950 goto fail;
1951 }
1952 return 0;
1953
1954fail: iwx_free_rx_ring(sc, ring);
1955 return err;
1956}
1957
1958void
1959iwx_disable_rx_dma(struct iwx_softc *sc)
1960{
1961 int ntries;
1962
1963 if (iwx_nic_lock(sc)) {
1964 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1965 iwx_write_umac_prph(sc, IWX_RFH_RXF_DMA_CFG_GEN3, 0);
1966 else
1967 iwx_write_prph(sc, IWX_RFH_RXF_DMA_CFG, 0);
1968 for (ntries = 0; ntries < 1000; ntries++) {
1969 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
1970 if (iwx_read_umac_prph(sc,
1971 IWX_RFH_GEN_STATUS_GEN3) & IWX_RXF_DMA_IDLE)
1972 break;
1973 } else {
1974 if (iwx_read_prph(sc, IWX_RFH_GEN_STATUS) &
1975 IWX_RXF_DMA_IDLE)
1976 break;
1977 }
1978 DELAY(10);
1979 }
1980 iwx_nic_unlock(sc);
1981 }
1982}
1983
1984void
1985iwx_reset_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1986{
1987 ring->cur = 0;
1988 bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1989 ring->stat_dma.size, BUS_DMASYNC_PREWRITE);
1990 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
1991 uint16_t *status = sc->rxq.stat_dma.vaddr;
1992 *status = 0;
1993 } else
1994 memset(ring->stat, 0, sizeof(*ring->stat));
1995 bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1996 ring->stat_dma.size, BUS_DMASYNC_POSTWRITE);
1997
1998}
1999
2000void
2001iwx_free_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
2002{
2003 int i;
2004
2005 iwx_dma_contig_free(&ring->free_desc_dma);
2006 iwx_dma_contig_free(&ring->stat_dma);
2007 iwx_dma_contig_free(&ring->used_desc_dma);
2008
2009 for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
2010 struct iwx_rx_data *data = &ring->data[i];
2011
2012 if (data->m != NULL) {
2013 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
2014 data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2015 bus_dmamap_unload(sc->sc_dmat, data->map);
2016 m_freem(data->m);
2017 data->m = NULL;
2018 }
2019 if (data->map != NULL)
2020 bus_dmamap_destroy(sc->sc_dmat, data->map);
2021 }
2022}
2023
2024int
2025iwx_alloc_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring, int qid)
2026{
2027 bus_addr_t paddr;
2028 bus_size_t size;
2029 int i, err;
2030 size_t bc_tbl_size;
2031 bus_size_t bc_align;
2032
2033 ring->qid = qid;
2034 ring->queued = 0;
2035 ring->cur = 0;
2036 ring->cur_hw = 0;
2037 ring->tail = 0;
2038 ring->tail_hw = 0;
2039
2040 /* Allocate TX descriptors (256-byte aligned). */
2041 size = IWX_TX_RING_COUNT * sizeof(struct iwx_tfh_tfd);
2042 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
2043 if (err) {
2044 printf("%s: could not allocate TX ring DMA memory\n",
2045 DEVNAME(sc));
2046 goto fail;
2047 }
2048 ring->desc = ring->desc_dma.vaddr;
2049
2050 /*
2051 * The hardware supports up to 512 Tx rings which is more
2052 * than we currently need.
2053 *
2054 * In DQA mode we use 1 command queue + 1 default queue for
2055 * management, control, and non-QoS data frames.
2056 * The command queue is sc->txq[0], our default queue is sc->txq[1].
2057 *
2058 * Tx aggregation requires additional queues, one queue per TID for
2059 * which aggregation is enabled. We map TID 0-7 to sc->txq[2:9].
2060 * Firmware may assign its own internal IDs for these queues
2061 * depending on which TID gets aggregation enabled first.
2062 * The driver maintains a table mapping driver-side queue IDs
2063 * to firmware-side queue IDs.
2064 */
2065
2066 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
2067 bc_tbl_size = sizeof(struct iwx_gen3_bc_tbl_entry) *
2068 IWX_TFD_QUEUE_BC_SIZE_GEN3_AX210;
2069 bc_align = 128;
2070 } else {
2071 bc_tbl_size = sizeof(struct iwx_agn_scd_bc_tbl);
2072 bc_align = 64;
2073 }
2074 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->bc_tbl, bc_tbl_size,
2075 bc_align);
2076 if (err) {
2077 printf("%s: could not allocate byte count table DMA memory\n",
2078 DEVNAME(sc));
2079 goto fail;
2080 }
2081
2082 size = IWX_TX_RING_COUNT * sizeof(struct iwx_device_cmd);
2083 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size,
2084 IWX_FIRST_TB_SIZE_ALIGN);
2085 if (err) {
2086 printf("%s: could not allocate cmd DMA memory\n", DEVNAME(sc));
2087 goto fail;
2088 }
2089 ring->cmd = ring->cmd_dma.vaddr;
2090
2091 paddr = ring->cmd_dma.paddr;
2092 for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2093 struct iwx_tx_data *data = &ring->data[i];
2094 size_t mapsize;
2095
2096 data->cmd_paddr = paddr;
2097 paddr += sizeof(struct iwx_device_cmd);
2098
2099 /* FW commands may require more mapped space than packets. */
2100 if (qid == IWX_DQA_CMD_QUEUE)
2101 mapsize = (sizeof(struct iwx_cmd_header) +
2102 IWX_MAX_CMD_PAYLOAD_SIZE);
2103 else
2104 mapsize = MCLBYTES;
2105 err = bus_dmamap_create(sc->sc_dmat, mapsize,
2106 IWX_TFH_NUM_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT,
2107 &data->map);
2108 if (err) {
2109 printf("%s: could not create TX buf DMA map\n",
2110 DEVNAME(sc));
2111 goto fail;
2112 }
2113 }
2114 KASSERT(paddr == ring->cmd_dma.paddr + size);
2115 return 0;
2116
2117fail: iwx_free_tx_ring(sc, ring);
2118 return err;
2119}
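
The queue-layout comment inside this function fixes the driver-side numbering: txq[0] carries commands, txq[1] is the default queue, and TIDs 0-7 land on txq[2] through txq[9], with an aggqid[] table recording assignments as aggregation is enabled per TID. A toy version of that bookkeeping; FIRST_AGG_TX_QUEUE and the helper are illustrative names, not the driver's:

#include <stdio.h>

#define FIRST_AGG_TX_QUEUE 2	/* TID 0 maps to txq[2] */
#define NUM_TIDS 8

static int aggqid[NUM_TIDS];	/* 0 = no aggregation queue assigned */

static int
tid_to_driver_qid(int tid)
{
	return FIRST_AGG_TX_QUEUE + tid;
}

int
main(void)
{
	int tid = 5;

	/* Recorded when aggregation is enabled for this TID; the
	 * firmware keeps its own internal IDs for the same queues. */
	aggqid[tid] = tid_to_driver_qid(tid);
	printf("TID %d -> txq[%d]\n", tid, aggqid[tid]);
	return 0;
}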
2120
2121void
2122iwx_reset_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
2123{
2124 int i;
2125
2126 for (i = 0; i < IWX_TX_RING_COUNT(256); i++) {
2127 struct iwx_tx_data *data = &ring->data[i];
2128
2129 if (data->m != NULL) {
2130 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
2131 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2132 bus_dmamap_unload(sc->sc_dmat, data->map);
2133 m_freem(data->m);
2134 data->m = NULL;
2135 }
2136 }
2137
2138 /* Clear byte count table. */
2139 memset(ring->bc_tbl.vaddr, 0, ring->bc_tbl.size);
2140
2141 /* Clear TX descriptors. */
2142 memset(ring->desc, 0, ring->desc_dma.size);
2143 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
2144 ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
2145 sc->qfullmsk &= ~(1 << ring->qid);
2146 sc->qenablemsk &= ~(1 << ring->qid);
2147 for (i = 0; i < nitems(sc->aggqid); i++) {
2148 if (sc->aggqid[i] == ring->qid) {
2149 sc->aggqid[i] = 0;
2150 break;
2151 }
2152 }
2153 ring->queued = 0;
2154 ring->cur = 0;
2155 ring->cur_hw = 0;
2156 ring->tail = 0;
2157 ring->tail_hw = 0;
2158 ring->tid = 0;
2159}
2160
2161void
2162iwx_free_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
2163{
2164 int i;
2165
2166 iwx_dma_contig_free(&ring->desc_dma);
2167 iwx_dma_contig_free(&ring->cmd_dma);
2168 iwx_dma_contig_free(&ring->bc_tbl);
2169
2170 for (i = 0; i < IWX_TX_RING_COUNT(256); i++) {
2171 struct iwx_tx_data *data = &ring->data[i];
2172
2173 if (data->m != NULL) {
2174 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
2175 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2176 bus_dmamap_unload(sc->sc_dmat, data->map);
2177 m_freem(data->m);
2178 data->m = NULL;
2179 }
2180 if (data->map != NULL)
2181 bus_dmamap_destroy(sc->sc_dmat, data->map);
2182 }
2183}
2184
2185void
2186iwx_enable_rfkill_int(struct iwx_softc *sc)
2187{
2188 if (!sc->sc_msix) {
2189 sc->sc_intmask = IWX_CSR_INT_BIT_RF_KILL;
2190 IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
2191 } else {
2192 IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2193 sc->sc_fh_init_mask);
2194 IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2195 ~IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL);
2196 sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL;
2197 }
2198
2199 IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
2200 IWX_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
2201}
2202
2203int
2204iwx_check_rfkill(struct iwx_softc *sc)
2205{
2206 uint32_t v;
2207 int rv;
2208
2209 /*
2210 * "documentation" is not really helpful here:
2211 * 27: HW_RF_KILL_SW
2212 * Indicates state of (platform's) hardware RF-Kill switch
2213 *
2214 * But apparently when it's off, it's on ...
2215 */
2216 v = IWX_READ(sc, IWX_CSR_GP_CNTRL);
2217 rv = (v & IWX_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
2218 if (rv) {
2219 sc->sc_flags |= IWX_FLAG_RFKILL;
2220 } else {
2221 sc->sc_flags &= ~IWX_FLAG_RFKILL;
2222 }
2223
2224 return rv;
2225}
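
A two-line distillation of the inverted polarity the comment above grumbles about, matching iwx_check_rfkill(): the driver treats a cleared bit 27 as "rfkill engaged":

#include <stdint.h>
#include <stdio.h>

#define HW_RF_KILL_SW 0x08000000	/* CSR_GP_CNTRL bit 27 */

/* Mirrors the rv computation above: rfkill is flagged when the
 * "kill switch" bit reads 0. */
static int
rfkill_engaged(uint32_t gp_cntrl)
{
	return (gp_cntrl & HW_RF_KILL_SW) == 0;
}

int
main(void)
{
	printf("bit set:   rfkill %d\n", rfkill_engaged(HW_RF_KILL_SW));
	printf("bit clear: rfkill %d\n", rfkill_engaged(0));
	return 0;
}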
2226
2227void
2228iwx_enable_interrupts(struct iwx_softc *sc)
2229{
2230 if (!sc->sc_msix) {
2231 sc->sc_intmask = IWX_CSR_INI_SET_MASK;
2232 IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
2233 } else {
2234 /*
2235 * fh/hw_mask keeps all the unmasked causes.
2236 * Unlike MSI, an MSI-X cause is enabled while its mask bit is unset.
2237 */
2238 sc->sc_hw_mask = sc->sc_hw_init_mask;
2239 sc->sc_fh_mask = sc->sc_fh_init_mask;
2240 IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2241 ~sc->sc_fh_mask);
2242 IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2243 ~sc->sc_hw_mask);
2244 }
2245}
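
The comment above is the key to all the MSI-X register writes in this file: CSR_INT_MASK enables causes by setting bits, while the MSI-X *_INT_MASK_AD registers mask causes, so enabling means clearing bits. In miniature, with stand-in values rather than the driver's register names:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t causes = 0x80000001;		/* e.g. FH_RX | ALIVE */

	uint32_t msi_int_mask = causes;		/* MSI: enable by setting */
	uint32_t msix_mask_ad = ~causes;	/* MSI-X: enable by clearing */

	printf("MSI:   write 0x%08x to the enable register\n", msi_int_mask);
	printf("MSI-X: write 0x%08x to the mask register\n", msix_mask_ad);
	return 0;
}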
2246
2247void
2248iwx_enable_fwload_interrupt(struct iwx_softc *sc)
2249{
2250 if (!sc->sc_msix) {
2251 sc->sc_intmask = IWX_CSR_INT_BIT_ALIVE | IWX_CSR_INT_BIT_FH_RX;
2252 IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
2253 } else {
2254 IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2255 ~IWX_MSIX_HW_INT_CAUSES_REG_ALIVE);
2256 sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_ALIVE;
2257 /*
2258 * Leave all the FH causes enabled to get the ALIVE
2259 * notification.
2260 */
2261 IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2262 ~sc->sc_fh_init_mask);
2263 sc->sc_fh_mask = sc->sc_fh_init_mask;
2264 }
2265}
2266
2267void
2268iwx_restore_interrupts(struct iwx_softc *sc)
2269{
2270 IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
2271}
2272
2273void
2274iwx_disable_interrupts(struct iwx_softc *sc)
2275{
2276 if (!sc->sc_msix) {
2277 IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
2278
2279 /* acknowledge all interrupts */
2280 IWX_WRITE(sc, IWX_CSR_INT, ~0);
2281 IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
2282 } else {
2283 IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2284 sc->sc_fh_init_mask);
2285 IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2286 sc->sc_hw_init_mask);
2287 }
2288}
2289
2290void
2291iwx_ict_reset(struct iwx_softc *sc)
2292{
2293 iwx_disable_interrupts(sc);
2294
2295 memset(sc->ict_dma.vaddr, 0, IWX_ICT_SIZE);
2296 sc->ict_cur = 0;
2297
2298 /* Set physical address of ICT (4KB aligned). */
2299 IWX_WRITE(sc, IWX_CSR_DRAM_INT_TBL_REG,
2300 IWX_CSR_DRAM_INT_TBL_ENABLE
2301 | IWX_CSR_DRAM_INIT_TBL_WRAP_CHECK
2302 | IWX_CSR_DRAM_INIT_TBL_WRITE_POINTER
2303 | sc->ict_dma.paddr >> IWX_ICT_PADDR_SHIFT);
2304
2305 /* Switch to ICT interrupt mode in driver. */
2306 sc->sc_flags |= IWX_FLAG_USE_ICT;
2307
2308 IWX_WRITE(sc, IWX_CSR_INT, ~0);
2309 iwx_enable_interrupts(sc);
2310}
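
Because the ICT table is 4 KB aligned, its low 12 address bits are implicitly zero, so CSR_DRAM_INT_TBL_REG stores the physical address shifted right by IWX_ICT_PADDR_SHIFT alongside the control flags in the top bits. A sketch of that packing, with flag values copied from the macro expansion above:

#include <stdint.h>
#include <stdio.h>

#define DRAM_INT_TBL_ENABLE		(1U << 31)
#define DRAM_INIT_TBL_WRAP_CHECK	(1U << 27)
#define DRAM_INIT_TBL_WRITE_POINTER	(1U << 28)
#define ICT_PADDR_SHIFT			12	/* table is 4 KB aligned */

/* Pack a 4 KB-aligned table address and control flags into the
 * register value, as iwx_ict_reset() does. */
static uint32_t
ict_reg_val(uint64_t paddr)
{
	return DRAM_INT_TBL_ENABLE | DRAM_INIT_TBL_WRAP_CHECK |
	    DRAM_INIT_TBL_WRITE_POINTER | (uint32_t)(paddr >> ICT_PADDR_SHIFT);
}

int
main(void)
{
	printf("0x%08x\n", ict_reg_val(0x12345000ULL));
	return 0;
}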
2311
2312#define IWX_HW_READY_TIMEOUT 50
2313int
2314iwx_set_hw_ready(struct iwx_softc *sc)
2315{
2316 int ready;
2317
2318 IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2319 IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
2320
2321 ready = iwx_poll_bit(sc, IWX_CSR_HW_IF_CONFIG_REG,
2322 IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
2323 IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
2324 IWX_HW_READY_TIMEOUT);
2325 if (ready)
2326 IWX_SETBITS(sc, IWX_CSR_MBOX_SET_REG,
2327 IWX_CSR_MBOX_SET_REG_OS_ALIVE);
2328
2329 return ready;
2330}
2331#undef IWX_HW_READY_TIMEOUT
2332
2333int
2334iwx_prepare_card_hw(struct iwx_softc *sc)
2335{
2336 int t = 0;
2337 int ntries;
2338
2339 if (iwx_set_hw_ready(sc))
2340 return 0;
2341
2342 IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2343 IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2344 DELAY(1000);
2345
2346 for (ntries = 0; ntries < 10; ntries++) {
2347 /* If HW is not ready, prepare the conditions to check again */
2348 IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2349 IWX_CSR_HW_IF_CONFIG_REG_PREPARE);
2350
2351 do {
2352 if (iwx_set_hw_ready(sc))
2353 return 0;
2354 DELAY(200);
2355 t += 200;
2356 } while (t < 150000);
2357 DELAY(25000);
2358 }
2359
2360 return ETIMEDOUT;
2361}
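
One subtlety in iwx_prepare_card_hw() is that t is never reset between outer attempts, so only the first attempt polls for the full 150 ms; the remaining nine probe once each. A standalone tally of the worst-case wait, which comes to roughly 0.4 s rather than the ~1.75 s a reset-per-attempt reading would suggest:

#include <stdio.h>

int
main(void)
{
	int ntries, t = 0, total_us = 0;

	for (ntries = 0; ntries < 10; ntries++) {
		do {
			total_us += 200;	/* DELAY(200) per probe */
			t += 200;		/* never reset, as above */
		} while (t < 150000);
		total_us += 25000;		/* DELAY(25000) between tries */
	}
	printf("worst case: ~%d ms\n", total_us / 1000);	/* ~401 ms */
	return 0;
}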
2362
2363int
2364iwx_force_power_gating(struct iwx_softc *sc)
2365{
2366 int err;
2367
2368 err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
2369 IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
2370 if (err)
2371 return err;
2372 DELAY(20);
2373 err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
2374 IWX_HPM_HIPM_GEN_CFG_CR_PG_EN |
2375 IWX_HPM_HIPM_GEN_CFG_CR_SLP_EN);
2376 if (err)
2377 return err;
2378 DELAY(20);
2379 err = iwx_clear_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
2380 IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
2381 return err;
2382}
2383
2384void
2385iwx_apm_config(struct iwx_softc *sc)
2386{
2387 pcireg_t lctl, cap;
2388
2389 /*
2390 * L0S states have been found to be unstable with our devices
2391 * and in newer hardware they are not officially supported at
2392 * all, so we must always set the L0S_DISABLED bit.
2393 */
2394 IWX_SETBITS(sc, IWX_CSR_GIO_REG, IWX_CSR_GIO_REG_VAL_L0S_DISABLED);
2395
2396 lctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
2397 sc->sc_cap_off + PCI_PCIE_LCSR);
2398 sc->sc_pm_support = !(lctl & PCI_PCIE_LCSR_ASPM_L0S);
2399 cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
2400 sc->sc_cap_off + PCI_PCIE_DCSR2);
2401 sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
2402 DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
2403 DEVNAME(sc),
2404 (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
2405 sc->sc_ltr_enabled ? "En" : "Dis"));
2406}
2407
2408/*
2409 * Start up NIC's basic functionality after it has been reset
2410 * e.g. after platform boot or shutdown.
2411 * NOTE: This does not load uCode nor start the embedded processor
2412 */
2413int
2414iwx_apm_init(struct iwx_softc *sc)
2415{
2416 int err = 0;
2417
2418 /*
2419 * Disable L0s without affecting L1;
2420 * don't wait for ICH L0s (ICH bug W/A)
2421 */
2422 IWX_SETBITS(sc, IWX_CSR_GIO_CHICKEN_BITS,
2423 IWX_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
2424
2425 /* Set FH wait threshold to maximum (HW error during stress W/A) */
2426 IWX_SETBITS(sc, IWX_CSR_DBG_HPET_MEM_REG, IWX_CSR_DBG_HPET_MEM_REG_VAL);
2427
2428 /*
2429 * Enable HAP INTA (interrupt from management bus) to
2430 * wake device's PCI Express link L1a -> L0s
2431 */
2432 IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2433 IWX_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
2434
2435 iwx_apm_config(sc);
2436
2437 /*
2438 * Set "initialization complete" bit to move adapter from
2439 * D0U* --> D0A* (powered-up active) state.
2440 */
2441 IWX_SETBITS(sc, IWX_CSR_GP_CNTRL, IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2442
2443 /*
2444 * Wait for clock stabilization; once stabilized, access to
2445 * device-internal resources is supported, e.g. iwx_write_prph()
2446 * and accesses to uCode SRAM.
2447 */
2448 if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
2449 IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
2450 IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
2451 printf("%s: timeout waiting for clock stabilization\n",
2452 DEVNAME(sc));
2453 err = ETIMEDOUT;
2454 goto out;
2455 }
2456 out:
2457 if (err)
2458 printf("%s: apm init error %d\n", DEVNAME(sc), err);
2459 return err;
2460}
2461
2462void
2463iwx_apm_stop(struct iwx_softc *sc)
2464{
2465 IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2466 IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2467 IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2468 IWX_CSR_HW_IF_CONFIG_REG_PREPARE |
2469 IWX_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
2470 DELAY(1000);
2471 IWX_CLRBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2472 IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2473 DELAY(5000);
2474
2475 /* stop device's busmaster DMA activity */
2476 IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_STOP_MASTER);
2477
2478 if (!iwx_poll_bit(sc, IWX_CSR_RESET,
2479 IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED,
2480 IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
2481 printf("%s: timeout waiting for master\n", DEVNAME(sc));
2482
2483 /*
2484 * Clear "initialization complete" bit to move adapter from
2485 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
2486 */
2487 IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
2488 IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2489}
2490
2491void
2492iwx_init_msix_hw(struct iwx_softc *sc)
2493{
2494 iwx_conf_msix_hw(sc, 0);
2495
2496 if (!sc->sc_msix)
2497 return;
2498
2499 sc->sc_fh_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_FH_INT_MASK_AD);
2500 sc->sc_fh_mask = sc->sc_fh_init_mask;
2501 sc->sc_hw_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_HW_INT_MASK_AD);
2502 sc->sc_hw_mask = sc->sc_hw_init_mask;
2503}
2504
2505void
2506iwx_conf_msix_hw(struct iwx_softc *sc, int stopped)
2507{
2508 int vector = 0;
2509
2510 if (!sc->sc_msix) {
2511 /* Newer chips default to MSIX. */
2512 if (!stopped && iwx_nic_lock(sc)) {
2513 iwx_write_umac_prph(sc, IWX_UREG_CHICK0xa05c00,
2514 IWX_UREG_CHICK_MSI_ENABLE(1 << 24));
2515 iwx_nic_unlock(sc);
2516 }
2517 return;
2518 }
2519
2520 if (!stopped && iwx_nic_lock(sc)) {
2521 iwx_write_umac_prph(sc, IWX_UREG_CHICK0xa05c00,
2522 IWX_UREG_CHICK_MSIX_ENABLE(1 << 25));
2523 iwx_nic_unlock(sc);
2524 }
2525
2526 /* Disable all interrupts */
2527 IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, ~0)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) +
0x804))), ((~0))))
;
2528 IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, ~0)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) +
0x80C))), ((~0))))
;
2529
2530 /* Map fallback-queue (command/mgmt) to a single vector */
2531 IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(0),(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) +
0x880) + (0)))), ((vector | (1 << 7)))))
2532 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE)(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) +
0x880) + (0)))), ((vector | (1 << 7)))))
;
2533 /* Map RSS queue (data) to the same vector */
2534 IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(1),(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) +
0x880) + (1)))), ((vector | (1 << 7)))))
2535 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE)(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) +
0x880) + (1)))), ((vector | (1 << 7)))))
;
2536
2537 /* Enable the RX queues cause interrupts */
2538 IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) +
0x804))), (((((sc)->sc_st)->read_4(((sc)->sc_sh), (
(((0x2000) + 0x804))))) & ~(IWX_MSIX_FH_INT_CAUSES_Q0 | IWX_MSIX_FH_INT_CAUSES_Q1
)))))
2539 IWX_MSIX_FH_INT_CAUSES_Q0 | IWX_MSIX_FH_INT_CAUSES_Q1)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) +
0x804))), (((((sc)->sc_st)->read_4(((sc)->sc_sh), (
(((0x2000) + 0x804))))) & ~(IWX_MSIX_FH_INT_CAUSES_Q0 | IWX_MSIX_FH_INT_CAUSES_Q1
)))))
;
2540
2541 /* Map non-RX causes to the same vector */
2542 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH0_NUM),(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) +
0x890) + (IWX_MSIX_IVAR_CAUSE_D2S_CH0_NUM)))), ((vector | (1
<< 7)))))
2543 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE)(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) +
0x890) + (IWX_MSIX_IVAR_CAUSE_D2S_CH0_NUM)))), ((vector | (1
<< 7)))))
;
2544 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH1_NUM),(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) +
0x890) + (IWX_MSIX_IVAR_CAUSE_D2S_CH1_NUM)))), ((vector | (1
<< 7)))))
2545 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE)(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) +
0x890) + (IWX_MSIX_IVAR_CAUSE_D2S_CH1_NUM)))), ((vector | (1
<< 7)))))
;
2546 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_S2D),(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) +
0x890) + (IWX_MSIX_IVAR_CAUSE_S2D)))), ((vector | (1 <<
7)))))
2547 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE)(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) +
0x890) + (IWX_MSIX_IVAR_CAUSE_S2D)))), ((vector | (1 <<
7)))))
;
2548 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_FH_ERR),(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) +
0x890) + (IWX_MSIX_IVAR_CAUSE_FH_ERR)))), ((vector | (1 <<
7)))))
2549 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE)(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) +
0x890) + (IWX_MSIX_IVAR_CAUSE_FH_ERR)))), ((vector | (1 <<
7)))))
;
2550 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_ALIVE),(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) +
0x890) + (IWX_MSIX_IVAR_CAUSE_REG_ALIVE)))), ((vector | (1 <<
7)))))
2551 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE)(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) +
0x890) + (IWX_MSIX_IVAR_CAUSE_REG_ALIVE)))), ((vector | (1 <<
7)))))
;
2552 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_WAKEUP),(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) +
0x890) + (IWX_MSIX_IVAR_CAUSE_REG_WAKEUP)))), ((vector | (1 <<
7)))))
2553 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE)(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) +
0x890) + (IWX_MSIX_IVAR_CAUSE_REG_WAKEUP)))), ((vector | (1 <<
7)))))
;
2554 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RESET_DONE),(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) +
0x890) + (IWX_MSIX_IVAR_CAUSE_REG_RESET_DONE)))), ((vector |
(1 << 7)))))
2555 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE)(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) +
0x890) + (IWX_MSIX_IVAR_CAUSE_REG_RESET_DONE)))), ((vector |
(1 << 7)))))
;
2556 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_CT_KILL),(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) +
0x890) + (IWX_MSIX_IVAR_CAUSE_REG_CT_KILL)))), ((vector | (1
<< 7)))))
2557 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE)(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) +
0x890) + (IWX_MSIX_IVAR_CAUSE_REG_CT_KILL)))), ((vector | (1
<< 7)))))
;
2558 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RF_KILL),(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) +
0x890) + (IWX_MSIX_IVAR_CAUSE_REG_RF_KILL)))), ((vector | (1
<< 7)))))
2559 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE)(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) +
0x890) + (IWX_MSIX_IVAR_CAUSE_REG_RF_KILL)))), ((vector | (1
<< 7)))))
;
2560 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_PERIODIC),(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) +
0x890) + (IWX_MSIX_IVAR_CAUSE_REG_PERIODIC)))), ((vector | (
1 << 7)))))
2561 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE)(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) +
0x890) + (IWX_MSIX_IVAR_CAUSE_REG_PERIODIC)))), ((vector | (
1 << 7)))))
;
2562 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SW_ERR),(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) +
0x890) + (IWX_MSIX_IVAR_CAUSE_REG_SW_ERR)))), ((vector | (1 <<
7)))))
2563 vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE)(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) +
0x890) + (IWX_MSIX_IVAR_CAUSE_REG_SW_ERR)))), ((vector | (1 <<
7)))))
;
2564 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SCD),(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) +
0x890) + (IWX_MSIX_IVAR_CAUSE_REG_SCD)))), ((vector | (1 <<
7)))))
2565     vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2566 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_FH_TX),
2567     vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2568 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HW_ERR),
2569     vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2570 IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HAP),
2571     vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2572
2573 /* Enable non-RX causes interrupts */
2574 IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2575     IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
2576     IWX_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
2577     IWX_MSIX_FH_INT_CAUSES_S2D |
2578     IWX_MSIX_FH_INT_CAUSES_FH_ERR);
2579 IWX_CLRBITS(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2580     IWX_MSIX_HW_INT_CAUSES_REG_ALIVE |
2581     IWX_MSIX_HW_INT_CAUSES_REG_WAKEUP |
2582     IWX_MSIX_HW_INT_CAUSES_REG_RESET_DONE |
2583     IWX_MSIX_HW_INT_CAUSES_REG_CT_KILL |
2584     IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL |
2585     IWX_MSIX_HW_INT_CAUSES_REG_PERIODIC |
2586     IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR |
2587     IWX_MSIX_HW_INT_CAUSES_REG_SCD |
2588     IWX_MSIX_HW_INT_CAUSES_REG_FH_TX |
2589     IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR |
2590     IWX_MSIX_HW_INT_CAUSES_REG_HAP);
2591}
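For reference, the macro expansions in the analyzer output show that each IWX_CSR_MSIX_IVAR(cause) write above is a single-byte access at offset 0x2000 + 0x890 + cause, and that IWX_MSIX_NON_AUTO_CLEAR_CAUSE sets bit 7 of the vector value. A minimal sketch of that encoding (the helper name is hypothetical, not part of the driver):

    /* Sketch: program one MSI-X IVAR entry; bit 7 marks the cause non-auto-clear. */
    static inline void
    ivar_sketch(struct iwx_softc *sc, uint32_t cause, uint8_t vector)
    {
        IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(cause),
            vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE); /* vector | (1 << 7) */
    }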
2592
2593int
2594iwx_clear_persistence_bit(struct iwx_softc *sc)
2595{
2596 uint32_t hpm, wprot;
2597
2598 hpm = iwx_read_prph_unlocked(sc, IWX_HPM_DEBUG);
2599 if (hpm != 0xa5a5a5a0 && (hpm & IWX_PERSISTENCE_BIT)) {
2600 wprot = iwx_read_prph_unlocked(sc, IWX_PREG_PRPH_WPROT_22000);
2601 if (wprot & IWX_PREG_WFPM_ACCESS) {
2602 printf("%s: cannot clear persistence bit\n",
2603     DEVNAME(sc));
2604 return EPERM;
2605 }
2606 iwx_write_prph_unlocked(sc, IWX_HPM_DEBUG,
2607     hpm & ~IWX_PERSISTENCE_BIT);
2608 }
2609
2610 return 0;
2611}
2612
2613int
2614iwx_start_hw(struct iwx_softc *sc)
2615{
2616 int err;
2617
2618 err = iwx_prepare_card_hw(sc);
2619 if (err)
2620 return err;
2621
2622 if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000) {
2623 err = iwx_clear_persistence_bit(sc);
2624 if (err)
2625 return err;
2626 }
2627
2628 /* Reset the entire device */
2629 IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2630 DELAY(5000);
2631
2632 if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000 &&
2633     sc->sc_integrated) {
2634 IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
2635     IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2636 DELAY(20);
2637 if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
2638     IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
2639     IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
2640 printf("%s: timeout waiting for clock stabilization\n",
2641     DEVNAME(sc));
2642 return ETIMEDOUT;
2643 }
2644
2645 err = iwx_force_power_gating(sc);
2646 if (err)
2647 return err;
2648
2649 /* Reset the entire device */
2650 IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2651 DELAY(5000);
2652 }
2653
2654 err = iwx_apm_init(sc);
2655 if (err)
2656 return err;
2657
2658 iwx_init_msix_hw(sc);
2659
2660 iwx_enable_rfkill_int(sc);
2661 iwx_check_rfkill(sc);
2662
2663 return 0;
2664}
2665
2666void
2667iwx_stop_device(struct iwx_softc *sc)
2668{
2669 struct ieee80211com *ic = &sc->sc_ic;
2670 struct ieee80211_node *ni = ic->ic_bss;
2671 int i;
2672
2673 iwx_disable_interrupts(sc);
2674 sc->sc_flags &= ~IWX_FLAG_USE_ICT;
2675
2676 iwx_disable_rx_dma(sc);
2677 iwx_reset_rx_ring(sc, &sc->rxq);
2678 for (i = 0; i < nitems(sc->txq); i++)
2679 iwx_reset_tx_ring(sc, &sc->txq[i]);
2680 for (i = 0; i < IEEE80211_NUM_TID; i++) {
2681 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
2682 if (ba->ba_state != IEEE80211_BA_AGREED)
2683 continue;
2684 ieee80211_delba_request(ic, ni, 0, 1, i);
2685 }
2686
2687 /* Make sure (redundant) we've released our request to stay awake */
2688 IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
2689     IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2690 if (sc->sc_nic_locks > 0)
2691 printf("%s: %d active NIC locks forcefully cleared\n",
2692     DEVNAME(sc), sc->sc_nic_locks);
2693 sc->sc_nic_locks = 0;
2694
2695 /* Stop the device, and put it in low power state */
2696 iwx_apm_stop(sc);
2697
2698 /* Reset the on-board processor. */
2699 IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2700 DELAY(5000);
2701
2702 /*
2703 * Upon stop, the IVAR table gets erased, so msi-x won't
2704 * work. This causes a bug in RF-KILL flows, since the interrupt
2705 * that enables radio won't fire on the correct irq, and the
2706 * driver won't be able to handle the interrupt.
2707 * Configure the IVAR table again after reset.
2708 */
2709 iwx_conf_msix_hw(sc, 1);
2710
2711 /*
2712 * Upon stop, the APM issues an interrupt if HW RF kill is set.
2713 * Clear the interrupt again.
2714 */
2715 iwx_disable_interrupts(sc);
2716
2717 /* Even though we stop the HW we still want the RF kill interrupt. */
2718 iwx_enable_rfkill_int(sc);
2719 iwx_check_rfkill(sc);
2720
2721 iwx_prepare_card_hw(sc);
2722
2723 iwx_ctxt_info_free_paging(sc);
2724 iwx_dma_contig_free(&sc->pnvm_dma);
2725}
2726
2727void
2728iwx_nic_config(struct iwx_softc *sc)
2729{
2730 uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
2731 uint32_t mask, val, reg_val = 0;
2732
2733 radio_cfg_type = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_TYPE) >>
2734     IWX_FW_PHY_CFG_RADIO_TYPE_POS;
2735 radio_cfg_step = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_STEP) >>
2736     IWX_FW_PHY_CFG_RADIO_STEP_POS;
2737 radio_cfg_dash = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_DASH) >>
2738     IWX_FW_PHY_CFG_RADIO_DASH_POS;
2739
2740 reg_val |= IWX_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
2741     IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
2742 reg_val |= IWX_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
2743     IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
2744
2745 /* radio configuration */
2746 reg_val |= radio_cfg_type << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
2747 reg_val |= radio_cfg_step << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
2748 reg_val |= radio_cfg_dash << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
2749
2750 mask = IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
2751     IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
2752     IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
2753     IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
2754     IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
2755     IWX_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
2756     IWX_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
2757
2758 val = IWX_READ(sc, IWX_CSR_HW_IF_CONFIG_REG);
2759 val &= ~mask;
2760 val |= reg_val;
2761 IWX_WRITE(sc, IWX_CSR_HW_IF_CONFIG_REG, val);
2762}
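The field masks elided from the cleaned-up listing above are still visible in the analyzer's macro expansions; they are reproduced here as a reference sketch of the IWX_CSR_HW_IF_CONFIG_REG layout (values taken from the expansions, not re-verified against the header):

    /* Sketch of the register layout, recovered from the expansions: */
    /* MSK_MAC_DASH 0x00000003 (bits 0-1),   MSK_MAC_STEP 0x0000000C (bits 2-3)   */
    /* MSK_PHY_TYPE 0x00000C00 (bits 10-11), MSK_PHY_DASH 0x00003000 (bits 12-13) */
    /* MSK_PHY_STEP 0x0000C000 (bits 14-15)                                       */
    /* BIT_MAC_SI   0x00000100,              BIT_RADIO_SI 0x00000200              */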
2763
2764int
2765iwx_nic_rx_init(struct iwx_softc *sc)
2766{
2767 IWX_WRITE_1(sc, IWX_CSR_INT_COALESCING, IWX_HOST_INT_TIMEOUT_DEF);
2768
2769 /*
2770 * We don't configure the RFH; the firmware will do that.
2771 * Rx descriptors are set when firmware sends an ALIVE interrupt.
2772 */
2773 return 0;
2774}
2775
2776int
2777iwx_nic_init(struct iwx_softc *sc)
2778{
2779 int err;
2780
2781 iwx_apm_init(sc);
2782 if (sc->sc_device_family < IWX_DEVICE_FAMILY_AX210)
2783 iwx_nic_config(sc);
2784
2785 err = iwx_nic_rx_init(sc);
2786 if (err)
2787 return err;
2788
2789 IWX_SETBITS(sc, IWX_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
2790
2791 return 0;
2792}
2793
2794/* Map a TID to an ieee80211_edca_ac category. */
2795 const uint8_t iwx_tid_to_ac[IWX_MAX_TID_COUNT] = {
2796 EDCA_AC_BE,
2797 EDCA_AC_BK,
2798 EDCA_AC_BK,
2799 EDCA_AC_BE,
2800 EDCA_AC_VI,
2801 EDCA_AC_VI,
2802 EDCA_AC_VO,
2803 EDCA_AC_VO,
2804};
2805
2806/* Map ieee80211_edca_ac categories to firmware Tx FIFO. */
2807const uint8_t iwx_ac_to_tx_fifo[] = {
2808 IWX_GEN2_EDCA_TX_FIFO_BE,
2809 IWX_GEN2_EDCA_TX_FIFO_BK,
2810 IWX_GEN2_EDCA_TX_FIFO_VI,
2811 IWX_GEN2_EDCA_TX_FIFO_VO,
2812};
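Taken together, the two tables map a TID to a firmware Tx FIFO in two steps. A minimal sketch of the composition (the helper iwx_tid_to_tx_fifo() is hypothetical and not part of the driver; callers would be expected to bounds-check tid against IWX_MAX_TID_COUNT first):

    static inline uint8_t
    iwx_tid_to_tx_fifo(uint8_t tid)
    {
        /* TID -> EDCA access category -> GEN2 EDCA Tx FIFO. */
        return iwx_ac_to_tx_fifo[iwx_tid_to_ac[tid]];
    }

For example, TID 4 maps to EDCA_AC_VI and thus to IWX_GEN2_EDCA_TX_FIFO_VI.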
2813
2814int
2815iwx_enable_txq(struct iwx_softc *sc, int sta_id, int qid, int tid,
2816 int num_slots)
2817{
2818 struct iwx_rx_packet *pkt;
2819 struct iwx_tx_queue_cfg_rsp *resp;
2820 struct iwx_tx_queue_cfg_cmd cmd_v0;
2821 struct iwx_scd_queue_cfg_cmd cmd_v3;
2822 struct iwx_host_cmd hcmd = {
2823 .flags = IWX_CMD_WANT_RESP,
2824 .resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2825 };
2826 struct iwx_tx_ring *ring = &sc->txq[qid];
2827 int err, fwqid, cmd_ver;
2828 uint32_t wr_idx;
2829 size_t resp_len;
2830
2831 iwx_reset_tx_ring(sc, ring);
2832
2833 cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
2834     IWX_SCD_QUEUE_CONFIG_CMD);
2835 if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN) {
2836 memset(&cmd_v0, 0, sizeof(cmd_v0));
2837 cmd_v0.sta_id = sta_id;
2838 cmd_v0.tid = tid;
2839 cmd_v0.flags = htole16(IWX_TX_QUEUE_CFG_ENABLE_QUEUE);
2840 cmd_v0.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
2841 cmd_v0.byte_cnt_addr = htole64(ring->bc_tbl.paddr);
2842 cmd_v0.tfdq_addr = htole64(ring->desc_dma.paddr);
2843 hcmd.id = IWX_SCD_QUEUE_CFG;
2844 hcmd.data[0] = &cmd_v0;
2845 hcmd.len[0] = sizeof(cmd_v0);
2846 } else if (cmd_ver == 3) {
2847 memset(&cmd_v3, 0, sizeof(cmd_v3));
2848 cmd_v3.operation = htole32(IWX_SCD_QUEUE_ADD);
2849 cmd_v3.u.add.tfdq_dram_addr = htole64(ring->desc_dma.paddr);
2850 cmd_v3.u.add.bc_dram_addr = htole64(ring->bc_tbl.paddr);
2851 cmd_v3.u.add.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
2852 cmd_v3.u.add.flags = htole32(0);
2853 cmd_v3.u.add.sta_mask = htole32(1 << sta_id);
2854 cmd_v3.u.add.tid = tid;
2855 hcmd.id = IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
2856     IWX_SCD_QUEUE_CONFIG_CMD);
2857 hcmd.data[0] = &cmd_v3;
2858 hcmd.len[0] = sizeof(cmd_v3);
2859 } else {
2860 printf("%s: unsupported SCD_QUEUE_CFG command version %d\n",
2861     DEVNAME(sc), cmd_ver);
2862 return ENOTSUP;
2863 }
2864
2865 err = iwx_send_cmd(sc, &hcmd);
2866 if (err)
2867 return err;
2868
2869 pkt = hcmd.resp_pkt;
2870 if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
2871 err = EIO;
2872 goto out;
2873 }
2874
2875 resp_len = iwx_rx_packet_payload_len(pkt);
2876 if (resp_len != sizeof(*resp)) {
2877 err = EIO;
2878 goto out;
2879 }
2880
2881 resp = (void *)pkt->data;
2882 fwqid = le16toh(resp->queue_number);
2883 wr_idx = le16toh(resp->write_pointer);
2884
2885 /* Unlike iwlwifi, we do not support dynamic queue ID assignment. */
2886 if (fwqid != qid) {
2887 err = EIO;
2888 goto out;
2889 }
2890
2891 if (wr_idx != ring->cur_hw) {
2892 err = EIO;
2893 goto out;
2894 }
2895
2896 sc->qenablemsk |= (1 << qid);
2897 ring->tid = tid;
2898 out:
2899 iwx_free_resp(sc, &hcmd);
2900 return err;
2901}
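A note on the cb_size encoding used by both command versions above: the analyzer's expansion of IWX_TFD_QUEUE_CB_SIZE(n) computes fls(n) - 1 - 3, i.e. log2(n) - 3 for power-of-two ring sizes. A worked example, assuming a 256-slot ring (IWX_TX_RING_COUNT expands to 256 elsewhere in this listing):

    /* fls(256) - 1 = log2(256) = 8, so the encoded circular-buffer size is 8 - 3 = 5. */
    cmd_v0.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(256)); /* encodes 5 */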
2902
2903int
2904iwx_disable_txq(struct iwx_softc *sc, int sta_id, int qid, uint8_t tid)
2905{
2906 struct iwx_rx_packet *pkt;
2907 struct iwx_tx_queue_cfg_rsp *resp;
2908 struct iwx_tx_queue_cfg_cmd cmd_v0;
2909 struct iwx_scd_queue_cfg_cmd cmd_v3;
2910 struct iwx_host_cmd hcmd = {
2911 .flags = IWX_CMD_WANT_RESP,
2912 .resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2913 };
2914 struct iwx_tx_ring *ring = &sc->txq[qid];
2915 int err, cmd_ver;
2916
2917 cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
2918     IWX_SCD_QUEUE_CONFIG_CMD);
2919 if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN) {
2920 memset(&cmd_v0, 0, sizeof(cmd_v0));
2921 cmd_v0.sta_id = sta_id;
2922 cmd_v0.tid = tid;
2923 cmd_v0.flags = htole16(0); /* clear "queue enabled" flag */
2924 cmd_v0.cb_size = htole32(0);
2925 cmd_v0.byte_cnt_addr = htole64(0);
2926 cmd_v0.tfdq_addr = htole64(0);
2927 hcmd.id = IWX_SCD_QUEUE_CFG;
2928 hcmd.data[0] = &cmd_v0;
2929 hcmd.len[0] = sizeof(cmd_v0);
2930 } else if (cmd_ver == 3) {
2931 memset(&cmd_v3, 0, sizeof(cmd_v3));
2932 cmd_v3.operation = htole32(IWX_SCD_QUEUE_REMOVE);
2933 cmd_v3.u.remove.sta_mask = htole32(1 << sta_id);
2934 cmd_v3.u.remove.tid = tid;
2935 hcmd.id = IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
2936     IWX_SCD_QUEUE_CONFIG_CMD);
2937 hcmd.data[0] = &cmd_v3;
2938 hcmd.len[0] = sizeof(cmd_v3);
2939 } else {
2940 printf("%s: unsupported SCD_QUEUE_CFG command version %d\n",
2941     DEVNAME(sc), cmd_ver);
2942 return ENOTSUP;
2943 }
2944
2945 err = iwx_send_cmd(sc, &hcmd);
2946 if (err)
2947 return err;
2948
2949 pkt = hcmd.resp_pkt;
2950 if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
2951 err = EIO;
2952 goto out;
2953 }
2954
2955 sc->qenablemsk &= ~(1 << qid);
2956 iwx_reset_tx_ring(sc, ring);
2957 out:
2958 iwx_free_resp(sc, &hcmd);
2959 return err;
2960}
2961
2962void
2963iwx_post_alive(struct iwx_softc *sc)
2964{
2965 int txcmd_ver;
2966
2967 iwx_ict_reset(sc);
2968
2969 txcmd_ver = iwx_lookup_notif_ver(sc, IWX_LONG_GROUP, IWX_TX_CMD);
2970 if (txcmd_ver != IWX_FW_CMD_VER_UNKNOWN && txcmd_ver > 6)
2971 sc->sc_rate_n_flags_version = 2;
2972 else
2973 sc->sc_rate_n_flags_version = 1;
2974
2975 txcmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_TX_CMD);
2976}
2977
2978int
2979iwx_schedule_session_protection(struct iwx_softc *sc, struct iwx_node *in,
2980 uint32_t duration_tu)
2981{
2982 struct iwx_session_prot_cmd cmd = {
2983 .id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
2984     in->in_color)),
2985 .action = htole32(IWX_FW_CTXT_ACTION_ADD),
2986 .conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
2987 .duration_tu = htole32(duration_tu),
2988 };
2989 uint32_t cmd_id;
2990 int err;
2991
2992 cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
2993 err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
2994 if (!err)
2995 sc->sc_flags |= IWX_FLAG_TE_ACTIVE;
2996 return err;
2997}
2998
2999void
3000iwx_unprotect_session(struct iwx_softc *sc, struct iwx_node *in)
3001{
3002 struct iwx_session_prot_cmd cmd = {
3003 .id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
3004     in->in_color)),
3005 .action = htole32(IWX_FW_CTXT_ACTION_REMOVE),
3006 .conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
3007 .duration_tu = 0,
3008 };
3009 uint32_t cmd_id;
3010
3011 /* Do nothing if the time event has already ended. */
3012 if ((sc->sc_flags & IWX_FLAG_TE_ACTIVE) == 0)
3013 return;
3014
3015 cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
3016 if (iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd) == 0)
3017 sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
3018}
3019
3020/*
3021 * NVM read access and content parsing. We do not support
3022 * external NVM or writing NVM.
3023 */
3024
3025uint8_t
3026iwx_fw_valid_tx_ant(struct iwx_softc *sc)
3027{
3028 uint8_t tx_ant;
3029
3030 tx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_TX_CHAIN)
3031     >> IWX_FW_PHY_CFG_TX_CHAIN_POS);
3032
3033 if (sc->sc_nvm.valid_tx_ant)
3034 tx_ant &= sc->sc_nvm.valid_tx_ant;
3035
3036 return tx_ant;
3037}
3038
3039uint8_t
3040iwx_fw_valid_rx_ant(struct iwx_softc *sc)
3041{
3042 uint8_t rx_ant;
3043
3044 rx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RX_CHAIN)
3045     >> IWX_FW_PHY_CFG_RX_CHAIN_POS);
3046
3047 if (sc->sc_nvm.valid_rx_ant)
3048 rx_ant &= sc->sc_nvm.valid_rx_ant;
3049
3050 return rx_ant;
3051}
3052
3053void
3054iwx_init_channel_map(struct iwx_softc *sc, uint16_t *channel_profile_v3,
3055 uint32_t *channel_profile_v4, int nchan_profile)
3056{
3057 struct ieee80211com *ic = &sc->sc_ic;
3058 struct iwx_nvm_data *data = &sc->sc_nvm;
3059 int ch_idx;
3060 struct ieee80211_channel *channel;
3061 uint32_t ch_flags;
3062 int is_5ghz;
3063 int flags, hw_value;
3064 int nchan;
3065 const uint8_t *nvm_channels;
3066
3067 if (sc->sc_uhb_supported) {
3068 nchan = nitems(iwx_nvm_channels_uhb);
3069 nvm_channels = iwx_nvm_channels_uhb;
3070 } else {
3071 nchan = nitems(iwx_nvm_channels_8000);
3072 nvm_channels = iwx_nvm_channels_8000;
3073 }
3074
3075 for (ch_idx = 0; ch_idx < nchan && ch_idx < nchan_profile; ch_idx++) {
3076 if (channel_profile_v4)
3077 ch_flags = le32_to_cpup(channel_profile_v4 + ch_idx);
3078 else
3079 ch_flags = le16_to_cpup(channel_profile_v3 + ch_idx);
3080
3081 /* net80211 cannot handle 6 GHz channel numbers yet */
3082 if (ch_idx >= IWX_NUM_2GHZ_CHANNELS + IWX_NUM_5GHZ_CHANNELS)
3083 break;
3084
3085 is_5ghz = ch_idx >= IWX_NUM_2GHZ_CHANNELS;
3086 if (is_5ghz && !data->sku_cap_band_52GHz_enable)
3087 ch_flags &= ~IWX_NVM_CHANNEL_VALID;
3088
3089 hw_value = nvm_channels[ch_idx];
3090 channel = &ic->ic_channels[hw_value];
3091
3092 if (!(ch_flags & IWX_NVM_CHANNEL_VALID)) {
3093 channel->ic_freq = 0;
3094 channel->ic_flags = 0;
3095 continue;
3096 }
3097
3098 if (!is_5ghz) {
3099 flags = IEEE80211_CHAN_2GHZ;
3100 channel->ic_flags
3101     = IEEE80211_CHAN_CCK
3102     | IEEE80211_CHAN_OFDM
3103     | IEEE80211_CHAN_DYN
3104     | IEEE80211_CHAN_2GHZ;
3105 } else {
3106 flags = IEEE80211_CHAN_5GHZ;
3107 channel->ic_flags =
3108     IEEE80211_CHAN_A;
3109 }
3110 channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
3111
3112 if (!(ch_flags & IWX_NVM_CHANNEL_ACTIVE))
3113 channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
3114
3115 if (data->sku_cap_11n_enable) {
3116 channel->ic_flags |= IEEE80211_CHAN_HT;
3117 if (ch_flags & IWX_NVM_CHANNEL_40MHZ)
3118 channel->ic_flags |= IEEE80211_CHAN_40MHZ;
3119 }
3120
3121 if (is_5ghz && data->sku_cap_11ac_enable) {
3122 channel->ic_flags |= IEEE80211_CHAN_VHT;
3123 if (ch_flags & IWX_NVM_CHANNEL_80MHZ)
3124 channel->ic_xflags |= IEEE80211_CHANX_80MHZ;
3125 }
3126 }
3127}
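The NVM channel flag bits tested above are elided in the cleaned-up listing; the analyzer's expansions give them as follows (a reference sketch, not re-verified against the header):

    /* IWX_NVM_CHANNEL_VALID  (1 << 0)                                   */
    /* IWX_NVM_CHANNEL_ACTIVE (1 << 3)  -- otherwise passive-scan only   */
    /* IWX_NVM_CHANNEL_40MHZ  (1 << 9)                                   */
    /* IWX_NVM_CHANNEL_80MHZ  (1 << 10)                                  */
    /* with IWX_NUM_2GHZ_CHANNELS = 14 and IWX_NUM_5GHZ_CHANNELS = 37    */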
3128
3129int
3130iwx_mimo_enabled(struct iwx_softc *sc)
3131{
3132 struct ieee80211com *ic = &sc->sc_ic;
3133
3134 return !sc->sc_nvm.sku_cap_mimo_disable &&
3135     (ic->ic_userflags & IEEE80211_F_NOMIMO) == 0;
3136}
3137
3138void
3139iwx_setup_ht_rates(struct iwx_softc *sc)
3140{
3141 struct ieee80211com *ic = &sc->sc_ic;
3142 uint8_t rx_ant;
3143
3144 /* TX is supported with the same MCS as RX. */
3145 ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
3146
3147 memset(ic->ic_sup_mcs, 0, sizeof(ic->ic_sup_mcs));
3148 ic->ic_sup_mcs[0] = 0xff; /* MCS 0-7 */
3149
3150 if (!iwx_mimo_enabled(sc))
3151 return;
3152
3153 rx_ant = iwx_fw_valid_rx_ant(sc);
3154 if ((rx_ant & IWX_ANT_AB) == IWX_ANT_AB ||
3155     (rx_ant & IWX_ANT_BC) == IWX_ANT_BC)
3156 ic->ic_sup_mcs[1] = 0xff; /* MCS 8-15 */
3157}
3158
3159void
3160iwx_setup_vht_rates(struct iwx_softc *sc)
3161{
3162 struct ieee80211com *ic = &sc->sc_ic;
3163 uint8_t rx_ant = iwx_fw_valid_rx_ant(sc);
3164 int n;
3165
3166 ic->ic_vht_rxmcs = (IEEE80211_VHT_MCS_0_9 <<
3167     IEEE80211_VHT_MCS_FOR_SS_SHIFT(1));
3168
3169 if (iwx_mimo_enabled(sc) &&
3170     ((rx_ant & IWX_ANT_AB) == IWX_ANT_AB ||
3171     (rx_ant & IWX_ANT_BC) == IWX_ANT_BC)) {
3172 ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_0_9 <<
3173     IEEE80211_VHT_MCS_FOR_SS_SHIFT(2));
3174 } else {
3175 ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_SS_NOT_SUPP <<
3176     IEEE80211_VHT_MCS_FOR_SS_SHIFT(2));
3177 }
3178
3179 for (n = 3; n <= IEEE80211_VHT_NUM_SS; n++) {
3180 ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_SS_NOT_SUPP <<
3181     IEEE80211_VHT_MCS_FOR_SS_SHIFT(n));
3182 }
3183
3184 ic->ic_vht_txmcs = ic->ic_vht_rxmcs;
3185}
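As a worked example of the map construction above, assuming the values shown by the analyzer's expansions (IEEE80211_VHT_MCS_0_9 = 2, IEEE80211_VHT_MCS_SS_NOT_SUPP = 3, shift = 2*(n-1)): with MIMO enabled and a matching antenna pair, SS1 and SS2 each contribute 2 at shifts 0 and 2 while SS3-SS8 contribute 3, so the 16-bit map is:

    uint16_t rxmcs_mimo = 0xfffa; /* MCS 0-9 on SS1 and SS2, higher SS unsupported */
    uint16_t rxmcs_siso = 0xfffe; /* MCS 0-9 on SS1 only */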
3186
3187void
3188iwx_init_reorder_buffer(struct iwx_reorder_buffer *reorder_buf,
3189 uint16_t ssn, uint16_t buf_size)
3190{
3191 reorder_buf->head_sn = ssn;
3192 reorder_buf->num_stored = 0;
3193 reorder_buf->buf_size = buf_size;
3194 reorder_buf->last_amsdu = 0;
3195 reorder_buf->last_sub_index = 0;
3196 reorder_buf->removed = 0;
3197 reorder_buf->valid = 0;
3198 reorder_buf->consec_oldsn_drops = 0;
3199 reorder_buf->consec_oldsn_ampdu_gp2 = 0;
3200 reorder_buf->consec_oldsn_prev_drop = 0;
3201}
3202
3203void
3204iwx_clear_reorder_buffer(struct iwx_softc *sc, struct iwx_rxba_data *rxba)
3205{
3206 int i;
3207 struct iwx_reorder_buffer *reorder_buf = &rxba->reorder_buf;
3208 struct iwx_reorder_buf_entry *entry;
3209
3210 for (i = 0; i < reorder_buf->buf_size; i++) {
3211 entry = &rxba->entries[i];
3212 ml_purge(&entry->frames);
3213 timerclear(&entry->reorder_time);
3214 }
3215
3216 reorder_buf->removed = 1;
3217 timeout_del(&reorder_buf->reorder_timer);
3218 timerclear(&rxba->last_rx);
3219 timeout_del(&rxba->session_timer);
3220 rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
3221}
3222
3223 #define RX_REORDER_BUF_TIMEOUT_MQ_USEC (100000ULL)
3224
3225void
3226iwx_rx_ba_session_expired(void *arg)
3227{
3228 struct iwx_rxba_data *rxba = arg;
3229 struct iwx_softc *sc = rxba->sc;
3230 struct ieee80211com *ic = &sc->sc_ic;
3231 struct ieee80211_node *ni = ic->ic_bss;
3232 struct timeval now, timeout, expiry;
3233 int s;
3234
3235 s = splnet();
3236 if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0 &&
3237     ic->ic_state == IEEE80211_S_RUN &&
3238     rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) {
3239 getmicrouptime(&now);
3240 USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
3241 timeradd(&rxba->last_rx, &timeout, &expiry);
3242 if (timercmp(&now, &expiry, <)) {
3243 timeout_add_usec(&rxba->session_timer, rxba->timeout);
3244 } else {
3245 ic->ic_stats.is_ht_rx_ba_timeout++;
3246 ieee80211_delba_request(ic, ni,
3247     IEEE80211_REASON_TIMEOUT, 0, rxba->tid);
3248 }
3249 }
3250 splx(s);
3251}
3252
3253void
3254iwx_rx_bar_frame_release(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
3255 struct mbuf_list *ml)
3256{
3257 struct ieee80211com *ic = &sc->sc_ic;
3258 struct ieee80211_node *ni = ic->ic_bss;
3259 struct iwx_bar_frame_release *release = (void *)pkt->data;
3260 struct iwx_reorder_buffer *buf;
3261 struct iwx_rxba_data *rxba;
3262 unsigned int baid, nssn, sta_id, tid;
3263
3264 if (iwx_rx_packet_payload_len(pkt) < sizeof(*release))
3265 return;
3266
3267 baid = (le32toh(release->ba_info) & IWX_BAR_FRAME_RELEASE_BAID_MASK) >>
3268     IWX_BAR_FRAME_RELEASE_BAID_SHIFT;
3269 if (baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
3270     baid >= nitems(sc->sc_rxba_data))
3271 return;
3272
3273 rxba = &sc->sc_rxba_data[baid];
3274 if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID)
3275 return;
3276
3277 tid = le32toh(release->sta_tid) & IWX_BAR_FRAME_RELEASE_TID_MASK;
3278 sta_id = (le32toh(release->sta_tid) &
3279     IWX_BAR_FRAME_RELEASE_STA_MASK) >> IWX_BAR_FRAME_RELEASE_STA_SHIFT;
3280 if (tid != rxba->tid || rxba->sta_id != IWX_STATION_ID)
3281 return;
3282
3283 nssn = le32toh(release->ba_info) & IWX_BAR_FRAME_RELEASE_NSSN_MASK;
3284 buf = &rxba->reorder_buf;
3285 iwx_release_frames(sc, ni, rxba, buf, nssn, ml);
3286}
3287
3288void
3289iwx_reorder_timer_expired(void *arg)
3290{
3291 struct mbuf_list ml = MBUF_LIST_INITIALIZER();
3292 struct iwx_reorder_buffer *buf = arg;
3293 struct iwx_rxba_data *rxba = iwx_rxba_data_from_reorder_buf(buf);
3294 struct iwx_reorder_buf_entry *entries = &rxba->entries[0];
3295 struct iwx_softc *sc = rxba->sc;
3296 struct ieee80211com *ic = &sc->sc_ic;
3297 struct ieee80211_node *ni = ic->ic_bss;
3298 int i, s;
3299 uint16_t sn = 0, index = 0;
3300 int expired = 0;
3301 int cont = 0;
3302 struct timeval now, timeout, expiry;
3303
3304 if (!buf->num_stored || buf->removed)
3305 return;
3306
3307 s = splnet();
3308 getmicrouptime(&now);
3309 USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
3310
3311 for (i = 0; i < buf->buf_size ; i++) {
3312 index = (buf->head_sn + i) % buf->buf_size;
3313
3314 if (ml_empty(&entries[index].frames)) {
3315 /*
3316 * If there is a hole and the next frame didn't expire
3317 * we want to break and not advance SN.
3318 */
3319 cont = 0;
3320 continue;
3321 }
3322 timeradd(&entries[index].reorder_time, &timeout, &expiry);
3323 if (!cont && timercmp(&now, &expiry, <))
3324 break;
3325
3326 expired = 1;
3327 /* continue until next hole after this expired frame */
3328 cont = 1;
3329 sn = (buf->head_sn + (i + 1)) & 0xfff;
3330 }
3331
3332 if (expired) {
3333 /* SN is set to the last expired frame + 1 */
3334 iwx_release_frames(sc, ni, rxba, buf, sn, &ml);
3335 if_input(&sc->sc_ic.ic_if, &ml);
3336 ic->ic_stats.is_ht_rx_ba_window_gap_timeout++;
3337 } else {
3338 /*
3339 * If no frame expired and there are stored frames, index is now
3340 * pointing to the first unexpired frame - modify reorder timeout
3341 * accordingly.
3342 */
3343 timeout_add_usec(&buf->reorder_timer,
3344     RX_REORDER_BUF_TIMEOUT_MQ_USEC);
3345 }
3346
3347 splx(s);
3348}
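The sequence-number arithmetic above relies on 802.11's 12-bit sequence space. A minimal sketch of the wrap-around step used when advancing past an expired frame (a hypothetical helper, not part of the driver; it matches the `sn = (buf->head_sn + (i + 1)) & 0xfff;` computation in the loop):

    static inline uint16_t
    reorder_advance_sn(uint16_t head_sn, uint16_t offset)
    {
        return (head_sn + offset) & 0xfff; /* sequence numbers wrap at 4096 */
    }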
3349
3350 #define IWX_MAX_RX_BA_SESSIONS 16
3351
3352struct iwx_rxba_data *
3353iwx_find_rxba_data(struct iwx_softc *sc, uint8_t tid)
3354{
3355 int i;
3356
3357 for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
3358 if (sc->sc_rxba_data[i].baid ==
3359     IWX_RX_REORDER_DATA_INVALID_BAID)
3360 continue;
3361 if (sc->sc_rxba_data[i].tid == tid)
3362 return &sc->sc_rxba_data[i];
3363 }
3364
3365 return NULL;
3366}
3367
3368int
3369iwx_sta_rx_agg_baid_cfg_cmd(struct iwx_softc *sc, struct ieee80211_node *ni,
3370 uint8_t tid, uint16_t ssn, uint16_t winsize, int timeout_val, int start,
3371 uint8_t *baid)
3372{
3373 struct iwx_rx_baid_cfg_cmd cmd;
3374 uint32_t new_baid = 0;
3375 int err;
3376
3377 splassert(IPL_NET);
3378
3379 memset(&cmd, 0, sizeof(cmd));
3380
3381 if (start) {
3382 cmd.action = IWX_RX_BAID_ACTION_ADD;
3383 cmd.alloc.sta_id_mask = htole32(1 << IWX_STATION_ID);
3384 cmd.alloc.tid = tid;
3385 cmd.alloc.ssn = htole16(ssn);
3386 cmd.alloc.win_size = htole16(winsize);
3387 } else {
3388 struct iwx_rxba_data *rxba;
3389
3390 rxba = iwx_find_rxba_data(sc, tid);
3391 if (rxba == NULL)
3392 return ENOENT;
3393 *baid = rxba->baid;
3394
3395 cmd.action = IWX_RX_BAID_ACTION_REMOVE;
3396 if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
3397     IWX_RX_BAID_ALLOCATION_CONFIG_CMD) == 1) {
3398 cmd.remove_v1.baid = rxba->baid;
3399 } else {
3400 cmd.remove.sta_id_mask = htole32(1 << IWX_STATION_ID);
3401 cmd.remove.tid = tid;
3402 }
3403 }
3404
3405 err = iwx_send_cmd_pdu_status(sc, IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
3406     IWX_RX_BAID_ALLOCATION_CONFIG_CMD), sizeof(cmd), &cmd, &new_baid);
3407 if (err)
3408 return err;
3409
3410 if (start) {
3411 if (new_baid >= nitems(sc->sc_rxba_data))
3412 return ERANGE;
3413 *baid = new_baid;
3414 }
3415
3416 return 0;
3417}
3418
3419int
3420iwx_sta_rx_agg_sta_cmd(struct iwx_softc *sc, struct ieee80211_node *ni,
3421 uint8_t tid, uint16_t ssn, uint16_t winsize, int timeout_val, int start,
3422 uint8_t *baid)
3423{
3424 struct iwx_add_sta_cmd cmd;
3425 struct iwx_node *in = (void *)ni;
3426 int err;
3427 uint32_t status;
3428
3429 splassert(IPL_NET);
3430
3431 memset(&cmd, 0, sizeof(cmd));
3432
3433 cmd.sta_id = IWX_STATION_ID;
3434 cmd.mac_id_n_color
3435     = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
3436 cmd.add_modify = IWX_STA_MODE_MODIFY;
3437
3438 if (start) {
3439 cmd.add_immediate_ba_tid = (uint8_t)tid;
3440 cmd.add_immediate_ba_ssn = htole16(ssn);
3441 cmd.rx_ba_window = htole16(winsize);
3442 } else {
3443 struct iwx_rxba_data *rxba;
3444
3445 rxba = iwx_find_rxba_data(sc, tid);
3446 if (rxba == NULL)
3447 return ENOENT;
3448 *baid = rxba->baid;
3449
3450 cmd.remove_immediate_ba_tid = (uint8_t)tid;
3451 }
3452 cmd.modify_mask = start ? IWX_STA_MODIFY_ADD_BA_TID :
3453     IWX_STA_MODIFY_REMOVE_BA_TID;
3454
3455 status = IWX_ADD_STA_SUCCESS;
3456 err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(cmd), &cmd,
3457     &status);
3458 if (err)
3459 return err;
3460
3461 if ((status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
3462 return EIO;
3463
3464 if (!(status & IWX_ADD_STA_BAID_VALID_MASK))
3465 return EINVAL;
3466
3467 if (start) {
3468 *baid = (status & IWX_ADD_STA_BAID_MASK) >>
3469     IWX_ADD_STA_BAID_SHIFT;
3470 if (*baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
3471     *baid >= nitems(sc->sc_rxba_data))
3472 return ERANGE;
3473 }
3474
3475 return 0;
3476}
3477
3478void
3479iwx_sta_rx_agg(struct iwx_softc *sc, struct ieee80211_node *ni, uint8_t tid,
3480 uint16_t ssn, uint16_t winsize, int timeout_val, int start)
3481{
3482 struct ieee80211com *ic = &sc->sc_ic;
3483 int err, s;
3484 struct iwx_rxba_data *rxba = NULL;
3485 uint8_t baid = 0;
3486
3487 s = splnet();
3488
3489 if (start && sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS) {
3490 ieee80211_addba_req_refuse(ic, ni, tid);
3491 splx(s);
3492 return;
3493 }
3494
3495 if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_BAID_ML_SUPPORT)) {
3496 err = iwx_sta_rx_agg_baid_cfg_cmd(sc, ni, tid, ssn, winsize,
3497 timeout_val, start, &baid);
3498 } else {
3499 err = iwx_sta_rx_agg_sta_cmd(sc, ni, tid, ssn, winsize,
3500 timeout_val, start, &baid);
3501 }
3502 if (err) {
3503 ieee80211_addba_req_refuse(ic, ni, tid);
3504 splx(s);
3505 return;
3506 }
3507
3508 rxba = &sc->sc_rxba_data[baid];
3509
3510 /* Deaggregation is done in hardware. */
3511 if (start) {
3512 if (rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) {
3513 ieee80211_addba_req_refuse(ic, ni, tid);
3514 splx(s);
3515 return;
3516 }
3517 rxba->sta_id = IWX_STATION_ID;
3518 rxba->tid = tid;
3519 rxba->baid = baid;
3520 rxba->timeout = timeout_val;
3521 getmicrouptime(&rxba->last_rx);
3522 iwx_init_reorder_buffer(&rxba->reorder_buf, ssn,
3523 winsize);
3524 if (timeout_val != 0) {
3525 struct ieee80211_rx_ba *ba;
3526 timeout_add_usec(&rxba->session_timer,
3527 timeout_val);
3528 /* XXX disable net80211's BA timeout handler */
3529 ba = &ni->ni_rx_ba[tid];
3530 ba->ba_timeout_val = 0;
3531 }
3532 } else
3533 iwx_clear_reorder_buffer(sc, rxba);
3534
3535 if (start) {
3536 sc->sc_rx_ba_sessions++;
3537 ieee80211_addba_req_accept(ic, ni, tid);
3538 } else if (sc->sc_rx_ba_sessions > 0)
3539 sc->sc_rx_ba_sessions--;
3540
3541 splx(s);
3542}
3543
3544void
3545iwx_mac_ctxt_task(void *arg)
3546{
3547 struct iwx_softc *sc = arg;
3548 struct ieee80211com *ic = &sc->sc_ic;
3549 struct iwx_node *in = (void *)ic->ic_bss;
3550 int err, s = splnet();
3551
3552 if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) ||
3553     ic->ic_state != IEEE80211_S_RUN) {
3554 refcnt_rele_wake(&sc->task_refs);
3555 splx(s);
3556 return;
3557 }
3558
3559 err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
3560 if (err)
3561 printf("%s: failed to update MAC\n", DEVNAME(sc));
3562
3563 iwx_unprotect_session(sc, in);
3564
3565 refcnt_rele_wake(&sc->task_refs);
3566 splx(s);
3567}
3568
3569void
3570iwx_phy_ctxt_task(void *arg)
3571{
3572 struct iwx_softc *sc = arg;
3573 struct ieee80211com *ic = &sc->sc_ic;
3574 struct iwx_node *in = (void *)ic->ic_bss;
3575 struct ieee80211_node *ni = &in->in_ni;
3576 uint8_t chains, sco, vht_chan_width;
3577 int err, s = splnet();
3578
3579 if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) ||
3580     ic->ic_state != IEEE80211_S_RUN ||
3581     in->in_phyctxt == NULL) {
3582 refcnt_rele_wake(&sc->task_refs);
3583 splx(s);
3584 return;
3585 }
3586
3587 chains = iwx_mimo_enabled(sc) ? 2 : 1;
3588 if ((ni->ni_flags & IEEE80211_NODE_HT) &&
3589     IEEE80211_CHAN_40MHZ_ALLOWED(ni->ni_chan) &&
3590     ieee80211_node_supports_ht_chan40(ni))
3591 sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
3592 else
3593 sco = IEEE80211_HTOP0_SCO_SCN;
3594 if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
3595     IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
3596     ieee80211_node_supports_vht_chan80(ni))
3597 vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
3598 else
3599 vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
3600 if (in->in_phyctxt->sco != sco ||
3601     in->in_phyctxt->vht_chan_width != vht_chan_width) {
3602 err = iwx_phy_ctxt_update(sc, in->in_phyctxt,
3603     in->in_phyctxt->channel, chains, chains, 0, sco,
3604     vht_chan_width);
3605 if (err)
3606 printf("%s: failed to update PHY\n", DEVNAME(sc));
3607 }
3608
3609 refcnt_rele_wake(&sc->task_refs);
3610 splx(s);
3611}
3612
3613void
3614iwx_updatechan(struct ieee80211com *ic)
3615{
3616 struct iwx_softc *sc = ic->ic_softc;
3617
3618 if (ic->ic_state == IEEE80211_S_RUN &&
3619     !task_pending(&sc->newstate_task))
3620 iwx_add_task(sc, systq, &sc->phy_ctxt_task);
3621}
3622
3623void
3624iwx_updateprot(struct ieee80211com *ic)
3625{
3626 struct iwx_softc *sc = ic->ic_softc;
3627
3628 if (ic->ic_state == IEEE80211_S_RUN &&
3629     !task_pending(&sc->newstate_task))
3630 iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3631}
3632
3633void
3634iwx_updateslot(struct ieee80211com *ic)
3635{
3636 struct iwx_softc *sc = ic->ic_softc;
3637
3638 if (ic->ic_state == IEEE80211_S_RUN &&
3639     !task_pending(&sc->newstate_task))
3640 iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3641}
3642
3643void
3644iwx_updateedca(struct ieee80211com *ic)
3645{
3646 struct iwx_softc *sc = ic->ic_softc;
3647
3648 if (ic->ic_state == IEEE80211_S_RUN &&
3649     !task_pending(&sc->newstate_task))
3650 iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3651}
3652
3653void
3654iwx_updatedtim(struct ieee80211com *ic)
3655{
3656 struct iwx_softc *sc = ic->ic_softc;
3657
3658 if (ic->ic_state == IEEE80211_S_RUN &&
3659     !task_pending(&sc->newstate_task))
3660 iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3661}
3662
3663void
3664iwx_sta_tx_agg_start(struct iwx_softc *sc, struct ieee80211_node *ni,
3665 uint8_t tid)
3666{
3667 struct ieee80211com *ic = &sc->sc_ic;
3668 struct ieee80211_tx_ba *ba;
3669 int err, qid;
3670 struct iwx_tx_ring *ring;
3671
3672 /* Ensure we can map this TID to an aggregation queue. */
3673 if (tid >= IWX_MAX_TID_COUNT)
3674 return;
3675
3676 ba = &ni->ni_tx_ba[tid];
3677 if (ba->ba_state != IEEE80211_BA_REQUESTED)
3678 return;
3679
3680 qid = sc->aggqid[tid];
3681 if (qid == 0) {
3682 /* Firmware should pick the next unused Tx queue. */
3683 qid = fls(sc->qenablemsk);
3684 }
3685
3686 /*
3687 * Simply enable the queue.
3688 * Firmware handles Tx Ba session setup and teardown.
3689 */
3690 if ((sc->qenablemsk & (1 << qid)) == 0) {
3691 if (!iwx_nic_lock(sc)) {
3692 ieee80211_addba_resp_refuse(ic, ni, tid,
3693 IEEE80211_STATUS_UNSPECIFIED);
3694 return;
3695 }
3696 err = iwx_enable_txq(sc, IWX_STATION_ID, qid, tid,
3697     IWX_TX_RING_COUNT);
3698 iwx_nic_unlock(sc);
3699 if (err) {
3700 printf("%s: could not enable Tx queue %d "
3701     "(error %d)\n", DEVNAME(sc), qid, err);
3702 ieee80211_addba_resp_refuse(ic, ni, tid,
3703 IEEE80211_STATUS_UNSPECIFIED);
3704 return;
3705 }
3706
3707 ba->ba_winstart = 0;
3708 } else
3709 ba->ba_winstart = ni->ni_qos_txseqs[tid];
3710
3711 ba->ba_winend = (ba->ba_winstart + ba->ba_winsize - 1) & 0xfff;
3712
3713 ring = &sc->txq[qid];
3714 ba->ba_timeout_val = 0;
3715 ieee80211_addba_resp_accept(ic, ni, tid);
3716 sc->aggqid[tid] = qid;
3717}
3718
3719void
3720iwx_ba_task(void *arg)
3721{
3722 struct iwx_softc *sc = arg;
3723 struct ieee80211com *ic = &sc->sc_ic;
3724 struct ieee80211_node *ni = ic->ic_bss;
3725 int s = splnet();
3726 int tid;
3727
3728 for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
3729 if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
3730 break;
3731 if (sc->ba_rx.start_tidmask & (1 << tid)) {
3732 struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
3733 iwx_sta_rx_agg(sc, ni, tid, ba->ba_winstart,
3734 ba->ba_winsize, ba->ba_timeout_val, 1);
3735 sc->ba_rx.start_tidmask &= ~(1 << tid);
3736 } else if (sc->ba_rx.stop_tidmask & (1 << tid)) {
3737 iwx_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0);
3738 sc->ba_rx.stop_tidmask &= ~(1 << tid);
3739 }
3740 }
3741
3742 for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
3743 if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
3744 break;
3745 if (sc->ba_tx.start_tidmask & (1 << tid)) {
3746 iwx_sta_tx_agg_start(sc, ni, tid);
3747 sc->ba_tx.start_tidmask &= ~(1 << tid);
3748 }
3749 }
3750
3751 refcnt_rele_wake(&sc->task_refs);
3752 splx(s);
3753}
3754
3755/*
3756 * This function is called by upper layer when an ADDBA request is received
3757 * from another STA and before the ADDBA response is sent.
3758 */
3759int
3760iwx_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3761 uint8_t tid)
3762{
3763 struct iwx_softc *sc = IC2IFP(ic)->if_softc;
3764
3765 if (sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS ||
3766     tid >= IWX_MAX_TID_COUNT)
3767 return ENOSPC;
3768
3769 if (sc->ba_rx.start_tidmask & (1 << tid))
3770 return EBUSY;
3771
3772 sc->ba_rx.start_tidmask |= (1 << tid);
3773 iwx_add_task(sc, systq, &sc->ba_task);
3774
3775 return EBUSY;
3776}
3777
3778/*
3779 * This function is called by upper layer on teardown of an HT-immediate
3780 * Block Ack agreement (eg. upon receipt of a DELBA frame).
3781 */
3782void
3783iwx_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
3784 uint8_t tid)
3785{
3786 struct iwx_softc *sc = IC2IFP(ic)->if_softc;
3787
3788 if (tid >= IWX_MAX_TID_COUNT || sc->ba_rx.stop_tidmask & (1 << tid))
3789 return;
3790
3791 sc->ba_rx.stop_tidmask |= (1 << tid);
3792 iwx_add_task(sc, systq, &sc->ba_task);
3793}
3794
3795int
3796iwx_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3797 uint8_t tid)
3798{
3799 struct iwx_softc *sc = IC2IFP(ic)->if_softc;
3800 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
3801
3802 /*
3803 * Require a firmware version which uses an internal AUX queue.
3804 * The value of IWX_FIRST_AGG_TX_QUEUE would be incorrect otherwise.
3805 */
3806 if (sc->first_data_qid != IWX_DQA_CMD_QUEUE + 1)
3807 return ENOTSUP;
3808
3809 /* Ensure we can map this TID to an aggregation queue. */
3810 if (tid >= IWX_MAX_TID_COUNT)
3811 return EINVAL;
3812
3813 /* We only support a fixed Tx aggregation window size, for now. */
3814 if (ba->ba_winsize != IWX_FRAME_LIMIT)
3815 return ENOTSUP;
3816
3817 /* Is firmware already using an agg queue with this TID? */
3818 if (sc->aggqid[tid] != 0)
3819 return ENOSPC;
3820
3821 /* Are we already processing an ADDBA request? */
3822 if (sc->ba_tx.start_tidmask & (1 << tid))
3823 return EBUSY;
3824
3825 sc->ba_tx.start_tidmask |= (1 << tid);
3826 iwx_add_task(sc, systq, &sc->ba_task);
3827
3828 return EBUSY;
3829}
3830
3831void
3832iwx_set_mac_addr_from_csr(struct iwx_softc *sc, struct iwx_nvm_data *data)
3833{
3834 uint32_t mac_addr0, mac_addr1;
3835
3836 memset(data->hw_addr, 0, sizeof(data->hw_addr));
3837
3838 if (!iwx_nic_lock(sc))
3839 return;
3840
3841 mac_addr0 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR0_STRAP(sc)));
3842 mac_addr1 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR1_STRAP(sc)));
3843
3844 iwx_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
3845
3846 /* If OEM fused a valid address, use it instead of the one in OTP. */
3847 if (iwx_is_valid_mac_addr(data->hw_addr)) {
3848 iwx_nic_unlock(sc);
3849 return;
3850 }
3851
3852 mac_addr0 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR0_OTP(sc)));
3853 mac_addr1 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR1_OTP(sc)));
3854
3855 iwx_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
3856
3857 iwx_nic_unlock(sc);
3858}
3859
3860int
3861iwx_is_valid_mac_addr(const uint8_t *addr)
3862{
3863 static const uint8_t reserved_mac[] = {
3864 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3865 };
3866
3867 return (memcmp(reserved_mac, addr, ETHER_ADDR_LEN) != 0 &&
3868     memcmp(etherbroadcastaddr, addr, sizeof(etherbroadcastaddr)) != 0 &&
3869     memcmp(etheranyaddr, addr, sizeof(etheranyaddr)) != 0 &&
3870     !ETHER_IS_MULTICAST(addr));
3871}
3872
3873void
3874iwx_flip_hw_address(uint32_t mac_addr0, uint32_t mac_addr1, uint8_t *dest)
3875{
3876 const uint8_t *hw_addr;
3877
3878 hw_addr = (const uint8_t *)&mac_addr0;
3879 dest[0] = hw_addr[3];
3880 dest[1] = hw_addr[2];
3881 dest[2] = hw_addr[1];
3882 dest[3] = hw_addr[0];
3883
3884 hw_addr = (const uint8_t *)&mac_addr1;
3885 dest[4] = hw_addr[1];
3886 dest[5] = hw_addr[0];
3887}
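A worked example of the byte swizzling above, with illustrative register values only: reading mac_addr0 = 0x00112233 and mac_addr1 = 0x00004455 yields the MAC address 00:11:22:33:44:55, i.e. the strap/OTP registers store the address with each 32-bit word byte-reversed:

    uint8_t mac[6];
    iwx_flip_hw_address(htole32(0x00112233), htole32(0x00004455), mac);
    /* mac[] == { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } */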
3888
3889int
3890iwx_nvm_get(struct iwx_softc *sc)
3891{
3892 struct iwx_nvm_get_info cmd = {};
3893 struct iwx_nvm_data *nvm = &sc->sc_nvm;
3894 struct iwx_host_cmd hcmd = {
3895 .flags = IWX_CMD_WANT_RESP | IWX_CMD_SEND_IN_RFKILL,
3896 .data = { &cmd, },
3897 .len = { sizeof(cmd) },
3898 .id = IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
3899     IWX_NVM_GET_INFO)
3900 };
3901 int err;
3902 uint32_t mac_flags;
3903 /*
3904 * All the values in iwx_nvm_get_info_rsp v4 are the same as
3905 * in v3, except for the channel profile part of the
3906 * regulatory. So we can just access the new struct, with the
3907 * exception of the latter.
3908 */
3909 struct iwx_nvm_get_info_rsp *rsp;
3910 struct iwx_nvm_get_info_rsp_v3 *rsp_v3;
3911 int v4 = isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REGULATORY_NVM_INFO);
3912 size_t resp_len = v4 ? sizeof(*rsp) : sizeof(*rsp_v3);
3913
3914 hcmd.resp_pkt_len = sizeof(struct iwx_rx_packet) + resp_len;
3915 err = iwx_send_cmd(sc, &hcmd);
3916 if (err)
3917 return err;
3918
3919 if (iwx_rx_packet_payload_len(hcmd.resp_pkt) != resp_len) {
3920 err = EIO;
3921 goto out;
3922 }
3923
3924 memset(nvm, 0, sizeof(*nvm));
3925
3926 iwx_set_mac_addr_from_csr(sc, nvm);
3927 if (!iwx_is_valid_mac_addr(nvm->hw_addr)) {
3928 printf("%s: no valid mac address was found\n", DEVNAME(sc));
3929 err = EINVAL;
3930 goto out;
3931 }
3932
3933 rsp = (void *)hcmd.resp_pkt->data;
3934
3935 /* Initialize general data */
3936 nvm->nvm_version = le16toh(rsp->general.nvm_version);
3937 nvm->n_hw_addrs = rsp->general.n_hw_addrs;
3938
3939 /* Initialize MAC sku data */
3940 mac_flags = le32toh(rsp->mac_sku.mac_sku_flags);
3941 nvm->sku_cap_11ac_enable =
3942     !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
3943 nvm->sku_cap_11n_enable =
3944     !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
3945 nvm->sku_cap_11ax_enable =
3946     !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
3947 nvm->sku_cap_band_24GHz_enable =
3948     !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
3949 nvm->sku_cap_band_52GHz_enable =
3950     !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
3951 nvm->sku_cap_mimo_disable =
3952     !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_MIMO_DISABLED);
3953
3954 /* Initialize PHY sku data */
3955 nvm->valid_tx_ant = (uint8_t)le32toh(rsp->phy_sku.tx_chains);
3956 nvm->valid_rx_ant = (uint8_t)le32toh(rsp->phy_sku.rx_chains);
3957
3958 if (le32toh(rsp->regulatory.lar_enabled) &&
3959     isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_SUPPORT)) {
3960 nvm->lar_enabled = 1;
3961 }
3962
3963 if (v4) {
3964 iwx_init_channel_map(sc, NULL,
3965     rsp->regulatory.channel_profile, IWX_NUM_CHANNELS);
3966 } else {
3967 rsp_v3 = (void *)rsp;
3968 iwx_init_channel_map(sc, rsp_v3->regulatory.channel_profile,
3969     NULL, IWX_NUM_CHANNELS_V1);
3970 }
3971out:
3972 iwx_free_resp(sc, &hcmd);
3973 return err;
3974}
3975
3976int
3977iwx_load_firmware(struct iwx_softc *sc)
3978{
3979 struct iwx_fw_sects *fws;
3980 int err;
3981
3982 splassert(IPL_NET);
3983
3984 sc->sc_uc.uc_intr = 0;
3985 sc->sc_uc.uc_ok = 0;
3986
3987 fws = &sc->sc_fw.fw_sects[IWX_UCODE_TYPE_REGULAR];
3988 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
3989 err = iwx_ctxt_info_gen3_init(sc, fws);
3990 else
3991 err = iwx_ctxt_info_init(sc, fws);
3992 if (err) {
3993 printf("%s: could not init context info\n", DEVNAME(sc));
3994 return err;
3995 }
3996
3997 /* wait for the firmware to load */
3998 err = tsleep_nsec(&sc->sc_uc, 0, "iwxuc", SEC_TO_NSEC(1));
3999 if (err || !sc->sc_uc.uc_ok) {
4000 printf("%s: could not load firmware, %d\n", DEVNAME(sc), err);
4001 iwx_ctxt_info_free_paging(sc);
4002 }
4003
4004 iwx_dma_contig_free(&sc->iml_dma);
4005 iwx_ctxt_info_free_fw_img(sc);
4006
4007 if (!sc->sc_uc.uc_ok)
4008 return EINVAL;
4009
4010 return err;
4011}
4012
4013int
4014iwx_start_fw(struct iwx_softc *sc)
4015{
4016 int err;
4017
4018 IWX_WRITE(sc, IWX_CSR_INT, ~0);
4019
4020 iwx_disable_interrupts(sc);
4021
4022 /* make sure rfkill handshake bits are cleared */
4023 IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR, IWX_CSR_UCODE_SW_BIT_RFKILL);
4024 IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR,
4025     IWX_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
4026
4027 /* clear (again), then enable firmware load interrupt */
4028 IWX_WRITE(sc, IWX_CSR_INT, ~0);
4029
4030 err = iwx_nic_init(sc);
4031 if (err) {
4032 printf("%s: unable to init nic\n", DEVNAME(sc));
4033 return err;
4034 }
4035
4036 iwx_enable_fwload_interrupt(sc);
4037
4038 return iwx_load_firmware(sc);
4039}
4040
4041int
4042iwx_pnvm_handle_section(struct iwx_softc *sc, const uint8_t *data,
4043 size_t len)
4044{
4045 const struct iwx_ucode_tlv *tlv;
4046 uint32_t sha1 = 0;
4047 uint16_t mac_type = 0, rf_id = 0;
4048 uint8_t *pnvm_data = NULL, *tmp;
4049 int hw_match = 0;
4050 uint32_t size = 0;
4051 int err;
4052
4053 while (len >= sizeof(*tlv)) {
4054 uint32_t tlv_len, tlv_type;
4055
4056 len -= sizeof(*tlv);
4057 tlv = (const void *)data;
4058
4059 tlv_len = le32toh(tlv->length);
4060 tlv_type = le32toh(tlv->type);
4061
4062 if (len < tlv_len) {
4063 printf("%s: invalid TLV len: %zd/%u\n",
4064     DEVNAME(sc), len, tlv_len);
4065 err = EINVAL;
4066 goto out;
4067 }
4068
4069 data += sizeof(*tlv);
4070
4071 switch (tlv_type) {
4072 case IWX_UCODE_TLV_PNVM_VERSION62:
4073 if (tlv_len < sizeof(uint32_t))
4074 break;
4075
4076 sha1 = le32_to_cpup((const uint32_t *)data)(((__uint32_t)(*(const uint32_t *)((const uint32_t *)data))));
4077 break;
4078 case IWX_UCODE_TLV_HW_TYPE58:
4079 if (tlv_len < 2 * sizeof(uint16_t))
4080 break;
4081
4082 if (hw_match)
4083 break;
4084
4085 mac_type = le16_to_cpup((const uint16_t *)data)(((__uint16_t)(*(const uint16_t *)((const uint16_t *)data))));
4086 rf_id = le16_to_cpup((const uint16_t *)(data +(((__uint16_t)(*(const uint16_t *)((const uint16_t *)(data + sizeof
(uint16_t))))))
4087 sizeof(uint16_t)))(((__uint16_t)(*(const uint16_t *)((const uint16_t *)(data + sizeof
(uint16_t))))))
;
4088
4089 if (mac_type == IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev)(((sc->sc_hw_rev) & 0x000FFF0) >> 4) &&
4090 rf_id == IWX_CSR_HW_RFID_TYPE(sc->sc_hw_rf_id)(((sc->sc_hw_rf_id) & 0x0FFF000) >> 12))
4091 hw_match = 1;
4092 break;
4093 case IWX_UCODE_TLV_SEC_RT19: {
4094 const struct iwx_pnvm_section *section;
4095 uint32_t data_len;
4096
4097 section = (const void *)data;
4098 data_len = tlv_len - sizeof(*section);
4099
4100 /* TODO: remove, this is a deprecated separator */
4101 if (le32_to_cpup((const uint32_t *)data)(((__uint32_t)(*(const uint32_t *)((const uint32_t *)data)))) == 0xddddeeee)
4102 break;
4103
4104 tmp = malloc(size + data_len, M_DEVBUF2,
4105 M_WAITOK0x0001 | M_CANFAIL0x0004 | M_ZERO0x0008);
4106 if (tmp == NULL((void *)0)) {
4107 err = ENOMEM12;
4108 goto out;
4109 }
4110 memcpy(tmp, pnvm_data, size)__builtin_memcpy((tmp), (pnvm_data), (size));
4111 memcpy(tmp + size, section->data, data_len)__builtin_memcpy((tmp + size), (section->data), (data_len)
)
;
4112 free(pnvm_data, M_DEVBUF2, size);
4113 pnvm_data = tmp;
4114 size += data_len;
4115 break;
4116 }
4117 case IWX_UCODE_TLV_PNVM_SKU64:
4118 /* New PNVM section started, stop parsing. */
4119 goto done;
4120 default:
4121 break;
4122 }
4123
4124 if (roundup(tlv_len, 4)((((tlv_len)+((4)-1))/(4))*(4)) > len)
4125 break;
4126 len -= roundup(tlv_len, 4)((((tlv_len)+((4)-1))/(4))*(4));
4127 data += roundup(tlv_len, 4)((((tlv_len)+((4)-1))/(4))*(4));
4128 }
4129done:
4130 if (!hw_match || size == 0) {
4131 err = ENOENT2;
4132 goto out;
4133 }
4134
4135 err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->pnvm_dma, size, 0);
4136 if (err) {
4137 printf("%s: could not allocate DMA memory for PNVM\n",
4138 DEVNAME(sc)((sc)->sc_dev.dv_xname));
4139 err = ENOMEM12;
4140 goto out;
4141 }
4142 memcpy(sc->pnvm_dma.vaddr, pnvm_data, size)__builtin_memcpy((sc->pnvm_dma.vaddr), (pnvm_data), (size)
)
;
4143 iwx_ctxt_info_gen3_set_pnvm(sc);
4144 sc->sc_pnvm_ver = sha1;
4145out:
4146 free(pnvm_data, M_DEVBUF2, size);
4147 return err;
4148}
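/*
 * Illustration (not part of the driver source; layout inferred from the
 * parser above): a PNVM image appears to be a stream of TLV records,
 * each a struct iwx_ucode_tlv header with little-endian type and length
 * words followed by the payload, padded to 4-byte alignment:
 *
 *     [type:4][length:4][payload:length][pad to roundup(length, 4)]
 *
 * IWX_UCODE_TLV_SEC_RT payloads are concatenated into one buffer via
 * the malloc/memcpy/free sequence, and parsing of one section stops
 * when the next IWX_UCODE_TLV_PNVM_SKU record begins.
 */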
4149
4150 int
4151 iwx_pnvm_parse(struct iwx_softc *sc, const uint8_t *data, size_t len)
4152 {
4153     const struct iwx_ucode_tlv *tlv;
4154
4155     while (len >= sizeof(*tlv)) {
4156         uint32_t tlv_len, tlv_type;
4157
4158         len -= sizeof(*tlv);
4159         tlv = (const void *)data;
4160
4161         tlv_len = le32toh(tlv->length);
4162         tlv_type = le32toh(tlv->type);
4163
4164         if (len < tlv_len || roundup(tlv_len, 4) > len)
4165             return EINVAL;
4166
4167         if (tlv_type == IWX_UCODE_TLV_PNVM_SKU) {
4168             const struct iwx_sku_id *sku_id =
4169                 (const void *)(data + sizeof(*tlv));
4170
4171             data += sizeof(*tlv) + roundup(tlv_len, 4);
4172             len -= roundup(tlv_len, 4);
4173
4174             if (sc->sc_sku_id[0] == le32toh(sku_id->data[0]) &&
4175                 sc->sc_sku_id[1] == le32toh(sku_id->data[1]) &&
4176                 sc->sc_sku_id[2] == le32toh(sku_id->data[2]) &&
4177                 iwx_pnvm_handle_section(sc, data, len) == 0)
4178                 return 0;
4179         } else {
4180             data += sizeof(*tlv) + roundup(tlv_len, 4);
4181             len -= roundup(tlv_len, 4);
4182         }
4183     }
4184
4185     return ENOENT;
4186 }
4187
4188 /* Make AX210 firmware loading context point at PNVM image in DMA memory. */
4189 void
4190 iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc *sc)
4191 {
4192     struct iwx_prph_scratch *prph_scratch;
4193     struct iwx_prph_scratch_ctrl_cfg *prph_sc_ctrl;
4194
4195     prph_scratch = sc->prph_scratch_dma.vaddr;
4196     prph_sc_ctrl = &prph_scratch->ctrl_cfg;
4197
4198     prph_sc_ctrl->pnvm_cfg.pnvm_base_addr = htole64(sc->pnvm_dma.paddr);
4199     prph_sc_ctrl->pnvm_cfg.pnvm_size = htole32(sc->pnvm_dma.size);
4200
4201     bus_dmamap_sync(sc->sc_dmat, sc->pnvm_dma.map, 0, sc->pnvm_dma.size,
4202         BUS_DMASYNC_PREWRITE);
4203 }
4204
4205 /*
4206  * Load platform-NVM (non-volatile-memory) data from the filesystem.
4207  * This data apparently contains regulatory information and affects device
4208  * channel configuration.
4209  * The SKU of AX210 devices tells us which PNVM file section is needed.
4210  * Pre-AX210 devices store NVM data onboard.
4211  */
4212 int
4213 iwx_load_pnvm(struct iwx_softc *sc)
4214 {
4215     const int wait_flags = IWX_PNVM_COMPLETE;
4216     int s, err = 0;
4217     u_char *pnvm_data = NULL;
4218     size_t pnvm_size = 0;
4219
4220     if (sc->sc_sku_id[0] == 0 &&
4221         sc->sc_sku_id[1] == 0 &&
4222         sc->sc_sku_id[2] == 0)
4223         return 0;
4224
4225     if (sc->sc_pnvm_name) {
4226         if (sc->pnvm_dma.vaddr == NULL) {
4227             err = loadfirmware(sc->sc_pnvm_name,
4228                 &pnvm_data, &pnvm_size);
4229             if (err) {
4230                 printf("%s: could not read %s (error %d)\n",
4231                     DEVNAME(sc), sc->sc_pnvm_name, err);
4232                 return err;
4233             }
4234
4235             err = iwx_pnvm_parse(sc, pnvm_data, pnvm_size);
4236             if (err && err != ENOENT) {
4237                 free(pnvm_data, M_DEVBUF, pnvm_size);
4238                 return err;
4239             }
4240         } else
4241             iwx_ctxt_info_gen3_set_pnvm(sc);
4242     }
4243
4244     s = splnet();
4245
4246     if (!iwx_nic_lock(sc)) {
4247         splx(s);
4248         free(pnvm_data, M_DEVBUF, pnvm_size);
4249         return EBUSY;
4250     }
4251
4252     /*
4253      * If we don't have a platform NVM file simply ask firmware
4254      * to proceed without it.
4255      */
4256
4257     iwx_write_umac_prph(sc, IWX_UREG_DOORBELL_TO_ISR6,
4258         IWX_UREG_DOORBELL_TO_ISR6_PNVM);
4259
4260     /* Wait for the pnvm complete notification from firmware. */
4261     while ((sc->sc_init_complete & wait_flags) != wait_flags) {
4262         err = tsleep_nsec(&sc->sc_init_complete, 0, "iwxinit",
4263             SEC_TO_NSEC(2));
4264         if (err)
4265             break;
4266     }
4267
4268     splx(s);
4269     iwx_nic_unlock(sc);
4270     free(pnvm_data, M_DEVBUF, pnvm_size);
4271     return err;
4272 }
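/*
 * Sketch of the handshake implied above (an assumption, for
 * illustration): the doorbell write tells firmware to proceed with or
 * without a PNVM image, and the wait loop relies on another context,
 * presumably the interrupt/notification path elsewhere in the driver,
 * to set IWX_PNVM_COMPLETE in sc->sc_init_complete and call
 * wakeup(&sc->sc_init_complete) before the 2-second tsleep_nsec()
 * deadline expires.
 */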
4273
4274 int
4275 iwx_send_tx_ant_cfg(struct iwx_softc *sc, uint8_t valid_tx_ant)
4276 {
4277     struct iwx_tx_ant_cfg_cmd tx_ant_cmd = {
4278         .valid = htole32(valid_tx_ant),
4279     };
4280
4281     return iwx_send_cmd_pdu(sc, IWX_TX_ANT_CONFIGURATION_CMD,
4282         0, sizeof(tx_ant_cmd), &tx_ant_cmd);
4283 }
4284
4285 int
4286 iwx_send_phy_cfg_cmd(struct iwx_softc *sc)
4287 {
4288     struct iwx_phy_cfg_cmd phy_cfg_cmd;
4289
4290     phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
4291     phy_cfg_cmd.calib_control.event_trigger =
4292         sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].event_trigger;
4293     phy_cfg_cmd.calib_control.flow_trigger =
4294         sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].flow_trigger;
4295
4296     return iwx_send_cmd_pdu(sc, IWX_PHY_CONFIGURATION_CMD, 0,
4297         sizeof(phy_cfg_cmd), &phy_cfg_cmd);
4298 }
4299
4300 int
4301 iwx_send_dqa_cmd(struct iwx_softc *sc)
4302 {
4303     struct iwx_dqa_enable_cmd dqa_cmd = {
4304         .cmd_queue = htole32(IWX_DQA_CMD_QUEUE),
4305     };
4306     uint32_t cmd_id;
4307
4308     cmd_id = iwx_cmd_id(IWX_DQA_ENABLE_CMD, IWX_DATA_PATH_GROUP, 0);
4309     return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
4310 }
4311
4312 int
4313 iwx_load_ucode_wait_alive(struct iwx_softc *sc)
4314 {
4315     int err;
4316
4317     err = iwx_read_firmware(sc);
4318     if (err)
4319         return err;
4320
4321     err = iwx_start_fw(sc);
4322     if (err)
4323         return err;
4324
4325     if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4326         err = iwx_load_pnvm(sc);
4327         if (err)
4328             return err;
4329     }
4330
4331     iwx_post_alive(sc);
4332
4333     return 0;
4334 }
4335
4336 int
4337 iwx_run_init_mvm_ucode(struct iwx_softc *sc, int readnvm)
4338 {
4339     const int wait_flags = IWX_INIT_COMPLETE;
4340     struct iwx_nvm_access_complete_cmd nvm_complete = {};
4341     struct iwx_init_extended_cfg_cmd init_cfg = {
4342         .init_flags = htole32(IWX_INIT_NVM),
4343     };
4344     int err, s;
4345
4346     if ((sc->sc_flags & IWX_FLAG_RFKILL) && !readnvm) {
4347         printf("%s: radio is disabled by hardware switch\n",
4348             DEVNAME(sc));
4349         return EPERM;
4350     }
4351
4352     s = splnet();
4353     sc->sc_init_complete = 0;
4354     err = iwx_load_ucode_wait_alive(sc);
4355     if (err) {
4356         printf("%s: failed to load init firmware\n", DEVNAME(sc));
4357         splx(s);
4358         return err;
4359     }
4360
4361     /*
4362      * Send init config command to mark that we are sending NVM
4363      * access commands
4364      */
4365     err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_SYSTEM_GROUP,
4366         IWX_INIT_EXTENDED_CFG_CMD), 0, sizeof(init_cfg), &init_cfg);
4367     if (err) {
4368         splx(s);
4369         return err;
4370     }
4371
4372     err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
4373         IWX_NVM_ACCESS_COMPLETE), 0, sizeof(nvm_complete), &nvm_complete);
4374     if (err) {
4375         splx(s);
4376         return err;
4377     }
4378
4379     /* Wait for the init complete notification from the firmware. */
4380     while ((sc->sc_init_complete & wait_flags) != wait_flags) {
4381         err = tsleep_nsec(&sc->sc_init_complete, 0, "iwxinit",
4382             SEC_TO_NSEC(2));
4383         if (err) {
4384             splx(s);
4385             return err;
4386         }
4387     }
4388     splx(s);
4389     if (readnvm) {
4390         err = iwx_nvm_get(sc);
4391         if (err) {
4392             printf("%s: failed to read nvm\n", DEVNAME(sc));
4393             return err;
4394         }
4395         if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
4396             IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
4397                 sc->sc_nvm.hw_addr);
4398
4399     }
4400     return 0;
4401 }
4402
4403 int
4404 iwx_config_ltr(struct iwx_softc *sc)
4405 {
4406     struct iwx_ltr_config_cmd cmd = {
4407         .flags = htole32(IWX_LTR_CFG_FLAG_FEATURE_ENABLE),
4408     };
4409
4410     if (!sc->sc_ltr_enabled)
4411         return 0;
4412
4413     return iwx_send_cmd_pdu(sc, IWX_LTR_CONFIG, 0, sizeof(cmd), &cmd);
4414 }
4415
4416 void
4417 iwx_update_rx_desc(struct iwx_softc *sc, struct iwx_rx_ring *ring, int idx)
4418 {
4419     struct iwx_rx_data *data = &ring->data[idx];
4420
4421     if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4422         struct iwx_rx_transfer_desc *desc = ring->desc;
4423         desc[idx].rbid = htole16(idx & 0xffff);
4424         desc[idx].addr = htole64(data->map->dm_segs[0].ds_addr);
4425         bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
4426             idx * sizeof(*desc), sizeof(*desc),
4427             BUS_DMASYNC_PREWRITE);
4428     } else {
4429         ((uint64_t *)ring->desc)[idx] =
4430             htole64(data->map->dm_segs[0].ds_addr | (idx & 0x0fff));
4431         bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
4432             idx * sizeof(uint64_t), sizeof(uint64_t),
4433             BUS_DMASYNC_PREWRITE);
4434     }
4435 }
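/*
 * Worked example for the pre-AX210 branch above (illustration only,
 * with an assumed buffer address): given a 4KB-aligned DMA address
 * 0x12345000 and idx 5, the descriptor word becomes
 * 0x12345000 | (5 & 0x0fff) = 0x12345005, i.e. the low 12 bits carry
 * the ring index alongside the buffer address in a single 64-bit word.
 */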
4436
4437 int
4438 iwx_rx_addbuf(struct iwx_softc *sc, int size, int idx)
4439 {
4440     struct iwx_rx_ring *ring = &sc->rxq;
4441     struct iwx_rx_data *data = &ring->data[idx];
4442     struct mbuf *m;
4443     int err;
4444     int fatal = 0;
4445
4446     m = m_gethdr(M_DONTWAIT, MT_DATA);
4447     if (m == NULL)
4448         return ENOBUFS;
4449
4450     if (size <= MCLBYTES) {
4451         MCLGET(m, M_DONTWAIT);
4452     } else {
4453         MCLGETL(m, M_DONTWAIT, IWX_RBUF_SIZE);
4454     }
4455     if ((m->m_flags & M_EXT) == 0) {
4456         m_freem(m);
4457         return ENOBUFS;
4458     }
4459
4460     if (data->m != NULL) {
4461         bus_dmamap_unload(sc->sc_dmat, data->map);
4462         fatal = 1;
4463     }
4464
4465     m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4466     err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4467         BUS_DMA_READ|BUS_DMA_NOWAIT);
4468     if (err) {
4469         /* XXX */
4470         if (fatal)
4471             panic("%s: could not load RX mbuf", DEVNAME(sc));
4472         m_freem(m);
4473         return err;
4474     }
4475     data->m = m;
4476     bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
4477
4478     /* Update RX descriptor. */
4479     iwx_update_rx_desc(sc, ring, idx);
4480
4481     return 0;
4482 }
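/*
 * Note on the cluster sizing above (values taken from the macro
 * expansions in this build): MCLBYTES is 2048 (1 << 11), so requests
 * up to 2KB use a standard cluster via MCLGET, while larger buffers
 * use MCLGETL with IWX_RBUF_SIZE (4096).
 */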
4483
4484 int
4485 iwx_rxmq_get_signal_strength(struct iwx_softc *sc,
4486     struct iwx_rx_mpdu_desc *desc)
4487 {
4488     int energy_a, energy_b;
4489
4490     if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4491         energy_a = desc->v3.energy_a;
4492         energy_b = desc->v3.energy_b;
4493     } else {
4494         energy_a = desc->v1.energy_a;
4495         energy_b = desc->v1.energy_b;
4496     }
4497     energy_a = energy_a ? -energy_a : -256;
4498     energy_b = energy_b ? -energy_b : -256;
4499     return MAX(energy_a, energy_b);
4500 }
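/*
 * Example (illustration): the firmware reports per-chain energy as a
 * positive attenuation value, so energy_a == 60 maps to -60 dBm. A
 * zero reading here is treated as an unused chain and replaced with
 * the -256 sentinel so that MAX() picks the strongest active antenna.
 */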
4501
4502 void
4503 iwx_rx_rx_phy_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
4504     struct iwx_rx_data *data)
4505 {
4506     struct iwx_rx_phy_info *phy_info = (void *)pkt->data;
4507
4508     bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
4509         sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
4510
4511     memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
4512 }
4513
4514 /*
4515  * Retrieve the average noise (in dBm) among receivers.
4516  */
4517 int
4518 iwx_get_noise(const struct iwx_statistics_rx_non_phy *stats)
4519 {
4520     int i, total, nbant, noise;
4521
4522     total = nbant = noise = 0;
4523     for (i = 0; i < 3; i++) {
4524         noise = letoh32(stats->beacon_silence_rssi[i]) & 0xff;
4525         if (noise) {
4526             total += noise;
4527             nbant++;
4528         }
4529     }
4530
4531     /* There should be at least one antenna but check anyway. */
4532     return (nbant == 0) ? -127 : (total / nbant) - 107;
4533 }
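/*
 * Worked example for the formula above (illustration): if two of the
 * three antennas report beacon silence RSSI values of 40 and 50 and
 * the third reports 0, then total = 90, nbant = 2, and the function
 * returns (90 / 2) - 107 = -62 dBm.
 */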
4534
4535 int
4536 iwx_ccmp_decap(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
4537     struct ieee80211_rxinfo *rxi)
4538 {
4539     struct ieee80211com *ic = &sc->sc_ic;
4540     struct ieee80211_key *k;
4541     struct ieee80211_frame *wh;
4542     uint64_t pn, *prsc;
4543     uint8_t *ivp;
4544     uint8_t tid;
4545     int hdrlen, hasqos;
4546
4547     wh = mtod(m, struct ieee80211_frame *);
4548     hdrlen = ieee80211_get_hdrlen(wh);
4549     ivp = (uint8_t *)wh + hdrlen;
4550
4551     /* find key for decryption */
4552     k = ieee80211_get_rxkey(ic, m, ni);
4553     if (k == NULL || k->k_cipher != IEEE80211_CIPHER_CCMP)
4554         return 1;
4555
4556     /* Check that the ExtIV bit is set. */
4557     if (!(ivp[3] & IEEE80211_WEP_EXTIV))
4558         return 1;
4559
4560     hasqos = ieee80211_has_qos(wh);
4561     tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
4562     prsc = &k->k_rsc[tid];
4563
4564     /* Extract the 48-bit PN from the CCMP header. */
4565     pn = (uint64_t)ivp[0] |
4566         (uint64_t)ivp[1] << 8 |
4567         (uint64_t)ivp[4] << 16 |
4568         (uint64_t)ivp[5] << 24 |
4569         (uint64_t)ivp[6] << 32 |
4570         (uint64_t)ivp[7] << 40;
4571     if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) {
4572         if (pn < *prsc) {
4573             ic->ic_stats.is_ccmp_replays++;
4574             return 1;
4575         }
4576     } else if (pn <= *prsc) {
4577         ic->ic_stats.is_ccmp_replays++;
4578         return 1;
4579     }
4580     /* Last seen packet number is updated in ieee80211_inputm(). */
4581
4582     /*
4583      * Some firmware versions strip the MIC, and some don't. It is not
4584      * clear which of the capability flags could tell us what to expect.
4585      * For now, keep things simple and just leave the MIC in place if
4586      * it is present.
4587      *
4588      * The IV will be stripped by ieee80211_inputm().
4589      */
4590     return 0;
4591 }
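/*
 * CCMP header layout implied by the PN extraction above (sketch, for
 * illustration):
 *
 *     ivp[0] PN0   ivp[1] PN1   ivp[2] reserved
 *     ivp[3] key ID and ExtIV flag
 *     ivp[4] PN2   ivp[5] PN3   ivp[6] PN4   ivp[7] PN5
 *
 * which is why bytes 2 and 3 are skipped when the 48-bit packet
 * number is assembled.
 */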
4592
4593 int
4594 iwx_rx_hwdecrypt(struct iwx_softc *sc, struct mbuf *m, uint32_t rx_pkt_status,
4595     struct ieee80211_rxinfo *rxi)
4596 {
4597     struct ieee80211com *ic = &sc->sc_ic;
4598     struct ifnet *ifp = IC2IFP(ic);
4599     struct ieee80211_frame *wh;
4600     struct ieee80211_node *ni;
4601     int ret = 0;
4602     uint8_t type, subtype;
4603
4604     wh = mtod(m, struct ieee80211_frame *);
4605
4606     type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4607     if (type == IEEE80211_FC0_TYPE_CTL)
4608         return 0;
4609
4610     subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4611     if (ieee80211_has_qos(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA))
4612         return 0;
4613
4614     ni = ieee80211_find_rxnode(ic, wh);
4615     /* Handle hardware decryption. */
4616     if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL)
4617         && (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) &&
4618         (ni->ni_flags & IEEE80211_NODE_RXPROT) &&
4619         ((!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4620         ni->ni_rsncipher == IEEE80211_CIPHER_CCMP) ||
4621         (IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4622         ni->ni_rsngroupcipher == IEEE80211_CIPHER_CCMP))) {
4623         if ((rx_pkt_status & IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
4624             IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4625             ic->ic_stats.is_ccmp_dec_errs++;
4626             ret = 1;
4627             goto out;
4628         }
4629         /* Check whether decryption was successful or not. */
4630         if ((rx_pkt_status &
4631             (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
4632             IWX_RX_MPDU_RES_STATUS_MIC_OK)) !=
4633             (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
4634             IWX_RX_MPDU_RES_STATUS_MIC_OK)) {
4635             ic->ic_stats.is_ccmp_dec_errs++;
4636             ret = 1;
4637             goto out;
4638         }
4639         rxi->rxi_flags |= IEEE80211_RXI_HWDEC;
4640     }
4641 out:
4642     if (ret)
4643         ifp->if_ierrors++;
4644     ieee80211_release_node(ic, ni);
4645     return ret;
4646 }
4647
4648 void
4649 iwx_rx_frame(struct iwx_softc *sc, struct mbuf *m, int chanidx,
4650     uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
4651     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
4652     struct mbuf_list *ml)
4653 {
4654     struct ieee80211com *ic = &sc->sc_ic;
4655     struct ifnet *ifp = IC2IFP(ic);
4656     struct ieee80211_frame *wh;
4657     struct ieee80211_node *ni;
4658
4659     if (chanidx < 0 || chanidx >= nitems(ic->ic_channels))
4660         chanidx = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
4661
4662     wh = mtod(m, struct ieee80211_frame *);
4663     ni = ieee80211_find_rxnode(ic, wh);
4664     if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) &&
4665         iwx_ccmp_decap(sc, m, ni, rxi) != 0) {
4666         ifp->if_ierrors++;
4667         m_freem(m);
4668         ieee80211_release_node(ic, ni);
4669         return;
4670     }
4671
4672 #if NBPFILTER > 0
4673     if (sc->sc_drvbpf != NULL) {
4674         struct iwx_rx_radiotap_header *tap = &sc->sc_rxtap;
4675         uint16_t chan_flags;
4676         int have_legacy_rate = 1;
4677         uint8_t mcs, rate;
4678
4679         tap->wr_flags = 0;
4680         if (is_shortpre)
4681             tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
4682         tap->wr_chan_freq =
4683             htole16(ic->ic_channels[chanidx].ic_freq);
4684         chan_flags = ic->ic_channels[chanidx].ic_flags;
4685         if (ic->ic_curmode != IEEE80211_MODE_11N &&
4686             ic->ic_curmode != IEEE80211_MODE_11AC) {
4687             chan_flags &= ~IEEE80211_CHAN_HT;
4688             chan_flags &= ~IEEE80211_CHAN_40MHZ;
4689         }
4690         if (ic->ic_curmode != IEEE80211_MODE_11AC)
4691             chan_flags &= ~IEEE80211_CHAN_VHT;
4692         tap->wr_chan_flags = htole16(chan_flags);
4693         tap->wr_dbm_antsignal = (int8_t)rxi->rxi_rssi;
4694         tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
4695         tap->wr_tsft = device_timestamp;
4696         if (sc->sc_rate_n_flags_version >= 2) {
4697             uint32_t mod_type = (rate_n_flags &
4698                 IWX_RATE_MCS_MOD_TYPE_MSK);
4699             const struct ieee80211_rateset *rs = NULL;
4700             uint32_t ridx;
4701             have_legacy_rate = (mod_type == IWX_RATE_MCS_CCK_MSK ||
4702                 mod_type == IWX_RATE_MCS_LEGACY_OFDM_MSK);
4703             mcs = (rate_n_flags & IWX_RATE_HT_MCS_CODE_MSK);
4704             ridx = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK);
4705             if (mod_type == IWX_RATE_MCS_CCK_MSK)
4706                 rs = &ieee80211_std_rateset_11b;
4707             else if (mod_type == IWX_RATE_MCS_LEGACY_OFDM_MSK)
4708                 rs = &ieee80211_std_rateset_11a;
4709             if (rs && ridx < rs->rs_nrates) {
4710                 rate = (rs->rs_rates[ridx] &
4711                     IEEE80211_RATE_VAL);
4712             } else
4713                 rate = 0;
4714         } else {
4715             have_legacy_rate = ((rate_n_flags &
4716                 (IWX_RATE_MCS_HT_MSK_V1 |
4717                 IWX_RATE_MCS_VHT_MSK_V1)) == 0);
4718             mcs = (rate_n_flags &
4719                 (IWX_RATE_HT_MCS_RATE_CODE_MSK_V1 |
4720                 IWX_RATE_HT_MCS_NSS_MSK_V1));
4721             rate = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK_V1);
4722         }
4723         if (!have_legacy_rate) {
4724             tap->wr_rate = (0x80 | mcs);
4725         } else {
4726             switch (rate) {
4727             /* CCK rates. */
4728             case  10: tap->wr_rate =   2; break;
4729             case  20: tap->wr_rate =   4; break;
4730             case  55: tap->wr_rate =  11; break;
4731             case 110: tap->wr_rate =  22; break;
4732             /* OFDM rates. */
4733             case 0xd: tap->wr_rate =  12; break;
4734             case 0xf: tap->wr_rate =  18; break;
4735             case 0x5: tap->wr_rate =  24; break;
4736             case 0x7: tap->wr_rate =  36; break;
4737             case 0x9: tap->wr_rate =  48; break;
4738             case 0xb: tap->wr_rate =  72; break;
4739             case 0x1: tap->wr_rate =  96; break;
4740             case 0x3: tap->wr_rate = 108; break;
4741             /* Unknown rate: should not happen. */
4742             default:  tap->wr_rate =   0;
4743             }
4744         }
4745
4746         bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
4747             m, BPF_DIRECTION_IN);
4748     }
4749 #endif
4750     ieee80211_inputm(IC2IFP(ic), m, ni, rxi, ml);
4751     ieee80211_release_node(ic, ni);
4752 }
4753
4754 /*
4755  * Drop duplicate 802.11 retransmissions
4756  * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
4757  * and handle pseudo-duplicate frames which result from deaggregation
4758  * of A-MSDU frames in hardware.
4759  */
4760 int
4761 iwx_detect_duplicate(struct iwx_softc *sc, struct mbuf *m,
4762     struct iwx_rx_mpdu_desc *desc, struct ieee80211_rxinfo *rxi)
4763 {
4764     struct ieee80211com *ic = &sc->sc_ic;
4765     struct iwx_node *in = (void *)ic->ic_bss;
4766     struct iwx_rxq_dup_data *dup_data = &in->dup_data;
4767     uint8_t tid = IWX_MAX_TID_COUNT, subframe_idx;
4768     struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4769     uint8_t type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4770     uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4771     int hasqos = ieee80211_has_qos(wh);
4772     uint16_t seq;
4773
4774     if (type == IEEE80211_FC0_TYPE_CTL ||
4775         (hasqos && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) ||
4776         IEEE80211_IS_MULTICAST(wh->i_addr1))
4777         return 0;
4778
4779     if (hasqos) {
4780         tid = (ieee80211_get_qos(wh) & IEEE80211_QOS_TID);
4781         if (tid > IWX_MAX_TID_COUNT)
4782             tid = IWX_MAX_TID_COUNT;
4783     }
4784
4785     /* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
4786     subframe_idx = desc->amsdu_info &
4787         IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
4788
4789     seq = letoh16(*(u_int16_t *)wh->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
4790     if ((wh->i_fc[1] & IEEE80211_FC1_RETRY) &&
4791         dup_data->last_seq[tid] == seq &&
4792         dup_data->last_sub_frame[tid] >= subframe_idx)
4793         return 1;
4794
4795     /*
4796      * Allow the same frame sequence number for all A-MSDU subframes
4797      * following the first subframe.
4798      * Otherwise these subframes would be discarded as replays.
4799      */
4800     if (dup_data->last_seq[tid] == seq &&
4801         subframe_idx > dup_data->last_sub_frame[tid] &&
4802         (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU)) {
4803         rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
4804     }
4805
4806     dup_data->last_seq[tid] = seq;
4807     dup_data->last_sub_frame[tid] = subframe_idx;
4808
4809     return 0;
4810 }
4811
4812 /*
4813  * Returns true if sn2 - buffer_size < sn1 < sn2.
4814  * To be used only in order to compare reorder buffer head with NSSN.
4815  * We fully trust NSSN unless it is behind us due to reorder timeout.
4816  * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
4817  */
4818 int
4819 iwx_is_sn_less(uint16_t sn1, uint16_t sn2, uint16_t buffer_size)
4820 {
4821     return SEQ_LT(sn1, sn2) && !SEQ_LT(sn1, sn2 - buffer_size);
4822 }
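/*
 * Worked example (illustration): sequence numbers are 12-bit values,
 * and in this build SEQ_LT(a, b) expands to ((a - b) & 0xfff) > 2048.
 * With sn1 = 4090 and sn2 = 5, SEQ_LT(4090, 5) holds because
 * (4090 - 5) & 0xfff = 4085 > 2048, so a wrapped NSSN still counts as
 * "ahead" of the buffer head.
 */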
4823
4824 void
4825 iwx_release_frames(struct iwx_softc *sc, struct ieee80211_node *ni,
4826     struct iwx_rxba_data *rxba, struct iwx_reorder_buffer *reorder_buf,
4827     uint16_t nssn, struct mbuf_list *ml)
4828 {
4829     struct iwx_reorder_buf_entry *entries = &rxba->entries[0];
4830     uint16_t ssn = reorder_buf->head_sn;
4831
4832     /* ignore nssn smaller than head sn - this can happen due to timeout */
4833     if (iwx_is_sn_less(nssn, ssn, reorder_buf->buf_size))
4834         goto set_timer;
4835
4836     while (iwx_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
4837         int index = ssn % reorder_buf->buf_size;
4838         struct mbuf *m;
4839         int chanidx, is_shortpre;
4840         uint32_t rx_pkt_status, rate_n_flags, device_timestamp;
4841         struct ieee80211_rxinfo *rxi;
4842
4843         /* This data is the same for all A-MSDU subframes. */
4844         chanidx = entries[index].chanidx;
4845         rx_pkt_status = entries[index].rx_pkt_status;
4846         is_shortpre = entries[index].is_shortpre;
4847         rate_n_flags = entries[index].rate_n_flags;
4848         device_timestamp = entries[index].device_timestamp;
4849         rxi = &entries[index].rxi;
4850
4851         /*
4852          * Empty the list. Will have more than one frame for A-MSDU.
4853          * Empty list is valid as well since nssn indicates frames were
4854          * received.
4855          */
4856         while ((m = ml_dequeue(&entries[index].frames)) != NULL) {
4857             iwx_rx_frame(sc, m, chanidx, rx_pkt_status, is_shortpre,
4858                 rate_n_flags, device_timestamp, rxi, ml);
4859             reorder_buf->num_stored--;
4860
4861             /*
4862              * Allow the same frame sequence number and CCMP PN for
4863              * all A-MSDU subframes following the first subframe.
4864              * Otherwise they would be discarded as replays.
4865              */
4866             rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
4867             rxi->rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
4868         }
4869
4870         ssn = (ssn + 1) & 0xfff;
4871     }
4872     reorder_buf->head_sn = nssn;
4873
4874 set_timer:
4875     if (reorder_buf->num_stored && !reorder_buf->removed) {
4876         timeout_add_usec(&reorder_buf->reorder_timer,
4877             RX_REORDER_BUF_TIMEOUT_MQ_USEC);
4878     } else
4879         timeout_del(&reorder_buf->reorder_timer);
4880 }
4881
4882 int
4883 iwx_oldsn_workaround(struct iwx_softc *sc, struct ieee80211_node *ni, int tid,
4884     struct iwx_reorder_buffer *buffer, uint32_t reorder_data, uint32_t gp2)
4885 {
4886     struct ieee80211com *ic = &sc->sc_ic;
4887
4888     if (gp2 != buffer->consec_oldsn_ampdu_gp2) {
4889         /* we have a new (A-)MPDU ... */
4890
4891         /*
4892          * reset counter to 0 if we didn't have any oldsn in
4893          * the last A-MPDU (as detected by GP2 being identical)
4894          */
4895         if (!buffer->consec_oldsn_prev_drop)
4896             buffer->consec_oldsn_drops = 0;
4897
4898         /* either way, update our tracking state */
4899         buffer->consec_oldsn_ampdu_gp2 = gp2;
4900     } else if (buffer->consec_oldsn_prev_drop) {
4901         /*
4902          * tracking state didn't change, and we had an old SN
4903          * indication before - do nothing in this case, we
4904          * already noted this one down and are waiting for the
4905          * next A-MPDU (by GP2)
4906          */
4907         return 0;
4908     }
4909
4910     /* return unless this MPDU has old SN */
4911     if (!(reorder_data & IWX_RX_MPDU_REORDER_BA_OLD_SN))
4912         return 0;
4913
4914     /* update state */
4915     buffer->consec_oldsn_prev_drop = 1;
4916     buffer->consec_oldsn_drops++;
4917
4918     /* if limit is reached, send del BA and reset state */
4919     if (buffer->consec_oldsn_drops == IWX_AMPDU_CONSEC_DROPS_DELBA) {
4920         ieee80211_delba_request(ic, ni, IEEE80211_REASON_UNSPECIFIED,
4921             0, tid);
4922         buffer->consec_oldsn_prev_drop = 0;
4923         buffer->consec_oldsn_drops = 0;
4924         return 1;
4925     }
4926
4927     return 0;
4928 }
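/*
 * Note on the policy above (values from the macro expansions in this
 * build): IWX_AMPDU_CONSEC_DROPS_DELBA expands to 10, so ten distinct
 * A-MPDUs in a row carrying an old-SN indication cause the driver to
 * tear down the Block Ack session via ieee80211_delba_request() and
 * reset its counters.
 */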
4929
4930 /*
4931  * Handle re-ordering of frames which were de-aggregated in hardware.
4932  * Returns 1 if the MPDU was consumed (buffered or dropped).
4933  * Returns 0 if the MPDU should be passed to upper layer.
4934  */
4935 int
4936 iwx_rx_reorder(struct iwx_softc *sc, struct mbuf *m, int chanidx,
4937     struct iwx_rx_mpdu_desc *desc, int is_shortpre, int rate_n_flags,
4938     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
4939     struct mbuf_list *ml)
4940 {
4941     struct ieee80211com *ic = &sc->sc_ic;
4942     struct ieee80211_frame *wh;
4943     struct ieee80211_node *ni;
4944     struct iwx_rxba_data *rxba;
4945     struct iwx_reorder_buffer *buffer;
4946     uint32_t reorder_data = le32toh(desc->reorder_data);
4947     int is_amsdu = (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU);
4948     int last_subframe =
4949         (desc->amsdu_info & IWX_RX_MPDU_AMSDU_LAST_SUBFRAME);
4950     uint8_t tid;
4951     uint8_t subframe_idx = (desc->amsdu_info &
4952         IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
4953     struct iwx_reorder_buf_entry *entries;
4954     int index;
4955     uint16_t nssn, sn;
4956     uint8_t baid, type, subtype;
4957     int hasqos;
4958
4959     wh = mtod(m, struct ieee80211_frame *);
4960     hasqos = ieee80211_has_qos(wh);
4961     tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
4962
4963     type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4964     subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4965
4966     /*
4967      * We are only interested in Block Ack requests and unicast QoS data.
4968      */
4969     if (IEEE80211_IS_MULTICAST(wh->i_addr1))
4970         return 0;
4971     if (hasqos) {
4972         if (subtype & IEEE80211_FC0_SUBTYPE_NODATA)
4973             return 0;
4974     } else {
4975         if (type != IEEE80211_FC0_TYPE_CTL ||
4976             subtype != IEEE80211_FC0_SUBTYPE_BAR)
4977             return 0;
4978     }
4979
4980     baid = (reorder_data & IWX_RX_MPDU_REORDER_BAID_MASK) >>
4981         IWX_RX_MPDU_REORDER_BAID_SHIFT;
4982     if (baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
4983         baid >= nitems(sc->sc_rxba_data))
4984         return 0;
4985
4986     rxba = &sc->sc_rxba_data[baid];
4987     if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
4988         tid != rxba->tid || rxba->sta_id != IWX_STATION_ID)
4989         return 0;
4990
4991     if (rxba->timeout != 0)
4992         getmicrouptime(&rxba->last_rx);
4993
4994     /* Bypass A-MPDU re-ordering in net80211. */
4995     rxi->rxi_flags |= IEEE80211_RXI_AMPDU_DONE;
4996
4997     nssn = reorder_data & IWX_RX_MPDU_REORDER_NSSN_MASK;
4998     sn = (reorder_data & IWX_RX_MPDU_REORDER_SN_MASK) >>
4999         IWX_RX_MPDU_REORDER_SN_SHIFT;
5000
5001     buffer = &rxba->reorder_buf;
5002     entries = &rxba->entries[0];
5003
5004     if (!buffer->valid) {
5005         if (reorder_data & IWX_RX_MPDU_REORDER_BA_OLD_SN)
5006             return 0;
5007         buffer->valid = 1;
5008     }
5009
5010     ni = ieee80211_find_rxnode(ic, wh);
5011     if (type == IEEE80211_FC0_TYPE_CTL &&
5012         subtype == IEEE80211_FC0_SUBTYPE_BAR) {
5013         iwx_release_frames(sc, ni, rxba, buffer, nssn, ml);
5014         goto drop;
5015     }
5016
5017     /*
5018      * If there was a significant jump in the nssn - adjust.
5019      * If the SN is smaller than the NSSN it might need to first go into
5020      * the reorder buffer, in which case we just release up to it and the
5021      * rest of the function will take care of storing it and releasing up to
5022      * the nssn.
5023      */
5024     if (!iwx_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
5025         buffer->buf_size) ||
5026         !SEQ_LT(sn, buffer->head_sn + buffer->buf_size)) {
5027         uint16_t min_sn = SEQ_LT(sn, nssn) ? sn : nssn;
5028         ic->ic_stats.is_ht_rx_frame_above_ba_winend++;
5029         iwx_release_frames(sc, ni, rxba, buffer, min_sn, ml);
5030     }
5031
5032     if (iwx_oldsn_workaround(sc, ni, tid, buffer, reorder_data,
5033         device_timestamp)) {
5034         /* BA session will be torn down. */
5035         ic->ic_stats.is_ht_rx_ba_window_jump++;
5036         goto drop;
5037
5038     }
5039
5040     /* drop any outdated packets */
5041     if (SEQ_LT(sn, buffer->head_sn)) {
5042         ic->ic_stats.is_ht_rx_frame_below_ba_winstart++;
5043         goto drop;
5044     }
5045
5046     /* release immediately if allowed by nssn and no stored frames */
5047     if (!buffer->num_stored && SEQ_LT(sn, nssn)) {
5048         if (iwx_is_sn_less(buffer->head_sn, nssn, buffer->buf_size) &&
5049             (!is_amsdu || last_subframe))
5050             buffer->head_sn = nssn;
5051         ieee80211_release_node(ic, ni);
5052         return 0;
5053     }
5054
5055     /*
5056      * release immediately if there are no stored frames, and the sn is
5057      * equal to the head.
5058      * This can happen due to reorder timer, where NSSN is behind head_sn.
5059      * When we released everything, and we got the next frame in the
5060      * sequence, according to the NSSN we can't release immediately,
5061      * while technically there is no hole and we can move forward.
5062      */
5063     if (!buffer->num_stored && sn == buffer->head_sn) {
5064         if (!is_amsdu || last_subframe)
5065             buffer->head_sn = (buffer->head_sn + 1) & 0xfff;
5066         ieee80211_release_node(ic, ni);
5067         return 0;
5068     }
5069
5070     index = sn % buffer->buf_size;
5071
5072     /*
5073      * Check if we already stored this frame.
5074      * As an A-MSDU is either received as a whole or not at all, the
5075      * logic is simple: if we have frames at that position in the buffer
5076      * and the last A-MSDU frame had a different SN, then it is a
5077      * retransmission. If it has the same SN, it is the same A-MSDU only
5078      * if the subframe index is incrementing - otherwise it is a
5079      * retransmission.
5080      */
5081     if (!ml_empty(&entries[index].frames)) {
5082         if (!is_amsdu) {
5083             ic->ic_stats.is_ht_rx_ba_no_buf++;
5084             goto drop;
5085         } else if (sn != buffer->last_amsdu ||
5086             buffer->last_sub_index >= subframe_idx) {
5087             ic->ic_stats.is_ht_rx_ba_no_buf++;
5088             goto drop;
5089         }
5090     } else {
5091         /* This data is the same for all A-MSDU subframes. */
5092         entries[index].chanidx = chanidx;
5093         entries[index].is_shortpre = is_shortpre;
5094         entries[index].rate_n_flags = rate_n_flags;
5095         entries[index].device_timestamp = device_timestamp;
5096         memcpy(&entries[index].rxi, rxi, sizeof(entries[index].rxi));
5097     }
5098
5099     /* put in reorder buffer */
5100     ml_enqueue(&entries[index].frames, m);
5101     buffer->num_stored++;
5102     getmicrouptime(&entries[index].reorder_time);
5103
5104     if (is_amsdu) {
5105         buffer->last_amsdu = sn;
5106         buffer->last_sub_index = subframe_idx;
5107     }
5108
5109     /*
5110      * We cannot trust NSSN for AMSDU sub-frames that are not the last.
5111      * The reason is that NSSN advances on the first sub-frame, and may
5112      * cause the reorder buffer to advance before all the sub-frames arrive.
5113      * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with
5114      * SN 1. NSSN for first sub frame will be 3 with the result of driver
5115      * releasing SN 0,1, 2. When sub-frame 1 arrives - reorder buffer is
5116      * already ahead and it will be dropped.
5117      * If the last sub-frame is not on this queue - we will get frame
5118      * release notification with up to date NSSN.
5119      */
5120     if (!is_amsdu || last_subframe)
5121         iwx_release_frames(sc, ni, rxba, buffer, nssn, ml);
5122
5123     ieee80211_release_node(ic, ni);
5124     return 1;
5125
5126 drop:
5127     m_freem(m);
5128     ieee80211_release_node(ic, ni);
5129     return 1;
5130 }
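/*
 * Sketch of the reorder buffer indexing used above (illustration):
 * each BA session buffers frames at index = sn % buf_size, and
 * iwx_release_frames() walks head_sn forward to the NSSN, flushing
 * any stored mbufs along the way:
 *
 *     head_sn ... sn ... nssn      (all modulo 4096)
 *     [buffered]  [in flight]  ->  released in order
 */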
5130
5131 void
5132 iwx_rx_mpdu_mq(struct iwx_softc *sc, struct mbuf *m, void *pktdata,
5133     size_t maxlen, struct mbuf_list *ml)
5134 {
5135     struct ieee80211com *ic = &sc->sc_ic;
5136     struct ieee80211_rxinfo rxi;
5137     struct iwx_rx_mpdu_desc *desc;
5138     uint32_t len, hdrlen, rate_n_flags, device_timestamp;
5139     int rssi;
5140     uint8_t chanidx;
5141     uint16_t phy_info;
5142     size_t desc_size;
5143
5144     if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
5145         desc_size = sizeof(*desc);
5146     else
5147         desc_size = IWX_RX_DESC_SIZE_V1;
5148
5149     if (maxlen < desc_size) {
5150         m_freem(m);
5151         return; /* drop */
5152     }
5153
5154     desc = (struct iwx_rx_mpdu_desc *)pktdata;
5155
5156     if (!(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_CRC_OK)) ||
5157         !(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
5158         m_freem(m);
5159         return; /* drop */
5160     }
5161
5162     len = le16toh(desc->mpdu_len);
5163     if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5164         /* Allow control frames in monitor mode. */
5165         if (len < sizeof(struct ieee80211_frame_cts)) {
5166             ic->ic_stats.is_rx_tooshort++;
5167             IC2IFP(ic)->if_ierrors++;
5168             m_freem(m);
5169             return;
5170         }
5171     } else if (len < sizeof(struct ieee80211_frame)) {
5172         ic->ic_stats.is_rx_tooshort++;
5173         IC2IFP(ic)->if_ierrors++;
5174         m_freem(m);
5175         return;
5176     }
5177     if (len > maxlen - desc_size) {
5178         IC2IFP(ic)->if_ierrors++;
5179         m_freem(m);
5180         return;
5181     }
5182
5183     m->m_data = pktdata + desc_size;
5184     m->m_pkthdr.len = m->m_len = len;
5185
5186     /* Account for padding following the frame header. */
5187     if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_PAD) {
5188         struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
5189         int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5190         if (type == IEEE80211_FC0_TYPE_CTL) {
5191             switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
5192             case IEEE80211_FC0_SUBTYPE_CTS:
5193                 hdrlen = sizeof(struct ieee80211_frame_cts);
5194                 break;
5195             case IEEE80211_FC0_SUBTYPE_ACK:
5196                 hdrlen = sizeof(struct ieee80211_frame_ack);
5197                 break;
5198             default:
5199                 hdrlen = sizeof(struct ieee80211_frame_min);
5200                 break;
5201             }
5202         } else
5203             hdrlen = ieee80211_get_hdrlen(wh);
5204
5205         if ((le16toh(desc->status) &
5206             IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
5207             IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
5208             /* Padding is inserted after the IV. */
5209             hdrlen += IEEE80211_CCMP_HDRLEN;
5210         }
5211
5212         memmove(m->m_data + 2, m->m_data, hdrlen);
5213         m_adj(m, 2);
5214     }
5215
5216     memset(&rxi, 0, sizeof(rxi));
5217
5218     /*
5219      * Hardware de-aggregates A-MSDUs and copies the same MAC header
5220      * in place for each subframe. But it leaves the 'A-MSDU present'
5221      * bit set in the frame header. We need to clear this bit ourselves.
5222      * (XXX This workaround is not required on AX200/AX201 devices that
5223      * have been tested by me, but it's unclear when this problem was
5224      * fixed in the hardware. It definitely affects the 9k generation.
5225      * Leaving this in place for now since some 9k/AX200 hybrids seem
5226      * to exist that we may eventually add support for.)
5227      *
5228      * And we must allow the same CCMP PN for subframes following the
5229      * first subframe. Otherwise they would be discarded as replays.
5230      */
5231     if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU) {
5232         struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
5233         uint8_t subframe_idx = (desc->amsdu_info &
5234             IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
5235         if (subframe_idx > 0)
5236             rxi.rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
5237         if (ieee80211_has_qos(wh) && ieee80211_has_addr4(wh) &&
5238             m->m_len >= sizeof(struct ieee80211_qosframe_addr4)) {
5239             struct ieee80211_qosframe_addr4 *qwh4 = mtod(m,
5240                 struct ieee80211_qosframe_addr4 *);
5241             qwh4->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
5242         } else if (ieee80211_has_qos(wh) &&
5243             m->m_len >= sizeof(struct ieee80211_qosframe)) {
5244             struct ieee80211_qosframe *qwh = mtod(m,
5245                 struct ieee80211_qosframe *);
5246             qwh->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
5247         }
5248     }
5249
5250     /*
5251      * Verify decryption before duplicate detection. The latter uses
5252      * the TID supplied in QoS frame headers and this TID is implicitly
5253      * verified as part of the CCMP nonce.
5254      */
5255     if (iwx_rx_hwdecrypt(sc, m, le16toh(desc->status), &rxi)) {
5256         m_freem(m);
5257         return;
5258     }
5259
5260     if (iwx_detect_duplicate(sc, m, desc, &rxi)) {
5261         m_freem(m);
5262         return;
5263     }
5264
5265     if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
5266         rate_n_flags = le32toh(desc->v3.rate_n_flags);
5267         chanidx = desc->v3.channel;
5268         device_timestamp = le32toh(desc->v3.gp2_on_air_rise);
5269     } else {
5270         rate_n_flags = le32toh(desc->v1.rate_n_flags);
5271         chanidx = desc->v1.channel;
5272         device_timestamp = le32toh(desc->v1.gp2_on_air_rise);
5273     }
5274
5275     phy_info = le16toh(desc->phy_info);
5276
5277     rssi = iwx_rxmq_get_signal_strength(sc, desc);
5278     rssi = (0 - IWX_MIN_DBM) + rssi;   /* normalize */
5279     rssi = MIN(rssi, ic->ic_max_rssi); /* clip to max. 100% */
5280
5281     rxi.rxi_rssi = rssi;
5282     rxi.rxi_tstamp = device_timestamp;
5283     rxi.rxi_chan = chanidx;
5284
5285     if (iwx_rx_reorder(sc, m, chanidx, desc,
5286         (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
5287         rate_n_flags, device_timestamp, &rxi, ml))
5288         return;
5289
5290     iwx_rx_frame(sc, m, chanidx, le16toh(desc->status),
5291         (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
5292         rate_n_flags, device_timestamp, &rxi, ml);
5293 }
5294
5295 void
5296 iwx_clear_tx_desc(struct iwx_softc *sc, struct iwx_tx_ring *ring, int idx)
5297 {
5298     struct iwx_tfh_tfd *desc = &ring->desc[idx];
5299     uint8_t num_tbs = le16toh(desc->num_tbs) & 0x1f;
5300     int i;
5301
5302     /* First TB is never cleared - it is bidirectional DMA data. */
5303     for (i = 1; i < num_tbs; i++) {
5304         struct iwx_tfh_tb *tb = &desc->tbs[i];
5305         memset(tb, 0, sizeof(*tb));
5306     }
5307     desc->num_tbs = htole16(1);
5308
5309     bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
5310         (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
5311         sizeof(*desc), BUS_DMASYNC_PREWRITE);
5312 }
5313
5314 void
5315 iwx_txd_done(struct iwx_softc *sc, struct iwx_tx_data *txd)
5316 {
5317     struct ieee80211com *ic = &sc->sc_ic;
5318
5319     bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
5320         BUS_DMASYNC_POSTWRITE);
5321     bus_dmamap_unload(sc->sc_dmat, txd->map);
5322     m_freem(txd->m);
5323     txd->m = NULL;
5324
5325     KASSERT(txd->in);
5326     ieee80211_release_node(ic, &txd->in->in_ni);
5327     txd->in = NULL;
5328 }
5329
5330 void
5331 iwx_txq_advance(struct iwx_softc *sc, struct iwx_tx_ring *ring, uint16_t idx)
5332 {
5333     struct iwx_tx_data *txd;
5334
5335     while (ring->tail_hw != idx) {
5336         txd = &ring->data[ring->tail];
5337         if (txd->m != NULL) {
5338             iwx_clear_tx_desc(sc, ring, ring->tail);
5339             iwx_tx_update_byte_tbl(sc, ring, ring->tail, 0, 0);
5340             iwx_txd_done(sc, txd);
5341             ring->queued--;
5342         }
5343         ring->tail = (ring->tail + 1) % IWX_TX_RING_COUNT;
5344         ring->tail_hw = (ring->tail_hw + 1) % sc->max_tfd_queue_size;
5345     }
5346 }
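/*
 * Note on the two tail counters above (an assumption, inferred from
 * the modulo arithmetic): ring->tail indexes the driver's data array
 * of IWX_TX_RING_COUNT (256) entries, while ring->tail_hw tracks the
 * firmware's TFD index space of sc->max_tfd_queue_size entries; the
 * two advance in lockstep but wrap at different sizes.
 */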
5347
5348 void
5349 iwx_rx_tx_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
5350     struct iwx_rx_data *data)
5351 {
5352     struct ieee80211com *ic = &sc->sc_ic;
5353     struct ifnet *ifp = IC2IFP(ic);
5354     struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
5355     int qid = cmd_hdr->qid, status, txfail;
5356     struct iwx_tx_ring *ring = &sc->txq[qid];
5357     struct iwx_tx_resp *tx_resp = (void *)pkt->data;
5358     uint32_t ssn;
5359     uint32_t len = iwx_rx_packet_len(pkt);
5360
5361     bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWX_RBUF_SIZE,
5362         BUS_DMASYNC_POSTREAD);
5363
5364     /* Sanity checks. */
5365     if (sizeof(*tx_resp) > len)
5366         return;
5367     if (qid < IWX_FIRST_AGG_TX_QUEUE && tx_resp->frame_count > 1)
5368         return;
5369     if (qid >= IWX_FIRST_AGG_TX_QUEUE && sizeof(*tx_resp) + sizeof(ssn) +
5370         tx_resp->frame_count * sizeof(tx_resp->status) > len)
5371         return;
5372
5373     sc->sc_tx_timer[qid] = 0;
5374
5375     if (tx_resp->frame_count > 1) /* A-MPDU */
5376         return;
5377
5378     status = le16toh(tx_resp->status.status) & IWX_TX_STATUS_MSK;
5379     txfail = (status != IWX_TX_STATUS_SUCCESS &&
5380         status != IWX_TX_STATUS_DIRECT_DONE);
5381
5382     if (txfail)
5383         ifp->if_oerrors++;
5384
5385     /*
5386      * On hardware supported by iwx(4) the SSN counter corresponds
5387      * to a Tx ring index rather than a sequence number.
5388      * Frames up to this index (non-inclusive) can now be freed.
5389      */
5390     memcpy(&ssn, &tx_resp->status + tx_resp->frame_count, sizeof(ssn));
5391     ssn = le32toh(ssn);
5392     if (ssn < sc->max_tfd_queue_size) {
5393         iwx_txq_advance(sc, ring, ssn);
5394         iwx_clear_oactive(sc, ring);
5395     }
5396 }
5397
5398 void
5399 iwx_clear_oactive(struct iwx_softc *sc, struct iwx_tx_ring *ring)
5400 {
5401     struct ieee80211com *ic = &sc->sc_ic;
5402     struct ifnet *ifp = IC2IFP(ic);
5403
5404     if (ring->queued < IWX_TX_RING_LOMARK) {
5405         sc->qfullmsk &= ~(1 << ring->qid);
5406         if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
5407             ifq_clr_oactive(&ifp->if_snd);
5408             /*
5409              * Well, we're in interrupt context, but then again
5410              * I guess net80211 does all sorts of stunts in
5411              * interrupt context, so maybe this is no biggie.
5412              */
5413             (*ifp->if_start)(ifp);
5414         }
5415     }
5416 }
5417
5418 void
5419 iwx_rx_compressed_ba(struct iwx_softc *sc, struct iwx_rx_packet *pkt)
5420 {
5421     struct iwx_compressed_ba_notif *ba_res = (void *)pkt->data;
5422     struct ieee80211com *ic = &sc->sc_ic;
5423     struct ieee80211_node *ni;
5424     struct ieee80211_tx_ba *ba;
5425     struct iwx_node *in;
5426     struct iwx_tx_ring *ring;
5427     uint16_t i, tfd_cnt, ra_tid_cnt, idx;
5428     int qid;
5429
5430     if (ic->ic_state != IEEE80211_S_RUN)
5431         return;
5432
5433     if (iwx_rx_packet_payload_len(pkt) < sizeof(*ba_res))
5434         return;
5435
5436     if (ba_res->sta_id != IWX_STATION_ID)
5437         return;
5438
5439     ni = ic->ic_bss;
5440     in = (void *)ni;
5441
5442     tfd_cnt = le16toh(ba_res->tfd_cnt);
5443     ra_tid_cnt = le16toh(ba_res->ra_tid_cnt);
5444     if (!tfd_cnt || iwx_rx_packet_payload_len(pkt) < (sizeof(*ba_res) +
5445         sizeof(ba_res->ra_tid[0]) * ra_tid_cnt +
5446         sizeof(ba_res->tfd[0]) * tfd_cnt))
5447         return;
5448
5449     for (i = 0; i < tfd_cnt; i++) {
5450         struct iwx_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i];
5451         uint8_t tid;
5452
5453         tid = ba_tfd->tid;
5454         if (tid >= nitems(sc->aggqid))
5455             continue;
5456
5457         qid = sc->aggqid[tid];
5458         if (qid != htole16(ba_tfd->q_num))
5459             continue;
5460
5461         ring = &sc->txq[qid];
5462
5463         ba = &ni->ni_tx_ba[tid];
5464         if (ba->ba_state != IEEE80211_BA_AGREED)
5465             continue;
5466
5467         idx = le16toh(ba_tfd->tfd_index);
5468         sc->sc_tx_timer[qid] = 0;
5469         iwx_txq_advance(sc, ring, idx);
5470         iwx_clear_oactive(sc, ring);
5471     }
5472 }
5473
5474 void
5475 iwx_rx_bmiss(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
5476     struct iwx_rx_data *data)
5477 {
5478     struct ieee80211com *ic = &sc->sc_ic;
5479     struct iwx_missed_beacons_notif *mbn = (void *)pkt->data;
5480     uint32_t missed;
5481
5482     if ((ic->ic_opmode != IEEE80211_M_STA) ||
5483         (ic->ic_state != IEEE80211_S_RUN))
5484         return;
5485
5486     bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
5487         sizeof(*mbn), BUS_DMASYNC_POSTREAD);
5488
5489     missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
5490     if (missed > ic->ic_bmissthres && ic->ic_mgt_timer == 0) {
5491         if (ic->ic_if.if_flags & IFF_DEBUG)
5492             printf("%s: receiving no beacons from %s; checking if "
5493                 "this AP is still responding to probe requests\n",
5494                 DEVNAME(sc), ether_sprintf(ic->ic_bss->ni_macaddr));
5495         /*
5496          * Rather than go directly to scan state, try to send a
5497          * directed probe request first. If that fails then the
5498          * state machine will drop us into scanning after timing
5499          * out waiting for a probe response.
5500          */
5501         IEEE80211_SEND_MGMT(ic, ic->ic_bss,
5502             IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
5503     }
5504
5505 }
5506
5507 int
5508 iwx_binding_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action)
5509 {
5510     struct iwx_binding_cmd cmd;
5511     struct iwx_phy_ctxt *phyctxt = in->in_phyctxt;
5512     uint32_t mac_id = IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
5513     int i, err, active = (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE);
5514     uint32_t status;
5515
5516     if (action == IWX_FW_CTXT_ACTION_ADD && active)
5517         panic("binding already added");
5518     if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
5519         panic("binding already removed");
5520
5521     if (phyctxt == NULL) /* XXX race with iwx_stop() */
5522         return EINVAL;
5523
5524     memset(&cmd, 0, sizeof(cmd));
5525
5526     cmd.id_and_color
5527         = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
5528     cmd.action = htole32(action);
5529     cmd.phy = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
5530
5531     cmd.macs[0] = htole32(mac_id);
5532     for (i = 1; i < IWX_MAX_MACS_IN_BINDING; i++)
5533         cmd.macs[i] = htole32(IWX_FW_CTXT_INVALID);
5534
5535     if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel) ||
5536         !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
5537         cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
5538     else
5539         cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
5540
5541     status = 0;
5542     err = iwx_send_cmd_pdu_status(sc, IWX_BINDING_CONTEXT_CMD, sizeof(cmd),
5543         &cmd, &status);
5544     if (err == 0 && status != 0)
5545         err = EIO;
5546
5547     return err;
5548 }
5549
5550 uint8_t
5551 iwx_get_vht_ctrl_pos(struct ieee80211com *ic, struct ieee80211_channel *chan)
5552 {
5553     int center_idx = ic->ic_bss->ni_vht_chan_center_freq_idx0;
5554     int primary_idx = ic->ic_bss->ni_primary_chan;
5555     /*
5556      * The FW is expected to check the control channel position only
5557      * when in HT/VHT and the channel width is not 20MHz. Return
5558      * this value as the default one:
5559      */
5560     uint8_t pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5561
5562     switch (primary_idx - center_idx) {
5563     case -6:
5564         pos = IWX_PHY_VHT_CTRL_POS_2_BELOW;
5565         break;
5566     case -2:
5567         pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5568         break;
5569     case 2:
5570         pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
5571         break;
5572     case 6:
5573         pos = IWX_PHY_VHT_CTRL_POS_2_ABOVE;
5574         break;
5575     default:
5576         break;
5577     }
5578
5579     return pos;
5580 }
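/*
 * Worked example for the switch above (illustration): on an 80MHz VHT
 * channel with center frequency index 42, a primary channel of 36
 * gives 36 - 42 = -6 and thus IWX_PHY_VHT_CTRL_POS_2_BELOW, while a
 * primary channel of 44 gives 44 - 42 = 2 and
 * IWX_PHY_VHT_CTRL_POS_1_ABOVE.
 */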
5581
5582int
5583iwx_phy_ctxt_cmd_uhb_v3_v4(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
5584 uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco,
5585 uint8_t vht_chan_width, int cmdver)
5586{
5587 struct ieee80211com *ic = &sc->sc_ic;
5588 struct iwx_phy_context_cmd_uhb cmd;
5589 uint8_t active_cnt, idle_cnt;
5590 struct ieee80211_channel *chan = ctxt->channel;
5591
5592 memset(&cmd, 0, sizeof(cmd))__builtin_memset((&cmd), (0), (sizeof(cmd)));
5593 cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,((__uint32_t)(((ctxt->id << (0)) | (ctxt->color <<
(8)))))
5594 ctxt->color))((__uint32_t)(((ctxt->id << (0)) | (ctxt->color <<
(8)))))
;
5595 cmd.action = htole32(action)((__uint32_t)(action));
5596
5597 if (IEEE80211_IS_CHAN_2GHZ(ctxt->channel) ||
5598 !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
5599 cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
5600 else
5601 cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
5602
5603 cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
5604 IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
5605 cmd.ci.channel = htole32(ieee80211_chan2ieee(ic, chan));
5606 if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) {
5607 cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan);
5608 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80;
5609 } else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
5610 if (sco == IEEE80211_HTOP0_SCO_SCA) {
5611 /* secondary chan above -> control chan below */
5612 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5613 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
5614 } else if (sco == IEEE80211_HTOP0_SCO_SCB) {
5615 /* secondary chan below -> control chan above */
5616 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
5617 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
5618 } else {
5619 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
5620 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5621 }
5622 } else {
5623 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
5624 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5625 }
5626
5627 if (cmdver < 4 && iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
5628 IWX_RLC_CONFIG_CMD) != 2) {
5629 idle_cnt = chains_static;
5630 active_cnt = chains_dynamic;
5631 cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
5632 IWX_PHY_RX_CHAIN_VALID_POS);
5633 cmd.rxchain_info |= htole32(idle_cnt <<
5634 IWX_PHY_RX_CHAIN_CNT_POS);
5635 cmd.rxchain_info |= htole32(active_cnt <<
5636 IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
5637 }
5638
5639 return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
5640}
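
The rxchain_info word built above is a packed bit field; the positions visible in this report's macro expansions are bit 1 for the valid-antenna mask, bit 10 for the idle chain count and bit 12 for the MIMO chain count. A sketch of that layout in host byte order (pack_rxchain is our name, not the driver's; the driver applies htole32() to each shifted value as shown above):

    #include <stdint.h>

    /* Hypothetical helper showing the bit layout only. */
    static inline uint32_t
    pack_rxchain(uint32_t valid_ant, uint32_t idle_cnt, uint32_t mimo_cnt)
    {
        uint32_t v = 0;
        v |= valid_ant << 1;   /* IWX_PHY_RX_CHAIN_VALID_POS */
        v |= idle_cnt << 10;   /* IWX_PHY_RX_CHAIN_CNT_POS */
        v |= mimo_cnt << 12;   /* IWX_PHY_RX_CHAIN_MIMO_CNT_POS */
        return v;
    }
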
5641
5642int
5643iwx_phy_ctxt_cmd_v3_v4(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
5644 uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco,
5645 uint8_t vht_chan_width, int cmdver)
5646{
5647 struct ieee80211com *ic = &sc->sc_ic;
5648 struct iwx_phy_context_cmd cmd;
5649 uint8_t active_cnt, idle_cnt;
5650 struct ieee80211_channel *chan = ctxt->channel;
5651
5652 memset(&cmd, 0, sizeof(cmd));
5653 cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
5654 ctxt->color));
5655 cmd.action = htole32(action);
5656
5657 if (IEEE80211_IS_CHAN_2GHZ(ctxt->channel) ||
5658 !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
5659 cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
5660 else
5661 cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
5662
5663 cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
5664 IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
5665 cmd.ci.channel = ieee80211_chan2ieee(ic, chan);
5666 if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) {
5667 cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan);
5668 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80;
5669 } else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
5670 if (sco == IEEE80211_HTOP0_SCO_SCA) {
5671 /* secondary chan above -> control chan below */
5672 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5673 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
5674 } else if (sco == IEEE80211_HTOP0_SCO_SCB) {
5675 /* secondary chan below -> control chan above */
5676 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
5677 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
5678 } else {
5679 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
5680 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5681 }
5682 } else {
5683 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
5684 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5685 }
5686
5687 if (cmdver < 4 && iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
5688 IWX_RLC_CONFIG_CMD) != 2) {
5689 idle_cnt = chains_static;
5690 active_cnt = chains_dynamic;
5691 cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
5692 IWX_PHY_RX_CHAIN_VALID_POS);
5693 cmd.rxchain_info |= htole32(idle_cnt <<
5694 IWX_PHY_RX_CHAIN_CNT_POS);
5695 cmd.rxchain_info |= htole32(active_cnt <<
5696 IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
5697 }
5698
5699 return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
5700}
5701
5702int
5703iwx_phy_ctxt_cmd(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
5704 uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
5705 uint32_t apply_time, uint8_t sco, uint8_t vht_chan_width)
5706{
5707 int cmdver;
5708
5709 cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_PHY_CONTEXT_CMD);
5710 if (cmdver != 3 && cmdver != 4) {
5711 printf("%s: firmware does not support phy-context-cmd v3/v4\n",
5712 DEVNAME(sc));
5713 return ENOTSUP;
5714 }
5715
5716 /*
5717 * Intel increased the size of the fw_channel_info struct and neglected
5718 * to bump the phy_context_cmd struct, which contains an fw_channel_info
5719 * member in the middle.
5720 * To keep things simple we use a separate function to handle the larger
5721 * variant of the phy context command.
5722 */
5723 if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS)) {
5724 return iwx_phy_ctxt_cmd_uhb_v3_v4(sc, ctxt, chains_static,
5725 chains_dynamic, action, sco, vht_chan_width, cmdver);
5726 }
5727
5728 return iwx_phy_ctxt_cmd_v3_v4(sc, ctxt, chains_static, chains_dynamic,
5729 action, sco, vht_chan_width, cmdver);
5730}
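
The two worker functions dispatched above differ only in the command struct they fill in, and the visible difference in this file is the width of the channel field: the ultra-high-band variant stores the channel number through htole32() into a 32-bit field, while the regular variant assigns it directly to a narrower field. Because the channel info sits in the middle of the phy context command, everything after it shifts, so one function cannot serve both layouts. A simplified, partial sketch of that difference (field names and widths abridged and assumed for illustration; the authoritative layouts are in if_iwxreg.h):

    #include <stdint.h>

    /* Abridged illustration, not the driver's exact structs. */
    struct chan_info_sketch {           /* regular variant */
        uint8_t channel;                /* assigned without byte swapping */
        /* ... band, width, ctrl_pos ... */
    };

    struct chan_info_uhb_sketch {       /* ultra-high-band variant */
        uint32_t channel;               /* assigned via htole32() */
        /* ... band, width, ctrl_pos ... */
    };
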