File: dev/pci/if_iwm.c
Warning: line 9114, column 9: Result of 'malloc' is converted to a pointer of type 'struct ieee80211_node', which is incompatible with sizeof operand type 'struct iwm_node'
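Note on the diagnostic: the flagged call is in iwm_node_alloc(), which lies
outside this excerpt (line 9114). Assuming it follows the standard net80211
driver idiom sketched below, the report is a false positive: struct iwm_node
embeds struct ieee80211_node as its first member, so allocating
sizeof(struct iwm_node) and returning it through the generic node pointer is
intentional.

	struct ieee80211_node *
	iwm_node_alloc(struct ieee80211com *ic)
	{
		/*
		 * Hypothetical sketch of the flagged pattern, not a verbatim
		 * quote of line 9114: the driver node is larger than the
		 * generic node it embeds, which is all the checker sees.
		 */
		return malloc(sizeof(struct iwm_node), M_DEVBUF,
		    M_NOWAIT | M_ZERO);
	}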
/*	$OpenBSD: if_iwm.c,v 1.413 2023/12/20 07:32:05 stsp Exp $	*/

/*
 * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
 *   Author: Stefan Sperling <stsp@openbsd.org>
 * Copyright (c) 2014 Fixup Software Ltd.
 * Copyright (c) 2017 Stefan Sperling <stsp@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/endian.h>

#include <sys/refcnt.h>
#include <sys/task.h>
#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_amrr.h>
#include <net80211/ieee80211_ra.h>
#include <net80211/ieee80211_ra_vht.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_priv.h> /* for SEQ_LT */
#undef DPRINTF /* defined in ieee80211_priv.h */

#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)

#define IC2IFP(_ic_)	(&(_ic_)->ic_if)

#define le16_to_cpup(_a_)	(le16toh(*(const uint16_t *)(_a_)))
#define le32_to_cpup(_a_)	(le32toh(*(const uint32_t *)(_a_)))

#ifdef IWM_DEBUG
#define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
int iwm_debug = 1;
#else
#define DPRINTF(x)	do { ; } while (0)
#define DPRINTFN(n, x)	do { ; } while (0)
#endif

#include <dev/pci/if_iwmreg.h>
#include <dev/pci/if_iwmvar.h>

const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};

const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};

#define IWM_NUM_2GHZ_CHANNELS	14

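/*
 * Each entry pairs an 802.11 rate (in 500 kb/s units, so 2 means 1 Mb/s)
 * with its legacy PLCP value and, where one exists, the matching HT MCS
 * PLCP value; rows whose legacy value is IWM_RATE_INVM_PLCP are HT-only
 * (MIMO) rates.
 */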
const struct iwm_rate {
	uint16_t rate;
	uint8_t plcp;
	uint8_t ht_plcp;
} iwm_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWM_RATE_1M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{   4,	IWM_RATE_2M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWM_RATE_5M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  22,	IWM_RATE_11M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWM_RATE_6M_PLCP,	IWM_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWM_RATE_9M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  24,	IWM_RATE_12M_PLCP,	IWM_RATE_HT_SISO_MCS_1_PLCP },
	{  26,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_8_PLCP },
	{  36,	IWM_RATE_18M_PLCP,	IWM_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWM_RATE_24M_PLCP,	IWM_RATE_HT_SISO_MCS_3_PLCP },
	{  52,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_9_PLCP },
	{  72,	IWM_RATE_36M_PLCP,	IWM_RATE_HT_SISO_MCS_4_PLCP },
	{  78,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_10_PLCP },
	{  96,	IWM_RATE_48M_PLCP,	IWM_RATE_HT_SISO_MCS_5_PLCP },
	{ 104,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_11_PLCP },
	{ 108,	IWM_RATE_54M_PLCP,	IWM_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_SISO_MCS_7_PLCP },
	{ 156,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_12_PLCP },
	{ 208,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_13_PLCP },
	{ 234,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_14_PLCP },
	{ 260,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_15_PLCP },
};
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_)	((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_)	((_i_) >= IWM_RIDX_OFDM)
#define IWM_RVAL_IS_OFDM(_i_)	((_i_) >= 12 && (_i_) != 22)

/* Convert an MCS index into an iwm_rates[] index. */
const int iwm_ht_mcs2ridx[] = {
	IWM_RATE_MCS_0_INDEX,
	IWM_RATE_MCS_1_INDEX,
	IWM_RATE_MCS_2_INDEX,
	IWM_RATE_MCS_3_INDEX,
	IWM_RATE_MCS_4_INDEX,
	IWM_RATE_MCS_5_INDEX,
	IWM_RATE_MCS_6_INDEX,
	IWM_RATE_MCS_7_INDEX,
	IWM_RATE_MCS_8_INDEX,
	IWM_RATE_MCS_9_INDEX,
	IWM_RATE_MCS_10_INDEX,
	IWM_RATE_MCS_11_INDEX,
	IWM_RATE_MCS_12_INDEX,
	IWM_RATE_MCS_13_INDEX,
	IWM_RATE_MCS_14_INDEX,
	IWM_RATE_MCS_15_INDEX,
};

struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

int	iwm_is_mimo_ht_plcp(uint8_t);
int	iwm_is_mimo_ht_mcs(int);
int	iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
int	iwm_firmware_store_section(struct iwm_softc *, enum iwm_ucode_type,
	    uint8_t *, size_t);
int	iwm_set_default_calib(struct iwm_softc *, const void *);
void	iwm_fw_info_free(struct iwm_fw_info *);
void	iwm_fw_version_str(char *, size_t, uint32_t, uint32_t, uint32_t);
int	iwm_read_firmware(struct iwm_softc *);
uint32_t iwm_read_prph_unlocked(struct iwm_softc *, uint32_t);
uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
void	iwm_write_prph_unlocked(struct iwm_softc *, uint32_t, uint32_t);
void	iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
int	iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
int	iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
int	iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
int	iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
int	iwm_nic_lock(struct iwm_softc *);
void	iwm_nic_assert_locked(struct iwm_softc *);
void	iwm_nic_unlock(struct iwm_softc *);
int	iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
	    uint32_t);
int	iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
int	iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *, bus_size_t,
	    bus_size_t);
void	iwm_dma_contig_free(struct iwm_dma_info *);
int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
void	iwm_disable_rx_dma(struct iwm_softc *);
void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *, int);
void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
void	iwm_enable_rfkill_int(struct iwm_softc *);
int	iwm_check_rfkill(struct iwm_softc *);
void	iwm_enable_interrupts(struct iwm_softc *);
void	iwm_enable_fwload_interrupt(struct iwm_softc *);
void	iwm_restore_interrupts(struct iwm_softc *);
void	iwm_disable_interrupts(struct iwm_softc *);
void	iwm_ict_reset(struct iwm_softc *);
int	iwm_set_hw_ready(struct iwm_softc *);
int	iwm_prepare_card_hw(struct iwm_softc *);
void	iwm_apm_config(struct iwm_softc *);
int	iwm_apm_init(struct iwm_softc *);
void	iwm_apm_stop(struct iwm_softc *);
int	iwm_allow_mcast(struct iwm_softc *);
void	iwm_init_msix_hw(struct iwm_softc *);
void	iwm_conf_msix_hw(struct iwm_softc *, int);
int	iwm_clear_persistence_bit(struct iwm_softc *);
int	iwm_start_hw(struct iwm_softc *);
void	iwm_stop_device(struct iwm_softc *);
void	iwm_nic_config(struct iwm_softc *);
int	iwm_nic_rx_init(struct iwm_softc *);
int	iwm_nic_rx_legacy_init(struct iwm_softc *);
int	iwm_nic_rx_mq_init(struct iwm_softc *);
int	iwm_nic_tx_init(struct iwm_softc *);
int	iwm_nic_init(struct iwm_softc *);
int	iwm_enable_ac_txq(struct iwm_softc *, int, int);
int	iwm_enable_txq(struct iwm_softc *, int, int, int, int, uint8_t,
	    uint16_t);
int	iwm_disable_txq(struct iwm_softc *, int, int, uint8_t);
int	iwm_post_alive(struct iwm_softc *);
struct iwm_phy_db_entry *iwm_phy_db_get_section(struct iwm_softc *, uint16_t,
	    uint16_t);
int	iwm_phy_db_set_section(struct iwm_softc *,
	    struct iwm_calib_res_notif_phy_db *);
int	iwm_is_valid_channel(uint16_t);
uint8_t	iwm_ch_id_to_ch_index(uint16_t);
uint16_t iwm_channel_id_to_papd(uint16_t);
uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
int	iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t, uint8_t **,
	    uint16_t *, uint16_t);
int	iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t, void *);
int	iwm_phy_db_send_all_channel_groups(struct iwm_softc *, uint16_t,
	    uint8_t);
int	iwm_send_phy_db_data(struct iwm_softc *);
void	iwm_protect_session(struct iwm_softc *, struct iwm_node *, uint32_t,
	    uint32_t);
void	iwm_unprotect_session(struct iwm_softc *, struct iwm_node *);
int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t, uint16_t,
	    uint8_t *, uint16_t *);
int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
	    uint16_t *, size_t);
uint8_t	iwm_fw_valid_tx_ant(struct iwm_softc *);
uint8_t	iwm_fw_valid_rx_ant(struct iwm_softc *);
int	iwm_valid_siso_ant_rate_mask(struct iwm_softc *);
void	iwm_init_channel_map(struct iwm_softc *, const uint16_t * const,
	    const uint8_t *nvm_channels, int nchan);
int	iwm_mimo_enabled(struct iwm_softc *);
void	iwm_setup_ht_rates(struct iwm_softc *);
void	iwm_setup_vht_rates(struct iwm_softc *);
void	iwm_mac_ctxt_task(void *);
void	iwm_phy_ctxt_task(void *);
void	iwm_updateprot(struct ieee80211com *);
void	iwm_updateslot(struct ieee80211com *);
void	iwm_updateedca(struct ieee80211com *);
void	iwm_updatechan(struct ieee80211com *);
void	iwm_updatedtim(struct ieee80211com *);
void	iwm_init_reorder_buffer(struct iwm_reorder_buffer *, uint16_t,
	    uint16_t);
void	iwm_clear_reorder_buffer(struct iwm_softc *, struct iwm_rxba_data *);
int	iwm_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
void	iwm_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
void	iwm_rx_ba_session_expired(void *);
void	iwm_reorder_timer_expired(void *);
int	iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *, uint8_t,
	    uint16_t, uint16_t, int, int);
int	iwm_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
void	iwm_ampdu_tx_stop(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
void	iwm_ba_task(void *);

int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
	    const uint16_t *, const uint16_t *,
	    const uint16_t *, const uint16_t *,
	    const uint16_t *, int);
void	iwm_set_hw_address_8000(struct iwm_softc *, struct iwm_nvm_data *,
	    const uint16_t *, const uint16_t *);
int	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
int	iwm_nvm_init(struct iwm_softc *);
int	iwm_firmware_load_sect(struct iwm_softc *, uint32_t, const uint8_t *,
	    uint32_t);
int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t, const uint8_t *,
	    uint32_t);
int	iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
int	iwm_load_cpu_sections_8000(struct iwm_softc *, struct iwm_fw_sects *,
	    int, int *);
int	iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
int	iwm_load_ucode_wait_alive(struct iwm_softc *, enum iwm_ucode_type);
int	iwm_send_dqa_cmd(struct iwm_softc *);
int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
int	iwm_config_ltr(struct iwm_softc *);
int	iwm_rx_addbuf(struct iwm_softc *, int, int);
int	iwm_get_signal_strength(struct iwm_softc *, struct iwm_rx_phy_info *);
int	iwm_rxmq_get_signal_strength(struct iwm_softc *, struct iwm_rx_mpdu_desc *);
void	iwm_rx_rx_phy_cmd(struct iwm_softc *, struct iwm_rx_packet *,
	    struct iwm_rx_data *);
int	iwm_get_noise(const struct iwm_statistics_rx_non_phy *);
int	iwm_rx_hwdecrypt(struct iwm_softc *, struct mbuf *, uint32_t,
	    struct ieee80211_rxinfo *);
int	iwm_ccmp_decap(struct iwm_softc *, struct mbuf *,
	    struct ieee80211_node *, struct ieee80211_rxinfo *);
void	iwm_rx_frame(struct iwm_softc *, struct mbuf *, int, uint32_t, int, int,
	    uint32_t, struct ieee80211_rxinfo *, struct mbuf_list *);
void	iwm_ht_single_rate_control(struct iwm_softc *, struct ieee80211_node *,
	    int, uint8_t, int);
void	iwm_vht_single_rate_control(struct iwm_softc *, struct ieee80211_node *,
	    int, int, uint8_t, int);
void	iwm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *,
	    struct iwm_node *, int, int);
void	iwm_txd_done(struct iwm_softc *, struct iwm_tx_data *);
void	iwm_txq_advance(struct iwm_softc *, struct iwm_tx_ring *, int);
void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
	    struct iwm_rx_data *);
void	iwm_clear_oactive(struct iwm_softc *, struct iwm_tx_ring *);
void	iwm_ampdu_rate_control(struct iwm_softc *, struct ieee80211_node *,
	    struct iwm_tx_ring *, int, uint16_t, uint16_t);
void	iwm_rx_compressed_ba(struct iwm_softc *, struct iwm_rx_packet *);
void	iwm_rx_bmiss(struct iwm_softc *, struct iwm_rx_packet *,
	    struct iwm_rx_data *);
int	iwm_binding_cmd(struct iwm_softc *, struct iwm_node *, uint32_t);
uint8_t	iwm_get_vht_ctrl_pos(struct ieee80211com *, struct ieee80211_channel *);
int	iwm_phy_ctxt_cmd_uhb(struct iwm_softc *, struct iwm_phy_ctxt *, uint8_t,
	    uint8_t, uint32_t, uint32_t, uint8_t, uint8_t);
void	iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *,
	    struct iwm_phy_context_cmd *, uint32_t, uint32_t);
void	iwm_phy_ctxt_cmd_data(struct iwm_softc *, struct iwm_phy_context_cmd *,
	    struct ieee80211_channel *, uint8_t, uint8_t, uint8_t, uint8_t);
int	iwm_phy_ctxt_cmd(struct iwm_softc *, struct iwm_phy_ctxt *, uint8_t,
	    uint8_t, uint32_t, uint32_t, uint8_t, uint8_t);
int	iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
int	iwm_send_cmd_pdu(struct iwm_softc *, uint32_t, uint32_t, uint16_t,
	    const void *);
int	iwm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
	    uint32_t *);
int	iwm_send_cmd_pdu_status(struct iwm_softc *, uint32_t, uint16_t,
	    const void *, uint32_t *);
void	iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
void	iwm_cmd_done(struct iwm_softc *, int, int, int);
void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t, uint16_t);
void	iwm_reset_sched(struct iwm_softc *, int, int, uint8_t);
uint8_t	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
	    struct ieee80211_frame *, struct iwm_tx_cmd *);
int	iwm_tx(struct iwm_softc *, struct mbuf *, struct ieee80211_node *, int);
int	iwm_flush_tx_path(struct iwm_softc *, int);
int	iwm_wait_tx_queues_empty(struct iwm_softc *);
void	iwm_led_enable(struct iwm_softc *);
void	iwm_led_disable(struct iwm_softc *);
int	iwm_led_is_enabled(struct iwm_softc *);
void	iwm_led_blink_timeout(void *);
void	iwm_led_blink_start(struct iwm_softc *);
void	iwm_led_blink_stop(struct iwm_softc *);
int	iwm_beacon_filter_send_cmd(struct iwm_softc *,
	    struct iwm_beacon_filter_cmd *);
void	iwm_beacon_filter_set_cqm_params(struct iwm_softc *, struct iwm_node *,
	    struct iwm_beacon_filter_cmd *);
int	iwm_update_beacon_abort(struct iwm_softc *, struct iwm_node *, int);
void	iwm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
	    struct iwm_mac_power_cmd *);
int	iwm_power_mac_update_mode(struct iwm_softc *, struct iwm_node *);
int	iwm_power_update_device(struct iwm_softc *);
int	iwm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
int	iwm_disable_beacon_filter(struct iwm_softc *);
int	iwm_add_sta_cmd(struct iwm_softc *, struct iwm_node *, int);
int	iwm_add_aux_sta(struct iwm_softc *);
int	iwm_drain_sta(struct iwm_softc *sc, struct iwm_node *, int);
int	iwm_flush_sta(struct iwm_softc *, struct iwm_node *);
int	iwm_rm_sta_cmd(struct iwm_softc *, struct iwm_node *);
uint16_t iwm_scan_rx_chain(struct iwm_softc *);
uint32_t iwm_scan_rate_n_flags(struct iwm_softc *, int, int);
uint8_t	iwm_lmac_scan_fill_channels(struct iwm_softc *,
	    struct iwm_scan_channel_cfg_lmac *, int, int);
int	iwm_fill_probe_req(struct iwm_softc *, struct iwm_scan_probe_req *);
int	iwm_lmac_scan(struct iwm_softc *, int);
int	iwm_config_umac_scan(struct iwm_softc *);
int	iwm_umac_scan(struct iwm_softc *, int);
void	iwm_mcc_update(struct iwm_softc *, struct iwm_mcc_chub_notif *);
uint8_t	iwm_ridx2rate(struct ieee80211_rateset *, int);
int	iwm_rval2ridx(int);
void	iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *, int *);
void	iwm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
	    struct iwm_mac_ctx_cmd *, uint32_t);
void	iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
	    struct iwm_mac_data_sta *, int);
int	iwm_mac_ctxt_cmd(struct iwm_softc *, struct iwm_node *, uint32_t, int);
int	iwm_update_quotas(struct iwm_softc *, struct iwm_node *, int);
void	iwm_add_task(struct iwm_softc *, struct taskq *, struct task *);
void	iwm_del_task(struct iwm_softc *, struct taskq *, struct task *);
int	iwm_scan(struct iwm_softc *);
int	iwm_bgscan(struct ieee80211com *);
void	iwm_bgscan_done(struct ieee80211com *,
	    struct ieee80211_node_switch_bss_arg *, size_t);
void	iwm_bgscan_done_task(void *);
int	iwm_umac_scan_abort(struct iwm_softc *);
int	iwm_lmac_scan_abort(struct iwm_softc *);
int	iwm_scan_abort(struct iwm_softc *);
int	iwm_phy_ctxt_update(struct iwm_softc *, struct iwm_phy_ctxt *,
	    struct ieee80211_channel *, uint8_t, uint8_t, uint32_t, uint8_t,
	    uint8_t);
int	iwm_auth(struct iwm_softc *);
int	iwm_deauth(struct iwm_softc *);
int	iwm_run(struct iwm_softc *);
int	iwm_run_stop(struct iwm_softc *);
struct ieee80211_node *iwm_node_alloc(struct ieee80211com *);
int	iwm_set_key_v1(struct ieee80211com *, struct ieee80211_node *,
	    struct ieee80211_key *);
int	iwm_set_key(struct ieee80211com *, struct ieee80211_node *,
	    struct ieee80211_key *);
void	iwm_delete_key_v1(struct ieee80211com *,
	    struct ieee80211_node *, struct ieee80211_key *);
void	iwm_delete_key(struct ieee80211com *,
	    struct ieee80211_node *, struct ieee80211_key *);
void	iwm_calib_timeout(void *);
void	iwm_set_rate_table_vht(struct iwm_node *, struct iwm_lq_cmd *);
void	iwm_set_rate_table(struct iwm_node *, struct iwm_lq_cmd *);
void	iwm_setrates(struct iwm_node *, int);
int	iwm_media_change(struct ifnet *);
void	iwm_newstate_task(void *);
int	iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
void	iwm_endscan(struct iwm_softc *);
void	iwm_fill_sf_command(struct iwm_softc *, struct iwm_sf_cfg_cmd *,
	    struct ieee80211_node *);
int	iwm_sf_config(struct iwm_softc *, int);
int	iwm_send_bt_init_conf(struct iwm_softc *);
int	iwm_send_soc_conf(struct iwm_softc *);
int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
int	iwm_send_temp_report_ths_cmd(struct iwm_softc *);
void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
void	iwm_free_fw_paging(struct iwm_softc *);
int	iwm_save_fw_paging(struct iwm_softc *, const struct iwm_fw_sects *);
int	iwm_send_paging_cmd(struct iwm_softc *, const struct iwm_fw_sects *);
int	iwm_init_hw(struct iwm_softc *);
int	iwm_init(struct ifnet *);
void	iwm_start(struct ifnet *);
void	iwm_stop(struct ifnet *);
void	iwm_watchdog(struct ifnet *);
int	iwm_ioctl(struct ifnet *, u_long, caddr_t);
const char *iwm_desc_lookup(uint32_t);
void	iwm_nic_error(struct iwm_softc *);
void	iwm_dump_driver_status(struct iwm_softc *);
void	iwm_nic_umac_error(struct iwm_softc *);
void	iwm_rx_mpdu(struct iwm_softc *, struct mbuf *, void *, size_t,
	    struct mbuf_list *);
void	iwm_flip_address(uint8_t *);
int	iwm_detect_duplicate(struct iwm_softc *, struct mbuf *,
	    struct iwm_rx_mpdu_desc *, struct ieee80211_rxinfo *);
int	iwm_is_sn_less(uint16_t, uint16_t, uint16_t);
void	iwm_release_frames(struct iwm_softc *, struct ieee80211_node *,
	    struct iwm_rxba_data *, struct iwm_reorder_buffer *, uint16_t,
	    struct mbuf_list *);
int	iwm_oldsn_workaround(struct iwm_softc *, struct ieee80211_node *,
	    int, struct iwm_reorder_buffer *, uint32_t, uint32_t);
int	iwm_rx_reorder(struct iwm_softc *, struct mbuf *, int,
	    struct iwm_rx_mpdu_desc *, int, int, uint32_t,
	    struct ieee80211_rxinfo *, struct mbuf_list *);
void	iwm_rx_mpdu_mq(struct iwm_softc *, struct mbuf *, void *, size_t,
	    struct mbuf_list *);
int	iwm_rx_pkt_valid(struct iwm_rx_packet *);
void	iwm_rx_pkt(struct iwm_softc *, struct iwm_rx_data *,
	    struct mbuf_list *);
void	iwm_notif_intr(struct iwm_softc *);
int	iwm_intr(void *);
int	iwm_intr_msix(void *);
int	iwm_match(struct device *, void *, void *);
int	iwm_preinit(struct iwm_softc *);
void	iwm_attach_hook(struct device *);
void	iwm_attach(struct device *, struct device *, void *);
void	iwm_init_task(void *);
int	iwm_activate(struct device *, int);
void	iwm_resume(struct iwm_softc *);
int	iwm_wakeup(struct iwm_softc *);

#if NBPFILTER > 0
void	iwm_radiotap_attach(struct iwm_softc *);
#endif

uint8_t
iwm_lookup_cmd_ver(struct iwm_softc *sc, uint8_t grp, uint8_t cmd)
{
	const struct iwm_fw_cmd_version *entry;
	int i;

	for (i = 0; i < sc->n_cmd_versions; i++) {
		entry = &sc->cmd_versions[i];
		if (entry->group == grp && entry->cmd == cmd)
			return entry->cmd_ver;
	}

	return IWM_FW_CMD_VER_UNKNOWN;
}

int
iwm_is_mimo_ht_plcp(uint8_t ht_plcp)
{
	return (ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP &&
	    (ht_plcp & IWM_RATE_HT_MCS_NSS_MSK));
}

int
iwm_is_mimo_ht_mcs(int mcs)
{
	int ridx = iwm_ht_mcs2ridx[mcs];
	return iwm_is_mimo_ht_plcp(iwm_rates[ridx].ht_plcp);
}

int
iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
{
	struct iwm_fw_cscheme_list *l = (void *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

int
iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type,
    uint8_t *data, size_t dlen)
{
	struct iwm_fw_sects *fws;
	struct iwm_fw_onesect *fwone;

	if (type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.fw_sects[type];
	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
		return EINVAL;

	fwone = &fws->fw_sect[fws->fw_count];

	/* The first 32 bits are the device load offset. */
	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));

	/* The rest is data. */
	fwone->fws_data = data + sizeof(uint32_t);
	fwone->fws_len = dlen - sizeof(uint32_t);

	fws->fw_count++;
	fws->fw_totlen += fwone->fws_len;

	return 0;
}

#define IWM_DEFAULT_SCAN_CHANNELS	40
/* Newer firmware might support more channels. Raise this value if needed. */
#define IWM_MAX_SCAN_CHANNELS		52 /* as of 8265-34 firmware image */

struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;

int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
	const struct iwm_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	free(fw->fw_rawdata, M_DEVBUF, fw->fw_rawsize);
	fw->fw_rawdata = NULL;
	fw->fw_rawsize = 0;
	/* don't touch fw->fw_status */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}

void
iwm_fw_version_str(char *buf, size_t bufsize,
    uint32_t major, uint32_t minor, uint32_t api)
{
	/*
	 * Starting with major version 35 the Linux driver prints the minor
	 * version in hexadecimal.
	 */
	if (major >= 35)
		snprintf(buf, bufsize, "%u.%08x.%u", major, minor, api);
	else
		snprintf(buf, bufsize, "%u.%u.%u", major, minor, api);
}
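
/*
 * Illustration (not in the original source): major=36, minor=0x86baa16f,
 * api=29 yields "36.86baa16f.29", while an older major=17, minor=3, api=1
 * image is printed as "17.3.1".
 */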

int
iwm_read_firmware(struct iwm_softc *sc)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	struct iwm_tlv_ucode_header *uhdr;
	struct iwm_ucode_tlv tlv;
	uint32_t tlv_type;
	uint8_t *data;
	uint32_t usniffer_img;
	uint32_t paging_mem_size;
	int err;
	size_t len;

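	/*
	 * Only one caller may parse firmware at a time: fw_status acts as
	 * a gate, with waiters sleeping on &sc->sc_fw until the current
	 * parser calls wakeup() at the end of this function.
	 */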
	if (fw->fw_status == IWM_FW_STATUS_DONE)
		return 0;

	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
		tsleep_nsec(&sc->sc_fw, 0, "iwmfwp", INFSLP);
	fw->fw_status = IWM_FW_STATUS_INPROGRESS;

	if (fw->fw_rawdata != NULL)
		iwm_fw_info_free(fw);

	err = loadfirmware(sc->sc_fwname,
	    (u_char **)&fw->fw_rawdata, &fw->fw_rawsize);
	if (err) {
		printf("%s: could not read firmware %s (error %d)\n",
		    DEVNAME(sc), sc->sc_fwname, err);
		goto out;
	}

	sc->sc_capaflags = 0;
	sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
	memset(sc->sc_ucode_api, 0, sizeof(sc->sc_ucode_api));
	sc->n_cmd_versions = 0;

	uhdr = (void *)fw->fw_rawdata;
	if (*(uint32_t *)fw->fw_rawdata != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		printf("%s: invalid firmware %s\n",
		    DEVNAME(sc), sc->sc_fwname);
		err = EINVAL;
		goto out;
	}

	iwm_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));

	data = uhdr->data;
	len = fw->fw_rawsize - sizeof(*uhdr);

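	/*
	 * The image after the header is a stream of TLV records: each
	 * struct iwm_ucode_tlv carries a little-endian type and length,
	 * followed by tlv_len payload bytes padded to a 4-byte boundary.
	 */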
	while (len >= sizeof(tlv)) {
		size_t tlv_len;
		void *tlv_data;

		memcpy(&tlv, data, sizeof(tlv));
		tlv_len = le32toh(tlv.length);
		tlv_type = le32toh(tlv.type);

		len -= sizeof(tlv);
		data += sizeof(tlv);
		tlv_data = data;

		if (len < tlv_len) {
			printf("%s: firmware too short: %zu bytes\n",
			    DEVNAME(sc), len);
			err = EINVAL;
			goto parse_out;
		}

		switch (tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_max_probe_len
			    = le32toh(*(uint32_t *)tlv_data);
			if (sc->sc_capa_max_probe_len >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			if (tlv_len) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			err = iwm_store_cscheme(sc, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU: {
			uint32_t num_cpu;
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			num_cpu = le32toh(*(uint32_t *)tlv_data);
			if (num_cpu < 1 || num_cpu > 2) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		}
		case IWM_UCODE_TLV_SEC_RT:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				err = EINVAL;
				goto parse_out;
			}
			err = iwm_set_default_calib(sc, tlv_data);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			struct iwm_ucode_api *api;
			int idx, i;
			if (tlv_len != sizeof(*api)) {
				err = EINVAL;
				goto parse_out;
			}
			api = (struct iwm_ucode_api *)tlv_data;
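			/*
			 * API flags arrive as 32-bit chunks of one large
			 * bitmap; api_index selects which chunk this TLV
			 * carries.
			 */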
			idx = le32toh(api->api_index);
			if (idx >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
				err = EINVAL;
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if ((le32toh(api->api_flags) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_ucode_api, i + (32 * idx));
			}
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			struct iwm_ucode_capa *capa;
			int idx, i;
			if (tlv_len != sizeof(*capa)) {
				err = EINVAL;
				goto parse_out;
			}
			capa = (struct iwm_ucode_capa *)tlv_data;
			idx = le32toh(capa->api_index);
			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_enabled_capa, i + (32 * idx));
			}
			break;
		}

		case IWM_UCODE_TLV_CMD_VERSIONS:
			if (tlv_len % sizeof(struct iwm_fw_cmd_version)) {
				tlv_len /= sizeof(struct iwm_fw_cmd_version);
				tlv_len *= sizeof(struct iwm_fw_cmd_version);
			}
			if (sc->n_cmd_versions != 0) {
				err = EINVAL;
				goto parse_out;
			}
			if (tlv_len > sizeof(sc->cmd_versions)) {
				err = EINVAL;
				goto parse_out;
			}
			memcpy(&sc->cmd_versions[0], tlv_data, tlv_len);
			sc->n_cmd_versions =
			    tlv_len / sizeof(struct iwm_fw_cmd_version);
			break;

		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
			    tlv_len);
			if (err)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_PAGING:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			paging_mem_size = le32toh(*(const uint32_t *)tlv_data);

			DPRINTF(("%s: Paging: paging enabled (size = %u bytes)\n",
			    DEVNAME(sc), paging_mem_size));
			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
				printf("%s: Driver only supports up to %u"
				    " bytes for paging image (%u requested)\n",
				    DEVNAME(sc), IWM_MAX_PAGING_IMAGE_SIZE,
				    paging_mem_size);
				err = EINVAL;
				goto out;
			}
			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
				printf("%s: Paging: image isn't multiple of %u\n",
				    DEVNAME(sc), IWM_FW_PAGING_SIZE);
				err = EINVAL;
				goto out;
			}

			fw->fw_sects[IWM_UCODE_TYPE_REGULAR].paging_mem_size =
			    paging_mem_size;
			usniffer_img = IWM_UCODE_TYPE_REGULAR_USNIFFER;
			fw->fw_sects[usniffer_img].paging_mem_size =
			    paging_mem_size;
			break;

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_n_scan_channels =
			    le32toh(*(uint32_t *)tlv_data);
			if (sc->sc_capa_n_scan_channels > IWM_MAX_SCAN_CHANNELS) {
				err = ERANGE;
				goto parse_out;
			}
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				err = EINVAL;
				goto parse_out;
			}

			iwm_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
			    le32toh(((uint32_t *)tlv_data)[0]),
			    le32toh(((uint32_t *)tlv_data)[1]),
			    le32toh(((uint32_t *)tlv_data)[2]));
			break;

		case IWM_UCODE_TLV_FW_DBG_DEST:
		case IWM_UCODE_TLV_FW_DBG_CONF:
		case IWM_UCODE_TLV_UMAC_DEBUG_ADDRS:
		case IWM_UCODE_TLV_LMAC_DEBUG_ADDRS:
		case IWM_UCODE_TLV_TYPE_DEBUG_INFO:
		case IWM_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
		case IWM_UCODE_TLV_TYPE_HCMD:
		case IWM_UCODE_TLV_TYPE_REGIONS:
		case IWM_UCODE_TLV_TYPE_TRIGGERS:
			break;

		case IWM_UCODE_TLV_HW_TYPE:
			break;

		case IWM_UCODE_TLV_FW_MEM_SEG:
			break;

		/* undocumented TLVs found in iwm-9000-43 image */
		case 0x1000003:
		case 0x1000004:
			break;

		default:
			err = EINVAL;
			goto parse_out;
		}

		/*
		 * Check for size_t overflow and ignore missing padding at
		 * the end of the firmware file.
		 */
		if (roundup(tlv_len, 4) > len)
			break;

		len -= roundup(tlv_len, 4);
		data += roundup(tlv_len, 4);
	}

	KASSERT(err == 0);

parse_out:
	if (err) {
		printf("%s: firmware parse error %d, "
		    "section type %d\n", DEVNAME(sc), err, tlv_type);
	}

out:
	if (err) {
		fw->fw_status = IWM_FW_STATUS_NONE;
		if (fw->fw_rawdata != NULL)
			iwm_fw_info_free(fw);
	} else
		fw->fw_status = IWM_FW_STATUS_DONE;
	wakeup(&sc->sc_fw);

	return err;
}

uint32_t
iwm_read_prph_unlocked(struct iwm_softc *sc, uint32_t addr)
{
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_READ_WRITE(sc);
	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
}

uint32_t
iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
{
	iwm_nic_assert_locked(sc);
	return iwm_read_prph_unlocked(sc, addr);
}

void
iwm_write_prph_unlocked(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_WRITE(sc);
	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
}

void
iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	iwm_nic_assert_locked(sc);
	iwm_write_prph_unlocked(sc, addr, val);
}

void
iwm_write_prph64(struct iwm_softc *sc, uint64_t addr, uint64_t val)
{
	iwm_write_prph(sc, (uint32_t)addr, val & 0xffffffff);
	iwm_write_prph(sc, (uint32_t)addr + 4, val >> 32);
}

int
iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
{
	int offs, err = 0;
	uint32_t *vals = buf;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
		iwm_nic_unlock(sc);
	} else {
		err = EBUSY;
	}
	return err;
}

int
iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
{
	int offs;
	const uint32_t *vals = buf;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
		/* WADDR auto-increments */
		for (offs = 0; offs < dwords; offs++) {
			uint32_t val = vals ? vals[offs] : 0;
			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
		}
		iwm_nic_unlock(sc);
	} else {
		return EBUSY;
	}
	return 0;
}

int
iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	return iwm_write_mem(sc, addr, &val, 1);
}

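/*
 * Poll a CSR register until (value & mask) == (bits & mask) or the
 * timeout expires; 'timo' is in microseconds, checked in 10 usec steps.
 * Returns 1 on success and 0 on timeout.
 */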
int
iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
    int timo)
{
	for (;;) {
		if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
			return 1;
		}
		if (timo < 10) {
			return 0;
		}
		timo -= 10;
		DELAY(10);
	}
}

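/*
 * Request MAC access (wake the NIC) by setting MAC_ACCESS_REQ and polling
 * for the clock-ready bit.  The lock nests: sc_nic_locks counts holders,
 * and each successful iwm_nic_lock() must be paired with an
 * iwm_nic_unlock(), which drops MAC_ACCESS_REQ once the count reaches zero.
 */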
int
iwm_nic_lock(struct iwm_softc *sc)
{
	if (sc->sc_nic_locks > 0) {
		iwm_nic_assert_locked(sc);
		sc->sc_nic_locks++;
		return 1; /* already locked */
	}

	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000)
		DELAY(2);

	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
		sc->sc_nic_locks++;
		return 1;
	}

	printf("%s: acquiring device failed\n", DEVNAME(sc));
	return 0;
}

void
iwm_nic_assert_locked(struct iwm_softc *sc)
{
	if (sc->sc_nic_locks <= 0)
		panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
}

void
iwm_nic_unlock(struct iwm_softc *sc)
{
	if (sc->sc_nic_locks > 0) {
		if (--sc->sc_nic_locks == 0)
			IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
			    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	} else
		printf("%s: NIC already unlocked\n", DEVNAME(sc));
}

int
iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
    uint32_t mask)
{
	uint32_t val;

	if (iwm_nic_lock(sc)) {
		val = iwm_read_prph(sc, reg) & mask;
		val |= bits;
		iwm_write_prph(sc, reg, val);
		iwm_nic_unlock(sc);
		return 0;
	}
	return EBUSY;
}

int
iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	return iwm_set_bits_mask_prph(sc, reg, bits, ~0);
}

int
iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	return iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
}

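/*
 * The usual four-step bus_dma(9) setup for one contiguous buffer: create
 * a DMA map, allocate a single physical segment, map it into kernel
 * virtual memory, then load the map so dma->paddr becomes valid; any
 * failure unwinds through iwm_dma_contig_free().
 */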
int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int nsegs, err;
	caddr_t va;

	dma->tag = tag;
	dma->size = size;

	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->map);
	if (err)
		goto fail;

	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;
	dma->vaddr = va;

	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	memset(dma->vaddr, 0, size);
	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
	dma->paddr = dma->map->dm_segs[0].ds_addr;

	return 0;

fail:	iwm_dma_contig_free(dma);
	return err;
}

void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
			bus_dmamem_free(dma->tag, &dma->seg, 1);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
}

int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	size_t descsz;
	int count, i, err;

	ring->cur = 0;

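	/*
	 * Multi-queue RX capable devices use a 512-entry ring of 64-bit
	 * "free" descriptors; older devices use 256 32-bit descriptors.
	 */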
1287 | if (sc->sc_mqrx_supported) { |
1288 | count = IWM_RX_MQ_RING_COUNT512; |
1289 | descsz = sizeof(uint64_t); |
1290 | } else { |
1291 | count = IWM_RX_RING_COUNT256; |
1292 | descsz = sizeof(uint32_t); |
1293 | } |
1294 | |
1295 | /* Allocate RX descriptors (256-byte aligned). */ |
1296 | size = count * descsz; |
1297 | err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size, 256); |
1298 | if (err) { |
1299 | printf("%s: could not allocate RX ring DMA memory\n", |
1300 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
1301 | goto fail; |
1302 | } |
1303 | ring->desc = ring->free_desc_dma.vaddr; |
1304 | |
1305 | /* Allocate RX status area (16-byte aligned). */ |
1306 | err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma, |
1307 | sizeof(*ring->stat), 16); |
1308 | if (err) { |
1309 | printf("%s: could not allocate RX status DMA memory\n", |
1310 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
1311 | goto fail; |
1312 | } |
1313 | ring->stat = ring->stat_dma.vaddr; |
1314 | |
1315 | if (sc->sc_mqrx_supported) { |
1316 | size = count * sizeof(uint32_t); |
1317 | err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma, |
1318 | size, 256); |
1319 | if (err) { |
1320 | printf("%s: could not allocate RX ring DMA memory\n", |
1321 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
1322 | goto fail; |
1323 | } |
1324 | } |
1325 | |
1326 | for (i = 0; i < count; i++) { |
1327 | struct iwm_rx_data *data = &ring->data[i]; |
1328 | |
1329 | memset(data, 0, sizeof(*data))__builtin_memset((data), (0), (sizeof(*data))); |
1330 | err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (4096 ), (1), (4096), (0), (0x0001 | 0x0002), (&data->map)) |
1331 | IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (4096 ), (1), (4096), (0), (0x0001 | 0x0002), (&data->map)) |
1332 | &data->map)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (4096 ), (1), (4096), (0), (0x0001 | 0x0002), (&data->map)); |
1333 | if (err) { |
1334 | printf("%s: could not create RX buf DMA map\n", |
1335 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
1336 | goto fail; |
1337 | } |
1338 | |
1339 | err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE4096, i); |
1340 | if (err) |
1341 | goto fail; |
1342 | } |
1343 | return 0; |
1344 | |
1345 | fail: iwm_free_rx_ring(sc, ring); |
1346 | return err; |
1347 | } |
1348 | |
1349 | void |
1350 | iwm_disable_rx_dma(struct iwm_softc *sc) |
1351 | { |
1352 | 	int ntries;
1353 | 
1354 | 	if (iwm_nic_lock(sc)) {
1355 | 		if (sc->sc_mqrx_supported) {
1356 | 			iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
1357 | 			for (ntries = 0; ntries < 1000; ntries++) {
1358 | 				if (iwm_read_prph(sc, IWM_RFH_GEN_STATUS) &
1359 | 				    IWM_RXF_DMA_IDLE)
1360 | 					break;
1361 | 				DELAY(10);
1362 | 			}
1363 | 		} else {
1364 | 			IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1365 | 			for (ntries = 0; ntries < 1000; ntries++) {
1366 | 				if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
1367 | 				    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
1368 | 					break;
1369 | 				DELAY(10);
1370 | 			}
1371 | 		}
1372 | 		iwm_nic_unlock(sc);
1373 | 	}
1374 | } |
1375 | |
1376 | void |
1377 | iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring) |
1378 | { |
1379 | 	ring->cur = 0;
1380 | 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1381 | 	    ring->stat_dma.size, BUS_DMASYNC_PREWRITE);
1382 | 	memset(ring->stat, 0, sizeof(*ring->stat));
1383 | 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1384 | 	    ring->stat_dma.size, BUS_DMASYNC_POSTWRITE);
1385 | |
1386 | } |
1387 | |
1388 | void |
1389 | iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring) |
1390 | { |
1391 | 	int count, i;
1392 | 
1393 | 	iwm_dma_contig_free(&ring->free_desc_dma);
1394 | 	iwm_dma_contig_free(&ring->stat_dma);
1395 | 	iwm_dma_contig_free(&ring->used_desc_dma);
1396 | 
1397 | 	if (sc->sc_mqrx_supported)
1398 | 		count = IWM_RX_MQ_RING_COUNT;
1399 | 	else
1400 | 		count = IWM_RX_RING_COUNT;
1401 | 
1402 | 	for (i = 0; i < count; i++) {
1403 | 		struct iwm_rx_data *data = &ring->data[i];
1404 | 
1405 | 		if (data->m != NULL) {
1406 | 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1407 | 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1408 | 			bus_dmamap_unload(sc->sc_dmat, data->map);
1409 | 			m_freem(data->m);
1410 | 			data->m = NULL;
1411 | 		}
1412 | 		if (data->map != NULL)
1413 | 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1414 | 	}
1415 | } |
1416 | |
1417 | int |
1418 | iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid) |
1419 | { |
1420 | 	bus_addr_t paddr;
1421 | 	bus_size_t size;
1422 | 	int i, err;
1423 | 
1424 | 	ring->qid = qid;
1425 | 	ring->queued = 0;
1426 | 	ring->cur = 0;
1427 | 	ring->tail = 0;
1428 | 
1429 | 	/* Allocate TX descriptors (256-byte aligned). */
1430 | 	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1431 | 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1432 | 	if (err) {
1433 | 		printf("%s: could not allocate TX ring DMA memory\n",
1434 | 		    DEVNAME(sc));
1435 | 		goto fail;
1436 | 	}
1437 | 	ring->desc = ring->desc_dma.vaddr;
1438 | 
1439 | 	/*
1440 | 	 * There is no need to allocate DMA buffers for unused rings.
1441 | 	 * 7k/8k/9k hardware supports up to 31 Tx rings, which is more
1442 | 	 * than we currently need.
1443 | 	 *
1444 | 	 * In DQA mode we use 1 command queue + 4 DQA mgmt/data queues.
1445 | 	 * The command queue is sc->txq[0], and the 4 mgmt/data frame queues
1446 | 	 * are sc->txq[IWM_DQA_MIN_MGMT_QUEUE + ac], i.e. sc->txq[5:8],
1447 | 	 * in order to provide one queue per EDCA category.
1448 | 	 * Tx aggregation requires additional queues, one queue per TID for
1449 | 	 * which aggregation is enabled. We map TID 0-7 to sc->txq[10:17].
1450 | 	 *
1451 | 	 * In non-DQA mode, we use rings 0 through 9 (0-3 are EDCA, 9 is cmd),
1452 | 	 * and Tx aggregation is not supported.
1453 | 	 *
1454 | 	 * Unfortunately, we cannot tell if DQA will be used until the
1455 | 	 * firmware gets loaded later, so just allocate sufficient rings
1456 | 	 * in order to satisfy both cases.
1457 | 	 */
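	/*
	 * Illustrative queue map implied by the comment above (editorial
	 * sketch; the exact numeric values come from the IWM_*QUEUE*
	 * constants in if_iwmreg.h):
	 *
	 *	DQA mode:	sc->txq[0]	command queue
	 *			sc->txq[5:8]	one EDCA queue per category
	 *			sc->txq[10:17]	Tx aggregation, TIDs 0-7
	 *	non-DQA mode:	sc->txq[0:3]	EDCA queues
	 *			sc->txq[9]	command queue
	 */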
1458 | 	if (qid > IWM_LAST_AGG_TX_QUEUE)
1459 | 		return 0;
1460 | 
1461 | 	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1462 | 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1463 | 	if (err) {
1464 | 		printf("%s: could not allocate cmd DMA memory\n", DEVNAME(sc));
1465 | 		goto fail;
1466 | 	}
1467 | 	ring->cmd = ring->cmd_dma.vaddr;
1468 | 
1469 | 	paddr = ring->cmd_dma.paddr;
1470 | 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1471 | 		struct iwm_tx_data *data = &ring->data[i];
1472 | 		size_t mapsize;
1473 | 
1474 | 		data->cmd_paddr = paddr;
1475 | 		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1476 | 		    + offsetof(struct iwm_tx_cmd, scratch);
1477 | 		paddr += sizeof(struct iwm_device_cmd);
1478 | 
1479 | 		/* FW commands may require more mapped space than packets. */
1480 | 		if (qid == IWM_CMD_QUEUE || qid == IWM_DQA_CMD_QUEUE)
1481 | 			mapsize = (sizeof(struct iwm_cmd_header) +
1482 | 			    IWM_MAX_CMD_PAYLOAD_SIZE);
1483 | 		else
1484 | 			mapsize = MCLBYTES;
1485 | 		err = bus_dmamap_create(sc->sc_dmat, mapsize,
1486 | 		    IWM_NUM_OF_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT,
1487 | 		    &data->map);
1488 | 		if (err) {
1489 | 			printf("%s: could not create TX buf DMA map\n",
1490 | 			    DEVNAME(sc));
1491 | 			goto fail;
1492 | 		}
1493 | 	}
1494 | 	KASSERT(paddr == ring->cmd_dma.paddr + size);
1495 | 	return 0;
1496 | 
1497 | fail:	iwm_free_tx_ring(sc, ring);
1498 | 	return err;
1499 | } |
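/*
 * Layout sketch for one slot of the command DMA area initialized above
 * (illustrative; the offsets follow from the sizeof/offsetof arithmetic
 * in the loop):
 *
 *	data->cmd_paddr     -> struct iwm_cmd_header
 *	data->scratch_paddr -> the 'scratch' member of the struct
 *	                       iwm_tx_cmd that follows the header
 *
 * and the next slot begins sizeof(struct iwm_device_cmd) bytes later,
 * which is what the KASSERT() at the end of the loop checks.
 */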
1500 | |
1501 | void |
1502 | iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring) |
1503 | { |
1504 | 	int i;
1505 | 
1506 | 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1507 | 		struct iwm_tx_data *data = &ring->data[i];
1508 | 
1509 | 		if (data->m != NULL) {
1510 | 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1511 | 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1512 | 			bus_dmamap_unload(sc->sc_dmat, data->map);
1513 | 			m_freem(data->m);
1514 | 			data->m = NULL;
1515 | 		}
1516 | 	}
1517 | 	/* Clear TX descriptors. */
1518 | 	memset(ring->desc, 0, ring->desc_dma.size);
1519 | 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
1520 | 	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
1521 | 	sc->qfullmsk &= ~(1 << ring->qid);
1522 | 	sc->qenablemsk &= ~(1 << ring->qid);
1523 | 	/* 7000 family NICs are locked while commands are in progress. */
1524 | 	if (ring->qid == sc->cmdqid && ring->queued > 0) {
1525 | 		if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
1526 | 			iwm_nic_unlock(sc);
1527 | 	}
1528 | 	ring->queued = 0;
1529 | 	ring->cur = 0;
1530 | 	ring->tail = 0;
1531 | } |
1532 | |
1533 | void |
1534 | iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring) |
1535 | { |
1536 | 	int i;
1537 | 
1538 | 	iwm_dma_contig_free(&ring->desc_dma);
1539 | 	iwm_dma_contig_free(&ring->cmd_dma);
1540 | 
1541 | 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1542 | 		struct iwm_tx_data *data = &ring->data[i];
1543 | 
1544 | 		if (data->m != NULL) {
1545 | 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1546 | 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1547 | 			bus_dmamap_unload(sc->sc_dmat, data->map);
1548 | 			m_freem(data->m);
1549 | 			data->m = NULL;
1550 | 		}
1551 | 		if (data->map != NULL)
1552 | 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1553 | 	}
1554 | } |
1555 | |
1556 | void |
1557 | iwm_enable_rfkill_int(struct iwm_softc *sc) |
1558 | { |
1559 | 	if (!sc->sc_msix) {
1560 | 		sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
1561 | 		IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1562 | 	} else {
1563 | 		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1564 | 		    sc->sc_fh_init_mask);
1565 | 		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1566 | 		    ~IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL);
1567 | 		sc->sc_hw_mask = IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL;
1568 | 	}
1569 | 
1570 | 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_9000)
1571 | 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
1572 | 		    IWM_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
1573 | } |
1574 | |
1575 | int |
1576 | iwm_check_rfkill(struct iwm_softc *sc) |
1577 | { |
1578 | 	uint32_t v;
1579 | 	int rv;
1580 | 
1581 | 	/*
1582 | 	 * "documentation" is not really helpful here:
1583 | 	 * 27: HW_RF_KILL_SW
1584 | 	 * Indicates state of (platform's) hardware RF-Kill switch
1585 | 	 *
1586 | 	 * But apparently when it's off, it's on ...
1587 | 	 */
1588 | 	v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
1589 | 	rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1590 | 	if (rv) {
1591 | 		sc->sc_flags |= IWM_FLAG_RFKILL;
1592 | 	} else {
1593 | 		sc->sc_flags &= ~IWM_FLAG_RFKILL;
1594 | 	}
1595 | 
1596 | 	return rv;
1597 | } |
1598 | |
1599 | void |
1600 | iwm_enable_interrupts(struct iwm_softc *sc) |
1601 | { |
1602 | 	if (!sc->sc_msix) {
1603 | 		sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1604 | 		IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1605 | 	} else {
1606 | 		/*
1607 | 		 * fh/hw_mask keeps all the unmasked causes.
1608 | 		 * Unlike MSI, an MSI-X cause is enabled when its mask bit is unset.
1609 | 		 */
1610 | 		sc->sc_hw_mask = sc->sc_hw_init_mask;
1611 | 		sc->sc_fh_mask = sc->sc_fh_init_mask;
1612 | 		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1613 | 		    ~sc->sc_fh_mask);
1614 | 		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1615 | 		    ~sc->sc_hw_mask);
1616 | 	}
1617 | } |
1618 | |
1619 | void |
1620 | iwm_enable_fwload_interrupt(struct iwm_softc *sc) |
1621 | { |
1622 | 	if (!sc->sc_msix) {
1623 | 		sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
1624 | 		IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1625 | 	} else {
1626 | 		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1627 | 		    sc->sc_hw_init_mask);
1628 | 		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1629 | 		    ~IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
1630 | 		sc->sc_fh_mask = IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM;
1631 | 	}
1632 | } |
1633 | |
1634 | void |
1635 | iwm_restore_interrupts(struct iwm_softc *sc) |
1636 | { |
1637 | 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1638 | } |
1639 | |
1640 | void |
1641 | iwm_disable_interrupts(struct iwm_softc *sc) |
1642 | { |
1643 | 	if (!sc->sc_msix) {
1644 | 		IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1645 | 
1646 | 		/* acknowledge all interrupts */
1647 | 		IWM_WRITE(sc, IWM_CSR_INT, ~0);
1648 | 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1649 | 	} else {
1650 | 		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1651 | 		    sc->sc_fh_init_mask);
1652 | 		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1653 | 		    sc->sc_hw_init_mask);
1654 | 	}
1655 | } |
1656 | |
1657 | void |
1658 | iwm_ict_reset(struct iwm_softc *sc) |
1659 | { |
1660 | 	iwm_disable_interrupts(sc);
1661 | 
1662 | 	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1663 | 	sc->ict_cur = 0;
1664 | 
1665 | 	/* Set physical address of ICT (4KB aligned). */
1666 | 	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1667 | 	    IWM_CSR_DRAM_INT_TBL_ENABLE
1668 | 	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1669 | 	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1670 | 	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1671 | 
1672 | 	/* Switch to ICT interrupt mode in driver. */
1673 | 	sc->sc_flags |= IWM_FLAG_USE_ICT;
1674 | 
1675 | 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1676 | 	iwm_enable_interrupts(sc);
1677 | } |
1678 | |
1679 | #define IWM_HW_READY_TIMEOUT 50 |
1680 | int |
1681 | iwm_set_hw_ready(struct iwm_softc *sc) |
1682 | { |
1683 | 	int ready;
1684 | 
1685 | 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1686 | 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
1687 | 
1688 | 	ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
1689 | 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1690 | 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1691 | 	    IWM_HW_READY_TIMEOUT);
1692 | 	if (ready)
1693 | 		IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
1694 | 		    IWM_CSR_MBOX_SET_REG_OS_ALIVE);
1695 | 
1696 | 	return ready;
1697 | } |
1698 | #undef IWM_HW_READY_TIMEOUT |
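/*
 * For reference, a minimal sketch of the iwm_poll_bit() pattern used
 * above (semantics inferred from the call sites: spin until
 * (reg & mask) == (bits & mask) or the timeout, in microseconds,
 * expires; a nonzero return means the bits were observed in time):
 *
 *	int
 *	poll_bit_sketch(struct iwm_softc *sc, int reg, uint32_t bits,
 *	    uint32_t mask, int timo)
 *	{
 *		while ((IWM_READ(sc, reg) & mask) != (bits & mask)) {
 *			if (timo < 10)
 *				return 0;
 *			DELAY(10);
 *			timo -= 10;
 *		}
 *		return 1;
 *	}
 */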
1699 | |
1700 | int |
1701 | iwm_prepare_card_hw(struct iwm_softc *sc) |
1702 | { |
1703 | 	int t = 0;
1704 | 	int ntries;
1705 | 
1706 | 	if (iwm_set_hw_ready(sc))
1707 | 		return 0;
1708 | 
1709 | 	IWM_SETBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1710 | 	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1711 | 	DELAY(1000);
1712 | 
1713 | 	for (ntries = 0; ntries < 10; ntries++) {
1714 | 		/* If HW is not ready, prepare the conditions to check again */
1715 | 		IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1716 | 		    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);
1717 | 
1718 | 		do {
1719 | 			if (iwm_set_hw_ready(sc))
1720 | 				return 0;
1721 | 			DELAY(200);
1722 | 			t += 200;
1723 | 		} while (t < 150000);
1724 | 		DELAY(25000);
1725 | 	}
1726 | 
1727 | 	return ETIMEDOUT;
1728 | } |
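/*
 * Worst-case wait, as a quick sketch of the arithmetic above: t is not
 * reset between outer iterations, so the inner do/while loops contribute
 * at most 150000us in total, and the ten outer DELAY(25000) calls add
 * another 250000us; together with the initial DELAY(1000) the function
 * gives up after roughly 400ms.
 */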
1729 | |
1730 | void |
1731 | iwm_apm_config(struct iwm_softc *sc) |
1732 | { |
1733 | 	pcireg_t lctl, cap;
1734 | 
1735 | 	/*
1736 | 	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
1737 | 	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
1738 | 	 * If so (likely), disable L0S, so device moves directly L0->L1;
1739 | 	 * this costs a negligible amount of power savings.
1740 | 	 * If not (unlikely), enable L0S, so there is at least some
1741 | 	 * power savings, even without L1.
1742 | 	 */
1743 | 	lctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1744 | 	    sc->sc_cap_off + PCI_PCIE_LCSR);
1745 | 	if (lctl & PCI_PCIE_LCSR_ASPM_L1) {
1746 | 		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
1747 | 		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1748 | 	} else {
1749 | 		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
1750 | 		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1751 | 	}
1752 | 
1753 | 	cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1754 | 	    sc->sc_cap_off + PCI_PCIE_DCSR2);
1755 | 	sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
1756 | 	DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
1757 | 	    DEVNAME(sc),
1758 | 	    (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
1759 | 	    sc->sc_ltr_enabled ? "En" : "Dis"));
1760 | } |
1761 | |
1762 | /*
1763 |  * Start up the NIC's basic functionality after it has been reset,
1764 |  * e.g. after platform boot or shutdown.
1765 |  * NOTE: This does not load uCode nor start the embedded processor.
1766 |  */
1767 | int |
1768 | iwm_apm_init(struct iwm_softc *sc) |
1769 | { |
1770 | 	int err = 0;
1771 | 
1772 | 	/* Disable L0S exit timer (platform NMI workaround) */
1773 | 	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000)
1774 | 		IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1775 | 		    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1776 | 
1777 | 	/*
1778 | 	 * Disable L0s without affecting L1;
1779 | 	 * don't wait for ICH L0s (ICH bug W/A)
1780 | 	 */
1781 | 	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1782 | 	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1783 | 
1784 | 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
1785 | 	IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
1786 | 
1787 | 	/*
1788 | 	 * Enable HAP INTA (interrupt from management bus) to
1789 | 	 * wake device's PCI Express link L1a -> L0s
1790 | 	 */
1791 | 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1792 | 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1793 | 
1794 | 	iwm_apm_config(sc);
1795 | 
1796 | #if 0 /* not for 7k/8k */
1797 | 	/* Configure analog phase-lock-loop before activating to D0A */
1798 | 	if (trans->cfg->base_params->pll_cfg_val)
1799 | 		IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
1800 | 		    trans->cfg->base_params->pll_cfg_val);
1801 | #endif
1802 | 
1803 | 	/*
1804 | 	 * Set "initialization complete" bit to move adapter from
1805 | 	 * D0U* --> D0A* (powered-up active) state.
1806 | 	 */
1807 | 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1808 | 
1809 | 	/*
1810 | 	 * Wait for clock stabilization; once stabilized, access to
1811 | 	 * device-internal resources is supported, e.g. iwm_write_prph()
1812 | 	 * and accesses to uCode SRAM.
1813 | 	 */
1814 | 	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1815 | 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1816 | 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
1817 | 		printf("%s: timeout waiting for clock stabilization\n",
1818 | 		    DEVNAME(sc));
1819 | 		err = ETIMEDOUT;
1820 | 		goto out;
1821 | 	}
1822 | 
1823 | 	if (sc->host_interrupt_operation_mode) {
1824 | 		/*
1825 | 		 * This is a bit of an abuse - this is needed for 7260 / 3160
1826 | 		 * only; check host_interrupt_operation_mode even if this is
1827 | 		 * not related to host_interrupt_operation_mode.
1828 | 		 *
1829 | 		 * Enable the oscillator to count wake up time for L1 exit. This
1830 | 		 * consumes slightly more power (100uA) - but allows to be sure
1831 | 		 * that we wake up from L1 on time.
1832 | 		 *
1833 | 		 * This looks weird: read twice the same register, discard the
1834 | 		 * value, set a bit, and yet again, read that same register
1835 | 		 * just to discard the value. But that's the way the hardware
1836 | 		 * seems to like it.
1837 | 		 */
1838 | 		if (iwm_nic_lock(sc)) {
1839 | 			iwm_read_prph(sc, IWM_OSC_CLK);
1840 | 			iwm_read_prph(sc, IWM_OSC_CLK);
1841 | 			iwm_nic_unlock(sc);
1842 | 		}
1843 | 		err = iwm_set_bits_prph(sc, IWM_OSC_CLK,
1844 | 		    IWM_OSC_CLK_FORCE_CONTROL);
1845 | 		if (err)
1846 | 			goto out;
1847 | 		if (iwm_nic_lock(sc)) {
1848 | 			iwm_read_prph(sc, IWM_OSC_CLK);
1849 | 			iwm_read_prph(sc, IWM_OSC_CLK);
1850 | 			iwm_nic_unlock(sc);
1851 | 		}
1852 | 	}
1853 | 
1854 | 	/*
1855 | 	 * Enable DMA clock and wait for it to stabilize.
1856 | 	 *
1857 | 	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1858 | 	 * do not disable clocks. This preserves any hardware bits already
1859 | 	 * set by default in "CLK_CTRL_REG" after reset.
1860 | 	 */
1861 | 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1862 | 		if (iwm_nic_lock(sc)) {
1863 | 			iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
1864 | 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1865 | 			iwm_nic_unlock(sc);
1866 | 		}
1867 | 		DELAY(20);
1868 | 
1869 | 		/* Disable L1-Active */
1870 | 		err = iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1871 | 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1872 | 		if (err)
1873 | 			goto out;
1874 | 
1875 | 		/* Clear the interrupt in APMG if the NIC is in RFKILL */
1876 | 		if (iwm_nic_lock(sc)) {
1877 | 			iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
1878 | 			    IWM_APMG_RTC_INT_STT_RFKILL);
1879 | 			iwm_nic_unlock(sc);
1880 | 		}
1881 | 	}
1882 | out:
1883 | 	if (err)
1884 | 		printf("%s: apm init error %d\n", DEVNAME(sc), err);
1885 | 	return err;
1886 | } |
1887 | |
1888 | void |
1889 | iwm_apm_stop(struct iwm_softc *sc) |
1890 | { |
1891 | 	IWM_SETBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1892 | 	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1893 | 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1894 | 	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE |
1895 | 	    IWM_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
1896 | 	DELAY(1000);
1897 | 	IWM_CLRBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1898 | 	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1899 | 	DELAY(5000);
1900 | 
1901 | 	/* stop device's busmaster DMA activity */
1902 | 	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);
1903 | 
1904 | 	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
1905 | 	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
1906 | 	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
1907 | 		printf("%s: timeout waiting for master\n", DEVNAME(sc));
1908 | 
1909 | 	/*
1910 | 	 * Clear "initialization complete" bit to move adapter from
1911 | 	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
1912 | 	 */
1913 | 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1914 | 	    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1915 | } |
1916 | |
1917 | void |
1918 | iwm_init_msix_hw(struct iwm_softc *sc) |
1919 | { |
1920 | 	iwm_conf_msix_hw(sc, 0);
1921 | 
1922 | 	if (!sc->sc_msix)
1923 | 		return;
1924 | 
1925 | 	sc->sc_fh_init_mask = ~IWM_READ(sc, IWM_CSR_MSIX_FH_INT_MASK_AD);
1926 | 	sc->sc_fh_mask = sc->sc_fh_init_mask;
1927 | 	sc->sc_hw_init_mask = ~IWM_READ(sc, IWM_CSR_MSIX_HW_INT_MASK_AD);
1928 | 	sc->sc_hw_mask = sc->sc_hw_init_mask;
1929 | } |
1930 | |
1931 | void |
1932 | iwm_conf_msix_hw(struct iwm_softc *sc, int stopped) |
1933 | { |
1934 | 	int vector = 0;
1935 | 
1936 | 	if (!sc->sc_msix) {
1937 | 		/* Newer chips default to MSIX. */
1938 | 		if (sc->sc_mqrx_supported && !stopped && iwm_nic_lock(sc)) {
1939 | 			iwm_write_prph(sc, IWM_UREG_CHICK,
1940 | 			    IWM_UREG_CHICK_MSI_ENABLE);
1941 | 			iwm_nic_unlock(sc);
1942 | 		}
1943 | 		return;
1944 | 	}
1945 | 
1946 | 	if (!stopped && iwm_nic_lock(sc)) {
1947 | 		iwm_write_prph(sc, IWM_UREG_CHICK, IWM_UREG_CHICK_MSIX_ENABLE);
1948 | 		iwm_nic_unlock(sc);
1949 | 	}
1950 | 
1951 | 	/* Disable all interrupts */
1952 | 	IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD, ~0);
1953 | 	IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD, ~0);
1954 | 
1955 | 	/* Map fallback-queue (command/mgmt) to a single vector */
1956 | 	IWM_WRITE_1(sc, IWM_CSR_MSIX_RX_IVAR(0),
1957 | 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1958 | 	/* Map RSS queue (data) to the same vector */
1959 | 	IWM_WRITE_1(sc, IWM_CSR_MSIX_RX_IVAR(1),
1960 | 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1961 | 
1962 | 	/* Enable the RX queues' cause interrupts */
1963 | 	IWM_CLRBITS(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1964 | 	    IWM_MSIX_FH_INT_CAUSES_Q0 | IWM_MSIX_FH_INT_CAUSES_Q1);
1965 | 
1966 | 	/* Map non-RX causes to the same vector */
1967 | 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
1968 | 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1969 | 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
1970 | 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1971 | 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_S2D),
1972 | 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1973 | 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_FH_ERR),
1974 | 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1975 | 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_ALIVE),
1976 | 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1977 | 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_WAKEUP),
1978 | 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1979 | 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_IML),
1980 | 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1981 | 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_CT_KILL),
1982 | 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1983 | 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_RF_KILL),
1984 | 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1985 | 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_PERIODIC),
1986 | 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1987 | 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_SW_ERR),
1988 | 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1989 | 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_SCD),
1990 | 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1991 | 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_FH_TX),
1992 | 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1993 | 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_HW_ERR),
1994 | 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1995 | 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_HAP),
1996 | 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1997 | 
1998 | 	/* Enable the non-RX cause interrupts */
1999 | 	IWM_CLRBITS(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
2000 | 	    IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
2001 | 	    IWM_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
2002 | 	    IWM_MSIX_FH_INT_CAUSES_S2D |
2003 | 	    IWM_MSIX_FH_INT_CAUSES_FH_ERR);
2004 | 	IWM_CLRBITS(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
2005 | 	    IWM_MSIX_HW_INT_CAUSES_REG_ALIVE |
2006 | 	    IWM_MSIX_HW_INT_CAUSES_REG_WAKEUP |
2007 | 	    IWM_MSIX_HW_INT_CAUSES_REG_IML |
2008 | 	    IWM_MSIX_HW_INT_CAUSES_REG_CT_KILL |
2009 | 	    IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL |
2010 | 	    IWM_MSIX_HW_INT_CAUSES_REG_PERIODIC |
2011 | 	    IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR |
2012 | 	    IWM_MSIX_HW_INT_CAUSES_REG_SCD |
2013 | 	    IWM_MSIX_HW_INT_CAUSES_REG_FH_TX |
2014 | 	    IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR |
2015 | 	    IWM_MSIX_HW_INT_CAUSES_REG_HAP);
2016 | } |
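/*
 * The run of IWM_WRITE_1() calls above is equivalent to iterating over
 * a table of causes; a condensed sketch (hypothetical helper, not part
 * of the driver):
 *
 *	static const int causes[] = {
 *		IWM_MSIX_IVAR_CAUSE_D2S_CH0_NUM,
 *		IWM_MSIX_IVAR_CAUSE_D2S_CH1_NUM,
 *		IWM_MSIX_IVAR_CAUSE_S2D,
 *		IWM_MSIX_IVAR_CAUSE_FH_ERR,
 *		... the remaining IWM_MSIX_IVAR_CAUSE_REG_* values ...
 *	};
 *	int i;
 *
 *	for (i = 0; i < nitems(causes); i++)
 *		IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(causes[i]),
 *		    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
 */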
2017 | |
2018 | int |
2019 | iwm_clear_persistence_bit(struct iwm_softc *sc) |
2020 | { |
2021 | 	uint32_t hpm, wprot;
2022 | 
2023 | 	hpm = iwm_read_prph_unlocked(sc, IWM_HPM_DEBUG);
2024 | 	if (hpm != 0xa5a5a5a0 && (hpm & IWM_HPM_PERSISTENCE_BIT)) {
2025 | 		wprot = iwm_read_prph_unlocked(sc, IWM_PREG_PRPH_WPROT_9000);
2026 | 		if (wprot & IWM_PREG_WFPM_ACCESS) {
2027 | 			printf("%s: cannot clear persistence bit\n",
2028 | 			    DEVNAME(sc));
2029 | 			return EPERM;
2030 | 		}
2031 | 		iwm_write_prph_unlocked(sc, IWM_HPM_DEBUG,
2032 | 		    hpm & ~IWM_HPM_PERSISTENCE_BIT);
2033 | 	}
2034 | 
2035 | 	return 0;
2036 | } |
2037 | |
2038 | int |
2039 | iwm_start_hw(struct iwm_softc *sc) |
2040 | { |
2041 | 	int err;
2042 | 
2043 | 	err = iwm_prepare_card_hw(sc);
2044 | 	if (err)
2045 | 		return err;
2046 | 
2047 | 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000) {
2048 | 		err = iwm_clear_persistence_bit(sc);
2049 | 		if (err)
2050 | 			return err;
2051 | 	}
2052 | 
2053 | 	/* Reset the entire device */
2054 | 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
2055 | 	DELAY(5000);
2056 | 
2057 | 	err = iwm_apm_init(sc);
2058 | 	if (err)
2059 | 		return err;
2060 | 
2061 | 	iwm_init_msix_hw(sc);
2062 | 
2063 | 	iwm_enable_rfkill_int(sc);
2064 | 	iwm_check_rfkill(sc);
2065 | 
2066 | 	return 0;
2067 | } |
2068 | |
2069 | |
2070 | void |
2071 | iwm_stop_device(struct iwm_softc *sc) |
2072 | { |
2073 | 	int chnl, ntries;
2074 | 	int qid;
2075 | 
2076 | 	iwm_disable_interrupts(sc);
2077 | 	sc->sc_flags &= ~IWM_FLAG_USE_ICT;
2078 | 
2079 | 	/* Stop all DMA channels. */
2080 | 	if (iwm_nic_lock(sc)) {
2081 | 		/* Deactivate TX scheduler. */
2082 | 		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
2083 | 
2084 | 		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
2085 | 			IWM_WRITE(sc,
2086 | 			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
2087 | 			for (ntries = 0; ntries < 200; ntries++) {
2088 | 				uint32_t r;
2089 | 
2090 | 				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
2091 | 				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
2092 | 				    chnl))
2093 | 					break;
2094 | 				DELAY(20);
2095 | 			}
2096 | 		}
2097 | 		iwm_nic_unlock(sc);
2098 | 	}
2099 | 	iwm_disable_rx_dma(sc);
2100 | 
2101 | 	iwm_reset_rx_ring(sc, &sc->rxq);
2102 | 
2103 | 	for (qid = 0; qid < nitems(sc->txq); qid++)
2104 | 		iwm_reset_tx_ring(sc, &sc->txq[qid]);
2105 | 
2106 | 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2107 | 		if (iwm_nic_lock(sc)) {
2108 | 			/* Power-down device's busmaster DMA clocks */
2109 | 			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
2110 | 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
2111 | 			iwm_nic_unlock(sc);
2112 | 		}
2113 | 		DELAY(5);
2114 | 	}
2115 | 
2116 | 	/* Make sure (redundant) we've released our request to stay awake */
2117 | 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
2118 | 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2119 | 	if (sc->sc_nic_locks > 0)
2120 | 		printf("%s: %d active NIC locks forcefully cleared\n",
2121 | 		    DEVNAME(sc), sc->sc_nic_locks);
2122 | 	sc->sc_nic_locks = 0;
2123 | 
2124 | 	/* Stop the device, and put it in low power state */
2125 | 	iwm_apm_stop(sc);
2126 | 
2127 | 	/* Reset the on-board processor. */
2128 | 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
2129 | 	DELAY(5000);
2130 | 
2131 | 	/*
2132 | 	 * Upon stop, the IVAR table gets erased, so msi-x won't
2133 | 	 * work. This causes a bug in RF-KILL flows, since the interrupt
2134 | 	 * that enables the radio won't fire on the correct irq, and the
2135 | 	 * driver won't be able to handle the interrupt.
2136 | 	 * Configure the IVAR table again after reset.
2137 | 	 */
2138 | 	iwm_conf_msix_hw(sc, 1);
2139 | 
2140 | 	/*
2141 | 	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
2142 | 	 * Clear the interrupt again.
2143 | 	 */
2144 | 	iwm_disable_interrupts(sc);
2145 | 
2146 | 	/* Even though we stop the HW we still want the RF kill interrupt. */
2147 | 	iwm_enable_rfkill_int(sc);
2148 | 	iwm_check_rfkill(sc);
2149 | 
2150 | 	iwm_prepare_card_hw(sc);
2151 | } |
2152 | |
2153 | void |
2154 | iwm_nic_config(struct iwm_softc *sc) |
2155 | { |
2156 | 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
2157 | 	uint32_t mask, val, reg_val = 0;
2158 | 
2159 | 	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
2160 | 	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
2161 | 	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
2162 | 	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
2163 | 	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
2164 | 	    IWM_FW_PHY_CFG_RADIO_DASH_POS;
2165 | 
2166 | 	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
2167 | 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
2168 | 	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
2169 | 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
2170 | 
2171 | 	/* radio configuration */
2172 | 	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
2173 | 	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
2174 | 	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
2175 | 
2176 | 	mask = IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
2177 | 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
2178 | 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
2179 | 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
2180 | 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
2181 | 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
2182 | 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
2183 | 
2184 | 	val = IWM_READ(sc, IWM_CSR_HW_IF_CONFIG_REG);
2185 | 	val &= ~mask;
2186 | 	val |= reg_val;
2187 | 	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, val);
2188 | 
2189 | 	/*
2190 | 	 * W/A : NIC is stuck in a reset state after Early PCIe power off
2191 | 	 * (PCIe power is lost before PERST# is asserted), causing ME FW
2192 | 	 * to lose ownership and not being able to obtain it back.
2193 | 	 */
2194 | 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2195 | 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
2196 | 		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
2197 | 		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
2198 | } |
2199 | |
2200 | int |
2201 | iwm_nic_rx_init(struct iwm_softc *sc) |
2202 | { |
2203 | 	if (sc->sc_mqrx_supported)
2204 | 		return iwm_nic_rx_mq_init(sc);
2205 | 	else
2206 | 		return iwm_nic_rx_legacy_init(sc);
2207 | } |
2208 | |
2209 | int |
2210 | iwm_nic_rx_mq_init(struct iwm_softc *sc) |
2211 | { |
2212 | int enabled; |
2213 | |
2214 | if (!iwm_nic_lock(sc)) |
2215 | return EBUSY16; |
2216 | |
2217 | /* Stop RX DMA. */ |
2218 | iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG0xA09820, 0); |
2219 | /* Disable RX used and free queue operation. */ |
2220 | iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE0xA0980C, 0); |
2221 | |
2222 | iwm_write_prph64(sc, IWM_RFH_Q0_FRBDCB_BA_LSB0xA08000, |
2223 | sc->rxq.free_desc_dma.paddr); |
2224 | iwm_write_prph64(sc, IWM_RFH_Q0_URBDCB_BA_LSB0xA08100, |
2225 | sc->rxq.used_desc_dma.paddr); |
2226 | iwm_write_prph64(sc, IWM_RFH_Q0_URBD_STTS_WPTR_LSB0xA08200, |
2227 | sc->rxq.stat_dma.paddr); |
2228 | iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_WIDX0xA08080, 0); |
2229 | iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_RIDX0xA080C0, 0); |
2230 | iwm_write_prph(sc, IWM_RFH_Q0_URBDCB_WIDX0xA08180, 0); |
2231 | |
2232 | /* We configure only queue 0 for now. */ |
2233 | enabled = ((1 << 0) << 16) | (1 << 0); |
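     | /*
     |  * Each bit in the low halfword of IWM_RFH_RXF_RXQ_ACTIVE appears to
     |  * enable the "free" descriptor ring of the corresponding queue, and
     |  * the matching bit in the high halfword its "used" ring; here both
     |  * are set for queue 0 only.
     |  */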
2234 | |
2235 | /* Enable RX DMA, 4KB buffer size. */ |
2236 | iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG0xA09820, |
2237 | IWM_RFH_DMA_EN_ENABLE_VAL(1U << 31) | |
2238 | IWM_RFH_RXF_DMA_RB_SIZE_4K(0x4 << 16) | |
2239 | IWM_RFH_RXF_DMA_MIN_RB_4_8(3 << 24) | |
2240 | IWM_RFH_RXF_DMA_DROP_TOO_LARGE_MASK(0x04000000) | |
2241 | IWM_RFH_RXF_DMA_RBDCB_SIZE_512(0x9 << 20)); |
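     | /*
     |  * Per the expanded constants above: bit 31 enables DMA, the RB size
     |  * field at bits 16-19 is set to 0x4 (4KB buffers), the RBD circular
     |  * buffer size field at bits 20-23 to 0x9 (512 descriptors), the
     |  * minimum RB size field at bits 24-25 to 3 (4/8 RBs), and bit 26
     |  * drops oversized RBs.
     |  */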
2242 | |
2243 | /* Enable RX DMA snooping. */ |
2244 | iwm_write_prph(sc, IWM_RFH_GEN_CFG0xA09800, |
2245 | IWM_RFH_GEN_CFG_RFH_DMA_SNOOP(1 << 1) | |
2246 | IWM_RFH_GEN_CFG_SERVICE_DMA_SNOOP(1 << 0) | |
2247 | (sc->sc_integrated ? IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_640x00000000 : |
2248 | IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_1280x00000010)); |
2249 | |
2250 | /* Enable the configured queue(s). */ |
2251 | iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE0xA0980C, enabled); |
2252 | |
2253 | iwm_nic_unlock(sc); |
2254 | |
2255 | IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF)(((sc)->sc_st)->write_1(((sc)->sc_sh), (((0x004))), ( ((0x40))))); |
2256 | |
2257 | IWM_WRITE(sc, IWM_RFH_Q0_FRBDCB_WIDX_TRG, 8)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((0x1C80)), ( (8)))); |
2258 | |
2259 | return 0; |
2260 | } |
2261 | |
2262 | int |
2263 | iwm_nic_rx_legacy_init(struct iwm_softc *sc) |
2264 | { |
2265 | memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat))__builtin_memset((sc->rxq.stat), (0), (sizeof(*sc->rxq. stat))); |
2266 | |
2267 | iwm_disable_rx_dma(sc); |
2268 | |
2269 | if (!iwm_nic_lock(sc)) |
2270 | return EBUSY16; |
2271 | |
2272 | /* reset and flush pointers */ |
2273 | IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((((0x1000) + 0xC00)) + 0x8))), ((0)))); |
2274 | IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((((0x1000) + 0xC00)) + 0x10))), ((0)))); |
2275 | IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((((0x1000) + 0xBC0)) + 0x00c))), ((0)))); |
2276 | IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((((0x1000) + 0xBC0)) + 0x008))), ((0)))); |
2277 | |
2278 | /* Set physical address of RX ring (256-byte aligned). */ |
2279 | IWM_WRITE(sc,
2280 | IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.free_desc_dma.paddr >> 8);
2281 | |
2282 | /* Set physical address of RX status (16-byte aligned). */ |
2283 | IWM_WRITE(sc,
2284 | IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
2285 | |
2286 | /* Enable RX. */ |
2287 | IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
2288 | IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
2289 | IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | /* HW bug */
2290 | IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
2291 | (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
2292 | IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
2293 | IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
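     | /*
     |  * IWM_RX_QUEUE_SIZE_LOG is 8, so the RB descriptor circular buffer
     |  * holds 2^8 = 256 entries; IWM_RX_RB_TIMEOUT (0x11) fills the RB
     |  * interrupt timeout field starting at bit 4.
     |  */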
2294 | |
2295 | IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF)(((sc)->sc_st)->write_1(((sc)->sc_sh), (((0x004))), ( ((0x40))))); |
2296 | |
2297 | /* W/A for interrupt coalescing bug in 7260 and 3160 */ |
2298 | if (sc->host_interrupt_operation_mode) |
2299 | IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x004))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x004))))) | ((1U << 31)))))); |
2300 | |
2301 | iwm_nic_unlock(sc); |
2302 | |
2303 | /* |
2304 | * This value should initially be 0 (before preparing any RBs), |
2305 | * and should be 8 after preparing the first 8 RBs (for example). |
2306 | */ |
2307 | IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((((((0x1000 ) + 0xBC0)) + 0x008)))), ((8)))); |
2308 | |
2309 | return 0; |
2310 | } |
2311 | |
2312 | int |
2313 | iwm_nic_tx_init(struct iwm_softc *sc) |
2314 | { |
2315 | int qid, err; |
2316 | |
2317 | if (!iwm_nic_lock(sc)) |
2318 | return EBUSY16; |
2319 | |
2320 | /* Deactivate TX scheduler. */ |
2321 | iwm_write_prph(sc, IWM_SCD_TXFACT(((0x00000) + 0xa02c00) + 0x10), 0); |
2322 | |
2323 | /* Set physical address of "keep warm" page (16-byte aligned). */ |
2324 | IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x1000) + 0x97C))), ((sc->kw_dma.paddr >> 4)))); |
2325 | |
2326 | for (qid = 0; qid < nitems(sc->txq)(sizeof((sc->txq)) / sizeof((sc->txq)[0])); qid++) { |
2327 | struct iwm_tx_ring *txq = &sc->txq[qid]; |
2328 | |
2329 | /* Set physical address of TX ring (256-byte aligned). */ |
2330 | IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
2331 | txq->desc_dma.paddr >> 8);
2332 | } |
2333 | |
2334 | err = iwm_set_bits_prph(sc, IWM_SCD_GP_CTRL(((0x00000) + 0xa02c00) + 0x1a8), |
2335 | IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE(1 << 18) | |
2336 | IWM_SCD_GP_CTRL_ENABLE_31_QUEUES(1 << 0)); |
2337 | |
2338 | iwm_nic_unlock(sc); |
2339 | |
2340 | return err; |
2341 | } |
2342 | |
2343 | int |
2344 | iwm_nic_init(struct iwm_softc *sc) |
2345 | { |
2346 | int err; |
2347 | |
2348 | iwm_apm_init(sc); |
2349 | if (sc->sc_device_family == IWM_DEVICE_FAMILY_70001) |
2350 | iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG(((0x00000) + 0x3000) + 0x000c), |
2351 | IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN(0x00000000), |
2352 | ~IWM_APMG_PS_CTRL_MSK_PWR_SRC(0x03000000)); |
2353 | |
2354 | iwm_nic_config(sc); |
2355 | |
2356 | err = iwm_nic_rx_init(sc); |
2357 | if (err) |
2358 | return err; |
2359 | |
2360 | err = iwm_nic_tx_init(sc); |
2361 | if (err) |
2362 | return err; |
2363 | |
2364 | IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x0A8))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x0A8))))) | (0x800fffff))))); |
2365 | |
2366 | return 0; |
2367 | } |
2368 | |
2369 | /* Map a TID to an ieee80211_edca_ac category. */ |
2370 | const uint8_t iwm_tid_to_ac[IWM_MAX_TID_COUNT8] = { |
2371 | EDCA_AC_BE, |
2372 | EDCA_AC_BK, |
2373 | EDCA_AC_BK, |
2374 | EDCA_AC_BE, |
2375 | EDCA_AC_VI, |
2376 | EDCA_AC_VI, |
2377 | EDCA_AC_VO, |
2378 | EDCA_AC_VO, |
2379 | }; |
2380 | |
2381 | /* Map ieee80211_edca_ac categories to firmware Tx FIFO. */ |
2382 | const uint8_t iwm_ac_to_tx_fifo[] = { |
2383 | IWM_TX_FIFO_BE1, |
2384 | IWM_TX_FIFO_BK0, |
2385 | IWM_TX_FIFO_VI2, |
2386 | IWM_TX_FIFO_VO3, |
2387 | }; |
2388 | |
2389 | int |
2390 | iwm_enable_ac_txq(struct iwm_softc *sc, int qid, int fifo) |
2391 | { |
2392 | int err; |
2393 | iwm_nic_assert_locked(sc); |
2394 | |
2395 | IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x400)+0x060 ))), ((qid << 8 | 0)))); |
2396 | |
2397 | iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid), |
2398 | (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE(3)) |
2399 | | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN(19))); |
2400 | |
2401 | err = iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL(((0x00000) + 0xa02c00) + 0x248), (1 << qid)); |
2402 | if (err) { |
2403 | return err; |
2404 | } |
2405 | |
2406 | iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0); |
2407 | |
2408 | iwm_write_mem32(sc, |
2409 | sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid)(((0x0000) + 0x600) + ((qid) * 8)), 0); |
2410 | |
2411 | /* Set scheduler window size and frame limit. */ |
2412 | iwm_write_mem32(sc, |
2413 | sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid)(((0x0000) + 0x600) + ((qid) * 8)) + |
2414 | sizeof(uint32_t), |
2415 | ((IWM_FRAME_LIMIT64 << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS(0)) & |
2416 | IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK(0x0000007F)) | |
2417 | ((IWM_FRAME_LIMIT64 |
2418 | << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS(16)) & |
2419 | IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK(0x007F0000))); |
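     | /*
     |  * Both the scheduler window size and the frame limit are set to 64
     |  * frames here; the second context word packs the window size into
     |  * bits 0-6 and the frame limit into bits 16-22, per the masks above.
     |  */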
2420 | |
2421 | iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid), |
2422 | (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE(3)) | |
2423 | (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF(0)) | |
2424 | (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL(4)) | |
2425 | IWM_SCD_QUEUE_STTS_REG_MSK(0x017F0000)); |
2426 | |
2427 | if (qid == sc->cmdqid) |
2428 | iwm_write_prph(sc, IWM_SCD_EN_CTRL(((0x00000) + 0xa02c00) + 0x254), |
2429 | iwm_read_prph(sc, IWM_SCD_EN_CTRL(((0x00000) + 0xa02c00) + 0x254)) | (1 << qid)); |
2430 | |
2431 | return 0; |
2432 | } |
2433 | |
2434 | int |
2435 | iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo, |
2436 | int aggregate, uint8_t tid, uint16_t ssn) |
2437 | { |
2438 | struct iwm_tx_ring *ring = &sc->txq[qid]; |
2439 | struct iwm_scd_txq_cfg_cmd cmd; |
2440 | int err, idx, scd_bug; |
2441 | |
2442 | iwm_nic_assert_locked(sc); |
2443 | |
2444 | /* |
2445 | * If we need to move the SCD write pointer by steps of |
2446 | * 0x40, 0x80 or 0xc0, it gets stuck. |
2447 | * This is really ugly, but this is the easiest way out for |
2448 | * this sad hardware issue. |
2449 | * This bug has been fixed on devices 9000 and up. |
2450 | */ |
2451 | scd_bug = !sc->sc_mqrx_supported && |
2452 | !((ssn - ring->cur) & 0x3f) && |
2453 | (ssn != ring->cur); |
2454 | if (scd_bug) |
2455 | ssn = (ssn + 1) & 0xfff; |
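     | /*
     |  * Example: with ring->cur == 0 and ssn == 0x40, (ssn - ring->cur)
     |  * is a non-zero multiple of 0x40, so the workaround bumps ssn to
     |  * 0x41; the frame with sequence number 0x40 must then be skipped
     |  * later (see iwm_sta_tx_agg).
     |  */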
2456 | |
2457 | idx = IWM_AGG_SSN_TO_TXQ_IDX(ssn)((ssn) & (256 - 1)); |
2458 | IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | idx)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x400)+0x060 ))), ((qid << 8 | idx)))); |
2459 | ring->cur = idx; |
2460 | ring->tail = idx; |
2461 | |
2462 | memset(&cmd, 0, sizeof(cmd))__builtin_memset((&cmd), (0), (sizeof(cmd))); |
2463 | cmd.tid = tid; |
2464 | cmd.scd_queue = qid; |
2465 | cmd.enable = 1; |
2466 | cmd.sta_id = sta_id; |
2467 | cmd.tx_fifo = fifo; |
2468 | cmd.aggregate = aggregate; |
2469 | cmd.ssn = htole16(ssn)((__uint16_t)(ssn)); |
2470 | cmd.window = IWM_FRAME_LIMIT64; |
2471 | |
2472 | err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG0x1d, 0, |
2473 | sizeof(cmd), &cmd); |
2474 | if (err) |
2475 | return err; |
2476 | |
2477 | sc->qenablemsk |= (1 << qid); |
2478 | return 0; |
2479 | } |
2480 | |
2481 | int |
2482 | iwm_disable_txq(struct iwm_softc *sc, int sta_id, int qid, uint8_t tid) |
2483 | { |
2484 | struct iwm_scd_txq_cfg_cmd cmd; |
2485 | int err; |
2486 | |
2487 | memset(&cmd, 0, sizeof(cmd))__builtin_memset((&cmd), (0), (sizeof(cmd))); |
2488 | cmd.tid = tid; |
2489 | cmd.scd_queue = qid; |
2490 | cmd.enable = 0; |
2491 | cmd.sta_id = sta_id; |
2492 | |
2493 | err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG0x1d, 0, sizeof(cmd), &cmd); |
2494 | if (err) |
2495 | return err; |
2496 | |
2497 | sc->qenablemsk &= ~(1 << qid); |
2498 | return 0; |
2499 | } |
2500 | |
2501 | int |
2502 | iwm_post_alive(struct iwm_softc *sc) |
2503 | { |
2504 | int nwords; |
2505 | int err, chnl; |
2506 | uint32_t base; |
2507 | |
2508 | if (!iwm_nic_lock(sc)) |
2509 | return EBUSY16; |
2510 | |
2511 | base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR(((0x00000) + 0xa02c00) + 0x0)); |
2512 | |
2513 | iwm_ict_reset(sc); |
2514 | |
2515 | iwm_nic_unlock(sc); |
2516 | |
2517 | /* Clear TX scheduler state in SRAM. */ |
2518 | nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND((0x0000) + 0x808) - |
2519 | IWM_SCD_CONTEXT_MEM_LOWER_BOUND((0x0000) + 0x600)) |
2520 | / sizeof(uint32_t); |
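     | /*
     |  * With the bounds above this clears (0x808 - 0x600) / 4 = 130
     |  * 32-bit words of scheduler context in SRAM.
     |  */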
2521 | err = iwm_write_mem(sc, |
2522 | sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND((0x0000) + 0x600), |
2523 | NULL((void *)0), nwords); |
2524 | if (err) |
2525 | return err; |
2526 | |
2527 | if (!iwm_nic_lock(sc)) |
2528 | return EBUSY16; |
2529 | |
2530 | /* Set physical address of TX scheduler rings (1KB aligned). */ |
2531 | iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR(((0x00000) + 0xa02c00) + 0x8), sc->sched_dma.paddr >> 10); |
2532 | |
2533 | iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN(((0x00000) + 0xa02c00) + 0x244), 0); |
2534 | |
2535 | /* enable command channel */ |
2536 | err = iwm_enable_ac_txq(sc, sc->cmdqid, IWM_TX_FIFO_CMD7); |
2537 | if (err) { |
2538 | iwm_nic_unlock(sc); |
2539 | return err; |
2540 | } |
2541 | |
2542 | /* Activate TX scheduler. */ |
2543 | iwm_write_prph(sc, IWM_SCD_TXFACT(((0x00000) + 0xa02c00) + 0x10), 0xff); |
2544 | |
2545 | /* Enable DMA channels. */ |
2546 | for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM(8); chnl++) { |
2547 | IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
2548 | IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2549 | IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
2550 | } |
2551 | |
2552 | IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
2553 | IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
2554 | |
2555 | iwm_nic_unlock(sc); |
2556 | |
2557 | /* Enable L1-Active */ |
2558 | if (sc->sc_device_family < IWM_DEVICE_FAMILY_80002) { |
2559 | err = iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG(((0x00000) + 0x3000) + 0x0010), |
2560 | IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS(0x00000800)); |
2561 | } |
2562 | |
2563 | return err; |
2564 | } |
2565 | |
2566 | struct iwm_phy_db_entry * |
2567 | iwm_phy_db_get_section(struct iwm_softc *sc, uint16_t type, uint16_t chg_id) |
2568 | { |
2569 | struct iwm_phy_db *phy_db = &sc->sc_phy_db; |
2570 | |
2571 | if (type >= IWM_PHY_DB_MAX6) |
2572 | return NULL((void *)0); |
2573 | |
2574 | switch (type) { |
2575 | case IWM_PHY_DB_CFG1: |
2576 | return &phy_db->cfg; |
2577 | case IWM_PHY_DB_CALIB_NCH2: |
2578 | return &phy_db->calib_nch; |
2579 | case IWM_PHY_DB_CALIB_CHG_PAPD4: |
2580 | if (chg_id >= IWM_NUM_PAPD_CH_GROUPS9) |
2581 | return NULL((void *)0); |
2582 | return &phy_db->calib_ch_group_papd[chg_id]; |
2583 | case IWM_PHY_DB_CALIB_CHG_TXP5: |
2584 | if (chg_id >= IWM_NUM_TXP_CH_GROUPS9) |
2585 | return NULL((void *)0); |
2586 | return &phy_db->calib_ch_group_txp[chg_id]; |
2587 | default: |
2588 | return NULL((void *)0); |
2589 | } |
2590 | return NULL((void *)0); |
2591 | } |
2592 | |
2593 | int |
2594 | iwm_phy_db_set_section(struct iwm_softc *sc, |
2595 | struct iwm_calib_res_notif_phy_db *phy_db_notif) |
2596 | { |
2597 | uint16_t type = le16toh(phy_db_notif->type)((__uint16_t)(phy_db_notif->type)); |
2598 | uint16_t size = le16toh(phy_db_notif->length)((__uint16_t)(phy_db_notif->length)); |
2599 | struct iwm_phy_db_entry *entry; |
2600 | uint16_t chg_id = 0; |
2601 | |
2602 | if (type == IWM_PHY_DB_CALIB_CHG_PAPD4 || |
2603 | type == IWM_PHY_DB_CALIB_CHG_TXP5) |
2604 | chg_id = le16toh(*(uint16_t *)phy_db_notif->data)((__uint16_t)(*(uint16_t *)phy_db_notif->data)); |
2605 | |
2606 | entry = iwm_phy_db_get_section(sc, type, chg_id); |
2607 | if (!entry) |
2608 | return EINVAL22; |
2609 | |
2610 | if (entry->data) |
2611 | free(entry->data, M_DEVBUF2, entry->size); |
2612 | entry->data = malloc(size, M_DEVBUF2, M_NOWAIT0x0002); |
2613 | if (!entry->data) { |
2614 | entry->size = 0; |
2615 | return ENOMEM12; |
2616 | } |
2617 | memcpy(entry->data, phy_db_notif->data, size)__builtin_memcpy((entry->data), (phy_db_notif->data), ( size)); |
2618 | entry->size = size; |
2619 | |
2620 | return 0; |
2621 | } |
2622 | |
2623 | int |
2624 | iwm_is_valid_channel(uint16_t ch_id) |
2625 | { |
2626 | if (ch_id <= 14 || |
2627 | (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) || |
2628 | (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) || |
2629 | (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1)) |
2630 | return 1; |
2631 | return 0; |
2632 | } |
2633 | |
2634 | uint8_t |
2635 | iwm_ch_id_to_ch_index(uint16_t ch_id) |
2636 | { |
2637 | if (!iwm_is_valid_channel(ch_id)) |
2638 | return 0xff; |
2639 | |
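     | /*
     |  * The resulting indices are consecutive: channels 1-14 map to
     |  * 0-13, channel 36 to (36 + 20) / 4 = 14, channel 100 to
     |  * (100 - 12) / 4 = 22, and channel 145 to (145 - 13) / 4 = 33.
     |  */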
2640 | if (ch_id <= 14) |
2641 | return ch_id - 1; |
2642 | if (ch_id <= 64) |
2643 | return (ch_id + 20) / 4; |
2644 | if (ch_id <= 140) |
2645 | return (ch_id - 12) / 4; |
2646 | return (ch_id - 13) / 4; |
2647 | } |
2648 | |
2649 | |
2650 | uint16_t |
2651 | iwm_channel_id_to_papd(uint16_t ch_id) |
2652 | { |
2653 | if (!iwm_is_valid_channel(ch_id)) |
2654 | return 0xff; |
2655 | |
2656 | if (1 <= ch_id && ch_id <= 14) |
2657 | return 0; |
2658 | if (36 <= ch_id && ch_id <= 64) |
2659 | return 1; |
2660 | if (100 <= ch_id && ch_id <= 140) |
2661 | return 2; |
2662 | return 3; |
2663 | } |
2664 | |
2665 | uint16_t |
2666 | iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id) |
2667 | { |
2668 | struct iwm_phy_db *phy_db = &sc->sc_phy_db; |
2669 | struct iwm_phy_db_chg_txp *txp_chg; |
2670 | int i; |
2671 | uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id); |
2672 | |
2673 | if (ch_index == 0xff) |
2674 | return 0xff; |
2675 | |
2676 | for (i = 0; i < IWM_NUM_TXP_CH_GROUPS9; i++) { |
2677 | txp_chg = (void *)phy_db->calib_ch_group_txp[i].data; |
2678 | if (!txp_chg) |
2679 | return 0xff; |
2680 | /*
2681 |  * Look for the first channel group whose max channel is
2682 |  * higher than the requested channel.
2683 |  */
2684 | if (le16toh(txp_chg->max_channel_idx)((__uint16_t)(txp_chg->max_channel_idx)) >= ch_index) |
2685 | return i; |
2686 | } |
2687 | return 0xff; |
2688 | } |
2689 | |
2690 | int |
2691 | iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data, |
2692 | uint16_t *size, uint16_t ch_id) |
2693 | { |
2694 | struct iwm_phy_db_entry *entry; |
2695 | uint16_t ch_group_id = 0; |
2696 | |
2697 | if (type == IWM_PHY_DB_CALIB_CHG_PAPD4) |
2698 | ch_group_id = iwm_channel_id_to_papd(ch_id); |
2699 | else if (type == IWM_PHY_DB_CALIB_CHG_TXP5) |
2700 | ch_group_id = iwm_channel_id_to_txp(sc, ch_id); |
2701 | |
2702 | entry = iwm_phy_db_get_section(sc, type, ch_group_id); |
2703 | if (!entry) |
2704 | return EINVAL22; |
2705 | |
2706 | *data = entry->data; |
2707 | *size = entry->size; |
2708 | |
2709 | return 0; |
2710 | } |
2711 | |
2712 | int |
2713 | iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length, |
2714 | void *data) |
2715 | { |
2716 | struct iwm_phy_db_cmd phy_db_cmd; |
2717 | struct iwm_host_cmd cmd = { |
2718 | .id = IWM_PHY_DB_CMD0x6c, |
2719 | .flags = IWM_CMD_ASYNC, |
2720 | }; |
2721 | |
2722 | phy_db_cmd.type = le16toh(type)((__uint16_t)(type)); |
2723 | phy_db_cmd.length = le16toh(length)((__uint16_t)(length)); |
2724 | |
2725 | cmd.data[0] = &phy_db_cmd; |
2726 | cmd.len[0] = sizeof(struct iwm_phy_db_cmd); |
2727 | cmd.data[1] = data; |
2728 | cmd.len[1] = length; |
2729 | |
2730 | return iwm_send_cmd(sc, &cmd); |
2731 | } |
2732 | |
2733 | int |
2734 | iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc, uint16_t type, |
2735 | uint8_t max_ch_groups) |
2736 | { |
2737 | uint16_t i; |
2738 | int err; |
2739 | struct iwm_phy_db_entry *entry; |
2740 | |
2741 | for (i = 0; i < max_ch_groups; i++) { |
2742 | entry = iwm_phy_db_get_section(sc, type, i); |
2743 | if (!entry) |
2744 | return EINVAL22; |
2745 | |
2746 | if (!entry->size) |
2747 | continue; |
2748 | |
2749 | err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data); |
2750 | if (err) |
2751 | return err; |
2752 | |
2753 | DELAY(1000)(*delay_func)(1000); |
2754 | } |
2755 | |
2756 | return 0; |
2757 | } |
2758 | |
2759 | int |
2760 | iwm_send_phy_db_data(struct iwm_softc *sc) |
2761 | { |
2762 | uint8_t *data = NULL((void *)0); |
2763 | uint16_t size = 0; |
2764 | int err; |
2765 | |
2766 | err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG1, &data, &size, 0); |
2767 | if (err) |
2768 | return err; |
2769 | |
2770 | err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG1, size, data); |
2771 | if (err) |
2772 | return err; |
2773 | |
2774 | err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH2, |
2775 | &data, &size, 0); |
2776 | if (err) |
2777 | return err; |
2778 | |
2779 | err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH2, size, data); |
2780 | if (err) |
2781 | return err; |
2782 | |
2783 | err = iwm_phy_db_send_all_channel_groups(sc, |
2784 | IWM_PHY_DB_CALIB_CHG_PAPD4, IWM_NUM_PAPD_CH_GROUPS9); |
2785 | if (err) |
2786 | return err; |
2787 | |
2788 | err = iwm_phy_db_send_all_channel_groups(sc, |
2789 | IWM_PHY_DB_CALIB_CHG_TXP5, IWM_NUM_TXP_CH_GROUPS9); |
2790 | if (err) |
2791 | return err; |
2792 | |
2793 | return 0; |
2794 | } |
2795 | |
2796 | /* |
2797 | * For the high priority TE use a time event type that has similar priority to |
2798 | * the FW's action scan priority. |
2799 | */ |
2800 | #define IWM_ROC_TE_TYPE_NORMAL4 IWM_TE_P2P_DEVICE_DISCOVERABLE4 |
2801 | #define IWM_ROC_TE_TYPE_MGMT_TX9 IWM_TE_P2P_CLIENT_ASSOC9 |
2802 | |
2803 | int |
2804 | iwm_send_time_event_cmd(struct iwm_softc *sc, |
2805 | const struct iwm_time_event_cmd *cmd) |
2806 | { |
2807 | struct iwm_rx_packet *pkt; |
2808 | struct iwm_time_event_resp *resp; |
2809 | struct iwm_host_cmd hcmd = { |
2810 | .id = IWM_TIME_EVENT_CMD0x29, |
2811 | .flags = IWM_CMD_WANT_RESP, |
2812 | .resp_pkt_len = sizeof(*pkt) + sizeof(*resp), |
2813 | }; |
2814 | uint32_t resp_len; |
2815 | int err; |
2816 | |
2817 | hcmd.data[0] = cmd; |
2818 | hcmd.len[0] = sizeof(*cmd); |
2819 | err = iwm_send_cmd(sc, &hcmd); |
2820 | if (err) |
2821 | return err; |
2822 | |
2823 | pkt = hcmd.resp_pkt; |
2824 | if (!pkt || (pkt->hdr.flags & IWM_CMD_FAILED_MSK0x40)) { |
2825 | err = EIO5; |
2826 | goto out; |
2827 | } |
2828 | |
2829 | resp_len = iwm_rx_packet_payload_len(pkt); |
2830 | if (resp_len != sizeof(*resp)) { |
2831 | err = EIO5; |
2832 | goto out; |
2833 | } |
2834 | |
2835 | resp = (void *)pkt->data; |
2836 | if (le32toh(resp->status)((__uint32_t)(resp->status)) == 0) |
2837 | sc->sc_time_event_uid = le32toh(resp->unique_id)((__uint32_t)(resp->unique_id)); |
2838 | else |
2839 | err = EIO5; |
2840 | out: |
2841 | iwm_free_resp(sc, &hcmd); |
2842 | return err; |
2843 | } |
2844 | |
2845 | void |
2846 | iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in, |
2847 | uint32_t duration, uint32_t max_delay) |
2848 | { |
2849 | struct iwm_time_event_cmd time_cmd; |
2850 | |
2851 | /* Do nothing if a time event is already scheduled. */ |
2852 | if (sc->sc_flags & IWM_FLAG_TE_ACTIVE0x40) |
2853 | return; |
2854 | |
2855 | memset(&time_cmd, 0, sizeof(time_cmd))__builtin_memset((&time_cmd), (0), (sizeof(time_cmd))); |
2856 | |
2857 | time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD)((__uint32_t)(1)); |
2858 | time_cmd.id_and_color = |
2859 | htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color))((__uint32_t)(((in->in_id << (0)) | (in->in_color << (8))))); |
2860 | time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC)((__uint32_t)(0)); |
2861 | |
2862 | time_cmd.apply_time = htole32(0)((__uint32_t)(0)); |
2863 | |
2864 | time_cmd.max_frags = IWM_TE_V2_FRAG_NONE0; |
2865 | time_cmd.max_delay = htole32(max_delay)((__uint32_t)(max_delay)); |
2866 | /* TODO: why set the interval to the beacon interval if it is not periodic? */
2867 | time_cmd.interval = htole32(1)((__uint32_t)(1)); |
2868 | time_cmd.duration = htole32(duration)((__uint32_t)(duration)); |
2869 | time_cmd.repeat = 1; |
2870 | time_cmd.policy |
2871 | = htole16(IWM_TE_V2_NOTIF_HOST_EVENT_START |
2872 | IWM_TE_V2_NOTIF_HOST_EVENT_END |
2873 | IWM_T2_V2_START_IMMEDIATELY);
2874 | |
2875 | if (iwm_send_time_event_cmd(sc, &time_cmd) == 0) |
2876 | sc->sc_flags |= IWM_FLAG_TE_ACTIVE0x40; |
2877 | |
2878 | DELAY(100)(*delay_func)(100); |
2879 | } |
2880 | |
2881 | void |
2882 | iwm_unprotect_session(struct iwm_softc *sc, struct iwm_node *in) |
2883 | { |
2884 | struct iwm_time_event_cmd time_cmd; |
2885 | |
2886 | /* Do nothing if the time event has already ended. */ |
2887 | if ((sc->sc_flags & IWM_FLAG_TE_ACTIVE0x40) == 0) |
2888 | return; |
2889 | |
2890 | memset(&time_cmd, 0, sizeof(time_cmd))__builtin_memset((&time_cmd), (0), (sizeof(time_cmd))); |
2891 | |
2892 | time_cmd.action = htole32(IWM_FW_CTXT_ACTION_REMOVE)((__uint32_t)(3)); |
2893 | time_cmd.id_and_color = |
2894 | htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color))((__uint32_t)(((in->in_id << (0)) | (in->in_color << (8))))); |
2895 | time_cmd.id = htole32(sc->sc_time_event_uid)((__uint32_t)(sc->sc_time_event_uid)); |
2896 | |
2897 | if (iwm_send_time_event_cmd(sc, &time_cmd) == 0) |
2898 | sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE0x40; |
2899 | |
2900 | DELAY(100)(*delay_func)(100); |
2901 | } |
2902 | |
2903 | /* |
2904 | * NVM read access and content parsing. We do not support |
2905 | * external NVM or writing NVM. |
2906 | */ |
2907 | |
2908 | /* list of NVM sections we are allowed/need to read */ |
2909 | const int iwm_nvm_to_read[] = { |
2910 | IWM_NVM_SECTION_TYPE_HW0, |
2911 | IWM_NVM_SECTION_TYPE_SW1, |
2912 | IWM_NVM_SECTION_TYPE_REGULATORY3, |
2913 | IWM_NVM_SECTION_TYPE_CALIBRATION4, |
2914 | IWM_NVM_SECTION_TYPE_PRODUCTION5, |
2915 | IWM_NVM_SECTION_TYPE_REGULATORY_SDP8, |
2916 | IWM_NVM_SECTION_TYPE_HW_800010, |
2917 | IWM_NVM_SECTION_TYPE_MAC_OVERRIDE11, |
2918 | IWM_NVM_SECTION_TYPE_PHY_SKU12, |
2919 | }; |
2920 | |
2921 | #define IWM_NVM_DEFAULT_CHUNK_SIZE(2*1024) (2*1024) |
2922 | |
2923 | #define IWM_NVM_WRITE_OPCODE1 1 |
2924 | #define IWM_NVM_READ_OPCODE0 0 |
2925 | |
2926 | int |
2927 | iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset, |
2928 | uint16_t length, uint8_t *data, uint16_t *len) |
2929 | { |
2931 | struct iwm_nvm_access_cmd nvm_access_cmd = { |
2932 | .offset = htole16(offset)((__uint16_t)(offset)), |
2933 | .length = htole16(length)((__uint16_t)(length)), |
2934 | .type = htole16(section)((__uint16_t)(section)), |
2935 | .op_code = IWM_NVM_READ_OPCODE0, |
2936 | }; |
2937 | struct iwm_nvm_access_resp *nvm_resp; |
2938 | struct iwm_rx_packet *pkt; |
2939 | struct iwm_host_cmd cmd = { |
2940 | .id = IWM_NVM_ACCESS_CMD0x88, |
2941 | .flags = (IWM_CMD_WANT_RESP | IWM_CMD_SEND_IN_RFKILL), |
2942 | .resp_pkt_len = IWM_CMD_RESP_MAX(1 << 12), |
2943 | .data = { &nvm_access_cmd, }, |
2944 | }; |
2945 | int err, offset_read; |
2946 | size_t bytes_read; |
2947 | uint8_t *resp_data; |
2948 | |
2949 | cmd.len[0] = sizeof(struct iwm_nvm_access_cmd); |
2950 | |
2951 | err = iwm_send_cmd(sc, &cmd); |
2952 | if (err) |
2953 | return err; |
2954 | |
2955 | pkt = cmd.resp_pkt; |
2956 | if (pkt->hdr.flags & IWM_CMD_FAILED_MSK0x40) { |
2957 | err = EIO5; |
2958 | goto exit; |
2959 | } |
2960 | |
2961 | /* Extract NVM response */ |
2962 | nvm_resp = (void *)pkt->data; |
2963 | if (nvm_resp == NULL((void *)0)) |
2964 | return EIO5; |
2965 | |
2966 | err = le16toh(nvm_resp->status)((__uint16_t)(nvm_resp->status)); |
2967 | bytes_read = le16toh(nvm_resp->length)((__uint16_t)(nvm_resp->length)); |
2968 | offset_read = le16toh(nvm_resp->offset)((__uint16_t)(nvm_resp->offset)); |
2969 | resp_data = nvm_resp->data; |
2970 | if (err) { |
2971 | err = EINVAL22; |
2972 | goto exit; |
2973 | } |
2974 | |
2975 | if (offset_read != offset) { |
2976 | err = EINVAL22; |
2977 | goto exit; |
2978 | } |
2979 | |
2980 | if (bytes_read > length) { |
2981 | err = EINVAL22; |
2982 | goto exit; |
2983 | } |
2984 | |
2985 | memcpy(data + offset, resp_data, bytes_read)__builtin_memcpy((data + offset), (resp_data), (bytes_read)); |
2986 | *len = bytes_read; |
2987 | |
2988 | exit: |
2989 | iwm_free_resp(sc, &cmd); |
2990 | return err; |
2991 | } |
2992 | |
2993 | /*
2994 |  * Reads an NVM section completely.
2995 |  * NICs prior to the 7000 family don't have a real NVM, but just read
2996 |  * section 0 which is the EEPROM. Because EEPROM reads are not bounded
2997 |  * by the uCode, we need to check manually in this case that we don't
2998 |  * overflow and try to read more than the EEPROM size.
2999 |  */
3000 | int |
3001 | iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data, |
3002 | uint16_t *len, size_t max_len) |
3003 | { |
3004 | uint16_t chunklen, seglen; |
3005 | int err = 0; |
3006 | |
3007 | chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE(2*1024); |
3008 | *len = 0; |
3009 | |
3010 | /* Read NVM chunks until exhausted (reading less than requested) */ |
3011 | while (seglen == chunklen && *len < max_len) { |
3012 | err = iwm_nvm_read_chunk(sc, |
3013 | section, *len, chunklen, data, &seglen); |
3014 | if (err) |
3015 | return err; |
3016 | |
3017 | *len += seglen; |
3018 | } |
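     | /*
     |  * A short read (seglen < chunklen) marks the final chunk. The
     |  * max_len bound matters for EEPROM-style reads, which the uCode
     |  * does not limit itself (see the comment above this function).
     |  */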
3019 | |
3020 | return err; |
3021 | } |
3022 | |
3023 | uint8_t |
3024 | iwm_fw_valid_tx_ant(struct iwm_softc *sc) |
3025 | { |
3026 | uint8_t tx_ant; |
3027 | |
3028 | tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN(0xf << 16)) |
3029 | >> IWM_FW_PHY_CFG_TX_CHAIN_POS16); |
3030 | |
3031 | if (sc->sc_nvm.valid_tx_ant) |
3032 | tx_ant &= sc->sc_nvm.valid_tx_ant; |
3033 | |
3034 | return tx_ant; |
3035 | } |
3036 | |
3037 | uint8_t |
3038 | iwm_fw_valid_rx_ant(struct iwm_softc *sc) |
3039 | { |
3040 | uint8_t rx_ant; |
3041 | |
3042 | rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN(0xf << 20)) |
3043 | >> IWM_FW_PHY_CFG_RX_CHAIN_POS20); |
3044 | |
3045 | if (sc->sc_nvm.valid_rx_ant) |
3046 | rx_ant &= sc->sc_nvm.valid_rx_ant; |
3047 | |
3048 | return rx_ant; |
3049 | } |
3050 | |
3051 | int |
3052 | iwm_valid_siso_ant_rate_mask(struct iwm_softc *sc) |
3053 | { |
3054 | uint8_t valid_tx_ant = iwm_fw_valid_tx_ant(sc); |
3055 | |
3056 | /* |
3057 | * According to the Linux driver, antenna B should be preferred |
3058 | * on 9k devices since it is not shared with bluetooth. However, |
3059 | * there are 9k devices which do not support antenna B at all. |
3060 | */ |
3061 | if (sc->sc_device_family == IWM_DEVICE_FAMILY_90003 && |
3062 | (valid_tx_ant & IWM_ANT_B(1 << 1))) |
3063 | return IWM_RATE_MCS_ANT_B_MSK(2 << 14); |
3064 | |
3065 | return IWM_RATE_MCS_ANT_A_MSK(1 << 14); |
3066 | } |
3067 | |
3068 | void |
3069 | iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags, |
3070 | const uint8_t *nvm_channels, int nchan) |
3071 | { |
3072 | struct ieee80211com *ic = &sc->sc_ic; |
3073 | struct iwm_nvm_data *data = &sc->sc_nvm; |
3074 | int ch_idx; |
3075 | struct ieee80211_channel *channel; |
3076 | uint16_t ch_flags; |
3077 | int is_5ghz; |
3078 | int flags, hw_value; |
3079 | |
3080 | for (ch_idx = 0; ch_idx < nchan; ch_idx++) { |
3081 | ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx)(((__uint16_t)(*(const uint16_t *)(nvm_ch_flags + ch_idx)))); |
3082 | |
3083 | if (ch_idx >= IWM_NUM_2GHZ_CHANNELS14 && |
3084 | !data->sku_cap_band_52GHz_enable) |
3085 | ch_flags &= ~IWM_NVM_CHANNEL_VALID(1 << 0); |
3086 | |
3087 | if (!(ch_flags & IWM_NVM_CHANNEL_VALID(1 << 0))) |
3088 | continue; |
3089 | |
3090 | hw_value = nvm_channels[ch_idx]; |
3091 | channel = &ic->ic_channels[hw_value]; |
3092 | |
3093 | is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS14; |
3094 | if (!is_5ghz) { |
3095 | flags = IEEE80211_CHAN_2GHZ0x0080; |
3096 | channel->ic_flags |
3097 | = IEEE80211_CHAN_CCK0x0020 |
3098 | | IEEE80211_CHAN_OFDM0x0040 |
3099 | | IEEE80211_CHAN_DYN0x0400 |
3100 | | IEEE80211_CHAN_2GHZ0x0080; |
3101 | } else { |
3102 | flags = IEEE80211_CHAN_5GHZ0x0100; |
3103 | channel->ic_flags = |
3104 | IEEE80211_CHAN_A(0x0100 | 0x0040); |
3105 | } |
3106 | channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags); |
3107 | |
3108 | if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE(1 << 3))) |
3109 | channel->ic_flags |= IEEE80211_CHAN_PASSIVE0x0200; |
3110 | |
3111 | if (data->sku_cap_11n_enable) { |
3112 | channel->ic_flags |= IEEE80211_CHAN_HT0x2000; |
3113 | if (ch_flags & IWM_NVM_CHANNEL_40MHZ(1 << 9)) |
3114 | channel->ic_flags |= IEEE80211_CHAN_40MHZ0x8000; |
3115 | } |
3116 | |
3117 | if (is_5ghz && data->sku_cap_11ac_enable) { |
3118 | channel->ic_flags |= IEEE80211_CHAN_VHT0x4000; |
3119 | if (ch_flags & IWM_NVM_CHANNEL_80MHZ(1 << 10)) |
3120 | channel->ic_xflags |= IEEE80211_CHANX_80MHZ0x00000001; |
3121 | } |
3122 | } |
3123 | } |
3124 | |
3125 | int |
3126 | iwm_mimo_enabled(struct iwm_softc *sc) |
3127 | { |
3128 | struct ieee80211com *ic = &sc->sc_ic; |
3129 | |
3130 | return !sc->sc_nvm.sku_cap_mimo_disable && |
3131 | (ic->ic_userflags & IEEE80211_F_NOMIMO0x00000008) == 0; |
3132 | } |
3133 | |
3134 | void |
3135 | iwm_setup_ht_rates(struct iwm_softc *sc) |
3136 | { |
3137 | struct ieee80211com *ic = &sc->sc_ic; |
3138 | uint8_t rx_ant; |
3139 | |
3140 | /* TX is supported with the same MCS as RX. */ |
3141 | ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED0x01; |
3142 | |
3143 | memset(ic->ic_sup_mcs, 0, sizeof(ic->ic_sup_mcs))__builtin_memset((ic->ic_sup_mcs), (0), (sizeof(ic->ic_sup_mcs ))); |
3144 | ic->ic_sup_mcs[0] = 0xff; /* MCS 0-7 */ |
3145 | |
3146 | if (!iwm_mimo_enabled(sc)) |
3147 | return; |
3148 | |
3149 | rx_ant = iwm_fw_valid_rx_ant(sc); |
3150 | if ((rx_ant & IWM_ANT_AB((1 << 0) | (1 << 1))) == IWM_ANT_AB((1 << 0) | (1 << 1)) || |
3151 | (rx_ant & IWM_ANT_BC((1 << 1) | (1 << 2))) == IWM_ANT_BC((1 << 1) | (1 << 2))) |
3152 | ic->ic_sup_mcs[1] = 0xff; /* MCS 8-15 */ |
3153 | } |
3154 | |
3155 | void |
3156 | iwm_setup_vht_rates(struct iwm_softc *sc) |
3157 | { |
3158 | struct ieee80211com *ic = &sc->sc_ic; |
3159 | uint8_t rx_ant = iwm_fw_valid_rx_ant(sc); |
3160 | int n; |
3161 | |
3162 | ic->ic_vht_rxmcs = (IEEE80211_VHT_MCS_0_92 << |
3163 | IEEE80211_VHT_MCS_FOR_SS_SHIFT(1)(2*((1)-1))); |
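     | /*
     |  * The VHT MCS map packs two bits per spatial stream, shifted by
     |  * 2*(N-1) for stream N: value 2 (IEEE80211_VHT_MCS_0_9) means
     |  * MCS 0-9 are supported, value 3 (IEEE80211_VHT_MCS_SS_NOT_SUPP)
     |  * means the stream is not supported.
     |  */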
3164 | |
3165 | if (iwm_mimo_enabled(sc) && |
3166 | ((rx_ant & IWM_ANT_AB((1 << 0) | (1 << 1))) == IWM_ANT_AB((1 << 0) | (1 << 1)) || |
3167 | (rx_ant & IWM_ANT_BC((1 << 1) | (1 << 2))) == IWM_ANT_BC((1 << 1) | (1 << 2)))) { |
3168 | ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_0_92 << |
3169 | IEEE80211_VHT_MCS_FOR_SS_SHIFT(2)(2*((2)-1))); |
3170 | } else { |
3171 | ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_SS_NOT_SUPP3 << |
3172 | IEEE80211_VHT_MCS_FOR_SS_SHIFT(2)(2*((2)-1))); |
3173 | } |
3174 | |
3175 | for (n = 3; n <= IEEE80211_VHT_NUM_SS8; n++) { |
3176 | ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_SS_NOT_SUPP3 << |
3177 | IEEE80211_VHT_MCS_FOR_SS_SHIFT(n)(2*((n)-1))); |
3178 | } |
3179 | |
3180 | ic->ic_vht_txmcs = ic->ic_vht_rxmcs; |
3181 | } |
3182 | |
3183 | void |
3184 | iwm_init_reorder_buffer(struct iwm_reorder_buffer *reorder_buf, |
3185 | uint16_t ssn, uint16_t buf_size) |
3186 | { |
3187 | reorder_buf->head_sn = ssn; |
3188 | reorder_buf->num_stored = 0; |
3189 | reorder_buf->buf_size = buf_size; |
3190 | reorder_buf->last_amsdu = 0; |
3191 | reorder_buf->last_sub_index = 0; |
3192 | reorder_buf->removed = 0; |
3193 | reorder_buf->valid = 0; |
3194 | reorder_buf->consec_oldsn_drops = 0; |
3195 | reorder_buf->consec_oldsn_ampdu_gp2 = 0; |
3196 | reorder_buf->consec_oldsn_prev_drop = 0; |
3197 | } |
3198 | |
3199 | void |
3200 | iwm_clear_reorder_buffer(struct iwm_softc *sc, struct iwm_rxba_data *rxba) |
3201 | { |
3202 | int i; |
3203 | struct iwm_reorder_buffer *reorder_buf = &rxba->reorder_buf; |
3204 | struct iwm_reorder_buf_entry *entry; |
3205 | |
3206 | for (i = 0; i < reorder_buf->buf_size; i++) { |
3207 | entry = &rxba->entries[i]; |
3208 | ml_purge(&entry->frames); |
3209 | timerclear(&entry->reorder_time)(&entry->reorder_time)->tv_sec = (&entry->reorder_time )->tv_usec = 0; |
3210 | } |
3211 | |
3212 | reorder_buf->removed = 1; |
3213 | timeout_del(&reorder_buf->reorder_timer); |
3214 | timerclear(&rxba->last_rx)(&rxba->last_rx)->tv_sec = (&rxba->last_rx)-> tv_usec = 0; |
3215 | timeout_del(&rxba->session_timer); |
3216 | rxba->baid = IWM_RX_REORDER_DATA_INVALID_BAID0x7f; |
3217 | } |
3218 | |
3219 | #define RX_REORDER_BUF_TIMEOUT_MQ_USEC(100000ULL) (100000ULL) |
3220 | |
3221 | void |
3222 | iwm_rx_ba_session_expired(void *arg) |
3223 | { |
3224 | struct iwm_rxba_data *rxba = arg; |
3225 | struct iwm_softc *sc = rxba->sc; |
3226 | struct ieee80211com *ic = &sc->sc_ic; |
3227 | struct ieee80211_node *ni = ic->ic_bss; |
3228 | struct timeval now, timeout, expiry; |
3229 | int s; |
3230 | |
3231 | s = splnet()splraise(0x4); |
3232 | if ((sc->sc_flags & IWM_FLAG_SHUTDOWN0x100) == 0 && |
3233 | ic->ic_state == IEEE80211_S_RUN && |
3234 | rxba->baid != IWM_RX_REORDER_DATA_INVALID_BAID0x7f) { |
3235 | getmicrouptime(&now); |
3236 | USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC(100000ULL), &timeout); |
3237 | timeradd(&rxba->last_rx, &timeout, &expiry)do { (&expiry)->tv_sec = (&rxba->last_rx)->tv_sec + (&timeout)->tv_sec; (&expiry)->tv_usec = (& rxba->last_rx)->tv_usec + (&timeout)->tv_usec; if ((&expiry)->tv_usec >= 1000000) { (&expiry)-> tv_sec++; (&expiry)->tv_usec -= 1000000; } } while (0); |
3238 | if (timercmp(&now, &expiry, <)(((&now)->tv_sec == (&expiry)->tv_sec) ? ((& now)->tv_usec < (&expiry)->tv_usec) : ((&now )->tv_sec < (&expiry)->tv_sec))) { |
3239 | timeout_add_usec(&rxba->session_timer, rxba->timeout); |
3240 | } else { |
3241 | ic->ic_stats.is_ht_rx_ba_timeout++; |
3242 | ieee80211_delba_request(ic, ni, |
3243 | IEEE80211_REASON_TIMEOUT, 0, rxba->tid); |
3244 | } |
3245 | } |
3246 | splx(s)spllower(s); |
3247 | } |
3248 | |
3249 | void |
3250 | iwm_reorder_timer_expired(void *arg) |
3251 | { |
3252 | struct mbuf_list ml = MBUF_LIST_INITIALIZER(){ ((void *)0), ((void *)0), 0 }; |
3253 | struct iwm_reorder_buffer *buf = arg; |
3254 | struct iwm_rxba_data *rxba = iwm_rxba_data_from_reorder_buf(buf); |
3255 | struct iwm_reorder_buf_entry *entries = &rxba->entries[0]; |
3256 | struct iwm_softc *sc = rxba->sc; |
3257 | struct ieee80211com *ic = &sc->sc_ic; |
3258 | struct ieee80211_node *ni = ic->ic_bss; |
3259 | int i, s; |
3260 | uint16_t sn = 0, index = 0; |
3261 | int expired = 0; |
3262 | int cont = 0; |
3263 | struct timeval now, timeout, expiry; |
3264 | |
3265 | if (!buf->num_stored || buf->removed) |
3266 | return; |
3267 | |
3268 | s = splnet()splraise(0x4); |
3269 | getmicrouptime(&now); |
3270 | USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC(100000ULL), &timeout); |
3271 | |
3272 | for (i = 0; i < buf->buf_size ; i++) { |
3273 | index = (buf->head_sn + i) % buf->buf_size; |
3274 | |
3275 | if (ml_empty(&entries[index].frames)((&entries[index].frames)->ml_len == 0)) { |
3276 | /* |
3277 | * If there is a hole and the next frame didn't expire |
3278 | * we want to break and not advance SN. |
3279 | */ |
3280 | cont = 0; |
3281 | continue; |
3282 | } |
3283 | timeradd(&entries[index].reorder_time, &timeout, &expiry)do { (&expiry)->tv_sec = (&entries[index].reorder_time )->tv_sec + (&timeout)->tv_sec; (&expiry)->tv_usec = (&entries[index].reorder_time)->tv_usec + (&timeout )->tv_usec; if ((&expiry)->tv_usec >= 1000000) { (&expiry)->tv_sec++; (&expiry)->tv_usec -= 1000000 ; } } while (0); |
3284 | if (!cont && timercmp(&now, &expiry, <)(((&now)->tv_sec == (&expiry)->tv_sec) ? ((& now)->tv_usec < (&expiry)->tv_usec) : ((&now )->tv_sec < (&expiry)->tv_sec))) |
3285 | break; |
3286 | |
3287 | expired = 1; |
3288 | /* continue until next hole after this expired frame */ |
3289 | cont = 1; |
3290 | sn = (buf->head_sn + (i + 1)) & 0xfff; |
3291 | } |
3292 | |
3293 | if (expired) { |
3294 | /* SN is set to the last expired frame + 1 */ |
3295 | iwm_release_frames(sc, ni, rxba, buf, sn, &ml); |
3296 | if_input(&sc->sc_ic.ic_ific_ac.ac_if, &ml); |
3297 | ic->ic_stats.is_ht_rx_ba_window_gap_timeout++; |
3298 | } else { |
3299 | /* |
3300 | * If no frame expired and there are stored frames, index is now |
3301 | * pointing to the first unexpired frame - modify reorder timeout |
3302 | * accordingly. |
3303 | */ |
3304 | timeout_add_usec(&buf->reorder_timer, |
3305 | RX_REORDER_BUF_TIMEOUT_MQ_USEC(100000ULL)); |
3306 | } |
3307 | |
3308 | splx(s)spllower(s); |
3309 | } |
3310 | |
3311 | #define IWM_MAX_RX_BA_SESSIONS16 16 |
3312 | |
3313 | int |
3314 | iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid, |
3315 | uint16_t ssn, uint16_t winsize, int timeout_val, int start) |
3316 | { |
3317 | struct ieee80211com *ic = &sc->sc_ic; |
3318 | struct iwm_add_sta_cmd cmd; |
3319 | struct iwm_node *in = (void *)ni; |
3320 | int err, s; |
3321 | uint32_t status; |
3322 | size_t cmdsize; |
3323 | struct iwm_rxba_data *rxba = NULL((void *)0); |
3324 | uint8_t baid = 0; |
3325 | |
3326 | s = splnet()splraise(0x4); |
3327 | |
3328 | if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS16) { |
3329 | ieee80211_addba_req_refuse(ic, ni, tid); |
3330 | splx(s)spllower(s); |
3331 | return 0; |
3332 | } |
3333 | |
3334 | memset(&cmd, 0, sizeof(cmd))__builtin_memset((&cmd), (0), (sizeof(cmd))); |
3335 | |
3336 | cmd.sta_id = IWM_STATION_ID0; |
3337 | cmd.mac_id_n_color |
3338 | = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color))((__uint32_t)(((in->in_id << (0)) | (in->in_color << (8))))); |
3339 | cmd.add_modify = IWM_STA_MODE_MODIFY1; |
3340 | |
3341 | if (start) { |
3342 | cmd.add_immediate_ba_tid = (uint8_t)tid; |
3343 | cmd.add_immediate_ba_ssn = ssn; |
3344 | cmd.rx_ba_window = winsize; |
3345 | } else { |
3346 | cmd.remove_immediate_ba_tid = (uint8_t)tid; |
3347 | } |
3348 | cmd.modify_mask = start ? IWM_STA_MODIFY_ADD_BA_TID(1 << 3) : |
3349 | IWM_STA_MODIFY_REMOVE_BA_TID(1 << 4); |
3350 | |
3351 | status = IWM_ADD_STA_SUCCESS0x1; |
3352 | if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE)((sc->sc_ucode_api)[(30)>>3] & (1<<((30)& (8 -1))))) |
3353 | cmdsize = sizeof(cmd); |
3354 | else |
3355 | cmdsize = sizeof(struct iwm_add_sta_cmd_v7); |
3356 | err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA0x18, cmdsize, &cmd, |
3357 | &status); |
3358 | if (!err && (status & IWM_ADD_STA_STATUS_MASK0xFF) != IWM_ADD_STA_SUCCESS0x1) |
3359 | err = EIO5; |
3360 | if (err) { |
3361 | if (start) |
3362 | ieee80211_addba_req_refuse(ic, ni, tid); |
3363 | splx(s)spllower(s); |
3364 | return err; |
3365 | } |
3366 | |
3367 | if (sc->sc_mqrx_supported) { |
3368 | /* Deaggregation is done in hardware. */ |
3369 | if (start) { |
3370 | if (!(status & IWM_ADD_STA_BAID_VALID_MASK0x8000)) { |
3371 | ieee80211_addba_req_refuse(ic, ni, tid); |
3372 | splx(s)spllower(s); |
3373 | return EIO5; |
3374 | } |
3375 | baid = (status & IWM_ADD_STA_BAID_MASK0x7F00) >> |
3376 | IWM_ADD_STA_BAID_SHIFT8; |
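     | /*
     |  * The firmware reports the block-ack session ID (BAID) in bits
     |  * 8-14 of the command status, with bit 15 flagging it valid,
     |  * per the masks checked above.
     |  */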
3377 | if (baid == IWM_RX_REORDER_DATA_INVALID_BAID0x7f || |
3378 | baid >= nitems(sc->sc_rxba_data)(sizeof((sc->sc_rxba_data)) / sizeof((sc->sc_rxba_data) [0]))) { |
3379 | ieee80211_addba_req_refuse(ic, ni, tid); |
3380 | splx(s)spllower(s); |
3381 | return EIO5; |
3382 | } |
3383 | rxba = &sc->sc_rxba_data[baid]; |
3384 | if (rxba->baid != IWM_RX_REORDER_DATA_INVALID_BAID0x7f) { |
3385 | ieee80211_addba_req_refuse(ic, ni, tid); |
3386 | splx(s)spllower(s); |
3387 | return 0; |
3388 | } |
3389 | rxba->sta_id = IWM_STATION_ID0; |
3390 | rxba->tid = tid; |
3391 | rxba->baid = baid; |
3392 | rxba->timeout = timeout_val; |
3393 | getmicrouptime(&rxba->last_rx); |
3394 | iwm_init_reorder_buffer(&rxba->reorder_buf, ssn, |
3395 | winsize); |
3396 | if (timeout_val != 0) { |
3397 | struct ieee80211_rx_ba *ba; |
3398 | timeout_add_usec(&rxba->session_timer, |
3399 | timeout_val); |
3400 | /* XXX disable net80211's BA timeout handler */ |
3401 | ba = &ni->ni_rx_ba[tid]; |
3402 | ba->ba_timeout_val = 0; |
3403 | } |
3404 | } else { |
3405 | int i; |
3406 | for (i = 0; i < nitems(sc->sc_rxba_data)(sizeof((sc->sc_rxba_data)) / sizeof((sc->sc_rxba_data) [0])); i++) { |
3407 | rxba = &sc->sc_rxba_data[i]; |
3408 | if (rxba->baid == |
3409 | IWM_RX_REORDER_DATA_INVALID_BAID0x7f) |
3410 | continue; |
3411 | if (rxba->tid != tid) |
3412 | continue; |
3413 | iwm_clear_reorder_buffer(sc, rxba); |
3414 | break; |
3415 | } |
3416 | } |
3417 | } |
3418 | |
3419 | if (start) { |
3420 | sc->sc_rx_ba_sessions++; |
3421 | ieee80211_addba_req_accept(ic, ni, tid); |
3422 | } else if (sc->sc_rx_ba_sessions > 0) |
3423 | sc->sc_rx_ba_sessions--; |
3424 | |
3425 | splx(s)spllower(s); |
3426 | return 0; |
3427 | } |
3428 | |
3429 | void |
3430 | iwm_mac_ctxt_task(void *arg) |
3431 | { |
3432 | struct iwm_softc *sc = arg; |
3433 | struct ieee80211com *ic = &sc->sc_ic; |
3434 | struct iwm_node *in = (void *)ic->ic_bss; |
3435 | int err, s = splnet()splraise(0x4); |
3436 | |
3437 | if ((sc->sc_flags & IWM_FLAG_SHUTDOWN0x100) || |
3438 | ic->ic_state != IEEE80211_S_RUN) { |
3439 | refcnt_rele_wake(&sc->task_refs); |
3440 | splx(s)spllower(s); |
3441 | return; |
3442 | } |
3443 | |
3444 | err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY2, 1); |
3445 | if (err) |
3446 | printf("%s: failed to update MAC\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
3447 | |
3448 | iwm_unprotect_session(sc, in); |
3449 | |
3450 | refcnt_rele_wake(&sc->task_refs); |
3451 | splx(s)spllower(s); |
3452 | } |
3453 | |
3454 | void |
3455 | iwm_updateprot(struct ieee80211com *ic) |
3456 | { |
3457 | struct iwm_softc *sc = ic->ic_softcic_ac.ac_if.if_softc; |
3458 | |
3459 | if (ic->ic_state == IEEE80211_S_RUN && |
3460 | !task_pending(&sc->newstate_task)((&sc->newstate_task)->t_flags & 1)) |
3461 | iwm_add_task(sc, systq, &sc->mac_ctxt_task); |
3462 | } |
3463 | |
3464 | void |
3465 | iwm_updateslot(struct ieee80211com *ic) |
3466 | { |
3467 | struct iwm_softc *sc = ic->ic_softcic_ac.ac_if.if_softc; |
3468 | |
3469 | if (ic->ic_state == IEEE80211_S_RUN && |
3470 | !task_pending(&sc->newstate_task)((&sc->newstate_task)->t_flags & 1)) |
3471 | iwm_add_task(sc, systq, &sc->mac_ctxt_task); |
3472 | } |
3473 | |
3474 | void |
3475 | iwm_updateedca(struct ieee80211com *ic) |
3476 | { |
3477 | struct iwm_softc *sc = ic->ic_softcic_ac.ac_if.if_softc; |
3478 | |
3479 | if (ic->ic_state == IEEE80211_S_RUN && |
3480 | !task_pending(&sc->newstate_task)((&sc->newstate_task)->t_flags & 1)) |
3481 | iwm_add_task(sc, systq, &sc->mac_ctxt_task); |
3482 | } |
3483 | |
3484 | void |
3485 | iwm_phy_ctxt_task(void *arg) |
3486 | { |
3487 | struct iwm_softc *sc = arg; |
3488 | struct ieee80211com *ic = &sc->sc_ic; |
3489 | struct iwm_node *in = (void *)ic->ic_bss; |
3490 | struct ieee80211_node *ni = &in->in_ni; |
3491 | uint8_t chains, sco, vht_chan_width; |
3492 | int err, s = splnet()splraise(0x4); |
3493 | |
3494 | if ((sc->sc_flags & IWM_FLAG_SHUTDOWN0x100) || |
3495 | ic->ic_state != IEEE80211_S_RUN || |
3496 | in->in_phyctxt == NULL((void *)0)) { |
3497 | refcnt_rele_wake(&sc->task_refs); |
3498 | splx(s)spllower(s); |
3499 | return; |
3500 | } |
3501 | |
3502 | chains = iwm_mimo_enabled(sc) ? 2 : 1; |
3503 | if ((ni->ni_flags & IEEE80211_NODE_HT0x0400) && |
3504 | IEEE80211_CHAN_40MHZ_ALLOWED(ni->ni_chan)(((ni->ni_chan)->ic_flags & 0x8000) != 0) && |
3505 | ieee80211_node_supports_ht_chan40(ni)) |
3506 | sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK0x03); |
3507 | else |
3508 | sco = IEEE80211_HTOP0_SCO_SCN0; |
3509 | if ((ni->ni_flags & IEEE80211_NODE_VHT0x10000) && |
3510 | IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan)(((in->in_ni.ni_chan)->ic_xflags & 0x00000001) != 0 ) && |
3511 | ieee80211_node_supports_vht_chan80(ni)) |
3512 | vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_801; |
3513 | else |
3514 | vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT0; |
3515 | if (in->in_phyctxt->sco != sco || |
3516 | in->in_phyctxt->vht_chan_width != vht_chan_width) { |
3517 | err = iwm_phy_ctxt_update(sc, in->in_phyctxt, |
3518 | in->in_phyctxt->channel, chains, chains, 0, sco, |
3519 | vht_chan_width); |
3520 | if (err) |
3521 | printf("%s: failed to update PHY\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
3522 | iwm_setrates(in, 0); |
3523 | } |
3524 | |
3525 | refcnt_rele_wake(&sc->task_refs); |
3526 | splx(s)spllower(s); |
3527 | } |
3528 | |
3529 | void |
3530 | iwm_updatechan(struct ieee80211com *ic) |
3531 | { |
3532 | struct iwm_softc *sc = ic->ic_softcic_ac.ac_if.if_softc; |
3533 | |
3534 | if (ic->ic_state == IEEE80211_S_RUN && |
3535 | !task_pending(&sc->newstate_task)((&sc->newstate_task)->t_flags & 1)) |
3536 | iwm_add_task(sc, systq, &sc->phy_ctxt_task); |
3537 | } |
3538 | |
3539 | void |
3540 | iwm_updatedtim(struct ieee80211com *ic) |
3541 | { |
3542 | struct iwm_softc *sc = ic->ic_softcic_ac.ac_if.if_softc; |
3543 | |
3544 | if (ic->ic_state == IEEE80211_S_RUN && |
3545 | !task_pending(&sc->newstate_task)((&sc->newstate_task)->t_flags & 1)) |
3546 | iwm_add_task(sc, systq, &sc->mac_ctxt_task); |
3547 | } |
3548 | |
3549 | int |
3550 | iwm_sta_tx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid, |
3551 | uint16_t ssn, uint16_t winsize, int start) |
3552 | { |
3553 | struct iwm_add_sta_cmd cmd; |
3554 | struct ieee80211com *ic = &sc->sc_ic; |
3555 | struct iwm_node *in = (void *)ni; |
3556 | int qid = IWM_FIRST_AGG_TX_QUEUE10 + tid; |
3557 | struct iwm_tx_ring *ring; |
3558 | enum ieee80211_edca_ac ac; |
3559 | int fifo; |
3560 | uint32_t status; |
3561 | int err; |
3562 | size_t cmdsize; |
3563 | |
3564 | /* Ensure we can map this TID to an aggregation queue. */ |
3565 | if (tid >= IWM_MAX_TID_COUNT8 || qid > IWM_LAST_AGG_TX_QUEUE(10 + 8 - 1)) |
3566 | return ENOSPC28; |
3567 | |
3568 | if (start) { |
3569 | if ((sc->tx_ba_queue_mask & (1 << qid)) != 0) |
3570 | return 0; |
3571 | } else { |
3572 | if ((sc->tx_ba_queue_mask & (1 << qid)) == 0) |
3573 | return 0; |
3574 | } |
3575 | |
3576 | ring = &sc->txq[qid]; |
3577 | ac = iwm_tid_to_ac[tid]; |
3578 | fifo = iwm_ac_to_tx_fifo[ac]; |
3579 | |
3580 | memset(&cmd, 0, sizeof(cmd))__builtin_memset((&cmd), (0), (sizeof(cmd))); |
3581 | |
3582 | cmd.sta_id = IWM_STATION_ID0; |
3583 | cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
3584 | in->in_color));
3585 | cmd.add_modify = IWM_STA_MODE_MODIFY1; |
3586 | |
3587 | if (start) { |
3588 | /* Enable Tx aggregation for this queue. */ |
3589 | in->tid_disable_ampdu &= ~(1 << tid); |
3590 | in->tfd_queue_msk |= (1 << qid); |
3591 | } else { |
3592 | in->tid_disable_ampdu |= (1 << tid); |
3593 | /* |
3594 | * Queue remains enabled in the TFD queue mask |
3595 | * until we leave RUN state. |
3596 | */ |
3597 | err = iwm_flush_sta(sc, in); |
3598 | if (err) |
3599 | return err; |
3600 | } |
3601 | |
3602 | cmd.tfd_queue_msk |= htole32(in->tfd_queue_msk)((__uint32_t)(in->tfd_queue_msk)); |
3603 | cmd.tid_disable_tx = htole16(in->tid_disable_ampdu)((__uint16_t)(in->tid_disable_ampdu)); |
3604 | cmd.modify_mask = (IWM_STA_MODIFY_QUEUES(1 << 7) | |
3605 | IWM_STA_MODIFY_TID_DISABLE_TX(1 << 1)); |
3606 | |
3607 | if (start && (sc->qenablemsk & (1 << qid)) == 0) { |
3608 | if (!iwm_nic_lock(sc)) { |
3609 | if (start) |
3610 | ieee80211_addba_resp_refuse(ic, ni, tid, |
3611 | IEEE80211_STATUS_UNSPECIFIED); |
3612 | return EBUSY16; |
3613 | } |
3614 | err = iwm_enable_txq(sc, IWM_STATION_ID0, qid, fifo, 1, tid, |
3615 | ssn); |
3616 | iwm_nic_unlock(sc); |
3617 | if (err) { |
3618 | printf("%s: could not enable Tx queue %d (error %d)\n", |
3619 | DEVNAME(sc)((sc)->sc_dev.dv_xname), qid, err); |
3620 | if (start) |
3621 | ieee80211_addba_resp_refuse(ic, ni, tid, |
3622 | IEEE80211_STATUS_UNSPECIFIED); |
3623 | return err; |
3624 | } |
3625 | /* |
3626 | * If iwm_enable_txq() employed the SCD hardware bug |
3627 | * workaround we must skip the frame with seqnum SSN. |
3628 | */ |
3629 | if (ring->cur != IWM_AGG_SSN_TO_TXQ_IDX(ssn)((ssn) & (256 - 1))) { |
3630 | ssn = (ssn + 1) & 0xfff; |
3631 | KASSERT(ring->cur == IWM_AGG_SSN_TO_TXQ_IDX(ssn))((ring->cur == ((ssn) & (256 - 1))) ? (void)0 : __assert ("diagnostic ", "/usr/src/sys/dev/pci/if_iwm.c", 3631, "ring->cur == IWM_AGG_SSN_TO_TXQ_IDX(ssn)" )); |
3632 | ieee80211_output_ba_move_window(ic, ni, tid, ssn); |
3633 | ni->ni_qos_txseqs[tid] = ssn; |
3634 | } |
3635 | } |
3636 | |
3637 | if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE)((sc->sc_ucode_api)[(30)>>3] & (1<<((30)& (8 -1))))) |
3638 | cmdsize = sizeof(cmd); |
3639 | else |
3640 | cmdsize = sizeof(struct iwm_add_sta_cmd_v7); |
3641 | |
3642 | status = 0; |
3643 | err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA0x18, cmdsize, &cmd, &status); |
3644 | if (!err && (status & IWM_ADD_STA_STATUS_MASK0xFF) != IWM_ADD_STA_SUCCESS0x1) |
3645 | err = EIO5; |
3646 | if (err) { |
3647 | printf("%s: could not update sta (error %d)\n", |
3648 | DEVNAME(sc)((sc)->sc_dev.dv_xname), err); |
3649 | if (start) |
3650 | ieee80211_addba_resp_refuse(ic, ni, tid, |
3651 | IEEE80211_STATUS_UNSPECIFIED); |
3652 | return err; |
3653 | } |
3654 | |
3655 | if (start) { |
3656 | sc->tx_ba_queue_mask |= (1 << qid); |
3657 | ieee80211_addba_resp_accept(ic, ni, tid); |
3658 | } else { |
3659 | sc->tx_ba_queue_mask &= ~(1 << qid); |
3660 | |
3661 | /* |
3662 | * Clear pending frames but keep the queue enabled. |
3663 | * Firmware panics if we disable the queue here. |
3664 | */ |
3665 | iwm_txq_advance(sc, ring, ring->cur); |
3666 | iwm_clear_oactive(sc, ring); |
3667 | } |
3668 | |
3669 | return 0; |
3670 | } |
3671 | |
3672 | void |
3673 | iwm_ba_task(void *arg) |
3674 | { |
3675 | struct iwm_softc *sc = arg; |
3676 | struct ieee80211com *ic = &sc->sc_ic; |
3677 | struct ieee80211_node *ni = ic->ic_bss; |
3678 | int s = splnet()splraise(0x4); |
3679 | int tid, err = 0; |
3680 | |
3681 | if ((sc->sc_flags & IWM_FLAG_SHUTDOWN0x100) || |
3682 | ic->ic_state != IEEE80211_S_RUN) { |
3683 | refcnt_rele_wake(&sc->task_refs); |
3684 | splx(s)spllower(s); |
3685 | return; |
3686 | } |
3687 | |
3688 | for (tid = 0; tid < IWM_MAX_TID_COUNT8 && !err; tid++) { |
3689 | if (sc->sc_flags & IWM_FLAG_SHUTDOWN0x100) |
3690 | break; |
3691 | if (sc->ba_rx.start_tidmask & (1 << tid)) { |
3692 | struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid]; |
3693 | err = iwm_sta_rx_agg(sc, ni, tid, ba->ba_winstart, |
3694 | ba->ba_winsize, ba->ba_timeout_val, 1); |
3695 | sc->ba_rx.start_tidmask &= ~(1 << tid); |
3696 | } else if (sc->ba_rx.stop_tidmask & (1 << tid)) { |
3697 | err = iwm_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0); |
3698 | sc->ba_rx.stop_tidmask &= ~(1 << tid); |
3699 | } |
3700 | } |
3701 | |
3702 | for (tid = 0; tid < IWM_MAX_TID_COUNT8 && !err; tid++) { |
3703 | if (sc->sc_flags & IWM_FLAG_SHUTDOWN0x100) |
3704 | break; |
3705 | if (sc->ba_tx.start_tidmask & (1 << tid)) { |
3706 | struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid]; |
3707 | err = iwm_sta_tx_agg(sc, ni, tid, ba->ba_winstart, |
3708 | ba->ba_winsize, 1); |
3709 | sc->ba_tx.start_tidmask &= ~(1 << tid); |
3710 | } else if (sc->ba_tx.stop_tidmask & (1 << tid)) { |
3711 | err = iwm_sta_tx_agg(sc, ni, tid, 0, 0, 0); |
3712 | sc->ba_tx.stop_tidmask &= ~(1 << tid); |
3713 | } |
3714 | } |
3715 | |
3716 | /* |
3717 | * We "recover" from failure to start or stop a BA session |
3718 | * by resetting the device. |
3719 | */ |
3720 | if (err && (sc->sc_flags & IWM_FLAG_SHUTDOWN0x100) == 0) |
3721 | task_add(systq, &sc->init_task); |
3722 | |
3723 | refcnt_rele_wake(&sc->task_refs); |
3724 | splx(s)spllower(s); |
3725 | } |
3726 | |
3727 | /*
3728 |  * This function is called by the upper layer when an ADDBA request is
3729 |  * received from another STA and before the ADDBA response is sent.
3730 |  */
3731 | int |
3732 | iwm_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni, |
3733 | uint8_t tid) |
3734 | { |
3735 | struct iwm_softc *sc = IC2IFP(ic)->if_softc; |
3736 | |
3737 | if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS || |
3738 | tid > IWM_MAX_TID_COUNT) |
3739 | return ENOSPC; |
3740 | |
3741 | if (sc->ba_rx.start_tidmask & (1 << tid)) |
3742 | return EBUSY; |
3743 | |
3744 | sc->ba_rx.start_tidmask |= (1 << tid); |
3745 | iwm_add_task(sc, systq, &sc->ba_task); |
3746 | |
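| /* |
|  * Returning EBUSY defers the ADDBA response: iwm_ba_task() will call |
|  * ieee80211_addba_resp_accept() or ieee80211_addba_resp_refuse() once |
|  * the firmware has been updated. |
|  */ |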
3747 | return EBUSY; |
3748 | } |
3749 | |
3750 | /* |
3751 | * This function is called by upper layer on teardown of an HT-immediate |
3752 | * Block Ack agreement (e.g. upon receipt of a DELBA frame). |
3753 | */ |
3754 | void |
3755 | iwm_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni, |
3756 | uint8_t tid) |
3757 | { |
3758 | struct iwm_softc *sc = IC2IFP(ic)->if_softc; |
3759 | |
3760 | if (tid > IWM_MAX_TID_COUNT || sc->ba_rx.stop_tidmask & (1 << tid)) |
3761 | return; |
3762 | |
3763 | sc->ba_rx.stop_tidmask |= (1 << tid); |
3764 | iwm_add_task(sc, systq, &sc->ba_task); |
3765 | } |
3766 | |
3767 | int |
3768 | iwm_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni, |
3769 | uint8_t tid) |
3770 | { |
3771 | struct iwm_softc *sc = IC2IFP(ic)->if_softc; |
3772 | struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid]; |
3773 | int qid = IWM_FIRST_AGG_TX_QUEUE + tid; |
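| /* TIDs map 1:1 onto aggregation queues; IWM_FIRST_AGG_TX_QUEUE is 10. */ |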
3774 | |
3775 | /* We only implement Tx aggregation with DQA-capable firmware. */ |
3776 | if (!isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)) |
3777 | return ENOTSUP; |
3778 | |
3779 | /* Ensure we can map this TID to an aggregation queue. */ |
3780 | if (tid >= IWM_MAX_TID_COUNT) |
3781 | return EINVAL; |
3782 | |
3783 | /* We only support a fixed Tx aggregation window size, for now. */ |
3784 | if (ba->ba_winsize != IWM_FRAME_LIMIT) |
3785 | return ENOTSUP; |
3786 | |
3787 | /* Is firmware already using Tx aggregation on this queue? */ |
3788 | if ((sc->tx_ba_queue_mask & (1 << qid)) != 0) |
3789 | return ENOSPC; |
3790 | |
3791 | /* Are we already processing an ADDBA request? */ |
3792 | if (sc->ba_tx.start_tidmask & (1 << tid)) |
3793 | return EBUSY; |
3794 | |
3795 | sc->ba_tx.start_tidmask |= (1 << tid); |
3796 | iwm_add_task(sc, systq, &sc->ba_task); |
3797 | |
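| /* As for Rx above, EBUSY defers the ADDBA response until iwm_ba_task() has set up the firmware queue. */ |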
3798 | return EBUSY; |
3799 | } |
3800 | |
3801 | void |
3802 | iwm_ampdu_tx_stop(struct ieee80211com *ic, struct ieee80211_node *ni, |
3803 | uint8_t tid) |
3804 | { |
3805 | struct iwm_softc *sc = IC2IFP(ic)->if_softc; |
3806 | int qid = IWM_FIRST_AGG_TX_QUEUE + tid; |
3807 | |
3808 | if (tid > IWM_MAX_TID_COUNT || sc->ba_tx.stop_tidmask & (1 << tid)) |
3809 | return; |
3810 | |
3811 | /* Is firmware currently using Tx aggregation on this queue? */ |
3812 | if ((sc->tx_ba_queue_mask & (1 << qid)) == 0) |
3813 | return; |
3814 | |
3815 | sc->ba_tx.stop_tidmask |= (1 << tid); |
3816 | iwm_add_task(sc, systq, &sc->ba_task); |
3817 | } |
3818 | |
3819 | void |
3820 | iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data, |
3821 | const uint16_t *mac_override, const uint16_t *nvm_hw) |
3822 | { |
3823 | const uint8_t *hw_addr; |
3824 | |
3825 | if (mac_override) { |
3826 | static const uint8_t reserved_mac[] = { |
3827 | 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00 |
3828 | }; |
3829 | |
3830 | hw_addr = (const uint8_t *)(mac_override + |
3831 | IWM_MAC_ADDRESS_OVERRIDE_8000); |
3832 | |
3833 | /* |
3834 | * Store the MAC address from MAO section. |
3835 | * No byte swapping is required in MAO section |
3836 | */ |
3837 | memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN); |
3838 | |
3839 | /* |
3840 | * Force the use of the OTP MAC address in case of reserved MAC |
3841 | * address in the NVM, or if address is given but invalid. |
3842 | */ |
3843 | if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 && |
3844 | (memcmp(etherbroadcastaddr, data->hw_addr, |
3845 | sizeof(etherbroadcastaddr)) != 0) && |
3846 | (memcmp(etheranyaddr, data->hw_addr, |
3847 | sizeof(etheranyaddr)) != 0) && |
3848 | !ETHER_IS_MULTICAST(data->hw_addr)) |
3849 | return; |
3850 | } |
3851 | |
3852 | if (nvm_hw) { |
3853 | /* Read the mac address from WFMP registers. */ |
3854 | uint32_t mac_addr0, mac_addr1; |
3855 | |
3856 | if (!iwm_nic_lock(sc)) |
3857 | goto out; |
3858 | mac_addr0 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0)); |
3859 | mac_addr1 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1)); |
3860 | iwm_nic_unlock(sc); |
3861 | |
3862 | hw_addr = (const uint8_t *)&mac_addr0; |
3863 | data->hw_addr[0] = hw_addr[3]; |
3864 | data->hw_addr[1] = hw_addr[2]; |
3865 | data->hw_addr[2] = hw_addr[1]; |
3866 | data->hw_addr[3] = hw_addr[0]; |
3867 | |
3868 | hw_addr = (const uint8_t *)&mac_addr1; |
3869 | data->hw_addr[4] = hw_addr[1]; |
3870 | data->hw_addr[5] = hw_addr[0]; |
3871 | |
3872 | return; |
3873 | } |
3874 | out: |
3875 | printf("%s: mac address not found\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
3876 | memset(data->hw_addr, 0, sizeof(data->hw_addr))__builtin_memset((data->hw_addr), (0), (sizeof(data->hw_addr ))); |
3877 | } |
3878 | |
3879 | int |
3880 | iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw, |
3881 | const uint16_t *nvm_sw, const uint16_t *nvm_calib, |
3882 | const uint16_t *mac_override, const uint16_t *phy_sku, |
3883 | const uint16_t *regulatory, int n_regulatory) |
3884 | { |
3885 | struct iwm_nvm_data *data = &sc->sc_nvm; |
3886 | uint8_t hw_addr[ETHER_ADDR_LEN]; |
3887 | uint32_t sku; |
3888 | uint16_t lar_config; |
3889 | |
3890 | data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION); |
3891 | |
3892 | if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) { |
3893 | uint16_t radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG); |
3894 | data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg); |
3895 | data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg); |
3896 | data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg); |
3897 | data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg); |
3898 | |
3899 | sku = le16_to_cpup(nvm_sw + IWM_SKU); |
3900 | } else { |
3901 | uint32_t radio_cfg = |
3902 | le32_to_cpup((uint32_t *)(phy_sku + IWM_RADIO_CFG_8000)); |
3903 | data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg); |
3904 | data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg); |
3905 | data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg); |
3906 | data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg); |
3907 | data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg); |
3908 | data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg); |
3909 | |
3910 | sku = le32_to_cpup((uint32_t *)(phy_sku + IWM_SKU_8000)); |
3911 | } |
3912 | |
3913 | data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ; |
3914 | data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ; |
3915 | data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE; |
3916 | data->sku_cap_11ac_enable = sku & IWM_NVM_SKU_CAP_11AC_ENABLE; |
3917 | data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE; |
3918 | |
3919 | if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) { |
3920 | uint16_t lar_offset = data->nvm_version < 0xE39 ? |
3921 | IWM_NVM_LAR_OFFSET_8000_OLD : |
3922 | IWM_NVM_LAR_OFFSET_8000; |
3923 | |
3924 | lar_config = le16_to_cpup(regulatory + lar_offset); |
3925 | data->lar_enabled = !!(lar_config & |
3926 | IWM_NVM_LAR_ENABLED_8000); |
3927 | data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS_8000); |
3928 | } else |
3929 | data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS); |
3930 | |
3931 | |
3932 | /* The byte order is little endian 16 bit, meaning 214365 */ |
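| /* For example, NVM bytes aa bb cc dd ee ff yield MAC address bb:aa:dd:cc:ff:ee. */ |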
3933 | if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) { |
3934 | memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN); |
3935 | data->hw_addr[0] = hw_addr[1]; |
3936 | data->hw_addr[1] = hw_addr[0]; |
3937 | data->hw_addr[2] = hw_addr[3]; |
3938 | data->hw_addr[3] = hw_addr[2]; |
3939 | data->hw_addr[4] = hw_addr[5]; |
3940 | data->hw_addr[5] = hw_addr[4]; |
3941 | } else |
3942 | iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw); |
3943 | |
3944 | if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) { |
3945 | if (sc->nvm_type == IWM_NVM_SDP) { |
3946 | iwm_init_channel_map(sc, regulatory, iwm_nvm_channels, |
3947 | MIN(n_regulatory, nitems(iwm_nvm_channels))); |
3948 | } else { |
3949 | iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS], |
3950 | iwm_nvm_channels, nitems(iwm_nvm_channels)); |
3951 | } |
3952 | } else |
3953 | iwm_init_channel_map(sc, &regulatory[IWM_NVM_CHANNELS_8000], |
3954 | iwm_nvm_channels_8000, |
3955 | MIN(n_regulatory, nitems(iwm_nvm_channels_8000))); |
3956 | |
3957 | data->calib_version = 255; /* TODO: |
3958 | this value will prevent some checks from |
3959 | failing; we need to check whether this |
3960 | field is still needed, and if so, |
3961 | where it lives in the NVM */ |
3962 | |
3963 | return 0; |
3964 | } |
3965 | |
3966 | int |
3967 | iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections) |
3968 | { |
3969 | const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL; |
3970 | const uint16_t *regulatory = NULL; |
3971 | int n_regulatory = 0; |
3972 | |
3973 | /* Checking for required sections */ |
3974 | if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) { |
3975 | if (!sections[IWM_NVM_SECTION_TYPE_SW].data || |
3976 | !sections[IWM_NVM_SECTION_TYPE_HW].data) { |
3977 | return ENOENT; |
3978 | } |
3979 | |
3980 | hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data; |
3981 | |
3982 | if (sc->nvm_type == IWM_NVM_SDP) { |
3983 | if (!sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data) |
3984 | return ENOENT; |
3985 | regulatory = (const uint16_t *) |
3986 | sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data; |
3987 | n_regulatory = |
3988 | sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].length; |
3989 | } |
3990 | } else if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) { |
3991 | /* SW and REGULATORY sections are mandatory */ |
3992 | if (!sections[IWM_NVM_SECTION_TYPE_SW].data || |
3993 | !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) { |
3994 | return ENOENT; |
3995 | } |
3996 | /* MAC_OVERRIDE or at least HW section must exist */ |
3997 | if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data && |
3998 | !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) { |
3999 | return ENOENT; |
4000 | } |
4001 | |
4002 | /* PHY_SKU section is mandatory in B0 */ |
4003 | if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) { |
4004 | return ENOENT; |
4005 | } |
4006 | |
4007 | regulatory = (const uint16_t *) |
4008 | sections[IWM_NVM_SECTION_TYPE_REGULATORY].data; |
4009 | n_regulatory = sections[IWM_NVM_SECTION_TYPE_REGULATORY].length; |
4010 | hw = (const uint16_t *) |
4011 | sections[IWM_NVM_SECTION_TYPE_HW_8000].data; |
4012 | mac_override = |
4013 | (const uint16_t *) |
4014 | sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data; |
4015 | phy_sku = (const uint16_t *) |
4016 | sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data; |
4017 | } else { |
4018 | panic("unknown device family %d", sc->sc_device_family); |
4019 | } |
4020 | |
4021 | sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data; |
4022 | calib = (const uint16_t *) |
4023 | sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data; |
4024 | |
4025 | /* XXX should pass in the length of every section */ |
4026 | return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override, |
4027 | phy_sku, regulatory, n_regulatory); |
4028 | } |
4029 | |
4030 | int |
4031 | iwm_nvm_init(struct iwm_softc *sc) |
4032 | { |
4033 | struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS]; |
4034 | int i, section, err; |
4035 | uint16_t len; |
4036 | uint8_t *buf; |
4037 | const size_t bufsz = sc->sc_nvm_max_section_size; |
4038 | |
4039 | memset(nvm_sections, 0, sizeof(nvm_sections)); |
4040 | |
4041 | buf = malloc(bufsz, M_DEVBUF, M_WAIT); |
4042 | if (buf == NULL) |
4043 | return ENOMEM; |
4044 | |
4045 | for (i = 0; i < nitems(iwm_nvm_to_read); i++) { |
4046 | section = iwm_nvm_to_read[i]; |
4047 | KASSERT(section <= nitems(nvm_sections)); |
4048 | |
4049 | err = iwm_nvm_read_section(sc, section, buf, &len, bufsz); |
4050 | if (err) { |
4051 | err = 0; |
4052 | continue; |
4053 | } |
4054 | nvm_sections[section].data = malloc(len, M_DEVBUF, M_WAIT); |
4055 | if (nvm_sections[section].data == NULL) { |
4056 | err = ENOMEM; |
4057 | break; |
4058 | } |
4059 | memcpy(nvm_sections[section].data, buf, len); |
4060 | nvm_sections[section].length = len; |
4061 | } |
4062 | free(buf, M_DEVBUF, bufsz); |
4063 | if (err == 0) |
4064 | err = iwm_parse_nvm_sections(sc, nvm_sections); |
4065 | |
4066 | for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) { |
4067 | if (nvm_sections[i].data != NULL) |
4068 | free(nvm_sections[i].data, M_DEVBUF, |
4069 | nvm_sections[i].length); |
4070 | } |
4071 | |
4072 | return err; |
4073 | } |
4074 | |
4075 | int |
4076 | iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr, |
4077 | const uint8_t *section, uint32_t byte_cnt) |
4078 | { |
4079 | int err = EINVAL; |
4080 | uint32_t chunk_sz, offset; |
4081 | |
4082 | chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt); |
4083 | |
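| /* IWM_FH_MEM_TB_MAX_LENGTH is 0x20000, so sections are uploaded in chunks of at most 128KB. */ |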
4084 | for (offset = 0; offset < byte_cnt; offset += chunk_sz) { |
4085 | uint32_t addr, len; |
4086 | const uint8_t *data; |
4087 | |
4088 | addr = dst_addr + offset; |
4089 | len = MIN(chunk_sz, byte_cnt - offset); |
4090 | data = section + offset; |
4091 | |
4092 | err = iwm_firmware_load_chunk(sc, addr, data, len); |
4093 | if (err) |
4094 | break; |
4095 | } |
4096 | |
4097 | return err; |
4098 | } |
4099 | |
4100 | int |
4101 | iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr, |
4102 | const uint8_t *chunk, uint32_t byte_cnt) |
4103 | { |
4104 | struct iwm_dma_info *dma = &sc->fw_dma; |
4105 | int err; |
4106 | |
4107 | /* Copy firmware chunk into pre-allocated DMA-safe memory. */ |
4108 | memcpy(dma->vaddr, chunk, byte_cnt); |
4109 | bus_dmamap_sync(sc->sc_dmat, |
4110 | dma->map, 0, byte_cnt, BUS_DMASYNC_PREWRITE); |
4111 | |
4112 | if (dst_addr >= IWM_FW_MEM_EXTENDED_START && |
4113 | dst_addr <= IWM_FW_MEM_EXTENDED_END) { |
4114 | err = iwm_set_bits_prph(sc, IWM_LMPM_CHICK, |
4115 | IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE); |
4116 | if (err) |
4117 | return err; |
4118 | } |
4119 | |
4120 | sc->sc_fw_chunk_done = 0; |
4121 | |
4122 | if (!iwm_nic_lock(sc)) |
4123 | return EBUSY; |
4124 | |
4125 | IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL), |
4126 | IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE); |
4127 | IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL), |
4128 | dst_addr); |
4129 | IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL), |
4130 | dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK); |
4131 | IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL), |
4132 | (iwm_get_dma_hi_addr(dma->paddr) |
4133 | << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt); |
4134 | IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL), |
4135 | 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM | |
4136 | 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX | |
4137 | IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID); |
4138 | IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL), |
4139 | IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | |
4140 | IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE | |
4141 | IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); |
4142 | |
4143 | iwm_nic_unlock(sc); |
4144 | |
4145 | /* Wait for this segment to load. */ |
4146 | err = 0; |
4147 | while (!sc->sc_fw_chunk_done) { |
4148 | err = tsleep_nsec(&sc->sc_fw, 0, "iwmfw", SEC_TO_NSEC(1)); |
4149 | if (err) |
4150 | break; |
4151 | } |
4152 | |
4153 | if (!sc->sc_fw_chunk_done) |
4154 | printf("%s: fw chunk addr 0x%x len %d failed to load\n", |
4155 | DEVNAME(sc), dst_addr, byte_cnt); |
4156 | |
4157 | if (dst_addr >= IWM_FW_MEM_EXTENDED_START && |
4158 | dst_addr <= IWM_FW_MEM_EXTENDED_END) { |
4159 | int err2 = iwm_clear_bits_prph(sc, IWM_LMPM_CHICK, |
4160 | IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE); |
4161 | if (!err) |
4162 | err = err2; |
4163 | } |
4164 | |
4165 | return err; |
4166 | } |
4167 | |
4168 | int |
4169 | iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type) |
4170 | { |
4171 | struct iwm_fw_sects *fws; |
4172 | int err, i; |
4173 | void *data; |
4174 | uint32_t dlen; |
4175 | uint32_t offset; |
4176 | |
4177 | fws = &sc->sc_fw.fw_sects[ucode_type]; |
4178 | for (i = 0; i < fws->fw_count; i++) { |
4179 | data = fws->fw_sect[i].fws_data; |
4180 | dlen = fws->fw_sect[i].fws_len; |
4181 | offset = fws->fw_sect[i].fws_devoff; |
4182 | if (dlen > sc->sc_fwdmasegsz) { |
4183 | err = EFBIG; |
4184 | } else |
4185 | err = iwm_firmware_load_sect(sc, offset, data, dlen); |
4186 | if (err) { |
4187 | printf("%s: could not load firmware chunk %u of %u\n", |
4188 | DEVNAME(sc), i, fws->fw_count); |
4189 | return err; |
4190 | } |
4191 | } |
4192 | |
4193 | iwm_enable_interrupts(sc); |
4194 | |
4195 | IWM_WRITE(sc, IWM_CSR_RESET, 0); |
4196 | |
4197 | return 0; |
4198 | } |
4199 | |
4200 | int |
4201 | iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws, |
4202 | int cpu, int *first_ucode_section) |
4203 | { |
4204 | int shift_param; |
4205 | int i, err = 0, sec_num = 0x1; |
4206 | uint32_t val, last_read_idx = 0; |
4207 | void *data; |
4208 | uint32_t dlen; |
4209 | uint32_t offset; |
4210 | |
4211 | if (cpu == 1) { |
4212 | shift_param = 0; |
4213 | *first_ucode_section = 0; |
4214 | } else { |
4215 | shift_param = 16; |
4216 | (*first_ucode_section)++; |
4217 | } |
4218 | |
4219 | for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) { |
4220 | last_read_idx = i; |
4221 | data = fws->fw_sect[i].fws_data; |
4222 | dlen = fws->fw_sect[i].fws_len; |
4223 | offset = fws->fw_sect[i].fws_devoff; |
4224 | |
4225 | /* |
4226 | * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the CPU1 |
4227 | * sections from the CPU2 sections. |
4228 | * PAGING_SEPARATOR_SECTION delimiter - separates the CPU2 |
4229 | * non-paged sections from the CPU2 paging section. |
4230 | */ |
4231 | if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION || |
4232 | offset == IWM_PAGING_SEPARATOR_SECTION) |
4233 | break; |
4234 | |
4235 | if (dlen > sc->sc_fwdmasegsz) { |
4236 | err = EFBIG; |
4237 | } else |
4238 | err = iwm_firmware_load_sect(sc, offset, data, dlen); |
4239 | if (err) { |
4240 | printf("%s: could not load firmware chunk %d " |
4241 | "(error %d)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), i, err); |
4242 | return err; |
4243 | } |
4244 | |
4245 | /* Notify the ucode of the loaded section number and status */ |
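| /* sec_num accumulates a ones-mask (1, 3, 7, ...): after N sections the |
|  * register holds (2^N)-1, shifted into the upper 16 bits for CPU2. */ |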
4246 | if (iwm_nic_lock(sc)) { |
4247 | val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS); |
4248 | val = val | (sec_num << shift_param); |
4249 | IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val); |
4250 | sec_num = (sec_num << 1) | 0x1; |
4251 | iwm_nic_unlock(sc); |
4252 | } else { |
4253 | err = EBUSY; |
4254 | printf("%s: could not load firmware chunk %d " |
4255 | "(error %d)\n", DEVNAME(sc), i, err); |
4256 | return err; |
4257 | } |
4258 | } |
4259 | |
4260 | *first_ucode_section = last_read_idx; |
4261 | |
4262 | if (iwm_nic_lock(sc)) { |
4263 | if (cpu == 1) |
4264 | IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF); |
4265 | else |
4266 | IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF); |
4267 | iwm_nic_unlock(sc); |
4268 | } else { |
4269 | err = EBUSY; |
4270 | printf("%s: could not finalize firmware loading (error %d)\n", |
4271 | DEVNAME(sc), err); |
4272 | return err; |
4273 | } |
4274 | |
4275 | return 0; |
4276 | } |
4277 | |
4278 | int |
4279 | iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type) |
4280 | { |
4281 | struct iwm_fw_sects *fws; |
4282 | int err = 0; |
4283 | int first_ucode_section; |
4284 | |
4285 | fws = &sc->sc_fw.fw_sects[ucode_type]; |
4286 | |
4287 | /* configure the ucode to be ready to get the secured image */ |
4288 | /* release CPU reset */ |
4289 | if (iwm_nic_lock(sc)) { |
4290 | iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, |
4291 | IWM_RELEASE_CPU_RESET_BIT); |
4292 | iwm_nic_unlock(sc); |
4293 | } |
4294 | |
4295 | /* load to FW the binary Secured sections of CPU1 */ |
4296 | err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section); |
4297 | if (err) |
4298 | return err; |
4299 | |
4300 | /* load to FW the binary sections of CPU2 */ |
4301 | err = iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section); |
4302 | if (err) |
4303 | return err; |
4304 | |
4305 | iwm_enable_interrupts(sc); |
4306 | return 0; |
4307 | } |
4308 | |
4309 | int |
4310 | iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type) |
4311 | { |
4312 | int err; |
4313 | |
4314 | splassert(IPL_NET); |
4315 | |
4316 | sc->sc_uc.uc_intr = 0; |
4317 | sc->sc_uc.uc_ok = 0; |
4318 | |
4319 | if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) |
4320 | err = iwm_load_firmware_8000(sc, ucode_type); |
4321 | else |
4322 | err = iwm_load_firmware_7000(sc, ucode_type); |
4323 | |
4324 | if (err) |
4325 | return err; |
4326 | |
4327 | /* wait for the firmware to load */ |
4328 | err = tsleep_nsec(&sc->sc_uc, 0, "iwmuc", SEC_TO_NSEC(1)); |
4329 | if (err || !sc->sc_uc.uc_ok) |
4330 | printf("%s: could not load firmware\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
4331 | |
4332 | return err; |
4333 | } |
4334 | |
4335 | int |
4336 | iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type) |
4337 | { |
4338 | int err; |
4339 | |
4340 | IWM_WRITE(sc, IWM_CSR_INT, ~0); |
4341 | |
4342 | err = iwm_nic_init(sc); |
4343 | if (err) { |
4344 | printf("%s: unable to init nic\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
4345 | return err; |
4346 | } |
4347 | |
4348 | /* make sure rfkill handshake bits are cleared */ |
4349 | IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL); |
4350 | IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, |
4351 | IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); |
4352 | |
4353 | /* clear (again), then enable firmware load interrupt */ |
4354 | IWM_WRITE(sc, IWM_CSR_INT, ~0); |
4355 | iwm_enable_fwload_interrupt(sc); |
4356 | |
4357 | /* really make sure rfkill handshake bits are cleared */ |
4358 | /* maybe we should write a few times more? just to make sure */ |
4359 | IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL); |
4360 | IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL); |
4361 | |
4362 | return iwm_load_firmware(sc, ucode_type); |
4363 | } |
4364 | |
4365 | int |
4366 | iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant) |
4367 | { |
4368 | struct iwm_tx_ant_cfg_cmd tx_ant_cmd = { |
4369 | .valid = htole32(valid_tx_ant), |
4370 | }; |
4371 | |
4372 | return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD, |
4373 | 0, sizeof(tx_ant_cmd), &tx_ant_cmd); |
4374 | } |
4375 | |
4376 | int |
4377 | iwm_send_phy_cfg_cmd(struct iwm_softc *sc) |
4378 | { |
4379 | struct iwm_phy_cfg_cmd phy_cfg_cmd; |
4380 | enum iwm_ucode_type ucode_type = sc->sc_uc_current; |
4381 | |
4382 | phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config | |
4383 | sc->sc_extra_phy_config); |
4384 | phy_cfg_cmd.calib_control.event_trigger = |
4385 | sc->sc_default_calib[ucode_type].event_trigger; |
4386 | phy_cfg_cmd.calib_control.flow_trigger = |
4387 | sc->sc_default_calib[ucode_type].flow_trigger; |
4388 | |
4389 | return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, 0, |
4390 | sizeof(phy_cfg_cmd), &phy_cfg_cmd); |
4391 | } |
4392 | |
4393 | int |
4394 | iwm_send_dqa_cmd(struct iwm_softc *sc) |
4395 | { |
4396 | struct iwm_dqa_enable_cmd dqa_cmd = { |
4397 | .cmd_queue = htole32(IWM_DQA_CMD_QUEUE), |
4398 | }; |
4399 | uint32_t cmd_id; |
4400 | |
4401 | cmd_id = iwm_cmd_id(IWM_DQA_ENABLE_CMD, IWM_DATA_PATH_GROUP, 0); |
4402 | return iwm_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd); |
4403 | } |
4404 | |
4405 | int |
4406 | iwm_load_ucode_wait_alive(struct iwm_softc *sc, |
4407 | enum iwm_ucode_type ucode_type) |
4408 | { |
4409 | enum iwm_ucode_type old_type = sc->sc_uc_current; |
4410 | struct iwm_fw_sects *fw = &sc->sc_fw.fw_sects[ucode_type]; |
4411 | int err; |
4412 | |
4413 | err = iwm_read_firmware(sc); |
4414 | if (err) |
4415 | return err; |
4416 | |
4417 | if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)) |
4418 | sc->cmdqid = IWM_DQA_CMD_QUEUE; |
4419 | else |
4420 | sc->cmdqid = IWM_CMD_QUEUE; |
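| /* DQA-capable firmware expects commands on queue 0; older firmware uses queue 9. */ |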
4421 | |
4422 | sc->sc_uc_current = ucode_type; |
4423 | err = iwm_start_fw(sc, ucode_type); |
4424 | if (err) { |
4425 | sc->sc_uc_current = old_type; |
4426 | return err; |
4427 | } |
4428 | |
4429 | err = iwm_post_alive(sc); |
4430 | if (err) |
4431 | return err; |
4432 | |
4433 | /* |
4434 | * Configure and operate the firmware paging mechanism. |
4435 | * The driver configures the paging flow only once; the CPU2 paging |
4436 | * image is included in the IWM_UCODE_INIT image. |
4437 | */ |
4438 | if (fw->paging_mem_size) { |
4439 | err = iwm_save_fw_paging(sc, fw); |
4440 | if (err) { |
4441 | printf("%s: failed to save the FW paging image\n", |
4442 | DEVNAME(sc)); |
4443 | return err; |
4444 | } |
4445 | |
4446 | err = iwm_send_paging_cmd(sc, fw); |
4447 | if (err) { |
4448 | printf("%s: failed to send the paging cmd\n", |
4449 | DEVNAME(sc)); |
4450 | iwm_free_fw_paging(sc); |
4451 | return err; |
4452 | } |
4453 | } |
4454 | |
4455 | return 0; |
4456 | } |
4457 | |
4458 | int |
4459 | iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm) |
4460 | { |
4461 | const int wait_flags = (IWM_INIT_COMPLETE | IWM_CALIB_COMPLETE); |
4462 | int err, s; |
4463 | |
4464 | if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) { |
4465 | printf("%s: radio is disabled by hardware switch\n", |
4466 | DEVNAME(sc)); |
4467 | return EPERM; |
4468 | } |
4469 | |
4470 | s = splnet(); |
4471 | sc->sc_init_complete = 0; |
4472 | err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT); |
4473 | if (err) { |
4474 | printf("%s: failed to load init firmware\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
4475 | splx(s)spllower(s); |
4476 | return err; |
4477 | } |
4478 | |
4479 | if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000) { |
4480 | err = iwm_send_bt_init_conf(sc); |
4481 | if (err) { |
4482 | printf("%s: could not init bt coex (error %d)\n", |
4483 | DEVNAME(sc), err); |
4484 | splx(s); |
4485 | return err; |
4486 | } |
4487 | } |
4488 | |
4489 | if (justnvm) { |
4490 | err = iwm_nvm_init(sc); |
4491 | if (err) { |
4492 | printf("%s: failed to read nvm\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
4493 | splx(s)spllower(s); |
4494 | return err; |
4495 | } |
4496 | |
4497 | if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr)) |
4498 | IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr, |
4499 | sc->sc_nvm.hw_addr); |
4500 | |
4501 | splx(s); |
4502 | return 0; |
4503 | } |
4504 | |
4505 | err = iwm_sf_config(sc, IWM_SF_INIT_OFF); |
4506 | if (err) { |
4507 | splx(s); |
4508 | return err; |
4509 | } |
4510 | |
4511 | /* Send TX valid antennas before triggering calibrations */ |
4512 | err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc)); |
4513 | if (err) { |
4514 | splx(s); |
4515 | return err; |
4516 | } |
4517 | |
4518 | /* |
4519 | * Send phy configurations command to init uCode |
4520 | * to start the 16.0 uCode init image internal calibrations. |
4521 | */ |
4522 | err = iwm_send_phy_cfg_cmd(sc); |
4523 | if (err) { |
4524 | splx(s); |
4525 | return err; |
4526 | } |
4527 | |
4528 | /* |
4529 | * Nothing to do but wait for the init complete and phy DB |
4530 | * notifications from the firmware. |
4531 | */ |
4532 | while ((sc->sc_init_complete & wait_flags) != wait_flags) { |
4533 | err = tsleep_nsec(&sc->sc_init_complete, 0, "iwminit", |
4534 | SEC_TO_NSEC(2)); |
4535 | if (err) |
4536 | break; |
4537 | } |
4538 | |
4539 | splx(s); |
4540 | return err; |
4541 | } |
4542 | |
4543 | int |
4544 | iwm_config_ltr(struct iwm_softc *sc) |
4545 | { |
4546 | struct iwm_ltr_config_cmd cmd = { |
4547 | .flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE), |
4548 | }; |
4549 | |
4550 | if (!sc->sc_ltr_enabled) |
4551 | return 0; |
4552 | |
4553 | return iwm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd); |
4554 | } |
4555 | |
4556 | int |
4557 | iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx) |
4558 | { |
4559 | struct iwm_rx_ring *ring = &sc->rxq; |
4560 | struct iwm_rx_data *data = &ring->data[idx]; |
4561 | struct mbuf *m; |
4562 | int err; |
4563 | int fatal = 0; |
4564 | |
4565 | m = m_gethdr(M_DONTWAIT, MT_DATA); |
4566 | if (m == NULL) |
4567 | return ENOBUFS; |
4568 | |
4569 | if (size <= MCLBYTES) { |
4570 | MCLGET(m, M_DONTWAIT); |
4571 | } else { |
4572 | MCLGETL(m, M_DONTWAIT, IWM_RBUF_SIZE); |
4573 | } |
4574 | if ((m->m_flags & M_EXT) == 0) { |
4575 | m_freem(m); |
4576 | return ENOBUFS; |
4577 | } |
4578 | |
4579 | if (data->m != NULL) { |
4580 | bus_dmamap_unload(sc->sc_dmat, data->map); |
4581 | fatal = 1; |
4582 | } |
4583 | |
4584 | m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; |
4585 | err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m, |
4586 | BUS_DMA_READ|BUS_DMA_NOWAIT); |
4587 | if (err) { |
4588 | /* XXX */ |
4589 | if (fatal) |
4590 | panic("iwm: could not load RX mbuf"); |
4591 | m_freem(m); |
4592 | return err; |
4593 | } |
4594 | data->m = m; |
4595 | bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD); |
4596 | |
4597 | /* Update RX descriptor. */ |
4598 | if (sc->sc_mqrx_supported) { |
4599 | ((uint64_t *)ring->desc)[idx] = |
4600 | htole64(data->map->dm_segs[0].ds_addr); |
4601 | bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map, |
4602 | idx * sizeof(uint64_t), sizeof(uint64_t), |
4603 | BUS_DMASYNC_PREWRITE); |
4604 | } else { |
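| /* Legacy RX descriptors hold the DMA address shifted right by 8 bits, |
|  * so receive buffers must be 256-byte aligned. */ |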
4605 | ((uint32_t *)ring->desc)[idx] = |
4606 | htole32(data->map->dm_segs[0].ds_addr >> 8); |
4607 | bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map, |
4608 | idx * sizeof(uint32_t), sizeof(uint32_t), |
4609 | BUS_DMASYNC_PREWRITE); |
4610 | } |
4611 | |
4612 | return 0; |
4613 | } |
4614 | |
4615 | /* |
4616 | * RSSI values are reported by the FW as positive values - need to negate |
4617 | * to obtain their dBm. Account for missing antennas by replacing 0 |
4618 | * values by -256dBm: practically 0 power and a non-feasible 8 bit value. |
4619 | */ |
4620 | int |
4621 | iwm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info) |
4622 | { |
4623 | int energy_a, energy_b, energy_c, max_energy; |
4624 | uint32_t val; |
4625 | |
4626 | val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]); |
4627 | energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >> |
4628 | IWM_RX_INFO_ENERGY_ANT_A_POS; |
4629 | energy_a = energy_a ? -energy_a : -256; |
4630 | energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >> |
4631 | IWM_RX_INFO_ENERGY_ANT_B_POS; |
4632 | energy_b = energy_b ? -energy_b : -256; |
4633 | energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >> |
4634 | IWM_RX_INFO_ENERGY_ANT_C_POS; |
4635 | energy_c = energy_c ? -energy_c : -256; |
4636 | max_energy = MAX(energy_a, energy_b); |
4637 | max_energy = MAX(max_energy, energy_c); |
4638 | |
4639 | return max_energy; |
4640 | } |
4641 | |
4642 | int |
4643 | iwm_rxmq_get_signal_strength(struct iwm_softc *sc, |
4644 | struct iwm_rx_mpdu_desc *desc) |
4645 | { |
4646 | int energy_a, energy_b; |
4647 | |
4648 | energy_a = desc->v1.energy_a; |
4649 | energy_b = desc->v1.energy_b; |
4650 | energy_a = energy_a ? -energy_a : -256; |
4651 | energy_b = energy_b ? -energy_b : -256; |
4652 | return MAX(energy_a, energy_b); |
4653 | } |
4654 | |
4655 | void |
4656 | iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt, |
4657 | struct iwm_rx_data *data) |
4658 | { |
4659 | struct iwm_rx_phy_info *phy_info = (void *)pkt->data; |
4660 | |
4661 | bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt), |
4662 | sizeof(*phy_info), BUS_DMASYNC_POSTREAD); |
4663 | |
4664 | memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info)); |
4665 | } |
4666 | |
4667 | /* |
4668 | * Retrieve the average noise (in dBm) among receivers. |
4669 | */ |
4670 | int |
4671 | iwm_get_noise(const struct iwm_statistics_rx_non_phy *stats) |
4672 | { |
4673 | int i, total, nbant, noise; |
4674 | |
4675 | total = nbant = noise = 0; |
4676 | for (i = 0; i < 3; i++) { |
4677 | noise = letoh32(stats->beacon_silence_rssi[i]) & 0xff; |
4678 | if (noise) { |
4679 | total += noise; |
4680 | nbant++; |
4681 | } |
4682 | } |
4683 | |
4684 | /* There should be at least one antenna but check anyway. */ |
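| /* The silence RSSI is reported on a firmware-internal scale; subtracting 107 converts the average to dBm. */ |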
4685 | return (nbant == 0) ? -127 : (total / nbant) - 107; |
4686 | } |
4687 | |
4688 | int |
4689 | iwm_ccmp_decap(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, |
4690 | struct ieee80211_rxinfo *rxi) |
4691 | { |
4692 | struct ieee80211com *ic = &sc->sc_ic; |
4693 | struct ieee80211_key *k = &ni->ni_pairwise_key; |
4694 | struct ieee80211_frame *wh; |
4695 | uint64_t pn, *prsc; |
4696 | uint8_t *ivp; |
4697 | uint8_t tid; |
4698 | int hdrlen, hasqos; |
4699 | |
4700 | wh = mtod(m, struct ieee80211_frame *); |
4701 | hdrlen = ieee80211_get_hdrlen(wh); |
4702 | ivp = (uint8_t *)wh + hdrlen; |
4703 | |
4704 | /* Check that ExtIV bit is set. */ |
4705 | if (!(ivp[3] & IEEE80211_WEP_EXTIV)) |
4706 | return 1; |
4707 | |
4708 | hasqos = ieee80211_has_qos(wh); |
4709 | tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0; |
4710 | prsc = &k->k_rsc[tid]; |
4711 | |
4712 | /* Extract the 48-bit PN from the CCMP header. */ |
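| /* CCMP header layout: PN0 PN1 <reserved> <key-ID/ExtIV> PN2 PN3 PN4 PN5. */ |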
4713 | pn = (uint64_t)ivp[0] | |
4714 | (uint64_t)ivp[1] << 8 | |
4715 | (uint64_t)ivp[4] << 16 | |
4716 | (uint64_t)ivp[5] << 24 | |
4717 | (uint64_t)ivp[6] << 32 | |
4718 | (uint64_t)ivp[7] << 40; |
4719 | if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) { |
4720 | if (pn < *prsc) { |
4721 | ic->ic_stats.is_ccmp_replays++; |
4722 | return 1; |
4723 | } |
4724 | } else if (pn <= *prsc) { |
4725 | ic->ic_stats.is_ccmp_replays++; |
4726 | return 1; |
4727 | } |
4728 | /* Last seen packet number is updated in ieee80211_inputm(). */ |
4729 | |
4730 | /* |
4731 | * Some firmware versions strip the MIC, and some don't. It is not |
4732 | * clear which of the capability flags could tell us what to expect. |
4733 | * For now, keep things simple and just leave the MIC in place if |
4734 | * it is present. |
4735 | * |
4736 | * The IV will be stripped by ieee80211_inputm(). |
4737 | */ |
4738 | return 0; |
4739 | } |
4740 | |
4741 | int |
4742 | iwm_rx_hwdecrypt(struct iwm_softc *sc, struct mbuf *m, uint32_t rx_pkt_status, |
4743 | struct ieee80211_rxinfo *rxi) |
4744 | { |
4745 | struct ieee80211com *ic = &sc->sc_ic; |
4746 | struct ifnet *ifp = IC2IFP(ic); |
4747 | struct ieee80211_frame *wh; |
4748 | struct ieee80211_node *ni; |
4749 | int ret = 0; |
4750 | uint8_t type, subtype; |
4751 | |
4752 | wh = mtod(m, struct ieee80211_frame *); |
4753 | |
4754 | type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; |
4755 | if (type == IEEE80211_FC0_TYPE_CTL) |
4756 | return 0; |
4757 | |
4758 | subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; |
4759 | if (ieee80211_has_qos(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) |
4760 | return 0; |
4761 | |
4762 | if (IEEE80211_IS_MULTICAST(wh->i_addr1) || |
4763 | !(wh->i_fc[1] & IEEE80211_FC1_PROTECTED)) |
4764 | return 0; |
4765 | |
4766 | ni = ieee80211_find_rxnode(ic, wh); |
4767 | /* Handle hardware decryption. */ |
4768 | if ((ni->ni_flags & IEEE80211_NODE_RXPROT) && |
4769 | ni->ni_pairwise_key.k_cipher == IEEE80211_CIPHER_CCMP) { |
4770 | if ((rx_pkt_status & IWM_RX_MPDU_RES_STATUS_SEC_ENC_MSK) != |
4771 | IWM_RX_MPDU_RES_STATUS_SEC_CCM_ENC) { |
4772 | ic->ic_stats.is_ccmp_dec_errs++; |
4773 | ret = 1; |
4774 | goto out; |
4775 | } |
4776 | /* Check whether decryption was successful or not. */ |
4777 | if ((rx_pkt_status & |
4778 | (IWM_RX_MPDU_RES_STATUS_DEC_DONE | |
4779 | IWM_RX_MPDU_RES_STATUS_MIC_OK)) != |
4780 | (IWM_RX_MPDU_RES_STATUS_DEC_DONE | |
4781 | IWM_RX_MPDU_RES_STATUS_MIC_OK)) { |
4782 | ic->ic_stats.is_ccmp_dec_errs++; |
4783 | ret = 1; |
4784 | goto out; |
4785 | } |
4786 | rxi->rxi_flags |= IEEE80211_RXI_HWDEC; |
4787 | } |
4788 | out: |
4789 | if (ret) |
4790 | ifp->if_ierrors++; |
4791 | ieee80211_release_node(ic, ni); |
4792 | return ret; |
4793 | } |
4794 | |
4795 | void |
4796 | iwm_rx_frame(struct iwm_softc *sc, struct mbuf *m, int chanidx, |
4797 | uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags, |
4798 | uint32_t device_timestamp, struct ieee80211_rxinfo *rxi, |
4799 | struct mbuf_list *ml) |
4800 | { |
4801 | struct ieee80211com *ic = &sc->sc_ic; |
4802 | struct ifnet *ifp = IC2IFP(ic); |
4803 | struct ieee80211_frame *wh; |
4804 | struct ieee80211_node *ni; |
4805 | |
4806 | if (chanidx < 0 || chanidx >= nitems(ic->ic_channels)) |
4807 | chanidx = ieee80211_chan2ieee(ic, ic->ic_ibss_chan); |
4808 | |
4809 | wh = mtod(m, struct ieee80211_frame *); |
4810 | ni = ieee80211_find_rxnode(ic, wh); |
4811 | if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) && |
4812 | iwm_ccmp_decap(sc, m, ni, rxi) != 0) { |
4813 | ifp->if_ierrors++; |
4814 | m_freem(m); |
4815 | ieee80211_release_node(ic, ni); |
4816 | return; |
4817 | } |
4818 | |
4819 | #if NBPFILTER > 0 |
4820 | if (sc->sc_drvbpf != NULL) { |
4821 | struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap; |
4822 | uint16_t chan_flags; |
4823 | |
4824 | tap->wr_flags = 0; |
4825 | if (is_shortpre) |
4826 | tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; |
4827 | tap->wr_chan_freq = |
4828 | htole16(ic->ic_channels[chanidx].ic_freq); |
4829 | chan_flags = ic->ic_channels[chanidx].ic_flags; |
4830 | if (ic->ic_curmode != IEEE80211_MODE_11N && |
4831 | ic->ic_curmode != IEEE80211_MODE_11AC) { |
4832 | chan_flags &= ~IEEE80211_CHAN_HT; |
4833 | chan_flags &= ~IEEE80211_CHAN_40MHZ; |
4834 | } |
4835 | if (ic->ic_curmode != IEEE80211_MODE_11AC) |
4836 | chan_flags &= ~IEEE80211_CHAN_VHT; |
4837 | tap->wr_chan_flags = htole16(chan_flags); |
4838 | tap->wr_dbm_antsignal = (int8_t)rxi->rxi_rssi; |
4839 | tap->wr_dbm_antnoise = (int8_t)sc->sc_noise; |
4840 | tap->wr_tsft = device_timestamp; |
4841 | if (rate_n_flags & IWM_RATE_MCS_HT_MSK) { |
4842 | uint8_t mcs = (rate_n_flags & |
4843 | (IWM_RATE_HT_MCS_RATE_CODE_MSK | |
4844 | IWM_RATE_HT_MCS_NSS_MSK)); |
4845 | tap->wr_rate = (0x80 | mcs); |
4846 | } else { |
4847 | uint8_t rate = (rate_n_flags & |
4848 | IWM_RATE_LEGACY_RATE_MSK); |
4849 | switch (rate) { |
4850 | /* CCK rates. */ |
4851 | case 10: tap->wr_rate = 2; break; |
4852 | case 20: tap->wr_rate = 4; break; |
4853 | case 55: tap->wr_rate = 11; break; |
4854 | case 110: tap->wr_rate = 22; break; |
4855 | /* OFDM rates. */ |
4856 | case 0xd: tap->wr_rate = 12; break; |
4857 | case 0xf: tap->wr_rate = 18; break; |
4858 | case 0x5: tap->wr_rate = 24; break; |
4859 | case 0x7: tap->wr_rate = 36; break; |
4860 | case 0x9: tap->wr_rate = 48; break; |
4861 | case 0xb: tap->wr_rate = 72; break; |
4862 | case 0x1: tap->wr_rate = 96; break; |
4863 | case 0x3: tap->wr_rate = 108; break; |
4864 | /* Unknown rate: should not happen. */ |
4865 | default: tap->wr_rate = 0; |
4866 | } |
4867 | } |
4868 | |
4869 | bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len, |
4870 | m, BPF_DIRECTION_IN); |
4871 | } |
4872 | #endif |
4873 | ieee80211_inputm(IC2IFP(ic), m, ni, rxi, ml); |
4874 | ieee80211_release_node(ic, ni); |
4875 | } |
4876 | |
4877 | void |
4878 | iwm_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, void *pktdata, |
4879 | size_t maxlen, struct mbuf_list *ml) |
4880 | { |
4881 | struct ieee80211com *ic = &sc->sc_ic; |
4882 | struct ieee80211_rxinfo rxi; |
4883 | struct iwm_rx_phy_info *phy_info; |
4884 | struct iwm_rx_mpdu_res_start *rx_res; |
4885 | int device_timestamp; |
4886 | uint16_t phy_flags; |
4887 | uint32_t len; |
4888 | uint32_t rx_pkt_status; |
4889 | int rssi, chanidx, rate_n_flags; |
4890 | |
4891 | memset(&rxi, 0, sizeof(rxi)); |
4892 | |
4893 | phy_info = &sc->sc_last_phy_info; |
4894 | rx_res = (struct iwm_rx_mpdu_res_start *)pktdata; |
4895 | len = le16toh(rx_res->byte_count); |
4896 | if (ic->ic_opmode == IEEE80211_M_MONITOR) { |
4897 | /* Allow control frames in monitor mode. */ |
4898 | if (len < sizeof(struct ieee80211_frame_cts)) { |
4899 | ic->ic_stats.is_rx_tooshort++; |
4900 | IC2IFP(ic)->if_ierrors++; |
4901 | m_freem(m); |
4902 | return; |
4903 | } |
4904 | } else if (len < sizeof(struct ieee80211_frame)) { |
4905 | ic->ic_stats.is_rx_tooshort++; |
4906 | IC2IFP(ic)->if_ierrors++; |
4907 | m_freem(m); |
4908 | return; |
4909 | } |
4910 | if (len > maxlen - sizeof(*rx_res)) { |
4911 | IC2IFP(ic)->if_ierrors++; |
4912 | m_freem(m); |
4913 | return; |
4914 | } |
4915 | |
4916 | if (__predict_false(phy_info->cfg_phy_cnt > 20)) { |
4917 | m_freem(m); |
4918 | return; |
4919 | } |
4920 | |
4921 | rx_pkt_status = le32toh(*(uint32_t *)(pktdata + sizeof(*rx_res) + len)); |
4922 | if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) || |
4923 | !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) { |
4924 | m_freem(m); |
4925 | return; /* drop */ |
4926 | } |
4927 | |
4928 | m->m_data = pktdata + sizeof(*rx_res); |
4929 | m->m_pkthdr.len = m->m_len = len; |
4930 | |
4931 | if (iwm_rx_hwdecrypt(sc, m, rx_pkt_status, &rxi)) { |
4932 | m_freem(m); |
4933 | return; |
4934 | } |
4935 | |
4936 | chanidx = letoh32(phy_info->channel); |
4937 | device_timestamp = le32toh(phy_info->system_timestamp); |
4938 | phy_flags = letoh16(phy_info->phy_flags); |
4939 | rate_n_flags = le32toh(phy_info->rate_n_flags); |
4940 | |
4941 | rssi = iwm_get_signal_strength(sc, phy_info); |
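| /* IWM_MIN_DBM is -100, so e.g. a -60dBm signal yields an RSSI of 40 on the 0-100 scale clipped below. */ |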
4942 | rssi = (0 - IWM_MIN_DBM) + rssi; /* normalize */ |
4943 | rssi = MIN(rssi, ic->ic_max_rssi); /* clip to max. 100% */ |
4944 | |
4945 | rxi.rxi_rssi = rssi; |
4946 | rxi.rxi_tstamp = device_timestamp; |
4947 | rxi.rxi_chan = chanidx; |
4948 | |
4949 | iwm_rx_frame(sc, m, chanidx, rx_pkt_status, |
4950 | (phy_flags & IWM_PHY_INFO_FLAG_SHPREAMBLE), |
4951 | rate_n_flags, device_timestamp, &rxi, ml); |
4952 | } |
4953 | |
4954 | void |
4955 | iwm_flip_address(uint8_t *addr) |
4956 | { |
4957 | int i; |
4958 | uint8_t mac_addr[ETHER_ADDR_LEN]; |
4959 | |
4960 | for (i = 0; i < ETHER_ADDR_LEN; i++) |
4961 | mac_addr[i] = addr[ETHER_ADDR_LEN - i - 1]; |
4962 | IEEE80211_ADDR_COPY(addr, mac_addr); |
4963 | } |
4964 | |
4965 | /* |
4966 | * Drop duplicate 802.11 retransmissions |
4967 | * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery") |
4968 | * and handle pseudo-duplicate frames which result from deaggregation |
4969 | * of A-MSDU frames in hardware. |
4970 | */ |
4971 | int |
4972 | iwm_detect_duplicate(struct iwm_softc *sc, struct mbuf *m, |
4973 | struct iwm_rx_mpdu_desc *desc, struct ieee80211_rxinfo *rxi) |
4974 | { |
4975 | struct ieee80211com *ic = &sc->sc_ic; |
4976 | struct iwm_node *in = (void *)ic->ic_bss; |
4977 | struct iwm_rxq_dup_data *dup_data = &in->dup_data; |
4978 | uint8_t tid = IWM_MAX_TID_COUNT, subframe_idx; |
4979 | struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *); |
4980 | uint8_t type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; |
4981 | uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; |
4982 | int hasqos = ieee80211_has_qos(wh); |
4983 | uint16_t seq; |
4984 | |
4985 | if (type == IEEE80211_FC0_TYPE_CTL || |
4986 | (hasqos && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) || |
4987 | IEEE80211_IS_MULTICAST(wh->i_addr1)) |
4988 | return 0; |
4989 | |
4990 | if (hasqos) { |
4991 | tid = (ieee80211_get_qos(wh) & IEEE80211_QOS_TID); |
4992 | if (tid > IWM_MAX_TID_COUNT) |
4993 | tid = IWM_MAX_TID_COUNT; |
4994 | } |
4995 | |
4996 | /* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */ |
4997 | subframe_idx = desc->amsdu_info & |
4998 | IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK; |
4999 | |
5000 | seq = letoh16(*(u_int16_t *)wh->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT; |
5001 | if ((wh->i_fc[1] & IEEE80211_FC1_RETRY) && |
5002 | dup_data->last_seq[tid] == seq && |
5003 | dup_data->last_sub_frame[tid] >= subframe_idx) |
5004 | return 1; |
5005 | |
5006 | /* |
5007 | * Allow the same frame sequence number for all A-MSDU subframes |
5008 | * following the first subframe. |
5009 | * Otherwise these subframes would be discarded as replays. |
5010 | */ |
5011 | if (dup_data->last_seq[tid] == seq && |
5012 | subframe_idx > dup_data->last_sub_frame[tid] && |
5013 | (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU)) { |
5014 | rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ; |
5015 | } |
5016 | |
5017 | dup_data->last_seq[tid] = seq; |
5018 | dup_data->last_sub_frame[tid] = subframe_idx; |
5019 | |
5020 | return 0; |
5021 | } |
5022 | |
5023 | /* |
5024 | * Returns true if sn2 - buffer_size < sn1 < sn2. |
5025 | * To be used only in order to compare reorder buffer head with NSSN. |
5026 | * We fully trust NSSN unless it is behind us due to reorder timeout. |
5027 | * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN. |
5028 | */ |
5029 | int |
5030 | iwm_is_sn_less(uint16_t sn1, uint16_t sn2, uint16_t buffer_size) |
5031 | { |
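| /* 802.11 sequence numbers are 12 bits wide; SEQ_LT() compares them modulo 4096. */ |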
5032 | return SEQ_LT(sn1, sn2) && !SEQ_LT(sn1, sn2 - buffer_size); |
5033 | } |
5034 | |
5035 | void |
5036 | iwm_release_frames(struct iwm_softc *sc, struct ieee80211_node *ni, |
5037 | struct iwm_rxba_data *rxba, struct iwm_reorder_buffer *reorder_buf, |
5038 | uint16_t nssn, struct mbuf_list *ml) |
5039 | { |
5040 | struct iwm_reorder_buf_entry *entries = &rxba->entries[0]; |
5041 | uint16_t ssn = reorder_buf->head_sn; |
5042 | |
5043 | /* ignore nssn smaller than head sn - this can happen due to timeout */ |
5044 | if (iwm_is_sn_less(nssn, ssn, reorder_buf->buf_size)) |
5045 | goto set_timer; |
5046 | |
5047 | while (iwm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) { |
5048 | int index = ssn % reorder_buf->buf_size; |
5049 | struct mbuf *m; |
5050 | int chanidx, is_shortpre; |
5051 | uint32_t rx_pkt_status, rate_n_flags, device_timestamp; |
5052 | struct ieee80211_rxinfo *rxi; |
5053 | |
5054 | /* This data is the same for all A-MSDU subframes. */ |
5055 | chanidx = entries[index].chanidx; |
5056 | rx_pkt_status = entries[index].rx_pkt_status; |
5057 | is_shortpre = entries[index].is_shortpre; |
5058 | rate_n_flags = entries[index].rate_n_flags; |
5059 | device_timestamp = entries[index].device_timestamp; |
5060 | rxi = &entries[index].rxi; |
5061 | |
5062 | /* |
5063 | * Empty the list. Will have more than one frame for A-MSDU. |
5064 | * Empty list is valid as well since nssn indicates frames were |
5065 | * received. |
5066 | */ |
5067 | while ((m = ml_dequeue(&entries[index].frames)) != NULL) { |
5068 | iwm_rx_frame(sc, m, chanidx, rx_pkt_status, is_shortpre, |
5069 | rate_n_flags, device_timestamp, rxi, ml); |
5070 | reorder_buf->num_stored--; |
5071 | |
5072 | /* |
5073 | * Allow the same frame sequence number and CCMP PN for |
5074 | * all A-MSDU subframes following the first subframe. |
5075 | * Otherwise they would be discarded as replays. |
5076 | */ |
5077 | rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ; |
5078 | rxi->rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN; |
5079 | } |
5080 | |
5081 | ssn = (ssn + 1) & 0xfff; |
5082 | } |
5083 | reorder_buf->head_sn = nssn; |
5084 | |
5085 | set_timer: |
5086 | if (reorder_buf->num_stored && !reorder_buf->removed) { |
5087 | timeout_add_usec(&reorder_buf->reorder_timer, |
5088 | RX_REORDER_BUF_TIMEOUT_MQ_USEC); |
5089 | } else |
5090 | timeout_del(&reorder_buf->reorder_timer); |
5091 | } |
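|
| /*
|  * Example of the release loop (hypothetical values, buf_size 64,
|  * head_sn 60, nssn 70): the loop visits indices 60 % 64 = 60 through
|  * 63, wraps to 64 % 64 = 0, and stops after 69 % 64 = 5, flushing
|  * each slot's mbuf list to iwm_rx_frame() before head_sn becomes 70.
|  */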
5092 | |
5093 | int |
5094 | iwm_oldsn_workaround(struct iwm_softc *sc, struct ieee80211_node *ni, int tid, |
5095 | struct iwm_reorder_buffer *buffer, uint32_t reorder_data, uint32_t gp2) |
5096 | { |
5097 | struct ieee80211com *ic = &sc->sc_ic; |
5098 | |
5099 | if (gp2 != buffer->consec_oldsn_ampdu_gp2) { |
5100 | /* we have a new (A-)MPDU ... */ |
5101 | |
5102 | /* |
5103 | * reset counter to 0 if we didn't have any oldsn in |
5104 | * the last A-MPDU (as detected by GP2 being identical) |
5105 | */ |
5106 | if (!buffer->consec_oldsn_prev_drop) |
5107 | buffer->consec_oldsn_drops = 0; |
5108 | |
5109 | /* either way, update our tracking state */ |
5110 | buffer->consec_oldsn_ampdu_gp2 = gp2; |
5111 | } else if (buffer->consec_oldsn_prev_drop) { |
5112 | /* |
5113 | * tracking state didn't change, and we had an old SN |
5114 | * indication before - do nothing in this case, we |
5115 | * already noted this one down and are waiting for the |
5116 | * next A-MPDU (by GP2) |
5117 | */ |
5118 | return 0; |
5119 | } |
5120 | |
5121 | /* return unless this MPDU has old SN */ |
5122 | if (!(reorder_data & IWM_RX_MPDU_REORDER_BA_OLD_SN))
5123 | return 0; |
5124 | |
5125 | /* update state */ |
5126 | buffer->consec_oldsn_prev_drop = 1; |
5127 | buffer->consec_oldsn_drops++; |
5128 | |
5129 | /* if limit is reached, send del BA and reset state */ |
5130 | if (buffer->consec_oldsn_drops == IWM_AMPDU_CONSEC_DROPS_DELBA) {
5131 | ieee80211_delba_request(ic, ni, IEEE80211_REASON_UNSPECIFIED, |
5132 | 0, tid); |
5133 | buffer->consec_oldsn_prev_drop = 0; |
5134 | buffer->consec_oldsn_drops = 0; |
5135 | return 1; |
5136 | } |
5137 | |
5138 | return 0; |
5139 | } |
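|
| /*
|  * The workaround above relies on all subframes of one (A-)MPDU
|  * sharing the same GP2 timestamp: a changed GP2 marks a new A-MPDU,
|  * so only consecutive A-MPDUs containing old-SN frames are counted
|  * towards tearing down the BA session.
|  */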
5140 | |
5141 | /* |
5142 | * Handle re-ordering of frames which were de-aggregated in hardware. |
5143 | * Returns 1 if the MPDU was consumed (buffered or dropped). |
5144 | * Returns 0 if the MPDU should be passed to upper layer. |
5145 | */ |
5146 | int |
5147 | iwm_rx_reorder(struct iwm_softc *sc, struct mbuf *m, int chanidx, |
5148 | struct iwm_rx_mpdu_desc *desc, int is_shortpre, int rate_n_flags, |
5149 | uint32_t device_timestamp, struct ieee80211_rxinfo *rxi, |
5150 | struct mbuf_list *ml) |
5151 | { |
5152 | struct ieee80211com *ic = &sc->sc_ic; |
5153 | struct ieee80211_frame *wh; |
5154 | struct ieee80211_node *ni; |
5155 | struct iwm_rxba_data *rxba; |
5156 | struct iwm_reorder_buffer *buffer; |
5157 | uint32_t reorder_data = le32toh(desc->reorder_data);
5158 | int is_amsdu = (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU);
5159 | int last_subframe =
5160 | (desc->amsdu_info & IWM_RX_MPDU_AMSDU_LAST_SUBFRAME);
5161 | uint8_t tid;
5162 | uint8_t subframe_idx = (desc->amsdu_info &
5163 | IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
5164 | struct iwm_reorder_buf_entry *entries; |
5165 | int index; |
5166 | uint16_t nssn, sn; |
5167 | uint8_t baid, type, subtype; |
5168 | int hasqos; |
5169 | |
5170 | wh = mtod(m, struct ieee80211_frame *);
5171 | hasqos = ieee80211_has_qos(wh);
5172 | tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
5173 |
5174 | type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5175 | subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
5176 | |
5177 | /* |
5178 | * We are only interested in Block Ack requests and unicast QoS data. |
5179 | */ |
5180 | if (IEEE80211_IS_MULTICAST(wh->i_addr1))
5181 | return 0;
5182 | if (hasqos) {
5183 | if (subtype & IEEE80211_FC0_SUBTYPE_NODATA)
5184 | return 0;
5185 | } else {
5186 | if (type != IEEE80211_FC0_TYPE_CTL ||
5187 | subtype != IEEE80211_FC0_SUBTYPE_BAR)
5188 | return 0; |
5189 | } |
5190 | |
5191 | baid = (reorder_data & IWM_RX_MPDU_REORDER_BAID_MASK) >>
5192 | IWM_RX_MPDU_REORDER_BAID_SHIFT;
5193 | if (baid == IWM_RX_REORDER_DATA_INVALID_BAID ||
5194 | baid >= nitems(sc->sc_rxba_data))
5195 | return 0;
5196 |
5197 | rxba = &sc->sc_rxba_data[baid];
5198 | if (rxba->baid == IWM_RX_REORDER_DATA_INVALID_BAID ||
5199 | tid != rxba->tid || rxba->sta_id != IWM_STATION_ID)
5200 | return 0; |
5201 | |
5202 | if (rxba->timeout != 0) |
5203 | getmicrouptime(&rxba->last_rx); |
5204 | |
5205 | /* Bypass A-MPDU re-ordering in net80211. */ |
5206 | rxi->rxi_flags |= IEEE80211_RXI_AMPDU_DONE;
5207 |
5208 | nssn = reorder_data & IWM_RX_MPDU_REORDER_NSSN_MASK;
5209 | sn = (reorder_data & IWM_RX_MPDU_REORDER_SN_MASK) >>
5210 | IWM_RX_MPDU_REORDER_SN_SHIFT;
5211 | |
5212 | buffer = &rxba->reorder_buf; |
5213 | entries = &rxba->entries[0]; |
5214 | |
5215 | if (!buffer->valid) { |
5216 | if (reorder_data & IWM_RX_MPDU_REORDER_BA_OLD_SN)
5217 | return 0; |
5218 | buffer->valid = 1; |
5219 | } |
5220 | |
5221 | ni = ieee80211_find_rxnode(ic, wh); |
5222 | if (type == IEEE80211_FC0_TYPE_CTL &&
5223 | subtype == IEEE80211_FC0_SUBTYPE_BAR) {
5224 | iwm_release_frames(sc, ni, rxba, buffer, nssn, ml); |
5225 | goto drop; |
5226 | } |
5227 | |
5228 | /* |
5229 | * If there was a significant jump in the NSSN, adjust.
5230 | * If the SN is smaller than the NSSN it might need to go into the
5231 | * reorder buffer first; in that case we release frames up to the SN
5232 | * only, and the rest of this function takes care of storing the frame
5233 | * and releasing frames up to the NSSN.
5234 | */ |
5235 | if (!iwm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
5236 | buffer->buf_size) ||
5237 | !SEQ_LT(sn, buffer->head_sn + buffer->buf_size)) {
5238 | uint16_t min_sn = SEQ_LT(sn, nssn) ? sn : nssn;
5239 | ic->ic_stats.is_ht_rx_frame_above_ba_winend++; |
5240 | iwm_release_frames(sc, ni, rxba, buffer, min_sn, ml); |
5241 | } |
5242 | |
5243 | if (iwm_oldsn_workaround(sc, ni, tid, buffer, reorder_data, |
5244 | device_timestamp)) { |
5245 | /* BA session will be torn down. */ |
5246 | ic->ic_stats.is_ht_rx_ba_window_jump++; |
5247 | goto drop; |
5248 | |
5249 | } |
5250 | |
5251 | /* drop any outdated packets */ |
5252 | if (SEQ_LT(sn, buffer->head_sn)) {
5253 | ic->ic_stats.is_ht_rx_frame_below_ba_winstart++; |
5254 | goto drop; |
5255 | } |
5256 | |
5257 | /* release immediately if allowed by nssn and no stored frames */ |
5258 | if (!buffer->num_stored && SEQ_LT(sn, nssn)) {
5259 | if (iwm_is_sn_less(buffer->head_sn, nssn, buffer->buf_size) && |
5260 | (!is_amsdu || last_subframe)) |
5261 | buffer->head_sn = nssn; |
5262 | ieee80211_release_node(ic, ni); |
5263 | return 0; |
5264 | } |
5265 | |
5266 | /* |
5267 | * Release immediately if there are no stored frames and the SN is
5268 | * equal to the head.
5269 | * This can happen after a reorder timeout which left the NSSN behind
5270 | * head_sn: everything was already released, and when the next frame in
5271 | * sequence arrives the NSSN alone would not allow an immediate release,
5272 | * even though there is no hole and we can move forward.
5273 | */ |
5274 | if (!buffer->num_stored && sn == buffer->head_sn) { |
5275 | if (!is_amsdu || last_subframe) |
5276 | buffer->head_sn = (buffer->head_sn + 1) & 0xfff; |
5277 | ieee80211_release_node(ic, ni); |
5278 | return 0; |
5279 | } |
5280 | |
5281 | index = sn % buffer->buf_size; |
5282 | |
5283 | /* |
5284 | * Check if we have already stored this frame.
5285 | * Since an A-MSDU is either received in full or not at all, the logic
5286 | * is simple: if there are frames at this position in the buffer and the
5287 | * last frame originating from an A-MSDU had a different SN, this is a
5288 | * retransmission. If the SN is the same, it is the same A-MSDU only if
5289 | * the subframe index is incrementing; otherwise it is a retransmission.
5290 | */ |
5291 | if (!ml_empty(&entries[index].frames)) {
5292 | if (!is_amsdu) { |
5293 | ic->ic_stats.is_ht_rx_ba_no_buf++; |
5294 | goto drop; |
5295 | } else if (sn != buffer->last_amsdu || |
5296 | buffer->last_sub_index >= subframe_idx) { |
5297 | ic->ic_stats.is_ht_rx_ba_no_buf++; |
5298 | goto drop; |
5299 | } |
5300 | } else { |
5301 | /* This data is the same for all A-MSDU subframes. */ |
5302 | entries[index].chanidx = chanidx; |
5303 | entries[index].is_shortpre = is_shortpre; |
5304 | entries[index].rate_n_flags = rate_n_flags; |
5305 | entries[index].device_timestamp = device_timestamp; |
5306 | memcpy(&entries[index].rxi, rxi, sizeof(entries[index].rxi));
5307 | } |
5308 | |
5309 | /* put in reorder buffer */ |
5310 | ml_enqueue(&entries[index].frames, m); |
5311 | buffer->num_stored++; |
5312 | getmicrouptime(&entries[index].reorder_time); |
5313 | |
5314 | if (is_amsdu) { |
5315 | buffer->last_amsdu = sn; |
5316 | buffer->last_sub_index = subframe_idx; |
5317 | } |
5318 | |
5319 | /* |
5320 | * We cannot trust the NSSN for A-MSDU sub-frames that are not the last.
5321 | * The reason is that the NSSN advances on the first sub-frame, and may
5322 | * cause the reorder buffer to advance before all sub-frames arrive.
5323 | * Example: the reorder buffer contains SN 0 and 2, and we receive an
5324 | * A-MSDU with SN 1. The NSSN for the first sub-frame is 3, so the
5325 | * driver releases SN 0, 1 and 2. When the next sub-frame of SN 1
5326 | * arrives, the reorder buffer is already ahead of it and drops it.
5327 | * If the last sub-frame is not on this queue, we will get a frame
5328 | * release notification with an up-to-date NSSN.
5329 | */ |
5330 | if (!is_amsdu || last_subframe) |
5331 | iwm_release_frames(sc, ni, rxba, buffer, nssn, ml); |
5332 | |
5333 | ieee80211_release_node(ic, ni); |
5334 | return 1; |
5335 | |
5336 | drop: |
5337 | m_freem(m); |
5338 | ieee80211_release_node(ic, ni); |
5339 | return 1; |
5340 | } |
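|
| /*
|  * Example trace (hypothetical values, buf_size 64, head_sn 10):
|  * frames arrive out of order with SN 12 and 11; both are buffered at
|  * indices 12 and 11 and consumed (return 1). Once SN 10 arrives and
|  * the NSSN has advanced past 12, it is buffered as well and the
|  * release above flushes SN 10, 11 and 12 to iwm_rx_frame() in order.
|  */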
5341 | |
5342 | void |
5343 | iwm_rx_mpdu_mq(struct iwm_softc *sc, struct mbuf *m, void *pktdata, |
5344 | size_t maxlen, struct mbuf_list *ml) |
5345 | { |
5346 | struct ieee80211com *ic = &sc->sc_ic; |
5347 | struct ieee80211_rxinfo rxi; |
5348 | struct iwm_rx_mpdu_desc *desc; |
5349 | uint32_t len, hdrlen, rate_n_flags, device_timestamp; |
5350 | int rssi; |
5351 | uint8_t chanidx; |
5352 | uint16_t phy_info; |
5353 | |
5354 | memset(&rxi, 0, sizeof(rxi));
5355 | |
5356 | desc = (struct iwm_rx_mpdu_desc *)pktdata; |
5357 | |
5358 | if (!(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_CRC_OK)) ||
5359 | !(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
5360 | m_freem(m); |
5361 | return; /* drop */ |
5362 | } |
5363 | |
5364 | len = le16toh(desc->mpdu_len);
5365 | if (ic->ic_opmode == IEEE80211_M_MONITOR) { |
5366 | /* Allow control frames in monitor mode. */ |
5367 | if (len < sizeof(struct ieee80211_frame_cts)) { |
5368 | ic->ic_stats.is_rx_tooshort++; |
5369 | IC2IFP(ic)->if_ierrors++;
5370 | m_freem(m); |
5371 | return; |
5372 | } |
5373 | } else if (len < sizeof(struct ieee80211_frame)) { |
5374 | ic->ic_stats.is_rx_tooshort++; |
5375 | IC2IFP(ic)->if_ierrors++;
5376 | m_freem(m); |
5377 | return; |
5378 | } |
5379 | if (len > maxlen - sizeof(*desc)) { |
5380 | IC2IFP(ic)->if_ierrors++;
5381 | m_freem(m); |
5382 | return; |
5383 | } |
5384 | |
5385 | m->m_data = pktdata + sizeof(*desc);
5386 | m->m_pkthdr.len = m->m_len = len;
5387 | |
5388 | /* Account for padding following the frame header. */ |
5389 | if (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_PAD) {
5390 | struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
5391 | int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5392 | if (type == IEEE80211_FC0_TYPE_CTL) {
5393 | switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
5394 | case IEEE80211_FC0_SUBTYPE_CTS:
5395 | hdrlen = sizeof(struct ieee80211_frame_cts);
5396 | break;
5397 | case IEEE80211_FC0_SUBTYPE_ACK:
5398 | hdrlen = sizeof(struct ieee80211_frame_ack);
5399 | break;
5400 | default:
5401 | hdrlen = sizeof(struct ieee80211_frame_min);
5402 | break;
5403 | }
5404 | } else
5405 | hdrlen = ieee80211_get_hdrlen(wh);
5406 |
5407 | if ((le16toh(desc->status) &
5408 | IWM_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
5409 | IWM_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
5410 | /* Padding is inserted after the IV. */
5411 | hdrlen += IEEE80211_CCMP_HDRLEN;
5412 | }
5413 |
5414 | memmove(m->m_data + 2, m->m_data, hdrlen);
5415 | m_adj(m, 2); |
5416 | } |
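|
| /*
|  * Layout of the padding removal above; the device inserts 2 bytes of
|  * padding after the frame header (after the CCMP IV for CCMP frames):
|  *
|  *	before:   [header][pad:2][body]
|  *	memmove:  [??:2][header][body]    (header shifted up by 2)
|  *	m_adj:    [header][body]          (leading 2 bytes dropped)
|  */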
5417 | |
5418 | /* |
5419 | * Hardware de-aggregates A-MSDUs and copies the same MAC header |
5420 | * in place for each subframe. But it leaves the 'A-MSDU present' |
5421 | * bit set in the frame header. We need to clear this bit ourselves. |
5422 | * |
5423 | * And we must allow the same CCMP PN for subframes following the |
5424 | * first subframe. Otherwise they would be discarded as replays. |
5425 | */ |
5426 | if (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU) {
5427 | struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
5428 | uint8_t subframe_idx = (desc->amsdu_info &
5429 | IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
5430 | if (subframe_idx > 0)
5431 | rxi.rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
5432 | if (ieee80211_has_qos(wh) && ieee80211_has_addr4(wh) &&
5433 | m->m_len >= sizeof(struct ieee80211_qosframe_addr4)) {
5434 | struct ieee80211_qosframe_addr4 *qwh4 = mtod(m,
5435 | struct ieee80211_qosframe_addr4 *);
5436 | qwh4->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
5437 |
5438 | /* HW reverses addr3 and addr4. */
5439 | iwm_flip_address(qwh4->i_addr3);
5440 | iwm_flip_address(qwh4->i_addr4);
5441 | } else if (ieee80211_has_qos(wh) &&
5442 | m->m_len >= sizeof(struct ieee80211_qosframe)) {
5443 | struct ieee80211_qosframe *qwh = mtod(m,
5444 | struct ieee80211_qosframe *);
5445 | qwh->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
5446 | |
5447 | /* HW reverses addr3. */ |
5448 | iwm_flip_address(qwh->i_addr3); |
5449 | } |
5450 | } |
5451 | |
5452 | /* |
5453 | * Verify decryption before duplicate detection. The latter uses |
5454 | * the TID supplied in QoS frame headers and this TID is implicitly |
5455 | * verified as part of the CCMP nonce. |
5456 | */ |
5457 | if (iwm_rx_hwdecrypt(sc, m, le16toh(desc->status), &rxi)) {
5458 | m_freem(m); |
5459 | return; |
5460 | } |
5461 | |
5462 | if (iwm_detect_duplicate(sc, m, desc, &rxi)) { |
5463 | m_freem(m); |
5464 | return; |
5465 | } |
5466 | |
5467 | phy_info = le16toh(desc->phy_info);
5468 | rate_n_flags = le32toh(desc->v1.rate_n_flags);
5469 | chanidx = desc->v1.channel; |
5470 | device_timestamp = desc->v1.gp2_on_air_rise; |
5471 | |
5472 | rssi = iwm_rxmq_get_signal_strength(sc, desc); |
5473 | rssi = (0 - IWM_MIN_DBM) + rssi;  /* normalize */
5474 | rssi = MIN(rssi, ic->ic_max_rssi);  /* clip to max. 100% */
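|
| /*
|  * Example: with IWM_MIN_DBM at -100, a raw signal strength of -60 dBm
|  * yields (0 - (-100)) + (-60) == 40, clipped to at most ic_max_rssi.
|  */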
5475 | |
5476 | rxi.rxi_rssi = rssi; |
5477 | rxi.rxi_tstamp = le64toh(desc->v1.tsf_on_air_rise);
5478 | rxi.rxi_chan = chanidx; |
5479 | |
5480 | if (iwm_rx_reorder(sc, m, chanidx, desc,
5481 | (phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE),
5482 | rate_n_flags, device_timestamp, &rxi, ml))
5483 | return;
5484 |
5485 | iwm_rx_frame(sc, m, chanidx, le16toh(desc->status),
5486 | (phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE),
5487 | rate_n_flags, device_timestamp, &rxi, ml);
5488 | } |
5489 | |
5490 | void |
5491 | iwm_ra_choose(struct iwm_softc *sc, struct ieee80211_node *ni) |
5492 | { |
5493 | struct ieee80211com *ic = &sc->sc_ic; |
5494 | struct iwm_node *in = (void *)ni; |
5495 | int old_txmcs = ni->ni_txmcs; |
5496 | int old_nss = ni->ni_vht_ss; |
5497 | |
5498 | if (ni->ni_flags & IEEE80211_NODE_VHT)
5499 | ieee80211_ra_vht_choose(&in->in_rn_vht, ic, ni); |
5500 | else |
5501 | ieee80211_ra_choose(&in->in_rn, ic, ni); |
5502 | |
5503 | /* |
5504 | * If RA has chosen a new TX rate we must update |
5505 | * the firmware's LQ rate table. |
5506 | */ |
5507 | if (ni->ni_txmcs != old_txmcs || ni->ni_vht_ss != old_nss) |
5508 | iwm_setrates(in, 1); |
5509 | } |
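|
| /*
|  * The old_txmcs/old_nss comparison works because the RA choose calls
|  * above update ni->ni_txmcs and ni->ni_vht_ss in place; any change
|  * must then be pushed to the firmware's LQ rate table.
|  */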
5510 | |
5511 | void |
5512 | iwm_ht_single_rate_control(struct iwm_softc *sc, struct ieee80211_node *ni, |
5513 | int txmcs, uint8_t failure_frame, int txfail) |
5514 | { |
5515 | struct ieee80211com *ic = &sc->sc_ic; |
5516 | struct iwm_node *in = (void *)ni; |
5517 | |
5518 | /* Ignore Tx reports which don't match our last LQ command. */ |
5519 | if (txmcs != ni->ni_txmcs) { |
5520 | if (++in->lq_rate_mismatch > 15) { |
5521 | /* Try to sync firmware with the driver... */ |
5522 | iwm_setrates(in, 1); |
5523 | in->lq_rate_mismatch = 0; |
5524 | } |
5525 | } else { |
5526 | int mcs = txmcs; |
5527 | const struct ieee80211_ht_rateset *rs = |
5528 | ieee80211_ra_get_ht_rateset(txmcs, |
5529 | ieee80211_node_supports_ht_chan40(ni), |
5530 | ieee80211_ra_use_ht_sgi(ni)); |
5531 | unsigned int retries = 0, i; |
5532 | |
5533 | in->lq_rate_mismatch = 0; |
5534 | |
5535 | for (i = 0; i < failure_frame; i++) { |
5536 | if (mcs > rs->min_mcs) { |
5537 | ieee80211_ra_add_stats_ht(&in->in_rn, |
5538 | ic, ni, mcs, 1, 1); |
5539 | mcs--; |
5540 | } else |
5541 | retries++; |
5542 | } |
5543 | |
5544 | if (txfail && failure_frame == 0) { |
5545 | ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni, |
5546 | txmcs, 1, 1); |
5547 | } else { |
5548 | ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni, |
5549 | mcs, retries + 1, retries); |
5550 | } |
5551 | |
5552 | iwm_ra_choose(sc, ni); |
5553 | } |
5554 | } |
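|
| /*
|  * Example (hypothetical values): with txmcs 7, min_mcs 0 and
|  * failure_frame 3, the loop above records one failed attempt each at
|  * MCS 7, 6 and 5 (the driver models firmware retries as stepping down
|  * one MCS per attempt), and the final attempt is accounted at MCS 4.
|  */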
5555 | |
5556 | void |
5557 | iwm_vht_single_rate_control(struct iwm_softc *sc, struct ieee80211_node *ni, |
5558 | int txmcs, int nss, uint8_t failure_frame, int txfail) |
5559 | { |
5560 | struct ieee80211com *ic = &sc->sc_ic; |
5561 | struct iwm_node *in = (void *)ni; |
5562 | uint8_t vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
5563 | uint8_t sco = IEEE80211_HTOP0_SCO_SCN;
5564 | |
5565 | /* Ignore Tx reports which don't match our last LQ command. */ |
5566 | if (txmcs != ni->ni_txmcs || nss != ni->ni_vht_ss) { |
5567 | if (++in->lq_rate_mismatch > 15) { |
5568 | /* Try to sync firmware with the driver... */ |
5569 | iwm_setrates(in, 1); |
5570 | in->lq_rate_mismatch = 0; |
5571 | } |
5572 | } else { |
5573 | int mcs = txmcs; |
5574 | unsigned int retries = 0, i; |
5575 | |
5576 | if (in->in_phyctxt) { |
5577 | vht_chan_width = in->in_phyctxt->vht_chan_width; |
5578 | sco = in->in_phyctxt->sco; |
5579 | } |
5580 | in->lq_rate_mismatch = 0; |
5581 | |
5582 | for (i = 0; i < failure_frame; i++) { |
5583 | if (mcs > 0) { |
5584 | ieee80211_ra_vht_add_stats(&in->in_rn_vht, |
5585 | ic, ni, mcs, nss, 1, 1); |
5586 | if (vht_chan_width >=
5587 | IEEE80211_VHTOP0_CHAN_WIDTH_80) {
5588 | /* |
5589 | * First 4 Tx attempts used same MCS, |
5590 | * twice at 80MHz and twice at 40MHz. |
5591 | */ |
5592 | if (i >= 4) |
5593 | mcs--; |
5594 | } else if (sco == IEEE80211_HTOP0_SCO_SCA ||
5595 | sco == IEEE80211_HTOP0_SCO_SCB) {
5596 | /* |
5597 | * First 4 Tx attempts used same MCS, |
5598 | * four times at 40MHz. |
5599 | */ |
5600 | if (i >= 4) |
5601 | mcs--; |
5602 | } else |
5603 | mcs--; |
5604 | } else |
5605 | retries++; |
5606 | } |
5607 | |
5608 | if (txfail && failure_frame == 0) { |
5609 | ieee80211_ra_vht_add_stats(&in->in_rn_vht, ic, ni, |
5610 | txmcs, nss, 1, 1); |
5611 | } else { |
5612 | ieee80211_ra_vht_add_stats(&in->in_rn_vht, ic, ni, |
5613 | mcs, nss, retries + 1, retries); |
5614 | } |
5615 | |
5616 | iwm_ra_choose(sc, ni); |
5617 | } |
5618 | } |
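|
| /*
|  * Example: on an 80MHz channel the LQ table keeps the initial MCS for
|  * the first four attempts (twice at 80MHz, twice at 40MHz), so the
|  * loop above only steps the MCS down from the fifth retry onwards;
|  * on a plain 20MHz channel every retry steps down one MCS.
|  */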
5619 | |
5620 | void |
5621 | iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt, |
5622 | struct iwm_node *in, int txmcs, int txrate) |
5623 | { |
5624 | struct ieee80211com *ic = &sc->sc_ic; |
5625 | struct ieee80211_node *ni = &in->in_ni; |
5626 | struct ifnet *ifp = IC2IFP(ic);
5627 | struct iwm_tx_resp *tx_resp = (void *)pkt->data;
5628 | int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
5629 | uint32_t initial_rate = le32toh(tx_resp->initial_rate);
5630 | int txfail;
5631 |
5632 | KASSERT(tx_resp->frame_count == 1);
5633 |
5634 | txfail = (status != IWM_TX_STATUS_SUCCESS &&
5635 | status != IWM_TX_STATUS_DIRECT_DONE);
5636 | |
5637 | /* |
5638 | * Update rate control statistics. |
5639 | * Only report frames which were actually queued with the currently |
5640 | * selected Tx rate. Because Tx queues are relatively long we may |
5641 | * encounter previously selected rates here during Tx bursts. |
5642 | * Providing feedback based on such frames can lead to suboptimal |
5643 | * Tx rate control decisions. |
5644 | */ |
5645 | if ((ni->ni_flags & IEEE80211_NODE_HT) == 0) {
5646 | if (txrate != ni->ni_txrate) { |
5647 | if (++in->lq_rate_mismatch > 15) { |
5648 | /* Try to sync firmware with the driver... */ |
5649 | iwm_setrates(in, 1); |
5650 | in->lq_rate_mismatch = 0; |
5651 | } |
5652 | } else { |
5653 | in->lq_rate_mismatch = 0; |
5654 | |
5655 | in->in_amn.amn_txcnt++; |
5656 | if (txfail) |
5657 | in->in_amn.amn_retrycnt++; |
5658 | if (tx_resp->failure_frame > 0) |
5659 | in->in_amn.amn_retrycnt++; |
5660 | } |
5661 | } else if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
5662 | ic->ic_fixed_mcs == -1 && ic->ic_state == IEEE80211_S_RUN &&
5663 | (initial_rate & IWM_RATE_MCS_VHT_MSK)) {
5664 | int txmcs = initial_rate & IWM_RATE_VHT_MCS_RATE_CODE_MSK;
5665 | int nss = ((initial_rate & IWM_RATE_VHT_MCS_NSS_MSK) >>
5666 | IWM_RATE_VHT_MCS_NSS_POS) + 1;
5667 | iwm_vht_single_rate_control(sc, ni, txmcs, nss,
5668 | tx_resp->failure_frame, txfail);
5669 | } else if (ic->ic_fixed_mcs == -1 && ic->ic_state == IEEE80211_S_RUN &&
5670 | (initial_rate & IWM_RATE_MCS_HT_MSK)) {
5671 | int txmcs = initial_rate &
5672 | (IWM_RATE_HT_MCS_RATE_CODE_MSK | IWM_RATE_HT_MCS_NSS_MSK);
5673 | iwm_ht_single_rate_control(sc, ni, txmcs, |
5674 | tx_resp->failure_frame, txfail); |
5675 | } |
5676 | |
5677 | if (txfail) |
5678 | ifp->if_oerrors++;
5679 | } |
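|
| /*
|  * initial_rate decoding used above: for VHT, bits 0-3 carry the MCS
|  * and bits 4-5 the number of spatial streams minus one; for HT, the
|  * rate code and NSS bits together form the net80211 MCS index.
|  */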
5680 | |
5681 | void |
5682 | iwm_txd_done(struct iwm_softc *sc, struct iwm_tx_data *txd) |
5683 | { |
5684 | struct ieee80211com *ic = &sc->sc_ic; |
5685 | |
5686 | bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
5687 | BUS_DMASYNC_POSTWRITE);
5688 | bus_dmamap_unload(sc->sc_dmat, txd->map);
5689 | m_freem(txd->m);
5690 | txd->m = NULL;
5691 |
5692 | KASSERT(txd->in);
5693 | ieee80211_release_node(ic, &txd->in->in_ni);
5694 | txd->in = NULL;
5695 | txd->ampdu_nframes = 0; |
5696 | txd->ampdu_txmcs = 0; |
5697 | txd->ampdu_txnss = 0; |
5698 | } |
5699 | |
5700 | void |
5701 | iwm_txq_advance(struct iwm_softc *sc, struct iwm_tx_ring *ring, int idx) |
5702 | { |
5703 | struct iwm_tx_data *txd; |
5704 | |
5705 | while (ring->tail != idx) { |
5706 | txd = &ring->data[ring->tail]; |
5707 | if (txd->m != NULL) {
5708 | iwm_reset_sched(sc, ring->qid, ring->tail, IWM_STATION_ID);
5709 | iwm_txd_done(sc, txd); |
5710 | ring->queued--; |
5711 | } |
5712 | ring->tail = (ring->tail + 1) % IWM_TX_RING_COUNT;
5713 | } |
5714 | |
5715 | wakeup(ring); |
5716 | } |
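|
| /*
|  * Example (hypothetical values): with IWM_TX_RING_COUNT 256, tail 254
|  * and idx 1, the loop above completes descriptors 254, 255 and 0,
|  * leaves tail at 1, and wakes any thread sleeping on the ring.
|  */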
5717 | |
5718 | void |
5719 | iwm_ampdu_tx_done(struct iwm_softc *sc, struct iwm_cmd_header *cmd_hdr, |
5720 | struct iwm_node *in, struct iwm_tx_ring *txq, uint32_t initial_rate, |
5721 | uint8_t nframes, uint8_t failure_frame, uint16_t ssn, int status, |
5722 | struct iwm_agg_tx_status *agg_status) |
5723 | { |
5724 | struct ieee80211com *ic = &sc->sc_ic; |
5725 | int tid = cmd_hdr->qid - IWM_FIRST_AGG_TX_QUEUE;
5726 | struct iwm_tx_data *txdata = &txq->data[cmd_hdr->idx]; |
5727 | struct ieee80211_node *ni = &in->in_ni; |
5728 | struct ieee80211_tx_ba *ba; |
5729 | int txfail = (status != IWM_TX_STATUS_SUCCESS &&
5730 | status != IWM_TX_STATUS_DIRECT_DONE);
5731 | uint16_t seq; |
5732 | |
5733 | if (ic->ic_state != IEEE80211_S_RUN) |
5734 | return; |
5735 | |
5736 | if (nframes > 1) { |
5737 | int i; |
5738 | /* |
5739 | * Collect information about this A-MPDU. |
5740 | */ |
5741 | |
5742 | for (i = 0; i < nframes; i++) { |
5743 | uint8_t qid = agg_status[i].qid; |
5744 | uint8_t idx = agg_status[i].idx; |
5745 | uint16_t txstatus = (le16toh(agg_status[i].status) &
5746 | IWM_AGG_TX_STATE_STATUS_MSK);
5747 | |
5748 | if (txstatus != IWM_AGG_TX_STATE_TRANSMITTED |