/*
 * dev/pci/if_iwm.c
 *
 * Static-analyzer note carried over from the code-browser export:
 * line 11066, column 4 — value stored to 'handled' is never read.
 */
1 | /* $OpenBSD: if_iwm.c,v 1.389 2022/01/09 05:42:50 jsg Exp $ */ |
2 | |
3 | /* |
4 | * Copyright (c) 2014, 2016 genua gmbh <info@genua.de> |
5 | * Author: Stefan Sperling <stsp@openbsd.org> |
6 | * Copyright (c) 2014 Fixup Software Ltd. |
7 | * Copyright (c) 2017 Stefan Sperling <stsp@openbsd.org> |
8 | * |
9 | * Permission to use, copy, modify, and distribute this software for any |
10 | * purpose with or without fee is hereby granted, provided that the above |
11 | * copyright notice and this permission notice appear in all copies. |
12 | * |
13 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES |
14 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF |
15 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR |
16 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES |
17 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN |
18 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF |
19 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
20 | */ |
21 | |
22 | /*- |
23 | * Based on BSD-licensed source modules in the Linux iwlwifi driver, |
24 | * which were used as the reference documentation for this implementation. |
25 | * |
26 | *********************************************************************** |
27 | * |
28 | * This file is provided under a dual BSD/GPLv2 license. When using or |
29 | * redistributing this file, you may do so under either license. |
30 | * |
31 | * GPL LICENSE SUMMARY |
32 | * |
33 | * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved. |
34 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
35 | * Copyright(c) 2016 Intel Deutschland GmbH |
36 | * |
37 | * This program is free software; you can redistribute it and/or modify |
38 | * it under the terms of version 2 of the GNU General Public License as |
39 | * published by the Free Software Foundation. |
40 | * |
41 | * This program is distributed in the hope that it will be useful, but |
42 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
43 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
44 | * General Public License for more details. |
45 | * |
46 | * You should have received a copy of the GNU General Public License |
47 | * along with this program; if not, write to the Free Software |
48 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, |
49 | * USA |
50 | * |
51 | * The full GNU General Public License is included in this distribution |
52 | * in the file called COPYING. |
53 | * |
54 | * Contact Information: |
55 | * Intel Linux Wireless <ilw@linux.intel.com> |
56 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 |
57 | * |
58 | * |
59 | * BSD LICENSE |
60 | * |
61 | * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. |
62 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
63 | * Copyright(c) 2016 Intel Deutschland GmbH |
64 | * All rights reserved. |
65 | * |
66 | * Redistribution and use in source and binary forms, with or without |
67 | * modification, are permitted provided that the following conditions |
68 | * are met: |
69 | * |
70 | * * Redistributions of source code must retain the above copyright |
71 | * notice, this list of conditions and the following disclaimer. |
72 | * * Redistributions in binary form must reproduce the above copyright |
73 | * notice, this list of conditions and the following disclaimer in |
74 | * the documentation and/or other materials provided with the |
75 | * distribution. |
76 | * * Neither the name Intel Corporation nor the names of its |
77 | * contributors may be used to endorse or promote products derived |
78 | * from this software without specific prior written permission. |
79 | * |
80 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
81 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
82 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
83 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
84 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
85 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
86 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
87 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
88 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
89 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
90 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
91 | */ |
92 | |
93 | /*- |
94 | * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr> |
95 | * |
96 | * Permission to use, copy, modify, and distribute this software for any |
97 | * purpose with or without fee is hereby granted, provided that the above |
98 | * copyright notice and this permission notice appear in all copies. |
99 | * |
100 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES |
101 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF |
102 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR |
103 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES |
104 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN |
105 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF |
106 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
107 | */ |
108 | |
109 | #include "bpfilter.h" |
110 | |
111 | #include <sys/param.h> |
112 | #include <sys/conf.h> |
113 | #include <sys/kernel.h> |
114 | #include <sys/malloc.h> |
115 | #include <sys/mbuf.h> |
116 | #include <sys/mutex.h> |
117 | #include <sys/proc.h> |
118 | #include <sys/rwlock.h> |
119 | #include <sys/socket.h> |
120 | #include <sys/sockio.h> |
121 | #include <sys/systm.h> |
122 | #include <sys/endian.h> |
123 | |
124 | #include <sys/refcnt.h> |
125 | #include <sys/task.h> |
126 | #include <machine/bus.h> |
127 | #include <machine/intr.h> |
128 | |
129 | #include <dev/pci/pcireg.h> |
130 | #include <dev/pci/pcivar.h> |
131 | #include <dev/pci/pcidevs.h> |
132 | |
133 | #if NBPFILTER1 > 0 |
134 | #include <net/bpf.h> |
135 | #endif |
136 | #include <net/if.h> |
137 | #include <net/if_dl.h> |
138 | #include <net/if_media.h> |
139 | |
140 | #include <netinet/in.h> |
141 | #include <netinet/if_ether.h> |
142 | |
143 | #include <net80211/ieee80211_var.h> |
144 | #include <net80211/ieee80211_amrr.h> |
145 | #include <net80211/ieee80211_ra.h> |
146 | #include <net80211/ieee80211_radiotap.h> |
147 | #include <net80211/ieee80211_priv.h> /* for SEQ_LT */ |
148 | #undef DPRINTF /* defined in ieee80211_priv.h */ |
149 | |
150 | #define DEVNAME(_s)((_s)->sc_dev.dv_xname) ((_s)->sc_dev.dv_xname) |
151 | |
152 | #define IC2IFP(_ic_)(&(_ic_)->ic_ac.ac_if) (&(_ic_)->ic_ific_ac.ac_if) |
153 | |
154 | #define le16_to_cpup(_a_)(((__uint16_t)(*(const uint16_t *)(_a_)))) (le16toh(*(const uint16_t *)(_a_))((__uint16_t)(*(const uint16_t *)(_a_)))) |
155 | #define le32_to_cpup(_a_)(((__uint32_t)(*(const uint32_t *)(_a_)))) (le32toh(*(const uint32_t *)(_a_))((__uint32_t)(*(const uint32_t *)(_a_)))) |
156 | |
157 | #ifdef IWM_DEBUG |
158 | #define DPRINTF(x)do { ; } while (0) do { if (iwm_debug > 0) printf x; } while (0) |
159 | #define DPRINTFN(n, x)do { ; } while (0) do { if (iwm_debug >= (n)) printf x; } while (0) |
160 | int iwm_debug = 1; |
161 | #else |
162 | #define DPRINTF(x)do { ; } while (0) do { ; } while (0) |
163 | #define DPRINTFN(n, x)do { ; } while (0) do { ; } while (0) |
164 | #endif |
165 | |
166 | #include <dev/pci/if_iwmreg.h> |
167 | #include <dev/pci/if_iwmvar.h> |
168 | |
169 | const uint8_t iwm_nvm_channels[] = { |
170 | /* 2.4 GHz */ |
171 | 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, |
172 | /* 5 GHz */ |
173 | 36, 40, 44 , 48, 52, 56, 60, 64, |
174 | 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, |
175 | 149, 153, 157, 161, 165 |
176 | }; |
177 | |
178 | const uint8_t iwm_nvm_channels_8000[] = { |
179 | /* 2.4 GHz */ |
180 | 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, |
181 | /* 5 GHz */ |
182 | 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92, |
183 | 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, |
184 | 149, 153, 157, 161, 165, 169, 173, 177, 181 |
185 | }; |
186 | |
187 | #define IWM_NUM_2GHZ_CHANNELS14 14 |
188 | |
189 | const struct iwm_rate { |
190 | uint16_t rate; |
191 | uint8_t plcp; |
192 | uint8_t ht_plcp; |
193 | } iwm_rates[] = { |
194 | /* Legacy */ /* HT */ |
195 | { 2, IWM_RATE_1M_PLCP10, IWM_RATE_HT_SISO_MCS_INV_PLCP0x20 }, |
196 | { 4, IWM_RATE_2M_PLCP20, IWM_RATE_HT_SISO_MCS_INV_PLCP0x20 }, |
197 | { 11, IWM_RATE_5M_PLCP55, IWM_RATE_HT_SISO_MCS_INV_PLCP0x20 }, |
198 | { 22, IWM_RATE_11M_PLCP110, IWM_RATE_HT_SISO_MCS_INV_PLCP0x20 }, |
199 | { 12, IWM_RATE_6M_PLCP13, IWM_RATE_HT_SISO_MCS_0_PLCP0 }, |
200 | { 18, IWM_RATE_9M_PLCP15, IWM_RATE_HT_SISO_MCS_INV_PLCP0x20 }, |
201 | { 24, IWM_RATE_12M_PLCP5, IWM_RATE_HT_SISO_MCS_1_PLCP1 }, |
202 | { 26, IWM_RATE_INVM_PLCP0xff, IWM_RATE_HT_MIMO2_MCS_8_PLCP0x8 }, |
203 | { 36, IWM_RATE_18M_PLCP7, IWM_RATE_HT_SISO_MCS_2_PLCP2 }, |
204 | { 48, IWM_RATE_24M_PLCP9, IWM_RATE_HT_SISO_MCS_3_PLCP3 }, |
205 | { 52, IWM_RATE_INVM_PLCP0xff, IWM_RATE_HT_MIMO2_MCS_9_PLCP0x9 }, |
206 | { 72, IWM_RATE_36M_PLCP11, IWM_RATE_HT_SISO_MCS_4_PLCP4 }, |
207 | { 78, IWM_RATE_INVM_PLCP0xff, IWM_RATE_HT_MIMO2_MCS_10_PLCP0xA }, |
208 | { 96, IWM_RATE_48M_PLCP1, IWM_RATE_HT_SISO_MCS_5_PLCP5 }, |
209 | { 104, IWM_RATE_INVM_PLCP0xff, IWM_RATE_HT_MIMO2_MCS_11_PLCP0xB }, |
210 | { 108, IWM_RATE_54M_PLCP3, IWM_RATE_HT_SISO_MCS_6_PLCP6 }, |
211 | { 128, IWM_RATE_INVM_PLCP0xff, IWM_RATE_HT_SISO_MCS_7_PLCP7 }, |
212 | { 156, IWM_RATE_INVM_PLCP0xff, IWM_RATE_HT_MIMO2_MCS_12_PLCP0xC }, |
213 | { 208, IWM_RATE_INVM_PLCP0xff, IWM_RATE_HT_MIMO2_MCS_13_PLCP0xD }, |
214 | { 234, IWM_RATE_INVM_PLCP0xff, IWM_RATE_HT_MIMO2_MCS_14_PLCP0xE }, |
215 | { 260, IWM_RATE_INVM_PLCP0xff, IWM_RATE_HT_MIMO2_MCS_15_PLCP0xF }, |
216 | }; |
217 | #define IWM_RIDX_CCK0 0 |
218 | #define IWM_RIDX_OFDM4 4 |
219 | #define IWM_RIDX_MAX((sizeof((iwm_rates)) / sizeof((iwm_rates)[0]))-1) (nitems(iwm_rates)(sizeof((iwm_rates)) / sizeof((iwm_rates)[0]))-1) |
220 | #define IWM_RIDX_IS_CCK(_i_)((_i_) < 4) ((_i_) < IWM_RIDX_OFDM4) |
221 | #define IWM_RIDX_IS_OFDM(_i_)((_i_) >= 4) ((_i_) >= IWM_RIDX_OFDM4) |
222 | #define IWM_RVAL_IS_OFDM(_i_)((_i_) >= 12 && (_i_) != 22) ((_i_) >= 12 && (_i_) != 22) |
223 | |
224 | /* Convert an MCS index into an iwm_rates[] index. */ |
225 | const int iwm_mcs2ridx[] = { |
226 | IWM_RATE_MCS_0_INDEX, |
227 | IWM_RATE_MCS_1_INDEX, |
228 | IWM_RATE_MCS_2_INDEX, |
229 | IWM_RATE_MCS_3_INDEX, |
230 | IWM_RATE_MCS_4_INDEX, |
231 | IWM_RATE_MCS_5_INDEX, |
232 | IWM_RATE_MCS_6_INDEX, |
233 | IWM_RATE_MCS_7_INDEX, |
234 | IWM_RATE_MCS_8_INDEX, |
235 | IWM_RATE_MCS_9_INDEX, |
236 | IWM_RATE_MCS_10_INDEX, |
237 | IWM_RATE_MCS_11_INDEX, |
238 | IWM_RATE_MCS_12_INDEX, |
239 | IWM_RATE_MCS_13_INDEX, |
240 | IWM_RATE_MCS_14_INDEX, |
241 | IWM_RATE_MCS_15_INDEX, |
242 | }; |
243 | |
244 | struct iwm_nvm_section { |
245 | uint16_t length; |
246 | uint8_t *data; |
247 | }; |
248 | |
249 | int iwm_is_mimo_ht_plcp(uint8_t); |
250 | int iwm_is_mimo_mcs(int); |
251 | int iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t); |
252 | int iwm_firmware_store_section(struct iwm_softc *, enum iwm_ucode_type, |
253 | uint8_t *, size_t); |
254 | int iwm_set_default_calib(struct iwm_softc *, const void *); |
255 | void iwm_fw_info_free(struct iwm_fw_info *); |
256 | void iwm_fw_version_str(char *, size_t, uint32_t, uint32_t, uint32_t); |
257 | int iwm_read_firmware(struct iwm_softc *); |
258 | uint32_t iwm_read_prph_unlocked(struct iwm_softc *, uint32_t); |
259 | uint32_t iwm_read_prph(struct iwm_softc *, uint32_t); |
260 | void iwm_write_prph_unlocked(struct iwm_softc *, uint32_t, uint32_t); |
261 | void iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t); |
262 | int iwm_read_mem(struct iwm_softc *, uint32_t, void *, int); |
263 | int iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int); |
264 | int iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t); |
265 | int iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int); |
266 | int iwm_nic_lock(struct iwm_softc *); |
267 | void iwm_nic_assert_locked(struct iwm_softc *); |
268 | void iwm_nic_unlock(struct iwm_softc *); |
269 | int iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t, |
270 | uint32_t); |
271 | int iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t); |
272 | int iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t); |
273 | int iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *, bus_size_t, |
274 | bus_size_t); |
275 | void iwm_dma_contig_free(struct iwm_dma_info *); |
276 | int iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *); |
277 | void iwm_disable_rx_dma(struct iwm_softc *); |
278 | void iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *); |
279 | void iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *); |
280 | int iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *, int); |
281 | void iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *); |
282 | void iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *); |
283 | void iwm_enable_rfkill_int(struct iwm_softc *); |
284 | int iwm_check_rfkill(struct iwm_softc *); |
285 | void iwm_enable_interrupts(struct iwm_softc *); |
286 | void iwm_enable_fwload_interrupt(struct iwm_softc *); |
287 | void iwm_restore_interrupts(struct iwm_softc *); |
288 | void iwm_disable_interrupts(struct iwm_softc *); |
289 | void iwm_ict_reset(struct iwm_softc *); |
290 | int iwm_set_hw_ready(struct iwm_softc *); |
291 | int iwm_prepare_card_hw(struct iwm_softc *); |
292 | void iwm_apm_config(struct iwm_softc *); |
293 | int iwm_apm_init(struct iwm_softc *); |
294 | void iwm_apm_stop(struct iwm_softc *); |
295 | int iwm_allow_mcast(struct iwm_softc *); |
296 | void iwm_init_msix_hw(struct iwm_softc *); |
297 | void iwm_conf_msix_hw(struct iwm_softc *, int); |
298 | int iwm_clear_persistence_bit(struct iwm_softc *); |
299 | int iwm_start_hw(struct iwm_softc *); |
300 | void iwm_stop_device(struct iwm_softc *); |
301 | void iwm_nic_config(struct iwm_softc *); |
302 | int iwm_nic_rx_init(struct iwm_softc *); |
303 | int iwm_nic_rx_legacy_init(struct iwm_softc *); |
304 | int iwm_nic_rx_mq_init(struct iwm_softc *); |
305 | int iwm_nic_tx_init(struct iwm_softc *); |
306 | int iwm_nic_init(struct iwm_softc *); |
307 | int iwm_enable_ac_txq(struct iwm_softc *, int, int); |
308 | int iwm_enable_txq(struct iwm_softc *, int, int, int, int, uint8_t, |
309 | uint16_t); |
310 | int iwm_disable_txq(struct iwm_softc *, int, int, uint8_t); |
311 | int iwm_post_alive(struct iwm_softc *); |
312 | struct iwm_phy_db_entry *iwm_phy_db_get_section(struct iwm_softc *, uint16_t, |
313 | uint16_t); |
314 | int iwm_phy_db_set_section(struct iwm_softc *, |
315 | struct iwm_calib_res_notif_phy_db *); |
316 | int iwm_is_valid_channel(uint16_t); |
317 | uint8_t iwm_ch_id_to_ch_index(uint16_t); |
318 | uint16_t iwm_channel_id_to_papd(uint16_t); |
319 | uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t); |
320 | int iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t, uint8_t **, |
321 | uint16_t *, uint16_t); |
322 | int iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t, void *); |
323 | int iwm_phy_db_send_all_channel_groups(struct iwm_softc *, uint16_t, |
324 | uint8_t); |
325 | int iwm_send_phy_db_data(struct iwm_softc *); |
326 | void iwm_protect_session(struct iwm_softc *, struct iwm_node *, uint32_t, |
327 | uint32_t); |
328 | void iwm_unprotect_session(struct iwm_softc *, struct iwm_node *); |
329 | int iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t, uint16_t, |
330 | uint8_t *, uint16_t *); |
331 | int iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *, |
332 | uint16_t *, size_t); |
333 | void iwm_init_channel_map(struct iwm_softc *, const uint16_t * const, |
334 | const uint8_t *nvm_channels, int nchan); |
335 | int iwm_mimo_enabled(struct iwm_softc *); |
336 | void iwm_setup_ht_rates(struct iwm_softc *); |
337 | void iwm_mac_ctxt_task(void *); |
338 | void iwm_phy_ctxt_task(void *); |
339 | void iwm_updateprot(struct ieee80211com *); |
340 | void iwm_updateslot(struct ieee80211com *); |
341 | void iwm_updateedca(struct ieee80211com *); |
342 | void iwm_updatechan(struct ieee80211com *); |
343 | void iwm_init_reorder_buffer(struct iwm_reorder_buffer *, uint16_t, |
344 | uint16_t); |
345 | void iwm_clear_reorder_buffer(struct iwm_softc *, struct iwm_rxba_data *); |
346 | int iwm_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *, |
347 | uint8_t); |
348 | void iwm_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *, |
349 | uint8_t); |
350 | void iwm_rx_ba_session_expired(void *); |
351 | void iwm_reorder_timer_expired(void *); |
352 | int iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *, uint8_t, |
353 | uint16_t, uint16_t, int, int); |
354 | int iwm_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *, |
355 | uint8_t); |
356 | void iwm_ampdu_tx_stop(struct ieee80211com *, struct ieee80211_node *, |
357 | uint8_t); |
358 | void iwm_ba_task(void *); |
359 | |
360 | int iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *, |
361 | const uint16_t *, const uint16_t *, |
362 | const uint16_t *, const uint16_t *, |
363 | const uint16_t *, int); |
364 | void iwm_set_hw_address_8000(struct iwm_softc *, struct iwm_nvm_data *, |
365 | const uint16_t *, const uint16_t *); |
366 | int iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *); |
367 | int iwm_nvm_init(struct iwm_softc *); |
368 | int iwm_firmware_load_sect(struct iwm_softc *, uint32_t, const uint8_t *, |
369 | uint32_t); |
370 | int iwm_firmware_load_chunk(struct iwm_softc *, uint32_t, const uint8_t *, |
371 | uint32_t); |
372 | int iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type); |
373 | int iwm_load_cpu_sections_8000(struct iwm_softc *, struct iwm_fw_sects *, |
374 | int , int *); |
375 | int iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type); |
376 | int iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type); |
377 | int iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type); |
378 | int iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t); |
379 | int iwm_send_phy_cfg_cmd(struct iwm_softc *); |
380 | int iwm_load_ucode_wait_alive(struct iwm_softc *, enum iwm_ucode_type); |
381 | int iwm_send_dqa_cmd(struct iwm_softc *); |
382 | int iwm_run_init_mvm_ucode(struct iwm_softc *, int); |
383 | int iwm_config_ltr(struct iwm_softc *); |
384 | int iwm_rx_addbuf(struct iwm_softc *, int, int); |
385 | int iwm_get_signal_strength(struct iwm_softc *, struct iwm_rx_phy_info *); |
386 | int iwm_rxmq_get_signal_strength(struct iwm_softc *, struct iwm_rx_mpdu_desc *); |
387 | void iwm_rx_rx_phy_cmd(struct iwm_softc *, struct iwm_rx_packet *, |
388 | struct iwm_rx_data *); |
389 | int iwm_get_noise(const struct iwm_statistics_rx_non_phy *); |
390 | int iwm_rx_hwdecrypt(struct iwm_softc *, struct mbuf *, uint32_t, |
391 | struct ieee80211_rxinfo *); |
392 | int iwm_ccmp_decap(struct iwm_softc *, struct mbuf *, |
393 | struct ieee80211_node *, struct ieee80211_rxinfo *); |
394 | void iwm_rx_frame(struct iwm_softc *, struct mbuf *, int, uint32_t, int, int, |
395 | uint32_t, struct ieee80211_rxinfo *, struct mbuf_list *); |
396 | void iwm_ht_single_rate_control(struct iwm_softc *, struct ieee80211_node *, |
397 | int, uint8_t, int); |
398 | void iwm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *, |
399 | struct iwm_node *, int, int); |
400 | void iwm_txd_done(struct iwm_softc *, struct iwm_tx_data *); |
401 | void iwm_txq_advance(struct iwm_softc *, struct iwm_tx_ring *, int); |
402 | void iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *, |
403 | struct iwm_rx_data *); |
404 | void iwm_clear_oactive(struct iwm_softc *, struct iwm_tx_ring *); |
405 | void iwm_ampdu_rate_control(struct iwm_softc *, struct ieee80211_node *, |
406 | struct iwm_tx_ring *, int, uint16_t, uint16_t); |
407 | void iwm_rx_compressed_ba(struct iwm_softc *, struct iwm_rx_packet *); |
408 | void iwm_rx_bmiss(struct iwm_softc *, struct iwm_rx_packet *, |
409 | struct iwm_rx_data *); |
410 | int iwm_binding_cmd(struct iwm_softc *, struct iwm_node *, uint32_t); |
411 | int iwm_phy_ctxt_cmd_uhb(struct iwm_softc *, struct iwm_phy_ctxt *, uint8_t, |
412 | uint8_t, uint32_t, uint32_t, uint8_t); |
413 | void iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *, |
414 | struct iwm_phy_context_cmd *, uint32_t, uint32_t); |
415 | void iwm_phy_ctxt_cmd_data(struct iwm_softc *, struct iwm_phy_context_cmd *, |
416 | struct ieee80211_channel *, uint8_t, uint8_t, uint8_t); |
417 | int iwm_phy_ctxt_cmd(struct iwm_softc *, struct iwm_phy_ctxt *, uint8_t, |
418 | uint8_t, uint32_t, uint32_t, uint8_t); |
419 | int iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *); |
420 | int iwm_send_cmd_pdu(struct iwm_softc *, uint32_t, uint32_t, uint16_t, |
421 | const void *); |
422 | int iwm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *, |
423 | uint32_t *); |
424 | int iwm_send_cmd_pdu_status(struct iwm_softc *, uint32_t, uint16_t, |
425 | const void *, uint32_t *); |
426 | void iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *); |
427 | void iwm_cmd_done(struct iwm_softc *, int, int, int); |
428 | void iwm_update_sched(struct iwm_softc *, int, int, uint8_t, uint16_t); |
429 | void iwm_reset_sched(struct iwm_softc *, int, int, uint8_t); |
430 | const struct iwm_rate *iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *, |
431 | struct ieee80211_frame *, struct iwm_tx_cmd *); |
432 | int iwm_tx(struct iwm_softc *, struct mbuf *, struct ieee80211_node *, int); |
433 | int iwm_flush_tx_path(struct iwm_softc *, int); |
434 | int iwm_wait_tx_queues_empty(struct iwm_softc *); |
435 | void iwm_led_enable(struct iwm_softc *); |
436 | void iwm_led_disable(struct iwm_softc *); |
437 | int iwm_led_is_enabled(struct iwm_softc *); |
438 | void iwm_led_blink_timeout(void *); |
439 | void iwm_led_blink_start(struct iwm_softc *); |
440 | void iwm_led_blink_stop(struct iwm_softc *); |
441 | int iwm_beacon_filter_send_cmd(struct iwm_softc *, |
442 | struct iwm_beacon_filter_cmd *); |
443 | void iwm_beacon_filter_set_cqm_params(struct iwm_softc *, struct iwm_node *, |
444 | struct iwm_beacon_filter_cmd *); |
445 | int iwm_update_beacon_abort(struct iwm_softc *, struct iwm_node *, int); |
446 | void iwm_power_build_cmd(struct iwm_softc *, struct iwm_node *, |
447 | struct iwm_mac_power_cmd *); |
448 | int iwm_power_mac_update_mode(struct iwm_softc *, struct iwm_node *); |
449 | int iwm_power_update_device(struct iwm_softc *); |
450 | int iwm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *); |
451 | int iwm_disable_beacon_filter(struct iwm_softc *); |
452 | int iwm_add_sta_cmd(struct iwm_softc *, struct iwm_node *, int); |
453 | int iwm_add_aux_sta(struct iwm_softc *); |
454 | int iwm_drain_sta(struct iwm_softc *sc, struct iwm_node *, int); |
455 | int iwm_flush_sta(struct iwm_softc *, struct iwm_node *); |
456 | int iwm_rm_sta_cmd(struct iwm_softc *, struct iwm_node *); |
457 | uint16_t iwm_scan_rx_chain(struct iwm_softc *); |
458 | uint32_t iwm_scan_rate_n_flags(struct iwm_softc *, int, int); |
459 | uint8_t iwm_lmac_scan_fill_channels(struct iwm_softc *, |
460 | struct iwm_scan_channel_cfg_lmac *, int, int); |
461 | int iwm_fill_probe_req(struct iwm_softc *, struct iwm_scan_probe_req *); |
462 | int iwm_lmac_scan(struct iwm_softc *, int); |
463 | int iwm_config_umac_scan(struct iwm_softc *); |
464 | int iwm_umac_scan(struct iwm_softc *, int); |
465 | void iwm_mcc_update(struct iwm_softc *, struct iwm_mcc_chub_notif *); |
466 | uint8_t iwm_ridx2rate(struct ieee80211_rateset *, int); |
467 | int iwm_rval2ridx(int); |
468 | void iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *, int *); |
469 | void iwm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *, |
470 | struct iwm_mac_ctx_cmd *, uint32_t); |
471 | void iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *, |
472 | struct iwm_mac_data_sta *, int); |
473 | int iwm_mac_ctxt_cmd(struct iwm_softc *, struct iwm_node *, uint32_t, int); |
474 | int iwm_update_quotas(struct iwm_softc *, struct iwm_node *, int); |
475 | void iwm_add_task(struct iwm_softc *, struct taskq *, struct task *); |
476 | void iwm_del_task(struct iwm_softc *, struct taskq *, struct task *); |
477 | int iwm_scan(struct iwm_softc *); |
478 | int iwm_bgscan(struct ieee80211com *); |
479 | void iwm_bgscan_done(struct ieee80211com *, |
480 | struct ieee80211_node_switch_bss_arg *, size_t); |
481 | void iwm_bgscan_done_task(void *); |
482 | int iwm_umac_scan_abort(struct iwm_softc *); |
483 | int iwm_lmac_scan_abort(struct iwm_softc *); |
484 | int iwm_scan_abort(struct iwm_softc *); |
485 | int iwm_phy_ctxt_update(struct iwm_softc *, struct iwm_phy_ctxt *, |
486 | struct ieee80211_channel *, uint8_t, uint8_t, uint32_t, uint8_t); |
487 | int iwm_auth(struct iwm_softc *); |
488 | int iwm_deauth(struct iwm_softc *); |
489 | int iwm_run(struct iwm_softc *); |
490 | int iwm_run_stop(struct iwm_softc *); |
491 | struct ieee80211_node *iwm_node_alloc(struct ieee80211com *); |
492 | int iwm_set_key_v1(struct ieee80211com *, struct ieee80211_node *, |
493 | struct ieee80211_key *); |
494 | int iwm_set_key(struct ieee80211com *, struct ieee80211_node *, |
495 | struct ieee80211_key *); |
496 | void iwm_delete_key_v1(struct ieee80211com *, |
497 | struct ieee80211_node *, struct ieee80211_key *); |
498 | void iwm_delete_key(struct ieee80211com *, |
499 | struct ieee80211_node *, struct ieee80211_key *); |
500 | void iwm_calib_timeout(void *); |
501 | void iwm_setrates(struct iwm_node *, int); |
502 | int iwm_media_change(struct ifnet *); |
503 | void iwm_newstate_task(void *); |
504 | int iwm_newstate(struct ieee80211com *, enum ieee80211_state, int); |
505 | void iwm_endscan(struct iwm_softc *); |
506 | void iwm_fill_sf_command(struct iwm_softc *, struct iwm_sf_cfg_cmd *, |
507 | struct ieee80211_node *); |
508 | int iwm_sf_config(struct iwm_softc *, int); |
509 | int iwm_send_bt_init_conf(struct iwm_softc *); |
510 | int iwm_send_soc_conf(struct iwm_softc *); |
511 | int iwm_send_update_mcc_cmd(struct iwm_softc *, const char *); |
512 | int iwm_send_temp_report_ths_cmd(struct iwm_softc *); |
513 | void iwm_tt_tx_backoff(struct iwm_softc *, uint32_t); |
514 | void iwm_free_fw_paging(struct iwm_softc *); |
515 | int iwm_save_fw_paging(struct iwm_softc *, const struct iwm_fw_sects *); |
516 | int iwm_send_paging_cmd(struct iwm_softc *, const struct iwm_fw_sects *); |
517 | int iwm_init_hw(struct iwm_softc *); |
518 | int iwm_init(struct ifnet *); |
519 | void iwm_start(struct ifnet *); |
520 | void iwm_stop(struct ifnet *); |
521 | void iwm_watchdog(struct ifnet *); |
522 | int iwm_ioctl(struct ifnet *, u_long, caddr_t); |
523 | const char *iwm_desc_lookup(uint32_t); |
524 | void iwm_nic_error(struct iwm_softc *); |
525 | void iwm_dump_driver_status(struct iwm_softc *); |
526 | void iwm_nic_umac_error(struct iwm_softc *); |
527 | void iwm_rx_mpdu(struct iwm_softc *, struct mbuf *, void *, size_t, |
528 | struct mbuf_list *); |
529 | void iwm_flip_address(uint8_t *); |
530 | int iwm_detect_duplicate(struct iwm_softc *, struct mbuf *, |
531 | struct iwm_rx_mpdu_desc *, struct ieee80211_rxinfo *); |
532 | int iwm_is_sn_less(uint16_t, uint16_t, uint16_t); |
533 | void iwm_release_frames(struct iwm_softc *, struct ieee80211_node *, |
534 | struct iwm_rxba_data *, struct iwm_reorder_buffer *, uint16_t, |
535 | struct mbuf_list *); |
536 | int iwm_oldsn_workaround(struct iwm_softc *, struct ieee80211_node *, |
537 | int, struct iwm_reorder_buffer *, uint32_t, uint32_t); |
538 | int iwm_rx_reorder(struct iwm_softc *, struct mbuf *, int, |
539 | struct iwm_rx_mpdu_desc *, int, int, uint32_t, |
540 | struct ieee80211_rxinfo *, struct mbuf_list *); |
541 | void iwm_rx_mpdu_mq(struct iwm_softc *, struct mbuf *, void *, size_t, |
542 | struct mbuf_list *); |
543 | int iwm_rx_pkt_valid(struct iwm_rx_packet *); |
544 | void iwm_rx_pkt(struct iwm_softc *, struct iwm_rx_data *, |
545 | struct mbuf_list *); |
546 | void iwm_notif_intr(struct iwm_softc *); |
547 | int iwm_intr(void *); |
548 | int iwm_intr_msix(void *); |
549 | int iwm_match(struct device *, void *, void *); |
550 | int iwm_preinit(struct iwm_softc *); |
551 | void iwm_attach_hook(struct device *); |
552 | void iwm_attach(struct device *, struct device *, void *); |
553 | void iwm_init_task(void *); |
554 | int iwm_activate(struct device *, int); |
555 | void iwm_resume(struct iwm_softc *); |
556 | int iwm_wakeup(struct iwm_softc *); |
557 | |
558 | #if NBPFILTER1 > 0 |
559 | void iwm_radiotap_attach(struct iwm_softc *); |
560 | #endif |
561 | |
562 | uint8_t |
563 | iwm_lookup_cmd_ver(struct iwm_softc *sc, uint8_t grp, uint8_t cmd) |
564 | { |
565 | const struct iwm_fw_cmd_version *entry; |
566 | int i; |
567 | |
568 | for (i = 0; i < sc->n_cmd_versions; i++) { |
569 | entry = &sc->cmd_versions[i]; |
570 | if (entry->group == grp && entry->cmd == cmd) |
571 | return entry->cmd_ver; |
572 | } |
573 | |
574 | return IWM_FW_CMD_VER_UNKNOWN99; |
575 | } |
576 | |
577 | int |
578 | iwm_is_mimo_ht_plcp(uint8_t ht_plcp) |
579 | { |
580 | return (ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP0x20 && |
581 | (ht_plcp & IWM_RATE_HT_MCS_NSS_MSK(3 << 3))); |
582 | } |
583 | |
584 | int |
585 | iwm_is_mimo_mcs(int mcs) |
586 | { |
587 | int ridx = iwm_mcs2ridx[mcs]; |
588 | return iwm_is_mimo_ht_plcp(iwm_rates[ridx].ht_plcp); |
589 | |
590 | } |
591 | |
592 | int |
593 | iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen) |
594 | { |
595 | struct iwm_fw_cscheme_list *l = (void *)data; |
596 | |
597 | if (dlen < sizeof(*l) || |
598 | dlen < sizeof(l->size) + l->size * sizeof(*l->cs)) |
599 | return EINVAL22; |
600 | |
601 | /* we don't actually store anything for now, always use s/w crypto */ |
602 | |
603 | return 0; |
604 | } |
605 | |
606 | int |
607 | iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type, |
608 | uint8_t *data, size_t dlen) |
609 | { |
610 | struct iwm_fw_sects *fws; |
611 | struct iwm_fw_onesect *fwone; |
612 | |
613 | if (type >= IWM_UCODE_TYPE_MAX) |
614 | return EINVAL22; |
615 | if (dlen < sizeof(uint32_t)) |
616 | return EINVAL22; |
617 | |
618 | fws = &sc->sc_fw.fw_sects[type]; |
619 | if (fws->fw_count >= IWM_UCODE_SECT_MAX16) |
620 | return EINVAL22; |
621 | |
622 | fwone = &fws->fw_sect[fws->fw_count]; |
623 | |
624 | /* first 32bit are device load offset */ |
625 | memcpy(&fwone->fws_devoff, data, sizeof(uint32_t))__builtin_memcpy((&fwone->fws_devoff), (data), (sizeof (uint32_t))); |
626 | |
627 | /* rest is data */ |
628 | fwone->fws_data = data + sizeof(uint32_t); |
629 | fwone->fws_len = dlen - sizeof(uint32_t); |
630 | |
631 | fws->fw_count++; |
632 | fws->fw_totlen += fwone->fws_len; |
633 | |
634 | return 0; |
635 | } |
636 | |
637 | #define IWM_DEFAULT_SCAN_CHANNELS40 40 |
638 | /* Newer firmware might support more channels. Raise this value if needed. */ |
639 | #define IWM_MAX_SCAN_CHANNELS52 52 /* as of 8265-34 firmware image */ |
640 | |
641 | struct iwm_tlv_calib_data { |
642 | uint32_t ucode_type; |
643 | struct iwm_tlv_calib_ctrl calib; |
644 | } __packed__attribute__((__packed__)); |
645 | |
646 | int |
647 | iwm_set_default_calib(struct iwm_softc *sc, const void *data) |
648 | { |
649 | const struct iwm_tlv_calib_data *def_calib = data; |
650 | uint32_t ucode_type = le32toh(def_calib->ucode_type)((__uint32_t)(def_calib->ucode_type)); |
651 | |
652 | if (ucode_type >= IWM_UCODE_TYPE_MAX) |
653 | return EINVAL22; |
654 | |
655 | sc->sc_default_calib[ucode_type].flow_trigger = |
656 | def_calib->calib.flow_trigger; |
657 | sc->sc_default_calib[ucode_type].event_trigger = |
658 | def_calib->calib.event_trigger; |
659 | |
660 | return 0; |
661 | } |
662 | |
663 | void |
664 | iwm_fw_info_free(struct iwm_fw_info *fw) |
665 | { |
666 | free(fw->fw_rawdata, M_DEVBUF2, fw->fw_rawsize); |
667 | fw->fw_rawdata = NULL((void *)0); |
668 | fw->fw_rawsize = 0; |
669 | /* don't touch fw->fw_status */ |
670 | memset(fw->fw_sects, 0, sizeof(fw->fw_sects))__builtin_memset((fw->fw_sects), (0), (sizeof(fw->fw_sects ))); |
671 | } |
672 | |
/*
 * Format a firmware version triple into buf.  From major version 35
 * onwards the Linux driver prints the minor number as zero-padded
 * hexadecimal; mirror that convention here.
 */
void
iwm_fw_version_str(char *buf, size_t bufsize,
    uint32_t major, uint32_t minor, uint32_t api)
{
	const char *fmt;

	fmt = (major >= 35) ? "%u.%08x.%u" : "%u.%u.%u";
	snprintf(buf, bufsize, fmt, major, minor, api);
}
686 | |
687 | int |
688 | iwm_read_firmware(struct iwm_softc *sc) |
689 | { |
690 | struct iwm_fw_info *fw = &sc->sc_fw; |
691 | struct iwm_tlv_ucode_header *uhdr; |
692 | struct iwm_ucode_tlv tlv; |
693 | uint32_t tlv_type; |
694 | uint8_t *data; |
695 | uint32_t usniffer_img; |
696 | uint32_t paging_mem_size; |
697 | int err; |
698 | size_t len; |
699 | |
700 | if (fw->fw_status == IWM_FW_STATUS_DONE2) |
701 | return 0; |
702 | |
703 | while (fw->fw_status == IWM_FW_STATUS_INPROGRESS1) |
704 | tsleep_nsec(&sc->sc_fw, 0, "iwmfwp", INFSLP0xffffffffffffffffULL); |
705 | fw->fw_status = IWM_FW_STATUS_INPROGRESS1; |
706 | |
707 | if (fw->fw_rawdata != NULL((void *)0)) |
708 | iwm_fw_info_free(fw); |
709 | |
710 | err = loadfirmware(sc->sc_fwname, |
711 | (u_char **)&fw->fw_rawdata, &fw->fw_rawsize); |
712 | if (err) { |
713 | printf("%s: could not read firmware %s (error %d)\n", |
714 | DEVNAME(sc)((sc)->sc_dev.dv_xname), sc->sc_fwname, err); |
715 | goto out; |
716 | } |
717 | |
718 | sc->sc_capaflags = 0; |
719 | sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS40; |
720 | memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa))__builtin_memset((sc->sc_enabled_capa), (0), (sizeof(sc-> sc_enabled_capa))); |
721 | memset(sc->sc_ucode_api, 0, sizeof(sc->sc_ucode_api))__builtin_memset((sc->sc_ucode_api), (0), (sizeof(sc->sc_ucode_api ))); |
722 | sc->n_cmd_versions = 0; |
723 | |
724 | uhdr = (void *)fw->fw_rawdata; |
725 | if (*(uint32_t *)fw->fw_rawdata != 0 |
726 | || le32toh(uhdr->magic)((__uint32_t)(uhdr->magic)) != IWM_TLV_UCODE_MAGIC0x0a4c5749) { |
727 | printf("%s: invalid firmware %s\n", |
728 | DEVNAME(sc)((sc)->sc_dev.dv_xname), sc->sc_fwname); |
729 | err = EINVAL22; |
730 | goto out; |
731 | } |
732 | |
733 | iwm_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver), |
734 | IWM_UCODE_MAJOR(le32toh(uhdr->ver))(((((__uint32_t)(uhdr->ver))) & 0xFF000000) >> 24 ), |
735 | IWM_UCODE_MINOR(le32toh(uhdr->ver))(((((__uint32_t)(uhdr->ver))) & 0x00FF0000) >> 16 ), |
736 | IWM_UCODE_API(le32toh(uhdr->ver))(((((__uint32_t)(uhdr->ver))) & 0x0000FF00) >> 8 )); |
737 | |
738 | data = uhdr->data; |
739 | len = fw->fw_rawsize - sizeof(*uhdr); |
740 | |
741 | while (len >= sizeof(tlv)) { |
742 | size_t tlv_len; |
743 | void *tlv_data; |
744 | |
745 | memcpy(&tlv, data, sizeof(tlv))__builtin_memcpy((&tlv), (data), (sizeof(tlv))); |
746 | tlv_len = le32toh(tlv.length)((__uint32_t)(tlv.length)); |
747 | tlv_type = le32toh(tlv.type)((__uint32_t)(tlv.type)); |
748 | |
749 | len -= sizeof(tlv); |
750 | data += sizeof(tlv); |
751 | tlv_data = data; |
752 | |
753 | if (len < tlv_len) { |
754 | printf("%s: firmware too short: %zu bytes\n", |
755 | DEVNAME(sc)((sc)->sc_dev.dv_xname), len); |
756 | err = EINVAL22; |
757 | goto parse_out; |
758 | } |
759 | |
760 | switch (tlv_type) { |
761 | case IWM_UCODE_TLV_PROBE_MAX_LEN6: |
762 | if (tlv_len < sizeof(uint32_t)) { |
763 | err = EINVAL22; |
764 | goto parse_out; |
765 | } |
766 | sc->sc_capa_max_probe_len |
767 | = le32toh(*(uint32_t *)tlv_data)((__uint32_t)(*(uint32_t *)tlv_data)); |
768 | if (sc->sc_capa_max_probe_len > |
769 | IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE512) { |
770 | err = EINVAL22; |
771 | goto parse_out; |
772 | } |
773 | break; |
774 | case IWM_UCODE_TLV_PAN7: |
775 | if (tlv_len) { |
776 | err = EINVAL22; |
777 | goto parse_out; |
778 | } |
779 | sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN(1 << 0); |
780 | break; |
781 | case IWM_UCODE_TLV_FLAGS18: |
782 | if (tlv_len < sizeof(uint32_t)) { |
783 | err = EINVAL22; |
784 | goto parse_out; |
785 | } |
786 | /* |
787 | * Apparently there can be many flags, but Linux driver |
788 | * parses only the first one, and so do we. |
789 | * |
790 | * XXX: why does this override IWM_UCODE_TLV_PAN? |
791 | * Intentional or a bug? Observations from |
792 | * current firmware file: |
793 | * 1) TLV_PAN is parsed first |
794 | * 2) TLV_FLAGS contains TLV_FLAGS_PAN |
795 | * ==> this resets TLV_PAN to itself... hnnnk |
796 | */ |
797 | sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data)((__uint32_t)(*(uint32_t *)tlv_data)); |
798 | break; |
799 | case IWM_UCODE_TLV_CSCHEME28: |
800 | err = iwm_store_cscheme(sc, tlv_data, tlv_len); |
801 | if (err) |
802 | goto parse_out; |
803 | break; |
804 | case IWM_UCODE_TLV_NUM_OF_CPU27: { |
805 | uint32_t num_cpu; |
806 | if (tlv_len != sizeof(uint32_t)) { |
807 | err = EINVAL22; |
808 | goto parse_out; |
809 | } |
810 | num_cpu = le32toh(*(uint32_t *)tlv_data)((__uint32_t)(*(uint32_t *)tlv_data)); |
811 | if (num_cpu < 1 || num_cpu > 2) { |
812 | err = EINVAL22; |
813 | goto parse_out; |
814 | } |
815 | break; |
816 | } |
817 | case IWM_UCODE_TLV_SEC_RT19: |
818 | err = iwm_firmware_store_section(sc, |
819 | IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len); |
820 | if (err) |
821 | goto parse_out; |
822 | break; |
823 | case IWM_UCODE_TLV_SEC_INIT20: |
824 | err = iwm_firmware_store_section(sc, |
825 | IWM_UCODE_TYPE_INIT, tlv_data, tlv_len); |
826 | if (err) |
827 | goto parse_out; |
828 | break; |
829 | case IWM_UCODE_TLV_SEC_WOWLAN21: |
830 | err = iwm_firmware_store_section(sc, |
831 | IWM_UCODE_TYPE_WOW, tlv_data, tlv_len); |
832 | if (err) |
833 | goto parse_out; |
834 | break; |
835 | case IWM_UCODE_TLV_DEF_CALIB22: |
836 | if (tlv_len != sizeof(struct iwm_tlv_calib_data)) { |
837 | err = EINVAL22; |
838 | goto parse_out; |
839 | } |
840 | err = iwm_set_default_calib(sc, tlv_data); |
841 | if (err) |
842 | goto parse_out; |
843 | break; |
844 | case IWM_UCODE_TLV_PHY_SKU23: |
845 | if (tlv_len != sizeof(uint32_t)) { |
846 | err = EINVAL22; |
847 | goto parse_out; |
848 | } |
849 | sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data)((__uint32_t)(*(uint32_t *)tlv_data)); |
850 | break; |
851 | |
852 | case IWM_UCODE_TLV_API_CHANGES_SET29: { |
853 | struct iwm_ucode_api *api; |
854 | int idx, i; |
855 | if (tlv_len != sizeof(*api)) { |
856 | err = EINVAL22; |
857 | goto parse_out; |
858 | } |
859 | api = (struct iwm_ucode_api *)tlv_data; |
860 | idx = le32toh(api->api_index)((__uint32_t)(api->api_index)); |
861 | if (idx >= howmany(IWM_NUM_UCODE_TLV_API, 32)(((128) + ((32) - 1)) / (32))) { |
862 | err = EINVAL22; |
863 | goto parse_out; |
864 | } |
865 | for (i = 0; i < 32; i++) { |
866 | if ((le32toh(api->api_flags)((__uint32_t)(api->api_flags)) & (1 << i)) == 0) |
867 | continue; |
868 | setbit(sc->sc_ucode_api, i + (32 * idx))((sc->sc_ucode_api)[(i + (32 * idx))>>3] |= 1<< ((i + (32 * idx))&(8 -1))); |
869 | } |
870 | break; |
871 | } |
872 | |
873 | case IWM_UCODE_TLV_ENABLED_CAPABILITIES30: { |
874 | struct iwm_ucode_capa *capa; |
875 | int idx, i; |
876 | if (tlv_len != sizeof(*capa)) { |
877 | err = EINVAL22; |
878 | goto parse_out; |
879 | } |
880 | capa = (struct iwm_ucode_capa *)tlv_data; |
881 | idx = le32toh(capa->api_index)((__uint32_t)(capa->api_index)); |
882 | if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)(((128) + ((32) - 1)) / (32))) { |
883 | goto parse_out; |
884 | } |
885 | for (i = 0; i < 32; i++) { |
886 | if ((le32toh(capa->api_capa)((__uint32_t)(capa->api_capa)) & (1 << i)) == 0) |
887 | continue; |
888 | setbit(sc->sc_enabled_capa, i + (32 * idx))((sc->sc_enabled_capa)[(i + (32 * idx))>>3] |= 1<< ((i + (32 * idx))&(8 -1))); |
889 | } |
890 | break; |
891 | } |
892 | |
893 | case IWM_UCODE_TLV_CMD_VERSIONS48: |
894 | if (tlv_len % sizeof(struct iwm_fw_cmd_version)) { |
895 | tlv_len /= sizeof(struct iwm_fw_cmd_version); |
896 | tlv_len *= sizeof(struct iwm_fw_cmd_version); |
897 | } |
898 | if (sc->n_cmd_versions != 0) { |
899 | err = EINVAL22; |
900 | goto parse_out; |
901 | } |
902 | if (tlv_len > sizeof(sc->cmd_versions)) { |
903 | err = EINVAL22; |
904 | goto parse_out; |
905 | } |
906 | memcpy(&sc->cmd_versions[0], tlv_data, tlv_len)__builtin_memcpy((&sc->cmd_versions[0]), (tlv_data), ( tlv_len)); |
907 | sc->n_cmd_versions = tlv_len / sizeof(struct iwm_fw_cmd_version); |
908 | break; |
909 | |
910 | case IWM_UCODE_TLV_SDIO_ADMA_ADDR35: |
911 | case IWM_UCODE_TLV_FW_GSCAN_CAPA50: |
912 | /* ignore, not used by current driver */ |
913 | break; |
914 | |
915 | case IWM_UCODE_TLV_SEC_RT_USNIFFER34: |
916 | err = iwm_firmware_store_section(sc, |
917 | IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data, |
918 | tlv_len); |
919 | if (err) |
920 | goto parse_out; |
921 | break; |
922 | |
923 | case IWM_UCODE_TLV_PAGING32: |
924 | if (tlv_len != sizeof(uint32_t)) { |
925 | err = EINVAL22; |
926 | goto parse_out; |
927 | } |
928 | paging_mem_size = le32toh(*(const uint32_t *)tlv_data)((__uint32_t)(*(const uint32_t *)tlv_data)); |
929 | |
930 | DPRINTF(("%s: Paging: paging enabled (size = %u bytes)\n",do { ; } while (0) |
931 | DEVNAME(sc), paging_mem_size))do { ; } while (0); |
932 | if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE((1 << 5) * ((1 << 3) * (1 << 12)))) { |
933 | printf("%s: Driver only supports up to %u" |
934 | " bytes for paging image (%u requested)\n", |
935 | DEVNAME(sc)((sc)->sc_dev.dv_xname), IWM_MAX_PAGING_IMAGE_SIZE((1 << 5) * ((1 << 3) * (1 << 12))), |
936 | paging_mem_size); |
937 | err = EINVAL22; |
938 | goto out; |
939 | } |
940 | if (paging_mem_size & (IWM_FW_PAGING_SIZE(1 << 12) - 1)) { |
941 | printf("%s: Paging: image isn't multiple of %u\n", |
942 | DEVNAME(sc)((sc)->sc_dev.dv_xname), IWM_FW_PAGING_SIZE(1 << 12)); |
943 | err = EINVAL22; |
944 | goto out; |
945 | } |
946 | |
947 | fw->fw_sects[IWM_UCODE_TYPE_REGULAR].paging_mem_size = |
948 | paging_mem_size; |
949 | usniffer_img = IWM_UCODE_TYPE_REGULAR_USNIFFER; |
950 | fw->fw_sects[usniffer_img].paging_mem_size = |
951 | paging_mem_size; |
952 | break; |
953 | |
954 | case IWM_UCODE_TLV_N_SCAN_CHANNELS31: |
955 | if (tlv_len != sizeof(uint32_t)) { |
956 | err = EINVAL22; |
957 | goto parse_out; |
958 | } |
959 | sc->sc_capa_n_scan_channels = |
960 | le32toh(*(uint32_t *)tlv_data)((__uint32_t)(*(uint32_t *)tlv_data)); |
961 | if (sc->sc_capa_n_scan_channels > IWM_MAX_SCAN_CHANNELS52) { |
962 | err = ERANGE34; |
963 | goto parse_out; |
964 | } |
965 | break; |
966 | |
967 | case IWM_UCODE_TLV_FW_VERSION36: |
968 | if (tlv_len != sizeof(uint32_t) * 3) { |
969 | err = EINVAL22; |
970 | goto parse_out; |
971 | } |
972 | |
973 | iwm_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver), |
974 | le32toh(((uint32_t *)tlv_data)[0])((__uint32_t)(((uint32_t *)tlv_data)[0])), |
975 | le32toh(((uint32_t *)tlv_data)[1])((__uint32_t)(((uint32_t *)tlv_data)[1])), |
976 | le32toh(((uint32_t *)tlv_data)[2])((__uint32_t)(((uint32_t *)tlv_data)[2]))); |
977 | break; |
978 | |
979 | case IWM_UCODE_TLV_FW_DBG_DEST38: |
980 | case IWM_UCODE_TLV_FW_DBG_CONF39: |
981 | case IWM_UCODE_TLV_UMAC_DEBUG_ADDRS54: |
982 | case IWM_UCODE_TLV_LMAC_DEBUG_ADDRS55: |
983 | case IWM_UCODE_TLV_TYPE_DEBUG_INFO(0x1000005 + 0): |
984 | case IWM_UCODE_TLV_TYPE_BUFFER_ALLOCATION(0x1000005 + 1): |
985 | case IWM_UCODE_TLV_TYPE_HCMD(0x1000005 + 2): |
986 | case IWM_UCODE_TLV_TYPE_REGIONS(0x1000005 + 3): |
987 | case IWM_UCODE_TLV_TYPE_TRIGGERS(0x1000005 + 4): |
988 | break; |
989 | |
990 | case IWM_UCODE_TLV_HW_TYPE58: |
991 | break; |
992 | |
993 | case IWM_UCODE_TLV_FW_MEM_SEG51: |
994 | break; |
995 | |
996 | /* undocumented TLVs found in iwm-9000-43 image */ |
997 | case 0x1000003: |
998 | case 0x1000004: |
999 | break; |
1000 | |
1001 | default: |
1002 | err = EINVAL22; |
1003 | goto parse_out; |
1004 | } |
1005 | |
1006 | len -= roundup(tlv_len, 4)((((tlv_len)+((4)-1))/(4))*(4)); |
1007 | data += roundup(tlv_len, 4)((((tlv_len)+((4)-1))/(4))*(4)); |
1008 | } |
1009 | |
1010 | KASSERT(err == 0)((err == 0) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/pci/if_iwm.c" , 1010, "err == 0")); |
1011 | |
1012 | parse_out: |
1013 | if (err) { |
1014 | printf("%s: firmware parse error %d, " |
1015 | "section type %d\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), err, tlv_type); |
1016 | } |
1017 | |
1018 | out: |
1019 | if (err) { |
1020 | fw->fw_status = IWM_FW_STATUS_NONE0; |
1021 | if (fw->fw_rawdata != NULL((void *)0)) |
1022 | iwm_fw_info_free(fw); |
1023 | } else |
1024 | fw->fw_status = IWM_FW_STATUS_DONE2; |
1025 | wakeup(&sc->sc_fw); |
1026 | |
1027 | return err; |
1028 | } |
1029 | |
1030 | uint32_t |
1031 | iwm_read_prph_unlocked(struct iwm_softc *sc, uint32_t addr) |
1032 | { |
1033 | IWM_WRITE(sc,(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x400)+0x048 ))), ((((addr & 0x000fffff) | (3 << 24)))))) |
1034 | IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)))(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x400)+0x048 ))), ((((addr & 0x000fffff) | (3 << 24)))))); |
1035 | IWM_BARRIER_READ_WRITE(sc)bus_space_barrier((sc)->sc_st, (sc)->sc_sh, 0, (sc)-> sc_sz, 0x01 | 0x02); |
1036 | return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT)(((sc)->sc_st)->read_4(((sc)->sc_sh), ((((0x400)+0x050 ))))); |
1037 | } |
1038 | |
/*
 * Read a periphery register.  The caller must already hold the NIC
 * lock (iwm_nic_lock()); this is asserted before the access.
 */
uint32_t
iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
{
	iwm_nic_assert_locked(sc);
	return iwm_read_prph_unlocked(sc, addr);
}
1045 | |
1046 | void |
1047 | iwm_write_prph_unlocked(struct iwm_softc *sc, uint32_t addr, uint32_t val) |
1048 | { |
1049 | IWM_WRITE(sc,(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x400)+0x044 ))), ((((addr & 0x000fffff) | (3 << 24)))))) |
1050 | IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)))(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x400)+0x044 ))), ((((addr & 0x000fffff) | (3 << 24)))))); |
1051 | IWM_BARRIER_WRITE(sc)bus_space_barrier((sc)->sc_st, (sc)->sc_sh, 0, (sc)-> sc_sz, 0x02); |
1052 | IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x400)+0x04c ))), ((val)))); |
1053 | } |
1054 | |
/*
 * Write a periphery register.  The caller must already hold the NIC
 * lock (iwm_nic_lock()); this is asserted before the access.
 */
void
iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	iwm_nic_assert_locked(sc);
	iwm_write_prph_unlocked(sc, addr, val);
}
1061 | |
/*
 * Write a 64-bit value as two 32-bit periphery register writes:
 * low word at 'addr' first, then high word at 'addr + 4'.
 * Caller must hold the NIC lock (asserted inside iwm_write_prph()).
 */
void
iwm_write_prph64(struct iwm_softc *sc, uint64_t addr, uint64_t val)
{
	iwm_write_prph(sc, (uint32_t)addr, val & 0xffffffff);
	iwm_write_prph(sc, (uint32_t)addr + 4, val >> 32);
}
1068 | |
1069 | int |
1070 | iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords) |
1071 | { |
1072 | int offs, err = 0; |
1073 | uint32_t *vals = buf; |
1074 | |
1075 | if (iwm_nic_lock(sc)) { |
1076 | IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x400)+0x00c ))), ((addr)))); |
1077 | for (offs = 0; offs < dwords; offs++) |
1078 | vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT)(((sc)->sc_st)->read_4(((sc)->sc_sh), ((((0x400)+0x01c ))))); |
1079 | iwm_nic_unlock(sc); |
1080 | } else { |
1081 | err = EBUSY16; |
1082 | } |
1083 | return err; |
1084 | } |
1085 | |
1086 | int |
1087 | iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords) |
1088 | { |
1089 | int offs; |
1090 | const uint32_t *vals = buf; |
1091 | |
1092 | if (iwm_nic_lock(sc)) { |
1093 | IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x400)+0x010 ))), ((addr)))); |
1094 | /* WADDR auto-increments */ |
1095 | for (offs = 0; offs < dwords; offs++) { |
1096 | uint32_t val = vals ? vals[offs] : 0; |
1097 | IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x400)+0x018 ))), ((val)))); |
1098 | } |
1099 | iwm_nic_unlock(sc); |
1100 | } else { |
1101 | return EBUSY16; |
1102 | } |
1103 | return 0; |
1104 | } |
1105 | |
/*
 * Convenience wrapper: write a single 32-bit word of device memory.
 * Returns 0 or EBUSY (see iwm_write_mem()).
 */
int
iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	return iwm_write_mem(sc, addr, &val, 1);
}
1111 | |
/*
 * Busy-wait until the masked bits of register 'reg' equal 'bits & mask',
 * polling every 10us for at most 'timo' microseconds.
 * Returns 1 if the condition was met, 0 on timeout.
 */
int
iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
    int timo)
{
	for (;;) {
		if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
			return 1;
		}
		/* Less than one poll interval left: give up. */
		if (timo < 10) {
			return 0;
		}
		timo -= 10;
		DELAY(10);
	}
}
1127 | |
1128 | int |
1129 | iwm_nic_lock(struct iwm_softc *sc) |
1130 | { |
1131 | if (sc->sc_nic_locks > 0) { |
1132 | iwm_nic_assert_locked(sc); |
1133 | sc->sc_nic_locks++; |
1134 | return 1; /* already locked */ |
1135 | } |
1136 | |
1137 | IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x024))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x024))))) | ((0x00000008)))))) |
1138 | IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x024))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x024))))) | ((0x00000008)))))); |
1139 | |
1140 | if (sc->sc_device_family >= IWM_DEVICE_FAMILY_80002) |
1141 | DELAY(2)(*delay_func)(2); |
1142 | |
1143 | if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL(0x024), |
1144 | IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN(0x00000001), |
1145 | IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY(0x00000001) |
1146 | | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP(0x00000010), 150000)) { |
1147 | sc->sc_nic_locks++; |
1148 | return 1; |
1149 | } |
1150 | |
1151 | printf("%s: acquiring device failed\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
1152 | return 0; |
1153 | } |
1154 | |
1155 | void |
1156 | iwm_nic_assert_locked(struct iwm_softc *sc) |
1157 | { |
1158 | if (sc->sc_nic_locks <= 0) |
1159 | panic("%s: nic locks counter %d", DEVNAME(sc)((sc)->sc_dev.dv_xname), sc->sc_nic_locks); |
1160 | } |
1161 | |
1162 | void |
1163 | iwm_nic_unlock(struct iwm_softc *sc) |
1164 | { |
1165 | if (sc->sc_nic_locks > 0) { |
1166 | if (--sc->sc_nic_locks == 0) |
1167 | IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x024))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x024))))) & ~((0x00000008)))))) |
1168 | IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x024))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x024))))) & ~((0x00000008)))))); |
1169 | } else |
1170 | printf("%s: NIC already unlocked\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
1171 | } |
1172 | |
/*
 * Read-modify-write a periphery register: keep only the bits in 'mask',
 * OR in 'bits', write back.  Takes the NIC lock for the duration.
 * Returns 0 on success or EBUSY if the NIC lock cannot be acquired.
 */
int
iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
    uint32_t mask)
{
	uint32_t val;

	if (iwm_nic_lock(sc)) {
		val = iwm_read_prph(sc, reg) & mask;
		val |= bits;
		iwm_write_prph(sc, reg, val);
		iwm_nic_unlock(sc);
		return 0;
	}
	return EBUSY;
}
1188 | |
/*
 * Set bits in a periphery register (keeps all existing bits).
 * Returns 0 or EBUSY (see iwm_set_bits_mask_prph()).
 */
int
iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	return iwm_set_bits_mask_prph(sc, reg, bits, ~0);
}
1194 | |
/*
 * Clear bits in a periphery register by masking them out of the
 * read-back value.  Returns 0 or EBUSY (see iwm_set_bits_mask_prph()).
 */
int
iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	return iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
}
1200 | |
1201 | int |
1202 | iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma, |
1203 | bus_size_t size, bus_size_t alignment) |
1204 | { |
1205 | int nsegs, err; |
1206 | caddr_t va; |
1207 | |
1208 | dma->tag = tag; |
1209 | dma->size = size; |
1210 | |
1211 | err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,(*(tag)->_dmamap_create)((tag), (size), (1), (size), (0), ( 0x0001), (&dma->map)) |
1212 | &dma->map)(*(tag)->_dmamap_create)((tag), (size), (1), (size), (0), ( 0x0001), (&dma->map)); |
1213 | if (err) |
1214 | goto fail; |
1215 | |
1216 | err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,(*(tag)->_dmamem_alloc)((tag), (size), (alignment), (0), ( &dma->seg), (1), (&nsegs), (0x0001)) |
1217 | BUS_DMA_NOWAIT)(*(tag)->_dmamem_alloc)((tag), (size), (alignment), (0), ( &dma->seg), (1), (&nsegs), (0x0001)); |
1218 | if (err) |
1219 | goto fail; |
1220 | |
1221 | err = bus_dmamem_map(tag, &dma->seg, 1, size, &va,(*(tag)->_dmamem_map)((tag), (&dma->seg), (1), (size ), (&va), (0x0001)) |
1222 | BUS_DMA_NOWAIT)(*(tag)->_dmamem_map)((tag), (&dma->seg), (1), (size ), (&va), (0x0001)); |
1223 | if (err) |
1224 | goto fail; |
1225 | dma->vaddr = va; |
1226 | |
1227 | err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,(*(tag)->_dmamap_load)((tag), (dma->map), (dma->vaddr ), (size), (((void *)0)), (0x0001)) |
1228 | BUS_DMA_NOWAIT)(*(tag)->_dmamap_load)((tag), (dma->map), (dma->vaddr ), (size), (((void *)0)), (0x0001)); |
1229 | if (err) |
1230 | goto fail; |
1231 | |
1232 | memset(dma->vaddr, 0, size)__builtin_memset((dma->vaddr), (0), (size)); |
1233 | bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE)(*(tag)->_dmamap_sync)((tag), (dma->map), (0), (size), ( 0x04)); |
1234 | dma->paddr = dma->map->dm_segs[0].ds_addr; |
1235 | |
1236 | return 0; |
1237 | |
1238 | fail: iwm_dma_contig_free(dma); |
1239 | return err; |
1240 | } |
1241 | |
1242 | void |
1243 | iwm_dma_contig_free(struct iwm_dma_info *dma) |
1244 | { |
1245 | if (dma->map != NULL((void *)0)) { |
1246 | if (dma->vaddr != NULL((void *)0)) { |
1247 | bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,(*(dma->tag)->_dmamap_sync)((dma->tag), (dma->map ), (0), (dma->size), (0x02 | 0x08)) |
1248 | BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)(*(dma->tag)->_dmamap_sync)((dma->tag), (dma->map ), (0), (dma->size), (0x02 | 0x08)); |
1249 | bus_dmamap_unload(dma->tag, dma->map)(*(dma->tag)->_dmamap_unload)((dma->tag), (dma->map )); |
1250 | bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size)(*(dma->tag)->_dmamem_unmap)((dma->tag), (dma->vaddr ), (dma->size)); |
1251 | bus_dmamem_free(dma->tag, &dma->seg, 1)(*(dma->tag)->_dmamem_free)((dma->tag), (&dma-> seg), (1)); |
1252 | dma->vaddr = NULL((void *)0); |
1253 | } |
1254 | bus_dmamap_destroy(dma->tag, dma->map)(*(dma->tag)->_dmamap_destroy)((dma->tag), (dma-> map)); |
1255 | dma->map = NULL((void *)0); |
1256 | } |
1257 | } |
1258 | |
1259 | int |
1260 | iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring) |
1261 | { |
1262 | bus_size_t size; |
1263 | size_t descsz; |
1264 | int count, i, err; |
1265 | |
1266 | ring->cur = 0; |
1267 | |
1268 | if (sc->sc_mqrx_supported) { |
1269 | count = IWM_RX_MQ_RING_COUNT512; |
1270 | descsz = sizeof(uint64_t); |
1271 | } else { |
1272 | count = IWM_RX_RING_COUNT256; |
1273 | descsz = sizeof(uint32_t); |
1274 | } |
1275 | |
1276 | /* Allocate RX descriptors (256-byte aligned). */ |
1277 | size = count * descsz; |
1278 | err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size, 256); |
1279 | if (err) { |
1280 | printf("%s: could not allocate RX ring DMA memory\n", |
1281 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
1282 | goto fail; |
1283 | } |
1284 | ring->desc = ring->free_desc_dma.vaddr; |
1285 | |
1286 | /* Allocate RX status area (16-byte aligned). */ |
1287 | err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma, |
1288 | sizeof(*ring->stat), 16); |
1289 | if (err) { |
1290 | printf("%s: could not allocate RX status DMA memory\n", |
1291 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
1292 | goto fail; |
1293 | } |
1294 | ring->stat = ring->stat_dma.vaddr; |
1295 | |
1296 | if (sc->sc_mqrx_supported) { |
1297 | size = count * sizeof(uint32_t); |
1298 | err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma, |
1299 | size, 256); |
1300 | if (err) { |
1301 | printf("%s: could not allocate RX ring DMA memory\n", |
1302 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
1303 | goto fail; |
1304 | } |
1305 | } |
1306 | |
1307 | for (i = 0; i < count; i++) { |
1308 | struct iwm_rx_data *data = &ring->data[i]; |
1309 | |
1310 | memset(data, 0, sizeof(*data))__builtin_memset((data), (0), (sizeof(*data))); |
1311 | err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (4096 ), (1), (4096), (0), (0x0001 | 0x0002), (&data->map)) |
1312 | IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (4096 ), (1), (4096), (0), (0x0001 | 0x0002), (&data->map)) |
1313 | &data->map)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (4096 ), (1), (4096), (0), (0x0001 | 0x0002), (&data->map)); |
1314 | if (err) { |
1315 | printf("%s: could not create RX buf DMA map\n", |
1316 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
1317 | goto fail; |
1318 | } |
1319 | |
1320 | err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE4096, i); |
1321 | if (err) |
1322 | goto fail; |
1323 | } |
1324 | return 0; |
1325 | |
1326 | fail: iwm_free_rx_ring(sc, ring); |
1327 | return err; |
1328 | } |
1329 | |
1330 | void |
1331 | iwm_disable_rx_dma(struct iwm_softc *sc) |
1332 | { |
1333 | int ntries; |
1334 | |
1335 | if (iwm_nic_lock(sc)) { |
1336 | if (sc->sc_mqrx_supported) { |
1337 | iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG0xA09820, 0); |
1338 | for (ntries = 0; ntries < 1000; ntries++) { |
1339 | if (iwm_read_prph(sc, IWM_RFH_GEN_STATUS0xA09808) & |
1340 | IWM_RXF_DMA_IDLE(1U << 31)) |
1341 | break; |
1342 | DELAY(10)(*delay_func)(10); |
1343 | } |
1344 | } else { |
1345 | IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((((0x1000) + 0xC00))))), ((0)))); |
1346 | for (ntries = 0; ntries < 1000; ntries++) { |
1347 | if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG)(((sc)->sc_st)->read_4(((sc)->sc_sh), (((((0x1000) + 0xC40) + 0x004)))))& |
1348 | IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE(0x01000000)) |
1349 | break; |
1350 | DELAY(10)(*delay_func)(10); |
1351 | } |
1352 | } |
1353 | iwm_nic_unlock(sc); |
1354 | } |
1355 | } |
1356 | |
1357 | void |
1358 | iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring) |
1359 | { |
1360 | ring->cur = 0; |
1361 | bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (ring-> stat_dma.map), (0), (ring->stat_dma.size), (0x04)) |
1362 | ring->stat_dma.size, BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (ring-> stat_dma.map), (0), (ring->stat_dma.size), (0x04)); |
1363 | memset(ring->stat, 0, sizeof(*ring->stat))__builtin_memset((ring->stat), (0), (sizeof(*ring->stat ))); |
1364 | bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (ring-> stat_dma.map), (0), (ring->stat_dma.size), (0x08)) |
1365 | ring->stat_dma.size, BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (ring-> stat_dma.map), (0), (ring->stat_dma.size), (0x08)); |
1366 | |
1367 | } |
1368 | |
1369 | void |
1370 | iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring) |
1371 | { |
1372 | int count, i; |
1373 | |
1374 | iwm_dma_contig_free(&ring->free_desc_dma); |
1375 | iwm_dma_contig_free(&ring->stat_dma); |
1376 | iwm_dma_contig_free(&ring->used_desc_dma); |
1377 | |
1378 | if (sc->sc_mqrx_supported) |
1379 | count = IWM_RX_MQ_RING_COUNT512; |
1380 | else |
1381 | count = IWM_RX_RING_COUNT256; |
1382 | |
1383 | for (i = 0; i < count; i++) { |
1384 | struct iwm_rx_data *data = &ring->data[i]; |
1385 | |
1386 | if (data->m != NULL((void *)0)) { |
1387 | bus_dmamap_sync(sc->sc_dmat, data->map, 0,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (data-> map), (0), (data->map->dm_mapsize), (0x02)) |
1388 | data->map->dm_mapsize, BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (data-> map), (0), (data->map->dm_mapsize), (0x02)); |
1389 | bus_dmamap_unload(sc->sc_dmat, data->map)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (data ->map)); |
1390 | m_freem(data->m); |
1391 | data->m = NULL((void *)0); |
1392 | } |
1393 | if (data->map != NULL((void *)0)) |
1394 | bus_dmamap_destroy(sc->sc_dmat, data->map)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (data ->map)); |
1395 | } |
1396 | } |
1397 | |
1398 | int |
1399 | iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid) |
1400 | { |
1401 | bus_addr_t paddr; |
1402 | bus_size_t size; |
1403 | int i, err; |
1404 | |
1405 | ring->qid = qid; |
1406 | ring->queued = 0; |
1407 | ring->cur = 0; |
1408 | ring->tail = 0; |
1409 | |
1410 | /* Allocate TX descriptors (256-byte aligned). */ |
1411 | size = IWM_TX_RING_COUNT256 * sizeof (struct iwm_tfd); |
1412 | err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256); |
1413 | if (err) { |
1414 | printf("%s: could not allocate TX ring DMA memory\n", |
1415 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
1416 | goto fail; |
1417 | } |
1418 | ring->desc = ring->desc_dma.vaddr; |
1419 | |
1420 | /* |
1421 | * There is no need to allocate DMA buffers for unused rings. |
1422 | * 7k/8k/9k hardware supports up to 31 Tx rings which is more |
1423 | * than we currently need. |
1424 | * |
1425 | * In DQA mode we use 1 command queue + 4 DQA mgmt/data queues. |
1426 | * The command is queue 0 (sc->txq[0]), and 4 mgmt/data frame queues |
1427 | * are sc->tqx[IWM_DQA_MIN_MGMT_QUEUE + ac], i.e. sc->txq[5:8], |
1428 | * in order to provide one queue per EDCA category. |
1429 | * Tx aggregation requires additional queues, one queue per TID for |
1430 | * which aggregation is enabled. We map TID 0-7 to sc->txq[10:17]. |
1431 | * |
1432 | * In non-DQA mode, we use rings 0 through 9 (0-3 are EDCA, 9 is cmd), |
1433 | * and Tx aggregation is not supported. |
1434 | * |
1435 | * Unfortunately, we cannot tell if DQA will be used until the |
1436 | * firmware gets loaded later, so just allocate sufficient rings |
1437 | * in order to satisfy both cases. |
1438 | */ |
1439 | if (qid > IWM_LAST_AGG_TX_QUEUE(10 + 8 - 1)) |
1440 | return 0; |
1441 | |
1442 | size = IWM_TX_RING_COUNT256 * sizeof(struct iwm_device_cmd); |
1443 | err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4); |
1444 | if (err) { |
1445 | printf("%s: could not allocate cmd DMA memory\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
1446 | goto fail; |
1447 | } |
1448 | ring->cmd = ring->cmd_dma.vaddr; |
1449 | |
1450 | paddr = ring->cmd_dma.paddr; |
1451 | for (i = 0; i < IWM_TX_RING_COUNT256; i++) { |
1452 | struct iwm_tx_data *data = &ring->data[i]; |
1453 | size_t mapsize; |
1454 | |
1455 | data->cmd_paddr = paddr; |
1456 | data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header) |
1457 | + offsetof(struct iwm_tx_cmd, scratch)__builtin_offsetof(struct iwm_tx_cmd, scratch); |
1458 | paddr += sizeof(struct iwm_device_cmd); |
1459 | |
1460 | /* FW commands may require more mapped space than packets. */ |
1461 | if (qid == IWM_CMD_QUEUE9 || qid == IWM_DQA_CMD_QUEUE0) |
1462 | mapsize = (sizeof(struct iwm_cmd_header) + |
1463 | IWM_MAX_CMD_PAYLOAD_SIZE((4096 - 4) - sizeof(struct iwm_cmd_header))); |
1464 | else |
1465 | mapsize = MCLBYTES(1 << 11); |
1466 | err = bus_dmamap_create(sc->sc_dmat, mapsize,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (mapsize ), (20 - 2), (mapsize), (0), (0x0001), (&data->map)) |
1467 | IWM_NUM_OF_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (mapsize ), (20 - 2), (mapsize), (0), (0x0001), (&data->map)) |
1468 | &data->map)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (mapsize ), (20 - 2), (mapsize), (0), (0x0001), (&data->map)); |
1469 | if (err) { |
1470 | printf("%s: could not create TX buf DMA map\n", |
1471 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
1472 | goto fail; |
1473 | } |
1474 | } |
1475 | KASSERT(paddr == ring->cmd_dma.paddr + size)((paddr == ring->cmd_dma.paddr + size) ? (void)0 : __assert ("diagnostic ", "/usr/src/sys/dev/pci/if_iwm.c", 1475, "paddr == ring->cmd_dma.paddr + size" )); |
1476 | return 0; |
1477 | |
1478 | fail: iwm_free_tx_ring(sc, ring); |
1479 | return err; |
1480 | } |
1481 | |
1482 | void |
1483 | iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring) |
1484 | { |
1485 | int i; |
1486 | |
1487 | for (i = 0; i < IWM_TX_RING_COUNT256; i++) { |
1488 | struct iwm_tx_data *data = &ring->data[i]; |
1489 | |
1490 | if (data->m != NULL((void *)0)) { |
1491 | bus_dmamap_sync(sc->sc_dmat, data->map, 0,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (data-> map), (0), (data->map->dm_mapsize), (0x08)) |
1492 | data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (data-> map), (0), (data->map->dm_mapsize), (0x08)); |
1493 | bus_dmamap_unload(sc->sc_dmat, data->map)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (data ->map)); |
1494 | m_freem(data->m); |
1495 | data->m = NULL((void *)0); |
1496 | } |
1497 | } |
1498 | /* Clear TX descriptors. */ |
1499 | memset(ring->desc, 0, ring->desc_dma.size)__builtin_memset((ring->desc), (0), (ring->desc_dma.size )); |
1500 | bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (ring-> desc_dma.map), (0), (ring->desc_dma.size), (0x04)) |
1501 | ring->desc_dma.size, BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (ring-> desc_dma.map), (0), (ring->desc_dma.size), (0x04)); |
1502 | sc->qfullmsk &= ~(1 << ring->qid); |
1503 | sc->qenablemsk &= ~(1 << ring->qid); |
1504 | /* 7000 family NICs are locked while commands are in progress. */ |
1505 | if (ring->qid == sc->cmdqid && ring->queued > 0) { |
1506 | if (sc->sc_device_family == IWM_DEVICE_FAMILY_70001) |
1507 | iwm_nic_unlock(sc); |
1508 | } |
1509 | ring->queued = 0; |
1510 | ring->cur = 0; |
1511 | ring->tail = 0; |
1512 | } |
1513 | |
1514 | void |
1515 | iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring) |
1516 | { |
1517 | int i; |
1518 | |
1519 | iwm_dma_contig_free(&ring->desc_dma); |
1520 | iwm_dma_contig_free(&ring->cmd_dma); |
1521 | |
1522 | for (i = 0; i < IWM_TX_RING_COUNT256; i++) { |
1523 | struct iwm_tx_data *data = &ring->data[i]; |
1524 | |
1525 | if (data->m != NULL((void *)0)) { |
1526 | bus_dmamap_sync(sc->sc_dmat, data->map, 0,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (data-> map), (0), (data->map->dm_mapsize), (0x08)) |
1527 | data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (data-> map), (0), (data->map->dm_mapsize), (0x08)); |
1528 | bus_dmamap_unload(sc->sc_dmat, data->map)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (data ->map)); |
1529 | m_freem(data->m); |
1530 | data->m = NULL((void *)0); |
1531 | } |
1532 | if (data->map != NULL((void *)0)) |
1533 | bus_dmamap_destroy(sc->sc_dmat, data->map)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (data ->map)); |
1534 | } |
1535 | } |
1536 | |
1537 | void |
1538 | iwm_enable_rfkill_int(struct iwm_softc *sc) |
1539 | { |
1540 | if (!sc->sc_msix) { |
1541 | sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL(1 << 7); |
1542 | IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x00c))), ( (sc->sc_intmask)))); |
1543 | } else { |
1544 | IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x804))), ((sc->sc_fh_init_mask)))) |
1545 | sc->sc_fh_init_mask)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x804))), ((sc->sc_fh_init_mask)))); |
1546 | IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x80C))), ((~IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL)))) |
1547 | ~IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x80C))), ((~IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL)))); |
1548 | sc->sc_hw_mask = IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL; |
1549 | } |
1550 | |
1551 | if (sc->sc_device_family >= IWM_DEVICE_FAMILY_90003) |
1552 | IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x024))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x024))))) | ((0x04000000)))))) |
1553 | IWM_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x024))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x024))))) | ((0x04000000)))))); |
1554 | } |
1555 | |
1556 | int |
1557 | iwm_check_rfkill(struct iwm_softc *sc) |
1558 | { |
1559 | uint32_t v; |
1560 | int rv; |
1561 | |
1562 | /* |
1563 | * "documentation" is not really helpful here: |
1564 | * 27: HW_RF_KILL_SW |
1565 | * Indicates state of (platform's) hardware RF-Kill switch |
1566 | * |
1567 | * But apparently when it's off, it's on ... |
1568 | */ |
1569 | v = IWM_READ(sc, IWM_CSR_GP_CNTRL)(((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x024))))); |
1570 | rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW(0x08000000)) == 0; |
1571 | if (rv) { |
1572 | sc->sc_flags |= IWM_FLAG_RFKILL0x02; |
1573 | } else { |
1574 | sc->sc_flags &= ~IWM_FLAG_RFKILL0x02; |
1575 | } |
1576 | |
1577 | return rv; |
1578 | } |
1579 | |
1580 | void |
1581 | iwm_enable_interrupts(struct iwm_softc *sc) |
1582 | { |
1583 | if (!sc->sc_msix) { |
1584 | sc->sc_intmask = IWM_CSR_INI_SET_MASK((1U << 31) | (1 << 29) | (1 << 27) | (1 << 25) | (1 << 7) | (1 << 3) | (1 << 1) | (1 << 0) | (1 << 28)); |
1585 | IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x00c))), ( (sc->sc_intmask)))); |
1586 | } else { |
1587 | /* |
1588 | * fh/hw_mask keeps all the unmasked causes. |
1589 | * Unlike msi, in msix cause is enabled when it is unset. |
1590 | */ |
1591 | sc->sc_hw_mask = sc->sc_hw_init_mask; |
1592 | sc->sc_fh_mask = sc->sc_fh_init_mask; |
1593 | IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x804))), ((~sc->sc_fh_mask)))) |
1594 | ~sc->sc_fh_mask)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x804))), ((~sc->sc_fh_mask)))); |
1595 | IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x80C))), ((~sc->sc_hw_mask)))) |
1596 | ~sc->sc_hw_mask)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x80C))), ((~sc->sc_hw_mask)))); |
1597 | } |
1598 | } |
1599 | |
1600 | void |
1601 | iwm_enable_fwload_interrupt(struct iwm_softc *sc) |
1602 | { |
1603 | if (!sc->sc_msix) { |
1604 | sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX(1 << 27); |
1605 | IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x00c))), ( (sc->sc_intmask)))); |
1606 | } else { |
1607 | IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x80C))), ((sc->sc_hw_init_mask)))) |
1608 | sc->sc_hw_init_mask)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x80C))), ((sc->sc_hw_init_mask)))); |
1609 | IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x804))), ((~IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM)))) |
1610 | ~IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x804))), ((~IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM)))); |
1611 | sc->sc_fh_mask = IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM; |
1612 | } |
1613 | } |
1614 | |
1615 | void |
1616 | iwm_restore_interrupts(struct iwm_softc *sc) |
1617 | { |
1618 | IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x00c))), ( (sc->sc_intmask)))); |
1619 | } |
1620 | |
1621 | void |
1622 | iwm_disable_interrupts(struct iwm_softc *sc) |
1623 | { |
1624 | if (!sc->sc_msix) { |
1625 | IWM_WRITE(sc, IWM_CSR_INT_MASK, 0)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x00c))), ( (0)))); |
1626 | |
1627 | /* acknowledge all interrupts */ |
1628 | IWM_WRITE(sc, IWM_CSR_INT, ~0)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x008))), ( (~0)))); |
1629 | IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x010))), ( (~0)))); |
1630 | } else { |
1631 | IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x804))), ((sc->sc_fh_init_mask)))) |
1632 | sc->sc_fh_init_mask)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x804))), ((sc->sc_fh_init_mask)))); |
1633 | IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x80C))), ((sc->sc_hw_init_mask)))) |
1634 | sc->sc_hw_init_mask)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x80C))), ((sc->sc_hw_init_mask)))); |
1635 | } |
1636 | } |
1637 | |
1638 | void |
1639 | iwm_ict_reset(struct iwm_softc *sc) |
1640 | { |
1641 | iwm_disable_interrupts(sc); |
1642 | |
1643 | memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE)__builtin_memset((sc->ict_dma.vaddr), (0), (4096)); |
1644 | sc->ict_cur = 0; |
1645 | |
1646 | /* Set physical address of ICT (4KB aligned). */ |
1647 | IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x0A0))), ( ((1U << 31) | (1 << 27) | (1 << 28) | sc-> ict_dma.paddr >> 12)))) |
1648 | IWM_CSR_DRAM_INT_TBL_ENABLE(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x0A0))), ( ((1U << 31) | (1 << 27) | (1 << 28) | sc-> ict_dma.paddr >> 12)))) |
1649 | | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x0A0))), ( ((1U << 31) | (1 << 27) | (1 << 28) | sc-> ict_dma.paddr >> 12)))) |
1650 | | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x0A0))), ( ((1U << 31) | (1 << 27) | (1 << 28) | sc-> ict_dma.paddr >> 12)))) |
1651 | | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x0A0))), ( ((1U << 31) | (1 << 27) | (1 << 28) | sc-> ict_dma.paddr >> 12)))); |
1652 | |
1653 | /* Switch to ICT interrupt mode in driver. */ |
1654 | sc->sc_flags |= IWM_FLAG_USE_ICT0x01; |
1655 | |
1656 | IWM_WRITE(sc, IWM_CSR_INT, ~0)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x008))), ( (~0)))); |
1657 | iwm_enable_interrupts(sc); |
1658 | } |
1659 | |
1660 | #define IWM_HW_READY_TIMEOUT 50 |
1661 | int |
1662 | iwm_set_hw_ready(struct iwm_softc *sc) |
1663 | { |
1664 | int ready; |
1665 | |
1666 | IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x000))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x000))))) | ((0x00400000)))))) |
1667 | IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x000))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x000))))) | ((0x00400000)))))); |
1668 | |
1669 | ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG(0x000), |
1670 | IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY(0x00400000), |
1671 | IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY(0x00400000), |
1672 | IWM_HW_READY_TIMEOUT); |
1673 | if (ready) |
1674 | IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x088))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x088))))) | (0x20))))) |
1675 | IWM_CSR_MBOX_SET_REG_OS_ALIVE)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x088))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x088))))) | (0x20))))); |
1676 | |
1677 | return ready; |
1678 | } |
1679 | #undef IWM_HW_READY_TIMEOUT |
1680 | |
1681 | int |
1682 | iwm_prepare_card_hw(struct iwm_softc *sc) |
1683 | { |
1684 | int t = 0; |
1685 | int ntries; |
1686 | |
1687 | if (iwm_set_hw_ready(sc)) |
1688 | return 0; |
1689 | |
1690 | IWM_SETBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x250))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x250))))) | ((0x80000000)))))) |
1691 | IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x250))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x250))))) | ((0x80000000)))))); |
1692 | DELAY(1000)(*delay_func)(1000); |
1693 | |
1694 | for (ntries = 0; ntries < 10; ntries++) { |
1695 | /* If HW is not ready, prepare the conditions to check again */ |
1696 | IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x000))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x000))))) | ((0x08000000)))))) |
1697 | IWM_CSR_HW_IF_CONFIG_REG_PREPARE)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x000))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x000))))) | ((0x08000000)))))); |
1698 | |
1699 | do { |
1700 | if (iwm_set_hw_ready(sc)) |
1701 | return 0; |
1702 | DELAY(200)(*delay_func)(200); |
1703 | t += 200; |
1704 | } while (t < 150000); |
1705 | DELAY(25000)(*delay_func)(25000); |
1706 | } |
1707 | |
1708 | return ETIMEDOUT60; |
1709 | } |
1710 | |
1711 | void |
1712 | iwm_apm_config(struct iwm_softc *sc) |
1713 | { |
1714 | pcireg_t lctl, cap; |
1715 | |
1716 | /* |
1717 | * HW bug W/A for instability in PCIe bus L0S->L1 transition. |
1718 | * Check if BIOS (or OS) enabled L1-ASPM on this device. |
1719 | * If so (likely), disable L0S, so device moves directly L0->L1; |
1720 | * costs negligible amount of power savings. |
1721 | * If not (unlikely), enable L0S, so there is at least some |
1722 | * power savings, even without L1. |
1723 | */ |
1724 | lctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag, |
1725 | sc->sc_cap_off + PCI_PCIE_LCSR0x10); |
1726 | if (lctl & PCI_PCIE_LCSR_ASPM_L10x00000002) { |
1727 | IWM_SETBITS(sc, IWM_CSR_GIO_REG,(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x03C))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x03C))))) | ((0x00000002)))))) |
1728 | IWM_CSR_GIO_REG_VAL_L0S_ENABLED)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x03C))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x03C))))) | ((0x00000002)))))); |
1729 | } else { |
1730 | IWM_CLRBITS(sc, IWM_CSR_GIO_REG,(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x03C))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x03C))))) & ~((0x00000002)))))) |
1731 | IWM_CSR_GIO_REG_VAL_L0S_ENABLED)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x03C))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x03C))))) & ~((0x00000002)))))); |
1732 | } |
1733 | |
1734 | cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag, |
1735 | sc->sc_cap_off + PCI_PCIE_DCSR20x28); |
1736 | sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN0x00000400) ? 1 : 0; |
1737 | DPRINTF(("%s: L1 %sabled - LTR %sabled\n",do { ; } while (0) |
1738 | DEVNAME(sc),do { ; } while (0) |
1739 | (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",do { ; } while (0) |
1740 | sc->sc_ltr_enabled ? "En" : "Dis"))do { ; } while (0); |
1741 | } |
1742 | |
1743 | /* |
1744 | * Start up NIC's basic functionality after it has been reset |
1745 | * e.g. after platform boot or shutdown. |
1746 | * NOTE: This does not load uCode nor start the embedded processor |
1747 | */ |
1748 | int |
1749 | iwm_apm_init(struct iwm_softc *sc) |
1750 | { |
1751 | int err = 0; |
1752 | |
1753 | /* Disable L0S exit timer (platform NMI workaround) */ |
1754 | if (sc->sc_device_family < IWM_DEVICE_FAMILY_80002) |
1755 | IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x100))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x100))))) | ((0x20000000)))))) |
1756 | IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x100))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x100))))) | ((0x20000000)))))); |
1757 | |
1758 | /* |
1759 | * Disable L0s without affecting L1; |
1760 | * don't wait for ICH L0s (ICH bug W/A) |
1761 | */ |
1762 | IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x100))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x100))))) | ((0x00800000)))))) |
1763 | IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x100))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x100))))) | ((0x00800000)))))); |
1764 | |
1765 | /* Set FH wait threshold to maximum (HW error during stress W/A) */ |
1766 | IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x240))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x240))))) | ((0xFFFF0000)))))); |
1767 | |
1768 | /* |
1769 | * Enable HAP INTA (interrupt from management bus) to |
1770 | * wake device's PCI Express link L1a -> L0s |
1771 | */ |
1772 | IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x000))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x000))))) | ((0x00080000)))))) |
1773 | IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x000))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x000))))) | ((0x00080000)))))); |
1774 | |
1775 | iwm_apm_config(sc); |
1776 | |
1777 | #if 0 /* not for 7k/8k */ |
1778 | /* Configure analog phase-lock-loop before activating to D0A */ |
1779 | if (trans->cfg->base_params->pll_cfg_val) |
1780 | IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,(((trans)->sc_st)->write_4(((trans)->sc_sh), (((0x20c ))), (((((trans)->sc_st)->read_4(((trans)->sc_sh), ( ((0x20c))))) | (trans->cfg->base_params->pll_cfg_val ))))) |
1781 | trans->cfg->base_params->pll_cfg_val)(((trans)->sc_st)->write_4(((trans)->sc_sh), (((0x20c ))), (((((trans)->sc_st)->read_4(((trans)->sc_sh), ( ((0x20c))))) | (trans->cfg->base_params->pll_cfg_val ))))); |
1782 | #endif |
1783 | |
1784 | /* |
1785 | * Set "initialization complete" bit to move adapter from |
1786 | * D0U* --> D0A* (powered-up active) state. |
1787 | */ |
1788 | IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x024))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x024))))) | ((0x00000004)))))); |
1789 | |
1790 | /* |
1791 | * Wait for clock stabilization; once stabilized, access to |
1792 | * device-internal resources is supported, e.g. iwm_write_prph() |
1793 | * and accesses to uCode SRAM. |
1794 | */ |
1795 | if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL(0x024), |
1796 | IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY(0x00000001), |
1797 | IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY(0x00000001), 25000)) { |
1798 | printf("%s: timeout waiting for clock stabilization\n", |
1799 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
1800 | err = ETIMEDOUT60; |
1801 | goto out; |
1802 | } |
1803 | |
1804 | if (sc->host_interrupt_operation_mode) { |
1805 | /* |
1806 | * This is a bit of an abuse - This is needed for 7260 / 3160 |
1807 | * only check host_interrupt_operation_mode even if this is |
1808 | * not related to host_interrupt_operation_mode. |
1809 | * |
1810 | * Enable the oscillator to count wake up time for L1 exit. This |
1811 | * consumes slightly more power (100uA) - but allows to be sure |
1812 | * that we wake up from L1 on time. |
1813 | * |
1814 | * This looks weird: read twice the same register, discard the |
1815 | * value, set a bit, and yet again, read that same register |
1816 | * just to discard the value. But that's the way the hardware |
1817 | * seems to like it. |
1818 | */ |
1819 | if (iwm_nic_lock(sc)) { |
1820 | iwm_read_prph(sc, IWM_OSC_CLK(0xa04068)); |
1821 | iwm_read_prph(sc, IWM_OSC_CLK(0xa04068)); |
1822 | iwm_nic_unlock(sc); |
1823 | } |
1824 | err = iwm_set_bits_prph(sc, IWM_OSC_CLK(0xa04068), |
1825 | IWM_OSC_CLK_FORCE_CONTROL(0x8)); |
1826 | if (err) |
1827 | goto out; |
1828 | if (iwm_nic_lock(sc)) { |
1829 | iwm_read_prph(sc, IWM_OSC_CLK(0xa04068)); |
1830 | iwm_read_prph(sc, IWM_OSC_CLK(0xa04068)); |
1831 | iwm_nic_unlock(sc); |
1832 | } |
1833 | } |
1834 | |
1835 | /* |
1836 | * Enable DMA clock and wait for it to stabilize. |
1837 | * |
1838 | * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits |
1839 | * do not disable clocks. This preserves any hardware bits already |
1840 | * set by default in "CLK_CTRL_REG" after reset. |
1841 | */ |
1842 | if (sc->sc_device_family == IWM_DEVICE_FAMILY_70001) { |
1843 | if (iwm_nic_lock(sc)) { |
1844 | iwm_write_prph(sc, IWM_APMG_CLK_EN_REG(((0x00000) + 0x3000) + 0x0004), |
1845 | IWM_APMG_CLK_VAL_DMA_CLK_RQT(0x00000200)); |
1846 | iwm_nic_unlock(sc); |
1847 | } |
1848 | DELAY(20)(*delay_func)(20); |
1849 | |
1850 | /* Disable L1-Active */ |
1851 | err = iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG(((0x00000) + 0x3000) + 0x0010), |
1852 | IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS(0x00000800)); |
1853 | if (err) |
1854 | goto out; |
1855 | |
1856 | /* Clear the interrupt in APMG if the NIC is in RFKILL */ |
1857 | if (iwm_nic_lock(sc)) { |
1858 | iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG(((0x00000) + 0x3000) + 0x001c), |
1859 | IWM_APMG_RTC_INT_STT_RFKILL(0x10000000)); |
1860 | iwm_nic_unlock(sc); |
1861 | } |
1862 | } |
1863 | out: |
1864 | if (err) |
1865 | printf("%s: apm init error %d\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), err); |
1866 | return err; |
1867 | } |
1868 | |
1869 | void |
1870 | iwm_apm_stop(struct iwm_softc *sc) |
1871 | { |
1872 | IWM_SETBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x250))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x250))))) | ((0x80000000)))))) |
1873 | IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x250))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x250))))) | ((0x80000000)))))); |
1874 | IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x000))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x000))))) | ((0x08000000) | (0x10000000)))))) |
1875 | IWM_CSR_HW_IF_CONFIG_REG_PREPARE |(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x000))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x000))))) | ((0x08000000) | (0x10000000)))))) |
1876 | IWM_CSR_HW_IF_CONFIG_REG_ENABLE_PME)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x000))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x000))))) | ((0x08000000) | (0x10000000)))))); |
1877 | DELAY(1000)(*delay_func)(1000); |
1878 | IWM_CLRBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x250))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x250))))) & ~((0x80000000)))))) |
1879 | IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x250))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x250))))) & ~((0x80000000)))))); |
1880 | DELAY(5000)(*delay_func)(5000); |
1881 | |
1882 | /* stop device's busmaster DMA activity */ |
1883 | IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x020))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x020))))) | ((0x00000200)))))); |
1884 | |
1885 | if (!iwm_poll_bit(sc, IWM_CSR_RESET(0x020), |
1886 | IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED(0x00000100), |
1887 | IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED(0x00000100), 100)) |
1888 | printf("%s: timeout waiting for master\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
1889 | |
1890 | /* |
1891 | * Clear "initialization complete" bit to move adapter from |
1892 | * D0A* (powered-up Active) --> D0U* (Uninitialized) state. |
1893 | */ |
1894 | IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x024))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x024))))) & ~((0x00000004)))))) |
1895 | IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x024))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x024))))) & ~((0x00000004)))))); |
1896 | } |
1897 | |
1898 | void |
1899 | iwm_init_msix_hw(struct iwm_softc *sc) |
1900 | { |
1901 | iwm_conf_msix_hw(sc, 0); |
1902 | |
1903 | if (!sc->sc_msix) |
1904 | return; |
1905 | |
1906 | sc->sc_fh_init_mask = ~IWM_READ(sc, IWM_CSR_MSIX_FH_INT_MASK_AD)(((sc)->sc_st)->read_4(((sc)->sc_sh), ((((0x2000) + 0x804 ))))); |
1907 | sc->sc_fh_mask = sc->sc_fh_init_mask; |
1908 | sc->sc_hw_init_mask = ~IWM_READ(sc, IWM_CSR_MSIX_HW_INT_MASK_AD)(((sc)->sc_st)->read_4(((sc)->sc_sh), ((((0x2000) + 0x80C ))))); |
1909 | sc->sc_hw_mask = sc->sc_hw_init_mask; |
1910 | } |
1911 | |
1912 | void |
1913 | iwm_conf_msix_hw(struct iwm_softc *sc, int stopped) |
1914 | { |
1915 | int vector = 0; |
1916 | |
1917 | if (!sc->sc_msix) { |
1918 | /* Newer chips default to MSIX. */ |
1919 | if (sc->sc_mqrx_supported && !stopped && iwm_nic_lock(sc)) { |
1920 | iwm_write_prph(sc, IWM_UREG_CHICK0xa05c00, |
1921 | IWM_UREG_CHICK_MSI_ENABLE(1 << 24)); |
1922 | iwm_nic_unlock(sc); |
1923 | } |
1924 | return; |
1925 | } |
1926 | |
1927 | if (!stopped && iwm_nic_lock(sc)) { |
1928 | iwm_write_prph(sc, IWM_UREG_CHICK0xa05c00, IWM_UREG_CHICK_MSIX_ENABLE(1 << 25)); |
1929 | iwm_nic_unlock(sc); |
1930 | } |
1931 | |
1932 | /* Disable all interrupts */ |
1933 | IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD, ~0)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x804))), ((~0)))); |
1934 | IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD, ~0)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x80C))), ((~0)))); |
1935 | |
1936 | /* Map fallback-queue (command/mgmt) to a single vector */ |
1937 | IWM_WRITE_1(sc, IWM_CSR_MSIX_RX_IVAR(0),(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) + 0x880) + (0)))), ((vector | (1 << 7))))) |
1938 | vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE)(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) + 0x880) + (0)))), ((vector | (1 << 7))))); |
1939 | /* Map RSS queue (data) to the same vector */ |
1940 | IWM_WRITE_1(sc, IWM_CSR_MSIX_RX_IVAR(1),(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) + 0x880) + (1)))), ((vector | (1 << 7))))) |
1941 | vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE)(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) + 0x880) + (1)))), ((vector | (1 << 7))))); |
1942 | |
1943 | /* Enable the RX queues cause interrupts */ |
1944 | IWM_CLRBITS(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x804))), (((((sc)->sc_st)->read_4(((sc)->sc_sh), ( (((0x2000) + 0x804))))) & ~(IWM_MSIX_FH_INT_CAUSES_Q0 | IWM_MSIX_FH_INT_CAUSES_Q1 ))))) |
1945 | IWM_MSIX_FH_INT_CAUSES_Q0 | IWM_MSIX_FH_INT_CAUSES_Q1)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x804))), (((((sc)->sc_st)->read_4(((sc)->sc_sh), ( (((0x2000) + 0x804))))) & ~(IWM_MSIX_FH_INT_CAUSES_Q0 | IWM_MSIX_FH_INT_CAUSES_Q1 ))))); |
1946 | |
1947 | /* Map non-RX causes to the same vector */ |
1948 | IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_D2S_CH0_NUM),(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) + 0x890) + (IWM_MSIX_IVAR_CAUSE_D2S_CH0_NUM)))), ((vector | (1 << 7))))) |
1949 | vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE)(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) + 0x890) + (IWM_MSIX_IVAR_CAUSE_D2S_CH0_NUM)))), ((vector | (1 << 7))))); |
1950 | IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_D2S_CH1_NUM),(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) + 0x890) + (IWM_MSIX_IVAR_CAUSE_D2S_CH1_NUM)))), ((vector | (1 << 7))))) |
1951 | vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE)(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) + 0x890) + (IWM_MSIX_IVAR_CAUSE_D2S_CH1_NUM)))), ((vector | (1 << 7))))); |
1952 | IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_S2D),(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) + 0x890) + (IWM_MSIX_IVAR_CAUSE_S2D)))), ((vector | (1 << 7))))) |
1953 | vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE)(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) + 0x890) + (IWM_MSIX_IVAR_CAUSE_S2D)))), ((vector | (1 << 7))))); |
1954 | IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_FH_ERR),(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) + 0x890) + (IWM_MSIX_IVAR_CAUSE_FH_ERR)))), ((vector | (1 << 7))))) |
1955 | vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE)(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) + 0x890) + (IWM_MSIX_IVAR_CAUSE_FH_ERR)))), ((vector | (1 << 7))))); |
1956 | IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_ALIVE),(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) + 0x890) + (IWM_MSIX_IVAR_CAUSE_REG_ALIVE)))), ((vector | (1 << 7))))) |
1957 | vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE)(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) + 0x890) + (IWM_MSIX_IVAR_CAUSE_REG_ALIVE)))), ((vector | (1 << 7))))); |
1958 | IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_WAKEUP),(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) + 0x890) + (IWM_MSIX_IVAR_CAUSE_REG_WAKEUP)))), ((vector | (1 << 7))))) |
1959 | vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE)(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) + 0x890) + (IWM_MSIX_IVAR_CAUSE_REG_WAKEUP)))), ((vector | (1 << 7))))); |
1960 | IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_IML),(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) + 0x890) + (IWM_MSIX_IVAR_CAUSE_REG_IML)))), ((vector | (1 << 7))))) |
1961 | vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE)(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) + 0x890) + (IWM_MSIX_IVAR_CAUSE_REG_IML)))), ((vector | (1 << 7))))); |
1962 | IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_CT_KILL),(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) + 0x890) + (IWM_MSIX_IVAR_CAUSE_REG_CT_KILL)))), ((vector | (1 << 7))))) |
1963 | vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE)(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) + 0x890) + (IWM_MSIX_IVAR_CAUSE_REG_CT_KILL)))), ((vector | (1 << 7))))); |
1964 | IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_RF_KILL),(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) + 0x890) + (IWM_MSIX_IVAR_CAUSE_REG_RF_KILL)))), ((vector | (1 << 7))))) |
1965 | vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE)(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) + 0x890) + (IWM_MSIX_IVAR_CAUSE_REG_RF_KILL)))), ((vector | (1 << 7))))); |
1966 | IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_PERIODIC),(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) + 0x890) + (IWM_MSIX_IVAR_CAUSE_REG_PERIODIC)))), ((vector | ( 1 << 7))))) |
1967 | vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE)(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) + 0x890) + (IWM_MSIX_IVAR_CAUSE_REG_PERIODIC)))), ((vector | ( 1 << 7))))); |
1968 | IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_SW_ERR),(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) + 0x890) + (IWM_MSIX_IVAR_CAUSE_REG_SW_ERR)))), ((vector | (1 << 7))))) |
1969 | vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE)(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) + 0x890) + (IWM_MSIX_IVAR_CAUSE_REG_SW_ERR)))), ((vector | (1 << 7))))); |
1970 | IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_SCD),(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) + 0x890) + (IWM_MSIX_IVAR_CAUSE_REG_SCD)))), ((vector | (1 << 7))))) |
1971 | vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE)(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) + 0x890) + (IWM_MSIX_IVAR_CAUSE_REG_SCD)))), ((vector | (1 << 7))))); |
1972 | IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_FH_TX),(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) + 0x890) + (IWM_MSIX_IVAR_CAUSE_REG_FH_TX)))), ((vector | (1 << 7))))) |
1973 | vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE)(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) + 0x890) + (IWM_MSIX_IVAR_CAUSE_REG_FH_TX)))), ((vector | (1 << 7))))); |
1974 | IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_HW_ERR),(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) + 0x890) + (IWM_MSIX_IVAR_CAUSE_REG_HW_ERR)))), ((vector | (1 << 7))))) |
1975 | vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE)(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) + 0x890) + (IWM_MSIX_IVAR_CAUSE_REG_HW_ERR)))), ((vector | (1 << 7))))); |
1976 | IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_HAP),(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) + 0x890) + (IWM_MSIX_IVAR_CAUSE_REG_HAP)))), ((vector | (1 << 7))))) |
1977 | vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE)(((sc)->sc_st)->write_1(((sc)->sc_sh), (((((0x2000) + 0x890) + (IWM_MSIX_IVAR_CAUSE_REG_HAP)))), ((vector | (1 << 7))))); |
1978 | |
1979 | /* Enable non-RX causes interrupts */ |
1980 | IWM_CLRBITS(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x804))), (((((sc)->sc_st)->read_4(((sc)->sc_sh), ( (((0x2000) + 0x804))))) & ~(IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM | IWM_MSIX_FH_INT_CAUSES_D2S_CH1_NUM | IWM_MSIX_FH_INT_CAUSES_S2D | IWM_MSIX_FH_INT_CAUSES_FH_ERR))))) |
1981 | IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x804))), (((((sc)->sc_st)->read_4(((sc)->sc_sh), ( (((0x2000) + 0x804))))) & ~(IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM | IWM_MSIX_FH_INT_CAUSES_D2S_CH1_NUM | IWM_MSIX_FH_INT_CAUSES_S2D | IWM_MSIX_FH_INT_CAUSES_FH_ERR))))) |
1982 | IWM_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x804))), (((((sc)->sc_st)->read_4(((sc)->sc_sh), ( (((0x2000) + 0x804))))) & ~(IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM | IWM_MSIX_FH_INT_CAUSES_D2S_CH1_NUM | IWM_MSIX_FH_INT_CAUSES_S2D | IWM_MSIX_FH_INT_CAUSES_FH_ERR))))) |
1983 | IWM_MSIX_FH_INT_CAUSES_S2D |(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x804))), (((((sc)->sc_st)->read_4(((sc)->sc_sh), ( (((0x2000) + 0x804))))) & ~(IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM | IWM_MSIX_FH_INT_CAUSES_D2S_CH1_NUM | IWM_MSIX_FH_INT_CAUSES_S2D | IWM_MSIX_FH_INT_CAUSES_FH_ERR))))) |
1984 | IWM_MSIX_FH_INT_CAUSES_FH_ERR)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x804))), (((((sc)->sc_st)->read_4(((sc)->sc_sh), ( (((0x2000) + 0x804))))) & ~(IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM | IWM_MSIX_FH_INT_CAUSES_D2S_CH1_NUM | IWM_MSIX_FH_INT_CAUSES_S2D | IWM_MSIX_FH_INT_CAUSES_FH_ERR))))); |
1985 | IWM_CLRBITS(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x80C))), (((((sc)->sc_st)->read_4(((sc)->sc_sh), ( (((0x2000) + 0x80C))))) & ~(IWM_MSIX_HW_INT_CAUSES_REG_ALIVE | IWM_MSIX_HW_INT_CAUSES_REG_WAKEUP | IWM_MSIX_HW_INT_CAUSES_REG_IML | IWM_MSIX_HW_INT_CAUSES_REG_CT_KILL | IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL | IWM_MSIX_HW_INT_CAUSES_REG_PERIODIC | IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR | IWM_MSIX_HW_INT_CAUSES_REG_SCD | IWM_MSIX_HW_INT_CAUSES_REG_FH_TX | IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR | IWM_MSIX_HW_INT_CAUSES_REG_HAP ))))) |
1986 | IWM_MSIX_HW_INT_CAUSES_REG_ALIVE |(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x80C))), (((((sc)->sc_st)->read_4(((sc)->sc_sh), ( (((0x2000) + 0x80C))))) & ~(IWM_MSIX_HW_INT_CAUSES_REG_ALIVE | IWM_MSIX_HW_INT_CAUSES_REG_WAKEUP | IWM_MSIX_HW_INT_CAUSES_REG_IML | IWM_MSIX_HW_INT_CAUSES_REG_CT_KILL | IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL | IWM_MSIX_HW_INT_CAUSES_REG_PERIODIC | IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR | IWM_MSIX_HW_INT_CAUSES_REG_SCD | IWM_MSIX_HW_INT_CAUSES_REG_FH_TX | IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR | IWM_MSIX_HW_INT_CAUSES_REG_HAP ))))) |
1987 | IWM_MSIX_HW_INT_CAUSES_REG_WAKEUP |(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x80C))), (((((sc)->sc_st)->read_4(((sc)->sc_sh), ( (((0x2000) + 0x80C))))) & ~(IWM_MSIX_HW_INT_CAUSES_REG_ALIVE | IWM_MSIX_HW_INT_CAUSES_REG_WAKEUP | IWM_MSIX_HW_INT_CAUSES_REG_IML | IWM_MSIX_HW_INT_CAUSES_REG_CT_KILL | IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL | IWM_MSIX_HW_INT_CAUSES_REG_PERIODIC | IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR | IWM_MSIX_HW_INT_CAUSES_REG_SCD | IWM_MSIX_HW_INT_CAUSES_REG_FH_TX | IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR | IWM_MSIX_HW_INT_CAUSES_REG_HAP ))))) |
1988 | IWM_MSIX_HW_INT_CAUSES_REG_IML |(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x80C))), (((((sc)->sc_st)->read_4(((sc)->sc_sh), ( (((0x2000) + 0x80C))))) & ~(IWM_MSIX_HW_INT_CAUSES_REG_ALIVE | IWM_MSIX_HW_INT_CAUSES_REG_WAKEUP | IWM_MSIX_HW_INT_CAUSES_REG_IML | IWM_MSIX_HW_INT_CAUSES_REG_CT_KILL | IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL | IWM_MSIX_HW_INT_CAUSES_REG_PERIODIC | IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR | IWM_MSIX_HW_INT_CAUSES_REG_SCD | IWM_MSIX_HW_INT_CAUSES_REG_FH_TX | IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR | IWM_MSIX_HW_INT_CAUSES_REG_HAP ))))) |
1989 | IWM_MSIX_HW_INT_CAUSES_REG_CT_KILL |(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x80C))), (((((sc)->sc_st)->read_4(((sc)->sc_sh), ( (((0x2000) + 0x80C))))) & ~(IWM_MSIX_HW_INT_CAUSES_REG_ALIVE | IWM_MSIX_HW_INT_CAUSES_REG_WAKEUP | IWM_MSIX_HW_INT_CAUSES_REG_IML | IWM_MSIX_HW_INT_CAUSES_REG_CT_KILL | IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL | IWM_MSIX_HW_INT_CAUSES_REG_PERIODIC | IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR | IWM_MSIX_HW_INT_CAUSES_REG_SCD | IWM_MSIX_HW_INT_CAUSES_REG_FH_TX | IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR | IWM_MSIX_HW_INT_CAUSES_REG_HAP ))))) |
1990 | IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL |(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x80C))), (((((sc)->sc_st)->read_4(((sc)->sc_sh), ( (((0x2000) + 0x80C))))) & ~(IWM_MSIX_HW_INT_CAUSES_REG_ALIVE | IWM_MSIX_HW_INT_CAUSES_REG_WAKEUP | IWM_MSIX_HW_INT_CAUSES_REG_IML | IWM_MSIX_HW_INT_CAUSES_REG_CT_KILL | IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL | IWM_MSIX_HW_INT_CAUSES_REG_PERIODIC | IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR | IWM_MSIX_HW_INT_CAUSES_REG_SCD | IWM_MSIX_HW_INT_CAUSES_REG_FH_TX | IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR | IWM_MSIX_HW_INT_CAUSES_REG_HAP ))))) |
1991 | IWM_MSIX_HW_INT_CAUSES_REG_PERIODIC |(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x80C))), (((((sc)->sc_st)->read_4(((sc)->sc_sh), ( (((0x2000) + 0x80C))))) & ~(IWM_MSIX_HW_INT_CAUSES_REG_ALIVE | IWM_MSIX_HW_INT_CAUSES_REG_WAKEUP | IWM_MSIX_HW_INT_CAUSES_REG_IML | IWM_MSIX_HW_INT_CAUSES_REG_CT_KILL | IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL | IWM_MSIX_HW_INT_CAUSES_REG_PERIODIC | IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR | IWM_MSIX_HW_INT_CAUSES_REG_SCD | IWM_MSIX_HW_INT_CAUSES_REG_FH_TX | IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR | IWM_MSIX_HW_INT_CAUSES_REG_HAP ))))) |
1992 | IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR |(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x80C))), (((((sc)->sc_st)->read_4(((sc)->sc_sh), ( (((0x2000) + 0x80C))))) & ~(IWM_MSIX_HW_INT_CAUSES_REG_ALIVE | IWM_MSIX_HW_INT_CAUSES_REG_WAKEUP | IWM_MSIX_HW_INT_CAUSES_REG_IML | IWM_MSIX_HW_INT_CAUSES_REG_CT_KILL | IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL | IWM_MSIX_HW_INT_CAUSES_REG_PERIODIC | IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR | IWM_MSIX_HW_INT_CAUSES_REG_SCD | IWM_MSIX_HW_INT_CAUSES_REG_FH_TX | IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR | IWM_MSIX_HW_INT_CAUSES_REG_HAP ))))) |
1993 | IWM_MSIX_HW_INT_CAUSES_REG_SCD |(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x80C))), (((((sc)->sc_st)->read_4(((sc)->sc_sh), ( (((0x2000) + 0x80C))))) & ~(IWM_MSIX_HW_INT_CAUSES_REG_ALIVE | IWM_MSIX_HW_INT_CAUSES_REG_WAKEUP | IWM_MSIX_HW_INT_CAUSES_REG_IML | IWM_MSIX_HW_INT_CAUSES_REG_CT_KILL | IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL | IWM_MSIX_HW_INT_CAUSES_REG_PERIODIC | IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR | IWM_MSIX_HW_INT_CAUSES_REG_SCD | IWM_MSIX_HW_INT_CAUSES_REG_FH_TX | IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR | IWM_MSIX_HW_INT_CAUSES_REG_HAP ))))) |
1994 | IWM_MSIX_HW_INT_CAUSES_REG_FH_TX |(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x80C))), (((((sc)->sc_st)->read_4(((sc)->sc_sh), ( (((0x2000) + 0x80C))))) & ~(IWM_MSIX_HW_INT_CAUSES_REG_ALIVE | IWM_MSIX_HW_INT_CAUSES_REG_WAKEUP | IWM_MSIX_HW_INT_CAUSES_REG_IML | IWM_MSIX_HW_INT_CAUSES_REG_CT_KILL | IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL | IWM_MSIX_HW_INT_CAUSES_REG_PERIODIC | IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR | IWM_MSIX_HW_INT_CAUSES_REG_SCD | IWM_MSIX_HW_INT_CAUSES_REG_FH_TX | IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR | IWM_MSIX_HW_INT_CAUSES_REG_HAP ))))) |
1995 | IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR |(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x80C))), (((((sc)->sc_st)->read_4(((sc)->sc_sh), ( (((0x2000) + 0x80C))))) & ~(IWM_MSIX_HW_INT_CAUSES_REG_ALIVE | IWM_MSIX_HW_INT_CAUSES_REG_WAKEUP | IWM_MSIX_HW_INT_CAUSES_REG_IML | IWM_MSIX_HW_INT_CAUSES_REG_CT_KILL | IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL | IWM_MSIX_HW_INT_CAUSES_REG_PERIODIC | IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR | IWM_MSIX_HW_INT_CAUSES_REG_SCD | IWM_MSIX_HW_INT_CAUSES_REG_FH_TX | IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR | IWM_MSIX_HW_INT_CAUSES_REG_HAP ))))) |
1996 | IWM_MSIX_HW_INT_CAUSES_REG_HAP)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x2000) + 0x80C))), (((((sc)->sc_st)->read_4(((sc)->sc_sh), ( (((0x2000) + 0x80C))))) & ~(IWM_MSIX_HW_INT_CAUSES_REG_ALIVE | IWM_MSIX_HW_INT_CAUSES_REG_WAKEUP | IWM_MSIX_HW_INT_CAUSES_REG_IML | IWM_MSIX_HW_INT_CAUSES_REG_CT_KILL | IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL | IWM_MSIX_HW_INT_CAUSES_REG_PERIODIC | IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR | IWM_MSIX_HW_INT_CAUSES_REG_SCD | IWM_MSIX_HW_INT_CAUSES_REG_FH_TX | IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR | IWM_MSIX_HW_INT_CAUSES_REG_HAP ))))); |
1997 | } |
1998 | |
1999 | int |
2000 | iwm_clear_persistence_bit(struct iwm_softc *sc) |
2001 | { |
2002 | uint32_t hpm, wprot; |
2003 | |
2004 | hpm = iwm_read_prph_unlocked(sc, IWM_HPM_DEBUG0xa03440); |
2005 | if (hpm != 0xa5a5a5a0 && (hpm & IWM_HPM_PERSISTENCE_BIT(1 << 12))) { |
2006 | wprot = iwm_read_prph_unlocked(sc, IWM_PREG_PRPH_WPROT_90000xa04ce0); |
2007 | if (wprot & IWM_PREG_WFPM_ACCESS(1 << 12)) { |
2008 | printf("%s: cannot clear persistence bit\n", |
2009 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
2010 | return EPERM1; |
2011 | } |
2012 | iwm_write_prph_unlocked(sc, IWM_HPM_DEBUG0xa03440, |
2013 | hpm & ~IWM_HPM_PERSISTENCE_BIT(1 << 12)); |
2014 | } |
2015 | |
2016 | return 0; |
2017 | } |
2018 | |
2019 | int |
2020 | iwm_start_hw(struct iwm_softc *sc) |
2021 | { |
2022 | int err; |
2023 | |
2024 | err = iwm_prepare_card_hw(sc); |
2025 | if (err) |
2026 | return err; |
2027 | |
2028 | if (sc->sc_device_family == IWM_DEVICE_FAMILY_90003) { |
2029 | err = iwm_clear_persistence_bit(sc); |
2030 | if (err) |
2031 | return err; |
2032 | } |
2033 | |
2034 | /* Reset the entire device */ |
2035 | IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x020))), ( ((0x00000080))))); |
2036 | DELAY(5000)(*delay_func)(5000); |
2037 | |
2038 | err = iwm_apm_init(sc); |
2039 | if (err) |
2040 | return err; |
2041 | |
2042 | iwm_init_msix_hw(sc); |
2043 | |
2044 | iwm_enable_rfkill_int(sc); |
2045 | iwm_check_rfkill(sc); |
2046 | |
2047 | return 0; |
2048 | } |
2049 | |
2050 | |
2051 | void |
2052 | iwm_stop_device(struct iwm_softc *sc) |
2053 | { |
2054 | int chnl, ntries; |
2055 | int qid; |
2056 | |
2057 | iwm_disable_interrupts(sc); |
2058 | sc->sc_flags &= ~IWM_FLAG_USE_ICT0x01; |
2059 | |
2060 | /* Stop all DMA channels. */ |
2061 | if (iwm_nic_lock(sc)) { |
2062 | /* Deactivate TX scheduler. */ |
2063 | iwm_write_prph(sc, IWM_SCD_TXFACT(((0x00000) + 0xa02c00) + 0x10), 0); |
2064 | |
2065 | for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM(8); chnl++) { |
2066 | IWM_WRITE(sc,(((sc)->sc_st)->write_4(((sc)->sc_sh), (((((0x1000) + 0xD00) + 0x20 * (chnl)))), ((0)))) |
2067 | IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((((0x1000) + 0xD00) + 0x20 * (chnl)))), ((0)))); |
2068 | for (ntries = 0; ntries < 200; ntries++) { |
2069 | uint32_t r; |
2070 | |
2071 | r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG)(((sc)->sc_st)->read_4(((sc)->sc_sh), (((((0x1000) + 0xEA0) + 0x010))))); |
2072 | if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(((1 << (chnl)) << 16) |
2073 | chnl)((1 << (chnl)) << 16)) |
2074 | break; |
2075 | DELAY(20)(*delay_func)(20); |
2076 | } |
2077 | } |
2078 | iwm_nic_unlock(sc); |
2079 | } |
2080 | iwm_disable_rx_dma(sc); |
2081 | |
2082 | iwm_reset_rx_ring(sc, &sc->rxq); |
2083 | |
2084 | for (qid = 0; qid < nitems(sc->txq)(sizeof((sc->txq)) / sizeof((sc->txq)[0])); qid++) |
2085 | iwm_reset_tx_ring(sc, &sc->txq[qid]); |
2086 | |
2087 | if (sc->sc_device_family == IWM_DEVICE_FAMILY_70001) { |
2088 | if (iwm_nic_lock(sc)) { |
2089 | /* Power-down device's busmaster DMA clocks */ |
2090 | iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG(((0x00000) + 0x3000) + 0x0008), |
2091 | IWM_APMG_CLK_VAL_DMA_CLK_RQT(0x00000200)); |
2092 | iwm_nic_unlock(sc); |
2093 | } |
2094 | DELAY(5)(*delay_func)(5); |
2095 | } |
2096 | |
2097 | /* Make sure (redundant) we've released our request to stay awake */ |
2098 | IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x024))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x024))))) & ~((0x00000008)))))) |
2099 | IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x024))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x024))))) & ~((0x00000008)))))); |
2100 | if (sc->sc_nic_locks > 0) |
2101 | printf("%s: %d active NIC locks forcefully cleared\n", |
2102 | DEVNAME(sc)((sc)->sc_dev.dv_xname), sc->sc_nic_locks); |
2103 | sc->sc_nic_locks = 0; |
2104 | |
2105 | /* Stop the device, and put it in low power state */ |
2106 | iwm_apm_stop(sc); |
2107 | |
2108 | /* Reset the on-board processor. */ |
2109 | IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x020))), ( ((0x00000080))))); |
2110 | DELAY(5000)(*delay_func)(5000); |
2111 | |
2112 | /* |
2113 | * Upon stop, the IVAR table gets erased, so msi-x won't |
2114 | * work. This causes a bug in RF-KILL flows, since the interrupt |
2115 | * that enables radio won't fire on the correct irq, and the |
2116 | * driver won't be able to handle the interrupt. |
2117 | * Configure the IVAR table again after reset. |
2118 | */ |
2119 | iwm_conf_msix_hw(sc, 1); |
2120 | |
2121 | /* |
2122 | * Upon stop, the APM issues an interrupt if HW RF kill is set. |
2123 | * Clear the interrupt again. |
2124 | */ |
2125 | iwm_disable_interrupts(sc); |
2126 | |
2127 | /* Even though we stop the HW we still want the RF kill interrupt. */ |
2128 | iwm_enable_rfkill_int(sc); |
2129 | iwm_check_rfkill(sc); |
2130 | |
2131 | iwm_prepare_card_hw(sc); |
2132 | } |
2133 | |
2134 | void |
2135 | iwm_nic_config(struct iwm_softc *sc) |
2136 | { |
2137 | uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash; |
2138 | uint32_t mask, val, reg_val = 0; |
2139 | |
2140 | radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE(0x3 << 0)) >> |
2141 | IWM_FW_PHY_CFG_RADIO_TYPE_POS0; |
2142 | radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP(0x3 << 2)) >> |
2143 | IWM_FW_PHY_CFG_RADIO_STEP_POS2; |
2144 | radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH(0x3 << 4)) >> |
2145 | IWM_FW_PHY_CFG_RADIO_DASH_POS4; |
2146 | |
2147 | reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev)(((sc->sc_hw_rev) & 0x000000C) >> 2) << |
2148 | IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP(2); |
2149 | reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev)(((sc->sc_hw_rev) & 0x0000003) >> 0) << |
2150 | IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH(0); |
2151 | |
2152 | /* radio configuration */ |
2153 | reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE(10); |
2154 | reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP(14); |
2155 | reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH(12); |
2156 | |
2157 | mask = IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH(0x00000003) | |
2158 | IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP(0x0000000C) | |
2159 | IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP(0x0000C000) | |
2160 | IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH(0x00003000) | |
2161 | IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE(0x00000C00) | |
2162 | IWM_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI(0x00000200) | |
2163 | IWM_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI(0x00000100); |
2164 | |
2165 | val = IWM_READ(sc, IWM_CSR_HW_IF_CONFIG_REG)(((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x000))))); |
2166 | val &= ~mask; |
2167 | val |= reg_val; |
2168 | IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, val)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x000))), ( (val)))); |
2169 | |
2170 | /* |
2171 | * W/A : NIC is stuck in a reset state after Early PCIe power off |
2172 | * (PCIe power is lost before PERST# is asserted), causing ME FW |
2173 | * to lose ownership and not being able to obtain it back. |
2174 | */ |
2175 | if (sc->sc_device_family == IWM_DEVICE_FAMILY_70001) |
2176 | iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG(((0x00000) + 0x3000) + 0x000c), |
2177 | IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS(0x00400000), |
2178 | ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS(0x00400000)); |
2179 | } |
2180 | |
2181 | int |
2182 | iwm_nic_rx_init(struct iwm_softc *sc) |
2183 | { |
2184 | if (sc->sc_mqrx_supported) |
2185 | return iwm_nic_rx_mq_init(sc); |
2186 | else |
2187 | return iwm_nic_rx_legacy_init(sc); |
2188 | } |
2189 | |
2190 | int |
2191 | iwm_nic_rx_mq_init(struct iwm_softc *sc) |
2192 | { |
2193 | int enabled; |
2194 | |
2195 | if (!iwm_nic_lock(sc)) |
2196 | return EBUSY16; |
2197 | |
2198 | /* Stop RX DMA. */ |
2199 | iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG0xA09820, 0); |
2200 | /* Disable RX used and free queue operation. */ |
2201 | iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE0xA0980C, 0); |
2202 | |
2203 | iwm_write_prph64(sc, IWM_RFH_Q0_FRBDCB_BA_LSB0xA08000, |
2204 | sc->rxq.free_desc_dma.paddr); |
2205 | iwm_write_prph64(sc, IWM_RFH_Q0_URBDCB_BA_LSB0xA08100, |
2206 | sc->rxq.used_desc_dma.paddr); |
2207 | iwm_write_prph64(sc, IWM_RFH_Q0_URBD_STTS_WPTR_LSB0xA08200, |
2208 | sc->rxq.stat_dma.paddr); |
2209 | iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_WIDX0xA08080, 0); |
2210 | iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_RIDX0xA080C0, 0); |
2211 | iwm_write_prph(sc, IWM_RFH_Q0_URBDCB_WIDX0xA08180, 0); |
2212 | |
2213 | /* We configure only queue 0 for now. */ |
2214 | enabled = ((1 << 0) << 16) | (1 << 0); |
2215 | |
2216 | /* Enable RX DMA, 4KB buffer size. */ |
2217 | iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG0xA09820, |
2218 | IWM_RFH_DMA_EN_ENABLE_VAL(1U << 31) | |
2219 | IWM_RFH_RXF_DMA_RB_SIZE_4K(0x4 << 16) | |
2220 | IWM_RFH_RXF_DMA_MIN_RB_4_8(3 << 24) | |
2221 | IWM_RFH_RXF_DMA_DROP_TOO_LARGE_MASK(0x04000000) | |
2222 | IWM_RFH_RXF_DMA_RBDCB_SIZE_512(0x9 << 20)); |
2223 | |
2224 | /* Enable RX DMA snooping. */ |
2225 | iwm_write_prph(sc, IWM_RFH_GEN_CFG0xA09800, |
2226 | IWM_RFH_GEN_CFG_RFH_DMA_SNOOP(1 << 1) | |
2227 | IWM_RFH_GEN_CFG_SERVICE_DMA_SNOOP(1 << 0) | |
2228 | (sc->sc_integrated ? IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_640x00000000 : |
2229 | IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_1280x00000010)); |
2230 | |
2231 | /* Enable the configured queue(s). */ |
2232 | iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE0xA0980C, enabled); |
2233 | |
2234 | iwm_nic_unlock(sc); |
2235 | |
2236 | IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF)(((sc)->sc_st)->write_1(((sc)->sc_sh), (((0x004))), ( ((0x40))))); |
2237 | |
2238 | IWM_WRITE(sc, IWM_RFH_Q0_FRBDCB_WIDX_TRG, 8)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((0x1C80)), ( (8)))); |
2239 | |
2240 | return 0; |
2241 | } |
2242 | |
2243 | int |
2244 | iwm_nic_rx_legacy_init(struct iwm_softc *sc) |
2245 | { |
2246 | memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat))__builtin_memset((sc->rxq.stat), (0), (sizeof(*sc->rxq. stat))); |
2247 | |
2248 | iwm_disable_rx_dma(sc); |
2249 | |
2250 | if (!iwm_nic_lock(sc)) |
2251 | return EBUSY16; |
2252 | |
2253 | /* reset and flush pointers */ |
2254 | IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((((0x1000) + 0xC00)) + 0x8))), ((0)))); |
2255 | IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((((0x1000) + 0xC00)) + 0x10))), ((0)))); |
2256 | IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((((0x1000) + 0xBC0)) + 0x00c))), ((0)))); |
2257 | IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((((0x1000) + 0xBC0)) + 0x008))), ((0)))); |
2258 | |
2259 | /* Set physical address of RX ring (256-byte aligned). */ |
2260 | IWM_WRITE(sc,(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((((0x1000) + 0xBC0)) + 0x004))), ((sc->rxq.free_desc_dma.paddr >> 8)))) |
2261 | IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.free_desc_dma.paddr >> 8)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((((0x1000) + 0xBC0)) + 0x004))), ((sc->rxq.free_desc_dma.paddr >> 8)))); |
2262 | |
2263 | /* Set physical address of RX status (16-byte aligned). */ |
2264 | IWM_WRITE(sc,(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((((0x1000) + 0xBC0))))), ((sc->rxq.stat_dma.paddr >> 4)))) |
2265 | IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((((0x1000) + 0xBC0))))), ((sc->rxq.stat_dma.paddr >> 4)))); |
2266 | |
2267 | /* Enable RX. */ |
2268 | IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((((0x1000) + 0xC00))))), (((0x80000000) | (0x00000004) | (0x00001000) | ((0x11) << (4)) | (0x00000000) | 8 << (20))))) |
2269 | IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((((0x1000) + 0xC00))))), (((0x80000000) | (0x00000004) | (0x00001000) | ((0x11) << (4)) | (0x00000000) | 8 << (20))))) |
2270 | IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | /* HW bug */(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((((0x1000) + 0xC00))))), (((0x80000000) | (0x00000004) | (0x00001000) | ((0x11) << (4)) | (0x00000000) | 8 << (20))))) |
2271 | IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((((0x1000) + 0xC00))))), (((0x80000000) | (0x00000004) | (0x00001000) | ((0x11) << (4)) | (0x00000000) | 8 << (20))))) |
2272 | (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((((0x1000) + 0xC00))))), (((0x80000000) | (0x00000004) | (0x00001000) | ((0x11) << (4)) | (0x00000000) | 8 << (20))))) |
2273 | IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((((0x1000) + 0xC00))))), (((0x80000000) | (0x00000004) | (0x00001000) | ((0x11) << (4)) | (0x00000000) | 8 << (20))))) |
2274 | IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((((0x1000) + 0xC00))))), (((0x80000000) | (0x00000004) | (0x00001000) | ((0x11) << (4)) | (0x00000000) | 8 << (20))))); |
2275 | |
2276 | IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF)(((sc)->sc_st)->write_1(((sc)->sc_sh), (((0x004))), ( ((0x40))))); |
2277 | |
2278 | /* W/A for interrupt coalescing bug in 7260 and 3160 */ |
2279 | if (sc->host_interrupt_operation_mode) |
2280 | IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x004))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x004))))) | ((1U << 31)))))); |
2281 | |
2282 | iwm_nic_unlock(sc); |
2283 | |
2284 | /* |
2285 | * This value should initially be 0 (before preparing any RBs), |
2286 | * and should be 8 after preparing the first 8 RBs (for example). |
2287 | */ |
2288 | IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((((((0x1000 ) + 0xBC0)) + 0x008)))), ((8)))); |
2289 | |
2290 | return 0; |
2291 | } |
2292 | |
2293 | int |
2294 | iwm_nic_tx_init(struct iwm_softc *sc) |
2295 | { |
2296 | int qid, err; |
2297 | |
2298 | if (!iwm_nic_lock(sc)) |
2299 | return EBUSY16; |
2300 | |
2301 | /* Deactivate TX scheduler. */ |
2302 | iwm_write_prph(sc, IWM_SCD_TXFACT(((0x00000) + 0xa02c00) + 0x10), 0); |
2303 | |
2304 | /* Set physical address of "keep warm" page (16-byte aligned). */ |
2305 | IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x1000) + 0x97C))), ((sc->kw_dma.paddr >> 4)))); |
2306 | |
2307 | for (qid = 0; qid < nitems(sc->txq)(sizeof((sc->txq)) / sizeof((sc->txq)[0])); qid++) { |
2308 | struct iwm_tx_ring *txq = &sc->txq[qid]; |
2309 | |
2310 | /* Set physical address of TX ring (256-byte aligned). */ |
2311 | IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),(((sc)->sc_st)->write_4(((sc)->sc_sh), ((IWM_FH_MEM_CBBC_QUEUE (qid))), ((txq->desc_dma.paddr >> 8)))) |
2312 | txq->desc_dma.paddr >> 8)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((IWM_FH_MEM_CBBC_QUEUE (qid))), ((txq->desc_dma.paddr >> 8)))); |
2313 | } |
2314 | |
2315 | err = iwm_set_bits_prph(sc, IWM_SCD_GP_CTRL(((0x00000) + 0xa02c00) + 0x1a8), |
2316 | IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE(1 << 18) | |
2317 | IWM_SCD_GP_CTRL_ENABLE_31_QUEUES(1 << 0)); |
2318 | |
2319 | iwm_nic_unlock(sc); |
2320 | |
2321 | return err; |
2322 | } |
2323 | |
2324 | int |
2325 | iwm_nic_init(struct iwm_softc *sc) |
2326 | { |
2327 | int err; |
2328 | |
2329 | iwm_apm_init(sc); |
2330 | if (sc->sc_device_family == IWM_DEVICE_FAMILY_70001) |
2331 | iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG(((0x00000) + 0x3000) + 0x000c), |
2332 | IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN(0x00000000), |
2333 | ~IWM_APMG_PS_CTRL_MSK_PWR_SRC(0x03000000)); |
2334 | |
2335 | iwm_nic_config(sc); |
2336 | |
2337 | err = iwm_nic_rx_init(sc); |
2338 | if (err) |
2339 | return err; |
2340 | |
2341 | err = iwm_nic_tx_init(sc); |
2342 | if (err) |
2343 | return err; |
2344 | |
2345 | IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x0A8))), ( ((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x0A8))))) | (0x800fffff))))); |
2346 | |
2347 | return 0; |
2348 | } |
2349 | |
2350 | /* Map a TID to an ieee80211_edca_ac category. */ |
2351 | const uint8_t iwm_tid_to_ac[IWM_MAX_TID_COUNT8] = { |
2352 | EDCA_AC_BE, |
2353 | EDCA_AC_BK, |
2354 | EDCA_AC_BK, |
2355 | EDCA_AC_BE, |
2356 | EDCA_AC_VI, |
2357 | EDCA_AC_VI, |
2358 | EDCA_AC_VO, |
2359 | EDCA_AC_VO, |
2360 | }; |
2361 | |
2362 | /* Map ieee80211_edca_ac categories to firmware Tx FIFO. */ |
2363 | const uint8_t iwm_ac_to_tx_fifo[] = { |
2364 | IWM_TX_FIFO_BE1, |
2365 | IWM_TX_FIFO_BK0, |
2366 | IWM_TX_FIFO_VI2, |
2367 | IWM_TX_FIFO_VO3, |
2368 | }; |
2369 | |
2370 | int |
2371 | iwm_enable_ac_txq(struct iwm_softc *sc, int qid, int fifo) |
2372 | { |
2373 | int err; |
2374 | iwm_nic_assert_locked(sc); |
2375 | |
2376 | IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x400)+0x060 ))), ((qid << 8 | 0)))); |
2377 | |
2378 | iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid), |
2379 | (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE(3)) |
2380 | | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN(19))); |
2381 | |
2382 | err = iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL(((0x00000) + 0xa02c00) + 0x248), (1 << qid)); |
2383 | if (err) { |
2384 | return err; |
2385 | } |
2386 | |
2387 | iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0); |
2388 | |
2389 | iwm_write_mem32(sc, |
2390 | sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid)(((0x0000) + 0x600) + ((qid) * 8)), 0); |
2391 | |
2392 | /* Set scheduler window size and frame limit. */ |
2393 | iwm_write_mem32(sc, |
2394 | sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid)(((0x0000) + 0x600) + ((qid) * 8)) + |
2395 | sizeof(uint32_t), |
2396 | ((IWM_FRAME_LIMIT64 << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS(0)) & |
2397 | IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK(0x0000007F)) | |
2398 | ((IWM_FRAME_LIMIT64 |
2399 | << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS(16)) & |
2400 | IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK(0x007F0000))); |
2401 | |
2402 | iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid), |
2403 | (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE(3)) | |
2404 | (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF(0)) | |
2405 | (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL(4)) | |
2406 | IWM_SCD_QUEUE_STTS_REG_MSK(0x017F0000)); |
2407 | |
2408 | if (qid == sc->cmdqid) |
2409 | iwm_write_prph(sc, IWM_SCD_EN_CTRL(((0x00000) + 0xa02c00) + 0x254), |
2410 | iwm_read_prph(sc, IWM_SCD_EN_CTRL(((0x00000) + 0xa02c00) + 0x254)) | (1 << qid)); |
2411 | |
2412 | return 0; |
2413 | } |
2414 | |
2415 | int |
2416 | iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo, |
2417 | int aggregate, uint8_t tid, uint16_t ssn) |
2418 | { |
2419 | struct iwm_tx_ring *ring = &sc->txq[qid]; |
2420 | struct iwm_scd_txq_cfg_cmd cmd; |
2421 | int err, idx, scd_bug; |
2422 | |
2423 | iwm_nic_assert_locked(sc); |
2424 | |
2425 | /* |
2426 | * If we need to move the SCD write pointer by steps of |
2427 | * 0x40, 0x80 or 0xc0, it gets stuck. |
2428 | * This is really ugly, but this is the easiest way out for |
2429 | * this sad hardware issue. |
2430 | * This bug has been fixed on devices 9000 and up. |
2431 | */ |
2432 | scd_bug = !sc->sc_mqrx_supported && |
2433 | !((ssn - ring->cur) & 0x3f) && |
2434 | (ssn != ring->cur); |
2435 | if (scd_bug) |
2436 | ssn = (ssn + 1) & 0xfff; |
2437 | |
2438 | idx = IWM_AGG_SSN_TO_TXQ_IDX(ssn)((ssn) & (256 - 1)); |
2439 | IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | idx)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x400)+0x060 ))), ((qid << 8 | idx)))); |
2440 | ring->cur = idx; |
2441 | ring->tail = idx; |
2442 | |
2443 | memset(&cmd, 0, sizeof(cmd))__builtin_memset((&cmd), (0), (sizeof(cmd))); |
2444 | cmd.tid = tid; |
2445 | cmd.scd_queue = qid; |
2446 | cmd.enable = 1; |
2447 | cmd.sta_id = sta_id; |
2448 | cmd.tx_fifo = fifo; |
2449 | cmd.aggregate = aggregate; |
2450 | cmd.ssn = htole16(ssn)((__uint16_t)(ssn)); |
2451 | cmd.window = IWM_FRAME_LIMIT64; |
2452 | |
2453 | err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG0x1d, 0, |
2454 | sizeof(cmd), &cmd); |
2455 | if (err) |
2456 | return err; |
2457 | |
2458 | sc->qenablemsk |= (1 << qid); |
2459 | return 0; |
2460 | } |
2461 | |
2462 | int |
2463 | iwm_disable_txq(struct iwm_softc *sc, int sta_id, int qid, uint8_t tid) |
2464 | { |
2465 | struct iwm_scd_txq_cfg_cmd cmd; |
2466 | int err; |
2467 | |
2468 | memset(&cmd, 0, sizeof(cmd))__builtin_memset((&cmd), (0), (sizeof(cmd))); |
2469 | cmd.tid = tid; |
2470 | cmd.scd_queue = qid; |
2471 | cmd.enable = 0; |
2472 | cmd.sta_id = sta_id; |
2473 | |
2474 | err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG0x1d, 0, sizeof(cmd), &cmd); |
2475 | if (err) |
2476 | return err; |
2477 | |
2478 | sc->qenablemsk &= ~(1 << qid); |
2479 | return 0; |
2480 | } |
2481 | |
2482 | int |
2483 | iwm_post_alive(struct iwm_softc *sc) |
2484 | { |
2485 | int nwords; |
2486 | int err, chnl; |
2487 | uint32_t base; |
2488 | |
2489 | if (!iwm_nic_lock(sc)) |
2490 | return EBUSY16; |
2491 | |
2492 | base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR(((0x00000) + 0xa02c00) + 0x0)); |
2493 | |
2494 | iwm_ict_reset(sc); |
2495 | |
2496 | iwm_nic_unlock(sc); |
2497 | |
2498 | /* Clear TX scheduler state in SRAM. */ |
2499 | nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND((0x0000) + 0x808) - |
2500 | IWM_SCD_CONTEXT_MEM_LOWER_BOUND((0x0000) + 0x600)) |
2501 | / sizeof(uint32_t); |
2502 | err = iwm_write_mem(sc, |
2503 | sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND((0x0000) + 0x600), |
2504 | NULL((void *)0), nwords); |
2505 | if (err) |
2506 | return err; |
2507 | |
2508 | if (!iwm_nic_lock(sc)) |
2509 | return EBUSY16; |
2510 | |
2511 | /* Set physical address of TX scheduler rings (1KB aligned). */ |
2512 | iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR(((0x00000) + 0xa02c00) + 0x8), sc->sched_dma.paddr >> 10); |
2513 | |
2514 | iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN(((0x00000) + 0xa02c00) + 0x244), 0); |
2515 | |
2516 | /* enable command channel */ |
2517 | err = iwm_enable_ac_txq(sc, sc->cmdqid, IWM_TX_FIFO_CMD7); |
2518 | if (err) { |
2519 | iwm_nic_unlock(sc); |
2520 | return err; |
2521 | } |
2522 | |
2523 | /* Activate TX scheduler. */ |
2524 | iwm_write_prph(sc, IWM_SCD_TXFACT(((0x00000) + 0xa02c00) + 0x10), 0xff); |
2525 | |
2526 | /* Enable DMA channels. */ |
2527 | for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM(8); chnl++) { |
2528 | IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),(((sc)->sc_st)->write_4(((sc)->sc_sh), (((((0x1000) + 0xD00) + 0x20 * (chnl)))), (((0x80000000) | (0x00000008))))) |
2529 | IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |(((sc)->sc_st)->write_4(((sc)->sc_sh), (((((0x1000) + 0xD00) + 0x20 * (chnl)))), (((0x80000000) | (0x00000008))))) |
2530 | IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((((0x1000) + 0xD00) + 0x20 * (chnl)))), (((0x80000000) | (0x00000008))))); |
2531 | } |
2532 | |
2533 | IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x1000) + 0xE98))), (((((sc)->sc_st)->read_4(((sc)->sc_sh), ( (((0x1000) + 0xE98))))) | ((0x00000002)))))) |
2534 | IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x1000) + 0xE98))), (((((sc)->sc_st)->read_4(((sc)->sc_sh), ( (((0x1000) + 0xE98))))) | ((0x00000002)))))); |
2535 | |
2536 | iwm_nic_unlock(sc); |
2537 | |
2538 | /* Enable L1-Active */ |
2539 | if (sc->sc_device_family < IWM_DEVICE_FAMILY_80002) { |
2540 | err = iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG(((0x00000) + 0x3000) + 0x0010), |
2541 | IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS(0x00000800)); |
2542 | } |
2543 | |
2544 | return err; |
2545 | } |
2546 | |
/*
 * Return the in-memory PHY DB entry backing a given section type, or NULL
 * if the type is unknown or the channel-group index is out of range.
 * chg_id is only meaningful for the per-channel-group PAPD/TXP sections.
 */
struct iwm_phy_db_entry *
iwm_phy_db_get_section(struct iwm_softc *sc, uint16_t type, uint16_t chg_id)
{
	struct iwm_phy_db *phy_db = &sc->sc_phy_db;

	if (type >= IWM_PHY_DB_MAX6)
		return NULL((void *)0);

	switch (type) {
	case IWM_PHY_DB_CFG1:
		return &phy_db->cfg;
	case IWM_PHY_DB_CALIB_NCH2:
		return &phy_db->calib_nch;
	case IWM_PHY_DB_CALIB_CHG_PAPD4:
		/* Bounds-check the channel-group index for array sections. */
		if (chg_id >= IWM_NUM_PAPD_CH_GROUPS9)
			return NULL((void *)0);
		return &phy_db->calib_ch_group_papd[chg_id];
	case IWM_PHY_DB_CALIB_CHG_TXP5:
		if (chg_id >= IWM_NUM_TXP_CH_GROUPS9)
			return NULL((void *)0);
		return &phy_db->calib_ch_group_txp[chg_id];
	default:
		return NULL((void *)0);
	}
	/* Not reachable; every switch case above returns. */
	return NULL((void *)0);
}
2573 | |
/*
 * Store a PHY DB section received from firmware in a calibration result
 * notification.  Replaces any previously stored data for that section.
 * Returns 0, EINVAL for an unknown section, or ENOMEM.
 */
int
iwm_phy_db_set_section(struct iwm_softc *sc,
    struct iwm_calib_res_notif_phy_db *phy_db_notif)
{
	uint16_t type = le16toh(phy_db_notif->type)((__uint16_t)(phy_db_notif->type));
	uint16_t size = le16toh(phy_db_notif->length)((__uint16_t)(phy_db_notif->length));
	struct iwm_phy_db_entry *entry;
	uint16_t chg_id = 0;

	/*
	 * For per-channel-group sections the first 16-bit word of the
	 * payload is the group index.
	 */
	if (type == IWM_PHY_DB_CALIB_CHG_PAPD4 ||
	    type == IWM_PHY_DB_CALIB_CHG_TXP5)
		chg_id = le16toh(*(uint16_t *)phy_db_notif->data)((__uint16_t)(*(uint16_t *)phy_db_notif->data));

	entry = iwm_phy_db_get_section(sc, type, chg_id);
	if (!entry)
		return EINVAL22;

	/* Free any stale copy before storing the fresh section data. */
	if (entry->data)
		free(entry->data, M_DEVBUF2, entry->size);
	entry->data = malloc(size, M_DEVBUF2, M_NOWAIT0x0002);
	if (!entry->data) {
		entry->size = 0;
		return ENOMEM12;
	}
	/* NOTE(review): size comes from the firmware notification and is
	 * not validated against the received packet length here — assumed
	 * checked by the caller; confirm. */
	memcpy(entry->data, phy_db_notif->data, size)__builtin_memcpy((entry->data), (phy_db_notif->data), ( size));
	entry->size = size;

	return 0;
}
2603 | |
/*
 * Return 1 if ch_id is a channel number this driver considers valid,
 * 0 otherwise.  Valid numbers are 0-14 (2GHz range check), the 5GHz
 * channels 36-64 and 100-140 on multiples of 4, and 145-165 where
 * the number is congruent to 1 modulo 4.
 */
int
iwm_is_valid_channel(uint16_t ch_id)
{
	/* 2GHz range (the check also admits ch_id 0). */
	if (ch_id <= 14)
		return 1;
	/* 5GHz: 36-64, step 4. */
	if (ch_id >= 36 && ch_id <= 64 && (ch_id % 4) == 0)
		return 1;
	/* 5GHz: 100-140, step 4. */
	if (ch_id >= 100 && ch_id <= 140 && (ch_id % 4) == 0)
		return 1;
	/* 5GHz: 145-165, numbers that are 1 mod 4 (145, 149, ... 165). */
	if (ch_id >= 145 && ch_id <= 165 && (ch_id % 4) == 1)
		return 1;
	return 0;
}
2614 | |
/*
 * Map a channel number to its index in the driver's channel tables.
 * Returns 0xff for invalid channel numbers.
 */
uint8_t
iwm_ch_id_to_ch_index(uint16_t ch_id)
{
	if (!iwm_is_valid_channel(ch_id))
		return 0xff;

	if (ch_id <= 14) {
		/* 2GHz: channels 1-14 occupy indices 0-13. */
		return ch_id - 1;
	} else if (ch_id <= 64) {
		/* 5GHz 36-64: (36+20)/4 = 14 ... (64+20)/4 = 21. */
		return (ch_id + 20) / 4;
	} else if (ch_id <= 140) {
		/* 5GHz 100-140: (100-12)/4 = 22 ... (140-12)/4 = 32. */
		return (ch_id - 12) / 4;
	}
	/* 5GHz 145-165: (145-13)/4 = 33 ... (165-13)/4 = 38. */
	return (ch_id - 13) / 4;
}
2629 | |
2630 | |
/*
 * Map a channel number to its PAPD calibration channel group (0-3),
 * or 0xff for invalid channel numbers.
 */
uint16_t
iwm_channel_id_to_papd(uint16_t ch_id)
{
	uint16_t group;

	if (!iwm_is_valid_channel(ch_id))
		return 0xff;

	if (ch_id >= 1 && ch_id <= 14)
		group = 0;	/* 2GHz */
	else if (ch_id >= 36 && ch_id <= 64)
		group = 1;	/* 5GHz low */
	else if (ch_id >= 100 && ch_id <= 140)
		group = 2;	/* 5GHz mid */
	else
		group = 3;	/* 5GHz high (and ch_id 0, as before) */

	return group;
}
2645 | |
/*
 * Map a channel number to its TX-power calibration channel group by
 * scanning the stored TXP group table.  Returns the group index, or
 * 0xff if the channel is invalid or no group data is available.
 */
uint16_t
iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
{
	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
	struct iwm_phy_db_chg_txp *txp_chg;
	int i;
	uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);

	if (ch_index == 0xff)
		return 0xff;

	for (i = 0; i < IWM_NUM_TXP_CH_GROUPS9; i++) {
		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
		/* Group table not (yet) populated from firmware. */
		if (!txp_chg)
			return 0xff;
		/*
		 * Looking for the first channel group the max channel
		 * of which is higher than the requested channel.
		 */
		if (le16toh(txp_chg->max_channel_idx)((__uint16_t)(txp_chg->max_channel_idx)) >= ch_index)
			return i;
	}
	return 0xff;
}
2670 | |
/*
 * Look up the stored data for a PHY DB section and return pointers to
 * it via *data/*size.  For per-channel-group sections the channel id is
 * first translated to the corresponding group index.
 * Returns 0 or EINVAL if the section cannot be resolved.
 */
int
iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data,
    uint16_t *size, uint16_t ch_id)
{
	struct iwm_phy_db_entry *entry;
	uint16_t ch_group_id = 0;

	if (type == IWM_PHY_DB_CALIB_CHG_PAPD4)
		ch_group_id = iwm_channel_id_to_papd(ch_id);
	else if (type == IWM_PHY_DB_CALIB_CHG_TXP5)
		ch_group_id = iwm_channel_id_to_txp(sc, ch_id);

	/* A 0xff group id from the helpers fails the range check here. */
	entry = iwm_phy_db_get_section(sc, type, ch_group_id);
	if (!entry)
		return EINVAL22;

	*data = entry->data;
	*size = entry->size;

	return 0;
}
2692 | |
/*
 * Send one PHY DB section to the firmware as an asynchronous
 * IWM_PHY_DB_CMD with a two-fragment payload: header + section data.
 */
int
iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length,
    void *data)
{
	struct iwm_phy_db_cmd phy_db_cmd;
	struct iwm_host_cmd cmd = {
		.id = IWM_PHY_DB_CMD0x6c,
		.flags = IWM_CMD_ASYNC,
	};

	/*
	 * NOTE(review): le16toh is used on host values where htole16 would
	 * be expected; the 16-bit byte swap is its own inverse so the result
	 * is the same — matches upstream, but confirm intent.
	 */
	phy_db_cmd.type = le16toh(type)((__uint16_t)(type));
	phy_db_cmd.length = le16toh(length)((__uint16_t)(length));

	cmd.data[0] = &phy_db_cmd;
	cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
	cmd.data[1] = data;
	cmd.len[1] = length;

	return iwm_send_cmd(sc, &cmd);
}
2713 | |
/*
 * Upload every populated channel group of a per-group PHY DB section
 * (PAPD or TXP) to the firmware.  Empty groups are skipped; a short
 * delay follows each upload.  Returns 0 or the first error.
 */
int
iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc, uint16_t type,
    uint8_t max_ch_groups)
{
	uint16_t i;
	int err;
	struct iwm_phy_db_entry *entry;

	for (i = 0; i < max_ch_groups; i++) {
		entry = iwm_phy_db_get_section(sc, type, i);
		if (!entry)
			return EINVAL22;

		/* Group never filled in by firmware calibration results. */
		if (!entry->size)
			continue;

		err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
		if (err)
			return err;

		/* Brief pause between uploads, as in upstream iwlwifi. */
		DELAY(1000)(*delay_func)(1000);
	}

	return 0;
}
2739 | |
/*
 * Push the entire cached PHY DB to the firmware: the config section,
 * the non-channel calibration section, and then all PAPD and TXP
 * channel groups.  Returns 0 or the first error encountered.
 */
int
iwm_send_phy_db_data(struct iwm_softc *sc)
{
	uint8_t *data = NULL((void *)0);
	uint16_t size = 0;
	int err;

	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG1, &data, &size, 0);
	if (err)
		return err;

	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG1, size, data);
	if (err)
		return err;

	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH2,
	    &data, &size, 0);
	if (err)
		return err;

	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH2, size, data);
	if (err)
		return err;

	err = iwm_phy_db_send_all_channel_groups(sc,
	    IWM_PHY_DB_CALIB_CHG_PAPD4, IWM_NUM_PAPD_CH_GROUPS9);
	if (err)
		return err;

	err = iwm_phy_db_send_all_channel_groups(sc,
	    IWM_PHY_DB_CALIB_CHG_TXP5, IWM_NUM_TXP_CH_GROUPS9);
	if (err)
		return err;

	return 0;
}
2776 | |
2777 | /* |
2778 | * For the high priority TE use a time event type that has similar priority to |
2779 | * the FW's action scan priority. |
2780 | */ |
2781 | #define IWM_ROC_TE_TYPE_NORMAL4 IWM_TE_P2P_DEVICE_DISCOVERABLE4 |
2782 | #define IWM_ROC_TE_TYPE_MGMT_TX9 IWM_TE_P2P_CLIENT_ASSOC9 |
2783 | |
/*
 * Send a time event command synchronously and record the unique id the
 * firmware assigns (used later to remove the event).  Returns 0, or EIO
 * if the command failed, the response size was wrong, or the firmware
 * reported a non-zero status.
 */
int
iwm_send_time_event_cmd(struct iwm_softc *sc,
    const struct iwm_time_event_cmd *cmd)
{
	struct iwm_rx_packet *pkt;
	struct iwm_time_event_resp *resp;
	struct iwm_host_cmd hcmd = {
		.id = IWM_TIME_EVENT_CMD0x29,
		.flags = IWM_CMD_WANT_RESP,
		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
	};
	uint32_t resp_len;
	int err;

	hcmd.data[0] = cmd;
	hcmd.len[0] = sizeof(*cmd);
	err = iwm_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWM_CMD_FAILED_MSK0x40)) {
		err = EIO5;
		goto out;
	}

	/* The response must be exactly one iwm_time_event_resp. */
	resp_len = iwm_rx_packet_payload_len(pkt);
	if (resp_len != sizeof(*resp)) {
		err = EIO5;
		goto out;
	}

	resp = (void *)pkt->data;
	if (le32toh(resp->status)((__uint32_t)(resp->status)) == 0)
		sc->sc_time_event_uid = le32toh(resp->unique_id)((__uint32_t)(resp->unique_id));
	else
		err = EIO5;
out:
	/* Always release the response buffer held by IWM_CMD_WANT_RESP. */
	iwm_free_resp(sc, &hcmd);
	return err;
}
2825 | |
/*
 * Schedule a firmware time event that protects the session (e.g. during
 * association) for `duration`, starting within `max_delay`.  Sets
 * IWM_FLAG_TE_ACTIVE on success; a no-op if one is already scheduled.
 */
void
iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
    uint32_t duration, uint32_t max_delay)
{
	struct iwm_time_event_cmd time_cmd;

	/* Do nothing if a time event is already scheduled. */
	if (sc->sc_flags & IWM_FLAG_TE_ACTIVE0x40)
		return;

	memset(&time_cmd, 0, sizeof(time_cmd))__builtin_memset((&time_cmd), (0), (sizeof(time_cmd)));

	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD)((__uint32_t)(1));
	time_cmd.id_and_color =
	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color))((__uint32_t)(((in->in_id << (0)) | (in->in_color << (8)))));
	time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC)((__uint32_t)(0));

	/* apply_time 0: firmware picks the start time itself. */
	time_cmd.apply_time = htole32(0)((__uint32_t)(0));

	time_cmd.max_frags = IWM_TE_V2_FRAG_NONE0;
	time_cmd.max_delay = htole32(max_delay)((__uint32_t)(max_delay));
	/* TODO: why do we need to interval = bi if it is not periodic? */
	time_cmd.interval = htole32(1)((__uint32_t)(1));
	time_cmd.duration = htole32(duration)((__uint32_t)(duration));
	time_cmd.repeat = 1;
	time_cmd.policy
	    = htole16(IWM_TE_V2_NOTIF_HOST_EVENT_START |((__uint16_t)((1 << 0) | (1 << 1) | (1 << 11)))
	    IWM_TE_V2_NOTIF_HOST_EVENT_END |((__uint16_t)((1 << 0) | (1 << 1) | (1 << 11)))
	    IWM_T2_V2_START_IMMEDIATELY)((__uint16_t)((1 << 0) | (1 << 1) | (1 << 11)));

	if (iwm_send_time_event_cmd(sc, &time_cmd) == 0)
		sc->sc_flags |= IWM_FLAG_TE_ACTIVE0x40;

	/* Give firmware a moment to act on the command. */
	DELAY(100)(*delay_func)(100);
}
2861 | |
/*
 * Remove the session-protection time event scheduled by
 * iwm_protect_session(), identified by the unique id the firmware
 * returned when the event was added.  Clears IWM_FLAG_TE_ACTIVE.
 */
void
iwm_unprotect_session(struct iwm_softc *sc, struct iwm_node *in)
{
	struct iwm_time_event_cmd time_cmd;

	/* Do nothing if the time event has already ended. */
	if ((sc->sc_flags & IWM_FLAG_TE_ACTIVE0x40) == 0)
		return;

	memset(&time_cmd, 0, sizeof(time_cmd))__builtin_memset((&time_cmd), (0), (sizeof(time_cmd)));

	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_REMOVE)((__uint32_t)(3));
	time_cmd.id_and_color =
	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color))((__uint32_t)(((in->in_id << (0)) | (in->in_color << (8)))));
	/* Identify the event by the uid saved in iwm_send_time_event_cmd(). */
	time_cmd.id = htole32(sc->sc_time_event_uid)((__uint32_t)(sc->sc_time_event_uid));

	if (iwm_send_time_event_cmd(sc, &time_cmd) == 0)
		sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE0x40;

	DELAY(100)(*delay_func)(100);
}
2883 | |
2884 | /* |
2885 | * NVM read access and content parsing. We do not support |
2886 | * external NVM or writing NVM. |
2887 | */ |
2888 | |
/*
 * List of NVM sections we are allowed/need to read.
 * Iterated when downloading the device NVM; sections a given chip
 * does not provide simply read back empty.
 */
const int iwm_nvm_to_read[] = {
	IWM_NVM_SECTION_TYPE_HW0,
	IWM_NVM_SECTION_TYPE_SW1,
	IWM_NVM_SECTION_TYPE_REGULATORY3,
	IWM_NVM_SECTION_TYPE_CALIBRATION4,
	IWM_NVM_SECTION_TYPE_PRODUCTION5,
	IWM_NVM_SECTION_TYPE_REGULATORY_SDP8,
	IWM_NVM_SECTION_TYPE_HW_800010,
	IWM_NVM_SECTION_TYPE_MAC_OVERRIDE11,
	IWM_NVM_SECTION_TYPE_PHY_SKU12,
};
2901 | |
2902 | #define IWM_NVM_DEFAULT_CHUNK_SIZE(2*1024) (2*1024) |
2903 | |
2904 | #define IWM_NVM_WRITE_OPCODE1 1 |
2905 | #define IWM_NVM_READ_OPCODE0 0 |
2906 | |
2907 | int |
2908 | iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset, |
2909 | uint16_t length, uint8_t *data, uint16_t *len) |
2910 | { |
2911 | offset = 0; |
2912 | struct iwm_nvm_access_cmd nvm_access_cmd = { |
2913 | .offset = htole16(offset)((__uint16_t)(offset)), |
2914 | .length = htole16(length)((__uint16_t)(length)), |
2915 | .type = htole16(section)((__uint16_t)(section)), |
2916 | .op_code = IWM_NVM_READ_OPCODE0, |
2917 | }; |
2918 | struct iwm_nvm_access_resp *nvm_resp; |
2919 | struct iwm_rx_packet *pkt; |
2920 | struct iwm_host_cmd cmd = { |
2921 | .id = IWM_NVM_ACCESS_CMD0x88, |
2922 | .flags = (IWM_CMD_WANT_RESP | IWM_CMD_SEND_IN_RFKILL), |
2923 | .resp_pkt_len = IWM_CMD_RESP_MAX(1 << 12), |
2924 | .data = { &nvm_access_cmd, }, |
2925 | }; |
2926 | int err, offset_read; |
2927 | size_t bytes_read; |
2928 | uint8_t *resp_data; |
2929 | |
2930 | cmd.len[0] = sizeof(struct iwm_nvm_access_cmd); |
2931 | |
2932 | err = iwm_send_cmd(sc, &cmd); |
2933 | if (err) |
2934 | return err; |
2935 | |
2936 | pkt = cmd.resp_pkt; |
2937 | if (pkt->hdr.flags & IWM_CMD_FAILED_MSK0x40) { |
2938 | err = EIO5; |
2939 | goto exit; |
2940 | } |
2941 | |
2942 | /* Extract NVM response */ |
2943 | nvm_resp = (void *)pkt->data; |
2944 | if (nvm_resp == NULL((void *)0)) |
2945 | return EIO5; |
2946 | |
2947 | err = le16toh(nvm_resp->status)((__uint16_t)(nvm_resp->status)); |
2948 | bytes_read = le16toh(nvm_resp->length)((__uint16_t)(nvm_resp->length)); |
2949 | offset_read = le16toh(nvm_resp->offset)((__uint16_t)(nvm_resp->offset)); |
2950 | resp_data = nvm_resp->data; |
2951 | if (err) { |
2952 | err = EINVAL22; |
2953 | goto exit; |
2954 | } |
2955 | |
2956 | if (offset_read != offset) { |
2957 | err = EINVAL22; |
2958 | goto exit; |
2959 | } |
2960 | |
2961 | if (bytes_read > length) { |
2962 | err = EINVAL22; |
2963 | goto exit; |
2964 | } |
2965 | |
2966 | memcpy(data + offset, resp_data, bytes_read)__builtin_memcpy((data + offset), (resp_data), (bytes_read)); |
2967 | *len = bytes_read; |
2968 | |
2969 | exit: |
2970 | iwm_free_resp(sc, &cmd); |
2971 | return err; |
2972 | } |
2973 | |
2974 | /* |
2975 | * Reads an NVM section completely. |
2976 | * NICs prior to 7000 family doesn't have a real NVM, but just read |
2977 | * section 0 which is the EEPROM. Because the EEPROM reading is unlimited |
2978 | * by uCode, we need to manually check in this case that we don't |
2979 | * overflow and try to read more than the EEPROM size. |
2980 | */ |
/*
 * Read a whole NVM section chunk by chunk into `data` (capacity
 * `max_len`), accumulating the total in *len.  The loop stops when a
 * chunk comes back shorter than requested (section exhausted) or the
 * buffer limit is reached.  Returns 0 or the first chunk-read error.
 */
int
iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data,
    uint16_t *len, size_t max_len)
{
	uint16_t chunklen, seglen;
	int err = 0;

	chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE(2*1024);
	*len = 0;

	/* Read NVM chunks until exhausted (reading less than requested) */
	while (seglen == chunklen && *len < max_len) {
		/* *len doubles as the running offset into the section. */
		err = iwm_nvm_read_chunk(sc,
		    section, *len, chunklen, data, &seglen);
		if (err)
			return err;

		*len += seglen;
	}

	return err;
}
3003 | |
/*
 * Return the TX antenna mask: the chains advertised by the firmware's
 * PHY config, further restricted by the NVM's valid-antenna mask when
 * the NVM provides one.
 */
uint8_t
iwm_fw_valid_tx_ant(struct iwm_softc *sc)
{
	uint8_t tx_ant;

	tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN(0xf << 16))
	    >> IWM_FW_PHY_CFG_TX_CHAIN_POS16);

	if (sc->sc_nvm.valid_tx_ant)
		tx_ant &= sc->sc_nvm.valid_tx_ant;

	return tx_ant;
}
3017 | |
/*
 * Return the RX antenna mask; same scheme as iwm_fw_valid_tx_ant()
 * but using the RX chain field of the firmware PHY config.
 */
uint8_t
iwm_fw_valid_rx_ant(struct iwm_softc *sc)
{
	uint8_t rx_ant;

	rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN(0xf << 20))
	    >> IWM_FW_PHY_CFG_RX_CHAIN_POS20);

	if (sc->sc_nvm.valid_rx_ant)
		rx_ant &= sc->sc_nvm.valid_rx_ant;

	return rx_ant;
}
3031 | |
/*
 * Populate net80211's channel table from the NVM channel list and its
 * per-channel flag words.  5GHz entries are dropped when the SKU does
 * not enable the 5GHz band; passive-scan and HT/40MHz capability flags
 * are set per channel as indicated by the NVM.
 */
void
iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags,
    const uint8_t *nvm_channels, int nchan)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_nvm_data *data = &sc->sc_nvm;
	int ch_idx;
	struct ieee80211_channel *channel;
	uint16_t ch_flags;
	int is_5ghz;
	int flags, hw_value;

	for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx)(((__uint16_t)(*(const uint16_t *)(nvm_ch_flags + ch_idx))));

		/* Indices past the 2GHz block are 5GHz channels. */
		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS14 &&
		    !data->sku_cap_band_52GHz_enable)
			ch_flags &= ~IWM_NVM_CHANNEL_VALID(1 << 0);

		if (!(ch_flags & IWM_NVM_CHANNEL_VALID(1 << 0)))
			continue;

		hw_value = nvm_channels[ch_idx];
		channel = &ic->ic_channels[hw_value];

		is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS14;
		if (!is_5ghz) {
			flags = IEEE80211_CHAN_2GHZ0x0080;
			channel->ic_flags
			    = IEEE80211_CHAN_CCK0x0020
			    | IEEE80211_CHAN_OFDM0x0040
			    | IEEE80211_CHAN_DYN0x0400
			    | IEEE80211_CHAN_2GHZ0x0080;
		} else {
			flags = IEEE80211_CHAN_5GHZ0x0100;
			channel->ic_flags =
			    IEEE80211_CHAN_A(0x0100 | 0x0040);
		}
		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);

		/* NVM says no active scanning/TX until traffic is seen. */
		if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE(1 << 3)))
			channel->ic_flags |= IEEE80211_CHAN_PASSIVE0x0200;

		if (data->sku_cap_11n_enable) {
			channel->ic_flags |= IEEE80211_CHAN_HT0x2000;
			if (ch_flags & IWM_NVM_CHANNEL_40MHZ(1 << 9))
				channel->ic_flags |= IEEE80211_CHAN_40MHZ0x8000;
		}
	}
}
3082 | |
/*
 * MIMO is usable only when neither the NVM SKU nor the user
 * (IEEE80211_F_NOMIMO) disables it.
 */
int
iwm_mimo_enabled(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	return !sc->sc_nvm.sku_cap_mimo_disable &&
	    (ic->ic_userflags & IEEE80211_F_NOMIMO0x00000008) == 0;
}
3091 | |
/*
 * Advertise the supported HT MCS set: MCS 0-7 always, and MCS 8-15
 * additionally when MIMO is enabled and at least two RX chains
 * (antenna pair A+B or B+C) are available.
 */
void
iwm_setup_ht_rates(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	uint8_t rx_ant;

	/* TX is supported with the same MCS as RX. */
	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED0x01;

	memset(ic->ic_sup_mcs, 0, sizeof(ic->ic_sup_mcs))__builtin_memset((ic->ic_sup_mcs), (0), (sizeof(ic->ic_sup_mcs)));
	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */

	if (!iwm_mimo_enabled(sc))
		return;

	rx_ant = iwm_fw_valid_rx_ant(sc);
	if ((rx_ant & IWM_ANT_AB((1 << 0) | (1 << 1))) == IWM_ANT_AB((1 << 0) | (1 << 1)) ||
	    (rx_ant & IWM_ANT_BC((1 << 1) | (1 << 2))) == IWM_ANT_BC((1 << 1) | (1 << 2)))
		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
}
3112 | |
3113 | void |
3114 | iwm_init_reorder_buffer(struct iwm_reorder_buffer *reorder_buf, |
3115 | uint16_t ssn, uint16_t buf_size) |
3116 | { |
3117 | reorder_buf->head_sn = ssn; |
3118 | reorder_buf->num_stored = 0; |
3119 | reorder_buf->buf_size = buf_size; |
3120 | reorder_buf->last_amsdu = 0; |
3121 | reorder_buf->last_sub_index = 0; |
3122 | reorder_buf->removed = 0; |
3123 | reorder_buf->valid = 0; |
3124 | reorder_buf->consec_oldsn_drops = 0; |
3125 | reorder_buf->consec_oldsn_ampdu_gp2 = 0; |
3126 | reorder_buf->consec_oldsn_prev_drop = 0; |
3127 | } |
3128 | |
/*
 * Tear down an RX block-ack reorder buffer: drop all buffered frames,
 * stop both the per-entry reorder timer and the session timer, and mark
 * the BAID slot invalid so it can be reused.
 */
void
iwm_clear_reorder_buffer(struct iwm_softc *sc, struct iwm_rxba_data *rxba)
{
	int i;
	struct iwm_reorder_buffer *reorder_buf = &rxba->reorder_buf;
	struct iwm_reorder_buf_entry *entry;

	for (i = 0; i < reorder_buf->buf_size; i++) {
		entry = &rxba->entries[i];
		/* Free any frames still queued in this window slot. */
		ml_purge(&entry->frames);
		timerclear(&entry->reorder_time)(&entry->reorder_time)->tv_sec = (&entry->reorder_time)->tv_usec = 0;
	}

	reorder_buf->removed = 1;
	timeout_del(&reorder_buf->reorder_timer);
	timerclear(&rxba->last_rx)(&rxba->last_rx)->tv_sec = (&rxba->last_rx)->tv_usec = 0;
	timeout_del(&rxba->session_timer);
	rxba->baid = IWM_RX_REORDER_DATA_INVALID_BAID0x7f;
}
3148 | |
3149 | #define RX_REORDER_BUF_TIMEOUT_MQ_USEC(100000ULL) (100000ULL) |
3150 | |
/*
 * Session-timer callback for an RX block-ack agreement.  If traffic
 * arrived recently enough the timer is re-armed; otherwise the BA
 * session is torn down via a DELBA request.  Runs at splnet.
 */
void
iwm_rx_ba_session_expired(void *arg)
{
	struct iwm_rxba_data *rxba = arg;
	struct iwm_softc *sc = rxba->sc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	struct timeval now, timeout, expiry;
	int s;

	s = splnet()splraise(0x7);
	/* Only act while running and while the BAID is still in use. */
	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN0x100) == 0 &&
	    ic->ic_state == IEEE80211_S_RUN &&
	    rxba->baid != IWM_RX_REORDER_DATA_INVALID_BAID0x7f) {
		getmicrouptime(&now);
		USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC(100000ULL), &timeout);
		timeradd(&rxba->last_rx, &timeout, &expiry)do { (&expiry)->tv_sec = (&rxba->last_rx)->tv_sec + (&timeout)->tv_sec; (&expiry)->tv_usec = (&rxba->last_rx)->tv_usec + (&timeout)->tv_usec; if ((&expiry)->tv_usec >= 1000000) { (&expiry)->tv_sec++; (&expiry)->tv_usec -= 1000000; } } while (0);
		if (timercmp(&now, &expiry, <)(((&now)->tv_sec == (&expiry)->tv_sec) ? ((&now)->tv_usec < (&expiry)->tv_usec) : ((&now)->tv_sec < (&expiry)->tv_sec))) {
			/* Recent RX activity: keep the session alive. */
			timeout_add_usec(&rxba->session_timer, rxba->timeout);
		} else {
			ic->ic_stats.is_ht_rx_ba_timeout++;
			ieee80211_delba_request(ic, ni,
			    IEEE80211_REASON_TIMEOUT, 0, rxba->tid);
		}
	}
	splx(s)spllower(s);
}
3178 | |
/*
 * Reorder-buffer timer callback.  Scans the BA window for frames whose
 * hold time has expired; if any did, releases everything up to the last
 * expired frame (closing the gap) and hands the frames to the stack.
 * Otherwise re-arms the timer for the first still-unexpired frame.
 * Runs at splnet.
 */
void
iwm_reorder_timer_expired(void *arg)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER(){ ((void *)0), ((void *)0), 0 };
	struct iwm_reorder_buffer *buf = arg;
	struct iwm_rxba_data *rxba = iwm_rxba_data_from_reorder_buf(buf);
	struct iwm_reorder_buf_entry *entries = &rxba->entries[0];
	struct iwm_softc *sc = rxba->sc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	int i, s;
	uint16_t sn = 0, index = 0;
	int expired = 0;
	int cont = 0;
	struct timeval now, timeout, expiry;

	/* Nothing buffered, or the session was torn down already. */
	if (!buf->num_stored || buf->removed)
		return;

	s = splnet()splraise(0x7);
	getmicrouptime(&now);
	USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC(100000ULL), &timeout);

	for (i = 0; i < buf->buf_size ; i++) {
		index = (buf->head_sn + i) % buf->buf_size;

		if (ml_empty(&entries[index].frames)((&entries[index].frames)->ml_len == 0)) {
			/*
			 * If there is a hole and the next frame didn't expire
			 * we want to break and not advance SN.
			 */
			cont = 0;
			continue;
		}
		timeradd(&entries[index].reorder_time, &timeout, &expiry)do { (&expiry)->tv_sec = (&entries[index].reorder_time)->tv_sec + (&timeout)->tv_sec; (&expiry)->tv_usec = (&entries[index].reorder_time)->tv_usec + (&timeout)->tv_usec; if ((&expiry)->tv_usec >= 1000000) { (&expiry)->tv_sec++; (&expiry)->tv_usec -= 1000000; } } while (0);
		if (!cont && timercmp(&now, &expiry, <)(((&now)->tv_sec == (&expiry)->tv_sec) ? ((&now)->tv_usec < (&expiry)->tv_usec) : ((&now)->tv_sec < (&expiry)->tv_sec)))
			break;

		expired = 1;
		/* continue until next hole after this expired frame */
		cont = 1;
		sn = (buf->head_sn + (i + 1)) & 0xfff;
	}

	if (expired) {
		/* SN is set to the last expired frame + 1 */
		iwm_release_frames(sc, ni, rxba, buf, sn, &ml);
		if_input(&sc->sc_ic.ic_ific_ac.ac_if, &ml);
		ic->ic_stats.is_ht_rx_ba_window_gap_timeout++;
	} else {
		/*
		 * If no frame expired and there are stored frames, index is now
		 * pointing to the first unexpired frame - modify reorder timeout
		 * accordingly.
		 */
		timeout_add_usec(&buf->reorder_timer,
		    RX_REORDER_BUF_TIMEOUT_MQ_USEC(100000ULL));
	}

	splx(s)spllower(s);
}
3240 | |
3241 | #define IWM_MAX_RX_BA_SESSIONS16 16 |
3242 | |
/*
 * Start (start != 0) or stop an RX block-ack session for a TID by
 * updating the firmware's station entry.  On start with MQ-RX hardware,
 * additionally claim the BAID slot the firmware assigned, initialize
 * its reorder buffer, and take over the session timeout from net80211.
 * Accepts or refuses the ADDBA request accordingly.  Runs at splnet.
 */
int
iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
    uint16_t ssn, uint16_t winsize, int timeout_val, int start)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_add_sta_cmd cmd;
	struct iwm_node *in = (void *)ni;
	int err, s;
	uint32_t status;
	size_t cmdsize;
	struct iwm_rxba_data *rxba = NULL((void *)0);
	uint8_t baid = 0;

	s = splnet()splraise(0x7);

	/* Enforce the driver-wide cap on concurrent RX BA sessions. */
	if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS16) {
		ieee80211_addba_req_refuse(ic, ni, tid);
		splx(s)spllower(s);
		return 0;
	}

	memset(&cmd, 0, sizeof(cmd))__builtin_memset((&cmd), (0), (sizeof(cmd)));

	cmd.sta_id = IWM_STATION_ID0;
	cmd.mac_id_n_color
	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color))((__uint32_t)(((in->in_id << (0)) | (in->in_color << (8)))));
	cmd.add_modify = IWM_STA_MODE_MODIFY1;

	if (start) {
		cmd.add_immediate_ba_tid = (uint8_t)tid;
		cmd.add_immediate_ba_ssn = ssn;
		cmd.rx_ba_window = winsize;
	} else {
		cmd.remove_immediate_ba_tid = (uint8_t)tid;
	}
	cmd.modify_mask = start ? IWM_STA_MODIFY_ADD_BA_TID(1 << 3) :
	    IWM_STA_MODIFY_REMOVE_BA_TID(1 << 4);

	status = IWM_ADD_STA_SUCCESS0x1;
	/* Older firmware uses the shorter v7 ADD_STA command layout. */
	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE)((sc->sc_ucode_api)[(30)>>3] & (1<<((30)&(8 -1)))))
		cmdsize = sizeof(cmd);
	else
		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA0x18, cmdsize, &cmd,
	    &status);
	if (!err && (status & IWM_ADD_STA_STATUS_MASK0xFF) != IWM_ADD_STA_SUCCESS0x1)
		err = EIO5;
	if (err) {
		if (start)
			ieee80211_addba_req_refuse(ic, ni, tid);
		splx(s)spllower(s);
		return err;
	}

	if (sc->sc_mqrx_supported) {
		/* Deaggregation is done in hardware. */
		if (start) {
			/* Firmware must hand back a valid, unused BAID. */
			if (!(status & IWM_ADD_STA_BAID_VALID_MASK0x8000)) {
				ieee80211_addba_req_refuse(ic, ni, tid);
				splx(s)spllower(s);
				return EIO5;
			}
			baid = (status & IWM_ADD_STA_BAID_MASK0x7F00) >>
			    IWM_ADD_STA_BAID_SHIFT8;
			if (baid == IWM_RX_REORDER_DATA_INVALID_BAID0x7f ||
			    baid >= nitems(sc->sc_rxba_data)(sizeof((sc->sc_rxba_data)) / sizeof((sc->sc_rxba_data)[0]))) {
				ieee80211_addba_req_refuse(ic, ni, tid);
				splx(s)spllower(s);
				return EIO5;
			}
			rxba = &sc->sc_rxba_data[baid];
			if (rxba->baid != IWM_RX_REORDER_DATA_INVALID_BAID0x7f) {
				ieee80211_addba_req_refuse(ic, ni, tid);
				splx(s)spllower(s);
				return 0;
			}
			rxba->sta_id = IWM_STATION_ID0;
			rxba->tid = tid;
			rxba->baid = baid;
			rxba->timeout = timeout_val;
			getmicrouptime(&rxba->last_rx);
			iwm_init_reorder_buffer(&rxba->reorder_buf, ssn,
			    winsize);
			if (timeout_val != 0) {
				struct ieee80211_rx_ba *ba;
				timeout_add_usec(&rxba->session_timer,
				    timeout_val);
				/* XXX disable net80211's BA timeout handler */
				ba = &ni->ni_rx_ba[tid];
				ba->ba_timeout_val = 0;
			}
		} else {
			/* Find and tear down the reorder state for this TID. */
			int i;
			for (i = 0; i < nitems(sc->sc_rxba_data)(sizeof((sc->sc_rxba_data)) / sizeof((sc->sc_rxba_data)[0])); i++) {
				rxba = &sc->sc_rxba_data[i];
				if (rxba->baid ==
				    IWM_RX_REORDER_DATA_INVALID_BAID0x7f)
					continue;
				if (rxba->tid != tid)
					continue;
				iwm_clear_reorder_buffer(sc, rxba);
				break;
			}
		}
	}

	if (start) {
		sc->sc_rx_ba_sessions++;
		ieee80211_addba_req_accept(ic, ni, tid);
	} else if (sc->sc_rx_ba_sessions > 0)
		sc->sc_rx_ba_sessions--;

	splx(s)spllower(s);
	return 0;
}
3358 | |
/*
 * Deferred task: push an updated MAC context to the firmware while
 * associated.  Bails out if the driver is shutting down or the
 * interface left RUN state.  Runs at splnet; drops its task reference
 * on all paths.
 */
void
iwm_mac_ctxt_task(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (void *)ic->ic_bss;
	int err, s = splnet()splraise(0x7);

	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN0x100) ||
	    ic->ic_state != IEEE80211_S_RUN) {
		refcnt_rele_wake(&sc->task_refs);
		splx(s)spllower(s);
		return;
	}

	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY2, 1);
	if (err)
		printf("%s: failed to update MAC\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));

	refcnt_rele_wake(&sc->task_refs);
	splx(s)spllower(s);
}
3381 | |
/*
 * net80211 callback: protection settings changed.  Defer the MAC
 * context update to a task; skipped while a state change is pending
 * (the newstate task will program the MAC context itself).
 */
void
iwm_updateprot(struct ieee80211com *ic)
{
	struct iwm_softc *sc = ic->ic_softcic_ac.ac_if.if_softc;

	if (ic->ic_state == IEEE80211_S_RUN &&
	    !task_pending(&sc->newstate_task)((&sc->newstate_task)->t_flags & 1))
		iwm_add_task(sc, systq, &sc->mac_ctxt_task);
}
3391 | |
/*
 * net80211 callback: slot-time setting changed.  Same deferral scheme
 * as iwm_updateprot().
 */
void
iwm_updateslot(struct ieee80211com *ic)
{
	struct iwm_softc *sc = ic->ic_softcic_ac.ac_if.if_softc;

	if (ic->ic_state == IEEE80211_S_RUN &&
	    !task_pending(&sc->newstate_task)((&sc->newstate_task)->t_flags & 1))
		iwm_add_task(sc, systq, &sc->mac_ctxt_task);
}
3401 | |
/*
 * net80211 callback: EDCA parameters changed.  Same deferral scheme
 * as iwm_updateprot().
 */
void
iwm_updateedca(struct ieee80211com *ic)
{
	struct iwm_softc *sc = ic->ic_softcic_ac.ac_if.if_softc;

	if (ic->ic_state == IEEE80211_S_RUN &&
	    !task_pending(&sc->newstate_task)((&sc->newstate_task)->t_flags & 1))
		iwm_add_task(sc, systq, &sc->mac_ctxt_task);
}
3411 | |
/*
 * Deferred task: reprogram the PHY context when the BSS channel's
 * secondary-channel offset (40MHz operation) changed, then reselect TX
 * rates.  Bails out when shutting down, not in RUN state, or no PHY
 * context is bound.  Runs at splnet; drops its task reference.
 */
void
iwm_phy_ctxt_task(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (void *)ic->ic_bss;
	struct ieee80211_node *ni = &in->in_ni;
	uint8_t chains, sco;
	int err, s = splnet()splraise(0x7);

	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN0x100) ||
	    ic->ic_state != IEEE80211_S_RUN ||
	    in->in_phyctxt == NULL((void *)0)) {
		refcnt_rele_wake(&sc->task_refs);
		splx(s)spllower(s);
		return;
	}

	chains = iwm_mimo_enabled(sc) ? 2 : 1;
	/* Use the AP's announced secondary-channel offset if 40MHz-capable. */
	if (ieee80211_node_supports_ht_chan40(ni))
		sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK0x03);
	else
		sco = IEEE80211_HTOP0_SCO_SCN0;
	if (in->in_phyctxt->sco != sco) {
		err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
		    in->in_phyctxt->channel, chains, chains, 0, sco);
		if (err)
			printf("%s: failed to update PHY\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
		iwm_setrates(in, 0);
	}

	refcnt_rele_wake(&sc->task_refs);
	splx(s)spllower(s);
}
3446 | |
/*
 * net80211 callback: operating channel parameters changed.  Defers the
 * PHY context update to a task, like the MAC-context callbacks above.
 */
void
iwm_updatechan(struct ieee80211com *ic)
{
	struct iwm_softc *sc = ic->ic_softcic_ac.ac_if.if_softc;

	if (ic->ic_state == IEEE80211_S_RUN &&
	    !task_pending(&sc->newstate_task)((&sc->newstate_task)->t_flags & 1))
		iwm_add_task(sc, systq, &sc->phy_ctxt_task);
}
3456 | |
3457 | int |
3458 | iwm_sta_tx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid, |
3459 | uint16_t ssn, uint16_t winsize, int start) |
3460 | { |
3461 | struct iwm_add_sta_cmd cmd; |
3462 | struct ieee80211com *ic = &sc->sc_ic; |
3463 | struct iwm_node *in = (void *)ni; |
3464 | int qid = IWM_FIRST_AGG_TX_QUEUE10 + tid; |
3465 | struct iwm_tx_ring *ring; |
3466 | enum ieee80211_edca_ac ac; |
3467 | int fifo; |
3468 | uint32_t status; |
3469 | int err; |
3470 | size_t cmdsize; |
3471 | |
3472 | /* Ensure we can map this TID to an aggregation queue. */ |
3473 | if (tid >= IWM_MAX_TID_COUNT8 || qid > IWM_LAST_AGG_TX_QUEUE(10 + 8 - 1)) |
3474 | return ENOSPC28; |
3475 | |
3476 | if (start) { |
3477 | if ((sc->tx_ba_queue_mask & (1 << qid)) != 0) |
3478 | return 0; |
3479 | } else { |
3480 | if ((sc->tx_ba_queue_mask & (1 << qid)) == 0) |
3481 | return 0; |
3482 | } |
3483 | |
3484 | ring = &sc->txq[qid]; |
3485 | ac = iwm_tid_to_ac[tid]; |
3486 | fifo = iwm_ac_to_tx_fifo[ac]; |
3487 | |
3488 | memset(&cmd, 0, sizeof(cmd))__builtin_memset((&cmd), (0), (sizeof(cmd))); |
3489 | |
3490 | cmd.sta_id = IWM_STATION_ID0; |
3491 | cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,((__uint32_t)(((in->in_id << (0)) | (in->in_color << (8))))) |
3492 | in->in_color))((__uint32_t)(((in->in_id << (0)) | (in->in_color << (8))))); |
3493 | cmd.add_modify = IWM_STA_MODE_MODIFY1; |
3494 | |
3495 | if (start) { |
3496 | /* Enable Tx aggregation for this queue. */ |
3497 | in->tid_disable_ampdu &= ~(1 << tid); |
3498 | in->tfd_queue_msk |= (1 << qid); |
3499 | } else { |
3500 | in->tid_disable_ampdu |= (1 << tid); |
3501 | /* |
3502 | * Queue remains enabled in the TFD queue mask |
3503 | * until we leave RUN state. |
3504 | */ |
3505 | err = iwm_flush_sta(sc, in); |
3506 | if (err) |
3507 | return err; |
3508 | } |
3509 | |
3510 | cmd.tfd_queue_msk |= htole32(in->tfd_queue_msk)((__uint32_t)(in->tfd_queue_msk)); |
3511 | cmd.tid_disable_tx = htole16(in->tid_disable_ampdu)((__uint16_t)(in->tid_disable_ampdu)); |
3512 | cmd.modify_mask = (IWM_STA_MODIFY_QUEUES(1 << 7) | |
3513 | IWM_STA_MODIFY_TID_DISABLE_TX(1 << 1)); |
3514 | |
3515 | if (start && (sc->qenablemsk & (1 << qid)) == 0) { |
3516 | if (!iwm_nic_lock(sc)) { |
3517 | if (start) |
3518 | ieee80211_addba_resp_refuse(ic, ni, tid, |
3519 | IEEE80211_STATUS_UNSPECIFIED); |
3520 | return EBUSY16; |
3521 | } |
3522 | err = iwm_enable_txq(sc, IWM_STATION_ID0, qid, fifo, 1, tid, |
3523 | ssn); |
3524 | iwm_nic_unlock(sc); |
3525 | if (err) { |
3526 | printf("%s: could not enable Tx queue %d (error %d)\n", |
3527 | DEVNAME(sc)((sc)->sc_dev.dv_xname), qid, err); |
3528 | if (start) |
3529 | ieee80211_addba_resp_refuse(ic, ni, tid, |
3530 | IEEE80211_STATUS_UNSPECIFIED); |
3531 | return err; |
3532 | } |
3533 | /* |
3534 | * If iwm_enable_txq() employed the SCD hardware bug |
3535 | * workaround we must skip the frame with seqnum SSN. |
3536 | */ |
3537 | if (ring->cur != IWM_AGG_SSN_TO_TXQ_IDX(ssn)((ssn) & (256 - 1))) { |
3538 | ssn = (ssn + 1) & 0xfff; |
3539 | KASSERT(ring->cur == IWM_AGG_SSN_TO_TXQ_IDX(ssn))((ring->cur == ((ssn) & (256 - 1))) ? (void)0 : __assert ("diagnostic ", "/usr/src/sys/dev/pci/if_iwm.c", 3539, "ring->cur == IWM_AGG_SSN_TO_TXQ_IDX(ssn)" )); |
3540 | ieee80211_output_ba_move_window(ic, ni, tid, ssn); |
3541 | ni->ni_qos_txseqs[tid] = ssn; |
3542 | } |
3543 | } |
3544 | |
3545 | if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE)((sc->sc_ucode_api)[(30)>>3] & (1<<((30)& (8 -1))))) |
3546 | cmdsize = sizeof(cmd); |
3547 | else |
3548 | cmdsize = sizeof(struct iwm_add_sta_cmd_v7); |
3549 | |
3550 | status = 0; |
3551 | err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA0x18, cmdsize, &cmd, &status); |
3552 | if (!err && (status & IWM_ADD_STA_STATUS_MASK0xFF) != IWM_ADD_STA_SUCCESS0x1) |
3553 | err = EIO5; |
3554 | if (err) { |
3555 | printf("%s: could not update sta (error %d)\n", |
3556 | DEVNAME(sc)((sc)->sc_dev.dv_xname), err); |
3557 | if (start) |
3558 | ieee80211_addba_resp_refuse(ic, ni, tid, |
3559 | IEEE80211_STATUS_UNSPECIFIED); |
3560 | return err; |
3561 | } |
3562 | |
3563 | if (start) { |
3564 | sc->tx_ba_queue_mask |= (1 << qid); |
3565 | ieee80211_addba_resp_accept(ic, ni, tid); |
3566 | } else { |
3567 | sc->tx_ba_queue_mask &= ~(1 << qid); |
3568 | |
3569 | /* |
3570 | * Clear pending frames but keep the queue enabled. |
3571 | * Firmware panics if we disable the queue here. |
3572 | */ |
3573 | iwm_txq_advance(sc, ring, ring->cur); |
3574 | iwm_clear_oactive(sc, ring); |
3575 | } |
3576 | |
3577 | return 0; |
3578 | } |
3579 | |
3580 | void |
3581 | iwm_ba_task(void *arg) |
3582 | { |
3583 | struct iwm_softc *sc = arg; |
3584 | struct ieee80211com *ic = &sc->sc_ic; |
3585 | struct ieee80211_node *ni = ic->ic_bss; |
3586 | int s = splnet()splraise(0x7); |
3587 | int tid, err = 0; |
3588 | |
3589 | if ((sc->sc_flags & IWM_FLAG_SHUTDOWN0x100) || |
3590 | ic->ic_state != IEEE80211_S_RUN) { |
3591 | refcnt_rele_wake(&sc->task_refs); |
3592 | splx(s)spllower(s); |
3593 | return; |
3594 | } |
3595 | |
3596 | for (tid = 0; tid < IWM_MAX_TID_COUNT8 && !err; tid++) { |
3597 | if (sc->sc_flags & IWM_FLAG_SHUTDOWN0x100) |
3598 | break; |
3599 | if (sc->ba_rx.start_tidmask & (1 << tid)) { |
3600 | struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid]; |
3601 | err = iwm_sta_rx_agg(sc, ni, tid, ba->ba_winstart, |
3602 | ba->ba_winsize, ba->ba_timeout_val, 1); |
3603 | sc->ba_rx.start_tidmask &= ~(1 << tid); |
3604 | } else if (sc->ba_rx.stop_tidmask & (1 << tid)) { |
3605 | err = iwm_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0); |
3606 | sc->ba_rx.stop_tidmask &= ~(1 << tid); |
3607 | } |
3608 | } |
3609 | |
3610 | for (tid = 0; tid < IWM_MAX_TID_COUNT8 && !err; tid++) { |
3611 | if (sc->sc_flags & IWM_FLAG_SHUTDOWN0x100) |
3612 | break; |
3613 | if (sc->ba_tx.start_tidmask & (1 << tid)) { |
3614 | struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid]; |
3615 | err = iwm_sta_tx_agg(sc, ni, tid, ba->ba_winstart, |
3616 | ba->ba_winsize, 1); |
3617 | sc->ba_tx.start_tidmask &= ~(1 << tid); |
3618 | } else if (sc->ba_tx.stop_tidmask & (1 << tid)) { |
3619 | err = iwm_sta_tx_agg(sc, ni, tid, 0, 0, 0); |
3620 | sc->ba_tx.stop_tidmask &= ~(1 << tid); |
3621 | } |
3622 | } |
3623 | |
3624 | /* |
3625 | * We "recover" from failure to start or stop a BA session |
3626 | * by resetting the device. |
3627 | */ |
3628 | if (err && (sc->sc_flags & IWM_FLAG_SHUTDOWN0x100) == 0) |
3629 | task_add(systq, &sc->init_task); |
3630 | |
3631 | refcnt_rele_wake(&sc->task_refs); |
3632 | splx(s)spllower(s); |
3633 | } |
3634 | |
3635 | /* |
3636 | * This function is called by upper layer when an ADDBA request is received |
3637 | * from another STA and before the ADDBA response is sent. |
3638 | */ |
3639 | int |
3640 | iwm_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni, |
3641 | uint8_t tid) |
3642 | { |
3643 | struct iwm_softc *sc = IC2IFP(ic)(&(ic)->ic_ac.ac_if)->if_softc; |
3644 | |
3645 | if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS16 || |
3646 | tid > IWM_MAX_TID_COUNT8) |
3647 | return ENOSPC28; |
3648 | |
3649 | if (sc->ba_rx.start_tidmask & (1 << tid)) |
3650 | return EBUSY16; |
3651 | |
3652 | sc->ba_rx.start_tidmask |= (1 << tid); |
3653 | iwm_add_task(sc, systq, &sc->ba_task); |
3654 | |
3655 | return EBUSY16; |
3656 | } |
3657 | |
3658 | /* |
3659 | * This function is called by upper layer on teardown of an HT-immediate |
3660 | * Block Ack agreement (eg. upon receipt of a DELBA frame). |
3661 | */ |
3662 | void |
3663 | iwm_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni, |
3664 | uint8_t tid) |
3665 | { |
3666 | struct iwm_softc *sc = IC2IFP(ic)(&(ic)->ic_ac.ac_if)->if_softc; |
3667 | |
3668 | if (tid > IWM_MAX_TID_COUNT8 || sc->ba_rx.stop_tidmask & (1 << tid)) |
3669 | return; |
3670 | |
3671 | sc->ba_rx.stop_tidmask |= (1 << tid); |
3672 | iwm_add_task(sc, systq, &sc->ba_task); |
3673 | } |
3674 | |
3675 | int |
3676 | iwm_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni, |
3677 | uint8_t tid) |
3678 | { |
3679 | struct iwm_softc *sc = IC2IFP(ic)(&(ic)->ic_ac.ac_if)->if_softc; |
3680 | struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid]; |
3681 | int qid = IWM_FIRST_AGG_TX_QUEUE10 + tid; |
3682 | |
3683 | /* We only implement Tx aggregation with DQA-capable firmware. */ |
3684 | if (!isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)((sc->sc_enabled_capa)[(12)>>3] & (1<<((12 )&(8 -1))))) |
3685 | return ENOTSUP91; |
3686 | |
3687 | /* Ensure we can map this TID to an aggregation queue. */ |
3688 | if (tid >= IWM_MAX_TID_COUNT8) |
3689 | return EINVAL22; |
3690 | |
3691 | /* We only support a fixed Tx aggregation window size, for now. */ |
3692 | if (ba->ba_winsize != IWM_FRAME_LIMIT64) |
3693 | return ENOTSUP91; |
3694 | |
3695 | /* Is firmware already using Tx aggregation on this queue? */ |
3696 | if ((sc->tx_ba_queue_mask & (1 << qid)) != 0) |
3697 | return ENOSPC28; |
3698 | |
3699 | /* Are we already processing an ADDBA request? */ |
3700 | if (sc->ba_tx.start_tidmask & (1 << tid)) |
3701 | return EBUSY16; |
3702 | |
3703 | sc->ba_tx.start_tidmask |= (1 << tid); |
3704 | iwm_add_task(sc, systq, &sc->ba_task); |
3705 | |
3706 | return EBUSY16; |
3707 | } |
3708 | |
3709 | void |
3710 | iwm_ampdu_tx_stop(struct ieee80211com *ic, struct ieee80211_node *ni, |
3711 | uint8_t tid) |
3712 | { |
3713 | struct iwm_softc *sc = IC2IFP(ic)(&(ic)->ic_ac.ac_if)->if_softc; |
3714 | int qid = IWM_FIRST_AGG_TX_QUEUE10 + tid; |
3715 | |
3716 | if (tid > IWM_MAX_TID_COUNT8 || sc->ba_tx.stop_tidmask & (1 << tid)) |
3717 | return; |
3718 | |
3719 | /* Is firmware currently using Tx aggregation on this queue? */ |
3720 | if ((sc->tx_ba_queue_mask & (1 << qid)) == 0) |
3721 | return; |
3722 | |
3723 | sc->ba_tx.stop_tidmask |= (1 << tid); |
3724 | iwm_add_task(sc, systq, &sc->ba_task); |
3725 | } |
3726 | |
3727 | void |
3728 | iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data, |
3729 | const uint16_t *mac_override, const uint16_t *nvm_hw) |
3730 | { |
3731 | const uint8_t *hw_addr; |
3732 | |
3733 | if (mac_override) { |
3734 | static const uint8_t reserved_mac[] = { |
3735 | 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00 |
3736 | }; |
3737 | |
3738 | hw_addr = (const uint8_t *)(mac_override + |
3739 | IWM_MAC_ADDRESS_OVERRIDE_80001); |
3740 | |
3741 | /* |
3742 | * Store the MAC address from MAO section. |
3743 | * No byte swapping is required in MAO section |
3744 | */ |
3745 | memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN)__builtin_memcpy((data->hw_addr), (hw_addr), (6)); |
3746 | |
3747 | /* |
3748 | * Force the use of the OTP MAC address in case of reserved MAC |
3749 | * address in the NVM, or if address is given but invalid. |
3750 | */ |
3751 | if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN)__builtin_memcmp((reserved_mac), (hw_addr), (6)) != 0 && |
3752 | (memcmp(etherbroadcastaddr, data->hw_addr,__builtin_memcmp((etherbroadcastaddr), (data->hw_addr), (sizeof (etherbroadcastaddr))) |
3753 | sizeof(etherbroadcastaddr))__builtin_memcmp((etherbroadcastaddr), (data->hw_addr), (sizeof (etherbroadcastaddr))) != 0) && |
3754 | (memcmp(etheranyaddr, data->hw_addr,__builtin_memcmp((etheranyaddr), (data->hw_addr), (sizeof( etheranyaddr))) |
3755 | sizeof(etheranyaddr))__builtin_memcmp((etheranyaddr), (data->hw_addr), (sizeof( etheranyaddr))) != 0) && |
3756 | !ETHER_IS_MULTICAST(data->hw_addr)(*(data->hw_addr) & 0x01)) |
3757 | return; |
3758 | } |
3759 | |
3760 | if (nvm_hw) { |
3761 | /* Read the mac address from WFMP registers. */ |
3762 | uint32_t mac_addr0, mac_addr1; |
3763 | |
3764 | if (!iwm_nic_lock(sc)) |
3765 | goto out; |
3766 | mac_addr0 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0))((__uint32_t)(iwm_read_prph(sc, 0xa03080))); |
3767 | mac_addr1 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1))((__uint32_t)(iwm_read_prph(sc, 0xa03084))); |
3768 | iwm_nic_unlock(sc); |
3769 | |
3770 | hw_addr = (const uint8_t *)&mac_addr0; |
3771 | data->hw_addr[0] = hw_addr[3]; |
3772 | data->hw_addr[1] = hw_addr[2]; |
3773 | data->hw_addr[2] = hw_addr[1]; |
3774 | data->hw_addr[3] = hw_addr[0]; |
3775 | |
3776 | hw_addr = (const uint8_t *)&mac_addr1; |
3777 | data->hw_addr[4] = hw_addr[1]; |
3778 | data->hw_addr[5] = hw_addr[0]; |
3779 | |
3780 | return; |
3781 | } |
3782 | out: |
3783 | printf("%s: mac address not found\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
3784 | memset(data->hw_addr, 0, sizeof(data->hw_addr))__builtin_memset((data->hw_addr), (0), (sizeof(data->hw_addr ))); |
3785 | } |
3786 | |
3787 | int |
3788 | iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw, |
3789 | const uint16_t *nvm_sw, const uint16_t *nvm_calib, |
3790 | const uint16_t *mac_override, const uint16_t *phy_sku, |
3791 | const uint16_t *regulatory, int n_regulatory) |
3792 | { |
3793 | struct iwm_nvm_data *data = &sc->sc_nvm; |
3794 | uint8_t hw_addr[ETHER_ADDR_LEN6]; |
3795 | uint32_t sku; |
3796 | uint16_t lar_config; |
3797 | |
3798 | data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION)(((__uint16_t)(*(const uint16_t *)(nvm_sw + 0)))); |
3799 | |
3800 | if (sc->sc_device_family == IWM_DEVICE_FAMILY_70001) { |
3801 | uint16_t radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG)(((__uint16_t)(*(const uint16_t *)(nvm_sw + 1)))); |
3802 | data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg)((radio_cfg >> 4) & 0x3); |
3803 | data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg)((radio_cfg >> 2) & 0x3); |
3804 | data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg)(radio_cfg & 0x3); |
3805 | data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg)((radio_cfg >> 6) & 0x3); |
3806 | |
3807 | sku = le16_to_cpup(nvm_sw + IWM_SKU)(((__uint16_t)(*(const uint16_t *)(nvm_sw + 2)))); |
3808 | } else { |
3809 | uint32_t radio_cfg = |
3810 | le32_to_cpup((uint32_t *)(phy_sku + IWM_RADIO_CFG_8000))(((__uint32_t)(*(const uint32_t *)((uint32_t *)(phy_sku + 0)) ))); |
3811 | data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg)((radio_cfg >> 12) & 0xFFF); |
3812 | data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg)((radio_cfg >> 8) & 0xF); |
3813 | data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg)((radio_cfg >> 4) & 0xF); |
3814 | data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg)(radio_cfg & 0xF); |
3815 | data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg)((radio_cfg >> 24) & 0xF); |
3816 | data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg)((radio_cfg >> 28) & 0xF); |
3817 | |
3818 | sku = le32_to_cpup((uint32_t *)(phy_sku + IWM_SKU_8000))(((__uint32_t)(*(const uint32_t *)((uint32_t *)(phy_sku + 2)) ))); |
3819 | } |
3820 | |
3821 | data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ(1 << 0); |
3822 | data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ(1 << 1); |
3823 | data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE(1 << 2); |
3824 | data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE(1 << 5); |
3825 | |
3826 | if (sc->sc_device_family >= IWM_DEVICE_FAMILY_80002) { |
3827 | uint16_t lar_offset = data->nvm_version < 0xE39 ? |
3828 | IWM_NVM_LAR_OFFSET_8000_OLD0x4C7 : |
3829 | IWM_NVM_LAR_OFFSET_80000x507; |
3830 | |
3831 | lar_config = le16_to_cpup(regulatory + lar_offset)(((__uint16_t)(*(const uint16_t *)(regulatory + lar_offset))) ); |
3832 | data->lar_enabled = !!(lar_config & |
3833 | IWM_NVM_LAR_ENABLED_80000x7); |
3834 | data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS_8000)(((__uint16_t)(*(const uint16_t *)(nvm_sw + 3)))); |
3835 | } else |
3836 | data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS)(((__uint16_t)(*(const uint16_t *)(nvm_sw + 3)))); |
3837 | |
3838 | |
3839 | /* The byte order is little endian 16 bit, meaning 214365 */ |
3840 | if (sc->sc_device_family == IWM_DEVICE_FAMILY_70001) { |
3841 | memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN)__builtin_memcpy((hw_addr), (nvm_hw + 0x15), (6)); |
3842 | data->hw_addr[0] = hw_addr[1]; |
3843 | data->hw_addr[1] = hw_addr[0]; |
3844 | data->hw_addr[2] = hw_addr[3]; |
3845 | data->hw_addr[3] = hw_addr[2]; |
3846 | data->hw_addr[4] = hw_addr[5]; |
3847 | data->hw_addr[5] = hw_addr[4]; |
3848 | } else |
3849 | iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw); |
3850 | |
3851 | if (sc->sc_device_family == IWM_DEVICE_FAMILY_70001) { |
3852 | if (sc->nvm_type == IWM_NVM_SDP) { |
3853 | iwm_init_channel_map(sc, regulatory, iwm_nvm_channels, |
3854 | MIN(n_regulatory, nitems(iwm_nvm_channels))(((n_regulatory)<((sizeof((iwm_nvm_channels)) / sizeof((iwm_nvm_channels )[0]))))?(n_regulatory):((sizeof((iwm_nvm_channels)) / sizeof ((iwm_nvm_channels)[0]))))); |
3855 | } else { |
3856 | iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS0x1E0 - 0x1C0], |
3857 | iwm_nvm_channels, nitems(iwm_nvm_channels)(sizeof((iwm_nvm_channels)) / sizeof((iwm_nvm_channels)[0]))); |
3858 | } |
3859 | } else |
3860 | iwm_init_channel_map(sc, ®ulatory[IWM_NVM_CHANNELS_80000], |
3861 | iwm_nvm_channels_8000, |
3862 | MIN(n_regulatory, nitems(iwm_nvm_channels_8000))(((n_regulatory)<((sizeof((iwm_nvm_channels_8000)) / sizeof ((iwm_nvm_channels_8000)[0]))))?(n_regulatory):((sizeof((iwm_nvm_channels_8000 )) / sizeof((iwm_nvm_channels_8000)[0]))))); |
3863 | |
3864 | data->calib_version = 255; /* TODO: |
3865 | this value will prevent some checks from |
3866 | failing, we need to check if this |
3867 | field is still needed, and if it does, |
3868 | where is it in the NVM */ |
3869 | |
3870 | return 0; |
3871 | } |
3872 | |
3873 | int |
3874 | iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections) |
3875 | { |
3876 | const uint16_t *hw, *sw, *calib, *mac_override = NULL((void *)0), *phy_sku = NULL((void *)0); |
3877 | const uint16_t *regulatory = NULL((void *)0); |
3878 | int n_regulatory = 0; |
3879 | |
3880 | /* Checking for required sections */ |
3881 | if (sc->sc_device_family == IWM_DEVICE_FAMILY_70001) { |
3882 | if (!sections[IWM_NVM_SECTION_TYPE_SW1].data || |
3883 | !sections[IWM_NVM_SECTION_TYPE_HW0].data) { |
3884 | return ENOENT2; |
3885 | } |
3886 | |
3887 | hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW0].data; |
3888 | |
3889 | if (sc->nvm_type == IWM_NVM_SDP) { |
3890 | if (!sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP8].data) |
3891 | return ENOENT2; |
3892 | regulatory = (const uint16_t *) |
3893 | sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP8].data; |
3894 | n_regulatory = |
3895 | sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP8].length; |
3896 | } |
3897 | } else if (sc->sc_device_family >= IWM_DEVICE_FAMILY_80002) { |
3898 | /* SW and REGULATORY sections are mandatory */ |
3899 | if (!sections[IWM_NVM_SECTION_TYPE_SW1].data || |
3900 | !sections[IWM_NVM_SECTION_TYPE_REGULATORY3].data) { |
3901 | return ENOENT2; |
3902 | } |
3903 | /* MAC_OVERRIDE or at least HW section must exist */ |
3904 | if (!sections[IWM_NVM_SECTION_TYPE_HW_800010].data && |
3905 | !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE11].data) { |
3906 | return ENOENT2; |
3907 | } |
3908 | |
3909 | /* PHY_SKU section is mandatory in B0 */ |
3910 | if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU12].data) { |
3911 | return ENOENT2; |
3912 | } |
3913 | |
3914 | regulatory = (const uint16_t *) |
3915 | sections[IWM_NVM_SECTION_TYPE_REGULATORY3].data; |
3916 | n_regulatory = sections[IWM_NVM_SECTION_TYPE_REGULATORY3].length; |
3917 | hw = (const uint16_t *) |
3918 | sections[IWM_NVM_SECTION_TYPE_HW_800010].data; |
3919 | mac_override = |
3920 | (const uint16_t *) |
3921 | sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE11].data; |
3922 | phy_sku = (const uint16_t *) |
3923 | sections[IWM_NVM_SECTION_TYPE_PHY_SKU12].data; |
3924 | } else { |
3925 | panic("unknown device family %d", sc->sc_device_family); |
3926 | } |
3927 | |
3928 | sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW1].data; |
3929 | calib = (const uint16_t *) |
3930 | sections[IWM_NVM_SECTION_TYPE_CALIBRATION4].data; |
3931 | |
3932 | /* XXX should pass in the length of every section */ |
3933 | return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override, |
3934 | phy_sku, regulatory, n_regulatory); |
3935 | } |
3936 | |
3937 | int |
3938 | iwm_nvm_init(struct iwm_softc *sc) |
3939 | { |
3940 | struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS13]; |
3941 | int i, section, err; |
3942 | uint16_t len; |
3943 | uint8_t *buf; |
3944 | const size_t bufsz = sc->sc_nvm_max_section_size; |
3945 | |
3946 | memset(nvm_sections, 0, sizeof(nvm_sections))__builtin_memset((nvm_sections), (0), (sizeof(nvm_sections))); |
3947 | |
3948 | buf = malloc(bufsz, M_DEVBUF2, M_WAIT0x0001); |
3949 | if (buf == NULL((void *)0)) |
3950 | return ENOMEM12; |
3951 | |
3952 | for (i = 0; i < nitems(iwm_nvm_to_read)(sizeof((iwm_nvm_to_read)) / sizeof((iwm_nvm_to_read)[0])); i++) { |
3953 | section = iwm_nvm_to_read[i]; |
3954 | KASSERT(section <= nitems(nvm_sections))((section <= (sizeof((nvm_sections)) / sizeof((nvm_sections )[0]))) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/pci/if_iwm.c" , 3954, "section <= nitems(nvm_sections)")); |
3955 | |
3956 | err = iwm_nvm_read_section(sc, section, buf, &len, bufsz); |
3957 | if (err) { |
3958 | err = 0; |
3959 | continue; |
3960 | } |
3961 | nvm_sections[section].data = malloc(len, M_DEVBUF2, M_WAIT0x0001); |
3962 | if (nvm_sections[section].data == NULL((void *)0)) { |
3963 | err = ENOMEM12; |
3964 | break; |
3965 | } |
3966 | memcpy(nvm_sections[section].data, buf, len)__builtin_memcpy((nvm_sections[section].data), (buf), (len)); |
3967 | nvm_sections[section].length = len; |
3968 | } |
3969 | free(buf, M_DEVBUF2, bufsz); |
3970 | if (err == 0) |
3971 | err = iwm_parse_nvm_sections(sc, nvm_sections); |
3972 | |
3973 | for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS13; i++) { |
3974 | if (nvm_sections[i].data != NULL((void *)0)) |
3975 | free(nvm_sections[i].data, M_DEVBUF2, |
3976 | nvm_sections[i].length); |
3977 | } |
3978 | |
3979 | return err; |
3980 | } |
3981 | |
3982 | int |
3983 | iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr, |
3984 | const uint8_t *section, uint32_t byte_cnt) |
3985 | { |
3986 | int err = EINVAL22; |
3987 | uint32_t chunk_sz, offset; |
3988 | |
3989 | chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt)(((0x20000)<(byte_cnt))?(0x20000):(byte_cnt)); |
3990 | |
3991 | for (offset = 0; offset < byte_cnt; offset += chunk_sz) { |
3992 | uint32_t addr, len; |
3993 | const uint8_t *data; |
3994 | |
3995 | addr = dst_addr + offset; |
3996 | len = MIN(chunk_sz, byte_cnt - offset)(((chunk_sz)<(byte_cnt - offset))?(chunk_sz):(byte_cnt - offset )); |
3997 | data = section + offset; |
3998 | |
3999 | err = iwm_firmware_load_chunk(sc, addr, data, len); |
4000 | if (err) |
4001 | break; |
4002 | } |
4003 | |
4004 | return err; |
4005 | } |
4006 | |
4007 | int |
4008 | iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr, |
4009 | const uint8_t *chunk, uint32_t byte_cnt) |
4010 | { |
4011 | struct iwm_dma_info *dma = &sc->fw_dma; |
4012 | int err; |
4013 | |
4014 | /* Copy firmware chunk into pre-allocated DMA-safe memory. */ |
4015 | memcpy(dma->vaddr, chunk, byte_cnt)__builtin_memcpy((dma->vaddr), (chunk), (byte_cnt)); |
4016 | bus_dmamap_sync(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (dma-> map), (0), (byte_cnt), (0x04)) |
4017 | dma->map, 0, byte_cnt, BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (dma-> map), (0), (byte_cnt), (0x04)); |
4018 | |
4019 | if (dst_addr >= IWM_FW_MEM_EXTENDED_START0x40000 && |
4020 | dst_addr <= IWM_FW_MEM_EXTENDED_END0x57FFF) { |
4021 | err = iwm_set_bits_prph(sc, IWM_LMPM_CHICK0xa01ff8, |
4022 | IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE0x01); |
4023 | if (err) |
4024 | return err; |
4025 | } |
4026 | |
4027 | sc->sc_fw_chunk_done = 0; |
4028 | |
4029 | if (!iwm_nic_lock(sc)) |
4030 | return EBUSY16; |
4031 | |
4032 | IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),(((sc)->sc_st)->write_4(((sc)->sc_sh), (((((0x1000) + 0xD00) + 0x20 * ((9))))), (((0x00000000))))) |
4033 | IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((((0x1000) + 0xD00) + 0x20 * ((9))))), (((0x00000000))))); |
4034 | IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),(((sc)->sc_st)->write_4(((sc)->sc_sh), (((((0x1000) + 0x9C8) + (((9)) - 9) * 0x4))), ((dst_addr)))) |
4035 | dst_addr)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((((0x1000) + 0x9C8) + (((9)) - 9) * 0x4))), ((dst_addr)))); |
4036 | IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),(((sc)->sc_st)->write_4(((sc)->sc_sh), (((((0x1000) + 0x900) + 0x8 * ((9))))), ((dma->paddr & (0xFFFFFFFF)) ))) |
4037 | dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((((0x1000) + 0x900) + 0x8 * ((9))))), ((dma->paddr & (0xFFFFFFFF)) ))); |
4038 | IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),(((sc)->sc_st)->write_4(((sc)->sc_sh), (((((0x1000) + 0x900) + 0x8 * ((9)) + 0x4))), (((iwm_get_dma_hi_addr(dma-> paddr) << 28) | byte_cnt)))) |
4039 | (iwm_get_dma_hi_addr(dma->paddr)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((((0x1000) + 0x900) + 0x8 * ((9)) + 0x4))), (((iwm_get_dma_hi_addr(dma-> paddr) << 28) | byte_cnt)))) |
4040 | << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((((0x1000) + 0x900) + 0x8 * ((9)) + 0x4))), (((iwm_get_dma_hi_addr(dma-> paddr) << 28) | byte_cnt)))); |
4041 | IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),(((sc)->sc_st)->write_4(((sc)->sc_sh), (((((0x1000) + 0xD00) + 0x20 * ((9)) + 0x8))), ((1 << (20) | 1 << (12) | (0x00000003))))) |
4042 | 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |(((sc)->sc_st)->write_4(((sc)->sc_sh), (((((0x1000) + 0xD00) + 0x20 * ((9)) + 0x8))), ((1 << (20) | 1 << (12) | (0x00000003))))) |
4043 | 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |(((sc)->sc_st)->write_4(((sc)->sc_sh), (((((0x1000) + 0xD00) + 0x20 * ((9)) + 0x8))), ((1 << (20) | 1 << (12) | (0x00000003))))) |
4044 | IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((((0x1000) + 0xD00) + 0x20 * ((9)) + 0x8))), ((1 << (20) | 1 << (12) | (0x00000003))))); |
4045 | IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),(((sc)->sc_st)->write_4(((sc)->sc_sh), (((((0x1000) + 0xD00) + 0x20 * ((9))))), (((0x80000000) | (0x00000000) | (0x00100000 ))))) |
4046 | IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |(((sc)->sc_st)->write_4(((sc)->sc_sh), (((((0x1000) + 0xD00) + 0x20 * ((9))))), (((0x80000000) | (0x00000000) | (0x00100000 ))))) |
4047 | IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |(((sc)->sc_st)->write_4(((sc)->sc_sh), (((((0x1000) + 0xD00) + 0x20 * ((9))))), (((0x80000000) | (0x00000000) | (0x00100000 ))))) |
4048 | IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((((0x1000) + 0xD00) + 0x20 * ((9))))), (((0x80000000) | (0x00000000) | (0x00100000 ))))); |
4049 | |
4050 | iwm_nic_unlock(sc); |
4051 | |
4052 | /* Wait for this segment to load. */ |
4053 | err = 0; |
4054 | while (!sc->sc_fw_chunk_done) { |
4055 | err = tsleep_nsec(&sc->sc_fw, 0, "iwmfw", SEC_TO_NSEC(1)); |
4056 | if (err) |
4057 | break; |
4058 | } |
4059 | |
4060 | if (!sc->sc_fw_chunk_done) |
4061 | printf("%s: fw chunk addr 0x%x len %d failed to load\n", |
4062 | DEVNAME(sc)((sc)->sc_dev.dv_xname), dst_addr, byte_cnt); |
4063 | |
4064 | if (dst_addr >= IWM_FW_MEM_EXTENDED_START0x40000 && |
4065 | dst_addr <= IWM_FW_MEM_EXTENDED_END0x57FFF) { |
4066 | int err2 = iwm_clear_bits_prph(sc, IWM_LMPM_CHICK0xa01ff8, |
4067 | IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE0x01); |
4068 | if (!err) |
4069 | err = err2; |
4070 | } |
4071 | |
4072 | return err; |
4073 | } |
4074 | |
4075 | int |
4076 | iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type) |
4077 | { |
4078 | struct iwm_fw_sects *fws; |
4079 | int err, i; |
4080 | void *data; |
4081 | uint32_t dlen; |
4082 | uint32_t offset; |
4083 | |
4084 | fws = &sc->sc_fw.fw_sects[ucode_type]; |
4085 | for (i = 0; i < fws->fw_count; i++) { |
4086 | data = fws->fw_sect[i].fws_data; |
4087 | dlen = fws->fw_sect[i].fws_len; |
4088 | offset = fws->fw_sect[i].fws_devoff; |
4089 | if (dlen > sc->sc_fwdmasegsz) { |
4090 | err = EFBIG27; |
4091 | } else |
4092 | err = iwm_firmware_load_sect(sc, offset, data, dlen); |
4093 | if (err) { |
4094 | printf("%s: could not load firmware chunk %u of %u\n", |
4095 | DEVNAME(sc)((sc)->sc_dev.dv_xname), i, fws->fw_count); |
4096 | return err; |
4097 | } |
4098 | } |
4099 | |
4100 | iwm_enable_interrupts(sc); |
4101 | |
4102 | IWM_WRITE(sc, IWM_CSR_RESET, 0)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x020))), ( (0)))); |
4103 | |
4104 | return 0; |
4105 | } |
4106 | |
4107 | int |
4108 | iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws, |
4109 | int cpu, int *first_ucode_section) |
4110 | { |
4111 | int shift_param; |
4112 | int i, err = 0, sec_num = 0x1; |
4113 | uint32_t val, last_read_idx = 0; |
4114 | void *data; |
4115 | uint32_t dlen; |
4116 | uint32_t offset; |
4117 | |
4118 | if (cpu == 1) { |
4119 | shift_param = 0; |
4120 | *first_ucode_section = 0; |
4121 | } else { |
4122 | shift_param = 16; |
4123 | (*first_ucode_section)++; |
4124 | } |
4125 | |
4126 | for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX16; i++) { |
4127 | last_read_idx = i; |
4128 | data = fws->fw_sect[i].fws_data; |
4129 | dlen = fws->fw_sect[i].fws_len; |
4130 | offset = fws->fw_sect[i].fws_devoff; |
4131 | |
4132 | /* |
4133 | * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between |
4134 | * CPU1 to CPU2. |
4135 | * PAGING_SEPARATOR_SECTION delimiter - separate between |
4136 | * CPU2 non paged to CPU2 paging sec. |
4137 | */ |
4138 | if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION0xFFFFCCCC || |
4139 | offset == IWM_PAGING_SEPARATOR_SECTION0xAAAABBBB) |
4140 | break; |
4141 | |
4142 | if (dlen > sc->sc_fwdmasegsz) { |
4143 | err = EFBIG27; |
4144 | } else |
4145 | err = iwm_firmware_load_sect(sc, offset, data, dlen); |
4146 | if (err) { |
4147 | printf("%s: could not load firmware chunk %d " |
4148 | "(error %d)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), i, err); |
4149 | return err; |
4150 | } |
4151 | |
4152 | /* Notify the ucode of the loaded section number and status */ |
4153 | if (iwm_nic_lock(sc)) { |
4154 | val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS)(((sc)->sc_st)->read_4(((sc)->sc_sh), ((0x1af0)))); |
4155 | val = val | (sec_num << shift_param); |
4156 | IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((0x1af0)), ( (val)))); |
4157 | sec_num = (sec_num << 1) | 0x1; |
4158 | iwm_nic_unlock(sc); |
4159 | } else { |
4160 | err = EBUSY16; |
4161 | printf("%s: could not load firmware chunk %d " |
4162 | "(error %d)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), i, err); |
4163 | return err; |
4164 | } |
4165 | } |
4166 | |
4167 | *first_ucode_section = last_read_idx; |
4168 | |
4169 | if (iwm_nic_lock(sc)) { |
4170 | if (cpu == 1) |
4171 | IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((0x1af0)), ( (0xFFFF)))); |
4172 | else |
4173 | IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((0x1af0)), ( (0xFFFFFFFF)))); |
4174 | iwm_nic_unlock(sc); |
4175 | } else { |
4176 | err = EBUSY16; |
4177 | printf("%s: could not finalize firmware loading (error %d)\n", |
4178 | DEVNAME(sc)((sc)->sc_dev.dv_xname), err); |
4179 | return err; |
4180 | } |
4181 | |
4182 | return 0; |
4183 | } |
4184 | |
4185 | int |
4186 | iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type) |
4187 | { |
4188 | struct iwm_fw_sects *fws; |
4189 | int err = 0; |
4190 | int first_ucode_section; |
4191 | |
4192 | fws = &sc->sc_fw.fw_sects[ucode_type]; |
4193 | |
4194 | /* configure the ucode to be ready to get the secured image */ |
4195 | /* release CPU reset */ |
4196 | if (iwm_nic_lock(sc)) { |
4197 | iwm_write_prph(sc, IWM_RELEASE_CPU_RESET0x300c, |
4198 | IWM_RELEASE_CPU_RESET_BIT0x1000000); |
4199 | iwm_nic_unlock(sc); |
4200 | } |
4201 | |
4202 | /* load to FW the binary Secured sections of CPU1 */ |
4203 | err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section); |
4204 | if (err) |
4205 | return err; |
4206 | |
4207 | /* load to FW the binary sections of CPU2 */ |
4208 | err = iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section); |
4209 | if (err) |
4210 | return err; |
4211 | |
4212 | iwm_enable_interrupts(sc); |
4213 | return 0; |
4214 | } |
4215 | |
/*
 * Top-level firmware upload, called at IPL_NET (asserted below).
 * Resets the "ucode alive" bookkeeping, dispatches to the
 * family-specific loader, then sleeps up to one second waiting for
 * the alive interrupt to wake us on &sc->sc_uc.
 */
4216 | int
4217 | iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4218 | {
4219 | int err;
4220 | 
4221 | splassert(IPL_NET)do { if (splassert_ctl > 0) { splassert_check(0x7, __func__ ); } } while (0);
4222 | 
4223 | sc->sc_uc.uc_intr = 0;
4224 | sc->sc_uc.uc_ok = 0;
4225 | 
4226 | if (sc->sc_device_family >= IWM_DEVICE_FAMILY_80002)
4227 | err = iwm_load_firmware_8000(sc, ucode_type);
4228 | else
4229 | err = iwm_load_firmware_7000(sc, ucode_type);
4230 | 
4231 | if (err)
4232 | return err;
4233 | 
4234 | /* wait for the firmware to load */
4235 | err = tsleep_nsec(&sc->sc_uc, 0, "iwmuc", SEC_TO_NSEC(1));
4236 | if (err || !sc->sc_uc.uc_ok)
4237 | printf("%s: could not load firmware\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
4238 | 
/*
 * NOTE(review): if the tsleep succeeds (err == 0) but uc_ok is still 0,
 * this prints an error yet returns 0 — presumably callers re-check
 * sc_uc.uc_ok; confirm before relying on the return value alone.
 */
4239 | return err;
4240 | }
4241 | |
/*
 * Prepare the NIC and kick off a firmware load: ack all pending
 * interrupts, initialize the NIC, clear the rfkill handshake bits
 * (repeatedly, as the comments below note), enable the firmware-load
 * interrupt, and hand off to iwm_load_firmware().
 */
4242 | int
4243 | iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4244 | {
4245 | int err;
4246 | 
4247 | IWM_WRITE(sc, IWM_CSR_INT, ~0)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x008))), ( (~0))));
4248 | 
4249 | err = iwm_nic_init(sc);
4250 | if (err) {
4251 | printf("%s: unable to init nic\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
4252 | return err;
4253 | }
4254 | 
4255 | /* make sure rfkill handshake bits are cleared */
4256 | IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x05c))), ( ((0x00000002)))));
4257 | IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x05c))), ( ((0x00000004)))))
4258 | IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x05c))), ( ((0x00000004)))));
4259 | 
4260 | /* clear (again), then enable firmware load interrupt */
4261 | IWM_WRITE(sc, IWM_CSR_INT, ~0)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x008))), ( (~0))));
4262 | iwm_enable_fwload_interrupt(sc);
4263 | 
4264 | /* really make sure rfkill handshake bits are cleared */
4265 | /* maybe we should write a few times more? just to make sure */
4266 | IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x05c))), ( ((0x00000002)))));
4267 | IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x05c))), ( ((0x00000002)))));
4268 | 
4269 | return iwm_load_firmware(sc, ucode_type);
4270 | }
4271 | |
/*
 * Tell the firmware which TX antennas are valid, via the
 * TX_ANT_CONFIGURATION command.  Returns the iwm_send_cmd_pdu() result.
 */
4272 | int
4273 | iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
4274 | {
4275 | struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
4276 | .valid = htole32(valid_tx_ant)((__uint32_t)(valid_tx_ant)),
4277 | };
4278 | 
4279 | return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD0x98,
4280 | 0, sizeof(tx_ant_cmd), &tx_ant_cmd);
4281 | }
4282 | |
/*
 * Send the PHY configuration command: combines the firmware PHY config
 * with any extra PHY config bits, plus the default calibration
 * event/flow triggers for the currently running ucode image.
 */
4283 | int
4284 | iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
4285 | {
4286 | struct iwm_phy_cfg_cmd phy_cfg_cmd;
4287 | enum iwm_ucode_type ucode_type = sc->sc_uc_current;
4288 | 
4289 | phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config |((__uint32_t)(sc->sc_fw_phy_config | sc->sc_extra_phy_config ))
4290 | sc->sc_extra_phy_config)((__uint32_t)(sc->sc_fw_phy_config | sc->sc_extra_phy_config ));
4291 | phy_cfg_cmd.calib_control.event_trigger =
4292 | sc->sc_default_calib[ucode_type].event_trigger;
4293 | phy_cfg_cmd.calib_control.flow_trigger =
4294 | sc->sc_default_calib[ucode_type].flow_trigger;
4295 | 
4296 | return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD0x6a, 0,
4297 | sizeof(phy_cfg_cmd), &phy_cfg_cmd);
4298 | }
4299 | |
/*
 * Enable DQA (dynamic queue allocation) mode in the firmware,
 * announcing which queue the driver uses as its command queue.
 */
4300 | int
4301 | iwm_send_dqa_cmd(struct iwm_softc *sc)
4302 | {
4303 | struct iwm_dqa_enable_cmd dqa_cmd = {
4304 | .cmd_queue = htole32(IWM_DQA_CMD_QUEUE)((__uint32_t)(0)),
4305 | };
4306 | uint32_t cmd_id;
4307 | 
4308 | cmd_id = iwm_cmd_id(IWM_DQA_ENABLE_CMD0x00, IWM_DATA_PATH_GROUP0x5, 0);
4309 | return iwm_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
4310 | }
4311 | |
/*
 * Load a ucode image of the given type and wait until the firmware
 * reports itself alive.  Also selects the command queue id based on
 * the DQA capability, and, when the image carries a paging section,
 * saves and configures the firmware paging mechanism.
 * On iwm_start_fw() failure the previous sc_uc_current is restored.
 */
4312 | int
4313 | iwm_load_ucode_wait_alive(struct iwm_softc *sc,
4314 | enum iwm_ucode_type ucode_type)
4315 | {
4316 | enum iwm_ucode_type old_type = sc->sc_uc_current;
4317 | struct iwm_fw_sects *fw = &sc->sc_fw.fw_sects[ucode_type];
4318 | int err;
4319 | 
4320 | err = iwm_read_firmware(sc);
4321 | if (err)
4322 | return err;
4323 | 
/* DQA-capable firmware uses a different command queue number. */
4324 | if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)((sc->sc_enabled_capa)[(12)>>3] & (1<<((12 )&(8 -1)))))
4325 | sc->cmdqid = IWM_DQA_CMD_QUEUE0;
4326 | else
4327 | sc->cmdqid = IWM_CMD_QUEUE9;
4328 | 
4329 | sc->sc_uc_current = ucode_type;
4330 | err = iwm_start_fw(sc, ucode_type);
4331 | if (err) {
4332 | sc->sc_uc_current = old_type;
4333 | return err;
4334 | }
4335 | 
4336 | err = iwm_post_alive(sc);
4337 | if (err)
4338 | return err;
4339 | 
4340 | /*
4341 | * configure and operate fw paging mechanism.
4342 | * driver configures the paging flow only once, CPU2 paging image
4343 | * included in the IWM_UCODE_INIT image.
4344 | */
4345 | if (fw->paging_mem_size) {
4346 | err = iwm_save_fw_paging(sc, fw);
4347 | if (err) {
4348 | printf("%s: failed to save the FW paging image\n",
4349 | DEVNAME(sc)((sc)->sc_dev.dv_xname));
4350 | return err;
4351 | }
4352 | 
4353 | err = iwm_send_paging_cmd(sc, fw);
4354 | if (err) {
4355 | printf("%s: failed to send the paging cmd\n",
4356 | DEVNAME(sc)((sc)->sc_dev.dv_xname));
4357 | iwm_free_fw_paging(sc);
4358 | return err;
4359 | }
4360 | }
4361 | 
4362 | return 0;
4363 | }
4364 | |
/*
 * Run the INIT firmware image.  With justnvm != 0 this only loads the
 * firmware far enough to read the NVM (and derive the MAC address),
 * then returns.  Otherwise it configures smart-fifo, TX antennas and
 * PHY, then waits (up to 2s per wakeup) for both the init-complete and
 * calibration-complete notifications.  Fails early with EPERM when the
 * hardware rfkill switch is engaged and firmware would actually run.
 * Runs under splnet() for the duration.
 */
4365 | int
4366 | iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
4367 | {
4368 | const int wait_flags = (IWM_INIT_COMPLETE0x01 | IWM_CALIB_COMPLETE0x02);
4369 | int err, s;
4370 | 
4371 | if ((sc->sc_flags & IWM_FLAG_RFKILL0x02) && !justnvm) {
4372 | printf("%s: radio is disabled by hardware switch\n",
4373 | DEVNAME(sc)((sc)->sc_dev.dv_xname));
4374 | return EPERM1;
4375 | }
4376 | 
4377 | s = splnet()splraise(0x7);
4378 | sc->sc_init_complete = 0;
4379 | err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT);
4380 | if (err) {
4381 | printf("%s: failed to load init firmware\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
4382 | splx(s)spllower(s);
4383 | return err;
4384 | }
4385 | 
/* Older (pre-8000) devices need bluetooth-coexistence setup here. */
4386 | if (sc->sc_device_family < IWM_DEVICE_FAMILY_80002) {
4387 | err = iwm_send_bt_init_conf(sc);
4388 | if (err) {
4389 | printf("%s: could not init bt coex (error %d)\n",
4390 | DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
4391 | splx(s)spllower(s);
4392 | return err;
4393 | }
4394 | }
4395 | 
4396 | if (justnvm) {
4397 | err = iwm_nvm_init(sc);
4398 | if (err) {
4399 | printf("%s: failed to read nvm\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
4400 | splx(s)spllower(s);
4401 | return err;
4402 | }
4403 | 
/* Adopt the NVM MAC address unless one was already configured. */
4404 | if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr)(__builtin_memcmp((etheranyaddr), (sc->sc_ic.ic_myaddr), ( 6)) == 0))
4405 | IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,__builtin_memcpy((sc->sc_ic.ic_myaddr), (sc->sc_nvm.hw_addr ), (6))
4406 | sc->sc_nvm.hw_addr)__builtin_memcpy((sc->sc_ic.ic_myaddr), (sc->sc_nvm.hw_addr ), (6));
4407 | 
4408 | splx(s)spllower(s);
4409 | return 0;
4410 | }
4411 | 
4412 | err = iwm_sf_config(sc, IWM_SF_INIT_OFF3);
4413 | if (err) {
4414 | splx(s)spllower(s);
4415 | return err;
4416 | }
4417 | 
4418 | /* Send TX valid antennas before triggering calibrations */
4419 | err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
4420 | if (err) {
4421 | splx(s)spllower(s);
4422 | return err;
4423 | }
4424 | 
4425 | /*
4426 | * Send phy configurations command to init uCode
4427 | * to start the 16.0 uCode init image internal calibrations.
4428 | */
4429 | err = iwm_send_phy_cfg_cmd(sc);
4430 | if (err) {
4431 | splx(s)spllower(s);
4432 | return err;
4433 | }
4434 | 
4435 | /*
4436 | * Nothing to do but wait for the init complete and phy DB
4437 | * notifications from the firmware.
4438 | */
4439 | while ((sc->sc_init_complete & wait_flags) != wait_flags) {
4440 | err = tsleep_nsec(&sc->sc_init_complete, 0, "iwminit",
4441 | SEC_TO_NSEC(2));
4442 | if (err)
4443 | break;
4444 | }
4445 | 
4446 | splx(s)spllower(s);
4447 | return err;
4448 | }
4449 | |
/*
 * Enable LTR (Latency Tolerance Reporting) in the firmware, but only
 * if the platform indicated LTR support (sc_ltr_enabled); otherwise
 * this is a no-op that returns 0.
 */
4450 | int
4451 | iwm_config_ltr(struct iwm_softc *sc)
4452 | {
4453 | struct iwm_ltr_config_cmd cmd = {
4454 | .flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE)((__uint32_t)(0x00000001)),
4455 | };
4456 | 
4457 | if (!sc->sc_ltr_enabled)
4458 | return 0;
4459 | 
4460 | return iwm_send_cmd_pdu(sc, IWM_LTR_CONFIG0xee, 0, sizeof(cmd), &cmd);
4461 | }
4462 | |
/*
 * Allocate an mbuf cluster for RX ring slot 'idx', DMA-map it, and
 * write its bus address into the RX descriptor.  MQ-RX hardware takes
 * a 64-bit address; legacy hardware takes the address shifted right by
 * 8 in a 32-bit descriptor.  If the slot already held a loaded mbuf we
 * are replacing a live buffer, so a subsequent map failure is fatal
 * (the old descriptor address would now be stale).
 */
4463 | int
4464 | iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
4465 | {
4466 | struct iwm_rx_ring *ring = &sc->rxq;
4467 | struct iwm_rx_data *data = &ring->data[idx];
4468 | struct mbuf *m;
4469 | int err;
4470 | int fatal = 0;
4471 | 
4472 | m = m_gethdr(M_DONTWAIT0x0002, MT_DATA1);
4473 | if (m == NULL((void *)0))
4474 | return ENOBUFS55;
4475 | 
4476 | if (size <= MCLBYTES(1 << 11)) {
4477 | MCLGET(m, M_DONTWAIT)(void) m_clget((m), (0x0002), (1 << 11));
4478 | } else {
4479 | MCLGETL(m, M_DONTWAIT, IWM_RBUF_SIZE)m_clget((m), (0x0002), (4096));
4480 | }
4481 | if ((m->m_flagsm_hdr.mh_flags & M_EXT0x0001) == 0) {
4482 | m_freem(m);
4483 | return ENOBUFS55;
4484 | }
4485 | 
4486 | if (data->m != NULL((void *)0)) {
4487 | bus_dmamap_unload(sc->sc_dmat, data->map)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (data ->map));
4488 | fatal = 1;
4489 | }
4490 | 
4491 | m->m_lenm_hdr.mh_len = m->m_pkthdrM_dat.MH.MH_pkthdr.len = m->m_extM_dat.MH.MH_dat.MH_ext.ext_size;
4492 | err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,(*(sc->sc_dmat)->_dmamap_load_mbuf)((sc->sc_dmat), ( data->map), (m), (0x0200|0x0001))
4493 | BUS_DMA_READ|BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamap_load_mbuf)((sc->sc_dmat), ( data->map), (m), (0x0200|0x0001));
4494 | if (err) {
4495 | /* XXX */
4496 | if (fatal)
4497 | panic("iwm: could not load RX mbuf");
4498 | m_freem(m);
4499 | return err;
4500 | }
4501 | data->m = m;
4502 | bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (data-> map), (0), (size), (0x01));
4503 | 
4504 | /* Update RX descriptor. */
4505 | if (sc->sc_mqrx_supported) {
4506 | ((uint64_t *)ring->desc)[idx] =
4507 | htole64(data->map->dm_segs[0].ds_addr)((__uint64_t)(data->map->dm_segs[0].ds_addr));
4508 | bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (ring-> free_desc_dma.map), (idx * sizeof(uint64_t)), (sizeof(uint64_t )), (0x04))
4509 | idx * sizeof(uint64_t), sizeof(uint64_t),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (ring-> free_desc_dma.map), (idx * sizeof(uint64_t)), (sizeof(uint64_t )), (0x04))
4510 | BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (ring-> free_desc_dma.map), (idx * sizeof(uint64_t)), (sizeof(uint64_t )), (0x04));
4511 | } else {
4512 | ((uint32_t *)ring->desc)[idx] =
4513 | htole32(data->map->dm_segs[0].ds_addr >> 8)((__uint32_t)(data->map->dm_segs[0].ds_addr >> 8) );
4514 | bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (ring-> free_desc_dma.map), (idx * sizeof(uint32_t)), (sizeof(uint32_t )), (0x04))
4515 | idx * sizeof(uint32_t), sizeof(uint32_t),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (ring-> free_desc_dma.map), (idx * sizeof(uint32_t)), (sizeof(uint32_t )), (0x04))
4516 | BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (ring-> free_desc_dma.map), (idx * sizeof(uint32_t)), (sizeof(uint32_t )), (0x04));
4517 | }
4518 | 
4519 | return 0;
4520 | }
4521 | |
4522 | /* |
4523 | * RSSI values are reported by the FW as positive values - need to negate |
4524 | * to obtain their dBM. Account for missing antennas by replacing 0 |
4525 | * values by -256dBm: practically 0 power and a non-feasible 8 bit value. |
4526 | */ |
4527 | int |
4528 | iwm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info) |
4529 | { |
4530 | int energy_a, energy_b, energy_c, max_energy; |
4531 | uint32_t val; |
4532 | |
4533 | val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX])((__uint32_t)(phy_info->non_cfg_phy[1])); |
4534 | energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK0x000000ff) >> |
4535 | IWM_RX_INFO_ENERGY_ANT_A_POS0; |
4536 | energy_a = energy_a ? -energy_a : -256; |
4537 | energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK0x0000ff00) >> |
4538 | IWM_RX_INFO_ENERGY_ANT_B_POS8; |
4539 | energy_b = energy_b ? -energy_b : -256; |
4540 | energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK0x00ff0000) >> |
4541 | IWM_RX_INFO_ENERGY_ANT_C_POS16; |
4542 | energy_c = energy_c ? -energy_c : -256; |
4543 | max_energy = MAX(energy_a, energy_b)(((energy_a)>(energy_b))?(energy_a):(energy_b)); |
4544 | max_energy = MAX(max_energy, energy_c)(((max_energy)>(energy_c))?(max_energy):(energy_c)); |
4545 | |
4546 | return max_energy; |
4547 | } |
4548 | |
/*
 * MQ-RX variant of signal-strength extraction: the MPDU descriptor
 * carries two per-antenna energies; negate (0 means missing antenna,
 * mapped to -256 dBm) and return the stronger one.
 */
4549 | int
4550 | iwm_rxmq_get_signal_strength(struct iwm_softc *sc,
4551 | struct iwm_rx_mpdu_desc *desc)
4552 | {
4553 | int energy_a, energy_b;
4554 | 
4555 | energy_a = desc->v1.energy_a;
4556 | energy_b = desc->v1.energy_b;
4557 | energy_a = energy_a ? -energy_a : -256;
4558 | energy_b = energy_b ? -energy_b : -256;
4559 | return MAX(energy_a, energy_b)(((energy_a)>(energy_b))?(energy_a):(energy_b));
4560 | }
4561 | |
/*
 * Handle an RX_PHY notification: sync the DMA buffer and stash the
 * PHY info so the following MPDU notification can consume it
 * (see iwm_rx_mpdu(), which reads sc_last_phy_info).
 */
4562 | void
4563 | iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
4564 | struct iwm_rx_data *data)
4565 | {
4566 | struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
4567 | 
4568 | bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (data-> map), (sizeof(*pkt)), (sizeof(*phy_info)), (0x02))
4569 | sizeof(*phy_info), BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (data-> map), (sizeof(*pkt)), (sizeof(*phy_info)), (0x02));
4570 | 
4571 | memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info))__builtin_memcpy((&sc->sc_last_phy_info), (phy_info), ( sizeof(sc->sc_last_phy_info)));
4572 | }
4573 | |
4574 | /* |
4575 | * Retrieve the average noise (in dBm) among receivers. |
4576 | */ |
4577 | int |
4578 | iwm_get_noise(const struct iwm_statistics_rx_non_phy *stats) |
4579 | { |
4580 | int i, total, nbant, noise; |
4581 | |
4582 | total = nbant = noise = 0; |
4583 | for (i = 0; i < 3; i++) { |
4584 | noise = letoh32(stats->beacon_silence_rssi[i])((__uint32_t)(stats->beacon_silence_rssi[i])) & 0xff; |
4585 | if (noise) { |
4586 | total += noise; |
4587 | nbant++; |
4588 | } |
4589 | } |
4590 | |
4591 | /* There should be at least one antenna but check anyway. */ |
4592 | return (nbant == 0) ? -127 : (total / nbant) - 107; |
4593 | } |
4594 | |
/*
 * Post-process a frame the hardware already CCMP-decrypted: validate
 * the ExtIV bit, extract the 48-bit packet number from the CCMP header
 * and run replay detection against the per-TID receive sequence counter.
 * Returns 0 if the frame should be accepted, 1 to drop it.
 * With IEEE80211_RXI_HWDEC_SAME_PN set (A-MSDU subframes after the
 * first) an equal PN is tolerated; otherwise PN must strictly increase.
 */
4595 | int
4596 | iwm_ccmp_decap(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
4597 | struct ieee80211_rxinfo *rxi)
4598 | {
4599 | struct ieee80211com *ic = &sc->sc_ic;
4600 | struct ieee80211_key *k = &ni->ni_pairwise_key;
4601 | struct ieee80211_frame *wh;
4602 | uint64_t pn, *prsc;
4603 | uint8_t *ivp;
4604 | uint8_t tid;
4605 | int hdrlen, hasqos;
4606 | 
4607 | wh = mtod(m, struct ieee80211_frame *)((struct ieee80211_frame *)((m)->m_hdr.mh_data));
4608 | hdrlen = ieee80211_get_hdrlen(wh);
4609 | ivp = (uint8_t *)wh + hdrlen;
4610 | 
4611 | /* Check that ExtIV bit is set. */
4612 | if (!(ivp[3] & IEEE80211_WEP_EXTIV0x20))
4613 | return 1;
4614 | 
4615 | hasqos = ieee80211_has_qos(wh);
4616 | tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID0x000f : 0;
4617 | prsc = &k->k_rsc[tid];
4618 | 
4619 | /* Extract the 48-bit PN from the CCMP header. */
4620 | pn = (uint64_t)ivp[0] |
4621 | (uint64_t)ivp[1] << 8 |
4622 | (uint64_t)ivp[4] << 16 |
4623 | (uint64_t)ivp[5] << 24 |
4624 | (uint64_t)ivp[6] << 32 |
4625 | (uint64_t)ivp[7] << 40;
4626 | if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN0x00000004) {
4627 | if (pn < *prsc) {
4628 | ic->ic_stats.is_ccmp_replays++;
4629 | return 1;
4630 | }
4631 | } else if (pn <= *prsc) {
4632 | ic->ic_stats.is_ccmp_replays++;
4633 | return 1;
4634 | }
4635 | /* Last seen packet number is updated in ieee80211_inputm(). */
4636 | 
4637 | /*
4638 | * Some firmware versions strip the MIC, and some don't. It is not
4639 | * clear which of the capability flags could tell us what to expect.
4640 | * For now, keep things simple and just leave the MIC in place if
4641 | * it is present.
4642 | *
4643 | * The IV will be stripped by ieee80211_inputm().
4644 | */
4645 | return 0;
4646 | }
4647 | |
/*
 * Decide whether hardware decryption of a received frame succeeded.
 * Control frames, QoS-null data, multicast and unprotected frames are
 * passed through untouched.  For protected unicast frames on a node
 * using CCMP, the RX status must indicate CCMP encryption with both
 * DEC_DONE and MIC_OK set; on success IEEE80211_RXI_HWDEC is flagged
 * in rxi.  Returns 0 to accept, 1 to drop (counting an input error).
 */
4648 | int
4649 | iwm_rx_hwdecrypt(struct iwm_softc *sc, struct mbuf *m, uint32_t rx_pkt_status,
4650 | struct ieee80211_rxinfo *rxi)
4651 | {
4652 | struct ieee80211com *ic = &sc->sc_ic;
4653 | struct ifnet *ifp = IC2IFP(ic)(&(ic)->ic_ac.ac_if);
4654 | struct ieee80211_frame *wh;
4655 | struct ieee80211_node *ni;
4656 | int ret = 0;
4657 | uint8_t type, subtype;
4658 | 
4659 | wh = mtod(m, struct ieee80211_frame *)((struct ieee80211_frame *)((m)->m_hdr.mh_data));
4660 | 
4661 | type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK0x0c;
4662 | if (type == IEEE80211_FC0_TYPE_CTL0x04)
4663 | return 0;
4664 | 
4665 | subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK0xf0;
4666 | if (ieee80211_has_qos(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA0x40))
4667 | return 0;
4668 | 
4669 | if (IEEE80211_IS_MULTICAST(wh->i_addr1)(*(wh->i_addr1) & 0x01) ||
4670 | !(wh->i_fc[1] & IEEE80211_FC1_PROTECTED0x40))
4671 | return 0;
4672 | 
4673 | ni = ieee80211_find_rxnode(ic, wh);
4674 | /* Handle hardware decryption. */
4675 | if ((ni->ni_flags & IEEE80211_NODE_RXPROT0x0008) &&
4676 | ni->ni_pairwise_key.k_cipher == IEEE80211_CIPHER_CCMP) {
4677 | if ((rx_pkt_status & IWM_RX_MPDU_RES_STATUS_SEC_ENC_MSK(7 << 8)) !=
4678 | IWM_RX_MPDU_RES_STATUS_SEC_CCM_ENC(2 << 8)) {
4679 | ic->ic_stats.is_ccmp_dec_errs++;
4680 | ret = 1;
4681 | goto out;
4682 | }
4683 | /* Check whether decryption was successful or not. */
4684 | if ((rx_pkt_status &
4685 | (IWM_RX_MPDU_RES_STATUS_DEC_DONE(1 << 11) |
4686 | IWM_RX_MPDU_RES_STATUS_MIC_OK(1 << 6))) !=
4687 | (IWM_RX_MPDU_RES_STATUS_DEC_DONE(1 << 11) |
4688 | IWM_RX_MPDU_RES_STATUS_MIC_OK(1 << 6))) {
4689 | ic->ic_stats.is_ccmp_dec_errs++;
4690 | ret = 1;
4691 | goto out;
4692 | }
4693 | rxi->rxi_flags |= IEEE80211_RXI_HWDEC0x00000001;
4694 | }
4695 | out:
4696 | if (ret)
4697 | ifp->if_ierrorsif_data.ifi_ierrors++;
4698 | ieee80211_release_node(ic, ni);
4699 | return ret;
4700 | }
4701 | |
/*
 * Deliver one received 802.11 frame to net80211.  Fixes up an invalid
 * channel index, temporarily points the node's channel at the RX
 * channel, finishes CCMP decap for hardware-decrypted frames, emits a
 * radiotap header when a BPF listener is attached, and finally passes
 * the frame to ieee80211_inputm().  Because ieee80211_inputm() may
 * switch ic_bss (e.g. during a scan), the BSS node's channel saved
 * beforehand is restored only if we are still associated to the same
 * BSSID afterwards.  Consumes the mbuf in all paths.
 */
4702 | void
4703 | iwm_rx_frame(struct iwm_softc *sc, struct mbuf *m, int chanidx,
4704 | uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
4705 | uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
4706 | struct mbuf_list *ml)
4707 | {
4708 | struct ieee80211com *ic = &sc->sc_ic;
4709 | struct ifnet *ifp = IC2IFP(ic)(&(ic)->ic_ac.ac_if);
4710 | struct ieee80211_frame *wh;
4711 | struct ieee80211_node *ni;
4712 | struct ieee80211_channel *bss_chan;
4713 | uint8_t saved_bssid[IEEE80211_ADDR_LEN6] = { 0 };
4714 | 
4715 | if (chanidx < 0 || chanidx >= nitems(ic->ic_channels)(sizeof((ic->ic_channels)) / sizeof((ic->ic_channels)[0 ])))
4716 | chanidx = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
4717 | 
4718 | wh = mtod(m, struct ieee80211_frame *)((struct ieee80211_frame *)((m)->m_hdr.mh_data));
4719 | ni = ieee80211_find_rxnode(ic, wh);
4720 | if (ni == ic->ic_bss) {
4721 | /*
4722 | * We may switch ic_bss's channel during scans.
4723 | * Record the current channel so we can restore it later.
4724 | */
4725 | bss_chan = ni->ni_chan;
4726 | IEEE80211_ADDR_COPY(&saved_bssid, ni->ni_macaddr)__builtin_memcpy((&saved_bssid), (ni->ni_macaddr), (6) );
4727 | }
4728 | ni->ni_chan = &ic->ic_channels[chanidx];
4729 | 
/* Replay-check hardware-decrypted frames; drop on failure. */
4730 | if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC0x00000001) &&
4731 | iwm_ccmp_decap(sc, m, ni, rxi) != 0) {
4732 | ifp->if_ierrorsif_data.ifi_ierrors++;
4733 | m_freem(m);
4734 | ieee80211_release_node(ic, ni);
4735 | return;
4736 | }
4737 | 
4738 | #if NBPFILTER1 > 0
4739 | if (sc->sc_drvbpf != NULL((void *)0)) {
4740 | struct iwm_rx_radiotap_header *tap = &sc->sc_rxtapsc_rxtapu.th;
4741 | uint16_t chan_flags;
4742 | 
4743 | tap->wr_flags = 0;
4744 | if (is_shortpre)
4745 | tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE0x02;
4746 | tap->wr_chan_freq =
4747 | htole16(ic->ic_channels[chanidx].ic_freq)((__uint16_t)(ic->ic_channels[chanidx].ic_freq));
4748 | chan_flags = ic->ic_channels[chanidx].ic_flags;
4749 | if (ic->ic_curmode != IEEE80211_MODE_11N)
4750 | chan_flags &= ~IEEE80211_CHAN_HT0x2000;
4751 | tap->wr_chan_flags = htole16(chan_flags)((__uint16_t)(chan_flags));
4752 | tap->wr_dbm_antsignal = (int8_t)rxi->rxi_rssi;
4753 | tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
4754 | tap->wr_tsft = device_timestamp;
4755 | if (rate_n_flags & IWM_RATE_MCS_HT_MSK(1 << 8)) {
4756 | uint8_t mcs = (rate_n_flags &
4757 | (IWM_RATE_HT_MCS_RATE_CODE_MSK0x7 |
4758 | IWM_RATE_HT_MCS_NSS_MSK(3 << 3)));
4759 | tap->wr_rate = (0x80 | mcs);
4760 | } else {
/* Map firmware legacy rate codes to 500kb/s radiotap units. */
4761 | uint8_t rate = (rate_n_flags &
4762 | IWM_RATE_LEGACY_RATE_MSK0xff);
4763 | switch (rate) {
4764 | /* CCK rates. */
4765 | case 10: tap->wr_rate = 2; break;
4766 | case 20: tap->wr_rate = 4; break;
4767 | case 55: tap->wr_rate = 11; break;
4768 | case 110: tap->wr_rate = 22; break;
4769 | /* OFDM rates. */
4770 | case 0xd: tap->wr_rate = 12; break;
4771 | case 0xf: tap->wr_rate = 18; break;
4772 | case 0x5: tap->wr_rate = 24; break;
4773 | case 0x7: tap->wr_rate = 36; break;
4774 | case 0x9: tap->wr_rate = 48; break;
4775 | case 0xb: tap->wr_rate = 72; break;
4776 | case 0x1: tap->wr_rate = 96; break;
4777 | case 0x3: tap->wr_rate = 108; break;
4778 | /* Unknown rate: should not happen. */
4779 | default: tap->wr_rate = 0;
4780 | }
4781 | }
4782 | 
4783 | bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
4784 | m, BPF_DIRECTION_IN(1 << 0));
4785 | }
4786 | #endif
4787 | ieee80211_inputm(IC2IFP(ic)(&(ic)->ic_ac.ac_if), m, ni, rxi, ml);
4788 | /*
4789 | * ieee80211_inputm() might have changed our BSS.
4790 | * Restore ic_bss's channel if we are still in the same BSS.
4791 | */
4792 | if (ni == ic->ic_bss && IEEE80211_ADDR_EQ(saved_bssid, ni->ni_macaddr)(__builtin_memcmp((saved_bssid), (ni->ni_macaddr), (6)) == 0))
4793 | ni->ni_chan = bss_chan;
4794 | ieee80211_release_node(ic, ni);
4795 | }
4796 | |
/*
 * Handle a legacy (non-MQ) RX_MPDU notification.  Validates the frame
 * length against the mode-dependent minimum and the DMA buffer bounds,
 * checks the CRC/overrun status word that follows the payload, runs
 * the hardware-decryption check, then combines the payload with the
 * PHY info cached by the preceding RX_PHY notification
 * (sc_last_phy_info) and hands the frame to iwm_rx_frame().
 * Consumes the mbuf in all paths.
 */
4797 | void
4798 | iwm_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, void *pktdata,
4799 | size_t maxlen, struct mbuf_list *ml)
4800 | {
4801 | struct ieee80211com *ic = &sc->sc_ic;
4802 | struct ieee80211_rxinfo rxi;
4803 | struct iwm_rx_phy_info *phy_info;
4804 | struct iwm_rx_mpdu_res_start *rx_res;
4805 | int device_timestamp;
4806 | uint16_t phy_flags;
4807 | uint32_t len;
4808 | uint32_t rx_pkt_status;
4809 | int rssi, chanidx, rate_n_flags;
4810 | 
4811 | memset(&rxi, 0, sizeof(rxi))__builtin_memset((&rxi), (0), (sizeof(rxi)));
4812 | 
4813 | phy_info = &sc->sc_last_phy_info;
4814 | rx_res = (struct iwm_rx_mpdu_res_start *)pktdata;
4815 | len = le16toh(rx_res->byte_count)((__uint16_t)(rx_res->byte_count));
4816 | if (ic->ic_opmode == IEEE80211_M_MONITOR) {
4817 | /* Allow control frames in monitor mode. */
4818 | if (len < sizeof(struct ieee80211_frame_cts)) {
4819 | ic->ic_stats.is_rx_tooshort++;
4820 | IC2IFP(ic)(&(ic)->ic_ac.ac_if)->if_ierrorsif_data.ifi_ierrors++;
4821 | m_freem(m);
4822 | return;
4823 | }
4824 | } else if (len < sizeof(struct ieee80211_frame)) {
4825 | ic->ic_stats.is_rx_tooshort++;
4826 | IC2IFP(ic)(&(ic)->ic_ac.ac_if)->if_ierrorsif_data.ifi_ierrors++;
4827 | m_freem(m);
4828 | return;
4829 | }
/* Reject lengths that would run past the mapped RX buffer. */
4830 | if (len > maxlen - sizeof(*rx_res)) {
4831 | IC2IFP(ic)(&(ic)->ic_ac.ac_if)->if_ierrorsif_data.ifi_ierrors++;
4832 | m_freem(m);
4833 | return;
4834 | }
4835 | 
4836 | if (__predict_false(phy_info->cfg_phy_cnt > 20)__builtin_expect(((phy_info->cfg_phy_cnt > 20) != 0), 0 )) {
4837 | m_freem(m);
4838 | return;
4839 | }
4840 | 
/* Status word sits immediately after the frame payload. */
4841 | rx_pkt_status = le32toh(*(uint32_t *)(pktdata + sizeof(*rx_res) + len))((__uint32_t)(*(uint32_t *)(pktdata + sizeof(*rx_res) + len)) );
4842 | if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK(1 << 0)) ||
4843 | !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK(1 << 1))) {
4844 | m_freem(m);
4845 | return; /* drop */
4846 | }
4847 | 
4848 | m->m_datam_hdr.mh_data = pktdata + sizeof(*rx_res);
4849 | m->m_pkthdrM_dat.MH.MH_pkthdr.len = m->m_lenm_hdr.mh_len = len;
4850 | 
4851 | if (iwm_rx_hwdecrypt(sc, m, rx_pkt_status, &rxi)) {
4852 | m_freem(m);
4853 | return;
4854 | }
4855 | 
4856 | chanidx = letoh32(phy_info->channel)((__uint32_t)(phy_info->channel));
4857 | device_timestamp = le32toh(phy_info->system_timestamp)((__uint32_t)(phy_info->system_timestamp));
4858 | phy_flags = letoh16(phy_info->phy_flags)((__uint16_t)(phy_info->phy_flags));
4859 | rate_n_flags = le32toh(phy_info->rate_n_flags)((__uint32_t)(phy_info->rate_n_flags));
4860 | 
4861 | rssi = iwm_get_signal_strength(sc, phy_info);
4862 | rssi = (0 - IWM_MIN_DBM-100) + rssi; /* normalize */
4863 | rssi = MIN(rssi, ic->ic_max_rssi)(((rssi)<(ic->ic_max_rssi))?(rssi):(ic->ic_max_rssi) ); /* clip to max. 100% */
4864 | 
4865 | rxi.rxi_rssi = rssi;
4866 | rxi.rxi_tstamp = device_timestamp;
4867 | 
4868 | iwm_rx_frame(sc, m, chanidx, rx_pkt_status,
4869 | (phy_flags & IWM_PHY_INFO_FLAG_SHPREAMBLE(1 << 2)),
4870 | rate_n_flags, device_timestamp, &rxi, ml);
4871 | }
4872 | |
/*
 * Reverse the byte order of a 6-byte MAC address in place.
 * (The firmware reports some addresses byte-swapped.)
 */
4873 | void
4874 | iwm_flip_address(uint8_t *addr)
4875 | {
4876 | int i;
4877 | uint8_t mac_addr[ETHER_ADDR_LEN6];
4878 | 
4879 | for (i = 0; i < ETHER_ADDR_LEN6; i++)
4880 | mac_addr[i] = addr[ETHER_ADDR_LEN6 - i - 1];
4881 | IEEE80211_ADDR_COPY(addr, mac_addr)__builtin_memcpy((addr), (mac_addr), (6));
4882 | }
4883 | |
4884 | /* |
4885 | * Drop duplicate 802.11 retransmissions |
4886 | * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery") |
4887 | * and handle pseudo-duplicate frames which result from deaggregation |
4888 | * of A-MSDU frames in hardware. |
4889 | */ |
4890 | int |
4891 | iwm_detect_duplicate(struct iwm_softc *sc, struct mbuf *m, |
4892 | struct iwm_rx_mpdu_desc *desc, struct ieee80211_rxinfo *rxi) |
4893 | { |
4894 | struct ieee80211com *ic = &sc->sc_ic; |
4895 | struct iwm_node *in = (void *)ic->ic_bss; |
4896 | struct iwm_rxq_dup_data *dup_data = &in->dup_data; |
4897 | uint8_t tid = IWM_MAX_TID_COUNT8, subframe_idx; |
4898 | struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *)((struct ieee80211_frame *)((m)->m_hdr.mh_data)); |
4899 | uint8_t type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK0x0c; |
4900 | uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK0xf0; |
4901 | int hasqos = ieee80211_has_qos(wh); |
4902 | uint16_t seq; |
4903 | |
4904 | if (type == IEEE80211_FC0_TYPE_CTL0x04 || |
4905 | (hasqos && (subtype & IEEE80211_FC0_SUBTYPE_NODATA0x40)) || |
4906 | IEEE80211_IS_MULTICAST(wh->i_addr1)(*(wh->i_addr1) & 0x01)) |
4907 | return 0; |
4908 | |
4909 | if (hasqos) { |
4910 | tid = (ieee80211_get_qos(wh) & IEEE80211_QOS_TID0x000f); |
4911 | if (tid > IWM_MAX_TID_COUNT8) |
4912 | tid = IWM_MAX_TID_COUNT8; |
4913 | } |
4914 | |
4915 | /* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */ |
4916 | subframe_idx = desc->amsdu_info & |
4917 | IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK0x7f; |
4918 | |
4919 | seq = letoh16(*(u_int16_t *)wh->i_seq)((__uint16_t)(*(u_int16_t *)wh->i_seq)) >> IEEE80211_SEQ_SEQ_SHIFT4; |
4920 | if ((wh->i_fc[1] & IEEE80211_FC1_RETRY0x08) && |
4921 | dup_data->last_seq[tid] == seq && |
4922 | dup_data->last_sub_frame[tid] >= subframe_idx) |
4923 | return 1; |
4924 | |
4925 | /* |
4926 | * Allow the same frame sequence number for all A-MSDU subframes |
4927 | * following the first subframe. |
4928 | * Otherwise these subframes would be discarded as replays. |
4929 | */ |
4930 | if (dup_data->last_seq[tid] == seq && |
4931 | subframe_idx > dup_data->last_sub_frame[tid] && |
4932 | (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU0x40)) { |
4933 | rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ0x00000008; |
4934 | } |
4935 | |
4936 | dup_data->last_seq[tid] = seq; |
4937 | dup_data->last_sub_frame[tid] = subframe_idx; |
4938 | |
4939 | return 0; |
4940 | } |
4941 | |
4942 | /* |
4943 | * Returns true if sn2 - buffer_size < sn1 < sn2. |
4944 | * To be used only in order to compare reorder buffer head with NSSN. |
4945 | * We fully trust NSSN unless it is behind us due to reorder timeout. |
4946 | * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN. |
4947 | */ |
4948 | int |
4949 | iwm_is_sn_less(uint16_t sn1, uint16_t sn2, uint16_t buffer_size) |
4950 | { |
4951 | return SEQ_LT(sn1, sn2)((((u_int16_t)(sn1) - (u_int16_t)(sn2)) & 0xfff) > 2048 ) && !SEQ_LT(sn1, sn2 - buffer_size)((((u_int16_t)(sn1) - (u_int16_t)(sn2 - buffer_size)) & 0xfff ) > 2048); |
4952 | } |
4953 | |
/*
 * Flush buffered frames from a block-ack reorder buffer up to (but not
 * including) the new starting sequence number 'nssn', delivering them
 * via iwm_rx_frame().  An nssn behind the current head (possible after
 * a reorder timeout) is ignored.  Afterwards the reorder timer is
 * re-armed if frames remain buffered, or cancelled otherwise.
 */
4954 | void
4955 | iwm_release_frames(struct iwm_softc *sc, struct ieee80211_node *ni,
4956 | struct iwm_rxba_data *rxba, struct iwm_reorder_buffer *reorder_buf,
4957 | uint16_t nssn, struct mbuf_list *ml)
4958 | {
4959 | struct iwm_reorder_buf_entry *entries = &rxba->entries[0];
4960 | uint16_t ssn = reorder_buf->head_sn;
4961 | 
4962 | /* ignore nssn smaller than head sn - this can happen due to timeout */
4963 | if (iwm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
4964 | goto set_timer;
4965 | 
4966 | while (iwm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
4967 | int index = ssn % reorder_buf->buf_size;
4968 | struct mbuf *m;
4969 | int chanidx, is_shortpre;
4970 | uint32_t rx_pkt_status, rate_n_flags, device_timestamp;
4971 | struct ieee80211_rxinfo *rxi;
4972 | 
4973 | /* This data is the same for all A-MSDU subframes. */
4974 | chanidx = entries[index].chanidx;
4975 | rx_pkt_status = entries[index].rx_pkt_status;
4976 | is_shortpre = entries[index].is_shortpre;
4977 | rate_n_flags = entries[index].rate_n_flags;
4978 | device_timestamp = entries[index].device_timestamp;
4979 | rxi = &entries[index].rxi;
4980 | 
4981 | /*
4982 | * Empty the list. Will have more than one frame for A-MSDU.
4983 | * Empty list is valid as well since nssn indicates frames were
4984 | * received.
4985 | */
4986 | while ((m = ml_dequeue(&entries[index].frames)) != NULL((void *)0)) {
4987 | iwm_rx_frame(sc, m, chanidx, rx_pkt_status, is_shortpre,
4988 | rate_n_flags, device_timestamp, rxi, ml);
4989 | reorder_buf->num_stored--;
4990 | 
4991 | /*
4992 | * Allow the same frame sequence number and CCMP PN for
4993 | * all A-MSDU subframes following the first subframe.
4994 | * Otherwise they would be discarded as replays.
4995 | */
4996 | rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ0x00000008;
4997 | rxi->rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN0x00000004;
4998 | }
4999 | 
/* Advance modulo the 12-bit sequence number space. */
5000 | ssn = (ssn + 1) & 0xfff;
5001 | }
5002 | reorder_buf->head_sn = nssn;
5003 | 
5004 | set_timer:
5005 | if (reorder_buf->num_stored && !reorder_buf->removed) {
5006 | timeout_add_usec(&reorder_buf->reorder_timer,
5007 | RX_REORDER_BUF_TIMEOUT_MQ_USEC(100000ULL));
5008 | } else
5009 | timeout_del(&reorder_buf->reorder_timer);
5010 | }
5011 | |
/*
 * Workaround for firmware that keeps reporting "old SN" for frames of
 * consecutive A-MPDUs (distinguished by their GP2 timestamps).  Counts
 * consecutive A-MPDUs containing old-SN drops; after
 * IWM_AMPDU_CONSEC_DROPS_DELBA of them the block-ack session is torn
 * down via a DELBA request and the counters reset.
 * Returns 1 when DELBA was requested, 0 otherwise.
 */
5012 | int
5013 | iwm_oldsn_workaround(struct iwm_softc *sc, struct ieee80211_node *ni, int tid,
5014 | struct iwm_reorder_buffer *buffer, uint32_t reorder_data, uint32_t gp2)
5015 | {
5016 | struct ieee80211com *ic = &sc->sc_ic;
5017 | 
5018 | if (gp2 != buffer->consec_oldsn_ampdu_gp2) {
5019 | /* we have a new (A-)MPDU ... */
5020 | 
5021 | /*
5022 | * reset counter to 0 if we didn't have any oldsn in
5023 | * the last A-MPDU (as detected by GP2 being identical)
5024 | */
5025 | if (!buffer->consec_oldsn_prev_drop)
5026 | buffer->consec_oldsn_drops = 0;
5027 | 
5028 | /* either way, update our tracking state */
5029 | buffer->consec_oldsn_ampdu_gp2 = gp2;
5030 | } else if (buffer->consec_oldsn_prev_drop) {
5031 | /*
5032 | * tracking state didn't change, and we had an old SN
5033 | * indication before - do nothing in this case, we
5034 | * already noted this one down and are waiting for the
5035 | * next A-MPDU (by GP2)
5036 | */
5037 | return 0;
5038 | }
5039 | 
5040 | /* return unless this MPDU has old SN */
5041 | if (!(reorder_data & IWM_RX_MPDU_REORDER_BA_OLD_SN0x80000000))
5042 | return 0;
5043 | 
5044 | /* update state */
5045 | buffer->consec_oldsn_prev_drop = 1;
5046 | buffer->consec_oldsn_drops++;
5047 | 
5048 | /* if limit is reached, send del BA and reset state */
5049 | if (buffer->consec_oldsn_drops == IWM_AMPDU_CONSEC_DROPS_DELBA10) {
5050 | ieee80211_delba_request(ic, ni, IEEE80211_REASON_UNSPECIFIED,
5051 | 0, tid);
5052 | buffer->consec_oldsn_prev_drop = 0;
5053 | buffer->consec_oldsn_drops = 0;
5054 | return 1;
5055 | }
5056 | 
5057 | return 0;
5058 | }
5059 | |
/*
 * Handle re-ordering of frames which were de-aggregated in hardware.
 * Returns 1 if the MPDU was consumed (buffered or dropped).
 * Returns 0 if the MPDU should be passed to upper layer.
 *
 * Frames buffered here are flushed to net80211 (via iwm_release_frames)
 * when a BAR arrives, when the NSSN advances past them, or when the
 * per-session reorder timer fires.  A-MSDU subframes sharing a sequence
 * number are queued together in one reorder-buffer slot.
 */
int
iwm_rx_reorder(struct iwm_softc *sc, struct mbuf *m, int chanidx,
    struct iwm_rx_mpdu_desc *desc, int is_shortpre, int rate_n_flags,
    uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
    struct mbuf_list *ml)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;
	struct iwm_rxba_data *rxba;
	struct iwm_reorder_buffer *buffer;
	uint32_t reorder_data = le32toh(desc->reorder_data)((__uint32_t)(desc->reorder_data));
	int is_amsdu = (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU0x40);
	int last_subframe =
	    (desc->amsdu_info & IWM_RX_MPDU_AMSDU_LAST_SUBFRAME0x80);
	uint8_t tid;
	uint8_t subframe_idx = (desc->amsdu_info &
	    IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK0x7f);
	struct iwm_reorder_buf_entry *entries;
	int index;
	uint16_t nssn, sn;
	uint8_t baid, type, subtype;
	int hasqos;

	wh = mtod(m, struct ieee80211_frame *)((struct ieee80211_frame *)((m)->m_hdr.mh_data));
	hasqos = ieee80211_has_qos(wh);
	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID0x000f : 0;

	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK0x0c;
	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK0xf0;

	/*
	 * We are only interested in Block Ack requests and unicast QoS data.
	 */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1)(*(wh->i_addr1) & 0x01))
		return 0;
	if (hasqos) {
		if (subtype & IEEE80211_FC0_SUBTYPE_NODATA0x40)
			return 0;
	} else {
		if (type != IEEE80211_FC0_TYPE_CTL0x04 ||
		    subtype != IEEE80211_FC0_SUBTYPE_BAR0x80)
			return 0;
	}

	/*
	 * Look up the BA session this frame belongs to, as indicated by
	 * the BAID which firmware stored in the MPDU descriptor.
	 */
	baid = (reorder_data & IWM_RX_MPDU_REORDER_BAID_MASK0x7f000000) >>
		IWM_RX_MPDU_REORDER_BAID_SHIFT24;
	if (baid == IWM_RX_REORDER_DATA_INVALID_BAID0x7f ||
	    baid >= nitems(sc->sc_rxba_data)(sizeof((sc->sc_rxba_data)) / sizeof((sc->sc_rxba_data)[0])))
		return 0;

	rxba = &sc->sc_rxba_data[baid];
	if (rxba->baid == IWM_RX_REORDER_DATA_INVALID_BAID0x7f ||
	    tid != rxba->tid || rxba->sta_id != IWM_STATION_ID0)
		return 0;

	/* Feed the BA session inactivity timeout, if one is configured. */
	if (rxba->timeout != 0)
		getmicrouptime(&rxba->last_rx);

	/* Bypass A-MPDU re-ordering in net80211. */
	rxi->rxi_flags |= IEEE80211_RXI_AMPDU_DONE0x00000002;

	nssn = reorder_data & IWM_RX_MPDU_REORDER_NSSN_MASK0x00000fff;
	sn = (reorder_data & IWM_RX_MPDU_REORDER_SN_MASK0x00fff000) >>
	    IWM_RX_MPDU_REORDER_SN_SHIFT12;

	buffer = &rxba->reorder_buf;
	entries = &rxba->entries[0];

	if (!buffer->valid) {
		/*
		 * The buffer only becomes valid once an in-window frame
		 * arrives; until then, pass old-SN frames straight up.
		 */
		if (reorder_data & IWM_RX_MPDU_REORDER_BA_OLD_SN0x80000000)
			return 0;
		buffer->valid = 1;
	}

	ni = ieee80211_find_rxnode(ic, wh);
	if (type == IEEE80211_FC0_TYPE_CTL0x04 &&
	    subtype == IEEE80211_FC0_SUBTYPE_BAR0x80) {
		/* A BAR moves the window; flush and drop the BAR itself. */
		iwm_release_frames(sc, ni, rxba, buffer, nssn, ml);
		goto drop;
	}

	/*
	 * If there was a significant jump in the nssn - adjust.
	 * If the SN is smaller than the NSSN it might need to first go into
	 * the reorder buffer, in which case we just release up to it and the
	 * rest of the function will take care of storing it and releasing up to
	 * the nssn.
	 */
	if (!iwm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
	    buffer->buf_size) ||
	    !SEQ_LT(sn, buffer->head_sn + buffer->buf_size)((((u_int16_t)(sn) - (u_int16_t)(buffer->head_sn + buffer->buf_size)) & 0xfff) > 2048)) {
		uint16_t min_sn = SEQ_LT(sn, nssn)((((u_int16_t)(sn) - (u_int16_t)(nssn)) & 0xfff) > 2048) ? sn : nssn;
		ic->ic_stats.is_ht_rx_frame_above_ba_winend++;
		iwm_release_frames(sc, ni, rxba, buffer, min_sn, ml);
	}

	if (iwm_oldsn_workaround(sc, ni, tid, buffer, reorder_data,
	    device_timestamp)) {
		/* BA session will be torn down. */
		ic->ic_stats.is_ht_rx_ba_window_jump++;
		goto drop;

	}

	/* drop any outdated packets */
	if (SEQ_LT(sn, buffer->head_sn)((((u_int16_t)(sn) - (u_int16_t)(buffer->head_sn)) & 0xfff) > 2048)) {
		ic->ic_stats.is_ht_rx_frame_below_ba_winstart++;
		goto drop;
	}

	/* release immediately if allowed by nssn and no stored frames */
	if (!buffer->num_stored && SEQ_LT(sn, nssn)((((u_int16_t)(sn) - (u_int16_t)(nssn)) & 0xfff) > 2048)) {
		if (iwm_is_sn_less(buffer->head_sn, nssn, buffer->buf_size) &&
		   (!is_amsdu || last_subframe))
			buffer->head_sn = nssn;
		ieee80211_release_node(ic, ni);
		return 0;
	}

	/*
	 * release immediately if there are no stored frames, and the sn is
	 * equal to the head.
	 * This can happen due to reorder timer, where NSSN is behind head_sn.
	 * When we released everything, and we got the next frame in the
	 * sequence, according to the NSSN we can't release immediately,
	 * while technically there is no hole and we can move forward.
	 */
	if (!buffer->num_stored && sn == buffer->head_sn) {
		if (!is_amsdu || last_subframe)
			buffer->head_sn = (buffer->head_sn + 1) & 0xfff;
		ieee80211_release_node(ic, ni);
		return 0;
	}

	index = sn % buffer->buf_size;

	/*
	 * Check if we already stored this frame
	 * As AMSDU is either received or not as whole, logic is simple:
	 * If we have frames in that position in the buffer and the last frame
	 * originated from AMSDU had a different SN then it is a retransmission.
	 * If it is the same SN then if the subframe index is incrementing it
	 * is the same AMSDU - otherwise it is a retransmission.
	 */
	if (!ml_empty(&entries[index].frames)((&entries[index].frames)->ml_len == 0)) {
		if (!is_amsdu) {
			ic->ic_stats.is_ht_rx_ba_no_buf++;
			goto drop;
		} else if (sn != buffer->last_amsdu ||
		    buffer->last_sub_index >= subframe_idx) {
			ic->ic_stats.is_ht_rx_ba_no_buf++;
			goto drop;
		}
	} else {
		/* This data is the same for all A-MSDU subframes. */
		entries[index].chanidx = chanidx;
		entries[index].is_shortpre = is_shortpre;
		entries[index].rate_n_flags = rate_n_flags;
		entries[index].device_timestamp = device_timestamp;
		memcpy(&entries[index].rxi, rxi, sizeof(entries[index].rxi))__builtin_memcpy((&entries[index].rxi), (rxi), (sizeof(entries[index].rxi)));
	}

	/* put in reorder buffer */
	ml_enqueue(&entries[index].frames, m);
	buffer->num_stored++;
	getmicrouptime(&entries[index].reorder_time);

	if (is_amsdu) {
		buffer->last_amsdu = sn;
		buffer->last_sub_index = subframe_idx;
	}

	/*
	 * We cannot trust NSSN for AMSDU sub-frames that are not the last.
	 * The reason is that NSSN advances on the first sub-frame, and may
	 * cause the reorder buffer to advance before all the sub-frames arrive.
	 * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with
	 * SN 1. NSSN for first sub frame will be 3 with the result of driver
	 * releasing SN 0,1, 2. When sub-frame 1 arrives - reorder buffer is
	 * already ahead and it will be dropped.
	 * If the last sub-frame is not on this queue - we will get frame
	 * release notification with up to date NSSN.
	 */
	if (!is_amsdu || last_subframe)
		iwm_release_frames(sc, ni, rxba, buffer, nssn, ml);

	ieee80211_release_node(ic, ni);
	return 1;

drop:
	m_freem(m);
	ieee80211_release_node(ic, ni);
	return 1;
}
5260 | |
/*
 * Process one received MPDU on the multi-queue (MQ) RX path.
 *
 * pktdata points at a struct iwm_rx_mpdu_desc immediately followed by
 * the 802.11 frame; maxlen bounds the bytes which may be read from it.
 * The frame is validated, un-padded, stripped of A-MSDU artifacts,
 * checked against hardware decryption status and duplicate detection,
 * and finally either consumed by the A-MPDU reorder buffer or handed
 * to iwm_rx_frame() for input to net80211 via ml.  The mbuf is freed
 * on all drop paths.
 */
void
iwm_rx_mpdu_mq(struct iwm_softc *sc, struct mbuf *m, void *pktdata,
    size_t maxlen, struct mbuf_list *ml)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_rxinfo rxi;
	struct iwm_rx_mpdu_desc *desc;
	uint32_t len, hdrlen, rate_n_flags, device_timestamp;
	int rssi;
	uint8_t chanidx;
	uint16_t phy_info;

	memset(&rxi, 0, sizeof(rxi))__builtin_memset((&rxi), (0), (sizeof(rxi)));

	desc = (struct iwm_rx_mpdu_desc *)pktdata;

	/* Both CRC and FIFO-overrun status bits must indicate success. */
	if (!(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_CRC_OK)((__uint16_t)((1 << 0)))) ||
	    !(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)((__uint16_t)((1 << 1))))) {
		m_freem(m);
		return; /* drop */
	}

	len = le16toh(desc->mpdu_len)((__uint16_t)(desc->mpdu_len));
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		/* Allow control frames in monitor mode. */
		if (len < sizeof(struct ieee80211_frame_cts)) {
			ic->ic_stats.is_rx_tooshort++;
			IC2IFP(ic)(&(ic)->ic_ac.ac_if)->if_ierrorsif_data.ifi_ierrors++;
			m_freem(m);
			return;
		}
	} else if (len < sizeof(struct ieee80211_frame)) {
		ic->ic_stats.is_rx_tooshort++;
		IC2IFP(ic)(&(ic)->ic_ac.ac_if)->if_ierrorsif_data.ifi_ierrors++;
		m_freem(m);
		return;
	}
	/* Frame must fit within the RX buffer behind the descriptor. */
	if (len > maxlen - sizeof(*desc)) {
		IC2IFP(ic)(&(ic)->ic_ac.ac_if)->if_ierrorsif_data.ifi_ierrors++;
		m_freem(m);
		return;
	}

	m->m_datam_hdr.mh_data = pktdata + sizeof(*desc);
	m->m_pkthdrM_dat.MH.MH_pkthdr.len = m->m_lenm_hdr.mh_len = len;

	/* Account for padding following the frame header. */
	if (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_PAD0x20) {
		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *)((struct ieee80211_frame *)((m)->m_hdr.mh_data));
		int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK0x0c;
		if (type == IEEE80211_FC0_TYPE_CTL0x04) {
			switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK0xf0) {
			case IEEE80211_FC0_SUBTYPE_CTS0xc0:
				hdrlen = sizeof(struct ieee80211_frame_cts);
				break;
			case IEEE80211_FC0_SUBTYPE_ACK0xd0:
				hdrlen = sizeof(struct ieee80211_frame_ack);
				break;
			default:
				hdrlen = sizeof(struct ieee80211_frame_min);
				break;
			}
		} else
			hdrlen = ieee80211_get_hdrlen(wh);

		if ((le16toh(desc->status)((__uint16_t)(desc->status)) &
		    IWM_RX_MPDU_RES_STATUS_SEC_ENC_MSK(7 << 8)) ==
		    IWM_RX_MPDU_RES_STATUS_SEC_CCM_ENC(2 << 8)) {
			/* Padding is inserted after the IV. */
			hdrlen += IEEE80211_CCMP_HDRLEN8;
		}

		/* Slide the header forward over the 2 padding bytes. */
		memmove(m->m_data + 2, m->m_data, hdrlen)__builtin_memmove((m->m_hdr.mh_data + 2), (m->m_hdr.mh_data), (hdrlen));
		m_adj(m, 2);
	}

	/*
	 * Hardware de-aggregates A-MSDUs and copies the same MAC header
	 * in place for each subframe. But it leaves the 'A-MSDU present'
	 * bit set in the frame header. We need to clear this bit ourselves.
	 *
	 * And we must allow the same CCMP PN for subframes following the
	 * first subframe. Otherwise they would be discarded as replays.
	 */
	if (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU0x40) {
		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *)((struct ieee80211_frame *)((m)->m_hdr.mh_data));
		uint8_t subframe_idx = (desc->amsdu_info &
		    IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK0x7f);
		if (subframe_idx > 0)
			rxi.rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN0x00000004;
		if (ieee80211_has_qos(wh) && ieee80211_has_addr4(wh) &&
		    m->m_lenm_hdr.mh_len >= sizeof(struct ieee80211_qosframe_addr4)) {
			struct ieee80211_qosframe_addr4 *qwh4 = mtod(m,((struct ieee80211_qosframe_addr4 *)((m)->m_hdr.mh_data))
			    struct ieee80211_qosframe_addr4 *)((struct ieee80211_qosframe_addr4 *)((m)->m_hdr.mh_data));
			qwh4->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU)((__uint16_t)(~0x0080));

			/* HW reverses addr3 and addr4. */
			iwm_flip_address(qwh4->i_addr3);
			iwm_flip_address(qwh4->i_addr4);
		} else if (ieee80211_has_qos(wh) &&
		    m->m_lenm_hdr.mh_len >= sizeof(struct ieee80211_qosframe)) {
			struct ieee80211_qosframe *qwh = mtod(m,((struct ieee80211_qosframe *)((m)->m_hdr.mh_data))
			    struct ieee80211_qosframe *)((struct ieee80211_qosframe *)((m)->m_hdr.mh_data));
			qwh->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU)((__uint16_t)(~0x0080));

			/* HW reverses addr3. */
			iwm_flip_address(qwh->i_addr3);
		}
	}

	/*
	 * Verify decryption before duplicate detection. The latter uses
	 * the TID supplied in QoS frame headers and this TID is implicitly
	 * verified as part of the CCMP nonce.
	 */
	if (iwm_rx_hwdecrypt(sc, m, le16toh(desc->status)((__uint16_t)(desc->status)), &rxi)) {
		m_freem(m);
		return;
	}

	if (iwm_detect_duplicate(sc, m, desc, &rxi)) {
		m_freem(m);
		return;
	}

	phy_info = le16toh(desc->phy_info)((__uint16_t)(desc->phy_info));
	rate_n_flags = le32toh(desc->v1.rate_n_flags)((__uint32_t)(desc->v1.rate_n_flags));
	chanidx = desc->v1.channel;
	device_timestamp = desc->v1.gp2_on_air_rise;

	rssi = iwm_rxmq_get_signal_strength(sc, desc);
	rssi = (0 - IWM_MIN_DBM-100) + rssi;	/* normalize */
	rssi = MIN(rssi, ic->ic_max_rssi)(((rssi)<(ic->ic_max_rssi))?(rssi):(ic->ic_max_rssi));	/* clip to max. 100% */

	rxi.rxi_rssi = rssi;
	rxi.rxi_tstamp = le64toh(desc->v1.tsf_on_air_rise)((__uint64_t)(desc->v1.tsf_on_air_rise));

	/* A return of 1 means the reorder buffer consumed the mbuf. */
	if (iwm_rx_reorder(sc, m, chanidx, desc,
	    (phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE(1 << 7)),
	    rate_n_flags, device_timestamp, &rxi, ml))
		return;

	iwm_rx_frame(sc, m, chanidx, le16toh(desc->status)((__uint16_t)(desc->status)),
	    (phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE(1 << 7)),
	    rate_n_flags, device_timestamp, &rxi, ml);
}
5407 | |
/*
 * Run the net80211 HT rate-adaptation algorithm for this node and,
 * if it selected a new Tx MCS, push an updated link-quality (LQ)
 * rate table to the firmware via iwm_setrates().
 */
void
iwm_ra_choose(struct iwm_softc *sc, struct ieee80211_node *ni)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (void *)ni;
	int old_txmcs = ni->ni_txmcs;

	ieee80211_ra_choose(&in->in_rn, ic, ni);

	/*
	 * If RA has chosen a new TX rate we must update
	 * the firmware's LQ rate table.
	 */
	if (ni->ni_txmcs != old_txmcs)
		iwm_setrates(in, 1);
}
5424 | |
/*
 * Feed Tx status for a single-frame (non-aggregated) HT transmission
 * into the rate-adaptation state and re-run rate selection.
 *
 * txmcs is the MCS the frame was queued with, failure_frame the number
 * of failed transmission attempts reported by firmware, and txfail
 * non-zero if the final attempt also failed.  Reports which do not
 * match our currently selected MCS are ignored (stale bursts), except
 * that a long run of mismatches forces an LQ-table re-sync.
 */
void
iwm_ht_single_rate_control(struct iwm_softc *sc, struct ieee80211_node *ni,
    int txmcs, uint8_t failure_frame, int txfail)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (void *)ni;

	/* Ignore Tx reports which don't match our last LQ command. */
	if (txmcs != ni->ni_txmcs) {
		if (++in->lq_rate_mismatch > 15) {
			/* Try to sync firmware with the driver... */
			iwm_setrates(in, 1);
			in->lq_rate_mismatch = 0;
		}
	} else {
		int mcs = txmcs;
		const struct ieee80211_ht_rateset *rs =
		    ieee80211_ra_get_ht_rateset(txmcs,
		    ieee80211_node_supports_ht_chan40(ni),
		    ieee80211_ra_use_ht_sgi(ni));
		unsigned int retries = 0, i;

		in->lq_rate_mismatch = 0;

		/*
		 * Attribute each failed attempt to a successively lower
		 * MCS within the rateset; once the lowest MCS is reached
		 * the remaining attempts count as plain retries.
		 * NOTE(review): this assumes the firmware's LQ fallback
		 * steps down one MCS per retry — mirrors how the LQ table
		 * is built in iwm_setrates(); confirm if that changes.
		 */
		for (i = 0; i < failure_frame; i++) {
			if (mcs > rs->min_mcs) {
				ieee80211_ra_add_stats_ht(&in->in_rn,
				    ic, ni, mcs, 1, 1);
				mcs--;
			} else
				retries++;
		}

		if (txfail && failure_frame == 0) {
			/* Final attempt failed without any prior retries. */
			ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
			    txmcs, 1, 1);
		} else {
			ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
			    mcs, retries + 1, retries);
		}

		iwm_ra_choose(sc, ni);
	}
}
5469 | |
/*
 * Handle a Tx completion for a single (non-aggregated) frame:
 * update rate-control statistics (AMRR for legacy rates, HT RA for
 * MCS rates) and account output errors on the interface.
 */
void
iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
    struct iwm_node *in, int txmcs, int txrate)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	struct ifnet *ifp = IC2IFP(ic)(&(ic)->ic_ac.ac_if);
	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
	int status = le16toh(tx_resp->status.status)((__uint16_t)(tx_resp->status.status)) & IWM_TX_STATUS_MSK0x000000ff;
	int txfail;

	KASSERT(tx_resp->frame_count == 1)((tx_resp->frame_count == 1) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/pci/if_iwm.c", 5481, "tx_resp->frame_count == 1"));

	txfail = (status != IWM_TX_STATUS_SUCCESS0x01 &&
	    status != IWM_TX_STATUS_DIRECT_DONE0x02);

	/*
	 * Update rate control statistics.
	 * Only report frames which were actually queued with the currently
	 * selected Tx rate. Because Tx queues are relatively long we may
	 * encounter previously selected rates here during Tx bursts.
	 * Providing feedback based on such frames can lead to suboptimal
	 * Tx rate control decisions.
	 */
	if ((ni->ni_flags & IEEE80211_NODE_HT0x0400) == 0) {
		if (txrate != ni->ni_txrate) {
			if (++in->lq_rate_mismatch > 15) {
				/* Try to sync firmware with the driver... */
				iwm_setrates(in, 1);
				in->lq_rate_mismatch = 0;
			}
		} else {
			in->lq_rate_mismatch = 0;

			in->in_amn.amn_txcnt++;
			if (txfail)
				in->in_amn.amn_retrycnt++;
			if (tx_resp->failure_frame > 0)
				in->in_amn.amn_retrycnt++;
		}
	} else if (ic->ic_fixed_mcs == -1 && ic->ic_state == IEEE80211_S_RUN &&
	    (le32toh(tx_resp->initial_rate)((__uint32_t)(tx_resp->initial_rate)) & IWM_RATE_MCS_HT_MSK(1 << 8))) {
		/*
		 * NOTE(review): this local deliberately shadows the txmcs
		 * parameter; the firmware-reported initial rate, not the
		 * queued rate, is what HT rate control must see here.
		 */
		int txmcs = le32toh(tx_resp->initial_rate)((__uint32_t)(tx_resp->initial_rate)) &
		    (IWM_RATE_HT_MCS_RATE_CODE_MSK0x7 | IWM_RATE_HT_MCS_NSS_MSK(3 << 3));
		iwm_ht_single_rate_control(sc, ni, txmcs,
		    tx_resp->failure_frame, txfail);
	}

	if (txfail)
		ifp->if_oerrorsif_data.ifi_oerrors++;
}
5521 | |
/*
 * Finish a completed Tx descriptor: sync and unload its DMA map,
 * free the mbuf, drop the node reference taken at enqueue time,
 * and clear the per-descriptor A-MPDU bookkeeping.
 */
void
iwm_txd_done(struct iwm_softc *sc, struct iwm_tx_data *txd)
{
	struct ieee80211com *ic = &sc->sc_ic;

	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (txd->map), (0), (txd->map->dm_mapsize), (0x08))
	    BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (txd->map), (0), (txd->map->dm_mapsize), (0x08));
	bus_dmamap_unload(sc->sc_dmat, txd->map)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (txd->map));
	m_freem(txd->m);
	txd->m = NULL((void *)0);

	KASSERT(txd->in)((txd->in) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/pci/if_iwm.c", 5533, "txd->in"));
	ieee80211_release_node(ic, &txd->in->in_ni);
	txd->in = NULL((void *)0);
	txd->ampdu_nframes = 0;
	txd->ampdu_txmcs = 0;
}
5539 | |
/*
 * Reclaim all Tx descriptors from the ring's tail up to (but not
 * including) idx, completing any still-pending frames, and wake up
 * anyone sleeping on the ring.  On aggregation queues the firmware
 * may legitimately skip completions for frames it batched into an
 * A-MPDU, hence the debug message only for non-agg queues.
 */
void
iwm_txq_advance(struct iwm_softc *sc, struct iwm_tx_ring *ring, int idx)
{
	struct iwm_tx_data *txd;

	while (ring->tail != idx) {
		txd = &ring->data[ring->tail];
		if (txd->m != NULL((void *)0)) {
			if (ring->qid < IWM_FIRST_AGG_TX_QUEUE10)
				DPRINTF(("%s: missed Tx completion: tail=%d "do { ; } while (0)
				    "idx=%d\n", __func__, ring->tail, idx))do { ; } while (0);
			iwm_reset_sched(sc, ring->qid, ring->tail, IWM_STATION_ID0);
			iwm_txd_done(sc, txd);
			ring->queued--;
		}
		ring->tail = (ring->tail + 1) % IWM_TX_RING_COUNT256;
	}

	wakeup(ring);
}
5560 | |
/*
 * Handle a Tx completion on an aggregation queue.
 *
 * If this completion covers more than one frame (an A-MPDU), only
 * record which subframes were part of it and at which rate; the actual
 * rate-control accounting happens later when a compressed BA arrives.
 * If it covers a single frame, this was the firmware's final
 * single-frame attempt for frame SSN-1: feed rate control, send a
 * compressed BAR on failure, move the BA window, and reclaim ring
 * entries up to the firmware's BA window start (ssn).
 */
void
iwm_ampdu_tx_done(struct iwm_softc *sc, struct iwm_cmd_header *cmd_hdr,
    struct iwm_node *in, struct iwm_tx_ring *txq, uint32_t initial_rate,
    uint8_t nframes, uint8_t failure_frame, uint16_t ssn, int status,
    struct iwm_agg_tx_status *agg_status)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int tid = cmd_hdr->qid - IWM_FIRST_AGG_TX_QUEUE10;
	struct iwm_tx_data *txdata = &txq->data[cmd_hdr->idx];
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_tx_ba *ba;
	int txfail = (status != IWM_TX_STATUS_SUCCESS0x01 &&
	    status != IWM_TX_STATUS_DIRECT_DONE0x02);
	uint16_t seq;

	if (ic->ic_state != IEEE80211_S_RUN)
		return;

	if (nframes > 1) {
		int i;
		/*
		 * Collect information about this A-MPDU.
		 */

		for (i = 0; i < nframes; i++) {
			uint8_t qid = agg_status[i].qid;
			uint8_t idx = agg_status[i].idx;
			uint16_t txstatus = (le16toh(agg_status[i].status)((__uint16_t)(agg_status[i].status)) &
			    IWM_AGG_TX_STATE_STATUS_MSK0x0fff);

			if (txstatus != IWM_AGG_TX_STATE_TRANSMITTED0x0000)
				continue;

			if (qid != cmd_hdr->qid)
				continue;

			txdata = &txq->data[idx];
			if (txdata->m == NULL((void *)0))
				continue;

			/* The Tx rate was the same for all subframes. */
			txdata->ampdu_txmcs = initial_rate &
			    (IWM_RATE_HT_MCS_RATE_CODE_MSK0x7 |
			    IWM_RATE_HT_MCS_NSS_MSK(3 << 3));
			txdata->ampdu_nframes = nframes;
		}
		return;
	}

	/* Ignore completions for BA sessions which are gone or stale. */
	ba = &ni->ni_tx_ba[tid];
	if (ba->ba_state != IEEE80211_BA_AGREED2)
		return;
	if (SEQ_LT(ssn, ba->ba_winstart)((((u_int16_t)(ssn) - (u_int16_t)(ba->ba_winstart)) & 0xfff) > 2048))
		return;

	/* This was a final single-frame Tx attempt for frame SSN-1. */
	seq = (ssn - 1) & 0xfff;

	/*
	 * Skip rate control if our Tx rate is fixed.
	 * Don't report frames to MiRA which were sent at a different
	 * Tx rate than ni->ni_txmcs.
	 */
	if (ic->ic_fixed_mcs == -1) {
		if (txdata->ampdu_nframes > 1) {
			/*
			 * This frame was once part of an A-MPDU.
			 * Report one failed A-MPDU Tx attempt.
			 * The firmware might have made several such
			 * attempts but we don't keep track of this.
			 */
			ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
			    txdata->ampdu_txmcs, 1, 1);
		}

		/* Report the final single-frame Tx attempt. */
		if (initial_rate & IWM_RATE_HT_MCS_RATE_CODE_MSK0x7) {
			int txmcs = initial_rate &
			    (IWM_RATE_HT_MCS_RATE_CODE_MSK0x7 |
			    IWM_RATE_HT_MCS_NSS_MSK(3 << 3));
			iwm_ht_single_rate_control(sc, ni, txmcs,
			    failure_frame, txfail);
		}
	}

	if (txfail)
		ieee80211_tx_compressed_bar(ic, ni, tid, ssn);

	/*
	 * SSN corresponds to the first (perhaps not yet transmitted) frame
	 * in firmware's BA window. Firmware is not going to retransmit any
	 * frames before its BA window so mark them all as done.
	 */
	ieee80211_output_ba_move_window(ic, ni, tid, ssn);
	iwm_txq_advance(sc, txq, IWM_AGG_SSN_TO_TXQ_IDX(ssn)((ssn) & (256 - 1)));
	iwm_clear_oactive(sc, txq);
}
5658 | |
/*
 * Handle an IWM_TX_CMD response from firmware: validate the packet,
 * then dispatch to the aggregation-queue handler or the single-frame
 * handler depending on which queue the completion is for.  In both
 * cases descriptors up to the firmware's reported starting sequence
 * number (ssn) are reclaimed.
 */
void
iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
    struct iwm_rx_data *data)
{
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd;
	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
	uint32_t ssn;
	uint32_t len = iwm_rx_packet_len(pkt);

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (data->map), (0), (4096), (0x02))
	    BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (data->map), (0), (4096), (0x02));

	/* Sanity checks. */
	if (sizeof(*tx_resp) > len)
		return;
	if (qid < IWM_FIRST_AGG_TX_QUEUE10 && tx_resp->frame_count > 1)
		return;
	if (qid > IWM_LAST_AGG_TX_QUEUE(10 + 8 - 1))
		return;
	/* Bounds-check the variable-length status array against len. */
	if (sizeof(*tx_resp) + sizeof(ssn) +
	    tx_resp->frame_count * sizeof(tx_resp->status) > len)
		return;

	sc->sc_tx_timer[qid] = 0;

	txd = &ring->data[idx];
	if (txd->m == NULL((void *)0))
		return;

	/* SSN follows the status array; copy out to avoid misalignment. */
	memcpy(&ssn, &tx_resp->status + tx_resp->frame_count, sizeof(ssn))__builtin_memcpy((&ssn), (&tx_resp->status + tx_resp->frame_count), (sizeof(ssn)));
	ssn = le32toh(ssn)((__uint32_t)(ssn)) & 0xfff;
	if (qid >= IWM_FIRST_AGG_TX_QUEUE10) {
		int status;
		status = le16toh(tx_resp->status.status)((__uint16_t)(tx_resp->status.status)) & IWM_TX_STATUS_MSK0x000000ff;
		iwm_ampdu_tx_done(sc, cmd_hdr, txd->in, ring,
		    le32toh(tx_resp->initial_rate)((__uint32_t)(tx_resp->initial_rate)), tx_resp->frame_count,
		    tx_resp->failure_frame, ssn, status, &tx_resp->status);
	} else {
		/*
		 * Even though this is not an agg queue, we must only free
		 * frames before the firmware's starting sequence number.
		 */
		iwm_rx_tx_cmd_single(sc, pkt, txd->in, txd->txmcs, txd->txrate);
		iwm_txq_advance(sc, ring, IWM_AGG_SSN_TO_TXQ_IDX(ssn)((ssn) & (256 - 1)));
		iwm_clear_oactive(sc, ring);
	}
}
5710 | |
/*
 * If this ring has drained below the low watermark, clear its bit in
 * the queue-full mask; once no queue is full anymore, un-stall the
 * interface send queue and kick output.
 */
void
iwm_clear_oactive(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic)(&(ic)->ic_ac.ac_if);

	if (ring->queued < IWM_TX_RING_LOMARK192) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
			ifq_clr_oactive(&ifp->if_snd);
			/*
			 * Well, we're in interrupt context, but then again
			 * I guess net80211 does all sorts of stunts in
			 * interrupt context, so maybe this is no biggie.
			 */
			(*ifp->if_start)(ifp);
		}
	}
}
5730 | |
/*
 * Account Tx rate-control statistics for A-MPDU subframes in the ring
 * index range [seq, ssn) — i.e. frames which now lie before the
 * firmware's BA window — then re-run rate selection for the node.
 */
void
iwm_ampdu_rate_control(struct iwm_softc *sc, struct ieee80211_node *ni,
    struct iwm_tx_ring *txq, int tid, uint16_t seq, uint16_t ssn)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (void *)ni;
	int idx, end_idx;

	/*
	 * Update Tx rate statistics for A-MPDUs before firmware's BA window.
	 */
	idx = IWM_AGG_SSN_TO_TXQ_IDX(seq)((seq) & (256 - 1));
	end_idx = IWM_AGG_SSN_TO_TXQ_IDX(ssn)((ssn) & (256 - 1));
	while (idx != end_idx) {
		struct iwm_tx_data *txdata = &txq->data[idx];
		if (txdata->m != NULL((void *)0) && txdata->ampdu_nframes > 1) {
			/*
			 * We can assume that this subframe has been ACKed
			 * because ACK failures come as single frames and
			 * before failing an A-MPDU subframe the firmware
			 * sends it as a single frame at least once.
			 */
			ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
			    txdata->ampdu_txmcs, 1, 0);

			/* Report this frame only once. */
			txdata->ampdu_nframes = 0;
		}

		idx = (idx + 1) % IWM_TX_RING_COUNT256;
	}

	iwm_ra_choose(sc, ni);
}
5765 | |
5766 | void |
5767 | iwm_rx_compressed_ba(struct iwm_softc *sc, struct iwm_rx_packet *pkt) |
5768 | { |
5769 | struct iwm_ba_notif *ban = (void *)pkt->data; |
5770 | struct ieee80211com *ic = &sc->sc_ic; |
5771 | struct ieee80211_node *ni = ic->ic_bss; |
5772 | struct iwm_node *in = (void *)ni; |
5773 | struct ieee80211_tx_ba *ba; |
5774 | struct iwm_tx_ring *ring; |
5775 | uint16_t seq, ssn; |
5776 | int qid; |
5777 | |
5778 | if (ic->ic_state != IEEE80211_S_RUN) |
5779 | return; |
5780 | |
5781 | if (iwm_rx_packet_payload_len(pkt) < sizeof(*ban)) |
5782 | return; |
5783 | |
5784 | if (ban->sta_id != IWM_STATION_ID0 || |
5785 | !IEEE80211_ADDR_EQ(in->in_macaddr, ban->sta_addr)(__builtin_memcmp((in->in_macaddr), (ban->sta_addr), (6 )) == 0)) |
5786 | return; |
5787 | |
5788 | qid = le16toh(ban->scd_flow)((__uint16_t)(ban->scd_flow)); |
5789 | if (qid < IWM_FIRST_AGG_TX_QUEUE10 || qid > IWM_LAST_AGG_TX_QUEUE(10 + 8 - 1)) |
5790 | return; |
5791 | |
5792 | /* Protect against a firmware bug where the queue/TID are off. */ |
5793 | if (qid != IWM_FIRST_AGG_TX_QUEUE10 + ban->tid) |
5794 | return; |
5795 | |
5796 | sc->sc_tx_timer[qid] = 0; |
5797 | |
5798 | ba = &ni->ni_tx_ba[ban->tid]; |
5799 | if (ba->ba_state != IEEE80211_BA_AGREED2) |
5800 | return; |
5801 | |
5802 | ring = &sc->txq[qid]; |
5803 | |
5804 | /* |
5805 | * The first bit in ban->bitmap corresponds to the sequence number |
5806 | * stored in the sequence control field ban->seq_ctl. |
5807 | * Multiple BA notifications in a row may be using this number, with |
5808 | * additional bits being set in cba->bitmap. It is unclear how the |
5809 | * firmware decides to shift this window forward. |
5810 | * We rely on ba->ba_winstart instead. |
5811 | */ |
5812 | seq = le16toh(ban->seq_ctl)((__uint16_t)(ban->seq_ctl)) >> |