Bug Summary

File: dev/pci/if_em.c
Warning: line 1317, column 15
Assigned value is garbage or undefined
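
This is the analyzer's classic uninitialized-read defect: on some feasible path, a local variable is read before anything has been assigned to it. A minimal, self-contained sketch (hypothetical code, not taken from the driver) that draws the same diagnostic from clang --analyze:

	/* If the loop body never runs, 'last' is returned uninitialized. */
	unsigned int
	pick_last(const unsigned int *segs, unsigned int nsegs)
	{
		unsigned int i, last;		/* no initial value */

		for (i = 0; i < nsegs; i++)
			last = segs[i];		/* the only assignment */

		return (last);			/* garbage when nsegs == 0 */
	}

The annotated path below walks the analogous route through em_encap(), where the per-segment loop is the only writer of 'last'.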

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name if_em.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant 
-fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/dev/pci/if_em.c
1/**************************************************************************
2
3Copyright (c) 2001-2003, Intel Corporation
4All rights reserved.
5
6Redistribution and use in source and binary forms, with or without
7modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30POSSIBILITY OF SUCH DAMAGE.
31
32***************************************************************************/
33
34/* $OpenBSD: if_em.c,v 1.370 2023/12/31 08:42:33 mglocker Exp $ */
35/* $FreeBSD: if_em.c,v 1.46 2004/09/29 18:28:28 mlaier Exp $ */
36
37#include <dev/pci/if_em.h>
38#include <dev/pci/if_em_soc.h>
39
40#include <netinet/ip6.h>
41
42/*********************************************************************
43 * Driver version
44 *********************************************************************/
45
46#define EM_DRIVER_VERSION "6.2.9"
47
48/*********************************************************************
49 * PCI Device ID Table
50 *********************************************************************/
51const struct pci_matchid em_devices[] = {
52 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80003ES2LAN_CPR_DPT },
53 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80003ES2LAN_SDS_DPT },
54 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80003ES2LAN_CPR_SPT },
55 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80003ES2LAN_SDS_SPT },
56 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM },
57 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM },
58 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP },
59 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM },
60 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP },
61 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI },
62 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE },
63 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER },
64 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM },
65 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI },
66 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_LF },
67 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE },
68 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542 },
69 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER },
70 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER },
71 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER },
72 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER },
73 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER },
74 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM },
75 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER },
76 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER },
77 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER },
78 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER },
79 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES },
80 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER },
81 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER },
82 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD_CPR },
83 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER },
84 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER },
85 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE },
86 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_CPR },
87 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_CPR_K },
88 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES },
89 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_2 },
90 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI },
91 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE },
92 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI },
93 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_AF },
94 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_AT },
95 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER },
96 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER },
97 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_CPR },
98 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_CPR_LP },
99 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_FBR },
100 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES },
101 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SDS_DUAL },
102 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SDS_QUAD },
103 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571PT_QUAD_CPR },
104 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER },
105 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER },
106 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES },
107 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI },
108 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E },
109 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT },
110 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_PM },
111 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L },
112 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L_PL_1 },
113 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L_PL_2 },
114 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573V_PM },
115 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L },
116 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574LA },
117 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER },
118 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_SERDES },
119 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_CPR },
120 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QP_PM },
121 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576 },
122 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER },
123 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES },
124 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER },
125 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_CU_ET2 },
126 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS },
127 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES },
128 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD },
129 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82577LC },
130 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82577LM },
131 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82578DC },
132 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82578DM },
133 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82579LM },
134 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82579V },
135 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER },
136 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_OEM1 },
137 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_IT },
138 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER },
139 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES },
140 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII },
141 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_NF },
142 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES_NF },
143 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER },
144 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM },
145 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V },
146 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM },
147 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM_2 },
148 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM_3 },
149 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V },
150 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V_2 },
151 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V_3 },
152 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM },
153 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM2 },
154 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM3 },
155 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM4 },
156 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM5 },
157 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM6 },
158 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM7 },
159 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM8 },
160 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM9 },
161 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM10 },
162 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM11 },
163 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM12 },
164 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM13 },
165 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM14 },
166 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM15 },
167 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM16 },
168 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM17 },
169 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM18 },
170 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM19 },
171 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM20 },
172 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM21 },
173 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM22 },
174 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM23 },
175 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM24 },
176 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V },
177 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V2 },
178 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V4 },
179 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V5 },
180 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V6 },
181 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V7 },
182 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V8 },
183 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V9 },
184 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V10 },
185 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V11 },
186 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V12 },
187 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V13 },
188 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V14 },
189 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V15 },
190 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V16 },
191 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V17 },
192 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V18 },
193 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V19 },
194 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V20 },
195 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V21 },
196 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V22 },
197 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V23 },
198 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V24 },
199 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER },
200 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER },
201 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES },
202 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII },
203 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL },
204 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER },
205 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SGMII },
206 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SERDES },
207 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_BPLANE },
208 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SFP },
209 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V },
210 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER },
211 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER },
212 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES },
213 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII },
214 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I354_BP_1GBPS },
215 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I354_BP_2_5GBPS },
216 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I354_SGMII },
217 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_82567V_3 },
218 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IFE },
219 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IFE_G },
220 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IFE_GT },
221 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IGP_AMT },
222 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IGP_C },
223 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IGP_M },
224 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IGP_M_AMT },
225 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_BM },
226 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IFE },
227 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IFE_G },
228 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IFE_GT },
229 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IGP_AMT },
230 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IGP_C },
231 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IGP_M },
232 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IGP_M_AMT },
233 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IGP_M_V },
234 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_D_BM_LF },
235 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_D_BM_LM },
236 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_D_BM_V },
237 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_R_BM_LF },
238 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_R_BM_LM },
239 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_R_BM_V },
240 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_EP80579_LAN_1 },
241 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_EP80579_LAN_2 },
242 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_EP80579_LAN_3 },
243 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_EP80579_LAN_4 },
244 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_EP80579_LAN_5 },
245 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_EP80579_LAN_6 }
246};
247
248/*********************************************************************
249 * Function prototypes
250 *********************************************************************/
251int em_probe(struct device *, void *, void *);
252void em_attach(struct device *, struct device *, void *);
253void em_defer_attach(struct device*);
254int em_detach(struct device *, int);
255int em_activate(struct device *, int);
256int em_intr(void *);
257int em_allocate_legacy(struct em_softc *);
258void em_start(struct ifqueue *);
259int em_ioctl(struct ifnet *, u_long, caddr_t);
260void em_watchdog(struct ifnet *);
261void em_init(void *);
262void em_stop(void *, int);
263void em_media_status(struct ifnet *, struct ifmediareq *);
264int em_media_change(struct ifnet *);
265uint64_t em_flowstatus(struct em_softc *);
266void em_identify_hardware(struct em_softc *);
267int em_allocate_pci_resources(struct em_softc *);
268void em_free_pci_resources(struct em_softc *);
269void em_local_timer(void *);
270int em_hardware_init(struct em_softc *);
271void em_setup_interface(struct em_softc *);
272int em_setup_transmit_structures(struct em_softc *);
273void em_initialize_transmit_unit(struct em_softc *);
274int em_setup_receive_structures(struct em_softc *);
275void em_initialize_receive_unit(struct em_softc *);
276void em_enable_intr(struct em_softc *);
277void em_disable_intr(struct em_softc *);
278void em_free_transmit_structures(struct em_softc *);
279void em_free_receive_structures(struct em_softc *);
280void em_update_stats_counters(struct em_softc *);
281void em_disable_aspm(struct em_softc *);
282void em_txeof(struct em_queue *);
283int em_allocate_receive_structures(struct em_softc *);
284int em_allocate_transmit_structures(struct em_softc *);
285int em_allocate_desc_rings(struct em_softc *);
286int em_rxfill(struct em_queue *);
287void em_rxrefill(void *);
288void em_rxrefill_locked(struct em_queue *);
289int em_rxeof(struct em_queue *);
290void em_receive_checksum(struct em_softc *, struct em_rx_desc *,
291 struct mbuf *);
292u_int em_transmit_checksum_setup(struct em_queue *, struct mbuf *, u_int,
293 u_int32_t *, u_int32_t *);
294u_int em_tso_setup(struct em_queue *, struct mbuf *, u_int, u_int32_t *,
295 u_int32_t *);
296u_int em_tx_ctx_setup(struct em_queue *, struct mbuf *, u_int, u_int32_t *,
297 u_int32_t *);
298void em_iff(struct em_softc *);
299void em_update_link_status(struct em_softc *);
300int em_get_buf(struct em_queue *, int);
301void em_enable_hw_vlans(struct em_softc *);
302u_int em_encap(struct em_queue *, struct mbuf *);
303void em_smartspeed(struct em_softc *);
304int em_82547_fifo_workaround(struct em_softc *, int);
305void em_82547_update_fifo_head(struct em_softc *, int);
306int em_82547_tx_fifo_reset(struct em_softc *);
307void em_82547_move_tail(void *arg);
308void em_82547_move_tail_locked(struct em_softc *);
309int em_dma_malloc(struct em_softc *, bus_size_t, struct em_dma_alloc *);
310void em_dma_free(struct em_softc *, struct em_dma_alloc *);
311u_int32_t em_fill_descriptors(u_int64_t address, u_int32_t length,
312 PDESC_ARRAY desc_array);
313void em_flush_tx_ring(struct em_queue *);
314void em_flush_rx_ring(struct em_queue *);
315void em_flush_desc_rings(struct em_softc *);
316int em_get_sffpage(struct em_softc *, struct if_sffpage *);
317
318#ifndef SMALL_KERNEL
319/* MSIX/Multiqueue functions */
320int em_allocate_msix(struct em_softc *);
321int em_setup_queues_msix(struct em_softc *);
322int em_queue_intr_msix(void *);
323int em_link_intr_msix(void *);
324void em_enable_queue_intr_msix(struct em_queue *);
325#else
326#define em_allocate_msix(_sc) (-1)
327#endif
328
329 #if NKSTAT > 0
330void em_kstat_attach(struct em_softc *);
331int em_kstat_read(struct kstat *);
332void em_tbi_adjust_stats(struct em_softc *, uint32_t, uint8_t *);
333#endif
334
335/*********************************************************************
336 * OpenBSD Device Interface Entry Points
337 *********************************************************************/
338
339const struct cfattach em_ca = {
340 sizeof(struct em_softc), em_probe, em_attach, em_detach,
341 em_activate
342};
343
344struct cfdriver em_cd = {
345 NULL, "em", DV_IFNET
346};
347
348 static int em_smart_pwr_down = FALSE;
349int em_enable_msix = 0;
350
351/*********************************************************************
352 * Device identification routine
353 *
354 * em_probe determines if the driver should be loaded for the
355 * adapter, based on the PCI vendor/device ID of the adapter.
356 *
357 * return 0 on no match, positive on match
358 *********************************************************************/
359
360int
361em_probe(struct device *parent, void *match, void *aux)
362{
363 INIT_DEBUGOUT("em_probe: begin");
364
365 return (pci_matchbyid((struct pci_attach_args *)aux, em_devices,
366 nitems(em_devices)));
367}
368
369void
370em_defer_attach(struct device *self)
371{
372 struct em_softc *sc = (struct em_softc *)self;
373 struct pci_attach_args *pa = &sc->osdep.em_pa;
374 pci_chipset_tag_t pc = pa->pa_pc;
375 void *gcu;
376
377 INIT_DEBUGOUT("em_defer_attach: begin");
378
379 if ((gcu = em_lookup_gcu(self)) == 0) {
380 printf("%s: No GCU found, deferred attachment failed\n",
381 DEVNAME(sc));
382
383 if (sc->sc_intrhand)
384 pci_intr_disestablish(pc, sc->sc_intrhand);
385 sc->sc_intrhand = 0;
386
387 em_stop(sc, 1);
388
389 em_free_pci_resources(sc);
390
391 return;
392 }
393
394 sc->hw.gcu = gcu;
395
396 em_attach_miibus(self);
397
398 em_setup_interface(sc);
399
400 em_setup_link(&sc->hw);
401
402 em_update_link_status(sc);
403}
404
405/*********************************************************************
406 * Device initialization routine
407 *
408 * The attach entry point is called when the driver is being loaded.
409 * This routine identifies the type of hardware, allocates all resources
410 * and initializes the hardware.
411 *
412 *********************************************************************/
413
414void
415em_attach(struct device *parent, struct device *self, void *aux)
416{
417 struct pci_attach_args *pa = aux;
418 struct em_softc *sc;
419 int defer = 0;
420
421 INIT_DEBUGOUT("em_attach: begin");
422
423 sc = (struct em_softc *)self;
424 sc->sc_dmat = pa->pa_dmat;
425 sc->osdep.em_pa = *pa;
426
427 timeout_set(&sc->timer_handle, em_local_timer, sc);
428 timeout_set(&sc->tx_fifo_timer_handle, em_82547_move_tail, sc);
429
430 rw_init(&sc->sfflock, "emsff");
431
432 /* Determine hardware revision */
433 em_identify_hardware(sc);
434
435 /*
436 * Only use MSI on the newer PCIe parts, with the exception
437 * of 82571/82572 due to "Byte Enables 2 and 3 Are Not Set" errata
438 */
439 if (sc->hw.mac_type <= em_82572)
440 sc->osdep.em_pa.pa_flags &= ~PCI_FLAGS_MSI_ENABLED;
441
442 /* Parameters (to be read from user) */
443 if (sc->hw.mac_type >= em_82544) {
444 sc->sc_tx_slots = EM_MAX_TXD;
445 sc->sc_rx_slots = EM_MAX_RXD;
446 } else {
447 sc->sc_tx_slots = EM_MAX_TXD_82543;
448 sc->sc_rx_slots = EM_MAX_RXD_82543;
449 }
450 sc->tx_int_delay = EM_TIDV;
451 sc->tx_abs_int_delay = EM_TADV;
452 sc->rx_int_delay = EM_RDTR;
453 sc->rx_abs_int_delay = EM_RADV;
454 sc->hw.autoneg = DO_AUTO_NEG;
455 sc->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
456 sc->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
457 sc->hw.tbi_compatibility_en = TRUE;
458 sc->sc_rx_buffer_len = EM_RXBUFFER_2048;
459
460 sc->hw.phy_init_script = 1;
461 sc->hw.phy_reset_disable = FALSE;
462
463#ifndef EM_MASTER_SLAVE
464 sc->hw.master_slave = em_ms_hw_default;
465#else
466 sc->hw.master_slave = EM_MASTER_SLAVE;
467#endif
468
469 /*
470 * This controls when hardware reports transmit completion
471 * status.
472 */
473 sc->hw.report_tx_early = 1;
474
475 if (em_allocate_pci_resources(sc))
476 goto err_pci;
477
478 /* Initialize eeprom parameters */
479 em_init_eeprom_params(&sc->hw);
480
481 /*
482 * Set the max frame size assuming standard Ethernet
483 * sized frames.
484 */
485 switch (sc->hw.mac_type) {
486 case em_82573:
487 {
488 uint16_t eeprom_data = 0;
489
490 /*
491 * 82573 only supports Jumbo frames
492 * if ASPM is disabled.
493 */
494 em_read_eeprom(&sc->hw, EEPROM_INIT_3GIO_3,
495 1, &eeprom_data);
496 if (eeprom_data & EEPROM_WORD1A_ASPM_MASK) {
497 sc->hw.max_frame_size = ETHER_MAX_LEN;
498 break;
499 }
500 /* Allow Jumbo frames */
501 /* FALLTHROUGH */
502 }
503 case em_82571:
504 case em_82572:
505 case em_82574:
506 case em_82575:
507 case em_82576:
508 case em_82580:
509 case em_i210:
510 case em_i350:
511 case em_ich9lan:
512 case em_ich10lan:
513 case em_pch2lan:
514 case em_pch_lpt:
515 case em_pch_spt:
516 case em_pch_cnp:
517 case em_pch_tgp:
518 case em_pch_adp:
519 case em_80003es2lan:
520 /* 9K Jumbo Frame size */
521 sc->hw.max_frame_size = 9234;
522 break;
523 case em_pchlan:
524 sc->hw.max_frame_size = 4096;
525 break;
526 case em_82542_rev2_0:
527 case em_82542_rev2_1:
528 case em_ich8lan:
529 /* Adapters that do not support Jumbo frames */
530 sc->hw.max_frame_size = ETHER_MAX_LEN;
531 break;
532 default:
533 sc->hw.max_frame_size =
534 MAX_JUMBO_FRAME_SIZE;
535 }
536
537 sc->hw.min_frame_size =
538 ETHER_MIN_LEN + ETHER_CRC_LEN;
539
540 if (em_allocate_desc_rings(sc) != 0) {
541 printf("%s: Unable to allocate descriptor ring memory\n",
542 DEVNAME(sc));
543 goto err_pci;
544 }
545
546 /* Initialize the hardware */
547 if ((defer = em_hardware_init(sc))) {
548 if (defer == EAGAIN)
549 config_defer(self, em_defer_attach);
550 else {
551 printf("%s: Unable to initialize the hardware\n",
552 DEVNAME(sc));
553 goto err_pci;
554 }
555 }
556
557 if (sc->hw.mac_type == em_80003es2lan || sc->hw.mac_type == em_82575 ||
558 sc->hw.mac_type == em_82576 ||
559 sc->hw.mac_type == em_82580 || sc->hw.mac_type == em_i210 ||
560 sc->hw.mac_type == em_i350) {
561 uint32_t reg = EM_READ_REG(&sc->hw, E1000_STATUS);
562 sc->hw.bus_func = (reg & E1000_STATUS_FUNC_MASK) >>
563 E1000_STATUS_FUNC_SHIFT;
564
565 switch (sc->hw.bus_func) {
566 case 0:
567 sc->hw.swfw = E1000_SWFW_PHY0_SM;
568 break;
569 case 1:
570 sc->hw.swfw = E1000_SWFW_PHY1_SM;
571 break;
572 case 2:
573 sc->hw.swfw = E1000_SWFW_PHY2_SM;
574 break;
575 case 3:
576 sc->hw.swfw = E1000_SWFW_PHY3_SM;
577 break;
578 }
579 } else {
580 sc->hw.bus_func = 0;
581 }
582
583 /* Copy the permanent MAC address out of the EEPROM */
584 if (em_read_mac_addr(&sc->hw) < 0) {
585 printf("%s: EEPROM read error while reading mac address\n",
586 DEVNAME(sc));
587 goto err_pci;
588 }
589
590 bcopy(sc->hw.mac_addr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
591
592 /* Setup OS specific network interface */
593 if (!defer)
594 em_setup_interface(sc);
595
596 /* Initialize statistics */
597 em_clear_hw_cntrs(&sc->hw);
598#if NKSTAT > 0
599 em_kstat_attach(sc);
600#endif
601 sc->hw.get_link_status = 1;
602 if (!defer)
603 em_update_link_status(sc);
604
605#ifdef EM_DEBUG
606 printf(", mac %#x phy %#x", sc->hw.mac_type, sc->hw.phy_type);
607#endif
608 printf(", address %s\n", ether_sprintf(sc->sc_ac.ac_enaddr));
609
610 /* Indicate SOL/IDER usage */
611 if (em_check_phy_reset_block(&sc->hw))
612 printf("%s: PHY reset is blocked due to SOL/IDER session.\n",
613 DEVNAME(sc));
614
615 /* Identify 82544 on PCI-X */
616 em_get_bus_info(&sc->hw);
617 if (sc->hw.bus_type == em_bus_type_pcix &&
618 sc->hw.mac_type == em_82544)
619 sc->pcix_82544 = TRUE;
620 else
621 sc->pcix_82544 = FALSE;
622
623 sc->hw.icp_xxxx_is_link_up = FALSE;
624
625 INIT_DEBUGOUT("em_attach: end");
626 return;
627
628err_pci:
629 em_free_pci_resources(sc);
630}
631
632/*********************************************************************
633 * Transmit entry point
634 *
635 * em_start is called by the stack to initiate a transmit.
636 * The driver will remain in this routine as long as there are
637 * packets to transmit and transmit resources are available.
638 * In case resources are not available, the stack is notified
639 * and the packet is requeued.
640 **********************************************************************/
641
642void
643em_start(struct ifqueue *ifq)
644{
645 struct ifnet *ifp = ifq->ifq_if;
646 struct em_softc *sc = ifp->if_softc;
647 u_int head, free, used;
648 struct mbuf *m;
649 int post = 0;
650 struct em_queue *que = sc->queues; /* Use only first queue. */
651
652 if (!sc->link_active) {
1
Assuming field 'link_active' is not equal to 0
2
Taking false branch
653 ifq_purge(ifq);
654 return;
655 }
656
657 /* calculate free space */
658 head = que->tx.sc_tx_desc_head;
659 free = que->tx.sc_tx_desc_tail;
660 if (free <= head)
3
Assuming 'free' is > 'head'
4
Taking false branch
661 free += sc->sc_tx_slots;
662 free -= head;
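
(The three statements above are standard ring arithmetic: free = (sc_tx_desc_tail - sc_tx_desc_head) mod sc_tx_slots, where tail == head reads as a completely free ring, presumably because the cleanup side never advances the tail past the head. The modulo formulation is ours, not the driver's.)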
663
664 if (sc->hw.mac_type != em_82547) {
5
Assuming field 'mac_type' is equal to em_82547
6
Taking false branch
665 bus_dmamap_sync(sc->sc_dmat, que->tx.sc_tx_dma.dma_map,
666 0, que->tx.sc_tx_dma.dma_map->dm_mapsize,
667 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
669
670 for (;;) {
7
Loop condition is true. Entering loop body
671 /* use 2 because cksum setup can use an extra slot */
672 if (EM_MAX_SCATTER + 2 > free) {
8
Assuming the condition is false
9
Taking false branch
673 ifq_set_oactive(ifq);
674 break;
675 }
676
677 m = ifq_dequeue(ifq);
678 if (m == NULL)
10
Assuming 'm' is not equal to NULL
11
Taking false branch
679 break;
680
681 used = em_encap(que, m);
12
Calling 'em_encap'
682 if (used == 0) {
683 m_freem(m);
684 continue;
685 }
686
687 KASSERT(used <= free);
688
689 free -= used;
690
691#if NBPFILTER > 0
692 /* Send a copy of the frame to the BPF listener */
693 if (ifp->if_bpf)
694 bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
695#endif
696
697 /* Set timeout in case hardware has problems transmitting */
698 ifp->if_timer = EM_TX_TIMEOUT;
699
700 if (sc->hw.mac_type == em_82547) {
701 int len = m->m_pkthdr.len;
702
703 if (sc->link_duplex == HALF_DUPLEX)
704 em_82547_move_tail_locked(sc);
705 else {
706 E1000_WRITE_REG(&sc->hw, TDT(que->me),
707 que->tx.sc_tx_desc_head);
708 em_82547_update_fifo_head(sc, len);
709 }
710 }
711
712 post = 1;
713 }
714
715 if (sc->hw.mac_type != em_82547) {
716 bus_dmamap_sync(sc->sc_dmat, que->tx.sc_tx_dma.dma_map,
717 0, que->tx.sc_tx_dma.dma_map->dm_mapsize,
718 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
719 /*
720 * Advance the Transmit Descriptor Tail (Tdt),
721 * this tells the E1000 that this frame is
722 * available to transmit.
723 */
724 if (post)
725 E1000_WRITE_REG(&sc->hw, TDT(que->me),
726 que->tx.sc_tx_desc_head);
727 }
728}
729
730/*********************************************************************
731 * Ioctl entry point
732 *
733 * em_ioctl is called when the user wants to configure the
734 * interface.
735 *
736 * return 0 on success, positive on failure
737 **********************************************************************/
738
739int
740em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
741{
742 int error = 0;
743 struct ifreq *ifr = (struct ifreq *) data;
744 struct em_softc *sc = ifp->if_softc;
745 int s;
746
747 s = splnet();
748
749 switch (command) {
750 case SIOCSIFADDR:
751 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFADDR (Set Interface "
752 "Addr)");
753 if (!(ifp->if_flags & IFF_UP)) {
754 ifp->if_flags |= IFF_UP;
755 em_init(sc);
756 }
757 break;
758
759 case SIOCSIFFLAGS:
760 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
761 if (ifp->if_flags & IFF_UP) {
762 if (ifp->if_flags & IFF_RUNNING)
763 error = ENETRESET;
764 else
765 em_init(sc);
766 } else {
767 if (ifp->if_flags & IFF_RUNNING)
768 em_stop(sc, 0);
769 }
770 break;
771
772 case SIOCSIFMEDIA:
773 /* Check SOL/IDER usage */
774 if (em_check_phy_reset_block(&sc->hw)) {
775 printf("%s: Media change is blocked due to SOL/IDER session.\n",
776 DEVNAME(sc));
777 break;
778 }
779 case SIOCGIFMEDIA:
780 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
781 error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
782 break;
783
784 case SIOCGIFRXR:
785 error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
786 NULL, EM_MCLBYTES, &sc->queues->rx.sc_rx_ring);
787 break;
788
789 case SIOCGIFSFFPAGE:
790 error = rw_enter(&sc->sfflock, RW_WRITE|RW_INTR);
791 if (error != 0)
792 break;
793
794 error = em_get_sffpage(sc, (struct if_sffpage *)data);
795 rw_exit(&sc->sfflock);
796 break;
797
798 default:
799 error = ether_ioctl(ifp, &sc->sc_ac, command, data);
800 }
801
802 if (error == ENETRESET) {
803 if (ifp->if_flags & IFF_RUNNING) {
804 em_disable_intr(sc);
805 em_iff(sc);
806 if (sc->hw.mac_type == em_82542_rev2_0)
807 em_initialize_receive_unit(sc);
808 em_enable_intr(sc);
809 }
810 error = 0;
811 }
812
813 splx(s);
814 return (error);
815}
816
817/*********************************************************************
818 * Watchdog entry point
819 *
820 * This routine is called whenever hardware quits transmitting.
821 *
822 **********************************************************************/
823
824void
825em_watchdog(struct ifnet *ifp)
826{
827 struct em_softc *sc = ifp->if_softc;
828 struct em_queue *que = sc->queues; /* Use only first queue. */
829
830
831 /* If we are in this routine because of pause frames, then
832 * don't reset the hardware.
833 */
834 if (E1000_READ_REG(&sc->hw, STATUS) & E1000_STATUS_TXOFF) {
835 ifp->if_timer = EM_TX_TIMEOUT;
836 return;
837 }
838 printf("%s: watchdog: head %u tail %u TDH %u TDT %u\n",
839 DEVNAME(sc),
840 que->tx.sc_tx_desc_head, que->tx.sc_tx_desc_tail,
841 E1000_READ_REG(&sc->hw, TDH(que->me)),
842 E1000_READ_REG(&sc->hw, TDT(que->me)));
843
844 em_init(sc);
845
846 sc->watchdog_events++;
847}
848
849/*********************************************************************
850 * Init entry point
851 *
852 * This routine is used in two ways. It is used by the stack as
853 * init entry point in network interface structure. It is also used
854 * by the driver as a hw/sw initialization routine to get to a
855 * consistent state.
856 *
857 **********************************************************************/
858
859void
860em_init(void *arg)
861{
862 struct em_softc *sc = arg;
863 struct ifnet *ifp = &sc->sc_ac.ac_if;
864 uint32_t pba;
865 int s;
866
867 s = splnet();
868
869 INIT_DEBUGOUT("em_init: begin");
870
871 em_stop(sc, 0);
872
873 /*
874 * Packet Buffer Allocation (PBA)
875 * Writing PBA sets the receive portion of the buffer
876 * the remainder is used for the transmit buffer.
877 *
878 * Devices before the 82547 had a Packet Buffer of 64K.
879 * Default allocation: PBA=48K for Rx, leaving 16K for Tx.
880 * After the 82547 the buffer was reduced to 40K.
881 * Default allocation: PBA=30K for Rx, leaving 10K for Tx.
882 * Note: default does not leave enough room for Jumbo Frame >10k.
883 */
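
(To make the split concrete, using the macro values from the e1000 headers (E1000_PBA_22K = 0x16, E1000_PBA_40K = 0x28, EM_PBA_BYTES_SHIFT = 0xA, PBA values in 1 KB units): an 82547 with jumbo frames gets pba = 22K for receive, leaving tx_fifo_size = (0x28 - 0x16) << 0xA = 18432 bytes, i.e. 18K, for transmit.)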
884 switch (sc->hw.mac_type) {
885 case em_82547:
886 case em_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
887 if (sc->hw.max_frame_size > EM_RXBUFFER_8192)
888 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
889 else
890 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
891 sc->tx_fifo_head = 0;
892 sc->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
893 sc->tx_fifo_size = (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
894 break;
895 case em_82571:
896 case em_82572: /* Total Packet Buffer on these is 48k */
897 case em_82575:
898 case em_82576:
899 case em_82580:
900 case em_80003es2lan:
901 case em_i350:
902 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
903 break;
904 case em_i210:
905 pba = E1000_PBA_34K;
906 break;
907 case em_82573: /* 82573: Total Packet Buffer is 32K */
908 /* Jumbo frames not supported */
909 pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
910 break;
911 case em_82574: /* Total Packet Buffer is 40k */
912 pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
913 break;
914 case em_ich8lan:
915 pba = E1000_PBA_8K;
916 break;
917 case em_ich9lan:
918 case em_ich10lan:
919 /* Boost Receive side for jumbo frames */
920 if (sc->hw.max_frame_size > EM_RXBUFFER_4096)
921 pba = E1000_PBA_14K;
922 else
923 pba = E1000_PBA_10K;
924 break;
925 case em_pchlan:
926 case em_pch2lan:
927 case em_pch_lpt:
928 case em_pch_spt:
929 case em_pch_cnp:
930 case em_pch_tgp:
931 case em_pch_adp:
932 pba = E1000_PBA_26K;
933 break;
934 default:
935 /* Devices before 82547 had a Packet Buffer of 64K. */
936 if (sc->hw.max_frame_size > EM_RXBUFFER_8192)
937 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
938 else
939 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
940 }
941 INIT_DEBUGOUT1("em_init: pba=%dK",pba);
942 E1000_WRITE_REG(&sc->hw, PBA, pba);
943
944 /* Get the latest mac address, User can use a LAA */
945 bcopy(sc->sc_ac.ac_enaddr, sc->hw.mac_addr, ETHER_ADDR_LEN);
946
947 /* Initialize the hardware */
948 if (em_hardware_init(sc)) {
949 printf("%s: Unable to initialize the hardware\n",
950 DEVNAME(sc));
951 splx(s);
952 return;
953 }
954 em_update_link_status(sc);
955
956 E1000_WRITE_REG(&sc->hw, VET, ETHERTYPE_VLAN);
957 if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
958 em_enable_hw_vlans(sc);
959
960 /* Prepare transmit descriptors and buffers */
961 if (em_setup_transmit_structures(sc)) {
962 printf("%s: Could not setup transmit structures\n",
963 DEVNAME(sc));
964 em_stop(sc, 0);
965 splx(s);
966 return;
967 }
968 em_initialize_transmit_unit(sc);
969
970 /* Prepare receive descriptors and buffers */
971 if (em_setup_receive_structures(sc)) {
972 printf("%s: Could not setup receive structures\n",
973 DEVNAME(sc));
974 em_stop(sc, 0);
975 splx(s);
976 return;
977 }
978 em_initialize_receive_unit(sc);
979
980#ifndef SMALL_KERNEL
981 if (sc->msix) {
982 if (em_setup_queues_msix(sc)) {
983 printf("%s: Can't setup msix queues\n", DEVNAME(sc));
984 splx(s);
985 return;
986 }
987 }
988#endif
989
990 /* Program promiscuous mode and multicast filters. */
991 em_iff(sc);
992
993 ifp->if_flags |= IFF_RUNNING;
994 ifq_clr_oactive(&ifp->if_snd);
995
996 timeout_add_sec(&sc->timer_handle, 1);
997 em_clear_hw_cntrs(&sc->hw);
998 em_enable_intr(sc);
999
1000 /* Don't reset the phy next time init gets called */
1001 sc->hw.phy_reset_disable = TRUE;
1002
1003 splx(s);
1004}
1005
1006/*********************************************************************
1007 *
1008 * Interrupt Service routine
1009 *
1010 **********************************************************************/
1011int
1012em_intr(void *arg)
1013{
1014 struct em_softc *sc = arg;
1015 struct em_queue *que = sc->queues; /* single queue */
1016 struct ifnet *ifp = &sc->sc_ac.ac_if;
1017 u_int32_t reg_icr, test_icr;
1018
1019 test_icr = reg_icr = E1000_READ_REG(&sc->hw, ICR);
1020 if (sc->hw.mac_type >= em_82571)
1021 test_icr = (reg_icr & E1000_ICR_INT_ASSERTED);
1022 if (!test_icr)
1023 return (0);
1024
1025 if (ifp->if_flags & IFF_RUNNING) {
1026 em_txeof(que);
1027 if (em_rxeof(que))
1028 em_rxrefill_locked(que);
1029 }
1030
1031 /* Link status change */
1032 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1033 KERNEL_LOCK();
1034 sc->hw.get_link_status = 1;
1035 em_check_for_link(&sc->hw);
1036 em_update_link_status(sc);
1037 KERNEL_UNLOCK();
1038 }
1039
1040 return (1);
1041}
1042
1043/*********************************************************************
1044 *
1045 * Media Ioctl callback
1046 *
1047 * This routine is called whenever the user queries the status of
1048 * the interface using ifconfig.
1049 *
1050 **********************************************************************/
1051void
1052em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1053{
1054 struct em_softc *sc = ifp->if_softc;
1055 uint64_t fiber_type = IFM_1000_SX;
1056 u_int16_t gsr;
1057
1058 INIT_DEBUGOUT("em_media_status: begin");
1059
1060 em_check_for_link(&sc->hw);
1061 em_update_link_status(sc);
1062
1063 ifmr->ifm_status = IFM_AVALID;
1064 ifmr->ifm_active = IFM_ETHER;
1065
1066 if (!sc->link_active) {
1067 ifmr->ifm_active |= IFM_NONE;
1068 return;
1069 }
1070
1071 ifmr->ifm_status |= IFM_ACTIVE;
1072
1073 if (sc->hw.media_type == em_media_type_fiber ||
1074 sc->hw.media_type == em_media_type_internal_serdes) {
1075 if (sc->hw.mac_type == em_82545)
1076 fiber_type = IFM_1000_LX;
1077 ifmr->ifm_active |= fiber_type | IFM_FDX;
1078 } else {
1079 switch (sc->link_speed) {
1080 case 10:
1081 ifmr->ifm_active |= IFM_10_T;
1082 break;
1083 case 100:
1084 ifmr->ifm_active |= IFM_100_TX;
1085 break;
1086 case 1000:
1087 ifmr->ifm_active |= IFM_1000_T;
1088 break;
1089 }
1090
1091 if (sc->link_duplex == FULL_DUPLEX)
1092 ifmr->ifm_active |= em_flowstatus(sc) | IFM_FDX;
1093 else
1094 ifmr->ifm_active |= IFM_HDX;
1095
1096 if (IFM_SUBTYPE(ifmr->ifm_active) == IFM_1000_T) {
1097 em_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &gsr);
1098 if (gsr & SR_1000T_MS_CONFIG_RES)
1099 ifmr->ifm_active |= IFM_ETH_MASTER;
1100 }
1101 }
1102}
1103
1104/*********************************************************************
1105 *
1106 * Media Ioctl callback
1107 *
1108 * This routine is called when the user changes speed/duplex using
1109 * media/mediopt option with ifconfig.
1110 *
1111 **********************************************************************/
1112int
1113em_media_change(struct ifnet *ifp)
1114{
1115 struct em_softc *sc = ifp->if_softc;
1116 struct ifmedia *ifm = &sc->media;
1117
1118 INIT_DEBUGOUT("em_media_change: begin");
1119
1120 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1121 return (EINVAL);
1122
1123 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1124 case IFM_AUTO:
1125 sc->hw.autoneg = DO_AUTO_NEG;
1126 sc->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1127 break;
1128 case IFM_1000_LX:
1129 case IFM_1000_SX:
1130 case IFM_1000_T:
1131 sc->hw.autoneg = DO_AUTO_NEG;
1132 sc->hw.autoneg_advertised = ADVERTISE_1000_FULL;
1133 break;
1134 case IFM_100_TX:
1135 sc->hw.autoneg = FALSE;
1136 sc->hw.autoneg_advertised = 0;
1137 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1138 sc->hw.forced_speed_duplex = em_100_full;
1139 else
1140 sc->hw.forced_speed_duplex = em_100_half;
1141 break;
1142 case IFM_10_T:
1143 sc->hw.autoneg = FALSE;
1144 sc->hw.autoneg_advertised = 0;
1145 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1146 sc->hw.forced_speed_duplex = em_10_full;
1147 else
1148 sc->hw.forced_speed_duplex = em_10_half;
1149 break;
1150 default:
1151 printf("%s: Unsupported media type\n", DEVNAME(sc));
1152 }
1153
1154 /*
1155 * As the speed/duplex settings may have changed we need to
1156 * reset the PHY.
1157 */
1158 sc->hw.phy_reset_disable = FALSE;
1159
1160 em_init(sc);
1161
1162 return (0);
1163}
1164
1165uint64_t
1166em_flowstatus(struct em_softc *sc)
1167{
1168 u_int16_t ar, lpar;
1169
1170 if (sc->hw.media_type == em_media_type_fiber ||
1171 sc->hw.media_type == em_media_type_internal_serdes)
1172 return (0);
1173
1174 em_read_phy_reg(&sc->hw, PHY_AUTONEG_ADV, &ar);
1175 em_read_phy_reg(&sc->hw, PHY_LP_ABILITY, &lpar);
1176
1177 if ((ar & NWAY_AR_PAUSE) && (lpar & NWAY_LPAR_PAUSE))
1178 return (IFM_FLOW|IFM_ETH_TXPAUSE|IFM_ETH_RXPAUSE);
1179 else if (!(ar & NWAY_AR_PAUSE) && (ar & NWAY_AR_ASM_DIR) &&
1180 (lpar & NWAY_LPAR_PAUSE) && (lpar & NWAY_LPAR_ASM_DIR))
1181 return (IFM_FLOW|IFM_ETH_TXPAUSE);
1182 else if ((ar & NWAY_AR_PAUSE) && (ar & NWAY_AR_ASM_DIR) &&
1183 !(lpar & NWAY_LPAR_PAUSE) && (lpar & NWAY_LPAR_ASM_DIR))
1184 return (IFM_FLOW|IFM_ETH_RXPAUSE);
1185
1186 return (0);
1187}
1188
1189/*********************************************************************
1190 *
1191 * This routine maps the mbufs to tx descriptors.
1192 *
1193 * return 0 on failure, positive on success
1194 **********************************************************************/
1195u_int
1196em_encap(struct em_queue *que, struct mbuf *m)
1197{
1198 struct em_softc *sc = que->sc;
1199 struct em_packet *pkt;
1200 struct em_tx_desc *desc;
1201 bus_dmamap_t map;
1202 u_int32_t txd_upper, txd_lower;
1203 u_int head, last, used = 0;
13
'last' declared without an initial value
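
(The per-segment loop at line 1261 below is the only place 'last' is ever assigned, so the rest of the path turns on whether that loop body executes at least once.)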
1204 int i, j;
1205
1206 /* For 82544 Workaround */
1207 DESC_ARRAY desc_array;
1208 u_int32_t array_elements;
1209
1210 /* get a dmamap for this packet from the next free slot */
1211 head = que->tx.sc_tx_desc_head;
1212 pkt = &que->tx.sc_tx_pkts_ring[head];
1213 map = pkt->pkt_map;
1214
1215 switch (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT)) {
14
Control jumps to 'case 27:' at line 1218
1216 case 0:
1217 break;
1218 case EFBIG:
1219 if (m_defrag(m, M_DONTWAIT) == 0 &&
15
Assuming the condition is true
17
Taking true branch
1220 bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
16
Assuming the condition is true
1221 BUS_DMA_NOWAIT) == 0)
1222 break;
18
Execution continues on line 1230
1223
1224 /* FALLTHROUGH */
1225 default:
1226 sc->no_tx_dma_setup++;
1227 return (0);
1228 }
1229
1230 bus_dmamap_sync(sc->sc_dmat, map,
1231 0, map->dm_mapsize,
1232 BUS_DMASYNC_PREWRITE);
1234 if (sc->hw.mac_type == em_82547) {
19
Assuming field 'mac_type' is not equal to em_82547
1235 bus_dmamap_sync(sc->sc_dmat, que->tx.sc_tx_dma.dma_map,
1236 0, que->tx.sc_tx_dma.dma_map->dm_mapsize,
1237 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1238 }
1239
1240 if (sc->hw.mac_type >= em_82575 && sc->hw.mac_type <= em_i210) {
20. Assuming field 'mac_type' is < em_82575
1241		if (ISSET(m->m_pkthdr.csum_flags, M_TCP_TSO)) {
1242 used += em_tso_setup(que, m, head, &txd_upper,
1243 &txd_lower);
1244 if (!used)
1245 return (used);
1246 } else {
1247 used += em_tx_ctx_setup(que, m, head, &txd_upper,
1248 &txd_lower);
1249 }
1250 } else if (sc->hw.mac_type >= em_82543) {
21. Assuming field 'mac_type' is < em_82543
22. Taking false branch
1251 used += em_transmit_checksum_setup(que, m, head,
1252 &txd_upper, &txd_lower);
1253 } else {
1254 txd_upper = txd_lower = 0;
1255 }
1256
1257 head += used;
1258 if (head >= sc->sc_tx_slots)
23. Assuming 'head' is < field 'sc_tx_slots'
24. Taking false branch
1259 head -= sc->sc_tx_slots;
1260
1261 for (i = 0; i < map->dm_nsegs; i++) {
25. Assuming 'i' is >= field 'dm_nsegs'
1262 /* If sc is 82544 and on PCI-X bus */
1263 if (sc->pcix_82544) {
1264 /*
1265 * Check the Address and Length combination and
1266 * split the data accordingly
1267 */
1268 array_elements = em_fill_descriptors(
1269 map->dm_segs[i].ds_addr, map->dm_segs[i].ds_len,
1270 &desc_array);
1271 for (j = 0; j < array_elements; j++) {
1272 desc = &que->tx.sc_tx_desc_ring[head];
1273
1274				desc->buffer_addr = htole64(
1275				    desc_array.descriptor[j].address);
1276				desc->lower.data = htole32(
1277				    (que->tx.sc_txd_cmd | txd_lower |
1278				    (u_int16_t)desc_array.descriptor[j].length));
1279				desc->upper.data = htole32(txd_upper);
1280
1281 last = head;
1282 if (++head == sc->sc_tx_slots)
1283 head = 0;
1284
1285 used++;
1286 }
1287 } else {
1288 desc = &que->tx.sc_tx_desc_ring[head];
1289
1290			desc->buffer_addr = htole64(map->dm_segs[i].ds_addr);
1291			desc->lower.data = htole32(que->tx.sc_txd_cmd |
1292			    txd_lower | map->dm_segs[i].ds_len);
1293			desc->upper.data = htole32(txd_upper);
1294
1295 last = head;
1296 if (++head == sc->sc_tx_slots)
1297 head = 0;
1298
1299 used++;
1300 }
1301 }
1302
1303 #if NVLAN > 0
1304 /* Find out if we are in VLAN mode */
1305	if (m->m_flags & M_VLANTAG && (sc->hw.mac_type < em_82575 ||
26. Assuming the condition is false
1306 sc->hw.mac_type > em_i210)) {
1307 /* Set the VLAN id */
1308		desc->upper.fields.special = htole16(m->m_pkthdr.ether_vtag);
1309
1310 /* Tell hardware to add tag */
1311		desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
1312	}
1313 #endif
1314
1315 /* mark the packet with the mbuf and last desc slot */
1316 pkt->pkt_m = m;
1317 pkt->pkt_eop = last;
27. Assigned value is garbage or undefined
1318
1319 que->tx.sc_tx_desc_head = head;
1320
1321 /*
1322 * Last Descriptor of Packet
1323 * needs End Of Packet (EOP)
1324 * and Report Status (RS)
1325 */
1326	desc->lower.data |= htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
1327
1328 if (sc->hw.mac_type == em_82547) {
1329		bus_dmamap_sync(sc->sc_dmat, que->tx.sc_tx_dma.dma_map,
1330		    0, que->tx.sc_tx_dma.dma_map->dm_mapsize,
1331		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1332 }
1333
1334 return (used);
1335 }
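Note on the warning at line 1317: the analyzer's path (steps 13 and 25
above) reaches 'pkt->pkt_eop = last' with 'last' never written, which can
only happen if bus_dmamap_load_mbuf() succeeds while leaving
map->dm_nsegs == 0, so the descriptor loop at line 1261 never runs. A
successful load normally maps at least one segment, so this is likely a
false positive, but a defensive initialization makes the invariant
explicit and silences the path. A minimal sketch (illustrative only, not
the committed fix):

	head = que->tx.sc_tx_desc_head;
	last = head;	/* defined even if no segment gets mapped */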
1336
1337/*********************************************************************
1338 *
1339 * 82547 workaround to avoid controller hang in half-duplex environment.
1340 * The workaround is to avoid queuing a large packet that would span
1341 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1342 * in this case. We do that only when FIFO is quiescent.
1343 *
1344 **********************************************************************/
1345 void
1346 em_82547_move_tail_locked(struct em_softc *sc)
1347 {
1348 uint16_t hw_tdt;
1349 uint16_t sw_tdt;
1350 struct em_tx_desc *tx_desc;
1351 uint16_t length = 0;
1352 boolean_t eop = 0;
1353 struct em_queue *que = sc->queues; /* single queue chip */
1354
1355 hw_tdt = E1000_READ_REG(&sc->hw, TDT(que->me))((((struct em_osdep *)(&sc->hw)->back)->mem_bus_space_tag
)->read_4((((struct em_osdep *)(&sc->hw)->back)->
mem_bus_space_handle), (((&sc->hw)->mac_type >= em_82543
? ((que->me) < 4 ? (0x03818 + ((que->me) * 0x100)) :
(0x0E018 + ((que->me) * 0x40))) : em_translate_82542_register
(((que->me) < 4 ? (0x03818 + ((que->me) * 0x100)) : (
0x0E018 + ((que->me) * 0x40))))))))
;
1356 sw_tdt = que->tx.sc_tx_desc_head;
1357
1358 while (hw_tdt != sw_tdt) {
1359 tx_desc = &que->tx.sc_tx_desc_ring[hw_tdt];
1360 length += tx_desc->lower.flags.length;
1361		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
1362 if (++hw_tdt == sc->sc_tx_slots)
1363 hw_tdt = 0;
1364
1365 if (eop) {
1366 if (em_82547_fifo_workaround(sc, length)) {
1367 sc->tx_fifo_wrk_cnt++;
1368 timeout_add(&sc->tx_fifo_timer_handle, 1);
1369 break;
1370 }
1371			E1000_WRITE_REG(&sc->hw, TDT(que->me), hw_tdt);
1372 em_82547_update_fifo_head(sc, length);
1373 length = 0;
1374 }
1375 }
1376 }
1377
1378 void
1379 em_82547_move_tail(void *arg)
1380 {
1381 struct em_softc *sc = arg;
1382 int s;
1383
1384	s = splnet();
1385 em_82547_move_tail_locked(sc);
1386	splx(s);
1387 }
1388
1389 int
1390 em_82547_fifo_workaround(struct em_softc *sc, int len)
1391 {
1392 int fifo_space, fifo_pkt_len;
1393
1394	fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1395
1396	if (sc->link_duplex == HALF_DUPLEX) {
1397 fifo_space = sc->tx_fifo_size - sc->tx_fifo_head;
1398
1399		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1400 if (em_82547_tx_fifo_reset(sc))
1401 return (0);
1402 else
1403 return (1);
1404 }
1405 }
1406
1407 return (0);
1408 }
1409
1410 void
1411 em_82547_update_fifo_head(struct em_softc *sc, int len)
1412 {
1413	int fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1414
1415 /* tx_fifo_head is always 16 byte aligned */
1416 sc->tx_fifo_head += fifo_pkt_len;
1417 if (sc->tx_fifo_head >= sc->tx_fifo_size)
1418 sc->tx_fifo_head -= sc->tx_fifo_size;
1419 }
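Worked example of the FIFO accounting above: EM_ROUNDUP(x, 16) rounds x
up to the next multiple of 16 via ((x + 15) & ~15), and EM_FIFO_HDR is
0x10 (16 bytes). For a 1514-byte frame:

	len + EM_FIFO_HDR = 1514 + 16 = 1530
	(1530 + 15) & ~15 = 1536

so each such packet advances tx_fifo_head by 1536 bytes, wrapping at
tx_fifo_size.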
1420
1421 int
1422 em_82547_tx_fifo_reset(struct em_softc *sc)
1423 {
1424 uint32_t tctl;
1425 struct em_queue *que = sc->queues; /* single queue chip */
1426
1427	if ((E1000_READ_REG(&sc->hw, TDT(que->me)) ==
1428	    E1000_READ_REG(&sc->hw, TDH(que->me))) &&
1429	    (E1000_READ_REG(&sc->hw, TDFT) ==
1430	    E1000_READ_REG(&sc->hw, TDFH)) &&
1431	    (E1000_READ_REG(&sc->hw, TDFTS) ==
1432	    E1000_READ_REG(&sc->hw, TDFHS)) &&
1433	    (E1000_READ_REG(&sc->hw, TDFPC) == 0)) {
1434
1435		/* Disable TX unit */
1436		tctl = E1000_READ_REG(&sc->hw, TCTL);
1437		E1000_WRITE_REG(&sc->hw, TCTL, tctl & ~E1000_TCTL_EN);
1438
1439		/* Reset FIFO pointers */
1440		E1000_WRITE_REG(&sc->hw, TDFT, sc->tx_head_addr);
1441		E1000_WRITE_REG(&sc->hw, TDFH, sc->tx_head_addr);
1442		E1000_WRITE_REG(&sc->hw, TDFTS, sc->tx_head_addr);
1443		E1000_WRITE_REG(&sc->hw, TDFHS, sc->tx_head_addr);
1444
1445		/* Re-enable TX unit */
1446		E1000_WRITE_REG(&sc->hw, TCTL, tctl);
1447		E1000_WRITE_FLUSH(&sc->hw);
1448
1449		sc->tx_fifo_head = 0;
1450		sc->tx_fifo_reset_cnt++;
1451
1452		return (TRUE);
1453	} else
1454		return (FALSE);
1455 }
1456
1457 void
1458 em_iff(struct em_softc *sc)
1459 {
1460 struct ifnet *ifp = &sc->sc_ac.ac_if;
1461 struct arpcom *ac = &sc->sc_ac;
1462 u_int32_t reg_rctl = 0;
1463	u_int8_t mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
1464 struct ether_multi *enm;
1465 struct ether_multistep step;
1466 int i = 0;
1467
1468	IOCTL_DEBUGOUT("em_iff: begin");
1469
1470 if (sc->hw.mac_type == em_82542_rev2_0) {
1471		reg_rctl = E1000_READ_REG(&sc->hw, RCTL);
1472		if (sc->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1473			em_pci_clear_mwi(&sc->hw);
1474		reg_rctl |= E1000_RCTL_RST;
1475		E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
1476		msec_delay(5);
1477	}
1478
1479	reg_rctl = E1000_READ_REG(&sc->hw, RCTL);
1480	reg_rctl &= ~(E1000_RCTL_MPE | E1000_RCTL_UPE);
1481	ifp->if_flags &= ~IFF_ALLMULTI;
1482
1483	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
1484	    ac->ac_multicnt > MAX_NUM_MULTICAST_ADDRESSES) {
1485		ifp->if_flags |= IFF_ALLMULTI;
1486		reg_rctl |= E1000_RCTL_MPE;
1487		if (ifp->if_flags & IFF_PROMISC)
1488			reg_rctl |= E1000_RCTL_UPE;
1489 } else {
1490		ETHER_FIRST_MULTI(step, ac, enm);
1491		while (enm != NULL) {
1492			bcopy(enm->enm_addrlo, mta + i, ETH_LENGTH_OF_ADDRESS);
1493			i += ETH_LENGTH_OF_ADDRESS;
1494
1495			ETHER_NEXT_MULTI(step, enm);
1496		}
1497
1498 em_mc_addr_list_update(&sc->hw, mta, ac->ac_multicnt, 0);
1499 }
1500
1501	E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
1502
1503 if (sc->hw.mac_type == em_82542_rev2_0) {
1504		reg_rctl = E1000_READ_REG(&sc->hw, RCTL);
1505		reg_rctl &= ~E1000_RCTL_RST;
1506		E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
1507		msec_delay(5);
1508		if (sc->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1509 em_pci_set_mwi(&sc->hw);
1510 }
1511 }
1512
1513/*********************************************************************
1514 * Timer routine
1515 *
1516 * This routine checks for link status and updates statistics.
1517 *
1518 **********************************************************************/
1519
1520 void
1521 em_local_timer(void *arg)
1522 {
1523 struct em_softc *sc = arg;
1524 int s;
1525
1526 timeout_add_sec(&sc->timer_handle, 1);
1527
1528	s = splnet();
1529	em_smartspeed(sc);
1530	splx(s);
1531
1532 #if NKSTAT > 0
1533	if (sc->kstat != NULL && mtx_enter_try(&sc->kstat_mtx)) {
1534		em_kstat_read(sc->kstat);
1535		mtx_leave(&sc->kstat_mtx);
1536	}
1537 #endif
1538 }
1539
1540 void
1541 em_update_link_status(struct em_softc *sc)
1542 {
1543 struct ifnet *ifp = &sc->sc_ac.ac_if;
1544 u_char link_state;
1545
1546	if (E1000_READ_REG(&sc->hw, STATUS) & E1000_STATUS_LU) {
1547 if (sc->link_active == 0) {
1548 em_get_speed_and_duplex(&sc->hw,
1549 &sc->link_speed,
1550 &sc->link_duplex);
1551 /* Check if we may set SPEED_MODE bit on PCI-E */
1552			if ((sc->link_speed == SPEED_1000) &&
1553 ((sc->hw.mac_type == em_82571) ||
1554 (sc->hw.mac_type == em_82572) ||
1555 (sc->hw.mac_type == em_82575) ||
1556 (sc->hw.mac_type == em_82576) ||
1557 (sc->hw.mac_type == em_82580))) {
1558 int tarc0;
1559
1560				tarc0 = E1000_READ_REG(&sc->hw, TARC0);
1561				tarc0 |= SPEED_MODE_BIT;
1562				E1000_WRITE_REG(&sc->hw, TARC0, tarc0);
1563 }
1564 sc->link_active = 1;
1565 sc->smartspeed = 0;
1566			ifp->if_baudrate = IF_Mbps(sc->link_speed);
1567		}
1568		link_state = (sc->link_duplex == FULL_DUPLEX) ?
1569		    LINK_STATE_FULL_DUPLEX : LINK_STATE_HALF_DUPLEX;
1570	} else {
1571		if (sc->link_active == 1) {
1572			ifp->if_baudrate = sc->link_speed = 0;
1573			sc->link_duplex = 0;
1574			sc->link_active = 0;
1575		}
1576		link_state = LINK_STATE_DOWN;
1577	}
1578	if (ifp->if_link_state != link_state) {
1579		ifp->if_link_state = link_state;
1580 if_link_state_change(ifp);
1581 }
1582
1583 /* Disable TSO for 10/100 speeds to avoid some hardware issues */
1584 switch (sc->link_speed) {
1585	case SPEED_10:
1586	case SPEED_100:
1587		if (sc->hw.mac_type >= em_82575 && sc->hw.mac_type <= em_i210) {
1588			ifp->if_capabilities &= ~IFCAP_TSOv4;
1589			ifp->if_capabilities &= ~IFCAP_TSOv6;
1590		}
1591		break;
1592	case SPEED_1000:
1593		if (sc->hw.mac_type >= em_82575 && sc->hw.mac_type <= em_i210)
1594			ifp->if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6;
1595 break;
1596 }
1597 }
1598
1599/*********************************************************************
1600 *
1601 * This routine disables all traffic on the adapter by issuing a
1602 * global reset on the MAC and deallocates TX/RX buffers.
1603 *
1604 **********************************************************************/
1605
1606 void
1607 em_stop(void *arg, int softonly)
1608 {
1609 struct em_softc *sc = arg;
1610 struct em_queue *que = sc->queues; /* Use only first queue. */
1611 struct ifnet *ifp = &sc->sc_ac.ac_if;
1612
1613 /* Tell the stack that the interface is no longer active */
1614	ifp->if_flags &= ~IFF_RUNNING;
1615
1616	INIT_DEBUGOUT("em_stop: begin");
1617
1618 timeout_del(&que->rx_refill);
1619 timeout_del(&sc->timer_handle);
1620 timeout_del(&sc->tx_fifo_timer_handle);
1621
1622 if (!softonly)
1623 em_disable_intr(sc);
1624 if (sc->hw.mac_type >= em_pch_spt)
1625 em_flush_desc_rings(sc);
1626 if (!softonly)
1627 em_reset_hw(&sc->hw);
1628
1629 intr_barrier(sc->sc_intrhand);
1630 ifq_barrier(&ifp->if_snd);
1631
1632	KASSERT((ifp->if_flags & IFF_RUNNING) == 0);
1633
1634 ifq_clr_oactive(&ifp->if_snd);
1635 ifp->if_timer = 0;
1636
1637 em_free_transmit_structures(sc);
1638 em_free_receive_structures(sc);
1639 }
1640
1641/*********************************************************************
1642 *
1643 * Determine hardware revision.
1644 *
1645 **********************************************************************/
1646 void
1647 em_identify_hardware(struct em_softc *sc)
1648 {
1649 u_int32_t reg;
1650 struct pci_attach_args *pa = &sc->osdep.em_pa;
1651
1652 /* Make sure our PCI config space has the necessary stuff set */
1653	sc->hw.pci_cmd_word = pci_conf_read(pa->pa_pc, pa->pa_tag,
1654	    PCI_COMMAND_STATUS_REG);
1655
1656	/* Save off the information about this board */
1657	sc->hw.vendor_id = PCI_VENDOR(pa->pa_id);
1658	sc->hw.device_id = PCI_PRODUCT(pa->pa_id);
1659
1660	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
1661	sc->hw.revision_id = PCI_REVISION(reg);
1662
1663	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
1664	sc->hw.subsystem_vendor_id = PCI_VENDOR(reg);
1665	sc->hw.subsystem_id = PCI_PRODUCT(reg);
1666
1667 /* Identify the MAC */
1668 if (em_set_mac_type(&sc->hw))
1669		printf("%s: Unknown MAC Type\n", DEVNAME(sc));
1670
1671 if (sc->hw.mac_type == em_pchlan)
1672		sc->hw.revision_id = PCI_PRODUCT(pa->pa_id) & 0x0f;
1673
1674 if (sc->hw.mac_type == em_82541 ||
1675 sc->hw.mac_type == em_82541_rev_2 ||
1676 sc->hw.mac_type == em_82547 ||
1677 sc->hw.mac_type == em_82547_rev_2)
1678		sc->hw.phy_init_script = TRUE;
1679 }
1680
1681 void
1682 em_legacy_irq_quirk_spt(struct em_softc *sc)
1683 {
1684 uint32_t reg;
1685
1686 /* Legacy interrupt: SPT needs a quirk. */
1687 if (sc->hw.mac_type != em_pch_spt && sc->hw.mac_type != em_pch_cnp &&
1688 sc->hw.mac_type != em_pch_tgp && sc->hw.mac_type != em_pch_adp)
1689 return;
1690 if (sc->legacy_irq == 0)
1691 return;
1692
1693	reg = EM_READ_REG(&sc->hw, E1000_FEXTNVM7);
1694	reg |= E1000_FEXTNVM7_SIDE_CLK_UNGATE;
1695	EM_WRITE_REG(&sc->hw, E1000_FEXTNVM7, reg);
1696
1697	reg = EM_READ_REG(&sc->hw, E1000_FEXTNVM9);
1698	reg |= E1000_FEXTNVM9_IOSFSB_CLKGATE_DIS |
1699	    E1000_FEXTNVM9_IOSFSB_CLKREQ_DIS;
1700	EM_WRITE_REG(&sc->hw, E1000_FEXTNVM9, reg);
1701 }
1702
1703 int
1704 em_allocate_pci_resources(struct em_softc *sc)
1705 {
1706 int val, rid;
1707 struct pci_attach_args *pa = &sc->osdep.em_pa;
1708	struct em_queue *que = NULL;
1709
1710	val = pci_conf_read(pa->pa_pc, pa->pa_tag, EM_MMBA);
1711	if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM) {
1712		printf(": mmba is not mem space\n");
1713		return (ENXIO);
1714	}
1715	if (pci_mapreg_map(pa, EM_MMBA, PCI_MAPREG_MEM_TYPE(val), 0,
1716	    &sc->osdep.mem_bus_space_tag, &sc->osdep.mem_bus_space_handle,
1717	    &sc->osdep.em_membase, &sc->osdep.em_memsize, 0)) {
1718		printf(": cannot find mem space\n");
1719		return (ENXIO);
1720	}
1721
1722 switch (sc->hw.mac_type) {
1723 case em_82544:
1724 case em_82540:
1725 case em_82545:
1726 case em_82546:
1727 case em_82541:
1728 case em_82541_rev_2:
1729 /* Figure out where our I/O BAR is ? */
1730		for (rid = PCI_MAPREG_START; rid < PCI_MAPREG_END;) {
1731			val = pci_conf_read(pa->pa_pc, pa->pa_tag, rid);
1732			if (PCI_MAPREG_TYPE(val) == PCI_MAPREG_TYPE_IO) {
1733				sc->io_rid = rid;
1734				break;
1735			}
1736			rid += 4;
1737			if (PCI_MAPREG_MEM_TYPE(val) ==
1738			    PCI_MAPREG_MEM_TYPE_64BIT)
1739				rid += 4;	/* skip high bits, too */
1740		}
1741
1742		if (pci_mapreg_map(pa, rid, PCI_MAPREG_TYPE_IO, 0,
1743		    &sc->osdep.io_bus_space_tag, &sc->osdep.io_bus_space_handle,
1744		    &sc->osdep.em_iobase, &sc->osdep.em_iosize, 0)) {
1745			printf(": cannot find i/o space\n");
1746			return (ENXIO);
1747 }
1748
1749 sc->hw.io_base = 0;
1750 break;
1751 default:
1752 break;
1753 }
1754
1755 sc->osdep.em_flashoffset = 0;
1756 /* for ICH8 and family we need to find the flash memory */
1757 if (sc->hw.mac_type >= em_pch_spt) {
1758 sc->osdep.flash_bus_space_tag = sc->osdep.mem_bus_space_tag;
1759 sc->osdep.flash_bus_space_handle = sc->osdep.mem_bus_space_handle;
1760 sc->osdep.em_flashbase = 0;
1761 sc->osdep.em_flashsize = 0;
1762 sc->osdep.em_flashoffset = 0xe000;
1763	} else if (IS_ICH8(sc->hw.mac_type)) {
1764		val = pci_conf_read(pa->pa_pc, pa->pa_tag, EM_FLASH);
1765		if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM) {
1766			printf(": flash is not mem space\n");
1767			return (ENXIO);
1768		}
1769
1770		if (pci_mapreg_map(pa, EM_FLASH, PCI_MAPREG_MEM_TYPE(val), 0,
1771		    &sc->osdep.flash_bus_space_tag, &sc->osdep.flash_bus_space_handle,
1772		    &sc->osdep.em_flashbase, &sc->osdep.em_flashsize, 0)) {
1773			printf(": cannot find mem space\n");
1774			return (ENXIO);
1775		}
1775 }
1776 }
1777
1778 sc->osdep.dev = (struct device *)sc;
1779 sc->hw.back = &sc->osdep;
1780
1781 /* Only one queue for the moment. */
1782	que = malloc(sizeof(struct em_queue), M_DEVBUF, M_NOWAIT | M_ZERO);
1783	if (que == NULL) {
1784		printf(": unable to allocate queue memory\n");
1785		return (ENOMEM);
1786 }
1787 que->me = 0;
1788 que->sc = sc;
1789 timeout_set(&que->rx_refill, em_rxrefill, que);
1790
1791 sc->queues = que;
1792 sc->num_queues = 1;
1793 sc->msix = 0;
1794 sc->legacy_irq = 0;
1795 if (em_allocate_msix(sc) && em_allocate_legacy(sc))
1796		return (ENXIO);
1797
1798 /*
1799 * the ICP_xxxx device has multiple, duplicate register sets for
1800 * use when it is being used as a network processor. Disable those
1801 * registers here, as they are not necessary in this context and
1802 * can confuse the system
1803 */
1804	if (sc->hw.mac_type == em_icp_xxxx) {
1805 int offset;
1806 pcireg_t val;
1807
1808 if (!pci_get_capability(sc->osdep.em_pa.pa_pc,
1809		    sc->osdep.em_pa.pa_tag, PCI_CAP_ID_ST, &offset, &val)) {
1810 return (0);
1811 }
1812		offset += PCI_ST_SMIA_OFFSET;
1813 pci_conf_write(sc->osdep.em_pa.pa_pc, sc->osdep.em_pa.pa_tag,
1814 offset, 0x06);
1815		E1000_WRITE_REG(&sc->hw, IMC1, ~0x0);
1816		E1000_WRITE_REG(&sc->hw, IMC2, ~0x0);
1817 }
1818 return (0);
1819 }
1820
1821 void
1822 em_free_pci_resources(struct em_softc *sc)
1823 {
1824 struct pci_attach_args *pa = &sc->osdep.em_pa;
1825 pci_chipset_tag_t pc = pa->pa_pc;
1826	struct em_queue *que = NULL;
1827 if (sc->sc_intrhand)
1828 pci_intr_disestablish(pc, sc->sc_intrhand);
1829 sc->sc_intrhand = 0;
1830
1831 if (sc->osdep.em_flashbase)
1832 bus_space_unmap(sc->osdep.flash_bus_space_tag, sc->osdep.flash_bus_space_handle,
1833 sc->osdep.em_flashsize);
1834 sc->osdep.em_flashbase = 0;
1835
1836 if (sc->osdep.em_iobase)
1837 bus_space_unmap(sc->osdep.io_bus_space_tag, sc->osdep.io_bus_space_handle,
1838 sc->osdep.em_iosize);
1839 sc->osdep.em_iobase = 0;
1840
1841 if (sc->osdep.em_membase)
1842 bus_space_unmap(sc->osdep.mem_bus_space_tag, sc->osdep.mem_bus_space_handle,
1843 sc->osdep.em_memsize);
1844 sc->osdep.em_membase = 0;
1845
1846	FOREACH_QUEUE(sc, que) {
1847		if (que->rx.sc_rx_desc_ring != NULL) {
1848			que->rx.sc_rx_desc_ring = NULL;
1849			em_dma_free(sc, &que->rx.sc_rx_dma);
1850		}
1851		if (que->tx.sc_tx_desc_ring != NULL) {
1852			que->tx.sc_tx_desc_ring = NULL;
1853			em_dma_free(sc, &que->tx.sc_tx_dma);
1854		}
1855		if (que->tag)
1856			pci_intr_disestablish(pc, que->tag);
1857		que->tag = NULL;
1858		que->eims = 0;
1859		que->me = 0;
1860		que->sc = NULL;
1861	}
1862	sc->legacy_irq = 0;
1863	sc->msix_linkvec = 0;
1864	sc->msix_queuesmask = 0;
1865	if (sc->queues)
1866		free(sc->queues, M_DEVBUF,
1867		    sc->num_queues * sizeof(struct em_queue));
1868	sc->num_queues = 0;
1869	sc->queues = NULL;
1870 }
1871
1872/*********************************************************************
1873 *
1874 * Initialize the hardware to a configuration as specified by the
1875 * em_softc structure. The controller is reset, the EEPROM is
1876 * verified, the MAC address is set, then the shared initialization
1877 * routines are called.
1878 *
1879 **********************************************************************/
1880 int
1881 em_hardware_init(struct em_softc *sc)
1882 {
1883 uint32_t ret_val;
1884 u_int16_t rx_buffer_size;
1885
1886	INIT_DEBUGOUT("em_hardware_init: begin");
1887 if (sc->hw.mac_type >= em_pch_spt)
1888 em_flush_desc_rings(sc);
1889 /* Issue a global reset */
1890 em_reset_hw(&sc->hw);
1891
1892 /* When hardware is reset, fifo_head is also reset */
1893 sc->tx_fifo_head = 0;
1894
1895 /* Make sure we have a good EEPROM before we read from it */
1896 if (em_get_flash_presence_i210(&sc->hw) &&
1897 em_validate_eeprom_checksum(&sc->hw) < 0) {
1898 /*
1899 * Some PCIe parts fail the first check due to
1900 * the link being in sleep state, call it again,
1901 * if it fails a second time its a real issue.
1902 */
1903 if (em_validate_eeprom_checksum(&sc->hw) < 0) {
1904 printf("%s: The EEPROM Checksum Is Not Valid\n",
1905			    DEVNAME(sc));
1906			return (EIO);
1907 }
1908 }
1909
1910 if (em_get_flash_presence_i210(&sc->hw) &&
1911 em_read_part_num(&sc->hw, &(sc->part_num)) < 0) {
1912 printf("%s: EEPROM read error while reading part number\n",
1913		    DEVNAME(sc));
1914		return (EIO);
1915 }
1916
1917 /* Set up smart power down as default off on newer adapters */
1918 if (!em_smart_pwr_down &&
1919 (sc->hw.mac_type == em_82571 ||
1920 sc->hw.mac_type == em_82572 ||
1921 sc->hw.mac_type == em_82575 ||
1922 sc->hw.mac_type == em_82576 ||
1923 sc->hw.mac_type == em_82580 ||
1924 sc->hw.mac_type == em_i210 ||
1925 sc->hw.mac_type == em_i350 )) {
1926 uint16_t phy_tmp = 0;
1927
1928 /* Speed up time to link by disabling smart power down */
1929		em_read_phy_reg(&sc->hw, IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
1930		phy_tmp &= ~IGP02E1000_PM_SPD;
1931		em_write_phy_reg(&sc->hw, IGP02E1000_PHY_POWER_MGMT, phy_tmp);
1932 }
1933
1934 em_legacy_irq_quirk_spt(sc);
1935
1936 /*
1937 * These parameters control the automatic generation (Tx) and
1938 * response (Rx) to Ethernet PAUSE frames.
1939 * - High water mark should allow for at least two frames to be
1940 * received after sending an XOFF.
1941 * - Low water mark works best when it is very near the high water mark.
1942 * This allows the receiver to restart by sending XON when it has
1943 * drained a bit. Here we use an arbitrary value of 1500 which will
1944 * restart after one full frame is pulled from the buffer. There
1945 * could be several smaller frames in the buffer and if so they will
1946 * not trigger the XON until their total number reduces the buffer
1947 * by 1500.
1948 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
1949 */
1950	rx_buffer_size = ((E1000_READ_REG(&sc->hw, PBA) & 0xffff) << 10 );
1951
1952	sc->hw.fc_high_water = rx_buffer_size -
1953	    EM_ROUNDUP(sc->hw.max_frame_size, 1024);
1954 sc->hw.fc_low_water = sc->hw.fc_high_water - 1500;
1955 if (sc->hw.mac_type == em_80003es2lan)
1956 sc->hw.fc_pause_time = 0xFFFF;
1957 else
1958 sc->hw.fc_pause_time = 1000;
1959	sc->hw.fc_send_xon = TRUE;
1960	sc->hw.fc = E1000_FC_FULL;
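To make the arithmetic above concrete (illustrative numbers only): with
a 48 KB Rx packet buffer the PBA low word reads 48, so rx_buffer_size =
48 << 10 = 49152 bytes. For a 1518-byte max frame, EM_ROUNDUP(1518,
1024) = 2048, giving fc_high_water = 49152 - 2048 = 47104 and
fc_low_water = 47104 - 1500 = 45604. The pause time of 1000 slots at
512 ns each is the 512 usec mentioned above.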
1961
1962 em_disable_aspm(sc);
1963
1964 if ((ret_val = em_init_hw(sc)) != 0) {
1965		if (ret_val == E1000_DEFER_INIT) {
1966			INIT_DEBUGOUT("\nHardware Initialization Deferred ");
1967			return (EAGAIN);
1968 }
1969 printf("\n%s: Hardware Initialization Failed: %d\n",
1970		    DEVNAME(sc), ret_val);
1971		return (EIO);
1972 }
1973
1974 em_check_for_link(&sc->hw);
1975
1976 return (0);
1977 }
1978
1979/*********************************************************************
1980 *
1981 * Setup networking device structure and register an interface.
1982 *
1983 **********************************************************************/
1984 void
1985 em_setup_interface(struct em_softc *sc)
1986 {
1987 struct ifnet *ifp;
1988	uint64_t fiber_type = IFM_1000_SX;
1989
1990	INIT_DEBUGOUT("em_setup_interface: begin");
1991
1992 ifp = &sc->sc_ac.ac_if;
1993	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
1994 ifp->if_softc = sc;
1995	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1996	ifp->if_xflags = IFXF_MPSAFE;
1997 ifp->if_ioctl = em_ioctl;
1998 ifp->if_qstart = em_start;
1999 ifp->if_watchdog = em_watchdog;
2000 ifp->if_hardmtu =
2001	    sc->hw.max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN;
2002 ifq_init_maxlen(&ifp->if_snd, sc->sc_tx_slots - 1);
2003
2004	ifp->if_capabilities = IFCAP_VLAN_MTU;
2005
2006 #if NVLAN > 0
2007	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
2008 #endif
2009
2010 if (sc->hw.mac_type >= em_82543) {
2011		ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
2012 }
2013 if (sc->hw.mac_type >= em_82575 && sc->hw.mac_type <= em_i210) {
2014		ifp->if_capabilities |= IFCAP_CSUM_IPv4;
2015		ifp->if_capabilities |= IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6;
2016		ifp->if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6;
2017 }
2018
2019 /*
2020 * Specify the media types supported by this adapter and register
2021 * callbacks to update media and link information
2022 */
2023	ifmedia_init(&sc->media, IFM_IMASK, em_media_change,
2024 em_media_status);
2025 if (sc->hw.media_type == em_media_type_fiber ||
2026 sc->hw.media_type == em_media_type_internal_serdes) {
2027 if (sc->hw.mac_type == em_82545)
2028			fiber_type = IFM_1000_LX;
2029		ifmedia_add(&sc->media, IFM_ETHER | fiber_type | IFM_FDX,
2030		    0, NULL);
2031		ifmedia_add(&sc->media, IFM_ETHER | fiber_type,
2032		    0, NULL);
2033	} else {
2034		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
2035		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX,
2036		    0, NULL);
2037		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX,
2038		    0, NULL);
2039		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
2040		    0, NULL);
2041		if (sc->hw.phy_type != em_phy_ife) {
2042			ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
2043			    0, NULL);
2044			ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2045		}
2046	}
2047	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2048	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
2049
2050 if_attach(ifp);
2051 ether_ifattach(ifp);
2052 em_enable_intr(sc);
2053 }
2054
2055 int
2056 em_detach(struct device *self, int flags)
2057 {
2058 struct em_softc *sc = (struct em_softc *)self;
2059 struct ifnet *ifp = &sc->sc_ac.ac_if;
2060 struct pci_attach_args *pa = &sc->osdep.em_pa;
2061 pci_chipset_tag_t pc = pa->pa_pc;
2062
2063 if (sc->sc_intrhand)
2064 pci_intr_disestablish(pc, sc->sc_intrhand);
2065 sc->sc_intrhand = 0;
2066
2067 em_stop(sc, 1);
2068
2069 em_free_pci_resources(sc);
2070
2071 ether_ifdetach(ifp);
2072 if_detach(ifp);
2073
2074 return (0);
2075 }
2076
2077 int
2078 em_activate(struct device *self, int act)
2079 {
2080 struct em_softc *sc = (struct em_softc *)self;
2081 struct ifnet *ifp = &sc->sc_ac.ac_if;
2082 int rv = 0;
2083
2084 switch (act) {
2085	case DVACT_SUSPEND:
2086		if (ifp->if_flags & IFF_RUNNING)
2087 em_stop(sc, 0);
2088 /* We have no children atm, but we will soon */
2089 rv = config_activate_children(self, act);
2090 break;
2091	case DVACT_RESUME:
2092		if (ifp->if_flags & IFF_UP)
2093 em_init(sc);
2094 break;
2095 default:
2096 rv = config_activate_children(self, act);
2097 break;
2098 }
2099 return (rv);
2100 }
2101
2102/*********************************************************************
2103 *
2104 * Workaround for SmartSpeed on 82541 and 82547 controllers
2105 *
2106 **********************************************************************/
2107 void
2108 em_smartspeed(struct em_softc *sc)
2109 {
2110 uint16_t phy_tmp;
2111
2112 if (sc->link_active || (sc->hw.phy_type != em_phy_igp) ||
2113	    !sc->hw.autoneg || !(sc->hw.autoneg_advertised & ADVERTISE_1000_FULL))
2114		return;
2115
2116	if (sc->smartspeed == 0) {
2117		/* If Master/Slave config fault is asserted twice,
2118		 * we assume back-to-back */
2119		em_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
2120		if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
2121			return;
2122		em_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
2123		if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
2124			em_read_phy_reg(&sc->hw, PHY_1000T_CTRL,
2125			    &phy_tmp);
2126			if (phy_tmp & CR_1000T_MS_ENABLE) {
2127				phy_tmp &= ~CR_1000T_MS_ENABLE;
2128				em_write_phy_reg(&sc->hw,
2129				    PHY_1000T_CTRL, phy_tmp);
2130				sc->smartspeed++;
2131				if (sc->hw.autoneg &&
2132				    !em_phy_setup_autoneg(&sc->hw) &&
2133				    !em_read_phy_reg(&sc->hw, PHY_CTRL,
2134				    &phy_tmp)) {
2135					phy_tmp |= (MII_CR_AUTO_NEG_EN |
2136					    MII_CR_RESTART_AUTO_NEG);
2137					em_write_phy_reg(&sc->hw,
2138					    PHY_CTRL, phy_tmp);
2139				}
2140			}
2141		}
2142		return;
2143	} else if (sc->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2144		/* If still no link, perhaps using 2/3 pair cable */
2145		em_read_phy_reg(&sc->hw, PHY_1000T_CTRL, &phy_tmp);
2146		phy_tmp |= CR_1000T_MS_ENABLE;
2147		em_write_phy_reg(&sc->hw, PHY_1000T_CTRL, phy_tmp);
2148		if (sc->hw.autoneg &&
2149		    !em_phy_setup_autoneg(&sc->hw) &&
2150		    !em_read_phy_reg(&sc->hw, PHY_CTRL, &phy_tmp)) {
2151			phy_tmp |= (MII_CR_AUTO_NEG_EN |
2152			    MII_CR_RESTART_AUTO_NEG);
2153			em_write_phy_reg(&sc->hw, PHY_CTRL, phy_tmp);
2154		}
2155	}
2156	/* Restart process after EM_SMARTSPEED_MAX iterations */
2157	if (sc->smartspeed++ == EM_SMARTSPEED_MAX)
2158 sc->smartspeed = 0;
2159 }
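In short, sc->smartspeed acts as a small downshift state machine: at 0,
two consecutive master/slave configuration faults cause the driver to
clear CR_1000T_MS_ENABLE and restart autonegotiation; if there is still
no link by EM_SMARTSPEED_DOWNSHIFT (3) ticks it re-enables the bit, and
the counter wraps to 0 after EM_SMARTSPEED_MAX (15) so the cycle can
repeat. (Summary of the code above; the constants are the values shown
in this build.)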
2160
2161/*
2162 * Manage DMA'able memory.
2163 */
2164 int
2165 em_dma_malloc(struct em_softc *sc, bus_size_t size, struct em_dma_alloc *dma)
2166 {
2167 int r;
2168
2169	r = bus_dmamap_create(sc->sc_dmat, size, 1,
2170	    size, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &dma->dma_map);
2171 if (r != 0)
2172 return (r);
2173
2174	r = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &dma->dma_seg,
2175	    1, &dma->dma_nseg, BUS_DMA_WAITOK | BUS_DMA_ZERO);
2176 if (r != 0)
2177 goto destroy;
2178
2179	r = bus_dmamem_map(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg, size,
2180	    &dma->dma_vaddr, BUS_DMA_WAITOK | BUS_DMA_COHERENT);
2181 if (r != 0)
2182 goto free;
2183
2184	r = bus_dmamap_load(sc->sc_dmat, dma->dma_map, dma->dma_vaddr, size,
2185	    NULL, BUS_DMA_WAITOK);
2186 if (r != 0)
2187 goto unmap;
2188
2189 dma->dma_size = size;
2190 return (0);
2191
2192 unmap:
2193	bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, size);
2194 free:
2195	bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg);
2196 destroy:
2197	bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
2198
2199 return (r);
2200 }
2201
2202 void
2203 em_dma_free(struct em_softc *sc, struct em_dma_alloc *dma)
2204 {
2205	bus_dmamap_unload(sc->sc_dmat, dma->dma_map);
2206	bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, dma->dma_size);
2207	bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg);
2208	bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
2209 }
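A minimal usage sketch of this pair (illustrative; the size shown mirrors
the TX descriptor ring allocated elsewhere in this file):

	struct em_dma_alloc dma;

	if (em_dma_malloc(sc, sc->sc_tx_slots * sizeof(struct em_tx_desc),
	    &dma) != 0)
		return (ENOMEM);
	/* ... use dma.dma_vaddr and dma.dma_map ... */
	em_dma_free(sc, &dma);

Note that the error unwinding in em_dma_malloc() releases resources in
the reverse order of acquisition via the unmap/free/destroy labels.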
2210
2211/*********************************************************************
2212 *
2213 * Allocate memory for tx_buffer structures. The tx_buffer stores all
2214 * the information needed to transmit a packet on the wire.
2215 *
2216 **********************************************************************/
2217 int
2218 em_allocate_transmit_structures(struct em_softc *sc)
2219 {
2220 struct em_queue *que;
2221
2222	FOREACH_QUEUE(sc, que) {
2223		bus_dmamap_sync(sc->sc_dmat, que->tx.sc_tx_dma.dma_map,
2224		    0, que->tx.sc_tx_dma.dma_map->dm_mapsize,
2225		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2226
2227 que->tx.sc_tx_pkts_ring = mallocarray(sc->sc_tx_slots,
2228		    sizeof(*que->tx.sc_tx_pkts_ring), M_DEVBUF, M_NOWAIT | M_ZERO);
2229		if (que->tx.sc_tx_pkts_ring == NULL) {
2230			printf("%s: Unable to allocate tx_buffer memory\n",
2231			    DEVNAME(sc));
2232			return (ENOMEM);
2233 }
2234 }
2235
2236 return (0);
2237 }
2238
2239/*********************************************************************
2240 *
2241 * Allocate and initialize transmit structures.
2242 *
2243 **********************************************************************/
2244 int
2245 em_setup_transmit_structures(struct em_softc *sc)
2246 {
2247 struct em_queue *que;
2248 struct em_packet *pkt;
2249 int error, i;
2250
2251 if ((error = em_allocate_transmit_structures(sc)) != 0)
2252 goto fail;
2253
2254	FOREACH_QUEUE(sc, que) {
2255		bzero((void *) que->tx.sc_tx_desc_ring,
2256		    (sizeof(struct em_tx_desc)) * sc->sc_tx_slots);
2257
2258 for (i = 0; i < sc->sc_tx_slots; i++) {
2259 pkt = &que->tx.sc_tx_pkts_ring[i];
2260			error = bus_dmamap_create(sc->sc_dmat, EM_TSO_SIZE,
2261			    EM_MAX_SCATTER / (sc->pcix_82544 ? 2 : 1),
2262			    EM_TSO_SEG_SIZE, 0, BUS_DMA_NOWAIT, &pkt->pkt_map);
2263 if (error != 0) {
2264 printf("%s: Unable to create TX DMA map\n",
2265				    DEVNAME(sc));
2266 goto fail;
2267 }
2268 }
2269
2270 que->tx.sc_tx_desc_head = 0;
2271 que->tx.sc_tx_desc_tail = 0;
2272
2273 /* Set checksum context */
2274 que->tx.active_checksum_context = OFFLOAD_NONE;
2275 }
2276
2277 return (0);
2278
2279fail:
2280 em_free_transmit_structures(sc);
2281 return (error);
2282 }
2283
2284/*********************************************************************
2285 *
2286 * Enable transmit unit.
2287 *
2288 **********************************************************************/
2289 void
2290 em_initialize_transmit_unit(struct em_softc *sc)
2291 {
2292 u_int32_t reg_tctl, reg_tipg = 0;
2293 u_int64_t bus_addr;
2294 struct em_queue *que;
2295
2296	INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
2297
2298	FOREACH_QUEUE(sc, que) {
2299 /* Setup the Base and Length of the Tx Descriptor Ring */
2300 bus_addr = que->tx.sc_tx_dma.dma_map->dm_segs[0].ds_addr;
2301		E1000_WRITE_REG(&sc->hw, TDLEN(que->me),
2302		    sc->sc_tx_slots *
2303		    sizeof(struct em_tx_desc));
2304		E1000_WRITE_REG(&sc->hw, TDBAH(que->me), (u_int32_t)(bus_addr >> 32));
2305		E1000_WRITE_REG(&sc->hw, TDBAL(que->me), (u_int32_t)bus_addr);
2306
2307		/* Setup the HW Tx Head and Tail descriptor pointers */
2308		E1000_WRITE_REG(&sc->hw, TDT(que->me), 0);
2309		E1000_WRITE_REG(&sc->hw, TDH(que->me), 0);
2311		HW_DEBUGOUT2("Base = %x, Length = %x\n",
2312		    E1000_READ_REG(&sc->hw, TDBAL(que->me)),
2313		    E1000_READ_REG(&sc->hw, TDLEN(que->me)));
2314
2315 /* Set the default values for the Tx Inter Packet Gap timer */
2316 switch (sc->hw.mac_type) {
2317 case em_82542_rev2_0:
2318 case em_82542_rev2_1:
2319			reg_tipg = DEFAULT_82542_TIPG_IPGT;
2320			reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2321			reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2322 break;
2323 case em_80003es2lan:
2324			reg_tipg = DEFAULT_82543_TIPG_IPGR1;
2325			reg_tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2326 break;
2327 default:
2328 if (sc->hw.media_type == em_media_type_fiber ||
2329 sc->hw.media_type == em_media_type_internal_serdes)
2330				reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2331			else
2332				reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2333			reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2334			reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2335 }
2336
2337
2338		E1000_WRITE_REG(&sc->hw, TIPG, reg_tipg);
2339		E1000_WRITE_REG(&sc->hw, TIDV, sc->tx_int_delay);
2340		if (sc->hw.mac_type >= em_82540)
2341			E1000_WRITE_REG(&sc->hw, TADV, sc->tx_abs_int_delay);
2342
2343 /* Setup Transmit Descriptor Base Settings */
2344		que->tx.sc_txd_cmd = E1000_TXD_CMD_IFCS;
2345
2346 if (sc->hw.mac_type == em_82575 || sc->hw.mac_type == em_82580 ||
2347 sc->hw.mac_type == em_82576 ||
2348 sc->hw.mac_type == em_i210 || sc->hw.mac_type == em_i350) {
2349 /* 82575/6 need to enable the TX queue and lack the IDE bit */
2350			reg_tctl = E1000_READ_REG(&sc->hw, TXDCTL(que->me));
2351			reg_tctl |= E1000_TXDCTL_QUEUE_ENABLE;
2352			E1000_WRITE_REG(&sc->hw, TXDCTL(que->me), reg_tctl);
2353		} else if (sc->tx_int_delay > 0)
2354			que->tx.sc_txd_cmd |= E1000_TXD_CMD_IDE;
2355 }
2356
2357 /* Program the Transmit Control Register */
2358	reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
2359	    (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2360	if (sc->hw.mac_type >= em_82571)
2361		reg_tctl |= E1000_TCTL_MULR;
2362	if (sc->link_duplex == FULL_DUPLEX)
2363		reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2364	else
2365		reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2366	/* This write will effectively turn on the transmit unit */
2367	E1000_WRITE_REG(&sc->hw, TCTL, reg_tctl);
2368
2369 /* SPT Si errata workaround to avoid data corruption */
2370
2371 if (sc->hw.mac_type == em_pch_spt) {
2372 uint32_t reg_val;
2373
2374 reg_val = EM_READ_REG(&sc->hw, E1000_IOSFPC)((((struct em_osdep *)(&sc->hw)->back)->mem_bus_space_tag
)->read_4((((struct em_osdep *)(&sc->hw)->back)->
mem_bus_space_handle), (0x00F28)))
;
2375 reg_val |= E1000_RCTL_RDMTS_HEX0x00010000;
2376 EM_WRITE_REG(&sc->hw, E1000_IOSFPC, reg_val)((((struct em_osdep *)(&sc->hw)->back)->mem_bus_space_tag
)->write_4((((struct em_osdep *)(&sc->hw)->back)
->mem_bus_space_handle), (0x00F28), (reg_val)))
;
2377
2378 reg_val = E1000_READ_REG(&sc->hw, TARC0)((((struct em_osdep *)(&sc->hw)->back)->mem_bus_space_tag
)->read_4((((struct em_osdep *)(&sc->hw)->back)->
mem_bus_space_handle), (((&sc->hw)->mac_type >= em_82543
? 0x03840 : em_translate_82542_register(0x03840)))))
;
2379 /* i218-i219 Specification Update 1.5.4.5 */
2380 reg_val &= ~E1000_TARC0_CB_MULTIQ_3_REQ(1 << 28 | 1 << 29);
2381 reg_val |= E1000_TARC0_CB_MULTIQ_2_REQ(1 << 29);
2382 E1000_WRITE_REG(&sc->hw, TARC0, reg_val)((((struct em_osdep *)(&sc->hw)->back)->mem_bus_space_tag
)->write_4((((struct em_osdep *)(&sc->hw)->back)
->mem_bus_space_handle), (((&sc->hw)->mac_type >=
em_82543 ? 0x03840 : em_translate_82542_register(0x03840))),
(reg_val)))
;
2383 }
2384}
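/*
 * Worked TCTL value for the setup above, using the constants from this
 * driver: E1000_TCTL_PSP (0x8) | E1000_TCTL_EN (0x2) |
 * (E1000_COLLISION_THRESHOLD 15 << 4) = 0xfa; adding the full-duplex
 * collision distance (63 << 12) gives 0x3f0fa, and the MULR bit
 * (0x10000000) on >= 82571 parts raises that to 0x1003f0fa.
 */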
2385
2386 /*********************************************************************
2387  *
2388  * Free all transmit related data structures.
2389  *
2390  **********************************************************************/
2391 void
2392 em_free_transmit_structures(struct em_softc *sc)
2393 {
2394     struct em_queue *que;
2395     struct em_packet *pkt;
2396     int i;
2397
2398     INIT_DEBUGOUT("free_transmit_structures: begin");
2399
2400     FOREACH_QUEUE(sc, que) {
2401         if (que->tx.sc_tx_pkts_ring != NULL) {
2402             for (i = 0; i < sc->sc_tx_slots; i++) {
2403                 pkt = &que->tx.sc_tx_pkts_ring[i];
2404
2405                 if (pkt->pkt_m != NULL) {
2406                     bus_dmamap_sync(sc->sc_dmat, pkt->pkt_map,
2407                         0, pkt->pkt_map->dm_mapsize,
2408                         BUS_DMASYNC_POSTWRITE);
2409                     bus_dmamap_unload(sc->sc_dmat,
2410                         pkt->pkt_map);
2411
2412                     m_freem(pkt->pkt_m);
2413                     pkt->pkt_m = NULL;
2414                 }
2415
2416                 if (pkt->pkt_map != NULL) {
2417                     bus_dmamap_destroy(sc->sc_dmat,
2418                         pkt->pkt_map);
2419                     pkt->pkt_map = NULL;
2420                 }
2421             }
2422
2423             free(que->tx.sc_tx_pkts_ring, M_DEVBUF,
2424                 sc->sc_tx_slots * sizeof(*que->tx.sc_tx_pkts_ring));
2425             que->tx.sc_tx_pkts_ring = NULL;
2426         }
2427
2428         bus_dmamap_sync(sc->sc_dmat, que->tx.sc_tx_dma.dma_map,
2429             0, que->tx.sc_tx_dma.dma_map->dm_mapsize,
2430             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2431     }
2432 }
2433
2434 u_int
2435 em_tso_setup(struct em_queue *que, struct mbuf *mp, u_int head,
2436     u_int32_t *olinfo_status, u_int32_t *cmd_type_len)
2437 {
2438     struct ether_extracted ext;
2439     struct e1000_adv_tx_context_desc *TD;
2440     uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0, mss_l4len_idx = 0;
2441     uint32_t paylen = 0;
2442     uint8_t iphlen = 0;
2443
2444     *olinfo_status = 0;
2445     *cmd_type_len = 0;
2446     TD = (struct e1000_adv_tx_context_desc *)&que->tx.sc_tx_desc_ring[head];
2447
2448 #if NVLAN > 0
2449     if (ISSET(mp->m_flags, M_VLANTAG)) {
2450         uint32_t vtag = mp->m_pkthdr.ether_vtag;
2451         vlan_macip_lens |= vtag << E1000_ADVTXD_VLAN_SHIFT;
2452         *cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
2453     }
2454 #endif
2455
2456     ether_extract_headers(mp, &ext);
2457     if (ext.tcp == NULL)
2458         goto out;
2459
2460     vlan_macip_lens |= (sizeof(*ext.eh) << E1000_ADVTXD_MACLEN_SHIFT);
2461
2462     if (ext.ip4) {
2463         iphlen = ext.ip4->ip_hl << 2;
2464
2465         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
2466         *olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
2467 #ifdef INET6
2468     } else if (ext.ip6) {
2469         iphlen = sizeof(*ext.ip6);
2470
2471         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
2472 #endif
2473     } else {
2474         goto out;
2475     }
2476
2477     *cmd_type_len |= E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS;
2478     *cmd_type_len |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DCMD_TSE;
2479     paylen = mp->m_pkthdr.len - sizeof(*ext.eh) - iphlen -
2480         (ext.tcp->th_off << 2);
2481     *olinfo_status |= paylen << E1000_ADVTXD_PAYLEN_SHIFT;
2482     vlan_macip_lens |= iphlen;
2483     type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
2484
2485     type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
2486     *olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
2487
2488     mss_l4len_idx |= mp->m_pkthdr.ph_mss << E1000_ADVTXD_MSS_SHIFT;
2489     mss_l4len_idx |= (ext.tcp->th_off << 2) << E1000_ADVTXD_L4LEN_SHIFT;
2490     /* 82575 needs the queue index added */
2491     if (que->sc->hw.mac_type == em_82575)
2492         mss_l4len_idx |= (que->me & 0xff) << 4;
2493
2494     htolem32(&TD->vlan_macip_lens, vlan_macip_lens);
2495     htolem32(&TD->type_tucmd_mlhl, type_tucmd_mlhl);
2496     htolem32(&TD->u.seqnum_seed, 0);
2497     htolem32(&TD->mss_l4len_idx, mss_l4len_idx);
2498
2499     tcpstat_add(tcps_outpkttso, (paylen + mp->m_pkthdr.ph_mss - 1) /
2500         mp->m_pkthdr.ph_mss);
2501
2502     return 1;
2503
2504 out:
2505     tcpstat_inc(tcps_outbadtso);
2506     return 0;
2507 }
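/*
 * Worked example for the TSO arithmetic above (hypothetical frame, not
 * from the analyzer run): a 14654-byte packet with a 14-byte Ethernet
 * header, a 20-byte IPv4 header and a 20-byte TCP header gives
 * paylen = 14654 - 14 - 20 - 20 = 14600; with ph_mss = 1460 the
 * tcps_outpkttso counter above is bumped by
 * (14600 + 1460 - 1) / 1460 = 10 segments.
 */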
2508
2509 u_int
2510 em_tx_ctx_setup(struct em_queue *que, struct mbuf *mp, u_int head,
2511     u_int32_t *olinfo_status, u_int32_t *cmd_type_len)
2512 {
2513     struct ether_extracted ext;
2514     struct e1000_adv_tx_context_desc *TD;
2515     uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0, mss_l4len_idx = 0;
2516     int off = 0;
2517     uint8_t iphlen;
2518
2519     *olinfo_status = 0;
2520     *cmd_type_len = 0;
2521     TD = (struct e1000_adv_tx_context_desc *)&que->tx.sc_tx_desc_ring[head];
2522
2523 #if NVLAN > 0
2524     if (ISSET(mp->m_flags, M_VLANTAG)) {
2525         uint32_t vtag = mp->m_pkthdr.ether_vtag;
2526         vlan_macip_lens |= vtag << E1000_ADVTXD_VLAN_SHIFT;
2527         *cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
2528         off = 1;
2529     }
2530 #endif
2531
2532     ether_extract_headers(mp, &ext);
2533
2534     vlan_macip_lens |= (sizeof(*ext.eh) << E1000_ADVTXD_MACLEN_SHIFT);
2535
2536     if (ext.ip4) {
2537         iphlen = ext.ip4->ip_hl << 2;
2538
2539         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
2540         if (ISSET(mp->m_pkthdr.csum_flags, M_IPV4_CSUM_OUT)) {
2541             *olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
2542             off = 1;
2543         }
2544 #ifdef INET6
2545     } else if (ext.ip6) {
2546         iphlen = sizeof(*ext.ip6);
2547
2548         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
2549 #endif
2550     } else {
2551         iphlen = 0;
2552     }
2553
2554     *cmd_type_len |= E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS;
2555     *cmd_type_len |= E1000_ADVTXD_DCMD_DEXT;
2556     *olinfo_status |= mp->m_pkthdr.len << E1000_ADVTXD_PAYLEN_SHIFT;
2557     vlan_macip_lens |= iphlen;
2558     type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
2559
2560     if (ext.tcp) {
2561         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
2562         if (ISSET(mp->m_pkthdr.csum_flags, M_TCP_CSUM_OUT)) {
2563             *olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
2564             off = 1;
2565         }
2566     } else if (ext.udp) {
2567         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP;
2568         if (ISSET(mp->m_pkthdr.csum_flags, M_UDP_CSUM_OUT)) {
2569             *olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
2570             off = 1;
2571         }
2572     }
2573
2574     if (!off)
2575         return (0);
2576
2577     /* 82575 needs the queue index added */
2578     if (que->sc->hw.mac_type == em_82575)
2579         mss_l4len_idx |= (que->me & 0xff) << 4;
2580
2581     htolem32(&TD->vlan_macip_lens, vlan_macip_lens);
2582     htolem32(&TD->type_tucmd_mlhl, type_tucmd_mlhl);
2583     htolem32(&TD->u.seqnum_seed, 0);
2584     htolem32(&TD->mss_l4len_idx, mss_l4len_idx);
2585
2586     return (1);
2587 }
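/*
 * Layout of the vlan_macip_lens word built above, from the shift
 * constants in this file: bits 8:0 hold the IP header length, bits 15:9
 * the MAC header length, bits 31:16 the VLAN tag. A 14-byte Ethernet
 * header and a 20-byte IPv4 header pack to (14 << 9) | 20 = 0x1c14.
 */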
2588
2589 /*********************************************************************
2590  *
2591  * The offload context needs to be set when we transfer the first
2592  * packet of a particular protocol (TCP/UDP). We change the
2593  * context only if the protocol type changes.
2594  *
2595  **********************************************************************/
2596 u_int
2597 em_transmit_checksum_setup(struct em_queue *que, struct mbuf *mp, u_int head,
2598     u_int32_t *txd_upper, u_int32_t *txd_lower)
2599 {
2600     struct em_context_desc *TXD;
2601
2602     if (mp->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) {
2603         *txd_upper = E1000_TXD_POPTS_TXSM << 8;
2604         *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2605         if (que->tx.active_checksum_context == OFFLOAD_TCP_IP)
2606             return (0);
2607         else
2608             que->tx.active_checksum_context = OFFLOAD_TCP_IP;
2609     } else if (mp->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) {
2610         *txd_upper = E1000_TXD_POPTS_TXSM << 8;
2611         *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2612         if (que->tx.active_checksum_context == OFFLOAD_UDP_IP)
2613             return (0);
2614         else
2615             que->tx.active_checksum_context = OFFLOAD_UDP_IP;
2616     } else {
2617         *txd_upper = 0;
2618         *txd_lower = 0;
2619         return (0);
2620     }
2621
2622     /* If we reach this point, the checksum offload context
2623      * needs to be reset.
2624      */
2625     TXD = (struct em_context_desc *)&que->tx.sc_tx_desc_ring[head];
2626
2627     TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
2628     TXD->lower_setup.ip_fields.ipcso =
2629         ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
2630     TXD->lower_setup.ip_fields.ipcse =
2631         htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);
2632
2633     TXD->upper_setup.tcp_fields.tucss =
2634         ETHER_HDR_LEN + sizeof(struct ip);
2635     TXD->upper_setup.tcp_fields.tucse = htole16(0);
2636
2637     if (que->tx.active_checksum_context == OFFLOAD_TCP_IP) {
2638         TXD->upper_setup.tcp_fields.tucso =
2639             ETHER_HDR_LEN + sizeof(struct ip) +
2640             offsetof(struct tcphdr, th_sum);
2641     } else if (que->tx.active_checksum_context == OFFLOAD_UDP_IP) {
2642         TXD->upper_setup.tcp_fields.tucso =
2643             ETHER_HDR_LEN + sizeof(struct ip) +
2644             offsetof(struct udphdr, uh_sum);
2645     }
2646
2647     TXD->tcp_seg_setup.data = htole32(0);
2648     TXD->cmd_and_length = htole32(que->tx.sc_txd_cmd | E1000_TXD_CMD_DEXT);
2649
2650     return (1);
2651 }
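/*
 * Worked offsets for the context descriptor above, assuming a plain
 * IPv4 header with no options: ipcss = 14 (ETHER_HDR_LEN), ipcso =
 * 14 + 10 = 24 (offsetof(struct ip, ip_sum)), ipcse = 14 + 20 - 1 = 33;
 * for TCP, tucss = 34 and tucso = 34 + 16 = 50
 * (offsetof(struct tcphdr, th_sum)).
 */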
2652
2653 /**********************************************************************
2654  *
2655  * Examine each tx_buffer in the used queue. If the hardware is done
2656  * processing the packet then free associated resources. The
2657  * tx_buffer is put back on the free queue.
2658  *
2659  **********************************************************************/
2660 void
2661 em_txeof(struct em_queue *que)
2662 {
2663     struct em_softc *sc = que->sc;
2664     struct ifnet *ifp = &sc->sc_ac.ac_if;
2665     struct em_packet *pkt;
2666     struct em_tx_desc *desc;
2667     u_int head, tail;
2668     u_int free = 0;
2669
2670     head = que->tx.sc_tx_desc_head;
2671     tail = que->tx.sc_tx_desc_tail;
2672
2673     if (head == tail)
2674         return;
2675
2676     bus_dmamap_sync(sc->sc_dmat, que->tx.sc_tx_dma.dma_map,
2677         0, que->tx.sc_tx_dma.dma_map->dm_mapsize,
2678         BUS_DMASYNC_POSTREAD);
2679
2680     do {
2681         pkt = &que->tx.sc_tx_pkts_ring[tail];
2682         desc = &que->tx.sc_tx_desc_ring[pkt->pkt_eop];
2683
2684         if (!ISSET(desc->upper.fields.status, E1000_TXD_STAT_DD))
2685             break;
2686
2687         bus_dmamap_sync(sc->sc_dmat, pkt->pkt_map,
2688             0, pkt->pkt_map->dm_mapsize,
2689             BUS_DMASYNC_POSTWRITE);
2690         bus_dmamap_unload(sc->sc_dmat, pkt->pkt_map);
2691
2692         KASSERT(pkt->pkt_m != NULL);
2693
2694         m_freem(pkt->pkt_m);
2695         pkt->pkt_m = NULL;
2696
2697         tail = pkt->pkt_eop;
2698
2699         if (++tail == sc->sc_tx_slots)
2700             tail = 0;
2701
2702         free++;
2703     } while (tail != head);
2704
2705     bus_dmamap_sync(sc->sc_dmat, que->tx.sc_tx_dma.dma_map,
2706         0, que->tx.sc_tx_dma.dma_map->dm_mapsize,
2707         BUS_DMASYNC_PREREAD);
2708
2709     if (free == 0)
2710         return;
2711
2712     que->tx.sc_tx_desc_tail = tail;
2713
2714     if (ifq_is_oactive(&ifp->if_snd))
2715         ifq_restart(&ifp->if_snd);
2716     else if (tail == head)
2717         ifp->if_timer = 0;
2718 }
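/*
 * A minimal sketch of the tail/head arithmetic used above (illustrative
 * only, not part of if_em.c): the tail chases the head around a ring of
 * sc_tx_slots descriptors and wraps to 0 past the end.
 *
 *     static u_int
 *     tx_ring_next(u_int idx, u_int slots)
 *     {
 *         return ((idx + 1 == slots) ? 0 : (idx + 1));
 *     }
 */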
2719
2720 /*********************************************************************
2721  *
2722  * Get a buffer from system mbuf buffer pool.
2723  *
2724  **********************************************************************/
2725 int
2726 em_get_buf(struct em_queue *que, int i)
2727 {
2728     struct em_softc *sc = que->sc;
2729     struct mbuf *m;
2730     struct em_packet *pkt;
2731     struct em_rx_desc *desc;
2732     int error;
2733
2734     pkt = &que->rx.sc_rx_pkts_ring[i];
2735     desc = &que->rx.sc_rx_desc_ring[i];
2736
2737     KASSERT(pkt->pkt_m == NULL);
2738
2739     m = MCLGETL(NULL, M_DONTWAIT, EM_MCLBYTES);
2740     if (m == NULL) {
2741         sc->mbuf_cluster_failed++;
2742         return (ENOBUFS);
2743     }
2744     m->m_len = m->m_pkthdr.len = EM_MCLBYTES;
2745     m_adj(m, ETHER_ALIGN);
2746
2747     error = bus_dmamap_load_mbuf(sc->sc_dmat, pkt->pkt_map,
2748         m, BUS_DMA_NOWAIT);
2749     if (error) {
2750         m_freem(m);
2751         return (error);
2752     }
2753
2754     bus_dmamap_sync(sc->sc_dmat, pkt->pkt_map,
2755         0, pkt->pkt_map->dm_mapsize,
2756         BUS_DMASYNC_PREREAD);
2757     pkt->pkt_m = m;
2758
2759     memset(desc, 0, sizeof(*desc));
2760     htolem64(&desc->buffer_addr, pkt->pkt_map->dm_segs[0].ds_addr);
2761
2762     return (0);
2763 }
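/*
 * The m_adj(m, ETHER_ALIGN) above advances the cluster's data pointer
 * by 2 bytes so the 14-byte Ethernet header leaves the IP header
 * 4-byte aligned: 2 + 14 = 16, a multiple of 4. EM_MCLBYTES is sized
 * at 2048 + 2 to leave room for this offset.
 */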
2764
2765 /*********************************************************************
2766  *
2767  * Allocate memory for rx_buffer structures. Since we use one
2768  * rx_buffer per received packet, the maximum number of rx_buffer's
2769  * that we'll need is equal to the number of receive descriptors
2770  * that we've allocated.
2771  *
2772  **********************************************************************/
2773 int
2774 em_allocate_receive_structures(struct em_softc *sc)
2775 {
2776     struct em_queue *que;
2777     struct em_packet *pkt;
2778     int i;
2779     int error;
2780
2781     FOREACH_QUEUE(sc, que) {
2782         que->rx.sc_rx_pkts_ring = mallocarray(sc->sc_rx_slots,
2783             sizeof(*que->rx.sc_rx_pkts_ring),
2784             M_DEVBUF, M_NOWAIT | M_ZERO);
2785         if (que->rx.sc_rx_pkts_ring == NULL) {
2786             printf("%s: Unable to allocate rx_buffer memory\n",
2787                 DEVNAME(sc));
2788             return (ENOMEM);
2789         }
2790
2791         bus_dmamap_sync(sc->sc_dmat, que->rx.sc_rx_dma.dma_map,
2792             0, que->rx.sc_rx_dma.dma_map->dm_mapsize,
2793             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2794
2795         for (i = 0; i < sc->sc_rx_slots; i++) {
2796             pkt = &que->rx.sc_rx_pkts_ring[i];
2797
2798             error = bus_dmamap_create(sc->sc_dmat, EM_MCLBYTES, 1,
2799                 EM_MCLBYTES, 0, BUS_DMA_NOWAIT, &pkt->pkt_map);
2800             if (error != 0) {
2801                 printf("%s: em_allocate_receive_structures: "
2802                     "bus_dmamap_create failed; error %u\n",
2803                     DEVNAME(sc), error);
2804                 goto fail;
2805             }
2806
2807             pkt->pkt_m = NULL;
2808         }
2809     }
2810
2811     return (0);
2812
2813 fail:
2814     em_free_receive_structures(sc);
2815     return (error);
2816 }
2817
2818 /*********************************************************************
2819  *
2820  * Allocate and initialize receive structures.
2821  *
2822  **********************************************************************/
2823 int
2824 em_setup_receive_structures(struct em_softc *sc)
2825 {
2826     struct ifnet *ifp = &sc->sc_ac.ac_if;
2827     struct em_queue *que;
2828     u_int lwm;
2829
2830     if (em_allocate_receive_structures(sc))
2831         return (ENOMEM);
2832
2833     FOREACH_QUEUE(sc, que) {
2834         memset(que->rx.sc_rx_desc_ring, 0,
2835             sc->sc_rx_slots * sizeof(*que->rx.sc_rx_desc_ring));
2836
2837         /* Setup our descriptor pointers */
2838         que->rx.sc_rx_desc_tail = 0;
2839         que->rx.sc_rx_desc_head = sc->sc_rx_slots - 1;
2840
2841         lwm = max(4, 2 * ((ifp->if_hardmtu / MCLBYTES) + 1));
2842         if_rxr_init(&que->rx.sc_rx_ring, lwm, sc->sc_rx_slots);
2843
2844         if (em_rxfill(que) == 0) {
2845             printf("%s: unable to fill any rx descriptors\n",
2846                 DEVNAME(sc));
2847             return (ENOMEM);
2848         }
2849     }
2850
2851     return (0);
2852 }
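/*
 * Worked low-water mark from the expression above: with a 1500-byte
 * if_hardmtu and 2048-byte clusters, 2 * ((1500 / 2048) + 1) = 2, so
 * lwm = max(4, 2) = 4; a 9000-byte jumbo hardmtu would give
 * 2 * ((9000 / 2048) + 1) = 10.
 */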
2853
2854 /*********************************************************************
2855  *
2856  * Enable receive unit.
2857  *
2858  **********************************************************************/
2859 void
2860 em_initialize_receive_unit(struct em_softc *sc)
2861 {
2862     struct em_queue *que;
2863     u_int32_t reg_rctl;
2864     u_int32_t reg_rxcsum;
2865     u_int32_t reg_srrctl;
2866     u_int64_t bus_addr;
2867
2868     INIT_DEBUGOUT("em_initialize_receive_unit: begin");
2869
2870     /* Make sure receives are disabled while setting up the descriptor ring */
2871     E1000_WRITE_REG(&sc->hw, RCTL, 0);
2872
2873     /* Set the Receive Delay Timer Register */
2874     E1000_WRITE_REG(&sc->hw, RDTR,
2875         sc->rx_int_delay | E1000_RDT_FPDB);
2876
2877     if (sc->hw.mac_type >= em_82540) {
2878         if (sc->rx_int_delay)
2879             E1000_WRITE_REG(&sc->hw, RADV, sc->rx_abs_int_delay);
2880
2881         /* Set the interrupt throttling rate. Value is calculated
2882          * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) */
2883         E1000_WRITE_REG(&sc->hw, ITR, DEFAULT_ITR);
2884     }
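    /*
     * Worked ITR value from the formula above: with MAX_INTS_PER_SEC =
     * 8000, DEFAULT_ITR = 1000000000 / (8000 * 256) = 488 units of
     * 256 ns, i.e. roughly 125 us between receive interrupts.
     */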
2885
2886     /* Setup the Receive Control Register */
2887     reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2888         E1000_RCTL_RDMTS_HALF |
2889         (sc->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
2890
2891     if (sc->hw.tbi_compatibility_on == TRUE)
2892         reg_rctl |= E1000_RCTL_SBP;
2893
2894     /*
2895      * The i350 has a bug where it always strips the CRC whether
2896      * asked to or not. So ask for stripped CRC here and
2897      * cope in rxeof
2898      */
2899     if (sc->hw.mac_type == em_i210 || sc->hw.mac_type == em_i350)
2900         reg_rctl |= E1000_RCTL_SECRC;
2901
2902     switch (sc->sc_rx_buffer_len) {
2903     default:
2904     case EM_RXBUFFER_2048:
2905         reg_rctl |= E1000_RCTL_SZ_2048;
2906         break;
2907     case EM_RXBUFFER_4096:
2908         reg_rctl |= E1000_RCTL_SZ_4096|E1000_RCTL_BSEX|E1000_RCTL_LPE;
2909         break;
2910     case EM_RXBUFFER_8192:
2911         reg_rctl |= E1000_RCTL_SZ_8192|E1000_RCTL_BSEX|E1000_RCTL_LPE;
2912         break;
2913     case EM_RXBUFFER_16384:
2914         reg_rctl |= E1000_RCTL_SZ_16384|E1000_RCTL_BSEX|E1000_RCTL_LPE;
2915         break;
2916     }
2917
2918     if (sc->hw.max_frame_size != ETHER_MAX_LEN)
2919         reg_rctl |= E1000_RCTL_LPE;
2920
2921     /* Enable 82543 Receive Checksum Offload for TCP and UDP */
2922     if (sc->hw.mac_type >= em_82543) {
2923         reg_rxcsum = E1000_READ_REG(&sc->hw, RXCSUM);
2924         reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2925         E1000_WRITE_REG(&sc->hw, RXCSUM, reg_rxcsum);
2926     }
2927
2928     /*
2929      * XXX TEMPORARY WORKAROUND: on some systems with 82573
2930      * long latencies are observed, like Lenovo X60.
2931      */
2932     if (sc->hw.mac_type == em_82573)
2933         E1000_WRITE_REG(&sc->hw, RDTR, 0x20);
2934
2935     FOREACH_QUEUE(sc, que) {
2936         if (sc->num_queues > 1) {
2937             /*
2938              * Disable Drop Enable for every queue, default has
2939              * it enabled for queues > 0
2940              */
2941             reg_srrctl = E1000_READ_REG(&sc->hw, SRRCTL(que->me));
2942             reg_srrctl &= ~E1000_SRRCTL_DROP_EN;
2943             E1000_WRITE_REG(&sc->hw, SRRCTL(que->me), reg_srrctl);
2944         }
2945
2946         /* Setup the Base and Length of the Rx Descriptor Ring */
2947         bus_addr = que->rx.sc_rx_dma.dma_map->dm_segs[0].ds_addr;
2948         E1000_WRITE_REG(&sc->hw, RDLEN(que->me),
2949             sc->sc_rx_slots * sizeof(*que->rx.sc_rx_desc_ring));
2950         E1000_WRITE_REG(&sc->hw, RDBAH(que->me), (u_int32_t)(bus_addr >> 32));
2951         E1000_WRITE_REG(&sc->hw, RDBAL(que->me), (u_int32_t)bus_addr);
2952
2953         if (sc->hw.mac_type == em_82575 || sc->hw.mac_type == em_82580 ||
2954             sc->hw.mac_type == em_82576 ||
2955             sc->hw.mac_type == em_i210 || sc->hw.mac_type == em_i350) {
2956             /* 82575/6 need to enable the RX queue */
2957             uint32_t reg;
2958             reg = E1000_READ_REG(&sc->hw, RXDCTL(que->me));
2959             reg |= E1000_RXDCTL_QUEUE_ENABLE;
2960             E1000_WRITE_REG(&sc->hw, RXDCTL(que->me), reg);
2961         }
2962     }
2963
2964     /* Enable Receives */
2965     E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
2966
2967     /* Setup the HW Rx Head and Tail Descriptor Pointers */
2968     FOREACH_QUEUE(sc, que) {
2969         E1000_WRITE_REG(&sc->hw, RDH(que->me), 0);
2970         E1000_WRITE_REG(&sc->hw, RDT(que->me), que->rx.sc_rx_desc_head);
2971     }
2972 }
2973
2974 /*********************************************************************
2975  *
2976  * Free receive related data structures.
2977  *
2978  **********************************************************************/
2979 void
2980 em_free_receive_structures(struct em_softc *sc)
2981 {
2982     struct em_queue *que;
2983     struct em_packet *pkt;
2984     int i;
2985
2986     INIT_DEBUGOUT("free_receive_structures: begin");
2987
2988     FOREACH_QUEUE(sc, que) {
2989         if_rxr_init(&que->rx.sc_rx_ring, 0, 0);
2990
2991         bus_dmamap_sync(sc->sc_dmat, que->rx.sc_rx_dma.dma_map,
2992             0, que->rx.sc_rx_dma.dma_map->dm_mapsize,
2993             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2994
2995         if (que->rx.sc_rx_pkts_ring != NULL) {
2996             for (i = 0; i < sc->sc_rx_slots; i++) {
2997                 pkt = &que->rx.sc_rx_pkts_ring[i];
2998                 if (pkt->pkt_m != NULL) {
2999                     bus_dmamap_sync(sc->sc_dmat,
3000                         pkt->pkt_map,
3001                         0, pkt->pkt_map->dm_mapsize,
3002                         BUS_DMASYNC_POSTREAD);
3003                     bus_dmamap_unload(sc->sc_dmat,
3004                         pkt->pkt_map);
3005                     m_freem(pkt->pkt_m);
3006                     pkt->pkt_m = NULL;
3007                 }
3008                 bus_dmamap_destroy(sc->sc_dmat, pkt->pkt_map);
3009             }
3010
3011             free(que->rx.sc_rx_pkts_ring, M_DEVBUF,
3012                 sc->sc_rx_slots * sizeof(*que->rx.sc_rx_pkts_ring));
3013             que->rx.sc_rx_pkts_ring = NULL;
3014         }
3015
3016         if (que->rx.fmp != NULL) {
3017             m_freem(que->rx.fmp);
3018             que->rx.fmp = NULL;
3019             que->rx.lmp = NULL;
3020         }
3021     }
3022 }
3023
3024 int
3025 em_rxfill(struct em_queue *que)
3026 {
3027     struct em_softc *sc = que->sc;
3028     u_int slots;
3029     int post = 0;
3030     int i;
3031
3032     i = que->rx.sc_rx_desc_head;
3033
3034     bus_dmamap_sync(sc->sc_dmat, que->rx.sc_rx_dma.dma_map,
3035         0, que->rx.sc_rx_dma.dma_map->dm_mapsize,
3036         BUS_DMASYNC_POSTWRITE);
3037
3038     for (slots = if_rxr_get(&que->rx.sc_rx_ring, sc->sc_rx_slots);
3039         slots > 0; slots--) {
3040         if (++i == sc->sc_rx_slots)
3041             i = 0;
3042
3043         if (em_get_buf(que, i) != 0)
3044             break;
3045
3046         que->rx.sc_rx_desc_head = i;
3047         post = 1;
3048     }
3049
3050     if_rxr_put(&que->rx.sc_rx_ring, slots);
3051
3052     bus_dmamap_sync(sc->sc_dmat, que->rx.sc_rx_dma.dma_map,
3053         0, que->rx.sc_rx_dma.dma_map->dm_mapsize,
3054         BUS_DMASYNC_PREWRITE);
3055
3056     return (post);
3057 }
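/*
 * The if_rxr_get()/if_rxr_put() pair above implements the slot
 * accounting: if_rxr_get() reserves up to sc_rx_slots descriptors, the
 * loop consumes one reservation per successfully filled buffer, and
 * if_rxr_put() hands back whatever was left unused, keeping the ring's
 * alive count in step with what the hardware owns.
 */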
3058
3059 void
3060 em_rxrefill(void *arg)
3061 {
3062     struct em_queue *que = arg;
3063     int s;
3064
3065     s = splnet();
3066     em_rxrefill_locked(que);
3067     splx(s);
3068 }
3069
3070 void
3071 em_rxrefill_locked(struct em_queue *que)
3072 {
3073     struct em_softc *sc = que->sc;
3074
3075     if (em_rxfill(que))
3076         E1000_WRITE_REG(&sc->hw, RDT(que->me), que->rx.sc_rx_desc_head);
3077     else if (if_rxr_needrefill(&que->rx.sc_rx_ring))
3078         timeout_add(&que->rx_refill, 1);
3079 }
3080
3081 /*********************************************************************
3082  *
3083  * This routine executes in interrupt context. It replenishes
3084  * the mbufs in the descriptor ring and passes data which has been
3085  * DMA'ed into host memory up to the upper layer.
3086  *
3087  *********************************************************************/
3088 int
3089 em_rxeof(struct em_queue *que)
3090 {
3091     struct em_softc *sc = que->sc;
3092     struct ifnet *ifp = &sc->sc_ac.ac_if;
3093     struct mbuf_list ml = MBUF_LIST_INITIALIZER();
3094     struct mbuf *m;
3095     u_int8_t accept_frame = 0;
3096     u_int8_t eop = 0;
3097     u_int16_t len, desc_len, prev_len_adj;
3098     int i, rv = 0;
3099
3100     /* Pointer to the receive descriptor being examined. */
3101     struct em_rx_desc *desc;
3102     struct em_packet *pkt;
3103     u_int8_t status;
3104
3105     if (if_rxr_inuse(&que->rx.sc_rx_ring) == 0)
3106         return (0);
3107
3108     i = que->rx.sc_rx_desc_tail;
3109
3110     bus_dmamap_sync(sc->sc_dmat, que->rx.sc_rx_dma.dma_map,
3111         0, que->rx.sc_rx_dma.dma_map->dm_mapsize,
3112         BUS_DMASYNC_POSTREAD);
3113
3114     do {
3115         m = NULL;
3116
3117         pkt = &que->rx.sc_rx_pkts_ring[i];
3118         desc = &que->rx.sc_rx_desc_ring[i];
3119
3120         status = desc->status;
3121         if (!ISSET(status, E1000_RXD_STAT_DD))
3122             break;
3123
3124         /* pull the mbuf off the ring */
3125         bus_dmamap_sync(sc->sc_dmat, pkt->pkt_map,
3126             0, pkt->pkt_map->dm_mapsize,
3127             BUS_DMASYNC_POSTREAD);
3128         bus_dmamap_unload(sc->sc_dmat, pkt->pkt_map);
3129         m = pkt->pkt_m;
3130         pkt->pkt_m = NULL;
3131
3132         KASSERT(m != NULL);
3133
3134         if_rxr_put(&que->rx.sc_rx_ring, 1);
3135         rv = 1;
3136
3137         accept_frame = 1;
3138         prev_len_adj = 0;
3139         desc_len = letoh16(desc->length);
3140
3141         if (status & E1000_RXD_STAT_EOP) {
3142             eop = 1;
3143             if (desc_len < ETHER_CRC_LEN) {
3144                 len = 0;
3145                 prev_len_adj = ETHER_CRC_LEN - desc_len;
3146             } else if (sc->hw.mac_type == em_i210 ||
3147                 sc->hw.mac_type == em_i350)
3148                 len = desc_len;
3149             else
3150                 len = desc_len - ETHER_CRC_LEN;
3151         } else {
3152             eop = 0;
3153             len = desc_len;
3154         }
3155
3156         if (desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
3157             u_int8_t last_byte;
3158             u_int32_t pkt_len = desc_len;
3159
3160             if (que->rx.fmp != NULL)
3161                 pkt_len += que->rx.fmp->m_pkthdr.len;
3162
3163             last_byte = *(mtod(m, caddr_t) + desc_len - 1);
3164             if (TBI_ACCEPT(&sc->hw, status, desc->errors,
3165                 pkt_len, last_byte)) {
3166 #if NKSTAT > 0
3167                 em_tbi_adjust_stats(sc,
3168                     pkt_len, sc->hw.mac_addr);
3169 #endif
3170                 if (len > 0)
3171                     len--;
3172             } else
3173                 accept_frame = 0;
3174         }
3175
3176         if (accept_frame) {
3177             /* Assign correct length to the current fragment */
3178             m->m_len = len;
3179
3180             if (que->rx.fmp == NULL) {
3181                 m->m_pkthdr.len = m->m_len;
3182                 que->rx.fmp = m; /* Store the first mbuf */
3183                 que->rx.lmp = m;
3184             } else {
3185                 /* Chain mbuf's together */
3186                 m->m_flags &= ~M_PKTHDR;
3187                 /*
3188                  * Adjust length of previous mbuf in chain if
3189                  * we received less than 4 bytes in the last
3190                  * descriptor.
3191                  */
3192                 if (prev_len_adj > 0) {
3193                     que->rx.lmp->m_len -= prev_len_adj;
3194                     que->rx.fmp->m_pkthdr.len -= prev_len_adj;
3195                 }
3196                 que->rx.lmp->m_next = m;
3197                 que->rx.lmp = m;
3198                 que->rx.fmp->m_pkthdr.len += m->m_len;
3199             }
3200
3201             if (eop) {
3202                 m = que->rx.fmp;
3203
3204                 em_receive_checksum(sc, desc, m);
3205 #if NVLAN > 0
3206                 if (desc->status & E1000_RXD_STAT_VP) {
3207                     m->m_pkthdr.ether_vtag =
3208                         letoh16(desc->special);
3209                     m->m_flags |= M_VLANTAG;
3210                 }
3211 #endif
3212                 ml_enqueue(&ml, m);
3213
3214                 que->rx.fmp = NULL;
3215                 que->rx.lmp = NULL;
3216             }
3217         } else {
3218             que->rx.dropped_pkts++;
3219
3220             if (que->rx.fmp != NULL) {
3221                 m_freem(que->rx.fmp);
3222                 que->rx.fmp = NULL;
3223                 que->rx.lmp = NULL;
3224             }
3225
3226             m_freem(m);
3227         }
3228
3229         /* Advance our pointers to the next descriptor. */
3230         if (++i == sc->sc_rx_slots)
3231             i = 0;
3232     } while (if_rxr_inuse(&que->rx.sc_rx_ring) > 0);
3233
3234     bus_dmamap_sync(sc->sc_dmat, que->rx.sc_rx_dma.dma_map,
3235         0, que->rx.sc_rx_dma.dma_map->dm_mapsize,
3236         BUS_DMASYNC_PREREAD);
3237
3238     que->rx.sc_rx_desc_tail = i;
3239
3240     if (ifiq_input(&ifp->if_rcv, &ml))
3241         if_rxr_livelocked(&que->rx.sc_rx_ring);
3242
3243     return (rv);
3244 }
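/*
 * A minimal sketch of the fmp/lmp chaining done above (illustrative
 * only, not part of if_em.c): fmp points at the first mbuf of the frame
 * and carries the packet header, lmp at the latest fragment.
 *
 *     if (fmp == NULL)
 *         fmp = lmp = m;
 *     else {
 *         lmp->m_next = m;
 *         lmp = m;
 *         fmp->m_pkthdr.len += m->m_len;
 *     }
 */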
3245
3246 /*********************************************************************
3247  *
3248  * Verify that the hardware indicated that the checksum is valid.
3249  * Inform the stack about the status of the checksum so that the
3250  * stack doesn't spend time verifying it again.
3251  *
3252  *********************************************************************/
3253 void
3254 em_receive_checksum(struct em_softc *sc, struct em_rx_desc *rx_desc,
3255     struct mbuf *mp)
3256 {
3257     /* 82543 or newer only */
3258     if ((sc->hw.mac_type < em_82543) ||
3259         /* Ignore Checksum bit is set */
3260         (rx_desc->status & E1000_RXD_STAT_IXSM)) {
3261         mp->m_pkthdr.csum_flags = 0;
3262         return;
3263     }
3264
3265     if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3266         /* Did it pass? */
3267         if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3268             /* IP Checksum Good */
3269             mp->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
3270
3271         } else
3272             mp->m_pkthdr.csum_flags = 0;
3273     }
3274
3275     if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3276         /* Did it pass? */
3277         if (!(rx_desc->errors & E1000_RXD_ERR_TCPE))
3278             mp->m_pkthdr.csum_flags |=
3279                 M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
3280     }
3281 }
3282
3283 /*
3284  * This turns on the hardware offload of VLAN tag
3285  * insertion and stripping.
3286  */
3287 void
3288 em_enable_hw_vlans(struct em_softc *sc)
3289 {
3290     uint32_t ctrl;
3291
3292     ctrl = E1000_READ_REG(&sc->hw, CTRL);
3293     ctrl |= E1000_CTRL_VME;
3294     E1000_WRITE_REG(&sc->hw, CTRL, ctrl);
3295 }
3296
3297 void
3298 em_enable_intr(struct em_softc *sc)
3299 {
3300     uint32_t mask;
3301
3302     if (sc->msix) {
3303         mask = sc->msix_queuesmask | sc->msix_linkmask;
3304         E1000_WRITE_REG(&sc->hw, EIAC, mask);
3305         E1000_WRITE_REG(&sc->hw, EIAM, mask);
3306         E1000_WRITE_REG(&sc->hw, EIMS, mask);
3307         E1000_WRITE_REG(&sc->hw, IMS, E1000_IMS_LSC);
3308     } else
3309         E1000_WRITE_REG(&sc->hw, IMS, (IMS_ENABLE_MASK));
3310 }
3311
3312 void
3313 em_disable_intr(struct em_softc *sc)
3314 {
3315     /*
3316      * The first version of 82542 had an erratum where, when link
3317      * was forced, it would stay up even if the cable was disconnected.
3318      * Sequence errors were used to detect the disconnect and then
3319      * the driver would unforce the link. This code is in the ISR.
3320      * For this to work correctly the Sequence error interrupt had
3321      * to be enabled all the time.
3322      */
3323     if (sc->msix) {
3324         E1000_WRITE_REG(&sc->hw, EIMC, ~0);
3325         E1000_WRITE_REG(&sc->hw, EIAC, 0);
3326     } else if (sc->hw.mac_type == em_82542_rev2_0)
3327         E1000_WRITE_REG(&sc->hw, IMC, (0xffffffff & ~E1000_IMC_RXSEQ));
3328     else
3329         E1000_WRITE_REG(&sc->hw, IMC, 0xffffffff);
3330 }
3331
3332 void
3333 em_write_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
3334 {
3335     struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
3336     pcireg_t val;
3337
3338     val = pci_conf_read(pa->pa_pc, pa->pa_tag, reg & ~0x3);
3339     if (reg & 0x2) {
3340         val &= 0x0000ffff;
3341         val |= (*value << 16);
3342     } else {
3343         val &= 0xffff0000;
3344         val |= *value;
3345     }
3346     pci_conf_write(pa->pa_pc, pa->pa_tag, reg & ~0x3, val);
3347 }
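/*
 * Worked example for the read-modify-write above: a write to the
 * 16-bit register at config offset 0x06 reads the dword at 0x04, keeps
 * the low half (bits 15:0) and merges the new value into the high half
 * (bits 31:16) before writing the dword back.
 */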
3348
3349 void
3350 em_read_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
3351 {
3352     struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
3353     pcireg_t val;
3354
3355     val = pci_conf_read(pa->pa_pc, pa->pa_tag, reg & ~0x3);
3356     if (reg & 0x2)
3357         *value = (val >> 16) & 0xffff;
3358     else
3359         *value = val & 0xffff;
3360 }
3361
3362 void
3363 em_pci_set_mwi(struct em_hw *hw)
3364 {
3365     struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
3366
3367     pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
3368         (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE));
3369 }
3370
3371 void
3372 em_pci_clear_mwi(struct em_hw *hw)
3373 {
3374     struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
3375
3376     pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
3377         (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE));
3378 }
3379
3380 /*
3381  * We may eventually really do this, but it's unnecessary
3382  * for now so we just return unsupported.
3383  */
3384 int32_t
3385 em_read_pcie_cap_reg(struct em_hw *hw, uint32_t reg, uint16_t *value)
3386 {
3387     return -E1000_NOT_IMPLEMENTED;
3388 }
3389
3390 /*********************************************************************
3391 * 82544 Coexistence issue workaround.
3392 * There are 2 issues.
3393 * 1. Transmit Hang issue.
3394 * To detect this issue, the following equation can be used:
3395 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3396 * If SUM[3:0] is between 1 and 4, we will have the Transmit Hang issue.
3397 *
3398 * 2. DAC issue.
3399 * To detect this issue, the following equation can be used:
3400 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3401 * If SUM[3:0] is between 9 and 0xC, we will have the DAC issue.
3402 *
3403 *
3404 * WORKAROUND:
3405 * Make sure we do not have an ending address of 1,2,3,4 (Hang) or 9,a,b,c (DAC).
3406 *
3407 **********************************************************************/
3408 u_int32_t
3409 em_fill_descriptors(u_int64_t address, u_int32_t length,
3410     PDESC_ARRAY desc_array)
3411 {
3412     /* The issue is sensitive to both length and address, */
3413     /* so let us first check the address... */
3414     u_int32_t safe_terminator;
3415     if (length <= 4) {
3416         desc_array->descriptor[0].address = address;
3417         desc_array->descriptor[0].length = length;
3418         desc_array->elements = 1;
3419         return desc_array->elements;
3420     }
3421     safe_terminator = (u_int32_t)((((u_int32_t)address & 0x7) + (length & 0xF)) & 0xF);
3422     /* If it does not fall between 0x1-0x4 or 0x9-0xC, one descriptor is safe. */
3423     if (safe_terminator == 0 ||
3424         (safe_terminator > 4 &&
3425         safe_terminator < 9) ||
3426         (safe_terminator > 0xC &&
3427         safe_terminator <= 0xF)) {
3428         desc_array->descriptor[0].address = address;
3429         desc_array->descriptor[0].length = length;
3430         desc_array->elements = 1;
3431         return desc_array->elements;
3432     }
3433
3434     desc_array->descriptor[0].address = address;
3435     desc_array->descriptor[0].length = length - 4;
3436     desc_array->descriptor[1].address = address + (length - 4);
3437     desc_array->descriptor[1].length = 4;
3438     desc_array->elements = 2;
3439     return desc_array->elements;
3440 }
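/*
 * Worked example of the check above: a buffer at address 0x1006 with
 * length 0x3e gives safe_terminator =
 * ((0x1006 & 0x7) + (0x3e & 0xf)) & 0xf = (6 + 14) & 0xf = 4, which
 * falls in the 0x1-0x4 hang range, so the buffer is split into a
 * (length - 4)-byte descriptor and a trailing 4-byte descriptor.
 */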
3441
3442 /*
3443  * Disable the L0S and L1 LINK states.
3444  */
3445 void
3446 em_disable_aspm(struct em_softc *sc)
3447 {
3448     int offset;
3449     pcireg_t val;
3450
3451     switch (sc->hw.mac_type) {
3452     case em_82571:
3453     case em_82572:
3454     case em_82573:
3455     case em_82574:
3456         break;
3457     default:
3458         return;
3459     }
3460
3461     if (!pci_get_capability(sc->osdep.em_pa.pa_pc, sc->osdep.em_pa.pa_tag,
3462         PCI_CAP_PCIEXPRESS, &offset, NULL))
3463         return;
3464
3465     /* Disable PCIe Active State Power Management (ASPM). */
3466     val = pci_conf_read(sc->osdep.em_pa.pa_pc, sc->osdep.em_pa.pa_tag,
3467         offset + PCI_PCIE_LCSR);
3468
3469     switch (sc->hw.mac_type) {
3470     case em_82571:
3471     case em_82572:
3472         val &= ~PCI_PCIE_LCSR_ASPM_L1;
3473         break;
3474     case em_82573:
3475     case em_82574:
3476         val &= ~(PCI_PCIE_LCSR_ASPM_L0S |
3477             PCI_PCIE_LCSR_ASPM_L1);
3478         break;
3479     default:
3480         break;
3481     }
3482
3483     pci_conf_write(sc->osdep.em_pa.pa_pc, sc->osdep.em_pa.pa_tag,
3484         offset + PCI_PCIE_LCSR, val);
3485 }
3486
3487 /*
3488  * em_flush_tx_ring - remove all descriptors from the tx_ring
3489  *
3490  * We want to clear all pending descriptors from the TX ring.
3491  * Zeroing happens when the HW reads the regs. We assign the ring itself
3492  * as the data of the next descriptor; the contents do not matter since
3493  * we are about to reset the HW.
3494  */
3495 void
3496 em_flush_tx_ring(struct em_queue *que)
3497 {
3498     struct em_softc *sc = que->sc;
3499     uint32_t tctl, txd_lower = E1000_TXD_CMD_IFCS;
3500     uint16_t size = 512;
3501     struct em_tx_desc *txd;
3502
3503     KASSERT(que->tx.sc_tx_desc_ring != NULL);
3504
3505     tctl = EM_READ_REG(&sc->hw, E1000_TCTL);
3506     EM_WRITE_REG(&sc->hw, E1000_TCTL, tctl | E1000_TCTL_EN);
3507
3508     KASSERT(EM_READ_REG(&sc->hw, E1000_TDT(que->me)) == que->tx.sc_tx_desc_head);
3509
3510     txd = &que->tx.sc_tx_desc_ring[que->tx.sc_tx_desc_head];
3511     txd->buffer_addr = que->tx.sc_tx_dma.dma_map->dm_segs[0].ds_addr;
3512     txd->lower.data = htole32(txd_lower | size);
3513     txd->upper.data = 0;
3514
3515     /* flush descriptors to memory before notifying the HW */
3516     bus_space_barrier(sc->osdep.mem_bus_space_tag,
3517         sc->osdep.mem_bus_space_handle, 0, 0, BUS_SPACE_BARRIER_WRITE);
3518
3519     if (++que->tx.sc_tx_desc_head == sc->sc_tx_slots)
3520         que->tx.sc_tx_desc_head = 0;
3521
3522     EM_WRITE_REG(&sc->hw, E1000_TDT(que->me), que->tx.sc_tx_desc_head);
3523     bus_space_barrier(sc->osdep.mem_bus_space_tag, sc->osdep.mem_bus_space_handle,
3524         0, 0, BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE);
3525     usec_delay(250);
3526 }
3527
3528/*
3529 * em_flush_rx_ring - remove all descriptors from the rx_ring
3530 *
3531 * Mark all descriptors in the RX ring as consumed and disable the rx ring
3532 */
3533void
3534em_flush_rx_ring(struct em_queue *que)
3535{
3536 uint32_t rctl, rxdctl;
3537 struct em_softc *sc = que->sc;
3538
3539 rctl = EM_READ_REG(&sc->hw, E1000_RCTL);
3540 EM_WRITE_REG(&sc->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
3541 E1000_WRITE_FLUSH(&sc->hw);
3542 usec_delay(150);
3543
3544 rxdctl = EM_READ_REG(&sc->hw, E1000_RXDCTL(que->me));
3545 /* zero the lower 14 bits (prefetch and host thresholds) */
3546 rxdctl &= 0xffffc000;
3547 /*
3548 * update thresholds: prefetch threshold to 31, host threshold to 1
3549 * and make sure the granularity is "descriptors" and not "cache lines"
3550 */
3551 rxdctl |= (0x1F | (1 << 8) | E1000_RXDCTL_THRESH_UNIT_DESC);
3552 EM_WRITE_REG(&sc->hw, E1000_RXDCTL(que->me), rxdctl);
3553
3554 /* momentarily enable the RX ring for the changes to take effect */
3555 EM_WRITE_REG(&sc->hw, E1000_RCTL, rctl | E1000_RCTL_EN);
3556 E1000_WRITE_FLUSH(&sc->hw);
3557 usec_delay(150);
3558 EM_WRITE_REG(&sc->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
3559}
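
/*
 * [Editor's sketch] Worked example of the RXDCTL arithmetic above: clear
 * the low 14 bits, then set the prefetch threshold to 31 (bits 0..5), the
 * host threshold to 1 (bits 8..13) and the descriptor-granularity bit
 * (bit 24).  Standalone; the field widths are inferred from the listing.
 */
#include <stdint.h>

static uint32_t
toy_rxdctl_thresholds(uint32_t rxdctl)
{
	rxdctl &= 0xffffc000u;		/* zero prefetch/host thresholds */
	rxdctl |= 0x1Fu;		/* prefetch threshold = 31 */
	rxdctl |= 1u << 8;		/* host threshold = 1 */
	rxdctl |= 0x1000000u;		/* unit = descriptors, not lines */
	return (rxdctl);
}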
3560
3561/*
3562 * em_flush_desc_rings - remove all descriptors from the descriptor rings
3563 *
3564 * In i219, the descriptor rings must be emptied before resetting the HW
3565 * or before changing the device state to D3 during runtime (runtime PM).
3566 *
3567 * Failure to do this will cause the HW to enter a unit hang state which can
3568 * only be released by a PCI reset of the device.
3569 *
3570 */
3571void
3572em_flush_desc_rings(struct em_softc *sc)
3573{
3574 struct em_queue *que = sc->queues; /* Use only first queue. */
3575 struct pci_attach_args *pa = &sc->osdep.em_pa;
3576 uint32_t fextnvm11, tdlen;
3577 uint16_t hang_state;
3578
3579 /* First, disable MULR fix in FEXTNVM11 */
3580 fextnvm11 = EM_READ_REG(&sc->hw, E1000_FEXTNVM11);
3581 fextnvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX;
3582 EM_WRITE_REG(&sc->hw, E1000_FEXTNVM11, fextnvm11);
3583
3584 /* do nothing if we're not in faulty state, or if the queue is empty */
3585 tdlen = EM_READ_REG(&sc->hw, E1000_TDLEN(que->me));
3586 hang_state = pci_conf_read(pa->pa_pc, pa->pa_tag, PCICFG_DESC_RING_STATUS);
3587 if (!(hang_state & FLUSH_DESC_REQUIRED) || !tdlen)
3588 return;
3589 em_flush_tx_ring(que);
3590
3591 /* recheck, maybe the fault is caused by the rx ring */
3592 hang_state = pci_conf_read(pa->pa_pc, pa->pa_tag, PCICFG_DESC_RING_STATUS);
3593 if (hang_state & FLUSH_DESC_REQUIRED)
3594 em_flush_rx_ring(que);
3595}
3596
3597int
3598em_allocate_legacy(struct em_softc *sc)
3599{
3600 pci_intr_handle_t ih;
3601 const char *intrstr = NULL;
3602 struct pci_attach_args *pa = &sc->osdep.em_pa;
3603 pci_chipset_tag_t pc = pa->pa_pc;
3604
3605 if (pci_intr_map_msi(pa, &ih)) {
3606 if (pci_intr_map(pa, &ih)) {
3607 printf(": couldn't map interrupt\n");
3608 return (ENXIO);
3609 }
3610 sc->legacy_irq = 1;
3611 }
3612
3613 intrstr = pci_intr_string(pc, ih);
3614 sc->sc_intrhand = pci_intr_establish(pc, ih, IPL_NET | IPL_MPSAFE,
3615 em_intr, sc, DEVNAME(sc));
3616 if (sc->sc_intrhand == NULL) {
3617 printf(": couldn't establish interrupt");
3618 if (intrstr != NULL)
3619 printf(" at %s", intrstr);
3620 printf("\n");
3621 return (ENXIO);
3622 }
3623 printf(": %s", intrstr);
3624
3625 return (0);
3626}
3627
3628 #if NKSTAT > 0
3629/* this is used to look up the array of kstats quickly */
3630enum em_stat {
3631 em_stat_crcerrs,
3632 em_stat_algnerrc,
3633 em_stat_symerrs,
3634 em_stat_rxerrc,
3635 em_stat_mpc,
3636 em_stat_scc,
3637 em_stat_ecol,
3638 em_stat_mcc,
3639 em_stat_latecol,
3640 em_stat_colc,
3641 em_stat_dc,
3642 em_stat_tncrs,
3643 em_stat_sec,
3644 em_stat_cexterr,
3645 em_stat_rlec,
3646 em_stat_xonrxc,
3647 em_stat_xontxc,
3648 em_stat_xoffrxc,
3649 em_stat_xofftxc,
3650 em_stat_fcruc,
3651 em_stat_prc64,
3652 em_stat_prc127,
3653 em_stat_prc255,
3654 em_stat_prc511,
3655 em_stat_prc1023,
3656 em_stat_prc1522,
3657 em_stat_gprc,
3658 em_stat_bprc,
3659 em_stat_mprc,
3660 em_stat_gptc,
3661 em_stat_gorc,
3662 em_stat_gotc,
3663 em_stat_rnbc,
3664 em_stat_ruc,
3665 em_stat_rfc,
3666 em_stat_roc,
3667 em_stat_rjc,
3668 em_stat_mgtprc,
3669 em_stat_mgtpdc,
3670 em_stat_mgtptc,
3671 em_stat_tor,
3672 em_stat_tot,
3673 em_stat_tpr,
3674 em_stat_tpt,
3675 em_stat_ptc64,
3676 em_stat_ptc127,
3677 em_stat_ptc255,
3678 em_stat_ptc511,
3679 em_stat_ptc1023,
3680 em_stat_ptc1522,
3681 em_stat_mptc,
3682 em_stat_bptc,
3683#if 0
3684 em_stat_tsctc,
3685 em_stat_tsctf,
3686#endif
3687
3688 em_stat_count,
3689};
3690
3691struct em_counter {
3692 const char *name;
3693 enum kstat_kv_unit unit;
3694 uint32_t reg;
3695};
3696
3697static const struct em_counter em_counters[em_stat_count] = {
3698 [em_stat_crcerrs] =
3699 { "rx crc errs", KSTAT_KV_U_PACKETS, E1000_CRCERRS },
3700 [em_stat_algnerrc] = /* >= em_82543 */
3701 { "rx align errs", KSTAT_KV_U_PACKETS, 0 },
3702 [em_stat_symerrs] = /* >= em_82543 */
3703 { "rx symbol errs", KSTAT_KV_U_PACKETS, 0 },
3704 [em_stat_rxerrc] =
3705 { "rx errs", KSTAT_KV_U_PACKETS, E1000_RXERRC },
3706 [em_stat_mpc] =
3707 { "rx missed", KSTAT_KV_U_PACKETS, E1000_MPC },
3708 [em_stat_scc] =
3709 { "tx single coll", KSTAT_KV_U_PACKETS, E1000_SCC },
3710 [em_stat_ecol] =
3711 { "tx excess coll", KSTAT_KV_U_PACKETS, E1000_ECOL },
3712 [em_stat_mcc] =
3713 { "tx multi coll", KSTAT_KV_U_PACKETS, E1000_MCC },
3714 [em_stat_latecol] =
3715 { "tx late coll", KSTAT_KV_U_PACKETS, E1000_LATECOL },
3716 [em_stat_colc] =
3717 { "tx coll", KSTAT_KV_U_NONE, E1000_COLC },
3718 [em_stat_dc] =
3719 { "tx defers", KSTAT_KV_U_NONE, E1000_DC },
3720 [em_stat_tncrs] = /* >= em_82543 */
3721 { "tx no CRS", KSTAT_KV_U_PACKETS, 0 },
3722 [em_stat_sec] =
3723 { "seq errs", KSTAT_KV_U_NONE, E1000_SEC },
3724 [em_stat_cexterr] = /* >= em_82543 */
3725 { "carr ext errs", KSTAT_KV_U_PACKETS, 0 },
3726 [em_stat_rlec] =
3727 { "rx len errs", KSTAT_KV_U_PACKETS, E1000_RLEC },
3728 [em_stat_xonrxc] =
3729 { "rx xon", KSTAT_KV_U_PACKETS, E1000_XONRXC },
3730 [em_stat_xontxc] =
3731 { "tx xon", KSTAT_KV_U_PACKETS, E1000_XONTXC },
3732 [em_stat_xoffrxc] =
3733 { "rx xoff", KSTAT_KV_U_PACKETS, E1000_XOFFRXC },
3734 [em_stat_xofftxc] =
3735 { "tx xoff", KSTAT_KV_U_PACKETS, E1000_XOFFTXC },
3736 [em_stat_fcruc] =
3737 { "FC unsupported", KSTAT_KV_U_PACKETS, E1000_FCRUC },
3738 [em_stat_prc64] =
3739 { "rx 64B", KSTAT_KV_U_PACKETS, E1000_PRC64 },
3740 [em_stat_prc127] =
3741 { "rx 65-127B", KSTAT_KV_U_PACKETS, E1000_PRC127 },
3742 [em_stat_prc255] =
3743 { "rx 128-255B", KSTAT_KV_U_PACKETS, E1000_PRC255 },
3744 [em_stat_prc511] =
3745 { "rx 256-511B", KSTAT_KV_U_PACKETS, E1000_PRC511 },
3746 [em_stat_prc1023] =
3747 { "rx 512-1023B", KSTAT_KV_U_PACKETS, E1000_PRC1023 },
3748 [em_stat_prc1522] =
3749 { "rx 1024-maxB", KSTAT_KV_U_PACKETS, E1000_PRC1522 },
3750 [em_stat_gprc] =
3751 { "rx good", KSTAT_KV_U_PACKETS, E1000_GPRC },
3752 [em_stat_bprc] =
3753 { "rx bcast", KSTAT_KV_U_PACKETS, E1000_BPRC },
3754 [em_stat_mprc] =
3755 { "rx mcast", KSTAT_KV_U_PACKETS, E1000_MPRC },
3756 [em_stat_gptc] =
3757 { "tx good", KSTAT_KV_U_PACKETS, E1000_GPTC },
3758 [em_stat_gorc] = /* 64bit */
3759 { "rx good", KSTAT_KV_U_BYTES, 0 },
3760 [em_stat_gotc] = /* 64bit */
3761 { "tx good", KSTAT_KV_U_BYTES, 0 },
3762 [em_stat_rnbc] =
3763 { "rx no buffers", KSTAT_KV_U_PACKETS, E1000_RNBC },
3764 [em_stat_ruc] =
3765 { "rx undersize", KSTAT_KV_U_PACKETS, E1000_RUC },
3766 [em_stat_rfc] =
3767 { "rx fragments", KSTAT_KV_U_PACKETS, E1000_RFC },
3768 [em_stat_roc] =
3769 { "rx oversize", KSTAT_KV_U_PACKETS, E1000_ROC },
3770 [em_stat_rjc] =
3771 { "rx jabbers", KSTAT_KV_U_PACKETS, E1000_RJC },
3772 [em_stat_mgtprc] =
3773 { "rx mgmt", KSTAT_KV_U_PACKETS, E1000_MGTPRC },
3774 [em_stat_mgtpdc] =
3775 { "rx mgmt drops", KSTAT_KV_U_PACKETS, E1000_MGTPDC },
3776 [em_stat_mgtptc] =
3777 { "tx mgmt", KSTAT_KV_U_PACKETS, E1000_MGTPTC },
3778 [em_stat_tor] = /* 64bit */
3779 { "rx total", KSTAT_KV_U_BYTES, 0 },
3780 [em_stat_tot] = /* 64bit */
3781 { "tx total", KSTAT_KV_U_BYTES, 0 },
3782 [em_stat_tpr] =
3783 { "rx total", KSTAT_KV_U_PACKETS, E1000_TPR },
3784 [em_stat_tpt] =
3785 { "tx total", KSTAT_KV_U_PACKETS, E1000_TPT },
3786 [em_stat_ptc64] =
3787 { "tx 64B", KSTAT_KV_U_PACKETS, E1000_PTC64 },
3788 [em_stat_ptc127] =
3789 { "tx 65-127B", KSTAT_KV_U_PACKETS, E1000_PTC127 },
3790 [em_stat_ptc255] =
3791 { "tx 128-255B", KSTAT_KV_U_PACKETS, E1000_PTC255 },
3792 [em_stat_ptc511] =
3793 { "tx 256-511B", KSTAT_KV_U_PACKETS, E1000_PTC511 },
3794 [em_stat_ptc1023] =
3795 { "tx 512-1023B", KSTAT_KV_U_PACKETS, E1000_PTC1023 },
3796 [em_stat_ptc1522] =
3797 { "tx 1024-maxB", KSTAT_KV_U_PACKETS, E1000_PTC1522 },
3798 [em_stat_mptc] =
3799 { "tx mcast", KSTAT_KV_U_PACKETS, E1000_MPTC },
3800 [em_stat_bptc] =
3801 { "tx bcast", KSTAT_KV_U_PACKETS, E1000_BPTC },
3802 };
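
/*
 * [Editor's sketch] The table above uses C99 designated initializers
 * indexed by the em_stat enum, so each kstat slot and its register offset
 * stay paired even if entries are reordered.  A minimal standalone
 * illustration of the idiom; the toy_* names are invented.
 */
#include <stdio.h>

enum toy_stat { toy_rx_errs, toy_tx_errs, toy_stat_count };

static const char *toy_names[toy_stat_count] = {
	[toy_rx_errs] = "rx errs",
	[toy_tx_errs] = "tx errs",
};

int
main(void)
{
	printf("%s\n", toy_names[toy_tx_errs]);	/* prints "tx errs" */
	return (0);
}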
3803
3804/**********************************************************************
3805 *
3806 * Update the board statistics counters.
3807 *
3808 **********************************************************************/
3809int
3810em_kstat_read(struct kstat *ks)
3811{
3812 struct em_softc *sc = ks->ks_softc;
3813 struct em_hw *hw = &sc->hw;
3814 struct kstat_kv *kvs = ks->ks_data;
3815 uint32_t lo, hi;
3816 unsigned int i;
3817
3818 for (i = 0; i < nitems(em_counters); i++) {
3819 const struct em_counter *c = &em_counters[i];
3820 if (c->reg == 0)
3821 continue;
3822
3823 kstat_kv_u64(&kvs[i]) += EM_READ_REG(hw,
3824 E1000_REG_TR(hw, c->reg)); /* wtf */
3825 }
3826
3827 /* Handle the exceptions. */
3828
3829 if (sc->hw.mac_type >= em_82543) {
3830 kstat_kv_u64(&kvs[em_stat_algnerrc]) +=
3831 E1000_READ_REG(hw, ALGNERRC);
3832 kstat_kv_u64(&kvs[em_stat_rxerrc]) +=
3833 E1000_READ_REG(hw, RXERRC);
3834 kstat_kv_u64(&kvs[em_stat_cexterr]) +=
3835 E1000_READ_REG(hw, CEXTERR);
3836 kstat_kv_u64(&kvs[em_stat_tncrs]) +=
3837 E1000_READ_REG(hw, TNCRS);
3838 #if 0
3839 sc->stats.tsctc +=
3840 E1000_READ_REG(hw, TSCTC);
3841 sc->stats.tsctfc +=
3842 E1000_READ_REG(hw, TSCTFC);
3843 #endif
3844 }
3845
3846 /* For the 64-bit byte counters the low dword must be read first. */
3847 /* Both registers clear on the read of the high dword */
3848
3849 lo = E1000_READ_REG(hw, GORCL);
3850 hi = E1000_READ_REG(hw, GORCH);
3851 kstat_kv_u64(&kvs[em_stat_gorc]) +=
3852 ((uint64_t)hi << 32) | (uint64_t)lo;
3853
3854 lo = E1000_READ_REG(hw, GOTCL);
3855 hi = E1000_READ_REG(hw, GOTCH);
3856 kstat_kv_u64(&kvs[em_stat_gotc]) +=
3857 ((uint64_t)hi << 32) | (uint64_t)lo;
3858
3859 lo = E1000_READ_REG(hw, TORL);
3860 hi = E1000_READ_REG(hw, TORH);
3861 kstat_kv_u64(&kvs[em_stat_tor]) +=
3862 ((uint64_t)hi << 32) | (uint64_t)lo;
3863
3864 lo = E1000_READ_REG(hw, TOTL);
3865 hi = E1000_READ_REG(hw, TOTH);
3866 kstat_kv_u64(&kvs[em_stat_tot]) +=
3867 ((uint64_t)hi << 32) | (uint64_t)lo;
3868
3869 getnanouptime(&ks->ks_updated);
3870
3871 return (0);
3872}
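
/*
 * [Editor's sketch] The 64-bit counters above must be read low dword
 * first, because the hardware clears the counter pair when the high
 * dword is read.  Once both halves are latched, recombining them is a
 * plain shift and OR, modelled here with the two reads mocked as
 * parameters so the sketch compiles standalone.
 */
#include <stdint.h>

static uint64_t
toy_combine64(uint32_t lo, uint32_t hi)
{
	/* lo must have been read before hi, as the driver requires */
	return (((uint64_t)hi << 32) | (uint64_t)lo);
}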
3873
3874void
3875em_kstat_attach(struct em_softc *sc)
3876{
3877 struct kstat *ks;
3878 struct kstat_kv *kvs;
3879 unsigned int i;
3880
3881 mtx_init(&sc->kstat_mtx, IPL_SOFTCLOCK);
3882
3883 ks = kstat_create(DEVNAME(sc), 0, "em-stats", 0,
3884 KSTAT_T_KV1, 0);
3885 if (ks == NULL)
3886 return;
3887
3888 kvs = mallocarray(nitems(em_counters), sizeof(*kvs),
3889 M_DEVBUF, M_WAITOK|M_ZERO);
3890 for (i = 0; i < nitems(em_counters); i++) {
3891 const struct em_counter *c = &em_counters[i];
3892 kstat_kv_unit_init(&kvs[i], c->name,
3893 KSTAT_KV_T_COUNTER64, c->unit);
3894 }
3895
3896 ks->ks_softc = sc;
3897 ks->ks_data = kvs;
3898 ks->ks_datalen = nitems(em_counters) * sizeof(*kvs);
3899 ks->ks_read = em_kstat_read;
3900 kstat_set_mutex(ks, &sc->kstat_mtx);
3901
3902 kstat_install(ks);
3903}
3904
3905/******************************************************************************
3906 * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
3907 *****************************************************************************/
3908void
3909em_tbi_adjust_stats(struct em_softc *sc, uint32_t frame_len, uint8_t *mac_addr)
3910{
3911 struct em_hw *hw = &sc->hw;
3912 struct kstat *ks = sc->kstat;
3913 struct kstat_kv *kvs;
3914
3915 if (ks == NULL)
3916 return;
3917
3918 /* First adjust the frame length. */
3919 frame_len--;
3920
3921 mtx_enter(&sc->kstat_mtx);
3922 kvs = ks->ks_data;
3923
3924 /*
3925 * We need to adjust the statistics counters, since the hardware
3926 * counters overcount this packet as a CRC error and undercount the
3927 * packet as a good packet.
3928 */
3929
3930 /* This packet should not be counted as a CRC error. */
3931 kstat_kv_u64(&kvs[em_stat_crcerrs])--;
3932 /* This packet does count as a Good Packet Received. */
3933 kstat_kv_u64(&kvs[em_stat_gprc])++;
3934
3935 /* Adjust the Good Octets received counters */
3936 kstat_kv_u64(&kvs[em_stat_gorc]) += frame_len;
3937
3938 /*
3939 * Is this a broadcast or multicast? Check broadcast first, since
3940 * the test for a multicast frame will test positive on a broadcast
3941 * frame.
3942 */
3943 if (ETHER_IS_BROADCAST(mac_addr)) {
3944 /* Broadcast packet */
3945 kstat_kv_u64(&kvs[em_stat_bprc])++;
3946 } else if (ETHER_IS_MULTICAST(mac_addr)) {
3947 /* Multicast packet */
3948 kstat_kv_u64(&kvs[em_stat_mprc])++;
3949 }
3950
3951 if (frame_len == hw->max_frame_size) {
3952 /*
3953 * In this case, the hardware has overcounted the number of
3954 * oversize frames.
3955 */
3956 kstat_kv_u64(&kvs[em_stat_roc])--;
3957 }
3958
3959 /*
3960 * Adjust the bin counters when the extra byte put the frame in the
3961 * wrong bin. Remember that the frame_len was adjusted above.
3962 */
3963 if (frame_len == 64) {
3964 kstat_kv_u64(&kvs[em_stat_prc64])++;
3965 kstat_kv_u64(&kvs[em_stat_prc127])--;
3966 } else if (frame_len == 127) {
3967 kstat_kv_u64(&kvs[em_stat_prc127])++;
3968 kstat_kv_u64(&kvs[em_stat_prc255])--;
3969 } else if (frame_len == 255) {
3970 kstat_kv_u64(&kvs[em_stat_prc255])++;
3971 kstat_kv_u64(&kvs[em_stat_prc511])--;
3972 } else if (frame_len == 511) {
3973 kstat_kv_u64(&kvs[em_stat_prc511])++;
3974 kstat_kv_u64(&kvs[em_stat_prc1023])--;
3975 } else if (frame_len == 1023) {
3976 kstat_kv_u64(&kvs[em_stat_prc1023])++;
3977 kstat_kv_u64(&kvs[em_stat_prc1522])--;
3978 } else if (frame_len == 1522) {
3979 kstat_kv_u64(&kvs[em_stat_prc1522])++;
3980 }
3981
3982 mtx_leave(&sc->kstat_mtx);
3983}
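
/*
 * [Editor's sketch] The histogram fix-up above moves one count down a
 * bin whenever the adjusted length lands exactly on a bin boundary (the
 * HW counted the frame one byte long, i.e. one bin too high); the last
 * boundary, 1522, only gains a count.  A standalone boundary test with
 * invented toy_* names:
 */
#include <stdint.h>

static const uint32_t toy_bounds[] = { 64, 127, 255, 511, 1023, 1522 };

/* Returns the boundary index whose bin must be incremented for an
 * adjusted length len, or -1 when no fix-up applies; for every index
 * but the last, the next-larger bin is decremented as well. */
static int
toy_bin_fixup(uint32_t len)
{
	unsigned int i;

	for (i = 0; i < sizeof(toy_bounds) / sizeof(toy_bounds[0]); i++) {
		if (len == toy_bounds[i])
			return ((int)i);
	}
	return (-1);
}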
3984#endif /* NKSTAT > 0 */
3985
3986#ifndef SMALL_KERNEL
3987int
3988em_allocate_msix(struct em_softc *sc)
3989{
3990 pci_intr_handle_t ih;
3991 const char *intrstr = NULL;
3992 struct pci_attach_args *pa = &sc->osdep.em_pa;
3993 pci_chipset_tag_t pc = pa->pa_pc;
3994 struct em_queue *que = sc->queues; /* Use only first queue. */
3995 int vec;
3996
3997 if (!em_enable_msix)
3998 return (ENODEV);
3999
4000 switch (sc->hw.mac_type) {
4001 case em_82576:
4002 case em_82580:
4003 case em_i350:
4004 case em_i210:
4005 break;
4006 default:
4007 return (ENODEV);
4008 }
4009
4010 vec = 0;
4011 if (pci_intr_map_msix(pa, vec, &ih))
4012 return (ENODEV);
4013 sc->msix = 1;
4014
4015 que->me = vec;
4016 que->eims = 1 << vec;
4017 snprintf(que->name, sizeof(que->name), "%s:%d", DEVNAME(sc), vec);
4018
4019 intrstr = pci_intr_string(pc, ih);
4020 que->tag = pci_intr_establish(pc, ih, IPL_NET | IPL_MPSAFE,
4021 em_queue_intr_msix, que, que->name);
4022 if (que->tag == NULL) {
4023 printf(": couldn't establish interrupt");
4024 if (intrstr != NULL)
4025 printf(" at %s", intrstr);
4026 printf("\n");
4027 return (ENXIO);
4028 }
4029
4030 /* Setup linkvector, use last queue vector + 1 */
4031 vec++;
4032 sc->msix_linkvec = vec;
4033 if (pci_intr_map_msix(pa, sc->msix_linkvec, &ih)) {
4034 printf(": couldn't map link vector\n");
4035 return (ENXIO);
4036 }
4037
4038 intrstr = pci_intr_string(pc, ih);
4039 sc->sc_intrhand = pci_intr_establish(pc, ih, IPL_NET | IPL_MPSAFE,
4040 em_link_intr_msix, sc, DEVNAME(sc));
4041 if (sc->sc_intrhand == NULL) {
4042 printf(": couldn't establish interrupt");
4043 if (intrstr != NULL)
4044 printf(" at %s", intrstr);
4045 printf("\n");
4046 return (ENXIO);
4047 }
4048 printf(", %s, %d queue%s", intrstr, vec, (vec > 1) ? "s" : "");
4049
4050 return (0);
4051}
4052
4053/*
4054 * Interrupt for a specific queue (not link interrupts). The EICR bit that
4055 * maps to the EIMS bit expresses both RX and TX, therefore we can't
4056 * distinguish whether this is an RX or a TX completion and must handle both.
4057 * The bits in EICR are autocleared and we _cannot_ read EICR.
4058 */
4059int
4060em_queue_intr_msix(void *vque)
4061{
4062 struct em_queue *que = vque;
4063 struct em_softc *sc = que->sc;
4064 struct ifnet *ifp = &sc->sc_ac.ac_if;
4065
4066 if (ifp->if_flags & IFF_RUNNING) {
4067 em_txeof(que);
4068 if (em_rxeof(que))
4069 em_rxrefill_locked(que);
4070 }
4071
4072 em_enable_queue_intr_msix(que);
4073
4074 return (1);
4075}
4076
4077int
4078em_link_intr_msix(void *arg)
4079{
4080 struct em_softc *sc = arg;
4081 uint32_t icr;
4082
4083 icr = E1000_READ_REG(&sc->hw, ICR);
4084
4085 /* Link status change */
4086 if (icr & E1000_ICR_LSC) {
4087 KERNEL_LOCK();
4088 sc->hw.get_link_status = 1;
4089 em_check_for_link(&sc->hw);
4090 em_update_link_status(sc);
4091 KERNEL_UNLOCK();
4092 }
4093
4094 /* Re-arm unconditionally */
4095 E1000_WRITE_REG(&sc->hw, IMS, E1000_ICR_LSC);
4096 E1000_WRITE_REG(&sc->hw, EIMS, sc->msix_linkmask);
4097
4098 return (1);
4099}
4100
4101/*
4102 * Maps queues into msix interrupt vectors.
4103 */
4104int
4105em_setup_queues_msix(struct em_softc *sc)
4106{
4107 uint32_t ivar, newitr, index;
4108 struct em_queue *que;
4109
4110 KASSERT(sc->msix);
4111
4112 /* First turn on RSS capability */
4113 if (sc->hw.mac_type != em_82575)
4114 E1000_WRITE_REG(&sc->hw, GPIE,
4115 E1000_GPIE_MSIX_MODE | E1000_GPIE_EIAME |
4116 E1000_GPIE_PBA | E1000_GPIE_NSICR);
4117
4118 /* Turn on MSIX */
4119 switch (sc->hw.mac_type) {
4120 case em_82580:
4121 case em_i350:
4122 case em_i210:
4123 /* RX entries */
4124 /*
4125 * Note, this maps queues into MSI-X vectors; it works fine.
4126 * The funky calculation of offsets and checking if que->me is
4127 * odd is due to the weird register distribution; the datasheet
4128 * explains it well (see the sketch after this function).
4129 */
4130 FOREACH_QUEUE(sc, que) {
4131 index = que->me >> 1;
4132 ivar = E1000_READ_REG_ARRAY(&sc->hw, IVAR0, index);
4133 if (que->me & 1) {
4134 ivar &= 0xFF00FFFF;
4135 ivar |= (que->me | E1000_IVAR_VALID) << 16;
4136 } else {
4137 ivar &= 0xFFFFFF00;
4138 ivar |= que->me | E1000_IVAR_VALID;
4139 }
4140 E1000_WRITE_REG_ARRAY(&sc->hw, IVAR0, index, ivar);
4141 }
4142
4143 /* TX entries */
4144 FOREACH_QUEUE(sc, que) {
4145 index = que->me >> 1;
4146 ivar = E1000_READ_REG_ARRAY(&sc->hw, IVAR0, index);
4147 if (que->me & 1) {
4148 ivar &= 0x00FFFFFF;
4149 ivar |= (que->me | E1000_IVAR_VALID) << 24;
4150 } else {
4151 ivar &= 0xFFFF00FF;
4152 ivar |= (que->me | E1000_IVAR_VALID) << 8;
4153 }
4154 E1000_WRITE_REG_ARRAY(&sc->hw, IVAR0, index, ivar);
4155 sc->msix_queuesmask |= que->eims;
4156 }
4157
4158 /* And for the link interrupt */
4159 ivar = (sc->msix_linkvec | E1000_IVAR_VALID) << 8;
4160 sc->msix_linkmask = 1 << sc->msix_linkvec;
4161 E1000_WRITE_REG(&sc->hw, IVAR_MISC, ivar);
4162 break;
4163 case em_82576:
4164 /* RX entries */
4165 FOREACH_QUEUE(sc, que) {
4166 index = que->me & 0x7; /* Each IVAR has two entries */
4167 ivar = E1000_READ_REG_ARRAY(&sc->hw, IVAR0, index);
4168 if (que->me < 8) {
4169 ivar &= 0xFFFFFF00;
4170 ivar |= que->me | E1000_IVAR_VALID;
4171 } else {
4172 ivar &= 0xFF00FFFF;
4173 ivar |= (que->me | E1000_IVAR_VALID) << 16;
4174 }
4175 E1000_WRITE_REG_ARRAY(&sc->hw, IVAR0, index, ivar);
4176 sc->msix_queuesmask |= que->eims;
4177 }
4178 /* TX entries */
4179 FOREACH_QUEUE(sc, que) {
4180 index = que->me & 0x7; /* Each IVAR has two entries */
4181 ivar = E1000_READ_REG_ARRAY(&sc->hw, IVAR0, index);
4182 if (que->me < 8) {
4183 ivar &= 0xFFFF00FF;
4184 ivar |= (que->me | E1000_IVAR_VALID) << 8;
4185 } else {
4186 ivar &= 0x00FFFFFF;
4187 ivar |= (que->me | E1000_IVAR_VALID) << 24;
4188 }
4189 E1000_WRITE_REG_ARRAY(&sc->hw, IVAR0, index, ivar);
4190 sc->msix_queuesmask |= que->eims;
4191 }
4192
4193 /* And for the link interrupt */
4194 ivar = (sc->msix_linkvec | E1000_IVAR_VALID) << 8;
4195 sc->msix_linkmask = 1 << sc->msix_linkvec;
4196 E1000_WRITE_REG(&sc->hw, IVAR_MISC, ivar);
4197 break;
4198 default:
4199 panic("unsupported mac");
4200 break;
4201 }
4202
4203 /* Set the starting interrupt rate */
4204 newitr = (4000000 / MAX_INTS_PER_SEC) & 0x7FFC;
4205
4206 if (sc->hw.mac_type == em_82575)
4207 newitr |= newitr << 16;
4208 else
4209 newitr |= E1000_EITR_CNT_IGNR;
4210
4211 FOREACH_QUEUE(sc, que)
4212 E1000_WRITE_REG(&sc->hw, EITR(que->me), newitr);
4213
4214 return (0);
4215}
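
/*
 * [Editor's sketch] Standalone model of the 82580/i350/i210 IVAR packing
 * above: each 32-bit IVAR register carries two queues, with RX vectors in
 * byte lanes 0 and 2 and TX vectors in lanes 1 and 3, hence index =
 * que->me >> 1 and the odd/even test.  Lane assignments are inferred
 * from the listing, not quoted from the datasheet; toy_* names invented.
 */
#include <stdint.h>

#define TOY_IVAR_VALID	0x80u		/* E1000_IVAR_VALID */

static uint32_t
toy_ivar_set_rx(uint32_t ivar, unsigned int me)
{
	if (me & 1) {
		ivar &= 0xFF00FFFFu;			/* byte lane 2 */
		ivar |= (me | TOY_IVAR_VALID) << 16;
	} else {
		ivar &= 0xFFFFFF00u;			/* byte lane 0 */
		ivar |= me | TOY_IVAR_VALID;
	}
	return (ivar);
}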
4216
4217void
4218em_enable_queue_intr_msix(struct em_queue *que)
4219{
4220 E1000_WRITE_REG(&que->sc->hw, EIMS, que->eims);
4221}
4222#endif /* !SMALL_KERNEL */
4223
4224int
4225em_allocate_desc_rings(struct em_softc *sc)
4226{
4227 struct em_queue *que;
4228
4229 FOREACH_QUEUE(sc, que) {
4230 /* Allocate Transmit Descriptor ring */
4231 if (em_dma_malloc(sc, sc->sc_tx_slots * sizeof(struct em_tx_desc),
4232 &que->tx.sc_tx_dma) != 0) {
4233 printf("%s: Unable to allocate tx_desc memory\n",
4234 DEVNAME(sc));
4235 return (ENOMEM);
4236 }
4237 que->tx.sc_tx_desc_ring =
4238 (struct em_tx_desc *)que->tx.sc_tx_dma.dma_vaddr;
4239
4240 /* Allocate Receive Descriptor ring */
4241 if (em_dma_malloc(sc, sc->sc_rx_slots * sizeof(struct em_rx_desc),
4242 &que->rx.sc_rx_dma) != 0) {
4243 printf("%s: Unable to allocate rx_desc memory\n",
4244 DEVNAME(sc));
4245 return (ENOMEM);
4246 }
4247 que->rx.sc_rx_desc_ring =
4248 (struct em_rx_desc *)que->rx.sc_rx_dma.dma_vaddr;
4249 }
4250
4251 return (0);
4252}
4253
4254int
4255em_get_sffpage(struct em_softc *sc, struct if_sffpage *sff)
4256{
4257 struct em_hw *hw = &sc->hw;
4258 size_t i;
4259 int off;
4260
4261 if (hw->mac_type != em_82575 && hw->mac_type != em_82580 &&
4262 hw->mac_type != em_82576 &&
4263 hw->mac_type != em_i210 && hw->mac_type != em_i350)
4264 return (ENODEV);
4265
4266 if (sff->sff_addr == IFSFF_ADDR_EEPROM)
4267 off = E1000_I2CCMD_SFP_DATA_ADDR(0);
4268 else if (sff->sff_addr == IFSFF_ADDR_DDM)
4269 off = E1000_I2CCMD_SFP_DIAG_ADDR(0);
4270 else
4271 return (EIO);
4272
4273 for (i = 0; i < sizeof(sff->sff_data); i++) {
4274 if (em_read_sfp_data_byte(hw, off + i,
4275 &sff->sff_data[i]) != E1000_SUCCESS)
4276 return (EIO);
4277 }
4278
4279 return (0);
4280}