Bug Summary

File: dev/usb/if_umb.c
Warning: line 1023, column 14
The left expression of the compound assignment is an uninitialized value. The computed value will also be garbage
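
The checker reports this when a compound assignment such as '+=' reads its left-hand operand on a path where that variable has never been assigned. A minimal self-contained illustration of the pattern (not taken from the driver, shown only to make the class of warning concrete):

	/* Illustration of the warning class only; not part of if_umb.c. */
	static int
	overhead_for_format(int fmt)
	{
		int overhead;		/* no initializer */

		switch (fmt) {
		case 0:
			overhead = 4;
			break;
		case 1:
			overhead = 8;
			break;
		}			/* no default case */
		overhead += 3;		/* 'overhead' is garbage when fmt is neither 0 nor 1 */
		return overhead;
	}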

Annotated Source Code

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name if_umb.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/usb/if_umb.c

/usr/src/sys/dev/usb/if_umb.c

1/* $OpenBSD: if_umb.c,v 1.49 2022/01/11 10:34:13 claudio Exp $ */
2
3/*
4 * Copyright (c) 2016 genua mbH
5 * All rights reserved.
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20/*
21 * Mobile Broadband Interface Model specification:
22 * https://www.usb.org/sites/default/files/MBIM10Errata1_073013.zip
23 * Compliance testing guide
24 * https://www.usb.org/sites/default/files/MBIM-Compliance-1.0.pdf
25 */
26#include "bpfilter.h"
27
28#include <sys/param.h>
29#include <sys/mbuf.h>
30#include <sys/socket.h>
31#include <sys/systm.h>
32#include <sys/syslog.h>
33
34#if NBPFILTER1 > 0
35#include <net/bpf.h>
36#endif
37#include <net/if.h>
38#include <net/if_var.h>
39#include <net/if_types.h>
40#include <net/route.h>
41
42#include <netinet/in.h>
43#include <netinet/in_var.h>
44#include <netinet/ip.h>
45
46#ifdef INET61
47#include <netinet/ip6.h>
48#include <netinet6/in6_var.h>
49#include <netinet6/ip6_var.h>
50#include <netinet6/in6_ifattach.h>
51#include <netinet6/nd6.h>
52#endif
53
54#include <machine/bus.h>
55
56#include <dev/usb/usb.h>
57#include <dev/usb/usbdi.h>
58#include <dev/usb/usbdivar.h>
59#include <dev/usb/usbdi_util.h>
60#include <dev/usb/usbdevs.h>
61#include <dev/usb/usbcdc.h>
62
63#include <dev/usb/mbim.h>
64#include <dev/usb/if_umb.h>
65
66#ifdef UMB_DEBUG
67#define DPRINTF(x...)do { } while (0) \
68 do { if (umb_debug) log(LOG_DEBUG7, x); } while (0)
69
70#define DPRINTFN(n, x...)do { } while (0) \
71 do { if (umb_debug >= (n)) log(LOG_DEBUG7, x); } while (0)
72
73#define DDUMPN(n, b, l)do { } while (0) \
74 do { \
75 if (umb_debug >= (n)) \
76 umb_dump((b), (l)); \
77 } while (0)
78
79int umb_debug = 0;
80char *umb_uuid2str(uint8_t [MBIM_UUID_LEN16]);
81void umb_dump(void *, int);
82
83#else
84#define DPRINTF(x...)do { } while (0) do { } while (0)
85#define DPRINTFN(n, x...)do { } while (0) do { } while (0)
86#define DDUMPN(n, b, l)do { } while (0) do { } while (0)
87#endif
88
89#define DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname) (((struct umb_softc *)(sc))->sc_dev.dv_xname)
90
91/*
92 * State change timeout
93 */
94#define UMB_STATE_CHANGE_TIMEOUT30 30
95
96/*
97 * State change flags
98 */
99#define UMB_NS_DONT_DROP0x0001 0x0001 /* do not drop below current state */
100#define UMB_NS_DONT_RAISE0x0002 0x0002 /* do not raise above current state */
101
102/*
103 * Diagnostic macros
104 */
105const struct umb_valdescr umb_regstates[] = MBIM_REGSTATE_DESCRIPTIONS{ { 0, "unknown" }, { 1, "not registered" }, { 2, "searching"
}, { 3, "home network" }, { 4, "roaming network" }, { 5, "partner network"
}, { 6, "access denied" }, { 0, ((void *)0) } }
;
106const struct umb_valdescr umb_dataclasses[] = MBIM_DATACLASS_DESCRIPTIONS{ { 0x00000000, "none" }, { 0x00000001, "GPRS" }, { 0x00000002
, "EDGE" }, { 0x00000004, "UMTS" }, { 0x00000008, "HSDPA" }, {
0x00000010, "HSUPA" }, { 0x00000008|0x00000010, "HSPA" }, { 0x00000020
, "LTE" }, { 0x00010000, "CDMA2000" }, { 0x00020000, "CDMA2000"
}, { 0x00040000, "CDMA2000" }, { 0x00080000, "CDMA2000" }, {
0x00100000, "CDMA2000" }, { 0x00200000, "CDMA2000" }, { 0x00400000
, "CDMA2000" }, { 0x80000000, "custom" }, { 0, ((void *)0) } }
;
107const struct umb_valdescr umb_simstate[] = MBIM_SIMSTATE_DESCRIPTIONS{ { 0, "not initialized" }, { 1, "initialized" }, { 2, "not inserted"
}, { 3, "bad type" }, { 4, "failed" }, { 5, "not activated" }
, { 6, "locked" }, { 0, ((void *)0) } }
;
108const struct umb_valdescr umb_messages[] = MBIM_MESSAGES_DESCRIPTIONS{ { (1U), "MBIM_OPEN_MSG" }, { (2U), "MBIM_CLOSE_MSG" }, { (3U
), "MBIM_COMMAND_MSG" }, { (4U), "MBIM_HOST_ERROR_MSG" }, { (
0x80000001U), "MBIM_OPEN_DONE" }, { (0x80000002U), "MBIM_CLOSE_DONE"
}, { (0x80000003U), "MBIM_COMMAND_DONE" }, { (0x80000004U), "MBIM_FUNCTION_ERROR_MSG"
}, { (0x80000007U), "MBIM_INDICATE_STATUS_MSG" }, { 0, ((void
*)0) } }
;
109const struct umb_valdescr umb_status[] = MBIM_STATUS_DESCRIPTIONS{ { 0, "SUCCESS" }, { 1, "BUSY" }, { 2, "FAILURE" }, { 3, "SIM_NOT_INSERTED"
}, { 4, "BAD_SIM" }, { 5, "PIN_REQUIRED" }, { 6, "PIN_DISABLED"
}, { 7, "NOT_REGISTERED" }, { 8, "PROVIDERS_NOT_FOUND" }, { 9
, "NO_DEVICE_SUPPORT" }, { 10, "PROVIDER_NOT_VISIBLE" }, { 11
, "DATA_CLASS_NOT_AVAILABLE" }, { 12, "PACKET_SERVICE_DETACHED"
}, { 13, "MAX_ACTIVATED_CONTEXTS" }, { 14, "NOT_INITIALIZED"
}, { 15, "VOICE_CALL_IN_PROGRESS" }, { 16, "CONTEXT_NOT_ACTIVATED"
}, { 17, "SERVICE_NOT_ACTIVATED" }, { 18, "INVALID_ACCESS_STRING"
}, { 19, "INVALID_USER_NAME_PWD" }, { 20, "RADIO_POWER_OFF" }
, { 21, "INVALID_PARAMETERS" }, { 22, "READ_FAILURE" }, { 23,
"WRITE_FAILURE" }, { 25, "NO_PHONEBOOK" }, { 26, "PARAMETER_TOO_LONG"
}, { 27, "STK_BUSY" }, { 28, "OPERATION_NOT_ALLOWED" }, { 29
, "MEMORY_FAILURE" }, { 30, "INVALID_MEMORY_INDEX" }, { 31, "MEMORY_FULL"
}, { 32, "FILTER_NOT_SUPPORTED" }, { 33, "DSS_INSTANCE_LIMIT"
}, { 34, "INVALID_DEVICE_SERVICE_OPERATION" }, { 35, "AUTH_INCORRECT_AUTN"
}, { 36, "AUTH_SYNC_FAILURE" }, { 37, "AUTH_AMF_NOT_SET" }, {
38, "CONTEXT_NOT_SUPPORTED" }, { 100, "SMS_UNKNOWN_SMSC_ADDRESS"
}, { 101, "SMS_NETWORK_TIMEOUT" }, { 102, "SMS_LANG_NOT_SUPPORTED"
}, { 103, "SMS_ENCODING_NOT_SUPPORTED" }, { 104, "SMS_FORMAT_NOT_SUPPORTED"
}, { 0, ((void *)0) } }
;
110const struct umb_valdescr umb_cids[] = MBIM_CID_DESCRIPTIONS{ { (1), "MBIM_CID_DEVICE_CAPS" }, { (2), "MBIM_CID_SUBSCRIBER_READY_STATUS"
}, { (3), "MBIM_CID_RADIO_STATE" }, { (4), "MBIM_CID_PIN" },
{ (5), "MBIM_CID_PIN_LIST" }, { (6), "MBIM_CID_HOME_PROVIDER"
}, { (7), "MBIM_CID_PREFERRED_PROVIDERS" }, { (8), "MBIM_CID_VISIBLE_PROVIDERS"
}, { (9), "MBIM_CID_REGISTER_STATE" }, { (10), "MBIM_CID_PACKET_SERVICE"
}, { (11), "MBIM_CID_SIGNAL_STATE" }, { (12), "MBIM_CID_CONNECT"
}, { (13), "MBIM_CID_PROVISIONED_CONTEXTS" }, { (14), "MBIM_CID_SERVICE_ACTIVATION"
}, { (15), "MBIM_CID_IP_CONFIGURATION" }, { (16), "MBIM_CID_DEVICE_SERVICES"
}, { (19), "MBIM_CID_DEVICE_SERVICE_SUBSCRIBE_LIST" }, { (20
), "MBIM_CID_PACKET_STATISTICS" }, { (21), "MBIM_CID_NETWORK_IDLE_HINT"
}, { (22), "MBIM_CID_EMERGENCY_MODE" }, { (23), "MBIM_CID_IP_PACKET_FILTERS"
}, { (24), "MBIM_CID_MULTICARRIER_PROVIDERS" }, { 0, ((void *
)0) } }
;
111const struct umb_valdescr umb_pktstate[] = MBIM_PKTSRV_STATE_DESCRIPTIONS{ { 0, "unknown" }, { 1, "attaching" }, { 2, "attached" }, { 3
, "detaching" }, { 4, "detached" }, { 0, ((void *)0) } }
;
112const struct umb_valdescr umb_actstate[] = MBIM_ACTIVATION_STATE_DESCRIPTIONS{ { 0, "unknown" }, { 1, "activated" }, { 2, "activating" }, {
3, "deactivated" }, { 4, "deactivating" }, { 0, ((void *)0) }
}
;
113const struct umb_valdescr umb_error[] = MBIM_ERROR_DESCRIPTIONS{ { 1, "TIMEOUT_FRAGMENT" }, { 2, "FRAGMENT_OUT_OF_SEQUENCE" }
, { 3, "LENGTH_MISMATCH" }, { 4, "DUPLICATED_TID" }, { 5, "NOT_OPENED"
}, { 6, "UNKNOWN" }, { 7, "CANCEL" }, { 8, "MAX_TRANSFER" },
{ 0, ((void *)0) } }
;
114const struct umb_valdescr umb_pintype[] = MBIM_PINTYPE_DESCRIPTIONS{ { 0, "none" }, { 1, "custom" }, { 2, "PIN1" }, { 3, "PIN2" }
, { 4, "device PIN" }, { 5, "device 1st PIN" }, { 6, "network PIN"
}, { 7, "network subset PIN" }, { 8, "provider PIN" }, { 9, "corporate PIN"
}, { 10, "subsidy lock" }, { 11, "PUK" }, { 12, "PUK2" }, { 13
, "device 1st PUK" }, { 14, "network PUK" }, { 15, "network subset PUK"
}, { 16, "provider PUK" }, { 17, "corporate PUK" }, { 0, ((void
*)0) } }
;
115const struct umb_valdescr umb_istate[] = UMB_INTERNAL_STATE_DESCRIPTIONS{ { UMB_S_DOWN, "down" }, { UMB_S_OPEN, "open" }, { UMB_S_CID
, "CID allocated" }, { UMB_S_RADIO, "radio on" }, { UMB_S_SIMREADY
, "SIM is ready" }, { UMB_S_ATTACHED, "attached" }, { UMB_S_CONNECTED
, "connected" }, { UMB_S_UP, "up" }, { 0, ((void *)0) } }
;
116
117#define umb_regstate(c)umb_val2descr(umb_regstates, (c)) umb_val2descr(umb_regstates, (c))
118#define umb_dataclass(c)umb_val2descr(umb_dataclasses, (c)) umb_val2descr(umb_dataclasses, (c))
119#define umb_simstate(s)umb_val2descr(umb_simstate, (s)) umb_val2descr(umb_simstate, (s))
120#define umb_request2str(m)umb_val2descr(umb_messages, (m)) umb_val2descr(umb_messages, (m))
121#define umb_status2str(s)umb_val2descr(umb_status, (s)) umb_val2descr(umb_status, (s))
122#define umb_cid2str(c)umb_val2descr(umb_cids, (c)) umb_val2descr(umb_cids, (c))
123#define umb_packet_state(s)umb_val2descr(umb_pktstate, (s)) umb_val2descr(umb_pktstate, (s))
124#define umb_activation(s)umb_val2descr(umb_actstate, (s)) umb_val2descr(umb_actstate, (s))
125#define umb_error2str(e)umb_val2descr(umb_error, (e)) umb_val2descr(umb_error, (e))
126#define umb_pin_type(t)umb_val2descr(umb_pintype, (t)) umb_val2descr(umb_pintype, (t))
127#define umb_istate(s)umb_val2descr(umb_istate, (s)) umb_val2descr(umb_istate, (s))
128
129int umb_match(struct device *, void *, void *);
130void umb_attach(struct device *, struct device *, void *);
131int umb_detach(struct device *, int);
132void umb_ncm_setup(struct umb_softc *);
133void umb_ncm_setup_format(struct umb_softc *);
134int umb_alloc_xfers(struct umb_softc *);
135void umb_free_xfers(struct umb_softc *);
136int umb_alloc_bulkpipes(struct umb_softc *);
137void umb_close_bulkpipes(struct umb_softc *);
138int umb_ioctl(struct ifnet *, u_long, caddr_t);
139int umb_output(struct ifnet *, struct mbuf *, struct sockaddr *,
140 struct rtentry *);
141void umb_input(struct ifnet *, struct mbuf *);
142void umb_start(struct ifnet *);
143void umb_rtrequest(struct ifnet *, int, struct rtentry *);
144void umb_watchdog(struct ifnet *);
145void umb_statechg_timeout(void *);
146
147void umb_newstate(struct umb_softc *, enum umb_state, int);
148void umb_state_task(void *);
149void umb_up(struct umb_softc *);
150void umb_down(struct umb_softc *, int);
151
152void umb_get_response_task(void *);
153
154void umb_decode_response(struct umb_softc *, void *, int);
155void umb_handle_indicate_status_msg(struct umb_softc *, void *,
156 int);
157void umb_handle_opendone_msg(struct umb_softc *, void *, int);
158void umb_handle_closedone_msg(struct umb_softc *, void *, int);
159int umb_decode_register_state(struct umb_softc *, void *, int);
160int umb_decode_devices_caps(struct umb_softc *, void *, int);
161int umb_decode_subscriber_status(struct umb_softc *, void *, int);
162int umb_decode_radio_state(struct umb_softc *, void *, int);
163int umb_decode_pin(struct umb_softc *, void *, int);
164int umb_decode_packet_service(struct umb_softc *, void *, int);
165int umb_decode_signal_state(struct umb_softc *, void *, int);
166int umb_decode_connect_info(struct umb_softc *, void *, int);
167void umb_clear_addr(struct umb_softc *);
168int umb_add_inet_config(struct umb_softc *, struct in_addr, u_int,
169 struct in_addr);
170int umb_add_inet6_config(struct umb_softc *, struct in6_addr *,
171 u_int, struct in6_addr *);
172void umb_send_inet_proposal(struct umb_softc *, int);
173int umb_decode_ip_configuration(struct umb_softc *, void *, int);
174void umb_rx(struct umb_softc *);
175void umb_rxeof(struct usbd_xfer *, void *, usbd_status);
176int umb_encap(struct umb_softc *, int);
177void umb_txeof(struct usbd_xfer *, void *, usbd_status);
178void umb_decap(struct umb_softc *, struct usbd_xfer *);
179
180usbd_status umb_send_encap_command(struct umb_softc *, void *, int);
181int umb_get_encap_response(struct umb_softc *, void *, int *);
182void umb_ctrl_msg(struct umb_softc *, uint32_t, void *, int);
183
184void umb_open(struct umb_softc *);
185void umb_close(struct umb_softc *);
186
187int umb_setpin(struct umb_softc *, int, int, void *, int, void *,
188 int);
189void umb_setdataclass(struct umb_softc *);
190void umb_radio(struct umb_softc *, int);
191void umb_allocate_cid(struct umb_softc *);
192void umb_send_fcc_auth(struct umb_softc *);
193void umb_packet_service(struct umb_softc *, int);
194void umb_connect(struct umb_softc *);
195void umb_disconnect(struct umb_softc *);
196void umb_send_connect(struct umb_softc *, int);
197
198void umb_qry_ipconfig(struct umb_softc *);
199void umb_cmd(struct umb_softc *, int, int, void *, int);
200void umb_cmd1(struct umb_softc *, int, int, void *, int, uint8_t *);
201void umb_command_done(struct umb_softc *, void *, int);
202void umb_decode_cid(struct umb_softc *, uint32_t, void *, int);
203void umb_decode_qmi(struct umb_softc *, uint8_t *, int);
204
205void umb_intr(struct usbd_xfer *, void *, usbd_status);
206
207int umb_xfer_tout = USBD_DEFAULT_TIMEOUT5000;
208
209uint8_t umb_uuid_basic_connect[] = MBIM_UUID_BASIC_CONNECT{ 0xa2, 0x89, 0xcc, 0x33, 0xbc, 0xbb, 0x8b, 0x4f, 0xb6, 0xb0,
0x13, 0x3e, 0xc2, 0xaa, 0xe6, 0xdf }
;
210uint8_t umb_uuid_context_internet[] = MBIM_UUID_CONTEXT_INTERNET{ 0x7e, 0x5e, 0x2a, 0x7e, 0x4e, 0x6f, 0x72, 0x72, 0x73, 0x6b,
0x65, 0x6e, 0x7e, 0x5e, 0x2a, 0x7e }
;
211uint8_t umb_uuid_qmi_mbim[] = MBIM_UUID_QMI_MBIM{ 0xd1, 0xa3, 0x0b, 0xc2, 0xf9, 0x7a, 0x6e, 0x43, 0xbf, 0x65,
0xc7, 0xe2, 0x4f, 0xb0, 0xf0, 0xd3 }
;
212uint32_t umb_session_id = 0;
213
214struct cfdriver umb_cd = {
215 NULL((void *)0), "umb", DV_IFNET
216};
217
218const struct cfattach umb_ca = {
219 sizeof (struct umb_softc),
220 umb_match,
221 umb_attach,
222 umb_detach,
223 NULL((void *)0),
224};
225
226int umb_delay = 4000;
227
228struct umb_quirk {
229 struct usb_devno dev;
230 u_int32_t umb_flags;
231 int umb_confno;
232 int umb_match;
233};
234const struct umb_quirk umb_quirks[] = {
235 { { USB_VENDOR_DELL0x413c, USB_PRODUCT_DELL_DW5821E0x81d7 },
236 0,
237 2,
238 UMATCH_VENDOR_PRODUCT13
239 },
240
241 { { USB_VENDOR_QUECTEL0x2c7c, USB_PRODUCT_QUECTEL_EC250x0125 },
242 0,
243 1,
244 UMATCH_VENDOR_PRODUCT13
245 },
246
247
248 { { USB_VENDOR_HUAWEI0x12d1, USB_PRODUCT_HUAWEI_ME906S0x15c1 },
249 UMBFLG_NDP_AT_END0x0004,
250 3,
251 UMATCH_VENDOR_PRODUCT13
252 },
253
254 { { USB_VENDOR_SIERRA0x1199, USB_PRODUCT_SIERRA_EM74550x9079 },
255 UMBFLG_FCC_AUTH_REQUIRED0x0001,
256 0,
257 0
258 },
259
260 { { USB_VENDOR_SIMCOM0x1e0e, USB_PRODUCT_SIMCOM_SIM76000x9003 },
261 0,
262 1,
263 UMATCH_VENDOR_PRODUCT13
264 },
265};
266
267#define umb_lookup(vid, pid)((const struct umb_quirk *)usbd_match_device((const struct usb_devno
*)(umb_quirks), sizeof (umb_quirks) / sizeof ((umb_quirks)[0
]), sizeof ((umb_quirks)[0]), (vid), (pid)))
\
268 ((const struct umb_quirk *)usb_lookup(umb_quirks, vid, pid)usbd_match_device((const struct usb_devno *)(umb_quirks), sizeof
(umb_quirks) / sizeof ((umb_quirks)[0]), sizeof ((umb_quirks
)[0]), (vid), (pid))
)
269
270uint8_t umb_qmi_alloc_cid[] = {
271 0x01,
272 0x0f, 0x00, /* len */
273 0x00, /* QMUX flags */
274 0x00, /* service "ctl" */
275 0x00, /* CID */
276 0x00, /* QMI flags */
277 0x01, /* transaction */
278 0x22, 0x00, /* msg "Allocate CID" */
279 0x04, 0x00, /* TLV len */
280 0x01, 0x01, 0x00, 0x02 /* TLV */
281};
282
283uint8_t umb_qmi_fcc_auth[] = {
284 0x01,
285 0x0c, 0x00, /* len */
286 0x00, /* QMUX flags */
287 0x02, /* service "dms" */
288#define UMB_QMI_CID_OFFS5 5
289 0x00, /* CID (filled in later) */
290 0x00, /* QMI flags */
291 0x01, 0x00, /* transaction */
292 0x5f, 0x55, /* msg "Send FCC Authentication" */
293 0x00, 0x00 /* TLV len */
294};
295
296int
297umb_match(struct device *parent, void *match, void *aux)
298{
299 struct usb_attach_arg *uaa = aux;
300 const struct umb_quirk *quirk;
301 usb_interface_descriptor_t *id;
302
303 quirk = umb_lookup(uaa->vendor, uaa->product)((const struct umb_quirk *)usbd_match_device((const struct usb_devno
*)(umb_quirks), sizeof (umb_quirks) / sizeof ((umb_quirks)[0
]), sizeof ((umb_quirks)[0]), (uaa->vendor), (uaa->product
)))
;
304 if (quirk != NULL((void *)0) && quirk->umb_match)
305 return (quirk->umb_match);
306 if (!uaa->iface)
307 return UMATCH_NONE0;
308 if ((id = usbd_get_interface_descriptor(uaa->iface)) == NULL((void *)0))
309 return UMATCH_NONE0;
310
311 /*
312 * If this function implements NCM, check if alternate setting
313 * 1 implements MBIM.
314 */
315 if (id->bInterfaceClass == UICLASS_CDC0x02 &&
316 id->bInterfaceSubClass ==
317 UISUBCLASS_NETWORK_CONTROL_MODEL13)
318 id = usbd_find_idesc(uaa->device->cdesc, uaa->iface->index, 1);
319 if (id == NULL((void *)0))
320 return UMATCH_NONE0;
321
322 if (id->bInterfaceClass == UICLASS_CDC0x02 &&
323 id->bInterfaceSubClass ==
324 UISUBCLASS_MOBILE_BROADBAND_INTERFACE_MODEL14 &&
325 id->bInterfaceProtocol == 0)
326 return UMATCH_IFACECLASS_IFACESUBCLASS_IFACEPROTO5;
327
328 return UMATCH_NONE0;
329}
330
331void
332umb_attach(struct device *parent, struct device *self, void *aux)
333{
334 struct umb_softc *sc = (struct umb_softc *)self;
335 struct usb_attach_arg *uaa = aux;
336 const struct umb_quirk *quirk;
337 usbd_status status;
338 struct usbd_desc_iter iter;
339 const usb_descriptor_t *desc;
340 int v;
341 struct usb_cdc_union_descriptor *ud;
342 struct mbim_descriptor *md;
343 int i;
344 int ctrl_ep;
345 usb_interface_descriptor_t *id;
346 usb_config_descriptor_t *cd;
347 usb_endpoint_descriptor_t *ed;
348 usb_interface_assoc_descriptor_t *ad;
349 int current_ifaceno = -1;
350 int data_ifaceno = -1;
351 int altnum;
352 int s;
353 struct ifnet *ifp;
354
355 sc->sc_udev = uaa->device;
356 sc->sc_ctrl_ifaceno = uaa->ifaceno;
357 ml_init(&sc->sc_tx_ml);
358
359 quirk = umb_lookup(uaa->vendor, uaa->product)((const struct umb_quirk *)usbd_match_device((const struct usb_devno
*)(umb_quirks), sizeof (umb_quirks) / sizeof ((umb_quirks)[0
]), sizeof ((umb_quirks)[0]), (uaa->vendor), (uaa->product
)))
;
360 if (quirk != NULL((void *)0) && quirk->umb_flags) {
361 DPRINTF("%s: setting flags 0x%x from quirk\n", DEVNAM(sc),do { } while (0)
362 quirk->umb_flags)do { } while (0);
363 sc->sc_flags |= quirk->umb_flags;
364 }
365
366 /*
367 * Normally, MBIM devices are detected by their interface class and
368 * subclass. But for some models that have multiple configurations, it
369 * is better to match by vendor and product id so that we can select
370 * the desired configuration ourselves, e.g. to override a class-based
371 * match to another driver.
372 */
373 if (uaa->configno < 0) {
374 if (quirk == NULL((void *)0)) {
375 printf("%s: unknown configuration for vid/pid match\n",
376 DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname));
377 goto fail;
378 }
379 uaa->configno = quirk->umb_confno;
380 DPRINTF("%s: switching to config #%d\n", DEVNAM(sc),do { } while (0)
381 uaa->configno)do { } while (0);
382 status = usbd_set_config_no(sc->sc_udev, uaa->configno, 1);
383 if (status) {
384 printf("%s: failed to switch to config #%d: %s\n",
385 DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname), uaa->configno, usbd_errstr(status));
386 goto fail;
387 }
388 usbd_delay_ms(sc->sc_udev, 200);
389
390 /*
391 * Need to do some manual setup that usbd_probe_and_attach()
392 * would do for us otherwise.
393 */
394 uaa->nifaces = uaa->device->cdesc->bNumInterfaces;
395 for (i = 0; i < uaa->nifaces; i++) {
396 if (usbd_iface_claimed(sc->sc_udev, i))
397 continue;
398 id = usbd_get_interface_descriptor(&uaa->device->ifaces[i]);
399 if (id != NULL((void *)0) && id->bInterfaceClass == UICLASS_CDC0x02 &&
400 id->bInterfaceSubClass ==
401 UISUBCLASS_MOBILE_BROADBAND_INTERFACE_MODEL14) {
402 uaa->iface = &uaa->device->ifaces[i];
403 uaa->ifaceno = uaa->iface->idesc->bInterfaceNumber;
404 sc->sc_ctrl_ifaceno = uaa->ifaceno;
405 break;
406 }
407 }
408 }
409
410 /*
411 * Some MBIM hardware does not provide the mandatory CDC Union
412 * Descriptor, so we also look at matching Interface
413 * Association Descriptors to find out the MBIM Data Interface
414 * number.
415 */
416 sc->sc_ver_maj = sc->sc_ver_min = -1;
417 sc->sc_maxpktlen = MBIM_MAXSEGSZ_MINVAL(2 * 1024);
418 usbd_desc_iter_init(sc->sc_udev, &iter);
419 while ((desc = usbd_desc_iter_next(&iter))) {
420 if (desc->bDescriptorType == UDESC_IFACE_ASSOC0x0B) {
421 ad = (usb_interface_assoc_descriptor_t *)desc;
422 if (ad->bFirstInterface == uaa->ifaceno &&
423 ad->bInterfaceCount > 1)
424 data_ifaceno = uaa->ifaceno + 1;
425 continue;
426 }
427 if (desc->bDescriptorType == UDESC_INTERFACE0x04) {
428 id = (usb_interface_descriptor_t *)desc;
429 current_ifaceno = id->bInterfaceNumber;
430 continue;
431 }
432 if (current_ifaceno != uaa->ifaceno)
433 continue;
434 if (desc->bDescriptorType != UDESC_CS_INTERFACE0x24)
435 continue;
436 switch (desc->bDescriptorSubtype) {
437 case UDESCSUB_CDC_UNION6:
438 ud = (struct usb_cdc_union_descriptor *)desc;
439 data_ifaceno = ud->bSlaveInterface[0];
440 break;
441 case UDESCSUB_MBIM27:
442 md = (struct mbim_descriptor *)desc;
443 v = UGETW(md->bcdMBIMVersion)(*(u_int16_t *)(md->bcdMBIMVersion));
444 sc->sc_ver_maj = MBIM_VER_MAJOR(v)(((v) >> 8) & 0x0f);
445 sc->sc_ver_min = MBIM_VER_MINOR(v)((v) & 0x0f);
446 sc->sc_ctrl_len = UGETW(md->wMaxControlMessage)(*(u_int16_t *)(md->wMaxControlMessage));
447 /* Never trust a USB device! Could try to exploit us */
448 if (sc->sc_ctrl_len < MBIM_CTRLMSG_MINLEN64 ||
449 sc->sc_ctrl_len > MBIM_CTRLMSG_MAXLEN(4 * 1204)) {
450 DPRINTF("%s: control message len %d out of "do { } while (0)
451 "bounds [%d .. %d]\n", DEVNAM(sc),do { } while (0)
452 sc->sc_ctrl_len, MBIM_CTRLMSG_MINLEN,do { } while (0)
453 MBIM_CTRLMSG_MAXLEN)do { } while (0);
454 /* cont. anyway */
455 }
456 sc->sc_maxpktlen = UGETW(md->wMaxSegmentSize)(*(u_int16_t *)(md->wMaxSegmentSize));
457 DPRINTFN(2, "%s: ctrl_len=%d, maxpktlen=%d, cap=0x%x\n",do { } while (0)
458 DEVNAM(sc), sc->sc_ctrl_len, sc->sc_maxpktlen,do { } while (0)
459 md->bmNetworkCapabilities)do { } while (0);
460 break;
461 default:
462 break;
463 }
464 }
465 if (sc->sc_ver_maj < 0) {
466 printf("%s: missing MBIM descriptor\n", DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname));
467 goto fail;
468 }
469 if (sc->sc_flags & UMBFLG_FCC_AUTH_REQUIRED0x0001)
470 sc->sc_cid = -1;
471
472 for (i = 0; i < uaa->nifaces; i++) {
473 if (usbd_iface_claimed(sc->sc_udev, i))
474 continue;
475 id = usbd_get_interface_descriptor(&sc->sc_udev->ifaces[i]);
476 if (id != NULL((void *)0) && id->bInterfaceNumber == data_ifaceno) {
477 sc->sc_data_iface = &sc->sc_udev->ifaces[i];
478 usbd_claim_iface(sc->sc_udev, i);
479 }
480 }
481 if (sc->sc_data_iface == NULL((void *)0)) {
482 printf("%s: no data interface found\n", DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname));
483 goto fail;
484 }
485
486 /*
487 * If this is a combined NCM/MBIM function, switch to
488 * alternate setting one to enable MBIM.
489 */
490 id = usbd_get_interface_descriptor(uaa->iface);
491 if (id->bInterfaceClass == UICLASS_CDC0x02 &&
492 id->bInterfaceSubClass ==
493 UISUBCLASS_NETWORK_CONTROL_MODEL13)
494 usbd_set_interface(uaa->iface, 1);
495
496 id = usbd_get_interface_descriptor(uaa->iface);
497 ctrl_ep = -1;
498 for (i = 0; i < id->bNumEndpoints && ctrl_ep == -1; i++) {
499 ed = usbd_interface2endpoint_descriptor(uaa->iface, i);
500 if (ed == NULL((void *)0))
501 break;
502 if (UE_GET_XFERTYPE(ed->bmAttributes)((ed->bmAttributes) & 0x03) == UE_INTERRUPT0x03 &&
503 UE_GET_DIR(ed->bEndpointAddress)((ed->bEndpointAddress) & 0x80) == UE_DIR_IN0x80)
504 ctrl_ep = ed->bEndpointAddress;
505 }
506 if (ctrl_ep == -1) {
507 printf("%s: missing interrupt endpoint\n", DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname));
508 goto fail;
509 }
510
511 /*
512 * For the MBIM Data Interface, select the appropriate
513 * alternate setting by looking for a matching descriptor that
514 * has two endpoints.
515 */
516 cd = usbd_get_config_descriptor(sc->sc_udev);
517 altnum = usbd_get_no_alts(cd, data_ifaceno);
518 for (i = 0; i < altnum; i++) {
519 id = usbd_find_idesc(cd, sc->sc_data_iface->index, i);
520 if (id == NULL((void *)0))
521 continue;
522 if (id->bInterfaceClass == UICLASS_CDC_DATA0x0a &&
523 id->bInterfaceSubClass == UISUBCLASS_DATA0 &&
524 id->bInterfaceProtocol == UIPROTO_DATA_MBIM0x02 &&
525 id->bNumEndpoints == 2)
526 break;
527 }
528 if (i == altnum || id == NULL((void *)0)) {
529 printf("%s: missing alt setting for interface #%d\n",
530 DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname), data_ifaceno);
531 goto fail;
532 }
533 status = usbd_set_interface(sc->sc_data_iface, i);
534 if (status) {
535 printf("%s: select alt setting %d for interface #%d "
536 "failed: %s\n", DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname), i, data_ifaceno,
537 usbd_errstr(status));
538 goto fail;
539 }
540
541 id = usbd_get_interface_descriptor(sc->sc_data_iface);
542 sc->sc_rx_ep = sc->sc_tx_ep = -1;
543 for (i = 0; i < id->bNumEndpoints; i++) {
544 if ((ed = usbd_interface2endpoint_descriptor(sc->sc_data_iface,
545 i)) == NULL((void *)0))
546 break;
547 if (UE_GET_XFERTYPE(ed->bmAttributes)((ed->bmAttributes) & 0x03) == UE_BULK0x02 &&
548 UE_GET_DIR(ed->bEndpointAddress)((ed->bEndpointAddress) & 0x80) == UE_DIR_IN0x80)
549 sc->sc_rx_ep = ed->bEndpointAddress;
550 else if (UE_GET_XFERTYPE(ed->bmAttributes)((ed->bmAttributes) & 0x03) == UE_BULK0x02 &&
551 UE_GET_DIR(ed->bEndpointAddress)((ed->bEndpointAddress) & 0x80) == UE_DIR_OUT0x00)
552 sc->sc_tx_ep = ed->bEndpointAddress;
553 }
554 if (sc->sc_rx_ep == -1 || sc->sc_tx_ep == -1) {
555 printf("%s: missing bulk endpoints\n", DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname));
556 goto fail;
557 }
558
559 DPRINTFN(2, "%s: ctrl-ifno#%d: ep-ctrl=%d, data-ifno#%d: ep-rx=%d, "do { } while (0)
560 "ep-tx=%d\n", DEVNAM(sc), sc->sc_ctrl_ifaceno,do { } while (0)
561 UE_GET_ADDR(ctrl_ep), data_ifaceno,do { } while (0)
562 UE_GET_ADDR(sc->sc_rx_ep), UE_GET_ADDR(sc->sc_tx_ep))do { } while (0);
563
564 usb_init_task(&sc->sc_umb_task, umb_state_task, sc,((&sc->sc_umb_task)->fun = (umb_state_task), (&
sc->sc_umb_task)->arg = (sc), (&sc->sc_umb_task)
->type = (0), (&sc->sc_umb_task)->state = 0x0)
565 USB_TASK_TYPE_GENERIC)((&sc->sc_umb_task)->fun = (umb_state_task), (&
sc->sc_umb_task)->arg = (sc), (&sc->sc_umb_task)
->type = (0), (&sc->sc_umb_task)->state = 0x0)
;
566 usb_init_task(&sc->sc_get_response_task, umb_get_response_task, sc,((&sc->sc_get_response_task)->fun = (umb_get_response_task
), (&sc->sc_get_response_task)->arg = (sc), (&sc
->sc_get_response_task)->type = (0), (&sc->sc_get_response_task
)->state = 0x0)
567 USB_TASK_TYPE_GENERIC)((&sc->sc_get_response_task)->fun = (umb_get_response_task
), (&sc->sc_get_response_task)->arg = (sc), (&sc
->sc_get_response_task)->type = (0), (&sc->sc_get_response_task
)->state = 0x0)
;
568 timeout_set(&sc->sc_statechg_timer, umb_statechg_timeout, sc);
569
570 if (usbd_open_pipe_intr(uaa->iface, ctrl_ep, USBD_SHORT_XFER_OK0x04,
571 &sc->sc_ctrl_pipe, sc, &sc->sc_intr_msg, sizeof (sc->sc_intr_msg),
572 umb_intr, USBD_DEFAULT_INTERVAL(-1))) {
573 printf("%s: failed to open control pipe\n", DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname));
574 goto fail;
575 }
576 sc->sc_resp_buf = malloc(sc->sc_ctrl_len, M_USBDEV102, M_NOWAIT0x0002);
577 if (sc->sc_resp_buf == NULL((void *)0)) {
578 printf("%s: allocation of resp buffer failed\n", DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname));
579 goto fail;
580 }
581 sc->sc_ctrl_msg = malloc(sc->sc_ctrl_len, M_USBDEV102, M_NOWAIT0x0002);
582 if (sc->sc_ctrl_msg == NULL((void *)0)) {
583 printf("%s: allocation of ctrl msg buffer failed\n",
584 DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname));
585 goto fail;
586 }
587
588 sc->sc_info.regstate = MBIM_REGSTATE_UNKNOWN0;
589 sc->sc_info.pin_attempts_left = UMB_VALUE_UNKNOWN-999;
590 sc->sc_info.rssi = UMB_VALUE_UNKNOWN-999;
591 sc->sc_info.ber = UMB_VALUE_UNKNOWN-999;
592
593 /* Default to 16 bit NTB format. */
594 sc->sc_ncm_format = NCM_FORMAT_NTB160x00;
595 umb_ncm_setup(sc);
596 umb_ncm_setup_format(sc);
597 if (sc->sc_ncm_supported_formats == 0)
598 goto fail;
599 DPRINTFN(2, "%s: rx/tx size %d/%d\n", DEVNAM(sc),do { } while (0)
600 sc->sc_rx_bufsz, sc->sc_tx_bufsz)do { } while (0);
601
602 s = splnet()splraise(0x7);
603 ifp = GET_IFP(sc)(&(sc)->sc_if);
604 ifp->if_flags = IFF_SIMPLEX0x800 | IFF_MULTICAST0x8000 | IFF_POINTOPOINT0x10;
605 ifp->if_ioctl = umb_ioctl;
606 ifp->if_start = umb_start;
607 ifp->if_rtrequest = umb_rtrequest;
608
609 ifp->if_watchdog = umb_watchdog;
610 strlcpy(ifp->if_xname, DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname), IFNAMSIZ16);
611 ifp->if_link_stateif_data.ifi_link_state = LINK_STATE_DOWN2;
612
613 ifp->if_typeif_data.ifi_type = IFT_MBIM0xfa;
614 ifp->if_priority = IF_WWAN_DEFAULT_PRIORITY6;
615 ifp->if_addrlenif_data.ifi_addrlen = 0;
616 ifp->if_hdrlenif_data.ifi_hdrlen = sizeof (struct ncm_header16) +
617 sizeof (struct ncm_pointer16);
618 ifp->if_mtuif_data.ifi_mtu = 1500; /* use a common default */
619 ifp->if_hardmtu = sc->sc_maxpktlen;
620 ifp->if_input = umb_input;
621 ifp->if_output = umb_output;
622 if_attach(ifp);
623 if_alloc_sadl(ifp);
624 ifp->if_softc = sc;
625#if NBPFILTER1 > 0
626 bpfattach(&ifp->if_bpf, ifp, DLT_LOOP12, sizeof(uint32_t));
627#endif
628 /*
629 * Open the device now so that we are able to query device information.
630 * XXX maybe close when done?
631 */
632 umb_open(sc);
633 splx(s)spllower(s);
634
635 DPRINTF("%s: vers %d.%d\n", DEVNAM(sc), sc->sc_ver_maj, sc->sc_ver_min)do { } while (0);
636 return;
637
638fail:
639 usbd_deactivate(sc->sc_udev);
640 return;
641}
642
643int
644umb_detach(struct device *self, int flags)
645{
646 struct umb_softc *sc = (struct umb_softc *)self;
647 struct ifnet *ifp = GET_IFP(sc)(&(sc)->sc_if);
648 int s;
649
650 s = splnet()splraise(0x7);
651 if (ifp->if_flags & IFF_RUNNING0x40)
652 umb_down(sc, 1);
653 umb_close(sc);
654
655 usb_rem_wait_task(sc->sc_udev, &sc->sc_get_response_task);
656 if (timeout_initialized(&sc->sc_statechg_timer)((&sc->sc_statechg_timer)->to_flags & 0x04))
657 timeout_del(&sc->sc_statechg_timer);
658 sc->sc_nresp = 0;
659 usb_rem_wait_task(sc->sc_udev, &sc->sc_umb_task);
660 if (sc->sc_ctrl_pipe) {
661 usbd_close_pipe(sc->sc_ctrl_pipe);
662 sc->sc_ctrl_pipe = NULL((void *)0);
663 }
664 if (sc->sc_ctrl_msg) {
665 free(sc->sc_ctrl_msg, M_USBDEV102, sc->sc_ctrl_len);
666 sc->sc_ctrl_msg = NULL((void *)0);
667 }
668 if (sc->sc_resp_buf) {
669 free(sc->sc_resp_buf, M_USBDEV102, sc->sc_ctrl_len);
670 sc->sc_resp_buf = NULL((void *)0);
671 }
672 if (ifp->if_softc != NULL((void *)0)) {
673 if_detach(ifp);
674 }
675
676 splx(s)spllower(s);
677 return 0;
678}
679
680void
681umb_ncm_setup(struct umb_softc *sc)
682{
683 usb_device_request_t req;
684 struct ncm_ntb_parameters np;
685
686 /* Query NTB transfer sizes */
687 req.bmRequestType = UT_READ_CLASS_INTERFACE(0x80 | 0x20 | 0x01);
688 req.bRequest = NCM_GET_NTB_PARAMETERS0x80;
689 USETW(req.wValue, 0)(*(u_int16_t *)(req.wValue) = (0));
690 USETW(req.wIndex, sc->sc_ctrl_ifaceno)(*(u_int16_t *)(req.wIndex) = (sc->sc_ctrl_ifaceno));
691 USETW(req.wLength, sizeof (np))(*(u_int16_t *)(req.wLength) = (sizeof (np)));
692 if (usbd_do_request(sc->sc_udev, &req, &np) == USBD_NORMAL_COMPLETION &&
693 UGETW(np.wLength)(*(u_int16_t *)(np.wLength)) == sizeof (np)) {
694 sc->sc_rx_bufsz = UGETDW(np.dwNtbInMaxSize)(*(u_int32_t *)(np.dwNtbInMaxSize));
695 sc->sc_tx_bufsz = UGETDW(np.dwNtbOutMaxSize)(*(u_int32_t *)(np.dwNtbOutMaxSize));
696 sc->sc_maxdgram = UGETW(np.wNtbOutMaxDatagrams)(*(u_int16_t *)(np.wNtbOutMaxDatagrams));
697 sc->sc_align = UGETW(np.wNdpOutAlignment)(*(u_int16_t *)(np.wNdpOutAlignment));
698 sc->sc_ndp_div = UGETW(np.wNdpOutDivisor)(*(u_int16_t *)(np.wNdpOutDivisor));
699 sc->sc_ndp_remainder = UGETW(np.wNdpOutPayloadRemainder)(*(u_int16_t *)(np.wNdpOutPayloadRemainder));
700 /* Validate values */
701 if (!powerof2(sc->sc_align)((((sc->sc_align)-1)&(sc->sc_align))==0) || sc->sc_align == 0 ||
702 sc->sc_align >= sc->sc_tx_bufsz)
703 sc->sc_align = sizeof (uint32_t);
704 if (!powerof2(sc->sc_ndp_div)((((sc->sc_ndp_div)-1)&(sc->sc_ndp_div))==0) || sc->sc_ndp_div == 0 ||
705 sc->sc_ndp_div >= sc->sc_tx_bufsz)
706 sc->sc_ndp_div = sizeof (uint32_t);
707 if (sc->sc_ndp_remainder >= sc->sc_ndp_div)
708 sc->sc_ndp_remainder = 0;
709 DPRINTF("%s: NCM align=%d div=%d rem=%d\n", DEVNAM(sc),do { } while (0)
710 sc->sc_align, sc->sc_ndp_div, sc->sc_ndp_remainder)do { } while (0);
711 sc->sc_ncm_supported_formats = UGETW(np.bmNtbFormatsSupported)(*(u_int16_t *)(np.bmNtbFormatsSupported));
712 } else {
713 sc->sc_rx_bufsz = sc->sc_tx_bufsz = 8 * 1024;
714 sc->sc_maxdgram = 0;
715 sc->sc_align = sc->sc_ndp_div = sizeof (uint32_t);
716 sc->sc_ndp_remainder = 0;
717 DPRINTF("%s: align=default div=default rem=default\n",do { } while (0)
718 DEVNAM(sc))do { } while (0);
719 sc->sc_ncm_supported_formats = NCM_FORMAT_NTB16_MASK(1U << 0x00);
720 }
721}
722
723void
724umb_ncm_setup_format(struct umb_softc *sc)
725{
726 usb_device_request_t req;
727 uWord wFmt;
728 uint16_t fmt;
729
730 assertwaitok();
731 if (sc->sc_ncm_supported_formats == 0)
732 goto fail;
733
734 /* NCM_GET_NTB_FORMAT is not allowed for 16-bit only devices. */
735 if (sc->sc_ncm_supported_formats == NCM_FORMAT_NTB16_MASK(1U << 0x00)) {
736 DPRINTF("%s: Only NTB16 format supported.\n", DEVNAM(sc))do { } while (0);
737 sc->sc_ncm_format = NCM_FORMAT_NTB160x00;
738 return;
739 }
740
741 /* Query NTB FORMAT (16 vs. 32 bit) */
742 req.bmRequestType = UT_READ_CLASS_INTERFACE(0x80 | 0x20 | 0x01);
743 req.bRequest = NCM_GET_NTB_FORMAT0x83;
744 USETW(req.wValue, 0)(*(u_int16_t *)(req.wValue) = (0));
745 USETW(req.wIndex, sc->sc_ctrl_ifaceno)(*(u_int16_t *)(req.wIndex) = (sc->sc_ctrl_ifaceno));
746 USETW(req.wLength, sizeof (wFmt))(*(u_int16_t *)(req.wLength) = (sizeof (wFmt)));
747 if (usbd_do_request(sc->sc_udev, &req, wFmt) != USBD_NORMAL_COMPLETION)
748 goto fail;
749 fmt = UGETW(wFmt)(*(u_int16_t *)(wFmt));
750 if ((sc->sc_ncm_supported_formats & (1UL << fmt)) == 0)
751 goto fail;
752 if (fmt != NCM_FORMAT_NTB160x00 && fmt != NCM_FORMAT_NTB320x01)
753 goto fail;
754 sc->sc_ncm_format = fmt;
755
756 DPRINTF("%s: Using NCM format %d, supported=0x%x\n",do { } while (0)
757 DEVNAM(sc), sc->sc_ncm_format, sc->sc_ncm_supported_formats)do { } while (0);
758 return;
759
760fail:
761 DPRINTF("%s: Cannot setup NCM format\n", DEVNAM(sc))do { } while (0);
762 sc->sc_ncm_supported_formats = 0;
763}
764
765int
766umb_alloc_xfers(struct umb_softc *sc)
767{
768 if (!sc->sc_rx_xfer) {
769 if ((sc->sc_rx_xfer = usbd_alloc_xfer(sc->sc_udev)) != NULL((void *)0))
770 sc->sc_rx_buf = usbd_alloc_buffer(sc->sc_rx_xfer,
771 sc->sc_rx_bufsz);
772 }
773 if (!sc->sc_tx_xfer) {
774 if ((sc->sc_tx_xfer = usbd_alloc_xfer(sc->sc_udev)) != NULL((void *)0))
775 sc->sc_tx_buf = usbd_alloc_buffer(sc->sc_tx_xfer,
776 sc->sc_tx_bufsz);
777 }
778 return (sc->sc_rx_buf && sc->sc_tx_buf) ? 1 : 0;
779}
780
781void
782umb_free_xfers(struct umb_softc *sc)
783{
784 if (sc->sc_rx_xfer) {
785 /* implicit usbd_free_buffer() */
786 usbd_free_xfer(sc->sc_rx_xfer);
787 sc->sc_rx_xfer = NULL((void *)0);
788 sc->sc_rx_buf = NULL((void *)0);
789 }
790 if (sc->sc_tx_xfer) {
791 usbd_free_xfer(sc->sc_tx_xfer);
792 sc->sc_tx_xfer = NULL((void *)0);
793 sc->sc_tx_buf = NULL((void *)0);
794 }
795 ml_purge(&sc->sc_tx_ml);
796}
797
798int
799umb_alloc_bulkpipes(struct umb_softc *sc)
800{
801 struct ifnet *ifp = GET_IFP(sc)(&(sc)->sc_if);
802
803 if (!(ifp->if_flags & IFF_RUNNING0x40)) {
804 if (usbd_open_pipe(sc->sc_data_iface, sc->sc_rx_ep,
805 USBD_EXCLUSIVE_USE0x01, &sc->sc_rx_pipe))
806 return 0;
807 if (usbd_open_pipe(sc->sc_data_iface, sc->sc_tx_ep,
808 USBD_EXCLUSIVE_USE0x01, &sc->sc_tx_pipe))
809 return 0;
810
811 ifp->if_flags |= IFF_RUNNING0x40;
812 ifq_clr_oactive(&ifp->if_snd);
813 umb_rx(sc);
814 }
815 return 1;
816}
817
818void
819umb_close_bulkpipes(struct umb_softc *sc)
820{
821 struct ifnet *ifp = GET_IFP(sc)(&(sc)->sc_if);
822
823 ifp->if_flags &= ~IFF_RUNNING0x40;
824 ifq_clr_oactive(&ifp->if_snd);
825 ifp->if_timer = 0;
826 if (sc->sc_rx_pipe) {
827 usbd_close_pipe(sc->sc_rx_pipe);
828 sc->sc_rx_pipe = NULL((void *)0);
829 }
830 if (sc->sc_tx_pipe) {
831 usbd_close_pipe(sc->sc_tx_pipe);
832 sc->sc_tx_pipe = NULL((void *)0);
833 }
834}
835
836int
837umb_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
838{
839 struct proc *p = curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc
;
840 struct umb_softc *sc = ifp->if_softc;
841 struct ifreq *ifr = (struct ifreq *)data;
842 int s, error = 0;
843 struct umb_parameter mp;
844
845 if (usbd_is_dying(sc->sc_udev))
846 return ENXIO6;
847
848 s = splnet()splraise(0x7);
849 switch (cmd) {
850 case SIOCSIFFLAGS((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((16)))
:
851 usb_add_task(sc->sc_udev, &sc->sc_umb_task);
852 break;
853 case SIOCGUMBINFO(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((190)))
:
854 error = copyout(&sc->sc_info, ifr->ifr_dataifr_ifru.ifru_data,
855 sizeof (sc->sc_info));
856 break;
857 case SIOCSUMBPARAM((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((191)))
:
858 if ((error = suser(p)) != 0)
859 break;
860 if ((error = copyin(ifr->ifr_dataifr_ifru.ifru_data, &mp, sizeof (mp))) != 0)
861 break;
862
863 if ((error = umb_setpin(sc, mp.op, mp.is_puk, mp.pin, mp.pinlen,
864 mp.newpin, mp.newpinlen)) != 0)
865 break;
866
867 if (mp.apnlen < 0 || mp.apnlen > sizeof (sc->sc_info.apn)) {
868 error = EINVAL22;
869 break;
870 }
871 sc->sc_roamingsc_info.enable_roaming = mp.roaming ? 1 : 0;
872 memset(sc->sc_info.apn, 0, sizeof (sc->sc_info.apn))__builtin_memset((sc->sc_info.apn), (0), (sizeof (sc->sc_info
.apn)))
;
873 memcpy(sc->sc_info.apn, mp.apn, mp.apnlen)__builtin_memcpy((sc->sc_info.apn), (mp.apn), (mp.apnlen));
874 sc->sc_info.apnlen = mp.apnlen;
875 sc->sc_info.preferredclasses = mp.preferredclasses;
876 umb_setdataclass(sc);
877 break;
878 case SIOCGUMBPARAM(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((192)))
:
879 memset(&mp, 0, sizeof (mp))__builtin_memset((&mp), (0), (sizeof (mp)));
880 memcpy(mp.apn, sc->sc_info.apn, sc->sc_info.apnlen)__builtin_memcpy((mp.apn), (sc->sc_info.apn), (sc->sc_info
.apnlen))
;
881 mp.apnlen = sc->sc_info.apnlen;
882 mp.roaming = sc->sc_roamingsc_info.enable_roaming;
883 mp.preferredclasses = sc->sc_info.preferredclasses;
884 error = copyout(&mp, ifr->ifr_dataifr_ifru.ifru_data, sizeof (mp));
885 break;
886 case SIOCSIFMTU((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((127)))
:
887 /* Does this include the NCM headers and tail? */
888 if (ifr->ifr_mtuifr_ifru.ifru_metric > ifp->if_hardmtu) {
889 error = EINVAL22;
890 break;
891 }
892 ifp->if_mtuif_data.ifi_mtu = ifr->ifr_mtuifr_ifru.ifru_metric;
893 break;
894 case SIOCSIFADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((12)))
:
895 case SIOCAIFADDR((unsigned long)0x80000000 | ((sizeof(struct ifaliasreq) &
0x1fff) << 16) | ((('i')) << 8) | ((26)))
:
896 case SIOCSIFDSTADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((14)))
:
897 case SIOCADDMULTI((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((49)))
:
898 case SIOCDELMULTI((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((50)))
:
899 break;
900 default:
901 error = ENOTTY25;
902 break;
903 }
904 splx(s)spllower(s);
905 return error;
906}
907
908int
909umb_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
910 struct rtentry *rtp)
911{
912 if ((ifp->if_flags & (IFF_UP0x1|IFF_RUNNING0x40)) != (IFF_UP0x1|IFF_RUNNING0x40)) {
913 m_freem(m);
914 return ENETDOWN50;
915 }
916 m->m_pkthdrM_dat.MH.MH_pkthdr.ph_family = dst->sa_family;
917 return if_enqueue(ifp, m);
918}
919
920void
921umb_input(struct ifnet *ifp, struct mbuf *m)
922{
923 uint32_t af;
924
925 if ((ifp->if_flags & IFF_UP0x1) == 0) {
926 m_freem(m);
927 return;
928 }
929 if (m->m_pkthdrM_dat.MH.MH_pkthdr.len < sizeof (struct ip) + sizeof(af)) {
930 ifp->if_ierrorsif_data.ifi_ierrors++;
931 DPRINTFN(4, "%s: dropping short packet (len %d)\n", __func__,do { } while (0)
932 m->m_pkthdr.len)do { } while (0);
933 m_freem(m);
934 return;
935 }
936 m->m_pkthdrM_dat.MH.MH_pkthdr.ph_rtableid = ifp->if_rdomainif_data.ifi_rdomain;
937
938 /* pop off DLT_LOOP header, no longer needed */
939 af = *mtod(m, uint32_t *)((uint32_t *)((m)->m_hdr.mh_data));
940 m_adj(m, sizeof (af));
941 af = ntohl(af)(__uint32_t)(__builtin_constant_p(af) ? (__uint32_t)(((__uint32_t
)(af) & 0xff) << 24 | ((__uint32_t)(af) & 0xff00
) << 8 | ((__uint32_t)(af) & 0xff0000) >> 8 |
((__uint32_t)(af) & 0xff000000) >> 24) : __swap32md
(af))
;
942
943 ifp->if_ibytesif_data.ifi_ibytes += m->m_pkthdrM_dat.MH.MH_pkthdr.len;
944 switch (af) {
945 case AF_INET2:
946 ipv4_input(ifp, m);
947 return;
948#ifdef INET61
949 case AF_INET624:
950 ipv6_input(ifp, m);
951 return;
952#endif /* INET6 */
953 default:
954 ifp->if_ierrorsif_data.ifi_ierrors++;
955 DPRINTFN(4, "%s: dropping packet with bad IP version (af %d)\n",do { } while (0)
956 __func__, af)do { } while (0);
957 m_freem(m);
958 return;
959 }
960}
961
962static inline int
963umb_align(size_t bufsz, int offs, int alignment, int remainder)
964{
965 size_t m = alignment - 1;
966 int align;
967
968 align = (((size_t)offs + m) & ~m) - alignment + remainder;
969 if (align < offs)
970 align += alignment;
971 if (align > bufsz)
972 align = bufsz;
973 return align - offs;
974}
975
976static inline int
977umb_padding(void *buf, size_t bufsz, int offs, int alignment, int remainder)
978{
979 int nb;
980
981 nb = umb_align(bufsz, offs, alignment, remainder);
982 if (nb > 0)
983 memset(buf + offs, 0, nb)__builtin_memset((buf + offs), (0), (nb));
984 return nb;
985}
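
umb_align() returns the number of padding bytes needed so that the next element in the NCM transmit buffer starts at an offset congruent to 'remainder' modulo 'alignment' (clamped to the end of the buffer); umb_padding() additionally zero-fills that gap. The alignment parameters come from the values the device reported in wNdpOutAlignment, wNdpOutDivisor and wNdpOutPayloadRemainder, as validated in umb_ncm_setup(). A few illustrative evaluations (buffer size and offsets chosen arbitrarily, not taken from a real transfer):

	umb_align(8192, 12, 4, 0);	/* -> 0: offset 12 is already a multiple of 4 */
	umb_align(8192, 13, 4, 0);	/* -> 3: pads up to offset 16 */
	umb_align(8192, 14, 8, 2);	/* -> 4: pads up to offset 18, and 18 % 8 == 2 */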
986
987void
988umb_start(struct ifnet *ifp)
989{
990 struct umb_softc *sc = ifp->if_softc;
991 struct mbuf *m = NULL((void *)0);
992 int ndgram = 0;
993 int offs, len, mlen;
994 int maxoverhead;
6. 'maxoverhead' declared without an initial value
995
996 if (usbd_is_dying(sc->sc_udev) ||
7. Assuming the condition is false
13. Taking false branch
997 !(ifp->if_flags & IFF_RUNNING0x40) ||
8. Assuming the condition is false
998 ifq_is_oactive(&ifp->if_snd))
9. Calling 'ifq_is_oactive'
12. Returning from 'ifq_is_oactive'
999 return;
1000
1001 KASSERT(ml_empty(&sc->sc_tx_ml))((((&sc->sc_tx_ml)->ml_len == 0)) ? (void)0 : __assert
("diagnostic ", "/usr/src/sys/dev/usb/if_umb.c", 1001, "ml_empty(&sc->sc_tx_ml)"
))
;
14. Assuming field 'ml_len' is equal to 0
15. '?' condition is true
1002
1003 switch (sc->sc_ncm_format) {
16. 'Default' branch taken. Execution continues on line 1023
1004 case NCM_FORMAT_NTB160x00:
1005 offs = sizeof (struct ncm_header16);
1006 offs += umb_align(sc->sc_tx_bufsz, offs, sc->sc_align, 0);
1007 offs += sizeof (struct ncm_pointer16);
1008 maxoverhead = sizeof (struct ncm_pointer16_dgram);
1009 break;
1010 case NCM_FORMAT_NTB320x01:
1011 offs = sizeof (struct ncm_header32);
1012 offs += umb_align(sc->sc_tx_bufsz, offs, sc->sc_align, 0);
1013 offs += sizeof (struct ncm_pointer32);
1014 maxoverhead = sizeof (struct ncm_pointer32_dgram);
1015 break;
1016 }
1017
1018 /*
1019 * Overhead for per packet alignment plus packet pointer. Note
1020 * that 'struct ncm_pointer{16,32}' already includes space for
1021 * the terminating zero pointer.
1022 */
1023 maxoverhead += sc->sc_ndp_div - 1;
17. The left expression of the compound assignment is an uninitialized value. The computed value will also be garbage
1024
1025 len = 0;
1026 while (1) {
1027 m = ifq_deq_begin(&ifp->if_snd);
1028 if (m == NULL((void *)0))
1029 break;
1030
1031 /*
1032 * Check if mbuf plus required NCM pointer still fits into
1033 * xfer buffers. Assume maximal padding.
1034 */
1035 mlen = maxoverhead + m->m_pkthdrM_dat.MH.MH_pkthdr.len;
1036 if ((sc->sc_maxdgram != 0 && ndgram >= sc->sc_maxdgram) ||
1037 (offs + len + mlen > sc->sc_tx_bufsz)) {
1038 ifq_deq_rollback(&ifp->if_snd, m);
1039 break;
1040 }
1041 ifq_deq_commit(&ifp->if_snd, m);
1042
1043 ndgram++;
1044 len += mlen;
1045 ml_enqueue(&sc->sc_tx_ml, m);
1046
1047#if NBPFILTER1 > 0
1048 if (ifp->if_bpf)
1049 bpf_mtap_af(ifp->if_bpf, m->m_pkthdrM_dat.MH.MH_pkthdr.ph_family, m,
1050 BPF_DIRECTION_OUT(1 << 1));
1051#endif
1052 }
1053 if (ml_empty(&sc->sc_tx_ml)((&sc->sc_tx_ml)->ml_len == 0))
1054 return;
1055 if (umb_encap(sc, ndgram)) {
1056 ifq_set_oactive(&ifp->if_snd);
1057 ifp->if_timer = (2 * umb_xfer_tout) / 1000;
1058 }
1059}
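
The path annotated above enters umb_start() with sc->sc_ncm_format equal to neither NCM_FORMAT_NTB16 (0x00) nor NCM_FORMAT_NTB32 (0x01), so the switch starting at line 1003 assigns neither 'offs' nor 'maxoverhead', and the '+=' at line 1023 then reads an uninitialized value. At attach time umb_ncm_setup_format() only ever stores one of those two formats, and attach fails when no supported format is found, so the garbage value should not be reachable at runtime; the report effectively flags the missing default case. A defensive variant that would make that invariant explicit and silence the analyzer (a sketch, not the change committed upstream):

	switch (sc->sc_ncm_format) {
	case NCM_FORMAT_NTB16:
		offs = sizeof (struct ncm_header16);
		offs += umb_align(sc->sc_tx_bufsz, offs, sc->sc_align, 0);
		offs += sizeof (struct ncm_pointer16);
		maxoverhead = sizeof (struct ncm_pointer16_dgram);
		break;
	case NCM_FORMAT_NTB32:
		offs = sizeof (struct ncm_header32);
		offs += umb_align(sc->sc_tx_bufsz, offs, sc->sc_align, 0);
		offs += sizeof (struct ncm_pointer32);
		maxoverhead = sizeof (struct ncm_pointer32_dgram);
		break;
	default:
		/* Unknown NTB format: nothing was assigned, give up. */
		return;
	}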
1060
1061void
1062umb_rtrequest(struct ifnet *ifp, int req, struct rtentry *rt)
1063{
1064 struct umb_softc *sc = ifp->if_softc;
1065
1066 if (req == RTM_PROPOSAL0x13) {
1067 KERNEL_LOCK()_kernel_lock();
1068 umb_send_inet_proposal(sc, AF_INET2);
1069#ifdef INET61
1070 umb_send_inet_proposal(sc, AF_INET624);
1071#endif
1072 KERNEL_UNLOCK()_kernel_unlock();
1073 return;
1074 }
1075
1076 p2p_rtrequest(ifp, req, rt);
1077}
1078
1079
1080void
1081umb_watchdog(struct ifnet *ifp)
1082{
1083 struct umb_softc *sc = ifp->if_softc;
1084
1085 if (usbd_is_dying(sc->sc_udev))
1086 return;
1087
1088 ifp->if_oerrorsif_data.ifi_oerrors++;
1089 printf("%s: watchdog timeout\n", DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname));
1090 usbd_abort_pipe(sc->sc_tx_pipe);
1091 return;
1092}
1093
1094void
1095umb_statechg_timeout(void *arg)
1096{
1097 struct umb_softc *sc = arg;
1098 struct ifnet *ifp = GET_IFP(sc)(&(sc)->sc_if);
1099
1100 if (sc->sc_info.regstate != MBIM_REGSTATE_ROAMING4 || sc->sc_roamingsc_info.enable_roaming)
1101 if (ifp->if_flags & IFF_DEBUG0x4)
1102 log(LOG_DEBUG7, "%s: state change timeout\n",
1103 DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname));
1104 usb_add_task(sc->sc_udev, &sc->sc_umb_task);
1105}
1106
1107void
1108umb_newstate(struct umb_softc *sc, enum umb_state newstate, int flags)
1109{
1110 struct ifnet *ifp = GET_IFP(sc)(&(sc)->sc_if);
1111
1112 if (newstate == sc->sc_statesc_info.state)
1113 return;
1114 if (((flags & UMB_NS_DONT_DROP0x0001) && newstate < sc->sc_statesc_info.state) ||
1115 ((flags & UMB_NS_DONT_RAISE0x0002) && newstate > sc->sc_statesc_info.state))
1116 return;
1117 if (ifp->if_flags & IFF_DEBUG0x4)
1118 log(LOG_DEBUG7, "%s: state going %s from '%s' to '%s'\n",
1119 DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname), newstate > sc->sc_statesc_info.state ? "up" : "down",
1120 umb_istate(sc->sc_state)umb_val2descr(umb_istate, (sc->sc_info.state)), umb_istate(newstate)umb_val2descr(umb_istate, (newstate)));
1121 sc->sc_statesc_info.state = newstate;
1122 usb_add_task(sc->sc_udev, &sc->sc_umb_task);
1123}
1124
1125void
1126umb_state_task(void *arg)
1127{
1128 struct umb_softc *sc = arg;
1129 struct ifnet *ifp = GET_IFP(sc)(&(sc)->sc_if);
1130 int s;
1131 int state;
1132
1133 if (sc->sc_info.regstate == MBIM_REGSTATE_ROAMING4 && !sc->sc_roamingsc_info.enable_roaming) {
1134 /*
1135 * Query the registration state until we're with the home
1136 * network again.
1137 */
1138 umb_cmd(sc, MBIM_CID_REGISTER_STATE9, MBIM_CMDOP_QRY0, NULL((void *)0), 0);
1139 return;
1140 }
1141
1142 s = splnet()splraise(0x7);
1143 if (ifp->if_flags & IFF_UP0x1)
1144 umb_up(sc);
1145 else
1146 umb_down(sc, 0);
1147
1148 state = sc->sc_statesc_info.state == UMB_S_UP ? LINK_STATE_UP4 : LINK_STATE_DOWN2;
1149 if (ifp->if_link_stateif_data.ifi_link_state != state) {
1150 if (ifp->if_flags & IFF_DEBUG0x4)
1151 log(LOG_DEBUG7, "%s: link state changed from %s to %s\n",
1152 DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname),
1153 LINK_STATE_IS_UP(ifp->if_link_state)((ifp->if_data.ifi_link_state) >= 4 || (ifp->if_data
.ifi_link_state) == 0)
1154 ? "up" : "down",
1155 LINK_STATE_IS_UP(state)((state) >= 4 || (state) == 0) ? "up" : "down");
1156 ifp->if_link_stateif_data.ifi_link_state = state;
1157 if_link_state_change(ifp);
1158 }
1159 splx(s)spllower(s);
1160}
1161
1162void
1163umb_up(struct umb_softc *sc)
1164{
1165 splassert(IPL_NET)do { if (splassert_ctl > 0) { splassert_check(0x7, __func__
); } } while (0)
;
1166
1167 switch (sc->sc_statesc_info.state) {
1168 case UMB_S_DOWN:
1169 DPRINTF("%s: init: opening ...\n", DEVNAM(sc))do { } while (0);
1170 umb_open(sc);
1171 break;
1172 case UMB_S_OPEN:
1173 if (sc->sc_flags & UMBFLG_FCC_AUTH_REQUIRED0x0001) {
1174 if (sc->sc_cid == -1) {
1175 DPRINTF("%s: init: allocating CID ...\n",do { } while (0)
1176 DEVNAM(sc))do { } while (0);
1177 umb_allocate_cid(sc);
1178 break;
1179 } else
1180 umb_newstate(sc, UMB_S_CID, UMB_NS_DONT_DROP0x0001);
1181 } else {
1182 DPRINTF("%s: init: turning radio on ...\n", DEVNAM(sc))do { } while (0);
1183 umb_radio(sc, 1);
1184 break;
1185 }
1186 /*FALLTHROUGH*/
1187 case UMB_S_CID:
1188 DPRINTF("%s: init: sending FCC auth ...\n", DEVNAM(sc))do { } while (0);
1189 umb_send_fcc_auth(sc);
1190 break;
1191 case UMB_S_RADIO:
1192 DPRINTF("%s: init: checking SIM state ...\n", DEVNAM(sc))do { } while (0);
1193 umb_cmd(sc, MBIM_CID_SUBSCRIBER_READY_STATUS2, MBIM_CMDOP_QRY0,
1194 NULL((void *)0), 0);
1195 break;
1196 case UMB_S_SIMREADY:
1197 DPRINTF("%s: init: attaching ...\n", DEVNAM(sc))do { } while (0);
1198 umb_packet_service(sc, 1);
1199 break;
1200 case UMB_S_ATTACHED:
1201 sc->sc_tx_seq = 0;
1202 if (!umb_alloc_xfers(sc)) {
1203 umb_free_xfers(sc);
1204 printf("%s: allocation of xfers failed\n", DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname));
1205 break;
1206 }
1207 DPRINTF("%s: init: connecting ...\n", DEVNAM(sc))do { } while (0);
1208 umb_connect(sc);
1209 break;
1210 case UMB_S_CONNECTED:
1211 DPRINTF("%s: init: getting IP config ...\n", DEVNAM(sc))do { } while (0);
1212 umb_qry_ipconfig(sc);
1213 break;
1214 case UMB_S_UP:
1215 DPRINTF("%s: init: reached state UP\n", DEVNAM(sc))do { } while (0);
1216 if (!umb_alloc_bulkpipes(sc)) {
1217 printf("%s: opening bulk pipes failed\n", DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname));
1218 umb_down(sc, 1);
1219 }
1220 break;
1221 }
1222 if (sc->sc_statesc_info.state < UMB_S_UP)
1223 timeout_add_sec(&sc->sc_statechg_timer,
1224 UMB_STATE_CHANGE_TIMEOUT30);
1225 else
1226 timeout_del(&sc->sc_statechg_timer);
1227 return;
1228}
1229
1230void
1231umb_down(struct umb_softc *sc, int force)
1232{
1233 splassert(IPL_NET)do { if (splassert_ctl > 0) { splassert_check(0x7, __func__
); } } while (0)
;
1234
1235 umb_close_bulkpipes(sc);
1236 if (sc->sc_statesc_info.state < UMB_S_CONNECTED)
1237 umb_free_xfers(sc);
1238
1239 switch (sc->sc_statesc_info.state) {
1240 case UMB_S_UP:
1241 umb_clear_addr(sc);
1242 /*FALLTHROUGH*/
1243 case UMB_S_CONNECTED:
1244 DPRINTF("%s: stop: disconnecting ...\n", DEVNAM(sc))do { } while (0);
1245 umb_disconnect(sc);
1246 if (!force)
1247 break;
1248 /*FALLTHROUGH*/
1249 case UMB_S_ATTACHED:
1250 DPRINTF("%s: stop: detaching ...\n", DEVNAM(sc))do { } while (0);
1251 umb_packet_service(sc, 0);
1252 if (!force)
1253 break;
1254 /*FALLTHROUGH*/
1255 case UMB_S_SIMREADY:
1256 case UMB_S_RADIO:
1257 DPRINTF("%s: stop: turning radio off ...\n", DEVNAM(sc))do { } while (0);
1258 umb_radio(sc, 0);
1259 if (!force)
1260 break;
1261 /*FALLTHROUGH*/
1262 case UMB_S_CID:
1263 case UMB_S_OPEN:
1264 case UMB_S_DOWN:
1265 /* Do not close the device */
1266 DPRINTF("%s: stop: reached state DOWN\n", DEVNAM(sc))do { } while (0);
1267 break;
1268 }
1269 if (force)
1270 sc->sc_statesc_info.state = UMB_S_OPEN;
1271
1272 if (sc->sc_statesc_info.state > UMB_S_OPEN)
1273 timeout_add_sec(&sc->sc_statechg_timer,
1274 UMB_STATE_CHANGE_TIMEOUT30);
1275 else
1276 timeout_del(&sc->sc_statechg_timer);
1277}
1278
1279void
1280umb_get_response_task(void *arg)
1281{
1282 struct umb_softc *sc = arg;
1283 int len;
1284 int s;
1285
1286 /*
1287 * Function is required to send one RESPONSE_AVAILABLE notification for
1288 * each encapsulated response that is to be processed by the host.
1289 * But of course, we can receive multiple notifications before the
1290 * response task is run.
1291 */
1292 s = splusb()splraise(0x5);
1293 while (sc->sc_nresp > 0) {
1294 --sc->sc_nresp;
1295 len = sc->sc_ctrl_len;
1296 if (umb_get_encap_response(sc, sc->sc_resp_buf, &len))
1297 umb_decode_response(sc, sc->sc_resp_buf, len);
1298 }
1299 splx(s)spllower(s);
1300}
1301
1302void
1303umb_decode_response(struct umb_softc *sc, void *response, int len)
1304{
1305 struct mbim_msghdr *hdr = response;
1306 struct mbim_fragmented_msg_hdr *fraghdr;
1307 uint32_t type;
1308 uint32_t tid;
1309
1310 DPRINTFN(3, "%s: got response: len %d\n", DEVNAM(sc), len)do { } while (0);
1311 DDUMPN(4, response, len)do { } while (0);
1312
1313 if (len < sizeof (*hdr) || letoh32(hdr->len)((__uint32_t)(hdr->len)) != len) {
1314 /*
1315 * We should probably cancel a transaction, but since the
1316 * message is too short, we cannot decode the transaction
1317	 * id (tid) and hence don't know which one to cancel. Must wait
1318 * for the timeout.
1319 */
1320 DPRINTF("%s: received short response (len %d)\n",do { } while (0)
1321 DEVNAM(sc), len)do { } while (0);
1322 return;
1323 }
1324
1325 /*
1326 * XXX FIXME: if message is fragmented, store it until last frag
1327 * is received and then re-assemble all fragments.
1328 */
1329 type = letoh32(hdr->type)((__uint32_t)(hdr->type));
1330 tid = letoh32(hdr->tid)((__uint32_t)(hdr->tid));
1331 switch (type) {
1332 case MBIM_INDICATE_STATUS_MSG0x80000007U:
1333 case MBIM_COMMAND_DONE0x80000003U:
1334 fraghdr = response;
1335 if (letoh32(fraghdr->frag.nfrag)((__uint32_t)(fraghdr->frag.nfrag)) != 1) {
1336 DPRINTF("%s: discarding fragmented messages\n",do { } while (0)
1337 DEVNAM(sc))do { } while (0);
1338 return;
1339 }
1340 break;
1341 default:
1342 break;
1343 }
1344
1345 DPRINTF("%s: <- rcv %s (tid %u)\n", DEVNAM(sc), umb_request2str(type),do { } while (0)
1346 tid)do { } while (0);
1347 switch (type) {
1348 case MBIM_FUNCTION_ERROR_MSG0x80000004U:
1349 case MBIM_HOST_ERROR_MSG4U:
1350 {
1351 struct mbim_f2h_hosterr *e;
1352 int err;
1353
1354 if (len >= sizeof (*e)) {
1355 e = response;
1356 err = letoh32(e->err)((__uint32_t)(e->err));
1357
1358 DPRINTF("%s: %s message, error %s (tid %u)\n",do { } while (0)
1359 DEVNAM(sc), umb_request2str(type),do { } while (0)
1360 umb_error2str(err), tid)do { } while (0);
1361 if (err == MBIM_ERROR_NOT_OPENED5)
1362 umb_newstate(sc, UMB_S_DOWN, 0);
1363 }
1364 break;
1365 }
1366 case MBIM_INDICATE_STATUS_MSG0x80000007U:
1367 umb_handle_indicate_status_msg(sc, response, len);
1368 break;
1369 case MBIM_OPEN_DONE0x80000001U:
1370 umb_handle_opendone_msg(sc, response, len);
1371 break;
1372 case MBIM_CLOSE_DONE0x80000002U:
1373 umb_handle_closedone_msg(sc, response, len);
1374 break;
1375 case MBIM_COMMAND_DONE0x80000003U:
1376 umb_command_done(sc, response, len);
1377 break;
1378 default:
1379 DPRINTF("%s: discard message %s\n", DEVNAM(sc),do { } while (0)
1380 umb_request2str(type))do { } while (0);
1381 break;
1382 }
1383}
1384
1385void
1386umb_handle_indicate_status_msg(struct umb_softc *sc, void *data, int len)
1387{
1388 struct mbim_f2h_indicate_status *m = data;
1389 uint32_t infolen;
1390 uint32_t cid;
1391
1392 if (len < sizeof (*m)) {
1393 DPRINTF("%s: discard short %s message\n", DEVNAM(sc),do { } while (0)
1394 umb_request2str(letoh32(m->hdr.type)))do { } while (0);
1395 return;
1396 }
1397	if (memcmp(m->devid, umb_uuid_basic_connect, sizeof (m->devid))) {
1398 DPRINTF("%s: discard %s message for other UUID '%s'\n",do { } while (0)
1399 DEVNAM(sc), umb_request2str(letoh32(m->hdr.type)),do { } while (0)
1400 umb_uuid2str(m->devid))do { } while (0);
1401 return;
1402 }
1403 infolen = letoh32(m->infolen)((__uint32_t)(m->infolen));
1404 if (len < sizeof (*m) + infolen) {
1405 DPRINTF("%s: discard truncated %s message (want %d, got %d)\n",do { } while (0)
1406 DEVNAM(sc), umb_request2str(letoh32(m->hdr.type)),do { } while (0)
1407 (int)sizeof (*m) + infolen, len)do { } while (0);
1408 return;
1409 }
1410
1411 cid = letoh32(m->cid)((__uint32_t)(m->cid));
1412 DPRINTF("%s: indicate %s status\n", DEVNAM(sc), umb_cid2str(cid))do { } while (0);
1413 umb_decode_cid(sc, cid, m->info, infolen);
1414}
1415
1416void
1417umb_handle_opendone_msg(struct umb_softc *sc, void *data, int len)
1418{
1419 struct mbim_f2h_openclosedone *resp = data;
1420 struct ifnet *ifp = GET_IFP(sc)(&(sc)->sc_if);
1421 uint32_t status;
1422
1423 status = letoh32(resp->status)((__uint32_t)(resp->status));
1424 if (status == MBIM_STATUS_SUCCESS0) {
1425 if (sc->sc_maxsessions == 0) {
1426 umb_cmd(sc, MBIM_CID_DEVICE_CAPS1, MBIM_CMDOP_QRY0, NULL((void *)0),
1427 0);
1428 umb_cmd(sc, MBIM_CID_PIN4, MBIM_CMDOP_QRY0, NULL((void *)0), 0);
1429 umb_cmd(sc, MBIM_CID_REGISTER_STATE9, MBIM_CMDOP_QRY0,
1430 NULL((void *)0), 0);
1431 }
1432 umb_newstate(sc, UMB_S_OPEN, UMB_NS_DONT_DROP0x0001);
1433 } else if (ifp->if_flags & IFF_DEBUG0x4)
1434 log(LOG_ERR3, "%s: open error: %s\n", DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname),
1435 umb_status2str(status)umb_val2descr(umb_status, (status)));
1436 return;
1437}
1438
1439void
1440umb_handle_closedone_msg(struct umb_softc *sc, void *data, int len)
1441{
1442 struct mbim_f2h_openclosedone *resp = data;
1443 uint32_t status;
1444
1445 status = letoh32(resp->status)((__uint32_t)(resp->status));
1446 if (status == MBIM_STATUS_SUCCESS0)
1447 umb_newstate(sc, UMB_S_DOWN, 0);
1448 else
1449 DPRINTF("%s: close error: %s\n", DEVNAM(sc),do { } while (0)
1450 umb_status2str(status))do { } while (0);
1451 return;
1452}
1453
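/*
 * Helper for the MBIM "databuffer" encoding: variable-length fields are
 * described by a little-endian (offset, size) pair relative to the start of
 * the message.  umb_getinfobuf() copies such a field into a fixed-size,
 * zeroed destination, but only if the field lies entirely within the
 * received message.
 */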
1454static inline void
1455umb_getinfobuf(void *in, int inlen, uint32_t offs, uint32_t sz,
1456 void *out, size_t outlen)
1457{
1458 offs = letoh32(offs)((__uint32_t)(offs));
1459 sz = letoh32(sz)((__uint32_t)(sz));
1460 if (inlen >= offs + sz) {
1461 memset(out, 0, outlen)__builtin_memset((out), (0), (outlen));
1462	memcpy(out, in + offs, MIN(sz, outlen));
1463 }
1464}
1465
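/*
 * umb_addstr() is the encoding counterpart: it appends a string to the
 * message buffer at *offs, records the (offset, size) pair in the
 * corresponding header members, and advances *offs, padding it to a 32-bit
 * boundary via umb_padding().
 */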
1466static inline int
1467umb_addstr(void *buf, size_t bufsz, int *offs, void *str, int slen,
1468 uint32_t *offsmember, uint32_t *sizemember)
1469{
1470 if (*offs + slen > bufsz)
1471 return 0;
1472
1473 *sizemember = htole32((uint32_t)slen)((__uint32_t)((uint32_t)slen));
1474 if (slen && str) {
1475 *offsmember = htole32((uint32_t)*offs)((__uint32_t)((uint32_t)*offs));
1476 memcpy(buf + *offs, str, slen)__builtin_memcpy((buf + *offs), (str), (slen));
1477 *offs += slen;
1478 *offs += umb_padding(buf, bufsz, *offs, sizeof (uint32_t), 0);
1479 } else
1480 *offsmember = htole32(0)((__uint32_t)(0));
1481 return 1;
1482}
1483
1484int
1485umb_decode_register_state(struct umb_softc *sc, void *data, int len)
1486{
1487 struct mbim_cid_registration_state_info *rs = data;
1488 struct ifnet *ifp = GET_IFP(sc)(&(sc)->sc_if);
1489
1490 if (len < sizeof (*rs))
1491 return 0;
1492 sc->sc_info.nwerror = letoh32(rs->nwerror)((__uint32_t)(rs->nwerror));
1493 sc->sc_info.regstate = letoh32(rs->regstate)((__uint32_t)(rs->regstate));
1494 sc->sc_info.regmode = letoh32(rs->regmode)((__uint32_t)(rs->regmode));
1495 sc->sc_info.cellclass = letoh32(rs->curcellclass)((__uint32_t)(rs->curcellclass));
1496
1497 umb_getinfobuf(data, len, rs->provname_offs, rs->provname_size,
1498 sc->sc_info.provider, sizeof (sc->sc_info.provider));
1499 umb_getinfobuf(data, len, rs->provid_offs, rs->provid_size,
1500 sc->sc_info.providerid, sizeof (sc->sc_info.providerid));
1501 umb_getinfobuf(data, len, rs->roamingtxt_offs, rs->roamingtxt_size,
1502 sc->sc_info.roamingtxt, sizeof (sc->sc_info.roamingtxt));
1503
1504 DPRINTFN(2, "%s: %s, availclass 0x%x, class 0x%x, regmode %d\n",do { } while (0)
1505 DEVNAM(sc), umb_regstate(sc->sc_info.regstate),do { } while (0)
1506 letoh32(rs->availclasses), sc->sc_info.cellclass,do { } while (0)
1507 sc->sc_info.regmode)do { } while (0);
1508
1509 if (sc->sc_info.regstate == MBIM_REGSTATE_ROAMING4 &&
1510 !sc->sc_roamingsc_info.enable_roaming &&
1511 sc->sc_info.activation == MBIM_ACTIVATION_STATE_ACTIVATED1) {
1512 if (ifp->if_flags & IFF_DEBUG0x4)
1513 log(LOG_INFO6,
1514 "%s: disconnecting from roaming network\n",
1515 DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname));
1516 umb_disconnect(sc);
1517 }
1518 return 1;
1519}
1520
1521int
1522umb_decode_devices_caps(struct umb_softc *sc, void *data, int len)
1523{
1524 struct mbim_cid_device_caps *dc = data;
1525
1526 if (len < sizeof (*dc))
1527 return 0;
1528 sc->sc_maxsessions = letoh32(dc->max_sessions)((__uint32_t)(dc->max_sessions));
1529 sc->sc_info.supportedclasses = letoh32(dc->dataclass)((__uint32_t)(dc->dataclass));
1530 umb_getinfobuf(data, len, dc->devid_offs, dc->devid_size,
1531 sc->sc_info.devid, sizeof (sc->sc_info.devid));
1532 umb_getinfobuf(data, len, dc->fwinfo_offs, dc->fwinfo_size,
1533 sc->sc_info.fwinfo, sizeof (sc->sc_info.fwinfo));
1534 umb_getinfobuf(data, len, dc->hwinfo_offs, dc->hwinfo_size,
1535 sc->sc_info.hwinfo, sizeof (sc->sc_info.hwinfo));
1536 DPRINTFN(2, "%s: max sessions %d, supported classes 0x%x\n",do { } while (0)
1537 DEVNAM(sc), sc->sc_maxsessions, sc->sc_info.supportedclasses)do { } while (0);
1538 return 1;
1539}
1540
1541int
1542umb_decode_subscriber_status(struct umb_softc *sc, void *data, int len)
1543{
1544 struct mbim_cid_subscriber_ready_info *si = data;
1545 struct ifnet *ifp = GET_IFP(sc)(&(sc)->sc_if);
1546 int npn;
1547
1548 if (len < sizeof (*si))
1549 return 0;
1550 sc->sc_info.sim_state = letoh32(si->ready)((__uint32_t)(si->ready));
1551
1552 umb_getinfobuf(data, len, si->sid_offs, si->sid_size,
1553 sc->sc_info.sid, sizeof (sc->sc_info.sid));
1554 umb_getinfobuf(data, len, si->icc_offs, si->icc_size,
1555 sc->sc_info.iccid, sizeof (sc->sc_info.iccid));
1556
1557 npn = letoh32(si->no_pn)((__uint32_t)(si->no_pn));
1558 if (npn > 0)
1559 umb_getinfobuf(data, len, si->pn[0].offs, si->pn[0].size,
1560 sc->sc_info.pn, sizeof (sc->sc_info.pn));
1561 else
1562	memset(sc->sc_info.pn, 0, sizeof (sc->sc_info.pn));
1563
1564 if (sc->sc_info.sim_state == MBIM_SIMSTATE_LOCKED6)
1565 sc->sc_info.pin_state = UMB_PUK_REQUIRED2;
1566 if (ifp->if_flags & IFF_DEBUG0x4)
1567 log(LOG_INFO6, "%s: SIM %s\n", DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname),
1568 umb_simstate(sc->sc_info.sim_state)umb_val2descr(umb_simstate, (sc->sc_info.sim_state)));
1569 if (sc->sc_info.sim_state == MBIM_SIMSTATE_INITIALIZED1)
1570 umb_newstate(sc, UMB_S_SIMREADY, UMB_NS_DONT_DROP0x0001);
1571 return 1;
1572}
1573
1574int
1575umb_decode_radio_state(struct umb_softc *sc, void *data, int len)
1576{
1577 struct mbim_cid_radio_state_info *rs = data;
1578 struct ifnet *ifp = GET_IFP(sc)(&(sc)->sc_if);
1579
1580 if (len < sizeof (*rs))
1581 return 0;
1582
1583 sc->sc_info.hw_radio_on =
1584 (letoh32(rs->hw_state)((__uint32_t)(rs->hw_state)) == MBIM_RADIO_STATE_ON1) ? 1 : 0;
1585 sc->sc_info.sw_radio_on =
1586 (letoh32(rs->sw_state)((__uint32_t)(rs->sw_state)) == MBIM_RADIO_STATE_ON1) ? 1 : 0;
1587 if (!sc->sc_info.hw_radio_on) {
1588 printf("%s: radio is disabled by hardware switch\n",
1589 DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname));
1590 /*
1591	 * XXX do we need a timer to poll the state of the rfkill switch
1592 * or will the device send an unsolicited notification
1593 * in case the state changes?
1594 */
1595 umb_newstate(sc, UMB_S_OPEN, 0);
1596 } else if (!sc->sc_info.sw_radio_on) {
1597 if (ifp->if_flags & IFF_DEBUG0x4)
1598 log(LOG_INFO6, "%s: radio is off\n", DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname));
1599 umb_newstate(sc, UMB_S_OPEN, 0);
1600 } else
1601 umb_newstate(sc, UMB_S_RADIO, UMB_NS_DONT_DROP0x0001);
1602 return 1;
1603}
1604
1605int
1606umb_decode_pin(struct umb_softc *sc, void *data, int len)
1607{
1608 struct mbim_cid_pin_info *pi = data;
1609 struct ifnet *ifp = GET_IFP(sc)(&(sc)->sc_if);
1610 uint32_t attempts_left;
1611
1612 if (len < sizeof (*pi))
1613 return 0;
1614
1615 attempts_left = letoh32(pi->remaining_attempts)((__uint32_t)(pi->remaining_attempts));
1616 if (attempts_left != 0xffffffff)
1617 sc->sc_info.pin_attempts_left = attempts_left;
1618
1619 switch (letoh32(pi->state)((__uint32_t)(pi->state))) {
1620 case MBIM_PIN_STATE_UNLOCKED0:
1621 sc->sc_info.pin_state = UMB_PIN_UNLOCKED1;
1622 break;
1623 case MBIM_PIN_STATE_LOCKED1:
1624 switch (letoh32(pi->type)((__uint32_t)(pi->type))) {
1625 case MBIM_PIN_TYPE_PIN12:
1626 sc->sc_info.pin_state = UMB_PIN_REQUIRED0;
1627 break;
1628 case MBIM_PIN_TYPE_PUK111:
1629 sc->sc_info.pin_state = UMB_PUK_REQUIRED2;
1630 break;
1631 case MBIM_PIN_TYPE_PIN23:
1632 case MBIM_PIN_TYPE_PUK212:
1633 /* Assume that PIN1 was accepted */
1634 sc->sc_info.pin_state = UMB_PIN_UNLOCKED1;
1635 break;
1636 }
1637 break;
1638 }
1639 if (ifp->if_flags & IFF_DEBUG0x4)
1640 log(LOG_INFO6, "%s: %s state %s (%d attempts left)\n",
1641 DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname), umb_pin_type(letoh32(pi->type))umb_val2descr(umb_pintype, (((__uint32_t)(pi->type)))),
1642 (letoh32(pi->state)((__uint32_t)(pi->state)) == MBIM_PIN_STATE_UNLOCKED0) ?
1643 "unlocked" : "locked",
1644 letoh32(pi->remaining_attempts)((__uint32_t)(pi->remaining_attempts)));
1645
1646 /*
1647 * In case the PIN was set after IFF_UP, retrigger the state machine
1648 */
1649 usb_add_task(sc->sc_udev, &sc->sc_umb_task);
1650 return 1;
1651}
1652
1653int
1654umb_decode_packet_service(struct umb_softc *sc, void *data, int len)
1655{
1656 struct mbim_cid_packet_service_info *psi = data;
1657 int state, highestclass;
1658 uint64_t up_speed, down_speed;
1659 struct ifnet *ifp = GET_IFP(sc)(&(sc)->sc_if);
1660
1661 if (len < sizeof (*psi))
1662 return 0;
1663
1664 sc->sc_info.nwerror = letoh32(psi->nwerror)((__uint32_t)(psi->nwerror));
1665 state = letoh32(psi->state)((__uint32_t)(psi->state));
1666 highestclass = letoh32(psi->highest_dataclass)((__uint32_t)(psi->highest_dataclass));
1667 up_speed = letoh64(psi->uplink_speed)((__uint64_t)(psi->uplink_speed));
1668 down_speed = letoh64(psi->downlink_speed)((__uint64_t)(psi->downlink_speed));
1669 if (sc->sc_info.packetstate != state ||
1670 sc->sc_info.uplink_speed != up_speed ||
1671 sc->sc_info.downlink_speed != down_speed) {
1672 if (ifp->if_flags & IFF_DEBUG0x4) {
1673 log(LOG_INFO6, "%s: packet service ", DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname));
1674 if (sc->sc_info.packetstate != state)
1675 addlog("changed from %s to ",
1676 umb_packet_state(sc->sc_info.packetstate)umb_val2descr(umb_pktstate, (sc->sc_info.packetstate)));
1677 addlog("%s, class %s, speed: %llu up / %llu down\n",
1678 umb_packet_state(state)umb_val2descr(umb_pktstate, (state)),
1679 umb_dataclass(highestclass)umb_val2descr(umb_dataclasses, (highestclass)), up_speed, down_speed);
1680 }
1681 }
1682 sc->sc_info.packetstate = state;
1683 sc->sc_info.highestclass = highestclass;
1684 sc->sc_info.uplink_speed = up_speed;
1685 sc->sc_info.downlink_speed = down_speed;
1686
1687 if (sc->sc_info.regmode == MBIM_REGMODE_AUTOMATIC1) {
1688 /*
1689	 * For devices using automatic registration mode, just proceed
1690 * once registration has completed.
1691 */
1692 if (ifp->if_flags & IFF_UP0x1) {
1693 switch (sc->sc_info.regstate) {
1694 case MBIM_REGSTATE_HOME3:
1695 case MBIM_REGSTATE_ROAMING4:
1696 case MBIM_REGSTATE_PARTNER5:
1697 umb_newstate(sc, UMB_S_ATTACHED,
1698 UMB_NS_DONT_DROP0x0001);
1699 break;
1700 default:
1701 break;
1702 }
1703 } else
1704 umb_newstate(sc, UMB_S_SIMREADY, UMB_NS_DONT_RAISE0x0002);
1705 } else switch (sc->sc_info.packetstate) {
1706 case MBIM_PKTSERVICE_STATE_ATTACHED2:
1707 umb_newstate(sc, UMB_S_ATTACHED, UMB_NS_DONT_DROP0x0001);
1708 break;
1709 case MBIM_PKTSERVICE_STATE_DETACHED4:
1710 umb_newstate(sc, UMB_S_SIMREADY, UMB_NS_DONT_RAISE0x0002);
1711 break;
1712 }
1713 return 1;
1714}
1715
1716int
1717umb_decode_signal_state(struct umb_softc *sc, void *data, int len)
1718{
1719 struct mbim_cid_signal_state *ss = data;
1720 struct ifnet *ifp = GET_IFP(sc)(&(sc)->sc_if);
1721 int rssi;
1722
1723 if (len < sizeof (*ss))
1724 return 0;
1725
1726 if (letoh32(ss->rssi)((__uint32_t)(ss->rssi)) == 99)
1727 rssi = UMB_VALUE_UNKNOWN-999;
1728 else {
1729 rssi = -113 + 2 * letoh32(ss->rssi)((__uint32_t)(ss->rssi));
1730 if ((ifp->if_flags & IFF_DEBUG0x4) && sc->sc_info.rssi != rssi &&
1731 sc->sc_statesc_info.state >= UMB_S_CONNECTED)
1732 log(LOG_INFO6, "%s: rssi %d dBm\n", DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname), rssi);
1733 }
1734 sc->sc_info.rssi = rssi;
1735 sc->sc_info.ber = letoh32(ss->err_rate)((__uint32_t)(ss->err_rate));
1736 if (sc->sc_info.ber == -99)
1737 sc->sc_info.ber = UMB_VALUE_UNKNOWN-999;
1738 return 1;
1739}
1740
1741int
1742umb_decode_connect_info(struct umb_softc *sc, void *data, int len)
1743{
1744 struct mbim_cid_connect_info *ci = data;
1745 struct ifnet *ifp = GET_IFP(sc)(&(sc)->sc_if);
1746 int act;
1747
1748 if (len < sizeof (*ci))
1749 return 0;
1750
1751 if (letoh32(ci->sessionid)((__uint32_t)(ci->sessionid)) != umb_session_id) {
1752 DPRINTF("%s: discard connection info for session %u\n",do { } while (0)
1753 DEVNAM(sc), letoh32(ci->sessionid))do { } while (0);
1754 return 1;
1755 }
1756	if (memcmp(ci->context, umb_uuid_context_internet,
1757	    sizeof (ci->context))) {
1758 DPRINTF("%s: discard connection info for other context\n",do { } while (0)
1759 DEVNAM(sc))do { } while (0);
1760 return 1;
1761 }
1762 act = letoh32(ci->activation)((__uint32_t)(ci->activation));
1763 if (sc->sc_info.activation != act) {
1764 if (ifp->if_flags & IFF_DEBUG0x4)
1765 log(LOG_INFO6, "%s: connection %s\n", DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname),
1766 umb_activation(act)umb_val2descr(umb_actstate, (act)));
1767
1768 sc->sc_info.activation = act;
1769 sc->sc_info.nwerror = letoh32(ci->nwerror)((__uint32_t)(ci->nwerror));
1770
1771 if (sc->sc_info.activation == MBIM_ACTIVATION_STATE_ACTIVATED1)
1772 umb_newstate(sc, UMB_S_CONNECTED, UMB_NS_DONT_DROP0x0001);
1773 else if (sc->sc_info.activation ==
1774 MBIM_ACTIVATION_STATE_DEACTIVATED3)
1775 umb_newstate(sc, UMB_S_ATTACHED, 0);
1776 /* else: other states are purely transitional */
1777 }
1778 return 1;
1779}
1780
1781void
1782umb_clear_addr(struct umb_softc *sc)
1783{
1784 struct ifnet *ifp = GET_IFP(sc)(&(sc)->sc_if);
1785
1786	memset(sc->sc_info.ipv4dns, 0, sizeof (sc->sc_info.ipv4dns));
1787	memset(sc->sc_info.ipv6dns, 0, sizeof (sc->sc_info.ipv6dns));
1788 umb_send_inet_proposal(sc, AF_INET2);
1789#ifdef INET61
1790 umb_send_inet_proposal(sc, AF_INET624);
1791#endif
1792 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
1793 in_ifdetach(ifp);
1794#ifdef INET61
1795 in6_ifdetach(ifp);
1796#endif
1797 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
1798}
1799
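/*
 * umb_add_inet_config() applies the IPv4 configuration learned from the
 * device: it sets the interface address, netmask and destination via
 * in_ioctl(SIOCAIFADDR, ...), then installs a default route (RTAX_DST and
 * RTAX_NETMASK both 0.0.0.0) through the reported gateway with
 * rtrequest(RTM_ADD, ...), and informs routing-socket listeners via
 * rtm_send() or rtm_miss().
 */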
1800int
1801umb_add_inet_config(struct umb_softc *sc, struct in_addr ip, u_int prefixlen,
1802 struct in_addr gw)
1803{
1804 struct ifnet *ifp = GET_IFP(sc)(&(sc)->sc_if);
1805 struct in_aliasreq ifra;
1806 struct sockaddr_in *sin, default_sin;
1807 struct rt_addrinfo info;
1808 struct rtentry *rt;
1809 int rv;
1810
1811 memset(&ifra, 0, sizeof (ifra))__builtin_memset((&ifra), (0), (sizeof (ifra)));
1812 sin = &ifra.ifra_addrifra_ifrau.ifrau_addr;
1813 sin->sin_family = AF_INET2;
1814 sin->sin_len = sizeof (*sin);
1815 sin->sin_addr = ip;
1816
1817 sin = &ifra.ifra_dstaddr;
1818 sin->sin_family = AF_INET2;
1819 sin->sin_len = sizeof (*sin);
1820 sin->sin_addr = gw;
1821
1822 sin = &ifra.ifra_mask;
1823 sin->sin_family = AF_INET2;
1824 sin->sin_len = sizeof (*sin);
1825 in_len2mask(&sin->sin_addr, prefixlen);
1826
1827	rv = in_ioctl(SIOCAIFADDR, (caddr_t)&ifra, ifp, 1);
1828 if (rv != 0) {
1829 printf("%s: unable to set IPv4 address, error %d\n",
1830	    DEVNAM(ifp->if_softc), rv);
1831 return rv;
1832 }
1833
1834	memset(&default_sin, 0, sizeof(default_sin));
1835 default_sin.sin_family = AF_INET2;
1836 default_sin.sin_len = sizeof (default_sin);
1837
1838 memset(&info, 0, sizeof(info))__builtin_memset((&info), (0), (sizeof(info)));
1839 info.rti_flags = RTF_GATEWAY0x2 /* maybe | RTF_STATIC */;
1840 info.rti_ifa = ifa_ifwithaddr(sintosa(&ifra.ifra_addrifra_ifrau.ifrau_addr),
1841 ifp->if_rdomainif_data.ifi_rdomain);
1842 info.rti_info[RTAX_DST0] = sintosa(&default_sin);
1843 info.rti_info[RTAX_NETMASK2] = sintosa(&default_sin);
1844 info.rti_info[RTAX_GATEWAY1] = sintosa(&ifra.ifra_dstaddr);
1845
1846 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
1847 rv = rtrequest(RTM_ADD0x1, &info, 0, &rt, ifp->if_rdomainif_data.ifi_rdomain);
1848 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
1849 if (rv) {
1850 printf("%s: unable to set IPv4 default route, "
1851 "error %d\n", DEVNAM(ifp->if_softc)(((struct umb_softc *)(ifp->if_softc))->sc_dev.dv_xname
)
, rv);
1852 rtm_miss(RTM_MISS0x7, &info, 0, RTP_NONE0, 0, rv,
1853 ifp->if_rdomainif_data.ifi_rdomain);
1854 } else {
1855 /* Inform listeners of the new route */
1856 rtm_send(rt, RTM_ADD0x1, rv, ifp->if_rdomainif_data.ifi_rdomain);
1857 rtfree(rt);
1858 }
1859
1860 if (ifp->if_flags & IFF_DEBUG0x4) {
1861 char str[3][INET_ADDRSTRLEN16];
1862 log(LOG_INFO6, "%s: IPv4 addr %s, mask %s, gateway %s\n",
1863	    DEVNAM(ifp->if_softc),
1864 sockaddr_ntop(sintosa(&ifra.ifra_addrifra_ifrau.ifrau_addr), str[0],
1865 sizeof(str[0])),
1866 sockaddr_ntop(sintosa(&ifra.ifra_mask), str[1],
1867 sizeof(str[1])),
1868 sockaddr_ntop(sintosa(&ifra.ifra_dstaddr), str[2],
1869 sizeof(str[2])));
1870 }
1871 return 0;
1872}
1873
1874#ifdef INET61
1875int
1876umb_add_inet6_config(struct umb_softc *sc, struct in6_addr *ip, u_int prefixlen,
1877 struct in6_addr *gw)
1878{
1879 struct ifnet *ifp = GET_IFP(sc)(&(sc)->sc_if);
1880 struct in6_aliasreq ifra;
1881 struct sockaddr_in6 *sin6, default_sin6;
1882 struct rt_addrinfo info;
1883 struct rtentry *rt;
1884 int rv;
1885
1886 memset(&ifra, 0, sizeof (ifra))__builtin_memset((&ifra), (0), (sizeof (ifra)));
1887 sin6 = &ifra.ifra_addrifra_ifrau.ifrau_addr;
1888 sin6->sin6_family = AF_INET624;
1889 sin6->sin6_len = sizeof (*sin6);
1890	memcpy(&sin6->sin6_addr, ip, sizeof (sin6->sin6_addr));
1891
1892 sin6 = &ifra.ifra_dstaddr;
1893 sin6->sin6_family = AF_INET624;
1894 sin6->sin6_len = sizeof (*sin6);
1895	memcpy(&sin6->sin6_addr, gw, sizeof (sin6->sin6_addr));
1896
1897 /* XXX: in6_update_ifa() accepts only 128 bits for P2P interfaces. */
1898 prefixlen = 128;
1899
1900 sin6 = &ifra.ifra_prefixmask;
1901 sin6->sin6_family = AF_INET624;
1902 sin6->sin6_len = sizeof (*sin6);
1903 in6_prefixlen2mask(&sin6->sin6_addr, prefixlen);
1904
1905 ifra.ifra_lifetime.ia6t_vltime = ND6_INFINITE_LIFETIME0xffffffff;
1906 ifra.ifra_lifetime.ia6t_pltime = ND6_INFINITE_LIFETIME0xffffffff;
1907
1908	rv = in6_ioctl(SIOCAIFADDR_IN6, (caddr_t)&ifra, ifp, 1);
1909 if (rv != 0) {
1910 printf("%s: unable to set IPv6 address, error %d\n",
1911	    DEVNAM(ifp->if_softc), rv);
1912 return rv;
1913 }
1914
1915	memset(&default_sin6, 0, sizeof(default_sin6));
1916 default_sin6.sin6_family = AF_INET624;
1917 default_sin6.sin6_len = sizeof (default_sin6);
1918
1919 memset(&info, 0, sizeof(info))__builtin_memset((&info), (0), (sizeof(info)));
1920 info.rti_flags = RTF_GATEWAY0x2 /* maybe | RTF_STATIC */;
1921 info.rti_ifa = ifa_ifwithaddr(sin6tosa(&ifra.ifra_addrifra_ifrau.ifrau_addr),
1922 ifp->if_rdomainif_data.ifi_rdomain);
1923 info.rti_info[RTAX_DST0] = sin6tosa(&default_sin6);
1924 info.rti_info[RTAX_NETMASK2] = sin6tosa(&default_sin6);
1925 info.rti_info[RTAX_GATEWAY1] = sin6tosa(&ifra.ifra_dstaddr);
1926
1927 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
1928 rv = rtrequest(RTM_ADD0x1, &info, 0, &rt, ifp->if_rdomainif_data.ifi_rdomain);
1929 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
1930 if (rv) {
1931 printf("%s: unable to set IPv6 default route, "
1932 "error %d\n", DEVNAM(ifp->if_softc)(((struct umb_softc *)(ifp->if_softc))->sc_dev.dv_xname
)
, rv);
1933 rtm_miss(RTM_MISS0x7, &info, 0, RTP_NONE0, 0, rv,
1934 ifp->if_rdomainif_data.ifi_rdomain);
1935 } else {
1936 /* Inform listeners of the new route */
1937 rtm_send(rt, RTM_ADD0x1, rv, ifp->if_rdomainif_data.ifi_rdomain);
1938 rtfree(rt);
1939 }
1940
1941 if (ifp->if_flags & IFF_DEBUG0x4) {
1942 char str[3][INET6_ADDRSTRLEN46];
1943 log(LOG_INFO6, "%s: IPv6 addr %s, mask %s, gateway %s\n",
1944	    DEVNAM(ifp->if_softc),
1945 sockaddr_ntop(sin6tosa(&ifra.ifra_addrifra_ifrau.ifrau_addr), str[0],
1946 sizeof(str[0])),
1947 sockaddr_ntop(sin6tosa(&ifra.ifra_prefixmask), str[1],
1948 sizeof(str[1])),
1949 sockaddr_ntop(sin6tosa(&ifra.ifra_dstaddr), str[2],
1950 sizeof(str[2])));
1951 }
1952 return 0;
1953}
1954#endif
1955
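/*
 * DNS servers learned from the device are not applied by the kernel itself;
 * umb_send_inet_proposal() packs them into a sockaddr_rtdns and emits an
 * RTM_PROPOSAL routing message (source RTP_PROPOSAL_UMB), leaving it to
 * userland (e.g. resolvd(8)) to act on them.
 */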
1956void
1957umb_send_inet_proposal(struct umb_softc *sc, int af)
1958{
1959 struct ifnet *ifp = GET_IFP(sc)(&(sc)->sc_if);
1960 struct sockaddr_rtdns rtdns;
1961 struct rt_addrinfo info;
1962 int i, flag = 0;
1963 size_t sz = 0;
1964
1965 memset(&rtdns, 0, sizeof(rtdns))__builtin_memset((&rtdns), (0), (sizeof(rtdns)));
1966 memset(&info, 0, sizeof(info))__builtin_memset((&info), (0), (sizeof(info)));
1967
1968 for (i = 0; i < UMB_MAX_DNSSRV2; i++) {
1969 if (af == AF_INET2) {
1970 sz = sizeof (sc->sc_info.ipv4dns[i]);
1971	if (sc->sc_info.ipv4dns[i].s_addr == INADDR_ANY)
1972 break;
1973	memcpy(rtdns.sr_dns + i * sz, &sc->sc_info.ipv4dns[i],
1974	    sz);
1975 flag = RTF_UP0x1;
1976#ifdef INET61
1977 } else if (af == AF_INET624) {
1978 sz = sizeof (sc->sc_info.ipv6dns[i]);
1979	if (IN6_ARE_ADDR_EQUAL(&sc->sc_info.ipv6dns[i],
1980	    &in6addr_any))
1981 break;
1982	memcpy(rtdns.sr_dns + i * sz, &sc->sc_info.ipv6dns[i],
1983	    sz);
1984 flag = RTF_UP0x1;
1985#endif
1986 }
1987 }
1988 rtdns.sr_family = af;
1989 rtdns.sr_len = 2 + i * sz;
1990 info.rti_info[RTAX_DNS12] = srtdnstosa(&rtdns);
1991
1992 rtm_proposal(ifp, &info, flag, RTP_PROPOSAL_UMB60);
1993}
1994
1995int
1996umb_decode_ip_configuration(struct umb_softc *sc, void *data, int len)
1997{
1998 struct mbim_cid_ip_configuration_info *ic = data;
1999 struct ifnet *ifp = GET_IFP(sc)(&(sc)->sc_if);
2000 int s;
2001 uint32_t avail_v4;
2002 uint32_t val;
2003 int n, i;
2004 int off;
2005 struct mbim_cid_ipv4_element ipv4elem;
2006 struct in_addr addr, gw;
2007 int state = -1;
2008 int rv;
2009 int hasmtu = 0;
2010#ifdef INET61
2011 uint32_t avail_v6;
2012 struct mbim_cid_ipv6_element ipv6elem;
2013 struct in6_addr addr6, gw6;
2014#endif
2015
2016 if (len < sizeof (*ic))
2017 return 0;
2018 if (letoh32(ic->sessionid)((__uint32_t)(ic->sessionid)) != umb_session_id) {
2019 DPRINTF("%s: ignore IP configuration for session id %d\n",do { } while (0)
2020 DEVNAM(sc), letoh32(ic->sessionid))do { } while (0);
2021 return 0;
2022 }
2023 s = splnet()splraise(0x7);
2024
2025	memset(sc->sc_info.ipv4dns, 0, sizeof (sc->sc_info.ipv4dns));
2026	memset(sc->sc_info.ipv6dns, 0, sizeof (sc->sc_info.ipv6dns));
2027
2028 /*
2029 * IPv4 configuration
2030 */
2031 avail_v4 = letoh32(ic->ipv4_available)((__uint32_t)(ic->ipv4_available));
2032 if ((avail_v4 & (MBIM_IPCONF_HAS_ADDRINFO0x0001 | MBIM_IPCONF_HAS_GWINFO0x0002)) ==
2033 (MBIM_IPCONF_HAS_ADDRINFO0x0001 | MBIM_IPCONF_HAS_GWINFO0x0002)) {
2034 n = letoh32(ic->ipv4_naddr)((__uint32_t)(ic->ipv4_naddr));
2035 off = letoh32(ic->ipv4_addroffs)((__uint32_t)(ic->ipv4_addroffs));
2036
2037 if (n == 0 || off + sizeof (ipv4elem) > len)
2038 goto tryv6;
2039 if (n != 1 && ifp->if_flags & IFF_DEBUG0x4)
2040 log(LOG_INFO6, "%s: more than one IPv4 addr: %d\n",
2041	    DEVNAM(ifp->if_softc), n);
2042
2043 /* Only pick the first one */
2044	memcpy(&ipv4elem, data + off, sizeof (ipv4elem));
2045 ipv4elem.prefixlen = letoh32(ipv4elem.prefixlen)((__uint32_t)(ipv4elem.prefixlen));
2046 addr.s_addr = ipv4elem.addr;
2047
2048 off = letoh32(ic->ipv4_gwoffs)((__uint32_t)(ic->ipv4_gwoffs));
2049 if (off + sizeof (gw) > len)
2050 goto done;
2051 memcpy(&gw, data + off, sizeof(gw))__builtin_memcpy((&gw), (data + off), (sizeof(gw)));
2052
2053 rv = umb_add_inet_config(sc, addr, ipv4elem.prefixlen, gw);
2054 if (rv == 0)
2055 state = UMB_S_UP;
2056
2057 }
2058
2059	memset(sc->sc_info.ipv4dns, 0, sizeof (sc->sc_info.ipv4dns));
2060 if (avail_v4 & MBIM_IPCONF_HAS_DNSINFO0x0004) {
2061 n = letoh32(ic->ipv4_ndnssrv)((__uint32_t)(ic->ipv4_ndnssrv));
2062 off = letoh32(ic->ipv4_dnssrvoffs)((__uint32_t)(ic->ipv4_dnssrvoffs));
2063 i = 0;
2064 while (n-- > 0) {
2065 if (off + sizeof (addr) > len)
2066 break;
2067 memcpy(&addr, data + off, sizeof(addr))__builtin_memcpy((&addr), (data + off), (sizeof(addr)));
2068 if (i < UMB_MAX_DNSSRV2)
2069 sc->sc_info.ipv4dns[i++] = addr;
2070 off += sizeof(addr);
2071 if (ifp->if_flags & IFF_DEBUG0x4) {
2072 char str[INET_ADDRSTRLEN16];
2073 log(LOG_INFO6, "%s: IPv4 nameserver %s\n",
2074	    DEVNAM(ifp->if_softc), inet_ntop(AF_INET,
2075	    &addr, str, sizeof(str)));
2076 }
2077 }
2078 umb_send_inet_proposal(sc, AF_INET2);
2079 }
2080 if ((avail_v4 & MBIM_IPCONF_HAS_MTUINFO0x0008)) {
2081 val = letoh32(ic->ipv4_mtu)((__uint32_t)(ic->ipv4_mtu));
2082 if (ifp->if_hardmtu != val && val <= sc->sc_maxpktlen) {
2083 hasmtu = 1;
2084 ifp->if_hardmtu = val;
2085 if (ifp->if_mtuif_data.ifi_mtu > val)
2086 ifp->if_mtuif_data.ifi_mtu = val;
2087 }
2088 }
2089
2090tryv6:;
2091#ifdef INET61
2092 /*
2093 * IPv6 configuration
2094 */
2095 avail_v6 = letoh32(ic->ipv6_available)((__uint32_t)(ic->ipv6_available));
2096 if (avail_v6 == 0) {
2097 if (ifp->if_flags & IFF_DEBUG0x4)
2098 log(LOG_INFO6, "%s: ISP or WWAN module offers no IPv6 "
2099 "support\n", DEVNAM(ifp->if_softc)(((struct umb_softc *)(ifp->if_softc))->sc_dev.dv_xname
)
);
2100 goto done;
2101 }
2102
2103 if ((avail_v6 & (MBIM_IPCONF_HAS_ADDRINFO0x0001 | MBIM_IPCONF_HAS_GWINFO0x0002)) ==
2104 (MBIM_IPCONF_HAS_ADDRINFO0x0001 | MBIM_IPCONF_HAS_GWINFO0x0002)) {
2105 n = letoh32(ic->ipv6_naddr)((__uint32_t)(ic->ipv6_naddr));
2106 off = letoh32(ic->ipv6_addroffs)((__uint32_t)(ic->ipv6_addroffs));
2107
2108 if (n == 0 || off + sizeof (ipv6elem) > len)
2109 goto done;
2110 if (n != 1 && ifp->if_flags & IFF_DEBUG0x4)
2111 log(LOG_INFO6, "%s: more than one IPv6 addr: %d\n",
2112	    DEVNAM(ifp->if_softc), n);
2113
2114 /* Only pick the first one */
2115	memcpy(&ipv6elem, data + off, sizeof (ipv6elem));
2116	memcpy(&addr6, ipv6elem.addr, sizeof (addr6));
2117
2118 off = letoh32(ic->ipv6_gwoffs)((__uint32_t)(ic->ipv6_gwoffs));
2119 if (off + sizeof (gw6) > len)
2120 goto done;
2121 memcpy(&gw6, data + off, sizeof (gw6))__builtin_memcpy((&gw6), (data + off), (sizeof (gw6)));
2122
2123 rv = umb_add_inet6_config(sc, &addr6, ipv6elem.prefixlen, &gw6);
2124 if (rv == 0)
2125 state = UMB_S_UP;
2126 }
2127
2128 if (avail_v6 & MBIM_IPCONF_HAS_DNSINFO0x0004) {
2129 n = letoh32(ic->ipv6_ndnssrv)((__uint32_t)(ic->ipv6_ndnssrv));
2130 off = letoh32(ic->ipv6_dnssrvoffs)((__uint32_t)(ic->ipv6_dnssrvoffs));
2131 i = 0;
2132 while (n-- > 0) {
2133 if (off + sizeof (addr6) > len)
2134 break;
2135 memcpy(&addr6, data + off, sizeof(addr6))__builtin_memcpy((&addr6), (data + off), (sizeof(addr6)));
2136 if (i < UMB_MAX_DNSSRV2)
2137 sc->sc_info.ipv6dns[i++] = addr6;
2138 off += sizeof(addr6);
2139 if (ifp->if_flags & IFF_DEBUG0x4) {
2140 char str[INET6_ADDRSTRLEN46];
2141 log(LOG_INFO6, "%s: IPv6 nameserver %s\n",
2142	    DEVNAM(ifp->if_softc), inet_ntop(AF_INET6,
2143	    &addr6, str, sizeof(str)));
2144 }
2145 }
2146 umb_send_inet_proposal(sc, AF_INET624);
2147 }
2148
2149 if ((avail_v6 & MBIM_IPCONF_HAS_MTUINFO0x0008)) {
2150 val = letoh32(ic->ipv6_mtu)((__uint32_t)(ic->ipv6_mtu));
2151 if (ifp->if_hardmtu != val && val <= sc->sc_maxpktlen) {
2152 hasmtu = 1;
2153 ifp->if_hardmtu = val;
2154 if (ifp->if_mtuif_data.ifi_mtu > val)
2155 ifp->if_mtuif_data.ifi_mtu = val;
2156 }
2157 }
2158#endif
2159
2160done:
2161 if (hasmtu && (ifp->if_flags & IFF_DEBUG0x4))
2162 log(LOG_INFO6, "%s: MTU %d\n", DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname), ifp->if_hardmtu);
2163
2164 if (state != -1)
2165 umb_newstate(sc, state, 0);
2166
2167 splx(s)spllower(s);
2168 return 1;
2169}
2170
2171void
2172umb_rx(struct umb_softc *sc)
2173{
2174 usbd_setup_xfer(sc->sc_rx_xfer, sc->sc_rx_pipe, sc, sc->sc_rx_buf,
2175 sc->sc_rx_bufsz, USBD_SHORT_XFER_OK0x04 | USBD_NO_COPY0x01,
2176 USBD_NO_TIMEOUT0, umb_rxeof);
2177 usbd_transfer(sc->sc_rx_xfer);
2178}
2179
2180void
2181umb_rxeof(struct usbd_xfer *xfer, void *priv, usbd_status status)
2182{
2183 struct umb_softc *sc = priv;
2184 struct ifnet *ifp = GET_IFP(sc)(&(sc)->sc_if);
2185
2186 if (usbd_is_dying(sc->sc_udev) || !(ifp->if_flags & IFF_RUNNING0x40))
2187 return;
2188
2189 if (status != USBD_NORMAL_COMPLETION) {
2190 if (status == USBD_NOT_STARTED || status == USBD_CANCELLED)
2191 return;
2192 DPRINTF("%s: rx error: %s\n", DEVNAM(sc), usbd_errstr(status))do { } while (0);
2193 if (status == USBD_STALLED)
2194 usbd_clear_endpoint_stall_async(sc->sc_rx_pipe);
2195 if (++sc->sc_rx_nerr > 100) {
2196 log(LOG_ERR3, "%s: too many rx errors, disabling\n",
2197 DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname));
2198 usbd_deactivate(sc->sc_udev);
2199 }
2200 } else {
2201 sc->sc_rx_nerr = 0;
2202 umb_decap(sc, xfer);
2203 }
2204
2205 umb_rx(sc);
2206 return;
2207}
2208
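/*
 * Transmit path: umb_encap() wraps the queued mbufs into a single NCM
 * transfer block.  Depending on sc_ncm_format it builds an NTH16 or NTH32
 * header, an NDP (datagram pointer table) with one index/length entry per
 * datagram plus a terminating zero entry, and copies the datagrams
 * themselves, inserting alignment padding via umb_padding().  With
 * UMBFLG_NDP_AT_END the NDP is placed after the datagrams instead of
 * directly after the header.
 */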
2209int
2210umb_encap(struct umb_softc *sc, int ndgram)
2211{
2212 struct ncm_header16 *hdr16 = NULL((void *)0);
2213 struct ncm_header32 *hdr32 = NULL((void *)0);
2214 struct ncm_pointer16 *ptr16 = NULL((void *)0);
2215 struct ncm_pointer32 *ptr32 = NULL((void *)0);
2216 struct ncm_pointer16_dgram *dgram16 = NULL((void *)0);
2217 struct ncm_pointer32_dgram *dgram32 = NULL((void *)0);
2218 int offs = 0, plen = 0;
2219 int dgoffs = 0, poffs;
2220 struct mbuf *m;
2221 usbd_status err;
2222
2223 /* All size constraints have been validated by the caller! */
2224
2225 /* NCM Header */
2226 switch (sc->sc_ncm_format) {
2227 case NCM_FORMAT_NTB160x00:
2228 hdr16 = sc->sc_tx_buf;
2229 USETDW(hdr16->dwSignature, NCM_HDR16_SIG)(*(u_int32_t *)(hdr16->dwSignature) = (0x484d434e));
2230 USETW(hdr16->wHeaderLength, sizeof (*hdr16))(*(u_int16_t *)(hdr16->wHeaderLength) = (sizeof (*hdr16)));
2231 USETW(hdr16->wSequence, sc->sc_tx_seq)(*(u_int16_t *)(hdr16->wSequence) = (sc->sc_tx_seq));
2232 USETW(hdr16->wBlockLength, 0)(*(u_int16_t *)(hdr16->wBlockLength) = (0));
2233 offs = sizeof (*hdr16);
2234 break;
2235 case NCM_FORMAT_NTB320x01:
2236 hdr32 = sc->sc_tx_buf;
2237 USETDW(hdr32->dwSignature, NCM_HDR32_SIG)(*(u_int32_t *)(hdr32->dwSignature) = (0x686d636e));
2238 USETW(hdr32->wHeaderLength, sizeof (*hdr32))(*(u_int16_t *)(hdr32->wHeaderLength) = (sizeof (*hdr32)));
2239 USETW(hdr32->wSequence, sc->sc_tx_seq)(*(u_int16_t *)(hdr32->wSequence) = (sc->sc_tx_seq));
2240 USETDW(hdr32->dwBlockLength, 0)(*(u_int32_t *)(hdr32->dwBlockLength) = (0));
2241 offs = sizeof (*hdr32);
2242 break;
2243 }
2244 offs += umb_padding(sc->sc_tx_buf, sc->sc_tx_bufsz, offs,
2245 sc->sc_align, 0);
2246
2247 if (sc->sc_flags & UMBFLG_NDP_AT_END0x0004) {
2248 dgoffs = offs;
2249
2250 /*
2251 * Calculate space needed for datagrams.
2252 *
2253 * XXX cannot use ml_len(&sc->sc_tx_ml), since it ignores
2254 * the padding requirements.
2255 */
2256 poffs = dgoffs;
2257	MBUF_LIST_FOREACH(&sc->sc_tx_ml, m) {
2258 poffs += umb_padding(sc->sc_tx_buf, sc->sc_tx_bufsz,
2259 poffs, sc->sc_ndp_div, sc->sc_ndp_remainder);
2260 poffs += m->m_pkthdrM_dat.MH.MH_pkthdr.len;
2261 }
2262 poffs += umb_padding(sc->sc_tx_buf, sc->sc_tx_bufsz,
2263 poffs, sc->sc_ndp_div, sc->sc_ndp_remainder);
2264 } else
2265 poffs = offs;
2266
2267 /* NCM Pointer */
2268 switch (sc->sc_ncm_format) {
2269 case NCM_FORMAT_NTB160x00:
2270 USETW(hdr16->wNdpIndex, poffs)(*(u_int16_t *)(hdr16->wNdpIndex) = (poffs));
2271 ptr16 = (struct ncm_pointer16 *)(sc->sc_tx_buf + poffs);
2272 plen = sizeof(*ptr16) + ndgram * sizeof(*dgram16);
2273	USETDW(ptr16->dwSignature, MBIM_NCM_NTH16_SIG(umb_session_id));
2274 USETW(ptr16->wLength, plen)(*(u_int16_t *)(ptr16->wLength) = (plen));
2275 USETW(ptr16->wNextNdpIndex, 0)(*(u_int16_t *)(ptr16->wNextNdpIndex) = (0));
2276 dgram16 = ptr16->dgram;
2277 break;
2278 case NCM_FORMAT_NTB320x01:
2279 USETDW(hdr32->dwNdpIndex, poffs)(*(u_int32_t *)(hdr32->dwNdpIndex) = (poffs));
2280 ptr32 = (struct ncm_pointer32 *)(sc->sc_tx_buf + poffs);
2281 plen = sizeof(*ptr32) + ndgram * sizeof(*dgram32);
2282	USETDW(ptr32->dwSignature, MBIM_NCM_NTH32_SIG(umb_session_id));
2283 USETW(ptr32->wLength, plen)(*(u_int16_t *)(ptr32->wLength) = (plen));
2284 USETW(ptr32->wReserved6, 0)(*(u_int16_t *)(ptr32->wReserved6) = (0));
2285 USETDW(ptr32->dwNextNdpIndex, 0)(*(u_int32_t *)(ptr32->dwNextNdpIndex) = (0));
2286 USETDW(ptr32->dwReserved12, 0)(*(u_int32_t *)(ptr32->dwReserved12) = (0));
2287 dgram32 = ptr32->dgram;
2288 break;
2289 }
2290
2291 if (!(sc->sc_flags & UMBFLG_NDP_AT_END0x0004))
2292 dgoffs = offs + plen;
2293
2294 /* Encap mbufs to NCM dgrams */
2295 sc->sc_tx_seq++;
2296 while ((m = ml_dequeue(&sc->sc_tx_ml)) != NULL((void *)0)) {
2297 dgoffs += umb_padding(sc->sc_tx_buf, sc->sc_tx_bufsz, dgoffs,
2298 sc->sc_ndp_div, sc->sc_ndp_remainder);
2299 switch (sc->sc_ncm_format) {
2300 case NCM_FORMAT_NTB160x00:
2301 USETW(dgram16->wDatagramIndex, dgoffs)(*(u_int16_t *)(dgram16->wDatagramIndex) = (dgoffs));
2302	USETW(dgram16->wDatagramLen, m->m_pkthdr.len);
2303 dgram16++;
2304 break;
2305 case NCM_FORMAT_NTB320x01:
2306 USETDW(dgram32->dwDatagramIndex, dgoffs)(*(u_int32_t *)(dgram32->dwDatagramIndex) = (dgoffs));
2307	USETDW(dgram32->dwDatagramLen, m->m_pkthdr.len);
2308 dgram32++;
2309 break;
2310 }
2311 m_copydata(m, 0, m->m_pkthdrM_dat.MH.MH_pkthdr.len, sc->sc_tx_buf + dgoffs);
2312 dgoffs += m->m_pkthdrM_dat.MH.MH_pkthdr.len;
2313 m_freem(m);
2314 }
2315
2316 if (sc->sc_flags & UMBFLG_NDP_AT_END0x0004)
2317 offs = poffs + plen;
2318 else
2319 offs = dgoffs;
2320
2321 /* Terminating pointer and datagram size */
2322 switch (sc->sc_ncm_format) {
2323 case NCM_FORMAT_NTB160x00:
2324 USETW(dgram16->wDatagramIndex, 0)(*(u_int16_t *)(dgram16->wDatagramIndex) = (0));
2325 USETW(dgram16->wDatagramLen, 0)(*(u_int16_t *)(dgram16->wDatagramLen) = (0));
2326 USETW(hdr16->wBlockLength, offs)(*(u_int16_t *)(hdr16->wBlockLength) = (offs));
2327	KASSERT(dgram16 - ptr16->dgram == ndgram);
2328 break;
2329 case NCM_FORMAT_NTB320x01:
2330 USETDW(dgram32->dwDatagramIndex, 0)(*(u_int32_t *)(dgram32->dwDatagramIndex) = (0));
2331 USETDW(dgram32->dwDatagramLen, 0)(*(u_int32_t *)(dgram32->dwDatagramLen) = (0));
2332 USETDW(hdr32->dwBlockLength, offs)(*(u_int32_t *)(hdr32->dwBlockLength) = (offs));
2333	KASSERT(dgram32 - ptr32->dgram == ndgram);
2334 break;
2335 }
2336
2337 DPRINTFN(3, "%s: encap %d bytes\n", DEVNAM(sc), offs)do { } while (0);
2338 DDUMPN(5, sc->sc_tx_buf, offs)do { } while (0);
2339	KASSERT(offs <= sc->sc_tx_bufsz);
2340
2341 usbd_setup_xfer(sc->sc_tx_xfer, sc->sc_tx_pipe, sc, sc->sc_tx_buf, offs,
2342 USBD_FORCE_SHORT_XFER0x08 | USBD_NO_COPY0x01, umb_xfer_tout, umb_txeof);
2343 err = usbd_transfer(sc->sc_tx_xfer);
2344 if (err != USBD_IN_PROGRESS) {
2345 DPRINTF("%s: start tx error: %s\n", DEVNAM(sc),do { } while (0)
2346 usbd_errstr(err))do { } while (0);
2347 ml_purge(&sc->sc_tx_ml);
2348 return 0;
2349 }
2350 return 1;
2351}
2352
2353void
2354umb_txeof(struct usbd_xfer *xfer, void *priv, usbd_status status)
2355{
2356 struct umb_softc *sc = priv;
2357 struct ifnet *ifp = GET_IFP(sc)(&(sc)->sc_if);
2358 int s;
2359
2360 s = splnet()splraise(0x7);
2361 ml_purge(&sc->sc_tx_ml);
2362 ifq_clr_oactive(&ifp->if_snd);
2363 ifp->if_timer = 0;
2364
2365 if (status != USBD_NORMAL_COMPLETION) {
1. Assuming 'status' is equal to USBD_NORMAL_COMPLETION
2. Taking false branch
2366 if (status != USBD_NOT_STARTED && status != USBD_CANCELLED) {
2367 ifp->if_oerrorsif_data.ifi_oerrors++;
2368 DPRINTF("%s: tx error: %s\n", DEVNAM(sc),do { } while (0)
2369 usbd_errstr(status))do { } while (0);
2370 if (status == USBD_STALLED)
2371 usbd_clear_endpoint_stall_async(sc->sc_tx_pipe);
2372 }
2373 }
2374 if (ifq_empty(&ifp->if_snd)(((&ifp->if_snd)->ifq_len) == 0) == 0)
3. Assuming field 'ifq_len' is not equal to 0
4. Taking true branch
2375 umb_start(ifp);
5. Calling 'umb_start'
2376
2377 splx(s)spllower(s);
2378}
2379
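/*
 * Receive path: umb_decap() parses the NTH16/NTH32 header, locates the NDP
 * via the index stored in the header, and walks its datagram entries until
 * the terminating zero entry.  Each datagram is copied into an mbuf,
 * prefixed with a 4-byte address-family word derived from the IP version
 * nibble, and handed to the stack via if_input().
 */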
2380void
2381umb_decap(struct umb_softc *sc, struct usbd_xfer *xfer)
2382{
2383 struct ifnet *ifp = GET_IFP(sc)(&(sc)->sc_if);
2384 int s;
2385 void *buf;
2386 uint32_t len, af = 0;
2387 char *dp;
2388 struct ncm_header16 *hdr16;
2389 struct ncm_header32 *hdr32;
2390 struct ncm_pointer16 *ptr16;
2391 struct ncm_pointer16_dgram *dgram16;
2392 struct ncm_pointer32_dgram *dgram32;
2393 uint32_t hsig, psig;
2394 int blen;
2395 int ptrlen, ptroff, dgentryoff;
2396 uint32_t doff, dlen;
2397 struct mbuf_list ml = MBUF_LIST_INITIALIZER(){ ((void *)0), ((void *)0), 0 };
2398 struct mbuf *m;
2399
2400 usbd_get_xfer_status(xfer, NULL((void *)0), &buf, &len, NULL((void *)0));
2401 DPRINTFN(4, "%s: recv %d bytes\n", DEVNAM(sc), len)do { } while (0);
2402 DDUMPN(5, buf, len)do { } while (0);
2403 s = splnet()splraise(0x7);
2404 if (len < sizeof (*hdr16))
2405 goto toosmall;
2406
2407 hdr16 = (struct ncm_header16 *)buf;
2408 hsig = UGETDW(hdr16->dwSignature)(*(u_int32_t *)(hdr16->dwSignature));
2409
2410 switch (hsig) {
2411 case NCM_HDR16_SIG0x484d434e:
2412 blen = UGETW(hdr16->wBlockLength)(*(u_int16_t *)(hdr16->wBlockLength));
2413 ptroff = UGETW(hdr16->wNdpIndex)(*(u_int16_t *)(hdr16->wNdpIndex));
2414 if (UGETW(hdr16->wHeaderLength)(*(u_int16_t *)(hdr16->wHeaderLength)) != sizeof (*hdr16)) {
2415 DPRINTF("%s: bad header len %d for NTH16 (exp %zu)\n",do { } while (0)
2416 DEVNAM(sc), UGETW(hdr16->wHeaderLength),do { } while (0)
2417 sizeof (*hdr16))do { } while (0);
2418 goto fail;
2419 }
2420 break;
2421 case NCM_HDR32_SIG0x686d636e:
2422 if (len < sizeof (*hdr32))
2423 goto toosmall;
2424 hdr32 = (struct ncm_header32 *)hdr16;
2425 blen = UGETDW(hdr32->dwBlockLength)(*(u_int32_t *)(hdr32->dwBlockLength));
2426 ptroff = UGETDW(hdr32->dwNdpIndex)(*(u_int32_t *)(hdr32->dwNdpIndex));
2427 if (UGETW(hdr32->wHeaderLength)(*(u_int16_t *)(hdr32->wHeaderLength)) != sizeof (*hdr32)) {
2428 DPRINTF("%s: bad header len %d for NTH32 (exp %zu)\n",do { } while (0)
2429 DEVNAM(sc), UGETW(hdr32->wHeaderLength),do { } while (0)
2430 sizeof (*hdr32))do { } while (0);
2431 goto fail;
2432 }
2433 break;
2434 default:
2435 DPRINTF("%s: unsupported NCM header signature (0x%08x)\n",do { } while (0)
2436 DEVNAM(sc), hsig)do { } while (0);
2437 goto fail;
2438 }
2439 if (blen != 0 && len < blen) {
2440 DPRINTF("%s: bad NTB len (%d) for %d bytes of data\n",do { } while (0)
2441 DEVNAM(sc), blen, len)do { } while (0);
2442 goto fail;
2443 }
2444
2445 ptr16 = (struct ncm_pointer16 *)(buf + ptroff);
2446 psig = UGETDW(ptr16->dwSignature)(*(u_int32_t *)(ptr16->dwSignature));
2447 ptrlen = UGETW(ptr16->wLength)(*(u_int16_t *)(ptr16->wLength));
2448 if (len < ptrlen + ptroff)
2449 goto toosmall;
2450 if (!MBIM_NCM_NTH16_ISISG(psig)(((psig) & 0x00ffffff) == 0x00535049) && !MBIM_NCM_NTH32_ISISG(psig)(((psig) & 0x00ffffff) == 0x00737069)) {
2451 DPRINTF("%s: unsupported NCM pointer signature (0x%08x)\n",do { } while (0)
2452 DEVNAM(sc), psig)do { } while (0);
2453 goto fail;
2454 }
2455
2456 switch (hsig) {
2457 case NCM_HDR16_SIG0x484d434e:
2458 dgentryoff = offsetof(struct ncm_pointer16, dgram)__builtin_offsetof(struct ncm_pointer16, dgram);
2459 break;
2460 case NCM_HDR32_SIG0x686d636e:
2461 dgentryoff = offsetof(struct ncm_pointer32, dgram)__builtin_offsetof(struct ncm_pointer32, dgram);
2462 break;
2463 default:
2464 goto fail;
2465 }
2466
2467 while (dgentryoff < ptrlen) {
2468 switch (hsig) {
2469 case NCM_HDR16_SIG0x484d434e:
2470 if (ptroff + dgentryoff < sizeof (*dgram16))
2471 goto done;
2472 dgram16 = (struct ncm_pointer16_dgram *)
2473 (buf + ptroff + dgentryoff);
2474 dgentryoff += sizeof (*dgram16);
2475 dlen = UGETW(dgram16->wDatagramLen)(*(u_int16_t *)(dgram16->wDatagramLen));
2476 doff = UGETW(dgram16->wDatagramIndex)(*(u_int16_t *)(dgram16->wDatagramIndex));
2477 break;
2478 case NCM_HDR32_SIG0x686d636e:
2479 if (ptroff + dgentryoff < sizeof (*dgram32))
2480 goto done;
2481 dgram32 = (struct ncm_pointer32_dgram *)
2482 (buf + ptroff + dgentryoff);
2483 dgentryoff += sizeof (*dgram32);
2484 dlen = UGETDW(dgram32->dwDatagramLen)(*(u_int32_t *)(dgram32->dwDatagramLen));
2485 doff = UGETDW(dgram32->dwDatagramIndex)(*(u_int32_t *)(dgram32->dwDatagramIndex));
2486 break;
2487 default:
2488 ifp->if_ierrorsif_data.ifi_ierrors++;
2489 goto done;
2490 }
2491
2492 /* Terminating zero entry */
2493 if (dlen == 0 || doff == 0)
2494 break;
2495 if (len < dlen + doff) {
2496 /* Skip giant datagram but continue processing */
2497 DPRINTF("%s: datagram too large (%d @ off %d)\n",do { } while (0)
2498 DEVNAM(sc), dlen, doff)do { } while (0);
2499 continue;
2500 }
2501
2502 dp = buf + doff;
2503 DPRINTFN(3, "%s: decap %d bytes\n", DEVNAM(sc), dlen)do { } while (0);
2504 m = m_devget(dp, dlen, sizeof(uint32_t));
2505 if (m == NULL((void *)0)) {
2506 ifp->if_iqdropsif_data.ifi_iqdrops++;
2507 continue;
2508 }
2509 m = m_prepend(m, sizeof(uint32_t), M_DONTWAIT0x0002);
2510 if (m == NULL((void *)0)) {
2511 ifp->if_iqdropsif_data.ifi_iqdrops++;
2512 continue;
2513 }
2514 switch (*dp & 0xf0) {
2515 case 4 << 4:
2516	af = htonl(AF_INET);
2517 break;
2518 case 6 << 4:
2519	af = htonl(AF_INET6);
2520 break;
2521 }
2522 *mtod(m, uint32_t *)((uint32_t *)((m)->m_hdr.mh_data)) = af;
2523 ml_enqueue(&ml, m);
2524 }
2525done:
2526 if_input(ifp, &ml);
2527 splx(s)spllower(s);
2528 return;
2529toosmall:
2530 DPRINTF("%s: packet too small (%d)\n", DEVNAM(sc), len)do { } while (0);
2531fail:
2532 ifp->if_ierrorsif_data.ifi_ierrors++;
2533 splx(s)spllower(s);
2534}
2535
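/*
 * MBIM control messages are exchanged over the control interface using the
 * CDC class requests SEND_ENCAPSULATED_COMMAND (below) and
 * GET_ENCAPSULATED_RESPONSE; messages larger than sc_ctrl_len would have to
 * be fragmented, which is still a FIXME.
 */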
2536usbd_status
2537umb_send_encap_command(struct umb_softc *sc, void *data, int len)
2538{
2539 struct usbd_xfer *xfer;
2540 usb_device_request_t req;
2541 char *buf;
2542
2543 if (len > sc->sc_ctrl_len)
2544 return USBD_INVAL;
2545
2546 if ((xfer = usbd_alloc_xfer(sc->sc_udev)) == NULL((void *)0))
2547 return USBD_NOMEM;
2548 if ((buf = usbd_alloc_buffer(xfer, len)) == NULL((void *)0)) {
2549 usbd_free_xfer(xfer);
2550 return USBD_NOMEM;
2551 }
2552 memcpy(buf, data, len)__builtin_memcpy((buf), (data), (len));
2553
2554 /* XXX FIXME: if (total len > sc->sc_ctrl_len) => must fragment */
2555 req.bmRequestType = UT_WRITE_CLASS_INTERFACE(0x00 | 0x20 | 0x01);
2556 req.bRequest = UCDC_SEND_ENCAPSULATED_COMMAND0x00;
2557 USETW(req.wValue, 0)(*(u_int16_t *)(req.wValue) = (0));
2558 USETW(req.wIndex, sc->sc_ctrl_ifaceno)(*(u_int16_t *)(req.wIndex) = (sc->sc_ctrl_ifaceno));
2559 USETW(req.wLength, len)(*(u_int16_t *)(req.wLength) = (len));
2560 DELAY(umb_delay)(*delay_func)(umb_delay);
2561 return usbd_request_async(xfer, &req, NULL((void *)0), NULL((void *)0));
2562}
2563
2564int
2565umb_get_encap_response(struct umb_softc *sc, void *buf, int *len)
2566{
2567 usb_device_request_t req;
2568 usbd_status err;
2569
2570 req.bmRequestType = UT_READ_CLASS_INTERFACE(0x80 | 0x20 | 0x01);
2571 req.bRequest = UCDC_GET_ENCAPSULATED_RESPONSE0x01;
2572 USETW(req.wValue, 0)(*(u_int16_t *)(req.wValue) = (0));
2573 USETW(req.wIndex, sc->sc_ctrl_ifaceno)(*(u_int16_t *)(req.wIndex) = (sc->sc_ctrl_ifaceno));
2574 USETW(req.wLength, *len)(*(u_int16_t *)(req.wLength) = (*len));
2575 /* XXX FIXME: re-assemble fragments */
2576
2577 DELAY(umb_delay)(*delay_func)(umb_delay);
2578 err = usbd_do_request_flags(sc->sc_udev, &req, buf, USBD_SHORT_XFER_OK0x04,
2579 len, umb_xfer_tout);
2580 if (err == USBD_NORMAL_COMPLETION)
2581 return 1;
2582 DPRINTF("%s: ctrl recv: %s\n", DEVNAM(sc), usbd_errstr(err))do { } while (0);
2583 return 0;
2584}
2585
2586void
2587umb_ctrl_msg(struct umb_softc *sc, uint32_t req, void *data, int len)
2588{
2589 struct ifnet *ifp = GET_IFP(sc)(&(sc)->sc_if);
2590 uint32_t tid;
2591 struct mbim_msghdr *hdr = data;
2592 usbd_status err;
2593 int s;
2594
2595 assertwaitok();
2596 if (usbd_is_dying(sc->sc_udev))
2597 return;
2598 if (len < sizeof (*hdr))
2599 return;
2600 tid = ++sc->sc_tid;
2601
2602 hdr->type = htole32(req)((__uint32_t)(req));
2603 hdr->len = htole32(len)((__uint32_t)(len));
2604 hdr->tid = htole32(tid)((__uint32_t)(tid));
2605
2606#ifdef UMB_DEBUG
2607 if (umb_debug) {
2608 const char *op, *str;
2609 if (req == MBIM_COMMAND_MSG3U) {
2610 struct mbim_h2f_cmd *c = data;
2611 if (letoh32(c->op)((__uint32_t)(c->op)) == MBIM_CMDOP_SET1)
2612 op = "set";
2613 else
2614 op = "qry";
2615 str = umb_cid2str(letoh32(c->cid))umb_val2descr(umb_cids, (((__uint32_t)(c->cid))));
2616 } else {
2617 op = "snd";
2618 str = umb_request2str(req)umb_val2descr(umb_messages, (req));
2619 }
2620 DPRINTF("%s: -> %s %s (tid %u)\n", DEVNAM(sc), op, str, tid)do { } while (0);
2621 }
2622#endif
2623 s = splusb()splraise(0x5);
2624 err = umb_send_encap_command(sc, data, len);
2625 splx(s)spllower(s);
2626 if (err != USBD_NORMAL_COMPLETION) {
2627 if (ifp->if_flags & IFF_DEBUG0x4)
2628 log(LOG_ERR3, "%s: send %s msg (tid %u) failed: %s\n",
2629 DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname), umb_request2str(req)umb_val2descr(umb_messages, (req)), tid,
2630 usbd_errstr(err));
2631
2632 /* will affect other transactions, too */
2633 usbd_abort_pipe(sc->sc_udev->default_pipe);
2634 } else {
2635 DPRINTFN(2, "%s: sent %s (tid %u)\n", DEVNAM(sc),do { } while (0)
2636 umb_request2str(req), tid)do { } while (0);
2637 DDUMPN(3, data, len)do { } while (0);
2638 }
2639 return;
2640}
2641
2642void
2643umb_open(struct umb_softc *sc)
2644{
2645 struct mbim_h2f_openmsg msg;
2646
2647 memset(&msg, 0, sizeof (msg))__builtin_memset((&msg), (0), (sizeof (msg)));
2648 msg.maxlen = htole32(sc->sc_ctrl_len)((__uint32_t)(sc->sc_ctrl_len));
2649 umb_ctrl_msg(sc, MBIM_OPEN_MSG1U, &msg, sizeof (msg));
2650 return;
2651}
2652
2653void
2654umb_close(struct umb_softc *sc)
2655{
2656 struct mbim_h2f_closemsg msg;
2657
2658 memset(&msg, 0, sizeof (msg))__builtin_memset((&msg), (0), (sizeof (msg)));
2659 umb_ctrl_msg(sc, MBIM_CLOSE_MSG2U, &msg, sizeof (msg));
2660}
2661
2662int
2663umb_setpin(struct umb_softc *sc, int op, int is_puk, void *pin, int pinlen,
2664 void *newpin, int newpinlen)
2665{
2666 struct mbim_cid_pin cp;
2667 int off;
2668
2669 if (pinlen == 0)
2670 return 0;
2671 if (pinlen < 0 || pinlen > MBIM_PIN_MAXLEN32 ||
2672 newpinlen < 0 || newpinlen > MBIM_PIN_MAXLEN32 ||
2673 op < 0 || op > MBIM_PIN_OP_CHANGE3 ||
2674 (is_puk && op != MBIM_PIN_OP_ENTER0))
2675 return EINVAL22;
2676
2677 memset(&cp, 0, sizeof (cp))__builtin_memset((&cp), (0), (sizeof (cp)));
2678 cp.type = htole32(is_puk ? MBIM_PIN_TYPE_PUK1 : MBIM_PIN_TYPE_PIN1)((__uint32_t)(is_puk ? 11 : 2));
2679
2680 off = offsetof(struct mbim_cid_pin, data)__builtin_offsetof(struct mbim_cid_pin, data);
2681 if (!umb_addstr(&cp, sizeof (cp), &off, pin, pinlen,
2682 &cp.pin_offs, &cp.pin_size))
2683 return EINVAL22;
2684
2685 cp.op = htole32(op)((__uint32_t)(op));
2686 if (newpinlen) {
2687 if (!umb_addstr(&cp, sizeof (cp), &off, newpin, newpinlen,
2688 &cp.newpin_offs, &cp.newpin_size))
2689 return EINVAL22;
2690 } else {
2691 if ((op == MBIM_PIN_OP_CHANGE3) || is_puk)
2692 return EINVAL22;
2693 if (!umb_addstr(&cp, sizeof (cp), &off, NULL((void *)0), 0,
2694 &cp.newpin_offs, &cp.newpin_size))
2695 return EINVAL22;
2696 }
2697 umb_cmd(sc, MBIM_CID_PIN4, MBIM_CMDOP_SET1, &cp, off);
2698 return 0;
2699}
2700
2701void
2702umb_setdataclass(struct umb_softc *sc)
2703{
2704 struct mbim_cid_registration_state rs;
2705 uint32_t classes;
2706
2707 if (sc->sc_info.supportedclasses == MBIM_DATACLASS_NONE0x00000000)
2708 return;
2709
2710 memset(&rs, 0, sizeof (rs))__builtin_memset((&rs), (0), (sizeof (rs)));
2711 rs.regaction = htole32(MBIM_REGACTION_AUTOMATIC)((__uint32_t)(0));
2712 classes = sc->sc_info.supportedclasses;
2713 if (sc->sc_info.preferredclasses != MBIM_DATACLASS_NONE0x00000000)
2714 classes &= sc->sc_info.preferredclasses;
2715 rs.data_class = htole32(classes)((__uint32_t)(classes));
2716 umb_cmd(sc, MBIM_CID_REGISTER_STATE9, MBIM_CMDOP_SET1, &rs, sizeof (rs));
2717}
2718
2719void
2720umb_radio(struct umb_softc *sc, int on)
2721{
2722 struct mbim_cid_radio_state s;
2723
2724 DPRINTF("%s: set radio %s\n", DEVNAM(sc), on ? "on" : "off")do { } while (0);
2725 memset(&s, 0, sizeof (s))__builtin_memset((&s), (0), (sizeof (s)));
2726 s.state = htole32(on ? MBIM_RADIO_STATE_ON : MBIM_RADIO_STATE_OFF)((__uint32_t)(on ? 1 : 0));
2727 umb_cmd(sc, MBIM_CID_RADIO_STATE3, MBIM_CMDOP_SET1, &s, sizeof (s));
2728}
2729
2730void
2731umb_allocate_cid(struct umb_softc *sc)
2732{
2733 umb_cmd1(sc, MBIM_CID_DEVICE_CAPS1, MBIM_CMDOP_SET1,
2734 umb_qmi_alloc_cid, sizeof (umb_qmi_alloc_cid), umb_uuid_qmi_mbim);
2735}
2736
2737void
2738umb_send_fcc_auth(struct umb_softc *sc)
2739{
2740 uint8_t fccauth[sizeof (umb_qmi_fcc_auth)];
2741
2742 if (sc->sc_cid == -1) {
2743 DPRINTF("%s: missing CID, cannot send FCC auth\n", DEVNAM(sc))do { } while (0);
2744 umb_allocate_cid(sc);
2745 return;
2746 }
2747	memcpy(fccauth, umb_qmi_fcc_auth, sizeof (fccauth));
2748 fccauth[UMB_QMI_CID_OFFS5] = sc->sc_cid;
2749 umb_cmd1(sc, MBIM_CID_DEVICE_CAPS1, MBIM_CMDOP_SET1,
2750 fccauth, sizeof (fccauth), umb_uuid_qmi_mbim);
2751}
2752
2753void
2754umb_packet_service(struct umb_softc *sc, int attach)
2755{
2756 struct mbim_cid_packet_service s;
2757
2758 DPRINTF("%s: %s packet service\n", DEVNAM(sc),do { } while (0)
2759 attach ? "attach" : "detach")do { } while (0);
2760 memset(&s, 0, sizeof (s))__builtin_memset((&s), (0), (sizeof (s)));
2761 s.action = htole32(attach ?((__uint32_t)(attach ? 0 : 1))
2762 MBIM_PKTSERVICE_ACTION_ATTACH : MBIM_PKTSERVICE_ACTION_DETACH)((__uint32_t)(attach ? 0 : 1));
2763 umb_cmd(sc, MBIM_CID_PACKET_SERVICE10, MBIM_CMDOP_SET1, &s, sizeof (s));
2764}
2765
2766void
2767umb_connect(struct umb_softc *sc)
2768{
2769 struct ifnet *ifp = GET_IFP(sc)(&(sc)->sc_if);
2770
2771 if (sc->sc_info.regstate == MBIM_REGSTATE_ROAMING4 && !sc->sc_roamingsc_info.enable_roaming) {
2772 log(LOG_INFO6, "%s: connection disabled in roaming network\n",
2773 DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname));
2774 return;
2775 }
2776 if (ifp->if_flags & IFF_DEBUG0x4)
2777 log(LOG_DEBUG7, "%s: connecting ...\n", DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname));
2778 umb_send_connect(sc, MBIM_CONNECT_ACTIVATE1);
2779}
2780
2781void
2782umb_disconnect(struct umb_softc *sc)
2783{
2784 struct ifnet *ifp = GET_IFP(sc)(&(sc)->sc_if);
2785
2786 if (ifp->if_flags & IFF_DEBUG0x4)
2787 log(LOG_DEBUG7, "%s: disconnecting ...\n", DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname));
2788 umb_send_connect(sc, MBIM_CONNECT_DEACTIVATE0);
2789}
2790
2791void
2792umb_send_connect(struct umb_softc *sc, int command)
2793{
2794 struct mbim_cid_connect *c;
2795 int off;
2796
2797 /* Too large for the stack */
2798 c = malloc(sizeof (*c), M_USBDEV102, M_WAIT0x0001|M_ZERO0x0008);
2799 c->sessionid = htole32(umb_session_id)((__uint32_t)(umb_session_id));
2800 c->command = htole32(command)((__uint32_t)(command));
2801 off = offsetof(struct mbim_cid_connect, data)__builtin_offsetof(struct mbim_cid_connect, data);
2802 if (!umb_addstr(c, sizeof (*c), &off, sc->sc_info.apn,
2803 sc->sc_info.apnlen, &c->access_offs, &c->access_size))
2804 goto done;
2805 /* XXX FIXME: support user name and passphrase */
2806 c->user_offs = htole32(0)((__uint32_t)(0));
2807 c->user_size = htole32(0)((__uint32_t)(0));
2808 c->passwd_offs = htole32(0)((__uint32_t)(0));
2809 c->passwd_size = htole32(0)((__uint32_t)(0));
2810 c->authprot = htole32(MBIM_AUTHPROT_NONE)((__uint32_t)(0));
2811 c->compression = htole32(MBIM_COMPRESSION_NONE)((__uint32_t)(0));
2812 c->iptype = htole32(MBIM_CONTEXT_IPTYPE_IPV4)((__uint32_t)(1));
2813#ifdef INET61
2814 /* XXX FIXME: support IPv6-only mode, too */
2815 if ((sc->sc_flags & UMBFLG_NO_INET60x0002) == 0 &&
2816 in6ifa_ifpforlinklocal(GET_IFP(sc)(&(sc)->sc_if), 0) != NULL((void *)0))
2817 c->iptype = htole32(MBIM_CONTEXT_IPTYPE_IPV4V6)((__uint32_t)(3));
2818#endif
2819 memcpy(c->context, umb_uuid_context_internet, sizeof (c->context))__builtin_memcpy((c->context), (umb_uuid_context_internet), (sizeof (c->context)));
2820 umb_cmd(sc, MBIM_CID_CONNECT12, MBIM_CMDOP_SET1, c, off);
2821done:
2822 free(c, M_USBDEV102, sizeof (*c));
2823 return;
2824}
2825
2826void
2827umb_qry_ipconfig(struct umb_softc *sc)
2828{
2829 struct mbim_cid_ip_configuration_info ipc;
2830
2831 memset(&ipc, 0, sizeof (ipc))__builtin_memset((&ipc), (0), (sizeof (ipc)));
2832 ipc.sessionid = htole32(umb_session_id)((__uint32_t)(umb_session_id));
2833 umb_cmd(sc, MBIM_CID_IP_CONFIGURATION15, MBIM_CMDOP_QRY0,
2834 &ipc, sizeof (ipc));
2835}
2836
2837void
2838umb_cmd(struct umb_softc *sc, int cid, int op, void *data, int len)
2839{
2840 umb_cmd1(sc, cid, op, data, len, umb_uuid_basic_connect);
2841}
2842
2843void
2844umb_cmd1(struct umb_softc *sc, int cid, int op, void *data, int len,
2845 uint8_t *uuid)
2846{
2847 struct mbim_h2f_cmd *cmd;
2848 int totlen;
2849
2850 /* XXX FIXME support sending fragments */
2851 if (sizeof (*cmd) + len > sc->sc_ctrl_len) {
2852 DPRINTF("%s: set %s msg too long: cannot send\n",do { } while (0)
2853 DEVNAM(sc), umb_cid2str(cid))do { } while (0);
2854 return;
2855 }
2856 cmd = sc->sc_ctrl_msg;
2857 memset(cmd, 0, sizeof (*cmd))__builtin_memset((cmd), (0), (sizeof (*cmd)));
2858 cmd->frag.nfrag = htole32(1)((__uint32_t)(1));
2859 memcpy(cmd->devid, uuid, sizeof (cmd->devid))__builtin_memcpy((cmd->devid), (uuid), (sizeof (cmd->devid)));
2860 cmd->cid = htole32(cid)((__uint32_t)(cid));
2861 cmd->op = htole32(op)((__uint32_t)(op));
2862 cmd->infolen = htole32(len)((__uint32_t)(len));
2863 totlen = sizeof (*cmd);
2864 if (len > 0) {
2865 memcpy(cmd + 1, data, len)__builtin_memcpy((cmd + 1), (data), (len));
2866 totlen += len;
2867 }
2868 umb_ctrl_msg(sc, MBIM_COMMAND_MSG3U, cmd, totlen);
2869}
2870
2871void
2872umb_command_done(struct umb_softc *sc, void *data, int len)
2873{
2874 struct mbim_f2h_cmddone *cmd = data;
2875 struct ifnet *ifp = GET_IFP(sc)(&(sc)->sc_if);
2876 uint32_t status;
2877 uint32_t cid;
2878 uint32_t infolen;
2879 int qmimsg = 0;
2880
2881 if (len < sizeof (*cmd)) {
2882 DPRINTF("%s: discard short %s message\n", DEVNAM(sc),do { } while (0)
2883 umb_request2str(letoh32(cmd->hdr.type)))do { } while (0);
2884 return;
2885 }
2886 cid = letoh32(cmd->cid)((__uint32_t)(cmd->cid));
2887 if (memcmp(cmd->devid, umb_uuid_basic_connect, sizeof (cmd->devid))__builtin_memcmp((cmd->devid), (umb_uuid_basic_connect), (sizeof (cmd->devid)))) {
2888 if (memcmp(cmd->devid, umb_uuid_qmi_mbim,__builtin_memcmp((cmd->devid), (umb_uuid_qmi_mbim), (sizeof (cmd->devid)))
2889 sizeof (cmd->devid))__builtin_memcmp((cmd->devid), (umb_uuid_qmi_mbim), (sizeof (cmd->devid)))) {
2890 DPRINTF("%s: discard %s message for other UUID '%s'\n",do { } while (0)
2891 DEVNAM(sc), umb_request2str(letoh32(cmd->hdr.type)),do { } while (0)
2892 umb_uuid2str(cmd->devid))do { } while (0);
2893 return;
2894 } else
2895 qmimsg = 1;
2896 }
2897
2898 status = letoh32(cmd->status)((__uint32_t)(cmd->status));
2899 switch (status) {
2900 case MBIM_STATUS_SUCCESS0:
2901 break;
2902#ifdef INET61
2903 case MBIM_STATUS_NO_DEVICE_SUPPORT9:
2904 if ((cid == MBIM_CID_CONNECT12) &&
2905 (sc->sc_flags & UMBFLG_NO_INET60x0002) == 0) {
2906 sc->sc_flags |= UMBFLG_NO_INET60x0002;
2907 if (ifp->if_flags & IFF_DEBUG0x4)
2908 log(LOG_ERR3,
2909 "%s: device does not support IPv6\n",
2910 DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname));
2911 }
2912 /* Re-trigger the connect, this time IPv4 only */
2913 usb_add_task(sc->sc_udev, &sc->sc_umb_task);
2914 return;
2915#endif
2916 case MBIM_STATUS_NOT_INITIALIZED14:
2917 if (ifp->if_flags & IFF_DEBUG0x4)
2918 log(LOG_ERR3, "%s: SIM not initialized (PIN missing)\n",
2919 DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname));
2920 return;
2921 case MBIM_STATUS_PIN_REQUIRED5:
2922 sc->sc_info.pin_state = UMB_PIN_REQUIRED0;
2923 /*FALLTHROUGH*/
2924 default:
2925 if (ifp->if_flags & IFF_DEBUG0x4)
2926 log(LOG_ERR3, "%s: set/qry %s failed: %s\n", DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname),
2927 umb_cid2str(cid)umb_val2descr(umb_cids, (cid)), umb_status2str(status)umb_val2descr(umb_status, (status)));
2928 return;
2929 }
2930
2931 infolen = letoh32(cmd->infolen)((__uint32_t)(cmd->infolen));
2932 if (len < sizeof (*cmd) + infolen) {
2933 DPRINTF("%s: discard truncated %s message (want %d, got %d)\n",do { } while (0)
2934 DEVNAM(sc), umb_cid2str(cid),do { } while (0)
2935 (int)sizeof (*cmd) + infolen, len)do { } while (0);
2936 return;
2937 }
2938 if (qmimsg) {
2939 if (sc->sc_flags & UMBFLG_FCC_AUTH_REQUIRED0x0001)
2940 umb_decode_qmi(sc, cmd->info, infolen);
2941 } else {
2942 DPRINTFN(2, "%s: set/qry %s done\n", DEVNAM(sc),do { } while (0)
2943 umb_cid2str(cid))do { } while (0);
2944 umb_decode_cid(sc, cid, cmd->info, infolen);
2945 }
2946}
2947
2948void
2949umb_decode_cid(struct umb_softc *sc, uint32_t cid, void *data, int len)
2950{
2951 int ok = 1;
2952
2953 switch (cid) {
2954 case MBIM_CID_DEVICE_CAPS1:
2955 ok = umb_decode_devices_caps(sc, data, len);
2956 break;
2957 case MBIM_CID_SUBSCRIBER_READY_STATUS2:
2958 ok = umb_decode_subscriber_status(sc, data, len);
2959 break;
2960 case MBIM_CID_RADIO_STATE3:
2961 ok = umb_decode_radio_state(sc, data, len);
2962 break;
2963 case MBIM_CID_PIN4:
2964 ok = umb_decode_pin(sc, data, len);
2965 break;
2966 case MBIM_CID_REGISTER_STATE9:
2967 ok = umb_decode_register_state(sc, data, len);
2968 break;
2969 case MBIM_CID_PACKET_SERVICE10:
2970 ok = umb_decode_packet_service(sc, data, len);
2971 break;
2972 case MBIM_CID_SIGNAL_STATE11:
2973 ok = umb_decode_signal_state(sc, data, len);
2974 break;
2975 case MBIM_CID_CONNECT12:
2976 ok = umb_decode_connect_info(sc, data, len);
2977 break;
2978 case MBIM_CID_IP_CONFIGURATION15:
2979 ok = umb_decode_ip_configuration(sc, data, len);
2980 break;
2981 default:
2982 /*
2983 * Note: the above list is incomplete and only contains
2984 * mandatory CIDs from the BASIC_CONNECT set.
2985 * So alternate values are not unusual.
2986 */
2987 DPRINTFN(4, "%s: ignore %s\n", DEVNAM(sc), umb_cid2str(cid))do { } while (0);
2988 break;
2989 }
2990 if (!ok)
2991 DPRINTF("%s: discard %s with bad info length %d\n",do { } while (0)
2992 DEVNAM(sc), umb_cid2str(cid), len)do { } while (0);
2993 return;
2994}
2995
2996void
2997umb_decode_qmi(struct umb_softc *sc, uint8_t *data, int len)
2998{
2999 uint8_t srv;
3000 uint16_t msg, tlvlen;
3001 uint32_t val;
3002
3003#define UMB_QMI_QMUXLEN6 6
3004 if (len < UMB_QMI_QMUXLEN6)
3005 goto tooshort;
3006
3007 srv = data[4];
3008 data += UMB_QMI_QMUXLEN6;
3009 len -= UMB_QMI_QMUXLEN6;
3010
3011#define UMB_GET16(p)((uint16_t)*p | (uint16_t)*(p + 1) << 8) ((uint16_t)*p | (uint16_t)*(p + 1) << 8)
3012#define UMB_GET32(p)((uint32_t)*p | (uint32_t)*(p + 1) << 8 | (uint32_t)*(p + 2) << 16 |(uint32_t)*(p + 3) << 24) ((uint32_t)*p | (uint32_t)*(p + 1) << 8 | \
3013 (uint32_t)*(p + 2) << 16 |(uint32_t)*(p + 3) << 24)
3014 switch (srv) {
3015 case 0: /* ctl */
3016#define UMB_QMI_CTLLEN6 6
3017 if (len < UMB_QMI_CTLLEN6)
3018 goto tooshort;
3019 msg = UMB_GET16(&data[2])((uint16_t)*&data[2] | (uint16_t)*(&data[2] + 1) << 8);
3020 tlvlen = UMB_GET16(&data[4])((uint16_t)*&data[4] | (uint16_t)*(&data[4] + 1) << 8);
3021 data += UMB_QMI_CTLLEN6;
3022 len -= UMB_QMI_CTLLEN6;
3023 break;
3024 case 2: /* dms */
3025#define UMB_QMI_DMSLEN7 7
3026 if (len < UMB_QMI_DMSLEN7)
3027 goto tooshort;
3028 msg = UMB_GET16(&data[3])((uint16_t)*&data[3] | (uint16_t)*(&data[3] + 1) << 8);
3029 tlvlen = UMB_GET16(&data[5])((uint16_t)*&data[5] | (uint16_t)*(&data[5] + 1) << 8);
3030 data += UMB_QMI_DMSLEN7;
3031 len -= UMB_QMI_DMSLEN7;
3032 break;
3033 default:
3034 DPRINTF("%s: discard QMI message for unknown service type %d\n",do { } while (0)
3035 DEVNAM(sc), srv)do { } while (0);
3036 return;
3037 }
3038
3039 if (len < tlvlen)
3040 goto tooshort;
3041
3042#define UMB_QMI_TLVLEN3 3
3043 while (len > 0) {
3044 if (len < UMB_QMI_TLVLEN3)
3045 goto tooshort;
3046 tlvlen = UMB_GET16(&data[1])((uint16_t)*&data[1] | (uint16_t)*(&data[1] + 1) << 8);
3047 if (len < UMB_QMI_TLVLEN3 + tlvlen)
3048 goto tooshort;
3049 switch (data[0]) {
3050 case 1: /* allocation info */
3051 if (msg == 0x0022) { /* Allocate CID */
3052 if (tlvlen != 2 || data[3] != 2) /* dms */
3053 break;
3054 sc->sc_cid = data[4];
3055 DPRINTF("%s: QMI CID %d allocated\n",do { } while (0)
3056 DEVNAM(sc), sc->sc_cid)do { } while (0);
3057 umb_newstate(sc, UMB_S_CID, UMB_NS_DONT_DROP0x0001);
3058 }
3059 break;
3060 case 2: /* response */
3061 if (tlvlen != sizeof (val))
3062 break;
3063 val = UMB_GET32(&data[3])((uint32_t)*&data[3] | (uint32_t)*(&data[3] + 1) << 8 | (uint32_t)*(&data[3] + 2) << 16 |(uint32_t)*(&data[3] + 3) << 24);
3064 switch (msg) {
3065 case 0x0022: /* Allocate CID */
3066 if (val != 0) {
3067 log(LOG_ERR3, "%s: allocation of QMI CID"
3068 " failed, error 0x%x\n", DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname),
3069 val);
3070 /* XXX how to proceed? */
3071 return;
3072 }
3073 break;
3074 case 0x555f: /* Send FCC Authentication */
3075 if (val == 0)
3076 DPRINTF("%s: send FCC "do { } while (0)
3077 "Authentication succeeded\n",do { } while (0)
3078 DEVNAM(sc))do { } while (0);
3079 else if (val == 0x001a0001)
3080 DPRINTF("%s: FCC Authentication "do { } while (0)
3081 "not required\n", DEVNAM(sc))do { } while (0);
3082 else
3083 log(LOG_INFO6, "%s: send FCC "
3084 "Authentication failed, "
3085 "error 0x%x\n", DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname), val);
3086
3087 /* FCC Auth is needed only once after power-on */
3088 sc->sc_flags &= ~UMBFLG_FCC_AUTH_REQUIRED0x0001;
3089
3090 /* Try to proceed anyway */
3091 DPRINTF("%s: init: turning radio on ...\n",do { } while (0)
3092 DEVNAM(sc))do { } while (0);
3093 umb_radio(sc, 1);
3094 break;
3095 default:
3096 break;
3097 }
3098 break;
3099 default:
3100 break;
3101 }
3102 data += UMB_QMI_TLVLEN3 + tlvlen;
3103 len -= UMB_QMI_TLVLEN3 + tlvlen;
3104 }
3105 return;
3106
3107tooshort:
3108 DPRINTF("%s: discard short QMI message\n", DEVNAM(sc))do { } while (0);
3109 return;
3110}
3111
3112void
3113umb_intr(struct usbd_xfer *xfer, void *priv, usbd_status status)
3114{
3115 struct umb_softc *sc = priv;
3116 struct ifnet *ifp = GET_IFP(sc)(&(sc)->sc_if);
3117 int total_len;
3118
3119 if (status != USBD_NORMAL_COMPLETION) {
3120 DPRINTF("%s: notification error: %s\n", DEVNAM(sc),do { } while (0)
3121 usbd_errstr(status))do { } while (0);
3122 if (status == USBD_STALLED)
3123 usbd_clear_endpoint_stall_async(sc->sc_ctrl_pipe);
3124 return;
3125 }
3126 usbd_get_xfer_status(xfer, NULL((void *)0), NULL((void *)0), &total_len, NULL((void *)0));
3127 if (total_len < UCDC_NOTIFICATION_LENGTH8) {
3128 DPRINTF("%s: short notification (%d<%d)\n", DEVNAM(sc),do { } while (0)
3129 total_len, UCDC_NOTIFICATION_LENGTH)do { } while (0);
3130 return;
3131 }
3132 if (sc->sc_intr_msg.bmRequestType != UCDC_NOTIFICATION0xa1) {
3133 DPRINTF("%s: unexpected notification (type=0x%02x)\n",do { } while (0)
3134 DEVNAM(sc), sc->sc_intr_msg.bmRequestType)do { } while (0);
3135 return;
3136 }
3137
3138 switch (sc->sc_intr_msg.bNotification) {
3139 case UCDC_N_NETWORK_CONNECTION0x00:
3140 if (ifp->if_flags & IFF_DEBUG0x4)
3141 log(LOG_DEBUG7, "%s: network %sconnected\n", DEVNAM(sc)(((struct umb_softc *)(sc))->sc_dev.dv_xname),
3142 UGETW(sc->sc_intr_msg.wValue)(*(u_int16_t *)(sc->sc_intr_msg.wValue)) ? "" : "dis");
3143 break;
3144 case UCDC_N_RESPONSE_AVAILABLE0x01:
3145 DPRINTFN(2, "%s: umb_intr: response available\n", DEVNAM(sc))do { } while (0);
3146 ++sc->sc_nresp;
3147 usb_add_task(sc->sc_udev, &sc->sc_get_response_task);
3148 break;
3149 case UCDC_N_CONNECTION_SPEED_CHANGE0x2a:
3150 DPRINTFN(2, "%s: umb_intr: connection speed changed\n",do { } while (0)
3151 DEVNAM(sc))do { } while (0);
3152 break;
3153 default:
3154 DPRINTF("%s: unexpected notification (0x%02x)\n",do { } while (0)
3155 DEVNAM(sc), sc->sc_intr_msg.bNotification)do { } while (0);
3156 break;
3157 }
3158}
3159
3160/*
3161 * Diagnostic routines
3162 */
3163#ifdef UMB_DEBUG
3164char *
3165umb_uuid2str(uint8_t uuid[MBIM_UUID_LEN16])
3166{
3167 static char uuidstr[2 * MBIM_UUID_LEN16 + 5];
3168
3169#define UUID_BFMT "%02X"
3170#define UUID_SEP "-"
3171 snprintf(uuidstr, sizeof (uuidstr),
3172 UUID_BFMT UUID_BFMT UUID_BFMT UUID_BFMT UUID_SEP
3173 UUID_BFMT UUID_BFMT UUID_SEP
3174 UUID_BFMT UUID_BFMT UUID_SEP
3175 UUID_BFMT UUID_BFMT UUID_SEP
3176 UUID_BFMT UUID_BFMT UUID_BFMT UUID_BFMT UUID_BFMT UUID_BFMT,
3177 uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5],
3178 uuid[6], uuid[7], uuid[8], uuid[9], uuid[10], uuid[11],
3179 uuid[12], uuid[13], uuid[14], uuid[15]);
3180 return uuidstr;
3181}
3182
3183void
3184umb_dump(void *buf, int len)
3185{
3186 int i = 0;
3187 uint8_t *c = buf;
3188
3189 if (len == 0)
3190 return;
3191 while (i < len) {
3192 if ((i % 16) == 0) {
3193 if (i > 0)
3194 addlog("\n");
3195 log(LOG_DEBUG7, "%4d: ", i);
3196 }
3197 addlog(" %02x", *c);
3198 c++;
3199 i++;
3200 }
3201 addlog("\n");
3202}
3203#endif /* UMB_DEBUG */

/usr/src/sys/net/ifq.h

1/* $OpenBSD: ifq.h,v 1.33 2021/03/10 10:21:48 jsg Exp $ */
2
3/*
4 * Copyright (c) 2015 David Gwynne <dlg@openbsd.org>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#ifndef _NET_IFQ_H_
20#define _NET_IFQ_H_
21
22struct ifnet;
23struct kstat;
24
25struct ifq_ops;
26
27struct ifqueue {
28 struct ifnet *ifq_if;
29 struct taskq *ifq_softnet;
30 union {
31 void *_ifq_softc;
32 /*
33 * a rings sndq is found by looking up an array of pointers.
34 * by default we only have one sndq and the default drivers
35 * dont use ifq_softc, so we can borrow it for the map until
36 * we need to allocate a proper map.
37 */
38 struct ifqueue *_ifq_ifqs[1];
39 } _ifq_ptr;
40#define ifq_softc_ifq_ptr._ifq_softc _ifq_ptr._ifq_softc
41#define ifq_ifqs_ifq_ptr._ifq_ifqs _ifq_ptr._ifq_ifqs
42
43 /* mbuf handling */
44 struct mutex ifq_mtx;
45 const struct ifq_ops *ifq_ops;
46 void *ifq_q;
47 struct mbuf_list ifq_free;
48 unsigned int ifq_len;
49 unsigned int ifq_oactive;
50
51 /* statistics */
52 uint64_t ifq_packets;
53 uint64_t ifq_bytes;
54 uint64_t ifq_qdrops;
55 uint64_t ifq_errors;
56 uint64_t ifq_mcasts;
57
58 struct kstat *ifq_kstat;
59
60 /* work serialisation */
61 struct mutex ifq_task_mtx;
62 struct task_list ifq_task_list;
63 void *ifq_serializer;
64 struct task ifq_bundle;
65
66 /* work to be serialised */
67 struct task ifq_start;
68 struct task ifq_restart;
69
70 /* properties */
71 unsigned int ifq_maxlen;
72 unsigned int ifq_idx;
73};
74
75struct ifiqueue {
76 struct ifnet *ifiq_if;
77 struct taskq *ifiq_softnet;
78 union {
79 void *_ifiq_softc;
80 struct ifiqueue *_ifiq_ifiqs[1];
81 } _ifiq_ptr;
82#define ifiq_softc_ifiq_ptr._ifiq_softc _ifiq_ptr._ifiq_softc
83#define ifiq_ifiqs_ifiq_ptr._ifiq_ifiqs _ifiq_ptr._ifiq_ifiqs
84
85 struct mutex ifiq_mtx;
86 struct mbuf_list ifiq_ml;
87 struct task ifiq_task;
88 unsigned int ifiq_pressure;
89
90 /* counters */
91 uint64_t ifiq_packets;
92 uint64_t ifiq_bytes;
93 uint64_t ifiq_qdrops;
94 uint64_t ifiq_errors;
95 uint64_t ifiq_mcasts;
96 uint64_t ifiq_noproto;
97
98 struct kstat *ifiq_kstat;
99
100 /* properties */
101 unsigned int ifiq_idx;
102};
103
104#ifdef _KERNEL1
105
106#define IFQ_MAXLEN256 256
107
108/*
109 *
110 * Interface Send Queues
111 *
112 * struct ifqueue sits between the network stack and a drivers
113 * transmission of packets. The high level view is that when the stack
114 * has finished generating a packet it hands it to a driver for
115 * transmission. It does this by queueing the packet on an ifqueue and
116 * notifying the driver to start transmission of the queued packets.
117 *
118 * A network device may have multiple contexts for the transmission
119 * of packets, ie, independent transmit rings. Such a network device,
120 * represented by a struct ifnet, would then have multiple ifqueue
121 * structures, each of which maps to an independent transmit ring.
122 *
123 * struct ifqueue also provides the point where conditioning of
124 * traffic (ie, priq and hfsc) is implemented, and provides some
125 * infrastructure to assist in the implementation of network drivers.
126 *
127 * = ifq API
128 *
129 * The ifq API provides functions for three distinct consumers:
130 *
131 * 1. The network stack
132 * 2. Traffic QoS/conditioning implementations
133 * 3. Network drivers
134 *
135 * == Network Stack API
136 *
137 * The network stack is responsible for initialising and destroying
138 * the ifqueue structures, changing the traffic conditioner on an
139 * interface, enqueuing packets for transmission, and notifying
140 * the driver to start transmission of a particular ifqueue.
141 *
142 * === ifq_init()
143 *
144 * During if_attach(), the network stack calls ifq_init to initialise
145 * the ifqueue structure. By default it configures the priq traffic
146 * conditioner.
147 *
148 * === ifq_destroy()
149 *
150 * The network stack calls ifq_destroy() during if_detach to tear down
151 * the ifqueue structure. It frees the traffic conditioner state, and
152 * frees any mbufs that were left queued.
153 *
154 * === ifq_attach()
155 *
156 * ifq_attach() is used to replace the current traffic conditioner on
157 * the ifqueue. All the pending mbufs are removed from the previous
158 * conditioner and requeued on the new.
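 *
 * For instance, a hedged sketch of swapping in a new conditioner, where
 * foo_ops (a const struct ifq_ops pointer) and foo_state are
 * hypothetical names for the new backend:
 *
 *	ifq_attach(&ifp->if_snd, foo_ops, foo_state);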
159 *
160 * === ifq_idx()
161 *
162 * ifq_idx() selects a specific ifqueue from the current ifnet
163 * structure for use in the transmission of the mbuf.
164 *
165 * === ifq_enqueue()
166 *
167 * ifq_enqueue() attempts to fit an mbuf onto the ifqueue. The
168 * current traffic conditioner may drop a packet to make space on the
169 * queue.
170 *
171 * === ifq_start()
172 *
173 * Once a packet has been successfully queued with ifq_enqueue(),
174 * the network card is notified with a call to ifq_start().
175 * Calls to ifq_start() run in the ifqueue serialisation context,
176 * guaranteeing that only one instance of ifp->if_qstart() will be
177 * running on behalf of a specific ifqueue in the system at any point
178 * in time.
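 *
 * As a hedged sketch of how the stack-side calls fit together (the error
 * handling shown is illustrative, not the canonical if_enqueue() code):
 *
 *	struct ifqueue *ifq;
 *	int error;
 *
 *	ifq = ifp->if_ifqs[ifq_idx(&ifp->if_snd, ifp->if_nifqs, m)];
 *	error = ifq_enqueue(ifq, m);	// conditioner may drop to make room
 *	if (error)
 *		return (error);		// the mbuf was already freed
 *	ifq_start(ifq);			// schedule the driver's if_qstart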
179 *
180 * == Traffic conditioners API
181 *
182 * The majority of interaction between struct ifqueue and a traffic
183 * conditioner occurs via the callbacks a traffic conditioner provides
184 * in an instance of struct ifq_ops.
185 *
186 * XXX document ifqop_*
187 *
188 * The ifqueue API implements the locking on behalf of the conditioning
189 * implementations so conditioners only have to reject or keep mbufs.
190 * If something needs to inspect a conditioners internals, the queue lock
191 * needs to be taken to allow for a consistent or safe view. The queue
192 * lock may be taken and released with ifq_q_enter() and ifq_q_leave().
193 *
194 * === ifq_q_enter()
195 *
196 * Code wishing to access a conditioners internals may take the queue
197 * lock with ifq_q_enter(). The caller must pass a reference to the
198 * conditioners ifq_ops structure so the infrastructure can ensure the
199 * caller is able to understand the internals. ifq_q_enter() returns
200 * a pointer to the conditioners internal structures, or NULL if the
201 * ifq_ops did not match the current conditioner.
202 *
203 * === ifq_q_leave()
204 *
205 * The queue lock acquired with ifq_q_enter() is released with
206 * ifq_q_leave().
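 *
 * A minimal sketch of the enter/leave pairing, assuming a hypothetical
 * conditioner "foo" with ops pointer foo_ops and private state
 * struct foo_q:
 *
 *	struct foo_q *fq;
 *
 *	fq = ifq_q_enter(ifq, foo_ops);
 *	if (fq == NULL)
 *		return;		// some other conditioner is attached
 *	// inspect fq here while the queue lock is held
 *	ifq_q_leave(ifq, fq);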
207 *
208 * === ifq_mfreem() and ifq_mfreeml()
209 *
210 * A goal of the API is to avoid freeing an mbuf while mutexes are
211 * held. Because the ifq API manages the lock on behalf of the backend
212 * ifqops, the backend should not directly free mbufs. If a conditioner
213 * backend needs to drop a packet during the handling of ifqop_deq_begin,
214 * it may free it by calling ifq_mfreem(). This accounts for the drop,
215 * and schedules the free of the mbuf outside the hold of ifq_mtx.
216 * ifq_mfreeml() takes an mbuf list as an argument instead.
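 *
 * For illustration only, a backend's deq_begin handling might drop stale
 * packets like this; foo_q, its fq_ml mbuf list, and PKT_EXPIRED() are
 * assumed names, not part of this API:
 *
 *	struct mbuf *m;
 *
 *	for (;;) {
 *		m = ml_dequeue(&fq->fq_ml);
 *		if (m == NULL || !PKT_EXPIRED(m))
 *			break;
 *		ifq_mfreem(ifq, m);	// counted as a qdrop, freed later
 *	}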
217 *
218 *
219 * == Network Driver API
220 *
221 * The API used by network drivers is mostly documented in the
222 * ifq_dequeue(9) manpage except for ifq_serialize().
223 *
224 * === ifq_serialize()
225 *
226 * A driver may run arbitrary work in the ifqueue serialiser context
227 * via ifq_serialize(). The work to be done is represented by a task
228 * that has been prepared with task_set.
229 *
230 * The work will be run in series with any other work dispatched by
231 * ifq_start(), ifq_restart(), or other ifq_serialize() calls.
232 *
233 * Because the work may be run on another CPU, the lifetime of the
234 * task and the work it represents can extend beyond the end of the
235 * call to ifq_serialize() that dispatched it.
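 *
 * A sketch of dispatching driver work this way; drv_update() and the
 * sc_update_task member are illustrative names:
 *
 *	task_set(&sc->sc_update_task, drv_update, sc);
 *	ifq_serialize(&ifp->if_snd, &sc->sc_update_task);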
236 *
237 *
238 * = ifqueue work serialisation
239 *
240 * ifqueues provide a mechanism to dispatch work to be run in a single
241 * context. Work in this mechanism is represented by task structures.
242 *
243 * The tasks are run in a context similar to a taskq serviced by a
244 * single kernel thread, except the work is run immediately by the
245 * first CPU that dispatches work. If a second CPU attempts to dispatch
246 * additional tasks while the first is still running, it will be queued
247 * to be run by the first CPU. The second CPU will return immediately.
248 *
249 * = MP Safe Network Drivers
250 *
251 * An MP safe network driver is one in which its start routine can be
252 * called by the network stack without holding the big kernel lock.
253 *
254 * == Attach
255 *
256 * A driver advertises its ability to run its start routine without
257 * the kernel lock by setting the IFXF_MPSAFE flag in ifp->if_xflags
258 * before calling if_attach(). Advertising an MPSAFE start routine
259 * also implies that the driver understands that a network card can
260 * have multiple rings or transmit queues, and therefore provides
261 * if_qstart function (which takes an ifqueue pointer) instead of an
262 * if_start function (which takes an ifnet pointer).
263 *
264 * If the hardware supports multiple transmit rings, it advertises
265 * support for multiple rings to the network stack with if_attach_queues()
266 * after the call to if_attach(). if_attach_queues allocates a struct
267 * ifqueue for each hardware ring, which can then be initialised by
268 * the driver with data for each ring.
269 *
270 * void drv_start(struct ifqueue *);
271 *
272 * void
273 * drv_attach()
274 * {
275 * ...
276 * ifp->if_xflags = IFXF_MPSAFE;
277 * ifp->if_qstart = drv_start;
278 * if_attach(ifp);
279 *
280 * if_attach_queues(ifp, DRV_NUM_TX_RINGS);
281 * for (i = 0; i < DRV_NUM_TX_RINGS; i++) {
282 * struct ifqueue *ifq = ifp->if_ifqs[i];
283 * struct drv_tx_ring *ring = &sc->sc_tx_rings[i];
284 *
285 * ifq->ifq_softc = ring;
286 * ring->ifq = ifq;
287 * }
288 * }
289 *
290 * The network stack will then call ifp->if_qstart via ifq_start()
291 * to guarantee there is only one instance of that function running
292 * for each ifq in the system, and to serialise it with other work
293 * the driver may provide.
294 *
295 * == Initialise
296 *
297 * When the stack requests an interface be brought up (ie, drv_ioctl()
298 * is called to handle SIOCSIFFLAGS with IFF_UP set in ifp->if_flags)
299 * drivers should set IFF_RUNNING in ifp->if_flags, and then call
300 * ifq_clr_oactive() against each ifq.
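 *
 * For example (a sketch; SET() is the usual bit-set macro):
 *
 *	SET(ifp->if_flags, IFF_RUNNING);
 *	for (i = 0; i < ifp->if_nifqs; i++)
 *		ifq_clr_oactive(ifp->if_ifqs[i]);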
301 *
302 * == if_start
303 *
304 * ifq_start() checks that IFF_RUNNING is set in ifp->if_flags, that
305 * ifq_is_oactive() does not return true, and that there are pending
306 * packets to transmit via a call to ifq_len(). Therefore, drivers are
307 * no longer responsible for doing this themselves.
308 *
309 * If a driver should not transmit packets while its link is down, use
310 * ifq_purge() to flush pending packets from the transmit queue.
311 *
312 * Drivers for hardware should use the following pattern to transmit
313 * packets:
314 *
315 * void
316 * drv_start(struct ifqueue *ifq)
317 * {
318 * struct drv_tx_ring *ring = ifq->ifq_softc;
319 * struct ifnet *ifp = ifq->ifq_if;
320 * struct drv_softc *sc = ifp->if_softc;
321 * struct mbuf *m;
322 * int kick = 0;
323 *
324 * if (NO_LINK) {
325 * ifq_purge(ifq);
326 * return;
327 * }
328 *
329 * for (;;) {
330 * if (NO_SPACE(ring)) {
331 * ifq_set_oactive(ifq);
332 * break;
333 * }
334 *
335 * m = ifq_dequeue(ifq);
336 * if (m == NULL)
337 * break;
338 *
339 * if (drv_encap(sc, ring, m) != 0) { // map and fill ring
340 * m_freem(m);
341 * continue;
342 * }
343 *
344 * bpf_mtap();
345 * }
346 *
347 * drv_kick(ring); // notify hw of new descriptors on the ring
348 * }
349 *
350 * == Transmission completion
351 *
352 * The following pattern should be used for transmit queue interrupt
353 * processing:
354 *
355 * void
356 * drv_txeof(struct drv_tx_ring *ring)
357 * {
358 * struct ifqueue *ifq = ring->ifq;
359 *
360 * while (COMPLETED_PKTS(ring)) {
361 * // unmap packets, m_freem() the mbufs.
362 * }
363 *
364 * if (ifq_is_oactive(ifq))
365 * ifq_restart(ifq);
366 * }
367 *
368 * == Stop
369 *
370 * Bringing an interface down (ie, IFF_UP was cleared in ifp->if_flags)
371 * should clear IFF_RUNNING in ifp->if_flags, and guarantee the start
372 * routine is not running before freeing any resources it uses:
373 *
374 * void
375 * drv_down(struct drv_softc *sc)
376 * {
377 * struct ifnet *ifp = &sc->sc_if;
378 * struct ifqueue *ifq;
379 * int i;
380 *
381 * CLR(ifp->if_flags, IFF_RUNNING);
382 * DISABLE_INTERRUPTS();
383 *
384 * for (i = 0; i < sc->sc_num_queues; i++) {
385 * ifq = ifp->if_ifqs[i];
386 * ifq_barrier(ifq);
387 * }
388 *
389 * intr_barrier(sc->sc_ih);
390 *
391 * FREE_RESOURCES();
392 *
393 * for (i = 0; i < sc->sc_num_queues; i++) {
394 * ifq = ifp->if_ifqs[i];
395 * ifq_clr_oactive(ifq);
396 * }
397 * }
398 *
399 */
400
401struct ifq_ops {
402 unsigned int (*ifqop_idx)(unsigned int,
403 const struct mbuf *);
404 struct mbuf *(*ifqop_enq)(struct ifqueue *, struct mbuf *);
405 struct mbuf *(*ifqop_deq_begin)(struct ifqueue *, void **);
406 void (*ifqop_deq_commit)(struct ifqueue *,
407 struct mbuf *, void *);
408 void (*ifqop_purge)(struct ifqueue *,
409 struct mbuf_list *);
410 void *(*ifqop_alloc)(unsigned int, void *);
411 void (*ifqop_free)(unsigned int, void *);
412};
413
414extern const struct ifq_ops * const ifq_priq_ops;
415
416/*
417 * Interface send queues.
418 */
419
420void ifq_init(struct ifqueue *, struct ifnet *, unsigned int);
421void ifq_attach(struct ifqueue *, const struct ifq_ops *, void *);
422void ifq_destroy(struct ifqueue *);
423void ifq_add_data(struct ifqueue *, struct if_data *);
424int ifq_enqueue(struct ifqueue *, struct mbuf *);
425void ifq_start(struct ifqueue *);
426struct mbuf *ifq_deq_begin(struct ifqueue *);
427void ifq_deq_commit(struct ifqueue *, struct mbuf *);
428void ifq_deq_rollback(struct ifqueue *, struct mbuf *);
429struct mbuf *ifq_dequeue(struct ifqueue *);
430int ifq_hdatalen(struct ifqueue *);
431void ifq_mfreem(struct ifqueue *, struct mbuf *);
432void ifq_mfreeml(struct ifqueue *, struct mbuf_list *);
433unsigned int ifq_purge(struct ifqueue *);
434void *ifq_q_enter(struct ifqueue *, const struct ifq_ops *);
435void ifq_q_leave(struct ifqueue *, void *);
436void ifq_serialize(struct ifqueue *, struct task *);
437void ifq_barrier(struct ifqueue *);
438
439
440int ifq_deq_sleep(struct ifqueue *, struct mbuf **, int, int,
441 const char *, volatile unsigned int *,
442 volatile unsigned int *);
443
444#define ifq_len(_ifq)((_ifq)->ifq_len) ((_ifq)->ifq_len)
445#define ifq_empty(_ifq)(((_ifq)->ifq_len) == 0) (ifq_len(_ifq)((_ifq)->ifq_len) == 0)
446#define ifq_set_maxlen(_ifq, _l)((_ifq)->ifq_maxlen = (_l)) ((_ifq)->ifq_maxlen = (_l))
447
448static inline int
449ifq_is_priq(struct ifqueue *ifq)
450{
451 return (ifq->ifq_ops == ifq_priq_ops);
452}
453
454static inline void
455ifq_set_oactive(struct ifqueue *ifq)
456{
457 ifq->ifq_oactive = 1;
458}
459
460static inline void
461ifq_clr_oactive(struct ifqueue *ifq)
462{
463 ifq->ifq_oactive = 0;
464}
465
466static inline unsigned int
467ifq_is_oactive(struct ifqueue *ifq)
468{
469 return (ifq->ifq_oactive);
10. Returning without writing to 'ifq->ifq_oactive', which participates in a condition later
11. Returning zero, which participates in a condition later
470}
471
472static inline void
473ifq_restart(struct ifqueue *ifq)
474{
475 ifq_serialize(ifq, &ifq->ifq_restart);
476}
477
478static inline unsigned int
479ifq_idx(struct ifqueue *ifq, unsigned int nifqs, const struct mbuf *m)
480{
481 return ((*ifq->ifq_ops->ifqop_idx)(nifqs, m));
482}
483
484/* ifiq */
485
486void ifiq_init(struct ifiqueue *, struct ifnet *, unsigned int);
487void ifiq_destroy(struct ifiqueue *);
488int ifiq_input(struct ifiqueue *, struct mbuf_list *);
489int ifiq_enqueue(struct ifiqueue *, struct mbuf *);
490void ifiq_add_data(struct ifiqueue *, struct if_data *);
491
492#define ifiq_len(_ifiq)((&(_ifiq)->ifiq_ml)->ml_len) ml_len(&(_ifiq)->ifiq_ml)((&(_ifiq)->ifiq_ml)->ml_len)
493#define ifiq_empty(_ifiq)((&(_ifiq)->ifiq_ml)->ml_len == 0) ml_empty(&(_ifiq)->ifiq_ml)((&(_ifiq)->ifiq_ml)->ml_len == 0)
494
495#endif /* _KERNEL */
496
497#endif /* _NET_IFQ_H_ */