Bug Summary

File: kern/uipc_usrreq.c
Warning: line 265, column 10
Access to field 'unp_socket' results in a dereference of a null pointer (loaded from field 'unp_conn')

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name uipc_usrreq.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I 
/usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/kern/uipc_usrreq.c
1/* $OpenBSD: uipc_usrreq.c,v 1.161 2021/12/29 07:15:13 anton Exp $ */
2/* $NetBSD: uipc_usrreq.c,v 1.18 1996/02/09 19:00:50 christos Exp $ */
3
4/*
5 * Copyright (c) 1982, 1986, 1989, 1991, 1993
6 * The Regents of the University of California. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * @(#)uipc_usrreq.c 8.3 (Berkeley) 1/4/94
33 */
34
35#include <sys/param.h>
36#include <sys/systm.h>
37#include <sys/proc.h>
38#include <sys/filedesc.h>
39#include <sys/domain.h>
40#include <sys/protosw.h>
41#include <sys/queue.h>
42#include <sys/socket.h>
43#include <sys/socketvar.h>
44#include <sys/unpcb.h>
45#include <sys/un.h>
46#include <sys/namei.h>
47#include <sys/vnode.h>
48#include <sys/file.h>
49#include <sys/stat.h>
50#include <sys/mbuf.h>
51#include <sys/task.h>
52#include <sys/pledge.h>
53#include <sys/pool.h>
54#include <sys/rwlock.h>
55#include <sys/mutex.h>
56#include <sys/sysctl.h>
57#include <sys/lock.h>
58
59#include "kcov.h"
60#if NKCOV0 > 0
61#include <sys/kcov.h>
62#endif
63
64/*
65 * Locks used to protect global data and struct members:
66 * I immutable after creation
67 * D unp_df_lock
68 * G unp_gc_lock
69 * U unp_lock
70 * R unp_rights_mtx
71 * a atomic
72 */
73
74struct rwlock unp_lock = RWLOCK_INITIALIZER("unplock"){ 0, "unplock" };
75struct rwlock unp_df_lock = RWLOCK_INITIALIZER("unpdflk"){ 0, "unpdflk" };
76struct rwlock unp_gc_lock = RWLOCK_INITIALIZER("unpgclk"){ 0, "unpgclk" };
77
78struct mutex unp_rights_mtx = MUTEX_INITIALIZER(IPL_SOFTNET){ ((void *)0), ((((0x5)) > 0x0 && ((0x5)) < 0x9
) ? 0x9 : ((0x5))), 0x0 }
;
79
80/*
81 * Stack of sets of files that were passed over a socket but were
82 * not received and need to be closed.
83 */
84struct unp_deferral {
85 SLIST_ENTRY(unp_deferral)struct { struct unp_deferral *sle_next; } ud_link; /* [D] */
86 int ud_n; /* [I] */
87 /* followed by ud_n struct fdpass */
88 struct fdpass ud_fp[]; /* [I] */
89};
90
91void uipc_setaddr(const struct unpcb *, struct mbuf *);
92void unp_discard(struct fdpass *, int);
93void unp_remove_gcrefs(struct fdpass *, int);
94void unp_restore_gcrefs(struct fdpass *, int);
95void unp_scan(struct mbuf *, void (*)(struct fdpass *, int));
96int unp_nam2sun(struct mbuf *, struct sockaddr_un **, size_t *);
97
98struct pool unpcb_pool;
99struct task unp_gc_task = TASK_INITIALIZER(unp_gc, NULL){{ ((void *)0), ((void *)0) }, (unp_gc), (((void *)0)), 0 };
100
101/*
102 * Unix communications domain.
103 *
104 * TODO:
105 * RDM
106 * rethink name space problems
107 * need a proper out-of-band
108 */
109const struct sockaddr sun_noname = { sizeof(sun_noname), AF_UNIX1 };
110
111/* [G] list of all UNIX domain sockets, for unp_gc() */
112LIST_HEAD(unp_head, unpcb)struct unp_head { struct unpcb *lh_first; } unp_head =
113 LIST_HEAD_INITIALIZER(unp_head){ ((void *)0) };
114/* [D] list of sets of files that were sent over sockets that are now closed */
115SLIST_HEAD(,unp_deferral)struct { struct unp_deferral *slh_first; } unp_deferred =
116 SLIST_HEAD_INITIALIZER(unp_deferred){ ((void *)0) };
117
118ino_t unp_ino; /* [U] prototype for fake inode numbers */
119int unp_rights; /* [R] file descriptors in flight */
120int unp_defer; /* [G] number of deferred fp to close by the GC task */
121int unp_gcing; /* [G] GC task currently running */
122
123void
124unp_init(void)
125{
126 pool_init(&unpcb_pool, sizeof(struct unpcb), 0,
127 IPL_SOFTNET0x5, 0, "unpcb", NULL((void *)0));
128}
129
130void
131uipc_setaddr(const struct unpcb *unp, struct mbuf *nam)
132{
133 if (unp != NULL((void *)0) && unp->unp_addr != NULL((void *)0)) {
134 nam->m_lenm_hdr.mh_len = unp->unp_addr->m_lenm_hdr.mh_len;
135 memcpy(mtod(nam, caddr_t), mtod(unp->unp_addr, caddr_t),__builtin_memcpy((((caddr_t)((nam)->m_hdr.mh_data))), (((caddr_t
)((unp->unp_addr)->m_hdr.mh_data))), (nam->m_hdr.mh_len
))
136 nam->m_len)__builtin_memcpy((((caddr_t)((nam)->m_hdr.mh_data))), (((caddr_t
)((unp->unp_addr)->m_hdr.mh_data))), (nam->m_hdr.mh_len
))
;
137 } else {
138 nam->m_lenm_hdr.mh_len = sizeof(sun_noname);
139 memcpy(mtod(nam, struct sockaddr *), &sun_noname,__builtin_memcpy((((struct sockaddr *)((nam)->m_hdr.mh_data
))), (&sun_noname), (nam->m_hdr.mh_len))
140 nam->m_len)__builtin_memcpy((((struct sockaddr *)((nam)->m_hdr.mh_data
))), (&sun_noname), (nam->m_hdr.mh_len))
;
141 }
142}
143
144int
145uipc_usrreq(struct socket *so, int req, struct mbuf *m, struct mbuf *nam,
146 struct mbuf *control, struct proc *p)
147{
148 struct unpcb *unp = sotounpcb(so)((struct unpcb *)((so)->so_pcb));
149 struct unpcb *unp2;
150 struct socket *so2;
151 int error = 0;
152
153 if (req == PRU_CONTROL11)
1
Assuming 'req' is not equal to PRU_CONTROL
2
Taking false branch
154 return (EOPNOTSUPP45);
155 if (req != PRU_SEND9 && control && control->m_lenm_hdr.mh_len) {
3
Assuming 'req' is equal to PRU_SEND
156 error = EOPNOTSUPP45;
157 goto release;
158 }
159 if (unp == NULL((void *)0)) {
4
Assuming 'unp' is not equal to NULL
5
Taking false branch
160 error = EINVAL22;
161 goto release;
162 }
163
164 switch (req) {
6
Control jumps to 'case 9:' at line 238
165
166 case PRU_BIND2:
167 error = unp_bind(unp, nam, p);
168 break;
169
170 case PRU_LISTEN3:
171 if (unp->unp_vnode == NULL((void *)0))
172 error = EINVAL22;
173 break;
174
175 case PRU_CONNECT4:
176 error = unp_connect(so, nam, p);
177 break;
178
179 case PRU_CONNECT217:
180 error = unp_connect2(so, (struct socket *)nam);
181 if (!error) {
182 unp->unp_connid.uid = p->p_ucred->cr_uid;
183 unp->unp_connid.gid = p->p_ucred->cr_gid;
184 unp->unp_connid.pid = p->p_p->ps_pid;
185 unp->unp_flags |= UNP_FEIDS0x01;
186 unp2 = sotounpcb((struct socket *)nam)((struct unpcb *)(((struct socket *)nam)->so_pcb));
187 unp2->unp_connid.uid = p->p_ucred->cr_uid;
188 unp2->unp_connid.gid = p->p_ucred->cr_gid;
189 unp2->unp_connid.pid = p->p_p->ps_pid;
190 unp2->unp_flags |= UNP_FEIDS0x01;
191 }
192 break;
193
194 case PRU_DISCONNECT6:
195 unp_disconnect(unp);
196 break;
197
198 case PRU_ACCEPT5:
199 /*
200 * Pass back name of connected socket,
201 * if it was bound and we are still connected
202 * (our peer may have closed already!).
203 */
204 uipc_setaddr(unp->unp_conn, nam);
205 break;
206
207 case PRU_SHUTDOWN7:
208 socantsendmore(so);
209 unp_shutdown(unp);
210 break;
211
212 case PRU_RCVD8:
213 switch (so->so_type) {
214
215 case SOCK_DGRAM2:
216 panic("uipc 1");
217 /*NOTREACHED*/
218
219 case SOCK_STREAM1:
220 case SOCK_SEQPACKET5:
221 if (unp->unp_conn == NULL((void *)0))
222 break;
223 so2 = unp->unp_conn->unp_socket;
224 /*
225 * Adjust backpressure on sender
226 * and wakeup any waiting to write.
227 */
228 so2->so_snd.sb_mbcnt = so->so_rcv.sb_mbcnt;
229 so2->so_snd.sb_cc = so->so_rcv.sb_cc;
230 sowwakeup(so2);
231 break;
232
233 default:
234 panic("uipc 2");
235 }
236 break;
237
238 case PRU_SEND9:
239 if (control) {
7
Assuming 'control' is non-null
8
Taking true branch
240 sounlock(so, SL_LOCKED0x42);
241 error = unp_internalize(control, p);
9
Calling 'unp_internalize'
39
Returning from 'unp_internalize'
242 solock(so);
243 if (error
39.1
'error' is 0
)
40
Taking false branch
244 break;
245 }
246 switch (so->so_type) {
41
Control jumps to 'case 2:' at line 248
247
248 case SOCK_DGRAM2: {
249 const struct sockaddr *from;
250
251 if (nam) {
42
Assuming 'nam' is non-null
43
Taking true branch
252 if (unp->unp_conn) {
44
Assuming field 'unp_conn' is null
45
Taking false branch
253 error = EISCONN56;
254 break;
255 }
256 error = unp_connect(so, nam, p);
257 if (error)
46
Assuming 'error' is 0
47
Taking false branch
258 break;
259 } else {
260 if (unp->unp_conn == NULL((void *)0)) {
261 error = ENOTCONN57;
262 break;
263 }
264 }
265 so2 = unp->unp_conn->unp_socket;
48
Access to field 'unp_socket' results in a dereference of a null pointer (loaded from field 'unp_conn')
266 if (unp->unp_addr)
267 from = mtod(unp->unp_addr, struct sockaddr *)((struct sockaddr *)((unp->unp_addr)->m_hdr.mh_data));
268 else
269 from = &sun_noname;
270 if (sbappendaddr(so2, &so2->so_rcv, from, m, control)) {
271 sorwakeup(so2);
272 m = NULL((void *)0);
273 control = NULL((void *)0);
274 } else
275 error = ENOBUFS55;
276 if (nam)
277 unp_disconnect(unp);
278 break;
279 }
280
281 case SOCK_STREAM1:
282 case SOCK_SEQPACKET5:
283 if (so->so_state & SS_CANTSENDMORE0x010) {
284 error = EPIPE32;
285 break;
286 }
287 if (unp->unp_conn == NULL((void *)0)) {
288 error = ENOTCONN57;
289 break;
290 }
291 so2 = unp->unp_conn->unp_socket;
292 /*
293 * Send to paired receive port, and then raise
294 * send buffer counts to maintain backpressure.
295 * Wake up readers.
296 */
297 if (control) {
298 if (sbappendcontrol(so2, &so2->so_rcv, m,
299 control)) {
300 control = NULL((void *)0);
301 } else {
302 error = ENOBUFS55;
303 break;
304 }
305 } else if (so->so_type == SOCK_SEQPACKET5)
306 sbappendrecord(so2, &so2->so_rcv, m);
307 else
308 sbappend(so2, &so2->so_rcv, m);
309 so->so_snd.sb_mbcnt = so2->so_rcv.sb_mbcnt;
310 so->so_snd.sb_cc = so2->so_rcv.sb_cc;
311 if (so2->so_rcv.sb_cc > 0)
312 sorwakeup(so2);
313 m = NULL((void *)0);
314 break;
315
316 default:
317 panic("uipc 4");
318 }
319 /* we need to undo unp_internalize in case of errors */
320 if (control && error)
321 unp_dispose(control);
322 break;
323
324 case PRU_ABORT10:
325 unp_detach(unp);
326 /*
327 * As long as `unp_lock' is taken before entering
328 * uipc_usrreq() releasing it here would lead to a
329 * double unlock.
330 */
331 sofree(so, SL_NOUNLOCK0x00);
332 break;
333
334 case PRU_SENSE12: {
335 struct stat *sb = (struct stat *)m;
336
337 sb->st_blksize = so->so_snd.sb_hiwat;
338 sb->st_dev = NODEV(dev_t)(-1);
339 if (unp->unp_ino == 0)
340 unp->unp_ino = unp_ino++;
341 sb->st_atim.tv_sec =
342 sb->st_mtim.tv_sec =
343 sb->st_ctim.tv_sec = unp->unp_ctime.tv_sec;
344 sb->st_atim.tv_nsec =
345 sb->st_mtim.tv_nsec =
346 sb->st_ctim.tv_nsec = unp->unp_ctime.tv_nsec;
347 sb->st_ino = unp->unp_ino;
348 break;
349 }
350
351 case PRU_RCVOOB13:
352 case PRU_SENDOOB14:
353 error = EOPNOTSUPP45;
354 break;
355
356 case PRU_SOCKADDR15:
357 uipc_setaddr(unp, nam);
358 break;
359
360 case PRU_PEERADDR16:
361 uipc_setaddr(unp->unp_conn, nam);
362 break;
363
364 case PRU_SLOWTIMO19:
365 break;
366
367 default:
368 panic("uipc_usrreq");
369 }
370release:
371 if (req != PRU_RCVD8 && req != PRU_RCVOOB13 && req != PRU_SENSE12) {
372 m_freem(control);
373 m_freem(m);
374 }
375 return (error);
376}
377
378/*
379 * Both send and receive buffers are allocated PIPSIZ bytes of buffering
380 * for stream sockets, although the total for sender and receiver is
381 * actually only PIPSIZ.
382 * Datagram sockets really use the sendspace as the maximum datagram size,
383 * and don't really want to reserve the sendspace. Their recvspace should
384 * be large enough for at least one max-size datagram plus address.
385 */
386#define PIPSIZ8192 8192
387u_int unpst_sendspace = PIPSIZ8192;
388u_int unpst_recvspace = PIPSIZ8192;
389u_int unpsq_sendspace = PIPSIZ8192;
390u_int unpsq_recvspace = PIPSIZ8192;
391u_int unpdg_sendspace = 2*1024; /* really max datagram size */
392u_int unpdg_recvspace = 16*1024;
393
394const struct sysctl_bounded_args unpstctl_vars[] = {
395 { UNPCTL_RECVSPACE1, &unpst_recvspace, 0, SB_MAX(2*1024*1024) },
396 { UNPCTL_SENDSPACE2, &unpst_sendspace, 0, SB_MAX(2*1024*1024) },
397};
398const struct sysctl_bounded_args unpsqctl_vars[] = {
399 { UNPCTL_RECVSPACE1, &unpsq_recvspace, 0, SB_MAX(2*1024*1024) },
400 { UNPCTL_SENDSPACE2, &unpsq_sendspace, 0, SB_MAX(2*1024*1024) },
401};
402const struct sysctl_bounded_args unpdgctl_vars[] = {
403 { UNPCTL_RECVSPACE1, &unpdg_recvspace, 0, SB_MAX(2*1024*1024) },
404 { UNPCTL_SENDSPACE2, &unpdg_sendspace, 0, SB_MAX(2*1024*1024) },
405};
406
407int
408uipc_attach(struct socket *so, int proto)
409{
410 struct unpcb *unp;
411 int error;
412
413 rw_assert_wrlock(&unp_lock);
414
415 if (so->so_pcb)
416 return EISCONN56;
417 if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
418 switch (so->so_type) {
419
420 case SOCK_STREAM1:
421 error = soreserve(so, unpst_sendspace, unpst_recvspace);
422 break;
423
424 case SOCK_SEQPACKET5:
425 error = soreserve(so, unpsq_sendspace, unpsq_recvspace);
426 break;
427
428 case SOCK_DGRAM2:
429 error = soreserve(so, unpdg_sendspace, unpdg_recvspace);
430 break;
431
432 default:
433 panic("unp_attach");
434 }
435 if (error)
436 return (error);
437 }
438 unp = pool_get(&unpcb_pool, PR_NOWAIT0x0002|PR_ZERO0x0008);
439 if (unp == NULL((void *)0))
440 return (ENOBUFS55);
441 unp->unp_socket = so;
442 so->so_pcb = unp;
443 getnanotime(&unp->unp_ctime);
444
445 /*
446 * Enforce `unp_gc_lock' -> `solock()' lock order.
447 */
448 /*
449 * We also release the lock on listening socket and on our peer
450 * socket when called from unp_connect(). This is safe. The
451 * listening socket protected by vnode(9) lock. The peer socket
452 * has 'UNP_CONNECTING' flag set.
453 */
454 sounlock(so, SL_LOCKED0x42);
455 rw_enter_write(&unp_gc_lock);
456 LIST_INSERT_HEAD(&unp_head, unp, unp_link)do { if (((unp)->unp_link.le_next = (&unp_head)->lh_first
) != ((void *)0)) (&unp_head)->lh_first->unp_link.le_prev
= &(unp)->unp_link.le_next; (&unp_head)->lh_first
= (unp); (unp)->unp_link.le_prev = &(&unp_head)->
lh_first; } while (0)
;
457 rw_exit_write(&unp_gc_lock);
458 solock(so);
459 return (0);
460}
461
462int
463uipc_detach(struct socket *so)
464{
465 struct unpcb *unp = sotounpcb(so)((struct unpcb *)((so)->so_pcb));
466
467 if (unp == NULL((void *)0))
468 return (EINVAL22);
469
470 unp_detach(unp);
471
472 return (0);
473}
474
475int
476uipc_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
477 size_t newlen)
478{
479 int *valp = &unp_defer;
480
481 /* All sysctl names at this level are terminal. */
482 switch (name[0]) {
483 case SOCK_STREAM1:
484 if (namelen != 2)
485 return (ENOTDIR20);
486 return sysctl_bounded_arr(unpstctl_vars, nitems(unpstctl_vars)(sizeof((unpstctl_vars)) / sizeof((unpstctl_vars)[0])),
487 name + 1, namelen - 1, oldp, oldlenp, newp, newlen);
488 case SOCK_SEQPACKET5:
489 if (namelen != 2)
490 return (ENOTDIR20);
491 return sysctl_bounded_arr(unpsqctl_vars, nitems(unpsqctl_vars)(sizeof((unpsqctl_vars)) / sizeof((unpsqctl_vars)[0])),
492 name + 1, namelen - 1, oldp, oldlenp, newp, newlen);
493 case SOCK_DGRAM2:
494 if (namelen != 2)
495 return (ENOTDIR20);
496 return sysctl_bounded_arr(unpdgctl_vars, nitems(unpdgctl_vars)(sizeof((unpdgctl_vars)) / sizeof((unpdgctl_vars)[0])),
497 name + 1, namelen - 1, oldp, oldlenp, newp, newlen);
498 case NET_UNIX_INFLIGHT6:
499 valp = &unp_rights;
500 /* FALLTHOUGH */
501 case NET_UNIX_DEFERRED7:
502 if (namelen != 1)
503 return (ENOTDIR20);
504 return sysctl_rdint(oldp, oldlenp, newp, *valp);
505 default:
506 return (ENOPROTOOPT42);
507 }
508}
509
510void
511unp_detach(struct unpcb *unp)
512{
513 struct socket *so = unp->unp_socket;
514 struct vnode *vp = unp->unp_vnode;
515
516 rw_assert_wrlock(&unp_lock);
517
518 unp->unp_vnode = NULL((void *)0);
519
520 /*
521 * Enforce `unp_gc_lock' -> `solock()' lock order.
522 * Enforce `i_lock' -> `unp_lock' lock order.
523 */
524 sounlock(so, SL_LOCKED0x42);
525
526 rw_enter_write(&unp_gc_lock);
527 LIST_REMOVE(unp, unp_link)do { if ((unp)->unp_link.le_next != ((void *)0)) (unp)->
unp_link.le_next->unp_link.le_prev = (unp)->unp_link.le_prev
; *(unp)->unp_link.le_prev = (unp)->unp_link.le_next; (
(unp)->unp_link.le_prev) = ((void *)-1); ((unp)->unp_link
.le_next) = ((void *)-1); } while (0)
;
528 rw_exit_write(&unp_gc_lock);
529
530 if (vp != NULL((void *)0)) {
531 VOP_LOCK(vp, LK_EXCLUSIVE0x0001UL);
532 vp->v_socketv_un.vu_socket = NULL((void *)0);
533
534 KERNEL_LOCK()_kernel_lock();
535 vput(vp);
536 KERNEL_UNLOCK()_kernel_unlock();
537 }
538
539 solock(so);
540
541 if (unp->unp_conn)
542 unp_disconnect(unp);
543 while (!SLIST_EMPTY(&unp->unp_refs)(((&unp->unp_refs)->slh_first) == ((void *)0)))
544 unp_drop(SLIST_FIRST(&unp->unp_refs)((&unp->unp_refs)->slh_first), ECONNRESET54);
545 soisdisconnected(so);
546 so->so_pcb = NULL((void *)0);
547 m_freem(unp->unp_addr);
548 pool_put(&unpcb_pool, unp);
549 if (unp_rights)
550 task_add(systqmp, &unp_gc_task);
551}
552
553int
554unp_bind(struct unpcb *unp, struct mbuf *nam, struct proc *p)
555{
556 struct sockaddr_un *soun;
557 struct mbuf *nam2;
558 struct vnode *vp;
559 struct vattr vattr;
560 int error;
561 struct nameidata nd;
562 size_t pathlen;
563
564 if (unp->unp_flags & (UNP_BINDING0x04 | UNP_CONNECTING0x08))
565 return (EINVAL22);
566 if (unp->unp_vnode != NULL((void *)0))
567 return (EINVAL22);
568 if ((error = unp_nam2sun(nam, &soun, &pathlen)))
569 return (error);
570
571 unp->unp_flags |= UNP_BINDING0x04;
572
573 /*
574 * Enforce `i_lock' -> `unplock' because fifo subsystem
575 * requires it. The socket can't be closed concurrently
576 * because the file descriptor reference is still held.
577 */
578
579 sounlock(unp->unp_socket, SL_LOCKED0x42);
580
581 nam2 = m_getclr(M_WAITOK0x0001, MT_SONAME3);
582 nam2->m_lenm_hdr.mh_len = sizeof(struct sockaddr_un);
583 memcpy(mtod(nam2, struct sockaddr_un *), soun,__builtin_memcpy((((struct sockaddr_un *)((nam2)->m_hdr.mh_data
))), (soun), (__builtin_offsetof(struct sockaddr_un, sun_path
) + pathlen))
584 offsetof(struct sockaddr_un, sun_path) + pathlen)__builtin_memcpy((((struct sockaddr_un *)((nam2)->m_hdr.mh_data
))), (soun), (__builtin_offsetof(struct sockaddr_un, sun_path
) + pathlen))
;
585 /* No need to NUL terminate: m_getclr() returns zero'd mbufs. */
586
587 soun = mtod(nam2, struct sockaddr_un *)((struct sockaddr_un *)((nam2)->m_hdr.mh_data));
588
589 /* Fixup sun_len to keep it in sync with m_len. */
590 soun->sun_len = nam2->m_lenm_hdr.mh_len;
591
592 NDINIT(&nd, CREATE, NOFOLLOW | LOCKPARENT, UIO_SYSSPACE,ndinitat(&nd, 1, 0x0000 | 0x0008, UIO_SYSSPACE, -100, soun
->sun_path, p)
593 soun->sun_path, p)ndinitat(&nd, 1, 0x0000 | 0x0008, UIO_SYSSPACE, -100, soun
->sun_path, p)
;
594 nd.ni_pledge = PLEDGE_UNIX0x0000000000000100ULL;
595
596 KERNEL_LOCK()_kernel_lock();
597/* SHOULD BE ABLE TO ADOPT EXISTING AND wakeup() ALA FIFO's */
598 error = namei(&nd);
599 if (error != 0) {
600 m_freem(nam2);
601 solock(unp->unp_socket);
602 goto out;
603 }
604 vp = nd.ni_vp;
605 if (vp != NULL((void *)0)) {
606 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
607 if (nd.ni_dvp == vp)
608 vrele(nd.ni_dvp);
609 else
610 vput(nd.ni_dvp);
611 vrele(vp);
612 m_freem(nam2);
613 error = EADDRINUSE48;
614 solock(unp->unp_socket);
615 goto out;
616 }
617 VATTR_NULL(&vattr)vattr_null(&vattr);
618 vattr.va_type = VSOCK;
619 vattr.va_mode = ACCESSPERMS(0000700|0000070|0000007) &~ p->p_fd->fd_cmask;
620 error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
621 vput(nd.ni_dvp);
622 if (error) {
623 m_freem(nam2);
624 solock(unp->unp_socket);
625 goto out;
626 }
627 solock(unp->unp_socket);
628 unp->unp_addr = nam2;
629 vp = nd.ni_vp;
630 vp->v_socketv_un.vu_socket = unp->unp_socket;
631 unp->unp_vnode = vp;
632 unp->unp_connid.uid = p->p_ucred->cr_uid;
633 unp->unp_connid.gid = p->p_ucred->cr_gid;
634 unp->unp_connid.pid = p->p_p->ps_pid;
635 unp->unp_flags |= UNP_FEIDSBIND0x02;
636 VOP_UNLOCK(vp);
637out:
638 KERNEL_UNLOCK()_kernel_unlock();
639 unp->unp_flags &= ~UNP_BINDING0x04;
640
641 return (error);
642}
643
/*
 * Connect socket `so' to the AF_UNIX socket bound at the path in
 * `nam'.  For connection-oriented types a fresh server socket is
 * spawned via sonewconn() and the connecting process's credentials
 * are recorded on it.  Statement order is lock-order sensitive; see
 * the comments below.
 */
int
unp_connect(struct socket *so, struct mbuf *nam, struct proc *p)
{
	struct sockaddr_un *soun;
	struct vnode *vp;
	struct socket *so2, *so3;
	struct unpcb *unp, *unp2, *unp3;
	struct nameidata nd;
	int error;

	unp = sotounpcb(so);
	if (unp->unp_flags & (UNP_BINDING | UNP_CONNECTING))
		return (EISCONN);
	if ((error = unp_nam2sun(nam, &soun, NULL)))
		return (error);

	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, soun->sun_path, p);
	nd.ni_pledge = PLEDGE_UNIX;

	unp->unp_flags |= UNP_CONNECTING;

	/*
	 * Enforce `i_lock' -> `unplock' because fifo subsystem
	 * requires it.  The socket can't be closed concurrently
	 * because the file descriptor reference is still held.
	 */
	sounlock(so, SL_LOCKED);

	KERNEL_LOCK();
	error = namei(&nd);
	if (error != 0)
		goto unlock;
	vp = nd.ni_vp;
	if (vp->v_type != VSOCK) {
		error = ENOTSOCK;
		goto put;
	}
	if ((error = VOP_ACCESS(vp, VWRITE, p->p_ucred, p)) != 0)
		goto put;
	solock(so);
	so2 = vp->v_socket;
	if (so2 == NULL) {
		error = ECONNREFUSED;
		goto put_locked;
	}
	if (so->so_type != so2->so_type) {
		error = EPROTOTYPE;
		goto put_locked;
	}
	if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
		if ((so2->so_options & SO_ACCEPTCONN) == 0 ||
		    (so3 = sonewconn(so2, 0)) == NULL) {
			error = ECONNREFUSED;
			goto put_locked;
		}
		unp2 = sotounpcb(so2);
		unp3 = sotounpcb(so3);
		if (unp2->unp_addr)
			unp3->unp_addr =
			    m_copym(unp2->unp_addr, 0, M_COPYALL, M_NOWAIT);
		unp3->unp_connid.uid = p->p_ucred->cr_uid;
		unp3->unp_connid.gid = p->p_ucred->cr_gid;
		unp3->unp_connid.pid = p->p_p->ps_pid;
		unp3->unp_flags |= UNP_FEIDS;
		/* Connect to the freshly spawned server socket. */
		so2 = so3;
		if (unp2->unp_flags & UNP_FEIDSBIND) {
			unp->unp_connid = unp2->unp_connid;
			unp->unp_flags |= UNP_FEIDS;
		}
	}
	error = unp_connect2(so, so2);
put_locked:
	sounlock(so, SL_LOCKED);
put:
	vput(vp);
unlock:
	KERNEL_UNLOCK();
	solock(so);
	unp->unp_flags &= ~UNP_CONNECTING;

	/*
	 * The peer socket could be closed by concurrent thread
	 * when `so' and `vp' are unlocked.
	 */
	if (error == 0 && unp->unp_conn == NULL)
		error = ECONNREFUSED;

	return (error);
}
734
735int
736unp_connect2(struct socket *so, struct socket *so2)
737{
738 struct unpcb *unp = sotounpcb(so)((struct unpcb *)((so)->so_pcb));
739 struct unpcb *unp2;
740
741 rw_assert_wrlock(&unp_lock);
742
743 if (so2->so_type != so->so_type)
744 return (EPROTOTYPE41);
745 unp2 = sotounpcb(so2)((struct unpcb *)((so2)->so_pcb));
746 unp->unp_conn = unp2;
747 switch (so->so_type) {
748
749 case SOCK_DGRAM2:
750 SLIST_INSERT_HEAD(&unp2->unp_refs, unp, unp_nextref)do { (unp)->unp_nextref.sle_next = (&unp2->unp_refs
)->slh_first; (&unp2->unp_refs)->slh_first = (unp
); } while (0)
;
751 soisconnected(so);
752 break;
753
754 case SOCK_STREAM1:
755 case SOCK_SEQPACKET5:
756 unp2->unp_conn = unp;
757 soisconnected(so);
758 soisconnected(so2);
759 break;
760
761 default:
762 panic("unp_connect2");
763 }
764 return (0);
765}
766
767void
768unp_disconnect(struct unpcb *unp)
769{
770 struct unpcb *unp2 = unp->unp_conn;
771
772 if (unp2 == NULL((void *)0))
773 return;
774 unp->unp_conn = NULL((void *)0);
775 switch (unp->unp_socket->so_type) {
776
777 case SOCK_DGRAM2:
778 SLIST_REMOVE(&unp2->unp_refs, unp, unpcb, unp_nextref)do { if ((&unp2->unp_refs)->slh_first == (unp)) { do
{ ((&unp2->unp_refs))->slh_first = ((&unp2->
unp_refs))->slh_first->unp_nextref.sle_next; } while (0
); } else { struct unpcb *curelm = (&unp2->unp_refs)->
slh_first; while (curelm->unp_nextref.sle_next != (unp)) curelm
= curelm->unp_nextref.sle_next; curelm->unp_nextref.sle_next
= curelm->unp_nextref.sle_next->unp_nextref.sle_next; }
((unp)->unp_nextref.sle_next) = ((void *)-1); } while (0)
;
779 unp->unp_socket->so_state &= ~SS_ISCONNECTED0x002;
780 break;
781
782 case SOCK_STREAM1:
783 case SOCK_SEQPACKET5:
784 unp->unp_socket->so_snd.sb_mbcnt = 0;
785 unp->unp_socket->so_snd.sb_cc = 0;
786 soisdisconnected(unp->unp_socket);
787 unp2->unp_conn = NULL((void *)0);
788 unp2->unp_socket->so_snd.sb_mbcnt = 0;
789 unp2->unp_socket->so_snd.sb_cc = 0;
790 soisdisconnected(unp2->unp_socket);
791 break;
792 }
793}
794
795void
796unp_shutdown(struct unpcb *unp)
797{
798 struct socket *so;
799
800 switch (unp->unp_socket->so_type) {
801 case SOCK_STREAM1:
802 case SOCK_SEQPACKET5:
803 if (unp->unp_conn && (so = unp->unp_conn->unp_socket))
804 socantrcvmore(so);
805 break;
806 default:
807 break;
808 }
809}
810
811void
812unp_drop(struct unpcb *unp, int errno)
813{
814 struct socket *so = unp->unp_socket;
815
816 rw_assert_wrlock(&unp_lock);
817
818 so->so_error = errno;
819 unp_disconnect(unp);
820}
821
822#ifdef notdef
823unp_drain(void)
824{
825
826}
827#endif
828
829static struct unpcb *
830fptounp(struct file *fp)
831{
832 struct socket *so;
833
834 if (fp->f_type != DTYPE_SOCKET2)
835 return (NULL((void *)0));
836 if ((so = fp->f_data) == NULL((void *)0))
837 return (NULL((void *)0));
838 if (so->so_proto->pr_domain != &unixdomain)
839 return (NULL((void *)0));
840 return (sotounpcb(so)((struct unpcb *)((so)->so_pcb)));
841}
842
int
unp_externalize(struct mbuf *rights, socklen_t controllen, int flags)
{
	struct proc *p = curproc;		/* XXX */
	struct cmsghdr *cm = mtod(rights, struct cmsghdr *);
	struct filedesc *fdp = p->p_fd;
	int i, *fds = NULL;
	struct fdpass *rp;
	struct file *fp;
	int nfds, error = 0;

	/*
	 * Externalize SCM_RIGHTS: turn the array of in-kernel
	 * struct fdpass entries carried in `rights' into file
	 * descriptor numbers in the receiving process `p', and
	 * rewrite the control message in place to hold those ints.
	 *
	 * This code only works because SCM_RIGHTS is the only supported
	 * control message type on unix sockets. Enforce this here.
	 */
	if (cm->cmsg_type != SCM_RIGHTS || cm->cmsg_level != SOL_SOCKET)
		return EINVAL;

	nfds = (cm->cmsg_len - CMSG_ALIGN(sizeof(*cm))) /
	    sizeof(struct fdpass);
	/* Clamp to the space the caller provided for descriptor ints. */
	if (controllen < CMSG_ALIGN(sizeof(struct cmsghdr)))
		controllen = 0;
	else
		controllen -= CMSG_ALIGN(sizeof(struct cmsghdr));
	if (nfds > controllen / sizeof(int)) {
		error = EMSGSIZE;
		goto out;
	}

	/* Make sure the recipient should be able to see the descriptors.. */
	rp = (struct fdpass *)CMSG_DATA(cm);

	/* fdp->fd_rdir requires KERNEL_LOCK() */
	KERNEL_LOCK();

	for (i = 0; i < nfds; i++) {
		fp = rp->fp;
		rp++;
		error = pledge_recvfd(p, fp);
		if (error)
			break;

		/*
		 * No to block devices. If passing a directory,
		 * make sure that it is underneath the root.
		 */
		if (fdp->fd_rdir != NULL && fp->f_type == DTYPE_VNODE) {
			struct vnode *vp = (struct vnode *)fp->f_data;

			if (vp->v_type == VBLK ||
			    (vp->v_type == VDIR &&
			    !vn_isunder(vp, fdp->fd_rdir, p))) {
				error = EPERM;
				break;
			}
		}
	}

	KERNEL_UNLOCK();

	if (error)
		goto out;

	/* Temporary array for the new descriptor numbers. */
	fds = mallocarray(nfds, sizeof(int), M_TEMP, M_WAITOK);

	fdplock(fdp);
restart:
	/*
	 * First loop -- allocate file descriptor table slots for the
	 * new descriptors.
	 */
	rp = ((struct fdpass *)CMSG_DATA(cm));
	for (i = 0; i < nfds; i++) {
		if ((error = fdalloc(p, 0, &fds[i])) != 0) {
			/*
			 * Back out what we've done so far.
			 */
			for (--i; i >= 0; i--)
				fdremove(fdp, fds[i]);

			if (error == ENOSPC) {
				/* Grow the table and retry from scratch. */
				fdexpand(p);
				goto restart;
			}

			fdpunlock(fdp);

			/*
			 * This is the error that has historically
			 * been returned, and some callers may
			 * expect it.
			 */

			error = EMSGSIZE;
			goto out;
		}

		/*
		 * Make the slot reference the descriptor so that
		 * fdalloc() works properly.. We finalize it all
		 * in the loop below.
		 */
		mtx_enter(&fdp->fd_fplock);
		KASSERT(fdp->fd_ofiles[fds[i]] == NULL);
		fdp->fd_ofiles[fds[i]] = rp->fp;
		mtx_leave(&fdp->fd_fplock);

		/* Only UF_PLEDGED survives the transfer; honor CLOEXEC. */
		fdp->fd_ofileflags[fds[i]] = (rp->flags & UF_PLEDGED);
		if (flags & MSG_CMSG_CLOEXEC)
			fdp->fd_ofileflags[fds[i]] |= UF_EXCLOSE;

		rp++;
	}
	fdpunlock(fdp);

	/*
	 * Now that adding them has succeeded, update all of the
	 * descriptor passing state.
	 */
	rp = (struct fdpass *)CMSG_DATA(cm);

	for (i = 0; i < nfds; i++) {
		struct unpcb *unp;

		fp = rp->fp;
		rp++;
		/* Each file is no longer "in flight" on a socket. */
		if ((unp = fptounp(fp)) != NULL) {
			rw_enter_write(&unp_gc_lock);
			unp->unp_msgcount--;
			rw_exit_write(&unp_gc_lock);
		}
	}

	mtx_enter(&unp_rights_mtx);
	unp_rights -= nfds;
	mtx_leave(&unp_rights_mtx);

	/*
	 * Copy temporary array to message and adjust length, in case of
	 * transition from large struct file pointers to ints.
	 */
	memcpy(CMSG_DATA(cm), fds, nfds * sizeof(int));
	cm->cmsg_len = CMSG_LEN(nfds * sizeof(int));
	rights->m_len = CMSG_LEN(nfds * sizeof(int));
out:
	if (fds != NULL)
		free(fds, M_TEMP, nfds * sizeof(int));

	if (error) {
		if (nfds > 0) {
			/*
			 * No lock required. We are the only `cm' holder.
			 */
			rp = ((struct fdpass *)CMSG_DATA(cm));
			/* Defer closing the passed files to the GC task. */
			unp_discard(rp, nfds);
		}
	}

	return (error);
}
1003
int
unp_internalize(struct mbuf *control, struct proc *p)
{
	struct filedesc *fdp = p->p_fd;
	struct cmsghdr *cm = mtod(control, struct cmsghdr *);
	struct fdpass *rp;
	struct file *fp;
	struct unpcb *unp;
	int i, error;
	int nfds, *ip, fd, neededspace;

	/*
	 * Internalize SCM_RIGHTS: convert the descriptor numbers the
	 * sending process `p' put into `control' into referenced
	 * struct fdpass entries, rewriting the message in place.
	 *
	 * Check for two potential msg_controllen values because
	 * IETF stuck their nose in a place it does not belong.
	 */
	if (control->m_len < CMSG_LEN(0) || cm->cmsg_len < CMSG_LEN(0))
		return (EINVAL);
	if (cm->cmsg_type != SCM_RIGHTS || cm->cmsg_level != SOL_SOCKET ||
	    !(cm->cmsg_len == control->m_len ||
	    control->m_len == CMSG_ALIGN(cm->cmsg_len)))
		return (EINVAL);
	nfds = (cm->cmsg_len - CMSG_ALIGN(sizeof(*cm))) / sizeof (int);

	/* Reserve the in-flight slots; cap at 10% of all open files. */
	mtx_enter(&unp_rights_mtx);
	if (unp_rights + nfds > maxfiles / 10) {
		mtx_leave(&unp_rights_mtx);
		return (EMFILE);
	}
	unp_rights += nfds;
	mtx_leave(&unp_rights_mtx);

	/* Make sure we have room for the struct file pointers */
morespace:
	neededspace = CMSG_SPACE(nfds * sizeof(struct fdpass)) -
	    control->m_len;
	if (neededspace > m_trailingspace(control)) {
		char *tmp;
		/* if we already have a cluster, the message is just too big */
		if (control->m_flags & M_EXT) {
			error = E2BIG;
			goto nospace;
		}

		/* copy cmsg data temporarily out of the mbuf */
		tmp = malloc(control->m_len, M_TEMP, M_WAITOK);
		memcpy(tmp, mtod(control, caddr_t), control->m_len);

		/* allocate a cluster and try again */
		MCLGET(control, M_WAIT);
		if ((control->m_flags & M_EXT) == 0) {
			free(tmp, M_TEMP, control->m_len);
			error = ENOBUFS;	/* allocation failed */
			goto nospace;
		}

		/* copy the data back into the cluster */
		cm = mtod(control, struct cmsghdr *);
		memcpy(cm, tmp, control->m_len);
		free(tmp, M_TEMP, control->m_len);
		goto morespace;
	}

	/* adjust message & mbuf to note amount of space actually used. */
	cm->cmsg_len = CMSG_LEN(nfds * sizeof(struct fdpass));
	control->m_len = CMSG_SPACE(nfds * sizeof(struct fdpass));

	/*
	 * Walk backwards: struct fdpass is larger than int, so the
	 * expansion is done in place from the last entry to the first
	 * without overwriting ints that have not been read yet.
	 */
	ip = ((int *)CMSG_DATA(cm)) + nfds - 1;
	rp = ((struct fdpass *)CMSG_DATA(cm)) + nfds - 1;
	fdplock(fdp);
	for (i = 0; i < nfds; i++) {
		memcpy(&fd, ip, sizeof fd);
		ip--;
		if ((fp = fd_getfile(fdp, fd)) == NULL) {
			error = EBADF;
			goto fail;
		}
		/* Refuse files whose refcount is about to overflow. */
		if (fp->f_count >= FDUP_MAX_COUNT) {
			error = EDEADLK;
			goto fail;
		}
		error = pledge_sendfd(p, fp);
		if (error)
			goto fail;

		/* kqueue descriptors cannot be copied */
		if (fp->f_type == DTYPE_KQUEUE) {
			error = EINVAL;
			goto fail;
		}
#if NKCOV > 0
		/* kcov descriptors cannot be copied */
		if (fp->f_type == DTYPE_VNODE && kcov_vnode(fp->f_data)) {
			error = EINVAL;
			goto fail;
		}
#endif
		/* fd_getfile() took the reference that rp->fp now holds. */
		rp->fp = fp;
		rp->flags = fdp->fd_ofileflags[fd] & UF_PLEDGED;
		rp--;
		if ((unp = fptounp(fp)) != NULL) {
			/* Count the socket as in flight for the GC. */
			rw_enter_write(&unp_gc_lock);
			unp->unp_msgcount++;
			unp->unp_file = fp;
			rw_exit_write(&unp_gc_lock);
		}
	}
	fdpunlock(fdp);
	return (0);
fail:
	fdpunlock(fdp);
	/* fp is NULL here only when fd_getfile() failed. */
	if (fp != NULL)
		FRELE(fp, p);
	/* Back out what we just did. */
	for ( ; i > 0; i--) {
		rp++;
		fp = rp->fp;
		if ((unp = fptounp(fp)) != NULL) {
			rw_enter_write(&unp_gc_lock);
			unp->unp_msgcount--;
			rw_exit_write(&unp_gc_lock);
		}
		FRELE(fp, p);
	}

nospace:
	/* Release the in-flight slots reserved above. */
	mtx_enter(&unp_rights_mtx);
	unp_rights -= nfds;
	mtx_leave(&unp_rights_mtx);

	return (error);
}
1135
/*
 * Garbage collector for unix domain sockets whose only references
 * are SCM_RIGHTS messages queued on (possibly cyclic) in-flight
 * sockets.  Runs from the `unp_gc_task'.  Single-instance: the
 * `unp_gcing' flag, protected by `unp_gc_lock', keeps concurrent
 * invocations out.
 */
void
unp_gc(void *arg __unused)
{
	struct unp_deferral *defer;
	struct file *fp;
	struct socket *so;
	struct unpcb *unp;
	int nunref, i;

	rw_enter_write(&unp_gc_lock);
	if (unp_gcing)
		goto unlock;
	unp_gcing = 1;
	rw_exit_write(&unp_gc_lock);

	rw_enter_write(&unp_df_lock);
	/* close any fds on the deferred list */
	while ((defer = SLIST_FIRST(&unp_deferred)) != NULL) {
		SLIST_REMOVE_HEAD(&unp_deferred, ud_link);
		/* drop the list lock while closing; re-taken below */
		rw_exit_write(&unp_df_lock);
		for (i = 0; i < defer->ud_n; i++) {
			fp = defer->ud_fp[i].fp;
			if (fp == NULL)
				continue;
			if ((unp = fptounp(fp)) != NULL) {
				rw_enter_write(&unp_gc_lock);
				unp->unp_msgcount--;
				rw_exit_write(&unp_gc_lock);
			}
			mtx_enter(&unp_rights_mtx);
			unp_rights--;
			mtx_leave(&unp_rights_mtx);
			/* closef() expects a refcount of 2 */
			FREF(fp);
			(void) closef(fp, NULL);
		}
		free(defer, M_TEMP, sizeof(*defer) +
		    sizeof(struct fdpass) * defer->ud_n);
		rw_enter_write(&unp_df_lock);
	}
	rw_exit_write(&unp_df_lock);

	nunref = 0;

	rw_enter_write(&unp_gc_lock);

	/*
	 * Determine sockets which may be prospectively dead. Such
	 * sockets have their `unp_msgcount' equal to the `f_count'.
	 * If `unp_msgcount' is 0, the socket has not been passed
	 * and can't be unreferenced.
	 */
	LIST_FOREACH(unp, &unp_head, unp_link) {
		unp->unp_gcflags = 0;

		if (unp->unp_msgcount == 0)
			continue;
		if ((fp = unp->unp_file) == NULL)
			continue;
		if (fp->f_count == unp->unp_msgcount) {
			unp->unp_gcflags |= UNP_GCDEAD;
			unp->unp_gcrefs = unp->unp_msgcount;
			nunref++;
		}
	}

	/*
	 * Scan all sockets previously marked as dead. Remove
	 * the `unp_gcrefs' reference each socket holds on any
	 * dead socket in its buffer.
	 */
	LIST_FOREACH(unp, &unp_head, unp_link) {
		if ((unp->unp_gcflags & UNP_GCDEAD) == 0)
			continue;
		so = unp->unp_socket;
		solock(so);
		unp_scan(so->so_rcv.sb_mb, unp_remove_gcrefs);
		sounlock(so, SL_LOCKED);
	}

	/*
	 * If the dead socket has `unp_gcrefs' reference counter
	 * greater than 0, it can't be unreferenced. Mark it as
	 * alive and increment the `unp_gcrefs' reference for each
	 * dead socket within its buffer. Repeat this until we
	 * have no new alive sockets found.
	 */
	do {
		unp_defer = 0;

		LIST_FOREACH(unp, &unp_head, unp_link) {
			if ((unp->unp_gcflags & UNP_GCDEAD) == 0)
				continue;
			if (unp->unp_gcrefs == 0)
				continue;

			unp->unp_gcflags &= ~UNP_GCDEAD;

			so = unp->unp_socket;
			solock(so);
			/* unp_restore_gcrefs() bumps `unp_defer' */
			unp_scan(so->so_rcv.sb_mb, unp_restore_gcrefs);
			sounlock(so, SL_LOCKED);

			KASSERT(nunref > 0);
			nunref--;
		}
	} while (unp_defer > 0);

	/*
	 * If there are any unreferenced sockets, then for each dispose
	 * of files in its receive buffer and then close it.
	 */
	if (nunref) {
		LIST_FOREACH(unp, &unp_head, unp_link) {
			if (unp->unp_gcflags & UNP_GCDEAD) {
				/*
				 * This socket could still be connected
				 * and if so it's `so_rcv' is still
				 * accessible by concurrent PRU_SEND
				 * thread.
				 */
				so = unp->unp_socket;
				solock(so);
				unp_scan(so->so_rcv.sb_mb, unp_discard);
				sounlock(so, SL_LOCKED);
			}
		}
	}

	unp_gcing = 0;
unlock:
	rw_exit_write(&unp_gc_lock);
}
1269
1270void
1271unp_dispose(struct mbuf *m)
1272{
1273
1274 if (m)
1275 unp_scan(m, unp_discard);
1276}
1277
1278void
1279unp_scan(struct mbuf *m0, void (*op)(struct fdpass *, int))
1280{
1281 struct mbuf *m;
1282 struct fdpass *rp;
1283 struct cmsghdr *cm;
1284 int qfds;
1285
1286 while (m0) {
1287 for (m = m0; m; m = m->m_nextm_hdr.mh_next) {
1288 if (m->m_typem_hdr.mh_type == MT_CONTROL6 &&
1289 m->m_lenm_hdr.mh_len >= sizeof(*cm)) {
1290 cm = mtod(m, struct cmsghdr *)((struct cmsghdr *)((m)->m_hdr.mh_data));
1291 if (cm->cmsg_level != SOL_SOCKET0xffff ||
1292 cm->cmsg_type != SCM_RIGHTS0x01)
1293 continue;
1294 qfds = (cm->cmsg_len - CMSG_ALIGN(sizeof *cm)(((unsigned long)(sizeof *cm) + (sizeof(long) - 1)) &~(sizeof
(long) - 1))
)
1295 / sizeof(struct fdpass);
1296 if (qfds > 0) {
1297 rp = (struct fdpass *)CMSG_DATA(cm)((unsigned char *)(cm) + (((unsigned long)(sizeof(struct cmsghdr
)) + (sizeof(long) - 1)) &~(sizeof(long) - 1)))
;
1298 op(rp, qfds);
1299 }
1300 break; /* XXX, but saves time */
1301 }
1302 }
1303 m0 = m0->m_nextpktm_hdr.mh_nextpkt;
1304 }
1305}
1306
1307void
1308unp_discard(struct fdpass *rp, int nfds)
1309{
1310 struct unp_deferral *defer;
1311
1312 /* copy the file pointers to a deferral structure */
1313 defer = malloc(sizeof(*defer) + sizeof(*rp) * nfds, M_TEMP127, M_WAITOK0x0001);
1314 defer->ud_n = nfds;
1315 memcpy(&defer->ud_fp[0], rp, sizeof(*rp) * nfds)__builtin_memcpy((&defer->ud_fp[0]), (rp), (sizeof(*rp
) * nfds))
;
1316 memset(rp, 0, sizeof(*rp) * nfds)__builtin_memset((rp), (0), (sizeof(*rp) * nfds));
1317
1318 rw_enter_write(&unp_df_lock);
1319 SLIST_INSERT_HEAD(&unp_deferred, defer, ud_link)do { (defer)->ud_link.sle_next = (&unp_deferred)->slh_first
; (&unp_deferred)->slh_first = (defer); } while (0)
;
1320 rw_exit_write(&unp_df_lock);
1321
1322 task_add(systqmp, &unp_gc_task);
1323}
1324
1325void
1326unp_remove_gcrefs(struct fdpass *rp, int nfds)
1327{
1328 struct unpcb *unp;
1329 int i;
1330
1331 rw_assert_wrlock(&unp_gc_lock);
1332
1333 for (i = 0; i < nfds; i++) {
1334 if (rp[i].fp == NULL((void *)0))
1335 continue;
1336 if ((unp = fptounp(rp[i].fp)) == NULL((void *)0))
1337 continue;
1338 if (unp->unp_gcflags & UNP_GCDEAD0x01) {
1339 KASSERT(unp->unp_gcrefs > 0)((unp->unp_gcrefs > 0) ? (void)0 : __assert("diagnostic "
, "/usr/src/sys/kern/uipc_usrreq.c", 1339, "unp->unp_gcrefs > 0"
))
;
1340 unp->unp_gcrefs--;
1341 }
1342 }
1343}
1344
1345void
1346unp_restore_gcrefs(struct fdpass *rp, int nfds)
1347{
1348 struct unpcb *unp;
1349 int i;
1350
1351 rw_assert_wrlock(&unp_gc_lock);
1352
1353 for (i = 0; i < nfds; i++) {
1354 if (rp[i].fp == NULL((void *)0))
1355 continue;
1356 if ((unp = fptounp(rp[i].fp)) == NULL((void *)0))
1357 continue;
1358 if (unp->unp_gcflags & UNP_GCDEAD0x01) {
1359 unp->unp_gcrefs++;
1360 unp_defer++;
1361 }
1362 }
1363}
1364
/*
 * Validate the sockaddr carried in `nam' as a struct sockaddr_un
 * and return it via `sun', with the length of its path (excluding
 * the NUL) via `pathlen' if requested.  May grow `nam' by one byte
 * to guarantee that sun_path is NUL terminated.
 */
int
unp_nam2sun(struct mbuf *nam, struct sockaddr_un **sun, size_t *pathlen)
{
	struct sockaddr *sa = mtod(nam, struct sockaddr *);
	size_t size, len;

	if (nam->m_len < offsetof(struct sockaddr, sa_data))
		return EINVAL;
	if (sa->sa_family != AF_UNIX)
		return EAFNOSUPPORT;
	/* sa_len must agree with the mbuf and fit a sockaddr_un */
	if (sa->sa_len != nam->m_len)
		return EINVAL;
	if (sa->sa_len > sizeof(struct sockaddr_un))
		return EINVAL;
	*sun = (struct sockaddr_un *)sa;

	/* ensure that sun_path is NUL terminated and fits */
	size = (*sun)->sun_len - offsetof(struct sockaddr_un, sun_path);
	len = strnlen((*sun)->sun_path, size);
	if (len == sizeof((*sun)->sun_path))
		return EINVAL;
	if (len == size) {
		/* no NUL within `size' bytes: append one in place */
		if (m_trailingspace(nam) == 0)
			return EINVAL;
		nam->m_len++;
		(*sun)->sun_len++;
		(*sun)->sun_path[len] = '\0';
	}
	if (pathlen != NULL)
		*pathlen = len;

	return 0;
}