Bug Summary

File: netinet/tcp_output.c
Warning: line 1117, column 6
Branch condition evaluates to a garbage value

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name tcp_output.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I 
/usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/netinet/tcp_output.c
1/* $OpenBSD: tcp_output.c,v 1.141 2023/11/26 22:08:10 bluhm Exp $ */
2/* $NetBSD: tcp_output.c,v 1.16 1997/06/03 16:17:09 kml Exp $ */
3
4/*
5 * Copyright (c) 1982, 1986, 1988, 1990, 1993
6 * The Regents of the University of California. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * @(#)COPYRIGHT 1.1 (NRL) 17 January 1995
33 *
34 * NRL grants permission for redistribution and use in source and binary
35 * forms, with or without modification, of the software and documentation
36 * created at NRL provided that the following conditions are met:
37 *
38 * 1. Redistributions of source code must retain the above copyright
39 * notice, this list of conditions and the following disclaimer.
40 * 2. Redistributions in binary form must reproduce the above copyright
41 * notice, this list of conditions and the following disclaimer in the
42 * documentation and/or other materials provided with the distribution.
43 * 3. All advertising materials mentioning features or use of this software
44 * must display the following acknowledgements:
45 * This product includes software developed by the University of
46 * California, Berkeley and its contributors.
47 * This product includes software developed at the Information
48 * Technology Division, US Naval Research Laboratory.
49 * 4. Neither the name of the NRL nor the names of its contributors
50 * may be used to endorse or promote products derived from this software
51 * without specific prior written permission.
52 *
53 * THE SOFTWARE PROVIDED BY NRL IS PROVIDED BY NRL AND CONTRIBUTORS ``AS
54 * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
56 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NRL OR
57 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
58 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
59 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
60 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
61 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
62 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
63 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 *
65 * The views and conclusions contained in the software and documentation
66 * are those of the authors and should not be interpreted as representing
67 * official policies, either expressed or implied, of the US Naval
68 * Research Laboratory (NRL).
69 */
70
71#include "pf.h"
72#include "stoeplitz.h"
73
74#include <sys/param.h>
75#include <sys/systm.h>
76#include <sys/mbuf.h>
77#include <sys/protosw.h>
78#include <sys/socket.h>
79#include <sys/socketvar.h>
80#include <sys/kernel.h>
81
82#include <net/if.h>
83#include <net/if_var.h>
84#include <net/route.h>
85#if NPF1 > 0
86#include <net/pfvar.h>
87#endif
88
89#include <netinet/in.h>
90#include <netinet/ip.h>
91#include <netinet/in_pcb.h>
92#include <netinet/ip_var.h>
93#include <netinet/tcp.h>
94#define TCPOUTFLAGS
95#include <netinet/tcp_fsm.h>
96#include <netinet/tcp_seq.h>
97#include <netinet/tcp_timer.h>
98#include <netinet/tcp_var.h>
99#include <netinet/tcp_debug.h>
100
101#ifdef notyet
102extern struct mbuf *m_copypack();
103#endif
104
105extern int tcprexmtthresh;
106
#ifdef TCP_SACK_DEBUG
void tcp_print_holes(struct tcpcb *tp);

/*
 * Debug helper: dump every SACK hole currently chained on tp->snd_holes.
 * Emits one line per hole (start/end sequence numbers, dupack count and
 * rexmit point); prints nothing at all when the hole list is empty.
 */
void
tcp_print_holes(struct tcpcb *tp)
{
	struct sackhole *hole = tp->snd_holes;

	if (hole == NULL)
		return;
	printf("Hole report: start--end dups rxmit\n");
	for (; hole != NULL; hole = hole->next) {
		printf("%x--%x d %d r %x\n", hole->start, hole->end,
		    hole->dups, hole->rxmit);
	}
	printf("\n");
}
#endif /* TCP_SACK_DEBUG */
125
126/*
127 * Returns pointer to a sackhole if there are any pending retransmissions;
128 * NULL otherwise.
129 */
130struct sackhole *
131tcp_sack_output(struct tcpcb *tp)
132{
133 struct sackhole *p;
134
135 if (!tp->sack_enable)
136 return (NULL((void *)0));
137 p = tp->snd_holes;
138 while (p) {
139 if (p->dups >= tcprexmtthresh && SEQ_LT(p->rxmit, p->end)((int)((p->rxmit)-(p->end)) < 0)) {
140 if (SEQ_LT(p->rxmit, tp->snd_una)((int)((p->rxmit)-(tp->snd_una)) < 0)) {/* old SACK hole */
141 p = p->next;
142 continue;
143 }
144#ifdef TCP_SACK_DEBUG
145 if (p)
146 tcp_print_holes(tp);
147#endif
148 return (p);
149 }
150 p = p->next;
151 }
152 return (NULL((void *)0));
153}
154
155/*
156 * After a timeout, the SACK list may be rebuilt. This SACK information
157 * should be used to avoid retransmitting SACKed data. This function
158 * traverses the SACK list to see if snd_nxt should be moved forward.
159 */
160
161void
162tcp_sack_adjust(struct tcpcb *tp)
163{
164 struct sackhole *cur = tp->snd_holes;
165 if (cur == NULL((void *)0))
166 return; /* No holes */
167 if (SEQ_GEQ(tp->snd_nxt, tp->rcv_lastsack)((int)((tp->snd_nxt)-(tp->rcv_lastsack)) >= 0))
168 return; /* We're already beyond any SACKed blocks */
169 /*
170 * Two cases for which we want to advance snd_nxt:
171 * i) snd_nxt lies between end of one hole and beginning of another
172 * ii) snd_nxt lies between end of last hole and rcv_lastsack
173 */
174 while (cur->next) {
175 if (SEQ_LT(tp->snd_nxt, cur->end)((int)((tp->snd_nxt)-(cur->end)) < 0))
176 return;
177 if (SEQ_GEQ(tp->snd_nxt, cur->next->start)((int)((tp->snd_nxt)-(cur->next->start)) >= 0))
178 cur = cur->next;
179 else {
180 tp->snd_nxt = cur->next->start;
181 return;
182 }
183 }
184 if (SEQ_LT(tp->snd_nxt, cur->end)((int)((tp->snd_nxt)-(cur->end)) < 0))
185 return;
186 tp->snd_nxt = tp->rcv_lastsack;
187 return;
188}
189
190/*
191 * Tcp output routine: figure out what should be sent and send it.
192 */
193int
194tcp_output(struct tcpcb *tp)
195{
196 struct socket *so = tp->t_inpcb->inp_socket;
197 long len, win, txmaxseg;
198 int off, flags, error;
1
'error' declared without an initial value
199 struct mbuf *m;
200 struct tcphdr *th;
201 u_int32_t optbuf[howmany(MAX_TCPOPTLEN, sizeof(u_int32_t))(((40) + ((sizeof(u_int32_t)) - 1)) / (sizeof(u_int32_t)))];
202 u_char *opt = (u_char *)optbuf;
203 unsigned int optlen, hdrlen, packetlen;
204 int idle, sendalot = 0;
205 int i, sack_rxmit = 0;
206 struct sackhole *p;
207 uint64_t now;
208#ifdef TCP_SIGNATURE1
209 unsigned int sigoff;
210#endif /* TCP_SIGNATURE */
211#ifdef TCP_ECN1
212 int needect;
213#endif
214 int tso;
215
216 if (tp->t_flags & TF_BLOCKOUTPUT0x01000000U) {
2
Assuming the condition is false
3
Taking false branch
217 tp->t_flags |= TF_NEEDOUTPUT0x00800000U;
218 return (0);
219 } else
220 tp->t_flags &= ~TF_NEEDOUTPUT0x00800000U;
221
222#if defined(TCP_SIGNATURE1) && defined(DIAGNOSTIC1)
223 if (tp->sack_enable && (tp->t_flags & TF_SIGNATURE0x0400U))
4
Assuming field 'sack_enable' is 0
224 return (EINVAL22);
225#endif /* defined(TCP_SIGNATURE) && defined(DIAGNOSTIC) */
226
227 now = tcp_now();
228
229 /*
230 * Determine length of data that should be transmitted,
231 * and flags that will be used.
232 * If there is some data or critical controls (SYN, RST)
233 * to send, then transmit; otherwise, investigate further.
234 */
235 idle = (tp->t_flags & TF_LASTIDLE0x00100000U) || (tp->snd_max == tp->snd_una);
5
Assuming the condition is false
6
Assuming field 'snd_max' is not equal to field 'snd_una'
236 if (idle
6.1
'idle' is 0
&& (now - tp->t_rcvtime) >= tp->t_rxtcur)
237 /*
238 * We have been idle for "a while" and no acks are
239 * expected to clock out any data we send --
240 * slow start to get ack "clock" running again.
241 */
242 tp->snd_cwnd = 2 * tp->t_maxseg;
243
244 /* remember 'idle' for next invocation of tcp_output */
245 if (idle
6.2
'idle' is 0
&& soissending(so)((so)->so_snd.sb_state & 0x2000)) {
246 tp->t_flags |= TF_LASTIDLE0x00100000U;
247 idle = 0;
248 } else
249 tp->t_flags &= ~TF_LASTIDLE0x00100000U;
250
251again:
252 /*
253 * If we've recently taken a timeout, snd_max will be greater than
254 * snd_nxt. There may be SACK information that allows us to avoid
255 * resending already delivered data. Adjust snd_nxt accordingly.
256 */
257 if (tp->sack_enable
6.3
Field 'sack_enable' is 0
&& SEQ_LT(tp->snd_nxt, tp->snd_max)((int)((tp->snd_nxt)-(tp->snd_max)) < 0))
258 tcp_sack_adjust(tp);
259 off = tp->snd_nxt - tp->snd_una;
260 win = ulmin(tp->snd_wnd, tp->snd_cwnd);
261
262 flags = tcp_outflags[tp->t_state];
263
264 /*
265 * Send any SACK-generated retransmissions. If we're explicitly trying
266 * to send out new data (when sendalot is 1), bypass this function.
267 * If we retransmit in fast recovery mode, decrement snd_cwnd, since
268 * we're replacing a (future) new transmission with a retransmission
269 * now, and we previously incremented snd_cwnd in tcp_input().
270 */
271 if (tp->sack_enable
6.4
Field 'sack_enable' is 0
&& !sendalot) {
272 if (tp->t_dupacks >= tcprexmtthresh &&
273 (p = tcp_sack_output(tp))) {
274 off = p->rxmit - tp->snd_una;
275 sack_rxmit = 1;
276 /* Coalesce holes into a single retransmission */
277 len = min(tp->t_maxseg, p->end - p->rxmit);
278 if (SEQ_LT(tp->snd_una, tp->snd_last)((int)((tp->snd_una)-(tp->snd_last)) < 0))
279 tp->snd_cwnd -= tp->t_maxseg;
280 }
281 }
282
283 sendalot = 0;
284 tso = 0;
285 /*
286 * If in persist timeout with window of 0, send 1 byte.
287 * Otherwise, if window is small but nonzero
288 * and timer expired, we will send what we can
289 * and go to transmit state.
290 */
291 if (tp->t_force) {
7
Assuming field 't_force' is 0
8
Taking false branch
292 if (win == 0) {
293 /*
294 * If we still have some data to send, then
295 * clear the FIN bit. Usually this would
296 * happen below when it realizes that we
297 * aren't sending all the data. However,
298 * if we have exactly 1 byte of unset data,
299 * then it won't clear the FIN bit below,
300 * and if we are in persist state, we wind
301 * up sending the packet without recording
302 * that we sent the FIN bit.
303 *
304 * We can't just blindly clear the FIN bit,
305 * because if we don't have any more data
306 * to send then the probe will be the FIN
307 * itself.
308 */
309 if (off < so->so_snd.sb_cc)
310 flags &= ~TH_FIN0x01;
311 win = 1;
312 } else {
313 TCP_TIMER_DISARM(tp, TCPT_PERSIST)do { (((tp)->t_flags) &= ~(0x04000000U << (1)));
timeout_del(&(tp)->t_timer[(1)]); } while (0)
;
314 tp->t_rxtshift = 0;
315 }
316 }
317
318 if (!sack_rxmit
8.1
'sack_rxmit' is 0
) {
9
Taking true branch
319 len = ulmin(so->so_snd.sb_cc, win) - off;
320 }
321
322 if (len < 0) {
10
Assuming 'len' is >= 0
11
Taking false branch
323 /*
324 * If FIN has been sent but not acked,
325 * but we haven't been called to retransmit,
326 * len will be -1. Otherwise, window shrank
327 * after we sent into it. If window shrank to 0,
328 * cancel pending retransmit, pull snd_nxt back
329 * to (closed) window, and set the persist timer
330 * if it isn't already going. If the window didn't
331 * close completely, just wait for an ACK.
332 */
333 len = 0;
334 if (win == 0) {
335 TCP_TIMER_DISARM(tp, TCPT_REXMT)do { (((tp)->t_flags) &= ~(0x04000000U << (0)));
timeout_del(&(tp)->t_timer[(0)]); } while (0)
;
336 tp->t_rxtshift = 0;
337 tp->snd_nxt = tp->snd_una;
338 if (TCP_TIMER_ISARMED(tp, TCPT_PERSIST)(((tp)->t_flags) & (0x04000000U << (1))) == 0)
339 tcp_setpersist(tp);
340 }
341 }
342
343 /*
344 * Never send more than half a buffer full. This insures that we can
345 * always keep 2 packets on the wire, no matter what SO_SNDBUF is, and
346 * therefore acks will never be delayed unless we run out of data to
347 * transmit.
348 */
349 txmaxseg = ulmin(so->so_snd.sb_hiwat / 2, tp->t_maxseg);
350
351 if (len > txmaxseg) {
12
Assuming 'len' is <= 'txmaxseg'
13
Taking false branch
352 if (tcp_do_tso &&
353 tp->t_inpcb->inp_options == NULL((void *)0) &&
354 tp->t_inpcb->inp_outputopts6 == NULL((void *)0) &&
355#ifdef TCP_SIGNATURE1
356 ((tp->t_flags & TF_SIGNATURE0x0400U) == 0) &&
357#endif
358 len >= 2 * tp->t_maxseg &&
359 tp->rcv_numsacks == 0 && sack_rxmit == 0 &&
360 !(flags & (TH_SYN0x02|TH_RST0x04|TH_FIN0x01))) {
361 tso = 1;
362 /* avoid small chopped packets */
363 if (len > (len / tp->t_maxseg) * tp->t_maxseg) {
364 len = (len / tp->t_maxseg) * tp->t_maxseg;
365 sendalot = 1;
366 }
367 } else {
368 len = txmaxseg;
369 sendalot = 1;
370 }
371 }
372 if (off + len < so->so_snd.sb_cc)
14
Assuming the condition is false
15
Taking false branch
373 flags &= ~TH_FIN0x01;
374
375 win = sbspace(so, &so->so_rcv);
376
377 /*
378 * Sender silly window avoidance. If connection is idle
379 * and can send all data, a maximum segment,
380 * at least a maximum default-size segment do it,
381 * or are forced, do it; otherwise don't bother.
382 * If peer's buffer is tiny, then send
383 * when window is at least half open.
384 * If retransmitting (possibly after persist timer forced us
385 * to send into a small window), then must resend.
386 */
387 if (len) {
16
Assuming 'len' is 0
17
Taking false branch
388 if (len >= txmaxseg)
389 goto send;
390 if ((idle || (tp->t_flags & TF_NODELAY0x0004U)) &&
391 len + off >= so->so_snd.sb_cc && !soissending(so)((so)->so_snd.sb_state & 0x2000) &&
392 (tp->t_flags & TF_NOPUSH0x02000000U) == 0)
393 goto send;
394 if (tp->t_force)
395 goto send;
396 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0)
397 goto send;
398 if (SEQ_LT(tp->snd_nxt, tp->snd_max)((int)((tp->snd_nxt)-(tp->snd_max)) < 0))
399 goto send;
400 if (sack_rxmit)
401 goto send;
402 }
403
404 /*
405 * Compare available window to amount of window
406 * known to peer (as advertised window less
407 * next expected input). If the difference is at least two
408 * max size segments, or at least 50% of the maximum possible
409 * window, then want to send a window update to peer.
410 */
411 if (win > 0) {
18
Assuming 'win' is <= 0
19
Taking false branch
412 /*
413 * "adv" is the amount we can increase the window,
414 * taking into account that we are limited by
415 * TCP_MAXWIN << tp->rcv_scale.
416 */
417 long adv = lmin(win, (long)TCP_MAXWIN65535 << tp->rcv_scale) -
418 (tp->rcv_adv - tp->rcv_nxt);
419
420 if (adv >= (long) (2 * tp->t_maxseg))
421 goto send;
422 if (2 * adv >= (long) so->so_rcv.sb_hiwat)
423 goto send;
424 }
425
426 /*
427 * Send if we owe peer an ACK.
428 */
429 if (tp->t_flags & TF_ACKNOW0x0001U)
20
Assuming the condition is false
21
Taking false branch
430 goto send;
431 if (flags & (TH_SYN0x02|TH_RST0x04))
22
Assuming the condition is false
23
Taking false branch
432 goto send;
433 if (SEQ_GT(tp->snd_up, tp->snd_una)((int)((tp->snd_up)-(tp->snd_una)) > 0))
24
Assuming the condition is false
434 goto send;
435 /*
436 * If our state indicates that FIN should be sent
437 * and we have not yet done so, or we're retransmitting the FIN,
438 * then we need to send.
439 */
440 if (flags & TH_FIN0x01 &&
25
Assuming the condition is true
28
Taking true branch
441 ((tp->t_flags & TF_SENTFIN0x0010U) == 0 || tp->snd_nxt == tp->snd_una))
26
Assuming the condition is false
27
Assuming field 'snd_nxt' is equal to field 'snd_una'
442 goto send;
29
Control jumps to line 498
443 /*
444 * In SACK, it is possible for tcp_output to fail to send a segment
445 * after the retransmission timer has been turned off. Make sure
446 * that the retransmission timer is set.
447 */
448 if (SEQ_GT(tp->snd_max, tp->snd_una)((int)((tp->snd_max)-(tp->snd_una)) > 0) &&
449 TCP_TIMER_ISARMED(tp, TCPT_REXMT)(((tp)->t_flags) & (0x04000000U << (0))) == 0 &&
450 TCP_TIMER_ISARMED(tp, TCPT_PERSIST)(((tp)->t_flags) & (0x04000000U << (1))) == 0) {
451 TCP_TIMER_ARM(tp, TCPT_REXMT, tp->t_rxtcur)do { (((tp)->t_flags) |= (0x04000000U << (0))); timeout_add_msec
(&(tp)->t_timer[(0)], (tp->t_rxtcur)); } while (0)
;
452 return (0);
453 }
454
455 /*
456 * TCP window updates are not reliable, rather a polling protocol
457 * using ``persist'' packets is used to insure receipt of window
458 * updates. The three ``states'' for the output side are:
459 * idle not doing retransmits or persists
460 * persisting to move a small or zero window
461 * (re)transmitting and thereby not persisting
462 *
463 * tp->t_timer[TCPT_PERSIST]
464 * is set when we are in persist state.
465 * tp->t_force
466 * is set when we are called to send a persist packet.
467 * tp->t_timer[TCPT_REXMT]
468 * is set when we are retransmitting
469 * The output side is idle when both timers are zero.
470 *
471 * If send window is too small, there is data to transmit, and no
472 * retransmit or persist is pending, then go to persist state.
473 * If nothing happens soon, send when timer expires:
474 * if window is nonzero, transmit what we can,
475 * otherwise force out a byte.
476 */
477 if (so->so_snd.sb_cc && TCP_TIMER_ISARMED(tp, TCPT_REXMT)(((tp)->t_flags) & (0x04000000U << (0))) == 0 &&
478 TCP_TIMER_ISARMED(tp, TCPT_PERSIST)(((tp)->t_flags) & (0x04000000U << (1))) == 0) {
479 tp->t_rxtshift = 0;
480 tcp_setpersist(tp);
481 }
482
483 /*
484 * No reason to send a segment, just return.
485 */
486 return (0);
487
488send:
489 /*
490 * Before ESTABLISHED, force sending of initial options
491 * unless TCP set not to do any options.
492 * NOTE: we assume that the IP/TCP header plus TCP options
493 * always fit in a single mbuf, leaving room for a maximum
494 * link header, i.e.
495 * max_linkhdr + sizeof(network header) + sizeof(struct tcphdr +
496 * optlen <= MHLEN
497 */
498 optlen = 0;
499
500 switch (tp->pf) {
30
Control jumps to 'case 24:' at line 506
501 case 0: /*default to PF_INET*/
502 case PF_INET2:
503 hdrlen = sizeof(struct ip) + sizeof(struct tcphdr);
504 break;
505#ifdef INET61
506 case PF_INET624:
507 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
508 break;
509#endif /* INET6 */
510 default:
511 return (EPFNOSUPPORT46);
512 }
513
514 if (flags & TH_SYN0x02) {
31
Execution continues on line 514
32
Assuming the condition is false
515 tp->snd_nxt = tp->iss;
516 if ((tp->t_flags & TF_NOOPT0x0008U) == 0) {
517 u_int16_t mss;
518
519 opt[0] = TCPOPT_MAXSEG2;
520 opt[1] = 4;
521 mss = htons((u_int16_t) tcp_mss(tp, 0))(__uint16_t)(__builtin_constant_p((u_int16_t) tcp_mss(tp, 0))
? (__uint16_t)(((__uint16_t)((u_int16_t) tcp_mss(tp, 0)) &
0xffU) << 8 | ((__uint16_t)((u_int16_t) tcp_mss(tp, 0)
) & 0xff00U) >> 8) : __swap16md((u_int16_t) tcp_mss
(tp, 0)))
;
522 memcpy(opt + 2, &mss, sizeof(mss))__builtin_memcpy((opt + 2), (&mss), (sizeof(mss)));
523 optlen = 4;
524
525 if (flags & TH_ACK0x10)
526 tcp_mss_update(tp);
527 /*
528 * If this is the first SYN of connection (not a SYN
529 * ACK), include SACK_PERMIT_HDR option. If this is a
530 * SYN ACK, include SACK_PERMIT_HDR option if peer has
531 * already done so.
532 */
533 if (tp->sack_enable && ((flags & TH_ACK0x10) == 0 ||
534 (tp->t_flags & TF_SACK_PERMIT0x0200U))) {
535 *((u_int32_t *) (opt + optlen)) =
536 htonl(TCPOPT_SACK_PERMIT_HDR)(__uint32_t)(__builtin_constant_p((1<<24|1<<16|4<<
8|2)) ? (__uint32_t)(((__uint32_t)((1<<24|1<<16|4
<<8|2)) & 0xff) << 24 | ((__uint32_t)((1<<
24|1<<16|4<<8|2)) & 0xff00) << 8 | ((__uint32_t
)((1<<24|1<<16|4<<8|2)) & 0xff0000) >>
8 | ((__uint32_t)((1<<24|1<<16|4<<8|2)) &
0xff000000) >> 24) : __swap32md((1<<24|1<<
16|4<<8|2)))
;
537 optlen += 4;
538 }
539 if ((tp->t_flags & TF_REQ_SCALE0x0020U) &&
540 ((flags & TH_ACK0x10) == 0 ||
541 (tp->t_flags & TF_RCVD_SCALE0x0040U))) {
542 *((u_int32_t *) (opt + optlen)) = htonl((__uint32_t)(__builtin_constant_p(1 << 24 | 3 << 16
| 3 << 8 | tp->request_r_scale) ? (__uint32_t)(((__uint32_t
)(1 << 24 | 3 << 16 | 3 << 8 | tp->request_r_scale
) & 0xff) << 24 | ((__uint32_t)(1 << 24 | 3 <<
16 | 3 << 8 | tp->request_r_scale) & 0xff00) <<
8 | ((__uint32_t)(1 << 24 | 3 << 16 | 3 <<
8 | tp->request_r_scale) & 0xff0000) >> 8 | ((__uint32_t
)(1 << 24 | 3 << 16 | 3 << 8 | tp->request_r_scale
) & 0xff000000) >> 24) : __swap32md(1 << 24 |
3 << 16 | 3 << 8 | tp->request_r_scale))
543 TCPOPT_NOP << 24 |(__uint32_t)(__builtin_constant_p(1 << 24 | 3 << 16
| 3 << 8 | tp->request_r_scale) ? (__uint32_t)(((__uint32_t
)(1 << 24 | 3 << 16 | 3 << 8 | tp->request_r_scale
) & 0xff) << 24 | ((__uint32_t)(1 << 24 | 3 <<
16 | 3 << 8 | tp->request_r_scale) & 0xff00) <<
8 | ((__uint32_t)(1 << 24 | 3 << 16 | 3 <<
8 | tp->request_r_scale) & 0xff0000) >> 8 | ((__uint32_t
)(1 << 24 | 3 << 16 | 3 << 8 | tp->request_r_scale
) & 0xff000000) >> 24) : __swap32md(1 << 24 |
3 << 16 | 3 << 8 | tp->request_r_scale))
544 TCPOPT_WINDOW << 16 |(__uint32_t)(__builtin_constant_p(1 << 24 | 3 << 16
| 3 << 8 | tp->request_r_scale) ? (__uint32_t)(((__uint32_t
)(1 << 24 | 3 << 16 | 3 << 8 | tp->request_r_scale
) & 0xff) << 24 | ((__uint32_t)(1 << 24 | 3 <<
16 | 3 << 8 | tp->request_r_scale) & 0xff00) <<
8 | ((__uint32_t)(1 << 24 | 3 << 16 | 3 <<
8 | tp->request_r_scale) & 0xff0000) >> 8 | ((__uint32_t
)(1 << 24 | 3 << 16 | 3 << 8 | tp->request_r_scale
) & 0xff000000) >> 24) : __swap32md(1 << 24 |
3 << 16 | 3 << 8 | tp->request_r_scale))
545 TCPOLEN_WINDOW << 8 |(__uint32_t)(__builtin_constant_p(1 << 24 | 3 << 16
| 3 << 8 | tp->request_r_scale) ? (__uint32_t)(((__uint32_t
)(1 << 24 | 3 << 16 | 3 << 8 | tp->request_r_scale
) & 0xff) << 24 | ((__uint32_t)(1 << 24 | 3 <<
16 | 3 << 8 | tp->request_r_scale) & 0xff00) <<
8 | ((__uint32_t)(1 << 24 | 3 << 16 | 3 <<
8 | tp->request_r_scale) & 0xff0000) >> 8 | ((__uint32_t
)(1 << 24 | 3 << 16 | 3 << 8 | tp->request_r_scale
) & 0xff000000) >> 24) : __swap32md(1 << 24 |
3 << 16 | 3 << 8 | tp->request_r_scale))
546 tp->request_r_scale)(__uint32_t)(__builtin_constant_p(1 << 24 | 3 << 16
| 3 << 8 | tp->request_r_scale) ? (__uint32_t)(((__uint32_t
)(1 << 24 | 3 << 16 | 3 << 8 | tp->request_r_scale
) & 0xff) << 24 | ((__uint32_t)(1 << 24 | 3 <<
16 | 3 << 8 | tp->request_r_scale) & 0xff00) <<
8 | ((__uint32_t)(1 << 24 | 3 << 16 | 3 <<
8 | tp->request_r_scale) & 0xff0000) >> 8 | ((__uint32_t
)(1 << 24 | 3 << 16 | 3 << 8 | tp->request_r_scale
) & 0xff000000) >> 24) : __swap32md(1 << 24 |
3 << 16 | 3 << 8 | tp->request_r_scale))
;
547 optlen += 4;
548 }
549 }
550 }
551
552 /*
553 * Send a timestamp and echo-reply if this is a SYN and our side
554 * wants to use timestamps (TF_REQ_TSTMP is set) or both our side
555 * and our peer have sent timestamps in our SYN's.
556 */
557 if ((tp->t_flags & (TF_REQ_TSTMP0x0080U|TF_NOOPT0x0008U)) == TF_REQ_TSTMP0x0080U &&
33
Assuming the condition is false
558 (flags & TH_RST0x04) == 0 &&
559 ((flags & (TH_SYN0x02|TH_ACK0x10)) == TH_SYN0x02 ||
560 (tp->t_flags & TF_RCVD_TSTMP0x0100U))) {
561 u_int32_t *lp = (u_int32_t *)(opt + optlen);
562
563 /* Form timestamp option as shown in appendix A of RFC 1323. */
564 *lp++ = htonl(TCPOPT_TSTAMP_HDR)(__uint32_t)(__builtin_constant_p((1<<24|1<<16|8<<
8|10)) ? (__uint32_t)(((__uint32_t)((1<<24|1<<16|
8<<8|10)) & 0xff) << 24 | ((__uint32_t)((1<<
24|1<<16|8<<8|10)) & 0xff00) << 8 | ((__uint32_t
)((1<<24|1<<16|8<<8|10)) & 0xff0000) >>
8 | ((__uint32_t)((1<<24|1<<16|8<<8|10)) &
0xff000000) >> 24) : __swap32md((1<<24|1<<
16|8<<8|10)))
;
565 *lp++ = htonl(now + tp->ts_modulate)(__uint32_t)(__builtin_constant_p(now + tp->ts_modulate) ?
(__uint32_t)(((__uint32_t)(now + tp->ts_modulate) & 0xff
) << 24 | ((__uint32_t)(now + tp->ts_modulate) &
0xff00) << 8 | ((__uint32_t)(now + tp->ts_modulate)
& 0xff0000) >> 8 | ((__uint32_t)(now + tp->ts_modulate
) & 0xff000000) >> 24) : __swap32md(now + tp->ts_modulate
))
;
566 *lp = htonl(tp->ts_recent)(__uint32_t)(__builtin_constant_p(tp->ts_recent) ? (__uint32_t
)(((__uint32_t)(tp->ts_recent) & 0xff) << 24 | (
(__uint32_t)(tp->ts_recent) & 0xff00) << 8 | ((__uint32_t
)(tp->ts_recent) & 0xff0000) >> 8 | ((__uint32_t
)(tp->ts_recent) & 0xff000000) >> 24) : __swap32md
(tp->ts_recent))
;
567 optlen += TCPOLEN_TSTAMP_APPA(10 +2);
568 }
569 /* Set receive buffer autosizing timestamp. */
570 if (tp->rfbuf_ts == 0) {
34
Assuming field 'rfbuf_ts' is not equal to 0
35
Taking false branch
571 tp->rfbuf_ts = now;
572 tp->rfbuf_cnt = 0;
573 }
574
575#ifdef TCP_SIGNATURE1
576 if (tp->t_flags & TF_SIGNATURE0x0400U) {
36
Assuming the condition is false
577 u_int8_t *bp = (u_int8_t *)(opt + optlen);
578
579 /* Send signature option */
580 *(bp++) = TCPOPT_SIGNATURE19;
581 *(bp++) = TCPOLEN_SIGNATURE18;
582 sigoff = optlen + 2;
583
584 {
585 unsigned int i;
586
587 for (i = 0; i < 16; i++)
588 *(bp++) = 0;
589 }
590
591
592 /* Pad options list to the next 32 bit boundary and
593 * terminate it.
594 */
595 *bp++ = TCPOPT_NOP1;
596 *bp++ = TCPOPT_NOP1;
597
598 optlen += TCPOLEN_SIGLEN(18 +2);
599 }
600#endif /* TCP_SIGNATURE */
601
602 /*
603 * Send SACKs if necessary. This should be the last option processed.
604 * Only as many SACKs are sent as are permitted by the maximum options
605 * size. No more than three SACKs are sent.
606 */
607 if (tp->sack_enable
36.1
Field 'sack_enable' is 0
&& tp->t_state == TCPS_ESTABLISHED4 &&
608 (tp->t_flags & (TF_SACK_PERMIT0x0200U|TF_NOOPT0x0008U)) == TF_SACK_PERMIT0x0200U &&
609 tp->rcv_numsacks) {
610 u_int32_t *lp = (u_int32_t *)(opt + optlen);
611 u_int32_t *olp = lp++;
612 int count = 0; /* actual number of SACKs inserted */
613 int maxsack = (MAX_TCPOPTLEN40 - (optlen + 4))/TCPOLEN_SACK8;
614
615 tcpstat_inc(tcps_sack_snd_opts);
616 maxsack = min(maxsack, TCP_MAX_SACK3);
617 for (i = 0; (i < tp->rcv_numsacks && count < maxsack); i++) {
618 struct sackblk sack = tp->sackblks[i];
619 if (sack.start == 0 && sack.end == 0)
620 continue;
621 *lp++ = htonl(sack.start)(__uint32_t)(__builtin_constant_p(sack.start) ? (__uint32_t)(
((__uint32_t)(sack.start) & 0xff) << 24 | ((__uint32_t
)(sack.start) & 0xff00) << 8 | ((__uint32_t)(sack.start
) & 0xff0000) >> 8 | ((__uint32_t)(sack.start) &
0xff000000) >> 24) : __swap32md(sack.start))
;
622 *lp++ = htonl(sack.end)(__uint32_t)(__builtin_constant_p(sack.end) ? (__uint32_t)(((
__uint32_t)(sack.end) & 0xff) << 24 | ((__uint32_t)
(sack.end) & 0xff00) << 8 | ((__uint32_t)(sack.end)
& 0xff0000) >> 8 | ((__uint32_t)(sack.end) & 0xff000000
) >> 24) : __swap32md(sack.end))
;
623 count++;
624 }
625 *olp = htonl(TCPOPT_SACK_HDR|(TCPOLEN_SACK*count+2))(__uint32_t)(__builtin_constant_p((1<<24|1<<16|5<<
8)|(8*count+2)) ? (__uint32_t)(((__uint32_t)((1<<24|1<<
16|5<<8)|(8*count+2)) & 0xff) << 24 | ((__uint32_t
)((1<<24|1<<16|5<<8)|(8*count+2)) & 0xff00
) << 8 | ((__uint32_t)((1<<24|1<<16|5<<
8)|(8*count+2)) & 0xff0000) >> 8 | ((__uint32_t)((1
<<24|1<<16|5<<8)|(8*count+2)) & 0xff000000
) >> 24) : __swap32md((1<<24|1<<16|5<<
8)|(8*count+2)))
;
626 optlen += TCPOLEN_SACK8*count + 4; /* including leading NOPs */
627 }
628
629#ifdef DIAGNOSTIC1
630 if (optlen
36.2
'optlen' is <= MAX_TCPOPTLEN
> MAX_TCPOPTLEN40)
37
Taking false branch
631 panic("tcp_output: options too long");
632#endif /* DIAGNOSTIC */
633
634 hdrlen += optlen;
635
636 /*
637 * Adjust data length if insertion of options will
638 * bump the packet length beyond the t_maxopd length.
639 * Clear the FIN bit because we cut off the tail of
640 * the segment.
641 */
642 if (len > tp->t_maxopd - optlen) {
38
Assuming the condition is false
39
Taking false branch
643 if (tso) {
644 if (len + hdrlen + max_linkhdr > MAXMCLBYTES(64 * 1024)) {
645 len = MAXMCLBYTES(64 * 1024) - hdrlen - max_linkhdr;
646 sendalot = 1;
647 }
648 } else {
649 len = tp->t_maxopd - optlen;
650 sendalot = 1;
651 }
652 flags &= ~TH_FIN0x01;
653 }
654
655#ifdef DIAGNOSTIC1
656 if (max_linkhdr + hdrlen > MCLBYTES(1 << 11))
40
Assuming the condition is false
41
Taking false branch
657 panic("tcphdr too big");
658#endif
659
660 /*
661 * Grab a header mbuf, attaching a copy of data to
662 * be transmitted, and initialize the header from
663 * the template for sends on this connection.
664 */
665 if (len
41.1
'len' is 0
) {
42
Taking false branch
666 if (tp->t_force && len == 1)
667 tcpstat_inc(tcps_sndprobe);
668 else if (SEQ_LT(tp->snd_nxt, tp->snd_max)((int)((tp->snd_nxt)-(tp->snd_max)) < 0)) {
669 tcpstat_pkt(tcps_sndrexmitpack, tcps_sndrexmitbyte,
670 len);
671 tp->t_sndrexmitpack++;
672 } else {
673 tcpstat_pkt(tcps_sndpack, tcps_sndbyte, len);
674 }
675#ifdef notyet
676 if ((m = m_copypack(so->so_snd.sb_mb, off,
677 (int)len, max_linkhdr + hdrlen)) == 0) {
678 error = ENOBUFS55;
679 goto out;
680 }
681 /*
682 * m_copypack left space for our hdr; use it.
683 */
684 m->m_lenm_hdr.mh_len += hdrlen;
685 m->m_datam_hdr.mh_data -= hdrlen;
686#else
687 MGETHDR(m, M_DONTWAIT, MT_HEADER)m = m_gethdr((0x0002), (2));
688 if (m != NULL((void *)0) && max_linkhdr + hdrlen > MHLEN((256 - sizeof(struct m_hdr)) - sizeof(struct pkthdr))) {
689 MCLGET(m, M_DONTWAIT)(void) m_clget((m), (0x0002), (1 << 11));
690 if ((m->m_flagsm_hdr.mh_flags & M_EXT0x0001) == 0) {
691 m_freem(m);
692 m = NULL((void *)0);
693 }
694 }
695 if (m == NULL((void *)0)) {
696 error = ENOBUFS55;
697 goto out;
698 }
699 m->m_datam_hdr.mh_data += max_linkhdr;
700 m->m_lenm_hdr.mh_len = hdrlen;
701 if (len <= m_trailingspace(m)) {
702 m_copydata(so->so_snd.sb_mb, off, (int) len,
703 mtod(m, caddr_t)((caddr_t)((m)->m_hdr.mh_data)) + hdrlen);
704 m->m_lenm_hdr.mh_len += len;
705 } else {
706 m->m_nextm_hdr.mh_next = m_copym(so->so_snd.sb_mb, off, (int) len,
707 M_NOWAIT0x0002);
708 if (m->m_nextm_hdr.mh_next == 0) {
709 (void) m_free(m);
710 error = ENOBUFS55;
711 goto out;
712 }
713 }
714 if (so->so_snd.sb_mb->m_flagsm_hdr.mh_flags & M_PKTHDR0x0002)
715 m->m_pkthdrM_dat.MH.MH_pkthdr.ph_loopcnt =
716 so->so_snd.sb_mb->m_pkthdrM_dat.MH.MH_pkthdr.ph_loopcnt;
717#endif
718 /*
719 * If we're sending everything we've got, set PUSH.
720 * (This will keep happy those implementations which only
721 * give data to the user when a buffer fills or
722 * a PUSH comes in.)
723 */
724 if (off + len == so->so_snd.sb_cc && !soissending(so)((so)->so_snd.sb_state & 0x2000))
725 flags |= TH_PUSH0x08;
726 tp->t_sndtime = now;
727 } else {
728 if (tp->t_flags & TF_ACKNOW0x0001U)
43
Taking false branch
729 tcpstat_inc(tcps_sndacks);
730 else if (flags & (TH_SYN0x02|TH_FIN0x01|TH_RST0x04))
44
Assuming the condition is false
45
Taking false branch
731 tcpstat_inc(tcps_sndctrl);
732 else if (SEQ_GT(tp->snd_up, tp->snd_una)((int)((tp->snd_up)-(tp->snd_una)) > 0))
46
Taking false branch
733 tcpstat_inc(tcps_sndurg);
734 else
735 tcpstat_inc(tcps_sndwinup);
736
737 MGETHDR(m, M_DONTWAIT, MT_HEADER)m = m_gethdr((0x0002), (2));
738 if (m != NULL((void *)0) && max_linkhdr + hdrlen > MHLEN((256 - sizeof(struct m_hdr)) - sizeof(struct pkthdr))) {
47
Assuming 'm' is not equal to NULL
48
Assuming the condition is false
49
Taking false branch
739 MCLGET(m, M_DONTWAIT)(void) m_clget((m), (0x0002), (1 << 11));
740 if ((m->m_flagsm_hdr.mh_flags & M_EXT0x0001) == 0) {
741 m_freem(m);
742 m = NULL((void *)0);
743 }
744 }
745 if (m
49.1
'm' is not equal to NULL
== NULL((void *)0)) {
50
Taking false branch
746 error = ENOBUFS55;
747 goto out;
748 }
749 m->m_datam_hdr.mh_data += max_linkhdr;
750 m->m_lenm_hdr.mh_len = hdrlen;
751 }
752 m->m_pkthdrM_dat.MH.MH_pkthdr.ph_ifidx = 0;
753 m->m_pkthdrM_dat.MH.MH_pkthdr.len = hdrlen + len;
754
755 /* Enable TSO and specify the size of the resulting segments. */
756 if (tso
50.1
'tso' is 0
) {
51
Taking false branch
757 SET(m->m_pkthdr.csum_flags, M_TCP_TSO)((m->M_dat.MH.MH_pkthdr.csum_flags) |= (0x8000));
758 m->m_pkthdrM_dat.MH.MH_pkthdr.ph_mss = tp->t_maxseg;
759 }
760
761 if (!tp->t_template)
52
Assuming field 't_template' is non-null
53
Taking false branch
762 panic("tcp_output");
763#ifdef DIAGNOSTIC1
764 if (tp->t_template->m_lenm_hdr.mh_len != hdrlen - optlen)
54
Assuming the condition is false
55
Taking false branch
765 panic("tcp_output: template len != hdrlen - optlen");
766#endif /* DIAGNOSTIC */
767 memcpy(mtod(m, caddr_t), mtod(tp->t_template, caddr_t),__builtin_memcpy((((caddr_t)((m)->m_hdr.mh_data))), (((caddr_t
)((tp->t_template)->m_hdr.mh_data))), (tp->t_template
->m_hdr.mh_len))
768 tp->t_template->m_len)__builtin_memcpy((((caddr_t)((m)->m_hdr.mh_data))), (((caddr_t
)((tp->t_template)->m_hdr.mh_data))), (tp->t_template
->m_hdr.mh_len))
;
769 th = (struct tcphdr *)(mtod(m, caddr_t)((caddr_t)((m)->m_hdr.mh_data)) + tp->t_template->m_lenm_hdr.mh_len -
770 sizeof(struct tcphdr));
771
772 /*
773 * Fill in fields, remembering maximum advertised
774 * window for use in delaying messages about window sizes.
775 * If resending a FIN, be sure not to use a new sequence number.
776 */
777 if ((flags & TH_FIN0x01) && (tp->t_flags & TF_SENTFIN0x0010U) &&
778 (tp->snd_nxt == tp->snd_max))
56
Assuming field 'snd_nxt' is not equal to field 'snd_max'
779 tp->snd_nxt--;
780 /*
781 * If we are doing retransmissions, then snd_nxt will
782 * not reflect the first unsent octet. For ACK only
783 * packets, we do not want the sequence number of the
784 * retransmitted packet, we want the sequence number
785 * of the next unsent octet. So, if there is no data
786 * (and no SYN or FIN), use snd_max instead of snd_nxt
787 * when filling in ti_seq. But if we are in persist
788 * state, snd_max might reflect one byte beyond the
789 * right edge of the window, so use snd_nxt in that
790 * case, since we know we aren't doing a retransmission.
791 * (retransmit and persist are mutually exclusive...)
792 */
793 if (len
56.1
'len' is 0
|| (flags & (TH_SYN0x02|TH_FIN0x01)) ||
57
Assuming the condition is false
794 TCP_TIMER_ISARMED(tp, TCPT_PERSIST)(((tp)->t_flags) & (0x04000000U << (1))))
58
Assuming the condition is false
795 th->th_seq = htonl(tp->snd_nxt)(__uint32_t)(__builtin_constant_p(tp->snd_nxt) ? (__uint32_t
)(((__uint32_t)(tp->snd_nxt) & 0xff) << 24 | ((__uint32_t
)(tp->snd_nxt) & 0xff00) << 8 | ((__uint32_t)(tp
->snd_nxt) & 0xff0000) >> 8 | ((__uint32_t)(tp->
snd_nxt) & 0xff000000) >> 24) : __swap32md(tp->snd_nxt
))
;
796 else
797 th->th_seq = htonl(tp->snd_max)(__uint32_t)(__builtin_constant_p(tp->snd_max) ? (__uint32_t
)(((__uint32_t)(tp->snd_max) & 0xff) << 24 | ((__uint32_t
)(tp->snd_max) & 0xff00) << 8 | ((__uint32_t)(tp
->snd_max) & 0xff0000) >> 8 | ((__uint32_t)(tp->
snd_max) & 0xff000000) >> 24) : __swap32md(tp->snd_max
))
;
59
Taking false branch
60
'?' condition is false
798
799 if (sack_rxmit
60.1
'sack_rxmit' is 0
) {
800 /*
801 * If sendalot was turned on (due to option stuffing), turn it
802 * off. Properly set th_seq field. Advance the ret'x pointer
803 * by len.
804 */
805 if (sendalot)
806 sendalot = 0;
807 th->th_seq = htonl(p->rxmit)(__uint32_t)(__builtin_constant_p(p->rxmit) ? (__uint32_t)
(((__uint32_t)(p->rxmit) & 0xff) << 24 | ((__uint32_t
)(p->rxmit) & 0xff00) << 8 | ((__uint32_t)(p->
rxmit) & 0xff0000) >> 8 | ((__uint32_t)(p->rxmit
) & 0xff000000) >> 24) : __swap32md(p->rxmit))
;
808 p->rxmit += len;
809 tcpstat_pkt(tcps_sack_rexmits, tcps_sack_rexmit_bytes, len);
810 }
811
812 th->th_ack = htonl(tp->rcv_nxt)(__uint32_t)(__builtin_constant_p(tp->rcv_nxt) ? (__uint32_t
)(((__uint32_t)(tp->rcv_nxt) & 0xff) << 24 | ((__uint32_t
)(tp->rcv_nxt) & 0xff00) << 8 | ((__uint32_t)(tp
->rcv_nxt) & 0xff0000) >> 8 | ((__uint32_t)(tp->
rcv_nxt) & 0xff000000) >> 24) : __swap32md(tp->rcv_nxt
))
;
61
Taking false branch
62
'?' condition is false
813 if (optlen
62.1
'optlen' is 0
) {
63
Taking false branch
814 memcpy(th + 1, opt, optlen)__builtin_memcpy((th + 1), (opt), (optlen));
815 th->th_off = (sizeof (struct tcphdr) + optlen) >> 2;
816 }
817#ifdef TCP_ECN1
818 if (tcp_do_ecn) {
64
Assuming 'tcp_do_ecn' is 0
65
Taking false branch
819 /*
820 * if we have received congestion experienced segs,
821 * set ECE bit.
822 */
823 if (tp->t_flags & TF_RCVD_CE0x00010000U) {
824 flags |= TH_ECE0x40;
825 tcpstat_inc(tcps_ecn_sndece);
826 }
827 if (!(tp->t_flags & TF_DISABLE_ECN0x00040000U)) {
828 /*
829 * if this is a SYN seg, set ECE and CWR.
830 * set only ECE for SYN-ACK if peer supports ECN.
831 */
832 if ((flags & (TH_SYN0x02|TH_ACK0x10)) == TH_SYN0x02)
833 flags |= (TH_ECE0x40|TH_CWR0x80);
834 else if ((tp->t_flags & TF_ECN_PERMIT0x00008000U) &&
835 (flags & (TH_SYN0x02|TH_ACK0x10)) == (TH_SYN0x02|TH_ACK0x10))
836 flags |= TH_ECE0x40;
837 }
838 /*
839 * if we have reduced the congestion window, notify
840 * the peer by setting CWR bit.
841 */
842 if ((tp->t_flags & TF_ECN_PERMIT0x00008000U) &&
843 (tp->t_flags & TF_SEND_CWR0x00020000U)) {
844 flags |= TH_CWR0x80;
845 tp->t_flags &= ~TF_SEND_CWR0x00020000U;
846 tcpstat_inc(tcps_ecn_sndcwr);
847 }
848 }
849#endif
850 th->th_flags = flags;
851
852 /*
853 * Calculate receive window. Don't shrink window,
854 * but avoid silly window syndrome.
855 */
856 if (win < (long)(so->so_rcv.sb_hiwat / 4) && win < (long)tp->t_maxseg)
66
Assuming the condition is false
857 win = 0;
858 if (win > (long)TCP_MAXWIN65535 << tp->rcv_scale)
67
Assuming the condition is false
68
Taking false branch
859 win = (long)TCP_MAXWIN65535 << tp->rcv_scale;
860 if (win < (long)(int32_t)(tp->rcv_adv - tp->rcv_nxt))
69
Assuming the condition is false
70
Taking false branch
861 win = (long)(int32_t)(tp->rcv_adv - tp->rcv_nxt);
862 if (flags & TH_RST0x04)
71
Assuming the condition is false
863 win = 0;
864 th->th_win = htons((u_int16_t) (win>>tp->rcv_scale))(__uint16_t)(__builtin_constant_p((u_int16_t) (win>>tp->
rcv_scale)) ? (__uint16_t)(((__uint16_t)((u_int16_t) (win>>
tp->rcv_scale)) & 0xffU) << 8 | ((__uint16_t)((u_int16_t
) (win>>tp->rcv_scale)) & 0xff00U) >> 8) :
__swap16md((u_int16_t) (win>>tp->rcv_scale)))
;
72
Taking false branch
73
'?' condition is false
865 if (th->th_win == 0)
74
Assuming field 'th_win' is not equal to 0
75
Taking false branch
866 tp->t_sndzerowin++;
867 if (SEQ_GT(tp->snd_up, tp->snd_nxt)((int)((tp->snd_up)-(tp->snd_nxt)) > 0)) {
76
Assuming the condition is false
77
Taking false branch
868 u_int32_t urp = tp->snd_up - tp->snd_nxt;
869 if (urp > IP_MAXPACKET65535)
870 urp = IP_MAXPACKET65535;
871 th->th_urp = htons((u_int16_t)urp)(__uint16_t)(__builtin_constant_p((u_int16_t)urp) ? (__uint16_t
)(((__uint16_t)((u_int16_t)urp) & 0xffU) << 8 | ((__uint16_t
)((u_int16_t)urp) & 0xff00U) >> 8) : __swap16md((u_int16_t
)urp))
;
872 th->th_flags |= TH_URG0x20;
873 } else
874 /*
875 * If no urgent pointer to send, then we pull
876 * the urgent pointer to the left edge of the send window
877 * so that it doesn't drift into the send window on sequence
878 * number wraparound.
879 */
880 tp->snd_up = tp->snd_una; /* drag it along */
881
882#ifdef TCP_SIGNATURE1
883 if (tp->t_flags & TF_SIGNATURE0x0400U) {
78
Taking false branch
884 int iphlen;
885 union sockaddr_union src, dst;
886 struct tdb *tdb;
887
888 bzero(&src, sizeof(union sockaddr_union))__builtin_bzero((&src), (sizeof(union sockaddr_union)));
889 bzero(&dst, sizeof(union sockaddr_union))__builtin_bzero((&dst), (sizeof(union sockaddr_union)));
890
891 switch (tp->pf) {
892 case 0: /*default to PF_INET*/
893 case AF_INET2:
894 iphlen = sizeof(struct ip);
895 src.sa.sa_len = sizeof(struct sockaddr_in);
896 src.sa.sa_family = AF_INET2;
897 src.sin.sin_addr = mtod(m, struct ip *)((struct ip *)((m)->m_hdr.mh_data))->ip_src;
898 dst.sa.sa_len = sizeof(struct sockaddr_in);
899 dst.sa.sa_family = AF_INET2;
900 dst.sin.sin_addr = mtod(m, struct ip *)((struct ip *)((m)->m_hdr.mh_data))->ip_dst;
901 break;
902#ifdef INET61
903 case AF_INET624:
904 iphlen = sizeof(struct ip6_hdr);
905 src.sa.sa_len = sizeof(struct sockaddr_in6);
906 src.sa.sa_family = AF_INET624;
907 src.sin6.sin6_addr = mtod(m, struct ip6_hdr *)((struct ip6_hdr *)((m)->m_hdr.mh_data))->ip6_src;
908 dst.sa.sa_len = sizeof(struct sockaddr_in6);
909 dst.sa.sa_family = AF_INET624;
910 dst.sin6.sin6_addr = mtod(m, struct ip6_hdr *)((struct ip6_hdr *)((m)->m_hdr.mh_data))->ip6_dst;
911 break;
912#endif /* INET6 */
913 }
914
915 tdb = gettdbbysrcdst(rtable_l2(tp->t_inpcb->inp_rtableid),gettdbbysrcdst_dir((rtable_l2(tp->t_inpcb->inp_rtableid
)),(0),(&src),(&dst),(6),0)
916 0, &src, &dst, IPPROTO_TCP)gettdbbysrcdst_dir((rtable_l2(tp->t_inpcb->inp_rtableid
)),(0),(&src),(&dst),(6),0)
;
917 if (tdb == NULL((void *)0)) {
918 m_freem(m);
919 return (EPERM1);
920 }
921
922 if (tcp_signature(tdb, tp->pf, m, th, iphlen, 0,
923 mtod(m, caddr_t)((caddr_t)((m)->m_hdr.mh_data)) + hdrlen - optlen + sigoff) < 0) {
924 m_freem(m);
925 tdb_unref(tdb);
926 return (EINVAL22);
927 }
928 tdb_unref(tdb);
929 }
930#endif /* TCP_SIGNATURE */
931
932 /* Defer checksumming until later (ip_output() or hardware) */
933 m->m_pkthdrM_dat.MH.MH_pkthdr.csum_flags |= M_TCP_CSUM_OUT0x0002;
934
935 /*
936 * In transmit state, time the transmission and arrange for
937 * the retransmit. In persist state, just set snd_max.
938 */
939 if (tp->t_force
78.1
Field 't_force' is equal to 0
== 0 || TCP_TIMER_ISARMED(tp, TCPT_PERSIST)(((tp)->t_flags) & (0x04000000U << (1))) == 0) {
940 tcp_seq startseq = tp->snd_nxt;
941
942 /*
943 * Advance snd_nxt over sequence space of this segment.
944 */
945 if (flags & (TH_SYN0x02|TH_FIN0x01)) {
79
Taking false branch
946 if (flags & TH_SYN0x02)
947 tp->snd_nxt++;
948 if (flags & TH_FIN0x01) {
949 tp->snd_nxt++;
950 tp->t_flags |= TF_SENTFIN0x0010U;
951 }
952 }
953 if (tp->sack_enable
79.1
Field 'sack_enable' is 0
) {
80
Taking false branch
954 if (sack_rxmit && (p->rxmit != tp->snd_nxt)) {
955 goto timer;
956 }
957 }
958 tp->snd_nxt += len;
959 if (SEQ_GT(tp->snd_nxt, tp->snd_max)((int)((tp->snd_nxt)-(tp->snd_max)) > 0)) {
81
Assuming the condition is true
82
Taking true branch
960 tp->snd_max = tp->snd_nxt;
961 /*
962 * Time this transmission if not a retransmission and
963 * not currently timing anything.
964 */
965 if (tp->t_rtttime == 0) {
83
Assuming field 't_rtttime' is not equal to 0
966 tp->t_rtttime = now;
967 tp->t_rtseq = startseq;
968 tcpstat_inc(tcps_segstimed);
969 }
970 }
971
972 /*
973 * Set retransmit timer if not currently set,
974 * and not doing an ack or a keep-alive probe.
975 * Initial value for retransmit timer is smoothed
976 * round-trip time + 2 * round-trip time variance.
977 * Initialize shift counter which is used for backoff
978 * of retransmit time.
979 */
980 timer:
981 if (tp->sack_enable
83.1
Field 'sack_enable' is 0
&& sack_rxmit &&
982 TCP_TIMER_ISARMED(tp, TCPT_REXMT)(((tp)->t_flags) & (0x04000000U << (0))) == 0 &&
983 tp->snd_nxt != tp->snd_max) {
984 TCP_TIMER_ARM(tp, TCPT_REXMT, tp->t_rxtcur)do { (((tp)->t_flags) |= (0x04000000U << (0))); timeout_add_msec
(&(tp)->t_timer[(0)], (tp->t_rxtcur)); } while (0)
;
985 if (TCP_TIMER_ISARMED(tp, TCPT_PERSIST)(((tp)->t_flags) & (0x04000000U << (1)))) {
986 TCP_TIMER_DISARM(tp, TCPT_PERSIST)do { (((tp)->t_flags) &= ~(0x04000000U << (1)));
timeout_del(&(tp)->t_timer[(1)]); } while (0)
;
987 tp->t_rxtshift = 0;
988 }
989 }
990
991 if (TCP_TIMER_ISARMED(tp, TCPT_REXMT)(((tp)->t_flags) & (0x04000000U << (0))) == 0 &&
84
Assuming the condition is false
992 tp->snd_nxt != tp->snd_una) {
993 TCP_TIMER_ARM(tp, TCPT_REXMT, tp->t_rxtcur)do { (((tp)->t_flags) |= (0x04000000U << (0))); timeout_add_msec
(&(tp)->t_timer[(0)], (tp->t_rxtcur)); } while (0)
;
994 if (TCP_TIMER_ISARMED(tp, TCPT_PERSIST)(((tp)->t_flags) & (0x04000000U << (1)))) {
995 TCP_TIMER_DISARM(tp, TCPT_PERSIST)do { (((tp)->t_flags) &= ~(0x04000000U << (1)));
timeout_del(&(tp)->t_timer[(1)]); } while (0)
;
996 tp->t_rxtshift = 0;
997 }
998 }
999
1000 if (len
84.1
'len' is equal to 0
== 0 && so->so_snd.sb_cc &&
85
Assuming field 'sb_cc' is 0
1001 TCP_TIMER_ISARMED(tp, TCPT_REXMT)(((tp)->t_flags) & (0x04000000U << (0))) == 0 &&
1002 TCP_TIMER_ISARMED(tp, TCPT_PERSIST)(((tp)->t_flags) & (0x04000000U << (1))) == 0) {
1003 /*
1004 * Avoid a situation where we do not set persist timer
1005 * after a zero window condition. For example:
1006 * 1) A -> B: packet with enough data to fill the window
1007 * 2) B -> A: ACK for #1 + new data (0 window
1008 * advertisement)
1009 * 3) A -> B: ACK for #2, 0 len packet
1010 *
1011 * In this case, A will not activate the persist timer,
1012 * because it chose to send a packet. Unless tcp_output
1013 * is called for some other reason (delayed ack timer,
1014 * another input packet from B, socket syscall), A will
1015 * not send zero window probes.
1016 *
1017 * So, if you send a 0-length packet, but there is data
1018 * in the socket buffer, and neither the rexmt or
1019 * persist timer is already set, then activate the
1020 * persist timer.
1021 */
1022 tp->t_rxtshift = 0;
1023 tcp_setpersist(tp);
1024 }
1025 } else
1026 if (SEQ_GT(tp->snd_nxt + len, tp->snd_max)((int)((tp->snd_nxt + len)-(tp->snd_max)) > 0))
1027 tp->snd_max = tp->snd_nxt + len;
1028
1029 tcp_update_sndspace(tp);
1030
1031 /*
1032 * Trace.
1033 */
1034 if (so->so_options & SO_DEBUG0x0001)
86
Assuming the condition is false
87
Taking false branch
1035 tcp_trace(TA_OUTPUT1, tp->t_state, tp, tp, mtod(m, caddr_t)((caddr_t)((m)->m_hdr.mh_data)), 0,
1036 len);
1037
1038 /*
1039 * Fill in IP length and desired time to live and
1040 * send to IP level. There should be a better way
1041 * to handle ttl and tos; we could keep them in
1042 * the template, but need a way to checksum without them.
1043 */
1044
1045#ifdef TCP_ECN1
1046 /*
1047 * if peer is ECN capable, set the ECT bit in the IP header.
1048 * but don't set ECT for a pure ack, a retransmit or a window probe.
1049 */
1050 needect = 0;
1051 if (tcp_do_ecn && (tp->t_flags & TF_ECN_PERMIT0x00008000U)) {
88
Assuming 'tcp_do_ecn' is 0
1052 if (len == 0 || SEQ_LT(tp->snd_nxt, tp->snd_max)((int)((tp->snd_nxt)-(tp->snd_max)) < 0) ||
1053 (tp->t_force && len == 1)) {
1054 /* don't set ECT */
1055 } else {
1056 needect = 1;
1057 tcpstat_inc(tcps_ecn_sndect);
1058 }
1059 }
1060#endif
1061
1062 /* force routing table */
1063 m->m_pkthdrM_dat.MH.MH_pkthdr.ph_rtableid = tp->t_inpcb->inp_rtableid;
1064
1065#if NPF1 > 0
1066 pf_mbuf_link_inpcb(m, tp->t_inpcb);
1067#endif
1068
1069 switch (tp->pf) {
1070 case 0: /*default to PF_INET*/
1071 case AF_INET2:
1072 {
1073 struct ip *ip;
1074
1075 ip = mtod(m, struct ip *)((struct ip *)((m)->m_hdr.mh_data));
1076 ip->ip_len = htons(m->m_pkthdr.len)(__uint16_t)(__builtin_constant_p(m->M_dat.MH.MH_pkthdr.len
) ? (__uint16_t)(((__uint16_t)(m->M_dat.MH.MH_pkthdr.len) &
0xffU) << 8 | ((__uint16_t)(m->M_dat.MH.MH_pkthdr.len
) & 0xff00U) >> 8) : __swap16md(m->M_dat.MH.MH_pkthdr
.len))
;
1077 packetlen = m->m_pkthdrM_dat.MH.MH_pkthdr.len;
1078 ip->ip_ttl = tp->t_inpcb->inp_ipinp_hu.hu_ip.ip_ttl;
1079 ip->ip_tos = tp->t_inpcb->inp_ipinp_hu.hu_ip.ip_tos;
1080#ifdef TCP_ECN1
1081 if (needect)
1082 ip->ip_tos |= IPTOS_ECN_ECT00x02;
1083#endif
1084 }
1085#if NSTOEPLITZ1 > 0
1086 m->m_pkthdrM_dat.MH.MH_pkthdr.ph_flowid = tp->t_inpcb->inp_flowid;
1087 SET(m->m_pkthdr.csum_flags, M_FLOWID)((m->M_dat.MH.MH_pkthdr.csum_flags) |= (0x4000));
1088#endif
1089 error = ip_output(m, tp->t_inpcb->inp_options,
1090 &tp->t_inpcb->inp_routeinp_ru.ru_route,
1091 (ip_mtudisc ? IP_MTUDISC0x0800 : 0), NULL((void *)0),
1092 tp->t_inpcb->inp_seclevel, 0);
1093 break;
1094#ifdef INET61
1095 case AF_INET624:
1096 {
1097 struct ip6_hdr *ip6;
1098
1099 ip6 = mtod(m, struct ip6_hdr *)((struct ip6_hdr *)((m)->m_hdr.mh_data));
1100 ip6->ip6_plenip6_ctlun.ip6_un1.ip6_un1_plen = m->m_pkthdrM_dat.MH.MH_pkthdr.len -
1101 sizeof(struct ip6_hdr);
1102 packetlen = m->m_pkthdrM_dat.MH.MH_pkthdr.len;
1103 ip6->ip6_nxtip6_ctlun.ip6_un1.ip6_un1_nxt = IPPROTO_TCP6;
1104 ip6->ip6_hlimip6_ctlun.ip6_un1.ip6_un1_hlim = in6_selecthlim(tp->t_inpcb);
1105#ifdef TCP_ECN1
1106 if (needect)
1107 ip6->ip6_flowip6_ctlun.ip6_un1.ip6_un1_flow |= htonl(IPTOS_ECN_ECT0 << 20)(__uint32_t)(__builtin_constant_p(0x02 << 20) ? (__uint32_t
)(((__uint32_t)(0x02 << 20) & 0xff) << 24 | (
(__uint32_t)(0x02 << 20) & 0xff00) << 8 | ((__uint32_t
)(0x02 << 20) & 0xff0000) >> 8 | ((__uint32_t
)(0x02 << 20) & 0xff000000) >> 24) : __swap32md
(0x02 << 20))
;
1108#endif
1109 }
1110 error = ip6_output(m, tp->t_inpcb->inp_outputopts6,
1111 &tp->t_inpcb->inp_route6inp_ru.ru_route6, 0, NULL((void *)0),
1112 tp->t_inpcb->inp_seclevel);
1113 break;
1114#endif /* INET6 */
1115 }
1116
1117 if (error) {
89
'Default' branch taken. Execution continues on line 1117
90
Branch condition evaluates to a garbage value
1118out:
1119 if (error == ENOBUFS55) {
1120 /*
1121 * If the interface queue is full, or IP cannot
1122 * get an mbuf, trigger TCP slow start.
1123 */
1124 tp->snd_cwnd = tp->t_maxseg;
1125 return (0);
1126 }
1127 if (error == EMSGSIZE40) {
1128 /*
1129 * ip_output() will have already fixed the route
1130 * for us. tcp_mtudisc() will, as its last action,
1131 * initiate retransmission, so it is important to
1132 * not do so here.
1133 */
1134 tcp_mtudisc(tp->t_inpcb, -1);
1135 return (0);
1136 }
1137 if ((error == EHOSTUNREACH65 || error == ENETDOWN50) &&
1138 TCPS_HAVERCVDSYN(tp->t_state)((tp->t_state) >= 3)) {
1139 tp->t_softerror = error;
1140 return (0);
1141 }
1142
1143 /* Restart the delayed ACK timer, if necessary. */
1144 if (TCP_TIMER_ISARMED(tp, TCPT_DELACK)(((tp)->t_flags) & (0x04000000U << (5))))
1145 TCP_TIMER_ARM(tp, TCPT_DELACK, tcp_delack_msecs)do { (((tp)->t_flags) |= (0x04000000U << (5))); timeout_add_msec
(&(tp)->t_timer[(5)], (tcp_delack_msecs)); } while (0)
;
1146
1147 return (error);
1148 }
1149
1150 if (packetlen > tp->t_pmtud_mtu_sent)
1151 tp->t_pmtud_mtu_sent = packetlen;
1152
1153 tcpstat_inc(tcps_sndtotal);
1154 if (TCP_TIMER_ISARMED(tp, TCPT_DELACK)(((tp)->t_flags) & (0x04000000U << (5))))
1155 tcpstat_inc(tcps_delack);
1156
1157 /*
1158 * Data sent (as far as we can tell).
1159 * If this advertises a larger window than any other segment,
1160 * then remember the size of the advertised window.
1161 * Any pending ACK has now been sent.
1162 */
1163 if (win > 0 && SEQ_GT(tp->rcv_nxt+win, tp->rcv_adv)((int)((tp->rcv_nxt+win)-(tp->rcv_adv)) > 0))
1164 tp->rcv_adv = tp->rcv_nxt + win;
1165 tp->last_ack_sent = tp->rcv_nxt;
1166 tp->t_sndacktime = now;
1167 tp->t_flags &= ~TF_ACKNOW0x0001U;
1168 TCP_TIMER_DISARM(tp, TCPT_DELACK)do { (((tp)->t_flags) &= ~(0x04000000U << (5)));
timeout_del(&(tp)->t_timer[(5)]); } while (0)
;
1169 if (sendalot)
1170 goto again;
1171 return (0);
1172}
1173
1174void
1175tcp_setpersist(struct tcpcb *tp)
1176{
1177 int t = ((tp->t_srtt >> 2) + tp->t_rttvar) >> (1 + TCP_RTT_BASE_SHIFT2);
1178 int msec;
1179
1180 if (TCP_TIMER_ISARMED(tp, TCPT_REXMT)(((tp)->t_flags) & (0x04000000U << (0))))
1181 panic("tcp_output REXMT");
1182 /*
1183 * Start/restart persistence timer.
1184 */
1185 if (t < tp->t_rttmin)
1186 t = tp->t_rttmin;
1187 TCPT_RANGESET(msec, t * tcp_backoff[tp->t_rxtshift],do { (msec) = (t * tcp_backoff[tp->t_rxtshift]); if ((msec
) < (((5) * 1000))) (msec) = (((5) * 1000)); else if ((msec
) > (((60) * 1000))) (msec) = (((60) * 1000)); } while ( 0
)
1188 TCPTV_PERSMIN, TCPTV_PERSMAX)do { (msec) = (t * tcp_backoff[tp->t_rxtshift]); if ((msec
) < (((5) * 1000))) (msec) = (((5) * 1000)); else if ((msec
) > (((60) * 1000))) (msec) = (((60) * 1000)); } while ( 0
)
;
1189 TCP_TIMER_ARM(tp, TCPT_PERSIST, msec)do { (((tp)->t_flags) |= (0x04000000U << (1))); timeout_add_msec
(&(tp)->t_timer[(1)], (msec)); } while (0)
;
1190 if (tp->t_rxtshift < TCP_MAXRXTSHIFT12)
1191 tp->t_rxtshift++;
1192}
1193
1194int
1195tcp_chopper(struct mbuf *m0, struct mbuf_list *ml, struct ifnet *ifp,
1196 u_int mss)
1197{
1198 struct ip *ip = NULL((void *)0);
1199#ifdef INET61
1200 struct ip6_hdr *ip6 = NULL((void *)0);
1201#endif
1202 struct tcphdr *th;
1203 int firstlen, iphlen, hlen, tlen, off;
1204 int error;
1205
1206 ml_init(ml);
1207 ml_enqueue(ml, m0);
1208
1209 ip = mtod(m0, struct ip *)((struct ip *)((m0)->m_hdr.mh_data));
1210 switch (ip->ip_v) {
1211 case 4:
1212 iphlen = ip->ip_hl << 2;
1213 if (ISSET(ip->ip_off, htons(IP_OFFMASK | IP_MF))((ip->ip_off) & ((__uint16_t)(__builtin_constant_p(0x1fff
| 0x2000) ? (__uint16_t)(((__uint16_t)(0x1fff | 0x2000) &
0xffU) << 8 | ((__uint16_t)(0x1fff | 0x2000) & 0xff00U
) >> 8) : __swap16md(0x1fff | 0x2000))))
||
1214 iphlen != sizeof(struct ip) || ip->ip_p != IPPROTO_TCP6) {
1215 /* only TCP without fragment or IP option supported */
1216 error = EPROTOTYPE41;
1217 goto bad;
1218 }
1219 break;
1220#ifdef INET61
1221 case 6:
1222 ip = NULL((void *)0);
1223 ip6 = mtod(m0, struct ip6_hdr *)((struct ip6_hdr *)((m0)->m_hdr.mh_data));
1224 iphlen = sizeof(struct ip6_hdr);
1225 if (ip6->ip6_nxtip6_ctlun.ip6_un1.ip6_un1_nxt != IPPROTO_TCP6) {
1226 /* only TCP without IPv6 header chain supported */
1227 error = EPROTOTYPE41;
1228 goto bad;
1229 }
1230 break;
1231#endif
1232 default:
1233 panic("%s: unknown ip version %d", __func__, ip->ip_v);
1234 }
1235
1236 tlen = m0->m_pkthdrM_dat.MH.MH_pkthdr.len;
1237 if (tlen < iphlen + sizeof(struct tcphdr)) {
1238 error = ENOPROTOOPT42;
1239 goto bad;
1240 }
1241 /* IP and TCP header should be contiguous, this check is paranoia */
1242 if (m0->m_lenm_hdr.mh_len < iphlen + sizeof(*th)) {
1243 ml_dequeue(ml);
1244 if ((m0 = m_pullup(m0, iphlen + sizeof(*th))) == NULL((void *)0)) {
1245 error = ENOBUFS55;
1246 goto bad;
1247 }
1248 ml_enqueue(ml, m0);
1249 }
1250 th = (struct tcphdr *)(mtod(m0, caddr_t)((caddr_t)((m0)->m_hdr.mh_data)) + iphlen);
1251 hlen = iphlen + (th->th_off << 2);
1252 if (tlen < hlen) {
1253 error = ENOPROTOOPT42;
1254 goto bad;
1255 }
1256 firstlen = MIN(tlen - hlen, mss)(((tlen - hlen)<(mss))?(tlen - hlen):(mss));
1257
1258 CLR(m0->m_pkthdr.csum_flags, M_TCP_TSO)((m0->M_dat.MH.MH_pkthdr.csum_flags) &= ~(0x8000));
1259
1260 /*
1261 * Loop through length of payload after first segment,
1262 * make new header and copy data of each part and link onto chain.
1263 */
1264 for (off = hlen + firstlen; off < tlen; off += mss) {
1265 struct mbuf *m;
1266 struct tcphdr *mhth;
1267 int len;
1268
1269 len = MIN(tlen - off, mss)(((tlen - off)<(mss))?(tlen - off):(mss));
1270
1271 MGETHDR(m, M_DONTWAIT, MT_HEADER)m = m_gethdr((0x0002), (2));
1272 if (m == NULL((void *)0)) {
1273 error = ENOBUFS55;
1274 goto bad;
1275 }
1276 ml_enqueue(ml, m);
1277 if ((error = m_dup_pkthdr(m, m0, M_DONTWAIT0x0002)) != 0)
1278 goto bad;
1279
1280 /* IP and TCP header to the end, space for link layer header */
1281 m->m_lenm_hdr.mh_len = hlen;
1282 m_align(m, hlen);
1283
1284 /* copy and adjust TCP header */
1285 mhth = (struct tcphdr *)(mtod(m, caddr_t)((caddr_t)((m)->m_hdr.mh_data)) + iphlen);
1286 memcpy(mhth, th, hlen - iphlen)__builtin_memcpy((mhth), (th), (hlen - iphlen));
1287 mhth->th_seq = htonl(ntohl(th->th_seq) + (off - hlen))(__uint32_t)(__builtin_constant_p((__uint32_t)(__builtin_constant_p
(th->th_seq) ? (__uint32_t)(((__uint32_t)(th->th_seq) &
0xff) << 24 | ((__uint32_t)(th->th_seq) & 0xff00
) << 8 | ((__uint32_t)(th->th_seq) & 0xff0000) >>
8 | ((__uint32_t)(th->th_seq) & 0xff000000) >> 24
) : __swap32md(th->th_seq)) + (off - hlen)) ? (__uint32_t)
(((__uint32_t)((__uint32_t)(__builtin_constant_p(th->th_seq
) ? (__uint32_t)(((__uint32_t)(th->th_seq) & 0xff) <<
24 | ((__uint32_t)(th->th_seq) & 0xff00) << 8 |
((__uint32_t)(th->th_seq) & 0xff0000) >> 8 | ((
__uint32_t)(th->th_seq) & 0xff000000) >> 24) : __swap32md
(th->th_seq)) + (off - hlen)) & 0xff) << 24 | ((
__uint32_t)((__uint32_t)(__builtin_constant_p(th->th_seq) ?
(__uint32_t)(((__uint32_t)(th->th_seq) & 0xff) <<
24 | ((__uint32_t)(th->th_seq) & 0xff00) << 8 |
((__uint32_t)(th->th_seq) & 0xff0000) >> 8 | ((
__uint32_t)(th->th_seq) & 0xff000000) >> 24) : __swap32md
(th->th_seq)) + (off - hlen)) & 0xff00) << 8 | (
(__uint32_t)((__uint32_t)(__builtin_constant_p(th->th_seq)
? (__uint32_t)(((__uint32_t)(th->th_seq) & 0xff) <<
24 | ((__uint32_t)(th->th_seq) & 0xff00) << 8 |
((__uint32_t)(th->th_seq) & 0xff0000) >> 8 | ((
__uint32_t)(th->th_seq) & 0xff000000) >> 24) : __swap32md
(th->th_seq)) + (off - hlen)) & 0xff0000) >> 8 |
((__uint32_t)((__uint32_t)(__builtin_constant_p(th->th_seq
) ? (__uint32_t)(((__uint32_t)(th->th_seq) & 0xff) <<
24 | ((__uint32_t)(th->th_seq) & 0xff00) << 8 |
((__uint32_t)(th->th_seq) & 0xff0000) >> 8 | ((
__uint32_t)(th->th_seq) & 0xff000000) >> 24) : __swap32md
(th->th_seq)) + (off - hlen)) & 0xff000000) >> 24
) : __swap32md((__uint32_t)(__builtin_constant_p(th->th_seq
) ? (__uint32_t)(((__uint32_t)(th->th_seq) & 0xff) <<
24 | ((__uint32_t)(th->th_seq) & 0xff00) << 8 |
((__uint32_t)(th->th_seq) & 0xff0000) >> 8 | ((
__uint32_t)(th->th_seq) & 0xff000000) >> 24) : __swap32md
(th->th_seq)) + (off - hlen)))
;
1288 if (off + len < tlen)
1289 CLR(mhth->th_flags, TH_PUSH|TH_FIN)((mhth->th_flags) &= ~(0x08|0x01));
1290
1291 /* add mbuf chain with payload */
1292 m->m_pkthdrM_dat.MH.MH_pkthdr.len = hlen + len;
1293 if ((m->m_nextm_hdr.mh_next = m_copym(m0, off, len, M_DONTWAIT0x0002)) == NULL((void *)0)) {
1294 error = ENOBUFS55;
1295 goto bad;
1296 }
1297
1298 /* copy and adjust IP header, calculate checksum */
1299 SET(m->m_pkthdr.csum_flags, M_TCP_CSUM_OUT)((m->M_dat.MH.MH_pkthdr.csum_flags) |= (0x0002));
1300 if (ip) {
1301 struct ip *mhip;
1302
1303 mhip = mtod(m, struct ip *)((struct ip *)((m)->m_hdr.mh_data));
1304 *mhip = *ip;
1305 mhip->ip_len = htons(hlen + len)(__uint16_t)(__builtin_constant_p(hlen + len) ? (__uint16_t)(
((__uint16_t)(hlen + len) & 0xffU) << 8 | ((__uint16_t
)(hlen + len) & 0xff00U) >> 8) : __swap16md(hlen + len
))
;
1306 mhip->ip_id = htons(ip_randomid())(__uint16_t)(__builtin_constant_p(ip_randomid()) ? (__uint16_t
)(((__uint16_t)(ip_randomid()) & 0xffU) << 8 | ((__uint16_t
)(ip_randomid()) & 0xff00U) >> 8) : __swap16md(ip_randomid
()))
;
1307 in_hdr_cksum_out(m, ifp);
1308 in_proto_cksum_out(m, ifp);
1309 }
1310#ifdef INET61
1311 if (ip6) {
1312 struct ip6_hdr *mhip6;
1313
1314 mhip6 = mtod(m, struct ip6_hdr *)((struct ip6_hdr *)((m)->m_hdr.mh_data));
1315 *mhip6 = *ip6;
1316 mhip6->ip6_plenip6_ctlun.ip6_un1.ip6_un1_plen = htons(hlen - iphlen + len)(__uint16_t)(__builtin_constant_p(hlen - iphlen + len) ? (__uint16_t
)(((__uint16_t)(hlen - iphlen + len) & 0xffU) << 8 |
((__uint16_t)(hlen - iphlen + len) & 0xff00U) >> 8
) : __swap16md(hlen - iphlen + len))
;
1317 in6_proto_cksum_out(m, ifp);
1318 }
1319#endif
1320 }
1321
1322 /*
1323 * Update first segment by trimming what's been copied out
1324 * and updating header, then send each segment (in order).
1325 */
1326 if (hlen + firstlen < tlen) {
1327 m_adj(m0, hlen + firstlen - tlen);
1328 CLR(th->th_flags, TH_PUSH|TH_FIN)((th->th_flags) &= ~(0x08|0x01));
1329 }
1330 /* adjust IP header, calculate checksum */
1331 SET(m0->m_pkthdr.csum_flags, M_TCP_CSUM_OUT)((m0->M_dat.MH.MH_pkthdr.csum_flags) |= (0x0002));
1332 if (ip) {
1333 ip->ip_len = htons(m0->m_pkthdr.len)(__uint16_t)(__builtin_constant_p(m0->M_dat.MH.MH_pkthdr.len
) ? (__uint16_t)(((__uint16_t)(m0->M_dat.MH.MH_pkthdr.len)
& 0xffU) << 8 | ((__uint16_t)(m0->M_dat.MH.MH_pkthdr
.len) & 0xff00U) >> 8) : __swap16md(m0->M_dat.MH
.MH_pkthdr.len))
;
1334 in_hdr_cksum_out(m0, ifp);
1335 in_proto_cksum_out(m0, ifp);
1336 }
1337#ifdef INET61
1338 if (ip6) {
1339 ip6->ip6_plenip6_ctlun.ip6_un1.ip6_un1_plen = htons(m0->m_pkthdr.len - iphlen)(__uint16_t)(__builtin_constant_p(m0->M_dat.MH.MH_pkthdr.len
- iphlen) ? (__uint16_t)(((__uint16_t)(m0->M_dat.MH.MH_pkthdr
.len - iphlen) & 0xffU) << 8 | ((__uint16_t)(m0->
M_dat.MH.MH_pkthdr.len - iphlen) & 0xff00U) >> 8) :
__swap16md(m0->M_dat.MH.MH_pkthdr.len - iphlen))
;
1340 in6_proto_cksum_out(m0, ifp);
1341 }
1342#endif
1343
1344 tcpstat_add(tcps_outpkttso, ml_len(ml)((ml)->ml_len));
1345 return 0;
1346
1347 bad:
1348 tcpstat_inc(tcps_outbadtso);
1349 ml_purge(ml);
1350 return error;
1351}
1352
1353int
1354tcp_if_output_tso(struct ifnet *ifp, struct mbuf **mp, struct sockaddr *dst,
1355 struct rtentry *rt, uint32_t ifcap, u_int mtu)
1356{
1357 struct mbuf_list ml;
1358 int error;
1359
1360 /* caller must fail later or fragment */
1361 if (!ISSET((*mp)->m_pkthdr.csum_flags, M_TCP_TSO)(((*mp)->M_dat.MH.MH_pkthdr.csum_flags) & (0x8000)))
1362 return 0;
1363 if ((*mp)->m_pkthdrM_dat.MH.MH_pkthdr.ph_mss > mtu) {
1364 CLR((*mp)->m_pkthdr.csum_flags, M_TCP_TSO)(((*mp)->M_dat.MH.MH_pkthdr.csum_flags) &= ~(0x8000));
1365 return 0;
1366 }
1367
1368 /* network interface hardware will do TSO */
1369 if (in_ifcap_cksum(*mp, ifp, ifcap)) {
1370 if (ISSET(ifcap, IFCAP_TSOv4)((ifcap) & (0x00001000))) {
1371 in_hdr_cksum_out(*mp, ifp);
1372 in_proto_cksum_out(*mp, ifp);
1373 }
1374#ifdef INET61
1375 if (ISSET(ifcap, IFCAP_TSOv6)((ifcap) & (0x00002000)))
1376 in6_proto_cksum_out(*mp, ifp);
1377#endif
1378 error = ifp->if_output(ifp, *mp, dst, rt);
1379 if (!error)
1380 tcpstat_inc(tcps_outhwtso);
1381 goto done;
1382 }
1383
1384 /* as fallback do TSO in software */
1385 if ((error = tcp_chopper(*mp, &ml, ifp, (*mp)->m_pkthdrM_dat.MH.MH_pkthdr.ph_mss)) ||
1386 (error = if_output_ml(ifp, &ml, dst, rt)))
1387 goto done;
1388 tcpstat_inc(tcps_outswtso);
1389
1390 done:
1391 *mp = NULL((void *)0);
1392 return error;
1393}