From 84565070e442583ec67fb08a5962c80203e491c3 Mon Sep 17 00:00:00 2001 From: John Heffner Date: Mon, 2 Apr 2007 13:56:32 -0700 Subject: [TCP]: Do receiver-side SWS avoidance for rcvbuf < MSS. Signed-off-by: John Heffner Signed-off-by: David S. Miller --- net/ipv4/tcp_output.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'net/ipv4/tcp_output.c') diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index dc151139b5a..688b95594f2 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1607,6 +1607,9 @@ u32 __tcp_select_window(struct sock *sk) */ if (window <= free_space - mss || window > free_space) window = (free_space/mss)*mss; + else if (mss == full_space && + free_space > window + full_space/2) + window = free_space; } return window; -- cgit From 15d33c070ddde99f7368e6b17b71d22c866f97d9 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 9 Apr 2007 13:23:14 -0700 Subject: [TCP]: slow_start_after_idle should influence cwnd validation too For the cases that slow_start_after_idle are meant to deal with, it is almost a certainty that the congestion window tests will think the connection is application limited and we'll thus decrease the cwnd there too. This defeats the whole point of setting slow_start_after_idle to zero. So test it there too. We do not cancel out the entire tcp_cwnd_validate() function so that if the sysctl is changed we still have the validation state maintained. Signed-off-by: David S. Miller --- net/ipv4/tcp_output.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'net/ipv4/tcp_output.c') diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 688b95594f2..3c24881f2a6 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -943,7 +943,8 @@ static void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp) if (tp->packets_out > tp->snd_cwnd_used) tp->snd_cwnd_used = tp->packets_out; - if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto) + if (sysctl_tcp_slow_start_after_idle && + (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto) tcp_cwnd_application_limited(sk); } } -- cgit From 3cfe3baaf07c9e40a75f9a70662de56df1c246a8 Mon Sep 17 00:00:00 2001 From: Ilpo Järvinen Date: Tue, 27 Feb 2007 10:09:49 -0800 Subject: [TCP]: Add two new spurious RTO responses to FRTO MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit New sysctl tcp_frto_response is added to select amongst these responses: - Rate halving based; reuses CA_CWR state (default) - Very conservative; used to be the only one available (=1) - Undo cwr; undoes ssthresh and cwnd reductions (=2) The response with rate halving requires a new parameter to tcp_enter_cwr because FRTO has already reduced ssthresh and doing a second reduction there has to be prevented. In addition, to keep things nice on 80 cols screen, a local variable was added. Signed-off-by: Ilpo Järvinen Signed-off-by: David S. Miller --- net/ipv4/tcp_output.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net/ipv4/tcp_output.c') diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 3c24881f2a6..d19b2f3b70f 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -545,7 +545,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, if (likely(err <= 0)) return err; - tcp_enter_cwr(sk); + tcp_enter_cwr(sk, 1); return net_xmit_eval(err); -- cgit From fe067e8ab5e0dc5ca3c54634924c628da92090b4 Mon Sep 17 00:00:00 2001 From: "David S. 
Miller" Date: Wed, 7 Mar 2007 12:12:44 -0800 Subject: [TCP]: Abstract out all write queue operations. This allows the write queue implementation to be changed, for example, to one which allows fast interval searching. Signed-off-by: David S. Miller --- net/ipv4/tcp_output.c | 95 ++++++++++++++++++++++++--------------------------- 1 file changed, 44 insertions(+), 51 deletions(-) (limited to 'net/ipv4/tcp_output.c') diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index d19b2f3b70f..2a62b55b15f 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -65,9 +65,7 @@ int sysctl_tcp_slow_start_after_idle __read_mostly = 1; static void update_send_head(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb) { - sk->sk_send_head = skb->next; - if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue) - sk->sk_send_head = NULL; + tcp_advance_send_head(sk, skb); tp->snd_nxt = TCP_SKB_CB(skb)->end_seq; tcp_packets_out_inc(sk, tp, skb); } @@ -567,12 +565,8 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb) /* Advance write_seq and place onto the write_queue. */ tp->write_seq = TCP_SKB_CB(skb)->end_seq; skb_header_release(skb); - __skb_queue_tail(&sk->sk_write_queue, skb); + tcp_add_write_queue_tail(sk, skb); sk_charge_skb(sk, skb); - - /* Queue it, remembering where we must start sending. */ - if (sk->sk_send_head == NULL) - sk->sk_send_head = skb; } static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now) @@ -705,7 +699,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss /* Link BUFF into the send queue. */ skb_header_release(buff); - __skb_append(skb, buff, &sk->sk_write_queue); + tcp_insert_write_queue_after(skb, buff, sk); return 0; } @@ -1056,7 +1050,7 @@ static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb, uns return !after(end_seq, tp->snd_una + tp->snd_wnd); } -/* This checks if the data bearing packet SKB (usually sk->sk_send_head) +/* This checks if the data bearing packet SKB (usually tcp_send_head(sk)) * should be put on the wire right now. If so, it returns the number of * packets allowed by the congestion window. */ @@ -1079,15 +1073,9 @@ static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb, return cwnd_quota; } -static inline int tcp_skb_is_last(const struct sock *sk, - const struct sk_buff *skb) -{ - return skb->next == (struct sk_buff *)&sk->sk_write_queue; -} - int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp) { - struct sk_buff *skb = sk->sk_send_head; + struct sk_buff *skb = tcp_send_head(sk); return (skb && tcp_snd_test(sk, skb, tcp_current_mss(sk, 1), @@ -1143,7 +1131,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, /* Link BUFF into the send queue. */ skb_header_release(buff); - __skb_append(skb, buff, &sk->sk_write_queue); + tcp_insert_write_queue_after(skb, buff, sk); return 0; } @@ -1249,10 +1237,10 @@ static int tcp_mtu_probe(struct sock *sk) /* Have enough data in the send queue to probe? 
*/ len = 0; - if ((skb = sk->sk_send_head) == NULL) + if ((skb = tcp_send_head(sk)) == NULL) return -1; while ((len += skb->len) < probe_size && !tcp_skb_is_last(sk, skb)) - skb = skb->next; + skb = tcp_write_queue_next(sk, skb); if (len < probe_size) return -1; @@ -1279,9 +1267,9 @@ static int tcp_mtu_probe(struct sock *sk) return -1; sk_charge_skb(sk, nskb); - skb = sk->sk_send_head; - __skb_insert(nskb, skb->prev, skb, &sk->sk_write_queue); - sk->sk_send_head = nskb; + skb = tcp_send_head(sk); + tcp_insert_write_queue_before(nskb, skb, sk); + tcp_advance_send_head(sk, skb); TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; @@ -1292,7 +1280,7 @@ static int tcp_mtu_probe(struct sock *sk) len = 0; while (len < probe_size) { - next = skb->next; + next = tcp_write_queue_next(sk, skb); copy = min_t(int, skb->len, probe_size - len); if (nskb->ip_summed) @@ -1305,7 +1293,7 @@ static int tcp_mtu_probe(struct sock *sk) /* We've eaten all the data from this skb. * Throw it away. */ TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags; - __skb_unlink(skb, &sk->sk_write_queue); + tcp_unlink_write_queue(skb, sk); sk_stream_free_skb(sk, skb); } else { TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags & @@ -1377,7 +1365,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle) sent_pkts = 1; } - while ((skb = sk->sk_send_head)) { + while ((skb = tcp_send_head(sk))) { unsigned int limit; tso_segs = tcp_init_tso_segs(sk, skb, mss_now); @@ -1435,7 +1423,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle) tcp_cwnd_validate(sk, tp); return 0; } - return !tp->packets_out && sk->sk_send_head; + return !tp->packets_out && tcp_send_head(sk); } /* Push out any pending frames which were held back due to @@ -1445,7 +1433,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle) void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp, unsigned int cur_mss, int nonagle) { - struct sk_buff *skb = sk->sk_send_head; + struct sk_buff *skb = tcp_send_head(sk); if (skb) { if (tcp_write_xmit(sk, cur_mss, nonagle)) @@ -1459,7 +1447,7 @@ void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp, void tcp_push_one(struct sock *sk, unsigned int mss_now) { struct tcp_sock *tp = tcp_sk(sk); - struct sk_buff *skb = sk->sk_send_head; + struct sk_buff *skb = tcp_send_head(sk); unsigned int tso_segs, cwnd_quota; BUG_ON(!skb || skb->len < mss_now); @@ -1620,7 +1608,7 @@ u32 __tcp_select_window(struct sock *sk) static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int mss_now) { struct tcp_sock *tp = tcp_sk(sk); - struct sk_buff *next_skb = skb->next; + struct sk_buff *next_skb = tcp_write_queue_next(sk, skb); /* The first test we must make is that neither of these two * SKB's are still referenced by someone else. @@ -1652,7 +1640,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m clear_all_retrans_hints(tp); /* Ok. We will be able to collapse the packet. 
*/ - __skb_unlink(next_skb, &sk->sk_write_queue); + tcp_unlink_write_queue(next_skb, sk); memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size); @@ -1706,7 +1694,9 @@ void tcp_simple_retransmit(struct sock *sk) unsigned int mss = tcp_current_mss(sk, 0); int lost = 0; - sk_stream_for_retrans_queue(skb, sk) { + tcp_for_write_queue(skb, sk) { + if (skb == tcp_send_head(sk)) + break; if (skb->len > mss && !(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) { if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) { @@ -1790,10 +1780,10 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) /* Collapse two adjacent packets if worthwhile and we can. */ if(!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) && (skb->len < (cur_mss >> 1)) && - (skb->next != sk->sk_send_head) && - (skb->next != (struct sk_buff *)&sk->sk_write_queue) && - (skb_shinfo(skb)->nr_frags == 0 && skb_shinfo(skb->next)->nr_frags == 0) && - (tcp_skb_pcount(skb) == 1 && tcp_skb_pcount(skb->next) == 1) && + (tcp_write_queue_next(sk, skb) != tcp_send_head(sk)) && + (!tcp_skb_is_last(sk, skb)) && + (skb_shinfo(skb)->nr_frags == 0 && skb_shinfo(tcp_write_queue_next(sk, skb))->nr_frags == 0) && + (tcp_skb_pcount(skb) == 1 && tcp_skb_pcount(tcp_write_queue_next(sk, skb)) == 1) && (sysctl_tcp_retrans_collapse != 0)) tcp_retrans_try_collapse(sk, skb, cur_mss); @@ -1872,15 +1862,17 @@ void tcp_xmit_retransmit_queue(struct sock *sk) skb = tp->retransmit_skb_hint; packet_cnt = tp->retransmit_cnt_hint; }else{ - skb = sk->sk_write_queue.next; + skb = tcp_write_queue_head(sk); packet_cnt = 0; } /* First pass: retransmit lost packets. */ if (tp->lost_out) { - sk_stream_for_retrans_queue_from(skb, sk) { + tcp_for_write_queue_from(skb, sk) { __u8 sacked = TCP_SKB_CB(skb)->sacked; + if (skb == tcp_send_head(sk)) + break; /* we could do better than to assign each time */ tp->retransmit_skb_hint = skb; tp->retransmit_cnt_hint = packet_cnt; @@ -1906,8 +1898,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk) else NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS); - if (skb == - skb_peek(&sk->sk_write_queue)) + if (skb == tcp_write_queue_head(sk)) inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto, TCP_RTO_MAX); @@ -1944,11 +1935,13 @@ void tcp_xmit_retransmit_queue(struct sock *sk) skb = tp->forward_skb_hint; packet_cnt = tp->forward_cnt_hint; } else{ - skb = sk->sk_write_queue.next; + skb = tcp_write_queue_head(sk); packet_cnt = 0; } - sk_stream_for_retrans_queue_from(skb, sk) { + tcp_for_write_queue_from(skb, sk) { + if (skb == tcp_send_head(sk)) + break; tp->forward_cnt_hint = packet_cnt; tp->forward_skb_hint = skb; @@ -1973,7 +1966,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk) break; } - if (skb == skb_peek(&sk->sk_write_queue)) + if (skb == tcp_write_queue_head(sk)) inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto, TCP_RTO_MAX); @@ -1989,7 +1982,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk) void tcp_send_fin(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); - struct sk_buff *skb = skb_peek_tail(&sk->sk_write_queue); + struct sk_buff *skb = tcp_write_queue_tail(sk); int mss_now; /* Optimization, tack on the FIN if we have a queue of @@ -1998,7 +1991,7 @@ void tcp_send_fin(struct sock *sk) */ mss_now = tcp_current_mss(sk, 1); - if (sk->sk_send_head != NULL) { + if (tcp_send_head(sk) != NULL) { TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN; TCP_SKB_CB(skb)->end_seq++; tp->write_seq++; @@ -2071,7 +2064,7 @@ int tcp_send_synack(struct sock *sk) { struct sk_buff* skb; - skb = 
skb_peek(&sk->sk_write_queue); + skb = tcp_write_queue_head(sk); if (skb == NULL || !(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_SYN)) { printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n"); return -EFAULT; @@ -2081,9 +2074,9 @@ int tcp_send_synack(struct sock *sk) struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); if (nskb == NULL) return -ENOMEM; - __skb_unlink(skb, &sk->sk_write_queue); + tcp_unlink_write_queue(skb, sk); skb_header_release(nskb); - __skb_queue_head(&sk->sk_write_queue, nskb); + __tcp_add_write_queue_head(sk, nskb); sk_stream_free_skb(sk, skb); sk_charge_skb(sk, nskb); skb = nskb; @@ -2285,7 +2278,7 @@ int tcp_connect(struct sock *sk) TCP_SKB_CB(buff)->when = tcp_time_stamp; tp->retrans_stamp = TCP_SKB_CB(buff)->when; skb_header_release(buff); - __skb_queue_tail(&sk->sk_write_queue, buff); + __tcp_add_write_queue_tail(sk, buff); sk_charge_skb(sk, buff); tp->packets_out += tcp_skb_pcount(buff); tcp_transmit_skb(sk, buff, 1, GFP_KERNEL); @@ -2441,7 +2434,7 @@ int tcp_write_wakeup(struct sock *sk) struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; - if ((skb = sk->sk_send_head) != NULL && + if ((skb = tcp_send_head(sk)) != NULL && before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd)) { int err; unsigned int mss = tcp_current_mss(sk, 0); @@ -2491,7 +2484,7 @@ void tcp_send_probe0(struct sock *sk) err = tcp_write_wakeup(sk); - if (tp->packets_out || !sk->sk_send_head) { + if (tp->packets_out || !tcp_send_head(sk)) { /* Cancel probe timer, if it is not required. */ icsk->icsk_probes_out = 0; icsk->icsk_backoff = 0; -- cgit From 2de979bd7da9c8b39cc0aabb0ab5aa1516d929eb Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 8 Mar 2007 20:45:19 -0800 Subject: [TCP]: whitespace cleanup Add whitespace around keywords. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- net/ipv4/tcp_output.c | 34 ++++++++++++++++++---------------- 1 file changed, 18 insertions(+), 16 deletions(-) (limited to 'net/ipv4/tcp_output.c') diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 2a62b55b15f..f19f5fb361b 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -236,7 +236,7 @@ static u16 tcp_select_window(struct sock *sk) u32 new_win = __tcp_select_window(sk); /* Never shrink the offered window */ - if(new_win < cur_win) { + if (new_win < cur_win) { /* Danger Will Robinson! 
* Don't update rcv_wup/rcv_wnd here or else * we will not be able to advertise a zero @@ -287,10 +287,12 @@ static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp, (TCPOPT_SACK << 8) | (TCPOLEN_SACK_BASE + (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK))); - for(this_sack = 0; this_sack < tp->rx_opt.eff_sacks; this_sack++) { + + for (this_sack = 0; this_sack < tp->rx_opt.eff_sacks; this_sack++) { *ptr++ = htonl(sp[this_sack].start_seq); *ptr++ = htonl(sp[this_sack].end_seq); } + if (tp->rx_opt.dsack) { tp->rx_opt.dsack = 0; tp->rx_opt.eff_sacks--; @@ -335,7 +337,7 @@ static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack, */ *ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss); if (ts) { - if(sack) + if (sack) *ptr++ = htonl((TCPOPT_SACK_PERM << 24) | (TCPOLEN_SACK_PERM << 16) | (TCPOPT_TIMESTAMP << 8) | @@ -347,7 +349,7 @@ static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack, TCPOLEN_TIMESTAMP); *ptr++ = htonl(tstamp); /* TSVAL */ *ptr++ = htonl(ts_recent); /* TSECR */ - } else if(sack) + } else if (sack) *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_SACK_PERM << 8) | @@ -428,7 +430,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, sysctl_flags = 0; if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) { tcp_header_size = sizeof(struct tcphdr) + TCPOLEN_MSS; - if(sysctl_tcp_timestamps) { + if (sysctl_tcp_timestamps) { tcp_header_size += TCPOLEN_TSTAMP_ALIGNED; sysctl_flags |= SYSCTL_FLAG_TSTAMPS; } @@ -1618,7 +1620,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m u16 flags = TCP_SKB_CB(skb)->flags; /* Also punt if next skb has been SACK'd. */ - if(TCP_SKB_CB(next_skb)->sacked & TCPCB_SACKED_ACKED) + if (TCP_SKB_CB(next_skb)->sacked & TCPCB_SACKED_ACKED) return; /* Next skb is out of window. */ @@ -1778,13 +1780,13 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) } /* Collapse two adjacent packets if worthwhile and we can. */ - if(!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) && - (skb->len < (cur_mss >> 1)) && - (tcp_write_queue_next(sk, skb) != tcp_send_head(sk)) && - (!tcp_skb_is_last(sk, skb)) && - (skb_shinfo(skb)->nr_frags == 0 && skb_shinfo(tcp_write_queue_next(sk, skb))->nr_frags == 0) && - (tcp_skb_pcount(skb) == 1 && tcp_skb_pcount(tcp_write_queue_next(sk, skb)) == 1) && - (sysctl_tcp_retrans_collapse != 0)) + if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) && + (skb->len < (cur_mss >> 1)) && + (tcp_write_queue_next(sk, skb) != tcp_send_head(sk)) && + (!tcp_skb_is_last(sk, skb)) && + (skb_shinfo(skb)->nr_frags == 0 && skb_shinfo(tcp_write_queue_next(sk, skb))->nr_frags == 0) && + (tcp_skb_pcount(skb) == 1 && tcp_skb_pcount(tcp_write_queue_next(sk, skb)) == 1) && + (sysctl_tcp_retrans_collapse != 0)) tcp_retrans_try_collapse(sk, skb, cur_mss); if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) @@ -1794,9 +1796,9 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) * retransmit when old data is attached. So strip it off * since it is cheap to do so and saves bytes on the network. 
*/ - if(skb->len > 0 && - (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) && - tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) { + if (skb->len > 0 && + (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) && + tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) { if (!pskb_trim(skb, 0)) { TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1; skb_shinfo(skb)->gso_segs = 1; -- cgit From aa8223c7bb0b05183e1737881ed21827aa5b9e73 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 10 Apr 2007 21:04:22 -0700 Subject: [SK_BUFF]: Introduce tcp_hdr(), remove skb->h.th Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. Miller --- net/ipv4/tcp_output.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'net/ipv4/tcp_output.c') diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index f19f5fb361b..29c53fbb220 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -465,11 +465,12 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, tcp_header_size += TCPOLEN_MD5SIG_ALIGNED; #endif - th = (struct tcphdr *) skb_push(skb, tcp_header_size); - skb->h.th = th; + skb_push(skb, tcp_header_size); + skb_reset_transport_header(skb); skb_set_owner_w(skb, sk); /* Build TCP header and checksum it. */ + th = tcp_hdr(skb); th->source = inet->sport; th->dest = inet->dport; th->seq = htonl(tcb->seq); @@ -524,7 +525,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, tp->af_specific->calc_md5_hash(md5_hash_location, md5, sk, NULL, NULL, - skb->h.th, + tcp_hdr(skb), sk->sk_protocol, skb->len); } @@ -2128,8 +2129,10 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, if (md5) tcp_header_size += TCPOLEN_MD5SIG_ALIGNED; #endif - skb->h.th = th = (struct tcphdr *) skb_push(skb, tcp_header_size); + skb_push(skb, tcp_header_size); + skb_reset_transport_header(skb); + th = tcp_hdr(skb); memset(th, 0, sizeof(struct tcphdr)); th->syn = 1; th->ack = 1; @@ -2183,7 +2186,7 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, tp->af_specific->calc_md5_hash(md5_hash_location, md5, NULL, dst, req, - skb->h.th, sk->sk_protocol, + tcp_hdr(skb), sk->sk_protocol, skb->len); } #endif -- cgit From 27a884dc3cb63b93c2b3b643f5b31eed5f8a4d26 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 19 Apr 2007 20:29:13 -0700 Subject: [SK_BUFF]: Convert skb->tail to sk_buff_data_t So that it is also an offset from skb->head, reduces its size from 8 to 4 bytes on 64bit architectures, allowing us to combine the 4 bytes hole left by the layer headers conversion, reducing struct sk_buff size to 256 bytes, i.e. 4 64byte cachelines, and since the sk_buff slab cache is SLAB_HWCACHE_ALIGN... :-) Many calculations that previously required that skb->{transport,network, mac}_header be first converted to a pointer now can be done directly, being meaningful as offsets or pointers. Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. 
Miller --- net/ipv4/tcp_output.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net/ipv4/tcp_output.c') diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 29c53fbb220..c22cdcd8432 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -733,7 +733,7 @@ static void __pskb_trim_head(struct sk_buff *skb, int len) } skb_shinfo(skb)->nr_frags = k; - skb->tail = skb->data; + skb_reset_tail_pointer(skb); skb->data_len -= len; skb->len = skb->data_len; } -- cgit From 1a4e2d093fd5f3eaf8cffc04a1b803f8b0ddef6d Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sat, 31 Mar 2007 11:55:45 -0300 Subject: [SK_BUFF]: Some more conversions to skb_copy_from_linear_data Signed-off-by: Arnaldo Carvalho de Melo --- net/ipv4/tcp_output.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'net/ipv4/tcp_output.c') diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index c22cdcd8432..94d9f0c6368 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1645,7 +1645,9 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m /* Ok. We will be able to collapse the packet. */ tcp_unlink_write_queue(next_skb, sk); - memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size); + skb_copy_from_linear_data(next_skb, + skb_put(skb, next_skb_size), + next_skb_size); if (next_skb->ip_summed == CHECKSUM_PARTIAL) skb->ip_summed = CHECKSUM_PARTIAL; -- cgit From 9e412ba7632f71259a53085665d4983b78257b7c Mon Sep 17 00:00:00 2001 From: Ilpo Järvinen Date: Fri, 20 Apr 2007 22:18:02 -0700 Subject: [TCP]: Sed magic converts func(sk, tp, ...) -> func(sk, ...) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is (mostly) automated change using magic: sed -e '/struct sock \*sk/ N' -e '/struct sock \*sk/ N' -e '/struct sock \*sk/ N' -e '/struct sock \*sk/ N' -e 's|struct sock \*sk,[\n\t ]*struct tcp_sock \*tp\([^{]*\n{\n\)| struct sock \*sk\1\tstruct tcp_sock *tp = tcp_sk(sk);\n|g' -e 's|struct sock \*sk, struct tcp_sock \*tp| struct sock \*sk|g' -e 's|sk, tp\([^-]\)|sk\1|g' Fixed four unused variable (tp) warnings that were introduced. In addition, manually added newlines after local variables and tweaked function arguments positioning. $ gcc --version gcc (GCC) 4.1.1 20060525 (Red Hat 4.1.1-1) ... $ codiff -fV built-in.o.old built-in.o.new net/ipv4/route.c: rt_cache_flush | +14 1 function changed, 14 bytes added net/ipv4/tcp.c: tcp_setsockopt | -5 tcp_sendpage | -25 tcp_sendmsg | -16 3 functions changed, 46 bytes removed net/ipv4/tcp_input.c: tcp_try_undo_recovery | +3 tcp_try_undo_dsack | +2 tcp_mark_head_lost | -12 tcp_ack | -15 tcp_event_data_recv | -32 tcp_rcv_state_process | -10 tcp_rcv_established | +1 7 functions changed, 6 bytes added, 69 bytes removed, diff: -63 net/ipv4/tcp_output.c: update_send_head | -9 tcp_transmit_skb | +19 tcp_cwnd_validate | +1 tcp_write_wakeup | -17 __tcp_push_pending_frames | -25 tcp_push_one | -8 tcp_send_fin | -4 7 functions changed, 20 bytes added, 63 bytes removed, diff: -43 built-in.o.new: 18 functions changed, 40 bytes added, 178 bytes removed, diff: -138 Signed-off-by: Ilpo Järvinen Signed-off-by: David S. 
Miller --- net/ipv4/tcp_output.c | 54 +++++++++++++++++++++++++++------------------------ 1 file changed, 29 insertions(+), 25 deletions(-) (limited to 'net/ipv4/tcp_output.c') diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 94d9f0c6368..3a60aea744a 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -62,12 +62,13 @@ int sysctl_tcp_base_mss __read_mostly = 512; /* By default, RFC2861 behavior. */ int sysctl_tcp_slow_start_after_idle __read_mostly = 1; -static void update_send_head(struct sock *sk, struct tcp_sock *tp, - struct sk_buff *skb) +static void update_send_head(struct sock *sk, struct sk_buff *skb) { + struct tcp_sock *tp = tcp_sk(sk); + tcp_advance_send_head(sk, skb); tp->snd_nxt = TCP_SKB_CB(skb)->end_seq; - tcp_packets_out_inc(sk, tp, skb); + tcp_packets_out_inc(sk, skb); } /* SND.NXT, if window was not shrunk. @@ -76,8 +77,10 @@ static void update_send_head(struct sock *sk, struct tcp_sock *tp, * Anything in between SND.UNA...SND.UNA+SND.WND also can be already * invalid. OK, let's make this for now: */ -static inline __u32 tcp_acceptable_seq(struct sock *sk, struct tcp_sock *tp) +static inline __u32 tcp_acceptable_seq(struct sock *sk) { + struct tcp_sock *tp = tcp_sk(sk); + if (!before(tp->snd_una+tp->snd_wnd, tp->snd_nxt)) return tp->snd_nxt; else @@ -516,7 +519,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, md5 ? &md5_hash_location : #endif NULL); - TCP_ECN_send(sk, tp, skb, tcp_header_size); + TCP_ECN_send(sk, skb, tcp_header_size); } #ifdef CONFIG_TCP_MD5SIG @@ -927,8 +930,9 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed) /* Congestion window validation. (RFC2861) */ -static void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp) +static void tcp_cwnd_validate(struct sock *sk) { + struct tcp_sock *tp = tcp_sk(sk); __u32 packets_out = tp->packets_out; if (packets_out >= tp->snd_cwnd) { @@ -1076,8 +1080,9 @@ static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb, return cwnd_quota; } -int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp) +int tcp_may_send_now(struct sock *sk) { + struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb = tcp_send_head(sk); return (skb && @@ -1144,8 +1149,9 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, * * This algorithm is from John Heffner. */ -static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb) +static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) { + struct tcp_sock *tp = tcp_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); u32 send_win, cong_win, limit, in_flight; @@ -1324,7 +1330,7 @@ static int tcp_mtu_probe(struct sock *sk) /* Decrement cwnd here because we are sending * effectively two packets. */ tp->snd_cwnd--; - update_send_head(sk, tp, nskb); + update_send_head(sk, nskb); icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq; @@ -1387,7 +1393,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle) nonagle : TCP_NAGLE_PUSH)))) break; } else { - if (tcp_tso_should_defer(sk, tp, skb)) + if (tcp_tso_should_defer(sk, skb)) break; } @@ -1416,14 +1422,14 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle) /* Advance the send_head. This one is sent out. * This call will increment packets_out. 
*/ - update_send_head(sk, tp, skb); + update_send_head(sk, skb); tcp_minshall_update(tp, mss_now, skb); sent_pkts++; } if (likely(sent_pkts)) { - tcp_cwnd_validate(sk, tp); + tcp_cwnd_validate(sk); return 0; } return !tp->packets_out && tcp_send_head(sk); @@ -1433,14 +1439,14 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle) * TCP_CORK or attempt at coalescing tiny packets. * The socket must be locked by the caller. */ -void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp, - unsigned int cur_mss, int nonagle) +void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, + int nonagle) { struct sk_buff *skb = tcp_send_head(sk); if (skb) { if (tcp_write_xmit(sk, cur_mss, nonagle)) - tcp_check_probe_timer(sk, tp); + tcp_check_probe_timer(sk); } } @@ -1484,8 +1490,8 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now) TCP_SKB_CB(skb)->when = tcp_time_stamp; if (likely(!tcp_transmit_skb(sk, skb, 1, sk->sk_allocation))) { - update_send_head(sk, tp, skb); - tcp_cwnd_validate(sk, tp); + update_send_head(sk, skb); + tcp_cwnd_validate(sk); return; } } @@ -1933,7 +1939,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk) * segments to send. */ - if (tcp_may_send_now(sk, tp)) + if (tcp_may_send_now(sk)) return; if (tp->forward_skb_hint) { @@ -2023,7 +2029,7 @@ void tcp_send_fin(struct sock *sk) TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1; tcp_queue_skb(sk, skb); } - __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_OFF); + __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF); } /* We get here when a process closes a file descriptor (either due to @@ -2033,7 +2039,6 @@ void tcp_send_fin(struct sock *sk) */ void tcp_send_active_reset(struct sock *sk, gfp_t priority) { - struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; /* NOTE: No TCP options attached and we never retransmit this. */ @@ -2053,7 +2058,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority) skb_shinfo(skb)->gso_type = 0; /* Send it off. */ - TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk, tp); + TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk); TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq; TCP_SKB_CB(skb)->when = tcp_time_stamp; if (tcp_transmit_skb(sk, skb, 0, priority)) @@ -2271,7 +2276,7 @@ int tcp_connect(struct sock *sk) skb_reserve(buff, MAX_TCP_HEADER); TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN; - TCP_ECN_send_syn(sk, tp, buff); + TCP_ECN_send_syn(sk, buff); TCP_SKB_CB(buff)->sacked = 0; skb_shinfo(buff)->gso_segs = 1; skb_shinfo(buff)->gso_size = 0; @@ -2363,7 +2368,6 @@ void tcp_send_ack(struct sock *sk) { /* If we have been reset, we may not send again. */ if (sk->sk_state != TCP_CLOSE) { - struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *buff; /* We are not putting this on the write queue, so @@ -2389,7 +2393,7 @@ void tcp_send_ack(struct sock *sk) skb_shinfo(buff)->gso_type = 0; /* Send it off, this clears delayed acks for us. 
*/ - TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk, tp); + TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk); TCP_SKB_CB(buff)->when = tcp_time_stamp; tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC); } @@ -2467,7 +2471,7 @@ int tcp_write_wakeup(struct sock *sk) TCP_SKB_CB(skb)->when = tcp_time_stamp; err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); if (!err) { - update_send_head(sk, tp, skb); + update_send_head(sk, skb); } return err; } else { -- cgit From 164891aadf1721fca4dce473bb0e0998181537c6 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Mon, 23 Apr 2007 22:26:16 -0700 Subject: [TCP]: Congestion control API update. Do some simple changes to make congestion control API faster/cleaner. * use ktime_t rather than timeval * merge rtt sampling into existing ack callback this means one indirect call versus two per ack. * use flags bits to store options/settings Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- net/ipv4/tcp_output.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net/ipv4/tcp_output.c') diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 3a60aea744a..e70a6840cb6 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -409,7 +409,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, /* If congestion control is doing timestamping, we must * take such a timestamp before we potentially clone/copy. */ - if (icsk->icsk_ca_ops->rtt_sample) + if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP) __net_timestamp(skb); if (likely(clone_it)) { -- cgit From 65bb723c9502b7ba0a3aad13bdac8832e213ba74 Mon Sep 17 00:00:00 2001 From: Gerrit Renker Date: Sat, 28 Apr 2007 21:21:46 -0700 Subject: [TCP]: Update references in two old comments This updates references to drafts in comments which must be about 10 years old. Internet draft draft-ietf-tcpimpl-prob-03.txt expired in 1998 and was replaced by RFC 2525 in March 1999. Section 3.10 of the draft maps almost identically into section 2.17 of RFC 2525: both are entitled "Failure to RST on close with data pending", the differences in text body amount to a typo and minor sentence change. Signed-off-by: Gerrit Renker Signed-off-by: David S. Miller --- net/ipv4/tcp_output.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net/ipv4/tcp_output.c') diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index e70a6840cb6..b5fa3c19afe 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2035,7 +2035,7 @@ void tcp_send_fin(struct sock *sk) /* We get here when a process closes a file descriptor (either due to * an explicit close() or as a byproduct of exit()'ing) and there * was unread data in the receive queue. This behavior is recommended - * by draft-ietf-tcpimpl-prob-03.txt section 3.10. -DaveM + * by RFC 2525, section 2.17. -DaveM */ void tcp_send_active_reset(struct sock *sk, gfp_t priority) { -- cgit From d551e4541dd60ae53459f77a971f2d6043431f5f Mon Sep 17 00:00:00 2001 From: Ilpo Järvinen Date: Mon, 30 Apr 2007 00:42:20 -0700 Subject: [TCP] FRTO: RFC4138 allows Nagle override when new data must be sent MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a corner case where less than MSS sized new data thingie is awaiting in the send queue. For F-RTO to work correctly, a new data segment must be sent at certain point or F-RTO cannot be used at all. RFC4138 allows overriding of Nagle at that point. 
Implementation uses frto_counter states 2 and 3 to distinguish when Nagle override is needed. Signed-off-by: Ilpo Järvinen Signed-off-by: David S. Miller --- net/ipv4/tcp_output.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'net/ipv4/tcp_output.c') diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index b5fa3c19afe..0faacf9c419 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1035,8 +1035,10 @@ static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb, if (nonagle & TCP_NAGLE_PUSH) return 1; - /* Don't use the nagle rule for urgent data (or for the final FIN). */ - if (tp->urg_mode || + /* Don't use the nagle rule for urgent data (or for the final FIN). + * Nagle can be ignored during F-RTO too (see RFC4138). + */ + if (tp->urg_mode || (tp->frto_counter == 2) || (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)) return 1; -- cgit
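
As a rough illustration of the decision that the final hunk above changes, here is a standalone, simplified model of the Nagle test with the F-RTO override. It is not the kernel function: the struct, field, and helper names are invented for this sketch, and the small-segment rule is condensed to a single flag.

/*
 * Standalone sketch (not kernel code): a simplified model of the Nagle
 * decision after the F-RTO change above. Names and types here are
 * invented for illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

struct nagle_ctx {
	bool nonagle_push;   /* caller asked to push immediately            */
	bool urg_mode;       /* urgent data pending                         */
	bool fin;            /* segment carries FIN                         */
	int  frto_counter;   /* 2 means F-RTO must send its new-data probe  */
	unsigned int skb_len;
	unsigned int cur_mss;
	bool minshall_ok;    /* no small segments currently outstanding     */
};

/* Return true if the segment may be sent despite Nagle. */
static bool nagle_allows_send(const struct nagle_ctx *c)
{
	if (c->nonagle_push)
		return true;

	/* Urgent data, FIN, and the F-RTO new-data segment (RFC 4138)
	 * all override Nagle. */
	if (c->urg_mode || c->frto_counter == 2 || c->fin)
		return true;

	/* Full-sized segments are always allowed; a short tail segment
	 * is allowed only if nothing small is already in flight. */
	if (c->skb_len >= c->cur_mss)
		return true;

	return c->minshall_ok;
}

int main(void)
{
	struct nagle_ctx c = {
		.frto_counter = 2,   /* F-RTO wants new data out now */
		.skb_len = 100,      /* sub-MSS segment              */
		.cur_mss = 1460,
		.minshall_ok = false,
	};

	printf("send allowed: %s\n", nagle_allows_send(&c) ? "yes" : "no");
	return 0;
}

The only point to take away is that frto_counter == 2 joins urgent data and FIN as conditions that bypass Nagle, so the sub-MSS new-data segment F-RTO needs is never held back.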
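
Similarly, the earlier "Abstract out all write queue operations" commit replaces open-coded manipulation of sk->sk_write_queue and sk->sk_send_head with helpers such as tcp_send_head(), tcp_add_write_queue_tail() and tcp_advance_send_head(). The toy model below shows the idea behind those helpers, a queue plus a cursor marking where sending resumes, using invented names and types; it is a sketch of the concept, not the kernel implementation.

/*
 * Standalone sketch (not kernel code): a toy write queue with a send
 * head cursor, illustrating what the tcp_*_write_queue helpers hide
 * from their callers. Names and types are invented for illustration.
 */
#include <stddef.h>
#include <stdio.h>

struct seg {
	struct seg *next;
	unsigned int len;
};

struct toy_sock {
	struct seg *queue_head;   /* oldest unacknowledged segment */
	struct seg *queue_tail;   /* newest queued segment         */
	struct seg *send_head;    /* next segment not yet sent     */
};

static struct seg *toy_send_head(struct toy_sock *sk)
{
	return sk->send_head;
}

static void toy_add_write_queue_tail(struct toy_sock *sk, struct seg *s)
{
	s->next = NULL;
	if (sk->queue_tail)
		sk->queue_tail->next = s;
	else
		sk->queue_head = s;
	sk->queue_tail = s;
	if (!sk->send_head)          /* remember where sending must start */
		sk->send_head = s;
}

static void toy_advance_send_head(struct toy_sock *sk, struct seg *s)
{
	sk->send_head = s->next;     /* NULL once everything is sent */
}

int main(void)
{
	struct toy_sock sk = { 0 };
	struct seg a = { .len = 1000 }, b = { .len = 500 };

	toy_add_write_queue_tail(&sk, &a);
	toy_add_write_queue_tail(&sk, &b);

	while (toy_send_head(&sk)) {
		printf("send %u bytes\n", toy_send_head(&sk)->len);
		toy_advance_send_head(&sk, toy_send_head(&sk));
	}
	return 0;
}

Because call sites only go through such accessors after that commit, the backing structure could later be replaced, for example with one supporting fast interval searching as the commit message anticipates, without touching the callers.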