author		Ingo Molnar <mingo@elte.hu>	2009-01-12 11:32:03 +0100
committer	Ingo Molnar <mingo@elte.hu>	2009-01-12 11:32:03 +0100
commit		e3ee1e123183ca9847e74b7b8e2694c9e3b817a6 (patch)
tree		652a84674ed05eaa46a813de2223af0bd0168a5a /net/ipv4/tcp.c
parent		5762ba1873b0bb9faa631aaa02f533c2b9837f82 (diff)
parent		c59765042f53a79a7a65585042ff463b69cb248c (diff)
Merge commit 'v2.6.29-rc1' into timers/hrtimers
Conflicts:
	kernel/time/tick-common.c
Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--	net/ipv4/tcp.c	19
1 file changed, 7 insertions, 12 deletions
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1f3d52946b3..ce572f9dff0 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -580,10 +580,6 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
else if (!ret) {
if (spliced)
break;
- if (flags & SPLICE_F_NONBLOCK) {
- ret = -EAGAIN;
- break;
- }
if (sock_flag(sk, SOCK_DONE))
break;
if (sk->sk_err) {
@@ -1317,7 +1313,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
if ((available < target) &&
(len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
!sysctl_tcp_low_latency &&
- __get_cpu_var(softnet_data).net_dma) {
+ dma_find_channel(DMA_MEMCPY)) {
preempt_enable_no_resched();
tp->ucopy.pinned_list =
dma_pin_iovec_pages(msg->msg_iov, len);
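This hunk, and the matching one at do_prequeue below, switch tcp_recvmsg() from the old per-cpu softnet_data.net_dma pointer to the reworked dmaengine client interface. A minimal sketch of that interface, assuming CONFIG_DMA_ENGINE and the linux/dmaengine.h API of this kernel (dmaengine_get(), dma_find_channel()); example_grab_memcpy_chan() is a hypothetical helper, not part of this patch:

    #include <linux/dmaengine.h>

    /* Sketch only: a dmaengine client pins the subsystem once with
     * dmaengine_get(), then looks channels up per transaction type.
     * dma_find_channel() may return NULL when no DMA_MEMCPY channel
     * exists, which is exactly the case the patched code tests for. */
    static struct dma_chan *example_grab_memcpy_chan(void)
    {
        dmaengine_get();                        /* take a client reference */
        return dma_find_channel(DMA_MEMCPY);    /* NULL if none available  */
    }

A paired dmaengine_put() releases the client reference once the copies have been issued and completed.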
@@ -1527,7 +1523,7 @@ do_prequeue:
if (!(flags & MSG_TRUNC)) {
#ifdef CONFIG_NET_DMA
if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
- tp->ucopy.dma_chan = get_softnet_dma();
+ tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
if (tp->ucopy.dma_chan) {
tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
@@ -1632,7 +1628,6 @@ skip_copy:
/* Safe to free early-copied skbs now */
__skb_queue_purge(&sk->sk_async_wait_queue);
- dma_chan_put(tp->ucopy.dma_chan);
tp->ucopy.dma_chan = NULL;
}
if (tp->ucopy.pinned_list) {
@@ -1836,7 +1831,6 @@ adjudge_to_death:
state = sk->sk_state;
sock_hold(sk);
sock_orphan(sk);
- percpu_counter_inc(sk->sk_prot->orphan_count);
/* It is the last release_sock in its life. It will remove backlog. */
release_sock(sk);
@@ -1849,6 +1843,8 @@ adjudge_to_death:
bh_lock_sock(sk);
WARN_ON(sock_owned_by_user(sk));
+ percpu_counter_inc(sk->sk_prot->orphan_count);
+
/* Have we already been destroyed by a softirq or backlog? */
if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
goto out;
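These two hunks move the orphan accounting so that percpu_counter_inc() runs after bh_lock_sock(), i.e. while the socket is locked in BH context, instead of just before the final release_sock(). For reference, a self-contained sketch of the percpu_counter API that the orphan_count field uses, assuming linux/percpu_counter.h; example_orphans and example_track_orphan() are made-up names for illustration:

    #include <linux/percpu_counter.h>

    static struct percpu_counter example_orphans;   /* percpu_counter_init(&example_orphans, 0) at setup */

    static void example_track_orphan(void)
    {
        /* cheap, mostly per-cpu increment; an approximate total can be
         * read back later with percpu_counter_read_positive() */
        percpu_counter_inc(&example_orphans);
    }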
@@ -2518,9 +2514,7 @@ found:
flush |= memcmp(th + 1, th2 + 1, thlen - sizeof(*th));
total = p->len;
- mss = total;
- if (skb_shinfo(p)->frag_list)
- mss = skb_shinfo(p)->frag_list->len;
+ mss = skb_shinfo(p)->gso_size;
flush |= skb->len > mss || skb->len <= 0;
flush |= ntohl(th2->seq) + total != ntohl(th->seq);
@@ -2547,6 +2541,7 @@ out:
return pp;
}
+EXPORT_SYMBOL(tcp_gro_receive);
int tcp_gro_complete(struct sk_buff *skb)
{
@@ -2556,7 +2551,6 @@ int tcp_gro_complete(struct sk_buff *skb)
skb->csum_offset = offsetof(struct tcphdr, check);
skb->ip_summed = CHECKSUM_PARTIAL;
- skb_shinfo(skb)->gso_size = skb_shinfo(skb)->frag_list->len;
skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
if (th->cwr)
@@ -2564,6 +2558,7 @@ int tcp_gro_complete(struct sk_buff *skb)
return 0;
}
+EXPORT_SYMBOL(tcp_gro_complete);
#ifdef CONFIG_TCP_MD5SIG
static unsigned long tcp_md5sig_users;
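The two EXPORT_SYMBOL() additions make the TCP GRO helpers callable from modular code. A hedged sketch of how a modular NIC driver might hand a merged aggregate back to the stack, assuming the tcp_gro_complete() prototype from net/tcp.h; example_finish_gro() is hypothetical and not part of this patch:

    #include <net/tcp.h>

    /* Hypothetical driver helper: after several in-order TCP segments
     * have been merged into one skb, tcp_gro_complete() fills in the
     * checksum-offload fields and gso_segs (as seen in the hunks
     * above) before the skb is passed up the stack. */
    static void example_finish_gro(struct sk_buff *merged_skb)
    {
        tcp_gro_complete(merged_skb);
    }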