Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--  net/ipv4/tcp.c  13
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 87f68e787d0..e2b7b805503 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1468,6 +1468,7 @@ void tcp_close(struct sock *sk, long timeout)
 {
 	struct sk_buff *skb;
 	int data_was_unread = 0;
+	int state;
 
 	lock_sock(sk);
 	sk->sk_shutdown = SHUTDOWN_MASK;
@@ -1544,6 +1545,11 @@ void tcp_close(struct sock *sk, long timeout)
 	sk_stream_wait_close(sk, timeout);
 
 adjudge_to_death:
+	state = sk->sk_state;
+	sock_hold(sk);
+	sock_orphan(sk);
+	atomic_inc(sk->sk_prot->orphan_count);
+
 	/* It is the last release_sock in its life. It will remove backlog. */
 	release_sock(sk);
 
@@ -1555,8 +1561,9 @@ adjudge_to_death:
 	bh_lock_sock(sk);
 	BUG_TRAP(!sock_owned_by_user(sk));
 
-	sock_hold(sk);
-	sock_orphan(sk);
+	/* Have we already been destroyed by a softirq or backlog? */
+	if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
+		goto out;
 
 	/* This is a (useful) BSD violating of the RFC. There is a
 	 * problem with TCP as specified in that the other end could
@@ -1584,7 +1591,6 @@ adjudge_to_death:
if (tmo > TCP_TIMEWAIT_LEN) {
inet_csk_reset_keepalive_timer(sk, tcp_fin_time(sk));
} else {
- atomic_inc(sk->sk_prot->orphan_count);
tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
goto out;
}
@@ -1603,7 +1609,6 @@ adjudge_to_death:
 			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
 		}
 	}
-	atomic_inc(sk->sk_prot->orphan_count);
 
 	if (sk->sk_state == TCP_CLOSE)
 		inet_csk_destroy_sock(sk);
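
Taken together, the hunks move sock_hold(), sock_orphan() and the sk_prot->orphan_count increment ahead of the final release_sock(), and add a re-check once the BH lock is re-taken so that tcp_close() bails out if a softirq or the backlog already moved the socket to TCP_CLOSE while it was unlocked. Below is a condensed sketch of the resulting ordering, not a standalone program: it assumes the surrounding tcp_close() kernel context and reuses only identifiers that appear in the diff above.

	state = sk->sk_state;			/* remember the state seen under the socket lock */
	sock_hold(sk);				/* keep a reference while the lock is dropped */
	sock_orphan(sk);			/* detach the socket from its owning process */
	atomic_inc(sk->sk_prot->orphan_count);	/* account the orphan once, before unlocking */

	release_sock(sk);			/* last release; softirq/backlog may run now */

	bh_lock_sock(sk);
	/* Have we already been destroyed by a softirq or backlog? */
	if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
		goto out;			/* already closed elsewhere; skip the rest */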