author    Josh Boyer <jwboyer@fedoraproject.org>    2016-07-25 09:05:30 -0400
committer Josh Boyer <jwboyer@fedoraproject.org>    2016-07-25 09:05:30 -0400
commit    e9ad699b524e6ab5eb6a3a465f7a45ce4933e930 (patch)
tree      2aa563f9d4c99119f8e3de0b26b1a7e4ce842e9f
parent    16c334e59594e2c09e3d81497d8304c77d20c520 (diff)
Add second patch needed to fix CVE-2016-5696
-rw-r--r--    kernel.spec                                                      1
-rw-r--r--    tcp-enable-per-socket-rate-limiting-of-all-challenge.patch    102
2 files changed, 103 insertions, 0 deletions
diff --git a/kernel.spec b/kernel.spec
index 25b1196c5..012f6a133 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -670,6 +670,7 @@ Patch834: qla2xxx-Fix-NULL-pointer-deref-in-QLA-interrupt.patch
 #CVE-2016-5389 CVE-2016-5969 rhbz 1354708 1355615
 Patch835: tcp-make-challenge-acks-less-predictable.patch
+Patch839: tcp-enable-per-socket-rate-limiting-of-all-challenge.patch
 # https://lists.fedoraproject.org/archives/list/kernel@lists.fedoraproject.org/message/A4YCP7OGMX6JLFT5V44H57GOMAQLC3M4/
 Patch836: drm-amdgpu-Disable-RPM-helpers-while-reprobing.patch
diff --git a/tcp-enable-per-socket-rate-limiting-of-all-challenge.patch b/tcp-enable-per-socket-rate-limiting-of-all-challenge.patch
new file mode 100644
index 000000000..0a5eab8aa
--- /dev/null
+++ b/tcp-enable-per-socket-rate-limiting-of-all-challenge.patch
@@ -0,0 +1,102 @@
+From 8272c58d085e5611a7f839fa32e148ae62446375 Mon Sep 17 00:00:00 2001
+From: Jason Baron <jbaron@akamai.com>
+Date: Thu, 14 Jul 2016 11:38:40 -0400
+Subject: [PATCH] tcp: enable per-socket rate limiting of all 'challenge acks'
+
+The per-socket rate limit for 'challenge acks' was introduced in the
+context of limiting ack loops:
+
+commit f2b2c582e824 ("tcp: mitigate ACK loops for connections as tcp_sock")
+
+And I think it can be extended to rate limit all 'challenge acks' on a
+per-socket basis.
+
+Since we have the global tcp_challenge_ack_limit, this patch allows
+tcp_challenge_ack_limit to be set to a large value, effectively relying
+on the per-socket limit, or to be set to a lower value while still
+preventing a single connection from consuming the entire challenge ack
+quota.
+
+It further moves in the direction of eliminating the global limit at some
+point, as Eric Dumazet has suggested. This is a follow-up to:
+Subject: tcp: make challenge acks less predictable
+
+Cc: Eric Dumazet <edumazet@google.com>
+Cc: David S. Miller <davem@davemloft.net>
+Cc: Neal Cardwell <ncardwell@google.com>
+Cc: Yuchung Cheng <ycheng@google.com>
+Cc: Yue Cao <ycao009@ucr.edu>
+Signed-off-by: Jason Baron <jbaron@akamai.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ net/ipv4/tcp_input.c | 39 ++++++++++++++++++++++-----------------
+ 1 file changed, 22 insertions(+), 17 deletions(-)
+
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 8c011359646b..796315104ad7 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3423,6 +3423,23 @@ static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32
+ return flag;
+ }
+
++static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
++ u32 *last_oow_ack_time)
++{
++ if (*last_oow_ack_time) {
++ s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
++
++ if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
++ NET_INC_STATS(net, mib_idx);
++ return true; /* rate-limited: don't send yet! */
++ }
++ }
++
++ *last_oow_ack_time = tcp_time_stamp;
++
++ return false; /* not rate-limited: go ahead, send dupack now! */
++}
++
+ /* Return true if we're currently rate-limiting out-of-window ACKs and
+ * thus shouldn't send a dupack right now. We rate-limit dupacks in
+ * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS
+@@ -3436,21 +3453,9 @@ bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
+ /* Data packets without SYNs are not likely part of an ACK loop. */
+ if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) &&
+ !tcp_hdr(skb)->syn)
+- goto not_rate_limited;
+-
+- if (*last_oow_ack_time) {
+- s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
+-
+- if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
+- NET_INC_STATS_BH(net, mib_idx);
+- return true; /* rate-limited: don't send yet! */
+- }
+- }
+-
+- *last_oow_ack_time = tcp_time_stamp;
++ return false;
+
+-not_rate_limited:
+- return false; /* not rate-limited: go ahead, send dupack now! */
++ return __tcp_oow_rate_limited(net, mib_idx, last_oow_ack_time);
+ }
+
+ /* RFC 5961 7 [ACK Throttling] */
+@@ -3463,9 +3468,9 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
+ u32 count, now;
+
+ /* First check our per-socket dupack rate limit. */
+- if (tcp_oow_rate_limited(sock_net(sk), skb,
+- LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
+- &tp->last_oow_ack_time))
++ if (__tcp_oow_rate_limited(sock_net(sk),
++ LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
++ &tp->last_oow_ack_time))
+ return;
+
+ /* Then check host-wide RFC 5961 rate limit. */
+--
+2.7.4
+
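For readers unfamiliar with the mechanism the patch factors out: __tcp_oow_rate_limited() refuses to send another out-of-window reply (such as a challenge ACK) if one was already sent on that socket within the last sysctl_tcp_invalid_ratelimit interval, recording the time of the last reply per socket. The following is a rough userspace sketch of that check, not kernel code: the function and variable names, the millisecond monotonic clock, and the hard-coded 500 ms value are stand-ins for the kernel's jiffies-based tcp_time_stamp and the net.ipv4.tcp_invalid_ratelimit sysctl (whose default is 500 ms).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for net.ipv4.tcp_invalid_ratelimit (default 500 ms). */
static const int64_t invalid_ratelimit_ms = 500;

static int64_t now_ms(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (int64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Return true if the reply should be suppressed (rate-limited).
 * Mirrors the logic of __tcp_oow_rate_limited(): if a reply went out
 * less than the ratelimit interval ago, drop this one; otherwise
 * remember the current time and allow it. */
static bool oow_rate_limited(int64_t *last_oow_ack_time)
{
        if (*last_oow_ack_time) {
                int64_t elapsed = now_ms() - *last_oow_ack_time;

                if (elapsed >= 0 && elapsed < invalid_ratelimit_ms)
                        return true;    /* too soon: don't send yet */
        }

        *last_oow_ack_time = now_ms();
        return false;                   /* OK to send a challenge ACK */
}

int main(void)
{
        int64_t last = 0;               /* per-socket state, one per connection */
        int sent = 0;
        int i;

        /* Hammer the check 1000 times in a tight loop; only the first
         * attempt (and any made more than 500 ms apart) gets through. */
        for (i = 0; i < 1000; i++)
                if (!oow_rate_limited(&last))
                        sent++;

        printf("challenge ACKs sent: %d of 1000 attempts\n", sent);
        return 0;
}

With the patch applied, tcp_send_challenge_ack() runs this per-socket check before consulting the host-wide RFC 5961 limit (tcp_challenge_ack_limit), so a single connection can no longer drain the global challenge-ACK budget on its own.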