aboutsummaryrefslogtreecommitdiff
path: root/net
diff options
context:
space:
mode:
authorCharles (Chas) Williams <ciwillia@brocade.com>2016-08-16 16:50:11 -0400
committerMoyster <oysterized@gmail.com>2016-11-07 13:47:01 +0100
commit5347a622c40f09112a56c45a4644b70e3dedc7e9 (patch)
tree1e419f3d96c46279aa5246b9c509138aff9e1552 /net
parentc3998b6dfd7690a8b7e24ef970f2932e1e65a616 (diff)
tcp: make challenge acks less predictable
commit 75ff39ccc1bd5d3c455b6822ab09e533c551f758 upstream. From: Eric Dumazet <edumazet@google.com> Yue Cao claims that current host rate limiting of challenge ACKS (RFC 5961) could leak enough information to allow a patient attacker to hijack TCP sessions. He will soon provide details in an academic paper. This patch increases the default limit from 100 to 1000, and adds some randomization so that the attacker can no longer hijack sessions without spending a considerable amount of probes. Based on initial analysis and patch from Linus. Note that we also have per socket rate limiting, so it is tempting to remove the host limit in the future. v2: randomize the count of challenge acks per second, not the period. Fixes: 282f23c6ee34 ("tcp: implement RFC 5961 3.2") Reported-by: Yue Cao <ycao009@ucr.edu> Signed-off-by: Eric Dumazet <edumazet@google.com> Suggested-by: Linus Torvalds <torvalds@linux-foundation.org> Cc: Yuchung Cheng <ycheng@google.com> Cc: Neal Cardwell <ncardwell@google.com> Acked-by: Neal Cardwell <ncardwell@google.com> Acked-by: Yuchung Cheng <ycheng@google.com> Signed-off-by: David S. Miller <davem@davemloft.net> [ ciwillia: backport to 3.10-stable ] Signed-off-by: Chas Williams <ciwillia@brocade.com> Signed-off-by: Willy Tarreau <w@1wt.eu>
Diffstat (limited to 'net')
-rw-r--r--net/ipv4/tcp_input.c12
1 file changed, 7 insertions, 5 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d6c340512..0cf5870f4 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3323,7 +3323,8 @@ static void tcp_send_challenge_ack(struct sock *sk)
/* unprotected vars, we dont care of overwrites */
static u32 challenge_timestamp;
static unsigned int challenge_count;
- u32 count, now;
+ u32 now = jiffies / HZ;
+ u32 count;
/* Then check host-wide RFC 5961 rate limit. */
now = jiffies / HZ;
@@ -3331,12 +3332,13 @@ static void tcp_send_challenge_ack(struct sock *sk)
u32 half = (sysctl_tcp_challenge_ack_limit + 1) >> 1;
challenge_timestamp = now;
- WRITE_ONCE(challenge_count, half +
- prandom_u32_max(sysctl_tcp_challenge_ack_limit));
+ ACCESS_ONCE(challenge_count) = half +
+ reciprocal_divide(prandom_u32(),
+ sysctl_tcp_challenge_ack_limit);
}
- count = READ_ONCE(challenge_count);
+ count = ACCESS_ONCE(challenge_count);
if (count > 0) {
- WRITE_ONCE(challenge_count, count - 1);
+ ACCESS_ONCE(challenge_count) = count - 1;
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
tcp_send_ack(sk);
}