net: sk_add_backlog() take rmem_alloc into account
Current socket backlog limit is not enough to really stop DDoS attacks, because the user thread spends a long time processing a full backlog each round, and can end up spinning madly on the socket lock.

We should add the backlog size and the receive queue size (aka rmem_alloc) together to pace writers, and let the user thread run without being slowed down too much.

Introduce a sk_rcvqueues_full() helper, to avoid taking the socket lock in stress situations.

Under huge stress from a multiqueue/RPS enabled NIC, a single-flow udp receiver can now process ~200,000 pps (instead of ~100 pps before the patch) on an 8 core machine.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit c377411f24
parent 6e7676c1a7
committed by David S. Miller
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -256,7 +256,6 @@ struct sock {
 		struct sk_buff *head;
 		struct sk_buff *tail;
 		int len;
-		int limit;
 	} sk_backlog;
 	wait_queue_head_t	*sk_sleep;
 	struct dst_entry	*sk_dst_cache;
@@ -608,10 +607,20 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 	skb->next = NULL;
 }
 
+/*
+ * Take into account size of receive queue and backlog queue
+ */
+static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
+{
+	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
+
+	return qsize + skb->truesize > sk->sk_rcvbuf;
+}
+
 /* The per-socket spinlock must be held here. */
 static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
-	if (sk->sk_backlog.len >= max(sk->sk_backlog.limit, sk->sk_rcvbuf << 1))
+	if (sk_rcvqueues_full(sk, skb))
 		return -ENOBUFS;
 
 	__sk_add_backlog(sk, skb);
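For illustration only, here is a hedged sketch of how a protocol receive path could use the new helper ahead of the socket lock, in the spirit of the changelog's "avoid taking the socket lock in stress situations". The caller name proto_rcv_sketch and its exact drop policy are assumptions made for this sketch, not code from this commit; the real call sites (e.g. the UDP receive path) are not part of the hunk shown above.

#include <net/sock.h>
#include <linux/skbuff.h>

/*
 * Sketch (assumed caller, not part of this diff): drop early with
 * sk_rcvqueues_full() before paying for bh_lock_sock(), then rely on
 * sk_add_backlog(), which now also accounts rmem_alloc, when a user
 * thread owns the socket.
 */
static int proto_rcv_sketch(struct sock *sk, struct sk_buff *skb)
{
	int rc = 0;

	/* Lockless check: receive queue + backlog already above sk_rcvbuf? */
	if (sk_rcvqueues_full(sk, skb)) {
		kfree_skb(skb);
		return -ENOBUFS;
	}

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/* No user thread holds the socket: queue to sk_receive_queue */
		rc = sock_queue_rcv_skb(sk, skb);
	} else if (sk_add_backlog(sk, skb)) {
		/* Backlog + rmem_alloc over the limit: refuse the packet */
		rc = -ENOBUFS;
	}
	bh_unlock_sock(sk);

	if (rc < 0)
		kfree_skb(skb);
	return rc;
}

The pre-lock check is what backs the throughput claim in the changelog: under flood, most packets are rejected with a couple of reads and a comparison, without ever contending on the socket spinlock.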