net: more accurate skb truesize
skb truesize currently accounts for the sk_buff struct and part of the skb head. kmalloc() roundings are also ignored.

Considering that skb_shared_info is larger than sk_buff, it's time to take it into account for better memory accounting.

This patch introduces the SKB_TRUESIZE(X) macro to centralize the various assumptions into a single place.

At skb alloc phase, we put the skb_shared_info struct at the exact end of the skb head, to allow a better use of memory (lowering the number of reallocations), since kmalloc() gives us power-of-two memory blocks.

Unless SLUB/SLAB debug is active, both skb->head and skb_shared_info are aligned to cache lines, as before.

Note: this patch might trigger performance regressions in misconfigured protocol stacks, which can now hit per-socket or global memory limits that were previously never reached. But it's a necessary step towards more accurate memory accounting.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
CC: Andi Kleen <ak@linux.intel.com>
CC: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
committed by David S. Miller
parent 97ba0eb64c
commit 87fb4b7b53
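For reference, SKB_TRUESIZE() is built from the SKB_DATA_ALIGN()/SKB_WITH_OVERHEAD() helpers in include/linux/skbuff.h. A minimal sketch of the relevant definitions (simplified; exact struct sizes and layout depend on the tree and architecture):

	/* round X up to an L1 cache line (sketch) */
	#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
					 ~(SMP_CACHE_BYTES - 1))

	/* head room left once skb_shared_info sits at the end of an
	 * allocation of X bytes */
	#define SKB_WITH_OVERHEAD(X)	\
		((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

	/* memory charged for a head of X bytes: the head itself plus the
	 * cache-line-aligned sk_buff and skb_shared_info overhead */
	#define SKB_TRUESIZE(X) ((X) +						\
				 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

With this, a socket is charged for the head plus both aligned structs, instead of only size + sizeof(struct sk_buff) as before.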
@@ -184,11 +184,20 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 		goto out;
 	prefetchw(skb);
 
-	size = SKB_DATA_ALIGN(size);
-	data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
-			gfp_mask, node);
+	/* We do our best to align skb_shared_info on a separate cache
+	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
+	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
+	 * Both skb->head and skb_shared_info are cache line aligned.
+	 */
+	size = SKB_DATA_ALIGN(size);
+	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	data = kmalloc_node_track_caller(size, gfp_mask, node);
 	if (!data)
 		goto nodata;
+	/* kmalloc(size) might give us more room than requested.
+	 * Put skb_shared_info exactly at the end of allocated zone,
+	 * to allow max possible filling before reallocation.
+	 */
+	size = SKB_WITH_OVERHEAD(ksize(data));
 	prefetchw(data + size);
 
 	/*
@@ -197,7 +206,8 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	 * the tail pointer in struct sk_buff!
 	 */
 	memset(skb, 0, offsetof(struct sk_buff, tail));
-	skb->truesize = size + sizeof(struct sk_buff);
+	/* Account for allocated memory : skb + skb->head */
+	skb->truesize = SKB_TRUESIZE(size);
 	atomic_set(&skb->users, 1);
 	skb->head = data;
 	skb->data = data;
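As a standalone illustration of the new sizing logic (not part of the patch; SMP_CACHE_BYTES, the skb_shared_info size and the power-of-two kmalloc() behaviour are assumed), allocating a 1500-byte head now works out roughly as follows:

	/* Userspace sketch of the new __alloc_skb() sizing arithmetic.
	 * Kernel behaviour is approximated: 64-byte cache lines and
	 * power-of-two slab sizes are assumed. */
	#include <stdio.h>

	#define SMP_CACHE_BYTES		64
	#define SKB_DATA_ALIGN(x)	(((x) + SMP_CACHE_BYTES - 1) & ~(SMP_CACHE_BYTES - 1))
	#define SHINFO_SIZE		320	/* assumed sizeof(struct skb_shared_info) */
	#define SKB_WITH_OVERHEAD(x)	((x) - SKB_DATA_ALIGN(SHINFO_SIZE))

	/* smallest power of two >= n, mimicking a kmalloc() slab size */
	static unsigned int kmalloc_size(unsigned int n)
	{
		unsigned int s = 32;

		while (s < n)
			s <<= 1;
		return s;
	}

	int main(void)
	{
		unsigned int size = 1500;			/* requested head size */

		size = SKB_DATA_ALIGN(size);			/* 1536 */
		size += SKB_DATA_ALIGN(SHINFO_SIZE);		/* + 320 for shared info */
		size = SKB_WITH_OVERHEAD(kmalloc_size(size));	/* 2048 slab - 320 = 1728 */
		printf("usable head room: %u bytes\n", size);
		return 0;
	}

Under these assumptions the whole 2048-byte slab becomes usable head room (minus the shared info at its end) and is charged to the socket, whereas the old code would have stopped at 1536 bytes of head room and left the kmalloc() padding both unused and unaccounted.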