net: add alloc_skb_with_frags() helper
Extract from sock_alloc_send_pskb() code building skb with frags, so that we can reuse this in other contexts. Intent is to use it from tcp_send_rcvq(), tcp_collapse(), ... We also want to replace some skb_linearize() calls to a more reliable strategy in pathological cases where we need to reduce number of frags. Signed-off-by: Eric Dumazet <edumazet@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit (2e4e441071) was authored by Eric Dumazet and committed by David S. Miller; its parent commit is cb93471acc.
@@ -769,6 +769,12 @@ static inline struct sk_buff *alloc_skb(unsigned int size,
|
|||||||
return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
|
return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
|
||||||
|
unsigned long data_len,
|
||||||
|
int max_page_order,
|
||||||
|
int *errcode,
|
||||||
|
gfp_t gfp_mask);
|
||||||
|
|
||||||
static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
|
static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
|
||||||
gfp_t priority)
|
gfp_t priority)
|
||||||
{
|
{
|
||||||
|
@@ -4102,3 +4102,81 @@ err_free:
|
|||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(skb_vlan_untag);
|
EXPORT_SYMBOL(skb_vlan_untag);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* alloc_skb_with_frags - allocate skb with page frags
|
||||||
|
*
|
||||||
|
* header_len: size of linear part
|
||||||
|
* data_len: needed length in frags
|
||||||
|
* max_page_order: max page order desired.
|
||||||
|
* errcode: pointer to error code if any
|
||||||
|
* gfp_mask: allocation mask
|
||||||
|
*
|
||||||
|
* This can be used to allocate a paged skb, given a maximal order for frags.
|
||||||
|
*/
|
||||||
|
struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
|
||||||
|
unsigned long data_len,
|
||||||
|
int max_page_order,
|
||||||
|
int *errcode,
|
||||||
|
gfp_t gfp_mask)
|
||||||
|
{
|
||||||
|
int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
|
||||||
|
unsigned long chunk;
|
||||||
|
struct sk_buff *skb;
|
||||||
|
struct page *page;
|
||||||
|
gfp_t gfp_head;
|
||||||
|
int i;
|
||||||
|
|
||||||
|
*errcode = -EMSGSIZE;
|
||||||
|
/* Note this test could be relaxed, if we succeed to allocate
|
||||||
|
* high order pages...
|
||||||
|
*/
|
||||||
|
if (npages > MAX_SKB_FRAGS)
|
||||||
|
return NULL;
|
||||||
|
|
||||||
|
gfp_head = gfp_mask;
|
||||||
|
if (gfp_head & __GFP_WAIT)
|
||||||
|
gfp_head |= __GFP_REPEAT;
|
||||||
|
|
||||||
|
*errcode = -ENOBUFS;
|
||||||
|
skb = alloc_skb(header_len, gfp_head);
|
||||||
|
if (!skb)
|
||||||
|
return NULL;
|
||||||
|
|
||||||
|
skb->truesize += npages << PAGE_SHIFT;
|
||||||
|
|
||||||
|
for (i = 0; npages > 0; i++) {
|
||||||
|
int order = max_page_order;
|
||||||
|
|
||||||
|
while (order) {
|
||||||
|
if (npages >= 1 << order) {
|
||||||
|
page = alloc_pages(gfp_mask |
|
||||||
|
__GFP_COMP |
|
||||||
|
__GFP_NOWARN |
|
||||||
|
__GFP_NORETRY,
|
||||||
|
order);
|
||||||
|
if (page)
|
||||||
|
goto fill_page;
|
||||||
|
/* Do not retry other high order allocations */
|
||||||
|
order = 1;
|
||||||
|
max_page_order = 0;
|
||||||
|
}
|
||||||
|
order--;
|
||||||
|
}
|
||||||
|
page = alloc_page(gfp_mask);
|
||||||
|
if (!page)
|
||||||
|
goto failure;
|
||||||
|
fill_page:
|
||||||
|
chunk = min_t(unsigned long, data_len,
|
||||||
|
PAGE_SIZE << order);
|
||||||
|
skb_fill_page_desc(skb, i, page, 0, chunk);
|
||||||
|
data_len -= chunk;
|
||||||
|
npages -= 1 << order;
|
||||||
|
}
|
||||||
|
return skb;
|
||||||
|
|
||||||
|
failure:
|
||||||
|
kfree_skb(skb);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(alloc_skb_with_frags);
|
||||||
|
@@ -1762,21 +1762,12 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
|
|||||||
unsigned long data_len, int noblock,
|
unsigned long data_len, int noblock,
|
||||||
int *errcode, int max_page_order)
|
int *errcode, int max_page_order)
|
||||||
{
|
{
|
||||||
struct sk_buff *skb = NULL;
|
struct sk_buff *skb;
|
||||||
unsigned long chunk;
|
|
||||||
gfp_t gfp_mask;
|
|
||||||
long timeo;
|
long timeo;
|
||||||
int err;
|
int err;
|
||||||
int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
|
|
||||||
struct page *page;
|
|
||||||
int i;
|
|
||||||
|
|
||||||
err = -EMSGSIZE;
|
|
||||||
if (npages > MAX_SKB_FRAGS)
|
|
||||||
goto failure;
|
|
||||||
|
|
||||||
timeo = sock_sndtimeo(sk, noblock);
|
timeo = sock_sndtimeo(sk, noblock);
|
||||||
while (!skb) {
|
for (;;) {
|
||||||
err = sock_error(sk);
|
err = sock_error(sk);
|
||||||
if (err != 0)
|
if (err != 0)
|
||||||
goto failure;
|
goto failure;
|
||||||
@@ -1785,66 +1776,27 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
|
|||||||
if (sk->sk_shutdown & SEND_SHUTDOWN)
|
if (sk->sk_shutdown & SEND_SHUTDOWN)
|
||||||
goto failure;
|
goto failure;
|
||||||
|
|
||||||
if (atomic_read(&sk->sk_wmem_alloc) >= sk->sk_sndbuf) {
|
if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
|
||||||
set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
|
break;
|
||||||
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
|
|
||||||
err = -EAGAIN;
|
|
||||||
if (!timeo)
|
|
||||||
goto failure;
|
|
||||||
if (signal_pending(current))
|
|
||||||
goto interrupted;
|
|
||||||
timeo = sock_wait_for_wmem(sk, timeo);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
err = -ENOBUFS;
|
set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
|
||||||
gfp_mask = sk->sk_allocation;
|
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
|
||||||
if (gfp_mask & __GFP_WAIT)
|
err = -EAGAIN;
|
||||||
gfp_mask |= __GFP_REPEAT;
|
if (!timeo)
|
||||||
|
|
||||||
skb = alloc_skb(header_len, gfp_mask);
|
|
||||||
if (!skb)
|
|
||||||
goto failure;
|
goto failure;
|
||||||
|
if (signal_pending(current))
|
||||||
skb->truesize += data_len;
|
goto interrupted;
|
||||||
|
timeo = sock_wait_for_wmem(sk, timeo);
|
||||||
for (i = 0; npages > 0; i++) {
|
|
||||||
int order = max_page_order;
|
|
||||||
|
|
||||||
while (order) {
|
|
||||||
if (npages >= 1 << order) {
|
|
||||||
page = alloc_pages(sk->sk_allocation |
|
|
||||||
__GFP_COMP |
|
|
||||||
__GFP_NOWARN |
|
|
||||||
__GFP_NORETRY,
|
|
||||||
order);
|
|
||||||
if (page)
|
|
||||||
goto fill_page;
|
|
||||||
/* Do not retry other high order allocations */
|
|
||||||
order = 1;
|
|
||||||
max_page_order = 0;
|
|
||||||
}
|
|
||||||
order--;
|
|
||||||
}
|
|
||||||
page = alloc_page(sk->sk_allocation);
|
|
||||||
if (!page)
|
|
||||||
goto failure;
|
|
||||||
fill_page:
|
|
||||||
chunk = min_t(unsigned long, data_len,
|
|
||||||
PAGE_SIZE << order);
|
|
||||||
skb_fill_page_desc(skb, i, page, 0, chunk);
|
|
||||||
data_len -= chunk;
|
|
||||||
npages -= 1 << order;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
|
||||||
skb_set_owner_w(skb, sk);
|
errcode, sk->sk_allocation);
|
||||||
|
if (skb)
|
||||||
|
skb_set_owner_w(skb, sk);
|
||||||
return skb;
|
return skb;
|
||||||
|
|
||||||
interrupted:
|
interrupted:
|
||||||
err = sock_intr_errno(timeo);
|
err = sock_intr_errno(timeo);
|
||||||
failure:
|
failure:
|
||||||
kfree_skb(skb);
|
|
||||||
*errcode = err;
|
*errcode = err;
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
Reference in New Issue
Block a user