net: skb_shared_info optimization
skb_dma_unmap() is quite expensive for small packets, because it uses
two different cache lines from skb_shared_info: one to access nr_frags,
one to access dma_maps[0].

Instead of dma_maps being an array of MAX_SKB_FRAGS + 1 elements, move
the head mapping into a new dma_head field of its own, close to
nr_frags, to reduce cache line misses.

Tested on my dev machine (bnx2 & tg3 adapters), nice speedup!

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
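To make the layout change concrete, here is a small userspace-compilable
sketch, not the real kernel headers: the stand-in types, the elided
fields, and the MAX_SKB_FRAGS value are assumptions; only nr_frags,
dma_head and dma_maps come from the patch. It prints how far the head
mapping sits from nr_frags before and after the change.

#include <stdio.h>
#include <stddef.h>

typedef unsigned long dma_addr_t;                   /* stand-in type */
typedef struct { void *page; unsigned int size; } skb_frag_t; /* stand-in */
#define MAX_SKB_FRAGS 18                            /* illustrative value */

/* Before: the head mapping is dma_maps[0], placed after frags[],
 * i.e. several cache lines away from nr_frags. */
struct shinfo_before {
	unsigned short nr_frags;
	/* ... other fields elided ... */
	skb_frag_t frags[MAX_SKB_FRAGS];
	dma_addr_t dma_maps[MAX_SKB_FRAGS + 1];     /* [0] = head mapping */
};

/* After: dma_head sits right next to nr_frags, so unmapping a linear
 * (nr_frags == 0) skb touches a single cache line. */
struct shinfo_after {
	unsigned short nr_frags;
	dma_addr_t dma_head;                        /* head mapping */
	/* ... other fields elided ... */
	skb_frag_t frags[MAX_SKB_FRAGS];
	dma_addr_t dma_maps[MAX_SKB_FRAGS];         /* frag mappings only */
};

int main(void)
{
	printf("before: head map %zu bytes away from nr_frags\n",
	       offsetof(struct shinfo_before, dma_maps[0]) -
	       offsetof(struct shinfo_before, nr_frags));
	printf("after:  head map %zu bytes away from nr_frags\n",
	       offsetof(struct shinfo_after, dma_head) -
	       offsetof(struct shinfo_after, nr_frags));
	return 0;
}

On a 64-bit build the "before" distance is well past one 64-byte cache
line, while the "after" distance keeps both fields on the same line,
which is the whole point of the commit.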
commit 042a53a9e4
parent eae3f29cc7
committed by David S. Miller
@@ -3139,8 +3139,7 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
 	/* set time_stamp *before* dma to help avoid a possible race */
 	buffer_info->time_stamp = jiffies;
 	buffer_info->next_to_watch = i;
-	buffer_info->dma = map[count];
-	count++;
+	buffer_info->dma = skb_shinfo(skb)->dma_head;
 
 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
 		struct skb_frag_struct *frag;
@@ -3164,7 +3163,7 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
 	tx_ring->buffer_info[i].skb = skb;
 	tx_ring->buffer_info[first].next_to_watch = i;
 
-	return count;
+	return count + 1;
 }
 
 static inline void igb_tx_queue_adv(struct igb_adapter *adapter,