net: Add queue state xoff flag for stack
Create separate queue state flags so that either the stack or drivers can turn on XOFF. Added a set of functions used in the stack to determine if a queue is really stopped (either by the stack or by the driver). Signed-off-by: Tom Herbert <therbert@google.com> Acked-by: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
committed by
David S. Miller
parent
75957ba36c
commit
7346649826
@@ -60,7 +60,7 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
|
||||
|
||||
/* check the reason of requeuing without tx lock first */
|
||||
txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
|
||||
if (!netif_tx_queue_frozen_or_stopped(txq)) {
|
||||
if (!netif_xmit_frozen_or_stopped(txq)) {
|
||||
q->gso_skb = NULL;
|
||||
q->q.qlen--;
|
||||
} else
|
||||
@@ -121,7 +121,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
|
||||
spin_unlock(root_lock);
|
||||
|
||||
HARD_TX_LOCK(dev, txq, smp_processor_id());
|
||||
if (!netif_tx_queue_frozen_or_stopped(txq))
|
||||
if (!netif_xmit_frozen_or_stopped(txq))
|
||||
ret = dev_hard_start_xmit(skb, dev, txq);
|
||||
|
||||
HARD_TX_UNLOCK(dev, txq);
|
||||
@@ -143,7 +143,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
|
||||
ret = dev_requeue_skb(skb, q);
|
||||
}
|
||||
|
||||
if (ret && netif_tx_queue_frozen_or_stopped(txq))
|
||||
if (ret && netif_xmit_frozen_or_stopped(txq))
|
||||
ret = 0;
|
||||
|
||||
return ret;
|
||||
@@ -242,7 +242,7 @@ static void dev_watchdog(unsigned long arg)
|
||||
* old device drivers set dev->trans_start
|
||||
*/
|
||||
trans_start = txq->trans_start ? : dev->trans_start;
|
||||
if (netif_tx_queue_stopped(txq) &&
|
||||
if (netif_xmit_stopped(txq) &&
|
||||
time_after(jiffies, (trans_start +
|
||||
dev->watchdog_timeo))) {
|
||||
some_queue_timedout = 1;
|
||||
|
@@ -107,7 +107,8 @@ static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
|
||||
/* Check that target subqueue is available before
|
||||
* pulling an skb to avoid head-of-line blocking.
|
||||
*/
|
||||
if (!__netif_subqueue_stopped(qdisc_dev(sch), q->curband)) {
|
||||
if (!netif_xmit_stopped(
|
||||
netdev_get_tx_queue(qdisc_dev(sch), q->curband))) {
|
||||
qdisc = q->queues[q->curband];
|
||||
skb = qdisc->dequeue(qdisc);
|
||||
if (skb) {
|
||||
@@ -138,7 +139,8 @@ static struct sk_buff *multiq_peek(struct Qdisc *sch)
|
||||
/* Check that target subqueue is available before
|
||||
* pulling an skb to avoid head-of-line blocking.
|
||||
*/
|
||||
if (!__netif_subqueue_stopped(qdisc_dev(sch), curband)) {
|
||||
if (!netif_xmit_stopped(
|
||||
netdev_get_tx_queue(qdisc_dev(sch), curband))) {
|
||||
qdisc = q->queues[curband];
|
||||
skb = qdisc->ops->peek(qdisc);
|
||||
if (skb)
|
||||
|
@@ -301,7 +301,7 @@ restart:
|
||||
|
||||
if (slave_txq->qdisc_sleeping != q)
|
||||
continue;
|
||||
if (__netif_subqueue_stopped(slave, subq) ||
|
||||
if (netif_xmit_stopped(netdev_get_tx_queue(slave, subq)) ||
|
||||
!netif_running(slave)) {
|
||||
busy = 1;
|
||||
continue;
|
||||
@@ -312,7 +312,7 @@ restart:
|
||||
if (__netif_tx_trylock(slave_txq)) {
|
||||
unsigned int length = qdisc_pkt_len(skb);
|
||||
|
||||
if (!netif_tx_queue_frozen_or_stopped(slave_txq) &&
|
||||
if (!netif_xmit_frozen_or_stopped(slave_txq) &&
|
||||
slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) {
|
||||
txq_trans_update(slave_txq);
|
||||
__netif_tx_unlock(slave_txq);
|
||||
@@ -324,7 +324,7 @@ restart:
|
||||
}
|
||||
__netif_tx_unlock(slave_txq);
|
||||
}
|
||||
if (netif_queue_stopped(dev))
|
||||
if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)))
|
||||
busy = 1;
|
||||
break;
|
||||
case 1:
|
||||
|
Reference in New Issue
Block a user