qlge: Cleanup atomic queue threshold check.
Signed-off-by: Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by: David S. Miller
Parent: 41812db8e2
Commit: d0de73096e
@@ -1397,7 +1397,6 @@ struct tx_ring {
 	struct tx_ring_desc *q;	/* descriptor list for the queue */
 	spinlock_t lock;
 	atomic_t tx_count;	/* counts down for every outstanding IO */
-	atomic_t queue_stopped;	/* Turns queue off when full. */
 	struct delayed_work tx_work;
 	struct ql_adapter *qdev;
 	u64 tx_packets;
@@ -2171,8 +2171,7 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
 	ql_write_cq_idx(rx_ring);
 	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
 	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
-		if (atomic_read(&tx_ring->queue_stopped) &&
-		    (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
+		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
 			/*
 			 * The queue got stopped because the tx_ring was full.
 			 * Wake it up, because it's now at least 25% empty.
@@ -2559,7 +2558,6 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
 			     "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
 			     __func__, tx_ring_idx);
 		netif_stop_subqueue(ndev, tx_ring->wq_id);
-		atomic_inc(&tx_ring->queue_stopped);
 		tx_ring->tx_errors++;
 		return NETDEV_TX_BUSY;
 	}
@@ -2688,7 +2686,6 @@ static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
 		tx_ring_desc++;
 	}
 	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
-	atomic_set(&tx_ring->queue_stopped, 0);
 }

 static void ql_free_tx_resources(struct ql_adapter *qdev,
|
Reference in New Issue
Block a user