iwl3945: fix lock dependency
Patch separates rx_used and rx_free into two different atomic contexts. We can now avoid using GFP_ATOMIC for skb allocation and use GFP_KERNEL instead. Signed-off-by: Abhijeet Kolekar <abhijeet.kolekar@intel.com> Signed-off-by: Reinette Chatre <reinette.chatre@intel.com> Signed-off-by: John W. Linville <linville@tuxdriver.com>
This commit is contained in:
committed by
John W. Linville
parent
84379cba44
commit
722404983b
@@ -1344,15 +1344,24 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv)
|
|||||||
struct list_head *element;
|
struct list_head *element;
|
||||||
struct iwl_rx_mem_buffer *rxb;
|
struct iwl_rx_mem_buffer *rxb;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
|
while (1) {
|
||||||
spin_lock_irqsave(&rxq->lock, flags);
|
spin_lock_irqsave(&rxq->lock, flags);
|
||||||
while (!list_empty(&rxq->rx_used)) {
|
|
||||||
|
if (list_empty(&rxq->rx_used)) {
|
||||||
|
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
element = rxq->rx_used.next;
|
element = rxq->rx_used.next;
|
||||||
rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
|
rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
|
||||||
|
list_del(element);
|
||||||
|
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||||
|
|
||||||
/* Alloc a new receive buffer */
|
/* Alloc a new receive buffer */
|
||||||
rxb->skb =
|
rxb->skb =
|
||||||
alloc_skb(priv->hw_params.rx_buf_size,
|
alloc_skb(priv->hw_params.rx_buf_size,
|
||||||
__GFP_NOWARN | GFP_ATOMIC);
|
GFP_KERNEL);
|
||||||
if (!rxb->skb) {
|
if (!rxb->skb) {
|
||||||
if (net_ratelimit())
|
if (net_ratelimit())
|
||||||
IWL_CRIT(priv, ": Can not allocate SKB buffers\n");
|
IWL_CRIT(priv, ": Can not allocate SKB buffers\n");
|
||||||
@@ -1370,19 +1379,19 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv)
|
|||||||
*/
|
*/
|
||||||
skb_reserve(rxb->skb, 4);
|
skb_reserve(rxb->skb, 4);
|
||||||
|
|
||||||
priv->alloc_rxb_skb++;
|
|
||||||
list_del(element);
|
|
||||||
|
|
||||||
/* Get physical address of RB/SKB */
|
/* Get physical address of RB/SKB */
|
||||||
rxb->real_dma_addr = pci_map_single(priv->pci_dev,
|
rxb->real_dma_addr = pci_map_single(priv->pci_dev,
|
||||||
rxb->skb->data,
|
rxb->skb->data,
|
||||||
priv->hw_params.rx_buf_size,
|
priv->hw_params.rx_buf_size,
|
||||||
PCI_DMA_FROMDEVICE);
|
PCI_DMA_FROMDEVICE);
|
||||||
|
|
||||||
|
spin_lock_irqsave(&rxq->lock, flags);
|
||||||
list_add_tail(&rxb->list, &rxq->rx_free);
|
list_add_tail(&rxb->list, &rxq->rx_free);
|
||||||
|
priv->alloc_rxb_skb++;
|
||||||
rxq->free_count++;
|
rxq->free_count++;
|
||||||
}
|
|
||||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
|
void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
|
||||||
{
|
{
|
||||||
@@ -1414,18 +1423,6 @@ void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
|
|||||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* this should be called while priv->lock is locked
|
|
||||||
*/
|
|
||||||
static void __iwl3945_rx_replenish(void *data)
|
|
||||||
{
|
|
||||||
struct iwl_priv *priv = data;
|
|
||||||
|
|
||||||
iwl3945_rx_allocate(priv);
|
|
||||||
iwl3945_rx_queue_restock(priv);
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
void iwl3945_rx_replenish(void *data)
|
void iwl3945_rx_replenish(void *data)
|
||||||
{
|
{
|
||||||
struct iwl_priv *priv = data;
|
struct iwl_priv *priv = data;
|
||||||
@@ -1644,7 +1641,7 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
|
|||||||
count++;
|
count++;
|
||||||
if (count >= 8) {
|
if (count >= 8) {
|
||||||
priv->rxq.read = i;
|
priv->rxq.read = i;
|
||||||
__iwl3945_rx_replenish(priv);
|
iwl3945_rx_queue_restock(priv);
|
||||||
count = 0;
|
count = 0;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
Reference in New Issue
Block a user