Staging: et131x: Clean up tx naming
Clean up the names to be Linux like. Remove the unused pad buffer.

Signed-off-by: Alan Cox <alan@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
commit c78732ad75
parent 9251d71a4e
committed by Greg Kroah-Hartman
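
For orientation, the rename map this patch applies, summarised from the hunks below (an editorial summary, not part of the patch):

/* Old (Hungarian-style) name            New Linux-style name
 * MpTcbMem                           -> tcb_ring
 * TCBReadyQueueHead / Tail           -> tcb_qhead / tcb_qtail
 * CurrSendHead / CurrSendTail        -> send_head / send_tail
 * nBusySend                          -> used
 * txDmaReadyToSend                   -> send_idx
 * pTxStatusVa / pTxStatusPa          -> tx_status / tx_status_pa
 * TxPacketsSinceLastinterrupt        -> since_irq
 * tcb members: Next, Flags, Count,      next, flags, count, skb, len,
 *   Packet, PacketLength, WrIndex,   -> index, index_start, stale
 *   WrIndexStart, PacketStaleCount
 */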
@@ -118,9 +118,9 @@ int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
 	struct tx_ring *tx_ring = &adapter->tx_ring;
 
 	/* Allocate memory for the TCB's (Transmit Control Block) */
-	adapter->tx_ring.MpTcbMem = (struct tcb *)
+	adapter->tx_ring.tcb_ring = (struct tcb *)
 		kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA);
-	if (!adapter->tx_ring.MpTcbMem) {
+	if (!adapter->tx_ring.tcb_ring) {
 		dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
 		return -ENOMEM;
 	}
@@ -145,25 +145,14 @@ int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
 	 * storing the adjusted address.
 	 */
 	/* Allocate memory for the Tx status block */
-	tx_ring->pTxStatusVa = pci_alloc_consistent(adapter->pdev,
-						    sizeof(TX_STATUS_BLOCK_t),
-						    &tx_ring->pTxStatusPa);
-	if (!adapter->tx_ring.pTxStatusPa) {
+	tx_ring->tx_status = pci_alloc_consistent(adapter->pdev,
+						    sizeof(u32),
+						    &tx_ring->tx_status_pa);
+	if (!adapter->tx_ring.tx_status_pa) {
 		dev_err(&adapter->pdev->dev,
 			"Cannot alloc memory for Tx status block\n");
 		return -ENOMEM;
 	}
-
-	/* Allocate memory for a dummy buffer */
-	tx_ring->pTxDummyBlkVa = pci_alloc_consistent(adapter->pdev,
-						      NIC_MIN_PACKET_SIZE,
-						      &tx_ring->pTxDummyBlkPa);
-	if (!adapter->tx_ring.pTxDummyBlkPa) {
-		dev_err(&adapter->pdev->dev,
-			"Cannot alloc memory for Tx dummy buffer\n");
-		return -ENOMEM;
-	}
 
 	return 0;
 }
@@ -189,27 +178,16 @@ void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
 	}
 
 	/* Free memory for the Tx status block */
-	if (adapter->tx_ring.pTxStatusVa) {
+	if (adapter->tx_ring.tx_status) {
 		pci_free_consistent(adapter->pdev,
-				    sizeof(TX_STATUS_BLOCK_t),
-				    adapter->tx_ring.pTxStatusVa,
-				    adapter->tx_ring.pTxStatusPa);
+				    sizeof(u32),
+				    adapter->tx_ring.tx_status,
+				    adapter->tx_ring.tx_status_pa);
 
-		adapter->tx_ring.pTxStatusVa = NULL;
+		adapter->tx_ring.tx_status = NULL;
 	}
 
-	/* Free memory for the dummy buffer */
-	if (adapter->tx_ring.pTxDummyBlkVa) {
-		pci_free_consistent(adapter->pdev,
-				    NIC_MIN_PACKET_SIZE,
-				    adapter->tx_ring.pTxDummyBlkVa,
-				    adapter->tx_ring.pTxDummyBlkPa);
-
-		adapter->tx_ring.pTxDummyBlkVa = NULL;
-	}
-
 	/* Free the memory for the tcb structures */
-	kfree(adapter->tx_ring.MpTcbMem);
+	kfree(adapter->tx_ring.tcb_ring);
 }
 
 /**
@@ -230,14 +208,14 @@ void ConfigTxDmaRegs(struct et131x_adapter *etdev)
 	writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des.value);
 
 	/* Load the completion writeback physical address */
-	writel((u32)((u64)etdev->tx_ring.pTxStatusPa >> 32),
+	writel((u32)((u64)etdev->tx_ring.tx_status_pa >> 32),
 	       &txdma->dma_wb_base_hi);
-	writel((u32)etdev->tx_ring.pTxStatusPa, &txdma->dma_wb_base_lo);
+	writel((u32)etdev->tx_ring.tx_status_pa, &txdma->dma_wb_base_lo);
 
-	memset(etdev->tx_ring.pTxStatusVa, 0, sizeof(TX_STATUS_BLOCK_t));
+	*etdev->tx_ring.tx_status = 0;
 
 	writel(0, &txdma->service_request);
-	etdev->tx_ring.txDmaReadyToSend = 0;
+	etdev->tx_ring.send_idx = 0;
 }
 
 /**
@@ -278,26 +256,26 @@ void et131x_init_send(struct et131x_adapter *adapter)
 
 	/* Setup some convenience pointers */
 	tx_ring = &adapter->tx_ring;
-	tcb = adapter->tx_ring.MpTcbMem;
+	tcb = adapter->tx_ring.tcb_ring;
 
-	tx_ring->TCBReadyQueueHead = tcb;
+	tx_ring->tcb_qhead = tcb;
 
 	memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);
 
 	/* Go through and set up each TCB */
-	for (ct = 0; ct++ < NUM_TCB; tcb++) {
+	for (ct = 0; ct++ < NUM_TCB; tcb++)
 		/* Set the link pointer in HW TCB to the next TCB in the
 		 * chain. If this is the last TCB in the chain, also set the
 		 * tail pointer.
 		 */
-		tcb->Next = tcb + 1;
+		tcb->next = tcb + 1;
 
 	tcb--;
-	tx_ring->TCBReadyQueueTail = tcb;
-	tcb->Next = NULL;
+	tx_ring->tcb_qtail = tcb;
+	tcb->next = NULL;
 	/* Curr send queue should now be empty */
-	tx_ring->CurrSendHead = NULL;
-	tx_ring->CurrSendTail = NULL;
+	tx_ring->send_head = NULL;
+	tx_ring->send_tail = NULL;
 }
 
 /**
@@ -321,7 +299,7 @@ int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
 	 */
 
 	/* TCB is not available */
-	if (etdev->tx_ring.nBusySend >= NUM_TCB) {
+	if (etdev->tx_ring.used >= NUM_TCB) {
 		/* NOTE: If there's an error on send, no need to queue the
 		 * packet under Linux; if we just send an error up to the
 		 * netif layer, it will resend the skb to us.
@@ -376,35 +354,35 @@ static int et131x_send_packet(struct sk_buff *skb,
 	/* Get a TCB for this packet */
 	spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
 
-	tcb = etdev->tx_ring.TCBReadyQueueHead;
+	tcb = etdev->tx_ring.tcb_qhead;
 
 	if (tcb == NULL) {
 		spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
 		return -ENOMEM;
 	}
 
-	etdev->tx_ring.TCBReadyQueueHead = tcb->Next;
+	etdev->tx_ring.tcb_qhead = tcb->next;
 
-	if (etdev->tx_ring.TCBReadyQueueHead == NULL)
-		etdev->tx_ring.TCBReadyQueueTail = NULL;
+	if (etdev->tx_ring.tcb_qhead == NULL)
+		etdev->tx_ring.tcb_qtail = NULL;
 
 	spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
 
-	tcb->PacketLength = skb->len;
-	tcb->Packet = skb;
+	tcb->len = skb->len;
+	tcb->skb = skb;
 
 	if ((skb->data != NULL) && ((skb->len - skb->data_len) >= 6)) {
 		shbufva = (u16 *) skb->data;
 
 		if ((shbufva[0] == 0xffff) &&
 		    (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
-			tcb->Flags |= fMP_DEST_BROAD;
+			tcb->flags |= fMP_DEST_BROAD;
 		} else if ((shbufva[0] & 0x3) == 0x0001) {
-			tcb->Flags |= fMP_DEST_MULTI;
+			tcb->flags |= fMP_DEST_MULTI;
 		}
 	}
 
-	tcb->Next = NULL;
+	tcb->next = NULL;
 
 	/* Call the NIC specific send handler. */
 	status = nic_send_packet(etdev, tcb);
@@ -412,18 +390,18 @@ static int et131x_send_packet(struct sk_buff *skb,
 	if (status != 0) {
 		spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
 
-		if (etdev->tx_ring.TCBReadyQueueTail) {
-			etdev->tx_ring.TCBReadyQueueTail->Next = tcb;
+		if (etdev->tx_ring.tcb_qtail) {
+			etdev->tx_ring.tcb_qtail->next = tcb;
 		} else {
 			/* Apparently ready Q is empty. */
-			etdev->tx_ring.TCBReadyQueueHead = tcb;
+			etdev->tx_ring.tcb_qhead = tcb;
 		}
 
-		etdev->tx_ring.TCBReadyQueueTail = tcb;
+		etdev->tx_ring.tcb_qtail = tcb;
 		spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
 		return status;
 	}
-	WARN_ON(etdev->tx_ring.nBusySend > NUM_TCB);
+	WARN_ON(etdev->tx_ring.used > NUM_TCB);
 	return 0;
 }
 
@@ -440,7 +418,7 @@ static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb)
 	struct tx_desc desc[24];	/* 24 x 16 byte */
 	u32 frag = 0;
 	u32 thiscopy, remainder;
-	struct sk_buff *skb = tcb->Packet;
+	struct sk_buff *skb = tcb->skb;
 	u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
 	struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
 	unsigned long flags;
@@ -558,26 +536,26 @@ static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb)
 		return -EIO;
 
 	if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
-		if (++etdev->tx_ring.TxPacketsSinceLastinterrupt ==
+		if (++etdev->tx_ring.since_irq ==
 		    PARM_TX_NUM_BUFS_DEF) {
 			/* Last element & Interrupt flag */
 			desc[frag - 1].flags = 0x5;
-			etdev->tx_ring.TxPacketsSinceLastinterrupt = 0;
+			etdev->tx_ring.since_irq = 0;
 		} else { /* Last element */
 			desc[frag - 1].flags = 0x1;
 		}
-	} else {
+	} else
 		desc[frag - 1].flags = 0x5;
-	}
 
 	desc[0].flags |= 2;	/* First element flag */
 
-	tcb->WrIndexStart = etdev->tx_ring.txDmaReadyToSend;
-	tcb->PacketStaleCount = 0;
+	tcb->index_start = etdev->tx_ring.send_idx;
+	tcb->stale = 0;
 
 	spin_lock_irqsave(&etdev->SendHWLock, flags);
 
 	thiscopy = NUM_DESC_PER_RING_TX -
-				INDEX10(etdev->tx_ring.txDmaReadyToSend);
+				INDEX10(etdev->tx_ring.send_idx);
 
 	if (thiscopy >= frag) {
 		remainder = 0;
@@ -587,15 +565,15 @@ static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb)
 	}
 
 	memcpy(etdev->tx_ring.tx_desc_ring +
-	       INDEX10(etdev->tx_ring.txDmaReadyToSend), desc,
+	       INDEX10(etdev->tx_ring.send_idx), desc,
 	       sizeof(struct tx_desc) * thiscopy);
 
-	add_10bit(&etdev->tx_ring.txDmaReadyToSend, thiscopy);
+	add_10bit(&etdev->tx_ring.send_idx, thiscopy);
 
-	if (INDEX10(etdev->tx_ring.txDmaReadyToSend)== 0 ||
-	    INDEX10(etdev->tx_ring.txDmaReadyToSend) == NUM_DESC_PER_RING_TX) {
-		etdev->tx_ring.txDmaReadyToSend &= ~ET_DMA10_MASK;
-		etdev->tx_ring.txDmaReadyToSend ^= ET_DMA10_WRAP;
+	if (INDEX10(etdev->tx_ring.send_idx)== 0 ||
+	    INDEX10(etdev->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) {
+		etdev->tx_ring.send_idx &= ~ET_DMA10_MASK;
+		etdev->tx_ring.send_idx ^= ET_DMA10_WRAP;
 	}
 
 	if (remainder) {
@@ -603,34 +581,34 @@ static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb)
 		       desc + thiscopy,
 		       sizeof(struct tx_desc) * remainder);
 
-		add_10bit(&etdev->tx_ring.txDmaReadyToSend, remainder);
+		add_10bit(&etdev->tx_ring.send_idx, remainder);
 	}
 
-	if (INDEX10(etdev->tx_ring.txDmaReadyToSend) == 0) {
-		if (etdev->tx_ring.txDmaReadyToSend)
-			tcb->WrIndex = NUM_DESC_PER_RING_TX - 1;
+	if (INDEX10(etdev->tx_ring.send_idx) == 0) {
+		if (etdev->tx_ring.send_idx)
+			tcb->index = NUM_DESC_PER_RING_TX - 1;
 		else
-			tcb->WrIndex= ET_DMA10_WRAP | (NUM_DESC_PER_RING_TX - 1);
+			tcb->index= ET_DMA10_WRAP | (NUM_DESC_PER_RING_TX - 1);
 	} else
-		tcb->WrIndex = etdev->tx_ring.txDmaReadyToSend - 1;
+		tcb->index = etdev->tx_ring.send_idx - 1;
 
 	spin_lock(&etdev->TCBSendQLock);
 
-	if (etdev->tx_ring.CurrSendTail)
-		etdev->tx_ring.CurrSendTail->Next = tcb;
+	if (etdev->tx_ring.send_tail)
+		etdev->tx_ring.send_tail->next = tcb;
 	else
-		etdev->tx_ring.CurrSendHead = tcb;
+		etdev->tx_ring.send_head = tcb;
 
-	etdev->tx_ring.CurrSendTail = tcb;
+	etdev->tx_ring.send_tail = tcb;
 
-	WARN_ON(tcb->Next != NULL);
+	WARN_ON(tcb->next != NULL);
 
-	etdev->tx_ring.nBusySend++;
+	etdev->tx_ring.used++;
 
 	spin_unlock(&etdev->TCBSendQLock);
 
 	/* Write the new write pointer back to the device. */
-	writel(etdev->tx_ring.txDmaReadyToSend,
+	writel(etdev->tx_ring.send_idx,
 	       &etdev->regs->txdma.service_request);
 
 	/* For Gig only, we use Tx Interrupt coalescing. Enable the software
@@ -661,15 +639,15 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
 	struct tx_desc *desc = NULL;
 	struct net_device_stats *stats = &etdev->net_stats;
 
-	if (tcb->Flags & fMP_DEST_BROAD)
+	if (tcb->flags & fMP_DEST_BROAD)
 		atomic_inc(&etdev->Stats.brdcstxmt);
-	else if (tcb->Flags & fMP_DEST_MULTI)
+	else if (tcb->flags & fMP_DEST_MULTI)
 		atomic_inc(&etdev->Stats.multixmt);
 	else
 		atomic_inc(&etdev->Stats.unixmt);
 
-	if (tcb->Packet) {
-		stats->tx_bytes += tcb->Packet->len;
+	if (tcb->skb) {
+		stats->tx_bytes += tcb->skb->len;
 
 		/* Iterate through the TX descriptors on the ring
 		 * corresponding to this packet and umap the fragments
@@ -677,22 +655,22 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
 		 */
 		do {
 			desc =(struct tx_desc *) (etdev->tx_ring.tx_desc_ring +
-						INDEX10(tcb->WrIndexStart));
+						INDEX10(tcb->index_start));
 
 			pci_unmap_single(etdev->pdev,
 					 desc->addr_lo,
 					 desc->len_vlan, PCI_DMA_TODEVICE);
 
-			add_10bit(&tcb->WrIndexStart, 1);
-			if (INDEX10(tcb->WrIndexStart) >=
+			add_10bit(&tcb->index_start, 1);
+			if (INDEX10(tcb->index_start) >=
 			    NUM_DESC_PER_RING_TX) {
-				tcb->WrIndexStart &= ~ET_DMA10_MASK;
-				tcb->WrIndexStart ^= ET_DMA10_WRAP;
+				tcb->index_start &= ~ET_DMA10_MASK;
+				tcb->index_start ^= ET_DMA10_WRAP;
 			}
 		} while (desc != (etdev->tx_ring.tx_desc_ring +
-				  INDEX10(tcb->WrIndex)));
+				  INDEX10(tcb->index)));
 
-		dev_kfree_skb_any(tcb->Packet);
+		dev_kfree_skb_any(tcb->skb);
 	}
 
 	memset(tcb, 0, sizeof(struct tcb));
@@ -702,16 +680,16 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
 
 	etdev->Stats.opackets++;
 
-	if (etdev->tx_ring.TCBReadyQueueTail)
-		etdev->tx_ring.TCBReadyQueueTail->Next = tcb;
+	if (etdev->tx_ring.tcb_qtail)
+		etdev->tx_ring.tcb_qtail->next = tcb;
 	else
 		/* Apparently ready Q is empty. */
-		etdev->tx_ring.TCBReadyQueueHead = tcb;
+		etdev->tx_ring.tcb_qhead = tcb;
 
-	etdev->tx_ring.TCBReadyQueueTail = tcb;
+	etdev->tx_ring.tcb_qtail = tcb;
 
 	spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
-	WARN_ON(etdev->tx_ring.nBusySend < 0);
+	WARN_ON(etdev->tx_ring.used < 0);
 }
 
 /**
@@ -729,17 +707,17 @@ void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
 	/* Any packets being sent? Check the first TCB on the send list */
 	spin_lock_irqsave(&etdev->TCBSendQLock, flags);
 
-	tcb = etdev->tx_ring.CurrSendHead;
+	tcb = etdev->tx_ring.send_head;
 
 	while ((tcb != NULL) && (freed < NUM_TCB)) {
-		struct tcb *pNext = tcb->Next;
+		struct tcb *next = tcb->next;
 
-		etdev->tx_ring.CurrSendHead = pNext;
+		etdev->tx_ring.send_head = next;
 
-		if (pNext == NULL)
-			etdev->tx_ring.CurrSendTail = NULL;
+		if (next == NULL)
+			etdev->tx_ring.send_tail = NULL;
 
-		etdev->tx_ring.nBusySend--;
+		etdev->tx_ring.used--;
 
 		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
 
@@ -748,14 +726,14 @@ void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
 
 		spin_lock_irqsave(&etdev->TCBSendQLock, flags);
 
-		tcb = etdev->tx_ring.CurrSendHead;
+		tcb = etdev->tx_ring.send_head;
 	}
 
 	WARN_ON(freed == NUM_TCB);
 
 	spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
 
-	etdev->tx_ring.nBusySend = 0;
+	etdev->tx_ring.used = 0;
 }
 
 /**
@@ -782,41 +760,41 @@ void et131x_handle_send_interrupt(struct et131x_adapter *etdev)
 	 */
 	spin_lock_irqsave(&etdev->TCBSendQLock, flags);
 
-	tcb = etdev->tx_ring.CurrSendHead;
+	tcb = etdev->tx_ring.send_head;
 
 	while (tcb &&
-	       ((serviced ^ tcb->WrIndex) & ET_DMA10_WRAP) &&
-	       index < INDEX10(tcb->WrIndex)) {
-		etdev->tx_ring.nBusySend--;
-		etdev->tx_ring.CurrSendHead = tcb->Next;
-		if (tcb->Next == NULL)
-			etdev->tx_ring.CurrSendTail = NULL;
+	       ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
+	       index < INDEX10(tcb->index)) {
+		etdev->tx_ring.used--;
+		etdev->tx_ring.send_head = tcb->next;
+		if (tcb->next == NULL)
+			etdev->tx_ring.send_tail = NULL;
 
 		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
 		et131x_free_send_packet(etdev, tcb);
 		spin_lock_irqsave(&etdev->TCBSendQLock, flags);
 
 		/* Goto the next packet */
-		tcb = etdev->tx_ring.CurrSendHead;
+		tcb = etdev->tx_ring.send_head;
 	}
 	while (tcb &&
-	       !((serviced ^ tcb->WrIndex) & ET_DMA10_WRAP)
-	       && index > (tcb->WrIndex & ET_DMA10_MASK)) {
-		etdev->tx_ring.nBusySend--;
-		etdev->tx_ring.CurrSendHead = tcb->Next;
-		if (tcb->Next == NULL)
-			etdev->tx_ring.CurrSendTail = NULL;
+	       !((serviced ^ tcb->index) & ET_DMA10_WRAP)
+	       && index > (tcb->index & ET_DMA10_MASK)) {
+		etdev->tx_ring.used--;
+		etdev->tx_ring.send_head = tcb->next;
+		if (tcb->next == NULL)
+			etdev->tx_ring.send_tail = NULL;
 
 		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
 		et131x_free_send_packet(etdev, tcb);
 		spin_lock_irqsave(&etdev->TCBSendQLock, flags);
 
 		/* Goto the next packet */
-		tcb = etdev->tx_ring.CurrSendHead;
+		tcb = etdev->tx_ring.send_head;
 	}
 
 	/* Wake up the queue when we hit a low-water mark */
-	if (etdev->tx_ring.nBusySend <= (NUM_TCB / 3))
+	if (etdev->tx_ring.used <= (NUM_TCB / 3))
 		netif_wake_queue(etdev->netdev);
 
 	spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
@@ -97,100 +97,71 @@ struct tx_desc {
 	u32 flags;	/* data (detailed above) */
 };
 
-/* Typedefs for Tx DMA engine status writeback */
-
 /*
- * TX_STATUS_BLOCK_t is sructure representing the status of the Tx DMA engine
- * it sits in free memory, and is pointed to by 0x101c / 0x1020
+ * The status of the Tx DMA engine it sits in free memory, and is pointed to
+ * by 0x101c / 0x1020. This is a DMA10 type
 */
-typedef union _tx_status_block_t {
-	u32 value;
-	struct {
-#ifdef _BIT_FIELDS_HTOL
-		u32 unused:21;		/* bits 11-31 */
-		u32 serv_cpl_wrap:1;	/* bit 10 */
-		u32 serv_cpl:10;	/* bits 0-9 */
-#else
-		u32 serv_cpl:10;	/* bits 0-9 */
-		u32 serv_cpl_wrap:1;	/* bit 10 */
-		u32 unused:21;		/* bits 11-31 */
-#endif
-	} bits;
-} TX_STATUS_BLOCK_t, *PTX_STATUS_BLOCK_t;
 
-/* TCB (Transmit Control Block) */
+/* TCB (Transmit Control Block: Host Side) */
 struct tcb {
-	struct tcb *Next;
-	u32 Flags;
-	u32 Count;
-	u32 PacketStaleCount;
-	struct sk_buff *Packet;
-	u32 PacketLength;
-	u32 WrIndex;
-	u32 WrIndexStart;
+	struct tcb *next;	/* Next entry in ring */
+	u32 flags;		/* Our flags for the packet */
+	u32 count;
+	u32 stale;		/* Used to spot stuck/lost packets */
+	struct sk_buff *skb;	/* Network skb we are tied to */
+	u32 len;
+	u32 index;
+	u32 index_start;
 };
 
-/* Structure to hold the skb's in a list */
-typedef struct tx_skb_list_elem {
-	struct list_head skb_list_elem;
-	struct sk_buff *skb;
-} TX_SKB_LIST_ELEM, *PTX_SKB_LIST_ELEM;
-
 /* Structure representing our local reference(s) to the ring */
 struct tx_ring {
 	/* TCB (Transmit Control Block) memory and lists */
-	struct tcb *MpTcbMem;
+	struct tcb *tcb_ring;
 
 	/* List of TCBs that are ready to be used */
-	struct tcb *TCBReadyQueueHead;
-	struct tcb *TCBReadyQueueTail;
+	struct tcb *tcb_qhead;
+	struct tcb *tcb_qtail;
 
 	/* list of TCBs that are currently being sent. NOTE that access to all
-	 * three of these (including nBusySend) are controlled via the
+	 * three of these (including used) are controlled via the
 	 * TCBSendQLock. This lock should be secured prior to incementing /
-	 * decrementing nBusySend, or any queue manipulation on CurrSendHead /
+	 * decrementing used, or any queue manipulation on send_head /
 	 * Tail
 	 */
-	struct tcb *CurrSendHead;
-	struct tcb *CurrSendTail;
-	int nBusySend;
+	struct tcb *send_head;
+	struct tcb *send_tail;
+	int used;
 
 	/* The actual descriptor ring */
 	struct tx_desc *tx_desc_ring;
 	dma_addr_t tx_desc_ring_pa;
 
 	/* ReadyToSend indicates where we last wrote to in the descriptor ring. */
-	u32 txDmaReadyToSend;
+	u32 send_idx;
 
 	/* The location of the write-back status block */
-	PTX_STATUS_BLOCK_t pTxStatusVa;
-	dma_addr_t pTxStatusPa;
+	u32 *tx_status;
+	dma_addr_t tx_status_pa;
 
-	/* A Block of zeroes used to pad packets that are less than 60 bytes */
-	void *pTxDummyBlkVa;
-	dma_addr_t pTxDummyBlkPa;
-
 	TXMAC_ERR_t TxMacErr;
 
 	/* Variables to track the Tx interrupt coalescing features */
-	int TxPacketsSinceLastinterrupt;
+	int since_irq;
 };
 
-/* Forward declaration of the frag-list for the following prototypes */
-typedef struct _MP_FRAG_LIST MP_FRAG_LIST, *PMP_FRAG_LIST;
-
 /* Forward declaration of the private adapter structure */
 struct et131x_adapter;
 
 /* PROTOTYPES for et1310_tx.c */
 int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter);
 void et131x_tx_dma_memory_free(struct et131x_adapter *adapter);
-void ConfigTxDmaRegs(struct et131x_adapter *pAdapter);
+void ConfigTxDmaRegs(struct et131x_adapter *adapter);
 void et131x_init_send(struct et131x_adapter *adapter);
-void et131x_tx_dma_disable(struct et131x_adapter *pAdapter);
-void et131x_tx_dma_enable(struct et131x_adapter *pAdapter);
-void et131x_handle_send_interrupt(struct et131x_adapter *pAdapter);
-void et131x_free_busy_send_packets(struct et131x_adapter *pAdapter);
+void et131x_tx_dma_disable(struct et131x_adapter *adapter);
+void et131x_tx_dma_enable(struct et131x_adapter *adapter);
+void et131x_handle_send_interrupt(struct et131x_adapter *adapter);
+void et131x_free_busy_send_packets(struct et131x_adapter *adapter);
 int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev);
 
 #endif	/* __ET1310_TX_H__ */
@@ -179,10 +179,10 @@ irqreturn_t et131x_isr(int irq, void *dev_id)
 	/* This is our interrupt, so process accordingly */
 
 	if (status & ET_INTR_WATCHDOG) {
-		struct tcb *tcb = adapter->tx_ring.CurrSendHead;
+		struct tcb *tcb = adapter->tx_ring.send_head;
 
 		if (tcb)
-			if (++tcb->PacketStaleCount > 1)
+			if (++tcb->stale > 1)
 				status |= ET_INTR_TXDMA_ISR;
 
 		if (adapter->RxRing.UnfinishedReceives)
@@ -541,19 +541,19 @@ void et131x_tx_timeout(struct net_device *netdev)
 	/* Is send stuck? */
 	spin_lock_irqsave(&etdev->TCBSendQLock, flags);
 
-	tcb = etdev->tx_ring.CurrSendHead;
+	tcb = etdev->tx_ring.send_head;
 
 	if (tcb != NULL) {
-		tcb->Count++;
+		tcb->count++;
 
-		if (tcb->Count > NIC_SEND_HANG_THRESHOLD) {
+		if (tcb->count > NIC_SEND_HANG_THRESHOLD) {
 			spin_unlock_irqrestore(&etdev->TCBSendQLock,
 					       flags);
 
 			dev_warn(&etdev->pdev->dev,
 				 "Send stuck - reset. tcb->WrIndex %x, Flags 0x%08x\n",
-				 tcb->WrIndex,
-				 tcb->Flags);
+				 tcb->index,
+				 tcb->flags);
 
 			et131x_close(netdev);
 			et131x_open(netdev);
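
The hunks above step send_idx and tcb->index with INDEX10()/add_10bit() and the ET_DMA10_* masks. A minimal sketch of that 10-bit index-plus-wrap-bit scheme, assuming helper definitions that match the driver's usage (the bodies below are an assumption, not copied from this patch):

#include <linux/types.h>

#define ET_DMA10_MASK	0x03ff	/* bits 0-9: descriptor index */
#define ET_DMA10_WRAP	0x0400	/* bit 10: flips on each full ring pass */
#define INDEX10(x)	((x) & ET_DMA10_MASK)

/* Advance a 10-bit ring position while preserving the wrap bit; the
 * callers above clear the mask and toggle the wrap bit themselves when
 * the index reaches NUM_DESC_PER_RING_TX.
 */
static inline void add_10bit(u32 *v, int n)
{
	*v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
}

With definitions like these, two ring positions a and b can be ordered across a wrap: if ((a ^ b) & ET_DMA10_WRAP) is set, the positions lie on different passes of the ring, which is the test et131x_handle_send_interrupt() applies above.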