Staging: et131x: tidy up names for the TX structures
Signed-off-by: Alan Cox <alan@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
commit b711b2e0fa
parent fb70ed6710
Committed by: Greg Kroah-Hartman
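In short, this is a mechanical rename of the driver's TX bookkeeping: TX_DESC_ENTRY_t becomes struct tx_desc, MP_TCB/PMP_TCB becomes struct tcb, TX_RING_t becomes struct tx_ring, the adapter member TxRing becomes tx_ring, and Hungarian-style locals such as pMpTcb, CurDesc and FragmentNumber become tcb, desc and frag. A typical line changes like this (illustrative; the same pattern repeats throughout the hunks below):

-	PMP_TCB pMpTcb = adapter->TxRing.CurrSendHead;
+	struct tcb *tcb = adapter->tx_ring.CurrSendHead;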
@@ -97,10 +97,10 @@
 static void et131x_update_tcb_list(struct et131x_adapter *etdev);
 static void et131x_check_send_wait_list(struct et131x_adapter *etdev);
 static inline void et131x_free_send_packet(struct et131x_adapter *etdev,
-PMP_TCB pMpTcb);
+struct tcb *tcb);
 static int et131x_send_packet(struct sk_buff *skb,
 struct et131x_adapter *etdev);
-static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb);
+static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb);

 /**
 * et131x_tx_dma_memory_alloc
@@ -117,12 +117,12 @@ static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb);
 int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
 {
 int desc_size = 0;
-TX_RING_t *tx_ring = &adapter->TxRing;
+struct tx_ring *tx_ring = &adapter->tx_ring;

 /* Allocate memory for the TCB's (Transmit Control Block) */
-adapter->TxRing.MpTcbMem = (MP_TCB *)kcalloc(NUM_TCB, sizeof(MP_TCB),
+adapter->tx_ring.MpTcbMem = (struct tcb *)
-GFP_ATOMIC | GFP_DMA);
+kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA);
-if (!adapter->TxRing.MpTcbMem) {
+if (!adapter->tx_ring.MpTcbMem) {
 dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
 return -ENOMEM;
 }
@@ -130,11 +130,11 @@ int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
 /* Allocate enough memory for the Tx descriptor ring, and allocate
 * some extra so that the ring can be aligned on a 4k boundary.
 */
-desc_size = (sizeof(TX_DESC_ENTRY_t) * NUM_DESC_PER_RING_TX) + 4096 - 1;
+desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1;
-tx_ring->pTxDescRingVa =
+tx_ring->tx_desc_ring =
-(PTX_DESC_ENTRY_t) pci_alloc_consistent(adapter->pdev, desc_size,
+(struct tx_desc *) pci_alloc_consistent(adapter->pdev, desc_size,
-&tx_ring->pTxDescRingPa);
+&tx_ring->tx_desc_ring_pa);
-if (!adapter->TxRing.pTxDescRingVa) {
+if (!adapter->tx_ring.tx_desc_ring) {
 dev_err(&adapter->pdev->dev, "Cannot alloc memory for Tx Ring\n");
 return -ENOMEM;
 }
@@ -146,20 +146,20 @@ int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
 * are ever returned, make sure the high part is retrieved here before
 * storing the adjusted address.
 */
-tx_ring->pTxDescRingAdjustedPa = tx_ring->pTxDescRingPa;
+tx_ring->pTxDescRingAdjustedPa = tx_ring->tx_desc_ring_pa;

 /* Align Tx Descriptor Ring on a 4k (0x1000) byte boundary */
 et131x_align_allocated_memory(adapter,
 &tx_ring->pTxDescRingAdjustedPa,
 &tx_ring->TxDescOffset, 0x0FFF);

-tx_ring->pTxDescRingVa += tx_ring->TxDescOffset;
+tx_ring->tx_desc_ring += tx_ring->TxDescOffset;

 /* Allocate memory for the Tx status block */
 tx_ring->pTxStatusVa = pci_alloc_consistent(adapter->pdev,
 sizeof(TX_STATUS_BLOCK_t),
 &tx_ring->pTxStatusPa);
-if (!adapter->TxRing.pTxStatusPa) {
+if (!adapter->tx_ring.pTxStatusPa) {
 dev_err(&adapter->pdev->dev,
 "Cannot alloc memory for Tx status block\n");
 return -ENOMEM;
@@ -169,7 +169,7 @@ int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
 tx_ring->pTxDummyBlkVa = pci_alloc_consistent(adapter->pdev,
 NIC_MIN_PACKET_SIZE,
 &tx_ring->pTxDummyBlkPa);
-if (!adapter->TxRing.pTxDummyBlkPa) {
+if (!adapter->tx_ring.pTxDummyBlkPa) {
 dev_err(&adapter->pdev->dev,
 "Cannot alloc memory for Tx dummy buffer\n");
 return -ENOMEM;
@@ -188,43 +188,43 @@ void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
 {
 int desc_size = 0;

-if (adapter->TxRing.pTxDescRingVa) {
+if (adapter->tx_ring.tx_desc_ring) {
 /* Free memory relating to Tx rings here */
-adapter->TxRing.pTxDescRingVa -= adapter->TxRing.TxDescOffset;
+adapter->tx_ring.tx_desc_ring -= adapter->tx_ring.TxDescOffset;

-desc_size =
+desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX)
-(sizeof(TX_DESC_ENTRY_t) * NUM_DESC_PER_RING_TX) + 4096 - 1;
++ 4096 - 1;

 pci_free_consistent(adapter->pdev,
 desc_size,
-adapter->TxRing.pTxDescRingVa,
+adapter->tx_ring.tx_desc_ring,
-adapter->TxRing.pTxDescRingPa);
+adapter->tx_ring.tx_desc_ring_pa);

-adapter->TxRing.pTxDescRingVa = NULL;
+adapter->tx_ring.tx_desc_ring = NULL;
 }

 /* Free memory for the Tx status block */
-if (adapter->TxRing.pTxStatusVa) {
+if (adapter->tx_ring.pTxStatusVa) {
 pci_free_consistent(adapter->pdev,
 sizeof(TX_STATUS_BLOCK_t),
-adapter->TxRing.pTxStatusVa,
+adapter->tx_ring.pTxStatusVa,
-adapter->TxRing.pTxStatusPa);
+adapter->tx_ring.pTxStatusPa);

-adapter->TxRing.pTxStatusVa = NULL;
+adapter->tx_ring.pTxStatusVa = NULL;
 }

 /* Free memory for the dummy buffer */
-if (adapter->TxRing.pTxDummyBlkVa) {
+if (adapter->tx_ring.pTxDummyBlkVa) {
 pci_free_consistent(adapter->pdev,
 NIC_MIN_PACKET_SIZE,
-adapter->TxRing.pTxDummyBlkVa,
+adapter->tx_ring.pTxDummyBlkVa,
-adapter->TxRing.pTxDummyBlkPa);
+adapter->tx_ring.pTxDummyBlkPa);

-adapter->TxRing.pTxDummyBlkVa = NULL;
+adapter->tx_ring.pTxDummyBlkVa = NULL;
 }

-/* Free the memory for MP_TCB structures */
+/* Free the memory for the tcb structures */
-kfree(adapter->TxRing.MpTcbMem);
+kfree(adapter->tx_ring.MpTcbMem);
 }

 /**
@@ -236,9 +236,9 @@ void ConfigTxDmaRegs(struct et131x_adapter *etdev)
 struct _TXDMA_t __iomem *txdma = &etdev->regs->txdma;

 /* Load the hardware with the start of the transmit descriptor ring. */
-writel((uint32_t) (etdev->TxRing.pTxDescRingAdjustedPa >> 32),
+writel((u32) (etdev->tx_ring.pTxDescRingAdjustedPa >> 32),
 &txdma->pr_base_hi);
-writel((uint32_t) etdev->TxRing.pTxDescRingAdjustedPa,
+writel((u32) etdev->tx_ring.pTxDescRingAdjustedPa,
 &txdma->pr_base_lo);

 /* Initialise the transmit DMA engine */
@@ -252,12 +252,12 @@ void ConfigTxDmaRegs(struct et131x_adapter *etdev)
 * storing the adjusted address.
 */
 writel(0, &txdma->dma_wb_base_hi);
-writel(etdev->TxRing.pTxStatusPa, &txdma->dma_wb_base_lo);
+writel(etdev->tx_ring.pTxStatusPa, &txdma->dma_wb_base_lo);

-memset(etdev->TxRing.pTxStatusVa, 0, sizeof(TX_STATUS_BLOCK_t));
+memset(etdev->tx_ring.pTxStatusVa, 0, sizeof(TX_STATUS_BLOCK_t));

 writel(0, &txdma->service_request);
-etdev->TxRing.txDmaReadyToSend = 0;
+etdev->tx_ring.txDmaReadyToSend = 0;
 }

 /**
@@ -292,39 +292,39 @@ void et131x_tx_dma_enable(struct et131x_adapter *etdev)
 */
 void et131x_init_send(struct et131x_adapter *adapter)
 {
-PMP_TCB pMpTcb;
+struct tcb *tcb;
-uint32_t TcbCount;
+u32 count;
-TX_RING_t *tx_ring;
+struct tx_ring *tx_ring;

 /* Setup some convenience pointers */
-tx_ring = &adapter->TxRing;
+tx_ring = &adapter->tx_ring;
-pMpTcb = adapter->TxRing.MpTcbMem;
+tcb = adapter->tx_ring.MpTcbMem;

-tx_ring->TCBReadyQueueHead = pMpTcb;
+tx_ring->TCBReadyQueueHead = tcb;

 /* Go through and set up each TCB */
-for (TcbCount = 0; TcbCount < NUM_TCB; TcbCount++) {
+for (count = 0; count < NUM_TCB; count++) {
-memset(pMpTcb, 0, sizeof(MP_TCB));
+memset(tcb, 0, sizeof(struct tcb));

 /* Set the link pointer in HW TCB to the next TCB in the
 * chain. If this is the last TCB in the chain, also set the
 * tail pointer.
 */
-if (TcbCount < NUM_TCB - 1) {
+if (count < NUM_TCB - 1) {
-pMpTcb->Next = pMpTcb + 1;
+tcb->Next = tcb + 1;
 } else {
-tx_ring->TCBReadyQueueTail = pMpTcb;
+tx_ring->TCBReadyQueueTail = tcb;
-pMpTcb->Next = (PMP_TCB) NULL;
+tcb->Next = NULL;
 }

-pMpTcb++;
+tcb++;
 }

 /* Curr send queue should now be empty */
-tx_ring->CurrSendHead = (PMP_TCB) NULL;
+tx_ring->CurrSendHead = NULL;
-tx_ring->CurrSendTail = (PMP_TCB) NULL;
+tx_ring->CurrSendTail = NULL;

-INIT_LIST_HEAD(&adapter->TxRing.SendWaitQueue);
+INIT_LIST_HEAD(&adapter->tx_ring.SendWaitQueue);
 }

 /**
@@ -348,7 +348,7 @@ int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
 */

 /* Queue is not empty or TCB is not available */
-if (!list_empty(&etdev->TxRing.SendWaitQueue) ||
+if (!list_empty(&etdev->tx_ring.SendWaitQueue) ||
 MP_TCB_RESOURCES_NOT_AVAILABLE(etdev)) {
 /* NOTE: If there's an error on send, no need to queue the
 * packet under Linux; if we just send an error up to the
@@ -404,86 +404,85 @@ static int et131x_send_packet(struct sk_buff *skb,
 struct et131x_adapter *etdev)
 {
 int status = 0;
-PMP_TCB pMpTcb = NULL;
+struct tcb *tcb = NULL;
 uint16_t *shbufva;
 unsigned long flags;

 /* All packets must have at least a MAC address and a protocol type */
-if (skb->len < ETH_HLEN) {
+if (skb->len < ETH_HLEN)
 return -EIO;
-}

 /* Get a TCB for this packet */
 spin_lock_irqsave(&etdev->TCBReadyQLock, flags);

-pMpTcb = etdev->TxRing.TCBReadyQueueHead;
+tcb = etdev->tx_ring.TCBReadyQueueHead;

-if (pMpTcb == NULL) {
+if (tcb == NULL) {
 spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
 return -ENOMEM;
 }

-etdev->TxRing.TCBReadyQueueHead = pMpTcb->Next;
+etdev->tx_ring.TCBReadyQueueHead = tcb->Next;

-if (etdev->TxRing.TCBReadyQueueHead == NULL)
+if (etdev->tx_ring.TCBReadyQueueHead == NULL)
-etdev->TxRing.TCBReadyQueueTail = NULL;
+etdev->tx_ring.TCBReadyQueueTail = NULL;

 spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);

-pMpTcb->PacketLength = skb->len;
+tcb->PacketLength = skb->len;
-pMpTcb->Packet = skb;
+tcb->Packet = skb;

 if ((skb->data != NULL) && ((skb->len - skb->data_len) >= 6)) {
 shbufva = (uint16_t *) skb->data;

 if ((shbufva[0] == 0xffff) &&
 (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
-pMpTcb->Flags |= fMP_DEST_BROAD;
+tcb->Flags |= fMP_DEST_BROAD;
 } else if ((shbufva[0] & 0x3) == 0x0001) {
-pMpTcb->Flags |= fMP_DEST_MULTI;
+tcb->Flags |= fMP_DEST_MULTI;
 }
 }

-pMpTcb->Next = NULL;
+tcb->Next = NULL;

 /* Call the NIC specific send handler. */
 if (status == 0)
-status = nic_send_packet(etdev, pMpTcb);
+status = nic_send_packet(etdev, tcb);

 if (status != 0) {
 spin_lock_irqsave(&etdev->TCBReadyQLock, flags);

-if (etdev->TxRing.TCBReadyQueueTail) {
+if (etdev->tx_ring.TCBReadyQueueTail) {
-etdev->TxRing.TCBReadyQueueTail->Next = pMpTcb;
+etdev->tx_ring.TCBReadyQueueTail->Next = tcb;
 } else {
 /* Apparently ready Q is empty. */
-etdev->TxRing.TCBReadyQueueHead = pMpTcb;
+etdev->tx_ring.TCBReadyQueueHead = tcb;
 }

-etdev->TxRing.TCBReadyQueueTail = pMpTcb;
+etdev->tx_ring.TCBReadyQueueTail = tcb;
 spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
 return status;
 }
-WARN_ON(etdev->TxRing.nBusySend > NUM_TCB);
+WARN_ON(etdev->tx_ring.nBusySend > NUM_TCB);
 return 0;
 }

 /**
 * nic_send_packet - NIC specific send handler for version B silicon.
 * @etdev: pointer to our adapter
-* @pMpTcb: pointer to MP_TCB
+* @tcb: pointer to struct tcb
 *
 * Returns 0 or errno.
 */
-static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
+static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb)
 {
-uint32_t loopIndex;
+u32 i;
-TX_DESC_ENTRY_t CurDesc[24];
+struct tx_desc desc[24]; /* 24 x 16 byte */
-uint32_t FragmentNumber = 0;
+u32 frag = 0;
-uint32_t thiscopy, remainder;
+u32 thiscopy, remainder;
-struct sk_buff *pPacket = pMpTcb->Packet;
+struct sk_buff *skb = tcb->Packet;
-uint32_t FragListCount = skb_shinfo(pPacket)->nr_frags + 1;
+u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
-struct skb_frag_struct *pFragList = &skb_shinfo(pPacket)->frags[0];
+struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
 unsigned long flags;

 /* Part of the optimizations of this send routine restrict us to
@@ -494,17 +493,16 @@ static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
 * number of fragments. If needed, we can call this function,
 * although it is less efficient.
 */
-if (FragListCount > 23) {
+if (nr_frags > 23)
 return -EIO;
-}

-memset(CurDesc, 0, sizeof(TX_DESC_ENTRY_t) * (FragListCount + 1));
+memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));

-for (loopIndex = 0; loopIndex < FragListCount; loopIndex++) {
+for (i = 0; i < nr_frags; i++) {
 /* If there is something in this element, lets get a
 * descriptor from the ring and get the necessary data
 */
-if (loopIndex == 0) {
+if (i == 0) {
 /* If the fragments are smaller than a standard MTU,
 * then map them to a single descriptor in the Tx
 * Desc ring. However, if they're larger, as is
@@ -514,165 +512,165 @@ static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
 * This will work until we determine why the hardware
 * doesn't seem to like large fragments.
 */
-if ((pPacket->len - pPacket->data_len) <= 1514) {
+if ((skb->len - skb->data_len) <= 1514) {
-CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
+desc[frag].addr_hi = 0;
 /* Low 16bits are length, high is vlan and
 unused currently so zero */
-CurDesc[FragmentNumber].word2 =
+desc[frag].len_vlan =
-pPacket->len - pPacket->data_len;
+skb->len - skb->data_len;

 /* NOTE: Here, the dma_addr_t returned from
 * pci_map_single() is implicitly cast as a
-* uint32_t. Although dma_addr_t can be
+* u32. Although dma_addr_t can be
 * 64-bit, the address returned by
 * pci_map_single() is always 32-bit
 * addressable (as defined by the pci/dma
 * subsystem)
 */
-CurDesc[FragmentNumber++].DataBufferPtrLow =
+desc[frag++].addr_lo =
 pci_map_single(etdev->pdev,
-pPacket->data,
+skb->data,
-pPacket->len -
+skb->len -
-pPacket->data_len,
+skb->data_len,
 PCI_DMA_TODEVICE);
 } else {
-CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
+desc[frag].addr_hi = 0;
-CurDesc[FragmentNumber].word2 =
+desc[frag].len_vlan =
-(pPacket->len - pPacket->data_len) / 2;
+(skb->len - skb->data_len) / 2;

 /* NOTE: Here, the dma_addr_t returned from
 * pci_map_single() is implicitly cast as a
-* uint32_t. Although dma_addr_t can be
+* u32. Although dma_addr_t can be
 * 64-bit, the address returned by
 * pci_map_single() is always 32-bit
 * addressable (as defined by the pci/dma
 * subsystem)
 */
-CurDesc[FragmentNumber++].DataBufferPtrLow =
+desc[frag++].addr_lo =
 pci_map_single(etdev->pdev,
-pPacket->data,
+skb->data,
-((pPacket->len -
+((skb->len -
-pPacket->data_len) / 2),
+skb->data_len) / 2),
 PCI_DMA_TODEVICE);
-CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
+desc[frag].addr_hi = 0;

-CurDesc[FragmentNumber].word2 =
+desc[frag].len_vlan =
-(pPacket->len - pPacket->data_len) / 2;
+(skb->len - skb->data_len) / 2;

 /* NOTE: Here, the dma_addr_t returned from
 * pci_map_single() is implicitly cast as a
-* uint32_t. Although dma_addr_t can be
+* u32. Although dma_addr_t can be
 * 64-bit, the address returned by
 * pci_map_single() is always 32-bit
 * addressable (as defined by the pci/dma
 * subsystem)
 */
-CurDesc[FragmentNumber++].DataBufferPtrLow =
+desc[frag++].addr_lo =
 pci_map_single(etdev->pdev,
-pPacket->data +
+skb->data +
-((pPacket->len -
+((skb->len -
-pPacket->data_len) / 2),
+skb->data_len) / 2),
-((pPacket->len -
+((skb->len -
-pPacket->data_len) / 2),
+skb->data_len) / 2),
 PCI_DMA_TODEVICE);
 }
 } else {
-CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
+desc[frag].addr_hi = 0;
-CurDesc[FragmentNumber].word2 =
+desc[frag].len_vlan =
-pFragList[loopIndex - 1].size;
+frags[i - 1].size;

 /* NOTE: Here, the dma_addr_t returned from
-* pci_map_page() is implicitly cast as a uint32_t.
+* pci_map_page() is implicitly cast as a u32.
 * Although dma_addr_t can be 64-bit, the address
 * returned by pci_map_page() is always 32-bit
 * addressable (as defined by the pci/dma subsystem)
 */
-CurDesc[FragmentNumber++].DataBufferPtrLow =
+desc[frag++].addr_lo =
 pci_map_page(etdev->pdev,
-pFragList[loopIndex - 1].page,
+frags[i - 1].page,
-pFragList[loopIndex - 1].page_offset,
+frags[i - 1].page_offset,
-pFragList[loopIndex - 1].size,
+frags[i - 1].size,
 PCI_DMA_TODEVICE);
 }
 }

-if (FragmentNumber == 0)
+if (frag == 0)
 return -EIO;

 if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
-if (++etdev->TxRing.TxPacketsSinceLastinterrupt ==
+if (++etdev->tx_ring.TxPacketsSinceLastinterrupt ==
 PARM_TX_NUM_BUFS_DEF) {
 /* Last element & Interrupt flag */
-CurDesc[FragmentNumber - 1].word3 = 0x5;
+desc[frag - 1].flags = 0x5;
-etdev->TxRing.TxPacketsSinceLastinterrupt = 0;
+etdev->tx_ring.TxPacketsSinceLastinterrupt = 0;
 } else { /* Last element */
-CurDesc[FragmentNumber - 1].word3 = 0x1;
+desc[frag - 1].flags = 0x1;
 }
 } else {
-CurDesc[FragmentNumber - 1].word3 = 0x5;
+desc[frag - 1].flags = 0x5;
 }
-CurDesc[0].word3 |= 2; /* First element flag */
+desc[0].flags |= 2; /* First element flag */

-pMpTcb->WrIndexStart = etdev->TxRing.txDmaReadyToSend;
+tcb->WrIndexStart = etdev->tx_ring.txDmaReadyToSend;
-pMpTcb->PacketStaleCount = 0;
+tcb->PacketStaleCount = 0;

 spin_lock_irqsave(&etdev->SendHWLock, flags);

 thiscopy = NUM_DESC_PER_RING_TX -
-INDEX10(etdev->TxRing.txDmaReadyToSend);
+INDEX10(etdev->tx_ring.txDmaReadyToSend);

-if (thiscopy >= FragmentNumber) {
+if (thiscopy >= frag) {
 remainder = 0;
-thiscopy = FragmentNumber;
+thiscopy = frag;
 } else {
-remainder = FragmentNumber - thiscopy;
+remainder = frag - thiscopy;
 }

-memcpy(etdev->TxRing.pTxDescRingVa +
+memcpy(etdev->tx_ring.tx_desc_ring +
-INDEX10(etdev->TxRing.txDmaReadyToSend), CurDesc,
+INDEX10(etdev->tx_ring.txDmaReadyToSend), desc,
-sizeof(TX_DESC_ENTRY_t) * thiscopy);
+sizeof(struct tx_desc) * thiscopy);

-add_10bit(&etdev->TxRing.txDmaReadyToSend, thiscopy);
+add_10bit(&etdev->tx_ring.txDmaReadyToSend, thiscopy);

-if (INDEX10(etdev->TxRing.txDmaReadyToSend)== 0 ||
+if (INDEX10(etdev->tx_ring.txDmaReadyToSend)== 0 ||
-INDEX10(etdev->TxRing.txDmaReadyToSend) == NUM_DESC_PER_RING_TX) {
+INDEX10(etdev->tx_ring.txDmaReadyToSend) == NUM_DESC_PER_RING_TX) {
-etdev->TxRing.txDmaReadyToSend &= ~ET_DMA10_MASK;
+etdev->tx_ring.txDmaReadyToSend &= ~ET_DMA10_MASK;
-etdev->TxRing.txDmaReadyToSend ^= ET_DMA10_WRAP;
+etdev->tx_ring.txDmaReadyToSend ^= ET_DMA10_WRAP;
 }

 if (remainder) {
-memcpy(etdev->TxRing.pTxDescRingVa,
+memcpy(etdev->tx_ring.tx_desc_ring,
-CurDesc + thiscopy,
+desc + thiscopy,
-sizeof(TX_DESC_ENTRY_t) * remainder);
+sizeof(struct tx_desc) * remainder);

-add_10bit(&etdev->TxRing.txDmaReadyToSend, remainder);
+add_10bit(&etdev->tx_ring.txDmaReadyToSend, remainder);
 }

-if (INDEX10(etdev->TxRing.txDmaReadyToSend) == 0) {
+if (INDEX10(etdev->tx_ring.txDmaReadyToSend) == 0) {
-if (etdev->TxRing.txDmaReadyToSend)
+if (etdev->tx_ring.txDmaReadyToSend)
-pMpTcb->WrIndex = NUM_DESC_PER_RING_TX - 1;
+tcb->WrIndex = NUM_DESC_PER_RING_TX - 1;
 else
-pMpTcb->WrIndex= ET_DMA10_WRAP | (NUM_DESC_PER_RING_TX - 1);
+tcb->WrIndex= ET_DMA10_WRAP | (NUM_DESC_PER_RING_TX - 1);
 } else
-pMpTcb->WrIndex = etdev->TxRing.txDmaReadyToSend - 1;
+tcb->WrIndex = etdev->tx_ring.txDmaReadyToSend - 1;

 spin_lock(&etdev->TCBSendQLock);

-if (etdev->TxRing.CurrSendTail)
+if (etdev->tx_ring.CurrSendTail)
-etdev->TxRing.CurrSendTail->Next = pMpTcb;
+etdev->tx_ring.CurrSendTail->Next = tcb;
 else
-etdev->TxRing.CurrSendHead = pMpTcb;
+etdev->tx_ring.CurrSendHead = tcb;

-etdev->TxRing.CurrSendTail = pMpTcb;
+etdev->tx_ring.CurrSendTail = tcb;

-WARN_ON(pMpTcb->Next != NULL);
+WARN_ON(tcb->Next != NULL);

-etdev->TxRing.nBusySend++;
+etdev->tx_ring.nBusySend++;

 spin_unlock(&etdev->TCBSendQLock);

 /* Write the new write pointer back to the device. */
-writel(etdev->TxRing.txDmaReadyToSend,
+writel(etdev->tx_ring.txDmaReadyToSend,
 &etdev->regs->txdma.service_request);

 /* For Gig only, we use Tx Interrupt coalescing. Enable the software
@@ -689,72 +687,72 @@ static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)


 /**
-* et131x_free_send_packet - Recycle a MP_TCB, complete the packet if necessary
+* et131x_free_send_packet - Recycle a struct tcb
 * @etdev: pointer to our adapter
-* @pMpTcb: pointer to MP_TCB
+* @tcb: pointer to struct tcb
 *
+* Complete the packet if necessary
 * Assumption - Send spinlock has been acquired
 */
 inline void et131x_free_send_packet(struct et131x_adapter *etdev,
-PMP_TCB pMpTcb)
+struct tcb *tcb)
 {
 unsigned long flags;
-TX_DESC_ENTRY_t *desc = NULL;
+struct tx_desc *desc = NULL;
 struct net_device_stats *stats = &etdev->net_stats;

-if (pMpTcb->Flags & fMP_DEST_BROAD)
+if (tcb->Flags & fMP_DEST_BROAD)
 atomic_inc(&etdev->Stats.brdcstxmt);
-else if (pMpTcb->Flags & fMP_DEST_MULTI)
+else if (tcb->Flags & fMP_DEST_MULTI)
 atomic_inc(&etdev->Stats.multixmt);
 else
 atomic_inc(&etdev->Stats.unixmt);

-if (pMpTcb->Packet) {
+if (tcb->Packet) {
-stats->tx_bytes += pMpTcb->Packet->len;
+stats->tx_bytes += tcb->Packet->len;

 /* Iterate through the TX descriptors on the ring
 * corresponding to this packet and umap the fragments
 * they point to
 */
 do {
-desc =
+desc =(struct tx_desc *) (etdev->tx_ring.tx_desc_ring +
-(TX_DESC_ENTRY_t *) (etdev->TxRing.pTxDescRingVa +
+INDEX10(tcb->WrIndexStart));
-INDEX10(pMpTcb->WrIndexStart));

 pci_unmap_single(etdev->pdev,
-desc->DataBufferPtrLow,
+desc->addr_lo,
-desc->word2, PCI_DMA_TODEVICE);
+desc->len_vlan, PCI_DMA_TODEVICE);

-add_10bit(&pMpTcb->WrIndexStart, 1);
+add_10bit(&tcb->WrIndexStart, 1);
-if (INDEX10(pMpTcb->WrIndexStart) >=
+if (INDEX10(tcb->WrIndexStart) >=
 NUM_DESC_PER_RING_TX) {
-pMpTcb->WrIndexStart &= ~ET_DMA10_MASK;
+tcb->WrIndexStart &= ~ET_DMA10_MASK;
-pMpTcb->WrIndexStart ^= ET_DMA10_WRAP;
+tcb->WrIndexStart ^= ET_DMA10_WRAP;
 }
-} while (desc != (etdev->TxRing.pTxDescRingVa +
+} while (desc != (etdev->tx_ring.tx_desc_ring +
-INDEX10(pMpTcb->WrIndex)));
+INDEX10(tcb->WrIndex)));

-dev_kfree_skb_any(pMpTcb->Packet);
+dev_kfree_skb_any(tcb->Packet);
 }

-memset(pMpTcb, 0, sizeof(MP_TCB));
+memset(tcb, 0, sizeof(struct tcb));

 /* Add the TCB to the Ready Q */
 spin_lock_irqsave(&etdev->TCBReadyQLock, flags);

 etdev->Stats.opackets++;

-if (etdev->TxRing.TCBReadyQueueTail) {
+if (etdev->tx_ring.TCBReadyQueueTail) {
-etdev->TxRing.TCBReadyQueueTail->Next = pMpTcb;
+etdev->tx_ring.TCBReadyQueueTail->Next = tcb;
 } else {
 /* Apparently ready Q is empty. */
-etdev->TxRing.TCBReadyQueueHead = pMpTcb;
+etdev->tx_ring.TCBReadyQueueHead = tcb;
 }

-etdev->TxRing.TCBReadyQueueTail = pMpTcb;
+etdev->tx_ring.TCBReadyQueueTail = tcb;

 spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
-WARN_ON(etdev->TxRing.nBusySend < 0);
+WARN_ON(etdev->tx_ring.nBusySend < 0);
 }

 /**
@@ -765,52 +763,52 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
 */
 void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
 {
-PMP_TCB pMpTcb;
+struct tcb *tcb;
 struct list_head *entry;
 unsigned long flags;
-uint32_t FreeCounter = 0;
+u32 freed = 0;

-while (!list_empty(&etdev->TxRing.SendWaitQueue)) {
+while (!list_empty(&etdev->tx_ring.SendWaitQueue)) {
 spin_lock_irqsave(&etdev->SendWaitLock, flags);

-etdev->TxRing.nWaitSend--;
+etdev->tx_ring.nWaitSend--;
 spin_unlock_irqrestore(&etdev->SendWaitLock, flags);

-entry = etdev->TxRing.SendWaitQueue.next;
+entry = etdev->tx_ring.SendWaitQueue.next;
 }

-etdev->TxRing.nWaitSend = 0;
+etdev->tx_ring.nWaitSend = 0;

 /* Any packets being sent? Check the first TCB on the send list */
 spin_lock_irqsave(&etdev->TCBSendQLock, flags);

-pMpTcb = etdev->TxRing.CurrSendHead;
+tcb = etdev->tx_ring.CurrSendHead;

-while ((pMpTcb != NULL) && (FreeCounter < NUM_TCB)) {
+while ((tcb != NULL) && (freed < NUM_TCB)) {
-PMP_TCB pNext = pMpTcb->Next;
+struct tcb *pNext = tcb->Next;

-etdev->TxRing.CurrSendHead = pNext;
+etdev->tx_ring.CurrSendHead = pNext;

 if (pNext == NULL)
-etdev->TxRing.CurrSendTail = NULL;
+etdev->tx_ring.CurrSendTail = NULL;

-etdev->TxRing.nBusySend--;
+etdev->tx_ring.nBusySend--;

 spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);

-FreeCounter++;
+freed++;
-et131x_free_send_packet(etdev, pMpTcb);
+et131x_free_send_packet(etdev, tcb);

 spin_lock_irqsave(&etdev->TCBSendQLock, flags);

-pMpTcb = etdev->TxRing.CurrSendHead;
+tcb = etdev->tx_ring.CurrSendHead;
 }

-WARN_ON(FreeCounter == NUM_TCB);
+WARN_ON(freed == NUM_TCB);

 spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);

-etdev->TxRing.nBusySend = 0;
+etdev->tx_ring.nBusySend = 0;
 }

 /**
@@ -844,53 +842,53 @@ void et131x_handle_send_interrupt(struct et131x_adapter *etdev)
 static void et131x_update_tcb_list(struct et131x_adapter *etdev)
 {
 unsigned long flags;
-u32 ServiceComplete;
+u32 serviced;
-PMP_TCB pMpTcb;
+struct tcb * tcb;
 u32 index;

-ServiceComplete = readl(&etdev->regs->txdma.NewServiceComplete);
+serviced = readl(&etdev->regs->txdma.NewServiceComplete);
-index = INDEX10(ServiceComplete);
+index = INDEX10(serviced);

 /* Has the ring wrapped? Process any descriptors that do not have
 * the same "wrap" indicator as the current completion indicator
 */
 spin_lock_irqsave(&etdev->TCBSendQLock, flags);

-pMpTcb = etdev->TxRing.CurrSendHead;
+tcb = etdev->tx_ring.CurrSendHead;

-while (pMpTcb &&
+while (tcb &&
-((ServiceComplete ^ pMpTcb->WrIndex) & ET_DMA10_WRAP) &&
+((serviced ^ tcb->WrIndex) & ET_DMA10_WRAP) &&
-index < INDEX10(pMpTcb->WrIndex)) {
+index < INDEX10(tcb->WrIndex)) {
-etdev->TxRing.nBusySend--;
+etdev->tx_ring.nBusySend--;
-etdev->TxRing.CurrSendHead = pMpTcb->Next;
+etdev->tx_ring.CurrSendHead = tcb->Next;
-if (pMpTcb->Next == NULL)
+if (tcb->Next == NULL)
-etdev->TxRing.CurrSendTail = NULL;
+etdev->tx_ring.CurrSendTail = NULL;

 spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
-et131x_free_send_packet(etdev, pMpTcb);
+et131x_free_send_packet(etdev, tcb);
 spin_lock_irqsave(&etdev->TCBSendQLock, flags);

 /* Goto the next packet */
-pMpTcb = etdev->TxRing.CurrSendHead;
+tcb = etdev->tx_ring.CurrSendHead;
 }
-while (pMpTcb &&
+while (tcb &&
-!((ServiceComplete ^ pMpTcb->WrIndex) & ET_DMA10_WRAP)
+!((serviced ^ tcb->WrIndex) & ET_DMA10_WRAP)
-&& index > (pMpTcb->WrIndex & ET_DMA10_MASK)) {
+&& index > (tcb->WrIndex & ET_DMA10_MASK)) {
-etdev->TxRing.nBusySend--;
+etdev->tx_ring.nBusySend--;
-etdev->TxRing.CurrSendHead = pMpTcb->Next;
+etdev->tx_ring.CurrSendHead = tcb->Next;
-if (pMpTcb->Next == NULL)
+if (tcb->Next == NULL)
-etdev->TxRing.CurrSendTail = NULL;
+etdev->tx_ring.CurrSendTail = NULL;

 spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
-et131x_free_send_packet(etdev, pMpTcb);
+et131x_free_send_packet(etdev, tcb);
 spin_lock_irqsave(&etdev->TCBSendQLock, flags);

 /* Goto the next packet */
-pMpTcb = etdev->TxRing.CurrSendHead;
+tcb = etdev->tx_ring.CurrSendHead;
 }

 /* Wake up the queue when we hit a low-water mark */
-if (etdev->TxRing.nBusySend <= (NUM_TCB / 3))
+if (etdev->tx_ring.nBusySend <= (NUM_TCB / 3))
 netif_wake_queue(etdev->netdev);

 spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
@@ -909,13 +907,13 @@ static void et131x_check_send_wait_list(struct et131x_adapter *etdev)

 spin_lock_irqsave(&etdev->SendWaitLock, flags);

-while (!list_empty(&etdev->TxRing.SendWaitQueue) &&
+while (!list_empty(&etdev->tx_ring.SendWaitQueue) &&
 MP_TCB_RESOURCES_AVAILABLE(etdev)) {
 struct list_head *entry;

-entry = etdev->TxRing.SendWaitQueue.next;
+entry = etdev->tx_ring.SendWaitQueue.next;

-etdev->TxRing.nWaitSend--;
+etdev->tx_ring.nWaitSend--;
 }

 spin_unlock_irqrestore(&etdev->SendWaitLock, flags);
@@ -89,14 +89,13 @@
 * 14: UDP checksum assist
 */

-/* TX_DESC_ENTRY_t is sructure representing each descriptor on the ring */
+/* struct tx_desc represents each descriptor on the ring */
-typedef struct _tx_desc_entry_t {
+struct tx_desc {
-u32 DataBufferPtrHigh;
+u32 addr_hi;
-u32 DataBufferPtrLow;
+u32 addr_lo;
-u32 word2; /* control words how to xmit the */
+u32 len_vlan; /* control words how to xmit the */
-u32 word3; /* data (detailed above) */
+u32 flags; /* data (detailed above) */
-} TX_DESC_ENTRY_t, *PTX_DESC_ENTRY_t;
+};

-
 /* Typedefs for Tx DMA engine status writeback */

@@ -120,8 +119,8 @@ typedef union _tx_status_block_t {
 } TX_STATUS_BLOCK_t, *PTX_STATUS_BLOCK_t;

 /* TCB (Transmit Control Block) */
-typedef struct _MP_TCB {
+struct tcb {
-struct _MP_TCB *Next;
+struct tcb *Next;
 u32 Flags;
 u32 Count;
 u32 PacketStaleCount;
@@ -129,7 +128,7 @@ typedef struct _MP_TCB {
 u32 PacketLength;
 u32 WrIndex;
 u32 WrIndexStart;
-} MP_TCB, *PMP_TCB;
+};

 /* Structure to hold the skb's in a list */
 typedef struct tx_skb_list_elem {
@@ -137,14 +136,14 @@ typedef struct tx_skb_list_elem {
 struct sk_buff *skb;
 } TX_SKB_LIST_ELEM, *PTX_SKB_LIST_ELEM;

-/* TX_RING_t is sructure representing our local reference(s) to the ring */
+/* Structure representing our local reference(s) to the ring */
-typedef struct _tx_ring_t {
+struct tx_ring {
 /* TCB (Transmit Control Block) memory and lists */
-PMP_TCB MpTcbMem;
+struct tcb *MpTcbMem;

 /* List of TCBs that are ready to be used */
-PMP_TCB TCBReadyQueueHead;
+struct tcb *TCBReadyQueueHead;
-PMP_TCB TCBReadyQueueTail;
+struct tcb *TCBReadyQueueTail;

 /* list of TCBs that are currently being sent. NOTE that access to all
 * three of these (including nBusySend) are controlled via the
@@ -152,19 +151,19 @@ typedef struct _tx_ring_t {
 * decrementing nBusySend, or any queue manipulation on CurrSendHead /
 * Tail
 */
-PMP_TCB CurrSendHead;
+struct tcb *CurrSendHead;
-PMP_TCB CurrSendTail;
+struct tcb *CurrSendTail;
-int32_t nBusySend;
+int nBusySend;

 /* List of packets (not TCBs) that were queued for lack of resources */
 struct list_head SendWaitQueue;
-int32_t nWaitSend;
+int nWaitSend;

 /* The actual descriptor ring */
-PTX_DESC_ENTRY_t pTxDescRingVa;
+struct tx_desc *tx_desc_ring;
-dma_addr_t pTxDescRingPa;
+dma_addr_t tx_desc_ring_pa;
-uint64_t pTxDescRingAdjustedPa;
+u64 pTxDescRingAdjustedPa;
-uint64_t TxDescOffset;
+u64 TxDescOffset;

 /* ReadyToSend indicates where we last wrote to in the descriptor ring. */
 u32 txDmaReadyToSend;
@@ -180,8 +179,8 @@ typedef struct _tx_ring_t {
 TXMAC_ERR_t TxMacErr;

 /* Variables to track the Tx interrupt coalescing features */
-int32_t TxPacketsSinceLastinterrupt;
+int TxPacketsSinceLastinterrupt;
-} TX_RING_t, *PTX_RING_t;
+};

 /* Forward declaration of the frag-list for the following prototypes */
 typedef struct _MP_FRAG_LIST MP_FRAG_LIST, *PMP_FRAG_LIST;
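Putting the header hunks above together, the renamed structures read roughly as follows. This is a sketch reconstructed only from the lines visible in this diff; fields that the hunks do not show are elided and marked.

/* was TX_DESC_ENTRY_t */
struct tx_desc {
	u32 addr_hi;	/* was DataBufferPtrHigh */
	u32 addr_lo;	/* was DataBufferPtrLow */
	u32 len_vlan;	/* was word2: length / VLAN control word */
	u32 flags;	/* was word3: first/last/interrupt flags */
};

/* was MP_TCB / PMP_TCB */
struct tcb {
	struct tcb *Next;
	u32 Flags;
	u32 Count;
	u32 PacketStaleCount;
	/* ... fields not shown in this diff (e.g. the skb pointer used as tcb->Packet) ... */
	u32 PacketLength;
	u32 WrIndex;
	u32 WrIndexStart;
};

/* was TX_RING_t; members as they appear in the hunks above */
struct tx_ring {
	struct tcb *MpTcbMem;
	struct tcb *TCBReadyQueueHead;
	struct tcb *TCBReadyQueueTail;
	struct tcb *CurrSendHead;
	struct tcb *CurrSendTail;
	int nBusySend;
	struct list_head SendWaitQueue;
	int nWaitSend;
	struct tx_desc *tx_desc_ring;
	dma_addr_t tx_desc_ring_pa;
	u64 pTxDescRingAdjustedPa;
	u64 TxDescOffset;
	u32 txDmaReadyToSend;
	/* ... */
	TXMAC_ERR_t TxMacErr;
	int TxPacketsSinceLastinterrupt;
};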
@@ -101,8 +101,8 @@
 #define LO_MARK_PERCENT_FOR_RX 15

 /* Macros specific to the private adapter structure */
-#define MP_TCB_RESOURCES_AVAILABLE(_M) ((_M)->TxRing.nBusySend < NUM_TCB)
+#define MP_TCB_RESOURCES_AVAILABLE(_M) ((_M)->tx_ring.nBusySend < NUM_TCB)
-#define MP_TCB_RESOURCES_NOT_AVAILABLE(_M) ((_M)->TxRing.nBusySend >= NUM_TCB)
+#define MP_TCB_RESOURCES_NOT_AVAILABLE(_M) ((_M)->tx_ring.nBusySend >= NUM_TCB)

 #define MP_SHOULD_FAIL_SEND(_M) ((_M)->Flags & fMP_ADAPTER_FAIL_SEND_MASK)

@@ -255,7 +255,7 @@ struct et131x_adapter {
 MI_BMSR_t Bmsr;

 /* Tx Memory Variables */
-TX_RING_t TxRing;
+struct tx_ring tx_ring;

 /* Rx Memory Variables */
 RX_RING_t RxRing;
@@ -179,15 +179,15 @@ irqreturn_t et131x_isr(int irq, void *dev_id)
 /* This is our interrupt, so process accordingly */

 if (status & ET_INTR_WATCHDOG) {
-PMP_TCB pMpTcb = adapter->TxRing.CurrSendHead;
+struct tcb *tcb = adapter->tx_ring.CurrSendHead;

-if (pMpTcb)
+if (tcb)
-if (++pMpTcb->PacketStaleCount > 1)
+if (++tcb->PacketStaleCount > 1)
 status |= ET_INTR_TXDMA_ISR;

 if (adapter->RxRing.UnfinishedReceives)
 status |= ET_INTR_RXDMA_XFR_DONE;
-else if (pMpTcb == NULL)
+else if (tcb == NULL)
 writel(0, &adapter->regs->global.watchdog_timer);

 status &= ~ET_INTR_WATCHDOG;
@@ -397,7 +397,7 @@ void et131x_isr_handler(struct work_struct *work)

 /* Let's move on to the TxMac */
 if (status & ET_INTR_TXMAC) {
-etdev->TxRing.TxMacErr.value =
+etdev->tx_ring.TxMacErr.value =
 readl(&iomem->txmac.err.value);

 /*
@@ -412,7 +412,7 @@ void et131x_isr_handler(struct work_struct *work)
 */
 dev_warn(&etdev->pdev->dev,
 "TXMAC interrupt, error 0x%08x\n",
-etdev->TxRing.TxMacErr.value);
+etdev->tx_ring.TxMacErr.value);

 /* If we are debugging, we want to see this error,
 * otherwise we just want the device to be reset and
@@ -519,7 +519,7 @@ int et131x_tx(struct sk_buff *skb, struct net_device *netdev)
 void et131x_tx_timeout(struct net_device *netdev)
 {
 struct et131x_adapter *etdev = netdev_priv(netdev);
-PMP_TCB pMpTcb;
+struct tcb *tcb;
 unsigned long flags;

 /* Just skip this part if the adapter is doing link detection */
@@ -541,28 +541,28 @@ void et131x_tx_timeout(struct net_device *netdev)
 /* Is send stuck? */
 spin_lock_irqsave(&etdev->TCBSendQLock, flags);

-pMpTcb = etdev->TxRing.CurrSendHead;
+tcb = etdev->tx_ring.CurrSendHead;

-if (pMpTcb != NULL) {
+if (tcb != NULL) {
-pMpTcb->Count++;
+tcb->Count++;

-if (pMpTcb->Count > NIC_SEND_HANG_THRESHOLD) {
+if (tcb->Count > NIC_SEND_HANG_THRESHOLD) {
-TX_DESC_ENTRY_t StuckDescriptors[10];
+struct tx_desc stuck[10];

-if (INDEX10(pMpTcb->WrIndex) > 7) {
+if (INDEX10(tcb->WrIndex) > 7) {
-memcpy(StuckDescriptors,
+memcpy(stuck,
-etdev->TxRing.pTxDescRingVa +
+etdev->tx_ring.tx_desc_ring +
-INDEX10(pMpTcb->WrIndex) - 6,
+INDEX10(tcb->WrIndex) - 6,
-sizeof(TX_DESC_ENTRY_t) * 10);
+sizeof(struct tx_desc) * 10);
 }

 spin_unlock_irqrestore(&etdev->TCBSendQLock,
 flags);

 dev_warn(&etdev->pdev->dev,
-"Send stuck - reset. pMpTcb->WrIndex %x, Flags 0x%08x\n",
+"Send stuck - reset. tcb->WrIndex %x, Flags 0x%08x\n",
-pMpTcb->WrIndex,
+tcb->WrIndex,
-pMpTcb->Flags);
+tcb->Flags);

 et131x_close(netdev);
 et131x_open(netdev);
Block a user