tc35815: Whitespace cleanup

Cosmetic TAB/whitespace cleanups and some style cleanups.  No
functional changes.

Signed-off-by: Atsushi Nemoto <anemo@mba.ocn.ne.jp>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Author:    Atsushi Nemoto <anemo@mba.ocn.ne.jp>
Date:      2008-04-11 00:25:31 +09:00
Committer: Jeff Garzik <jgarzik@redhat.com>
Commit:    7f225b427b
Parent:    c6686fe3e4

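Most of the non-whitespace hunks below apply the same style rewrite over and over: an assignment buried in an if () condition is split out into its own statement and the result is tested with !ptr. A minimal standalone sketch of that pattern, using malloc() instead of the driver's allocators purely for illustration (not part of the patch):

#include <stdlib.h>

/* Illustrative only -- mirrors the __get_free_page()/pci_alloc_consistent()
 * call sites below: hoist the allocation out of the condition, then test
 * with !ptr.
 */
static void *alloc_buf(size_t len)
{
	void *buf;

	/* old form: if ((buf = malloc(len)) == NULL) return NULL; */
	buf = malloc(len);
	if (!buf)
		return NULL;
	return buf;
}

int main(void)
{
	void *p = alloc_buf(64);

	free(p);	/* free(NULL) is a no-op, so this is safe */
	return 0;
}
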
@@ -249,7 +249,7 @@ struct tc35815_regs {
 /* Int_En bit asign -------------------------------------------------------- */
 #define Int_NRAbtEn	0x00000800	/* 1:Non-recoverable Abort Enable */
-#define Int_TxCtlCmpEn	0x00000400	/* 1:Transmit Control Complete Enable */
+#define Int_TxCtlCmpEn	0x00000400	/* 1:Transmit Ctl Complete Enable */
 #define Int_DmParErrEn	0x00000200	/* 1:DMA Parity Error Enable */
 #define Int_DParDEn	0x00000100	/* 1:Data Parity Error Enable */
 #define Int_EarNotEn	0x00000080	/* 1:Early Notify Enable */
@@ -352,8 +352,10 @@ struct BDesc {
 /* Tuning parameters */
 #define DMA_BURST_SIZE	32
 #define TX_THRESHOLD	1024
-#define TX_THRESHOLD_MAX 1536	/* used threshold with packet max byte for low pci transfer ability.*/
-#define TX_THRESHOLD_KEEP_LIMIT 10	/* setting threshold max value when overrun error occured this count. */
+/* used threshold with packet max byte for low pci transfer ability.*/
+#define TX_THRESHOLD_MAX 1536
+/* setting threshold max value when overrun error occured this count. */
+#define TX_THRESHOLD_KEEP_LIMIT 10
 /* 16 + RX_BUF_NUM * 8 + RX_FD_NUM * 16 + TX_FD_NUM * 32 <= PAGE_SIZE*FD_PAGE_NUM */
 #ifdef TC35815_USE_PACKEDBUFFER
@@ -499,7 +501,8 @@ static void* alloc_rxbuf_page(struct pci_dev *hwdev, dma_addr_t *dma_handle)
 	void *buf;
 	/* pci_map + pci_dma_sync will be more effective than
 	 * pci_alloc_consistent on some archs. */
-	if ((buf = (void *)__get_free_page(GFP_ATOMIC)) == NULL)
+	buf = (void *)__get_free_page(GFP_ATOMIC);
+	if (!buf)
 		return NULL;
 	*dma_handle = pci_map_single(hwdev, buf, PAGE_SIZE,
 				     PCI_DMA_FROMDEVICE);
@@ -962,7 +965,6 @@ static void __devexit tc35815_remove_one (struct pci_dev *pdev)
 	kfree(lp->mii_bus.irq);
 	unregister_netdev(dev);
 	free_netdev(dev);
 	pci_set_drvdata(pdev, NULL);
 }
@@ -980,11 +982,17 @@ tc35815_init_queues(struct net_device *dev)
 		       sizeof(struct TxFD) * TX_FD_NUM >
 		       PAGE_SIZE * FD_PAGE_NUM);
-		if ((lp->fd_buf = pci_alloc_consistent(lp->pci_dev, PAGE_SIZE * FD_PAGE_NUM, &lp->fd_buf_dma)) == 0)
+		lp->fd_buf = pci_alloc_consistent(lp->pci_dev,
+						  PAGE_SIZE * FD_PAGE_NUM,
+						  &lp->fd_buf_dma);
+		if (!lp->fd_buf)
 			return -ENOMEM;
 		for (i = 0; i < RX_BUF_NUM; i++) {
 #ifdef TC35815_USE_PACKEDBUFFER
-			if ((lp->data_buf[i] = alloc_rxbuf_page(lp->pci_dev, &lp->data_buf_dma[i])) == NULL) {
+			lp->data_buf[i] =
+				alloc_rxbuf_page(lp->pci_dev,
+						 &lp->data_buf_dma[i]);
+			if (!lp->data_buf[i]) {
 				while (--i >= 0) {
 					free_rxbuf_page(lp->pci_dev,
 							lp->data_buf[i],
@@ -1027,18 +1035,17 @@ tc35815_init_queues(struct net_device *dev)
 #endif
 			printk("\n");
 	} else {
-		for (i = 0; i < FD_PAGE_NUM; i++) {
-			clear_page((void *)((unsigned long)lp->fd_buf + i * PAGE_SIZE));
-		}
+		for (i = 0; i < FD_PAGE_NUM; i++)
+			clear_page((void *)((unsigned long)lp->fd_buf +
+					    i * PAGE_SIZE));
 	}
 	fd_addr = (unsigned long)lp->fd_buf;
 	/* Free Descriptors (for Receive) */
 	lp->rfd_base = (struct RxFD *)fd_addr;
 	fd_addr += sizeof(struct RxFD) * RX_FD_NUM;
-	for (i = 0; i < RX_FD_NUM; i++) {
+	for (i = 0; i < RX_FD_NUM; i++)
 		lp->rfd_base[i].fd.FDCtl = cpu_to_le32(FD_CownsFD);
-	}
 	lp->rfd_cur = lp->rfd_base;
 	lp->rfd_limit = (struct RxFD *)fd_addr - (RX_FD_RESERVE + 1);
@@ -1366,9 +1373,9 @@ tc35815_open(struct net_device *dev)
 	 * This is used if the interrupt line can turned off (shared).
 	 * See 3c503.c for an example of selecting the IRQ at config-time.
 	 */
-	if (request_irq(dev->irq, &tc35815_interrupt, IRQF_SHARED, dev->name, dev)) {
+	if (request_irq(dev->irq, &tc35815_interrupt, IRQF_SHARED,
+			dev->name, dev))
 		return -EAGAIN;
-	}
 	tc35815_chip_reset(dev);
@@ -2182,8 +2189,7 @@ tc35815_set_multicast_list(struct net_device *dev)
 	struct tc35815_regs __iomem *tr =
 		(struct tc35815_regs __iomem *)dev->base_addr;
-	if (dev->flags&IFF_PROMISC)
-	{
+	if (dev->flags & IFF_PROMISC) {
 #ifdef WORKAROUND_100HALF_PROMISC
 		/* With some (all?) 100MHalf HUB, controller will hang
 		 * if we enabled promiscuous mode before linkup... */
@@ -2194,15 +2200,12 @@ tc35815_set_multicast_list(struct net_device *dev)
 #endif
 		/* Enable promiscuous mode */
 		tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl);
-	}
-	else if((dev->flags&IFF_ALLMULTI) || dev->mc_count > CAM_ENTRY_MAX - 3)
-	{
+	} else if ((dev->flags & IFF_ALLMULTI) ||
+		   dev->mc_count > CAM_ENTRY_MAX - 3) {
 		/* CAM 0, 1, 20 are reserved. */
 		/* Disable promiscuous mode, use normal mode. */
 		tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl);
-	}
-	else if(dev->mc_count)
-	{
+	} else if (dev->mc_count) {
 		struct dev_mc_list *cur_addr = dev->mc_list;
 		int i;
 		int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE);
@@ -2218,8 +2221,7 @@ tc35815_set_multicast_list(struct net_device *dev)
 		}
 		tc_writel(ena_bits, &tr->CAM_Ena);
 		tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl);
-	}
-	else {
+	} else {
 		tc_writel(CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena);
 		tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl);
 	}
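
The multicast hunks above convert the if/else-if chain to the usual kernel brace and operator-spacing style: spaces around binary operators, the opening brace on the same line as the condition, and "} else if (...) {" cuddled. A condensed, self-contained sketch of that shape (the flag values and return codes are placeholders, not the driver's):

#define FLAG_PROMISC	0x1	/* placeholder, not IFF_PROMISC */
#define FLAG_ALLMULTI	0x2	/* placeholder, not IFF_ALLMULTI */

/* Illustrative only -- the chain shape tc35815_set_multicast_list()
 * is reformatted to, with cuddled braces throughout.
 */
static int classify_rx_mode(unsigned int flags, int mc_count, int cam_max)
{
	if (flags & FLAG_PROMISC) {
		return 0;		/* accept everything */
	} else if ((flags & FLAG_ALLMULTI) ||
		   mc_count > cam_max - 3) {
		return 1;		/* accept all multicast */
	} else if (mc_count) {
		return 2;		/* program CAM entries */
	} else {
		return 3;		/* station + broadcast only */
	}
}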