gianfar: Add per queue structure support
This patch introduces per-TX and per-RX queue structures. Earlier, the members of these structures were part of the gfar_private structure itself; to support multiple queues going forward, gfar_private needs to be refactored so that introducing multiple queues is easier.

Signed-off-by: Sandeep Gopalpet <Sandeep.Kumar@freescale.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit a12f801d4b (parent 123b43e971), committed by David S. Miller
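Before the diff itself, a condensed sketch of the shape of the change may help. It is abridged from the structures this patch adds to gianfar.h (field lists trimmed, alignment attributes omitted); the full definitions appear in the gianfar.h hunks below.

    /* Ring state that used to live directly in gfar_private moves into
     * dedicated per-queue structures, each carrying its own lock: */
    struct gfar_priv_tx_q {
        spinlock_t txlock;              /* protects this queue's TX ring state */
        struct sk_buff **tx_skbuff;     /* per-queue TX skb ring */
        struct txbd8 *tx_bd_base, *cur_tx, *dirty_tx;
        struct net_device *dev;         /* back pointer: queue -> netdev */
        unsigned int tx_ring_size, num_txbdfree;
    };

    struct gfar_priv_rx_q {
        spinlock_t rxlock;
        struct napi_struct napi;        /* NAPI context moves from gfar_private
                                         * into the RX queue */
        struct sk_buff **rx_skbuff;
        struct rxbd8 *rx_bd_base, *cur_rx;
        struct net_device *dev;
        unsigned int rx_ring_size;
    };

    /* gfar_private keeps only pointers to the (single, for now) queues;
     * genuinely per-device parameters such as rx_buffer_size stay behind. */
    struct gfar_private {
        struct gfar_priv_tx_q *tx_queue;
        struct gfar_priv_rx_q *rx_queue;
        /* ... device-wide fields ... */
    };

Accordingly, the ring-processing functions change signature to take a queue instead of a net_device and recover the device through the back pointer, e.g. gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) with struct net_device *dev = tx_queue->dev. Once the queue pointers become arrays in a later patch, the same code paths can simply be run once per queue.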
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -8,9 +8,10 @@
  *
  * Author: Andy Fleming
  * Maintainer: Kumar Gala
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
  *
- * Copyright (c) 2002-2006 Freescale Semiconductor, Inc.
- * Copyright (c) 2007 MontaVista Software, Inc.
+ * Copyright 2002-2009 Freescale Semiconductor, Inc.
+ * Copyright 2007 MontaVista Software, Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -109,7 +110,7 @@ static void gfar_reset_task(struct work_struct *work);
 static void gfar_timeout(struct net_device *dev);
 static int gfar_close(struct net_device *dev);
 struct sk_buff *gfar_new_skb(struct net_device *dev);
-static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
+static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 		struct sk_buff *skb);
 static int gfar_set_mac_address(struct net_device *dev);
 static int gfar_change_mtu(struct net_device *dev, int new_mtu);
@@ -130,8 +131,8 @@ static int gfar_poll(struct napi_struct *napi, int budget);
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void gfar_netpoll(struct net_device *dev);
 #endif
-int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
-static int gfar_clean_tx_ring(struct net_device *dev);
+int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
+static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
 static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
 			      int amount_pull);
 static void gfar_vlan_rx_register(struct net_device *netdev,
@@ -147,16 +148,16 @@ MODULE_AUTHOR("Freescale Semiconductor, Inc");
 MODULE_DESCRIPTION("Gianfar Ethernet Driver");
 MODULE_LICENSE("GPL");
 
-static void gfar_init_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
+static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 			    dma_addr_t buf)
 {
-	struct gfar_private *priv = netdev_priv(dev);
+	struct net_device *dev = rx_queue->dev;
 	u32 lstatus;
 
 	bdp->bufPtr = buf;
 
 	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
-	if (bdp == priv->rx_bd_base + priv->rx_ring_size - 1)
+	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
 		lstatus |= BD_LFLAG(RXBD_WRAP);
 
 	eieio();
@@ -167,20 +168,25 @@ static void gfar_init_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
 static int gfar_init_bds(struct net_device *ndev)
 {
 	struct gfar_private *priv = netdev_priv(ndev);
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	struct txbd8 *txbdp;
 	struct rxbd8 *rxbdp;
 	int i;
 
+	tx_queue = priv->tx_queue;
+	rx_queue = priv->rx_queue;
+
 	/* Initialize some variables in our dev structure */
-	priv->num_txbdfree = priv->tx_ring_size;
-	priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
-	priv->cur_rx = priv->rx_bd_base;
-	priv->skb_curtx = priv->skb_dirtytx = 0;
-	priv->skb_currx = 0;
+	tx_queue->num_txbdfree = tx_queue->tx_ring_size;
+	tx_queue->dirty_tx = tx_queue->cur_tx = tx_queue->tx_bd_base;
+	rx_queue->cur_rx = rx_queue->rx_bd_base;
+	tx_queue->skb_curtx = tx_queue->skb_dirtytx = 0;
+	rx_queue->skb_currx = 0;
 
 	/* Initialize Transmit Descriptor Ring */
-	txbdp = priv->tx_bd_base;
-	for (i = 0; i < priv->tx_ring_size; i++) {
+	txbdp = tx_queue->tx_bd_base;
+	for (i = 0; i < tx_queue->tx_ring_size; i++) {
 		txbdp->lstatus = 0;
 		txbdp->bufPtr = 0;
 		txbdp++;
@@ -190,12 +196,12 @@ static int gfar_init_bds(struct net_device *ndev)
 	txbdp--;
 	txbdp->status |= TXBD_WRAP;
 
-	rxbdp = priv->rx_bd_base;
-	for (i = 0; i < priv->rx_ring_size; i++) {
-		struct sk_buff *skb = priv->rx_skbuff[i];
+	rxbdp = rx_queue->rx_bd_base;
+	for (i = 0; i < rx_queue->rx_ring_size; i++) {
+		struct sk_buff *skb = rx_queue->rx_skbuff[i];
 
 		if (skb) {
-			gfar_init_rxbdp(ndev, rxbdp, rxbdp->bufPtr);
+			gfar_init_rxbdp(rx_queue, rxbdp, rxbdp->bufPtr);
 		} else {
 			skb = gfar_new_skb(ndev);
 			if (!skb) {
@@ -203,9 +209,9 @@ static int gfar_init_bds(struct net_device *ndev)
 					ndev->name);
 				return -ENOMEM;
 			}
-			priv->rx_skbuff[i] = skb;
+			rx_queue->rx_skbuff[i] = skb;
 
-			gfar_new_rxbdp(ndev, rxbdp, skb);
+			gfar_new_rxbdp(rx_queue, rxbdp, skb);
 		}
 
 		rxbdp++;
@@ -220,12 +226,17 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
 	int i;
 	struct gfar_private *priv = netdev_priv(ndev);
 	struct device *dev = &priv->ofdev->dev;
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
 
+	tx_queue = priv->tx_queue;
+	rx_queue = priv->rx_queue;
+
 	/* Allocate memory for the buffer descriptors */
 	vaddr = dma_alloc_coherent(dev,
-			sizeof(*priv->tx_bd_base) * priv->tx_ring_size +
-			sizeof(*priv->rx_bd_base) * priv->rx_ring_size,
-			&priv->tx_bd_dma_base, GFP_KERNEL);
+			sizeof(*tx_queue->tx_bd_base) * tx_queue->tx_ring_size +
+			sizeof(*rx_queue->rx_bd_base) * rx_queue->rx_ring_size,
+			&tx_queue->tx_bd_dma_base, GFP_KERNEL);
 	if (!vaddr) {
 		if (netif_msg_ifup(priv))
 			pr_err("%s: Could not allocate buffer descriptors!\n",
@@ -233,36 +244,38 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
 		return -ENOMEM;
 	}
 
-	priv->tx_bd_base = vaddr;
+	tx_queue->tx_bd_base = vaddr;
+	tx_queue->dev = ndev;
 
 	/* Start the rx descriptor ring where the tx ring leaves off */
-	vaddr = vaddr + sizeof(*priv->tx_bd_base) * priv->tx_ring_size;
-	priv->rx_bd_base = vaddr;
+	vaddr = vaddr + sizeof(*tx_queue->tx_bd_base) * tx_queue->tx_ring_size;
+	rx_queue->rx_bd_base = vaddr;
+	rx_queue->dev = ndev;
 
 	/* Setup the skbuff rings */
-	priv->tx_skbuff = kmalloc(sizeof(*priv->tx_skbuff) *
-				  priv->tx_ring_size, GFP_KERNEL);
-	if (!priv->tx_skbuff) {
+	tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
+				      tx_queue->tx_ring_size, GFP_KERNEL);
+	if (!tx_queue->tx_skbuff) {
 		if (netif_msg_ifup(priv))
 			pr_err("%s: Could not allocate tx_skbuff\n",
 			       ndev->name);
 		goto cleanup;
 	}
 
-	for (i = 0; i < priv->tx_ring_size; i++)
-		priv->tx_skbuff[i] = NULL;
+	for (i = 0; i < tx_queue->tx_ring_size; i++)
+		tx_queue->tx_skbuff[i] = NULL;
 
-	priv->rx_skbuff = kmalloc(sizeof(*priv->rx_skbuff) *
-				  priv->rx_ring_size, GFP_KERNEL);
-	if (!priv->rx_skbuff) {
+	rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
+				      rx_queue->rx_ring_size, GFP_KERNEL);
+	if (!rx_queue->rx_skbuff) {
 		if (netif_msg_ifup(priv))
 			pr_err("%s: Could not allocate rx_skbuff\n",
 			       ndev->name);
 		goto cleanup;
 	}
 
-	for (i = 0; i < priv->rx_ring_size; i++)
-		priv->rx_skbuff[i] = NULL;
+	for (i = 0; i < rx_queue->rx_ring_size; i++)
+		rx_queue->rx_skbuff[i] = NULL;
 
 	if (gfar_init_bds(ndev))
 		goto cleanup;
@@ -278,24 +291,29 @@ static void gfar_init_mac(struct net_device *ndev)
 {
 	struct gfar_private *priv = netdev_priv(ndev);
 	struct gfar __iomem *regs = priv->regs;
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	u32 rctrl = 0;
 	u32 tctrl = 0;
 	u32 attrs = 0;
 
+	tx_queue = priv->tx_queue;
+	rx_queue = priv->rx_queue;
+
 	/* enet DMA only understands physical addresses */
-	gfar_write(&regs->tbase0, priv->tx_bd_dma_base);
-	gfar_write(&regs->rbase0, priv->tx_bd_dma_base +
-			sizeof(*priv->tx_bd_base) *
-			priv->tx_ring_size);
+	gfar_write(&regs->tbase0, tx_queue->tx_bd_dma_base);
+	gfar_write(&regs->rbase0, tx_queue->tx_bd_dma_base +
+			sizeof(*tx_queue->tx_bd_base) *
+			tx_queue->tx_ring_size);
 
 	/* Configure the coalescing support */
 	gfar_write(&regs->txic, 0);
-	if (priv->txcoalescing)
-		gfar_write(&regs->txic, priv->txic);
+	if (tx_queue->txcoalescing)
+		gfar_write(&regs->txic, tx_queue->txic);
 
 	gfar_write(&regs->rxic, 0);
-	if (priv->rxcoalescing)
-		gfar_write(&regs->rxic, priv->rxic);
+	if (rx_queue->rxcoalescing)
+		gfar_write(&regs->rxic, rx_queue->rxic);
 
 	if (priv->rx_csum_enable)
 		rctrl |= RCTRL_CHECKSUMMING;
@@ -414,7 +432,7 @@ static int gfar_of_init(struct net_device *dev)
 
 	stash = of_get_property(np, "bd-stash", NULL);
 
-	if(stash) {
+	if (stash) {
 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
 		priv->bd_stash_en = 1;
 	}
@@ -519,8 +537,18 @@ static int gfar_probe(struct of_device *ofdev,
 	if (err)
 		goto regs_fail;
 
-	spin_lock_init(&priv->txlock);
-	spin_lock_init(&priv->rxlock);
+	priv->tx_queue = (struct gfar_priv_tx_q *)kmalloc(
+				sizeof (struct gfar_priv_tx_q), GFP_KERNEL);
+	if (!priv->tx_queue)
+		goto regs_fail;
+
+	priv->rx_queue = (struct gfar_priv_rx_q *)kmalloc(
+				sizeof (struct gfar_priv_rx_q), GFP_KERNEL);
+	if (!priv->rx_queue)
+		goto rx_queue_fail;
+
+	spin_lock_init(&priv->tx_queue->txlock);
+	spin_lock_init(&priv->rx_queue->rxlock);
 	spin_lock_init(&priv->bflock);
 	INIT_WORK(&priv->reset_task, gfar_reset_task);
 
@@ -552,12 +580,13 @@ static int gfar_probe(struct of_device *ofdev,
 
 	/* Fill in the dev structure */
 	dev->watchdog_timeo = TX_TIMEOUT;
-	netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT);
 	dev->mtu = 1500;
 
 	dev->netdev_ops = &gfar_netdev_ops;
 	dev->ethtool_ops = &gfar_ethtool_ops;
 
+	/* Register for napi ...NAPI is for each rx_queue */
+	netif_napi_add(dev, &priv->rx_queue->napi, gfar_poll, GFAR_DEV_WEIGHT);
+
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
 		priv->rx_csum_enable = 1;
 		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
@@ -613,14 +642,16 @@ static int gfar_probe(struct of_device *ofdev,
 		dev->hard_header_len += GMAC_FCB_LEN;
 
 	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
-	priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
-	priv->rx_ring_size = DEFAULT_RX_RING_SIZE;
-	priv->num_txbdfree = DEFAULT_TX_RING_SIZE;
 
-	priv->txcoalescing = DEFAULT_TX_COALESCE;
-	priv->txic = DEFAULT_TXIC;
-	priv->rxcoalescing = DEFAULT_RX_COALESCE;
-	priv->rxic = DEFAULT_RXIC;
+	/* Initializing some of the rx/tx queue level parameters */
+	priv->tx_queue->tx_ring_size = DEFAULT_TX_RING_SIZE;
+	priv->tx_queue->num_txbdfree = DEFAULT_TX_RING_SIZE;
+	priv->tx_queue->txcoalescing = DEFAULT_TX_COALESCE;
+	priv->tx_queue->txic = DEFAULT_TXIC;
+
+	priv->rx_queue->rx_ring_size = DEFAULT_RX_RING_SIZE;
+	priv->rx_queue->rxcoalescing = DEFAULT_RX_COALESCE;
+	priv->rx_queue->rxic = DEFAULT_RXIC;
 
 	/* Enable most messages by default */
 	priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
@@ -666,12 +697,15 @@ static int gfar_probe(struct of_device *ofdev,
 	/* provided which set of benchmarks. */
 	printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
 	printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
-	       dev->name, priv->rx_ring_size, priv->tx_ring_size);
+	       dev->name, priv->rx_queue->rx_ring_size, priv->tx_queue->tx_ring_size);
 
 	return 0;
 
 register_fail:
 	iounmap(priv->regs);
+	kfree(priv->rx_queue);
+rx_queue_fail:
+	kfree(priv->tx_queue);
 regs_fail:
 	if (priv->phy_node)
 		of_node_put(priv->phy_node);
@@ -705,6 +739,8 @@ static int gfar_suspend(struct device *dev)
 {
 	struct gfar_private *priv = dev_get_drvdata(dev);
 	struct net_device *ndev = priv->ndev;
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	unsigned long flags;
 	u32 tempval;
 
@@ -712,10 +748,12 @@ static int gfar_suspend(struct device *dev)
 		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
 
 	netif_device_detach(ndev);
+	tx_queue = priv->tx_queue;
+	rx_queue = priv->rx_queue;
 
 	if (netif_running(ndev)) {
-		spin_lock_irqsave(&priv->txlock, flags);
-		spin_lock(&priv->rxlock);
+		spin_lock_irqsave(&tx_queue->txlock, flags);
+		spin_lock(&rx_queue->rxlock);
 
 		gfar_halt_nodisable(ndev);
 
@@ -729,10 +767,10 @@ static int gfar_suspend(struct device *dev)
 
 		gfar_write(&priv->regs->maccfg1, tempval);
 
-		spin_unlock(&priv->rxlock);
-		spin_unlock_irqrestore(&priv->txlock, flags);
+		spin_unlock(&rx_queue->rxlock);
+		spin_unlock_irqrestore(&tx_queue->txlock, flags);
 
-		napi_disable(&priv->napi);
+		napi_disable(&rx_queue->napi);
 
 		if (magic_packet) {
 			/* Enable interrupt on Magic Packet */
@@ -754,6 +792,8 @@ static int gfar_resume(struct device *dev)
 {
 	struct gfar_private *priv = dev_get_drvdata(dev);
 	struct net_device *ndev = priv->ndev;
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	unsigned long flags;
 	u32 tempval;
 	int magic_packet = priv->wol_en &&
@@ -770,9 +810,11 @@ static int gfar_resume(struct device *dev)
 	/* Disable Magic Packet mode, in case something
 	 * else woke us up.
 	 */
+	rx_queue = priv->rx_queue;
+	tx_queue = priv->tx_queue;
 
-	spin_lock_irqsave(&priv->txlock, flags);
-	spin_lock(&priv->rxlock);
+	spin_lock_irqsave(&tx_queue->txlock, flags);
+	spin_lock(&rx_queue->rxlock);
 
 	tempval = gfar_read(&priv->regs->maccfg2);
 	tempval &= ~MACCFG2_MPEN;
@@ -780,12 +822,12 @@ static int gfar_resume(struct device *dev)
 
 	gfar_start(ndev);
 
-	spin_unlock(&priv->rxlock);
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	spin_unlock(&rx_queue->rxlock);
+	spin_unlock_irqrestore(&tx_queue->txlock, flags);
 
 	netif_device_attach(ndev);
 
-	napi_enable(&priv->napi);
+	napi_enable(&rx_queue->napi);
 
 	return 0;
 }
@@ -1060,18 +1102,23 @@ void gfar_halt(struct net_device *dev)
 void stop_gfar(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	unsigned long flags;
 
 	phy_stop(priv->phydev);
 
+	tx_queue = priv->tx_queue;
+	rx_queue = priv->rx_queue;
+
 	/* Lock it down */
-	spin_lock_irqsave(&priv->txlock, flags);
-	spin_lock(&priv->rxlock);
+	spin_lock_irqsave(&tx_queue->txlock, flags);
+	spin_lock(&rx_queue->rxlock);
 
 	gfar_halt(dev);
 
-	spin_unlock(&priv->rxlock);
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	spin_unlock(&rx_queue->rxlock);
+	spin_unlock_irqrestore(&tx_queue->txlock, flags);
 
 	/* Free the IRQs */
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
@@ -1092,46 +1139,50 @@ static void free_skb_resources(struct gfar_private *priv)
 	struct device *dev = &priv->ofdev->dev;
 	struct rxbd8 *rxbdp;
 	struct txbd8 *txbdp;
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	int i, j;
 
 	/* Go through all the buffer descriptors and free their data buffers */
-	txbdp = priv->tx_bd_base;
+	tx_queue = priv->tx_queue;
+	txbdp = tx_queue->tx_bd_base;
 
-	if (!priv->tx_skbuff)
+	if (!tx_queue->tx_skbuff)
 		goto skip_tx_skbuff;
 
-	for (i = 0; i < priv->tx_ring_size; i++) {
-		if (!priv->tx_skbuff[i])
+	for (i = 0; i < tx_queue->tx_ring_size; i++) {
+		if (!tx_queue->tx_skbuff[i])
 			continue;
 
 		dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
 				txbdp->length, DMA_TO_DEVICE);
 		txbdp->lstatus = 0;
-		for (j = 0; j < skb_shinfo(priv->tx_skbuff[i])->nr_frags; j++) {
+		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; j++) {
 			txbdp++;
 			dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
 					txbdp->length, DMA_TO_DEVICE);
 		}
 		txbdp++;
-		dev_kfree_skb_any(priv->tx_skbuff[i]);
-		priv->tx_skbuff[i] = NULL;
+		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
+		tx_queue->tx_skbuff[i] = NULL;
 	}
 
-	kfree(priv->tx_skbuff);
+	kfree(tx_queue->tx_skbuff);
 skip_tx_skbuff:
 
-	rxbdp = priv->rx_bd_base;
+	rx_queue = priv->rx_queue;
+	rxbdp = rx_queue->rx_bd_base;
 
-	if (!priv->rx_skbuff)
+	if (!rx_queue->rx_skbuff)
 		goto skip_rx_skbuff;
 
-	for (i = 0; i < priv->rx_ring_size; i++) {
-		if (priv->rx_skbuff[i]) {
+	for (i = 0; i < rx_queue->rx_ring_size; i++) {
+		if (rx_queue->rx_skbuff[i]) {
 			dma_unmap_single(&priv->ofdev->dev, rxbdp->bufPtr,
 					priv->rx_buffer_size,
 					DMA_FROM_DEVICE);
-			dev_kfree_skb_any(priv->rx_skbuff[i]);
-			priv->rx_skbuff[i] = NULL;
+			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
+			rx_queue->rx_skbuff[i] = NULL;
 		}
 
 		rxbdp->lstatus = 0;
@@ -1139,17 +1190,19 @@ skip_tx_skbuff:
 		rxbdp++;
 	}
 
-	kfree(priv->rx_skbuff);
+	kfree(rx_queue->rx_skbuff);
skip_rx_skbuff:
 
-	dma_free_coherent(dev, sizeof(*txbdp) * priv->tx_ring_size +
-			sizeof(*rxbdp) * priv->rx_ring_size,
-			priv->tx_bd_base, priv->tx_bd_dma_base);
+	dma_free_coherent(dev, sizeof(*txbdp) * tx_queue->tx_ring_size +
+			sizeof(*rxbdp) * rx_queue->rx_ring_size,
+			tx_queue->tx_bd_base, tx_queue->tx_bd_dma_base);
 }
 
 void gfar_start(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_tx_q *tx_queue;
+	struct gfar_priv_rx_q *rx_queue;
 	struct gfar __iomem *regs = priv->regs;
 	u32 tempval;
 
@@ -1258,7 +1311,7 @@ static int gfar_enet_open(struct net_device *dev)
 	struct gfar_private *priv = netdev_priv(dev);
 	int err;
 
-	napi_enable(&priv->napi);
+	napi_enable(&priv->rx_queue->napi);
 
 	skb_queue_head_init(&priv->rx_recycle);
 
@@ -1269,14 +1322,14 @@ static int gfar_enet_open(struct net_device *dev)
 
 	err = init_phy(dev);
 
-	if(err) {
-		napi_disable(&priv->napi);
+	if (err) {
+		napi_disable(&priv->rx_queue->napi);
 		return err;
 	}
 
 	err = startup_gfar(dev);
 	if (err) {
-		napi_disable(&priv->napi);
+		napi_disable(&priv->rx_queue->napi);
 		return err;
 	}
 
@@ -1349,6 +1402,7 @@ static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_tx_q *tx_queue = NULL;
 	struct txfcb *fcb = NULL;
 	struct txbd8 *txbdp, *txbdp_start, *base;
 	u32 lstatus;
@@ -1357,7 +1411,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned long flags;
 	unsigned int nr_frags, length;
 
-	base = priv->tx_bd_base;
+	tx_queue = priv->tx_queue;
+	base = tx_queue->tx_bd_base;
 
 	/* make space for additional header when fcb is needed */
 	if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
@@ -1378,21 +1433,21 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* total number of fragments in the SKB */
 	nr_frags = skb_shinfo(skb)->nr_frags;
 
-	spin_lock_irqsave(&priv->txlock, flags);
+	spin_lock_irqsave(&tx_queue->txlock, flags);
 
 	/* check if there is space to queue this packet */
-	if ((nr_frags+1) > priv->num_txbdfree) {
+	if ((nr_frags+1) > tx_queue->num_txbdfree) {
 		/* no space, stop the queue */
 		netif_stop_queue(dev);
 		dev->stats.tx_fifo_errors++;
-		spin_unlock_irqrestore(&priv->txlock, flags);
+		spin_unlock_irqrestore(&tx_queue->txlock, flags);
 		return NETDEV_TX_BUSY;
 	}
 
 	/* Update transmit stats */
 	dev->stats.tx_bytes += skb->len;
 
-	txbdp = txbdp_start = priv->cur_tx;
+	txbdp = txbdp_start = tx_queue->cur_tx;
 
 	if (nr_frags == 0) {
 		lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
@@ -1400,7 +1455,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		/* Place the fragment addresses and lengths into the TxBDs */
 		for (i = 0; i < nr_frags; i++) {
 			/* Point at the next BD, wrapping as needed */
-			txbdp = next_txbd(txbdp, base, priv->tx_ring_size);
+			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
 
 			length = skb_shinfo(skb)->frags[i].size;
 
@@ -1442,7 +1497,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	/* setup the TxBD length and buffer pointer for the first BD */
-	priv->tx_skbuff[priv->skb_curtx] = skb;
+	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
 	txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
 			skb_headlen(skb), DMA_TO_DEVICE);
 
@@ -1462,19 +1517,19 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* Update the current skb pointer to the next entry we will use
 	 * (wrapping if necessary) */
-	priv->skb_curtx = (priv->skb_curtx + 1) &
-		TX_RING_MOD_MASK(priv->tx_ring_size);
+	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
+		TX_RING_MOD_MASK(tx_queue->tx_ring_size);
 
-	priv->cur_tx = next_txbd(txbdp, base, priv->tx_ring_size);
+	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
 
 	/* reduce TxBD free count */
-	priv->num_txbdfree -= (nr_frags + 1);
+	tx_queue->num_txbdfree -= (nr_frags + 1);
 
 	dev->trans_start = jiffies;
 
 	/* If the next BD still needs to be cleaned up, then the bds
 	   are full.  We need to tell the kernel to stop sending us stuff. */
-	if (!priv->num_txbdfree) {
+	if (!tx_queue->num_txbdfree) {
 		netif_stop_queue(dev);
 
 		dev->stats.tx_fifo_errors++;
@@ -1484,7 +1539,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
 
 	/* Unlock priv */
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	spin_unlock_irqrestore(&tx_queue->txlock, flags);
 
 	return NETDEV_TX_OK;
 }
@@ -1494,7 +1549,7 @@ static int gfar_close(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
 
-	napi_disable(&priv->napi);
+	napi_disable(&priv->rx_queue->napi);
 
 	skb_queue_purge(&priv->rx_recycle);
 	cancel_work_sync(&priv->reset_task);
@@ -1523,10 +1578,12 @@ static void gfar_vlan_rx_register(struct net_device *dev,
 		struct vlan_group *grp)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	unsigned long flags;
 	u32 tempval;
 
-	spin_lock_irqsave(&priv->rxlock, flags);
+	rx_queue = priv->rx_queue;
+	spin_lock_irqsave(&rx_queue->rxlock, flags);
 
 	priv->vlgrp = grp;
 
@@ -1560,7 +1617,7 @@ static void gfar_vlan_rx_register(struct net_device *dev,
 
 	gfar_change_mtu(dev, dev->mtu);
 
-	spin_unlock_irqrestore(&priv->rxlock, flags);
+	spin_unlock_irqrestore(&rx_queue->rxlock, flags);
 }
 
 static int gfar_change_mtu(struct net_device *dev, int new_mtu)
@@ -1649,24 +1706,27 @@ static void gfar_timeout(struct net_device *dev)
 }
 
 /* Interrupt Handler for Transmit complete */
-static int gfar_clean_tx_ring(struct net_device *dev)
+static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 {
+	struct net_device *dev = tx_queue->dev;
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	struct txbd8 *bdp;
 	struct txbd8 *lbdp = NULL;
-	struct txbd8 *base = priv->tx_bd_base;
+	struct txbd8 *base = tx_queue->tx_bd_base;
 	struct sk_buff *skb;
 	int skb_dirtytx;
-	int tx_ring_size = priv->tx_ring_size;
+	int tx_ring_size = tx_queue->tx_ring_size;
 	int frags = 0;
 	int i;
 	int howmany = 0;
 	u32 lstatus;
 
-	bdp = priv->dirty_tx;
-	skb_dirtytx = priv->skb_dirtytx;
+	rx_queue = priv->rx_queue;
+	bdp = tx_queue->dirty_tx;
+	skb_dirtytx = tx_queue->skb_dirtytx;
 
-	while ((skb = priv->tx_skbuff[skb_dirtytx])) {
+	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
 		frags = skb_shinfo(skb)->nr_frags;
 		lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
 
@@ -1698,29 +1758,29 @@ static int gfar_clean_tx_ring(struct net_device *dev)
 		 * If there's room in the queue (limit it to rx_buffer_size)
 		 * we add this skb back into the pool, if it's the right size
 		 */
-		if (skb_queue_len(&priv->rx_recycle) < priv->rx_ring_size &&
+		if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
 				skb_recycle_check(skb, priv->rx_buffer_size +
 					RXBUF_ALIGNMENT))
 			__skb_queue_head(&priv->rx_recycle, skb);
 		else
 			dev_kfree_skb_any(skb);
 
-		priv->tx_skbuff[skb_dirtytx] = NULL;
+		tx_queue->tx_skbuff[skb_dirtytx] = NULL;
 
 		skb_dirtytx = (skb_dirtytx + 1) &
 			TX_RING_MOD_MASK(tx_ring_size);
 
 		howmany++;
-		priv->num_txbdfree += frags + 1;
+		tx_queue->num_txbdfree += frags + 1;
 	}
 
 	/* If we freed a buffer, we can restart transmission, if necessary */
-	if (netif_queue_stopped(dev) && priv->num_txbdfree)
+	if (netif_queue_stopped(dev) && tx_queue->num_txbdfree)
 		netif_wake_queue(dev);
 
 	/* Update dirty indicators */
-	priv->skb_dirtytx = skb_dirtytx;
-	priv->dirty_tx = bdp;
+	tx_queue->skb_dirtytx = skb_dirtytx;
+	tx_queue->dirty_tx = bdp;
 
 	dev->stats.tx_packets += howmany;
 
@@ -1730,14 +1790,18 @@ static int gfar_clean_tx_ring(struct net_device *dev)
 static void gfar_schedule_cleanup(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	unsigned long flags;
 
-	spin_lock_irqsave(&priv->txlock, flags);
-	spin_lock(&priv->rxlock);
+	rx_queue = priv->rx_queue;
+	tx_queue = priv->tx_queue;
+	spin_lock_irqsave(&tx_queue->txlock, flags);
+	spin_lock(&rx_queue->rxlock);
 
-	if (napi_schedule_prep(&priv->napi)) {
+	if (napi_schedule_prep(&rx_queue->napi)) {
 		gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED);
-		__napi_schedule(&priv->napi);
+		__napi_schedule(&rx_queue->napi);
 	} else {
 		/*
 		 * Clear IEVENT, so interrupts aren't called again
@@ -1746,8 +1810,8 @@ static void gfar_schedule_cleanup(struct net_device *dev)
 		gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
 	}
 
-	spin_unlock(&priv->rxlock);
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	spin_unlock(&rx_queue->rxlock);
+	spin_unlock_irqrestore(&tx_queue->txlock, flags);
 }
 
 /* Interrupt Handler for Transmit complete */
@@ -1757,15 +1821,16 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
+static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 		struct sk_buff *skb)
 {
+	struct net_device *dev = rx_queue->dev;
 	struct gfar_private *priv = netdev_priv(dev);
 	dma_addr_t buf;
 
 	buf = dma_map_single(&priv->ofdev->dev, skb->data,
 			priv->rx_buffer_size, DMA_FROM_DEVICE);
-	gfar_init_rxbdp(dev, bdp, buf);
+	gfar_init_rxbdp(rx_queue, bdp, buf);
 }
 
 
@@ -1890,8 +1955,9 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
 * until the budget/quota has been reached. Returns the number
 * of frames handled
 */
-int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
+int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 {
+	struct net_device *dev = rx_queue->dev;
 	struct rxbd8 *bdp, *base;
 	struct sk_buff *skb;
 	int pkt_len;
@@ -1900,8 +1966,8 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 	struct gfar_private *priv = netdev_priv(dev);
 
 	/* Get the first full descriptor */
-	bdp = priv->cur_rx;
-	base = priv->rx_bd_base;
+	bdp = rx_queue->cur_rx;
+	base = rx_queue->rx_bd_base;
 
 	amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
 		priv->padding;
@@ -1913,7 +1979,7 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 		/* Add another skb for the future */
 		newskb = gfar_new_skb(dev);
 
-		skb = priv->rx_skbuff[priv->skb_currx];
+		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
 
 		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
 				priv->rx_buffer_size, DMA_FROM_DEVICE);
@@ -1961,30 +2027,33 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 
 		}
 
-		priv->rx_skbuff[priv->skb_currx] = newskb;
+		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
 
 		/* Setup the new bdp */
-		gfar_new_rxbdp(dev, bdp, newskb);
+		gfar_new_rxbdp(rx_queue, bdp, newskb);
 
 		/* Update to the next pointer */
-		bdp = next_bd(bdp, base, priv->rx_ring_size);
+		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
 
 		/* update to point at the next skb */
-		priv->skb_currx =
-			(priv->skb_currx + 1) &
-			RX_RING_MOD_MASK(priv->rx_ring_size);
+		rx_queue->skb_currx =
+			(rx_queue->skb_currx + 1) &
+			RX_RING_MOD_MASK(rx_queue->rx_ring_size);
 	}
 
 	/* Update the current rxbd pointer to be the next one */
-	priv->cur_rx = bdp;
+	rx_queue->cur_rx = bdp;
 
 	return howmany;
 }
 
 static int gfar_poll(struct napi_struct *napi, int budget)
 {
-	struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
-	struct net_device *dev = priv->ndev;
+	struct gfar_priv_rx_q *rx_queue = container_of(napi,
+			struct gfar_priv_rx_q, napi);
+	struct net_device *dev = rx_queue->dev;
+	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_tx_q *tx_queue = NULL;
 	int tx_cleaned = 0;
 	int rx_cleaned = 0;
 	unsigned long flags;
@@ -1992,14 +2061,15 @@ static int gfar_poll(struct napi_struct *napi, int budget)
 	/* Clear IEVENT, so interrupts aren't called again
 	 * because of the packets that have already arrived */
 	gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
+	tx_queue = priv->tx_queue;
 
 	/* If we fail to get the lock, don't bother with the TX BDs */
-	if (spin_trylock_irqsave(&priv->txlock, flags)) {
-		tx_cleaned = gfar_clean_tx_ring(dev);
-		spin_unlock_irqrestore(&priv->txlock, flags);
+	if (spin_trylock_irqsave(&tx_queue->txlock, flags)) {
+		tx_cleaned = gfar_clean_tx_ring(tx_queue);
+		spin_unlock_irqrestore(&tx_queue->txlock, flags);
 	}
 
-	rx_cleaned = gfar_clean_rx_ring(dev, budget);
+	rx_cleaned = gfar_clean_rx_ring(rx_queue, budget);
 
 	if (tx_cleaned)
 		return budget;
@@ -2014,13 +2084,13 @@ static int gfar_poll(struct napi_struct *napi, int budget)
 
 		/* If we are coalescing interrupts, update the timer */
 		/* Otherwise, clear it */
-		if (likely(priv->rxcoalescing)) {
+		if (likely(rx_queue->rxcoalescing)) {
 			gfar_write(&priv->regs->rxic, 0);
-			gfar_write(&priv->regs->rxic, priv->rxic);
+			gfar_write(&priv->regs->rxic, rx_queue->rxic);
 		}
-		if (likely(priv->txcoalescing)) {
+		if (likely(tx_queue->txcoalescing)) {
 			gfar_write(&priv->regs->txic, 0);
-			gfar_write(&priv->regs->txic, priv->txic);
+			gfar_write(&priv->regs->txic, tx_queue->txic);
 		}
 	}
 
@@ -2087,12 +2157,14 @@ static irqreturn_t gfar_interrupt(int irq, void *dev_id)
 static void adjust_link(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_tx_q *tx_queue = NULL;
 	struct gfar __iomem *regs = priv->regs;
 	unsigned long flags;
 	struct phy_device *phydev = priv->phydev;
 	int new_state = 0;
 
-	spin_lock_irqsave(&priv->txlock, flags);
+	tx_queue = priv->tx_queue;
+	spin_lock_irqsave(&tx_queue->txlock, flags);
 	if (phydev->link) {
 		u32 tempval = gfar_read(&regs->maccfg2);
 		u32 ecntrl = gfar_read(&regs->ecntrl);
@@ -2158,7 +2230,7 @@ static void adjust_link(struct net_device *dev)
 	if (new_state && netif_msg_link(priv))
 		phy_print_status(phydev);
 
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	spin_unlock_irqrestore(&tx_queue->txlock, flags);
 }
 
 /* Update the hash table based on the current list of multicast
@@ -2172,7 +2244,7 @@ static void gfar_set_multi(struct net_device *dev)
 	struct gfar __iomem *regs = priv->regs;
 	u32 tempval;
 
-	if(dev->flags & IFF_PROMISC) {
+	if (dev->flags & IFF_PROMISC) {
 		/* Set RCTRL to PROM */
 		tempval = gfar_read(&regs->rctrl);
 		tempval |= RCTRL_PROM;
@@ -2184,7 +2256,7 @@ static void gfar_set_multi(struct net_device *dev)
 		gfar_write(&regs->rctrl, tempval);
 	}
 
-	if(dev->flags & IFF_ALLMULTI) {
+	if (dev->flags & IFF_ALLMULTI) {
 		/* Set the hash to rx all multicast frames */
 		gfar_write(&regs->igaddr0, 0xffffffff);
 		gfar_write(&regs->igaddr1, 0xffffffff);
@@ -2236,7 +2308,7 @@ static void gfar_set_multi(struct net_device *dev)
 		em_num = 0;
 	}
 
-	if(dev->mc_count == 0)
+	if (dev->mc_count == 0)
 		return;
 
 	/* Parse the list, and set the appropriate bits */
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -7,8 +7,9 @@
  *
  * Author: Andy Fleming
  * Maintainer: Kumar Gala
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
  *
- * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
+ * Copyright 2002-2009 Freescale Semiconductor, Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -699,6 +700,76 @@ struct gfar {
 #define FSL_GIANFAR_DEV_HAS_BD_STASHING		0x00000200
 #define FSL_GIANFAR_DEV_HAS_BUF_STASHING	0x00000400
 
+/**
+ *	struct gfar_priv_tx_q - per tx queue structure
+ *	@txlock: per queue tx spin lock
+ *	@tx_skbuff:skb pointers
+ *	@skb_curtx: to be used skb pointer
+ *	@skb_dirtytx:the last used skb pointer
+ *	@qindex: index of this queue
+ *	@dev: back pointer to the dev structure
+ *	@grp: back pointer to the group to which this queue belongs
+ *	@tx_bd_base: First tx buffer descriptor
+ *	@cur_tx: Next free ring entry
+ *	@dirty_tx: First buffer in line to be transmitted
+ *	@tx_ring_size: Tx ring size
+ *	@num_txbdfree: number of free TxBds
+ *	@txcoalescing: enable/disable tx coalescing
+ *	@txic: transmit interrupt coalescing value
+ *	@txcount: coalescing value if based on tx frame count
+ *	@txtime: coalescing value if based on time
+ */
+struct gfar_priv_tx_q {
+	spinlock_t txlock __attribute__ ((aligned (SMP_CACHE_BYTES)));
+	struct sk_buff ** tx_skbuff;
+	/* Buffer descriptor pointers */
+	dma_addr_t tx_bd_dma_base;
+	struct txbd8 *tx_bd_base;
+	struct txbd8 *cur_tx;
+	struct txbd8 *dirty_tx;
+	struct net_device *dev;
+	u16 skb_curtx;
+	u16 skb_dirtytx;
+	u16 qindex;
+	unsigned int tx_ring_size;
+	unsigned int num_txbdfree;
+	/* Configuration info for the coalescing features */
+	unsigned char txcoalescing;
+	unsigned long txic;
+	unsigned short txcount;
+	unsigned short txtime;
+};
+
+/**
+ *	struct gfar_priv_rx_q - per rx queue structure
+ *	@rxlock: per queue rx spin lock
+ *	@napi: the napi poll function
+ *	@rx_skbuff: skb pointers
+ *	@skb_currx: currently use skb pointer
+ *	@rx_bd_base: First rx buffer descriptor
+ *	@cur_rx: Next free rx ring entry
+ *	@qindex: index of this queue
+ *	@dev: back pointer to the dev structure
+ *	@rx_ring_size: Rx ring size
+ *	@rxcoalescing: enable/disable rx-coalescing
+ *	@rxic: receive interrupt coalescing vlaue
+ */
+
+struct gfar_priv_rx_q {
+	spinlock_t rxlock __attribute__ ((aligned (SMP_CACHE_BYTES)));
+	struct napi_struct napi;
+	struct sk_buff ** rx_skbuff;
+	struct rxbd8 *rx_bd_base;
+	struct rxbd8 *cur_rx;
+	struct net_device *dev;
+	u16 skb_currx;
+	u16 qindex;
+	unsigned int rx_ring_size;
+	/* RX Coalescing values */
+	unsigned char rxcoalescing;
+	unsigned long rxic;
+};
+
 /* Struct stolen almost completely (and shamelessly) from the FCC enet source
  * (Ok, that's not so true anymore, but there is a family resemblence)
  * The GFAR buffer descriptors track the ring buffers.  The rx_bd_base
@@ -709,52 +780,15 @@ struct gfar {
  * the buffer descriptor determines the actual condition.
  */
 struct gfar_private {
-	/* Fields controlled by TX lock */
-	spinlock_t txlock;
-
-	/* Pointer to the array of skbuffs */
-	struct sk_buff ** tx_skbuff;
-
-	/* next free skb in the array */
-	u16 skb_curtx;
-
-	/* First skb in line to be transmitted */
-	u16 skb_dirtytx;
-
-	/* Configuration info for the coalescing features */
-	unsigned char txcoalescing;
-	unsigned long txic;
-
-	/* Buffer descriptor pointers */
-	dma_addr_t tx_bd_dma_base;
-	struct txbd8 *tx_bd_base;	/* First tx buffer descriptor */
-	struct txbd8 *cur_tx;		/* Next free ring entry */
-	struct txbd8 *dirty_tx;		/* First buffer in line
-					   to be transmitted */
-	unsigned int tx_ring_size;
-	unsigned int num_txbdfree;	/* number of TxBDs free */
-
-	/* RX Locked fields */
-	spinlock_t rxlock;
-
 	struct device_node *node;
 	struct net_device *ndev;
 	struct of_device *ofdev;
-	struct napi_struct napi;
 
-	/* skb array and index */
-	struct sk_buff ** rx_skbuff;
-	u16 skb_currx;
+	struct gfar_priv_tx_q *tx_queue;
+	struct gfar_priv_rx_q *rx_queue;
 
-	/* RX Coalescing values */
-	unsigned char rxcoalescing;
-	unsigned long rxic;
-
-	struct rxbd8 *rx_bd_base;	/* First Rx buffers */
-	struct rxbd8 *cur_rx;		/* Next free rx ring entry */
-
-	/* RX parameters */
-	unsigned int rx_ring_size;
+	/* RX per device parameters */
 	unsigned int rx_buffer_size;
 	unsigned int rx_stash_size;
 	unsigned int rx_stash_index;
@@ -7,8 +7,9 @@
|
|||||||
*
|
*
|
||||||
* Author: Andy Fleming
|
* Author: Andy Fleming
|
||||||
* Maintainer: Kumar Gala
|
* Maintainer: Kumar Gala
|
||||||
|
* Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
|
||||||
*
|
*
|
||||||
* Copyright (c) 2003,2004 Freescale Semiconductor, Inc.
|
* Copyright 2003-2006, 2008-2009 Freescale Semiconductor, Inc.
|
||||||
*
|
*
|
||||||
* This software may be used and distributed according to
|
* This software may be used and distributed according to
|
||||||
* the terms of the GNU Public License, Version 2, incorporated herein
|
* the terms of the GNU Public License, Version 2, incorporated herein
|
||||||
@@ -41,7 +42,7 @@
|
|||||||
#include "gianfar.h"
|
#include "gianfar.h"
|
||||||
|
|
||||||
extern void gfar_start(struct net_device *dev);
|
extern void gfar_start(struct net_device *dev);
|
||||||
extern int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
|
extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
|
||||||
|
|
||||||
#define GFAR_MAX_COAL_USECS 0xffff
|
#define GFAR_MAX_COAL_USECS 0xffff
|
||||||
#define GFAR_MAX_COAL_FRAMES 0xff
|
#define GFAR_MAX_COAL_FRAMES 0xff
|
||||||
@@ -197,12 +198,16 @@ static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
         struct gfar_private *priv = netdev_priv(dev);
         struct phy_device *phydev = priv->phydev;
+        struct gfar_priv_rx_q *rx_queue = NULL;
+        struct gfar_priv_tx_q *tx_queue = NULL;
 
         if (NULL == phydev)
                 return -ENODEV;
+        tx_queue = priv->tx_queue;
+        rx_queue = priv->rx_queue;
 
-        cmd->maxtxpkt = get_icft_value(priv->txic);
-        cmd->maxrxpkt = get_icft_value(priv->rxic);
+        cmd->maxtxpkt = get_icft_value(tx_queue->txic);
+        cmd->maxrxpkt = get_icft_value(rx_queue->rxic);
 
         return phy_ethtool_gset(phydev, cmd);
 }
@@ -279,6 +284,8 @@ static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int ticks)
 static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
 {
         struct gfar_private *priv = netdev_priv(dev);
+        struct gfar_priv_rx_q *rx_queue = NULL;
+        struct gfar_priv_tx_q *tx_queue = NULL;
         unsigned long rxtime;
         unsigned long rxcount;
         unsigned long txtime;
@@ -290,10 +297,13 @@ static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
         if (NULL == priv->phydev)
                 return -ENODEV;
 
-        rxtime  = get_ictt_value(priv->rxic);
-        rxcount = get_icft_value(priv->rxic);
-        txtime  = get_ictt_value(priv->txic);
-        txcount = get_icft_value(priv->txic);
+        rx_queue = priv->rx_queue;
+        tx_queue = priv->tx_queue;
+
+        rxtime  = get_ictt_value(rx_queue->rxic);
+        rxcount = get_icft_value(rx_queue->rxic);
+        txtime  = get_ictt_value(tx_queue->txic);
+        txcount = get_icft_value(tx_queue->txic);
         cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime);
         cvals->rx_max_coalesced_frames = rxcount;
 
@@ -339,16 +349,21 @@ static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
 static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
 {
         struct gfar_private *priv = netdev_priv(dev);
+        struct gfar_priv_tx_q *tx_queue = NULL;
+        struct gfar_priv_rx_q *rx_queue = NULL;
 
         if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
                 return -EOPNOTSUPP;
 
+        tx_queue = priv->tx_queue;
+        rx_queue = priv->rx_queue;
+
         /* Set up rx coalescing */
         if ((cvals->rx_coalesce_usecs == 0) ||
             (cvals->rx_max_coalesced_frames == 0))
-                priv->rxcoalescing = 0;
+                rx_queue->rxcoalescing = 0;
         else
-                priv->rxcoalescing = 1;
+                rx_queue->rxcoalescing = 1;
 
         if (NULL == priv->phydev)
                 return -ENODEV;
@@ -366,15 +381,15 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
                 return -EINVAL;
         }
 
-        priv->rxic = mk_ic_value(cvals->rx_max_coalesced_frames,
+        rx_queue->rxic = mk_ic_value(cvals->rx_max_coalesced_frames,
                 gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
 
         /* Set up tx coalescing */
         if ((cvals->tx_coalesce_usecs == 0) ||
             (cvals->tx_max_coalesced_frames == 0))
-                priv->txcoalescing = 0;
+                tx_queue->txcoalescing = 0;
         else
-                priv->txcoalescing = 1;
+                tx_queue->txcoalescing = 1;
 
         /* Check the bounds of the values */
         if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
@@ -389,16 +404,16 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
                 return -EINVAL;
         }
 
-        priv->txic = mk_ic_value(cvals->tx_max_coalesced_frames,
+        tx_queue->txic = mk_ic_value(cvals->tx_max_coalesced_frames,
                 gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
 
         gfar_write(&priv->regs->rxic, 0);
-        if (priv->rxcoalescing)
-                gfar_write(&priv->regs->rxic, priv->rxic);
+        if (rx_queue->rxcoalescing)
+                gfar_write(&priv->regs->rxic, rx_queue->rxic);
 
         gfar_write(&priv->regs->txic, 0);
-        if (priv->txcoalescing)
-                gfar_write(&priv->regs->txic, priv->txic);
+        if (tx_queue->txcoalescing)
+                gfar_write(&priv->regs->txic, tx_queue->txic);
 
         return 0;
 }
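For context, get_icft_value(), get_ictt_value() and mk_ic_value() above pack and unpack a frame-count threshold and a timer threshold into the single txic/rxic word that is written to the controller's coalescing registers. A sketch of how such helpers could look; the enable bit, masks and field widths here are assumptions, not copied from gianfar.h:

/* Illustrative coalescing helpers -- masks and shifts are assumed. */
#define IC_ICEN         0x80000000      /* interrupt coalescing enable */
#define IC_ICFT_SHIFT   21              /* frame-count threshold field */
#define IC_ICFT_MASK    0x1fe00000
#define IC_ICTT_MASK    0x0000ffff      /* timer threshold field */

#define mk_ic_value(count, time) \
        (IC_ICEN | ((unsigned long)(count) << IC_ICFT_SHIFT) | \
                   ((unsigned long)(time) & IC_ICTT_MASK))
#define get_icft_value(ic) \
        (((unsigned long)(ic) & IC_ICFT_MASK) >> IC_ICFT_SHIFT)
#define get_ictt_value(ic) \
        ((unsigned long)(ic) & IC_ICTT_MASK)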
@@ -409,6 +424,11 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
 static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
 {
         struct gfar_private *priv = netdev_priv(dev);
+        struct gfar_priv_tx_q *tx_queue = NULL;
+        struct gfar_priv_rx_q *rx_queue = NULL;
+
+        tx_queue = priv->tx_queue;
+        rx_queue = priv->rx_queue;
 
         rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
         rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
@@ -418,10 +438,10 @@ static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
         /* Values changeable by the user. The valid values are
          * in the range 1 to the "*_max_pending" counterpart above.
          */
-        rvals->rx_pending = priv->rx_ring_size;
-        rvals->rx_mini_pending = priv->rx_ring_size;
-        rvals->rx_jumbo_pending = priv->rx_ring_size;
-        rvals->tx_pending = priv->tx_ring_size;
+        rvals->rx_pending = rx_queue->rx_ring_size;
+        rvals->rx_mini_pending = rx_queue->rx_ring_size;
+        rvals->rx_jumbo_pending = rx_queue->rx_ring_size;
+        rvals->tx_pending = tx_queue->tx_ring_size;
 }
 
 /* Change the current ring parameters, stopping the controller if
@@ -431,6 +451,8 @@ static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
 static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
 {
         struct gfar_private *priv = netdev_priv(dev);
+        struct gfar_priv_tx_q *tx_queue = NULL;
+        struct gfar_priv_rx_q *rx_queue = NULL;
         int err = 0;
 
         if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
@@ -451,29 +473,32 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
                 return -EINVAL;
         }
 
+        tx_queue = priv->tx_queue;
+        rx_queue = priv->rx_queue;
+
         if (dev->flags & IFF_UP) {
                 unsigned long flags;
 
                 /* Halt TX and RX, and process the frames which
                  * have already been received */
-                spin_lock_irqsave(&priv->txlock, flags);
-                spin_lock(&priv->rxlock);
+                spin_lock_irqsave(&tx_queue->txlock, flags);
+                spin_lock(&rx_queue->rxlock);
 
                 gfar_halt(dev);
 
-                spin_unlock(&priv->rxlock);
-                spin_unlock_irqrestore(&priv->txlock, flags);
+                spin_unlock(&rx_queue->rxlock);
+                spin_unlock_irqrestore(&tx_queue->txlock, flags);
 
-                gfar_clean_rx_ring(dev, priv->rx_ring_size);
+                gfar_clean_rx_ring(rx_queue, rx_queue->rx_ring_size);
 
                 /* Now we take down the rings to rebuild them */
                 stop_gfar(dev);
         }
 
         /* Change the size */
-        priv->rx_ring_size = rvals->rx_pending;
-        priv->tx_ring_size = rvals->tx_pending;
-        priv->num_txbdfree = priv->tx_ring_size;
+        rx_queue->rx_ring_size = rvals->rx_pending;
+        tx_queue->tx_ring_size = rvals->tx_pending;
+        tx_queue->num_txbdfree = tx_queue->tx_ring_size;
 
         /* Rebuild the rings with the new size */
         if (dev->flags & IFF_UP) {
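The halt/drain/rebuild sequence in gfar_sringparam() above is repeated verbatim in gfar_set_rx_csum() below, now taking the locks through the queue structures instead of priv. A hypothetical helper showing the pattern; the function name is illustrative and not part of the driver, while the lock order (tx then rx, released in reverse) is taken straight from the diff:

/* Hypothetical factoring of the repeated pattern above; not in the driver. */
static void gfar_halt_and_drain(struct net_device *dev,
                                struct gfar_priv_tx_q *tx_queue,
                                struct gfar_priv_rx_q *rx_queue)
{
        unsigned long flags;

        /* Stop the controller while holding both queue locks */
        spin_lock_irqsave(&tx_queue->txlock, flags);
        spin_lock(&rx_queue->rxlock);

        gfar_halt(dev);

        spin_unlock(&rx_queue->rxlock);
        spin_unlock_irqrestore(&tx_queue->txlock, flags);

        /* Process frames already received, then take the rings down */
        gfar_clean_rx_ring(rx_queue, rx_queue->rx_ring_size);
        stop_gfar(dev);
}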
@@ -486,24 +511,29 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
 static int gfar_set_rx_csum(struct net_device *dev, uint32_t data)
 {
         struct gfar_private *priv = netdev_priv(dev);
+        struct gfar_priv_rx_q *rx_queue = NULL;
+        struct gfar_priv_tx_q *tx_queue = NULL;
         unsigned long flags;
         int err = 0;
 
         if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM))
                 return -EOPNOTSUPP;
 
+        tx_queue = priv->tx_queue;
+        rx_queue = priv->rx_queue;
+
         if (dev->flags & IFF_UP) {
                 /* Halt TX and RX, and process the frames which
                  * have already been received */
-                spin_lock_irqsave(&priv->txlock, flags);
-                spin_lock(&priv->rxlock);
+                spin_lock_irqsave(&tx_queue->txlock, flags);
+                spin_lock(&rx_queue->rxlock);
 
                 gfar_halt(dev);
 
-                spin_unlock(&priv->rxlock);
-                spin_unlock_irqrestore(&priv->txlock, flags);
+                spin_unlock(&rx_queue->rxlock);
+                spin_unlock_irqrestore(&tx_queue->txlock, flags);
 
-                gfar_clean_rx_ring(dev, priv->rx_ring_size);
+                gfar_clean_rx_ring(rx_queue, rx_queue->rx_ring_size);
 
                 /* Now we take down the rings to rebuild them */
                 stop_gfar(dev);
@@ -8,8 +8,9 @@
  *
  * Author: Andy Fleming
  * Maintainer: Kumar Gala (galak@kernel.crashing.org)
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
  *
- * Copyright (c) 2002-2005 Freescale Semiconductor, Inc.
+ * Copyright 2002-2009 Freescale Semiconductor, Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -49,6 +50,7 @@ static ssize_t gfar_set_bd_stash(struct device *dev,
                 const char *buf, size_t count)
 {
         struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+        struct gfar_priv_rx_q *rx_queue = NULL;
         int new_setting = 0;
         u32 temp;
         unsigned long flags;
@@ -56,6 +58,8 @@ static ssize_t gfar_set_bd_stash(struct device *dev,
         if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BD_STASHING))
                 return count;
 
+        rx_queue = priv->rx_queue;
+
         /* Find out the new setting */
         if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1))
                 new_setting = 1;
@@ -65,7 +69,7 @@ static ssize_t gfar_set_bd_stash(struct device *dev,
         else
                 return count;
 
-        spin_lock_irqsave(&priv->rxlock, flags);
+        spin_lock_irqsave(&rx_queue->rxlock, flags);
 
         /* Set the new stashing value */
         priv->bd_stash_en = new_setting;
@@ -79,7 +83,7 @@ static ssize_t gfar_set_bd_stash(struct device *dev,
 
         gfar_write(&priv->regs->attr, temp);
 
-        spin_unlock_irqrestore(&priv->rxlock, flags);
+        spin_unlock_irqrestore(&rx_queue->rxlock, flags);
 
         return count;
 }
@@ -99,6 +103,7 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev,
                 const char *buf, size_t count)
 {
         struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+        struct gfar_priv_rx_q *rx_queue = NULL;
         unsigned int length = simple_strtoul(buf, NULL, 0);
         u32 temp;
         unsigned long flags;
@@ -106,7 +111,9 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev,
         if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
                 return count;
 
-        spin_lock_irqsave(&priv->rxlock, flags);
+        rx_queue = priv->rx_queue;
+
+        spin_lock_irqsave(&rx_queue->rxlock, flags);
         if (length > priv->rx_buffer_size)
                 goto out;
 
@@ -131,7 +138,7 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev,
         gfar_write(&priv->regs->attr, temp);
 
 out:
-        spin_unlock_irqrestore(&priv->rxlock, flags);
+        spin_unlock_irqrestore(&rx_queue->rxlock, flags);
 
         return count;
 }
@@ -154,6 +161,7 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
                 const char *buf, size_t count)
 {
         struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+        struct gfar_priv_rx_q *rx_queue = NULL;
         unsigned short index = simple_strtoul(buf, NULL, 0);
         u32 temp;
         unsigned long flags;
@@ -161,7 +169,9 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
         if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
                 return count;
 
-        spin_lock_irqsave(&priv->rxlock, flags);
+        rx_queue = priv->rx_queue;
+
+        spin_lock_irqsave(&rx_queue->rxlock, flags);
         if (index > priv->rx_stash_size)
                 goto out;
 
@@ -176,7 +186,7 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
         gfar_write(&priv->regs->attreli, flags);
 
 out:
-        spin_unlock_irqrestore(&priv->rxlock, flags);
+        spin_unlock_irqrestore(&rx_queue->rxlock, flags);
 
         return count;
 }
@@ -198,6 +208,7 @@ static ssize_t gfar_set_fifo_threshold(struct device *dev,
                 const char *buf, size_t count)
 {
         struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+        struct gfar_priv_tx_q *tx_queue = NULL;
         unsigned int length = simple_strtoul(buf, NULL, 0);
         u32 temp;
         unsigned long flags;
@@ -205,7 +216,9 @@ static ssize_t gfar_set_fifo_threshold(struct device *dev,
         if (length > GFAR_MAX_FIFO_THRESHOLD)
                 return count;
 
-        spin_lock_irqsave(&priv->txlock, flags);
+        tx_queue = priv->tx_queue;
+
+        spin_lock_irqsave(&tx_queue->txlock, flags);
 
         priv->fifo_threshold = length;
 
@@ -214,7 +227,7 @@ static ssize_t gfar_set_fifo_threshold(struct device *dev,
         temp |= length;
         gfar_write(&priv->regs->fifo_tx_thr, temp);
 
-        spin_unlock_irqrestore(&priv->txlock, flags);
+        spin_unlock_irqrestore(&tx_queue->txlock, flags);
 
         return count;
 }
@@ -235,6 +248,7 @@ static ssize_t gfar_set_fifo_starve(struct device *dev,
                 const char *buf, size_t count)
 {
         struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+        struct gfar_priv_tx_q *tx_queue = NULL;
         unsigned int num = simple_strtoul(buf, NULL, 0);
         u32 temp;
         unsigned long flags;
@@ -242,7 +256,8 @@ static ssize_t gfar_set_fifo_starve(struct device *dev,
         if (num > GFAR_MAX_FIFO_STARVE)
                 return count;
 
-        spin_lock_irqsave(&priv->txlock, flags);
+        tx_queue = priv->tx_queue;
+        spin_lock_irqsave(&tx_queue->txlock, flags);
 
         priv->fifo_starve = num;
 
@@ -251,7 +266,7 @@ static ssize_t gfar_set_fifo_starve(struct device *dev,
         temp |= num;
         gfar_write(&priv->regs->fifo_tx_starve, temp);
 
-        spin_unlock_irqrestore(&priv->txlock, flags);
+        spin_unlock_irqrestore(&tx_queue->txlock, flags);
 
         return count;
 }
@@ -273,6 +288,7 @@ static ssize_t gfar_set_fifo_starve_off(struct device *dev,
                 const char *buf, size_t count)
 {
         struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+        struct gfar_priv_tx_q *tx_queue = NULL;
         unsigned int num = simple_strtoul(buf, NULL, 0);
         u32 temp;
         unsigned long flags;
@@ -280,7 +296,8 @@ static ssize_t gfar_set_fifo_starve_off(struct device *dev,
         if (num > GFAR_MAX_FIFO_STARVE_OFF)
                 return count;
 
-        spin_lock_irqsave(&priv->txlock, flags);
+        tx_queue = priv->tx_queue;
+        spin_lock_irqsave(&tx_queue->txlock, flags);
 
         priv->fifo_starve_off = num;
 
@@ -289,7 +306,7 @@ static ssize_t gfar_set_fifo_starve_off(struct device *dev,
         temp |= num;
         gfar_write(&priv->regs->fifo_tx_starve_shutoff, temp);
 
-        spin_unlock_irqrestore(&priv->txlock, flags);
+        spin_unlock_irqrestore(&tx_queue->txlock, flags);
 
         return count;
 }