Merge remote branch 'origin' into secretlab/next-devicetree

Merging in the current state of Linus' tree to deal with merge conflicts and
build failures in vio.c after the merge.

Conflicts:
	drivers/i2c/busses/i2c-cpm.c
	drivers/i2c/busses/i2c-mpc.c
	drivers/net/gianfar.c

Also fixed up one line in arch/powerpc/kernel/vio.c to use the
correct node pointer.

Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
Committed by Grant Likely on 2010-05-22 00:36:56 -06:00.
6983 changed files with 473835 additions and 213947 deletions.

drivers/net/gianfar.c:

@@ -82,6 +82,7 @@
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -377,6 +378,13 @@ static void gfar_init_mac(struct net_device *ndev)
rctrl |= RCTRL_PADDING(priv->padding);
}
/* Insert receive time stamps into padding alignment bytes */
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
rctrl &= ~RCTRL_PAL_MASK;
rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE | RCTRL_PADDING(8);
priv->padding = 8;
}
/* keep vlan related bits if it's enabled */
if (priv->vlgrp) {
rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
@@ -501,7 +509,8 @@ void unlock_tx_qs(struct gfar_private *priv)
/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
-	return priv->vlgrp || priv->rx_csum_enable;
+	return priv->vlgrp || priv->rx_csum_enable ||
+		(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
}
static void free_tx_pointers(struct gfar_private *priv)
@@ -549,12 +558,8 @@ static int gfar_parse_group(struct device_node *np,
struct gfar_private *priv, const char *model)
{
u32 *queue_mask;
-	u64 addr, size;
-	addr = of_translate_address(np,
-			of_get_address(np, 0, &size, NULL));
-	priv->gfargrp[priv->num_grps].regs = ioremap(addr, size);
+	priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0);
if (!priv->gfargrp[priv->num_grps].regs)
return -ENOMEM;
@@ -742,7 +747,8 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
FSL_GIANFAR_DEV_HAS_CSUM |
FSL_GIANFAR_DEV_HAS_VLAN |
FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
-		FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;
+		FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
+		FSL_GIANFAR_DEV_HAS_TIMER;
ctype = of_get_property(np, "phy-connection-type", NULL);
@@ -772,6 +778,48 @@ err_grp_init:
return err;
}
static int gfar_hwtstamp_ioctl(struct net_device *netdev,
struct ifreq *ifr, int cmd)
{
struct hwtstamp_config config;
struct gfar_private *priv = netdev_priv(netdev);
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
/* reserved for future extensions */
if (config.flags)
return -EINVAL;
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
priv->hwts_tx_en = 0;
break;
case HWTSTAMP_TX_ON:
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
return -ERANGE;
priv->hwts_tx_en = 1;
break;
default:
return -ERANGE;
}
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
priv->hwts_rx_en = 0;
break;
default:
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
return -ERANGE;
priv->hwts_rx_en = 1;
config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
}
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
@@ -780,6 +828,9 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (!netif_running(dev))
return -EINVAL;
if (cmd == SIOCSHWTSTAMP)
return gfar_hwtstamp_ioctl(dev, rq, cmd);
if (!priv->phydev)
return -ENODEV;
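
For context, the gfar_hwtstamp_ioctl() handler added above is driven from user
space through SIOCSHWTSTAMP with a struct hwtstamp_config. A minimal user-space
sketch (not part of this patch; the interface name "eth0" and the helper name
enable_hw_tstamp() are only illustrative):

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

/* Enable hardware time stamping on the device behind fd (any socket on the
 * device works, e.g. socket(AF_INET, SOCK_DGRAM, 0)). */
static int enable_hw_tstamp(int fd)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;		/* handled by gfar_hwtstamp_ioctl() above */
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;	/* the driver upgrades any filter to ALL */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}
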
@@ -982,7 +1033,8 @@ static int gfar_probe(struct of_device *ofdev,
else
priv->padding = 0;
-	if (dev->features & NETIF_F_IP_CSUM)
+	if (dev->features & NETIF_F_IP_CSUM ||
+	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
dev->hard_header_len += GMAC_FCB_LEN;
/* Program the isrg regs only if number of grps > 1 */
@@ -1292,21 +1344,9 @@ static struct dev_pm_ops gfar_pm_ops = {
#define GFAR_PM_OPS (&gfar_pm_ops)
-static int gfar_legacy_suspend(struct of_device *ofdev, pm_message_t state)
-{
-	return gfar_suspend(&ofdev->dev);
-}
-static int gfar_legacy_resume(struct of_device *ofdev)
-{
-	return gfar_resume(&ofdev->dev);
-}
#else
#define GFAR_PM_OPS NULL
-#define gfar_legacy_suspend NULL
-#define gfar_legacy_resume NULL
#endif
@@ -1515,9 +1555,9 @@ static void gfar_halt_nodisable(struct net_device *dev)
tempval |= (DMACTRL_GRS | DMACTRL_GTS);
gfar_write(&regs->dmactrl, tempval);
-		while (!(gfar_read(&regs->ievent) &
-			 (IEVENT_GRSC | IEVENT_GTSC)))
-			cpu_relax();
+		spin_event_timeout(((gfar_read(&regs->ievent) &
+			 (IEVENT_GRSC | IEVENT_GTSC)) ==
+			 (IEVENT_GRSC | IEVENT_GTSC)), -1, 0);
}
}
@@ -1653,6 +1693,7 @@ static void free_skb_resources(struct gfar_private *priv)
sizeof(struct rxbd8) * priv->total_rx_ring_size,
priv->tx_queue[0]->tx_bd_base,
priv->tx_queue[0]->tx_bd_dma_base);
skb_queue_purge(&priv->rx_recycle);
}
void gfar_start(struct net_device *dev)
@@ -1686,7 +1727,7 @@ void gfar_start(struct net_device *dev)
gfar_write(&regs->imask, IMASK_DEFAULT);
}
-	dev->trans_start = jiffies;
+	dev->trans_start = jiffies; /* prevent tx timeout */
}
void gfar_configure_coalescing(struct gfar_private *priv,
@@ -1926,23 +1967,29 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct netdev_queue *txq;
struct gfar __iomem *regs = NULL;
struct txfcb *fcb = NULL;
-	struct txbd8 *txbdp, *txbdp_start, *base;
+	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
u32 lstatus;
-	int i, rq = 0;
+	int i, rq = 0, do_tstamp = 0;
u32 bufaddr;
unsigned long flags;
-	unsigned int nr_frags, length;
+	unsigned int nr_frags, nr_txbds, length;
+	union skb_shared_tx *shtx;
rq = skb->queue_mapping;
tx_queue = priv->tx_queue[rq];
txq = netdev_get_tx_queue(dev, rq);
base = tx_queue->tx_bd_base;
regs = tx_queue->grp->regs;
shtx = skb_tx(skb);
/* check if time stamp should be generated */
if (unlikely(shtx->hardware && priv->hwts_tx_en))
do_tstamp = 1;
/* make space for additional header when fcb is needed */
if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
-			(priv->vlgrp && vlan_tx_tag_present(skb))) &&
+			(priv->vlgrp && vlan_tx_tag_present(skb)) ||
+			unlikely(do_tstamp)) &&
(skb_headroom(skb) < GMAC_FCB_LEN)) {
struct sk_buff *skb_new;
@@ -1959,8 +2006,14 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* total number of fragments in the SKB */
nr_frags = skb_shinfo(skb)->nr_frags;
/* calculate the required number of TxBDs for this skb */
if (unlikely(do_tstamp))
nr_txbds = nr_frags + 2;
else
nr_txbds = nr_frags + 1;
/* check if there is space to queue this packet */
-	if ((nr_frags+1) > tx_queue->num_txbdfree) {
+	if (nr_txbds > tx_queue->num_txbdfree) {
/* no space, stop the queue */
netif_tx_stop_queue(txq);
dev->stats.tx_fifo_errors++;
@@ -1972,9 +2025,19 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
txq->tx_packets ++;
txbdp = txbdp_start = tx_queue->cur_tx;
lstatus = txbdp->lstatus;
/* Time stamp insertion requires one additional TxBD */
if (unlikely(do_tstamp))
txbdp_tstamp = txbdp = next_txbd(txbdp, base,
tx_queue->tx_ring_size);
if (nr_frags == 0) {
-		lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+		if (unlikely(do_tstamp))
+			txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
+					TXBD_INTERRUPT);
+		else
+			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
} else {
/* Place the fragment addresses and lengths into the TxBDs */
for (i = 0; i < nr_frags; i++) {
@@ -2020,11 +2083,32 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
gfar_tx_vlan(skb, fcb);
}
-	/* setup the TxBD length and buffer pointer for the first BD */
+	/* Setup tx hardware time stamping if requested */
+	if (unlikely(do_tstamp)) {
+		shtx->in_progress = 1;
+		if (fcb == NULL)
+			fcb = gfar_add_fcb(skb);
+		fcb->ptp = 1;
+		lstatus |= BD_LFLAG(TXBD_TOE);
+	}
txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
-	lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
+	/*
+	 * If time stamping is requested one additional TxBD must be set up. The
+	 * first TxBD points to the FCB and must have a data length of
+	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
+	 * the full frame length.
+	 */
+	if (unlikely(do_tstamp)) {
+		txbdp_tstamp->bufPtr = txbdp_start->bufPtr + GMAC_FCB_LEN;
+		txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
+				(skb_headlen(skb) - GMAC_FCB_LEN);
+		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
+	} else {
+		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
+	}
/*
* We can work in parallel with gfar_clean_tx_ring(), except
@@ -2064,9 +2148,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
/* reduce TxBD free count */
-	tx_queue->num_txbdfree -= (nr_frags + 1);
-	dev->trans_start = jiffies;
+	tx_queue->num_txbdfree -= (nr_txbds);
/* If the next BD still needs to be cleaned up, then the bds
are full. We need to tell the kernel to stop sending us stuff. */
@@ -2092,7 +2174,6 @@ static int gfar_close(struct net_device *dev)
disable_napi(priv);
-	skb_queue_purge(&priv->rx_recycle);
cancel_work_sync(&priv->reset_task);
stop_gfar(dev);
@@ -2255,16 +2336,18 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
struct net_device *dev = tx_queue->dev;
struct gfar_private *priv = netdev_priv(dev);
struct gfar_priv_rx_q *rx_queue = NULL;
-	struct txbd8 *bdp;
+	struct txbd8 *bdp, *next = NULL;
struct txbd8 *lbdp = NULL;
struct txbd8 *base = tx_queue->tx_bd_base;
struct sk_buff *skb;
int skb_dirtytx;
int tx_ring_size = tx_queue->tx_ring_size;
-	int frags = 0;
+	int frags = 0, nr_txbds = 0;
int i;
int howmany = 0;
u32 lstatus;
size_t buflen;
union skb_shared_tx *shtx;
rx_queue = priv->rx_queue[tx_queue->qindex];
bdp = tx_queue->dirty_tx;
@@ -2274,7 +2357,18 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
unsigned long flags;
frags = skb_shinfo(skb)->nr_frags;
-		lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
+		/*
+		 * When time stamping, one additional TxBD must be freed.
+		 * Also, we need to dma_unmap_single() the TxPAL.
+		 */
+		shtx = skb_tx(skb);
+		if (unlikely(shtx->in_progress))
+			nr_txbds = frags + 2;
+		else
+			nr_txbds = frags + 1;
+		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
lstatus = lbdp->lstatus;
@@ -2283,10 +2377,24 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
(lstatus & BD_LENGTH_MASK))
break;
-		dma_unmap_single(&priv->ofdev->dev,
-				bdp->bufPtr,
-				bdp->length,
-				DMA_TO_DEVICE);
+		if (unlikely(shtx->in_progress)) {
+			next = next_txbd(bdp, base, tx_ring_size);
+			buflen = next->length + GMAC_FCB_LEN;
+		} else
+			buflen = bdp->length;
+		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
+				buflen, DMA_TO_DEVICE);
+		if (unlikely(shtx->in_progress)) {
+			struct skb_shared_hwtstamps shhwtstamps;
+			u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
+			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
+			skb_tstamp_tx(skb, &shhwtstamps);
+			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
+			bdp = next;
+		}
bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
bdp = next_txbd(bdp, base, tx_ring_size);
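
The time stamp handed to skb_tstamp_tx() above is delivered to user space on
the socket's error queue. A rough sketch of reading it back (not part of this
patch; it assumes the device was configured via SIOCSHWTSTAMP as sketched
earlier, the socket has SO_TIMESTAMPING enabled with
SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE, and the helper
name read_tx_tstamp() is only illustrative):

#include <time.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>

/* Pull one message off the error queue and return the raw hardware TX
 * time stamp in *hw; returns 0 on success, -1 otherwise. */
static int read_tx_tstamp(int fd, struct timespec *hw)
{
	char data[256], ctrl[512];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
	};
	struct cmsghdr *cm;

	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return -1;

	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == SOL_SOCKET &&
		    cm->cmsg_type == SCM_TIMESTAMPING) {
			struct scm_timestamping *ts =
				(struct scm_timestamping *)CMSG_DATA(cm);

			*hw = ts->ts[2];	/* raw hardware time stamp */
			return 0;
		}
	}
	return -1;
}
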
@@ -2318,7 +2426,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
howmany++;
spin_lock_irqsave(&tx_queue->txlock, flags);
-		tx_queue->num_txbdfree += frags + 1;
+		tx_queue->num_txbdfree += nr_txbds;
spin_unlock_irqrestore(&tx_queue->txlock, flags);
}
@@ -2474,6 +2582,17 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
skb_pull(skb, amount_pull);
}
/* Get receive timestamp from the skb */
if (priv->hwts_rx_en) {
struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
u64 *ns = (u64 *) skb->data;
memset(shhwtstamps, 0, sizeof(*shhwtstamps));
shhwtstamps->hwtstamp = ns_to_ktime(*ns);
}
if (priv->padding)
skb_pull(skb, priv->padding);
if (priv->rx_csum_enable)
gfar_rx_checksum(skb, fcb);
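
On the receive path, the stamp written into skb_hwtstamps() above reaches
applications through the same SO_TIMESTAMPING machinery: the raw hardware time
stamp shows up as an SCM_TIMESTAMPING control message (ts[2]) on ordinary
recvmsg() calls. A minimal sketch of requesting it on a socket (not part of
this patch; enable_rx_tstamp() is only an illustrative name):

#include <sys/socket.h>
#include <linux/net_tstamp.h>

/* Request raw hardware RX time stamps on this socket; the device-level
 * filter is set separately through SIOCSHWTSTAMP as sketched earlier. */
static int enable_rx_tstamp(int fd)
{
	int flags = SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE;

	return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags));
}
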
@@ -2510,8 +2629,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
bdp = rx_queue->cur_rx;
base = rx_queue->rx_bd_base;
-	amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
-		priv->padding;
+	amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);
while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
struct sk_buff *newskb;
@@ -2798,7 +2916,7 @@ static void adjust_link(struct net_device *dev)
* whenever dev->flags is changed */
static void gfar_set_multi(struct net_device *dev)
{
-	struct dev_mc_list *mc_ptr;
+	struct netdev_hw_addr *ha;
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
@@ -2871,17 +2989,14 @@ static void gfar_set_multi(struct net_device *dev)
return;
/* Parse the list, and set the appropriate bits */
-		netdev_for_each_mc_addr(mc_ptr, dev) {
+		netdev_for_each_mc_addr(ha, dev) {
if (idx < em_num) {
-				gfar_set_mac_for_addr(dev, idx,
-						mc_ptr->dmi_addr);
+				gfar_set_mac_for_addr(dev, idx, ha->addr);
idx++;
} else
-				gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
+				gfar_set_hash_for_addr(dev, ha->addr);
}
}
-	return;
}
@@ -2922,8 +3037,6 @@ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
tempval = gfar_read(priv->hash_regs[whichreg]);
tempval |= value;
gfar_write(priv->hash_regs[whichreg], tempval);
-	return;
}
@@ -3062,8 +3175,6 @@ static struct of_platform_driver gfar_driver = {
},
.probe = gfar_probe,
.remove = gfar_remove,
-	.suspend = gfar_legacy_suspend,
-	.resume = gfar_legacy_resume,
};
static int __init gfar_init(void)