Merge branch 'gianfar-next'

Claudiu Manoil says:

====================
gianfar: ARM port driver updates (2/2)

The 2nd round of driver updates to make gianfar portable on ARM,
for the ARM-based SoC that integrates eTSEC - "ls1021a".
The patches address the bulk of the remaining endianness issues -
handling of DMA fields (BD and FCB) and device tree properties.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committer: David S. Miller <davem@davemloft.net>
Date:      2015-03-15 19:56:52 -04:00

2 changed files with 138 additions and 96 deletions
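
For context, the BD/FCB hunks below follow the standard kernel endianness
idiom: the descriptor fields become __be16/__be32 in gianfar.h, writes go
through cpu_to_be*() and reads through be*_to_cpu(), so the same driver is
correct on big-endian PowerPC and little-endian ARM, and sparse can flag any
access that skips a conversion. A minimal sketch of the idiom, with
hypothetical names (demo_bd, DEMO_WRAP, demo_num_tx_queues are illustrations,
not the driver's actual symbols):

    #include <linux/types.h>
    #include <linux/of.h>
    #include <asm/byteorder.h>

    #define DEMO_WRAP 0x2000	/* stand-in for a BD status flag */

    /* A DMA descriptor as the device sees it: always big-endian,
     * whatever the CPU's byte order (sketch only; the real layouts
     * are struct txbd8/rxbd8 in gianfar.h).
     */
    struct demo_bd {
    	__be16 status;
    	__be16 length;
    	__be32 bufPtr;
    };

    static void demo_bd_setup(struct demo_bd *bd, u32 buf, u16 len)
    {
    	/* plain writes: convert CPU-native values before they
    	 * land in DMA-visible memory
    	 */
    	bd->bufPtr = cpu_to_be32(buf);
    	bd->length = cpu_to_be16(len);

    	/* read-modify-write: to CPU order, set the flag, back to
    	 * big-endian -- the shape of the TXBD_WRAP hunk below
    	 */
    	bd->status = cpu_to_be16(be16_to_cpu(bd->status) | DEMO_WRAP);
    }

    static u32 demo_num_tx_queues(struct device_node *np)
    {
    	u32 nqueues;

    	/* of_property_read_u32() byte-swaps the big-endian DT cell
    	 * itself; dereferencing of_get_property()'s raw pointer does
    	 * not, which is what the device-tree hunks below fix
    	 */
    	if (of_property_read_u32(np, "fsl,num_tx_queues", &nqueues))
    		nqueues = 1;	/* property absent: default to one queue */

    	return nqueues;
    }

On big-endian CPUs cpu_to_be32()/be32_to_cpu() compile away entirely, so the
existing PowerPC platforms pay nothing for these conversions.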

--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c

@@ -158,7 +158,7 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 {
 	u32 lstatus;
 
-	bdp->bufPtr = buf;
+	bdp->bufPtr = cpu_to_be32(buf);
 
 	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
 	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
@@ -166,7 +166,7 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 
 	gfar_wmb();
 
-	bdp->lstatus = lstatus;
+	bdp->lstatus = cpu_to_be32(lstatus);
 }
 
 static int gfar_init_bds(struct net_device *ndev)
@@ -200,7 +200,8 @@ static int gfar_init_bds(struct net_device *ndev)
 
 		/* Set the last descriptor in the ring to indicate wrap */
 		txbdp--;
-		txbdp->status |= TXBD_WRAP;
+		txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
+					    TXBD_WRAP);
 	}
 
 	rfbptr = &regs->rfbptr0;
@@ -214,7 +215,7 @@ static int gfar_init_bds(struct net_device *ndev)
 			struct sk_buff *skb = rx_queue->rx_skbuff[j];
 
 			if (skb) {
-				bufaddr = rxbdp->bufPtr;
+				bufaddr = be32_to_cpu(rxbdp->bufPtr);
 			} else {
 				skb = gfar_new_skb(ndev, &bufaddr);
 				if (!skb) {
@@ -696,19 +697,28 @@ static int gfar_parse_group(struct device_node *np,
 	grp->priv = priv;
 	spin_lock_init(&grp->grplock);
 	if (priv->mode == MQ_MG_MODE) {
-		u32 *rxq_mask, *txq_mask;
+		u32 rxq_mask, txq_mask;
+		int ret;
 
-		rxq_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
-		txq_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
+		grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
+		grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
+
+		ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask);
+		if (!ret) {
+			grp->rx_bit_map = rxq_mask ?
+			rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
+		}
+		ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask);
+		if (!ret) {
+			grp->tx_bit_map = txq_mask ?
+			txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
+		}
 
 		if (priv->poll_mode == GFAR_SQ_POLLING) {
 			/* One Q per interrupt group: Q0 to G0, Q1 to G1 */
 			grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
 			grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
-		} else { /* GFAR_MQ_POLLING */
-			grp->rx_bit_map = rxq_mask ?
-			*rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
-			grp->tx_bit_map = txq_mask ?
-			*txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
 		}
 	} else {
 		grp->rx_bit_map = 0xFF;
@@ -769,11 +779,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
 	struct gfar_private *priv = NULL;
 	struct device_node *np = ofdev->dev.of_node;
 	struct device_node *child = NULL;
-	const u32 *stash;
-	const u32 *stash_len;
-	const u32 *stash_idx;
+	struct property *stash;
+	u32 stash_len = 0;
+	u32 stash_idx = 0;
 	unsigned int num_tx_qs, num_rx_qs;
-	u32 *tx_queues, *rx_queues;
 	unsigned short mode, poll_mode;
 
 	if (!np)
@@ -787,10 +796,6 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
 		poll_mode = GFAR_SQ_POLLING;
 	}
 
-	/* parse the num of HW tx and rx queues */
-	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
-	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
-
 	if (mode == SQ_SG_MODE) {
 		num_tx_qs = 1;
 		num_rx_qs = 1;
@@ -809,8 +814,17 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
 			num_tx_qs = num_grps; /* one txq per int group */
 			num_rx_qs = num_grps; /* one rxq per int group */
 		} else { /* GFAR_MQ_POLLING */
-			num_tx_qs = tx_queues ? *tx_queues : 1;
-			num_rx_qs = rx_queues ? *rx_queues : 1;
+			u32 tx_queues, rx_queues;
+			int ret;
+
+			/* parse the num of HW tx and rx queues */
+			ret = of_property_read_u32(np, "fsl,num_tx_queues",
+						   &tx_queues);
+			num_tx_qs = ret ? 1 : tx_queues;
+			ret = of_property_read_u32(np, "fsl,num_rx_queues",
+						   &rx_queues);
+			num_rx_qs = ret ? 1 : rx_queues;
 		}
 	}
@@ -851,13 +865,17 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
 	if (err)
 		goto rx_alloc_failed;
 
+	err = of_property_read_string(np, "model", &model);
+	if (err) {
+		pr_err("Device model property missing, aborting\n");
+		goto rx_alloc_failed;
+	}
+
 	/* Init Rx queue filer rule set linked list */
 	INIT_LIST_HEAD(&priv->rx_list.list);
 	priv->rx_list.count = 0;
 	mutex_init(&priv->rx_queue_access);
 
-	model = of_get_property(np, "model", NULL);
-
 	for (i = 0; i < MAXGROUPS; i++)
 		priv->gfargrp[i].regs = NULL;
@@ -877,22 +895,22 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
 		goto err_grp_init;
 	}
 
-	stash = of_get_property(np, "bd-stash", NULL);
+	stash = of_find_property(np, "bd-stash", NULL);
 
 	if (stash) {
 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
 		priv->bd_stash_en = 1;
 	}
 
-	stash_len = of_get_property(np, "rx-stash-len", NULL);
+	err = of_property_read_u32(np, "rx-stash-len", &stash_len);
 
-	if (stash_len)
-		priv->rx_stash_size = *stash_len;
+	if (err == 0)
+		priv->rx_stash_size = stash_len;
 
-	stash_idx = of_get_property(np, "rx-stash-idx", NULL);
+	err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);
 
-	if (stash_idx)
-		priv->rx_stash_index = *stash_idx;
+	if (err == 0)
+		priv->rx_stash_index = stash_idx;
 
 	if (stash_len || stash_idx)
 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
@@ -919,15 +937,15 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
 			       FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
 			       FSL_GIANFAR_DEV_HAS_TIMER;
 
-	ctype = of_get_property(np, "phy-connection-type", NULL);
+	err = of_property_read_string(np, "phy-connection-type", &ctype);
 
 	/* We only care about rgmii-id.  The rest are autodetected */
-	if (ctype && !strcmp(ctype, "rgmii-id"))
+	if (err == 0 && !strcmp(ctype, "rgmii-id"))
 		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
 	else
 		priv->interface = PHY_INTERFACE_MODE_MII;
 
-	if (of_get_property(np, "fsl,magic-packet", NULL))
+	if (of_find_property(np, "fsl,magic-packet", NULL))
 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
 
 	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
@@ -1884,14 +1902,15 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
 		if (!tx_queue->tx_skbuff[i])
 			continue;
 
-		dma_unmap_single(priv->dev, txbdp->bufPtr,
-				 txbdp->length, DMA_TO_DEVICE);
+		dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
+				 be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
 		txbdp->lstatus = 0;
 		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
 		     j++) {
 			txbdp++;
-			dma_unmap_page(priv->dev, txbdp->bufPtr,
-				       txbdp->length, DMA_TO_DEVICE);
+			dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
+				       be16_to_cpu(txbdp->length),
+				       DMA_TO_DEVICE);
 		}
 		txbdp++;
 		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
@@ -1911,7 +1930,7 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
 	for (i = 0; i < rx_queue->rx_ring_size; i++) {
 		if (rx_queue->rx_skbuff[i]) {
-			dma_unmap_single(priv->dev, rxbdp->bufPtr,
+			dma_unmap_single(priv->dev, be32_to_cpu(rxbdp->bufPtr),
 					 priv->rx_buffer_size,
 					 DMA_FROM_DEVICE);
 			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
@@ -2167,16 +2186,16 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
 	 */
 	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
 		flags |= TXFCB_UDP;
-		fcb->phcs = udp_hdr(skb)->check;
+		fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
 	} else
-		fcb->phcs = tcp_hdr(skb)->check;
+		fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);
 
 	/* l3os is the distance between the start of the
 	 * frame (skb->data) and the start of the IP hdr.
 	 * l4os is the distance between the start of the
 	 * l3 hdr and the l4 hdr
 	 */
-	fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
+	fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
 	fcb->l4os = skb_network_header_len(skb);
 
 	fcb->flags = flags;
@@ -2185,7 +2204,7 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
 void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
 {
 	fcb->flags |= TXFCB_VLN;
-	fcb->vlctl = skb_vlan_tag_get(skb);
+	fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
 }
 
 static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
@@ -2298,7 +2317,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tx_queue->stats.tx_packets++;
 
 	txbdp = txbdp_start = tx_queue->cur_tx;
-	lstatus = txbdp->lstatus;
+	lstatus = be32_to_cpu(txbdp->lstatus);
 
 	/* Time stamp insertion requires one additional TxBD */
 	if (unlikely(do_tstamp))
@@ -2306,11 +2325,14 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 						 tx_queue->tx_ring_size);
 
 	if (nr_frags == 0) {
-		if (unlikely(do_tstamp))
-			txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
-							  TXBD_INTERRUPT);
-		else
+		if (unlikely(do_tstamp)) {
+			u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
+
+			lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+			txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
+		} else {
 			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+		}
 	} else {
 		/* Place the fragment addresses and lengths into the TxBDs */
 		for (i = 0; i < nr_frags; i++) {
@@ -2320,7 +2342,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			frag_len = skb_shinfo(skb)->frags[i].size;
 
-			lstatus = txbdp->lstatus | frag_len |
+			lstatus = be32_to_cpu(txbdp->lstatus) | frag_len |
 				  BD_LFLAG(TXBD_READY);
 
 			/* Handle the last BD specially */
@@ -2336,11 +2358,11 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 				goto dma_map_err;
 
 			/* set the TxBD length and buffer pointer */
-			txbdp->bufPtr = bufaddr;
-			txbdp->lstatus = lstatus;
+			txbdp->bufPtr = cpu_to_be32(bufaddr);
+			txbdp->lstatus = cpu_to_be32(lstatus);
 		}
 
-		lstatus = txbdp_start->lstatus;
+		lstatus = be32_to_cpu(txbdp_start->lstatus);
 	}
 
 	/* Add TxPAL between FCB and frame if required */
@@ -2388,7 +2410,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
 		goto dma_map_err;
 
-	txbdp_start->bufPtr = bufaddr;
+	txbdp_start->bufPtr = cpu_to_be32(bufaddr);
 
 	/* If time stamping is requested one additional TxBD must be set up. The
 	 * first TxBD points to the FCB and must have a data length of
@@ -2396,9 +2418,15 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * the full frame length.
 	 */
 	if (unlikely(do_tstamp)) {
-		txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_len;
-		txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
-					 (skb_headlen(skb) - fcb_len);
+		u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
+
+		bufaddr = be32_to_cpu(txbdp_start->bufPtr);
+		bufaddr += fcb_len;
+		lstatus_ts |= BD_LFLAG(TXBD_READY) |
+			      (skb_headlen(skb) - fcb_len);
+
+		txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
+		txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
 		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
 	} else {
 		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
@@ -2421,7 +2449,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	gfar_wmb();
 
-	txbdp_start->lstatus = lstatus;
+	txbdp_start->lstatus = cpu_to_be32(lstatus);
 
 	gfar_wmb(); /* force lstatus write before tx_skbuff */
@@ -2460,13 +2488,14 @@ dma_map_err:
 	if (do_tstamp)
 		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
 	for (i = 0; i < nr_frags; i++) {
-		lstatus = txbdp->lstatus;
+		lstatus = be32_to_cpu(txbdp->lstatus);
 		if (!(lstatus & BD_LFLAG(TXBD_READY)))
 			break;
 
-		txbdp->lstatus = lstatus & ~BD_LFLAG(TXBD_READY);
-		bufaddr = txbdp->bufPtr;
-		dma_unmap_page(priv->dev, bufaddr, txbdp->length,
+		lstatus &= ~BD_LFLAG(TXBD_READY);
+		txbdp->lstatus = cpu_to_be32(lstatus);
+		bufaddr = be32_to_cpu(txbdp->bufPtr);
+		dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
 			       DMA_TO_DEVICE);
 		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
 	}
@@ -2607,7 +2636,7 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
 
-		lstatus = lbdp->lstatus;
+		lstatus = be32_to_cpu(lbdp->lstatus);
 
 		/* Only clean completed frames */
 		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
@@ -2616,11 +2645,12 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
 			next = next_txbd(bdp, base, tx_ring_size);
-			buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN;
+			buflen = be16_to_cpu(next->length) +
+				 GMAC_FCB_LEN + GMAC_TXPAL_LEN;
 		} else
-			buflen = bdp->length;
+			buflen = be16_to_cpu(bdp->length);
 
-		dma_unmap_single(priv->dev, bdp->bufPtr,
+		dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
 				 buflen, DMA_TO_DEVICE);
 
 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
@@ -2631,17 +2661,18 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
 			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
 			skb_tstamp_tx(skb, &shhwtstamps);
-			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
+			gfar_clear_txbd_status(bdp);
 			bdp = next;
 		}
 
-		bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
+		gfar_clear_txbd_status(bdp);
 		bdp = next_txbd(bdp, base, tx_ring_size);
 
 		for (i = 0; i < frags; i++) {
-			dma_unmap_page(priv->dev, bdp->bufPtr,
-				       bdp->length, DMA_TO_DEVICE);
-			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
+			dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
+				       be16_to_cpu(bdp->length),
+				       DMA_TO_DEVICE);
+			gfar_clear_txbd_status(bdp);
 			bdp = next_txbd(bdp, base, tx_ring_size);
 		}
@@ -2798,13 +2829,13 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
 	 * were verified, then we tell the kernel that no
 	 * checksumming is necessary.  Otherwise, it is [FIXME]
 	 */
-	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
+	if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
+	    (RXFCB_CIP | RXFCB_CTU))
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 	else
 		skb_checksum_none_assert(skb);
 }
 
 /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
 static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
 			       int amount_pull, struct napi_struct *napi)
@@ -2846,8 +2877,9 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
 	 * RXFCB_VLN is pseudo randomly set.
 	 */
 	if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
-	    fcb->flags & RXFCB_VLN)
-		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), fcb->vlctl);
+	    be16_to_cpu(fcb->flags) & RXFCB_VLN)
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+				       be16_to_cpu(fcb->vlctl));
 
 	/* Send the packet up the stack */
 	napi_gro_receive(napi, skb);
@@ -2874,7 +2906,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 	amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;
 
-	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
+	while (!(be16_to_cpu(bdp->status) & RXBD_EMPTY) && rx_work_limit--) {
 		struct sk_buff *newskb;
 		dma_addr_t bufaddr;
@@ -2885,21 +2917,22 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
 
-		dma_unmap_single(priv->dev, bdp->bufPtr,
+		dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
 				 priv->rx_buffer_size, DMA_FROM_DEVICE);
 
-		if (unlikely(!(bdp->status & RXBD_ERR) &&
-			     bdp->length > priv->rx_buffer_size))
-			bdp->status = RXBD_LARGE;
+		if (unlikely(!(be16_to_cpu(bdp->status) & RXBD_ERR) &&
+			     be16_to_cpu(bdp->length) > priv->rx_buffer_size))
+			bdp->status = cpu_to_be16(RXBD_LARGE);
 
 		/* We drop the frame if we failed to allocate a new buffer */
-		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
-			     bdp->status & RXBD_ERR)) {
-			count_errors(bdp->status, dev);
+		if (unlikely(!newskb ||
+			     !(be16_to_cpu(bdp->status) & RXBD_LAST) ||
+			     be16_to_cpu(bdp->status) & RXBD_ERR)) {
+			count_errors(be16_to_cpu(bdp->status), dev);
 
 			if (unlikely(!newskb)) {
 				newskb = skb;
-				bufaddr = bdp->bufPtr;
+				bufaddr = be32_to_cpu(bdp->bufPtr);
 			} else if (skb)
 				dev_kfree_skb(skb);
 		} else {
@@ -2908,7 +2941,8 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 			howmany++;
 
 			if (likely(skb)) {
-				pkt_len = bdp->length - ETH_FCS_LEN;
+				pkt_len = be16_to_cpu(bdp->length) -
+					  ETH_FCS_LEN;
 				/* Remove the FCS from the packet length */
 				skb_put(skb, pkt_len);
 				rx_queue->stats.rx_bytes += pkt_len;

--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h

@@ -544,12 +544,12 @@ struct txbd8
 {
 	union {
 		struct {
-			u16	status;	/* Status Fields */
-			u16	length;	/* Buffer length */
+			__be16	status;	/* Status Fields */
+			__be16	length;	/* Buffer length */
 		};
-		u32 lstatus;
+		__be32 lstatus;
 	};
-	u32	bufPtr;	/* Buffer Pointer */
+	__be32	bufPtr;	/* Buffer Pointer */
 };
 
 struct txfcb {
@@ -557,28 +557,28 @@ struct txfcb {
 	u8	ptp;	/* Flag to enable tx timestamping */
 	u8	l4os;	/* Level 4 Header Offset */
 	u8	l3os;	/* Level 3 Header Offset */
-	u16	phcs;	/* Pseudo-header Checksum */
-	u16	vlctl;	/* VLAN control word */
+	__be16	phcs;	/* Pseudo-header Checksum */
+	__be16	vlctl;	/* VLAN control word */
 };
 
 struct rxbd8
 {
 	union {
 		struct {
-			u16	status;	/* Status Fields */
-			u16	length;	/* Buffer Length */
+			__be16	status;	/* Status Fields */
+			__be16	length;	/* Buffer Length */
 		};
-		u32 lstatus;
+		__be32 lstatus;
 	};
-	u32	bufPtr;	/* Buffer Pointer */
+	__be32	bufPtr;	/* Buffer Pointer */
 };
 
 struct rxfcb {
-	u16	flags;
+	__be16	flags;
 	u8	rq;	/* Receive Queue index */
 	u8	pro;	/* Layer 4 Protocol */
 	u16	reserved;
-	u16	vlctl;	/* VLAN control word */
+	__be16	vlctl;	/* VLAN control word */
 };
 
 struct gianfar_skb_cb {
@@ -1287,6 +1287,14 @@ static inline void gfar_wmb(void)
 #endif
 }
 
+static inline void gfar_clear_txbd_status(struct txbd8 *bdp)
+{
+	u32 lstatus = be32_to_cpu(bdp->lstatus);
+
+	lstatus &= BD_LFLAG(TXBD_WRAP);
+	bdp->lstatus = cpu_to_be32(lstatus);
+}
+
 irqreturn_t gfar_receive(int irq, void *dev_id);
 int startup_gfar(struct net_device *dev);
 void stop_gfar(struct net_device *dev);