sb1250: use netdev_alloc_skb
Use netdev_alloc_skb. This sets skb->dev and allows arch-specific allocation. Also simplify and clean up the alignment code.

Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: Maciej W. Rozycki <macro@linux-mips.org>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
commit 789585e968
parent 40ba182e3c
committed by Jeff Garzik
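For readers not familiar with the APIs touched here, below is a minimal sketch (not part of the patch) of the receive-buffer allocation pattern the driver switches to: netdev_alloc_skb() ties the skb to its net_device and allows arch-specific allocation, while PTR_ALIGN() plus skb_reserve() replace the old open-coded rounding. The helper name my_rx_alloc() is hypothetical; the constants simply mirror the driver's usage in the hunks that follow.

#include <linux/cache.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define ENET_PACKET_SIZE 1518   /* same maximum frame size as the driver */

/* Hypothetical helper, assuming a cache-line-aligned receive buffer is wanted. */
static struct sk_buff *my_rx_alloc(struct net_device *dev)
{
        struct sk_buff *skb;
        unsigned char *aligned;

        /* netdev_alloc_skb() sets skb->dev and allows arch-specific allocation. */
        skb = netdev_alloc_skb(dev, ENET_PACKET_SIZE +
                                    SMP_CACHE_BYTES * 2 + NET_IP_ALIGN);
        if (!skb)
                return NULL;

        /* Round skb->data up to a cache-line boundary, then add NET_IP_ALIGN
         * so the IP header ends up naturally aligned. */
        aligned = PTR_ALIGN(skb->data, SMP_CACHE_BYTES);
        skb_reserve(skb, aligned - skb->data + NET_IP_ALIGN);

        return skb;
}

The extra SMP_CACHE_BYTES * 2 of slack leaves room both for the alignment step and for the hardware's rounding of the receive size up to whole cache blocks, as seen in the NUMCACHEBLKS() usage below.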
@@ -179,7 +179,6 @@ enum sbmac_state {
 #define SBMAC_MAX_TXDESCR 256
 #define SBMAC_MAX_RXDESCR 256
 
-#define ETHER_ALIGN 2
 #define ETHER_ADDR_LEN 6
 #define ENET_PACKET_SIZE 1518
 /*#define ENET_PACKET_SIZE 9216 */
@@ -262,8 +261,6 @@ struct sbmac_softc {
         spinlock_t sbm_lock;           /* spin lock */
         int sbm_devflags;              /* current device flags */
 
-        int sbm_buffersize;
-
         /*
          * Controller-specific things
          */
@@ -305,10 +302,11 @@ struct sbmac_softc {
 static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan,
                           int txrx, int maxdescr);
 static void sbdma_channel_start(struct sbmacdma *d, int rxtx);
-static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *m);
+static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d,
+                               struct sk_buff *m);
 static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *m);
 static void sbdma_emptyring(struct sbmacdma *d);
-static void sbdma_fillring(struct sbmacdma *d);
+static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d);
 static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d,
                             int work_to_do, int poll);
 static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d,
@@ -777,16 +775,13 @@ static void sbdma_channel_stop(struct sbmacdma *d)
         d->sbdma_remptr = NULL;
 }
 
-static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset)
+static inline void sbdma_align_skb(struct sk_buff *skb,
+                                   unsigned int power2, unsigned int offset)
 {
-        unsigned long addr;
-        unsigned long newaddr;
+        unsigned char *addr = skb->data;
+        unsigned char *newaddr = PTR_ALIGN(addr, power2);
 
-        addr = (unsigned long) skb->data;
-
-        newaddr = (addr + power2 - 1) & ~(power2 - 1);
-
-        skb_reserve(skb,newaddr-addr+offset);
+        skb_reserve(skb, newaddr - addr + offset);
 }
 
 
@@ -797,6 +792,7 @@ static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset)
  *  this queues a buffer for inbound packets.
  *
  *  Input parameters:
+ *         sc - softc structure
  *         d - DMA channel descriptor
  *         sb - sk_buff to add, or NULL if we should allocate one
  *
@@ -806,8 +802,10 @@ static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset)
  ********************************************************************* */
 
 
-static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *sb)
+static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d,
+                               struct sk_buff *sb)
 {
+        struct net_device *dev = sc->sbm_dev;
         struct sbdmadscr *dsc;
         struct sbdmadscr *nextdsc;
         struct sk_buff *sb_new = NULL;
@@ -848,14 +846,16 @@ static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *sb)
          */
 
         if (sb == NULL) {
-                sb_new = dev_alloc_skb(ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN);
+                sb_new = netdev_alloc_skb(dev, ENET_PACKET_SIZE +
+                                               SMP_CACHE_BYTES * 2 +
+                                               NET_IP_ALIGN);
                 if (sb_new == NULL) {
                         pr_info("%s: sk_buff allocation failed\n",
                                d->sbdma_eth->sbm_dev->name);
                         return -ENOBUFS;
                 }
 
-                sbdma_align_skb(sb_new, SMP_CACHE_BYTES, ETHER_ALIGN);
+                sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN);
         }
         else {
                 sb_new = sb;
@@ -874,10 +874,10 @@ static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *sb)
          * Do not interrupt per DMA transfer.
          */
         dsc->dscr_a = virt_to_phys(sb_new->data) |
-                V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) | 0;
+                V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) | 0;
 #else
         dsc->dscr_a = virt_to_phys(sb_new->data) |
-                V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) |
+                V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) |
                 M_DMA_DSCRA_INTERRUPT;
 #endif
 
@@ -1032,18 +1032,19 @@ static void sbdma_emptyring(struct sbmacdma *d)
  *  with sk_buffs
  *
  *  Input parameters:
+ *         sc - softc structure
  *         d - DMA channel
  *
  *  Return value:
  *         nothing
  ********************************************************************* */
 
-static void sbdma_fillring(struct sbmacdma *d)
+static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d)
 {
         int idx;
 
-        for (idx = 0; idx < SBMAC_MAX_RXDESCR-1; idx++) {
-                if (sbdma_add_rcvbuffer(d,NULL) != 0)
+        for (idx = 0; idx < SBMAC_MAX_RXDESCR - 1; idx++) {
+                if (sbdma_add_rcvbuffer(sc, d, NULL) != 0)
                         break;
         }
 }
@@ -1159,10 +1160,11 @@ again:
                  * packet and put it right back on the receive ring.
                  */
 
-                if (unlikely (sbdma_add_rcvbuffer(d,NULL) ==
+                if (unlikely(sbdma_add_rcvbuffer(sc, d, NULL) ==
                               -ENOBUFS)) {
                         dev->stats.rx_dropped++;
-                        sbdma_add_rcvbuffer(d,sb); /* re-add old buffer */
+                        /* Re-add old buffer */
+                        sbdma_add_rcvbuffer(sc, d, sb);
                         /* No point in continuing at the moment */
                         printk(KERN_ERR "dropped packet (1)\n");
                         d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
@@ -1212,7 +1214,7 @@ again:
                          * put it back on the receive ring.
                          */
                         dev->stats.rx_errors++;
-                        sbdma_add_rcvbuffer(d,sb);
+                        sbdma_add_rcvbuffer(sc, d, sb);
                 }
 
 
@@ -1570,7 +1572,7 @@ static void sbmac_channel_start(struct sbmac_softc *s)
          * Fill the receive ring
          */
 
-        sbdma_fillring(&(s->sbm_rxdma));
+        sbdma_fillring(s, &(s->sbm_rxdma));
 
         /*
          * Turn on the rest of the bits in the enable register
@@ -2312,13 +2314,6 @@ static int sbmac_init(struct platform_device *pldev, long long base)
                 dev->dev_addr[i] = eaddr[i];
         }
 
-
-        /*
-         * Init packet size
-         */
-
-        sc->sbm_buffersize = ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN;
-
         /*
          * Initialize context (get pointers to registers and stuff), then
          * allocate the memory for the descriptor tables.