[SPARC64]: Add timeouts to streaming buffer synchronization.
If some hardware error occurs and the flush flag never updates, we will hang forever in these routines. Add a timeout, and print out a diagnostic if it is reached.

Signed-off-by: David S. Miller <davem@davemloft.net>
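As a quick illustration of the pattern this patch introduces in pci_strbuf_flush() and sbus_strbuf_flush(), here is a minimal user-space sketch of a bounded flush-flag poll. It is an illustrative stand-in, not kernel code: read_flushflag(), wait_for_flush(), the warning text, and usleep(10) are hypothetical substitutes for the real flush-flag read, the KERN_WARNING message, and udelay(10).

/*
 * Minimal sketch (not kernel code) of the bounded-poll-with-warning
 * pattern this patch adds.  read_flushflag() stands in for re-reading
 * the DMA-updated flush flag; usleep(10) stands in for udelay(10).
 */
#include <stdio.h>
#include <unistd.h>

static volatile unsigned long flushflag;

static unsigned long read_flushflag(void)
{
	return flushflag;	/* hardware would have set this via DMA */
}

static void wait_for_flush(unsigned int vaddr)
{
	int limit = 10000;	/* 10000 polls x 10us = roughly 100ms */

	while (read_flushflag() == 0UL) {
		limit--;
		if (!limit)
			break;	/* give up instead of hanging forever */
		usleep(10);
	}
	if (!limit)
		fprintf(stderr, "strbuf flush timeout vaddr[%08x]\n", vaddr);
}

int main(void)
{
	flushflag = 1UL;	/* pretend the streaming buffer drained */
	wait_for_flush(0xdeadb000u);
	return 0;
}

The same shape appears in each loop the patch touches: the previously unbounded wait now decrements a counter on every pass, and a counter that reaches zero trades the silent hang for a warning diagnostic.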
@@ -8,6 +8,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/delay.h>
 
 #include <asm/pbm.h>
 
@@ -379,6 +380,54 @@ bad:
 	return PCI_DMA_ERROR_CODE;
 }
 
+static void pci_strbuf_flush(struct pci_strbuf *strbuf, struct pci_iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages)
+{
+	int limit;
+
+	PCI_STC_FLUSHFLAG_INIT(strbuf);
+	if (strbuf->strbuf_ctxflush &&
+	    iommu->iommu_ctxflush) {
+		unsigned long matchreg, flushreg;
+
+		flushreg = strbuf->strbuf_ctxflush;
+		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
+
+		limit = 10000;
+		do {
+			pci_iommu_write(flushreg, ctx);
+			udelay(10);
+			limit--;
+			if (!limit)
+				break;
+		} while(((long)pci_iommu_read(matchreg)) < 0L);
+		if (!limit)
+			printk(KERN_WARNING "pci_strbuf_flush: ctx flush "
+			       "timeout vaddr[%08x] ctx[%lx]\n",
+			       vaddr, ctx);
+	} else {
+		unsigned long i;
+
+		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
+			pci_iommu_write(strbuf->strbuf_pflush, vaddr);
+	}
+
+	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
+	(void) pci_iommu_read(iommu->write_complete_reg);
+
+	limit = 10000;
+	while (!PCI_STC_FLUSHFLAG_SET(strbuf)) {
+		limit--;
+		if (!limit)
+			break;
+		udelay(10);
+		membar("#LoadLoad");
+	}
+	if (!limit)
+		printk(KERN_WARNING "pci_strbuf_flush: flushflag timeout "
+		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
+		       vaddr, ctx, npages);
+}
+
 /* Unmap a single streaming mode DMA translation. */
 void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
 {
@@ -386,7 +435,7 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
 	struct pci_iommu *iommu;
 	struct pci_strbuf *strbuf;
 	iopte_t *base;
-	unsigned long flags, npages, i, ctx;
+	unsigned long flags, npages, ctx;
 
 	if (direction == PCI_DMA_NONE)
 		BUG();
@@ -414,29 +463,8 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
 		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
 
 	/* Step 1: Kick data out of streaming buffers if necessary. */
-	if (strbuf->strbuf_enabled) {
-		u32 vaddr = bus_addr;
-
-		PCI_STC_FLUSHFLAG_INIT(strbuf);
-		if (strbuf->strbuf_ctxflush &&
-		    iommu->iommu_ctxflush) {
-			unsigned long matchreg, flushreg;
-
-			flushreg = strbuf->strbuf_ctxflush;
-			matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-			do {
-				pci_iommu_write(flushreg, ctx);
-			} while(((long)pci_iommu_read(matchreg)) < 0L);
-		} else {
-			for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
-				pci_iommu_write(strbuf->strbuf_pflush, vaddr);
-		}
-
-		pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-		(void) pci_iommu_read(iommu->write_complete_reg);
-		while (!PCI_STC_FLUSHFLAG_SET(strbuf))
-			membar("#LoadLoad");
-	}
+	if (strbuf->strbuf_enabled)
+		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
 
 	/* Step 2: Clear out first TSB entry. */
 	iopte_make_dummy(iommu, base);
@@ -647,29 +675,8 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
 		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
 
 	/* Step 1: Kick data out of streaming buffers if necessary. */
-	if (strbuf->strbuf_enabled) {
-		u32 vaddr = (u32) bus_addr;
-
-		PCI_STC_FLUSHFLAG_INIT(strbuf);
-		if (strbuf->strbuf_ctxflush &&
-		    iommu->iommu_ctxflush) {
-			unsigned long matchreg, flushreg;
-
-			flushreg = strbuf->strbuf_ctxflush;
-			matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-			do {
-				pci_iommu_write(flushreg, ctx);
-			} while(((long)pci_iommu_read(matchreg)) < 0L);
-		} else {
-			for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
-				pci_iommu_write(strbuf->strbuf_pflush, vaddr);
-		}
-
-		pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-		(void) pci_iommu_read(iommu->write_complete_reg);
-		while (!PCI_STC_FLUSHFLAG_SET(strbuf))
-			membar("#LoadLoad");
-	}
+	if (strbuf->strbuf_enabled)
+		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
 
 	/* Step 2: Clear out first TSB entry. */
 	iopte_make_dummy(iommu, base);
@@ -715,28 +722,7 @@ void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size
 	}
 
 	/* Step 2: Kick data out of streaming buffers. */
-	PCI_STC_FLUSHFLAG_INIT(strbuf);
-	if (iommu->iommu_ctxflush &&
-	    strbuf->strbuf_ctxflush) {
-		unsigned long matchreg, flushreg;
-
-		flushreg = strbuf->strbuf_ctxflush;
-		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-		do {
-			pci_iommu_write(flushreg, ctx);
-		} while(((long)pci_iommu_read(matchreg)) < 0L);
-	} else {
-		unsigned long i;
-
-		for (i = 0; i < npages; i++, bus_addr += IO_PAGE_SIZE)
-			pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
-	}
-
-	/* Step 3: Perform flush synchronization sequence. */
-	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-	(void) pci_iommu_read(iommu->write_complete_reg);
-	while (!PCI_STC_FLUSHFLAG_SET(strbuf))
-		membar("#LoadLoad");
+	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
@@ -749,7 +735,8 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, i
 	struct pcidev_cookie *pcp;
 	struct pci_iommu *iommu;
 	struct pci_strbuf *strbuf;
-	unsigned long flags, ctx;
+	unsigned long flags, ctx, npages, i;
+	u32 bus_addr;
 
 	pcp = pdev->sysdata;
 	iommu = pcp->pbm->iommu;
@@ -772,36 +759,14 @@
 	}
 
 	/* Step 2: Kick data out of streaming buffers. */
-	PCI_STC_FLUSHFLAG_INIT(strbuf);
-	if (iommu->iommu_ctxflush &&
-	    strbuf->strbuf_ctxflush) {
-		unsigned long matchreg, flushreg;
-
-		flushreg = strbuf->strbuf_ctxflush;
-		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-		do {
-			pci_iommu_write(flushreg, ctx);
-		} while (((long)pci_iommu_read(matchreg)) < 0L);
-	} else {
-		unsigned long i, npages;
-		u32 bus_addr;
-
-		bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
-
-		for(i = 1; i < nelems; i++)
-			if (!sglist[i].dma_length)
-				break;
-		i--;
-		npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - bus_addr) >> IO_PAGE_SHIFT;
-		for (i = 0; i < npages; i++, bus_addr += IO_PAGE_SIZE)
-			pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
-	}
-
-	/* Step 3: Perform flush synchronization sequence. */
-	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-	(void) pci_iommu_read(iommu->write_complete_reg);
-	while (!PCI_STC_FLUSHFLAG_SET(strbuf))
-		membar("#LoadLoad");
+	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
+	for(i = 1; i < nelems; i++)
+		if (!sglist[i].dma_length)
+			break;
+	i--;
+	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
+		  - bus_addr) >> IO_PAGE_SHIFT;
+	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
@@ -117,19 +117,34 @@ static void iommu_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages
 
 #define STRBUF_TAG_VALID	0x02UL
 
-static void strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
+static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
 {
+	unsigned long n;
+	int limit;
+
 	iommu->strbuf_flushflag = 0UL;
-	while (npages--)
-		upa_writeq(base + (npages << IO_PAGE_SHIFT),
+	n = npages;
+	while (n--)
+		upa_writeq(base + (n << IO_PAGE_SHIFT),
 			   iommu->strbuf_regs + STRBUF_PFLUSH);
 
 	/* Whoopee cushion! */
 	upa_writeq(__pa(&iommu->strbuf_flushflag),
 		   iommu->strbuf_regs + STRBUF_FSYNC);
 	upa_readq(iommu->sbus_control_reg);
-	while (iommu->strbuf_flushflag == 0UL)
+
+	limit = 10000;
+	while (iommu->strbuf_flushflag == 0UL) {
+		limit--;
+		if (!limit)
+			break;
+		udelay(10);
 		membar("#LoadLoad");
+	}
+	if (!limit)
+		printk(KERN_WARNING "sbus_strbuf_flush: flushflag timeout "
+		       "vaddr[%08x] npages[%ld]\n",
+		       base, npages);
 }
 
 static iopte_t *alloc_streaming_cluster(struct sbus_iommu *iommu, unsigned long npages)
@@ -406,7 +421,7 @@ void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t dma_addr, size_t size,
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	free_streaming_cluster(iommu, dma_base, size >> IO_PAGE_SHIFT);
-	strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -569,7 +584,7 @@ void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int
 	iommu = sdev->bus->iommu;
 	spin_lock_irqsave(&iommu->lock, flags);
 	free_streaming_cluster(iommu, dvma_base, size >> IO_PAGE_SHIFT);
-	strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -581,7 +596,7 @@ void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t base, size_t
 	size = (IO_PAGE_ALIGN(base + size) - (base & IO_PAGE_MASK));
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -605,7 +620,7 @@ void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sg, int
 	size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - base;
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }