[POWERPC] Use 4kB iommu pages even on 64kB-page systems
The 10Gigabit ethernet device drivers appear to be able to chew up all
256MB of TCE mappings on pSeries systems, as evidenced by numerous error
messages:

    iommu_alloc failed, tbl c0000000010d5c48 vaddr c0000000d875eff0 npages 1

Some experimentation indicates that this is essentially because one
1500-byte ethernet MTU gets mapped as a 64K DMA region when the large
64K pages are enabled.  Thus, it doesn't take much to exhaust all of the
available DMA mappings for a high-speed card.

This patch changes the iommu allocator to work with its own unique,
distinct page size.  Although the patch is long, it's actually quite
simple: it just #defines a distinct IOMMU_PAGE_SIZE and then uses it in
all the places that matter.

As a side effect, it also dramatically improves network performance on
platforms with H-calls on iommu translation inserts/removes (since we no
longer call it 16 times for a 1500-byte packet when the iommu HW is
still 4k).

In the future, we might want to make IOMMU_PAGE_SIZE a variable in the
iommu_table instance, thus allowing support for different HW page sizes
in the iommu itself.

Signed-off-by: Linas Vepstas <linas@austin.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Olof Johansson <olof@lixom.net>
Acked-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
commit 5d2efba64b (parent dd6c89f686), committed by Paul Mackerras
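The commit message says the patch "#defines a distinct IOMMU_PAGE_SIZE and then uses it in all the places that matter". The hunks below (apparently all from arch/powerpc/kernel/iommu.c, judging by the function names and line numbers) also rely on IOMMU_PAGE_SHIFT, IOMMU_PAGE_MASK, IOMMU_PAGE_ALIGN() and get_iommu_order(), so the companion header change presumably provides something along these lines. This is only a sketch of those definitions, not the patch's own header; the kernel version likely spells them with helpers such as ASM_CONST(), _ALIGN_UP() and __ilog2() rather than the portable forms used here:

    /* Sketch of the 4 kB IOMMU page constants the hunks below rely on. */
    #define IOMMU_PAGE_SHIFT        12
    #define IOMMU_PAGE_SIZE         (1UL << IOMMU_PAGE_SHIFT)               /* 4 kB */
    #define IOMMU_PAGE_MASK         (~(IOMMU_PAGE_SIZE - 1))
    #define IOMMU_PAGE_ALIGN(a)     (((a) + IOMMU_PAGE_SIZE - 1) & IOMMU_PAGE_MASK)

    /* Sketch of get_iommu_order(): smallest order such that 2^order IOMMU
     * pages cover "size" bytes; iommu_alloc_coherent() below passes it to
     * iommu_alloc() as the alignment order for the coherent mapping. */
    static inline unsigned int get_iommu_order(unsigned long size)
    {
            unsigned int order = 0;

            while ((IOMMU_PAGE_SIZE << order) < size)
                    order++;
            return order;
    }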
@@ -47,6 +47,17 @@ static int novmerge = 0;
 static int novmerge = 1;
 #endif
 
+static inline unsigned long iommu_num_pages(unsigned long vaddr,
+					    unsigned long slen)
+{
+	unsigned long npages;
+
+	npages = IOMMU_PAGE_ALIGN(vaddr + slen) - (vaddr & IOMMU_PAGE_MASK);
+	npages >>= IOMMU_PAGE_SHIFT;
+
+	return npages;
+}
+
 static int __init setup_iommu(char *str)
 {
 	if (!strcmp(str, "novmerge"))
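The iommu_num_pages() helper added above is the core of the change: every DMA length is now rounded to 4 kB IOMMU pages rather than 64 kB system pages before table entries are allocated. A hypothetical userspace check of that arithmetic, reusing the vaddr from the error message quoted in the commit text, shows what this buys for a single 1500-byte frame:

    /* Hypothetical check of the iommu_num_pages() arithmetic above,
     * comparing 4 kB IOMMU pages with the old 64 kB system-page rounding. */
    #include <stdio.h>

    static unsigned long num_pages(unsigned long vaddr, unsigned long slen,
                                   unsigned int shift)
    {
            unsigned long size = 1UL << shift, mask = ~(size - 1);

            /* same formula as iommu_num_pages(): round the end up, the start down */
            return (((vaddr + slen + size - 1) & mask) - (vaddr & mask)) >> shift;
    }

    int main(void)
    {
            unsigned long vaddr = 0xc0000000d875eff0UL;     /* vaddr from the log above */
            unsigned long slen  = 1500;                     /* one ethernet MTU */

            printf("4 kB IOMMU pages : %lu (maps %lu bytes)\n",
                   num_pages(vaddr, slen, 12), num_pages(vaddr, slen, 12) << 12);
            printf("64 kB system pages: %lu (maps %lu bytes)\n",
                   num_pages(vaddr, slen, 16), num_pages(vaddr, slen, 16) << 16);
            return 0;
    }

This prints 2 pages (8192 bytes) against 1 page (65536 bytes): each frame now consumes two 4 kB TCE entries out of the 256 MB DMA window instead of the sixteen 4 kB TCEs needed to back a whole 64 kB page, which is also where the reduction in H-calls described in the commit message comes from.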
@@ -178,10 +189,10 @@ static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
 	}
 
 	entry += tbl->it_offset;	/* Offset into real TCE table */
-	ret = entry << PAGE_SHIFT;	/* Set the return dma address */
+	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */
 
 	/* Put the TCEs in the HW table */
-	ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & PAGE_MASK,
+	ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK,
 			 direction);
 
 
@@ -203,7 +214,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 	unsigned long entry, free_entry;
 	unsigned long i;
 
-	entry = dma_addr >> PAGE_SHIFT;
+	entry = dma_addr >> IOMMU_PAGE_SHIFT;
 	free_entry = entry - tbl->it_offset;
 
 	if (((free_entry + npages) > tbl->it_size) ||
@@ -270,7 +281,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 	/* Init first segment length for backout at failure */
 	outs->dma_length = 0;
 
-	DBG("mapping %d elements:\n", nelems);
+	DBG("sg mapping %d elements:\n", nelems);
 
 	spin_lock_irqsave(&(tbl->it_lock), flags);
 
@@ -285,9 +296,8 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 		}
 		/* Allocate iommu entries for that segment */
 		vaddr = (unsigned long)page_address(s->page) + s->offset;
-		npages = PAGE_ALIGN(vaddr + slen) - (vaddr & PAGE_MASK);
-		npages >>= PAGE_SHIFT;
-		entry = iommu_range_alloc(tbl, npages, &handle, mask >> PAGE_SHIFT, 0);
+		npages = iommu_num_pages(vaddr, slen);
+		entry = iommu_range_alloc(tbl, npages, &handle, mask >> IOMMU_PAGE_SHIFT, 0);
 
 		DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
 
@@ -301,14 +311,14 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 
 		/* Convert entry to a dma_addr_t */
 		entry += tbl->it_offset;
-		dma_addr = entry << PAGE_SHIFT;
-		dma_addr |= s->offset;
+		dma_addr = entry << IOMMU_PAGE_SHIFT;
+		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);
 
-		DBG(" - %lx pages, entry: %lx, dma_addr: %lx\n",
+		DBG(" - %lu pages, entry: %lx, dma_addr: %lx\n",
 			    npages, entry, dma_addr);
 
 		/* Insert into HW table */
-		ppc_md.tce_build(tbl, entry, npages, vaddr & PAGE_MASK, direction);
+		ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK, direction);
 
 		/* If we are in an open segment, try merging */
 		if (segstart != s) {
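One detail in the hunk above is worth spelling out: dma_addr |= (s->offset & ~IOMMU_PAGE_MASK) replaces the plain dma_addr |= s->offset. With 64 kB system pages, s->offset (the byte offset into the struct page) can be much larger than one 4 kB IOMMU page, and the allocated entry already points at the 4 kB page containing vaddr, so only the sub-4 kB remainder of the offset belongs in the bus address. A small hypothetical check of that arithmetic (the addresses are made up):

    /* Hypothetical check (made-up addresses): why only the low 12 bits of
     * s->offset are OR'd into dma_addr once entries are in 4 kB units. */
    #include <assert.h>
    #include <stdio.h>

    #define IOMMU_PAGE_SHIFT 12
    #define IOMMU_PAGE_MASK  (~((1UL << IOMMU_PAGE_SHIFT) - 1))

    int main(void)
    {
            unsigned long page_addr = 0xc000000012340000UL; /* 64 kB-aligned page_address(s->page) */
            unsigned long offset    = 0x5ff0;               /* s->offset: bigger than one 4 kB page */
            unsigned long vaddr     = page_addr + offset;

            /* the table entry was allocated for the 4 kB page containing vaddr,
             * so the returned bus address already covers (vaddr & IOMMU_PAGE_MASK) */
            unsigned long page_part = vaddr & IOMMU_PAGE_MASK;
            unsigned long dma_off   = offset & ~IOMMU_PAGE_MASK;

            assert(page_part + dma_off == vaddr);
            /* OR-ing in the full s->offset (0x5ff0) would overshoot by 0x5000 */
            printf("sub-page offset: 0x%lx\n", dma_off);    /* prints 0xff0 */
            return 0;
    }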
@@ -323,7 +333,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 			DBG(" can't merge, new segment.\n");
 		} else {
 			outs->dma_length += s->length;
-			DBG(" merged, new len: %lx\n", outs->dma_length);
+			DBG(" merged, new len: %ux\n", outs->dma_length);
 		}
 	}
 
@@ -367,9 +377,8 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 		if (s->dma_length != 0) {
 			unsigned long vaddr, npages;
 
-			vaddr = s->dma_address & PAGE_MASK;
-			npages = (PAGE_ALIGN(s->dma_address + s->dma_length) - vaddr)
-				>> PAGE_SHIFT;
+			vaddr = s->dma_address & IOMMU_PAGE_MASK;
+			npages = iommu_num_pages(s->dma_address, s->dma_length);
 			__iommu_free(tbl, vaddr, npages);
 			s->dma_address = DMA_ERROR_CODE;
 			s->dma_length = 0;
@@ -398,8 +407,7 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 
 		if (sglist->dma_length == 0)
 			break;
-		npages = (PAGE_ALIGN(dma_handle + sglist->dma_length)
-			  - (dma_handle & PAGE_MASK)) >> PAGE_SHIFT;
+		npages = iommu_num_pages(dma_handle,sglist->dma_length);
 		__iommu_free(tbl, dma_handle, npages);
 		sglist++;
 	}
@@ -532,12 +540,11 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
 	BUG_ON(direction == DMA_NONE);
 
 	uaddr = (unsigned long)vaddr;
-	npages = PAGE_ALIGN(uaddr + size) - (uaddr & PAGE_MASK);
-	npages >>= PAGE_SHIFT;
+	npages = iommu_num_pages(uaddr, size);
 
 	if (tbl) {
 		dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
-					 mask >> PAGE_SHIFT, 0);
+					 mask >> IOMMU_PAGE_SHIFT, 0);
 		if (dma_handle == DMA_ERROR_CODE) {
 			if (printk_ratelimit()) {
 				printk(KERN_INFO "iommu_alloc failed, "
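The repeated mask >> IOMMU_PAGE_SHIFT conversions (here and in iommu_map_sg() above) follow from the same unit change: the mask argument is the device's DMA address limit, and it is passed down as a cap on the table entry numbers the allocator may hand out, where an entry number now corresponds to bus_address >> IOMMU_PAGE_SHIFT. Keeping the old >> PAGE_SHIFT on a 64 kB-page kernel would make that cap 16 times smaller than necessary. A hypothetical illustration for a 32-bit DMA mask:

    /* Hypothetical illustration: a device DMA mask expressed as a highest
     * usable table entry, before and after the unit change. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long long dma_mask = 0xffffffffULL;    /* 32-bit DMA-capable device */

            printf("entry limit, 4 kB IOMMU pages (mask >> 12): %llu\n", dma_mask >> 12);
            printf("entry limit, 64 kB PAGE_SHIFT (mask >> 16): %llu\n", dma_mask >> 16);
            return 0;
    }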
@@ -545,7 +552,7 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
 					tbl, vaddr, npages);
 			}
 		} else
-			dma_handle |= (uaddr & ~PAGE_MASK);
+			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
 	}
 
 	return dma_handle;
@@ -554,11 +561,14 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
 void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
 		size_t size, enum dma_data_direction direction)
 {
+	unsigned int npages;
+
 	BUG_ON(direction == DMA_NONE);
 
-	if (tbl)
-		iommu_free(tbl, dma_handle, (PAGE_ALIGN(dma_handle + size) -
-			(dma_handle & PAGE_MASK)) >> PAGE_SHIFT);
+	if (tbl) {
+		npages = iommu_num_pages(dma_handle, size);
+		iommu_free(tbl, dma_handle, npages);
+	}
 }
 
 /* Allocates a contiguous real buffer and creates mappings over it.
@@ -570,11 +580,11 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
 {
 	void *ret = NULL;
 	dma_addr_t mapping;
-	unsigned int npages, order;
+	unsigned int order;
+	unsigned int nio_pages, io_order;
 	struct page *page;
 
 	size = PAGE_ALIGN(size);
-	npages = size >> PAGE_SHIFT;
 	order = get_order(size);
 
 	/*
@@ -598,8 +608,10 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
 	memset(ret, 0, size);
 
 	/* Set up tces to cover the allocated range */
-	mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL,
-			      mask >> PAGE_SHIFT, order);
+	nio_pages = size >> IOMMU_PAGE_SHIFT;
+	io_order = get_iommu_order(size);
+	mapping = iommu_alloc(tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
+			      mask >> IOMMU_PAGE_SHIFT, io_order);
 	if (mapping == DMA_ERROR_CODE) {
 		free_pages((unsigned long)ret, order);
 		return NULL;
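In iommu_alloc_coherent() the buffer itself is still allocated and freed in system pages (the order value from get_order() is untouched), while the mapping is now sized in 4 kB IOMMU pages (nio_pages and io_order). On a 64 kB-page kernel the two granularities differ by a factor of 16. A hypothetical arithmetic check, assuming 64 kB kernel pages and the get_iommu_order() semantics sketched earlier:

    /* Hypothetical check of the split between allocation granularity
     * (system pages) and mapping granularity (IOMMU pages). */
    #include <stdio.h>

    #define SYS_PAGE_SHIFT    16    /* 64 kB kernel pages assumed */
    #define IOMMU_PAGE_SHIFT  12    /* 4 kB IOMMU pages */

    static unsigned int order_for(unsigned long size, unsigned int shift)
    {
            unsigned int order = 0;

            while ((1UL << (shift + order)) < size)
                    order++;
            return order;
    }

    int main(void)
    {
            unsigned long size = 16384;     /* requested coherent buffer */

            /* size = PAGE_ALIGN(size) on a 64 kB kernel */
            size = (size + (1UL << SYS_PAGE_SHIFT) - 1) & ~((1UL << SYS_PAGE_SHIFT) - 1);

            printf("get_order() for free_pages     : %u\n", order_for(size, SYS_PAGE_SHIFT));
            printf("nio_pages (4 kB TCE entries)   : %lu\n", size >> IOMMU_PAGE_SHIFT);
            printf("io_order                       : %u\n", order_for(size, IOMMU_PAGE_SHIFT));
            return 0;
    }

For this 16 kB request the allocation is a single order-0 system page (64 kB), but it is now mapped as 16 separate 4 kB TCE entries rather than one 64 kB unit.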
@@ -611,12 +623,13 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
 void iommu_free_coherent(struct iommu_table *tbl, size_t size,
 			 void *vaddr, dma_addr_t dma_handle)
 {
-	unsigned int npages;
-
 	if (tbl) {
+		unsigned int nio_pages;
+
+		size = PAGE_ALIGN(size);
+		nio_pages = size >> IOMMU_PAGE_SHIFT;
+		iommu_free(tbl, dma_handle, nio_pages);
 		size = PAGE_ALIGN(size);
-		npages = size >> PAGE_SHIFT;
-		iommu_free(tbl, dma_handle, npages);
 		free_pages((unsigned long)vaddr, get_order(size));
 	}
 }