x86: code cleanups in arch/x86/kernel/pci-gart_64.c
code cleanups:

                                   errors   lines of code   errors/KLOC
 arch/x86/kernel/pci-gart_64.c        183             748         244.6
 arch/x86/kernel/pci-gart_64.c          0             790             0

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
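One of the cleanups in the diff below rewrites the SET_LEAK()/CLEAR_LEAK() helpers from bare if-statement macros into do { } while (0) blocks. As an illustrative sketch (not part of the commit, with made-up names log_enabled/LOG_BAD/LOG_GOOD), the reason for that idiom is that a bare-if macro steals a caller's else clause, while the do/while form behaves like one ordinary statement:

	#include <stdio.h>

	static int log_enabled = 1;

	/* Bare-if macro, like the old SET_LEAK(): the caller's "else" below
	 * binds to this hidden "if", not to the caller's own "if". */
	#define LOG_BAD(x)	if (log_enabled) printf("log %d\n", (x))

	/* do { } while (0) form, like the cleaned-up macros: acts as a
	 * single statement and requires a terminating semicolon. */
	#define LOG_GOOD(x)	do { if (log_enabled) printf("log %d\n", (x)); } while (0)

	int main(void)
	{
		int do_log = 0;

		if (do_log)
			LOG_BAD(1);
		else
			printf("not logging\n");	/* never runs: the else pairs with the macro's if */

		if (do_log)
			LOG_GOOD(2);
		else
			printf("not logging\n");	/* runs as expected */

		return 0;
	}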
@@ -42,16 +42,19 @@ static unsigned long iommu_pages; /* .. and in pages */
 
 static u32 *iommu_gatt_base;		/* Remapping table */
 
-/* If this is disabled the IOMMU will use an optimized flushing strategy
-   of only flushing when an mapping is reused. With it true the GART is flushed
-   for every mapping. Problem is that doing the lazy flush seems to trigger
-   bugs with some popular PCI cards, in particular 3ware (but has been also
-   also seen with Qlogic at least). */
+/*
+ * If this is disabled the IOMMU will use an optimized flushing strategy
+ * of only flushing when an mapping is reused. With it true the GART is
+ * flushed for every mapping. Problem is that doing the lazy flush seems
+ * to trigger bugs with some popular PCI cards, in particular 3ware (but
+ * has been also also seen with Qlogic at least).
+ */
 int iommu_fullflush = 1;
 
-/* Allocation bitmap for the remapping area */
+/* Allocation bitmap for the remapping area: */
 static DEFINE_SPINLOCK(iommu_bitmap_lock);
-static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */
+/* Guarded by iommu_bitmap_lock: */
+static unsigned long *iommu_gart_bitmap;
 
 static u32 gart_unmapped_entry;
 
@@ -61,7 +64,7 @@ static u32 gart_unmapped_entry;
 	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
 #define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
 
-#define to_pages(addr,size) \
+#define to_pages(addr, size) \
 	(round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
 
 #define EMERGENCY_PAGES 32 /* = 128KB */
@@ -84,10 +87,12 @@ static unsigned long alloc_iommu(int size)
 	unsigned long offset, flags;
 
 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
-	offset = find_next_zero_string(iommu_gart_bitmap,next_bit,iommu_pages,size);
+	offset = find_next_zero_string(iommu_gart_bitmap, next_bit,
+				       iommu_pages, size);
 	if (offset == -1) {
 		need_flush = 1;
-		offset = find_next_zero_string(iommu_gart_bitmap,0,iommu_pages,size);
+		offset = find_next_zero_string(iommu_gart_bitmap, 0,
+					       iommu_pages, size);
 	}
 	if (offset != -1) {
 		set_bit_string(iommu_gart_bitmap, offset, size);
@@ -100,12 +105,14 @@ static unsigned long alloc_iommu(int size)
 	if (iommu_fullflush)
 		need_flush = 1;
 	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
+
 	return offset;
 }
 
 static void free_iommu(unsigned long offset, int size)
 {
 	unsigned long flags;
+
 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
 	__clear_bit_string(iommu_gart_bitmap, offset, size);
 	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
@@ -117,6 +124,7 @@ static void free_iommu(unsigned long offset, int size)
 static void flush_gart(void)
 {
 	unsigned long flags;
+
 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
 	if (need_flush) {
 		k8_flush_garts();
@@ -127,34 +135,46 @@ static void flush_gart(void)
 
 #ifdef CONFIG_IOMMU_LEAK
 
-#define SET_LEAK(x) if (iommu_leak_tab) \
-			iommu_leak_tab[x] = __builtin_return_address(0);
-#define CLEAR_LEAK(x) if (iommu_leak_tab) \
-		iommu_leak_tab[x] = NULL;
+#define SET_LEAK(x)							\
+	do {								\
+		if (iommu_leak_tab)					\
+			iommu_leak_tab[x] = __builtin_return_address(0);\
+	} while (0)
+
+#define CLEAR_LEAK(x)							\
+	do {								\
+		if (iommu_leak_tab)					\
+			iommu_leak_tab[x] = NULL;			\
+	} while (0)
 
 /* Debugging aid for drivers that don't free their IOMMU tables */
 static void **iommu_leak_tab;
 static int leak_trace;
 static int iommu_leak_pages = 20;
+
 static void dump_leak(void)
 {
 	int i;
 	static int dump;
-	if (dump || !iommu_leak_tab) return;
+
+	if (dump || !iommu_leak_tab)
+		return;
 	dump = 1;
-	show_stack(NULL,NULL);
-	/* Very crude. dump some from the end of the table too */
-	printk("Dumping %d pages from end of IOMMU:\n", iommu_leak_pages);
-	for (i = 0; i < iommu_leak_pages; i+=2) {
-		printk("%lu: ", iommu_pages-i);
+	show_stack(NULL, NULL);
+
+	/* Very crude. dump some from the end of the table too */
+	printk(KERN_DEBUG "Dumping %d pages from end of IOMMU:\n",
+	       iommu_leak_pages);
+	for (i = 0; i < iommu_leak_pages; i += 2) {
+		printk(KERN_DEBUG "%lu: ", iommu_pages-i);
 		printk_address((unsigned long) iommu_leak_tab[iommu_pages-i]);
-		printk("%c", (i+1)%2 == 0 ? '\n' : ' ');
+		printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' ');
 	}
-	printk("\n");
+	printk(KERN_DEBUG "\n");
 }
 #else
-#define SET_LEAK(x)
-#define CLEAR_LEAK(x)
+# define SET_LEAK(x)
+# define CLEAR_LEAK(x)
 #endif
 
 static void iommu_full(struct device *dev, size_t size, int dir)
@@ -177,29 +197,34 @@ static void iommu_full(struct device *dev, size_t size, int dir)
 		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
 			panic("PCI-DMA: Memory would be corrupted\n");
 		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
-			panic(KERN_ERR "PCI-DMA: Random memory would be DMAed\n");
+			panic(KERN_ERR
+				"PCI-DMA: Random memory would be DMAed\n");
 	}
-
 #ifdef CONFIG_IOMMU_LEAK
 	dump_leak();
 #endif
 }
 
-static inline int need_iommu(struct device *dev, unsigned long addr, size_t size)
+static inline int
+need_iommu(struct device *dev, unsigned long addr, size_t size)
 {
 	u64 mask = *dev->dma_mask;
 	int high = addr + size > mask;
 	int mmu = high;
+
 	if (force_iommu)
 		mmu = 1;
+
 	return mmu;
 }
 
-static inline int nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
+static inline int
+nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
 {
 	u64 mask = *dev->dma_mask;
 	int high = addr + size > mask;
 	int mmu = high;
+
 	return mmu;
 }
 
@@ -212,6 +237,7 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
 	unsigned long npages = to_pages(phys_mem, size);
 	unsigned long iommu_page = alloc_iommu(npages);
 	int i;
+
 	if (iommu_page == -1) {
 		if (!nonforced_iommu(dev, phys_mem, size))
 			return phys_mem;
@@ -229,16 +255,19 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
 	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
 }
 
-static dma_addr_t gart_map_simple(struct device *dev, char *buf,
-				 size_t size, int dir)
+static dma_addr_t
+gart_map_simple(struct device *dev, char *buf, size_t size, int dir)
 {
 	dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir);
+
 	flush_gart();
+
 	return map;
 }
 
 /* Map a single area into the IOMMU */
-static dma_addr_t gart_map_single(struct device *dev, void *addr, size_t size, int dir)
+static dma_addr_t
+gart_map_single(struct device *dev, void *addr, size_t size, int dir)
 {
 	unsigned long phys_mem, bus;
 
@@ -250,6 +279,7 @@ static dma_addr_t gart_map_single(struct device *dev, void *addr, size_t size, i
 		return phys_mem;
 
 	bus = gart_map_simple(dev, addr, size, dir);
+
 	return bus;
 }
 
@@ -266,6 +296,7 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
 	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
 	    dma_addr >= iommu_bus_base + iommu_size)
 		return;
+
 	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
 	npages = to_pages(dma_addr, size);
 	for (i = 0; i < npages; i++) {
@@ -278,7 +309,8 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
 /*
  * Wrapper for pci_unmap_single working with scatterlists.
  */
-static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
+static void
+gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 {
 	struct scatterlist *s;
 	int i;
@@ -303,6 +335,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
 
 	for_each_sg(sg, s, nents, i) {
 		unsigned long addr = sg_phys(s);
+
 		if (nonforced_iommu(dev, addr, s->length)) {
 			addr = dma_map_area(dev, addr, s->length, dir);
 			if (addr == bad_dma_address) {
@@ -317,6 +350,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
 		s->dma_length = s->length;
 	}
 	flush_gart();
+
 	return nents;
 }
 
@@ -355,11 +389,12 @@ static int __dma_map_cont(struct scatterlist *start, int nelems,
 		}
 	}
 	BUG_ON(iommu_page - iommu_start != pages);
+
 	return 0;
 }
 
-static inline int dma_map_cont(struct scatterlist *start, int nelems,
-			       struct scatterlist *sout,
+static inline int
+dma_map_cont(struct scatterlist *start, int nelems, struct scatterlist *sout,
 	     unsigned long pages, int need)
 {
 	if (!need) {
@@ -375,15 +410,12 @@ static inline int dma_map_cont(struct scatterlist *start, int nelems,
  * DMA map all entries in a scatterlist.
  * Merge chunks that have page aligned sizes into a continuous mapping.
  */
-static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-			int dir)
+static int
+gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 {
-	int i;
-	int out;
-	int start;
-	unsigned long pages = 0;
-	int need = 0, nextneed;
 	struct scatterlist *s, *ps, *start_sg, *sgmap;
+	int need = 0, nextneed, i, out, start;
+	unsigned long pages = 0;
 
 	if (nents == 0)
 		return 0;
@@ -397,6 +429,7 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 	ps = NULL; /* shut up gcc */
 	for_each_sg(sg, s, nents, i) {
 		dma_addr_t addr = sg_phys(s);
+
 		s->dma_address = addr;
 		BUG_ON(s->length == 0);
 
@@ -404,8 +437,11 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 
 		/* Handle the previous not yet processed entries */
 		if (i > start) {
-			/* Can only merge when the last chunk ends on a page
-			   boundary and the new one doesn't have an offset. */
+			/*
+			 * Can only merge when the last chunk ends on a
+			 * page boundary and the new one doesn't have an
+			 * offset.
+			 */
 			if (!iommu_merge || !nextneed || !need || s->offset ||
 			    (ps->offset + ps->length) % PAGE_SIZE) {
 				if (dma_map_cont(start_sg, i - start, sgmap,
@@ -436,6 +472,7 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 error:
 	flush_gart();
 	gart_unmap_sg(dev, sg, out, dir);
+
 	/* When it was forced or merged try again in a dumb way */
 	if (force_iommu || iommu_merge) {
 		out = dma_map_sg_nonforce(dev, sg, nents, dir);
@@ -444,6 +481,7 @@ error:
 	}
 	if (panic_on_overflow)
 		panic("dma_map_sg: overflow on %lu pages\n", pages);
+
 	iommu_full(dev, pages << PAGE_SHIFT, dir);
 	for_each_sg(sg, s, nents, i)
 		s->dma_address = bad_dma_address;
@@ -455,6 +493,7 @@ static int no_agp;
 static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
 {
 	unsigned long a;
+
 	if (!iommu_size) {
 		iommu_size = aper_size;
 		if (!no_agp)
@@ -464,18 +503,20 @@ static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
 	a = aper + iommu_size;
 	iommu_size -= round_up(a, LARGE_PAGE_SIZE) - a;
 
-	if (iommu_size < 64*1024*1024)
+	if (iommu_size < 64*1024*1024) {
 		printk(KERN_WARNING
-			"PCI-DMA: Warning: Small IOMMU %luMB. Consider increasing the AGP aperture in BIOS\n",iommu_size>>20);
+			"PCI-DMA: Warning: Small IOMMU %luMB."
+			" Consider increasing the AGP aperture in BIOS\n",
+				iommu_size >> 20);
+	}
 
 	return iommu_size;
 }
 
 static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
 {
-	unsigned aper_size = 0, aper_base_32;
+	unsigned aper_size = 0, aper_base_32, aper_order;
 	u64 aper_base;
-	unsigned aper_order;
 
 	pci_read_config_dword(dev, 0x94, &aper_base_32);
 	pci_read_config_dword(dev, 0x90, &aper_order);
@@ -498,10 +539,10 @@ static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
  */
 static __init int init_k8_gatt(struct agp_kern_info *info)
 {
+	unsigned aper_size, gatt_size, new_aper_size;
+	unsigned aper_base, new_aper_base;
 	struct pci_dev *dev;
 	void *gatt;
-	unsigned aper_base, new_aper_base;
-	unsigned aper_size, gatt_size, new_aper_size;
 	int i;
 
 	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
@@ -523,13 +564,14 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 	if (!aper_base)
 		goto nommu;
 	info->aper_base = aper_base;
-	info->aper_size = aper_size>>20;
+	info->aper_size = aper_size >> 20;
 
 	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
 	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
 	if (!gatt)
 		panic("Cannot allocate GATT table");
-	if (change_page_attr_addr((unsigned long)gatt, gatt_size >> PAGE_SHIFT, PAGE_KERNEL_NOCACHE))
+	if (change_page_attr_addr((unsigned long)gatt, gatt_size >> PAGE_SHIFT,
+				  PAGE_KERNEL_NOCACHE))
 		panic("Could not set GART PTEs to uncacheable pages");
 	global_flush_tlb();
 
@@ -537,8 +579,8 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 	agp_gatt_table = gatt;
 
 	for (i = 0; i < num_k8_northbridges; i++) {
-		u32 ctl;
 		u32 gatt_reg;
+		u32 ctl;
 
 		dev = k8_northbridges[i];
 		gatt_reg = __pa(gatt) >> 12;
@@ -553,7 +595,8 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 	}
 	flush_gart();
 
-	printk("PCI-DMA: aperture base @ %x size %u KB\n",aper_base, aper_size>>10);
+	printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
+	       aper_base, aper_size>>10);
 	return 0;
 
 nommu:
@@ -603,8 +646,8 @@ void gart_iommu_shutdown(void)
 void __init gart_iommu_init(void)
 {
 	struct agp_kern_info info;
-	unsigned long aper_size;
 	unsigned long iommu_start;
+	unsigned long aper_size;
 	unsigned long scratch;
 	long i;
 
@@ -647,7 +690,7 @@ void __init gart_iommu_init(void)
 	iommu_size = check_iommu_size(info.aper_base, aper_size);
 	iommu_pages = iommu_size >> PAGE_SHIFT;
 
-	iommu_gart_bitmap = (void*)__get_free_pages(GFP_KERNEL,
+	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL,
 						      get_order(iommu_pages/8));
 	if (!iommu_gart_bitmap)
 		panic("Cannot allocate iommu bitmap\n");
@@ -660,7 +703,8 @@ void __init gart_iommu_init(void)
 		if (iommu_leak_tab)
 			memset(iommu_leak_tab, 0, iommu_pages * 8);
 		else
-			printk("PCI-DMA: Cannot allocate leak trace area\n");
+			printk(KERN_DEBUG
+			       "PCI-DMA: Cannot allocate leak trace area\n");
 	}
 #endif
 
@@ -673,7 +717,7 @@ void __init gart_iommu_init(void)
 	agp_memory_reserved = iommu_size;
 	printk(KERN_INFO
 	       "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
-	       iommu_size>>20);
+	       iommu_size >> 20);
 
 	iommu_start = aper_size - iommu_size;
 	iommu_bus_base = info.aper_base + iommu_start;
@@ -713,7 +757,7 @@ void __init gart_parse_options(char *p)
 	int arg;
 
 #ifdef CONFIG_IOMMU_LEAK
-	if (!strncmp(p,"leak",4)) {
+	if (!strncmp(p, "leak", 4)) {
 		leak_trace = 1;
 		p += 4;
 		if (*p == '=') ++p;
@@ -723,18 +767,18 @@ void __init gart_parse_options(char *p)
 #endif
 	if (isdigit(*p) && get_option(&p, &arg))
 		iommu_size = arg;
-	if (!strncmp(p, "fullflush",8))
+	if (!strncmp(p, "fullflush", 8))
 		iommu_fullflush = 1;
-	if (!strncmp(p, "nofullflush",11))
+	if (!strncmp(p, "nofullflush", 11))
 		iommu_fullflush = 0;
-	if (!strncmp(p,"noagp",5))
+	if (!strncmp(p, "noagp", 5))
 		no_agp = 1;
-	if (!strncmp(p, "noaperture",10))
+	if (!strncmp(p, "noaperture", 10))
 		fix_aperture = 0;
 	/* duplicated from pci-dma.c */
-	if (!strncmp(p,"force",5))
+	if (!strncmp(p, "force", 5))
 		gart_iommu_aperture_allowed = 1;
-	if (!strncmp(p,"allowed",7))
+	if (!strncmp(p, "allowed", 7))
 		gart_iommu_aperture_allowed = 1;
 	if (!strncmp(p, "memaper", 7)) {
 		fallback_aper_force = 1;