x86, ia64: convert to use generic dma_map_ops struct

This converts X86 and IA64 to use include/linux/dma-mapping.h.

It's a bit large but pretty boring. The major change for X86 is
converting 'int dir' to 'enum dma_data_direction dir' in DMA mapping
operations. The major change for IA64 is using map_page and
unmap_page instead of map_single and unmap_single.
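
For reference, this is the shape of the generic table being adopted.
An abridged sketch of struct dma_map_ops as introduced in
include/linux/dma-mapping.h by the parent commit (the sync_* and
range hooks are elided here; see the header for the full list):

struct dma_map_ops {
	void *(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
	void (*free_coherent)(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle);
	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       struct dma_attrs *attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   struct dma_attrs *attrs);
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction dir,
		      struct dma_attrs *attrs);
	void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
			 int nents, enum dma_data_direction dir,
			 struct dma_attrs *attrs);
	/* sync_{single,sg}_for_{cpu,device} hooks omitted */
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
};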

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 160c1d8e40
parent f0402a262e
Author:    FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Date:      2009-01-05 23:59:02 +09:00
Committer: Ingo Molnar <mingo@elte.hu>

24 changed files with 277 additions and 377 deletions

diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c

@@ -1,9 +1,9 @@
 #include <linux/dma-mapping.h>
 
-struct dma_mapping_ops *dma_ops;
+struct dma_map_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
-struct dma_mapping_ops *dma_get_ops(struct device *dev)
+struct dma_map_ops *dma_get_ops(struct device *dev)
 {
 	return dma_ops;
 }

diff --git a/arch/ia64/kernel/machvec.c b/arch/ia64/kernel/machvec.c

@@ -1,5 +1,5 @@
 #include <linux/module.h>
-
+#include <linux/dma-mapping.h>
 #include <asm/machvec.h>
 #include <asm/system.h>
 
@@ -75,14 +75,16 @@ machvec_timer_interrupt (int irq, void *dev_id)
 EXPORT_SYMBOL(machvec_timer_interrupt);
 
 void
-machvec_dma_sync_single (struct device *hwdev, dma_addr_t dma_handle, size_t size, int dir)
+machvec_dma_sync_single(struct device *hwdev, dma_addr_t dma_handle, size_t size,
+			enum dma_data_direction dir)
 {
 	mb();
 }
 EXPORT_SYMBOL(machvec_dma_sync_single);
 
 void
-machvec_dma_sync_sg (struct device *hwdev, struct scatterlist *sg, int n, int dir)
+machvec_dma_sync_sg(struct device *hwdev, struct scatterlist *sg, int n,
+		    enum dma_data_direction dir)
 {
 	mb();
 }

diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c

@@ -41,21 +41,7 @@ struct device fallback_dev = {
 	.dma_mask = &fallback_dev.coherent_dma_mask,
 };
 
-extern struct dma_mapping_ops vtd_dma_ops;
-
-void __init pci_iommu_alloc(void)
-{
-	dma_ops = &vtd_dma_ops;
-	/*
-	 * The order of these functions is important for
-	 * fall-back/fail-over reasons
-	 */
-	detect_intel_iommu();
-
-#ifdef CONFIG_SWIOTLB
-	pci_swiotlb_init();
-#endif
-}
+extern struct dma_map_ops intel_dma_ops;
 
 static int __init pci_iommu_init(void)
 {
@@ -81,10 +67,10 @@ iommu_dma_init(void)
 
 int iommu_dma_supported(struct device *dev, u64 mask)
 {
-	struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
 
-	if (ops->dma_supported_op)
-		return ops->dma_supported_op(dev, mask);
+	if (ops->dma_supported)
+		return ops->dma_supported(dev, mask);
 
 	/* Copied from i386. Doesn't make much sense, because it will
 	   only work for pci_alloc_coherent.
@@ -113,4 +99,31 @@ int iommu_dma_supported(struct device *dev, u64 mask)
 }
 EXPORT_SYMBOL(iommu_dma_supported);
 
+static int vtd_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return 0;
+}
+
+void __init pci_iommu_alloc(void)
+{
+	dma_ops = &intel_dma_ops;
+
+	dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
+	dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
+	dma_ops->sync_single_for_device = machvec_dma_sync_single;
+	dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
+	dma_ops->dma_supported = iommu_dma_supported;
+	dma_ops->mapping_error = vtd_dma_mapping_error;
+
+	/*
+	 * The order of these functions is important for
+	 * fall-back/fail-over reasons
+	 */
+	detect_intel_iommu();
+
+#ifdef CONFIG_SWIOTLB
+	pci_swiotlb_init();
+#endif
+}
+
 #endif

diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c

@@ -16,24 +16,36 @@ EXPORT_SYMBOL(swiotlb);
 
 /* Set this to 1 if there is a HW IOMMU in the system */
 int iommu_detected __read_mostly;
 
-struct dma_mapping_ops swiotlb_dma_ops = {
+static dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
+				   unsigned long offset, size_t size,
+				   enum dma_data_direction dir,
+				   struct dma_attrs *attrs)
+{
+	return swiotlb_map_single_attrs(dev, page_address(page) + offset, size,
+					dir, attrs);
+}
+
+static void swiotlb_unmap_page(struct device *dev, dma_addr_t dma_handle,
+			       size_t size, enum dma_data_direction dir,
+			       struct dma_attrs *attrs)
+{
+	swiotlb_unmap_single_attrs(dev, dma_handle, size, dir, attrs);
+}
+
+struct dma_map_ops swiotlb_dma_ops = {
 	.alloc_coherent = swiotlb_alloc_coherent,
 	.free_coherent = swiotlb_free_coherent,
-	.map_single = swiotlb_map_single,
-	.unmap_single = swiotlb_unmap_single,
-	.map_single_attrs = swiotlb_map_single_attrs,
-	.unmap_single_attrs = swiotlb_unmap_single_attrs,
-	.map_sg_attrs = swiotlb_map_sg_attrs,
-	.unmap_sg_attrs = swiotlb_unmap_sg_attrs,
+	.map_page = swiotlb_map_page,
+	.unmap_page = swiotlb_unmap_page,
+	.map_sg = swiotlb_map_sg_attrs,
+	.unmap_sg = swiotlb_unmap_sg_attrs,
 	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
 	.sync_single_for_device = swiotlb_sync_single_for_device,
 	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
 	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
 	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
 	.sync_sg_for_device = swiotlb_sync_sg_for_device,
-	.map_sg = swiotlb_map_sg,
-	.unmap_sg = swiotlb_unmap_sg,
-	.dma_supported_op = swiotlb_dma_supported,
+	.dma_supported = swiotlb_dma_supported,
 	.mapping_error = swiotlb_dma_mapping_error,
 };
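
With every backend behind one table, the arch-side wrappers reduce to
an indirect call. A minimal sketch of the dispatch, along the lines of
the x86 asm/dma-mapping.h inlines after this conversion (get_dma_ops()
stands for the arch helper that returns the device's ops table):

static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	/* map_single is gone; express it as map_page plus an in-page offset */
	return ops->map_page(dev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size,
			     dir, NULL);
}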