intel-iommu: Correct sglist size calculation.
In domain_sg_mapping(), use aligned_nrpages() instead of hand-coded rounding code for calculating the size of each sg elem. This means that on IA64 we correctly round up to the MM page size, not just to the VT-d page size. Also remove the incorrect mm_to_dma_pfn() when intel_map_sg() calls domain_sg_mapping() -- the 'size' variable is in VT-d pages already. Signed-off-by: Fenghua Yu <fenghua.yu@intel.com> Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
This commit is contained in:
Committed by: David Woodhouse
Parent: 90bc1a658a
Commit: f532959b77
@@ -1648,6 +1648,14 @@ static int domain_context_mapped(struct pci_dev *pdev)
 					      tmp->devfn);
 }
 
+/* Returns a number of VTD pages, but aligned to MM page size */
+static inline unsigned long aligned_nrpages(unsigned long host_addr,
+					    size_t size)
+{
+	host_addr &= ~PAGE_MASK;
+	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
+}
+
 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 			    struct scatterlist *sg, unsigned long phys_pfn,
 			    unsigned long nr_pages, int prot)
@@ -1675,7 +1683,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 		uint64_t tmp;
 
 		if (!sg_res) {
-			sg_res = (sg->offset + sg->length + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;
+			sg_res = aligned_nrpages(sg->offset, sg->length);
 			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
 			sg->dma_length = sg->length;
 			pteval = page_to_phys(sg_page(sg)) | prot;
@@ -2415,14 +2423,6 @@ error:
 	return ret;
 }
 
-/* Returns a number of VTD pages, but aligned to MM page size */
-static inline unsigned long aligned_nrpages(unsigned long host_addr,
-					    size_t size)
-{
-	host_addr &= ~PAGE_MASK;
-	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
-}
-
 /* This takes a number of _MM_ pages, not VTD pages */
 static struct iova *intel_alloc_iova(struct device *dev,
 				     struct dmar_domain *domain,
@@ -2875,7 +2875,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 
 	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
 
-	ret = domain_sg_mapping(domain, start_vpfn, sglist, mm_to_dma_pfn(size), prot);
+	ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
 	if (unlikely(ret)) {
 		/* clear the page */
 		dma_pte_clear_range(domain, start_vpfn,
|
Reference in New Issue
Block a user