iommu sg merging: alpha: make pci_iommu respect the segment size limits
This patch makes pci_iommu respect segment size limits when merging sg lists.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Jeff Garzik <jeff@garzik.org>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Acked-by: Jens Axboe <jens.axboe@oracle.com>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 7c53664dcd
parent a031bbcb8d
committed by Linus Torvalds
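The mechanics of the change: pci_map_sg() now derives a struct device from the pci_dev and threads it through sg_classify() and sg_fill(). In the merge loop, a physically contiguous scatterlist entry is folded into the current leader only while the combined length stays within dma_get_max_seg_size(dev); otherwise control jumps to the new new_segment label and a fresh DMA segment begins (a NULL device yields a limit of 0, i.e. no merging at all). Below is a standalone sketch of that merge policy, with illustrative names only — this is not the kernel code, just the same algorithm in plain C:

/* Standalone sketch (not kernel code) of the merge policy this patch
 * enforces: physically contiguous scatterlist entries are coalesced
 * into one DMA segment only while the combined length stays within
 * the device's maximum segment size; otherwise a new segment starts.
 * All names here are illustrative, not from the kernel. */
#include <stdio.h>

struct sg_entry {
	unsigned long paddr;	/* physical address of the chunk */
	unsigned long len;	/* chunk length in bytes */
};

/* Count the DMA segments produced when merging adjacent chunks,
 * mirroring the leader/leader_length bookkeeping in sg_classify(). */
static int count_segments(const struct sg_entry *sg, int nents,
			  unsigned long max_seg_size)
{
	unsigned long seg_len = sg[0].len;
	unsigned long next_paddr = sg[0].paddr + sg[0].len;
	int nsegs = 1;

	for (int i = 1; i < nents; i++) {
		/* Merge only if contiguous AND the limit is respected. */
		if (sg[i].paddr == next_paddr &&
		    seg_len + sg[i].len <= max_seg_size) {
			seg_len += sg[i].len;
		} else {
			nsegs++;
			seg_len = sg[i].len;
		}
		next_paddr = sg[i].paddr + sg[i].len;
	}
	return nsegs;
}

int main(void)
{
	/* Four physically contiguous 16 KB chunks. */
	struct sg_entry sg[] = {
		{ 0x100000, 0x4000 }, { 0x104000, 0x4000 },
		{ 0x108000, 0x4000 }, { 0x10c000, 0x4000 },
	};

	printf("64K limit: %d segment(s)\n", count_segments(sg, 4, 0x10000));
	printf("32K limit: %d segment(s)\n", count_segments(sg, 4, 0x8000));
	return 0;
}

With four contiguous 16 KB chunks the sketch reports one segment under a 64 KB limit and two under a 32 KB limit, which is exactly the split the patch makes sg_classify() perform.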
arch/alpha/kernel/pci_iommu.c:

@@ -9,6 +9,7 @@
 #include <linux/bootmem.h>
 #include <linux/scatterlist.h>
 #include <linux/log2.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/io.h>
 #include <asm/hwrpb.h>
@@ -470,22 +471,29 @@ EXPORT_SYMBOL(pci_free_consistent);
 #define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))
 
 static void
-sg_classify(struct scatterlist *sg, struct scatterlist *end, int virt_ok)
+sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
+	    int virt_ok)
 {
 	unsigned long next_paddr;
 	struct scatterlist *leader;
 	long leader_flag, leader_length;
+	unsigned int max_seg_size;
 
 	leader = sg;
 	leader_flag = 0;
 	leader_length = leader->length;
 	next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;
 
+	/* we will not marge sg without device. */
+	max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
 	for (++sg; sg < end; ++sg) {
 		unsigned long addr, len;
 		addr = SG_ENT_PHYS_ADDRESS(sg);
 		len = sg->length;
 
+		if (leader_length + len > max_seg_size)
+			goto new_segment;
+
 		if (next_paddr == addr) {
 			sg->dma_address = -1;
 			leader_length += len;
@@ -494,6 +502,7 @@ sg_classify(struct scatterlist *sg, struct scatterlist *end, int virt_ok)
 			leader_flag = 1;
 			leader_length += len;
 		} else {
+new_segment:
 			leader->dma_address = leader_flag;
 			leader->dma_length = leader_length;
 			leader = sg;
@@ -512,7 +521,7 @@ sg_classify(struct scatterlist *sg, struct scatterlist *end, int virt_ok)
 	   in the blanks. */
 
 static int
-sg_fill(struct scatterlist *leader, struct scatterlist *end,
+sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
 	struct scatterlist *out, struct pci_iommu_arena *arena,
 	dma_addr_t max_dma, int dac_allowed)
 {
@@ -562,8 +571,8 @@ sg_fill(struct scatterlist *leader, struct scatterlist *end,
 
 		/* Otherwise, break up the remaining virtually contiguous
 		   hunks into individual direct maps and retry. */
-		sg_classify(leader, end, 0);
-		return sg_fill(leader, end, out, arena, max_dma, dac_allowed);
+		sg_classify(dev, leader, end, 0);
+		return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
 	}
 
 	out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
@@ -619,12 +628,15 @@ pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
 	struct pci_iommu_arena *arena;
 	dma_addr_t max_dma;
 	int dac_allowed;
+	struct device *dev;
 
 	if (direction == PCI_DMA_NONE)
 		BUG();
 
 	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
 
+	dev = pdev ? &pdev->dev : NULL;
+
 	/* Fast path single entry scatterlists. */
 	if (nents == 1) {
 		sg->dma_length = sg->length;
@@ -638,7 +650,7 @@ pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
 	end = sg + nents;
 
 	/* First, prepare information about the entries. */
-	sg_classify(sg, end, alpha_mv.mv_pci_tbi != 0);
+	sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);
 
 	/* Second, figure out where we're going to map things. */
 	if (alpha_mv.mv_pci_tbi) {
@@ -658,7 +670,7 @@ pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
 	for (out = sg; sg < end; ++sg) {
 		if ((int) sg->dma_address < 0)
 			continue;
-		if (sg_fill(sg, end, out, arena, max_dma, dac_allowed) < 0)
+		if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
 			goto error;
 		out++;
 	}
include/asm-alpha/pci.h:

@@ -4,6 +4,7 @@
 #ifdef __KERNEL__
 
 #include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
 #include <asm/scatterlist.h>
 #include <asm/machvec.h>
 
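One usage note, hedged: dma_get_max_seg_size() reads the device_dma_parameters support added earlier in this patch series and falls back to a 64 KB default when a device has not registered dma_parms; as shown above, sg_classify() additionally treats a NULL device as "never merge" by using a limit of 0. A driver could advertise its own limit roughly as follows (a sketch only; my_parms and my_driver_limit_segments are hypothetical names, and later kernels wrap this assignment in the dma_set_max_seg_size() helper):

#include <linux/device.h>

/* Hypothetical driver snippet: bound every merged DMA segment to 64 KB
 * so sg_classify() will stop coalescing at that size. */
static struct device_dma_parameters my_parms;

static void my_driver_limit_segments(struct device *dev)
{
	my_parms.max_segment_size = 0x10000;	/* 64 KB per segment */
	dev->dma_parms = &my_parms;
}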