iommu sg merging: IA64: make sba_iommu respect the segment size limits
This patch makes sba_iommu respect segment size limits when merging sg lists.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Jeff Garzik <jeff@garzik.org>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Acked-by: Jens Axboe <jens.axboe@oracle.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit a031bbcb8d
parent 740c3ce667
committed by Linus Torvalds
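For context, dma_get_max_seg_size(dev), consumed in the hunks below, returns the per-segment limit a device has advertised through the generic DMA API (falling back to 64 KB when no limit has been set). A driver opts in by providing dev->dma_parms and calling dma_set_max_seg_size(). The sketch below is a hypothetical driver-side setup, not part of this patch; the function name and the 64 KB value are illustrative only.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/*
 * Hypothetical probe-time setup: advertise that this device cannot
 * accept DMA segments larger than 64 KB.  IOMMU code such as
 * sba_coalesce_chunks() then stops merging sg entries once the merged
 * length would exceed dma_get_max_seg_size(dev).
 */
static int example_setup_dma_limits(struct device *dev,
                                    struct device_dma_parameters *parms)
{
        dev->dma_parms = parms;                   /* storage for the limits */
        return dma_set_max_seg_size(dev, 65536);  /* 64 KB per segment */
}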
@@ -1265,7 +1265,7 @@ sba_fill_pdir(
  * the sglist do both.
  */
 static SBA_INLINE int
-sba_coalesce_chunks( struct ioc *ioc,
+sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
 	struct scatterlist *startsg,
 	int nents)
 {
@@ -1275,6 +1275,7 @@ sba_coalesce_chunks( struct ioc *ioc,
 	struct scatterlist *dma_sg;	/* next DMA stream head */
 	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
 	int n_mappings = 0;
+	unsigned int max_seg_size = dma_get_max_seg_size(dev);
 
 	while (nents > 0) {
 		unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
@@ -1314,6 +1315,9 @@ sba_coalesce_chunks( struct ioc *ioc,
 			    > DMA_CHUNK_SIZE)
 				break;
 
+			if (dma_len + startsg->length > max_seg_size)
+				break;
+
 			/*
 			** Then look for virtually contiguous blocks.
 			**
@@ -1441,7 +1445,7 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
 	** w/o this association, we wouldn't have coherent DMA!
 	** Access to the virtual address is what forces a two pass algorithm.
 	*/
-	coalesced = sba_coalesce_chunks(ioc, sglist, nents);
+	coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents);
 
 	/*
 	** Program the I/O Pdir