ALSA: Fix SG-buffer DMA with non-coherent architectures

Using SG-buffers with dma_alloc_coherent() is often very inefficient
on non-coherent architectures because an additional tracking record
may be allocated for each dma_alloc_coherent() call.
Instead, disable SG-buffers and simply allocate normal continuous
buffers on unsupported (currently all but x86) architectures.

Signed-off-by: Takashi Iwai <tiwai@suse.de>
Takashi Iwai
2008-06-17 16:39:06 +02:00
parent 8e4a718ff3
commit cc6a8acdee
6 changed files with 40 additions and 1 deletion
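
As a rough illustration of the effect (not part of the commit; the helper below is made up): a driver that requests an SG buffer via snd_dma_alloc_pages() still builds on architectures without CONFIG_SND_DMA_SGBUF, where SNDRV_DMA_TYPE_DEV_SG now simply aliases SNDRV_DMA_TYPE_DEV and one continuous device buffer is allocated instead.

#include <sound/memalloc.h>

/*
 * Sketch only: example_alloc_dma() is a hypothetical driver helper.
 * With CONFIG_SND_DMA_SGBUF unset, the SG type aliases
 * SNDRV_DMA_TYPE_DEV, so this call silently falls back to one
 * continuous DMA buffer.
 */
static int example_alloc_dma(struct device *dev, struct snd_dma_buffer *dmab)
{
	return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, dev,
				   64 * 1024 /* arbitrary size */, dmab);
}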

include/sound/memalloc.h

@@ -47,7 +47,11 @@ struct snd_dma_device {
#define SNDRV_DMA_TYPE_UNKNOWN 0 /* not defined */
#define SNDRV_DMA_TYPE_CONTINUOUS 1 /* continuous no-DMA memory */
#define SNDRV_DMA_TYPE_DEV 2 /* generic device continuous */
#ifdef CONFIG_SND_DMA_SGBUF
#define SNDRV_DMA_TYPE_DEV_SG 3 /* generic device SG-buffer */
#else
#define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_DEV /* no SG-buf support */
#endif
/*
* info for buffer allocation
@@ -60,6 +64,7 @@ struct snd_dma_buffer {
void *private_data; /* private for allocator; don't touch */
};
#ifdef CONFIG_SND_DMA_SGBUF
/*
* Scatter-Gather generic device pages
*/
@@ -107,6 +112,7 @@ static inline void *snd_sgbuf_get_ptr(struct snd_sg_buf *sgbuf, size_t offset)
{
return sgbuf->table[offset >> PAGE_SHIFT].buf + offset % PAGE_SIZE;
}
#endif /* CONFIG_SND_DMA_SGBUF */
/* allocate/release a buffer */
int snd_dma_alloc_pages(int type, struct device *dev, size_t size,

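Since struct snd_sg_buf and its helpers are now hidden behind CONFIG_SND_DMA_SGBUF, only code that pokes at the SG internals needs the new guard. A hedged sketch (the helper below is hypothetical) of the portable pattern that keeps working either way, going only through the generic struct snd_dma_buffer fields:

#include <linux/string.h>
#include <sound/memalloc.h>

/*
 * Hypothetical helper: touches only the generic snd_dma_buffer fields,
 * which are valid for both SG and continuous allocations.
 */
static void example_clear_dma(struct snd_dma_buffer *dmab)
{
	memset(dmab->area, 0, dmab->bytes);	/* CPU-visible mapping */
	/* dmab->addr is the DMA address handed to the device */
}
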
include/sound/pcm.h

@@ -902,6 +902,7 @@ int snd_pcm_lib_preallocate_pages_for_all(struct snd_pcm *pcm,
int snd_pcm_lib_malloc_pages(struct snd_pcm_substream *substream, size_t size);
int snd_pcm_lib_free_pages(struct snd_pcm_substream *substream);
#ifdef CONFIG_SND_DMA_SGBUF
/*
* SG-buffer handling
*/
@@ -927,6 +928,28 @@ struct page *snd_pcm_sgbuf_ops_page(struct snd_pcm_substream *substream,
unsigned int snd_pcm_sgbuf_get_chunk_size(struct snd_pcm_substream *substream,
unsigned int ofs, unsigned int size);
#else /* !SND_DMA_SGBUF */
/*
* fake using a continuous buffer
*/
static inline dma_addr_t
snd_pcm_sgbuf_get_addr(struct snd_pcm_substream *substream, unsigned int ofs)
{
return substream->runtime->dma_addr + ofs;
}
static inline void *
snd_pcm_sgbuf_get_ptr(struct snd_pcm_substream *substream, unsigned int ofs)
{
return substream->runtime->dma_area + ofs;
}
#define snd_pcm_sgbuf_ops_page NULL
#define snd_pcm_sgbuf_get_chunk_size(subs, ofs, size) (size)
#endif /* SND_DMA_SGBUF */
/* handle mmap counter - PCM mmap callback should handle this counter properly */
static inline void snd_pcm_mmap_data_open(struct vm_area_struct *area)
{
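
The fallback inlines above let a driver walk its buffer the same way regardless of configuration. A hedged sketch of such a loop follows, assuming a made-up descriptor helper my_hw_write_descriptor(): with real SG-buffers each chunk stops at a page boundary, while the continuous fallback returns the whole remaining size in one step.

#include <sound/pcm.h>

void my_hw_write_descriptor(dma_addr_t addr, unsigned int len);	/* hypothetical hardware glue */

static void example_program_range(struct snd_pcm_substream *substream,
				  unsigned int ofs, unsigned int bytes)
{
	while (bytes > 0) {
		/* largest physically continuous piece starting at ofs */
		unsigned int chunk =
			snd_pcm_sgbuf_get_chunk_size(substream, ofs, bytes);

		my_hw_write_descriptor(snd_pcm_sgbuf_get_addr(substream, ofs),
				       chunk);
		ofs += chunk;
		bytes -= chunk;
	}
}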