dma: add dma_get_any_slave_channel(), for use in of_xlate()

mmp_pdma.c implements a custom of_xlate() function that is 95% identical
to what Tegra will need. Create a function to implement the common part,
so everyone doesn't just cut/paste the implementation.

Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Lars-Peter Clausen <lars@metafoo.de>
Cc: dmaengine@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Stephen Warren <swarren@nvidia.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Author:    Stephen Warren <swarren@nvidia.com>
Date:      2013-11-26 12:40:51 -07:00
Committer: Vinod Koul
Commit:    8010dad55a
Parent:    6ce4eac1f6

3 changed files with 36 additions and 23 deletions
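
To make the commit message concrete, here is a minimal sketch of the of_xlate() pattern this helper factors out, roughly what a Tegra-style consumer would look like. The foo_* names are hypothetical stand-ins invented for this sketch; only dma_get_any_slave_channel() is introduced by this commit, while of_dma_controller_register() is the existing registration hook.

    #include <linux/dmaengine.h>
    #include <linux/of_dma.h>

    /* hypothetical per-controller driver state */
    struct foo_dma {
            struct dma_device dma_dev;
    };

    /* With the helper, the common of_xlate() collapses to a single call. */
    static struct dma_chan *foo_dma_of_xlate(struct of_phandle_args *dma_spec,
                                             struct of_dma *ofdma)
    {
            struct foo_dma *fd = ofdma->of_dma_data;

            /* grab any currently unused DMA_SLAVE channel on this controller */
            return dma_get_any_slave_channel(&fd->dma_dev);
    }

The driver's probe() would wire this up with of_dma_controller_register(node, foo_dma_of_xlate, fd). A driver that needs per-channel configuration from the DT specifier applies it to the returned channel before handing it back, which is exactly the remaining 5% that mmp_pdma keeps below (the drcmr assignment).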

drivers/dma/dmaengine.c

@@ -535,6 +535,34 @@ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
 }
 EXPORT_SYMBOL_GPL(dma_get_slave_channel);
 
+struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
+{
+	dma_cap_mask_t mask;
+	struct dma_chan *chan;
+	int err;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	/* lock against __dma_request_channel */
+	mutex_lock(&dma_list_mutex);
+
+	chan = private_candidate(&mask, device, NULL, NULL);
+	if (chan) {
+		err = dma_chan_get(chan);
+		if (err) {
+			pr_debug("%s: failed to get %s: (%d)\n",
+				 __func__, dma_chan_name(chan), err);
+			chan = NULL;
+		}
+	}
+
+	mutex_unlock(&dma_list_mutex);
+
+	return chan;
+}
+EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
+
 /**
  * __dma_request_channel - try to allocate an exclusive channel
  * @mask: capabilities that the channel must satisfy

drivers/dma/mmp_pdma.c

@@ -893,33 +893,17 @@ static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
 					   struct of_dma *ofdma)
 {
 	struct mmp_pdma_device *d = ofdma->of_dma_data;
-	struct dma_chan *chan, *candidate;
+	struct dma_chan *chan;
+	struct mmp_pdma_chan *c;
 
-retry:
-	candidate = NULL;
-
-	/* walk the list of channels registered with the current instance and
-	 * find one that is currently unused */
-	list_for_each_entry(chan, &d->device.channels, device_node)
-		if (chan->client_count == 0) {
-			candidate = chan;
-			break;
-		}
-
-	if (!candidate)
+	chan = dma_get_any_slave_channel(&d->device);
+	if (!chan)
 		return NULL;
 
-	/* dma_get_slave_channel will return NULL if we lost a race between
-	 * the lookup and the reservation */
-	chan = dma_get_slave_channel(candidate);
-
-	if (chan) {
-		struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);
-		c->drcmr = dma_spec->args[0];
-		return chan;
-	}
-
-	goto retry;
+	c = to_mmp_pdma_chan(chan);
+	c->drcmr = dma_spec->args[0];
+
+	return chan;
 }
 
 static int mmp_pdma_probe(struct platform_device *op)
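
Worth noting as a design point: the retry:/goto retry loop existed because the unlocked channel walk could race with another requester between spotting a free channel and dma_get_slave_channel() actually reserving it. dma_get_any_slave_channel() performs the candidate walk and the dma_chan_get() reservation inside a single dma_list_mutex critical section, so both the race comment and the loop go away.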

include/linux/dmaengine.h

@@ -1079,6 +1079,7 @@ int dma_async_device_register(struct dma_device *device);
 void dma_async_device_unregister(struct dma_device *device);
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
 struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
+struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
 struct dma_chan *net_dma_find_channel(void);
 #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
 #define dma_request_slave_channel_compat(mask, x, y, dev, name) \