Commit c9bd5e69 authored by Russell King

DMA-API: sound: fix dma mask handling in a lot of drivers

This code sequence is unsafe in modules:

static u64 mask = DMA_BIT_MASK(something);
...
	if (!dev->dma_mask)
		dev->dma_mask = &mask;

because, if the module is unloaded and reloaded, the mask will be left
pointing at the original module's mask address, and this can lead to
oopses.  Moreover, they all follow this with:

	if (!dev->coherent_dma_mask)
		dev->coherent_dma_mask = mask;

where 'mask' is the same value as the statically defined mask, and this
bypasses the architecture's check on whether the DMA mask is possible.
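
(For illustration only, not part of this patch: going through the DMA
API would let the architecture reject a mask it cannot satisfy, e.g.

	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
		return -ENODEV;	/* mask rejected by the architecture */

whereas the open-coded assignment above skips that validation.)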

Fix these issues by using the new dma_coerce_mask_and_coherent()
function.
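
For reference, dma_coerce_mask_and_coherent() is roughly the following
wrapper (a sketch of the helper from <linux/dma-mapping.h>, shown here
only for context, not part of this patch):

	static inline int dma_coerce_mask_and_coherent(struct device *dev,
						       u64 mask)
	{
		/* point dma_mask at storage the device itself owns ... */
		dev->dma_mask = &dev->coherent_dma_mask;
		/* ... then let the architecture validate both masks */
		return dma_set_mask_and_coherent(dev, mask);
	}
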
Acked-by: Mark Brown <broonie@linaro.org>
Acked-by: Takashi Iwai <tiwai@suse.de>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent fa6a8d6d
@@ -11,6 +11,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
 
 #include <sound/core.h>
@@ -83,8 +84,6 @@ static struct snd_pcm_ops pxa2xx_pcm_ops = {
 	.mmap = pxa2xx_pcm_mmap,
 };
 
-static u64 pxa2xx_pcm_dmamask = 0xffffffff;
-
 int pxa2xx_pcm_new(struct snd_card *card, struct pxa2xx_pcm_client *client,
 	struct snd_pcm **rpcm)
 {
@@ -100,10 +99,9 @@ int pxa2xx_pcm_new(struct snd_card *card, struct pxa2xx_pcm_client *client,
 	pcm->private_data = client;
 	pcm->private_free = pxa2xx_pcm_free_dma_buffers;
 
-	if (!card->dev->dma_mask)
-		card->dev->dma_mask = &pxa2xx_pcm_dmamask;
-	if (!card->dev->coherent_dma_mask)
-		card->dev->coherent_dma_mask = 0xffffffff;
+	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+	if (ret)
+		goto out;
 
 	if (play) {
 		int stream = SNDRV_PCM_STREAM_PLAYBACK;
...
@@ -68,18 +68,15 @@ int atmel_pcm_mmap(struct snd_pcm_substream *substream,
 }
 EXPORT_SYMBOL_GPL(atmel_pcm_mmap);
 
-static u64 atmel_pcm_dmamask = DMA_BIT_MASK(32);
-
 int atmel_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_card *card = rtd->card->snd_card;
 	struct snd_pcm *pcm = rtd->pcm;
-	int ret = 0;
+	int ret;
 
-	if (!card->dev->dma_mask)
-		card->dev->dma_mask = &atmel_pcm_dmamask;
-	if (!card->dev->coherent_dma_mask)
-		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
 
 	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
 		pr_debug("atmel-pcm: allocating PCM playback DMA buffer\n");
...
@@ -415,19 +415,16 @@ static void bf5xx_pcm_free_dma_buffers(struct snd_pcm *pcm)
 	}
 }
 
-static u64 bf5xx_pcm_dmamask = DMA_BIT_MASK(32);
-
 static int bf5xx_pcm_ac97_new(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_card *card = rtd->card->snd_card;
 	struct snd_pcm *pcm = rtd->pcm;
-	int ret = 0;
+	int ret;
 
 	pr_debug("%s enter\n", __func__);
-	if (!card->dev->dma_mask)
-		card->dev->dma_mask = &bf5xx_pcm_dmamask;
-	if (!card->dev->coherent_dma_mask)
-		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
 
 	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
 		ret = bf5xx_pcm_preallocate_dma_buffer(pcm,
...
@@ -323,18 +323,16 @@ static struct snd_pcm_ops bf5xx_pcm_i2s_ops = {
 	.silence = bf5xx_pcm_silence,
 };
 
-static u64 bf5xx_pcm_dmamask = DMA_BIT_MASK(32);
-
 static int bf5xx_pcm_i2s_new(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_card *card = rtd->card->snd_card;
 	size_t size = bf5xx_pcm_hardware.buffer_bytes_max;
+	int ret;
 
 	pr_debug("%s enter\n", __func__);
-	if (!card->dev->dma_mask)
-		card->dev->dma_mask = &bf5xx_pcm_dmamask;
-	if (!card->dev->coherent_dma_mask)
-		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
 
 	return snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
 			SNDRV_DMA_TYPE_DEV, card->dev, size, size);
...
@@ -844,18 +844,15 @@ static void davinci_pcm_free(struct snd_pcm *pcm)
 	}
 }
 
-static u64 davinci_pcm_dmamask = DMA_BIT_MASK(32);
-
 static int davinci_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_card *card = rtd->card->snd_card;
 	struct snd_pcm *pcm = rtd->pcm;
 	int ret;
 
-	if (!card->dev->dma_mask)
-		card->dev->dma_mask = &davinci_pcm_dmamask;
-	if (!card->dev->coherent_dma_mask)
-		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
 
 	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
 		ret = davinci_pcm_preallocate_dma_buffer(pcm,
...
@@ -298,14 +298,11 @@ static int fsl_dma_new(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_card *card = rtd->card->snd_card;
 	struct snd_pcm *pcm = rtd->pcm;
-	static u64 fsl_dma_dmamask = DMA_BIT_MASK(36);
 	int ret;
 
-	if (!card->dev->dma_mask)
-		card->dev->dma_mask = &fsl_dma_dmamask;
-
-	if (!card->dev->coherent_dma_mask)
-		card->dev->coherent_dma_mask = fsl_dma_dmamask;
+	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(36));
+	if (ret)
+		return ret;
 
 	/* Some codecs have separate DAIs for playback and capture, so we
 	 * should allocate a DMA buffer only for the streams that are valid.
...
@@ -272,18 +272,16 @@ static int imx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
 	return 0;
 }
 
-static u64 imx_pcm_dmamask = DMA_BIT_MASK(32);
-
 static int imx_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_card *card = rtd->card->snd_card;
 	struct snd_pcm *pcm = rtd->pcm;
-	int ret = 0;
+	int ret;
+
+	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
 
-	if (!card->dev->dma_mask)
-		card->dev->dma_mask = &imx_pcm_dmamask;
-	if (!card->dev->coherent_dma_mask)
-		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
 	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
 		ret = imx_pcm_preallocate_dma_buffer(pcm,
 			SNDRV_PCM_STREAM_PLAYBACK);
...
@@ -299,7 +299,6 @@ static struct snd_pcm_ops psc_dma_ops = {
 	.hw_params = psc_dma_hw_params,
 };
 
-static u64 psc_dma_dmamask = DMA_BIT_MASK(32);
 static int psc_dma_new(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_card *card = rtd->card->snd_card;
@@ -307,15 +306,14 @@ static int psc_dma_new(struct snd_soc_pcm_runtime *rtd)
 	struct snd_pcm *pcm = rtd->pcm;
 	struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai);
 	size_t size = psc_dma_hardware.buffer_bytes_max;
-	int rc = 0;
+	int rc;
 
 	dev_dbg(rtd->platform->dev, "psc_dma_new(card=%p, dai=%p, pcm=%p)\n",
 		card, dai, pcm);
 
-	if (!card->dev->dma_mask)
-		card->dev->dma_mask = &psc_dma_dmamask;
-	if (!card->dev->coherent_dma_mask)
-		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	rc = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+	if (rc)
+		return rc;
 
 	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
 		rc = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, pcm->card->dev,
...
@@ -297,19 +297,15 @@ static void jz4740_pcm_free(struct snd_pcm *pcm)
 	}
 }
 
-static u64 jz4740_pcm_dmamask = DMA_BIT_MASK(32);
-
 static int jz4740_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_card *card = rtd->card->snd_card;
 	struct snd_pcm *pcm = rtd->pcm;
-	int ret = 0;
-
-	if (!card->dev->dma_mask)
-		card->dev->dma_mask = &jz4740_pcm_dmamask;
+	int ret;
 
-	if (!card->dev->coherent_dma_mask)
-		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
 
 	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
 		ret = jz4740_pcm_preallocate_dma_buffer(pcm,
...
@@ -59,8 +59,6 @@ static struct snd_pcm_hardware kirkwood_dma_snd_hw = {
 	.fifo_size = 0,
 };
 
-static u64 kirkwood_dma_dmamask = DMA_BIT_MASK(32);
-
 static irqreturn_t kirkwood_dma_irq(int irq, void *dev_id)
 {
 	struct kirkwood_dma_data *priv = dev_id;
@@ -292,10 +290,9 @@ static int kirkwood_dma_new(struct snd_soc_pcm_runtime *rtd)
 	struct snd_pcm *pcm = rtd->pcm;
 	int ret;
 
-	if (!card->dev->dma_mask)
-		card->dev->dma_mask = &kirkwood_dma_dmamask;
-	if (!card->dev->coherent_dma_mask)
-		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
 
 	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
 		ret = kirkwood_dma_preallocate_dma_buffer(pcm,
...
@@ -314,16 +314,15 @@ static void nuc900_dma_free_dma_buffers(struct snd_pcm *pcm)
 	snd_pcm_lib_preallocate_free_for_all(pcm);
 }
 
-static u64 nuc900_pcm_dmamask = DMA_BIT_MASK(32);
 static int nuc900_dma_new(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_card *card = rtd->card->snd_card;
 	struct snd_pcm *pcm = rtd->pcm;
+	int ret;
 
-	if (!card->dev->dma_mask)
-		card->dev->dma_mask = &nuc900_pcm_dmamask;
-	if (!card->dev->coherent_dma_mask)
-		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
 
 	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
 		card->dev, 4 * 1024, (4 * 1024) - 1);
...
@@ -156,8 +156,6 @@ static struct snd_pcm_ops omap_pcm_ops = {
 	.mmap = omap_pcm_mmap,
 };
 
-static u64 omap_pcm_dmamask = DMA_BIT_MASK(64);
-
 static int omap_pcm_preallocate_dma_buffer(struct snd_pcm *pcm,
 	int stream)
 {
@@ -202,12 +200,11 @@ static int omap_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_card *card = rtd->card->snd_card;
 	struct snd_pcm *pcm = rtd->pcm;
-	int ret = 0;
+	int ret;
 
-	if (!card->dev->dma_mask)
-		card->dev->dma_mask = &omap_pcm_dmamask;
-	if (!card->dev->coherent_dma_mask)
-		card->dev->coherent_dma_mask = DMA_BIT_MASK(64);
+	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(64));
+	if (ret)
+		return ret;
 
 	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
 		ret = omap_pcm_preallocate_dma_buffer(pcm,
...
@@ -87,18 +87,15 @@ static struct snd_pcm_ops pxa2xx_pcm_ops = {
 	.mmap = pxa2xx_pcm_mmap,
 };
 
-static u64 pxa2xx_pcm_dmamask = DMA_BIT_MASK(32);
-
 static int pxa2xx_soc_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_card *card = rtd->card->snd_card;
 	struct snd_pcm *pcm = rtd->pcm;
-	int ret = 0;
+	int ret;
 
-	if (!card->dev->dma_mask)
-		card->dev->dma_mask = &pxa2xx_pcm_dmamask;
-	if (!card->dev->coherent_dma_mask)
-		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
 
 	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
 		ret = pxa2xx_pcm_preallocate_dma_buffer(pcm,
...
@@ -444,8 +444,6 @@ static void s6000_pcm_free(struct snd_pcm *pcm)
 	snd_pcm_lib_preallocate_free_for_all(pcm);
 }
 
-static u64 s6000_pcm_dmamask = DMA_BIT_MASK(32);
-
 static int s6000_pcm_new(struct snd_soc_pcm_runtime *runtime)
 {
 	struct snd_card *card = runtime->card->snd_card;
@@ -456,10 +454,9 @@ static int s6000_pcm_new(struct snd_soc_pcm_runtime *runtime)
 	params = snd_soc_dai_get_dma_data(runtime->cpu_dai,
 			pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream);
 
-	if (!card->dev->dma_mask)
-		card->dev->dma_mask = &s6000_pcm_dmamask;
-	if (!card->dev->coherent_dma_mask)
-		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	res = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+	if (res)
+		return res;
 
 	if (params->dma_in) {
 		s6dmac_disable_chan(DMA_MASK_DMAC(params->dma_in),
...
@@ -406,20 +406,17 @@ static void dma_free_dma_buffers(struct snd_pcm *pcm)
 	}
 }
 
-static u64 dma_mask = DMA_BIT_MASK(32);
-
 static int dma_new(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_card *card = rtd->card->snd_card;
 	struct snd_pcm *pcm = rtd->pcm;
-	int ret = 0;
+	int ret;
 
 	pr_debug("Entered %s\n", __func__);
 
-	if (!card->dev->dma_mask)
-		card->dev->dma_mask = &dma_mask;
-	if (!card->dev->coherent_dma_mask)
-		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
 
 	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
 		ret = preallocate_dma_buffer(pcm,
...
@@ -383,18 +383,15 @@ static int preallocate_idma_buffer(struct snd_pcm *pcm, int stream)
 	return 0;
 }
 
-static u64 idma_mask = DMA_BIT_MASK(32);
-
 static int idma_new(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_card *card = rtd->card->snd_card;
 	struct snd_pcm *pcm = rtd->pcm;
-	int ret = 0;
+	int ret;
 
-	if (!card->dev->dma_mask)
-		card->dev->dma_mask = &idma_mask;
-	if (!card->dev->coherent_dma_mask)
-		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
 
 	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
 		ret = preallocate_idma_buffer(pcm,
...