Commit 4f143eca authored by Mark Brown

ASoC: Intel: Skylake: Topology and shutdown fixes

Merge series from Cezary Rojewski <cezary.rojewski@intel.com>:

Even though the skylake-driver is going to be replaced by the avs-driver,
the goal is to keep it functional on all the configurations it supports
until its EOL. When comparing the chrome trees against the upstream
skylake-driver, a couple of fixes pop up that are not part of the upstream
repository. These fixes are backed by real bugs (issue trackers) and
address real problems. There is no reason for them to remain only in the
internal tree.

Patches 1-4 combined address an issue where the driver updates the
presumably static audio format descriptions coming from the topology
files through its "fixup" functions. As long as a given audio format is
used by a single path, nothing collides and any updates are harmless.
However, when multiple paths, e.g. DMIC and HDMI1, utilize the same audio
format descriptor, updates caused by opening the first path may cause the
second one to fail.

The 5th change from the set fixes a driver hang that sporadically occurs
during the shutdown procedure. Once the HDAudio links are powered down
along with the AudioDSP, the hang no longer reproduces.

The last change helps the driver survive in environments with limited or
fragmented memory. While the BDL is already small, the other buffers can be
allocated using scatter-gather. This basically aligns the code with what
the avs-driver does.
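
For orientation, a user-space sketch of the scatter-gather idea follows.
The chunk list stands in for what snd_sgbuf_get_addr() and
snd_sgbuf_get_chunk_size() return in the hunks below, and the 4-dword entry
layout (address low, address high, length, interrupt-on-completion flag)
mirrors the bdl[0..3] programming there; all names, addresses and sizes are
made up for the example:

#include <stdint.h>
#include <stdio.h>

struct sg_chunk {
        uint64_t addr;  /* physical address of one fragment */
        uint32_t size;  /* bytes available in that fragment */
};

/* Build one descriptor entry per fragment until 'bufsize' bytes are covered. */
static int setup_bdle(const struct sg_chunk *chunks, int nr_chunks,
                      uint32_t bufsize, uint32_t bdl[][4])
{
        uint32_t remaining = bufsize;
        int frags = 0;

        for (int i = 0; i < nr_chunks && remaining > 0; i++) {
                uint32_t chunk = chunks[i].size < remaining ?
                                 chunks[i].size : remaining;

                bdl[frags][0] = (uint32_t)chunks[i].addr;         /* addr low  */
                bdl[frags][1] = (uint32_t)(chunks[i].addr >> 32); /* addr high */
                bdl[frags][2] = chunk;                            /* length    */
                remaining -= chunk;
                bdl[frags][3] = remaining > 0 ? 0 : 0x01;         /* IOC on last */
                frags++;
        }
        return frags;
}

int main(void)
{
        /* Three non-contiguous 4 KiB pages backing a 12 KiB buffer. */
        const struct sg_chunk chunks[] = {
                { 0x10000, 4096 }, { 0x30000, 4096 }, { 0x52000, 4096 },
        };
        uint32_t bdl[8][4];
        int n = setup_bdle(chunks, 3, 12288, bdl);

        for (int i = 0; i < n; i++)
                printf("entry %d: lo=0x%x hi=0x%x len=%u ioc=%u\n", i,
                       (unsigned)bdl[i][0], (unsigned)bdl[i][1],
                       (unsigned)bdl[i][2], (unsigned)bdl[i][3]);
        return 0;
}

With SNDRV_DMA_TYPE_DEV_SG the firmware buffer no longer has to be
physically contiguous; only the small BDL itself remains so.
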
parents 38eef3be 451d85c4
@@ -11,6 +11,7 @@
 #include <linux/io.h>
 #include <linux/mm.h>
 #include <linux/delay.h>
+#include <sound/hda_register.h>
 #include "../common/sst-dsp.h"
 #include "../common/sst-dsp-priv.h"
@@ -79,21 +80,25 @@ static void skl_cldma_setup_bdle(struct sst_dsp *ctx,
                 __le32 **bdlp, int size, int with_ioc)
 {
         __le32 *bdl = *bdlp;
+        int remaining = ctx->cl_dev.bufsize;
+        int offset = 0;
         ctx->cl_dev.frags = 0;
-        while (size > 0) {
-                phys_addr_t addr = virt_to_phys(dmab_data->area +
-                                (ctx->cl_dev.frags * ctx->cl_dev.bufsize));
+        while (remaining > 0) {
+                phys_addr_t addr;
+                int chunk;
+                addr = snd_sgbuf_get_addr(dmab_data, offset);
                 bdl[0] = cpu_to_le32(lower_32_bits(addr));
                 bdl[1] = cpu_to_le32(upper_32_bits(addr));
-                bdl[2] = cpu_to_le32(ctx->cl_dev.bufsize);
-                size -= ctx->cl_dev.bufsize;
-                bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01);
+                chunk = snd_sgbuf_get_chunk_size(dmab_data, offset, size);
+                bdl[2] = cpu_to_le32(chunk);
+                remaining -= chunk;
+                bdl[3] = (remaining > 0) ? 0 : cpu_to_le32(0x01);
                 bdl += 4;
+                offset += chunk;
                 ctx->cl_dev.frags++;
         }
 }
@@ -338,15 +343,15 @@ int skl_cldma_prepare(struct sst_dsp *ctx)
         ctx->cl_dev.ops.cl_stop_dma = skl_cldma_stop;
         /* Allocate buffer*/
-        ret = ctx->dsp_ops.alloc_dma_buf(ctx->dev,
-                        &ctx->cl_dev.dmab_data, ctx->cl_dev.bufsize);
+        ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, ctx->dev, ctx->cl_dev.bufsize,
+                                  &ctx->cl_dev.dmab_data);
         if (ret < 0) {
                 dev_err(ctx->dev, "Alloc buffer for base fw failed: %x\n", ret);
                 return ret;
         }
         /* Setup Code loader BDL */
-        ret = ctx->dsp_ops.alloc_dma_buf(ctx->dev,
-                        &ctx->cl_dev.dmab_bdl, PAGE_SIZE);
+        ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, ctx->dev, BDL_SIZE, &ctx->cl_dev.dmab_bdl);
         if (ret < 0) {
                 dev_err(ctx->dev, "Alloc buffer for blde failed: %x\n", ret);
                 ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_data);
......
@@ -582,36 +582,10 @@ static int skl_tplg_unload_pipe_modules(struct skl_dev *skl,
         return ret;
 }
-static bool skl_tplg_is_multi_fmt(struct skl_dev *skl, struct skl_pipe *pipe)
+static void skl_tplg_set_pipe_config_idx(struct skl_pipe *pipe, int idx)
 {
-        struct skl_pipe_fmt *cur_fmt;
-        struct skl_pipe_fmt *next_fmt;
-        int i;
-        if (pipe->nr_cfgs <= 1)
-                return false;
-        if (pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
-                return true;
-        for (i = 0; i < pipe->nr_cfgs - 1; i++) {
-                if (pipe->direction == SNDRV_PCM_STREAM_PLAYBACK) {
-                        cur_fmt = &pipe->configs[i].out_fmt;
-                        next_fmt = &pipe->configs[i + 1].out_fmt;
-                } else {
-                        cur_fmt = &pipe->configs[i].in_fmt;
-                        next_fmt = &pipe->configs[i + 1].in_fmt;
-                }
-                if (!CHECK_HW_PARAMS(cur_fmt->channels, cur_fmt->freq,
-                                     cur_fmt->bps,
-                                     next_fmt->channels,
-                                     next_fmt->freq,
-                                     next_fmt->bps))
-                        return true;
-        }
-        return false;
+        pipe->cur_config_idx = idx;
+        pipe->memory_pages = pipe->configs[idx].mem_pages;
 }
 /*
@@ -632,24 +606,14 @@ skl_tplg_get_pipe_config(struct skl_dev *skl, struct skl_module_cfg *mconfig)
         int i;
         if (pipe->nr_cfgs == 0) {
-                pipe->cur_config_idx = 0;
-                return 0;
-        }
-        if (skl_tplg_is_multi_fmt(skl, pipe)) {
-                pipe->cur_config_idx = pipe->pipe_config_idx;
-                pipe->memory_pages = pconfig->mem_pages;
-                dev_dbg(skl->dev, "found pipe config idx:%d\n",
-                                pipe->cur_config_idx);
+                skl_tplg_set_pipe_config_idx(pipe, 0);
                 return 0;
         }
         if (pipe->conn_type == SKL_PIPE_CONN_TYPE_NONE || pipe->nr_cfgs == 1) {
                 dev_dbg(skl->dev, "No conn_type or just 1 pathcfg, taking 0th for %d\n",
                         pipe->ppl_id);
-                pipe->cur_config_idx = 0;
-                pipe->memory_pages = pconfig->mem_pages;
+                skl_tplg_set_pipe_config_idx(pipe, 0);
                 return 0;
         }
@@ -668,10 +632,8 @@ skl_tplg_get_pipe_config(struct skl_dev *skl, struct skl_module_cfg *mconfig)
                 if (CHECK_HW_PARAMS(params->ch, params->s_freq, params->s_fmt,
                                     fmt->channels, fmt->freq, fmt->bps)) {
-                        pipe->cur_config_idx = i;
-                        pipe->memory_pages = pconfig->mem_pages;
+                        skl_tplg_set_pipe_config_idx(pipe, i);
                         dev_dbg(skl->dev, "Using pipe config: %d\n", i);
                         return 0;
                 }
         }
@@ -1391,9 +1353,9 @@ static int skl_tplg_multi_config_set_get(struct snd_kcontrol *kcontrol,
                 return -EIO;
         if (is_set)
-                pipe->pipe_config_idx = ucontrol->value.enumerated.item[0];
+                skl_tplg_set_pipe_config_idx(pipe, ucontrol->value.enumerated.item[0]);
         else
-                ucontrol->value.enumerated.item[0] = pipe->pipe_config_idx;
+                ucontrol->value.enumerated.item[0] = pipe->cur_config_idx;
         return 0;
 }
@@ -1837,20 +1799,28 @@ static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai,
 {
         struct nhlt_specific_cfg *cfg;
         struct skl_pipe *pipe = mconfig->pipe;
+        struct skl_pipe_params save = *pipe->p_params;
         struct skl_pipe_fmt *pipe_fmt;
         struct skl_dev *skl = get_skl_ctx(dai->dev);
         int link_type = skl_tplg_be_link_type(mconfig->dev_type);
         u8 dev_type = skl_tplg_be_dev_type(mconfig->dev_type);
+        int ret;
         skl_tplg_fill_dma_id(mconfig, params);
         if (link_type == NHLT_LINK_HDA)
                 return 0;
+        *pipe->p_params = *params;
+        ret = skl_tplg_get_pipe_config(skl, mconfig);
+        if (ret)
+                goto err;
+        dev_dbg(skl->dev, "%s using pipe config: %d\n", __func__, pipe->cur_config_idx);
         if (pipe->direction == SNDRV_PCM_STREAM_PLAYBACK)
-                pipe_fmt = &pipe->configs[pipe->pipe_config_idx].out_fmt;
+                pipe_fmt = &pipe->configs[pipe->cur_config_idx].out_fmt;
         else
-                pipe_fmt = &pipe->configs[pipe->pipe_config_idx].in_fmt;
+                pipe_fmt = &pipe->configs[pipe->cur_config_idx].in_fmt;
         /* update the blob based on virtual bus_id*/
         cfg = intel_nhlt_get_endpoint_blob(dai->dev, skl->nhlt,
@@ -1865,10 +1835,15 @@ static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai,
                 dev_err(dai->dev, "Blob NULL for id:%d type:%d dirn:%d ch:%d, freq:%d, fmt:%d\n",
                         mconfig->vbus_id, link_type, params->stream,
                         params->ch, params->s_freq, params->s_fmt);
-                return -EINVAL;
+                ret = -EINVAL;
+                goto err;
         }
         return 0;
+err:
+        *pipe->p_params = save;
+        return ret;
 }
 static int skl_tplg_be_set_src_pipe_params(struct snd_soc_dai *dai,
......
@@ -324,7 +324,6 @@ struct skl_pipe {
         struct skl_path_config configs[SKL_MAX_PATH_CONFIGS];
         struct list_head w_list;
         bool passthru;
-        u32 pipe_config_idx;
 };
 enum skl_module_state {
......
@@ -1107,7 +1107,10 @@ static void skl_shutdown(struct pci_dev *pci)
         if (!skl->init_done)
                 return;
-        snd_hdac_stop_streams_and_chip(bus);
+        snd_hdac_stop_streams(bus);
+        snd_hdac_ext_bus_link_power_down_all(bus);
+        skl_dsp_sleep(skl->dsp);
         list_for_each_entry(s, &bus->stream_list, list) {
                 stream = stream_to_hdac_ext_stream(s);
                 snd_hdac_ext_stream_decouple(bus, stream, false);
......