Commit 8a4259bf authored by Takashi Iwai

ALSA: ctxfi - Fix Oops at mmapping

Replace the spinlock protecting the vm block list with a mutex. The list is
manipulated at mmap / munmap calls, where a block may be allocated with
GFP_KERNEL while the spinlock is held with IRQs disabled, which caused an
Oops like the one below:

BUG: sleeping function called from invalid context at mm/slub.c:1599
in_atomic(): 0, irqs_disabled(): 1, pid: 32065, name: xine
Pid: 32065, comm: xine Tainted: P           2.6.29.4-75.fc10.x86_64 #1
Call Trace:
  [<ffffffff81040685>] __might_sleep+0x105/0x10a
  [<ffffffff810c9fae>] kmem_cache_alloc+0x32/0xe2
  [<ffffffffa08e3110>] ct_vm_map+0xfa/0x19e [snd_ctxfi]
  [<ffffffffa08e1a07>] ct_map_audio_buffer+0x4c/0x76 [snd_ctxfi]
  [<ffffffffa08e2aa5>] atc_pcm_playback_prepare+0x1d7/0x2a8 [snd_ctxfi]
  [<ffffffff8105ef3f>] ? up_read+0x9/0xb
  [<ffffffff81186b61>] ? __up_read+0x7c/0x87
  [<ffffffffa08e36a6>] ct_pcm_playback_prepare+0x39/0x60 [snd_ctxfi]
  [<ffffffffa0886bcb>] snd_pcm_do_prepare+0x16/0x28 [snd_pcm]
  [<ffffffffa08867c7>] snd_pcm_action_single+0x2d/0x5b [snd_pcm]
  [<ffffffffa08881f3>] snd_pcm_action_nonatomic+0x52/0x6a [snd_pcm]
  [<ffffffffa088a723>] snd_pcm_common_ioctl1+0x404/0xc79 [snd_pcm]
  [<ffffffff810c52c8>] ? alloc_pages_current+0xb9/0xc2
  [<ffffffff810c9402>] ? new_slab+0x1a5/0x1cb
  [<ffffffff810ab9ea>] ? vma_prio_tree_insert+0x23/0xc1
  [<ffffffffa088b411>] snd_pcm_playback_ioctl1+0x213/0x230 [snd_pcm]
  [<ffffffff810b6c20>] ? mmap_region+0x397/0x4c9
  [<ffffffffa088bd9b>] snd_pcm_playback_ioctl+0x2e/0x36 [snd_pcm]
  [<ffffffff810ddc64>] vfs_ioctl+0x2a/0x78
  [<ffffffff810de130>] do_vfs_ioctl+0x462/0x4a2
  [<ffffffff81029cef>] ? default_spin_lock_flags+0x9/0xe
  [<ffffffff81374647>] ? trace_hardirqs_off_thunk+0x3a/0x6c
  [<ffffffff810de1c5>] sys_ioctl+0x55/0x77
  [<ffffffff8101133a>] system_call_fastpath+0x16/0x1b
Signed-off-by: Takashi Iwai <tiwai@suse.de>
parent aae80dc2
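The trace above boils down to a GFP_KERNEL allocation performed while atc->vm_lock was held with IRQs disabled. Since the PCM prepare and mmap paths are non-atomic (note snd_pcm_action_nonatomic in the backtrace), a sleeping lock is sufficient there. A minimal sketch of the before/after locking pattern, using made-up names (demo_vm, demo_map_*) rather than the driver's code:

#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical illustration only -- not the ctxfi driver code.
 * Lock initialization is omitted for brevity. */
struct demo_vm {
	spinlock_t   spin;   /* old scheme */
	struct mutex lock;   /* new scheme */
};

/* BEFORE: kzalloc(GFP_KERNEL) may sleep, but IRQs are disabled here,
 * so __might_sleep() fires and the call can Oops, as in the trace. */
static void *demo_map_broken(struct demo_vm *vm)
{
	unsigned long flags;
	void *block;

	spin_lock_irqsave(&vm->spin, flags);
	block = kzalloc(64, GFP_KERNEL);	/* sleeping alloc in atomic context */
	spin_unlock_irqrestore(&vm->spin, flags);
	return block;
}

/* AFTER: the caller (PCM prepare / mmap) is non-atomic, so a mutex can be
 * taken and the allocation is free to sleep. */
static void *demo_map_fixed(struct demo_vm *vm)
{
	void *block;

	mutex_lock(&vm->lock);
	block = kzalloc(64, GFP_KERNEL);
	mutex_unlock(&vm->lock);
	return block;
}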
@@ -119,7 +119,6 @@ atc_pcm_release_resources(struct ct_atc *atc, struct ct_atc_pcm *apcm);
 static int ct_map_audio_buffer(struct ct_atc *atc, struct ct_atc_pcm *apcm)
 {
-	unsigned long flags;
 	struct snd_pcm_runtime *runtime;
 	struct ct_vm *vm;
@@ -129,9 +128,7 @@ static int ct_map_audio_buffer(struct ct_atc *atc, struct ct_atc_pcm *apcm)
 	runtime = apcm->substream->runtime;
 	vm = atc->vm;
-	spin_lock_irqsave(&atc->vm_lock, flags);
 	apcm->vm_block = vm->map(vm, runtime->dma_area, runtime->dma_bytes);
-	spin_unlock_irqrestore(&atc->vm_lock, flags);
 	if (NULL == apcm->vm_block)
 		return -ENOENT;
@@ -141,7 +138,6 @@ static int ct_map_audio_buffer(struct ct_atc *atc, struct ct_atc_pcm *apcm)
 static void ct_unmap_audio_buffer(struct ct_atc *atc, struct ct_atc_pcm *apcm)
 {
-	unsigned long flags;
 	struct ct_vm *vm;
 	if (NULL == apcm->vm_block)
@@ -149,9 +145,7 @@ static void ct_unmap_audio_buffer(struct ct_atc *atc, struct ct_atc_pcm *apcm)
 	vm = atc->vm;
-	spin_lock_irqsave(&atc->vm_lock, flags);
 	vm->unmap(vm, apcm->vm_block);
-	spin_unlock_irqrestore(&atc->vm_lock, flags);
 	apcm->vm_block = NULL;
 }
@@ -161,9 +155,7 @@ static unsigned long atc_get_ptp_phys(struct ct_atc *atc, int index)
 	struct ct_vm *vm;
 	void *kvirt_addr;
 	unsigned long phys_addr;
-	unsigned long flags;
-	spin_lock_irqsave(&atc->vm_lock, flags);
 	vm = atc->vm;
 	kvirt_addr = vm->get_ptp_virt(vm, index);
 	if (kvirt_addr == NULL)
@@ -171,8 +163,6 @@ static unsigned long atc_get_ptp_phys(struct ct_atc *atc, int index)
 	else
 		phys_addr = virt_to_phys(kvirt_addr);
-	spin_unlock_irqrestore(&atc->vm_lock, flags);
 	return phys_addr;
 }
@@ -1562,7 +1552,6 @@ int ct_atc_create(struct snd_card *card, struct pci_dev *pci,
 	atc_set_ops(atc);
 	spin_lock_init(&atc->atc_lock);
-	spin_lock_init(&atc->vm_lock);
 	/* Find card model */
 	err = atc_identify_card(atc);
@@ -101,7 +101,6 @@ struct ct_atc {
 	unsigned long (*get_ptp_phys)(struct ct_atc *atc, int index);
 	spinlock_t atc_lock;
-	spinlock_t vm_lock;
 	int (*pcm_playback_prepare)(struct ct_atc *atc,
 				    struct ct_atc_pcm *apcm);
@@ -35,25 +35,27 @@ get_vm_block(struct ct_vm *vm, unsigned int size)
 	struct ct_vm_block *block = NULL, *entry = NULL;
 	struct list_head *pos = NULL;
+	mutex_lock(&vm->lock);
 	list_for_each(pos, &vm->unused) {
 		entry = list_entry(pos, struct ct_vm_block, list);
 		if (entry->size >= size)
 			break; /* found a block that is big enough */
 	}
 	if (pos == &vm->unused)
-		return NULL;
+		goto out;
 	if (entry->size == size) {
 		/* Move the vm node from unused list to used list directly */
 		list_del(&entry->list);
 		list_add(&entry->list, &vm->used);
 		vm->size -= size;
-		return entry;
+		block = entry;
+		goto out;
 	}
 	block = kzalloc(sizeof(*block), GFP_KERNEL);
 	if (NULL == block)
-		return NULL;
+		goto out;
 	block->addr = entry->addr;
 	block->size = size;
@@ -62,6 +64,8 @@ get_vm_block(struct ct_vm *vm, unsigned int size)
 	entry->size -= size;
 	vm->size -= size;
+out:
+	mutex_unlock(&vm->lock);
 	return block;
 }
@@ -70,6 +74,7 @@ static void put_vm_block(struct ct_vm *vm, struct ct_vm_block *block)
 	struct ct_vm_block *entry = NULL, *pre_ent = NULL;
 	struct list_head *pos = NULL, *pre = NULL;
+	mutex_lock(&vm->lock);
 	list_del(&block->list);
 	vm->size += block->size;
@@ -106,6 +111,7 @@ static void put_vm_block(struct ct_vm *vm, struct ct_vm_block *block)
 		pos = pre;
 		pre = pos->prev;
 	}
+	mutex_unlock(&vm->lock);
 }
/* Map host addr (kmalloced/vmalloced) to device logical addr. */
@@ -191,6 +197,8 @@ int ct_vm_create(struct ct_vm **rvm)
 	if (NULL == vm)
 		return -ENOMEM;
+	mutex_init(&vm->lock);
 	/* Allocate page table pages */
 	for (i = 0; i < CT_PTP_NUM; i++) {
 		vm->ptp[i] = kmalloc(PAGE_SIZE, GFP_KERNEL);
@@ -20,7 +20,7 @@
 #define CT_PTP_NUM 1 /* num of device page table pages */
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
 #include <linux/list.h>
 struct ct_vm_block {
@@ -35,6 +35,7 @@ struct ct_vm {
 	unsigned int size;		/* Available addr space in bytes */
 	struct list_head unused;	/* List of unused blocks */
 	struct list_head used;		/* List of used blocks */
+	struct mutex lock;
 	/* Map host addr (kmalloced/vmalloced) to device logical addr. */
 	struct ct_vm_block *(*map)(struct ct_vm *, void *host_addr, int size);
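The put_vm_block fragments above only hint at what happens under the new mutex: the freed block is removed from the used list, inserted back into the address-ordered unused list, and merged with adjacent free blocks. A simplified, self-contained sketch of that coalescing idea in plain C, with hypothetical names (demo_block, demo_free_block) rather than the ctxfi code:

#include <stdlib.h>

/* Hypothetical illustration of address-ordered free-list coalescing;
 * the real driver keeps its blocks on kernel list_heads under vm->lock. */
struct demo_block {
	unsigned long addr;
	unsigned long size;
	struct demo_block *next;	/* singly linked, sorted by addr */
};

static void demo_free_block(struct demo_block **unused, struct demo_block *blk)
{
	struct demo_block **pos = unused;

	/* Insert in address order. */
	while (*pos && (*pos)->addr < blk->addr)
		pos = &(*pos)->next;
	blk->next = *pos;
	*pos = blk;

	/* Merge with the following block if the two ranges touch. */
	if (blk->next && blk->addr + blk->size == blk->next->addr) {
		struct demo_block *n = blk->next;

		blk->size += n->size;
		blk->next = n->next;
		free(n);
	}
	/* (Merging with the preceding block works the same way.) */
}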