Commit 4e268db7 authored by Takashi Iwai

ALSA: usx2y: Fix potential leaks of uninitialized memory

usx2y drivers may expose the allocated pages via mmap, but it performs
zero-clear only for the struct size, not aligned with the page size.
This leaves out some uninitialized trailing bytes.

This patch fixes the clearance to cover all memory that is exposed to
user-space.

Link: https://lore.kernel.org/r/20210517131545.27252-5-tiwai@suse.de
Signed-off-by: Takashi Iwai <tiwai@suse.de>
parent a829dd5b
...@@ -55,17 +55,17 @@ static int snd_us428ctls_mmap(struct snd_hwdep *hw, struct file *filp, struct vm ...@@ -55,17 +55,17 @@ static int snd_us428ctls_mmap(struct snd_hwdep *hw, struct file *filp, struct vm
return -EBUSY; return -EBUSY;
/* if userspace tries to mmap beyond end of our buffer, fail */ /* if userspace tries to mmap beyond end of our buffer, fail */
if (size > PAGE_ALIGN(sizeof(struct us428ctls_sharedmem))) { if (size > US428_SHAREDMEM_PAGES) {
snd_printd("%lu > %lu\n", size, (unsigned long)sizeof(struct us428ctls_sharedmem)); snd_printd("%lu > %lu\n", size, (unsigned long)US428_SHAREDMEM_PAGES);
return -EINVAL; return -EINVAL;
} }
if (!us428->us428ctls_sharedmem) { if (!us428->us428ctls_sharedmem) {
init_waitqueue_head(&us428->us428ctls_wait_queue_head); init_waitqueue_head(&us428->us428ctls_wait_queue_head);
us428->us428ctls_sharedmem = alloc_pages_exact(sizeof(struct us428ctls_sharedmem), GFP_KERNEL); us428->us428ctls_sharedmem = alloc_pages_exact(US428_SHAREDMEM_PAGES, GFP_KERNEL);
if (!us428->us428ctls_sharedmem) if (!us428->us428ctls_sharedmem)
return -ENOMEM; return -ENOMEM;
memset(us428->us428ctls_sharedmem, -1, sizeof(struct us428ctls_sharedmem)); memset(us428->us428ctls_sharedmem, -1, US428_SHAREDMEM_PAGES);
us428->us428ctls_sharedmem->ctl_snapshot_last = -2; us428->us428ctls_sharedmem->ctl_snapshot_last = -2;
} }
area->vm_ops = &us428ctls_vm_ops; area->vm_ops = &us428ctls_vm_ops;
......
...@@ -89,3 +89,5 @@ struct us428ctls_sharedmem { ...@@ -89,3 +89,5 @@ struct us428ctls_sharedmem {
struct us428_p4out p4out[N_US428_P4OUT_BUFS]; struct us428_p4out p4out[N_US428_P4OUT_BUFS];
int p4out_last, p4out_sent; int p4out_last, p4out_sent;
}; };
#define US428_SHAREDMEM_PAGES PAGE_ALIGN(sizeof(struct us428ctls_sharedmem))
...@@ -430,7 +430,7 @@ static void snd_usx2y_card_private_free(struct snd_card *card) ...@@ -430,7 +430,7 @@ static void snd_usx2y_card_private_free(struct snd_card *card)
usb_free_urb(usx2y->in04_urb); usb_free_urb(usx2y->in04_urb);
if (usx2y->us428ctls_sharedmem) if (usx2y->us428ctls_sharedmem)
free_pages_exact(usx2y->us428ctls_sharedmem, free_pages_exact(usx2y->us428ctls_sharedmem,
sizeof(*usx2y->us428ctls_sharedmem)); US428_SHAREDMEM_PAGES);
if (usx2y->card_index >= 0 && usx2y->card_index < SNDRV_CARDS) if (usx2y->card_index >= 0 && usx2y->card_index < SNDRV_CARDS)
snd_usx2y_card_used[usx2y->card_index] = 0; snd_usx2y_card_used[usx2y->card_index] = 0;
} }
......
...@@ -485,6 +485,9 @@ static int usx2y_usbpcm_urbs_start(struct snd_usx2y_substream *subs) ...@@ -485,6 +485,9 @@ static int usx2y_usbpcm_urbs_start(struct snd_usx2y_substream *subs)
return err; return err;
} }
#define USX2Y_HWDEP_PCM_PAGES \
PAGE_ALIGN(sizeof(struct snd_usx2y_hwdep_pcm_shm))
/* /*
* prepare callback * prepare callback
* *
...@@ -501,11 +504,11 @@ static int snd_usx2y_usbpcm_prepare(struct snd_pcm_substream *substream) ...@@ -501,11 +504,11 @@ static int snd_usx2y_usbpcm_prepare(struct snd_pcm_substream *substream)
snd_printdd("snd_usx2y_pcm_prepare(%p)\n", substream); snd_printdd("snd_usx2y_pcm_prepare(%p)\n", substream);
if (!usx2y->hwdep_pcm_shm) { if (!usx2y->hwdep_pcm_shm) {
usx2y->hwdep_pcm_shm = alloc_pages_exact(sizeof(struct snd_usx2y_hwdep_pcm_shm), usx2y->hwdep_pcm_shm = alloc_pages_exact(USX2Y_HWDEP_PCM_PAGES,
GFP_KERNEL); GFP_KERNEL);
if (!usx2y->hwdep_pcm_shm) if (!usx2y->hwdep_pcm_shm)
return -ENOMEM; return -ENOMEM;
memset(usx2y->hwdep_pcm_shm, 0, sizeof(struct snd_usx2y_hwdep_pcm_shm)); memset(usx2y->hwdep_pcm_shm, 0, USX2Y_HWDEP_PCM_PAGES);
} }
mutex_lock(&usx2y->pcm_mutex); mutex_lock(&usx2y->pcm_mutex);
...@@ -692,8 +695,8 @@ static int snd_usx2y_hwdep_pcm_mmap(struct snd_hwdep *hw, struct file *filp, str ...@@ -692,8 +695,8 @@ static int snd_usx2y_hwdep_pcm_mmap(struct snd_hwdep *hw, struct file *filp, str
return -EBUSY; return -EBUSY;
/* if userspace tries to mmap beyond end of our buffer, fail */ /* if userspace tries to mmap beyond end of our buffer, fail */
if (size > PAGE_ALIGN(sizeof(struct snd_usx2y_hwdep_pcm_shm))) { if (size > USX2Y_HWDEP_PCM_PAGES) {
snd_printd("%lu > %lu\n", size, (unsigned long)sizeof(struct snd_usx2y_hwdep_pcm_shm)); snd_printd("%lu > %lu\n", size, (unsigned long)USX2Y_HWDEP_PCM_PAGES);
return -EINVAL; return -EINVAL;
} }
...@@ -711,7 +714,7 @@ static void snd_usx2y_hwdep_pcm_private_free(struct snd_hwdep *hwdep) ...@@ -711,7 +714,7 @@ static void snd_usx2y_hwdep_pcm_private_free(struct snd_hwdep *hwdep)
struct usx2ydev *usx2y = hwdep->private_data; struct usx2ydev *usx2y = hwdep->private_data;
if (usx2y->hwdep_pcm_shm) if (usx2y->hwdep_pcm_shm)
free_pages_exact(usx2y->hwdep_pcm_shm, sizeof(struct snd_usx2y_hwdep_pcm_shm)); free_pages_exact(usx2y->hwdep_pcm_shm, USX2Y_HWDEP_PCM_PAGES);
} }
int usx2y_hwdep_pcm_new(struct snd_card *card) int usx2y_hwdep_pcm_new(struct snd_card *card)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment