Commit 8ca19a89 authored by Daniel De Graaf, committed by Konrad Rzeszutek Wilk

xen/gntalloc: Change gref_lock to a mutex

The event channel release function cannot be called under a spinlock
because it can attempt to acquire a mutex due to the event channel
reference acquired when setting up unmap notifications.
Signed-off-by: Daniel De Graaf <dgdegra@tycho.nsa.gov>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent 80df4649
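For context, the pattern the patch fixes is "sleeping while atomic": mutex_lock() may sleep, so it must never be reached from inside a spin_lock()ed region. Converting the outer gref lock to a mutex makes it legal for the release path to take its own mutex while dropping the event channel reference. A minimal sketch of that idea follows; the names inner_mutex, release_notify_channel() and del_gref_example() are hypothetical and are not the driver's actual symbols.

#include <linux/mutex.h>
#include <linux/spinlock.h>

/* Hypothetical lock taken deep inside the release path (for example by
 * the event-channel put code); not the driver's real symbol name. */
static DEFINE_MUTEX(inner_mutex);

/* Before this patch this was: static DEFINE_SPINLOCK(gref_lock); */
static DEFINE_MUTEX(gref_mutex);

static void release_notify_channel(void)
{
	mutex_lock(&inner_mutex);	/* may sleep */
	/* drop the event channel reference here */
	mutex_unlock(&inner_mutex);
}

static void del_gref_example(void)
{
	/*
	 * Under the old spinlock this nesting would be sleeping while
	 * atomic, because mutex_lock() can sleep. A mutex-protected
	 * region is allowed to sleep, so the callee may safely take
	 * its own mutex.
	 */
	mutex_lock(&gref_mutex);
	release_notify_channel();
	mutex_unlock(&gref_mutex);
}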
@@ -74,7 +74,7 @@ MODULE_PARM_DESC(limit, "Maximum number of grants that may be allocated by "
 		"the gntalloc device");
 static LIST_HEAD(gref_list);
-static DEFINE_SPINLOCK(gref_lock);
+static DEFINE_MUTEX(gref_mutex);
 static int gref_size;
 struct notify_info {
@@ -143,15 +143,15 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
 	}
 	/* Add to gref lists. */
-	spin_lock(&gref_lock);
+	mutex_lock(&gref_mutex);
 	list_splice_tail(&queue_gref, &gref_list);
 	list_splice_tail(&queue_file, &priv->list);
-	spin_unlock(&gref_lock);
+	mutex_unlock(&gref_mutex);
 	return 0;
 undo:
-	spin_lock(&gref_lock);
+	mutex_lock(&gref_mutex);
 	gref_size -= (op->count - i);
 	list_for_each_entry(gref, &queue_file, next_file) {
@@ -167,7 +167,7 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
 	 */
 	if (unlikely(!list_empty(&queue_gref)))
 		list_splice_tail(&queue_gref, &gref_list);
-	spin_unlock(&gref_lock);
+	mutex_unlock(&gref_mutex);
 	return rc;
 }
@@ -251,7 +251,7 @@ static int gntalloc_release(struct inode *inode, struct file *filp)
 	pr_debug("%s: priv %p\n", __func__, priv);
-	spin_lock(&gref_lock);
+	mutex_lock(&gref_mutex);
 	while (!list_empty(&priv->list)) {
 		gref = list_entry(priv->list.next,
 			struct gntalloc_gref, next_file);
@@ -261,7 +261,7 @@ static int gntalloc_release(struct inode *inode, struct file *filp)
 			__del_gref(gref);
 	}
 	kfree(priv);
-	spin_unlock(&gref_lock);
+	mutex_unlock(&gref_mutex);
 	return 0;
 }
@@ -286,21 +286,21 @@ static long gntalloc_ioctl_alloc(struct gntalloc_file_private_data *priv,
 		goto out;
 	}
-	spin_lock(&gref_lock);
+	mutex_lock(&gref_mutex);
 	/* Clean up pages that were at zero (local) users but were still mapped
 	 * by remote domains. Since those pages count towards the limit that we
 	 * are about to enforce, removing them here is a good idea.
 	 */
 	do_cleanup();
 	if (gref_size + op.count > limit) {
-		spin_unlock(&gref_lock);
+		mutex_unlock(&gref_mutex);
 		rc = -ENOSPC;
 		goto out_free;
 	}
 	gref_size += op.count;
 	op.index = priv->index;
 	priv->index += op.count * PAGE_SIZE;
-	spin_unlock(&gref_lock);
+	mutex_unlock(&gref_mutex);
 	rc = add_grefs(&op, gref_ids, priv);
 	if (rc < 0)
@@ -343,7 +343,7 @@ static long gntalloc_ioctl_dealloc(struct gntalloc_file_private_data *priv,
 		goto dealloc_grant_out;
 	}
-	spin_lock(&gref_lock);
+	mutex_lock(&gref_mutex);
 	gref = find_grefs(priv, op.index, op.count);
 	if (gref) {
 		/* Remove from the file list only, and decrease reference count.
@@ -363,7 +363,7 @@ static long gntalloc_ioctl_dealloc(struct gntalloc_file_private_data *priv,
 	do_cleanup();
-	spin_unlock(&gref_lock);
+	mutex_unlock(&gref_mutex);
 dealloc_grant_out:
 	return rc;
 }
@@ -383,7 +383,7 @@ static long gntalloc_ioctl_unmap_notify(struct gntalloc_file_private_data *priv,
 	index = op.index & ~(PAGE_SIZE - 1);
 	pgoff = op.index & (PAGE_SIZE - 1);
-	spin_lock(&gref_lock);
+	mutex_lock(&gref_mutex);
 	gref = find_grefs(priv, index, 1);
 	if (!gref) {
@@ -400,8 +400,9 @@ static long gntalloc_ioctl_unmap_notify(struct gntalloc_file_private_data *priv,
 	gref->notify.pgoff = pgoff;
 	gref->notify.event = op.event_channel_port;
 	rc = 0;
 unlock_out:
-	spin_unlock(&gref_lock);
+	mutex_unlock(&gref_mutex);
 	return rc;
 }
@@ -433,9 +434,9 @@ static void gntalloc_vma_open(struct vm_area_struct *vma)
 	if (!gref)
 		return;
-	spin_lock(&gref_lock);
+	mutex_lock(&gref_mutex);
 	gref->users++;
-	spin_unlock(&gref_lock);
+	mutex_unlock(&gref_mutex);
 }
 static void gntalloc_vma_close(struct vm_area_struct *vma)
@@ -444,11 +445,11 @@ static void gntalloc_vma_close(struct vm_area_struct *vma)
 	if (!gref)
 		return;
-	spin_lock(&gref_lock);
+	mutex_lock(&gref_mutex);
 	gref->users--;
 	if (gref->users == 0)
 		__del_gref(gref);
-	spin_unlock(&gref_lock);
+	mutex_unlock(&gref_mutex);
 }
 static struct vm_operations_struct gntalloc_vmops = {
@@ -471,7 +472,7 @@ static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma)
 		return -EINVAL;
 	}
-	spin_lock(&gref_lock);
+	mutex_lock(&gref_mutex);
 	gref = find_grefs(priv, vma->vm_pgoff << PAGE_SHIFT, count);
 	if (gref == NULL) {
 		rv = -ENOENT;
@@ -499,7 +500,7 @@ static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma)
 	rv = 0;
 out_unlock:
-	spin_unlock(&gref_lock);
+	mutex_unlock(&gref_mutex);
 	return rv;
 }