Commit d50582e0 authored by Stefano Stabellini

xen/arm: remove mach_to_phys rbtree

Remove the rbtree used to keep track of machine-to-physical mappings:
the frontend can grant the same page multiple times, leading to errors
when inserting or removing entries from the mach_to_phys tree.

Linux only needed to know the physical address corresponding to a given
machine address in swiotlb-xen. Now that swiotlb-xen can call the
xen_dma_* functions passing the machine address directly, we can remove
it.
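
To see why a second grant of the same page breaks an mfn-keyed tree:
the removed xen_add_mach_to_phys_entry() rejected any insert whose mfn
was already present, yet two grants of one backend page legitimately
map two distinct pfns to the same mfn. A minimal standalone sketch of
that collision (a plain binary search tree standing in for the kernel
rb-tree, with hypothetical pfn/mfn values):

    /* Sketch only, not kernel code: mimics the duplicate-mfn check in
     * the removed xen_add_mach_to_phys_entry(). Two grants of the same
     * machine page give two entries with equal mfn, so the second
     * insert into an mfn-keyed tree must fail. */
    #include <stdio.h>
    #include <stdlib.h>

    struct p2m_entry {
        unsigned long pfn, mfn;
        struct p2m_entry *left, *right;
    };

    /* Insert keyed by mfn; returns -1 (like -EINVAL) on a duplicate key. */
    static int m2p_insert(struct p2m_entry **root, struct p2m_entry *new)
    {
        while (*root) {
            struct p2m_entry *e = *root;
            if (new->mfn == e->mfn)
                return -1;  /* same machine page granted twice */
            root = new->mfn < e->mfn ? &e->left : &e->right;
        }
        *root = new;
        return 0;
    }

    int main(void)
    {
        struct p2m_entry *root = NULL;
        struct p2m_entry a = { .pfn = 0x1000, .mfn = 0xabcd };
        struct p2m_entry b = { .pfn = 0x2000, .mfn = 0xabcd }; /* regrant */

        printf("first insert:  %d\n", m2p_insert(&root, &a)); /* 0  */
        printf("second insert: %d\n", m2p_insert(&root, &b)); /* -1 */
        return 0;
    }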
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Tested-by: Denis Schneider <v1ne2go@gmail.com>
parent 340720be
@@ -33,7 +33,6 @@ typedef struct xpaddr {
 #define INVALID_P2M_ENTRY	(~0UL)
 
 unsigned long __pfn_to_mfn(unsigned long pfn);
-unsigned long __mfn_to_pfn(unsigned long mfn);
 extern struct rb_root phys_to_mach;
 
 static inline unsigned long pfn_to_mfn(unsigned long pfn)
@@ -51,14 +50,6 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn)
 
 static inline unsigned long mfn_to_pfn(unsigned long mfn)
 {
-	unsigned long pfn;
-
-	if (phys_to_mach.rb_node != NULL) {
-		pfn = __mfn_to_pfn(mfn);
-		if (pfn != INVALID_P2M_ENTRY)
-			return pfn;
-	}
-
 	return mfn;
 }
...
@@ -21,14 +21,12 @@ struct xen_p2m_entry {
 	unsigned long pfn;
 	unsigned long mfn;
 	unsigned long nr_pages;
-	struct rb_node rbnode_mach;
 	struct rb_node rbnode_phys;
 };
 
 static rwlock_t p2m_lock;
 struct rb_root phys_to_mach = RB_ROOT;
 EXPORT_SYMBOL_GPL(phys_to_mach);
-static struct rb_root mach_to_phys = RB_ROOT;
 
 static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
 {
@@ -41,8 +39,6 @@ static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
 		parent = *link;
 		entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys);
 
-		if (new->mfn == entry->mfn)
-			goto err_out;
 		if (new->pfn == entry->pfn)
 			goto err_out;
@@ -88,64 +84,6 @@ unsigned long __pfn_to_mfn(unsigned long pfn)
 }
 EXPORT_SYMBOL_GPL(__pfn_to_mfn);
 
-static int xen_add_mach_to_phys_entry(struct xen_p2m_entry *new)
-{
-	struct rb_node **link = &mach_to_phys.rb_node;
-	struct rb_node *parent = NULL;
-	struct xen_p2m_entry *entry;
-	int rc = 0;
-
-	while (*link) {
-		parent = *link;
-		entry = rb_entry(parent, struct xen_p2m_entry, rbnode_mach);
-
-		if (new->mfn == entry->mfn)
-			goto err_out;
-		if (new->pfn == entry->pfn)
-			goto err_out;
-
-		if (new->mfn < entry->mfn)
-			link = &(*link)->rb_left;
-		else
-			link = &(*link)->rb_right;
-	}
-	rb_link_node(&new->rbnode_mach, parent, link);
-	rb_insert_color(&new->rbnode_mach, &mach_to_phys);
-	goto out;
-
-err_out:
-	rc = -EINVAL;
-	pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n",
-			__func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);
-out:
-	return rc;
-}
-
-unsigned long __mfn_to_pfn(unsigned long mfn)
-{
-	struct rb_node *n = mach_to_phys.rb_node;
-	struct xen_p2m_entry *entry;
-	unsigned long irqflags;
-
-	read_lock_irqsave(&p2m_lock, irqflags);
-	while (n) {
-		entry = rb_entry(n, struct xen_p2m_entry, rbnode_mach);
-		if (entry->mfn <= mfn &&
-				entry->mfn + entry->nr_pages > mfn) {
-			read_unlock_irqrestore(&p2m_lock, irqflags);
-			return entry->pfn + (mfn - entry->mfn);
-		}
-		if (mfn < entry->mfn)
-			n = n->rb_left;
-		else
-			n = n->rb_right;
-	}
-	read_unlock_irqrestore(&p2m_lock, irqflags);
-	return INVALID_P2M_ENTRY;
-}
-EXPORT_SYMBOL_GPL(__mfn_to_pfn);
-
 int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
 			    struct gnttab_map_grant_ref *kmap_ops,
 			    struct page **pages, unsigned int count)
@@ -192,7 +130,6 @@ bool __set_phys_to_machine_multi(unsigned long pfn,
 		p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
 		if (p2m_entry->pfn <= pfn &&
 				p2m_entry->pfn + p2m_entry->nr_pages > pfn) {
-			rb_erase(&p2m_entry->rbnode_mach, &mach_to_phys);
 			rb_erase(&p2m_entry->rbnode_phys, &phys_to_mach);
 			write_unlock_irqrestore(&p2m_lock, irqflags);
 			kfree(p2m_entry);
@@ -217,8 +154,7 @@ bool __set_phys_to_machine_multi(unsigned long pfn,
 	p2m_entry->mfn = mfn;
 
 	write_lock_irqsave(&p2m_lock, irqflags);
-	if ((rc = xen_add_phys_to_mach_entry(p2m_entry) < 0) ||
-		(rc = xen_add_mach_to_phys_entry(p2m_entry) < 0)) {
+	if ((rc = xen_add_phys_to_mach_entry(p2m_entry)) < 0) {
 		write_unlock_irqrestore(&p2m_lock, irqflags);
 		return false;
 	}
...
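
For reference, the reverse lookup that the removed __mfn_to_pfn()
performed was a range search: each entry covers [mfn, mfn + nr_pages)
and is keyed by its starting mfn, so the tree walk checks range bounds
rather than exact keys. A minimal standalone sketch of that pattern
(plain binary search tree instead of the kernel rb-tree, hypothetical
values):

    /* Sketch only, not kernel code: range lookup in the style of the
     * removed __mfn_to_pfn(). An entry covering [mfn, mfn + nr_pages)
     * translates any mfn inside it to pfn + offset. */
    #include <stdio.h>

    #define INVALID_P2M_ENTRY (~0UL)

    struct range_entry {
        unsigned long pfn, mfn, nr_pages;
        struct range_entry *left, *right;
    };

    static unsigned long m2p_lookup(struct range_entry *n, unsigned long mfn)
    {
        while (n) {
            if (n->mfn <= mfn && mfn < n->mfn + n->nr_pages)
                return n->pfn + (mfn - n->mfn); /* offset within range */
            n = mfn < n->mfn ? n->left : n->right;
        }
        return INVALID_P2M_ENTRY; /* not a foreign mapping */
    }

    int main(void)
    {
        /* One entry covering mfns 0x100..0x103, mapped at pfns 0x10..0x13. */
        struct range_entry e = { .pfn = 0x10, .mfn = 0x100, .nr_pages = 4 };

        printf("0x102 -> 0x%lx\n", m2p_lookup(&e, 0x102)); /* 0x12 */
        printf("0x200 -> 0x%lx\n", m2p_lookup(&e, 0x200)); /* ~0UL */
        return 0;
    }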
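The last hunk also fixes an operator-precedence bug in the old error
check: in "rc = xen_add_phys_to_mach_entry(p2m_entry) < 0", the
comparison binds tighter than the assignment, so rc received 0 or 1
rather than the error code; the new parenthesization assigns first,
then compares. A standalone demonstration (hypothetical stand-in
function, not kernel code):

    #include <stdio.h>

    /* Stand-in for a kernel call that fails with a negative errno. */
    static int add_entry(void) { return -22; /* -EINVAL */ }

    int main(void)
    {
        int rc;

        /* Old pattern: '<' binds tighter than '=', so rc gets the
         * boolean result of (add_entry() < 0), i.e. 1, not -22. */
        if ((rc = add_entry() < 0))
            printf("old: rc = %d (error code lost)\n", rc);

        /* Fixed pattern: assign first, then compare. */
        if ((rc = add_entry()) < 0)
            printf("new: rc = %d\n", rc);
        return 0;
    }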