Commit 3c33066a authored by Leonardo Bras, committed by Michael Ellerman

powerpc/kernel/iommu: Add new iommu_table_in_use() helper

Having a function to check whether the iommu table has any allocation helps
decide if a tbl can be reset for use with a new DMA window.

It should be enough to replace all instances of !bitmap_empty(tbl...).

iommu_table_in_use() skips reserved memory, so we don't need to worry about
releasing it before testing. This causes iommu_table_release_pages() to
become unnecessary, given it is only used to remove reserved memory for
testing.

Also, only allow storing reserved memory values in tbl if they are valid
in the table, so there is no need to check it in the new helper.
Signed-off-by: Leonardo Bras <leobras.c@gmail.com>
Reviewed-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210817063929.38701-3-leobras.c@gmail.com
parent 0c634baf
...@@ -154,6 +154,7 @@ extern int iommu_tce_table_put(struct iommu_table *tbl); ...@@ -154,6 +154,7 @@ extern int iommu_tce_table_put(struct iommu_table *tbl);
*/ */
extern struct iommu_table *iommu_init_table(struct iommu_table *tbl, extern struct iommu_table *iommu_init_table(struct iommu_table *tbl,
int nid, unsigned long res_start, unsigned long res_end); int nid, unsigned long res_start, unsigned long res_end);
bool iommu_table_in_use(struct iommu_table *tbl);
#define IOMMU_TABLE_GROUP_MAX_TABLES 2 #define IOMMU_TABLE_GROUP_MAX_TABLES 2
......
...@@ -690,32 +690,24 @@ static void iommu_table_reserve_pages(struct iommu_table *tbl, ...@@ -690,32 +690,24 @@ static void iommu_table_reserve_pages(struct iommu_table *tbl,
if (tbl->it_offset == 0) if (tbl->it_offset == 0)
set_bit(0, tbl->it_map); set_bit(0, tbl->it_map);
tbl->it_reserved_start = res_start; if (res_start < tbl->it_offset)
tbl->it_reserved_end = res_end; res_start = tbl->it_offset;
/* Check if res_start..res_end isn't empty and overlaps the table */
if (res_start && res_end &&
(tbl->it_offset + tbl->it_size < res_start ||
res_end < tbl->it_offset))
return;
for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i) if (res_end > (tbl->it_offset + tbl->it_size))
set_bit(i - tbl->it_offset, tbl->it_map); res_end = tbl->it_offset + tbl->it_size;
}
static void iommu_table_release_pages(struct iommu_table *tbl) /* Check if res_start..res_end is a valid range in the table */
{ if (res_start >= res_end) {
int i; tbl->it_reserved_start = tbl->it_offset;
tbl->it_reserved_end = tbl->it_offset;
return;
}
/* tbl->it_reserved_start = res_start;
* In case we have reserved the first bit, we should not emit tbl->it_reserved_end = res_end;
* the warning below.
*/
if (tbl->it_offset == 0)
clear_bit(0, tbl->it_map);
for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i) for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
clear_bit(i - tbl->it_offset, tbl->it_map); set_bit(i - tbl->it_offset, tbl->it_map);
} }
/* /*
...@@ -779,6 +771,22 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid, ...@@ -779,6 +771,22 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
return tbl; return tbl;
} }
/*
 * iommu_table_in_use() - check whether any TCE is currently allocated
 * @tbl: iommu table to inspect
 *
 * Scans the allocation bitmap on both sides of the reserved window
 * (it_reserved_start..it_reserved_end), skipping the reserved region
 * itself.  Bit 0 is also skipped when it_offset == 0, since that page
 * is reserved to catch NULL-pointer DMA.
 *
 * Return: true if any non-reserved entry is in use, false otherwise.
 */
bool iommu_table_in_use(struct iommu_table *tbl)
{
	unsigned long scan_from = (tbl->it_offset == 0) ? 1 : 0;
	unsigned long scan_to = tbl->it_reserved_start - tbl->it_offset;

	/* Any TCE set below the reserved window? */
	if (find_next_bit(tbl->it_map, scan_to, scan_from) != scan_to)
		return true;

	/* Any TCE set above the reserved window? */
	scan_from = tbl->it_reserved_end - tbl->it_offset;
	scan_to = tbl->it_size;

	return find_next_bit(tbl->it_map, scan_to, scan_from) != scan_to;
}
static void iommu_table_free(struct kref *kref) static void iommu_table_free(struct kref *kref)
{ {
struct iommu_table *tbl; struct iommu_table *tbl;
...@@ -795,10 +803,8 @@ static void iommu_table_free(struct kref *kref) ...@@ -795,10 +803,8 @@ static void iommu_table_free(struct kref *kref)
iommu_debugfs_del(tbl); iommu_debugfs_del(tbl);
iommu_table_release_pages(tbl);
/* verify that table contains no entries */ /* verify that table contains no entries */
if (!bitmap_empty(tbl->it_map, tbl->it_size)) if (iommu_table_in_use(tbl))
pr_warn("%s: Unexpected TCEs\n", __func__); pr_warn("%s: Unexpected TCEs\n", __func__);
/* free bitmap */ /* free bitmap */
...@@ -1099,14 +1105,9 @@ int iommu_take_ownership(struct iommu_table *tbl) ...@@ -1099,14 +1105,9 @@ int iommu_take_ownership(struct iommu_table *tbl)
for (i = 0; i < tbl->nr_pools; i++) for (i = 0; i < tbl->nr_pools; i++)
spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock); spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
iommu_table_release_pages(tbl); if (iommu_table_in_use(tbl)) {
if (!bitmap_empty(tbl->it_map, tbl->it_size)) {
pr_err("iommu_tce: it_map is not empty"); pr_err("iommu_tce: it_map is not empty");
ret = -EBUSY; ret = -EBUSY;
/* Undo iommu_table_release_pages, i.e. restore bit#0, etc */
iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
tbl->it_reserved_end);
} else { } else {
memset(tbl->it_map, 0xff, sz); memset(tbl->it_map, 0xff, sz);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment