Commit 1aaa7368 authored by Petr Tesarik, committed by Christoph Hellwig

swiotlb: allocate a new memory pool when existing pools are full

When swiotlb_find_slots() cannot find suitable slots, schedule the
allocation of a new memory pool. It is not possible to allocate the pool
immediately, because this code may run in interrupt context, which is not
suitable for large memory allocations. This means that the memory pool will
be available too late for the currently requested mapping, but the stress
on the software IO TLB allocator is likely to continue, and subsequent
allocations will benefit from the additional pool eventually.
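
The deferred-allocation pattern described above can be illustrated with a
small, self-contained sketch. The names grow_pool, grow_work and
try_alloc_fast_path are hypothetical and do not appear in this patch; the
real worker is swiotlb_dyn_alloc() in the diff below:

#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/sizes.h>

/* Hypothetical worker: runs in process context, so a large GFP_KERNEL
 * allocation is allowed here. */
static void grow_pool(struct work_struct *work)
{
        void *buf = kzalloc(SZ_1M, GFP_KERNEL);

        if (!buf)
                return;
        /* ... publish the new buffer to the allocator ... */
}

static DECLARE_WORK(grow_work, grow_pool);

/* Hypothetical fast path: may run in interrupt context, so it only kicks
 * the worker; the current request does not get the new memory. */
static void *try_alloc_fast_path(void)
{
        schedule_work(&grow_work);
        return NULL;
}

The request that triggers the growth does not benefit from the new pool,
but subsequent requests do, which is exactly the behaviour described above.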

Keep all memory pools for an allocator in an RCU list to avoid locking on
the read side. For modifications, add a new spinlock to struct io_tlb_mem.

The spinlock also protects updates to the total number of slabs (nslabs in
struct io_tlb_mem), but not reads of the value. Readers may therefore
encounter a stale value, but this is not an issue:

- swiotlb_tbl_map_single() and is_swiotlb_active() only check for a non-zero
  value. This is ensured by the existence of the default memory pool,
  allocated at boot.

- The exact value is used only for non-critical purposes (debugfs, kernel
  messages).
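
The locking scheme can be summarized with a minimal sketch using
hypothetical types (struct my_alloc, struct my_pool); the real code uses
struct io_tlb_mem and struct io_tlb_pool as shown in the diff below.
Readers walk the pool list under rcu_read_lock() only, while writers take
the spinlock and publish new entries with list_add_rcu():

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_pool {
        struct list_head node;
        phys_addr_t start, end;
};

struct my_alloc {
        spinlock_t lock;          /* protects pools and nslabs updates */
        struct list_head pools;   /* RCU-protected list of memory pools */
        unsigned long nslabs;     /* may be read without the lock */
};

/* Writer side: serialized by the spinlock. */
static void my_add_pool(struct my_alloc *alloc, struct my_pool *pool,
                        unsigned long nslabs)
{
        spin_lock(&alloc->lock);
        list_add_rcu(&pool->node, &alloc->pools);
        alloc->nslabs += nslabs;  /* readers may observe a stale value */
        spin_unlock(&alloc->lock);
}

/* Reader side: no locking beyond the RCU read-side critical section.
 * Returning the pointer after rcu_read_unlock() assumes pools are never
 * freed while still referenced, as is the case for swiotlb pools. */
static struct my_pool *my_find_pool(struct my_alloc *alloc, phys_addr_t paddr)
{
        struct my_pool *pool, *ret = NULL;

        rcu_read_lock();
        list_for_each_entry_rcu(pool, &alloc->pools, node) {
                if (paddr >= pool->start && paddr < pool->end) {
                        ret = pool;
                        break;
                }
        }
        rcu_read_unlock();
        return ret;
}
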
Signed-off-by: Petr Tesarik <petr.tesarik.ext@huawei.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent ad96ce32
@@ -8,6 +8,7 @@
 #include <linux/types.h>
 #include <linux/limits.h>
 #include <linux/spinlock.h>
+#include <linux/workqueue.h>
 
 struct device;
 struct page;
@@ -104,12 +105,16 @@ struct io_tlb_pool {
 /**
  * struct io_tlb_mem - Software IO TLB allocator
  * @defpool: Default (initial) IO TLB memory pool descriptor.
+ * @pool: IO TLB memory pool descriptor (if not dynamic).
  * @nslabs: Total number of IO TLB slabs in all pools.
  * @debugfs: The dentry to debugfs.
  * @force_bounce: %true if swiotlb bouncing is forced
  * @for_alloc: %true if the pool is used for memory allocation
  * @can_grow: %true if more pools can be allocated dynamically.
  * @phys_limit: Maximum allowed physical address.
+ * @lock: Lock to synchronize changes to the list.
+ * @pools: List of IO TLB memory pool descriptors (if dynamic).
+ * @dyn_alloc: Dynamic IO TLB pool allocation work.
  * @total_used: The total number of slots in the pool that are currently used
  *              across all areas. Used only for calculating used_hiwater in
  *              debugfs.
@@ -125,6 +130,9 @@ struct io_tlb_mem {
 #ifdef CONFIG_SWIOTLB_DYNAMIC
         bool can_grow;
         u64 phys_limit;
+        spinlock_t lock;
+        struct list_head pools;
+        struct work_struct dyn_alloc;
 #endif
 #ifdef CONFIG_DEBUG_FS
         atomic_long_t total_used;
...
@@ -79,8 +79,23 @@ struct io_tlb_slot {
 static bool swiotlb_force_bounce;
 static bool swiotlb_force_disable;
 
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+
+static void swiotlb_dyn_alloc(struct work_struct *work);
+
+static struct io_tlb_mem io_tlb_default_mem = {
+        .lock = __SPIN_LOCK_UNLOCKED(io_tlb_default_mem.lock),
+        .pools = LIST_HEAD_INIT(io_tlb_default_mem.pools),
+        .dyn_alloc = __WORK_INITIALIZER(io_tlb_default_mem.dyn_alloc,
+                                        swiotlb_dyn_alloc),
+};
+
+#else  /* !CONFIG_SWIOTLB_DYNAMIC */
+
 static struct io_tlb_mem io_tlb_default_mem;
+
+#endif  /* CONFIG_SWIOTLB_DYNAMIC */
 
 static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;
 static unsigned long default_nareas;
@@ -278,6 +293,23 @@ static void swiotlb_init_io_tlb_pool(struct io_tlb_pool *mem, phys_addr_t start,
         return;
 }
 
+/**
+ * add_mem_pool() - add a memory pool to the allocator
+ * @mem: Software IO TLB allocator.
+ * @pool: Memory pool to be added.
+ */
+static void add_mem_pool(struct io_tlb_mem *mem, struct io_tlb_pool *pool)
+{
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+        spin_lock(&mem->lock);
+        list_add_rcu(&pool->node, &mem->pools);
+        mem->nslabs += pool->nslabs;
+        spin_unlock(&mem->lock);
+#else
+        mem->nslabs = pool->nslabs;
+#endif
+}
+
 static void __init *swiotlb_memblock_alloc(unsigned long nslabs,
                 unsigned int flags,
                 int (*remap)(void *tlb, unsigned long nslabs))
@@ -375,7 +407,7 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
 
         swiotlb_init_io_tlb_pool(mem, __pa(tlb), nslabs, false,
                                  default_nareas);
-        io_tlb_default_mem.nslabs = nslabs;
+        add_mem_pool(&io_tlb_default_mem, mem);
 
         if (flags & SWIOTLB_VERBOSE)
                 swiotlb_print_info();
@@ -474,7 +506,7 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
                              (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
         swiotlb_init_io_tlb_pool(mem, virt_to_phys(vstart), nslabs, true,
                                  nareas);
-        io_tlb_default_mem.nslabs = nslabs;
+        add_mem_pool(&io_tlb_default_mem, mem);
 
         swiotlb_print_info();
         return 0;
@@ -625,44 +657,83 @@ static void swiotlb_free_tlb(void *vaddr, size_t bytes)
 /**
  * swiotlb_alloc_pool() - allocate a new IO TLB memory pool
  * @dev: Device for which a memory pool is allocated.
- * @nslabs: Desired number of slabs.
+ * @minslabs: Minimum number of slabs.
+ * @nslabs: Desired (maximum) number of slabs.
+ * @nareas: Number of areas.
  * @phys_limit: Maximum DMA buffer physical address.
  * @gfp: GFP flags for the allocations.
  *
- * Allocate and initialize a new IO TLB memory pool.
+ * Allocate and initialize a new IO TLB memory pool. The actual number of
+ * slabs may be reduced if allocation of @nslabs fails. If even
+ * @minslabs cannot be allocated, this function fails.
  *
  * Return: New memory pool, or %NULL on allocation failure.
  */
 static struct io_tlb_pool *swiotlb_alloc_pool(struct device *dev,
-                unsigned int nslabs, u64 phys_limit, gfp_t gfp)
+                unsigned long minslabs, unsigned long nslabs,
+                unsigned int nareas, u64 phys_limit, gfp_t gfp)
 {
         struct io_tlb_pool *pool;
+        unsigned int slot_order;
         struct page *tlb;
         size_t pool_size;
         size_t tlb_size;
 
-        pool_size = sizeof(*pool) + array_size(sizeof(*pool->areas), 1) +
-                    array_size(sizeof(*pool->slots), nslabs);
+        pool_size = sizeof(*pool) + array_size(sizeof(*pool->areas), nareas);
         pool = kzalloc(pool_size, gfp);
         if (!pool)
                 goto error;
         pool->areas = (void *)pool + sizeof(*pool);
-        pool->slots = (void *)pool->areas + sizeof(*pool->areas);
 
         tlb_size = nslabs << IO_TLB_SHIFT;
-        tlb = swiotlb_alloc_tlb(dev, tlb_size, phys_limit, gfp);
-        if (!tlb)
-                goto error_tlb;
+        while (!(tlb = swiotlb_alloc_tlb(dev, tlb_size, phys_limit, gfp))) {
+                if (nslabs <= minslabs)
+                        goto error_tlb;
+                nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
+                nareas = limit_nareas(nareas, nslabs);
+                tlb_size = nslabs << IO_TLB_SHIFT;
+        }
 
-        swiotlb_init_io_tlb_pool(pool, page_to_phys(tlb), nslabs, true, 1);
+        slot_order = get_order(array_size(sizeof(*pool->slots), nslabs));
+        pool->slots = (struct io_tlb_slot *)
+                __get_free_pages(gfp, slot_order);
+        if (!pool->slots)
+                goto error_slots;
+
+        swiotlb_init_io_tlb_pool(pool, page_to_phys(tlb), nslabs, true, nareas);
         return pool;
 
+error_slots:
+        swiotlb_free_tlb(page_address(tlb), tlb_size);
 error_tlb:
         kfree(pool);
 error:
         return NULL;
 }
 
+/**
+ * swiotlb_dyn_alloc() - dynamic memory pool allocation worker
+ * @work: Pointer to dyn_alloc in struct io_tlb_mem.
+ */
+static void swiotlb_dyn_alloc(struct work_struct *work)
+{
+        struct io_tlb_mem *mem =
+                container_of(work, struct io_tlb_mem, dyn_alloc);
+        struct io_tlb_pool *pool;
+
+        pool = swiotlb_alloc_pool(NULL, IO_TLB_MIN_SLABS, default_nslabs,
+                                  default_nareas, mem->phys_limit, GFP_KERNEL);
+        if (!pool) {
+                pr_warn_ratelimited("Failed to allocate new pool");
+                return;
+        }
+
+        add_mem_pool(mem, pool);
+
+        /* Pairs with smp_rmb() in swiotlb_find_pool(). */
+        smp_wmb();
+}
+
 /**
  * swiotlb_dyn_free() - RCU callback to free a memory pool
  * @rcu: RCU head in the corresponding struct io_tlb_pool.
@@ -670,8 +741,10 @@ static struct io_tlb_pool *swiotlb_alloc_pool(struct device *dev,
 static void swiotlb_dyn_free(struct rcu_head *rcu)
 {
         struct io_tlb_pool *pool = container_of(rcu, struct io_tlb_pool, rcu);
+        size_t slots_size = array_size(sizeof(*pool->slots), pool->nslabs);
         size_t tlb_size = pool->end - pool->start;
 
+        free_pages((unsigned long)pool->slots, get_order(slots_size));
         swiotlb_free_tlb(pool->vaddr, tlb_size);
         kfree(pool);
 }
@@ -689,15 +762,19 @@ static void swiotlb_dyn_free(struct rcu_head *rcu)
 struct io_tlb_pool *swiotlb_find_pool(struct device *dev, phys_addr_t paddr)
 {
         struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
-        struct io_tlb_pool *pool = &mem->defpool;
-
-        if (paddr >= pool->start && paddr < pool->end)
-                return pool;
+        struct io_tlb_pool *pool;
 
-        /* Pairs with smp_wmb() in swiotlb_find_slots(). */
+        /* Pairs with smp_wmb() in swiotlb_find_slots() and
+         * swiotlb_dyn_alloc(), which modify the RCU lists.
+         */
         smp_rmb();
 
         rcu_read_lock();
+        list_for_each_entry_rcu(pool, &mem->pools, node) {
+                if (paddr >= pool->start && paddr < pool->end)
+                        goto out;
+        }
+
         list_for_each_entry_rcu(pool, &dev->dma_io_tlb_pools, node) {
                 if (paddr >= pool->start && paddr < pool->end)
                         goto out;
@@ -1046,18 +1123,24 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
         u64 phys_limit;
         int index;
 
-        pool = &mem->defpool;
-        index = swiotlb_pool_find_slots(dev, pool, orig_addr,
-                                        alloc_size, alloc_align_mask);
-        if (index >= 0)
-                goto found;
-
+        rcu_read_lock();
+        list_for_each_entry_rcu(pool, &mem->pools, node) {
+                index = swiotlb_pool_find_slots(dev, pool, orig_addr,
+                                                alloc_size, alloc_align_mask);
+                if (index >= 0) {
+                        rcu_read_unlock();
+                        goto found;
+                }
+        }
+        rcu_read_unlock();
         if (!mem->can_grow)
                 return -1;
 
+        schedule_work(&mem->dyn_alloc);
+
         nslabs = nr_slots(alloc_size);
         phys_limit = min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
-        pool = swiotlb_alloc_pool(dev, nslabs, phys_limit,
+        pool = swiotlb_alloc_pool(dev, nslabs, nslabs, 1, phys_limit,
                                   GFP_NOWAIT | __GFP_NOWARN);
         if (!pool)
                 return -1;
@@ -1141,7 +1224,19 @@ static unsigned long mem_pool_used(struct io_tlb_pool *pool)
  */
 static unsigned long mem_used(struct io_tlb_mem *mem)
 {
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+        struct io_tlb_pool *pool;
+        unsigned long used = 0;
+
+        rcu_read_lock();
+        list_for_each_entry_rcu(pool, &mem->pools, node)
+                used += mem_pool_used(pool);
+        rcu_read_unlock();
+
+        return used;
+#else
         return mem_pool_used(&mem->defpool);
+#endif
 }
 
 #endif /* CONFIG_DEBUG_FS */
@@ -1562,7 +1657,10 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
                                          false, nareas);
                 mem->force_bounce = true;
                 mem->for_alloc = true;
-                mem->nslabs = nslabs;
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+                spin_lock_init(&mem->lock);
+#endif
+                add_mem_pool(mem, pool);
 
                 rmem->priv = mem;
...