Commit 2d29960a, authored by Christoph Hellwig, committed by Konrad Rzeszutek Wilk

swiotlb: dynamically allocate io_tlb_default_mem

Instead of allocating ->list and ->orig_addr separately just do one
dynamic allocation for the actual io_tlb_mem structure.  This simplifies
a lot of the initialization code, and also allows to just check
io_tlb_default_mem to see if swiotlb is in use.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent 73f62095
......@@ -158,17 +158,14 @@ static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
int __ref xen_swiotlb_init(void)
{
enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
unsigned long nslabs, bytes, order;
unsigned int repeat = 3;
unsigned long bytes = swiotlb_size_or_default();
unsigned long nslabs = bytes >> IO_TLB_SHIFT;
unsigned int order, repeat = 3;
int rc = -ENOMEM;
char *start;
nslabs = swiotlb_nr_tbl();
if (!nslabs)
nslabs = DEFAULT_NSLABS;
retry:
m_ret = XEN_SWIOTLB_ENOMEM;
bytes = nslabs << IO_TLB_SHIFT;
order = get_order(bytes);
/*
......@@ -221,19 +218,16 @@ int __ref xen_swiotlb_init(void)
#ifdef CONFIG_X86
void __init xen_swiotlb_init_early(void)
{
unsigned long nslabs, bytes;
unsigned long bytes = swiotlb_size_or_default();
unsigned long nslabs = bytes >> IO_TLB_SHIFT;
unsigned int repeat = 3;
char *start;
int rc;
nslabs = swiotlb_nr_tbl();
if (!nslabs)
nslabs = DEFAULT_NSLABS;
retry:
/*
* Get IO TLB memory from any location.
*/
bytes = nslabs << IO_TLB_SHIFT;
start = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE);
if (!start)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
......@@ -248,8 +242,8 @@ void __init xen_swiotlb_init_early(void)
if (repeat--) {
/* Min is 2MB */
nslabs = max(1024UL, (nslabs >> 1));
pr_info("Lowering to %luMB\n",
(nslabs << IO_TLB_SHIFT) >> 20);
bytes = nslabs << IO_TLB_SHIFT;
pr_info("Lowering to %luMB\n", bytes >> 20);
goto retry;
}
panic("%s (rc:%d)", xen_swiotlb_error(XEN_SWIOTLB_EFIXUP), rc);
......@@ -548,7 +542,7 @@ xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
/*
 * Report whether the device can DMA to every address swiotlb may hand out.
 *
 * The highest physical address swiotlb can return is the last byte of the
 * default pool (io_tlb_default_mem->end - 1); translate it to a DMA address
 * for this device and check it against the device's addressing mask.
 *
 * NOTE(review): the scraped diff showed both the pre-commit (struct member
 * access) and post-commit (pointer access) return lines; only the
 * post-commit line, matching the now-dynamically-allocated
 * io_tlb_default_mem, is kept here.
 */
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return xen_phys_to_dma(hwdev, io_tlb_default_mem->end - 1) <= mask;
}
const struct dma_map_ops xen_swiotlb_dma_ops = {
......
......@@ -90,28 +90,30 @@ struct io_tlb_mem {
phys_addr_t end;
unsigned long nslabs;
unsigned long used;
unsigned int *list;
unsigned int index;
phys_addr_t *orig_addr;
size_t *alloc_size;
spinlock_t lock;
struct dentry *debugfs;
bool late_alloc;
struct io_tlb_slot {
phys_addr_t orig_addr;
size_t alloc_size;
unsigned int list;
} slots[];
};
extern struct io_tlb_mem io_tlb_default_mem;
extern struct io_tlb_mem *io_tlb_default_mem;
/*
 * Return true if @paddr lies inside the swiotlb bounce-buffer pool.
 *
 * Since commit 2d29960a io_tlb_default_mem is a dynamically allocated
 * pointer that is NULL when swiotlb is not in use, so it must be checked
 * before dereferencing.
 *
 * NOTE(review): the scraped diff contained both the old (address-of global)
 * and new (pointer) versions of the declaration and return; only the
 * post-commit version is kept, since the duplicate declaration would be a
 * redefinition error and the duplicate return unreachable.
 */
static inline bool is_swiotlb_buffer(phys_addr_t paddr)
{
	struct io_tlb_mem *mem = io_tlb_default_mem;

	return mem && paddr >= mem->start && paddr < mem->end;
}
void __init swiotlb_exit(void);
unsigned int swiotlb_max_segment(void);
size_t swiotlb_max_mapping_size(struct device *dev);
bool is_swiotlb_active(void);
void __init swiotlb_adjust_size(unsigned long new_size);
void __init swiotlb_adjust_size(unsigned long size);
#else
#define swiotlb_force SWIOTLB_NO_FORCE
static inline bool is_swiotlb_buffer(phys_addr_t paddr)
......@@ -135,7 +137,7 @@ static inline bool is_swiotlb_active(void)
return false;
}
/*
 * No-op stub for the !CONFIG_SWIOTLB build: there is no bounce-buffer pool
 * to resize, so the requested size is ignored.
 *
 * NOTE(review): the scraped diff showed both the old (new_size) and new
 * (size) parameter-name signature lines back to back; only the post-commit
 * signature is kept, as two consecutive signatures would not compile.
 */
static inline void swiotlb_adjust_size(unsigned long size)
{
}
#endif /* CONFIG_SWIOTLB */
......
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.