Commit 2d29960a authored by Christoph Hellwig, committed by Konrad Rzeszutek Wilk

swiotlb: dynamically allocate io_tlb_default_mem

Instead of allocating ->list and ->orig_addr separately, do one
dynamic allocation for the whole io_tlb_mem structure.  This simplifies
a lot of the initialization code and also allows simply checking
io_tlb_default_mem to see whether swiotlb is in use.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent 73f62095
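The hunks below show the Xen side of the change (xen_swiotlb_init() and friends) followed by the swiotlb header; the core allocation itself sits in the collapsed part of the diff further down. As a hedged sketch of the pattern the commit message describes (the struct_size()/memblock_alloc() pairing and the helper name are assumptions inferred from the header change, not copied from the collapsed diff):

```c
/* Sketch only, not the collapsed diff verbatim: one memblock allocation
 * now covers the io_tlb_mem header plus nslabs trailing struct
 * io_tlb_slot entries, replacing the old separate ->list, ->orig_addr
 * and ->alloc_size arrays.
 */
static void __init example_swiotlb_alloc(unsigned long nslabs)
{
	struct io_tlb_mem *mem;
	/* struct_size() only inspects the type of mem, so using the
	 * not-yet-assigned pointer here is the usual kernel idiom. */
	size_t alloc_size = PAGE_ALIGN(struct_size(mem, slots, nslabs));

	mem = memblock_alloc(alloc_size, PAGE_SIZE);
	if (!mem)
		panic("%s: Failed to allocate %zu bytes\n",
		      __func__, alloc_size);
	mem->nslabs = nslabs;
	io_tlb_default_mem = mem;
}
```

With io_tlb_default_mem a pointer that is only set once the bounce buffer exists, "is swiotlb in use?" reduces to a NULL check, which is exactly what the new is_swiotlb_buffer() in the header hunk below does.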
@@ -158,17 +158,14 @@ static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
 int __ref xen_swiotlb_init(void)
 {
 	enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
-	unsigned long nslabs, bytes, order;
-	unsigned int repeat = 3;
+	unsigned long bytes = swiotlb_size_or_default();
+	unsigned long nslabs = bytes >> IO_TLB_SHIFT;
+	unsigned int order, repeat = 3;
 	int rc = -ENOMEM;
 	char *start;
 
-	nslabs = swiotlb_nr_tbl();
-	if (!nslabs)
-		nslabs = DEFAULT_NSLABS;
 retry:
 	m_ret = XEN_SWIOTLB_ENOMEM;
-	bytes = nslabs << IO_TLB_SHIFT;
 	order = get_order(bytes);
 
 	/*
@@ -221,19 +218,16 @@ int __ref xen_swiotlb_init(void)
 #ifdef CONFIG_X86
 void __init xen_swiotlb_init_early(void)
 {
-	unsigned long nslabs, bytes;
+	unsigned long bytes = swiotlb_size_or_default();
+	unsigned long nslabs = bytes >> IO_TLB_SHIFT;
 	unsigned int repeat = 3;
 	char *start;
 	int rc;
 
-	nslabs = swiotlb_nr_tbl();
-	if (!nslabs)
-		nslabs = DEFAULT_NSLABS;
 retry:
 	/*
 	 * Get IO TLB memory from any location.
 	 */
-	bytes = nslabs << IO_TLB_SHIFT;
 	start = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE);
 	if (!start)
 		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
@@ -248,8 +242,8 @@ void __init xen_swiotlb_init_early(void)
 	if (repeat--) {
 		/* Min is 2MB */
 		nslabs = max(1024UL, (nslabs >> 1));
-		pr_info("Lowering to %luMB\n",
-			(nslabs << IO_TLB_SHIFT) >> 20);
+		bytes = nslabs << IO_TLB_SHIFT;
+		pr_info("Lowering to %luMB\n", bytes >> 20);
 		goto retry;
 	}
 	panic("%s (rc:%d)", xen_swiotlb_error(XEN_SWIOTLB_EFIXUP), rc);
@@ -548,7 +542,7 @@ xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
 static int
 xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
 {
-	return xen_phys_to_dma(hwdev, io_tlb_default_mem.end - 1) <= mask;
+	return xen_phys_to_dma(hwdev, io_tlb_default_mem->end - 1) <= mask;
 }
 
 const struct dma_map_ops xen_swiotlb_dma_ops = {
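Two notes on the Xen hunks above: bytes is now computed once from swiotlb_size_or_default() instead of being re-derived from nslabs on every retry, and the retry path halves nslabs with a 1024-slot floor. Since each slot covers 1 << IO_TLB_SHIFT = 2 KiB (IO_TLB_SHIFT is 11 in this kernel), that floor is exactly the "Min is 2MB" the comment mentions. A standalone C sketch of the halving arithmetic (the 64 MB starting value is an assumed default, used only for illustration):

```c
#include <stdio.h>

#define IO_TLB_SHIFT 11	/* each swiotlb slot spans 2 KiB */

/* Mirrors the kernel's nslabs = max(1024UL, nslabs >> 1):
 * halve the slot count but never go below 1024 slots (2 MB). */
static unsigned long lower(unsigned long nslabs)
{
	unsigned long half = nslabs >> 1;

	return half > 1024UL ? half : 1024UL;
}

int main(void)
{
	unsigned long nslabs = (64UL << 20) >> IO_TLB_SHIFT;

	while (nslabs > 1024UL) {
		nslabs = lower(nslabs);
		printf("Lowering to %luMB\n", (nslabs << IO_TLB_SHIFT) >> 20);
	}
	return 0;	/* prints 32, 16, 8, 4, 2 MB */
}
```

The remaining hunks are against the swiotlb header, where the per-slot metadata moves into the structure itself.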
@@ -90,28 +90,30 @@ struct io_tlb_mem {
 	phys_addr_t end;
 	unsigned long nslabs;
 	unsigned long used;
-	unsigned int *list;
 	unsigned int index;
-	phys_addr_t *orig_addr;
-	size_t *alloc_size;
 	spinlock_t lock;
 	struct dentry *debugfs;
 	bool late_alloc;
+	struct io_tlb_slot {
+		phys_addr_t orig_addr;
+		size_t alloc_size;
+		unsigned int list;
+	} slots[];
 };
-extern struct io_tlb_mem io_tlb_default_mem;
+extern struct io_tlb_mem *io_tlb_default_mem;
 
 static inline bool is_swiotlb_buffer(phys_addr_t paddr)
 {
-	struct io_tlb_mem *mem = &io_tlb_default_mem;
+	struct io_tlb_mem *mem = io_tlb_default_mem;
 
-	return paddr >= mem->start && paddr < mem->end;
+	return mem && paddr >= mem->start && paddr < mem->end;
 }
 
 void __init swiotlb_exit(void);
 unsigned int swiotlb_max_segment(void);
 size_t swiotlb_max_mapping_size(struct device *dev);
 bool is_swiotlb_active(void);
-void __init swiotlb_adjust_size(unsigned long new_size);
+void __init swiotlb_adjust_size(unsigned long size);
 #else
 #define swiotlb_force SWIOTLB_NO_FORCE
 static inline bool is_swiotlb_buffer(phys_addr_t paddr)
@@ -135,7 +137,7 @@ static inline bool is_swiotlb_active(void)
 	return false;
 }
 
-static inline void swiotlb_adjust_size(unsigned long new_size)
+static inline void swiotlb_adjust_size(unsigned long size)
 {
 }
 #endif /* CONFIG_SWIOTLB */
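The struct change above replaces three separately allocated parallel arrays (list, orig_addr, alloc_size) with a single flexible array of struct io_tlb_slot, so all metadata for one bounce-buffer slot sits together and is freed along with the structure. A hedged sketch of how per-slot bookkeeping reads after the change (the helper name and its arguments are illustrative, not kernel identifiers):

```c
#include <linux/swiotlb.h>

/* Illustrative helper, not from the kernel: record bounce-buffer
 * bookkeeping for slot i. The trailing comments show the old,
 * parallel-array form each field replaces.
 */
static void record_slot(struct io_tlb_mem *mem, unsigned int i,
			phys_addr_t orig_addr, size_t alloc_size)
{
	mem->slots[i].orig_addr  = orig_addr;	/* was mem->orig_addr[i]  */
	mem->slots[i].alloc_size = alloc_size;	/* was mem->alloc_size[i] */
	mem->slots[i].list       = 0;		/* was mem->list[i]       */
}
```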
The diff for the remaining file (the swiotlb core, where io_tlb_default_mem is defined and the single allocation happens) is collapsed in this view.