Commit 60ed380e authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Catalin Marinas:

 - mm switching fix where the kernel pgd ends up in the user TTBR0 after
   returning from an EFI run-time services call (a hedged sketch of the
   call pattern follows the efi_set_pgd() hunk below)

 - fix __GFP_ZERO handling for atomic pool and CMA DMA allocations (the
   generic code does pass the gfp flags down, so it is left to the arch
   code to memzero accordingly; see the caller sketch after the
   __dma_alloc() hunks below)

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: Honor __GFP_ZERO in dma allocations
  arm64: efi: don't restore TTBR0 if active_mm points at init_mm
Parents: 62a202d7 7132813c
@@ -39,7 +39,11 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
 
 #include <asm/memory.h>
 
-#define cpu_switch_mm(pgd,mm)	cpu_do_switch_mm(virt_to_phys(pgd),mm)
+#define cpu_switch_mm(pgd,mm)				\
+do {							\
+	BUG_ON(pgd == swapper_pg_dir);			\
+	cpu_do_switch_mm(virt_to_phys(pgd),mm);		\
+} while (0)
 
 #define cpu_get_pgd()					\
 ({							\
@@ -337,7 +337,11 @@ core_initcall(arm64_dmi_init);
 
 static void efi_set_pgd(struct mm_struct *mm)
 {
-	cpu_switch_mm(mm->pgd, mm);
+	if (mm == &init_mm)
+		cpu_set_reserved_ttbr0();
+	else
+		cpu_switch_mm(mm->pgd, mm);
+
 	flush_tlb_all();
 	if (icache_is_aivivt())
 		__flush_icache_all();
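
For context, a minimal hedged sketch of the call pattern this hunk protects: the arm64 EFI code switches TTBR0 to the EFI runtime page tables for the duration of a firmware call, then hands TTBR0 back to whatever mm the caller was running on. The wrapper below is an illustrative stand-in (run_on_efi_pgtables() is not a kernel symbol); efi_set_pgd(), efi_mm, init_mm and swapper_pg_dir are the real objects touched by the diff.

/*
 * Illustrative sketch only -- imagine it sitting next to efi_set_pgd()
 * in the arm64 EFI code; run_on_efi_pgtables() is a made-up name.
 */
#include <linux/sched.h>	/* current, struct mm_struct */

static void run_on_efi_pgtables(void (*fw_call)(void))
{
	struct mm_struct *caller_mm = current->active_mm;

	efi_set_pgd(&efi_mm);	/* TTBR0 now points at the EFI runtime mappings */
	fw_call();		/* firmware runs on the EFI page tables */

	/*
	 * Restore the caller.  A kernel thread has caller_mm == &init_mm,
	 * whose pgd is swapper_pg_dir; before this fix efi_set_pgd() would
	 * blindly cpu_switch_mm() that kernel pgd into the user TTBR0.
	 * With the fix it installs the reserved (empty) tables via
	 * cpu_set_reserved_ttbr0() instead.
	 */
	efi_set_pgd(caller_mm);
}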
@@ -51,7 +51,7 @@ static int __init early_coherent_pool(char *p)
 }
 early_param("coherent_pool", early_coherent_pool);
 
-static void *__alloc_from_pool(size_t size, struct page **ret_page)
+static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
 {
 	unsigned long val;
 	void *ptr = NULL;
@@ -67,6 +67,8 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
 
 		*ret_page = phys_to_page(phys);
 		ptr = (void *)val;
+		if (flags & __GFP_ZERO)
+			memset(ptr, 0, size);
 	}
 
 	return ptr;
@@ -101,6 +103,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
 		flags |= GFP_DMA;
 	if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
 		struct page *page;
+		void *addr;
 
 		size = PAGE_ALIGN(size);
 		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
@@ -109,7 +112,10 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
 			return NULL;
 
 		*dma_handle = phys_to_dma(dev, page_to_phys(page));
-		return page_address(page);
+		addr = page_address(page);
+		if (flags & __GFP_ZERO)
+			memset(addr, 0, size);
+		return addr;
 	} else {
 		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
 	}
@@ -146,7 +152,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
 
 	if (!coherent && !(flags & __GFP_WAIT)) {
 		struct page *page = NULL;
-		void *addr = __alloc_from_pool(size, &page);
+		void *addr = __alloc_from_pool(size, &page, flags);
 
 		if (addr)
 			*dma_handle = phys_to_dma(dev, page_to_phys(page));
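
To see why the gfp flags have to be honoured in the arch backend, here is a hedged sketch of a caller that depends on __GFP_ZERO. The device and helper names are made up for illustration; dma_alloc_coherent() is the standard DMA API entry point, and on arm64 it ends up in __dma_alloc() above with the caller's flags.

#include <linux/dma-mapping.h>
#include <linux/device.h>

/* Made-up helper for a hypothetical device; not real driver code. */
static void *setup_rx_ring(struct device *dev, size_t ring_bytes,
			   dma_addr_t *ring_dma)
{
	/*
	 * The caller asks for zeroed coherent memory.  The generic code
	 * just forwards the gfp flags to the arm64 ops, so before this fix
	 * a buffer carved out of the atomic pool or CMA could come back
	 * with stale contents and the device would parse garbage
	 * descriptors.  With the fix, the memset() in the hunks above
	 * guarantees the ring starts out zeroed.
	 */
	return dma_alloc_coherent(dev, ring_bytes, ring_dma,
				  GFP_KERNEL | __GFP_ZERO);
}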