Commit 86a78a8b authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'powerpc-5.2-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:
 "One fix going back to stable, for a bug on 32-bit introduced when we
  added support for THREAD_INFO_IN_TASK.

  A fix for a typo in a recent rework of our hugetlb code that leads to
  crashes on 64-bit when using hugetlbfs with a 4K PAGE_SIZE.

  Two fixes for our recent rework of the address layout on 64-bit hash
  CPUs, both only triggered when userspace tries to access addresses
  outside the user or kernel address ranges.

  Finally a fix for a recently introduced double free in an error path
  in our cacheinfo code.

  Thanks to: Aneesh Kumar K.V, Christophe Leroy, Sachin Sant, Tobin C.
  Harding"

* tag 'powerpc-5.2-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/cacheinfo: Remove double free
  powerpc/mm/hash: Fix get_region_id() for invalid addresses
  powerpc/mm: Drop VM_BUG_ON in get_region_id()
  powerpc/mm: Fix crashes with hugepages & 4K pages
  powerpc/32s: fix flush_hash_pages() on SMP
parents bcd17397 672eaf37
...@@ -93,6 +93,7 @@ ...@@ -93,6 +93,7 @@
#define VMALLOC_REGION_ID NON_LINEAR_REGION_ID(H_VMALLOC_START) #define VMALLOC_REGION_ID NON_LINEAR_REGION_ID(H_VMALLOC_START)
#define IO_REGION_ID NON_LINEAR_REGION_ID(H_KERN_IO_START) #define IO_REGION_ID NON_LINEAR_REGION_ID(H_KERN_IO_START)
#define VMEMMAP_REGION_ID NON_LINEAR_REGION_ID(H_VMEMMAP_START) #define VMEMMAP_REGION_ID NON_LINEAR_REGION_ID(H_VMEMMAP_START)
#define INVALID_REGION_ID (VMEMMAP_REGION_ID + 1)
/* /*
* Defines the address of the vmemap area, in its own region on * Defines the address of the vmemap area, in its own region on
...@@ -119,14 +120,15 @@ static inline int get_region_id(unsigned long ea) ...@@ -119,14 +120,15 @@ static inline int get_region_id(unsigned long ea)
if (id == 0) if (id == 0)
return USER_REGION_ID; return USER_REGION_ID;
if (id != (PAGE_OFFSET >> 60))
return INVALID_REGION_ID;
if (ea < H_KERN_VIRT_START) if (ea < H_KERN_VIRT_START)
return LINEAR_MAP_REGION_ID; return LINEAR_MAP_REGION_ID;
VM_BUG_ON(id != 0xc);
BUILD_BUG_ON(NON_LINEAR_REGION_ID(H_VMALLOC_START) != 2); BUILD_BUG_ON(NON_LINEAR_REGION_ID(H_VMALLOC_START) != 2);
region_id = NON_LINEAR_REGION_ID(ea); region_id = NON_LINEAR_REGION_ID(ea);
VM_BUG_ON(region_id > VMEMMAP_REGION_ID);
return region_id; return region_id;
} }
......
...@@ -767,7 +767,6 @@ static void cacheinfo_create_index_dir(struct cache *cache, int index, ...@@ -767,7 +767,6 @@ static void cacheinfo_create_index_dir(struct cache *cache, int index,
cache_dir->kobj, "index%d", index); cache_dir->kobj, "index%d", index);
if (rc) { if (rc) {
kobject_put(&index_dir->kobj); kobject_put(&index_dir->kobj);
kfree(index_dir);
return; return;
} }
......
...@@ -539,7 +539,8 @@ _GLOBAL(flush_hash_pages) ...@@ -539,7 +539,8 @@ _GLOBAL(flush_hash_pages)
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
lis r9, (mmu_hash_lock - PAGE_OFFSET)@ha lis r9, (mmu_hash_lock - PAGE_OFFSET)@ha
addi r9, r9, (mmu_hash_lock - PAGE_OFFSET)@l addi r9, r9, (mmu_hash_lock - PAGE_OFFSET)@l
lwz r8,TASK_CPU(r2) tophys (r8, r2)
lwz r8, TASK_CPU(r8)
oris r8,r8,9 oris r8,r8,9
10: lwarx r0,0,r9 10: lwarx r0,0,r9
cmpi 0,r0,0 cmpi 0,r0,0
......
...@@ -556,7 +556,7 @@ static int __init add_huge_page_size(unsigned long long size) ...@@ -556,7 +556,7 @@ static int __init add_huge_page_size(unsigned long long size)
if (size <= PAGE_SIZE || !is_power_of_2(size)) if (size <= PAGE_SIZE || !is_power_of_2(size))
return -EINVAL; return -EINVAL;
mmu_psize = check_and_get_huge_psize(size); mmu_psize = check_and_get_huge_psize(shift);
if (mmu_psize < 0) if (mmu_psize < 0)
return -EINVAL; return -EINVAL;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment