Commit f5278565 authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "The gcc-4.4.4 workaround has actually been merged into a KVM tree by
  Paolo but it is stuck in linux-next and mainline needs it"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  arch/x86/kvm/mmu.c: work around gcc-4.4.4 bug
  sched, numa: do not hint for NUMA balancing on VM_MIXEDMAP mappings
  zsmalloc: fix a null pointer dereference in destroy_handle_cache()
  mm: memcontrol: fix false-positive VM_BUG_ON() on -rt
  checkpatch: fix "GLOBAL_INITIALISERS" test
  zram: clear disk io accounting when reset zram device
  memcg: do not call reclaim if !__GFP_WAIT
  mm/memory_hotplug.c: set zone->wait_table to null after freeing it
parents e64f6384 5ec45a19
arch/x86/kvm/mmu.c
@@ -4215,13 +4215,13 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	u64 entry, gentry, *spte;
 	int npte;
 	bool remote_flush, local_flush, zap_page;
-	union kvm_mmu_page_role mask = (union kvm_mmu_page_role) {
-		.cr0_wp = 1,
-		.cr4_pae = 1,
-		.nxe = 1,
-		.smep_andnot_wp = 1,
-		.smap_andnot_wp = 1,
-	};
+	union kvm_mmu_page_role mask = { };
+
+	mask.cr0_wp = 1;
+	mask.cr4_pae = 1;
+	mask.nxe = 1;
+	mask.smep_andnot_wp = 1;
+	mask.smap_andnot_wp = 1;
 
 	/*
 	 * If we don't have indirect shadow pages, it means no page is
...
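The hunk above works around a gcc-4.4.4 limitation: that compiler rejects designated initializers that name members of an anonymous struct nested inside a union, which is exactly the shape of kvm_mmu_page_role. Below is a minimal, self-contained sketch of the construct, not kernel code; the union "role" and its members are illustrative stand-ins.

#include <stdio.h>

/* Stand-in with the same shape as kvm_mmu_page_role: a union whose
 * bitfields live in an anonymous struct. */
union role {
	unsigned long word;
	struct {
		unsigned cr0_wp:1;
		unsigned cr4_pae:1;
	};
};

int main(void)
{
	/*
	 * gcc-4.4.4 rejects the pre-patch compound-literal form with an
	 * "unknown field ... specified in initializer" error:
	 *
	 *	union role mask = (union role) { .cr0_wp = 1, .cr4_pae = 1 };
	 *
	 * The workaround zero-initializes the union and assigns the
	 * members afterwards, as the patch does:
	 */
	union role mask = { };

	mask.cr0_wp = 1;
	mask.cr4_pae = 1;

	printf("word = %#lx\n", mask.word);
	return 0;
}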
drivers/block/zram/zram_drv.c
@@ -805,7 +805,9 @@ static void zram_reset_device(struct zram *zram)
 	memset(&zram->stats, 0, sizeof(zram->stats));
 	zram->disksize = 0;
 	zram->max_comp_streams = 1;
+
 	set_capacity(zram->disk, 0);
+	part_stat_set_all(&zram->disk->part0, 0);
 
 	up_write(&zram->init_lock);
 	/* I/O operation under all of CPU are done so let's free */
...
kernel/sched/fair.c
@@ -2181,7 +2181,7 @@ void task_numa_work(struct callback_head *work)
 	}
 	for (; vma; vma = vma->vm_next) {
 		if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
-			is_vm_hugetlb_page(vma)) {
+			is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
 			continue;
 		}
...
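For context, VM_MIXEDMAP is typically set by drivers whose mmap handlers insert a mix of struct-page-backed and raw-pfn mappings; NUMA balancing hinting faults on such VMAs are of no benefit, so the hunk above makes task_numa_work() skip them, just as it already skips hugetlb VMAs. A simplified, hedged sketch of such an mmap handler follows; the function and names are illustrative, not from the patch.

#include <linux/fs.h>
#include <linux/mm.h>

/*
 * Illustration only: an mmap handler of the kind that produces
 * VM_MIXEDMAP mappings.  Pages are later inserted by the driver
 * (e.g. via vm_insert_mixed()) rather than faulted in normally.
 */
static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_MIXEDMAP;
	return 0;
}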
mm/memcontrol.c
@@ -2323,6 +2323,8 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
 	css_get_many(&memcg->css, batch);
 	if (batch > nr_pages)
 		refill_stock(memcg, batch - nr_pages);
+	if (!(gfp_mask & __GFP_WAIT))
+		goto done;
 	/*
 	 * If the hierarchy is above the normal consumption range,
 	 * make the charging task trim their excess contribution.
...
@@ -5833,9 +5835,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
 	if (!mem_cgroup_is_root(memcg))
 		page_counter_uncharge(&memcg->memory, 1);
 
-	/* XXX: caller holds IRQ-safe mapping->tree_lock */
-	VM_BUG_ON(!irqs_disabled());
-
+	/* Caller disabled preemption with mapping->tree_lock */
 	mem_cgroup_charge_statistics(memcg, page, -1);
 	memcg_check_events(memcg, page);
 }
...
mm/memory_hotplug.c
@@ -1969,8 +1969,10 @@ void try_offline_node(int nid)
 		 * wait_table may be allocated from boot memory,
 		 * here only free if it's allocated by vmalloc.
 		 */
-		if (is_vmalloc_addr(zone->wait_table))
+		if (is_vmalloc_addr(zone->wait_table)) {
 			vfree(zone->wait_table);
+			zone->wait_table = NULL;
+		}
 	}
 }
 EXPORT_SYMBOL(try_offline_node);
...
mm/zsmalloc.c
@@ -289,7 +289,8 @@ static int create_handle_cache(struct zs_pool *pool)
 
 static void destroy_handle_cache(struct zs_pool *pool)
 {
-	kmem_cache_destroy(pool->handle_cachep);
+	if (pool->handle_cachep)
+		kmem_cache_destroy(pool->handle_cachep);
 }
 
 static unsigned long alloc_handle(struct zs_pool *pool)
...
scripts/checkpatch.pl
@@ -3169,12 +3169,12 @@ sub process {
 		}
 
 # check for global initialisers.
-		if ($line =~ /^\+(\s*$Type\s*$Ident\s*(?:\s+$Modifier))*\s*=\s*(0|NULL|false)\s*;/) {
+		if ($line =~ /^\+$Type\s*$Ident(?:\s+$Modifier)*\s*=\s*(?:0|NULL|false)\s*;/) {
 			if (ERROR("GLOBAL_INITIALISERS",
 				  "do not initialise globals to 0 or NULL\n" .
 				      $herecurr) &&
 			    $fix) {
-				$fixed[$fixlinenr] =~ s/($Type\s*$Ident\s*(?:\s+$Modifier))*\s*=\s*(0|NULL|false)\s*;/$1;/;
+				$fixed[$fixlinenr] =~ s/(^.$Type\s*$Ident(?:\s+$Modifier)*)\s*=\s*(0|NULL|false)\s*;/$1;/;
 			}
 		}
# check for static initialisers.
...
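For reference, this is the kind of C the GLOBAL_INITIALISERS check is aimed at: file-scope variables on added ('+') patch lines that are explicitly initialized to 0, NULL or false, which is redundant because such variables are zeroed in .bss anyway. The variable names below are illustrative, not from the patch.

#include <stdbool.h>
#include <stddef.h>

/* Flagged by checkpatch when added in a patch: the explicit
 * initialisers are redundant. */
static int debug_level = 0;
static char *prog_name = NULL;
static bool verbose = false;

/* Preferred form, and roughly what "checkpatch.pl --fix" rewrites the
 * flagged lines to: the "= 0" / "= NULL" / "= false" is dropped. */
static int trace_level;
static char *log_name;
static bool dry_run;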