Commit 85e97be3 authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "8 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm/slub.c: run free_partial() outside of the kmem_cache_node->list_lock
  rmap: fix compound check logic in page_remove_file_rmap
  mm, rmap: fix false positive VM_BUG() in page_add_file_rmap()
  mm/page_alloc.c: recalculate some of node threshold when on/offline memory
  mm/page_alloc.c: fix wrong initialization when sysctl_min_unmapped_ratio changes
  thp: move shmem_huge_enabled() outside of SYSFS ifdef
  revert "ARM: keystone: dts: add psci command definition"
  rapidio: dereferencing an error pointer
parents 7de24996 60398923

--- a/arch/arm/boot/dts/keystone.dtsi
+++ b/arch/arm/boot/dts/keystone.dtsi
@@ -70,14 +70,6 @@ psci {
 		cpu_on		= <0x84000003>;
 	};
 
-	psci {
-		compatible	= "arm,psci";
-		method		= "smc";
-		cpu_suspend	= <0x84000001>;
-		cpu_off		= <0x84000002>;
-		cpu_on		= <0x84000003>;
-	};
-
 	soc {
 		#address-cells = <1>;
 		#size-cells = <1>;

--- a/drivers/rapidio/rio_cm.c
+++ b/drivers/rapidio/rio_cm.c
@@ -1080,8 +1080,8 @@ static int riocm_send_ack(struct rio_channel *ch)
 static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id,
 					   long timeout)
 {
-	struct rio_channel *ch = NULL;
-	struct rio_channel *new_ch = NULL;
+	struct rio_channel *ch;
+	struct rio_channel *new_ch;
 	struct conn_req *req;
 	struct cm_peer *peer;
 	int found = 0;
@@ -1155,6 +1155,7 @@ static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id,
 
 	spin_unlock_bh(&ch->lock);
 	riocm_put_channel(ch);
+	ch = NULL;
 
 	kfree(req);
 	down_read(&rdev_sem);
@@ -1172,7 +1173,7 @@ static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id,
 	if (!found) {
 		/* If peer device object not found, simply ignore the request */
 		err = -ENODEV;
-		goto err_nodev;
+		goto err_put_new_ch;
 	}
 
 	new_ch->rdev = peer->rdev;
@@ -1184,15 +1185,16 @@ static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id,
 	*new_ch_id = new_ch->id;
 	return new_ch;
 
+err_put_new_ch:
+	spin_lock_bh(&idr_lock);
+	idr_remove(&ch_idr, new_ch->id);
+	spin_unlock_bh(&idr_lock);
+	riocm_put_channel(new_ch);
+
 err_put:
-	riocm_put_channel(ch);
-err_nodev:
-	if (new_ch) {
-		spin_lock_bh(&idr_lock);
-		idr_remove(&ch_idr, new_ch->id);
-		spin_unlock_bh(&idr_lock);
-		riocm_put_channel(new_ch);
-	}
+	if (ch)
+		riocm_put_channel(ch);
+
 	*new_ch_id = 0;
 	return ERR_PTR(err);
 }
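
The rework above leans on the kernel's error-pointer convention: a failing allocation returns ERR_PTR(-err), which is non-NULL, so the old "if (new_ch)" test in the err_nodev path could not tell a valid channel from an error value before dereferencing new_ch->id. The standalone sketch below illustrates that convention; the ERR_PTR/IS_ERR/PTR_ERR helpers and the alloc_channel() function are re-implemented here purely for illustration and are not part of the patch.

/*
 * Userspace illustration of the error-pointer convention.  In the kernel
 * these helpers live in <linux/err.h>; they are redefined here so the
 * example builds on its own.
 */
#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

struct channel {
	int id;
};

/* Hypothetical allocator that fails the way a kernel allocator can. */
static struct channel *alloc_channel(int fail)
{
	static struct channel ch = { .id = 42 };

	if (fail)
		return ERR_PTR(-ENOMEM);
	return &ch;
}

int main(void)
{
	struct channel *ch = alloc_channel(1);

	if (ch)			/* true even though the allocation failed */
		printf("non-NULL, looks usable\n");

	if (IS_ERR(ch))		/* the check the error path actually needs */
		printf("allocation failed: %ld\n", PTR_ERR(ch));
	else
		printf("channel id %d\n", ch->id);

	return 0;
}
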

--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4757,6 +4757,8 @@ int local_memory_node(int node)
 }
 #endif
 
+static void setup_min_unmapped_ratio(void);
+static void setup_min_slab_ratio(void);
 #else	/* CONFIG_NUMA */
 
 static void set_zonelist_order(void)
@@ -5878,9 +5880,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
 		zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
 #ifdef CONFIG_NUMA
 		zone->node = nid;
-		pgdat->min_unmapped_pages += (freesize*sysctl_min_unmapped_ratio)
-						/ 100;
-		pgdat->min_slab_pages += (freesize * sysctl_min_slab_ratio) / 100;
 #endif
 		zone->name = zone_names[j];
 		zone->zone_pgdat = pgdat;
@@ -6801,6 +6800,12 @@ int __meminit init_per_zone_wmark_min(void)
 	setup_per_zone_wmarks();
 	refresh_zone_stat_thresholds();
 	setup_per_zone_lowmem_reserve();
+
+#ifdef CONFIG_NUMA
+	setup_min_unmapped_ratio();
+	setup_min_slab_ratio();
+#endif
+
 	return 0;
 }
 core_initcall(init_per_zone_wmark_min)
@@ -6842,43 +6847,58 @@ int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
 }
 
 #ifdef CONFIG_NUMA
+static void setup_min_unmapped_ratio(void)
+{
+	pg_data_t *pgdat;
+	struct zone *zone;
+
+	for_each_online_pgdat(pgdat)
+		pgdat->min_unmapped_pages = 0;
+
+	for_each_zone(zone)
+		zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages *
+				sysctl_min_unmapped_ratio) / 100;
+}
+
 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
 	void __user *buffer, size_t *length, loff_t *ppos)
 {
-	struct pglist_data *pgdat;
-	struct zone *zone;
 	int rc;
 
 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
 	if (rc)
 		return rc;
 
+	setup_min_unmapped_ratio();
+
+	return 0;
+}
+
+static void setup_min_slab_ratio(void)
+{
+	pg_data_t *pgdat;
+	struct zone *zone;
+
 	for_each_online_pgdat(pgdat)
 		pgdat->min_slab_pages = 0;
 
 	for_each_zone(zone)
-		zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages *
-				sysctl_min_unmapped_ratio) / 100;
-	return 0;
+		zone->zone_pgdat->min_slab_pages += (zone->managed_pages *
+				sysctl_min_slab_ratio) / 100;
 }
 
 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
 	void __user *buffer, size_t *length, loff_t *ppos)
 {
-	struct pglist_data *pgdat;
-	struct zone *zone;
 	int rc;
 
 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
 	if (rc)
 		return rc;
 
-	for_each_online_pgdat(pgdat)
-		pgdat->min_slab_pages = 0;
-
-	for_each_zone(zone)
-		zone->zone_pgdat->min_slab_pages += (zone->managed_pages *
-				sysctl_min_slab_ratio) / 100;
+	setup_min_slab_ratio();
+
 	return 0;
 }
 #endif
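
The page_alloc changes above do two related things: the per-node min_unmapped/min_slab thresholds are now recomputed from scratch by helpers (zero every node, then re-accumulate from each zone's managed_pages), so the same helpers can be reused when memory is onlined or offlined; and the min_unmapped sysctl handler no longer resets the wrong counter (the removed code zeroed min_slab_pages while accumulating into min_unmapped_pages). Below is a small userspace sketch of that reset-then-accumulate idiom; the node/zone structures and the setup_min_unmapped() helper are simplified stand-ins, not the kernel's.

/*
 * Toy model of the recalculation pattern: totals are rebuilt from scratch
 * on every call, so calling it again after a topology change stays correct.
 */
#include <stdio.h>

#define NR_NODES	2
#define NR_ZONES	4

struct node { unsigned long min_unmapped_pages; };
struct zone { int nid; unsigned long managed_pages; };

static struct node nodes[NR_NODES];
static struct zone zones[NR_ZONES] = {
	{ 0, 100000 }, { 0, 50000 }, { 1, 200000 }, { 1, 25000 },
};

static int min_unmapped_ratio = 1;	/* percent, like sysctl_min_unmapped_ratio */

static void setup_min_unmapped(void)
{
	/* Reset first: never accumulate on top of stale values. */
	for (int n = 0; n < NR_NODES; n++)
		nodes[n].min_unmapped_pages = 0;

	for (int z = 0; z < NR_ZONES; z++)
		nodes[zones[z].nid].min_unmapped_pages +=
			zones[z].managed_pages * min_unmapped_ratio / 100;
}

int main(void)
{
	setup_min_unmapped();
	for (int n = 0; n < NR_NODES; n++)
		printf("node %d: min_unmapped_pages=%lu\n",
		       n, nodes[n].min_unmapped_pages);

	/* Simulate offlining memory on node 1, then recalculate. */
	zones[2].managed_pages = 0;
	setup_min_unmapped();
	for (int n = 0; n < NR_NODES; n++)
		printf("node %d after offline: min_unmapped_pages=%lu\n",
		       n, nodes[n].min_unmapped_pages);

	return 0;
}
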

--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1284,8 +1284,9 @@ void page_add_file_rmap(struct page *page, bool compound)
 		VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
 		__inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
 	} else {
-		if (PageTransCompound(page)) {
-			VM_BUG_ON_PAGE(!PageLocked(page), page);
+		if (PageTransCompound(page) && page_mapping(page)) {
+			VM_WARN_ON_ONCE(!PageLocked(page));
+
 			SetPageDoubleMap(compound_head(page));
 			if (PageMlocked(page))
 				clear_page_mlock(compound_head(page));
@@ -1303,7 +1304,7 @@ static void page_remove_file_rmap(struct page *page, bool compound)
 {
 	int i, nr = 1;
 
-	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
+	VM_BUG_ON_PAGE(compound && !PageHead(page), page);
 	lock_page_memcg(page);
 
 	/* Hugepages are not counted in NR_FILE_MAPPED for now. */
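
Two things change above: the DoubleMap path now additionally requires page_mapping(page), and the locking assertion is downgraded from VM_BUG_ON_PAGE (which crashes the kernel) to VM_WARN_ON_ONCE (which reports the first violation and keeps going). The sketch below renders that warn-once idiom in plain userspace C; the BUG_ON/WARN_ON_ONCE macros are local stand-ins written only to show the difference in severity, not the kernel definitions.

/* Minimal warn-once versus bug-on illustration (GNU C, as the kernel uses). */
#include <stdio.h>
#include <stdlib.h>

#define BUG_ON(cond)						\
	do {							\
		if (cond) {					\
			fprintf(stderr, "BUG at %s:%d\n",	\
				__FILE__, __LINE__);		\
			abort();				\
		}						\
	} while (0)

#define WARN_ON_ONCE(cond)					\
	({							\
		static int __warned;				\
		int __c = !!(cond);				\
		if (__c && !__warned) {				\
			__warned = 1;				\
			fprintf(stderr, "WARNING at %s:%d\n",	\
				__FILE__, __LINE__);		\
		}						\
		__c;						\
	})

int main(void)
{
	for (int i = 0; i < 3; i++)
		WARN_ON_ONCE(i >= 0);	/* prints once, execution continues */

	printf("still alive after the warning\n");
	/* BUG_ON(1); would abort the process here instead. */
	return 0;
}
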

--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3975,7 +3975,9 @@ static ssize_t shmem_enabled_store(struct kobject *kobj,
 
 struct kobj_attribute shmem_enabled_attr =
 	__ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
+#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */
 
+#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
 bool shmem_huge_enabled(struct vm_area_struct *vma)
 {
 	struct inode *inode = file_inode(vma->vm_file);
@@ -4006,7 +4008,7 @@ bool shmem_huge_enabled(struct vm_area_struct *vma)
 			return false;
 	}
 }
-#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */
+#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */
 
 #else /* !CONFIG_SHMEM */
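
The shmem change is purely about #ifdef scoping: shmem_huge_enabled() is needed by callers whenever CONFIG_TRANSPARENT_HUGE_PAGECACHE is enabled, but it had been placed inside the block that also requires CONFIG_SYSFS, so a SYSFS=n build with huge pagecache enabled could not find the symbol. A compileable toy version of the split follows; CONFIG_FEATURE and CONFIG_SYSFS_KNOB are invented macros standing in for the real config options.

/*
 * Sketch of splitting a combined guard: the sysfs knob is only built when
 * both options are on, but the predicate must be available whenever the
 * feature itself is on, or callers fail to build with the knob disabled.
 */
#include <stdio.h>

#define CONFIG_FEATURE 1
#define CONFIG_SYSFS_KNOB 0

#if CONFIG_FEATURE && CONFIG_SYSFS_KNOB
/* The tunable: only meaningful when the knob is exposed. */
static int feature_enabled_knob = 1;
#endif /* CONFIG_FEATURE && CONFIG_SYSFS_KNOB */

#if CONFIG_FEATURE
/* Needed by callers whenever the feature itself is compiled in. */
static int feature_enabled(void)
{
#if CONFIG_SYSFS_KNOB
	return feature_enabled_knob;
#else
	return 1;		/* default when the knob is not built */
#endif
}
#endif /* CONFIG_FEATURE */

int main(void)
{
#if CONFIG_FEATURE
	printf("feature enabled: %d\n", feature_enabled());
#endif
	return 0;
}
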

--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3629,6 +3629,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
  */
 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 {
+	LIST_HEAD(discard);
 	struct page *page, *h;
 
 	BUG_ON(irqs_disabled());
@@ -3636,13 +3637,16 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 	list_for_each_entry_safe(page, h, &n->partial, lru) {
 		if (!page->inuse) {
 			remove_partial(n, page);
-			discard_slab(s, page);
+			list_add(&page->lru, &discard);
 		} else {
 			list_slab_objects(s, page,
 			"Objects remaining in %s on __kmem_cache_shutdown()");
 		}
 	}
 	spin_unlock_irq(&n->list_lock);
+
+	list_for_each_entry_safe(page, h, &discard, lru)
+		discard_slab(s, page);
 }
 
 /*
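
The slub fix is a standard deferral pattern: unused partial slabs are unlinked onto a local discard list while kmem_cache_node->list_lock is held, and discard_slab() runs only after the lock has been dropped, keeping the freeing work out of the critical section. Below is a userspace sketch of the same unlink-under-lock, free-after-unlock idea, using a pthread mutex and a hand-rolled singly linked list as stand-ins for the kernel primitives; the free_unused() helper is invented for the example.

/* Collect under the lock, free after dropping it. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
	int inuse;
	struct item *next;
};

static struct item *partial;			/* protected by list_lock */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void free_unused(void)
{
	struct item *discard = NULL;		/* local list, needs no lock */
	struct item **pp;

	pthread_mutex_lock(&list_lock);
	pp = &partial;
	while (*pp) {
		struct item *it = *pp;

		if (!it->inuse) {
			*pp = it->next;		/* unlink under the lock */
			it->next = discard;	/* park on the local list */
			discard = it;
		} else {
			pp = &it->next;
		}
	}
	pthread_mutex_unlock(&list_lock);

	/* The actual freeing happens with the lock dropped. */
	while (discard) {
		struct item *it = discard;

		discard = it->next;
		free(it);
	}
}

int main(void)
{
	for (int i = 0; i < 5; i++) {
		struct item *it = malloc(sizeof(*it));

		it->inuse = i & 1;
		it->next = partial;
		partial = it;
	}

	free_unused();
	printf("unused items freed outside the lock\n");
	return 0;
}
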