Commit 08077ca8 authored by Linus Torvalds

Merge branch 'akpm' (Andrew's patch-bomb)

Merge fixes from Andrew Morton:
 "13 patches.  12 are fixes and one is a little preparatory thing for
  Andi."

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (13 commits)
  memory hotplug: fix section info double registration bug
  mm/page_alloc: fix the page address of higher page's buddy calculation
  drivers/rtc/rtc-twl.c: ensure all interrupts are disabled during probe
  compiler.h: add __visible
  pid-namespace: limit value of ns_last_pid to (0, max_pid)
  include/net/sock.h: squelch compiler warning in sk_rmem_schedule()
  slub: consider pfmemalloc_match() in get_partial_node()
  slab: fix starting index for finding another object
  slab: do ClearSlabPfmemalloc() for all pages of slab
  nbd: clear waiting_queue on shutdown
  MAINTAINERS: fix TXT maintainer list and source repo path
  mm/ia64: fix a memory block size bug
  memory hotplug: reset pgdat->kswapd to NULL if creating kernel thread fails
parents 2ade0b7f f14851af
@@ -3666,11 +3666,12 @@ F: Documentation/networking/README.ipw2200
 F: drivers/net/wireless/ipw2x00/
 
 INTEL(R) TRUSTED EXECUTION TECHNOLOGY (TXT)
-M: Joseph Cihula <joseph.cihula@intel.com>
+M: Richard L Maliszewski <richard.l.maliszewski@intel.com>
+M: Gang Wei <gang.wei@intel.com>
 M: Shane Wang <shane.wang@intel.com>
 L: tboot-devel@lists.sourceforge.net
 W: http://tboot.sourceforge.net
-T: Mercurial http://www.bughost.org/repos.hg/tboot.hg
+T: hg http://tboot.hg.sourceforge.net:8000/hgroot/tboot/tboot
 S: Supported
 F: Documentation/intel_txt.txt
 F: include/linux/tboot.h
...
@@ -449,6 +449,14 @@ static void nbd_clear_que(struct nbd_device *nbd)
                 req->errors++;
                 nbd_end_request(req);
         }
+
+        while (!list_empty(&nbd->waiting_queue)) {
+                req = list_entry(nbd->waiting_queue.next, struct request,
+                                 queuelist);
+                list_del_init(&req->queuelist);
+                req->errors++;
+                nbd_end_request(req);
+        }
 }
@@ -598,6 +606,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
                 nbd->file = NULL;
                 nbd_clear_que(nbd);
                 BUG_ON(!list_empty(&nbd->queue_head));
+                BUG_ON(!list_empty(&nbd->waiting_queue));
                 if (file)
                         fput(file);
                 return 0;
...
@@ -495,6 +495,11 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
         if (ret < 0)
                 goto out1;
 
+        /* ensure interrupts are disabled, bootloaders can be strange */
+        ret = twl_rtc_write_u8(0, REG_RTC_INTERRUPTS_REG);
+        if (ret < 0)
+                dev_warn(&pdev->dev, "unable to disable interrupt\n");
+
         /* init cached IRQ enable bits */
         ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG);
         if (ret < 0)
...
@@ -49,6 +49,13 @@
 #endif
 #endif
 
+#if __GNUC_MINOR__ >= 6
+/*
+ * Tell the optimizer that something else uses this function or variable.
+ */
+#define __visible __attribute__((externally_visible))
+#endif
+
 #if __GNUC_MINOR__ > 0
 #define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
 #endif
...
@@ -278,6 +278,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
 # define __section(S) __attribute__ ((__section__(#S)))
 #endif
 
+#ifndef __visible
+#define __visible
+#endif
+
 /* Are two types/vars the same type (ignoring qualifiers)? */
 #ifndef __same_type
 # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
...
@@ -19,7 +19,7 @@
 #include <linux/compiler.h>
 #include <linux/mutex.h>
 
-#define MIN_MEMORY_BLOCK_SIZE (1 << SECTION_SIZE_BITS)
+#define MIN_MEMORY_BLOCK_SIZE (1UL << SECTION_SIZE_BITS)
 
 struct memory_block {
         unsigned long start_section_nr;
...
@@ -1332,7 +1332,7 @@ static inline bool sk_wmem_schedule(struct sock *sk, int size)
 }
 
 static inline bool
-sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, unsigned int size)
+sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
 {
         if (!sk_has_account(sk))
                 return true;
...
@@ -232,15 +232,19 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
          */
         tmp.data = &current->nsproxy->pid_ns->last_pid;
-        return proc_dointvec(&tmp, write, buffer, lenp, ppos);
+        return proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
 }
 
+extern int pid_max;
+static int zero = 0;
 static struct ctl_table pid_ns_ctl_table[] = {
         {
                 .procname = "ns_last_pid",
                 .maxlen = sizeof(int),
                 .mode = 0666, /* permissions are checked in the handler */
                 .proc_handler = pid_ns_ctl_handler,
+                .extra1 = &zero,
+                .extra2 = &pid_max,
         },
         { }
 };
...
@@ -126,9 +126,6 @@ static void register_page_bootmem_info_section(unsigned long start_pfn)
         struct mem_section *ms;
         struct page *page, *memmap;
 
-        if (!pfn_valid(start_pfn))
-                return;
-
         section_nr = pfn_to_section_nr(start_pfn);
         ms = __nr_to_section(section_nr);
@@ -187,9 +184,16 @@ void register_page_bootmem_info_node(struct pglist_data *pgdat)
         end_pfn = pfn + pgdat->node_spanned_pages;
 
         /* register_section info */
-        for (; pfn < end_pfn; pfn += PAGES_PER_SECTION)
-                register_page_bootmem_info_section(pfn);
+        for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
+                /*
+                 * Some platforms can assign the same pfn to multiple nodes - on
+                 * node0 as well as nodeN. To avoid registering a pfn against
+                 * multiple nodes we check that this pfn does not already
+                 * reside in some other node.
+                 */
+                if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node))
+                        register_page_bootmem_info_section(pfn);
+        }
 }
 
 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
...
@@ -584,7 +584,7 @@ static inline void __free_one_page(struct page *page,
                 combined_idx = buddy_idx & page_idx;
                 higher_page = page + (combined_idx - page_idx);
                 buddy_idx = __find_buddy_index(combined_idx, order + 1);
-                higher_buddy = page + (buddy_idx - combined_idx);
+                higher_buddy = higher_page + (buddy_idx - combined_idx);
                 if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
                         list_add_tail(&page->lru,
                                 &zone->free_area[order].free_list[migratetype]);
...
@@ -983,7 +983,7 @@ static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
         }
 
         /* The caller cannot use PFMEMALLOC objects, find another one */
-        for (i = 1; i < ac->avail; i++) {
+        for (i = 0; i < ac->avail; i++) {
                 /* If a !PFMEMALLOC object is found, swap them */
                 if (!is_obj_pfmemalloc(ac->entry[i])) {
                         objp = ac->entry[i];
@@ -1000,7 +1000,7 @@ static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
                 l3 = cachep->nodelists[numa_mem_id()];
                 if (!list_empty(&l3->slabs_free) && force_refill) {
                         struct slab *slabp = virt_to_slab(objp);
-                        ClearPageSlabPfmemalloc(virt_to_page(slabp->s_mem));
+                        ClearPageSlabPfmemalloc(virt_to_head_page(slabp->s_mem));
                         clear_obj_pfmemalloc(&objp);
                         recheck_pfmemalloc_active(cachep, ac);
                         return objp;
@@ -1032,7 +1032,7 @@ static void *__ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
 {
         if (unlikely(pfmemalloc_active)) {
                 /* Some pfmemalloc slabs exist, check if this is one */
-                struct page *page = virt_to_page(objp);
+                struct page *page = virt_to_head_page(objp);
                 if (PageSlabPfmemalloc(page))
                         set_obj_pfmemalloc(&objp);
         }
...
@@ -1524,12 +1524,13 @@ static inline void *acquire_slab(struct kmem_cache *s,
 }
 
 static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
+static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
 
 /*
  * Try to allocate a partial slab from a specific node.
  */
-static void *get_partial_node(struct kmem_cache *s,
-                struct kmem_cache_node *n, struct kmem_cache_cpu *c)
+static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
+                                struct kmem_cache_cpu *c, gfp_t flags)
 {
         struct page *page, *page2;
         void *object = NULL;
@@ -1545,9 +1546,13 @@ static void *get_partial_node(struct kmem_cache *s,
         spin_lock(&n->list_lock);
         list_for_each_entry_safe(page, page2, &n->partial, lru) {
-                void *t = acquire_slab(s, n, page, object == NULL);
+                void *t;
                 int available;
 
+                if (!pfmemalloc_match(page, flags))
+                        continue;
+
+                t = acquire_slab(s, n, page, object == NULL);
                 if (!t)
                         break;
@@ -1614,7 +1619,7 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
                 if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
                                 n->nr_partial > s->min_partial) {
-                        object = get_partial_node(s, n, c);
+                        object = get_partial_node(s, n, c, flags);
                         if (object) {
                                 /*
                                  * Return the object even if
@@ -1643,7 +1648,7 @@ static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
         void *object;
         int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
 
-        object = get_partial_node(s, get_node(s, searchnode), c);
+        object = get_partial_node(s, get_node(s, searchnode), c, flags);
         if (object || node != NUMA_NO_NODE)
                 return object;
...
@@ -3102,6 +3102,7 @@ int kswapd_run(int nid)
                 /* failure at boot is fatal */
                 BUG_ON(system_state == SYSTEM_BOOTING);
                 printk("Failed to start kswapd on node %d\n",nid);
+                pgdat->kswapd = NULL;
                 ret = -1;
         }
         return ret;
...