Commit 5b160bd4 authored by Linus Torvalds

Merge branch 'x86-mce-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86/mce changes from Ingo Molnar:
 "This tree improves the AMD thresholding bank code and includes a
  memory fault signal handling fixlet."

* 'x86-mce-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mce: Fix siginfo_t->si_addr value for non-recoverable memory faults
  x86, MCE, AMD: Update copyrights and boilerplate
  x86, MCE, AMD: Give proper names to the thresholding banks
  x86, MCE, AMD: Make error_count read only
  x86, MCE, AMD: Cleanup reading of error_count
  x86, MCE, AMD: Print decimal thresholding values
  x86, MCE, AMD: Move shared bank to node descriptor
  x86, MCE, AMD: Remove local_allocate_... wrapper
  x86, MCE, AMD: Remove shared banks sysfs linking
  x86, amd_nb: Export model 0x10 and later PCI id
parents 7100e505 bb65a764
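Before the diff itself: the central idea of the thresholding rework pulled here is that MC bank 4 (the shared northbridge bank) is now represented by one refcounted descriptor hung off the node's amd_northbridge, instead of per-CPU copies plus sysfs symlinks. The following is a minimal, userspace-only sketch of that ownership scheme, not kernel code: it uses C11 atomics in place of the kernel's atomic_t, and the names node_bank4, bank_get() and bank_put() are illustrative helpers, not kernel APIs.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for the shared per-node threshold bank (bank 4). */
struct threshold_bank {
	atomic_int cpus;	/* CPUs on the node currently using this bank */
};

static struct threshold_bank *node_bank4;	/* plays the role of nb->bank4 */

/* First CPU on the node allocates the descriptor; later CPUs just reuse it. */
static struct threshold_bank *bank_get(void)
{
	if (node_bank4) {
		atomic_fetch_add(&node_bank4->cpus, 1);
		return node_bank4;
	}
	node_bank4 = calloc(1, sizeof(*node_bank4));
	atomic_store(&node_bank4->cpus, 1);
	return node_bank4;
}

/* Last CPU to leave the node tears the descriptor down. */
static void bank_put(void)
{
	if (atomic_fetch_sub(&node_bank4->cpus, 1) == 1) {
		free(node_bank4);
		node_bank4 = NULL;
	}
}

int main(void)
{
	bank_get();	/* CPU 0 creates the bank */
	bank_get();	/* CPU 1 on the same node reuses it */
	bank_put();	/* CPU 1 goes away, the bank survives */
	bank_put();	/* CPU 0 goes away, the bank is freed */
	printf("bank4 is %s\n", node_bank4 ? "still allocated" : "gone");
	return 0;
}

The same pattern appears in threshold_create_bank()/threshold_remove_bank() in the mce_amd.c hunks below, with kobject bookkeeping layered on top.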
@@ -26,10 +26,31 @@ struct amd_l3_cache {
 	u8	subcaches[4];
 };
 
+struct threshold_block {
+	unsigned int		block;
+	unsigned int		bank;
+	unsigned int		cpu;
+	u32			address;
+	u16			interrupt_enable;
+	bool			interrupt_capable;
+	u16			threshold_limit;
+	struct kobject		kobj;
+	struct list_head	miscj;
+};
+
+struct threshold_bank {
+	struct kobject		*kobj;
+	struct threshold_block	*blocks;
+
+	/* initialized to the number of CPUs on the node sharing this bank */
+	atomic_t		cpus;
+};
+
 struct amd_northbridge {
 	struct pci_dev *misc;
 	struct pci_dev *link;
 	struct amd_l3_cache l3_cache;
+	struct threshold_bank *bank4;
 };
 
 struct amd_northbridge_info {
...
@@ -19,6 +19,7 @@ const struct pci_device_id amd_nb_misc_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
 	{}
 };
 EXPORT_SYMBOL(amd_nb_misc_ids);
...
@@ -1190,6 +1190,7 @@ void mce_notify_process(void)
 {
 	unsigned long pfn;
 	struct mce_info *mi = mce_find_info();
+	int flags = MF_ACTION_REQUIRED;
 
 	if (!mi)
 		mce_panic("Lost physical address for unconsumed uncorrectable error", NULL, NULL);
@@ -1204,8 +1205,9 @@ void mce_notify_process(void)
 	 * doomed. We still need to mark the page as poisoned and alert any
 	 * other users of the page.
 	 */
-	if (memory_failure(pfn, MCE_VECTOR, MF_ACTION_REQUIRED) < 0 ||
-	    mi->restartable == 0) {
+	if (!mi->restartable)
+		flags |= MF_MUST_KILL;
+	if (memory_failure(pfn, MCE_VECTOR, flags) < 0) {
 		pr_err("Memory error not recovered");
 		force_sig(SIGBUS, current);
 	}
...
 /*
- * (c) 2005, 2006 Advanced Micro Devices, Inc.
+ * (c) 2005-2012 Advanced Micro Devices, Inc.
  * Your use of this code is subject to the terms and conditions of the
  * GNU general public license version 2. See "COPYING" or
  * http://www.gnu.org/licenses/gpl.html
  *
  * Written by Jacob Shin - AMD, Inc.
  *
- * Support : jacob.shin@amd.com
+ * Support: borislav.petkov@amd.com
  *
  * April 2006
  *	- added support for AMD Family 0x10 processors
+ * May 2012
+ *	- major scrubbing
  *
  * All MC4_MISCi registers are shared between multi-cores
  */
@@ -25,6 +27,7 @@
 #include <linux/cpu.h>
 #include <linux/smp.h>
 
+#include <asm/amd_nb.h>
 #include <asm/apic.h>
 #include <asm/idle.h>
 #include <asm/mce.h>
@@ -45,23 +48,15 @@
 #define MASK_BLKPTR_LO    0xFF000000
 #define MCG_XBLK_ADDR     0xC0000400
 
-struct threshold_block {
-	unsigned int		block;
-	unsigned int		bank;
-	unsigned int		cpu;
-	u32			address;
-	u16			interrupt_enable;
-	bool			interrupt_capable;
-	u16			threshold_limit;
-	struct kobject		kobj;
-	struct list_head	miscj;
+static const char * const th_names[] = {
+	"load_store",
+	"insn_fetch",
+	"combined_unit",
+	"",
+	"northbridge",
+	"execution_unit",
 };
 
-struct threshold_bank {
-	struct kobject		*kobj;
-	struct threshold_block	*blocks;
-	cpumask_var_t		cpus;
-};
-
 static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks);
 
 static unsigned char shared_bank[NR_BANKS] = {
@@ -84,6 +79,26 @@ struct thresh_restart {
 	u16			old_limit;
 };
 
+static const char * const bank4_names(struct threshold_block *b)
+{
+	switch (b->address) {
+	/* MSR4_MISC0 */
+	case 0x00000413:
+		return "dram";
+	case 0xc0000408:
+		return "ht_links";
+	case 0xc0000409:
+		return "l3_cache";
+	default:
+		WARN(1, "Funny MSR: 0x%08x\n", b->address);
+		return "";
+	}
+};
+
 static bool lvt_interrupt_supported(unsigned int bank, u32 msr_high_bits)
 {
 	/*
@@ -224,8 +239,6 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
 			if (!block)
 				per_cpu(bank_map, cpu) |= (1 << bank);
-			if (shared_bank[bank] && c->cpu_core_id)
-				break;
 
 			memset(&b, 0, sizeof(b));
 			b.cpu			= cpu;
@@ -326,7 +339,7 @@ struct threshold_attr {
 #define SHOW_FIELDS(name)						\
 static ssize_t show_ ## name(struct threshold_block *b, char *buf)	\
 {									\
-	return sprintf(buf, "%lx\n", (unsigned long) b->name);		\
+	return sprintf(buf, "%lu\n", (unsigned long) b->name);		\
 }
 SHOW_FIELDS(interrupt_enable)
 SHOW_FIELDS(threshold_limit)
@@ -377,38 +390,21 @@ store_threshold_limit(struct threshold_block *b, const char *buf, size_t size)
 	return size;
 }
 
-struct threshold_block_cross_cpu {
-	struct threshold_block	*tb;
-	long			retval;
-};
-
-static void local_error_count_handler(void *_tbcc)
-{
-	struct threshold_block_cross_cpu *tbcc = _tbcc;
-	struct threshold_block *b = tbcc->tb;
-	u32 low, high;
-
-	rdmsr(b->address, low, high);
-	tbcc->retval = (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit);
-}
-
 static ssize_t show_error_count(struct threshold_block *b, char *buf)
 {
-	struct threshold_block_cross_cpu tbcc = { .tb = b, };
+	u32 lo, hi;
 
-	smp_call_function_single(b->cpu, local_error_count_handler, &tbcc, 1);
-	return sprintf(buf, "%lx\n", tbcc.retval);
-}
+	rdmsr_on_cpu(b->cpu, b->address, &lo, &hi);
 
-static ssize_t store_error_count(struct threshold_block *b,
-				 const char *buf, size_t count)
-{
-	struct thresh_restart tr = { .b = b, .reset = 1, .old_limit = 0 };
-
-	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);
-	return 1;
+	return sprintf(buf, "%u\n", ((hi & THRESHOLD_MAX) -
+				     (THRESHOLD_MAX - b->threshold_limit)));
 }
 
+static struct threshold_attr error_count = {
+	.attr = {.name = __stringify(error_count), .mode = 0444 },
+	.show = show_error_count,
+};
+
 #define RW_ATTR(val)							\
 static struct threshold_attr val = {					\
 	.attr = {.name = __stringify(val), .mode = 0644 },		\
@@ -418,7 +414,6 @@ static struct threshold_attr val = {					\
 
 RW_ATTR(interrupt_enable);
 RW_ATTR(threshold_limit);
-RW_ATTR(error_count);
 
 static struct attribute *default_attrs[] = {
 	&threshold_limit.attr,
@@ -517,7 +512,7 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
 	err = kobject_init_and_add(&b->kobj, &threshold_ktype,
 				   per_cpu(threshold_banks, cpu)[bank]->kobj,
-				   "misc%i", block);
+				   (bank == 4 ? bank4_names(b) : th_names[bank]));
 	if (err)
 		goto out_free;
 recurse:
@@ -548,98 +543,91 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
 	return err;
 }
 
-static __cpuinit long
-local_allocate_threshold_blocks(int cpu, unsigned int bank)
+static __cpuinit int __threshold_add_blocks(struct threshold_bank *b)
 {
-	return allocate_threshold_blocks(cpu, bank, 0,
-					 MSR_IA32_MC0_MISC + bank * 4);
+	struct list_head *head = &b->blocks->miscj;
+	struct threshold_block *pos = NULL;
+	struct threshold_block *tmp = NULL;
+	int err = 0;
+
+	err = kobject_add(&b->blocks->kobj, b->kobj, b->blocks->kobj.name);
+	if (err)
+		return err;
+
+	list_for_each_entry_safe(pos, tmp, head, miscj) {
+		err = kobject_add(&pos->kobj, b->kobj, pos->kobj.name);
+		if (err) {
+			list_for_each_entry_safe_reverse(pos, tmp, head, miscj)
+				kobject_del(&pos->kobj);
+
+			return err;
+		}
+	}
+	return err;
 }
 
-/* symlinks sibling shared banks to first core.  first core owns dir/files. */
 static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 {
-	int i, err = 0;
-	struct threshold_bank *b = NULL;
 	struct device *dev = per_cpu(mce_device, cpu);
-	char name[32];
-
-	sprintf(name, "threshold_bank%i", bank);
+	struct amd_northbridge *nb = NULL;
+	struct threshold_bank *b = NULL;
+	const char *name = th_names[bank];
+	int err = 0;
 
-#ifdef CONFIG_SMP
-	if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) {	/* symlink */
-		i = cpumask_first(cpu_llc_shared_mask(cpu));
+	if (shared_bank[bank]) {
 
-		/* first core not up yet */
-		if (cpu_data(i).cpu_core_id)
-			goto out;
+		nb = node_to_amd_nb(amd_get_nb_id(cpu));
+		WARN_ON(!nb);
 
-		/* already linked */
-		if (per_cpu(threshold_banks, cpu)[bank])
-			goto out;
+		/* threshold descriptor already initialized on this node? */
+		if (nb->bank4) {
+			/* yes, use it */
+			b = nb->bank4;
+			err = kobject_add(b->kobj, &dev->kobj, name);
+			if (err)
+				goto out;
 
-		b = per_cpu(threshold_banks, i)[bank];
+			per_cpu(threshold_banks, cpu)[bank] = b;
+			atomic_inc(&b->cpus);
 
-		if (!b)
-			goto out;
+			err = __threshold_add_blocks(b);
 
-		err = sysfs_create_link(&dev->kobj, b->kobj, name);
-		if (err)
 			goto out;
-
-		cpumask_copy(b->cpus, cpu_llc_shared_mask(cpu));
-		per_cpu(threshold_banks, cpu)[bank] = b;
-
-		goto out;
+		}
 	}
-#endif
 
 	b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
 	if (!b) {
 		err = -ENOMEM;
 		goto out;
 	}
-	if (!zalloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
-		kfree(b);
-		err = -ENOMEM;
-		goto out;
-	}
 
 	b->kobj = kobject_create_and_add(name, &dev->kobj);
-	if (!b->kobj)
+	if (!b->kobj) {
+		err = -EINVAL;
 		goto out_free;
-
-#ifndef CONFIG_SMP
-	cpumask_setall(b->cpus);
-#else
-	cpumask_set_cpu(cpu, b->cpus);
-#endif
+	}
 
 	per_cpu(threshold_banks, cpu)[bank] = b;
 
-	err = local_allocate_threshold_blocks(cpu, bank);
-	if (err)
-		goto out_free;
-
-	for_each_cpu(i, b->cpus) {
-		if (i == cpu)
-			continue;
+	if (shared_bank[bank]) {
+		atomic_set(&b->cpus, 1);
 
-		dev = per_cpu(mce_device, i);
-		if (dev)
-			err = sysfs_create_link(&dev->kobj,b->kobj, name);
-		if (err)
-			goto out;
-
-		per_cpu(threshold_banks, i)[bank] = b;
+		/* nb is already initialized, see above */
+		WARN_ON(nb->bank4);
+		nb->bank4 = b;
 	}
 
-	goto out;
+	err = allocate_threshold_blocks(cpu, bank, 0,
+					MSR_IA32_MC0_MISC + bank * 4);
+	if (!err)
+		goto out;
 
 out_free:
-	per_cpu(threshold_banks, cpu)[bank] = NULL;
-	free_cpumask_var(b->cpus);
 	kfree(b);
+
 out:
 	return err;
 }
@@ -660,12 +648,6 @@ static __cpuinit int threshold_create_device(unsigned int cpu)
 	return err;
 }
 
-/*
- * let's be hotplug friendly.
- * in case of multiple core processors, the first core always takes ownership
- *   of shared sysfs dir/files, and rest of the cores will be symlinked to it.
- */
-
 static void deallocate_threshold_block(unsigned int cpu,
 						unsigned int bank)
 {
@@ -686,41 +668,42 @@ static void deallocate_threshold_block(unsigned int cpu,
 	per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
 }
 
+static void __threshold_remove_blocks(struct threshold_bank *b)
+{
+	struct threshold_block *pos = NULL;
+	struct threshold_block *tmp = NULL;
+
+	kobject_del(b->kobj);
+
+	list_for_each_entry_safe(pos, tmp, &b->blocks->miscj, miscj)
+		kobject_del(&pos->kobj);
+}
+
 static void threshold_remove_bank(unsigned int cpu, int bank)
 {
+	struct amd_northbridge *nb;
 	struct threshold_bank *b;
-	struct device *dev;
-	char name[32];
-	int i = 0;
 
 	b = per_cpu(threshold_banks, cpu)[bank];
 	if (!b)
 		return;
 	if (!b->blocks)
 		goto free_out;
 
-	sprintf(name, "threshold_bank%i", bank);
-
-#ifdef CONFIG_SMP
-	/* sibling symlink */
-	if (shared_bank[bank] && b->blocks->cpu != cpu) {
-		dev = per_cpu(mce_device, cpu);
-		sysfs_remove_link(&dev->kobj, name);
-		per_cpu(threshold_banks, cpu)[bank] = NULL;
-
-		return;
-	}
-#endif
-
-	/* remove all sibling symlinks before unregistering */
-	for_each_cpu(i, b->cpus) {
-		if (i == cpu)
-			continue;
-
-		dev = per_cpu(mce_device, i);
-		if (dev)
-			sysfs_remove_link(&dev->kobj, name);
-		per_cpu(threshold_banks, i)[bank] = NULL;
+	if (shared_bank[bank]) {
+		if (!atomic_dec_and_test(&b->cpus)) {
+			__threshold_remove_blocks(b);
+			per_cpu(threshold_banks, cpu)[bank] = NULL;
+			return;
+		} else {
+			/*
+			 * the last CPU on this node using the shared bank is
+			 * going away, remove that bank now.
+			 */
+			nb = node_to_amd_nb(amd_get_nb_id(cpu));
+			nb->bank4 = NULL;
+		}
 	}
 
 	deallocate_threshold_block(cpu, bank);
@@ -728,7 +711,6 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
 free_out:
 	kobject_del(b->kobj);
 	kobject_put(b->kobj);
-	free_cpumask_var(b->cpus);
 	kfree(b);
 	per_cpu(threshold_banks, cpu)[bank] = NULL;
 }
...
@@ -33,9 +33,6 @@ static bool force;
 module_param(force, bool, 0444);
 MODULE_PARM_DESC(force, "force loading on processors with erratum 319");
 
-/* PCI-IDs for Northbridge devices not used anywhere else */
-#define PCI_DEVICE_ID_AMD_15H_M10H_NB_F3	0x1403
-
 /* CPUID function 0x80000001, ebx */
 #define CPUID_PKGTYPE_MASK	0xf0000000
 #define CPUID_PKGTYPE_F		0x00000000
@@ -213,7 +210,7 @@ static DEFINE_PCI_DEVICE_TABLE(k10temp_id_table) = {
 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
-	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M10H_NB_F3) },
+	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
 	{}
 };
 MODULE_DEVICE_TABLE(pci, k10temp_id_table);
...
@@ -1591,6 +1591,7 @@ void vmemmap_populate_print_last(void);
 enum mf_flags {
 	MF_COUNT_INCREASED = 1 << 0,
 	MF_ACTION_REQUIRED = 1 << 1,
+	MF_MUST_KILL = 1 << 2,
 };
 extern int memory_failure(unsigned long pfn, int trapno, int flags);
 extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
...
@@ -517,6 +517,7 @@
 #define PCI_DEVICE_ID_AMD_11H_NB_DRAM	0x1302
 #define PCI_DEVICE_ID_AMD_11H_NB_MISC	0x1303
 #define PCI_DEVICE_ID_AMD_11H_NB_LINK	0x1304
+#define PCI_DEVICE_ID_AMD_15H_M10H_F3	0x1403
 #define PCI_DEVICE_ID_AMD_15H_NB_F0	0x1600
 #define PCI_DEVICE_ID_AMD_15H_NB_F1	0x1601
 #define PCI_DEVICE_ID_AMD_15H_NB_F2	0x1602
...
@@ -345,14 +345,14 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
  * Also when FAIL is set do a force kill because something went
  * wrong earlier.
  */
-static void kill_procs(struct list_head *to_kill, int doit, int trapno,
+static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
 			  int fail, struct page *page, unsigned long pfn,
 			  int flags)
 {
 	struct to_kill *tk, *next;
 
 	list_for_each_entry_safe (tk, next, to_kill, nd) {
-		if (doit) {
+		if (forcekill) {
 			/*
 			 * In case something went wrong with munmapping
 			 * make sure the process doesn't catch the
@@ -858,7 +858,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	struct address_space *mapping;
 	LIST_HEAD(tokill);
 	int ret;
-	int kill = 1;
+	int kill = 1, forcekill;
 	struct page *hpage = compound_head(p);
 	struct page *ppage;
@@ -888,7 +888,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	 * be called inside page lock (it's recommended but not enforced).
 	 */
 	mapping = page_mapping(hpage);
-	if (!PageDirty(hpage) && mapping &&
+	if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
 	    mapping_cap_writeback_dirty(mapping)) {
 		if (page_mkclean(hpage)) {
 			SetPageDirty(hpage);
@@ -965,12 +965,14 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	 * Now that the dirty bit has been propagated to the
 	 * struct page and all unmaps done we can decide if
 	 * killing is needed or not. Only kill when the page
-	 * was dirty, otherwise the tokill list is merely
+	 * was dirty or the process is not restartable,
+	 * otherwise the tokill list is merely
 	 * freed.  When there was a problem unmapping earlier
 	 * use a more force-full uncatchable kill to prevent
 	 * any accesses to the poisoned memory.
 	 */
-	kill_procs(&tokill, !!PageDirty(ppage), trapno,
+	forcekill = PageDirty(ppage) || (flags & MF_MUST_KILL);
+	kill_procs(&tokill, forcekill, trapno,
 		      ret != SWAP_SUCCESS, p, pfn, flags);
 
 	return ret;
...
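The "memory fault signal handling fixlet" mentioned in the pull description ties the mce.c and mm/memory-failure.c hunks above together: when the machine check hits a task that cannot be restarted, the MCE handler ORs MF_MUST_KILL into the memory_failure() flags so that a SIGBUS is delivered even if the page turns out to be clean. The snippet below is a compressed, userspace-only sketch of that decision logic; mce_flags() and forcekill() are illustrative stand-ins for the real code in mce_notify_process() and hwpoison_user_mappings(), not kernel functions.

#include <stdbool.h>
#include <stdio.h>

#define MF_ACTION_REQUIRED	(1 << 1)
#define MF_MUST_KILL		(1 << 2)

/* Mirrors the flag assembly added to mce_notify_process(). */
static int mce_flags(bool restartable)
{
	int flags = MF_ACTION_REQUIRED;

	if (!restartable)
		flags |= MF_MUST_KILL;
	return flags;
}

/* Mirrors the forcekill decision added to hwpoison_user_mappings(). */
static bool forcekill(bool page_dirty, int flags)
{
	return page_dirty || (flags & MF_MUST_KILL);
}

int main(void)
{
	/* Clean page, restartable task: the kill list is merely freed. */
	printf("restartable, clean page     -> kill? %d\n",
	       forcekill(false, mce_flags(true)));
	/* Clean page, non-restartable task: SIGBUS must still be sent. */
	printf("non-restartable, clean page -> kill? %d\n",
	       forcekill(false, mce_flags(false)));
	return 0;
}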