Commit 042f7b7c authored by Linus Torvalds

Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc

Pull more powerpc updates from Ben Herrenschmidt:
 "Here are a few more powerpc things for you.

  So you'll find here the conversion of the two new firmware sysfs
  interfaces to the new API for self-removing files that Greg and Tejun
  introduced, so they can finally remove the old one.

  I'm also reverting the hwmon driver for powernv.  I shouldn't have
  merged it, I got a bit carried away here.  I hadn't realized it was
  never CCed to the relevant maintainer(s) and list(s), and happens to
  have some issues so I'm taking it out and it will come back via the
  proper channels.

  The rest is a bunch of LE fixes (argh, some of the new stuff was
  broken on LE, I really need to start testing LE myself!) and various
  random fixes here and there.

  Finally one bit that's not strictly a fix, which is the HVC OPAL
  change to "kick" the HVC thread when the firmware tells us there is
  new incoming data.  I don't feel like waiting for this one, it's
  simple enough, and it makes a big difference in console responsiveness
  which is good for my nerves"

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (26 commits)
  powerpc/powernv: Adapt opal-elog and opal-dump to new sysfs_remove_file_self
  Revert "powerpc/powernv: hwmon driver for power values, fan rpm and temperature"
  power, sched: stop updating inside arch_update_cpu_topology() when nothing to be updated
  powerpc/le: Avoid creating R_PPC64_TOCSAVE relocations for modules
  arch/powerpc: Use RCU_INIT_POINTER(x, NULL) in platforms/cell/spu_syscalls.c
  powerpc/opal: Add missing include
  powerpc: Convert last uses of __FUNCTION__ to __func__
  powerpc: Add lq/stq emulation
  powerpc/powernv: Add invalid OPAL call
  powerpc/powernv: Add OPAL message log interface
  powerpc/book3s: Fix mc_recoverable_range buffer overrun issue.
  powerpc: Remove dead code in syscall entry
  powerpc: Use of_node_init() for the fakenode in msi_bitmap.c
  powerpc/mm: NUMA pte should be handled via slow path in get_user_pages_fast()
  powerpc/powernv: Fix endian issues with sensor code
  powerpc/powernv: Fix endian issues with OPAL async code
  tty/hvc_opal: Kick the HVC thread on OPAL console events
  powerpc/powernv: Add opal_notifier_unregister() and export to modules
  powerpc/ppc64: Do not turn AIL (reloc-on interrupts) too early
  powerpc/ppc64: Gracefully handle early interrupts
  ...
parents 69cd9eba cc4f265a
......@@ -74,6 +74,7 @@ override CROSS32AS += -mlittle-endian
LDEMULATION := lppc
GNUTARGET := powerpcle
MULTIPLEWORD := -mno-multiple
KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-save-toc-indirect)
else
ifeq ($(call cc-option-yn,-mbig-endian),y)
override CC += -mbig-endian
......
......@@ -54,6 +54,7 @@ extern struct ppc_emulated {
#ifdef CONFIG_PPC64
struct ppc_emulated_entry mfdscr;
struct ppc_emulated_entry mtdscr;
struct ppc_emulated_entry lq_stq;
#endif
} ppc_emulated;
......
......@@ -87,6 +87,7 @@ extern int opal_enter_rtas(struct rtas_args *args,
#define OPAL_ASYNC_COMPLETION -15
/* API Tokens (in r0) */
#define OPAL_INVALID_CALL -1
#define OPAL_CONSOLE_WRITE 1
#define OPAL_CONSOLE_READ 2
#define OPAL_RTC_READ 3
......@@ -177,6 +178,8 @@ extern int opal_enter_rtas(struct rtas_args *args,
#ifndef __ASSEMBLY__
#include <linux/notifier.h>
/* Other enums */
enum OpalVendorApiTokens {
OPAL_START_VENDOR_API_RANGE = 1000, OPAL_END_VENDOR_API_RANGE = 1999
......@@ -422,9 +425,9 @@ enum OpalSysparamPerm {
};
struct opal_msg {
uint32_t msg_type;
uint32_t reserved;
uint64_t params[8];
__be32 msg_type;
__be32 reserved;
__be64 params[8];
};
struct opal_machine_check_event {
......@@ -730,7 +733,11 @@ typedef struct oppanel_line {
/* /sys/firmware/opal */
extern struct kobject *opal_kobj;
/* /ibm,opal */
extern struct device_node *opal_node;
/* API functions */
int64_t opal_invalid_call(void);
int64_t opal_console_write(int64_t term_number, __be64 *length,
const uint8_t *buffer);
int64_t opal_console_read(int64_t term_number, __be64 *length,
......@@ -874,8 +881,7 @@ int64_t opal_get_param(uint64_t token, uint32_t param_id, uint64_t buffer,
size_t length);
int64_t opal_set_param(uint64_t token, uint32_t param_id, uint64_t buffer,
size_t length);
int64_t opal_sensor_read(uint32_t sensor_hndl, int token,
uint32_t *sensor_data);
int64_t opal_sensor_read(uint32_t sensor_hndl, int token, __be32 *sensor_data);
/* Internal functions */
extern int early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data);
......@@ -892,6 +898,8 @@ extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
int depth, void *data);
extern int opal_notifier_register(struct notifier_block *nb);
extern int opal_notifier_unregister(struct notifier_block *nb);
extern int opal_message_notifier_register(enum OpalMessageType msg_type,
struct notifier_block *nb);
extern void opal_notifier_enable(void);
......@@ -919,6 +927,7 @@ extern void opal_flash_init(void);
extern int opal_elog_init(void);
extern void opal_platform_dump_init(void);
extern void opal_sys_param_init(void);
extern void opal_msglog_init(void);
extern int opal_machine_check(struct pt_regs *regs);
extern bool opal_mce_check_early_recovery(struct pt_regs *regs);
......
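Most of the LE fixes in this batch follow the pattern visible in the opal_msg and opal_sensor_read changes above: values shared with the big-endian OPAL firmware are declared __be32/__be64 and converted at the point of use, which also lets sparse flag any access that misses a swap. A minimal sketch of the idiom (not a hunk from this series):

	struct opal_msg msg;	/* filled in by firmware, big-endian */

	u32 type  = be32_to_cpu(msg.msg_type);	/* no-op on BE, swap on LE */
	u64 token = be64_to_cpu(msg.params[0]);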
......@@ -150,19 +150,53 @@ struct rtas_suspend_me_data {
#define RTAS_VECTOR_EXTERNAL_INTERRUPT 0x500
struct rtas_error_log {
unsigned long version:8; /* Architectural version */
unsigned long severity:3; /* Severity level of error */
unsigned long disposition:2; /* Degree of recovery */
unsigned long extended:1; /* extended log present? */
unsigned long /* reserved */ :2; /* Reserved for future use */
unsigned long initiator:4; /* Initiator of event */
unsigned long target:4; /* Target of failed operation */
unsigned long type:8; /* General event or error*/
unsigned long extended_log_length:32; /* length in bytes */
unsigned char buffer[1]; /* Start of extended log */
/* Byte 0 */
uint8_t byte0; /* Architectural version */
/* Byte 1 */
uint8_t byte1;
/* XXXXXXXX
* XXX 3: Severity level of error
* XX 2: Degree of recovery
* X 1: Extended log present?
* XX 2: Reserved
*/
/* Byte 2 */
uint8_t byte2;
/* XXXXXXXX
* XXXX 4: Initiator of event
* XXXX 4: Target of failed operation
*/
uint8_t byte3; /* General event or error */
__be32 extended_log_length; /* length in bytes */
unsigned char buffer[1]; /* Start of extended log */
/* Variable length. */
};
static inline uint8_t rtas_error_severity(const struct rtas_error_log *elog)
{
return (elog->byte1 & 0xE0) >> 5;
}
static inline uint8_t rtas_error_disposition(const struct rtas_error_log *elog)
{
return (elog->byte1 & 0x18) >> 3;
}
static inline uint8_t rtas_error_extended(const struct rtas_error_log *elog)
{
return (elog->byte1 & 0x04) >> 2;
}
#define rtas_error_type(x) ((x)->byte3)
static inline
uint32_t rtas_error_extended_log_length(const struct rtas_error_log *elog)
{
return be32_to_cpu(elog->extended_log_length);
}
#define RTAS_V6EXT_LOG_FORMAT_EVENT_LOG 14
#define RTAS_V6EXT_COMPANY_ID_IBM (('I' << 24) | ('B' << 16) | ('M' << 8))
......@@ -172,32 +206,35 @@ struct rtas_error_log {
*/
struct rtas_ext_event_log_v6 {
/* Byte 0 */
uint32_t log_valid:1; /* 1:Log valid */
uint32_t unrecoverable_error:1; /* 1:Unrecoverable error */
uint32_t recoverable_error:1; /* 1:recoverable (correctable */
/* or successfully retried) */
uint32_t degraded_operation:1; /* 1:Unrecoverable err, bypassed*/
/* - degraded operation (e.g. */
/* CPU or mem taken off-line) */
uint32_t predictive_error:1;
uint32_t new_log:1; /* 1:"New" log (Always 1 for */
/* data returned from RTAS */
uint32_t big_endian:1; /* 1: Big endian */
uint32_t :1; /* reserved */
uint8_t byte0;
/* XXXXXXXX
* X 1: Log valid
* X 1: Unrecoverable error
* X 1: Recoverable (correctable or successfully retried)
* X 1: Bypassed unrecoverable error (degraded operation)
* X 1: Predictive error
* X 1: "New" log (always 1 for data returned from RTAS)
* X 1: Big Endian
* X 1: Reserved
*/
/* Byte 1 */
uint32_t :8; /* reserved */
uint8_t byte1; /* reserved */
/* Byte 2 */
uint32_t powerpc_format:1; /* Set to 1 (indicating log is */
/* in PowerPC format */
uint32_t :3; /* reserved */
uint32_t log_format:4; /* Log format indicator. Define */
/* format used for byte 12-2047 */
uint8_t byte2;
/* XXXXXXXX
* X 1: Set to 1 (indicating log is in PowerPC format)
* XXX 3: Reserved
* XXXX 4: Log format used for bytes 12-2047
*/
/* Byte 3 */
uint32_t :8; /* reserved */
uint8_t byte3; /* reserved */
/* Byte 4-11 */
uint8_t reserved[8]; /* reserved */
/* Byte 12-15 */
uint32_t company_id; /* Company ID of the company */
__be32 company_id; /* Company ID of the company */
/* that defines the format for */
/* the vendor specific log type */
/* Byte 16-end of log */
......@@ -205,6 +242,18 @@ struct rtas_ext_event_log_v6 {
/* Variable length. */
};
static
inline uint8_t rtas_ext_event_log_format(struct rtas_ext_event_log_v6 *ext_log)
{
return ext_log->byte2 & 0x0F;
}
static
inline uint32_t rtas_ext_event_company_id(struct rtas_ext_event_log_v6 *ext_log)
{
return be32_to_cpu(ext_log->company_id);
}
/* pSeries event log format */
/* Two bytes ASCII section IDs */
......@@ -227,14 +276,26 @@ struct rtas_ext_event_log_v6 {
/* Vendor specific Platform Event Log Format, Version 6, section header */
struct pseries_errorlog {
uint16_t id; /* 0x00 2-byte ASCII section ID */
uint16_t length; /* 0x02 Section length in bytes */
__be16 id; /* 0x00 2-byte ASCII section ID */
__be16 length; /* 0x02 Section length in bytes */
uint8_t version; /* 0x04 Section version */
uint8_t subtype; /* 0x05 Section subtype */
uint16_t creator_component; /* 0x06 Creator component ID */
__be16 creator_component; /* 0x06 Creator component ID */
uint8_t data[]; /* 0x08 Start of section data */
};
static
inline uint16_t pseries_errorlog_id(struct pseries_errorlog *sect)
{
return be16_to_cpu(sect->id);
}
static
inline uint16_t pseries_errorlog_length(struct pseries_errorlog *sect)
{
return be16_to_cpu(sect->length);
}
struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
uint16_t section_id);
......
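The bit-field removal above is the core of the rtas_error_log endian work: how a compiler packs bit-fields within a storage unit is implementation-defined and differs between big- and little-endian builds, so the log headers are reduced to raw bytes decoded with explicit masks and shifts. Callers move from direct member access to the accessors, e.g. (illustrative fragment):

	/* before: layout depends on the compiler's bit-field packing */
	severity = elog->severity;

	/* after: explicit, endian-independent decoding */
	severity = rtas_error_severity(elog);		/* (byte1 & 0xE0) >> 5 */
	length = rtas_error_extended_log_length(elog);	/* be32_to_cpu(...) */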
......@@ -73,7 +73,7 @@ static struct aligninfo aligninfo[128] = {
{ 8, LD+F }, /* 00 0 1001: lfd */
{ 4, ST+F+S }, /* 00 0 1010: stfs */
{ 8, ST+F }, /* 00 0 1011: stfd */
INVALID, /* 00 0 1100 */
{ 16, LD }, /* 00 0 1100: lq */
{ 8, LD }, /* 00 0 1101: ld/ldu/lwa */
INVALID, /* 00 0 1110 */
{ 8, ST }, /* 00 0 1111: std/stdu */
......@@ -140,7 +140,7 @@ static struct aligninfo aligninfo[128] = {
{ 2, LD+SW }, /* 10 0 1100: lhbrx */
{ 4, LD+SE }, /* 10 0 1101 lwa */
{ 2, ST+SW }, /* 10 0 1110: sthbrx */
INVALID, /* 10 0 1111 */
{ 16, ST }, /* 10 0 1111: stq */
INVALID, /* 10 1 0000 */
INVALID, /* 10 1 0001 */
INVALID, /* 10 1 0010 */
......@@ -385,8 +385,6 @@ static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
char *ptr1 = (char *) &current->thread.TS_FPR(reg+1);
int i, ret, sw = 0;
if (!(flags & F))
return 0;
if (reg & 1)
return 0; /* invalid form: FRS/FRT must be even */
if (flags & SW)
......@@ -406,6 +404,34 @@ static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
return 1; /* exception handled and fixed up */
}
#ifdef CONFIG_PPC64
static int emulate_lq_stq(struct pt_regs *regs, unsigned char __user *addr,
unsigned int reg, unsigned int flags)
{
char *ptr0 = (char *)&regs->gpr[reg];
char *ptr1 = (char *)&regs->gpr[reg+1];
int i, ret, sw = 0;
if (reg & 1)
return 0; /* invalid form: GPR must be even */
if (flags & SW)
sw = 7;
ret = 0;
for (i = 0; i < 8; ++i) {
if (!(flags & ST)) {
ret |= __get_user(ptr0[i^sw], addr + i);
ret |= __get_user(ptr1[i^sw], addr + i + 8);
} else {
ret |= __put_user(ptr0[i^sw], addr + i);
ret |= __put_user(ptr1[i^sw], addr + i + 8);
}
}
if (ret)
return -EFAULT;
return 1; /* exception handled and fixed up */
}
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_SPE
static struct aligninfo spe_aligninfo[32] = {
......@@ -914,10 +940,20 @@ int fix_alignment(struct pt_regs *regs)
flush_fp_to_thread(current);
}
/* Special case for 16-byte FP loads and stores */
if (nb == 16) {
PPC_WARN_ALIGNMENT(fp_pair, regs);
return emulate_fp_pair(addr, reg, flags);
if (nb == 16) {
if (flags & F) {
/* Special case for 16-byte FP loads and stores */
PPC_WARN_ALIGNMENT(fp_pair, regs);
return emulate_fp_pair(addr, reg, flags);
} else {
#ifdef CONFIG_PPC64
/* Special case for 16-byte loads and stores */
PPC_WARN_ALIGNMENT(lq_stq, regs);
return emulate_lq_stq(regs, addr, reg, flags);
#else
return 0;
#endif
}
}
PPC_WARN_ALIGNMENT(unaligned, regs);
......
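One detail of emulate_lq_stq() above (shared with the existing emulate_fp_pair()) is the `i ^ sw` indexing: with sw set to 7 for the byte-reversed instruction forms, indices 0..7 map to 7..0 within each doubleword, so a single copy loop handles both orderings. An illustrative stand-alone helper, not part of the patch:

	/* copy one doubleword, optionally byte-reversed: sw is 0 or 7 */
	static void copy_dword(char *dst, const char *src, int sw)
	{
		int i;

		for (i = 0; i < 8; i++)
			dst[i ^ sw] = src[i];	/* sw == 7: 0,1,..,7 -> 7,6,..,0 */
	}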
......@@ -56,7 +56,6 @@ _GLOBAL(__setup_cpu_power8)
li r0,0
mtspr SPRN_LPID,r0
mfspr r3,SPRN_LPCR
oris r3, r3, LPCR_AIL_3@h
bl __init_LPCR
bl __init_HFSCR
bl __init_tlb_power8
......@@ -75,7 +74,6 @@ _GLOBAL(__restore_cpu_power8)
li r0,0
mtspr SPRN_LPID,r0
mfspr r3,SPRN_LPCR
oris r3, r3, LPCR_AIL_3@h
bl __init_LPCR
bl __init_HFSCR
bl __init_tlb_power8
......
......@@ -54,14 +54,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
xori r12,r12,MSR_LE ; \
mtspr SPRN_SRR1,r12 ; \
rfid ; /* return to userspace */ \
b . ; \
2: mfspr r12,SPRN_SRR1 ; \
andi. r12,r12,MSR_PR ; \
bne 0b ; \
mtspr SPRN_SRR0,r3 ; \
mtspr SPRN_SRR1,r4 ; \
mtspr SPRN_SDR1,r5 ; \
rfid ; \
b . ; /* prevent speculative execution */
#if defined(CONFIG_RELOCATABLE)
......
......@@ -152,7 +152,8 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)
new_paca->paca_index = cpu;
new_paca->kernel_toc = kernel_toc;
new_paca->kernelbase = (unsigned long) _stext;
new_paca->kernel_msr = MSR_KERNEL;
/* Only set MSR:IR/DR when MMU is initialized */
new_paca->kernel_msr = MSR_KERNEL & ~(MSR_IR | MSR_DR);
new_paca->hw_cpu_id = 0xffff;
new_paca->kexec_state = KEXEC_STATE_NONE;
new_paca->__current = &init_task;
......
......@@ -610,6 +610,31 @@ static inline void tm_reclaim_task(struct task_struct *tsk)
tm_save_sprs(thr);
}
extern void __tm_recheckpoint(struct thread_struct *thread,
unsigned long orig_msr);
void tm_recheckpoint(struct thread_struct *thread,
unsigned long orig_msr)
{
unsigned long flags;
/* We really can't be interrupted here as the TEXASR registers can't
* change and later in the trecheckpoint code, we have a userspace R1.
* So let's hard disable over this region.
*/
local_irq_save(flags);
hard_irq_disable();
/* The TM SPRs are restored here, so that TEXASR.FS can be set
* before the trecheckpoint and no explosion occurs.
*/
tm_restore_sprs(thread);
__tm_recheckpoint(thread, orig_msr);
local_irq_restore(flags);
}
static inline void tm_recheckpoint_new_task(struct task_struct *new)
{
unsigned long msr;
......@@ -628,13 +653,10 @@ static inline void tm_recheckpoint_new_task(struct task_struct *new)
if (!new->thread.regs)
return;
/* The TM SPRs are restored here, so that TEXASR.FS can be set
* before the trecheckpoint and no explosion occurs.
*/
tm_restore_sprs(&new->thread);
if (!MSR_TM_ACTIVE(new->thread.regs->msr))
if (!MSR_TM_ACTIVE(new->thread.regs->msr)) {
tm_restore_sprs(&new->thread);
return;
}
msr = new->thread.tm_orig_msr;
/* Recheckpoint to restore original checkpointed register state. */
TM_DEBUG("*** tm_recheckpoint of pid %d "
......
......@@ -347,45 +347,45 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
#endif
}
if (found >= 0) {
DBG("boot cpu: logical %d physical %d\n", found,
be32_to_cpu(intserv[found_thread]));
boot_cpuid = found;
set_hard_smp_processor_id(found,
be32_to_cpu(intserv[found_thread]));
/* Not the boot CPU */
if (found < 0)
return 0;
/*
* PAPR defines "logical" PVR values for cpus that
* meet various levels of the architecture:
* 0x0f000001 Architecture version 2.04
* 0x0f000002 Architecture version 2.05
* If the cpu-version property in the cpu node contains
* such a value, we call identify_cpu again with the
* logical PVR value in order to use the cpu feature
* bits appropriate for the architecture level.
*
* A POWER6 partition in "POWER6 architected" mode
* uses the 0x0f000002 PVR value; in POWER5+ mode
* it uses 0x0f000001.
*/
prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
if (prop && (be32_to_cpup(prop) & 0xff000000) == 0x0f000000)
identify_cpu(0, be32_to_cpup(prop));
DBG("boot cpu: logical %d physical %d\n", found,
be32_to_cpu(intserv[found_thread]));
boot_cpuid = found;
set_hard_smp_processor_id(found, be32_to_cpu(intserv[found_thread]));
identical_pvr_fixup(node);
}
/*
* PAPR defines "logical" PVR values for cpus that
* meet various levels of the architecture:
* 0x0f000001 Architecture version 2.04
* 0x0f000002 Architecture version 2.05
* If the cpu-version property in the cpu node contains
* such a value, we call identify_cpu again with the
* logical PVR value in order to use the cpu feature
* bits appropriate for the architecture level.
*
* A POWER6 partition in "POWER6 architected" mode
* uses the 0x0f000002 PVR value; in POWER5+ mode
* it uses 0x0f000001.
*/
prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
if (prop && (be32_to_cpup(prop) & 0xff000000) == 0x0f000000)
identify_cpu(0, be32_to_cpup(prop));
identical_pvr_fixup(node);
check_cpu_feature_properties(node);
check_cpu_pa_features(node);
check_cpu_slb_size(node);
#ifdef CONFIG_PPC_PSERIES
#ifdef CONFIG_PPC64
if (nthreads > 1)
cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
else
cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
#endif
return 0;
}
......@@ -747,6 +747,10 @@ void __init early_init_devtree(void *params)
* (altivec support, boot CPU ID, ...)
*/
of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
if (boot_cpuid < 0) {
printk("Failed to indentify boot CPU !\n");
BUG();
}
#if defined(CONFIG_SMP) && defined(CONFIG_PPC64)
/* We'll later wait for secondaries to check in; there are
......
......@@ -993,21 +993,24 @@ struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
(struct rtas_ext_event_log_v6 *)log->buffer;
struct pseries_errorlog *sect;
unsigned char *p, *log_end;
uint32_t ext_log_length = rtas_error_extended_log_length(log);
uint8_t log_format = rtas_ext_event_log_format(ext_log);
uint32_t company_id = rtas_ext_event_company_id(ext_log);
/* Check that we understand the format */
if (log->extended_log_length < sizeof(struct rtas_ext_event_log_v6) ||
ext_log->log_format != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG ||
ext_log->company_id != RTAS_V6EXT_COMPANY_ID_IBM)
if (ext_log_length < sizeof(struct rtas_ext_event_log_v6) ||
log_format != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG ||
company_id != RTAS_V6EXT_COMPANY_ID_IBM)
return NULL;
log_end = log->buffer + log->extended_log_length;
log_end = log->buffer + ext_log_length;
p = ext_log->vendor_log;
while (p < log_end) {
sect = (struct pseries_errorlog *)p;
if (sect->id == section_id)
if (pseries_errorlog_id(sect) == section_id)
return sect;
p += sect->length;
p += pseries_errorlog_length(sect);
}
return NULL;
......
......@@ -150,8 +150,8 @@ static void printk_log_rtas(char *buf, int len)
struct rtas_error_log *errlog = (struct rtas_error_log *)buf;
printk(RTAS_DEBUG "event: %d, Type: %s, Severity: %d\n",
error_log_cnt, rtas_event_type(errlog->type),
errlog->severity);
error_log_cnt, rtas_event_type(rtas_error_type(errlog)),
rtas_error_severity(errlog));
}
}
......@@ -159,14 +159,16 @@ static int log_rtas_len(char * buf)
{
int len;
struct rtas_error_log *err;
uint32_t extended_log_length;
/* rtas fixed header */
len = 8;
err = (struct rtas_error_log *)buf;
if (err->extended && err->extended_log_length) {
extended_log_length = rtas_error_extended_log_length(err);
if (rtas_error_extended(err) && extended_log_length) {
/* extended header */
len += err->extended_log_length;
len += extended_log_length;
}
if (rtas_error_log_max == 0)
......@@ -293,15 +295,13 @@ void prrn_schedule_update(u32 scope)
static void handle_rtas_event(const struct rtas_error_log *log)
{
if (log->type == RTAS_TYPE_PRRN) {
/* For PRRN Events the extended log length is used to denote
* the scope for calling rtas update-nodes.
*/
if (prrn_is_enabled())
prrn_schedule_update(log->extended_log_length);
}
if (rtas_error_type(log) != RTAS_TYPE_PRRN || !prrn_is_enabled())
return;
return;
/* For PRRN Events the extended log length is used to denote
* the scope for calling rtas update-nodes.
*/
prrn_schedule_update(rtas_error_extended_log_length(log));
}
#else
......
......@@ -76,6 +76,9 @@ EXPORT_SYMBOL(ppc_md);
struct machdep_calls *machine_id;
EXPORT_SYMBOL(machine_id);
int boot_cpuid = -1;
EXPORT_SYMBOL_GPL(boot_cpuid);
unsigned long klimit = (unsigned long) _end;
char cmd_line[COMMAND_LINE_SIZE];
......
......@@ -44,8 +44,6 @@
extern void bootx_init(unsigned long r4, unsigned long phys);
int boot_cpuid = -1;
EXPORT_SYMBOL_GPL(boot_cpuid);
int boot_cpuid_phys;
EXPORT_SYMBOL_GPL(boot_cpuid_phys);
......
......@@ -74,7 +74,6 @@
#define DBG(fmt...)
#endif
int boot_cpuid = 0;
int spinning_secondaries;
u64 ppc64_pft_size;
......@@ -196,6 +195,18 @@ static void fixup_boot_paca(void)
get_paca()->data_offset = 0;
}
static void cpu_ready_for_interrupts(void)
{
/* Set IR and DR in PACA MSR */
get_paca()->kernel_msr = MSR_KERNEL;
/* Enable AIL if supported */
if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
unsigned long lpcr = mfspr(SPRN_LPCR);
mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
}
}
/*
* Early initialization entry point. This is called by head.S
* with MMU translation disabled. We rely on the "feature" of
......@@ -262,6 +273,14 @@ void __init early_setup(unsigned long dt_ptr)
/* Initialize the hash table or TLB handling */
early_init_mmu();
/*
* At this point, we can let interrupts switch to virtual mode
* (the MMU has been setup), so adjust the MSR in the PACA to
* have IR and DR set and enable AIL if it exists
*/
cpu_ready_for_interrupts();
/* Reserve large chunks of memory for use by CMA for KVM */
kvm_cma_reserve();
/*
......@@ -294,6 +313,13 @@ void early_setup_secondary(void)
/* Initialize the hash table or TLB handling */
early_init_mmu_secondary();
/*
* At this point, we can let interrupts switch to virtual mode
* (the MMU has been setup), so adjust the MSR in the PACA to
* have IR and DR set.
*/
cpu_ready_for_interrupts();
}
#endif /* CONFIG_SMP */
......
......@@ -881,6 +881,8 @@ static long restore_tm_user_regs(struct pt_regs *regs,
* transactional versions should be loaded.
*/
tm_enable();
/* Make sure the transaction is marked as failed */
current->thread.tm_texasr |= TEXASR_FS;
/* This loads the checkpointed FP/VEC state, if used */
tm_recheckpoint(&current->thread, msr);
/* Get the top half of the MSR */
......
......@@ -527,6 +527,8 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
}
#endif
tm_enable();
/* Make sure the transaction is marked as failed */
current->thread.tm_texasr |= TEXASR_FS;
/* This loads the checkpointed FP/VEC state, if used */
tm_recheckpoint(&current->thread, msr);
......
......@@ -307,7 +307,7 @@ dont_backup_fp:
* Call with IRQs off, stacks get all out of sync for
* some periods in here!
*/
_GLOBAL(tm_recheckpoint)
_GLOBAL(__tm_recheckpoint)
mfcr r5
mflr r0
stw r5, 8(r1)
......
......@@ -1868,6 +1868,7 @@ struct ppc_emulated ppc_emulated = {
#ifdef CONFIG_PPC64
WARN_EMULATED_SETUP(mfdscr),
WARN_EMULATED_SETUP(mtdscr),
WARN_EMULATED_SETUP(lq_stq),
#endif
};
......
......@@ -11,48 +11,36 @@ mtfsf(unsigned int FM, u32 *frB)
u32 mask;
u32 fpscr;
if (FM == 0)
return 0;
if (FM == 0xff)
mask = 0x9fffffff;
if (likely(FM == 1))
mask = 0x0f;
else if (likely(FM == 0xff))
mask = ~0;
else {
mask = 0;
if (FM & (1 << 0))
mask |= 0x90000000;
if (FM & (1 << 1))
mask |= 0x0f000000;
if (FM & (1 << 2))
mask |= 0x00f00000;
if (FM & (1 << 3))
mask |= 0x000f0000;
if (FM & (1 << 4))
mask |= 0x0000f000;
if (FM & (1 << 5))
mask |= 0x00000f00;
if (FM & (1 << 6))
mask |= 0x000000f0;
if (FM & (1 << 7))
mask |= 0x0000000f;
mask = ((FM & 1) |
((FM << 3) & 0x10) |
((FM << 6) & 0x100) |
((FM << 9) & 0x1000) |
((FM << 12) & 0x10000) |
((FM << 15) & 0x100000) |
((FM << 18) & 0x1000000) |
((FM << 21) & 0x10000000)) * 15;
}
__FPU_FPSCR &= ~(mask);
__FPU_FPSCR |= (frB[1] & mask);
fpscr = ((__FPU_FPSCR & ~mask) | (frB[1] & mask)) &
~(FPSCR_VX | FPSCR_FEX | 0x800);
__FPU_FPSCR &= ~(FPSCR_VX);
if (__FPU_FPSCR & (FPSCR_VXSNAN | FPSCR_VXISI | FPSCR_VXIDI |
if (fpscr & (FPSCR_VXSNAN | FPSCR_VXISI | FPSCR_VXIDI |
FPSCR_VXZDZ | FPSCR_VXIMZ | FPSCR_VXVC |
FPSCR_VXSOFT | FPSCR_VXSQRT | FPSCR_VXCVI))
__FPU_FPSCR |= FPSCR_VX;
fpscr = __FPU_FPSCR;
fpscr &= ~(FPSCR_FEX);
if (((fpscr & FPSCR_VX) && (fpscr & FPSCR_VE)) ||
((fpscr & FPSCR_OX) && (fpscr & FPSCR_OE)) ||
((fpscr & FPSCR_UX) && (fpscr & FPSCR_UE)) ||
((fpscr & FPSCR_ZX) && (fpscr & FPSCR_ZE)) ||
((fpscr & FPSCR_XX) && (fpscr & FPSCR_XE)))
fpscr |= FPSCR_VX;
/* The bit order of exception enables and exception status
* is the same. Simply shift and mask to check for enabled
* exceptions.
*/
if (fpscr & (fpscr >> 22) & 0xf8)
fpscr |= FPSCR_FEX;
__FPU_FPSCR = fpscr;
#ifdef DEBUG
......
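Two tricks in the rewritten mtfsf() deserve a note. The mask builder places FM bit i at bit position 4*i and then multiplies by 15: because the spread bits sit four positions apart, the multiply cannot carry between nibbles, so each set bit expands into a full 0xF nibble. And since each FPSCR exception-status bit sits exactly 22 positions above its enable bit, `fpscr & (fpscr >> 22) & 0xf8` is non-zero precisely when a raised exception is also enabled. A worked example of the mask trick:

	/* FM = 0x81 selects FPSCR fields 0 and 7:
	 *
	 *   spread bits: 0x10000001   (FM bit i -> bit 4*i)
	 *   * 15:        0xF000000F   (each bit expands to a nibble)
	 */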
......@@ -36,6 +36,11 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
do {
pte_t pte = ACCESS_ONCE(*ptep);
struct page *page;
/*
* Similar to the PMD case, NUMA hinting must take slow path
*/
if (pte_numa(pte))
return 0;
if ((pte_val(pte) & mask) != result)
return 0;
......@@ -75,6 +80,14 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
if (pmd_none(pmd) || pmd_trans_splitting(pmd))
return 0;
if (pmd_huge(pmd) || pmd_large(pmd)) {
/*
* NUMA hinting faults need to be handled in the GUP
* slowpath for accounting purposes and so that they
* can be serialised against THP migration.
*/
if (pmd_numa(pmd))
return 0;
if (!gup_hugepte((pte_t *)pmdp, PMD_SIZE, addr, next,
write, pages, nr))
return 0;
......
......@@ -1591,6 +1591,20 @@ int arch_update_cpu_topology(void)
cpu = cpu_last_thread_sibling(cpu);
}
/*
* In cases where we have nothing to update (because the updates list
* is too short or because the new topology is same as the old one),
* skip invoking update_cpu_topology() via stop-machine(). This is
* necessary (and not just a fast-path optimization) since stop-machine
* can end up electing a random CPU to run update_cpu_topology(), and
* thus trick us into setting up incorrect cpu-node mappings (since
* 'updates' is kzalloc()'ed).
*
* And for the similar reason, we will skip all the following updating.
*/
if (!cpumask_weight(&updated_cpus))
goto out;
stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
/*
......@@ -1612,6 +1626,7 @@ int arch_update_cpu_topology(void)
changed = 1;
}
out:
kfree(updates);
return changed;
}
......
......@@ -170,7 +170,7 @@ EXPORT_SYMBOL_GPL(register_spu_syscalls);
void unregister_spu_syscalls(struct spufs_calls *calls)
{
BUG_ON(spufs_calls->owner != calls->owner);
rcu_assign_pointer(spufs_calls, NULL);
RCU_INIT_POINTER(spufs_calls, NULL);
synchronize_rcu();
}
EXPORT_SYMBOL_GPL(unregister_spu_syscalls);
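The RCU_INIT_POINTER() conversion above is the standard idiom when the new value is NULL: rcu_assign_pointer() exists to order initialization of a structure before publishing a pointer to it, and publishing NULL has nothing to order, so the barrier-free initializer is sufficient. The pattern, annotated:

	RCU_INIT_POINTER(spufs_calls, NULL);	/* no write barrier needed for NULL */
	synchronize_rcu();	/* wait out readers still using the old pointer */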
obj-y += setup.o opal-takeover.o opal-wrappers.o opal.o opal-async.o
obj-y += opal-rtc.o opal-nvram.o opal-lpc.o opal-flash.o
obj-y += rng.o opal-elog.o opal-dump.o opal-sysparam.o opal-sensor.o
obj-y += opal-msglog.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_PCI) += pci.o pci-p5ioc2.o pci-ioda.o
......
......@@ -125,14 +125,15 @@ static int opal_async_comp_event(struct notifier_block *nb,
{
struct opal_msg *comp_msg = msg;
unsigned long flags;
uint64_t token;
if (msg_type != OPAL_MSG_ASYNC_COMP)
return 0;
memcpy(&opal_async_responses[comp_msg->params[0]], comp_msg,
sizeof(*comp_msg));
token = be64_to_cpu(comp_msg->params[0]);
memcpy(&opal_async_responses[token], comp_msg, sizeof(*comp_msg));
spin_lock_irqsave(&opal_async_comp_lock, flags);
__set_bit(comp_msg->params[0], opal_async_complete_map);
__set_bit(token, opal_async_complete_map);
spin_unlock_irqrestore(&opal_async_comp_lock, flags);
wake_up(&opal_async_wait);
......
......@@ -86,19 +86,14 @@ static int64_t dump_send_ack(uint32_t dump_id)
return rc;
}
static void delay_release_kobj(void *kobj)
{
kobject_put((struct kobject *)kobj);
}
static ssize_t dump_ack_store(struct dump_obj *dump_obj,
struct dump_attribute *attr,
const char *buf,
size_t count)
{
dump_send_ack(dump_obj->id);
sysfs_schedule_callback(&dump_obj->kobj, delay_release_kobj,
&dump_obj->kobj, THIS_MODULE);
sysfs_remove_file_self(&dump_obj->kobj, &attr->attr);
kobject_put(&dump_obj->kobj);
return count;
}
......
......@@ -70,19 +70,14 @@ static ssize_t elog_ack_show(struct elog_obj *elog_obj,
return sprintf(buf, "ack - acknowledge log message\n");
}
static void delay_release_kobj(void *kobj)
{
kobject_put((struct kobject *)kobj);
}
static ssize_t elog_ack_store(struct elog_obj *elog_obj,
struct elog_attribute *attr,
const char *buf,
size_t count)
{
opal_send_ack_elog(elog_obj->id);
sysfs_schedule_callback(&elog_obj->kobj, delay_release_kobj,
&elog_obj->kobj, THIS_MODULE);
sysfs_remove_file_self(&elog_obj->kobj, &attr->attr);
kobject_put(&elog_obj->kobj);
return count;
}
......
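Both conversions above have the same shape, which is the point of the new API Greg and Tejun added: a sysfs ->store() handler may now remove its own attribute file synchronously instead of scheduling a deferred callback. Reduced to the before/after pattern, with names as in the hunks above:

	/* old: removal deferred to a workqueue, which pinned THIS_MODULE
	 * and needed a helper just to drop the kobject */
	sysfs_schedule_callback(&obj->kobj, delay_release_kobj,
				&obj->kobj, THIS_MODULE);

	/* new: remove the file from within its own store() method,
	 * then drop the reference directly */
	sysfs_remove_file_self(&obj->kobj, &attr->attr);
	kobject_put(&obj->kobj);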
/*
* PowerNV OPAL in-memory console interface
*
* Copyright 2014 IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/io.h>
#include <asm/opal.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/types.h>
#include <asm/barrier.h>
/* OPAL in-memory console. Defined in OPAL source at core/console.c */
struct memcons {
__be64 magic;
#define MEMCONS_MAGIC 0x6630696567726173L
__be64 obuf_phys;
__be64 ibuf_phys;
__be32 obuf_size;
__be32 ibuf_size;
__be32 out_pos;
#define MEMCONS_OUT_POS_WRAP 0x80000000u
#define MEMCONS_OUT_POS_MASK 0x00ffffffu
__be32 in_prod;
__be32 in_cons;
};
static ssize_t opal_msglog_read(struct file *file, struct kobject *kobj,
struct bin_attribute *bin_attr, char *to,
loff_t pos, size_t count)
{
struct memcons *mc = bin_attr->private;
const char *conbuf;
size_t ret, first_read = 0;
uint32_t out_pos, avail;
if (!mc)
return -ENODEV;
out_pos = be32_to_cpu(ACCESS_ONCE(mc->out_pos));
/* Now we've read out_pos, put a barrier in before reading the new
* data it points to in conbuf. */
smp_rmb();
conbuf = phys_to_virt(be64_to_cpu(mc->obuf_phys));
/* When the buffer has wrapped, read from the out_pos marker to the end
* of the buffer, and then read the remaining data as in the un-wrapped
* case. */
if (out_pos & MEMCONS_OUT_POS_WRAP) {
out_pos &= MEMCONS_OUT_POS_MASK;
avail = be32_to_cpu(mc->obuf_size) - out_pos;
ret = memory_read_from_buffer(to, count, &pos,
conbuf + out_pos, avail);
if (ret < 0)
goto out;
first_read = ret;
to += first_read;
count -= first_read;
pos -= avail;
}
/* Sanity check. The firmware should not do this to us. */
if (out_pos > be32_to_cpu(mc->obuf_size)) {
pr_err("OPAL: memory console corruption. Aborting read.\n");
return -EINVAL;
}
ret = memory_read_from_buffer(to, count, &pos, conbuf, out_pos);
if (ret < 0)
goto out;
ret += first_read;
out:
return ret;
}
static struct bin_attribute opal_msglog_attr = {
.attr = {.name = "msglog", .mode = 0444},
.read = opal_msglog_read
};
void __init opal_msglog_init(void)
{
u64 mcaddr;
struct memcons *mc;
if (of_property_read_u64(opal_node, "ibm,opal-memcons", &mcaddr)) {
pr_warn("OPAL: Property ibm,opal-memcons not found, no message log\n");
return;
}
mc = phys_to_virt(mcaddr);
if (!mc) {
pr_warn("OPAL: memory console address is invalid\n");
return;
}
if (be64_to_cpu(mc->magic) != MEMCONS_MAGIC) {
pr_warn("OPAL: memory console version is invalid\n");
return;
}
opal_msglog_attr.private = mc;
if (sysfs_create_bin_file(opal_kobj, &opal_msglog_attr) != 0)
pr_warn("OPAL: sysfs file creation failed\n");
}
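The wrap handling in opal_msglog_read() above is easier to see as a picture. When firmware sets MEMCONS_OUT_POS_WRAP it has written past the end of obuf and circled back, so the oldest data starts at out_pos and the read is stitched from two spans:

	/*
	 * obuf (wrapped):  [ newest data | out_pos | oldest data ]
	 *                    0 ......... out_pos ......... obuf_size
	 *
	 * read order: [out_pos, obuf_size) first, then [0, out_pos)
	 */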
......@@ -33,6 +33,7 @@ int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data)
{
int ret, token;
struct opal_msg msg;
__be32 data;
token = opal_async_get_token_interruptible();
if (token < 0) {
......@@ -42,7 +43,7 @@ int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data)
}
mutex_lock(&opal_sensor_mutex);
ret = opal_sensor_read(sensor_hndl, token, sensor_data);
ret = opal_sensor_read(sensor_hndl, token, &data);
if (ret != OPAL_ASYNC_COMPLETION)
goto out_token;
......@@ -53,7 +54,8 @@ int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data)
goto out_token;
}
ret = msg.params[1];
*sensor_data = be32_to_cpu(data);
ret = be64_to_cpu(msg.params[1]);
out_token:
mutex_unlock(&opal_sensor_mutex);
......
......@@ -64,7 +64,7 @@ static int opal_get_sys_param(u32 param_id, u32 length, void *buffer)
goto out_token;
}
ret = msg.params[1];
ret = be64_to_cpu(msg.params[1]);
out_token:
opal_async_release_token(token);
......@@ -98,7 +98,7 @@ static int opal_set_sys_param(u32 param_id, u32 length, void *buffer)
goto out_token;
}
ret = msg.params[1];
ret = be64_to_cpu(msg.params[1]);
out_token:
opal_async_release_token(token);
......
......@@ -61,6 +61,7 @@ _STATIC(opal_return)
mtcr r4;
rfid
OPAL_CALL(opal_invalid_call, OPAL_INVALID_CALL);
OPAL_CALL(opal_console_write, OPAL_CONSOLE_WRITE);
OPAL_CALL(opal_console_read, OPAL_CONSOLE_READ);
OPAL_CALL(opal_console_write_buffer_space, OPAL_CONSOLE_WRITE_BUFFER_SPACE);
......
......@@ -46,7 +46,7 @@ struct mcheck_recoverable_range {
static struct mcheck_recoverable_range *mc_recoverable_range;
static int mc_recoverable_range_len;
static struct device_node *opal_node;
struct device_node *opal_node;
static DEFINE_SPINLOCK(opal_write_lock);
extern u64 opal_mc_secondary_handler[];
static unsigned int *opal_irqs;
......@@ -102,19 +102,36 @@ int __init early_init_dt_scan_opal(unsigned long node,
int __init early_init_dt_scan_recoverable_ranges(unsigned long node,
const char *uname, int depth, void *data)
{
unsigned long i, size;
unsigned long i, psize, size;
const __be32 *prop;
if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
return 0;
prop = of_get_flat_dt_prop(node, "mcheck-recoverable-ranges", &size);
prop = of_get_flat_dt_prop(node, "mcheck-recoverable-ranges", &psize);
if (!prop)
return 1;
pr_debug("Found machine check recoverable ranges.\n");
/*
* Calculate number of available entries.
*
* Each recoverable address range entry is (start address, len,
* recovery address), 2 cells each for start and recovery address,
* 1 cell for len, totalling 5 cells per entry.
*/
mc_recoverable_range_len = psize / (sizeof(*prop) * 5);
/* Sanity check */
if (!mc_recoverable_range_len)
return 1;
/* Size required to hold all the entries. */
size = mc_recoverable_range_len *
sizeof(struct mcheck_recoverable_range);
/*
* Allocate a buffer to hold the MC recoverable ranges. We would be
* accessing them in real mode, hence it needs to be within
......@@ -124,11 +141,7 @@ int __init early_init_dt_scan_recoverable_ranges(unsigned long node,
ppc64_rma_size));
memset(mc_recoverable_range, 0, size);
/*
* Each recoverable address entry is an (start address,len,
* recover address) pair, * 2 cells each, totalling 4 cells per entry.
*/
for (i = 0; i < size / (sizeof(*prop) * 5); i++) {
for (i = 0; i < mc_recoverable_range_len; i++) {
mc_recoverable_range[i].start_addr =
of_read_number(prop + (i * 5) + 0, 2);
mc_recoverable_range[i].end_addr =
......@@ -142,7 +155,6 @@ int __init early_init_dt_scan_recoverable_ranges(unsigned long node,
mc_recoverable_range[i].end_addr,
mc_recoverable_range[i].recover_addr);
}
mc_recoverable_range_len = i;
return 1;
}
......@@ -180,6 +192,20 @@ int opal_notifier_register(struct notifier_block *nb)
atomic_notifier_chain_register(&opal_notifier_head, nb);
return 0;
}
EXPORT_SYMBOL_GPL(opal_notifier_register);
int opal_notifier_unregister(struct notifier_block *nb)
{
if (!nb) {
pr_warning("%s: Invalid argument (%p)\n",
__func__, nb);
return -EINVAL;
}
atomic_notifier_chain_unregister(&opal_notifier_head, nb);
return 0;
}
EXPORT_SYMBOL_GPL(opal_notifier_unregister);
static void opal_do_notifier(uint64_t events)
{
......@@ -267,6 +293,7 @@ static void opal_handle_message(void)
* value in /proc/device-tree.
*/
static struct opal_msg msg;
u32 type;
ret = opal_get_msg(__pa(&msg), sizeof(msg));
/* No opal message pending. */
......@@ -280,13 +307,14 @@ static void opal_handle_message(void)
return;
}
type = be32_to_cpu(msg.msg_type);
/* Sanity check */
if (msg.msg_type > OPAL_MSG_TYPE_MAX) {
pr_warning("%s: Unknown message type: %u\n",
__func__, msg.msg_type);
if (type > OPAL_MSG_TYPE_MAX) {
pr_warning("%s: Unknown message type: %u\n", __func__, type);
return;
}
opal_message_do_notify(msg.msg_type, (void *)&msg);
opal_message_do_notify(type, (void *)&msg);
}
static int opal_message_notify(struct notifier_block *nb,
......@@ -574,6 +602,8 @@ static int __init opal_init(void)
opal_platform_dump_init();
/* Setup system parameters interface */
opal_sys_param_init();
/* Setup message log interface. */
opal_msglog_init();
}
return 0;
......@@ -605,3 +635,6 @@ void opal_shutdown(void)
mdelay(10);
}
}
/* Export this so that test modules can use it */
EXPORT_SYMBOL_GPL(opal_invalid_call);
......@@ -82,9 +82,9 @@ static struct pseries_io_event * ioei_find_event(struct rtas_error_log *elog)
* RTAS_TYPE_IO only exists in extended event log version 6 or later.
* No need to check event log version.
*/
if (unlikely(elog->type != RTAS_TYPE_IO)) {
printk_once(KERN_WARNING "io_event_irq: Unexpected event type %d",
elog->type);
if (unlikely(rtas_error_type(elog) != RTAS_TYPE_IO)) {
printk_once(KERN_WARNING"io_event_irq: Unexpected event type %d",
rtas_error_type(elog));
return NULL;
}
......
......@@ -298,13 +298,13 @@ int nvram_write_os_partition(struct nvram_os_partition *part, char * buff,
rc = ppc_md.nvram_write((char *)&info, sizeof(struct err_log_info), &tmp_index);
if (rc <= 0) {
pr_err("%s: Failed nvram_write (%d)\n", __FUNCTION__, rc);
pr_err("%s: Failed nvram_write (%d)\n", __func__, rc);
return rc;
}
rc = ppc_md.nvram_write(buff, length, &tmp_index);
if (rc <= 0) {
pr_err("%s: Failed nvram_write (%d)\n", __FUNCTION__, rc);
pr_err("%s: Failed nvram_write (%d)\n", __func__, rc);
return rc;
}
......@@ -351,15 +351,14 @@ int nvram_read_partition(struct nvram_os_partition *part, char *buff,
sizeof(struct err_log_info),
&tmp_index);
if (rc <= 0) {
pr_err("%s: Failed nvram_read (%d)\n", __FUNCTION__,
rc);
pr_err("%s: Failed nvram_read (%d)\n", __func__, rc);
return rc;
}
}
rc = ppc_md.nvram_read(buff, length, &tmp_index);
if (rc <= 0) {
pr_err("%s: Failed nvram_read (%d)\n", __FUNCTION__, rc);
pr_err("%s: Failed nvram_read (%d)\n", __func__, rc);
return rc;
}
......@@ -869,7 +868,7 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
break;
default:
pr_err("%s: ignoring unrecognized KMSG_DUMP_* reason %d\n",
__FUNCTION__, (int) reason);
__func__, (int) reason);
return;
}
......
......@@ -236,7 +236,8 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id)
rtas_elog = (struct rtas_error_log *)ras_log_buf;
if ((status == 0) && (rtas_elog->severity >= RTAS_SEVERITY_ERROR_SYNC))
if (status == 0 &&
rtas_error_severity(rtas_elog) >= RTAS_SEVERITY_ERROR_SYNC)
fatal = 1;
else
fatal = 0;
......@@ -300,13 +301,14 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
/* If it isn't an extended log we can use the per cpu 64bit buffer */
h = (struct rtas_error_log *)&savep[1];
if (!h->extended) {
if (!rtas_error_extended(h)) {
memcpy(&__get_cpu_var(mce_data_buf), h, sizeof(__u64));
errhdr = (struct rtas_error_log *)&__get_cpu_var(mce_data_buf);
} else {
int len;
int len, error_log_length;
len = max_t(int, 8+h->extended_log_length, RTAS_ERROR_LOG_MAX);
error_log_length = 8 + rtas_error_extended_log_length(h);
len = max_t(int, error_log_length, RTAS_ERROR_LOG_MAX);
memset(global_mce_data_buf, 0, RTAS_ERROR_LOG_MAX);
memcpy(global_mce_data_buf, h, len);
errhdr = (struct rtas_error_log *)global_mce_data_buf;
......@@ -350,23 +352,24 @@ int pSeries_system_reset_exception(struct pt_regs *regs)
static int recover_mce(struct pt_regs *regs, struct rtas_error_log *err)
{
int recovered = 0;
int disposition = rtas_error_disposition(err);
if (!(regs->msr & MSR_RI)) {
/* If MSR_RI isn't set, we cannot recover */
recovered = 0;
} else if (err->disposition == RTAS_DISP_FULLY_RECOVERED) {
} else if (disposition == RTAS_DISP_FULLY_RECOVERED) {
/* Platform corrected itself */
recovered = 1;
} else if (err->disposition == RTAS_DISP_LIMITED_RECOVERY) {
} else if (disposition == RTAS_DISP_LIMITED_RECOVERY) {
/* Platform corrected itself but could be degraded */
printk(KERN_ERR "MCE: limited recovery, system may "
"be degraded\n");
recovered = 1;
} else if (user_mode(regs) && !is_global_init(current) &&
err->severity == RTAS_SEVERITY_ERROR_SYNC) {
rtas_error_severity(err) == RTAS_SEVERITY_ERROR_SYNC) {
/*
* If we received a synchronous error when in userspace
......
......@@ -202,7 +202,7 @@ void __init test_of_node(void)
/* There should really be a struct device_node allocator */
memset(&of_node, 0, sizeof(of_node));
kref_init(&of_node.kobj.kref);
of_node_init(&of_node);
of_node.full_name = node_name;
check(0 == msi_bitmap_alloc(&bmp, size, &of_node));
......
......@@ -554,14 +554,6 @@ config SENSORS_IBMPEX
This driver can also be built as a module. If so, the module
will be called ibmpex.
config SENSORS_IBMPOWERNV
tristate "IBM PowerNv Platform temperature/power/fan sensor"
depends on PPC_POWERNV
default y
help
If you say yes here you get support for the temperature/fan/power
sensors on your platform.
config SENSORS_IIO_HWMON
tristate "Hwmon driver that uses channels specified via iio maps"
depends on IIO
......
......@@ -71,7 +71,6 @@ obj-$(CONFIG_SENSORS_ULTRA45) += ultra45_env.o
obj-$(CONFIG_SENSORS_I5K_AMB) += i5k_amb.o
obj-$(CONFIG_SENSORS_IBMAEM) += ibmaem.o
obj-$(CONFIG_SENSORS_IBMPEX) += ibmpex.o
obj-$(CONFIG_SENSORS_IBMPOWERNV)+= ibmpowernv.o
obj-$(CONFIG_SENSORS_IIO_HWMON) += iio_hwmon.o
obj-$(CONFIG_SENSORS_INA209) += ina209.o
obj-$(CONFIG_SENSORS_INA2XX) += ina2xx.o
......
......@@ -61,6 +61,7 @@ static struct hvc_opal_priv *hvc_opal_privs[MAX_NR_HVC_CONSOLES];
/* For early boot console */
static struct hvc_opal_priv hvc_opal_boot_priv;
static u32 hvc_opal_boot_termno;
static bool hvc_opal_event_registered;
static const struct hv_ops hvc_opal_raw_ops = {
.get_chars = opal_get_chars,
......@@ -161,6 +162,18 @@ static const struct hv_ops hvc_opal_hvsi_ops = {
.tiocmset = hvc_opal_hvsi_tiocmset,
};
static int hvc_opal_console_event(struct notifier_block *nb,
unsigned long events, void *change)
{
if (events & OPAL_EVENT_CONSOLE_INPUT)
hvc_kick();
return 0;
}
static struct notifier_block hvc_opal_console_nb = {
.notifier_call = hvc_opal_console_event,
};
static int hvc_opal_probe(struct platform_device *dev)
{
const struct hv_ops *ops;
......@@ -170,6 +183,7 @@ static int hvc_opal_probe(struct platform_device *dev)
unsigned int termno, boot = 0;
const __be32 *reg;
if (of_device_is_compatible(dev->dev.of_node, "ibm,opal-console-raw")) {
proto = HV_PROTOCOL_RAW;
ops = &hvc_opal_raw_ops;
......@@ -213,12 +227,18 @@ static int hvc_opal_probe(struct platform_device *dev)
dev->dev.of_node->full_name,
boot ? " (boot console)" : "");
/* We don't do IRQ yet */
/* We don't do IRQ ... */
hp = hvc_alloc(termno, 0, ops, MAX_VIO_PUT_CHARS);
if (IS_ERR(hp))
return PTR_ERR(hp);
dev_set_drvdata(&dev->dev, hp);
/* ... but we use OPAL event to kick the console */
if (!hvc_opal_event_registered) {
opal_notifier_register(&hvc_opal_console_nb);
hvc_opal_event_registered = true;
}
return 0;
}
......
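So the console change mentioned in the pull message really is this small: an OPAL event notifier whose callback pokes the HVC polling thread the moment firmware flags console input, instead of waiting for the next poll interval. Condensed to the flow (calls as in the hunks above):

	/* registered once, at first console probe: */
	opal_notifier_register(&hvc_opal_console_nb);

	/* firmware raises OPAL_EVENT_CONSOLE_INPUT -> callback runs: */
	if (events & OPAL_EVENT_CONSOLE_INPUT)
		hvc_kick();	/* wake the HVC poll thread immediately */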