Commit fe1b5180 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next

Pull sparc updates from David Miller:

 1) Queued spinlocks and rwlocks for sparc64, from Babu Moger.

 2) Some const'ification from Arvind Yadav.

 3) LDC/VIO driver infrastructure changes to facilitate upcoming
    drivers, from Jag Raman.

 4) Initialize sched_clock() et al. early so that the initial printk
    timestamps are all done while the implementation is available and
    functioning. From Pavel Tatashin.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next: (38 commits)
  sparc: kernel: pmc: make of_device_ids const.
  sparc64: fix typo in property
  sparc64: add port_id to VIO device metadata
  sparc64: Enhance search for VIO device in MDESC
  sparc64: enhance VIO device probing
  sparc64: check if a client is allowed to register for MDESC notifications
  sparc64: remove restriction on VIO device name size
  sparc64: refactor code to obtain cfg_handle property from MDESC
  sparc64: add MDESC node name property to VIO device metadata
  sparc64: mdesc: use __GFP_REPEAT action modifier for VM allocation
  sparc64: expand MDESC interface
  sparc64: skip handshake for LDC channels in RAW mode
  sparc64: specify the device class in VIO version info. packet
  sparc64: ensure VIO operations are defined while being used
  sparc: kernel: apc: make of_device_ids const
  sparc/time: make of_device_ids const
  sparc64: broken %tick frequency on spitfire cpus
  sparc64: use prom interface to get %stick frequency
  sparc64: optimize functions that access tick
  sparc64: add hot-patched and inlined get_tick()
  ...
parents 8b6b3172 0cd52df8
......@@ -83,6 +83,8 @@ config SPARC64
select ARCH_SUPPORTS_ATOMIC_RMW
select HAVE_NMI
select HAVE_REGS_AND_STACK_ACCESS_API
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
config ARCH_DEFCONFIG
string
......@@ -92,6 +94,9 @@ config ARCH_DEFCONFIG
config ARCH_PROC_KCORE_TEXT
def_bool y
config CPU_BIG_ENDIAN
def_bool y
config ARCH_ATU
bool
default y if SPARC64
......
......@@ -6,6 +6,17 @@
#ifndef __ARCH_SPARC64_CMPXCHG__
#define __ARCH_SPARC64_CMPXCHG__
static inline unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
{
__asm__ __volatile__("cas [%2], %3, %0"
: "=&r" (new)
: "0" (new), "r" (m), "r" (old)
: "memory");
return new;
}
static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
{
unsigned long tmp1, tmp2;
......@@ -44,10 +55,38 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
void __xchg_called_with_bad_pointer(void);
/*
* Use 4 byte cas instruction to achieve 2 byte xchg. Main logic
* here is to get the bit shift of the byte we are interested in.
* The XOR is handy for reversing the bits for big-endian byte order.
*/
static inline unsigned long
xchg16(__volatile__ unsigned short *m, unsigned short val)
{
unsigned long maddr = (unsigned long)m;
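/* Big-endian layout: the halfword whose address has bit 1 clear occupies
 * the high 16 bits of its containing 32-bit word, so XOR-ing the byte
 * offset with 2 yields a shift of 16 for the aligned halfword and 0 for
 * the other one.
 */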
int bit_shift = (((unsigned long)m & 2) ^ 2) << 3;
unsigned int mask = 0xffff << bit_shift;
unsigned int *ptr = (unsigned int *) (maddr & ~2);
unsigned int old32, new32, load32;
/* Read the old value */
load32 = *ptr;
do {
old32 = load32;
new32 = (load32 & (~mask)) | val << bit_shift;
load32 = __cmpxchg_u32(ptr, old32, new32);
} while (load32 != old32);
return (load32 & mask) >> bit_shift;
}
static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
int size)
{
switch (size) {
case 2:
return xchg16(ptr, x);
case 4:
return xchg32(ptr, x);
case 8:
......@@ -65,10 +104,11 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
#include <asm-generic/cmpxchg-local.h>
static inline unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
__asm__ __volatile__("cas [%2], %3, %0"
__asm__ __volatile__("casx [%2], %3, %0"
: "=&r" (new)
: "0" (new), "r" (m), "r" (old)
: "memory");
......@@ -76,15 +116,31 @@ __cmpxchg_u32(volatile int *m, int old, int new)
return new;
}
/*
* Use 4 byte cas instruction to achieve 1 byte cmpxchg. Main logic
* here is to get the bit shift of the byte we are interested in.
* The XOR is handy for reversing the bits for big-endian byte order
*/
static inline unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
__cmpxchg_u8(volatile unsigned char *m, unsigned char old, unsigned char new)
{
__asm__ __volatile__("casx [%2], %3, %0"
: "=&r" (new)
: "0" (new), "r" (m), "r" (old)
: "memory");
return new;
unsigned long maddr = (unsigned long)m;
int bit_shift = (((unsigned long)m & 3) ^ 3) << 3;
unsigned int mask = 0xff << bit_shift;
unsigned int *ptr = (unsigned int *) (maddr & ~3);
unsigned int old32, new32, load;
unsigned int load32 = *ptr;
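/* The cas in the loop below returns the word actually in memory: if it
 * equals old32 the exchange succeeded; if the miscompare was caused only
 * by bytes outside the mask, our byte still reads 'old', so recompute
 * old32/new32 from the freshly observed word and retry.
 */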
do {
new32 = (load32 & ~mask) | (new << bit_shift);
old32 = (load32 & ~mask) | (old << bit_shift);
load32 = __cmpxchg_u32(ptr, old32, new32);
if (load32 == old32)
return old;
load = (load32 & mask) >> bit_shift;
} while (load == old);
return load;
}
/* This function doesn't exist, so you'll get a linker error
......@@ -95,6 +151,8 @@ static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
switch (size) {
case 1:
return __cmpxchg_u8(ptr, old, new);
case 4:
return __cmpxchg_u32(ptr, old, new);
case 8:
......
......@@ -48,6 +48,8 @@ struct ldc_channel_config {
#define LDC_STATE_READY 0x03
#define LDC_STATE_CONNECTED 0x04
#define LDC_PACKET_SIZE 64
struct ldc_channel;
/* Allocate state for a channel. */
......@@ -72,6 +74,12 @@ int ldc_connect(struct ldc_channel *lp);
int ldc_disconnect(struct ldc_channel *lp);
int ldc_state(struct ldc_channel *lp);
void ldc_set_state(struct ldc_channel *lp, u8 state);
int ldc_mode(struct ldc_channel *lp);
void __ldc_print(struct ldc_channel *lp, const char *caller);
int ldc_rx_reset(struct ldc_channel *lp);
#define ldc_print(chan) __ldc_print(chan, __func__)
/* Read and write operations. Only valid when the link is up. */
int ldc_write(struct ldc_channel *lp, const void *buf,
......
......@@ -16,6 +16,7 @@ struct mdesc_handle *mdesc_grab(void);
void mdesc_release(struct mdesc_handle *);
#define MDESC_NODE_NULL (~(u64)0)
#define MDESC_MAX_STR_LEN 256
u64 mdesc_node_by_name(struct mdesc_handle *handle,
u64 from_node, const char *name);
......@@ -62,15 +63,32 @@ u64 mdesc_arc_target(struct mdesc_handle *hp, u64 arc);
void mdesc_update(void);
struct mdesc_notifier_client {
void (*add)(struct mdesc_handle *handle, u64 node);
void (*remove)(struct mdesc_handle *handle, u64 node);
void (*add)(struct mdesc_handle *handle, u64 node,
const char *node_name);
void (*remove)(struct mdesc_handle *handle, u64 node,
const char *node_name);
const char *node_name;
struct mdesc_notifier_client *next;
};
void mdesc_register_notifier(struct mdesc_notifier_client *client);
union md_node_info {
struct vdev_port {
u64 id; /* id */
u64 parent_cfg_hdl; /* parent config handle */
const char *name; /* name (property) */
} vdev_port;
struct ds_port {
u64 id; /* id */
} ds_port;
};
u64 mdesc_get_node(struct mdesc_handle *hp, const char *node_name,
union md_node_info *node_info);
int mdesc_get_node_info(struct mdesc_handle *hp, u64 node,
const char *node_name, union md_node_info *node_info);
void mdesc_fill_in_cpu_data(cpumask_t *mask);
void mdesc_populate_present_mask(cpumask_t *mask);
void mdesc_get_page_sizes(cpumask_t *mask, unsigned long *pgsz_mask);
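Read together, union md_node_info and the two lookup helpers above imply a usage pattern like the following. This is an illustrative sketch only: the node-name string, the cfg_handle value, and the surrounding driver context are assumptions, not taken from this diff.
/* Hypothetical caller: look up a virtual-device port in the MD. */
struct mdesc_handle *hp = mdesc_grab();
union md_node_info info = {
	.vdev_port = {
		.id = 0,			/* assumed port id */
		.parent_cfg_hdl = cfg_handle,	/* assumed to be known */
		.name = "vnet-port",		/* assumed name property */
	},
};
u64 node = mdesc_get_node(hp, "virtual-device-port", &info);
if (node != MDESC_NODE_NULL) {
	/* node remains valid while the mdesc handle reference is held */
}
mdesc_release(hp);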
......
#ifndef _ASM_SPARC_QRWLOCK_H
#define _ASM_SPARC_QRWLOCK_H
#include <asm-generic/qrwlock_types.h>
#include <asm-generic/qrwlock.h>
#endif /* _ASM_SPARC_QRWLOCK_H */
#ifndef _ASM_SPARC_QSPINLOCK_H
#define _ASM_SPARC_QSPINLOCK_H
#include <asm-generic/qspinlock_types.h>
#include <asm-generic/qspinlock.h>
#endif /* _ASM_SPARC_QSPINLOCK_H */
/*
* Just a place holder.
* Just a place holder.
*/
#ifndef _SPARC_SETUP_H
#define _SPARC_SETUP_H
......
......@@ -10,216 +10,12 @@
#include <asm/processor.h>
#include <asm/barrier.h>
/* To get debugging spinlocks which detect and catch
* deadlock situations, set CONFIG_DEBUG_SPINLOCK
* and rebuild your kernel.
*/
/* Because we play games to save cycles in the non-contention case, we
* need to be extra careful about branch targets into the "spinning"
* code. They live in their own section, but the newer V9 branches
* have a shorter range than the traditional 32-bit sparc branch
* variants. The rule is that the branches that go into and out of
* the spinner sections must be pre-V9 branches.
*/
#define arch_spin_is_locked(lp) ((lp)->lock != 0)
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
smp_cond_load_acquire(&lock->lock, !VAL);
}
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp;
__asm__ __volatile__(
"1: ldstub [%1], %0\n"
" brnz,pn %0, 2f\n"
" nop\n"
" .subsection 2\n"
"2: ldub [%1], %0\n"
" brnz,pt %0, 2b\n"
" nop\n"
" ba,a,pt %%xcc, 1b\n"
" .previous"
: "=&r" (tmp)
: "r" (lock)
: "memory");
}
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned long result;
__asm__ __volatile__(
" ldstub [%1], %0\n"
: "=r" (result)
: "r" (lock)
: "memory");
return (result == 0UL);
}
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
__asm__ __volatile__(
" stb %%g0, [%0]"
: /* No outputs */
: "r" (lock)
: "memory");
}
static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
unsigned long tmp1, tmp2;
__asm__ __volatile__(
"1: ldstub [%2], %0\n"
" brnz,pn %0, 2f\n"
" nop\n"
" .subsection 2\n"
"2: rdpr %%pil, %1\n"
" wrpr %3, %%pil\n"
"3: ldub [%2], %0\n"
" brnz,pt %0, 3b\n"
" nop\n"
" ba,pt %%xcc, 1b\n"
" wrpr %1, %%pil\n"
" .previous"
: "=&r" (tmp1), "=&r" (tmp2)
: "r"(lock), "r"(flags)
: "memory");
}
/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
static inline void arch_read_lock(arch_rwlock_t *lock)
{
unsigned long tmp1, tmp2;
__asm__ __volatile__ (
"1: ldsw [%2], %0\n"
" brlz,pn %0, 2f\n"
"4: add %0, 1, %1\n"
" cas [%2], %0, %1\n"
" cmp %0, %1\n"
" bne,pn %%icc, 1b\n"
" nop\n"
" .subsection 2\n"
"2: ldsw [%2], %0\n"
" brlz,pt %0, 2b\n"
" nop\n"
" ba,a,pt %%xcc, 4b\n"
" .previous"
: "=&r" (tmp1), "=&r" (tmp2)
: "r" (lock)
: "memory");
}
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
int tmp1, tmp2;
__asm__ __volatile__ (
"1: ldsw [%2], %0\n"
" brlz,a,pn %0, 2f\n"
" mov 0, %0\n"
" add %0, 1, %1\n"
" cas [%2], %0, %1\n"
" cmp %0, %1\n"
" bne,pn %%icc, 1b\n"
" mov 1, %0\n"
"2:"
: "=&r" (tmp1), "=&r" (tmp2)
: "r" (lock)
: "memory");
return tmp1;
}
static inline void arch_read_unlock(arch_rwlock_t *lock)
{
unsigned long tmp1, tmp2;
__asm__ __volatile__(
"1: lduw [%2], %0\n"
" sub %0, 1, %1\n"
" cas [%2], %0, %1\n"
" cmp %0, %1\n"
" bne,pn %%xcc, 1b\n"
" nop"
: "=&r" (tmp1), "=&r" (tmp2)
: "r" (lock)
: "memory");
}
static inline void arch_write_lock(arch_rwlock_t *lock)
{
unsigned long mask, tmp1, tmp2;
mask = 0x80000000UL;
__asm__ __volatile__(
"1: lduw [%2], %0\n"
" brnz,pn %0, 2f\n"
"4: or %0, %3, %1\n"
" cas [%2], %0, %1\n"
" cmp %0, %1\n"
" bne,pn %%icc, 1b\n"
" nop\n"
" .subsection 2\n"
"2: lduw [%2], %0\n"
" brnz,pt %0, 2b\n"
" nop\n"
" ba,a,pt %%xcc, 4b\n"
" .previous"
: "=&r" (tmp1), "=&r" (tmp2)
: "r" (lock), "r" (mask)
: "memory");
}
static inline void arch_write_unlock(arch_rwlock_t *lock)
{
__asm__ __volatile__(
" stw %%g0, [%0]"
: /* no outputs */
: "r" (lock)
: "memory");
}
static inline int arch_write_trylock(arch_rwlock_t *lock)
{
unsigned long mask, tmp1, tmp2, result;
mask = 0x80000000UL;
__asm__ __volatile__(
" mov 0, %2\n"
"1: lduw [%3], %0\n"
" brnz,pn %0, 2f\n"
" or %0, %4, %1\n"
" cas [%3], %0, %1\n"
" cmp %0, %1\n"
" bne,pn %%icc, 1b\n"
" nop\n"
" mov 1, %2\n"
"2:"
: "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
: "r" (lock), "r" (mask)
: "memory");
return result;
}
#include <asm/qrwlock.h>
#include <asm/qspinlock.h>
#define arch_read_lock_flags(p, f) arch_read_lock(p)
#define arch_write_lock_flags(p, f) arch_write_lock(p)
#define arch_read_can_lock(rw) (!((rw)->lock & 0x80000000UL))
#define arch_write_can_lock(rw) (!(rw)->lock)
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
......
#ifndef __SPARC_SPINLOCK_TYPES_H
#define __SPARC_SPINLOCK_TYPES_H
#ifndef __LINUX_SPINLOCK_TYPES_H
# error "please don't include this file directly"
#endif
#ifdef CONFIG_QUEUED_SPINLOCKS
#include <asm-generic/qspinlock_types.h>
#else
typedef struct {
volatile unsigned char lock;
} arch_spinlock_t;
#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
#endif /* CONFIG_QUEUED_SPINLOCKS */
#ifdef CONFIG_QUEUED_RWLOCKS
#include <asm-generic/qrwlock_types.h>
#else
typedef struct {
volatile unsigned int lock;
} arch_rwlock_t;
#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif /* CONFIG_QUEUED_RWLOCKS */
#endif
......@@ -9,7 +9,12 @@
#include <linux/types.h>
#include <linux/init.h>
/* The most frequently accessed fields should be first,
* to fit into the same cacheline.
*/
struct sparc64_tick_ops {
unsigned long ticks_per_nsec_quotient; /* tick -> ns multiplier */
unsigned long offset; /* boot-time tick value, converted to ns */
unsigned long long (*get_tick)(void);
int (*add_compare)(unsigned long);
unsigned long softint_mask;
......@@ -17,6 +22,8 @@ struct sparc64_tick_ops {
void (*init_tick)(void);
unsigned long (*add_tick)(unsigned long);
unsigned long (*get_frequency)(void);
unsigned long frequency;
char *name;
};
......@@ -27,4 +34,64 @@ unsigned long sparc64_get_clock_tick(unsigned int cpu);
void setup_sparc64_timer(void);
void __init time_init(void);
#define TICK_PRIV_BIT BIT(63)
#define TICKCMP_IRQ_BIT BIT(63)
#define HBIRD_STICKCMP_ADDR 0x1fe0000f060UL
#define HBIRD_STICK_ADDR 0x1fe0000f070UL
#define GET_TICK_NINSTR 13
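/* get_tick() below is emitted as the 13-instruction Hummingbird %stick
 * read; on every other cpu type the boot-time patcher (get_tick_patch()
 * in time_64.c) overwrites it in place with the 2-instruction rd %tick
 * or rd %asr24 sequence recorded in the patch entries below.
 */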
struct get_tick_patch {
unsigned int addr;
unsigned int tick[GET_TICK_NINSTR];
unsigned int stick[GET_TICK_NINSTR];
};
extern struct get_tick_patch __get_tick_patch;
extern struct get_tick_patch __get_tick_patch_end;
static inline unsigned long get_tick(void)
{
unsigned long tick, tmp1, tmp2;
__asm__ __volatile__(
/* read hbtick 13 instructions */
"661:\n"
" mov 0x1fe, %1\n"
" sllx %1, 0x20, %1\n"
" sethi %%hi(0xf000), %2\n"
" or %2, 0x70, %2\n"
" or %1, %2, %1\n" /* %1 = HBIRD_STICK_ADDR */
" add %1, 8, %2\n"
" ldxa [%2]%3, %0\n"
" ldxa [%1]%3, %1\n"
" ldxa [%2]%3, %2\n"
" sub %2, %0, %0\n" /* don't modify %xcc */
" brnz,pn %0, 661b\n" /* restart to save one register */
" sllx %2, 32, %2\n"
" or %2, %1, %0\n"
/* Common/not patched code */
" sllx %0, 1, %0\n"
" srlx %0, 1, %0\n" /* Clear TICK_PRIV_BIT */
/* Beginning of patch section */
" .section .get_tick_patch, \"ax\"\n"
" .word 661b\n"
/* read tick 2 instructions and 11 skipped */
" ba 1f\n"
" rd %%tick, %0\n"
" .skip 4 * (%4 - 2)\n"
"1:\n"
/* read stick 2 instructions and 11 skipped */
" ba 1f\n"
" rd %%asr24, %0\n"
" .skip 4 * (%4 - 2)\n"
"1:\n"
/* End of patch section */
" .previous\n"
: "=&r" (tick), "=&r" (tmp1), "=&r" (tmp2)
: "i" (ASI_PHYS_BYPASS_EC_E), "i" (GET_TICK_NINSTR));
return tick;
}
#endif /* _SPARC64_TIMER_H */
......@@ -316,24 +316,33 @@ static inline u32 vio_dring_prev(struct vio_dring_state *dr, u32 index)
}
#define VIO_MAX_TYPE_LEN 32
#define VIO_MAX_NAME_LEN 32
#define VIO_MAX_COMPAT_LEN 64
struct vio_dev {
u64 mp;
struct device_node *dp;
char node_name[VIO_MAX_NAME_LEN];
char type[VIO_MAX_TYPE_LEN];
char compat[VIO_MAX_COMPAT_LEN];
int compat_len;
u64 dev_no;
u64 id;
unsigned long port_id;
unsigned long channel_id;
unsigned int tx_irq;
unsigned int rx_irq;
u64 rx_ino;
u64 tx_ino;
/* Handle to the root of "channel-devices" sub-tree in MDESC */
u64 cdev_handle;
/* MD specific data used to identify the vdev in MD */
union md_node_info md_node_info;
struct device dev;
};
......@@ -347,6 +356,7 @@ struct vio_driver {
void (*shutdown)(struct vio_dev *dev);
unsigned long driver_data;
struct device_driver driver;
bool no_irq;
};
struct vio_version {
......@@ -490,5 +500,6 @@ int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev,
void vio_port_up(struct vio_driver_state *vio);
int vio_set_intr(unsigned long dev_ino, int state);
u64 vio_vdev_node(struct mdesc_handle *hp, struct vio_dev *vdev);
#endif /* _SPARC64_VIO_H */
......@@ -167,7 +167,7 @@ static int apc_probe(struct platform_device *op)
return 0;
}
static struct of_device_id apc_match[] = {
static const struct of_device_id apc_match[] = {
{
.name = APC_OBPNAME,
},
......
......@@ -52,6 +52,9 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs);
void do_signal32(struct pt_regs * regs);
asmlinkage int do_sys32_sigstack(u32 u_ssptr, u32 u_ossptr, unsigned long sp);
/* time_64.c */
void __init time_init_early(void);
/* compat_audit.c */
extern unsigned int sparc32_dir_class[];
extern unsigned int sparc32_chattr_class[];
......
......@@ -34,7 +34,6 @@
static char version[] =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
#define LDC_PACKET_SIZE 64
/* Packet header layout for unreliable and reliable mode frames.
* When in RAW mode, packets are simply straight 64-byte payloads
......@@ -178,6 +177,8 @@ do { if (lp->cfg.debug & LDC_DEBUG_##TYPE) \
printk(KERN_INFO PFX "ID[%lu] " f, lp->id, ## a); \
} while (0)
#define LDC_ABORT(lp) ldc_abort((lp), __func__)
static const char *state_to_str(u8 state)
{
switch (state) {
......@@ -196,15 +197,6 @@ static const char *state_to_str(u8 state)
}
}
static void ldc_set_state(struct ldc_channel *lp, u8 state)
{
ldcdbg(STATE, "STATE (%s) --> (%s)\n",
state_to_str(lp->state),
state_to_str(state));
lp->state = state;
}
static unsigned long __advance(unsigned long off, unsigned long num_entries)
{
off += LDC_PACKET_SIZE;
......@@ -516,11 +508,12 @@ static int send_data_nack(struct ldc_channel *lp, struct ldc_packet *data_pkt)
return err;
}
static int ldc_abort(struct ldc_channel *lp)
static int ldc_abort(struct ldc_channel *lp, const char *msg)
{
unsigned long hv_err;
ldcdbg(STATE, "ABORT\n");
ldcdbg(STATE, "ABORT[%s]\n", msg);
ldc_print(lp);
/* We report but do not act upon the hypervisor errors because
* there really isn't much we can do if they fail at this point.
......@@ -605,7 +598,7 @@ static int process_ver_info(struct ldc_channel *lp, struct ldc_version *vp)
}
}
if (err)
return ldc_abort(lp);
return LDC_ABORT(lp);
return 0;
}
......@@ -618,13 +611,13 @@ static int process_ver_ack(struct ldc_channel *lp, struct ldc_version *vp)
if (lp->hs_state == LDC_HS_GOTVERS) {
if (lp->ver.major != vp->major ||
lp->ver.minor != vp->minor)
return ldc_abort(lp);
return LDC_ABORT(lp);
} else {
lp->ver = *vp;
lp->hs_state = LDC_HS_GOTVERS;
}
if (send_rts(lp))
return ldc_abort(lp);
return LDC_ABORT(lp);
return 0;
}
......@@ -635,17 +628,17 @@ static int process_ver_nack(struct ldc_channel *lp, struct ldc_version *vp)
unsigned long new_tail;
if (vp->major == 0 && vp->minor == 0)
return ldc_abort(lp);
return LDC_ABORT(lp);
vap = find_by_major(vp->major);
if (!vap)
return ldc_abort(lp);
return LDC_ABORT(lp);
p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS,
vap, sizeof(*vap),
&new_tail);
if (!p)
return ldc_abort(lp);
return LDC_ABORT(lp);
return send_tx_packet(lp, p, new_tail);
}
......@@ -668,7 +661,7 @@ static int process_version(struct ldc_channel *lp,
return process_ver_nack(lp, vp);
default:
return ldc_abort(lp);
return LDC_ABORT(lp);
}
}
......@@ -681,13 +674,13 @@ static int process_rts(struct ldc_channel *lp,
if (p->stype != LDC_INFO ||
lp->hs_state != LDC_HS_GOTVERS ||
p->env != lp->cfg.mode)
return ldc_abort(lp);
return LDC_ABORT(lp);
lp->snd_nxt = p->seqid;
lp->rcv_nxt = p->seqid;
lp->hs_state = LDC_HS_SENTRTR;
if (send_rtr(lp))
return ldc_abort(lp);
return LDC_ABORT(lp);
return 0;
}
......@@ -700,7 +693,7 @@ static int process_rtr(struct ldc_channel *lp,
if (p->stype != LDC_INFO ||
p->env != lp->cfg.mode)
return ldc_abort(lp);
return LDC_ABORT(lp);
lp->snd_nxt = p->seqid;
lp->hs_state = LDC_HS_COMPLETE;
......@@ -723,7 +716,7 @@ static int process_rdx(struct ldc_channel *lp,
if (p->stype != LDC_INFO ||
!(rx_seq_ok(lp, p->seqid)))
return ldc_abort(lp);
return LDC_ABORT(lp);
lp->rcv_nxt = p->seqid;
......@@ -750,14 +743,14 @@ static int process_control_frame(struct ldc_channel *lp,
return process_rdx(lp, p);
default:
return ldc_abort(lp);
return LDC_ABORT(lp);
}
}
static int process_error_frame(struct ldc_channel *lp,
struct ldc_packet *p)
{
return ldc_abort(lp);
return LDC_ABORT(lp);
}
static int process_data_ack(struct ldc_channel *lp,
......@@ -776,7 +769,7 @@ static int process_data_ack(struct ldc_channel *lp,
return 0;
}
if (head == lp->tx_tail)
return ldc_abort(lp);
return LDC_ABORT(lp);
}
return 0;
......@@ -820,16 +813,21 @@ static irqreturn_t ldc_rx(int irq, void *dev_id)
lp->hs_state = LDC_HS_COMPLETE;
ldc_set_state(lp, LDC_STATE_CONNECTED);
event_mask |= LDC_EVENT_UP;
orig_state = lp->chan_state;
/*
* Generate an LDC_EVENT_UP event if the channel
* was not already up.
*/
if (orig_state != LDC_CHANNEL_UP) {
event_mask |= LDC_EVENT_UP;
orig_state = lp->chan_state;
}
}
/* If we are in reset state, flush the RX queue and ignore
* everything.
*/
if (lp->flags & LDC_FLAG_RESET) {
(void) __set_rx_head(lp, lp->rx_tail);
(void) ldc_rx_reset(lp);
goto out;
}
......@@ -880,7 +878,7 @@ static irqreturn_t ldc_rx(int irq, void *dev_id)
break;
default:
err = ldc_abort(lp);
err = LDC_ABORT(lp);
break;
}
......@@ -895,7 +893,7 @@ static irqreturn_t ldc_rx(int irq, void *dev_id)
err = __set_rx_head(lp, new);
if (err < 0) {
(void) ldc_abort(lp);
(void) LDC_ABORT(lp);
break;
}
if (lp->hs_state == LDC_HS_COMPLETE)
......@@ -936,7 +934,14 @@ static irqreturn_t ldc_tx(int irq, void *dev_id)
lp->hs_state = LDC_HS_COMPLETE;
ldc_set_state(lp, LDC_STATE_CONNECTED);
event_mask |= LDC_EVENT_UP;
/*
* Generate an LDC_EVENT_UP event if the channel
* was not already up.
*/
if (orig_state != LDC_CHANNEL_UP) {
event_mask |= LDC_EVENT_UP;
orig_state = lp->chan_state;
}
}
spin_unlock_irqrestore(&lp->lock, flags);
......@@ -1342,6 +1347,14 @@ int ldc_bind(struct ldc_channel *lp)
lp->hs_state = LDC_HS_OPEN;
ldc_set_state(lp, LDC_STATE_BOUND);
if (lp->cfg.mode == LDC_MODE_RAW) {
/*
* There is no handshake in RAW mode, so handshake
* is completed.
*/
lp->hs_state = LDC_HS_COMPLETE;
}
spin_unlock_irqrestore(&lp->lock, flags);
return 0;
......@@ -1447,12 +1460,54 @@ int ldc_state(struct ldc_channel *lp)
}
EXPORT_SYMBOL(ldc_state);
void ldc_set_state(struct ldc_channel *lp, u8 state)
{
ldcdbg(STATE, "STATE (%s) --> (%s)\n",
state_to_str(lp->state),
state_to_str(state));
lp->state = state;
}
EXPORT_SYMBOL(ldc_set_state);
int ldc_mode(struct ldc_channel *lp)
{
return lp->cfg.mode;
}
EXPORT_SYMBOL(ldc_mode);
int ldc_rx_reset(struct ldc_channel *lp)
{
return __set_rx_head(lp, lp->rx_tail);
}
void __ldc_print(struct ldc_channel *lp, const char *caller)
{
pr_info("%s: id=0x%lx flags=0x%x state=%s cstate=0x%lx hsstate=0x%x\n"
"\trx_h=0x%lx rx_t=0x%lx rx_n=%ld\n"
"\ttx_h=0x%lx tx_t=0x%lx tx_n=%ld\n"
"\trcv_nxt=%u snd_nxt=%u\n",
caller, lp->id, lp->flags, state_to_str(lp->state),
lp->chan_state, lp->hs_state,
lp->rx_head, lp->rx_tail, lp->rx_num_entries,
lp->tx_head, lp->tx_tail, lp->tx_num_entries,
lp->rcv_nxt, lp->snd_nxt);
}
static int write_raw(struct ldc_channel *lp, const void *buf, unsigned int size)
{
struct ldc_packet *p;
unsigned long new_tail;
unsigned long new_tail, hv_err;
int err;
hv_err = sun4v_ldc_tx_get_state(lp->id, &lp->tx_head, &lp->tx_tail,
&lp->chan_state);
if (unlikely(hv_err))
return -EBUSY;
if (unlikely(lp->chan_state != LDC_CHANNEL_UP))
return LDC_ABORT(lp);
if (size > LDC_PACKET_SIZE)
return -EMSGSIZE;
......@@ -1483,7 +1538,7 @@ static int read_raw(struct ldc_channel *lp, void *buf, unsigned int size)
&lp->rx_tail,
&lp->chan_state);
if (hv_err)
return ldc_abort(lp);
return LDC_ABORT(lp);
if (lp->chan_state == LDC_CHANNEL_DOWN ||
lp->chan_state == LDC_CHANNEL_RESETTING)
......@@ -1526,7 +1581,7 @@ static int write_nonraw(struct ldc_channel *lp, const void *buf,
return -EBUSY;
if (unlikely(lp->chan_state != LDC_CHANNEL_UP))
return ldc_abort(lp);
return LDC_ABORT(lp);
if (!tx_has_space_for(lp, size))
return -EAGAIN;
......@@ -1592,9 +1647,9 @@ static int rx_bad_seq(struct ldc_channel *lp, struct ldc_packet *p,
if (err)
return err;
err = __set_rx_head(lp, lp->rx_tail);
err = ldc_rx_reset(lp);
if (err < 0)
return ldc_abort(lp);
return LDC_ABORT(lp);
return 0;
}
......@@ -1607,7 +1662,7 @@ static int data_ack_nack(struct ldc_channel *lp, struct ldc_packet *p)
return err;
}
if (p->stype & LDC_NACK)
return ldc_abort(lp);
return LDC_ABORT(lp);
return 0;
}
......@@ -1627,7 +1682,7 @@ static int rx_data_wait(struct ldc_channel *lp, unsigned long cur_head)
&lp->rx_tail,
&lp->chan_state);
if (hv_err)
return ldc_abort(lp);
return LDC_ABORT(lp);
if (lp->chan_state == LDC_CHANNEL_DOWN ||
lp->chan_state == LDC_CHANNEL_RESETTING)
......@@ -1650,7 +1705,7 @@ static int rx_set_head(struct ldc_channel *lp, unsigned long head)
int err = __set_rx_head(lp, head);
if (err < 0)
return ldc_abort(lp);
return LDC_ABORT(lp);
lp->rx_head = head;
return 0;
......@@ -1689,7 +1744,7 @@ static int read_nonraw(struct ldc_channel *lp, void *buf, unsigned int size)
&lp->rx_tail,
&lp->chan_state);
if (hv_err)
return ldc_abort(lp);
return LDC_ABORT(lp);
if (lp->chan_state == LDC_CHANNEL_DOWN ||
lp->chan_state == LDC_CHANNEL_RESETTING)
......@@ -1733,9 +1788,14 @@ static int read_nonraw(struct ldc_channel *lp, void *buf, unsigned int size)
lp->rcv_nxt = p->seqid;
/*
* If this is a control-only packet, there is nothing
* else to do but advance the rx queue since the packet
* was already processed above.
*/
if (!(p->type & LDC_DATA)) {
new = rx_advance(lp, new);
goto no_data;
break;
}
if (p->stype & (LDC_ACK | LDC_NACK)) {
err = data_ack_nack(lp, p);
......@@ -1900,6 +1960,8 @@ int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size)
unsigned long flags;
int err;
ldcdbg(RX, "%s: entered size=%d\n", __func__, size);
if (!buf)
return -EINVAL;
......@@ -1915,6 +1977,9 @@ int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size)
spin_unlock_irqrestore(&lp->lock, flags);
ldcdbg(RX, "%s: mode=%d, head=%lu, tail=%lu rv=%d\n", __func__,
lp->cfg.mode, lp->rx_head, lp->rx_tail, err);
return err;
}
EXPORT_SYMBOL(ldc_read);
......
......@@ -71,7 +71,7 @@ static int pmc_probe(struct platform_device *op)
return 0;
}
static struct of_device_id pmc_match[] = {
static const struct of_device_id pmc_match[] = {
{
.name = PMC_OBPNAME,
},
......
......@@ -381,7 +381,7 @@ bool arch_find_n_match_cpu_physical_id(struct device_node *cpun,
int this_cpu_id;
/* On hypervisor based platforms we interrogate the 'reg'
* property. On everything else we look for a 'upa-portis',
* property. On everything else we look for a 'upa-portid',
* 'portid', or 'cpuid' property.
*/
......
......@@ -95,7 +95,7 @@ static struct console prom_early_console = {
.index = -1,
};
/*
/*
* Process kernel command line switches that are specific to the
* SPARC or that require special low-level processing.
*/
......@@ -365,6 +365,7 @@ void __init start_early_boot(void)
}
current_thread_info()->cpu = cpu;
time_init_early();
prom_init_report();
start_kernel();
}
......@@ -639,7 +640,7 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_BLK_DEV_RAM
rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0);
rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
#endif
task_thread_info(&init_task)->kregs = &fake_swapper_regs;
......@@ -648,7 +649,7 @@ void __init setup_arch(char **cmdline_p)
if (!ic_set_manually) {
phandle chosen = prom_finddevice("/chosen");
u32 cl, sv, gw;
cl = prom_getintdefault (chosen, "client-ip", 0);
sv = prom_getintdefault (chosen, "server-ip", 0);
gw = prom_getintdefault (chosen, "gateway-ip", 0);
......
......@@ -298,7 +298,7 @@ static int clock_probe(struct platform_device *op)
return 0;
}
static struct of_device_id clock_match[] = {
static const struct of_device_id clock_match[] = {
{
.name = "eeprom",
},
......
......@@ -32,7 +32,6 @@
#include <linux/kernel_stat.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/ftrace.h>
......@@ -47,14 +46,13 @@
#include <asm/cpudata.h>
#include <linux/uaccess.h>
#include <asm/irq_regs.h>
#include <asm/cacheflush.h>
#include "entry.h"
#include "kernel.h"
DEFINE_SPINLOCK(rtc_lock);
#define TICK_PRIV_BIT (1UL << 63)
#define TICKCMP_IRQ_BIT (1UL << 63)
#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
......@@ -164,13 +162,44 @@ static unsigned long tick_add_tick(unsigned long adj)
return new_tick;
}
static struct sparc64_tick_ops tick_operations __read_mostly = {
/* Searches for cpu clock frequency with given cpuid in OpenBoot tree */
static unsigned long cpuid_to_freq(phandle node, int cpuid)
{
bool is_cpu_node = false;
unsigned long freq = 0;
char type[128];
if (!node)
return freq;
if (prom_getproperty(node, "device_type", type, sizeof(type)) != -1)
is_cpu_node = (strcmp(type, "cpu") == 0);
/* try upa-portid then cpuid to get cpuid, see prom_64.c */
if (is_cpu_node && (prom_getint(node, "upa-portid") == cpuid ||
prom_getint(node, "cpuid") == cpuid))
freq = prom_getintdefault(node, "clock-frequency", 0);
if (!freq)
freq = cpuid_to_freq(prom_getchild(node), cpuid);
if (!freq)
freq = cpuid_to_freq(prom_getsibling(node), cpuid);
return freq;
}
static unsigned long tick_get_frequency(void)
{
return cpuid_to_freq(prom_root_node, hard_smp_processor_id());
}
static struct sparc64_tick_ops tick_operations __cacheline_aligned = {
.name = "tick",
.init_tick = tick_init_tick,
.disable_irq = tick_disable_irq,
.get_tick = tick_get_tick,
.add_tick = tick_add_tick,
.add_compare = tick_add_compare,
.get_frequency = tick_get_frequency,
.softint_mask = 1UL << 0,
};
......@@ -250,6 +279,11 @@ static int stick_add_compare(unsigned long adj)
return ((long)(new_tick - (orig_tick+adj))) > 0L;
}
static unsigned long stick_get_frequency(void)
{
return prom_getintdefault(prom_root_node, "stick-frequency", 0);
}
static struct sparc64_tick_ops stick_operations __read_mostly = {
.name = "stick",
.init_tick = stick_init_tick,
......@@ -257,6 +291,7 @@ static struct sparc64_tick_ops stick_operations __read_mostly = {
.get_tick = stick_get_tick,
.add_tick = stick_add_tick,
.add_compare = stick_add_compare,
.get_frequency = stick_get_frequency,
.softint_mask = 1UL << 16,
};
......@@ -277,9 +312,6 @@ static struct sparc64_tick_ops stick_operations __read_mostly = {
* 2) write high
* 3) write low
*/
#define HBIRD_STICKCMP_ADDR 0x1fe0000f060UL
#define HBIRD_STICK_ADDR 0x1fe0000f070UL
static unsigned long __hbird_read_stick(void)
{
unsigned long ret, tmp1, tmp2, tmp3;
......@@ -381,6 +413,11 @@ static int hbtick_add_compare(unsigned long adj)
return ((long)(val2 - val)) > 0L;
}
static unsigned long hbtick_get_frequency(void)
{
return prom_getintdefault(prom_root_node, "stick-frequency", 0);
}
static struct sparc64_tick_ops hbtick_operations __read_mostly = {
.name = "hbtick",
.init_tick = hbtick_init_tick,
......@@ -388,11 +425,10 @@ static struct sparc64_tick_ops hbtick_operations __read_mostly = {
.get_tick = hbtick_get_tick,
.add_tick = hbtick_add_tick,
.add_compare = hbtick_add_compare,
.get_frequency = hbtick_get_frequency,
.softint_mask = 1UL << 0,
};
static unsigned long timer_ticks_per_nsec_quotient __read_mostly;
unsigned long cmos_regs;
EXPORT_SYMBOL(cmos_regs);
......@@ -582,34 +618,17 @@ static int __init clock_init(void)
*/
fs_initcall(clock_init);
/* This is gets the master TICK_INT timer going. */
static unsigned long sparc64_init_timers(void)
/* Return true if this is Hummingbird, aka Ultra-IIe */
static bool is_hummingbird(void)
{
struct device_node *dp;
unsigned long freq;
unsigned long ver, manuf, impl;
dp = of_find_node_by_path("/");
if (tlb_type == spitfire) {
unsigned long ver, manuf, impl;
__asm__ __volatile__ ("rdpr %%ver, %0"
: "=&r" (ver));
manuf = ((ver >> 48) & 0xffff);
impl = ((ver >> 32) & 0xffff);
if (manuf == 0x17 && impl == 0x13) {
/* Hummingbird, aka Ultra-IIe */
tick_ops = &hbtick_operations;
freq = of_getintprop_default(dp, "stick-frequency", 0);
} else {
tick_ops = &tick_operations;
freq = local_cpu_data().clock_tick;
}
} else {
tick_ops = &stick_operations;
freq = of_getintprop_default(dp, "stick-frequency", 0);
}
__asm__ __volatile__ ("rdpr %%ver, %0"
: "=&r" (ver));
manuf = ((ver >> 48) & 0xffff);
impl = ((ver >> 32) & 0xffff);
return freq;
return (manuf == 0x17 && impl == 0x13);
}
struct freq_table {
......@@ -671,12 +690,12 @@ core_initcall(register_sparc64_cpufreq_notifier);
static int sparc64_next_event(unsigned long delta,
struct clock_event_device *evt)
{
return tick_ops->add_compare(delta) ? -ETIME : 0;
return tick_operations.add_compare(delta) ? -ETIME : 0;
}
static int sparc64_timer_shutdown(struct clock_event_device *evt)
{
tick_ops->disable_irq();
tick_operations.disable_irq();
return 0;
}
......@@ -693,7 +712,7 @@ static DEFINE_PER_CPU(struct clock_event_device, sparc64_events);
void __irq_entry timer_interrupt(int irq, struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
unsigned long tick_mask = tick_ops->softint_mask;
unsigned long tick_mask = tick_operations.softint_mask;
int cpu = smp_processor_id();
struct clock_event_device *evt = &per_cpu(sparc64_events, cpu);
......@@ -728,7 +747,7 @@ void setup_sparc64_timer(void)
: "=r" (pstate)
: "i" (PSTATE_IE));
tick_ops->init_tick();
tick_operations.init_tick();
/* Restore PSTATE_IE. */
__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
......@@ -755,12 +774,10 @@ static unsigned long tb_ticks_per_usec __read_mostly;
void __delay(unsigned long loops)
{
unsigned long bclock, now;
unsigned long bclock = get_tick();
bclock = tick_ops->get_tick();
do {
now = tick_ops->get_tick();
} while ((now-bclock) < loops);
while ((get_tick() - bclock) < loops)
;
}
EXPORT_SYMBOL(__delay);
......@@ -772,26 +789,71 @@ EXPORT_SYMBOL(udelay);
static u64 clocksource_tick_read(struct clocksource *cs)
{
return tick_ops->get_tick();
return get_tick();
}
static void __init get_tick_patch(void)
{
unsigned int *addr, *instr, i;
struct get_tick_patch *p;
if (tlb_type == spitfire && is_hummingbird())
return;
for (p = &__get_tick_patch; p < &__get_tick_patch_end; p++) {
instr = (tlb_type == spitfire) ? p->tick : p->stick;
addr = (unsigned int *)(unsigned long)p->addr;
for (i = 0; i < GET_TICK_NINSTR; i++) {
addr[i] = instr[i];
/* ensure that address is modified before flush */
wmb();
flushi(&addr[i]);
}
}
}
static void init_tick_ops(struct sparc64_tick_ops *ops)
{
unsigned long freq, quotient, tick;
freq = ops->get_frequency();
quotient = clocksource_hz2mult(freq, SPARC64_NSEC_PER_CYC_SHIFT);
tick = ops->get_tick();
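/* Convert the current (boot-time) tick to ns and remember it as an
 * offset, so that sched_clock() counts from roughly zero.
 */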
ops->offset = (tick * quotient) >> SPARC64_NSEC_PER_CYC_SHIFT;
ops->ticks_per_nsec_quotient = quotient;
ops->frequency = freq;
tick_operations = *ops;
get_tick_patch();
}
void __init time_init_early(void)
{
if (tlb_type == spitfire) {
if (is_hummingbird())
init_tick_ops(&hbtick_operations);
else
init_tick_ops(&tick_operations);
} else {
init_tick_ops(&stick_operations);
}
}
void __init time_init(void)
{
unsigned long freq = sparc64_init_timers();
unsigned long freq;
freq = tick_operations.frequency;
tb_ticks_per_usec = freq / USEC_PER_SEC;
timer_ticks_per_nsec_quotient =
clocksource_hz2mult(freq, SPARC64_NSEC_PER_CYC_SHIFT);
clocksource_tick.name = tick_ops->name;
clocksource_tick.name = tick_operations.name;
clocksource_tick.read = clocksource_tick_read;
clocksource_register_hz(&clocksource_tick, freq);
printk("clocksource: mult[%x] shift[%d]\n",
clocksource_tick.mult, clocksource_tick.shift);
sparc64_clockevent.name = tick_ops->name;
sparc64_clockevent.name = tick_operations.name;
clockevents_calc_mult_shift(&sparc64_clockevent, freq, 4);
sparc64_clockevent.max_delta_ns =
......@@ -809,14 +871,21 @@ void __init time_init(void)
unsigned long long sched_clock(void)
{
unsigned long ticks = tick_ops->get_tick();
unsigned long quotient = tick_operations.ticks_per_nsec_quotient;
unsigned long offset = tick_operations.offset;
/* Use barrier so the compiler emits the loads first and overlaps load
* latency with reading tick, because reading %tick/%stick is a
* post-sync instruction that will flush and restart subsequent
* instructions after it commits.
*/
barrier();
return (ticks * timer_ticks_per_nsec_quotient)
>> SPARC64_NSEC_PER_CYC_SHIFT;
return ((get_tick() * quotient) >> SPARC64_NSEC_PER_CYC_SHIFT) - offset;
}
int read_current_timer(unsigned long *timer_val)
{
*timer_val = tick_ops->get_tick();
*timer_val = get_tick();
return 0;
}
......@@ -223,6 +223,9 @@ static int send_rdx(struct vio_driver_state *vio)
static int send_attr(struct vio_driver_state *vio)
{
if (!vio->ops)
return -EINVAL;
return vio->ops->send_attr(vio);
}
......@@ -283,6 +286,7 @@ static int process_ver_info(struct vio_driver_state *vio,
ver.minor = vap->minor;
pkt->minor = ver.minor;
pkt->tag.stype = VIO_SUBTYPE_ACK;
pkt->dev_class = vio->dev_class;
viodbg(HS, "SEND VERSION ACK maj[%u] min[%u]\n",
pkt->major, pkt->minor);
err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
......@@ -374,6 +378,9 @@ static int process_attr(struct vio_driver_state *vio, void *pkt)
if (!(vio->hs_state & VIO_HS_GOTVERS))
return handshake_failure(vio);
if (!vio->ops)
return 0;
err = vio->ops->handle_attr(vio, pkt);
if (err < 0) {
return handshake_failure(vio);
......@@ -388,6 +395,7 @@ static int process_attr(struct vio_driver_state *vio, void *pkt)
vio->hs_state |= VIO_HS_SENT_DREG;
}
}
return 0;
}
......@@ -647,10 +655,13 @@ int vio_control_pkt_engine(struct vio_driver_state *vio, void *pkt)
err = process_unknown(vio, pkt);
break;
}
if (!err &&
vio->hs_state != prev_state &&
(vio->hs_state & VIO_HS_COMPLETE))
vio->ops->handshake_complete(vio);
(vio->hs_state & VIO_HS_COMPLETE)) {
if (vio->ops)
vio->ops->handshake_complete(vio);
}
return err;
}
......@@ -765,7 +776,11 @@ void vio_port_up(struct vio_driver_state *vio)
}
if (!err) {
err = ldc_connect(vio->lp);
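/* RAW-mode channels have no handshake (see ldc_bind()), so mark
 * the channel connected directly instead of calling ldc_connect().
 */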
if (ldc_mode(vio->lp) == LDC_MODE_RAW)
ldc_set_state(vio->lp, LDC_STATE_CONNECTED);
else
err = ldc_connect(vio->lp);
if (err)
printk(KERN_WARNING "%s: Port %lu connect failed, "
"err=%d\n",
......@@ -805,8 +820,7 @@ int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev,
return -EINVAL;
}
if (!ops->send_attr ||
!ops->handle_attr ||
if (!ops || !ops->send_attr || !ops->handle_attr ||
!ops->handshake_complete)
return -EINVAL;
......
......@@ -149,6 +149,11 @@ SECTIONS
*(.sun_m7_2insn_patch)
__sun_m7_2insn_patch_end = .;
}
.get_tick_patch : {
__get_tick_patch = .;
*(.get_tick_patch)
__get_tick_patch_end = .;
}
PERCPU_SECTION(SMP_CACHE_BYTES)
#ifdef CONFIG_JUMP_LABEL
......
......@@ -20,6 +20,7 @@
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <asm/qrwlock.h>
/*
......