Commit 1afe0375 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] s390: virtual timer interface.

From: Martin Schwidefsky <schwidefsky@de.ibm.com>

Add a virtual CPU timer interface. The s390 CPU timer advances only while the
(virtual) CPU is actually running, so these timers fire after a given amount of
virtual CPU time rather than wall-clock time. This adds the CONFIG_VIRT_TIMER
option, the vtimer API (init_virt_timer, add_virt_timer, add_virt_timer_periodic,
mod_virt_timer, del_virt_timer), an idle hook that stops the timers while a CPU
idles, a monitor call program check handler, and a new smp_call_function_on()
primitive to run a function on one specific CPU. smp_ext_bitcall() now retries a
busy sigp condition internally instead of returning the condition code to the
caller.
parent 2e05bc63
@@ -250,6 +250,12 @@ config SHARED_KERNEL
	  You should only select this option if you know what you are
	  doing and want to exploit this feature.

config VIRT_TIMER
	bool "Virtual CPU timer support"
	help
	  This provides a kernel interface for virtual CPU timers.
	  Default is disabled.

endmenu
config PCMCIA
......
@@ -76,6 +76,7 @@ CONFIG_BINFMT_MISC=m
# CONFIG_PROCESS_DEBUG is not set
CONFIG_PFAULT=y
# CONFIG_SHARED_KERNEL is not set
# CONFIG_VIRT_TIMER is not set
# CONFIG_PCMCIA is not set
#
......
@@ -40,6 +40,9 @@
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/irq.h>
#ifdef CONFIG_VIRT_TIMER
#include <asm/timer.h>
#endif
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
@@ -77,6 +80,14 @@ void default_idle(void)
		return;
	}

#ifdef CONFIG_VIRT_TIMER
	/*
	 * hook to stop timers that should not tick while CPU is idle
	 */
	if (stop_timers())
		return;
#endif

	/*
	 * Wait for external, I/O or machine check interrupt and
	 * switch off machine check bit after the wait has ended.
......
@@ -111,6 +111,7 @@ void do_extint(struct pt_regs *regs, unsigned short code)
	int index;

	irq_enter();
	asm volatile ("mc 0,0");
	if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
		account_ticks(regs);
	kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
......
@@ -19,6 +19,9 @@
#ifdef CONFIG_IP_MULTICAST
#include <net/arp.h>
#endif
#ifdef CONFIG_VIRT_TIMER
#include <asm/timer.h>
#endif
/*
 * memory management
@@ -65,6 +68,17 @@ EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);
EXPORT_SYMBOL(empty_zero_page);
/*
 * virtual CPU timer
 */
#ifdef CONFIG_VIRT_TIMER
EXPORT_SYMBOL(init_virt_timer);
EXPORT_SYMBOL(add_virt_timer);
EXPORT_SYMBOL(add_virt_timer_periodic);
EXPORT_SYMBOL(mod_virt_timer);
EXPORT_SYMBOL(del_virt_timer);
#endif
/*
 * misc.
 */
@@ -77,5 +91,5 @@ EXPORT_SYMBOL(console_device);
EXPORT_SYMBOL_NOVERS(do_call_softirq);
EXPORT_SYMBOL(sys_wait4);
EXPORT_SYMBOL(cpcmd);
EXPORT_SYMBOL(smp_call_function_on);
EXPORT_SYMBOL(sys_ioctl);
@@ -30,6 +30,7 @@
#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <asm/sigp.h>
#include <asm/pgalloc.h>
@@ -65,7 +66,7 @@ extern char vmpoff_cmd[];
extern void do_reipl(unsigned long devno);
-static sigp_ccode smp_ext_bitcall(int, ec_bit_sig);
+static void smp_ext_bitcall(int, ec_bit_sig);
static void smp_ext_bitcall_others(ec_bit_sig);
/*
@@ -150,6 +151,59 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
	return 0;
}

/*
 * Call a function on one CPU.
 * cpu: the CPU the function should be executed on
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler. You may call it from a bottom half.
 *
 * It is guaranteed that the called function runs on the specified CPU;
 * preemption is disabled while it runs.
 */
int smp_call_function_on(void (*func) (void *info), void *info,
			 int nonatomic, int wait, int cpu)
{
	struct call_data_struct data;
	int curr_cpu;

	if (!cpu_online(cpu))
		return -EINVAL;

	/* disable preemption for local function call */
	curr_cpu = get_cpu();

	if (curr_cpu == cpu) {
		/* direct call to function */
		func(info);
		put_cpu();
		return 0;
	}

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock_bh(&call_lock);
	call_data = &data;
	smp_ext_bitcall(cpu, ec_call_function);

	/* Wait for response */
	while (atomic_read(&data.started) != 1)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != 1)
			cpu_relax();

	spin_unlock_bh(&call_lock);
	put_cpu();
	return 0;
}
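
A minimal usage sketch for the new primitive (the callback and caller below are hypothetical illustrations, not part of the patch): run a function on CPU 1 from process context and wait for its completion.

#include <linux/kernel.h>
#include <linux/smp.h>
#include <asm/smp.h>

/* hypothetical callback; runs on the target CPU with preemption disabled */
static void say_hello(void *info)
{
	printk(KERN_INFO "hello from cpu %d\n", smp_processor_id());
}

static void run_on_cpu1(void)
{
	/* wait=1: block until say_hello has finished on CPU 1 */
	if (smp_call_function_on(say_hello, NULL, 0, 1, 1) != 0)
		printk(KERN_WARNING "cpu 1 is not online\n");
}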
static inline void do_send_stop(void)
{
	u32 dummy;
@@ -305,16 +359,14 @@ void do_ext_call_interrupt(struct pt_regs *regs, __u16 code)
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
-static sigp_ccode smp_ext_bitcall(int cpu, ec_bit_sig sig)
+static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
-	sigp_ccode ccode;
-
	/*
	 * Set signaling bit in lowcore of target cpu and kick it
	 */
	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
-	ccode = signal_processor(cpu, sigp_external_call);
-	return ccode;
+	while (signal_processor(cpu, sigp_external_call) == sigp_busy)
+		udelay(10);
}
/*
......
@@ -64,6 +64,9 @@ extern void pfault_fini(void);
extern void pfault_interrupt(struct pt_regs *regs, __u16 error_code);
static ext_int_info_t ext_int_pfault;
#endif
#ifdef CONFIG_VIRT_TIMER
extern pgm_check_handler_t do_monitor_call;
#endif
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
@@ -625,6 +628,9 @@ void __init trap_init(void)
#endif /* CONFIG_ARCH_S390X */
	pgm_check_table[0x15] = &operand_exception;
	pgm_check_table[0x1C] = &privileged_op;
#ifdef CONFIG_VIRT_TIMER
	pgm_check_table[0x40] = &do_monitor_call;
#endif
	if (MACHINE_IS_VM) {
		/*
		 * First try to get pfault pseudo page faults going.
......
@@ -607,6 +607,7 @@ do_IRQ (struct pt_regs *regs)
	struct irb *irb;

	irq_enter ();
	asm volatile ("mc 0,0");
	if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
		account_ticks(regs);

	/*
......
@@ -29,6 +29,9 @@ typedef struct
	__u16 cpu;
} sigp_info;

extern int smp_call_function_on(void (*func) (void *info), void *info,
				int nonatomic, int wait, int cpu);
extern cpumask_t cpu_online_map;
extern cpumask_t cpu_possible_map;
@@ -61,4 +64,9 @@ extern __inline__ __u16 hard_smp_processor_id(void)
#define cpu_logical_map(cpu) (cpu)
#endif
#ifndef CONFIG_SMP
#define smp_call_function_on(func,info,nonatomic,wait,cpu) ({ 0; })
#endif
#endif
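
Note on the !CONFIG_SMP stub above: it expands to ({ 0; }), so the function argument is never invoked, only success is reported. A caller that needs the function to actually run on the single CPU must handle the uniprocessor case itself; a hypothetical sketch:

#ifdef CONFIG_SMP
	smp_call_function_on(func, info, 0, 1, 0);	/* run on CPU 0 */
#else
	func(info);	/* UP: only CPU 0 exists, call directly */
#endif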
/*
 *  include/asm-s390/timer.h
 *
 *  (C) Copyright IBM Corp. 2003
 *  Virtual CPU timer
 *
 *  Author: Jan Glauber (jang@de.ibm.com)
 */
#ifndef _ASM_S390_TIMER_H
#define _ASM_S390_TIMER_H
#include <linux/timer.h>
#define VTIMER_MAX_SLICE (0x7ffffffffffff000LL)
struct vtimer_list {
	struct list_head entry;
	int cpu;
	__u64 expires;
	__u64 interval;
	spinlock_t lock;
	unsigned long magic;
	void (*function)(unsigned long, struct pt_regs *);
	unsigned long data;
};
/* the offset value will wrap after ca. 71 years */
struct vtimer_queue {
	struct list_head list;
	spinlock_t lock;
	__u64 to_expire;	/* current event expire time */
	__u64 offset;		/* list offset to zero */
	__u64 idle;		/* temp var for idle */
};
void set_vtimer(__u64 expires);
extern void init_virt_timer(struct vtimer_list *timer);
extern void add_virt_timer(void *new);
extern void add_virt_timer_periodic(void *new);
extern int mod_virt_timer(struct vtimer_list *timer, __u64 expires);
extern int del_virt_timer(struct vtimer_list *timer);
int stop_timers(void);
#endif
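
A minimal usage sketch of the vtimer interface declared above (the callback, timer variable, and expiry value are hypothetical; judging from the declarations, expiry is given in CPU-timer units and must stay below VTIMER_MAX_SLICE):

#include <asm/timer.h>

/* hypothetical callback, invoked when the virtual CPU timer expires */
static void vtimer_fn(unsigned long data, struct pt_regs *regs)
{
	/* ... per-CPU bookkeeping done in virtual CPU time ... */
}

static struct vtimer_list my_vtimer;

static void start_my_vtimer(void)
{
	init_virt_timer(&my_vtimer);
	my_vtimer.function = vtimer_fn;
	my_vtimer.data = 0;
	my_vtimer.expires = 0x10000000ULL;	/* hypothetical slice */
	add_virt_timer(&my_vtimer);	/* one-shot; use add_virt_timer_periodic() to re-arm */
}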