Commit 361f7d17 authored by Linus Torvalds

Merge branch 'x86-platform-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 core platform updates from Ingo Molnar:
 "The main changes are:

   - Intel Atom platform updates.  (Andy Shevchenko)

   - modularity fixlets.  (Paul Gortmaker)

   - x86 platform clockevents driver updates for lguest, uv and Xen.
     (Viresh Kumar)

   - Microsoft Hyper-V TSC fixlet.  (Vitaly Kuznetsov)"

* 'x86-platform-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/platform: Make atom/pmc_atom.c explicitly non-modular
  x86/hyperv: Mark the Hyper-V TSC as unstable
  x86/xen/time: Migrate to new set-state interface
  x86/uv/time: Migrate to new set-state interface
  x86/lguest/timer: Migrate to new set-state interface
  x86/pci/intel_mid_pci: Use proper constants for irq polarity
  x86/pci/intel_mid_pci: Make intel_mid_pci_ops static
  x86/pci/intel_mid_pci: Propagate actual return code
  x86/pci/intel_mid_pci: Work around for IRQ0 assignment
  x86/platform/iosf_mbi: Add Intel Tangier PCI id
  x86/platform/iosf_mbi: Source cleanup
  x86/platform/iosf_mbi: Remove NULL pointer checks for pci_dev_put()
  x86/platform/iosf_mbi: Check return value of debugfs_create properly
  x86/platform/iosf_mbi: Move to dedicated folder
  x86/platform/intel/pmc_atom: Move the PMC-Atom code to arch/x86/platform/atom
  x86/platform/intel/pmc_atom: Add Cherrytrail PMC interface
  x86/platform/intel/pmc_atom: Supply register mappings via PMC object
  x86/platform/intel/pmc_atom: Print index of device in loop
  x86/platform/intel/pmc_atom: Export accessors to PMC registers
parents 25525bea e971aa2c
@@ -52,20 +52,20 @@
 /* Quark available units */
 #define QRK_MBI_UNIT_HBA	0x00
 #define QRK_MBI_UNIT_HB		0x03
 #define QRK_MBI_UNIT_RMU	0x04
 #define QRK_MBI_UNIT_MM		0x05
 #define QRK_MBI_UNIT_MMESRAM	0x05
 #define QRK_MBI_UNIT_SOC	0x31

 /* Quark read/write opcodes */
 #define QRK_MBI_HBA_READ	0x10
 #define QRK_MBI_HBA_WRITE	0x11
 #define QRK_MBI_HB_READ		0x10
 #define QRK_MBI_HB_WRITE	0x11
 #define QRK_MBI_RMU_READ	0x10
 #define QRK_MBI_RMU_WRITE	0x11
 #define QRK_MBI_MM_READ		0x10
 #define QRK_MBI_MM_WRITE	0x11
 #define QRK_MBI_MMESRAM_READ	0x12
 #define QRK_MBI_MMESRAM_WRITE	0x13
......
@@ -18,6 +18,8 @@
 /* ValleyView Power Control Unit PCI Device ID */
 #define PCI_DEVICE_ID_VLV_PMC		0x0F1C
+/* CherryTrail Power Control Unit PCI Device ID */
+#define PCI_DEVICE_ID_CHT_PMC		0x229C

 /* PMC Memory mapped IO registers */
 #define PMC_BASE_ADDR_OFFSET		0x44
@@ -29,6 +31,10 @@
 #define PMC_FUNC_DIS			0x34
 #define PMC_FUNC_DIS_2			0x38

+/* CHT specific bits in FUNC_DIS2 register */
+#define BIT_FD_GMM			BIT(3)
+#define BIT_FD_ISH			BIT(4)
+
 /* S0ix wake event control */
 #define PMC_S0IX_WAKE_EN		0x3C
@@ -75,6 +81,21 @@
 #define PMC_PSS_BIT_USB			BIT(16)
 #define PMC_PSS_BIT_USB_SUS		BIT(17)

+/* CHT specific bits in PSS register */
+#define PMC_PSS_BIT_CHT_UFS		BIT(7)
+#define PMC_PSS_BIT_CHT_UXD		BIT(11)
+#define PMC_PSS_BIT_CHT_UXD_FD		BIT(12)
+#define PMC_PSS_BIT_CHT_UX_ENG		BIT(15)
+#define PMC_PSS_BIT_CHT_USB_SUS		BIT(16)
+#define PMC_PSS_BIT_CHT_GMM		BIT(17)
+#define PMC_PSS_BIT_CHT_ISH		BIT(18)
+#define PMC_PSS_BIT_CHT_DFX_MASTER	BIT(26)
+#define PMC_PSS_BIT_CHT_DFX_CLUSTER1	BIT(27)
+#define PMC_PSS_BIT_CHT_DFX_CLUSTER2	BIT(28)
+#define PMC_PSS_BIT_CHT_DFX_CLUSTER3	BIT(29)
+#define PMC_PSS_BIT_CHT_DFX_CLUSTER4	BIT(30)
+#define PMC_PSS_BIT_CHT_DFX_CLUSTER5	BIT(31)
+
 /* These registers reflect D3 status of functions */
 #define PMC_D3_STS_0			0xA0
@@ -117,6 +138,10 @@
 #define BIT_USH_SS_PHY			BIT(2)
 #define BIT_DFX				BIT(3)

+/* CHT specific bits in PMC_D3_STS_1 register */
+#define BIT_STS_GMM			BIT(1)
+#define BIT_STS_ISH			BIT(2)
+
 /* PMC I/O Registers */
 #define ACPI_BASE_ADDR_OFFSET		0x40
 #define ACPI_BASE_ADDR_MASK		0xFFFFFE00
@@ -126,4 +151,8 @@
 #define SLEEP_TYPE_MASK			0xFFFFECFF
 #define SLEEP_TYPE_S5			0x1C00
 #define SLEEP_ENABLE			0x2000
+
+extern int pmc_atom_read(int offset, u32 *value);
+extern int pmc_atom_write(int offset, u32 value);
+
 #endif /* PMC_ATOM_H */
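The two accessors exported at the end of this header are what other kernel code is meant to use instead of mapping the PMC MMIO window itself. A minimal, hedged sketch of a caller follows; the PMC_PSS offset is assumed to be defined elsewhere in this header, and the -ENODEV-before-probe behaviour is an assumption rather than something shown in this diff:

#include <linux/errno.h>
#include <linux/types.h>
#include <asm/pmc_atom.h>

/* Hypothetical helper, not part of this merge: query one power island. */
static bool example_usb_island_gated(void)
{
	u32 pss;
	int ret;

	/* Assumed to fail with -ENODEV until the PMC PCI device has probed. */
	ret = pmc_atom_read(PMC_PSS, &pss);
	if (ret)
		return false;

	/* Assumption: a set PSS bit means the island is power-gated. */
	return pss & PMC_PSS_BIT_USB;
}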
@@ -109,8 +109,6 @@ obj-$(CONFIG_EFI)			+= sysfb_efi.o
 obj-$(CONFIG_PERF_EVENTS)		+= perf_regs.o
 obj-$(CONFIG_TRACING)			+= tracepoint.o
-obj-$(CONFIG_IOSF_MBI)			+= iosf_mbi.o
-obj-$(CONFIG_PMC_ATOM)			+= pmc_atom.o

 ###
 # 64 bit specific files
......
@@ -188,6 +188,7 @@ static void __init ms_hyperv_init_platform(void)
 	machine_ops.shutdown = hv_machine_shutdown;
 	machine_ops.crash_shutdown = hv_machine_crash_shutdown;
+	mark_tsc_unstable("running on Hyper-V");
 }

 const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
......
@@ -985,23 +985,11 @@ static int lguest_clockevent_set_next_event(unsigned long delta,
 	return 0;
 }

-static void lguest_clockevent_set_mode(enum clock_event_mode mode,
-                                       struct clock_event_device *evt)
+static int lguest_clockevent_shutdown(struct clock_event_device *evt)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		/* A 0 argument shuts the clock down. */
-		hcall(LHCALL_SET_CLOCKEVENT, 0, 0, 0, 0);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		/* This is what we expect. */
-		break;
-	case CLOCK_EVT_MODE_PERIODIC:
-		BUG();
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-	}
+	/* A 0 argument shuts the clock down. */
+	hcall(LHCALL_SET_CLOCKEVENT, 0, 0, 0, 0);
+	return 0;
 }

 /* This describes our primitive timer chip. */
@@ -1009,7 +997,7 @@ static struct clock_event_device lguest_clockevent = {
 	.name			= "lguest",
 	.features		= CLOCK_EVT_FEAT_ONESHOT,
 	.set_next_event		= lguest_clockevent_set_next_event,
-	.set_mode		= lguest_clockevent_set_mode,
+	.set_state_shutdown	= lguest_clockevent_shutdown,
 	.rating			= INT_MAX,
 	.mult			= 1,
 	.shift			= 0,
......
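The lguest conversion above, like the UV and Xen conversions further down, follows the same clockevents migration: the single set_mode(mode, evt) callback is split into per-state callbacks such as set_state_shutdown and set_state_oneshot, each returning an int. A minimal illustrative sketch of the new-style driver hooks, using placeholder demo_* names that are not part of this merge:

#include <linux/clockchips.h>

static int demo_shutdown(struct clock_event_device *evt)
{
	/* quiesce the timer hardware here */
	return 0;
}

static int demo_set_next_event(unsigned long delta,
			       struct clock_event_device *evt)
{
	/* program a one-shot expiry 'delta' clock ticks from now */
	return 0;
}

static struct clock_event_device demo_clockevent = {
	.name			= "demo",
	.features		= CLOCK_EVT_FEAT_ONESHOT,
	.rating			= 100,
	/* replaces the CLOCK_EVT_MODE_SHUTDOWN/UNUSED cases of set_mode() */
	.set_state_shutdown	= demo_shutdown,
	.set_next_event		= demo_set_next_event,
};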
@@ -35,6 +35,9 @@
 #define PCIE_CAP_OFFSET	0x100

+/* Quirks for the listed devices */
+#define PCI_DEVICE_ID_INTEL_MRFL_MMC	0x1190
+
 /* Fixed BAR fields */
 #define PCIE_VNDR_CAP_ID_FIXED_BAR 0x00	/* Fixed BAR (TBD) */
 #define PCI_FIXED_BAR_0_SIZE	0x04
@@ -210,22 +213,41 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
 {
 	struct irq_alloc_info info;
 	int polarity;
+	int ret;

 	if (pci_has_managed_irq(dev))
 		return 0;

-	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER)
-		polarity = 0; /* active high */
-	else
-		polarity = 1; /* active low */
+	switch (intel_mid_identify_cpu()) {
+	case INTEL_MID_CPU_CHIP_TANGIER:
+		polarity = IOAPIC_POL_HIGH;
+
+		/* Special treatment for IRQ0 */
+		if (dev->irq == 0) {
+			/*
+			 * TNG has IRQ0 assigned to eMMC controller. But there
+			 * are also other devices with bogus PCI configuration
+			 * that have IRQ0 assigned. This check ensures that
+			 * eMMC gets it.
+			 */
+			if (dev->device != PCI_DEVICE_ID_INTEL_MRFL_MMC)
+				return -EBUSY;
+		}
+		break;
+	default:
+		polarity = IOAPIC_POL_LOW;
+		break;
+	}
+
 	ioapic_set_alloc_attr(&info, dev_to_node(&dev->dev), 1, polarity);

 	/*
 	 * MRST only have IOAPIC, the PCI irq lines are 1:1 mapped to
 	 * IOAPIC RTE entries, so we just enable RTE for the device.
 	 */
-	if (mp_map_gsi_to_irq(dev->irq, IOAPIC_MAP_ALLOC, &info) < 0)
-		return -EBUSY;
+	ret = mp_map_gsi_to_irq(dev->irq, IOAPIC_MAP_ALLOC, &info);
+	if (ret < 0)
+		return ret;

 	dev->irq_managed = 1;
@@ -244,7 +266,7 @@ static void intel_mid_pci_irq_disable(struct pci_dev *dev)
 	}
 }

-struct pci_ops intel_mid_pci_ops = {
+static struct pci_ops intel_mid_pci_ops = {
 	.read = pci_read,
 	.write = pci_write,
 };
......
@@ -5,6 +5,7 @@ obj-y	+= efi/
 obj-y	+= geode/
 obj-y	+= goldfish/
 obj-y	+= iris/
+obj-y	+= intel/
 obj-y	+= intel-mid/
 obj-y	+= intel-quark/
 obj-y	+= olpc/
......
-obj-$(CONFIG_PUNIT_ATOM_DEBUG)	+= punit_atom_debug.o
+obj-$(CONFIG_PMC_ATOM)		+= pmc_atom.o
+obj-$(CONFIG_PUNIT_ATOM_DEBUG)	+= punit_atom_debug.o

+obj-$(CONFIG_IOSF_MBI)		+= iosf_mbi.o
@@ -30,7 +30,9 @@
 #define PCI_DEVICE_ID_BAYTRAIL		0x0F00
 #define PCI_DEVICE_ID_BRASWELL		0x2280
 #define PCI_DEVICE_ID_QUARK_X1000	0x0958
+#define PCI_DEVICE_ID_TANGIER		0x1170

+static struct pci_dev *mbi_pdev;
 static DEFINE_SPINLOCK(iosf_mbi_lock);

 static inline u32 iosf_mbi_form_mcr(u8 op, u8 port, u8 offset)
@@ -38,8 +40,6 @@ static inline u32 iosf_mbi_form_mcr(u8 op, u8 port, u8 offset)
 	return (op << 24) | (port << 16) | (offset << 8) | MBI_ENABLE;
 }

-static struct pci_dev *mbi_pdev;	/* one mbi device */
-
 static int iosf_mbi_pci_read_mdr(u32 mcrx, u32 mcr, u32 *mdr)
 {
 	int result;
@@ -104,7 +104,7 @@ int iosf_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr)
 	unsigned long flags;
 	int ret;

-	/*Access to the GFX unit is handled by GPU code */
+	/* Access to the GFX unit is handled by GPU code */
 	if (port == BT_MBI_UNIT_GFX) {
 		WARN_ON(1);
 		return -EPERM;
@@ -127,7 +127,7 @@ int iosf_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr)
 	unsigned long flags;
 	int ret;

-	/*Access to the GFX unit is handled by GPU code */
+	/* Access to the GFX unit is handled by GPU code */
 	if (port == BT_MBI_UNIT_GFX) {
 		WARN_ON(1);
 		return -EPERM;
@@ -151,7 +151,7 @@ int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask)
 	unsigned long flags;
 	int ret;

-	/*Access to the GFX unit is handled by GPU code */
+	/* Access to the GFX unit is handled by GPU code */
 	if (port == BT_MBI_UNIT_GFX) {
 		WARN_ON(1);
 		return -EPERM;
@@ -240,17 +240,17 @@ static void iosf_sideband_debug_init(void)
 	/* mdr */
 	d = debugfs_create_x32("mdr", 0660, iosf_dbg, &dbg_mdr);
-	if (IS_ERR_OR_NULL(d))
+	if (!d)
 		goto cleanup;

 	/* mcrx */
-	debugfs_create_x32("mcrx", 0660, iosf_dbg, &dbg_mcrx);
-	if (IS_ERR_OR_NULL(d))
+	d = debugfs_create_x32("mcrx", 0660, iosf_dbg, &dbg_mcrx);
+	if (!d)
 		goto cleanup;

 	/* mcr - initiates mailbox tranaction */
-	debugfs_create_file("mcr", 0660, iosf_dbg, &dbg_mcr, &iosf_mcr_fops);
-	if (IS_ERR_OR_NULL(d))
+	d = debugfs_create_file("mcr", 0660, iosf_dbg, &dbg_mcr, &iosf_mcr_fops);
+	if (!d)
 		goto cleanup;

 	return;
@@ -292,6 +292,7 @@ static const struct pci_device_id iosf_mbi_pci_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BAYTRAIL) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRASWELL) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_QUARK_X1000) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_TANGIER) },
 	{ 0, },
 };
 MODULE_DEVICE_TABLE(pci, iosf_mbi_pci_ids);
@@ -314,10 +315,8 @@ static void __exit iosf_mbi_exit(void)
 	iosf_debugfs_remove();
 	pci_unregister_driver(&iosf_mbi_pci_driver);
-	if (mbi_pdev) {
-		pci_dev_put(mbi_pdev);
-		mbi_pdev = NULL;
-	}
+	pci_dev_put(mbi_pdev);
+	mbi_pdev = NULL;
 }

 module_init(iosf_mbi_init);
......
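With the Tangier PCI ID added above, the IOSF-SB mailbox helpers work on one more SoC. A minimal sketch of a sideband read through this driver; the port and opcode names are assumptions based on the existing asm/iosf_mbi.h, and the register offset is purely illustrative:

#include <linux/errno.h>
#include <linux/types.h>
#include <asm/iosf_mbi.h>

/* Hypothetical register offset, for illustration only. */
#define EXAMPLE_SB_REG	0x03

static int example_sideband_read(u32 *value)
{
	/* iosf_mbi_available() is false until the MBI PCI device probes */
	if (!iosf_mbi_available())
		return -ENODEV;

	/* assumed port/opcode pair for a register read on the PMC unit */
	return iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_PMC_READ,
			     EXAMPLE_SB_REG, value);
}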
@@ -32,8 +32,7 @@
 static cycle_t uv_read_rtc(struct clocksource *cs);
 static int uv_rtc_next_event(unsigned long, struct clock_event_device *);
-static void uv_rtc_timer_setup(enum clock_event_mode,
-			       struct clock_event_device *);
+static int uv_rtc_shutdown(struct clock_event_device *evt);

 static struct clocksource clocksource_uv = {
 	.name			= RTC_NAME,
@@ -44,14 +43,14 @@ static struct clocksource clocksource_uv = {
 };

 static struct clock_event_device clock_event_device_uv = {
 	.name			= RTC_NAME,
 	.features		= CLOCK_EVT_FEAT_ONESHOT,
 	.shift			= 20,
 	.rating			= 400,
 	.irq			= -1,
 	.set_next_event		= uv_rtc_next_event,
-	.set_mode		= uv_rtc_timer_setup,
+	.set_state_shutdown	= uv_rtc_shutdown,
 	.event_handler		= NULL,
 };

 static DEFINE_PER_CPU(struct clock_event_device, cpu_ced);
@@ -321,24 +320,14 @@ static int uv_rtc_next_event(unsigned long delta,
 }

 /*
- * Setup the RTC timer in oneshot mode
+ * Shutdown the RTC timer
  */
-static void uv_rtc_timer_setup(enum clock_event_mode mode,
-			       struct clock_event_device *evt)
+static int uv_rtc_shutdown(struct clock_event_device *evt)
 {
 	int ced_cpu = cpumask_first(evt->cpumask);

-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-	case CLOCK_EVT_MODE_ONESHOT:
-	case CLOCK_EVT_MODE_RESUME:
-		/* Nothing to do here yet */
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		uv_rtc_unset_timer(ced_cpu, 1);
-		break;
-	}
+	uv_rtc_unset_timer(ced_cpu, 1);
+
+	return 0;
 }

 static void uv_rtc_interrupt(void)
......
@@ -274,30 +274,18 @@ static s64 get_abs_timeout(unsigned long delta)
 	return xen_clocksource_read() + delta;
 }

-static void xen_timerop_set_mode(enum clock_event_mode mode,
-				 struct clock_event_device *evt)
+static int xen_timerop_shutdown(struct clock_event_device *evt)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		/* unsupported */
-		WARN_ON(1);
-		break;
-
-	case CLOCK_EVT_MODE_ONESHOT:
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		HYPERVISOR_set_timer_op(0);  /* cancel timeout */
-		break;
-	}
+	/* cancel timeout */
+	HYPERVISOR_set_timer_op(0);
+
+	return 0;
 }

 static int xen_timerop_set_next_event(unsigned long delta,
 				      struct clock_event_device *evt)
 {
-	WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);
+	WARN_ON(!clockevent_state_oneshot(evt));

 	if (HYPERVISOR_set_timer_op(get_abs_timeout(delta)) < 0)
 		BUG();
@@ -310,46 +298,39 @@ static int xen_timerop_set_next_event(unsigned long delta,
 }

 static const struct clock_event_device xen_timerop_clockevent = {
 	.name			= "xen",
 	.features		= CLOCK_EVT_FEAT_ONESHOT,

 	.max_delta_ns		= 0xffffffff,
 	.min_delta_ns		= TIMER_SLOP,

 	.mult			= 1,
 	.shift			= 0,
 	.rating			= 500,

-	.set_mode		= xen_timerop_set_mode,
+	.set_state_shutdown	= xen_timerop_shutdown,
 	.set_next_event		= xen_timerop_set_next_event,
 };

+static int xen_vcpuop_shutdown(struct clock_event_device *evt)
+{
+	int cpu = smp_processor_id();
+
+	if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, cpu, NULL) ||
+	    HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
+		BUG();
+
+	return 0;
+}

-static void xen_vcpuop_set_mode(enum clock_event_mode mode,
-				struct clock_event_device *evt)
+static int xen_vcpuop_set_oneshot(struct clock_event_device *evt)
 {
 	int cpu = smp_processor_id();

-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		WARN_ON(1);	/* unsupported */
-		break;
-
-	case CLOCK_EVT_MODE_ONESHOT:
-		if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
-			BUG();
-		break;
+	if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
+		BUG();

-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, cpu, NULL) ||
-		    HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
-			BUG();
-		break;
-
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-	}
+	return 0;
 }

 static int xen_vcpuop_set_next_event(unsigned long delta,
@@ -359,7 +340,7 @@ static int xen_vcpuop_set_next_event(unsigned long delta,
 	struct vcpu_set_singleshot_timer single;
 	int ret;

-	WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);
+	WARN_ON(!clockevent_state_oneshot(evt));

 	single.timeout_abs_ns = get_abs_timeout(delta);
 	single.flags = VCPU_SSHOTTMR_future;
@@ -382,7 +363,8 @@ static const struct clock_event_device xen_vcpuop_clockevent = {
 	.shift			= 0,
 	.rating			= 500,

-	.set_mode		= xen_vcpuop_set_mode,
+	.set_state_shutdown	= xen_vcpuop_shutdown,
+	.set_state_oneshot	= xen_vcpuop_set_oneshot,
 	.set_next_event		= xen_vcpuop_set_next_event,
 };
......