Commit abf8792d authored by Linus Torvalds

Merge branch 'msm-smp' of git://codeaurora.org/quic/kernel/davidb/linux-msm

* 'msm-smp' of git://codeaurora.org/quic/kernel/davidb/linux-msm:
  msm: add SMP support for msm
  msm: hotplug: support cpu hotplug on msm
  msm: timer: SMP timer support for msm
  msm: scm-boot: Support for setting cold/warm boot addresses
  msm: Secure Channel Manager (SCM) support
parents e0e736fc e14411da
@@ -40,11 +40,13 @@ config ARCH_MSM8X60
bool "MSM8X60"
select MACH_MSM8X60_SURF if (!MACH_MSM8X60_RUMI3 && !MACH_MSM8X60_SIM \
&& !MACH_MSM8X60_FFA)
select ARCH_MSM_SCORPIONMP
select ARM_GIC
select CPU_V7
select MSM_V2_TLMM
select MSM_GPIOMUX
select IOMMU_API
select MSM_SCM if SMP
endchoice
@@ -172,4 +174,7 @@ config MSM_V2_TLMM
config IOMMU_API
bool
config MSM_SCM
bool
endif
@@ -18,6 +18,10 @@ obj-$(CONFIG_MSM_PROC_COMM) += clock.o
obj-$(CONFIG_ARCH_QSD8X50) += sirc.o
obj-$(CONFIG_MSM_SMD) += smd.o smd_debug.o
obj-$(CONFIG_MSM_SMD) += last_radio_log.o
obj-$(CONFIG_MSM_SCM) += scm.o scm-boot.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
obj-$(CONFIG_SMP) += headsmp.o platsmp.o
obj-$(CONFIG_MACH_TROUT) += board-trout.o board-trout-gpio.o board-trout-mmc.o devices-msm7x00.o
obj-$(CONFIG_MACH_TROUT) += board-trout.o board-trout-gpio.o board-trout-mmc.o board-trout-panel.o devices-msm7x00.o
...
/*
* linux/arch/arm/mach-realview/headsmp.S
*
* Copyright (c) 2003 ARM Limited
* All Rights Reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <linux/init.h>
__INIT
/*
* MSM specific entry point for secondary CPUs. This provides
* a "holding pen" into which all secondary cores are held until we're
* ready for them to initialise.
*/
ENTRY(msm_secondary_startup)
mrc p15, 0, r0, c0, c0, 5
and r0, r0, #15
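/*
 * r0 now holds this core's CPU id (low bits of the MPIDR read above).
 * The next four instructions turn the link-time address of pen_release
 * (stored at 1f below) into its current physical address, since we are
 * running here with the MMU off.
 */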
adr r4, 1f
ldmia r4, {r5, r6}
sub r4, r4, r5
add r6, r6, r4
pen: ldr r7, [r6]
cmp r7, r0
bne pen
/*
* we've been released from the holding pen: secondary_stack
* should now contain the SVC stack for this core
*/
b secondary_startup
.align
1: .long .
.long pen_release
/*
* Copyright (C) 2002 ARM Ltd.
* All Rights Reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/smp.h>
#include <asm/cacheflush.h>
extern volatile int pen_release;
static inline void cpu_enter_lowpower(void)
{
/* Just flush the cache. Changing the coherency is not yet
* available on msm. */
flush_cache_all();
}
static inline void cpu_leave_lowpower(void)
{
}
static inline void platform_do_lowpower(unsigned int cpu)
{
/* Just enter wfi for now. TODO: Properly shut off the cpu. */
for (;;) {
/*
* here's the WFI
*/
asm("wfi"
:
:
: "memory", "cc");
if (pen_release == cpu) {
/*
* OK, proper wakeup, we're done
*/
break;
}
/*
* getting here, means that we have come out of WFI without
* having been woken up - this shouldn't happen
*
* The trouble is, letting people know about this is not really
* possible, since we are currently running incoherently, and
* therefore cannot safely call printk() or anything else
*/
pr_debug("CPU%u: spurious wakeup call\n", cpu);
}
}
int platform_cpu_kill(unsigned int cpu)
{
return 1;
}
/*
* platform-specific code to shutdown a CPU
*
* Called with IRQs disabled
*/
void platform_cpu_die(unsigned int cpu)
{
/*
* we're ready for shutdown now, so do it
*/
cpu_enter_lowpower();
platform_do_lowpower(cpu);
/*
* bring this CPU back into the world of cache
* coherency, and then restore interrupts
*/
cpu_leave_lowpower();
}
int platform_cpu_disable(unsigned int cpu)
{
/*
* we don't allow CPU 0 to be shutdown (it is still too special
* e.g. clock tick interrupts)
*/
return cpu == 0 ? -EPERM : 0;
}
@@ -60,7 +60,11 @@
#define MSM_TMR_BASE IOMEM(0xF0200000)
#define MSM_TMR_PHYS 0x02000000
-#define MSM_TMR_SIZE (SZ_1M)
+#define MSM_TMR_SIZE SZ_4K
#define MSM_TMR0_BASE IOMEM(0xF0201000)
#define MSM_TMR0_PHYS 0x02040000
#define MSM_TMR0_SIZE SZ_4K
#define MSM_GPT_BASE (MSM_TMR_BASE + 0x4)
#define MSM_DGT_BASE (MSM_TMR_BASE + 0x24)
...
@@ -105,6 +105,7 @@ static struct map_desc msm8x60_io_desc[] __initdata = {
MSM_DEVICE(QGIC_DIST),
MSM_DEVICE(QGIC_CPU),
MSM_DEVICE(TMR),
MSM_DEVICE(TMR0),
MSM_DEVICE(ACC),
MSM_DEVICE(GCC),
};
...
/*
* Copyright (C) 2002 ARM Ltd.
* All Rights Reserved
* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <asm/hardware/gic.h>
#include <asm/cacheflush.h>
#include <asm/mach-types.h>
#include <mach/msm_iomap.h>
#include "scm-boot.h"
#define VDD_SC1_ARRAY_CLAMP_GFS_CTL 0x15A0
#define SCSS_CPU1CORE_RESET 0xD80
#define SCSS_DBG_STATUS_CORE_PWRDUP 0xE64
/* Mask for edge trigger PPIs except AVS_SVICINT and AVS_SVICINTSWDONE */
#define GIC_PPI_EDGE_MASK 0xFFFFD7FF
extern void msm_secondary_startup(void);
/*
* control for which core is the next to come out of the secondary
* boot "holding pen".
*/
volatile int pen_release = -1;
static DEFINE_SPINLOCK(boot_lock);
void __cpuinit platform_secondary_init(unsigned int cpu)
{
/* Configure edge-triggered PPIs */
writel(GIC_PPI_EDGE_MASK, MSM_QGIC_DIST_BASE + GIC_DIST_CONFIG + 4);
/*
* if any interrupts are already enabled for the primary
* core (e.g. timer irq), then they will not have been enabled
* for us: do so
*/
gic_secondary_init(0);
/*
* let the primary processor know we're out of the
* pen, then head off into the C entry point
*/
pen_release = -1;
smp_wmb();
/*
* Synchronise with the boot thread.
*/
spin_lock(&boot_lock);
spin_unlock(&boot_lock);
}
static __cpuinit void prepare_cold_cpu(unsigned int cpu)
{
int ret;
ret = scm_set_boot_addr(virt_to_phys(msm_secondary_startup),
SCM_FLAG_COLDBOOT_CPU1);
if (ret == 0) {
void *sc1_base_ptr;
sc1_base_ptr = ioremap_nocache(0x00902000, SZ_4K*2);
if (sc1_base_ptr) {
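/*
 * Presumably (judging by the register names defined above): release
 * the SC1 array clamp, take the CPU1 core out of reset, and mark the
 * core powered up.
 */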
writel(0, sc1_base_ptr + VDD_SC1_ARRAY_CLAMP_GFS_CTL);
writel(0, sc1_base_ptr + SCSS_CPU1CORE_RESET);
writel(3, sc1_base_ptr + SCSS_DBG_STATUS_CORE_PWRDUP);
iounmap(sc1_base_ptr);
}
} else
printk(KERN_DEBUG "Failed to set secondary core boot "
"address\n");
}
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
unsigned long timeout;
static int cold_boot_done;
/* Only need to bring cpu out of reset this way once */
if (cold_boot_done == false) {
prepare_cold_cpu(cpu);
cold_boot_done = true;
}
/*
* set synchronisation state between this boot processor
* and the secondary one
*/
spin_lock(&boot_lock);
/*
* The secondary processor is waiting to be released from
* the holding pen - release it, then wait for it to flag
* that it has been released by resetting pen_release.
*
* Note that "pen_release" is the hardware CPU ID, whereas
* "cpu" is Linux's internal ID.
*/
pen_release = cpu;
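/*
 * Push the new pen_release value out to main memory (L1 flush plus
 * outer-cache clean) so the secondary core, whose caches may still be
 * off, reads the updated value.
 */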
__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
/*
* Send the secondary CPU a soft interrupt, thereby causing
* the boot monitor to read the system wide flags register,
* and branch to the address found there.
*/
smp_cross_call(cpumask_of(cpu), 1);
timeout = jiffies + (1 * HZ);
while (time_before(jiffies, timeout)) {
smp_rmb();
if (pen_release == -1)
break;
udelay(10);
}
/*
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
spin_unlock(&boot_lock);
return pen_release != -1 ? -ENOSYS : 0;
}
/*
* Initialise the CPU possible map early - this describes the CPUs
* which may be present or become present in the system. The msm8x60
* does not support the ARM SCU, so just set the possible cpu mask to
* NR_CPUS.
*/
void __init smp_init_cpus(void)
{
unsigned int i;
for (i = 0; i < NR_CPUS; i++)
set_cpu_possible(i, true);
}
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
int i;
/*
* Initialise the present map, which describes the set of CPUs
* actually populated at the present time.
*/
for (i = 0; i < max_cpus; i++)
set_cpu_present(i, true);
}
/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include "scm.h"
#include "scm-boot.h"
/*
* Set the cold/warm boot address for one of the CPU cores.
*/
int scm_set_boot_addr(phys_addr_t addr, int flags)
{
struct {
unsigned int flags;
phys_addr_t addr;
} cmd;
cmd.addr = addr;
cmd.flags = flags;
return scm_call(SCM_SVC_BOOT, SCM_BOOT_ADDR,
&cmd, sizeof(cmd), NULL, 0);
}
EXPORT_SYMBOL(scm_set_boot_addr);
/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __MACH_SCM_BOOT_H
#define __MACH_SCM_BOOT_H
#define SCM_BOOT_ADDR 0x1
#define SCM_FLAG_COLDBOOT_CPU1 0x1
#define SCM_FLAG_WARMBOOT_CPU1 0x2
#define SCM_FLAG_WARMBOOT_CPU0 0x4
int scm_set_boot_addr(phys_addr_t addr, int flags);
#endif
/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <asm/cacheflush.h>
#include "scm.h"
/* Cache line size for msm8x60 */
#define CACHELINESIZE 32
#define SCM_ENOMEM -5
#define SCM_EOPNOTSUPP -4
#define SCM_EINVAL_ADDR -3
#define SCM_EINVAL_ARG -2
#define SCM_ERROR -1
#define SCM_INTERRUPTED 1
static DEFINE_MUTEX(scm_lock);
/**
* struct scm_command - one SCM command buffer
* @len: total available memory for command and response
* @buf_offset: start of command buffer
* @resp_hdr_offset: start of response buffer
* @id: command to be executed
* @buf: buffer returned from scm_get_command_buffer()
*
* An SCM command is laid out in memory as follows:
*
* ------------------- <--- struct scm_command
* | command header |
* ------------------- <--- scm_get_command_buffer()
* | command buffer |
* ------------------- <--- struct scm_response and
* | response header | scm_command_to_response()
* ------------------- <--- scm_get_response_buffer()
* | response buffer |
* -------------------
*
* There can be arbitrary padding between the headers and buffers so
* you should always use the appropriate scm_get_*_buffer() routines
* to access the buffers in a safe manner.
*/
struct scm_command {
u32 len;
u32 buf_offset;
u32 resp_hdr_offset;
u32 id;
u32 buf[0];
};
/**
* struct scm_response - one SCM response buffer
* @len: total available memory for response
* @buf_offset: start of response data relative to start of scm_response
* @is_complete: indicates if the command has finished processing
*/
struct scm_response {
u32 len;
u32 buf_offset;
u32 is_complete;
};
/**
* alloc_scm_command() - Allocate an SCM command
* @cmd_size: size of the command buffer
* @resp_size: size of the response buffer
*
* Allocate an SCM command, including enough room for the command
* and response headers as well as the command and response buffers.
*
* Returns a valid &scm_command on success or %NULL if the allocation fails.
*/
static struct scm_command *alloc_scm_command(size_t cmd_size, size_t resp_size)
{
struct scm_command *cmd;
size_t len = sizeof(*cmd) + sizeof(struct scm_response) + cmd_size +
resp_size;
cmd = kzalloc(PAGE_ALIGN(len), GFP_KERNEL);
if (cmd) {
cmd->len = len;
cmd->buf_offset = offsetof(struct scm_command, buf);
cmd->resp_hdr_offset = cmd->buf_offset + cmd_size;
}
return cmd;
}
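/*
 * Worked example (illustration only, not part of the patch; the numbers
 * assume 4-byte u32 fields and the struct layouts above): for a
 * hypothetical alloc_scm_command(8, 4),
 *
 *   len             = 16 + 12 + 8 + 4 = 40   (allocated as PAGE_ALIGN(40))
 *   buf_offset      = offsetof(struct scm_command, buf) = 16
 *   resp_hdr_offset = 16 + 8 = 24
 *
 * so scm_get_command_buffer() points 16 bytes into the allocation and
 * scm_command_to_response() points 24 bytes in; scm_get_response_buffer()
 * then adds the response's own buf_offset, which is presumably filled in
 * by the secure side.
 */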
/**
* free_scm_command() - Free an SCM command
* @cmd: command to free
*
* Free an SCM command.
*/
static inline void free_scm_command(struct scm_command *cmd)
{
kfree(cmd);
}
/**
* scm_command_to_response() - Get a pointer to a scm_response
* @cmd: command
*
* Returns a pointer to a response for a command.
*/
static inline struct scm_response *scm_command_to_response(
const struct scm_command *cmd)
{
return (void *)cmd + cmd->resp_hdr_offset;
}
/**
* scm_get_command_buffer() - Get a pointer to a command buffer
* @cmd: command
*
* Returns a pointer to the command buffer of a command.
*/
static inline void *scm_get_command_buffer(const struct scm_command *cmd)
{
return (void *)cmd->buf;
}
/**
* scm_get_response_buffer() - Get a pointer to a response buffer
* @rsp: response
*
* Returns a pointer to a response buffer of a response.
*/
static inline void *scm_get_response_buffer(const struct scm_response *rsp)
{
return (void *)rsp + rsp->buf_offset;
}
static int scm_remap_error(int err)
{
switch (err) {
case SCM_ERROR:
return -EIO;
case SCM_EINVAL_ADDR:
case SCM_EINVAL_ARG:
return -EINVAL;
case SCM_EOPNOTSUPP:
return -EOPNOTSUPP;
case SCM_ENOMEM:
return -ENOMEM;
}
return -EINVAL;
}
static u32 smc(u32 cmd_addr)
{
int context_id;
register u32 r0 asm("r0") = 1;
register u32 r1 asm("r1") = (u32)&context_id;
register u32 r2 asm("r2") = cmd_addr;
asm(
__asmeq("%0", "r0")
__asmeq("%1", "r0")
__asmeq("%2", "r1")
__asmeq("%3", "r2")
"smc #0 @ switch to secure world\n"
: "=r" (r0)
: "r" (r0), "r" (r1), "r" (r2)
: "r3");
return r0;
}
static int __scm_call(const struct scm_command *cmd)
{
int ret;
u32 cmd_addr = virt_to_phys(cmd);
/*
* Flush the entire cache here so callers don't have to remember
* to flush the cache when passing physical addresses to the secure
* side in the buffer.
*/
flush_cache_all();
do {
ret = smc(cmd_addr);
if (ret < 0) {
ret = scm_remap_error(ret);
break;
}
} while (ret == SCM_INTERRUPTED);
return ret;
}
/**
* scm_call() - Send an SCM command
* @svc_id: service identifier
* @cmd_id: command identifier
* @cmd_buf: command buffer
* @cmd_len: length of the command buffer
* @resp_buf: response buffer
* @resp_len: length of the response buffer
*
* Sends a command to the SCM and waits for the command to finish processing.
*/
int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, size_t cmd_len,
void *resp_buf, size_t resp_len)
{
int ret;
struct scm_command *cmd;
struct scm_response *rsp;
cmd = alloc_scm_command(cmd_len, resp_len);
if (!cmd)
return -ENOMEM;
cmd->id = (svc_id << 10) | cmd_id;
if (cmd_buf)
memcpy(scm_get_command_buffer(cmd), cmd_buf, cmd_len);
mutex_lock(&scm_lock);
ret = __scm_call(cmd);
mutex_unlock(&scm_lock);
if (ret)
goto out;
rsp = scm_command_to_response(cmd);
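/*
 * Invalidate the cache lines covering the response header and response
 * buffer (mcr p15, 0, Rd, c7, c6, 1 invalidates a D-cache line by MVA)
 * so that updates written by the secure side become visible, and poll
 * until it flags the response complete.
 */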
do {
u32 start = (u32)rsp;
u32 end = (u32)scm_get_response_buffer(rsp) + resp_len;
start &= ~(CACHELINESIZE - 1);
while (start < end) {
asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start)
: "memory");
start += CACHELINESIZE;
}
} while (!rsp->is_complete);
if (resp_buf)
memcpy(resp_buf, scm_get_response_buffer(rsp), resp_len);
out:
free_scm_command(cmd);
return ret;
}
EXPORT_SYMBOL(scm_call);
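/*
 * Usage note (illustration only): scm_set_boot_addr() earlier in this
 * patch is the typical caller -- it packs its arguments into a flat
 * struct, passes it as cmd_buf, and requests no response. With
 * SCM_SVC_BOOT/SCM_BOOT_ADDR the packed id above works out to
 * (0x1 << 10) | 0x1 == 0x401.
 */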
u32 scm_get_version(void)
{
int context_id;
static u32 version = -1;
register u32 r0 asm("r0") = 0x1 << 8;
register u32 r1 asm("r1") = (u32)&context_id;
if (version != -1)
return version;
mutex_lock(&scm_lock);
asm(
__asmeq("%0", "r1")
__asmeq("%1", "r0")
__asmeq("%2", "r1")
"smc #0 @ switch to secure world\n"
: "=r" (r1)
: "r" (r0), "r" (r1)
: "r2", "r3");
version = r1;
mutex_unlock(&scm_lock);
return version;
}
EXPORT_SYMBOL(scm_get_version);
/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __MACH_SCM_H
#define __MACH_SCM_H
#define SCM_SVC_BOOT 0x1
#define SCM_SVC_PIL 0x2
extern int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, size_t cmd_len,
void *resp_buf, size_t resp_len);
#define SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
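/* For example, SCM_VERSION(1, 1) evaluates to 0x10001. */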
extern u32 scm_get_version(void);
#endif
@@ -47,6 +47,19 @@ enum {
#define GPT_HZ 32768
enum timer_location {
LOCAL_TIMER = 0,
GLOBAL_TIMER = 1,
};
#ifdef MSM_TMR0_BASE
#define MSM_TMR_GLOBAL (MSM_TMR0_BASE - MSM_TMR_BASE)
#else
#define MSM_TMR_GLOBAL 0
#endif
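/*
 * Note: with the 8x60 iomap values added in this patch (MSM_TMR_BASE at
 * 0xF0200000, MSM_TMR0_BASE at 0xF0201000), MSM_TMR_GLOBAL works out to
 * 0x1000, so each global_counter set up below is simply the corresponding
 * local counter register plus a 4K offset into the MSM_TMR0 mapping.
 */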
#define MSM_GLOBAL_TIMER MSM_CLOCK_DGT
#if defined(CONFIG_ARCH_QSD8X50)
#define DGT_HZ (19200000 / 4) /* 19.2 MHz / 4 by default */
#define MSM_DGT_SHIFT (0)
@@ -65,49 +78,67 @@ struct msm_clock {
void __iomem *regbase;
uint32_t freq;
uint32_t shift;
void __iomem *global_counter;
void __iomem *local_counter;
};
enum {
MSM_CLOCK_GPT,
MSM_CLOCK_DGT,
NR_TIMERS,
};
static struct msm_clock msm_clocks[];
static struct clock_event_device *local_clock_event;
static irqreturn_t msm_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
if (smp_processor_id() != 0)
evt = local_clock_event;
if (evt->event_handler == NULL)
return IRQ_HANDLED;
evt->event_handler(evt);
return IRQ_HANDLED;
}
-static cycle_t msm_gpt_read(struct clocksource *cs)
+static cycle_t msm_read_timer_count(struct clocksource *cs)
{
-return readl(MSM_GPT_BASE + TIMER_COUNT_VAL);
+struct msm_clock *clk = container_of(cs, struct msm_clock, clocksource);
return readl(clk->global_counter);
}
-static cycle_t msm_dgt_read(struct clocksource *cs)
+static struct msm_clock *clockevent_to_clock(struct clock_event_device *evt)
{
-return readl(MSM_DGT_BASE + TIMER_COUNT_VAL) >> MSM_DGT_SHIFT;
+#ifdef CONFIG_SMP
int i;
for (i = 0; i < NR_TIMERS; i++)
if (evt == &(msm_clocks[i].clockevent))
return &msm_clocks[i];
return &msm_clocks[MSM_GLOBAL_TIMER];
#else
return container_of(evt, struct msm_clock, clockevent);
#endif
}
static int msm_timer_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
-struct msm_clock *clock = container_of(evt, struct msm_clock, clockevent);
+struct msm_clock *clock = clockevent_to_clock(evt);
-uint32_t now = readl(clock->regbase + TIMER_COUNT_VAL);
+uint32_t now = readl(clock->local_counter);
uint32_t alarm = now + (cycles << clock->shift);
int late;
writel(alarm, clock->regbase + TIMER_MATCH_VAL);
now = readl(clock->regbase + TIMER_COUNT_VAL);
late = now - alarm;
if (late >= (-2 << clock->shift) && late < DGT_HZ*5) {
printk(KERN_NOTICE "msm_timer_set_next_event(%lu) clock %s, "
"alarm already expired, now %x, alarm %x, late %d\n",
cycles, clock->clockevent.name, now, alarm, late);
return -ETIME;
}
return 0;
}
static void msm_timer_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
-struct msm_clock *clock = container_of(evt, struct msm_clock, clockevent);
+struct msm_clock *clock = clockevent_to_clock(evt);
switch (mode) {
case CLOCK_EVT_MODE_RESUME:
case CLOCK_EVT_MODE_PERIODIC:
@@ -123,7 +154,7 @@ static void msm_timer_set_mode(enum clock_event_mode mode,
}
static struct msm_clock msm_clocks[] = {
-{
+[MSM_CLOCK_GPT] = {
.clockevent = {
.name = "gp_timer",
.features = CLOCK_EVT_FEAT_ONESHOT,
@@ -135,7 +166,7 @@ static struct msm_clock msm_clocks[] = {
.clocksource = {
.name = "gp_timer",
.rating = 200,
-.read = msm_gpt_read,
+.read = msm_read_timer_count,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
},
@@ -147,9 +178,12 @@ static struct msm_clock msm_clocks[] = {
.irq = INT_GP_TIMER_EXP
},
.regbase = MSM_GPT_BASE,
-.freq = GPT_HZ
+.freq = GPT_HZ,
.local_counter = MSM_GPT_BASE + TIMER_COUNT_VAL,
.global_counter = MSM_GPT_BASE + TIMER_COUNT_VAL +
MSM_TMR_GLOBAL,
},
-{
+[MSM_CLOCK_DGT] = {
.clockevent = {
.name = "dg_timer",
.features = CLOCK_EVT_FEAT_ONESHOT,
@@ -161,7 +195,7 @@ static struct msm_clock msm_clocks[] = {
.clocksource = {
.name = "dg_timer",
.rating = 300,
-.read = msm_dgt_read,
+.read = msm_read_timer_count,
.mask = CLOCKSOURCE_MASK((32 - MSM_DGT_SHIFT)),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
},
@@ -174,7 +208,10 @@ static struct msm_clock msm_clocks[] = {
},
.regbase = MSM_DGT_BASE,
.freq = DGT_HZ >> MSM_DGT_SHIFT,
-.shift = MSM_DGT_SHIFT
+.shift = MSM_DGT_SHIFT,
.local_counter = MSM_DGT_BASE + TIMER_COUNT_VAL,
.global_counter = MSM_DGT_BASE + TIMER_COUNT_VAL +
MSM_TMR_GLOBAL,
}
};
@@ -183,7 +220,7 @@ static void __init msm_timer_init(void)
int i;
int res;
-#ifdef CONFIG_ARCH_MSM8X60
+#ifdef CONFIG_ARCH_MSM_SCORPIONMP
writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL);
#endif
@@ -217,6 +254,48 @@ static void __init msm_timer_init(void)
}
}
#ifdef CONFIG_SMP
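/*
 * Per-cpu tick support: each secondary core registers its own
 * clock_event_device here, driven by the MSM_GLOBAL_TIMER bank (the DGT
 * in this patch) and delivered through a PPI enabled via gic_enable_ppi().
 * CPU0 keeps the clock_event already registered in msm_timer_init().
 */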
void __cpuinit local_timer_setup(struct clock_event_device *evt)
{
struct msm_clock *clock = &msm_clocks[MSM_GLOBAL_TIMER];
/* Use existing clock_event for cpu 0 */
if (!smp_processor_id())
return;
writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL);
if (!local_clock_event) {
writel(0, clock->regbase + TIMER_ENABLE);
writel(0, clock->regbase + TIMER_CLEAR);
writel(~0, clock->regbase + TIMER_MATCH_VAL);
}
evt->irq = clock->irq.irq;
evt->name = "local_timer";
evt->features = CLOCK_EVT_FEAT_ONESHOT;
evt->rating = clock->clockevent.rating;
evt->set_mode = msm_timer_set_mode;
evt->set_next_event = msm_timer_set_next_event;
evt->shift = clock->clockevent.shift;
evt->mult = div_sc(clock->freq, NSEC_PER_SEC, evt->shift);
evt->max_delta_ns =
clockevent_delta2ns(0xf0000000 >> clock->shift, evt);
evt->min_delta_ns = clockevent_delta2ns(4, evt);
local_clock_event = evt;
gic_enable_ppi(clock->irq.irq);
clockevents_register_device(evt);
}
inline int local_timer_ack(void)
{
return 1;
}
#endif
struct sys_timer msm_timer = {
.init = msm_timer_init
};