Commit 1695a925 authored by David Mosberger

ia64: Prepare for GCC v3.4. Sync with 2.5.69.

parent 3b153731
 #define MACHVEC_PLATFORM_NAME		dig
+#define MACHVEC_PLATFORM_HEADER		<asm/machvec_dig.h>
 #include <asm/machvec_init.h>

@@ -234,7 +234,12 @@ static int reserve_sba_gart = 1;
 static u64 prefetch_spill_page;
 #endif

-#define GET_IOC(dev)	((struct ioc *) PCI_CONTROLLER(dev)->iommu)
+#ifdef CONFIG_PCI
+# define GET_IOC(dev)	(((dev)->bus == &pci_bus_type) \
+			 ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL)
+#else
+# define GET_IOC(dev)	NULL
+#endif

 /*
 ** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
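Aside: with sba_map_single() & co. now taking a generic struct device, GET_IOC must verify the device really sits on the PCI bus before downcasting to a pci_dev. A minimal userspace sketch of that bus-check-before-downcast pattern (the struct layouts are stand-ins, not the kernel's real definitions):

    /* Sketch: only downcast a generic device after checking its bus. */
    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
    	((type *)((char *)(ptr) - offsetof(type, member)))

    struct bus_type { const char *name; };
    struct device  { struct bus_type *bus; };
    struct pci_dev { int devfn; struct device dev; };

    static struct bus_type pci_bus_type = { "pci" };

    #define to_pci_dev(d) container_of(d, struct pci_dev, dev)

    /* Same guard the new GET_IOC performs before touching PCI_CONTROLLER(). */
    static struct pci_dev *pci_dev_or_null(struct device *d)
    {
    	return (d && d->bus == &pci_bus_type) ? to_pci_dev(d) : NULL;
    }

    int main(void)
    {
    	struct pci_dev pdev = { 0x42, { &pci_bus_type } };
    	struct device other = { NULL };

    	printf("pci device:     %p\n", (void *)pci_dev_or_null(&pdev.dev));
    	printf("non-pci device: %p\n", (void *)pci_dev_or_null(&other));
    	return 0;
    }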
@@ -752,12 +757,12 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
  * @dev: instance of PCI owned by the driver that's asking.
  * @addr:  driver buffer to map.
  * @size:  number of bytes to map in driver buffer.
- * @direction:  R/W or both.
+ * @dir:  R/W or both.
  *
  * See Documentation/DMA-mapping.txt
  */
 dma_addr_t
-sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
+sba_map_single(struct device *dev, void *addr, size_t size, int dir)
 {
 	struct ioc *ioc;
 	unsigned long flags;
@@ -776,7 +781,7 @@ sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
 	/*
 	** Check if the PCI device can DMA to ptr... if so, just return ptr
 	*/
-	if ((pci_addr & ~dev->dma_mask) == 0) {
+	if (dev && dev->dma_mask && (pci_addr & ~*dev->dma_mask) == 0) {
 		/*
 		** Device is bit capable of DMA'ing to the buffer...
 		** just return the PCI address of ptr
@@ -787,7 +792,7 @@ sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
 		spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
 		DBG_BYPASS("sba_map_single() bypass mask/addr: 0x%lx/0x%lx\n",
-			   dev->dma_mask, pci_addr);
+			   *dev->dma_mask, pci_addr);
 		return pci_addr;
 	}
 #endif
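Aside: on struct pci_dev, dma_mask was a plain u64; on struct device it is a u64 pointer, so the bypass test must null-check both the device and the mask before dereferencing. A compilable sketch of the test (types and values are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    struct device { uint64_t *dma_mask; };

    /* A buffer can bypass the IOMMU only if every set address bit is
     * covered by the device's DMA mask. */
    static int can_bypass(const struct device *dev, uint64_t bus_addr)
    {
    	return dev && dev->dma_mask && (bus_addr & ~*dev->dma_mask) == 0;
    }

    int main(void)
    {
    	uint64_t mask32 = 0xFFFFFFFFull;
    	struct device dev = { &mask32 };
    	struct device no_mask = { 0 };

    	printf("%d\n", can_bypass(&dev, 0x80000000ull));  /* 1: fits in 32 bits */
    	printf("%d\n", can_bypass(&dev, 0x100000000ull)); /* 0: bit 32 set */
    	printf("%d\n", can_bypass(&no_mask, 0x1000ull));  /* 0: no mask, no bypass */
    	return 0;
    }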
@@ -845,12 +850,11 @@ sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
  * @dev: instance of PCI owned by the driver that's asking.
  * @iova:  IOVA of driver buffer previously mapped.
  * @size:  number of bytes mapped in driver buffer.
- * @direction:  R/W or both.
+ * @dir:  R/W or both.
  *
  * See Documentation/DMA-mapping.txt
  */
-void sba_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size,
-		      int direction)
+void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
 {
 	struct ioc *ioc;
 #if DELAYED_RESOURCE_CNT > 0
@@ -875,7 +879,7 @@ void sba_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size,
 		DBG_BYPASS("sba_unmap_single() bypass addr: 0x%lx\n", iova);
 #ifdef ENABLE_MARK_CLEAN
-		if (direction == PCI_DMA_FROMDEVICE) {
+		if (dir == DMA_FROM_DEVICE) {
 			mark_clean(phys_to_virt(iova), size);
 		}
 #endif
@@ -917,7 +921,7 @@ void sba_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size,
 	READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
 #endif /* DELAYED_RESOURCE_CNT == 0 */
 #ifdef ENABLE_MARK_CLEAN
-	if (direction == PCI_DMA_FROMDEVICE) {
+	if (dir == DMA_FROM_DEVICE) {
 		u32 iovp = (u32) SBA_IOVP(ioc,iova);
 		int off = PDIR_INDEX(iovp);
 		void *addr;
@@ -962,31 +966,25 @@ void sba_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size,
  * See Documentation/DMA-mapping.txt
  */
 void *
-sba_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
+sba_alloc_coherent (struct device *hwdev, size_t size, dma_addr_t *dma_handle, int flags)
 {
 	struct ioc *ioc;
-	void *ret;
-
-	if (!hwdev) {
-		/* only support PCI */
-		*dma_handle = 0;
-		return 0;
-	}
-
-	ret = (void *) __get_free_pages(GFP_ATOMIC, get_order(size));
+	void *addr;

-	if (ret) {
-		memset(ret, 0, size);
-		/*
-		 * REVISIT: if sba_map_single starts needing more
-		 * than dma_mask from the device, this needs to be
-		 * updated.
-		 */
-		ioc = GET_IOC(hwdev);
-		*dma_handle = sba_map_single(ioc->sac_only_dev, ret, size, 0);
-	}
+	addr = (void *) __get_free_pages(flags, get_order(size));
+	if (!addr)
+		return NULL;

-	return ret;
+	/*
+	 * REVISIT: if sba_map_single starts needing more than dma_mask from the
+	 * device, this needs to be updated.
+	 */
+	ioc = GET_IOC(hwdev);
+	ASSERT(ioc);
+	*dma_handle = sba_map_single(&ioc->sac_only_dev->dev, addr, size, 0);
+
+	memset(addr, 0, size);
+	return addr;
 }

@@ -999,8 +997,7 @@ sba_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
  *
  * See Documentation/DMA-mapping.txt
  */
-void sba_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr,
-			 dma_addr_t dma_handle)
+void sba_free_coherent (struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
 {
 	sba_unmap_single(hwdev, dma_handle, size, 0);
 	free_pages((unsigned long) vaddr, get_order(size));
@@ -1249,11 +1246,11 @@ sba_coalesce_chunks( struct ioc *ioc,
  * @dev: instance of PCI owned by the driver that's asking.
  * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
- * @direction:  R/W or both.
+ * @dir:  R/W or both.
  *
 * See Documentation/DMA-mapping.txt
 */
-int sba_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents, int direction)
+int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int dir)
 {
 	struct ioc *ioc;
 	int coalesced, filled = 0;
@@ -1267,7 +1264,7 @@ int sba_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents, int direction)
 	ASSERT(ioc);

 #ifdef ALLOW_IOV_BYPASS
-	if (dev->dma_mask >= ioc->dma_mask) {
+	if (dev && dev->dma_mask && (ioc->dma_mask & ~*dev->dma_mask) == 0) {
 		for (sg = sglist ; filled < nents ; filled++, sg++){
 			sg->dma_length = sg->length;
 			sg->dma_address = virt_to_phys(sba_sg_address(sg));
@@ -1283,9 +1280,8 @@ int sba_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents, int direction)
 	/* Fast path single entry scatterlists. */
 	if (nents == 1) {
 		sglist->dma_length = sglist->length;
-		sglist->dma_address = sba_map_single(dev,
-						     sba_sg_address(sglist),
-						     sglist->length, direction);
+		sglist->dma_address = sba_map_single(dev, sba_sg_address(sglist), sglist->length,
+						     dir);
 #ifdef CONFIG_PROC_FS
 		/*
 		** Should probably do some stats counting, but trying to
@@ -1351,12 +1347,11 @@ int sba_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents, int direction)
  * @dev: instance of PCI owned by the driver that's asking.
  * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
- * @direction:  R/W or both.
+ * @dir:  R/W or both.
  *
 * See Documentation/DMA-mapping.txt
 */
-void sba_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
-		  int direction)
+void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir)
 {
 	struct ioc *ioc;
 #ifdef ASSERT_PDIR_SANITY
@@ -1381,8 +1376,7 @@ void sba_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
 	while (nents && sglist->dma_length) {
-		sba_unmap_single(dev, sglist->dma_address,
-				 sglist->dma_length, direction);
+		sba_unmap_single(dev, sglist->dma_address, sglist->dma_length, dir);
 #ifdef CONFIG_PROC_FS
 		/*
 		** This leaves inconsistent data in the stats, but we can't
@@ -1599,7 +1593,7 @@ ioc_sac_init(struct ioc *ioc)
 	struct pci_controller *controller = NULL;

 	/*
-	 * pci_alloc_consistent() must return a DMA address which is
+	 * pci_alloc_coherent() must return a DMA address which is
 	 * SAC (single address cycle) addressable, so allocate a
 	 * pseudo-device to enforce that.
 	 */
@@ -1616,6 +1610,9 @@ ioc_sac_init(struct ioc *ioc)
 	controller->iommu = ioc;
 	sac->sysdata = controller;
 	sac->dma_mask = 0xFFFFFFFFUL;
+#ifdef CONFIG_PCI
+	sac->dev.bus = &pci_bus_type;
+#endif
 	ioc->sac_only_dev = sac;
 }

@@ -1675,9 +1672,8 @@ ioc_init(u64 hpa, void *handle)
 	if (!ioc->name) {
 		ioc->name = kmalloc(24, GFP_KERNEL);
 		if (ioc->name)
-			sprintf(ioc->name, "Unknown (%04x:%04x)",
-				ioc->func_id & 0xFFFF,
-				(ioc->func_id >> 16) & 0xFFFF);
+			sprintf((char *) ioc->name, "Unknown (%04x:%04x)",
+				ioc->func_id & 0xFFFF, (ioc->func_id >> 16) & 0xFFFF);
 		else
 			ioc->name = "Unknown";
 	}

@@ -1813,10 +1809,9 @@ static int
 ioc_map_show(struct seq_file *s, void *v)
 {
 	struct ioc *ioc = v;
-	unsigned int *res_ptr = (unsigned int *)ioc->res_map;
-	int i;
+	unsigned int i, *res_ptr = (unsigned int *)ioc->res_map;

-	for (i = 0; i < (ioc->res_size / sizeof(unsigned int)); ++i, ++res_ptr)
+	for (i = 0; i < ioc->res_size / sizeof(unsigned int); ++i, ++res_ptr)
 		seq_printf(s, "%s%08x", (i & 7) ? " " : "\n   ", *res_ptr);
 	seq_printf(s, "\n");

@@ -1938,13 +1933,17 @@ static struct acpi_driver acpi_sba_ioc_driver = {
 static int __init
 sba_init(void)
 {
-	struct pci_bus *b;
 	MAX_DMA_ADDRESS = ~0UL;
 	acpi_bus_register_driver(&acpi_sba_ioc_driver);
+#ifdef CONFIG_PCI
+	{
+		struct pci_bus *b;
 		pci_for_each_bus(b)
 			sba_connect_bus(b);
+	}
+#endif
 #ifdef CONFIG_PROC_FS
 	ioc_proc_init();
@@ -1962,7 +1961,7 @@ nosbagart(char *str)
 }

 int
-sba_dma_supported (struct pci_dev *dev, u64 mask)
+sba_dma_supported (struct device *dev, u64 mask)
 {
 	/* make sure it's at least 32bit capable */
 	return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
@@ -1975,5 +1974,5 @@ EXPORT_SYMBOL(sba_unmap_single);
 EXPORT_SYMBOL(sba_map_sg);
 EXPORT_SYMBOL(sba_unmap_sg);
 EXPORT_SYMBOL(sba_dma_supported);
-EXPORT_SYMBOL(sba_alloc_consistent);
-EXPORT_SYMBOL(sba_free_consistent);
+EXPORT_SYMBOL(sba_alloc_coherent);
+EXPORT_SYMBOL(sba_free_coherent);
@@ -59,7 +59,7 @@ simcons_write (struct console *cons, const char *buf, unsigned count)

 static struct tty_driver *simcons_console_device (struct console *c, int *index)
 {
-	extern struct tty_driver hp_serial_driver;
+	extern struct tty_driver hp_simserial_driver;
 	*index = c->index;
-	return &hp_serial_driver;
+	return &hp_simserial_driver;
 }

 #define MACHVEC_PLATFORM_NAME		hpsim
+#define MACHVEC_PLATFORM_HEADER		<asm/machvec_hpsim.h>
 #include <asm/machvec_init.h>

@@ -55,7 +55,7 @@ static int simeth_close(struct net_device *dev);
 static int simeth_tx(struct sk_buff *skb, struct net_device *dev);
 static int simeth_rx(struct net_device *dev);
 static struct net_device_stats *simeth_get_stats(struct net_device *dev);
-static void simeth_interrupt(int irq, void *dev_id, struct pt_regs * regs);
+static irqreturn_t simeth_interrupt(int irq, void *dev_id, struct pt_regs * regs);
 static void set_multicast_list(struct net_device *dev);
 static int simeth_device_event(struct notifier_block *this,unsigned long event, void *ptr);

@@ -494,20 +494,21 @@ simeth_rx(struct net_device *dev)
 /*
  * Interrupt handler (Yes, we can do it too !!!)
  */
-static void
+static irqreturn_t
 simeth_interrupt(int irq, void *dev_id, struct pt_regs * regs)
 {
 	struct net_device *dev = dev_id;

 	if ( dev == NULL ) {
 		printk(KERN_WARNING "simeth: irq %d for unknown device\n", irq);
-		return;
+		return IRQ_NONE;
 	}

 	/*
 	 * very simple loop because we get interrupts only when receiving
 	 */
 	while (simeth_rx(dev));
+	return IRQ_HANDLED;
 }

 static struct net_device_stats *
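Aside: several hunks in this commit convert handlers from void to the 2.5-series irqreturn_t convention, which lets the core distinguish handled from spurious interrupts. A userspace sketch of the convention (IRQ_NONE/IRQ_HANDLED mirror the kernel names; the device and drain loop are invented):

    #include <stdio.h>

    typedef enum { IRQ_NONE = 0, IRQ_HANDLED = 1 } irqreturn_t;

    static int rx_pending = 3;

    static irqreturn_t fake_eth_interrupt(int irq, void *dev_id)
    {
    	if (dev_id == NULL)
    		return IRQ_NONE;	/* not ours: let the core count it */

    	while (rx_pending)		/* drain the receive queue */
    		rx_pending--;
    	return IRQ_HANDLED;
    }

    int main(void)
    {
    	int dev = 1;

    	printf("with device:    %d\n", fake_eth_interrupt(5, &dev));
    	printf("without device: %d\n", fake_eth_interrupt(5, NULL));
    	return 0;
    }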
@@ -103,7 +103,8 @@ static struct serial_uart_config uart_config[] = {
 	{ 0, 0}
 };

-static struct tty_driver hp_serial_driver, callout_driver;
+struct tty_driver hp_simserial_driver;
+static struct tty_driver callout_driver;
 static int serial_refcount;

 static struct async_struct *IRQ_ports[NR_IRQS];

@@ -184,7 +185,7 @@ static void receive_chars(struct tty_struct *tty, struct pt_regs *regs)
 /*
  * This is the serial driver's interrupt routine for a single port
  */
-static void rs_interrupt_single(int irq, void *dev_id, struct pt_regs * regs)
+static irqreturn_t rs_interrupt_single(int irq, void *dev_id, struct pt_regs * regs)
 {
 	struct async_struct * info;

@@ -195,13 +196,14 @@ static void rs_interrupt_single(int irq, void *dev_id, struct pt_regs * regs)
 	info = IRQ_ports[irq];
 	if (!info || !info->tty) {
 		printk(KERN_INFO "simrs_interrupt_single: info|tty=0 info=%p problem\n", info);
-		return;
+		return IRQ_NONE;
 	}
 	/*
 	 * pretty simple in our case, because we only get interrupts
 	 * on inbound traffic
 	 */
 	receive_chars(info->tty, regs);
+	return IRQ_HANDLED;
 }

 /*

@@ -768,7 +770,7 @@ startup(struct async_struct *info)
 {
 	unsigned long flags;
 	int	retval=0;
-	void (*handler)(int, void *, struct pt_regs *);
+	irqreturn_t (*handler)(int, void *, struct pt_regs *);
 	struct serial_state *state= info->state;
 	unsigned long page;

@@ -808,8 +810,7 @@ startup(struct async_struct *info)
 		} else
 			handler = rs_interrupt_single;

-		retval = request_irq(state->irq, handler, IRQ_T(info),
-				     "simserial", NULL);
+		retval = request_irq(state->irq, handler, IRQ_T(info), "simserial", NULL);
 		if (retval) {
 			if (capable(CAP_SYS_ADMIN)) {
 				if (info->tty)

@@ -1028,43 +1029,43 @@ simrs_init (void)

 	/* Initialize the tty_driver structure */

-	memset(&hp_serial_driver, 0, sizeof(struct tty_driver));
-	hp_serial_driver.magic = TTY_DRIVER_MAGIC;
-	hp_serial_driver.driver_name = "simserial";
-	hp_serial_driver.name = "ttyS";
-	hp_serial_driver.major = TTY_MAJOR;
-	hp_serial_driver.minor_start = 64;
-	hp_serial_driver.num = 1;
-	hp_serial_driver.type = TTY_DRIVER_TYPE_SERIAL;
-	hp_serial_driver.subtype = SERIAL_TYPE_NORMAL;
-	hp_serial_driver.init_termios = tty_std_termios;
-	hp_serial_driver.init_termios.c_cflag =
+	memset(&hp_simserial_driver, 0, sizeof(struct tty_driver));
+	hp_simserial_driver.magic = TTY_DRIVER_MAGIC;
+	hp_simserial_driver.driver_name = "simserial";
+	hp_simserial_driver.name = "ttyS";
+	hp_simserial_driver.major = TTY_MAJOR;
+	hp_simserial_driver.minor_start = 64;
+	hp_simserial_driver.num = 1;
+	hp_simserial_driver.type = TTY_DRIVER_TYPE_SERIAL;
+	hp_simserial_driver.subtype = SERIAL_TYPE_NORMAL;
+	hp_simserial_driver.init_termios = tty_std_termios;
+	hp_simserial_driver.init_termios.c_cflag =
 		B9600 | CS8 | CREAD | HUPCL | CLOCAL;
-	hp_serial_driver.flags = TTY_DRIVER_REAL_RAW;
-	hp_serial_driver.refcount = &serial_refcount;
-	hp_serial_driver.table = serial_table;
-	hp_serial_driver.termios = serial_termios;
-	hp_serial_driver.termios_locked = serial_termios_locked;
-	hp_serial_driver.open = rs_open;
-	hp_serial_driver.close = rs_close;
-	hp_serial_driver.write = rs_write;
-	hp_serial_driver.put_char = rs_put_char;
-	hp_serial_driver.flush_chars = rs_flush_chars;
-	hp_serial_driver.write_room = rs_write_room;
-	hp_serial_driver.chars_in_buffer = rs_chars_in_buffer;
-	hp_serial_driver.flush_buffer = rs_flush_buffer;
-	hp_serial_driver.ioctl = rs_ioctl;
-	hp_serial_driver.throttle = rs_throttle;
-	hp_serial_driver.unthrottle = rs_unthrottle;
-	hp_serial_driver.send_xchar = rs_send_xchar;
-	hp_serial_driver.set_termios = rs_set_termios;
-	hp_serial_driver.stop = rs_stop;
-	hp_serial_driver.start = rs_start;
-	hp_serial_driver.hangup = rs_hangup;
-	hp_serial_driver.break_ctl = rs_break;
-	hp_serial_driver.wait_until_sent = rs_wait_until_sent;
-	hp_serial_driver.read_proc = rs_read_proc;
+	hp_simserial_driver.flags = TTY_DRIVER_REAL_RAW;
+	hp_simserial_driver.refcount = &serial_refcount;
+	hp_simserial_driver.table = serial_table;
+	hp_simserial_driver.termios = serial_termios;
+	hp_simserial_driver.termios_locked = serial_termios_locked;
+	hp_simserial_driver.open = rs_open;
+	hp_simserial_driver.close = rs_close;
+	hp_simserial_driver.write = rs_write;
+	hp_simserial_driver.put_char = rs_put_char;
+	hp_simserial_driver.flush_chars = rs_flush_chars;
+	hp_simserial_driver.write_room = rs_write_room;
+	hp_simserial_driver.chars_in_buffer = rs_chars_in_buffer;
+	hp_simserial_driver.flush_buffer = rs_flush_buffer;
+	hp_simserial_driver.ioctl = rs_ioctl;
+	hp_simserial_driver.throttle = rs_throttle;
+	hp_simserial_driver.unthrottle = rs_unthrottle;
+	hp_simserial_driver.send_xchar = rs_send_xchar;
+	hp_simserial_driver.set_termios = rs_set_termios;
+	hp_simserial_driver.stop = rs_stop;
+	hp_simserial_driver.start = rs_start;
+	hp_simserial_driver.hangup = rs_hangup;
+	hp_simserial_driver.break_ctl = rs_break;
+	hp_simserial_driver.wait_until_sent = rs_wait_until_sent;
+	hp_simserial_driver.read_proc = rs_read_proc;

 	/*
 	 * Let's have a little bit of fun !

@@ -1087,14 +1088,14 @@ simrs_init (void)
 	 * The callout device is just like normal device except for
 	 * major number and the subtype code.
 	 */
-	callout_driver		= hp_serial_driver;
+	callout_driver		= hp_simserial_driver;
 	callout_driver.name	= "cua";
 	callout_driver.major	= TTYAUX_MAJOR;
 	callout_driver.subtype	= SERIAL_TYPE_CALLOUT;
 	callout_driver.read_proc = 0;
 	callout_driver.proc_entry = 0;

-	if (tty_register_driver(&hp_serial_driver))
+	if (tty_register_driver(&hp_simserial_driver))
 		panic("Couldn't register simserial driver\n");

 	if (tty_register_driver(&callout_driver))
 #define MACHVEC_PLATFORM_NAME		hpzx1
+#define MACHVEC_PLATFORM_HEADER		<asm/machvec_hpzx1.h>
 #include <asm/machvec_init.h>
@@ -3,13 +3,16 @@
  *
  * Copyright (C) 2000 VA Linux Co
  * Copyright (C) 2000 Don Dugger <n0ano@valinux.com>
- * Copyright (C) 2001-2002 Hewlett-Packard Co
+ * Copyright (C) 2001-2003 Hewlett-Packard Co
  *	David Mosberger-Tang <davidm@hpl.hp.com>
  */

 #include <linux/types.h>
 #include <linux/dirent.h>
 #include <linux/fs.h>		/* argh, msdos_fs.h isn't self-contained... */
+#include <linux/signal.h>	/* argh, msdos_fs.h isn't self-contained... */
+
+#include <asm/ia32.h>
 #include <linux/msdos_fs.h>
 #include <linux/mtio.h>

@@ -33,8 +36,6 @@
 #define __KERNEL__
 #include <scsi/sg.h>

-#include <asm/ia32.h>
-
 #include <../drivers/char/drm/drm.h>
 #include <../drivers/char/drm/mga_drm.h>
 #include <../drivers/char/drm/i810_drm.h>

@@ -53,10 +53,10 @@
 #include <asm/types.h>
 #include <asm/uaccess.h>
 #include <asm/semaphore.h>
-#include <asm/ia32.h>

 #include <net/scm.h>
 #include <net/sock.h>
+#include <asm/ia32.h>

 #define DEBUG	0

@@ -177,7 +177,7 @@ int cp_compat_stat(struct kstat *stat, struct compat_stat *ubuf)
 {
 	int err;

-	if (stat->size > MAX_NON_LFS)
+	if ((u64) stat->size > MAX_NON_LFS)
 		return -EOVERFLOW;

 	if (clear_user(ubuf, sizeof(*ubuf)))

@@ -927,8 +927,7 @@ asmlinkage ssize_t sys_writev (unsigned long,const struct iovec *,unsigned long)
 static struct iovec *
 get_compat_iovec (struct compat_iovec *iov32, struct iovec *iov_buf, u32 count, int type)
 {
-	int i;
-	u32 buf, len;
+	u32 i, buf, len;
 	struct iovec *ivp, *iov;

 	/* Get the "struct iovec" from user memory */

@@ -2070,7 +2069,7 @@ sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data,
 			ret = -EIO;
 			break;
 		}
-		for (i = 0; i < 17*sizeof(int); i += sizeof(int) ) {
+		for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int) ) {
 			put_user(getreg(child, i), (unsigned int *) A(data));
 			data += sizeof(int);
 		}

@@ -2082,7 +2081,7 @@ sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data,
 			ret = -EIO;
 			break;
 		}
-		for (i = 0; i < 17*sizeof(int); i += sizeof(int) ) {
+		for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int) ) {
 			get_user(tmp, (unsigned int *) A(data));
 			putreg(child, i, tmp);
 			data += sizeof(int);

@@ -2158,7 +2157,7 @@ sys32_iopl (int level)
 		return(-EINVAL);
 	/* Trying to gain more privileges? */
 	asm volatile ("mov %0=ar.eflag ;;" : "=r"(old));
-	if (level > ((old >> 12) & 3)) {
+	if ((unsigned int) level > ((old >> 12) & 3)) {
 		if (!capable(CAP_SYS_RAWIO))
 			return -EPERM;
 	}
@@ -196,7 +196,8 @@ acpi_parse_lsapic (acpi_table_entry_header *header)
 		printk(" enabled");
 #ifdef CONFIG_SMP
 		smp_boot_data.cpu_phys_id[total_cpus] = (lsapic->id << 8) | lsapic->eid;
-		if (hard_smp_processor_id() == smp_boot_data.cpu_phys_id[total_cpus])
+		if (hard_smp_processor_id()
+		    == (unsigned int) smp_boot_data.cpu_phys_id[total_cpus])
 			printk(" (BSP)");
 #endif
 	}

@@ -203,16 +203,16 @@ STUB_GET_NEXT_HIGH_MONO_COUNT(virt, )
 STUB_RESET_SYSTEM(virt, )

 void
-efi_gettimeofday (struct timeval *tv)
+efi_gettimeofday (struct timespec *ts)
 {
 	efi_time_t tm;

-	memset(tv, 0, sizeof(tv));
+	memset(ts, 0, sizeof(ts));
 	if ((*efi.get_time)(&tm, 0) != EFI_SUCCESS)
 		return;

-	tv->tv_sec = mktime(tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second);
-	tv->tv_usec = tm.nanosecond / 1000;
+	ts->tv_sec = mktime(tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second);
+	ts->tv_nsec = tm.nanosecond;
 }

 static int
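Aside: EFI reports the sub-second part of the time in nanoseconds, so returning a timespec keeps full resolution instead of truncating to microseconds. A standalone illustration (the structs are local stand-ins):

    #include <stdio.h>

    struct my_timeval  { long tv_sec; long tv_usec; };
    struct my_timespec { long tv_sec; long tv_nsec; };

    int main(void)
    {
    	long efi_nanosecond = 123456789;	/* as delivered by (*efi.get_time)() */
    	struct my_timeval  tv = { 0, efi_nanosecond / 1000 };
    	struct my_timespec ts = { 0, efi_nanosecond };

    	printf("timeval keeps  %ld ns\n", tv.tv_usec * 1000);	/* 123456000: 789 ns lost */
    	printf("timespec keeps %ld ns\n", ts.tv_nsec);		/* 123456789 */
    	return 0;
    }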
@@ -512,7 +512,7 @@ efi_init (void)
 	/* Show what we know for posterity */
 	c16 = __va(efi.systab->fw_vendor);
 	if (c16) {
-		for (i = 0;i < sizeof(vendor) && *c16; ++i)
+		for (i = 0;i < (int) sizeof(vendor) && *c16; ++i)
 			vendor[i] = *c16++;
 		vendor[i] = '\0';
 	}

@@ -520,7 +520,7 @@ efi_init (void)
 	printk(KERN_INFO "EFI v%u.%.02u by %s:",
 	       efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff, vendor);

-	for (i = 0; i < efi.systab->nr_tables; i++) {
+	for (i = 0; i < (int) efi.systab->nr_tables; i++) {
 		if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) {
 			efi.mps = __va(config_tables[i].table);
 			printk(" MPS=0x%lx", config_tables[i].table);

@@ -138,8 +138,7 @@ utf8_strlen(efi_char16_t *data, unsigned long maxlength)
 static inline unsigned long
 utf8_strsize(efi_char16_t *data, unsigned long maxlength)
 {
-	return utf8_strlen(data, maxlength/sizeof(efi_char16_t)) *
-		sizeof(efi_char16_t);
+	return utf8_strlen(data, maxlength/sizeof(efi_char16_t)) * sizeof(efi_char16_t);
 }

@@ -170,8 +169,7 @@ efivar_create_proc_entry(unsigned long variable_name_size,
 			 efi_guid_t *vendor_guid)
 {
-	int i, short_name_size = variable_name_size /
-		sizeof(efi_char16_t) + 38;
+	int i, short_name_size = variable_name_size / sizeof(efi_char16_t) + 38;
 	char *short_name;
 	efivar_entry_t *new_efivar;

@@ -192,7 +190,7 @@ efivar_create_proc_entry(unsigned long variable_name_size,
 	/* Convert Unicode to normal chars (assume top bits are 0),
 	   ala UTF-8 */
-	for (i=0; i<variable_name_size / sizeof(efi_char16_t); i++) {
+	for (i=0; i< (int) (variable_name_size / sizeof(efi_char16_t)); i++) {
 		short_name[i] = variable_name[i] & 0xFF;
 	}

@@ -37,7 +37,7 @@ static char fw_mem[(  sizeof(struct ia64_boot_param)
 		      + NUM_MEM_DESCS*(sizeof(efi_memory_desc_t))
 		      + 1024)] __attribute__ ((aligned (8)));

-#ifdef CONFIG_IA64_HP_SIM
+#if defined(CONFIG_IA64_HP_SIM) || defined(CONFIG_IA64_GENERIC)

 /* Simulator system calls: */

@@ -233,7 +233,7 @@ asm (
 static efi_status_t
 efi_get_time (efi_time_t *tm, efi_time_cap_t *tc)
 {
-#ifdef CONFIG_IA64_HP_SIM
+#if defined(CONFIG_IA64_HP_SIM) || defined(CONFIG_IA64_GENERIC)
 	struct {
 		int tv_sec;	/* must be 32bits to work */
 		int tv_usec;

@@ -255,7 +255,7 @@ efi_get_time (efi_time_t *tm, efi_time_cap_t *tc)
 static void
 efi_reset_system (int reset_type, efi_status_t status, unsigned long data_size, efi_char16_t *data)
 {
-#ifdef CONFIG_IA64_HP_SIM
+#if defined(CONFIG_IA64_HP_SIM) || defined(CONFIG_IA64_GENERIC)
 	ssc(status, 0, 0, 0, SSC_EXIT);
 #else
 #	error Not implemented yet...
@@ -46,6 +46,7 @@ EXPORT_SYMBOL(ip_fast_csum);
 EXPORT_SYMBOL(__ia64_memcpy_fromio);
 EXPORT_SYMBOL(__ia64_memcpy_toio);
 EXPORT_SYMBOL(__ia64_memset_c_io);
+EXPORT_SYMBOL(io_space);

 #include <asm/semaphore.h>
 EXPORT_SYMBOL_NOVERS(__down);

@@ -161,3 +162,11 @@ EXPORT_SYMBOL(unw_access_br);
 EXPORT_SYMBOL(unw_access_fr);
 EXPORT_SYMBOL(unw_access_ar);
 EXPORT_SYMBOL(unw_access_pr);
+
+#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 4)
+extern void ia64_spinlock_contention_pre3_4 (void);
+EXPORT_SYMBOL(ia64_spinlock_contention_pre3_4);
+#else
+extern void ia64_spinlock_contention (void);
+EXPORT_SYMBOL(ia64_spinlock_contention);
+#endif
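Aside: this hunk is the "prepare for GCC v3.4" part of the commit; the exported spinlock-contention helper is picked at compile time from GCC's predefined version macros. A standalone sketch of the gate (the two symbol names are just placeholders for the real helpers, and the demo assumes a GCC-compatible compiler that defines __GNUC__):

    #include <stdio.h>

    #if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 4)
    static const char *contention_helper = "ia64_spinlock_contention_pre3_4";
    #else
    static const char *contention_helper = "ia64_spinlock_contention";
    #endif

    int main(void)
    {
    	printf("compiled with gcc %d.%d -> exporting %s\n",
    	       __GNUC__, __GNUC_MINOR__, contention_helper);
    	return 0;
    }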
@@ -680,6 +680,8 @@ iosapic_enable_intr (unsigned int vector)
 		vector, dest);
 }

+#ifdef CONFIG_ACPI_PCI
+
 void __init
 iosapic_parse_prt (void)
 {

@@ -712,7 +714,8 @@ iosapic_parse_prt (void)
 			/* new GSI; allocate a vector for it */
 			vector = ia64_alloc_vector();
-			register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY, IOSAPIC_POL_LOW, IOSAPIC_LEVEL);
+			register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY, IOSAPIC_POL_LOW,
+				      IOSAPIC_LEVEL);
 		}
 		snprintf(pci_id, sizeof(pci_id), "%02x:%02x:%02x[%c]",
 			 entry->id.segment, entry->id.bus, entry->id.device, 'A' + entry->pin);

@@ -723,7 +726,10 @@ iosapic_parse_prt (void)
 		 */
 		idesc = irq_desc(vector);
 		if (idesc->handler != irq_type)
-			register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY, IOSAPIC_POL_LOW, IOSAPIC_LEVEL);
+			register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY, IOSAPIC_POL_LOW,
+				      IOSAPIC_LEVEL);
 	}
 }
+
+#endif /* CONFIG_ACPI */

@@ -18,7 +18,6 @@
  */

 #include <linux/config.h>
-#include <linux/ptrace.h>
 #include <linux/errno.h>
 #include <linux/signal.h>
 #include <linux/sched.h>

@@ -33,6 +32,7 @@
 #include <linux/irq.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/kallsyms.h>

 #include <asm/atomic.h>
 #include <asm/io.h>
@@ -91,7 +91,8 @@ static void register_irq_proc (unsigned int irq);
 /*
  * Special irq handlers.
  */

-void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }
+irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
+{ return IRQ_NONE; }

 /*
  * Generic no controller code

@@ -141,9 +142,11 @@ struct hw_interrupt_type no_irq_type = {
 };

 atomic_t irq_err_count;
-#if defined(CONFIG_X86) && defined(CONFIG_X86_IO_APIC) && defined(APIC_MISMATCH_DEBUG)
+#ifdef CONFIG_X86_IO_APIC
+#ifdef APIC_MISMATCH_DEBUG
 atomic_t irq_mis_count;
 #endif
+#endif

 /*
  * Generic, controller-independent functions:

@@ -178,6 +181,7 @@ int show_interrupts(struct seq_file *p, void *v)
 #endif
 		seq_printf(p, " %14s", idesc->handler->typename);
 		seq_printf(p, "  %s", action->name);
+
 		for (action=action->next; action; action = action->next)
 			seq_printf(p, ", %s", action->name);

@@ -190,16 +194,18 @@ int show_interrupts(struct seq_file *p, void *v)
 		if (cpu_online(j))
 			seq_printf(p, "%10u ", nmi_count(j));
 	seq_putc(p, '\n');
-#if defined(CONFIG_SMP) && defined(CONFIG_X86)
+#if CONFIG_X86_LOCAL_APIC
 	seq_puts(p, "LOC: ");
 	for (j = 0; j < NR_CPUS; j++)
 		if (cpu_online(j))
-			seq_printf(p, "%10u ", apic_timer_irqs[j]);
+			seq_printf(p, "%10u ", irq_stat[j].apic_timer_irqs);
 	seq_putc(p, '\n');
 #endif
 	seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
-#if defined(CONFIG_X86) && defined(CONFIG_X86_IO_APIC) && defined(APIC_MISMATCH_DEBUG)
+#ifdef CONFIG_X86_IO_APIC
+#ifdef APIC_MISMATCH_DEBUG
 	seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
+#endif
 #endif
 	return 0;
 }
@@ -219,21 +225,46 @@ inline void synchronize_irq(unsigned int irq)
  * waste of time and is not what some drivers would
  * prefer.
  */
-int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action)
+int handle_IRQ_event(unsigned int irq,
+		struct pt_regs *regs, struct irqaction *action)
 {
 	int status = 1;	/* Force the "do bottom halves" bit */
+	int retval = 0;
+	struct irqaction *first_action = action;

 	if (!(action->flags & SA_INTERRUPT))
 		local_irq_enable();

 	do {
 		status |= action->flags;
-		action->handler(irq, action->dev_id, regs);
+		retval |= action->handler(irq, action->dev_id, regs);
 		action = action->next;
 	} while (action);

 	if (status & SA_SAMPLE_RANDOM)
 		add_interrupt_randomness(irq);
 	local_irq_disable();
+
+	if (retval != 1) {
+		static int count = 100;
+
+		if (count) {
+			count--;
+			if (retval) {
+				printk("irq event %d: bogus retval mask %x\n",
+					irq, retval);
+			} else {
+				printk("irq %d: nobody cared!\n", irq);
+			}
+			dump_stack();
+			printk("handlers:\n");
+			action = first_action;
+			do {
+				printk("[<%p>]", action->handler);
+				print_symbol(" (%s)",
+					(unsigned long)action->handler);
+				printk("\n");
+				action = action->next;
+			} while (action);
+		}
+	}
+
 	return status;
 }
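Aside: the payoff of irqreturn_t lands here: the per-action return codes are OR-ed together, and anything other than exactly IRQ_HANDLED (1) means either nobody claimed the interrupt or a handler returned garbage. A userspace model of that bookkeeping (handlers and the action list are made up):

    #include <stdio.h>

    typedef enum { IRQ_NONE = 0, IRQ_HANDLED = 1 } irqreturn_t;

    struct irqaction {
    	irqreturn_t (*handler)(int irq);
    	struct irqaction *next;
    };

    static irqreturn_t ours(int irq)     { (void)irq; return IRQ_HANDLED; }
    static irqreturn_t not_ours(int irq) { (void)irq; return IRQ_NONE; }

    static void run_chain(int irq, struct irqaction *action)
    {
    	int retval = 0;

    	for (; action; action = action->next)
    		retval |= action->handler(irq);

    	if (retval != 1)	/* IRQ_NONE from everyone, or a bogus code */
    		printf("irq %d: nobody cared (mask %x)\n", irq, retval);
    	else
    		printf("irq %d: handled\n", irq);
    }

    int main(void)
    {
    	struct irqaction a2 = { ours, NULL };
    	struct irqaction a1 = { not_ours, &a2 };	/* shared line */
    	struct irqaction lone = { not_ours, NULL };

    	run_chain(9, &a1);	/* second handler claims it */
    	run_chain(9, &lone);	/* spurious: diagnostic fires */
    	return 0;
    }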
@@ -455,7 +486,7 @@ unsigned int do_IRQ(unsigned long irq, struct pt_regs *regs)
  */

 int request_irq(unsigned int irq,
-		void (*handler)(int, void *, struct pt_regs *),
+		irqreturn_t (*handler)(int, void *, struct pt_regs *),
 		unsigned long irqflags,
 		const char * devname,
 		void *dev_id)

@@ -482,7 +513,7 @@ int request_irq(unsigned int irq,
 		return -EINVAL;

 	action = (struct irqaction *)
-			kmalloc(sizeof(struct irqaction), GFP_KERNEL);
+			kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
 	if (!action)
 		return -ENOMEM;

@@ -511,10 +542,7 @@ int request_irq(unsigned int irq,
  * does not return until any executing interrupts for this IRQ
  * have completed.
  *
- * This function may be called from interrupt context.
- *
- * Bugs: Attempting to free an irq in a handler for the same irq hangs
- *       the machine.
+ * This function must not be called from interrupt context.
  */

 void free_irq(unsigned int irq, void *dev_id)

@@ -545,11 +573,8 @@ void free_irq(unsigned int irq, void *dev_id)
 			}
 			spin_unlock_irqrestore(&desc->lock,flags);

-#ifdef CONFIG_SMP
 			/* Wait to make sure it's not being used on another CPU */
-			while (desc->status & IRQ_INPROGRESS)
-				synchronize_irq(irq);
-#endif
+			synchronize_irq(irq);
 			kfree(action);
 			return;
 		}

@@ -664,7 +689,6 @@ unsigned long probe_irq_on(void)
  * only return ISA irq numbers - just so that we reset them
  * all to a known state.
  */
-
 unsigned int probe_irq_mask(unsigned long val)
 {
 	int i;

@@ -748,6 +772,8 @@ int setup_irq(unsigned int irq, struct irqaction * new)
 	struct irqaction *old, **p;
 	irq_desc_t *desc = irq_desc(irq);

+	if (desc->handler == &no_irq_type)
+		return -ENOSYS;
 	/*
 	 * Some drivers like serial.c use request_irq() heavily,
 	 * so we have to be careful not to interfere with a

@@ -808,11 +834,11 @@ static struct proc_dir_entry * irq_dir [NR_IRQS];

 #define HEX_DIGITS 8

-static int parse_hex_value (const char *buffer, unsigned long count, unsigned long *ret)
+static unsigned int parse_hex_value (const char *buffer,
+				     unsigned long count, unsigned long *ret)
 {
 	unsigned char hexnum [HEX_DIGITS];
-	unsigned long value;
-	int i;
+	unsigned long value, i;

 	if (!count)
 		return -EINVAL;
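Aside: a self-contained version of the proc-file hex parser whose signature the hunk above changes. The parsing logic mirrors the kernel's (including the GCC case-range extension it relies on); the test harness is invented:

    #include <stdio.h>
    #include <string.h>

    #define HEX_DIGITS 8

    static int parse_hex_value(const char *buffer, unsigned long count,
    			   unsigned long *ret)
    {
    	unsigned char hexnum[HEX_DIGITS];
    	unsigned long value, i;

    	if (!count)
    		return -1;
    	if (count > HEX_DIGITS)
    		count = HEX_DIGITS;
    	memcpy(hexnum, buffer, count);

    	/* Accumulate up to HEX_DIGITS hex characters, either case. */
    	value = 0;
    	for (i = 0; i < count; i++) {
    		unsigned int c = hexnum[i];

    		switch (c) {
    		case '0' ... '9': c -= '0'; break;
    		case 'a' ... 'f': c -= 'a' - 10; break;
    		case 'A' ... 'F': c -= 'A' - 10; break;
    		default:
    			goto out;
    		}
    		value = (value << 4) | c;
    	}
    out:
    	*ret = value;
    	return 0;
    }

    int main(void)
    {
    	unsigned long mask;

    	if (parse_hex_value("00ff00ff", 8, &mask) == 0)
    		printf("mask = 0x%lx\n", mask);	/* 0xff00ff */
    	return 0;
    }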
@@ -950,12 +976,13 @@ static void register_irq_proc (unsigned int irq)
 #if CONFIG_SMP
 	{
 		struct proc_dir_entry *entry;
+
 		/* create /proc/irq/1234/smp_affinity */
 		entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

 		if (entry) {
 			entry->nlink = 1;
-			entry->data = (void *)(unsigned long)irq;
+			entry->data = (void *)(long)irq;
 			entry->read_proc = irq_affinity_read_proc;
 			entry->write_proc = irq_affinity_write_proc;
 		}

@@ -145,7 +145,7 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 }

 #ifdef CONFIG_SMP
-extern void handle_IPI (int irq, void *dev_id, struct pt_regs *regs);
+extern irqreturn_t handle_IPI (int irq, void *dev_id, struct pt_regs *regs);

 static struct irqaction ipi_irqaction = {
 	.handler =	handle_IPI,
 #include <linux/config.h>
+#include <asm/system.h>

 #ifdef CONFIG_IA64_GENERIC

 #include <linux/kernel.h>
 #include <linux/string.h>

-#include <asm/page.h>
 #include <asm/machvec.h>
+#include <asm/page.h>

 struct ia64_machine_vector ia64_mv;

@@ -43,3 +45,9 @@ void
 machvec_noop (void)
 {
 }
+
+void
+machvec_memory_fence (void)
+{
+	mb();
+}
@@ -172,7 +172,7 @@ mca_handler_platform (void)
 }

-void
+irqreturn_t
 ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
 {
 	IA64_MCA_DEBUG("ia64_mca_cpe_int_handler: received interrupt. CPU:%d vector = %#x\n",

@@ -180,6 +180,7 @@ ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)

 	/* Get the CMC error record and log it */
 	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE, 0);
+	return IRQ_HANDLED;
 }

 static void

@@ -338,10 +339,26 @@ init_handler_platform (sal_log_processor_info_t *proc_ptr,
 	udelay(5*1000000);
 	show_min_state(&SAL_LPI_PSI_INFO(proc_ptr)->min_state_area);

+	printk("Backtrace of current task (pid %d, %s)\n", current->pid, current->comm);
 	fetch_min_state(&SAL_LPI_PSI_INFO(proc_ptr)->min_state_area, pt, sw);
 	unw_init_from_interruption(&info, current, pt, sw);
 	ia64_do_show_stack(&info, NULL);
+	if (!tasklist_lock.write_lock)
+		read_lock(&tasklist_lock);
+	{
+		struct task_struct *g, *t;
+		do_each_thread (g, t) {
+			if (t == current)
+				continue;
+			printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
+			show_stack(t);
+		} while_each_thread (g, t);
+	}
+	if (!tasklist_lock.write_lock)
+		read_unlock(&tasklist_lock);
+
 	printk("\nINIT dump complete.  Please reboot now.\n");
 	while (1);			/* hang city if no debugger */
 }
@@ -828,7 +845,7 @@ ia64_mca_wakeup_all(void)
  *  Inputs  :   None
  *  Outputs :   None
  */
-void
+irqreturn_t
 ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs)
 {
 	unsigned long flags;

@@ -851,6 +868,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs)

 	/* Enable all interrupts */
 	local_irq_restore(flags);
+	return IRQ_HANDLED;
 }

@@ -869,10 +887,10 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs)
  *  Outputs :   None
  *
  */
-void
+irqreturn_t
 ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg, struct pt_regs *ptregs)
 {
+	return IRQ_HANDLED;
 }

 /*

@@ -967,7 +985,7 @@ ia64_mca_ucmc_handler(void)
  * Outputs
  *	None
  */
-void
+irqreturn_t
 ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
 {
 	static unsigned long	cmc_history[CMC_HISTORY_LENGTH];

@@ -1024,7 +1042,7 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
 			mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);

 			/* lock already released, get out now */
-			return;
+			return IRQ_HANDLED;
 		} else {
 			cmc_history[index++] = now;
 			if (index == CMC_HISTORY_LENGTH)

@@ -1032,6 +1050,7 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
 		}
 	}
 	spin_unlock(&cmc_history_lock);
+	return IRQ_HANDLED;
 }

 /*

@@ -1096,7 +1115,7 @@ ia64_mca_cmc_int_caller(void *dummy)
 static void
 ia64_mca_cmc_poll (unsigned long dummy)
 {
-	int start_count;
+	unsigned long start_count;

 	start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CMC);

@@ -1147,7 +1166,7 @@ ia64_mca_cpe_int_caller(void *dummy)
 static void
 ia64_mca_cpe_poll (unsigned long dummy)
 {
-	int start_count;
+	unsigned long start_count;
 	static int poll_time = MAX_CPE_POLL_INTERVAL;

 	start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CPE);
@@ -1264,7 +1283,8 @@ ia64_log_prt_guid (efi_guid_t *p_guid, prfunc_t prfunc)
 static void
 ia64_log_hexdump(unsigned char *p, unsigned long n_ch, prfunc_t prfunc)
 {
-	int i, j;
+	unsigned long i;
+	int j;

 	if (!p)
 		return;

@@ -2112,7 +2132,7 @@ ia64_log_processor_info_print(sal_log_record_header_t *lh, prfunc_t prfunc)
 {
 	sal_log_section_hdr_t	*slsh;
 	int			n_sects;
-	int			ercd_pos;
+	u32			ercd_pos;

 	if (!lh)
 		return;

@@ -2174,7 +2194,7 @@ ia64_log_platform_info_print (sal_log_record_header_t *lh, prfunc_t prfunc)
 {
 	sal_log_section_hdr_t	*slsh;
 	int			n_sects;
-	int			ercd_pos;
+	u32			ercd_pos;
 	int			platform_err = 0;

 	if (!lh)
@@ -91,7 +91,7 @@ static const char *rse_hints[]={
 	"eager loads and stores"
 };

-#define RSE_HINTS_COUNT (sizeof(rse_hints)/sizeof(const char *))
+#define RSE_HINTS_COUNT	ARRAY_SIZE(rse_hints)

 static const char *mem_attrib[]={
 	"WB",		/* 000 */
@@ -192,10 +192,10 @@ power_info(char *page)
 	for (i=0; i < 8 ; i++ ) {
 		if (halt_info[i].pal_power_mgmt_info_s.im == 1) {
-			p += sprintf(p, "Power level %d:\n" \
-				"\tentry_latency       : %d cycles\n" \
-				"\texit_latency        : %d cycles\n" \
-				"\tpower consumption   : %d mW\n" \
+			p += sprintf(p, "Power level %d:\n"
+				"\tentry_latency       : %d cycles\n"
+				"\texit_latency        : %d cycles\n"
+				"\tpower consumption   : %d mW\n"
 				"\tCache+TLB coherency : %s\n", i,
 				halt_info[i].pal_power_mgmt_info_s.entry_latency,
 				halt_info[i].pal_power_mgmt_info_s.exit_latency,

@@ -212,9 +212,9 @@ static int
 cache_info(char *page)
 {
 	char *p = page;
-	u64 levels, unique_caches;
+	u64 i, levels, unique_caches;
 	pal_cache_config_info_t cci;
-	int i,j, k;
+	int j, k;
 	s64 status;

 	if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {

@@ -232,8 +232,9 @@ cache_info(char *page)
 			if ((status=ia64_pal_cache_config_info(i,j, &cci)) != 0) {
 				continue;
 			}
-			p += sprintf(p, "%s Cache level %d:\n" \
-				"\tSize           : %ld bytes\n" \
+			p += sprintf(p,
+				     "%s Cache level %lu:\n"
+				     "\tSize           : %lu bytes\n"
 				     "\tAttributes     : ",
 				     cache_types[j+cci.pcci_unified], i+1,
 				     cci.pcci_cache_size);

@@ -242,37 +243,38 @@ cache_info(char *page)
 			p += sprintf(p, "%s\n", cache_mattrib[cci.pcci_cache_attr]);

-			p += sprintf(p, "\tAssociativity  : %d\n" \
-				"\tLine size      : %d bytes\n" \
+			p += sprintf(p,
+				     "\tAssociativity  : %d\n"
+				     "\tLine size      : %d bytes\n"
 				     "\tStride         : %d bytes\n",
-				cci.pcci_assoc,
-				1<<cci.pcci_line_size,
-				1<<cci.pcci_stride);
+				     cci.pcci_assoc, 1<<cci.pcci_line_size, 1<<cci.pcci_stride);
 			if (j == 1)
 				p += sprintf(p, "\tStore latency  : N/A\n");
 			else
 				p += sprintf(p, "\tStore latency  : %d cycle(s)\n",
 					     cci.pcci_st_latency);

-			p += sprintf(p, "\tLoad latency   : %d cycle(s)\n" \
-				"\tStore hints    : ",
-				cci.pcci_ld_latency);
+			p += sprintf(p,
+				     "\tLoad latency   : %d cycle(s)\n"
+				     "\tStore hints    : ", cci.pcci_ld_latency);

 			for(k=0; k < 8; k++ ) {
-				if ( cci.pcci_st_hints & 0x1) p += sprintf(p, "[%s]", cache_st_hints[k]);
+				if ( cci.pcci_st_hints & 0x1)
+					p += sprintf(p, "[%s]", cache_st_hints[k]);
 				cci.pcci_st_hints >>=1;
 			}
 			p += sprintf(p, "\n\tLoad hints     : ");

 			for(k=0; k < 8; k++ ) {
-				if ( cci.pcci_ld_hints & 0x1) p += sprintf(p, "[%s]", cache_ld_hints[k]);
+				if (cci.pcci_ld_hints & 0x1)
+					p += sprintf(p, "[%s]", cache_ld_hints[k]);
 				cci.pcci_ld_hints >>=1;
 			}
-			p += sprintf(p, "\n\tAlias boundary : %d byte(s)\n" \
-				"\tTag LSB        : %d\n" \
+			p += sprintf(p,
+				     "\n\tAlias boundary : %d byte(s)\n"
+				     "\tTag LSB        : %d\n"
 				     "\tTag MSB        : %d\n",
-				1<<cci.pcci_alias_boundary,
-				cci.pcci_tag_lsb,
+				     1<<cci.pcci_alias_boundary, cci.pcci_tag_lsb,
 				     cci.pcci_tag_msb);

 			/* when unified, data(j=2) is enough */
@@ -303,20 +305,20 @@ vm_info(char *page)
 	}

-	p += sprintf(p, "Physical Address Space         : %d bits\n" \
-		"Virtual Address Space          : %d bits\n" \
-		"Protection Key Registers(PKR)  : %d\n" \
-		"Implemented bits in PKR.key    : %d\n" \
-		"Hash Tag ID                    : 0x%x\n" \
+	p += sprintf(p,
+		     "Physical Address Space         : %d bits\n"
+		     "Virtual Address Space          : %d bits\n"
+		     "Protection Key Registers(PKR)  : %d\n"
+		     "Implemented bits in PKR.key    : %d\n"
+		     "Hash Tag ID                    : 0x%x\n"
 		     "Size of RR.rid                 : %d\n",
 		     vm_info_1.pal_vm_info_1_s.phys_add_size,
-		vm_info_2.pal_vm_info_2_s.impl_va_msb+1,
-		vm_info_1.pal_vm_info_1_s.max_pkr+1,
-		vm_info_1.pal_vm_info_1_s.key_size,
-		vm_info_1.pal_vm_info_1_s.hash_tag_id,
+		     vm_info_2.pal_vm_info_2_s.impl_va_msb+1, vm_info_1.pal_vm_info_1_s.max_pkr+1,
+		     vm_info_1.pal_vm_info_1_s.key_size, vm_info_1.pal_vm_info_1_s.hash_tag_id,
 		     vm_info_2.pal_vm_info_2_s.rid_size);

-	if (ia64_pal_mem_attrib(&attrib) != 0) return 0;
+	if (ia64_pal_mem_attrib(&attrib) != 0)
+		return 0;

 	p += sprintf(p, "Supported memory attributes    : ");
 	sep = "";

@@ -333,9 +335,10 @@ vm_info(char *page)
 		return 0;
 	}

-	p += sprintf(p, "\nTLB walker                     : %simplemented\n" \
-		"Number of DTR                  : %d\n" \
-		"Number of ITR                  : %d\n" \
+	p += sprintf(p,
+		     "\nTLB walker                     : %simplemented\n"
+		     "Number of DTR                  : %d\n"
+		     "Number of ITR                  : %d\n"
 		     "TLB insertable page sizes      : ",
 		     vm_info_1.pal_vm_info_1_s.vw ? "" : "not ",
 		     vm_info_1.pal_vm_info_1_s.max_dtr_entry+1,

@@ -353,18 +356,16 @@ vm_info(char *page)
 		return 0;
 	}

-	p += sprintf(p, "\nPurge base address             : 0x%016lx\n" \
-		"Purge outer loop count         : %d\n" \
-		"Purge inner loop count         : %d\n" \
-		"Purge outer loop stride        : %d\n" \
+	p += sprintf(p,
+		     "\nPurge base address             : 0x%016lx\n"
+		     "Purge outer loop count         : %d\n"
+		     "Purge inner loop count         : %d\n"
+		     "Purge outer loop stride        : %d\n"
 		     "Purge inner loop stride        : %d\n",
-		ptce.base,
-		ptce.count[0],
-		ptce.count[1],
-		ptce.stride[0],
-		ptce.stride[1]);
+		     ptce.base, ptce.count[0], ptce.count[1], ptce.stride[0], ptce.stride[1]);

-	p += sprintf(p, "TC Levels                      : %d\n" \
+	p += sprintf(p,
+		     "TC Levels                      : %d\n"
 		     "Unique TC(s)                   : %d\n",
 		     vm_info_1.pal_vm_info_1_s.num_tc_levels,
 		     vm_info_1.pal_vm_info_1_s.max_unique_tcs);

@@ -379,15 +380,14 @@ vm_info(char *page)
 			continue;
 		}

-		p += sprintf(p, "\n%s Translation Cache Level %d:\n" \
-			"\tHash sets           : %d\n" \
-			"\tAssociativity       : %d\n" \
-			"\tNumber of entries   : %d\n" \
+		p += sprintf(p,
+			     "\n%s Translation Cache Level %d:\n"
+			     "\tHash sets           : %d\n"
+			     "\tAssociativity       : %d\n"
+			     "\tNumber of entries   : %d\n"
 			     "\tFlags               : ",
-			cache_types[j+tc_info.tc_unified], i+1,
-			tc_info.tc_num_sets,
-			tc_info.tc_associativity,
-			tc_info.tc_num_entries);
+			     cache_types[j+tc_info.tc_unified], i+1, tc_info.tc_num_sets,
+			     tc_info.tc_associativity, tc_info.tc_num_entries);

 		if (tc_info.tc_pf) p += sprintf(p, "PreferredPageSizeOptimized ");
 		if (tc_info.tc_unified) p += sprintf(p, "Unified ");

@@ -436,17 +436,18 @@ register_info(char *page)

 	if (ia64_pal_rse_info(&phys_stacked, &hints) != 0) return 0;

-	p += sprintf(p, "RSE stacked physical registers   : %ld\n" \
+	p += sprintf(p,
+		     "RSE stacked physical registers   : %ld\n"
 		     "RSE load/store hints             : %ld (%s)\n",
-		phys_stacked,
-		hints.ph_data,
+		     phys_stacked, hints.ph_data,
 		     hints.ph_data < RSE_HINTS_COUNT ? rse_hints[hints.ph_data]: "(\?\?)");

-	if (ia64_pal_debug_info(&iregs, &dregs)) return 0;
+	if (ia64_pal_debug_info(&iregs, &dregs))
+		return 0;

-	p += sprintf(p, "Instruction debug register pairs : %ld\n" \
+	p += sprintf(p,
"Data debug register pairs : %ld\n", "Instruction debug register pairs : %ld\n"
iregs, dregs); "Data debug register pairs : %ld\n", iregs, dregs);
return p - page; return p - page;
} }
...@@ -563,26 +564,21 @@ version_info(char *page) ...@@ -563,26 +564,21 @@ version_info(char *page)
*/ */
if (ia64_pal_version(&min_ver, &cur_ver) != 0) return 0; if (ia64_pal_version(&min_ver, &cur_ver) != 0) return 0;
p += sprintf(p, "PAL_vendor : 0x%02x (min=0x%02x)\n" \ p += sprintf(p,
"PAL_A : %x.%x.%x (min=%x.%x.%x)\n" \ "PAL_vendor : 0x%02x (min=0x%02x)\n"
"PAL_A : %x.%x.%x (min=%x.%x.%x)\n"
"PAL_B : %x.%x.%x (min=%x.%x.%x)\n", "PAL_B : %x.%x.%x (min=%x.%x.%x)\n",
cur_ver.pal_version_s.pv_pal_vendor, cur_ver.pal_version_s.pv_pal_vendor, min_ver.pal_version_s.pv_pal_vendor,
min_ver.pal_version_s.pv_pal_vendor,
cur_ver.pal_version_s.pv_pal_a_model>>4, cur_ver.pal_version_s.pv_pal_a_model>>4,
cur_ver.pal_version_s.pv_pal_a_model&0xf, cur_ver.pal_version_s.pv_pal_a_model&0xf, cur_ver.pal_version_s.pv_pal_a_rev,
cur_ver.pal_version_s.pv_pal_a_rev,
min_ver.pal_version_s.pv_pal_a_model>>4, min_ver.pal_version_s.pv_pal_a_model>>4,
min_ver.pal_version_s.pv_pal_a_model&0xf, min_ver.pal_version_s.pv_pal_a_model&0xf, min_ver.pal_version_s.pv_pal_a_rev,
min_ver.pal_version_s.pv_pal_a_rev,
cur_ver.pal_version_s.pv_pal_b_model>>4, cur_ver.pal_version_s.pv_pal_b_model>>4,
cur_ver.pal_version_s.pv_pal_b_model&0xf, cur_ver.pal_version_s.pv_pal_b_model&0xf, cur_ver.pal_version_s.pv_pal_b_rev,
cur_ver.pal_version_s.pv_pal_b_rev,
min_ver.pal_version_s.pv_pal_b_model>>4, min_ver.pal_version_s.pv_pal_b_model>>4,
min_ver.pal_version_s.pv_pal_b_model&0xf, min_ver.pal_version_s.pv_pal_b_model&0xf, min_ver.pal_version_s.pv_pal_b_rev);
min_ver.pal_version_s.pv_pal_b_rev);
return p - page; return p - page;
} }
...@@ -595,26 +591,20 @@ perfmon_info(char *page) ...@@ -595,26 +591,20 @@ perfmon_info(char *page)
if (ia64_pal_perf_mon_info(pm_buffer, &pm_info) != 0) return 0; if (ia64_pal_perf_mon_info(pm_buffer, &pm_info) != 0) return 0;
p += sprintf(p, "PMC/PMD pairs : %d\n" \ p += sprintf(p,
"Counter width : %d bits\n" \ "PMC/PMD pairs : %d\n"
"Cycle event number : %d\n" \ "Counter width : %d bits\n"
"Retired event number : %d\n" \ "Cycle event number : %d\n"
"Retired event number : %d\n"
"Implemented PMC : ", "Implemented PMC : ",
pm_info.pal_perf_mon_info_s.generic, pm_info.pal_perf_mon_info_s.generic, pm_info.pal_perf_mon_info_s.width,
pm_info.pal_perf_mon_info_s.width, pm_info.pal_perf_mon_info_s.cycles, pm_info.pal_perf_mon_info_s.retired);
pm_info.pal_perf_mon_info_s.cycles,
pm_info.pal_perf_mon_info_s.retired);
p = bitregister_process(p, pm_buffer, 256); p = bitregister_process(p, pm_buffer, 256);
p += sprintf(p, "\nImplemented PMD : "); p += sprintf(p, "\nImplemented PMD : ");
p = bitregister_process(p, pm_buffer+4, 256); p = bitregister_process(p, pm_buffer+4, 256);
p += sprintf(p, "\nCycles count capable : "); p += sprintf(p, "\nCycles count capable : ");
p = bitregister_process(p, pm_buffer+8, 256); p = bitregister_process(p, pm_buffer+8, 256);
p += sprintf(p, "\nRetired bundles count capable : "); p += sprintf(p, "\nRetired bundles count capable : ");
#ifdef CONFIG_ITANIUM #ifdef CONFIG_ITANIUM
...@@ -646,12 +636,11 @@ frequency_info(char *page) ...@@ -646,12 +636,11 @@ frequency_info(char *page)
if (ia64_pal_freq_ratios(&proc, &bus, &itc) != 0) return 0; if (ia64_pal_freq_ratios(&proc, &bus, &itc) != 0) return 0;
p += sprintf(p, "Processor/Clock ratio : %ld/%ld\n" \ p += sprintf(p,
"Bus/Clock ratio : %ld/%ld\n" \ "Processor/Clock ratio : %ld/%ld\n"
"Bus/Clock ratio : %ld/%ld\n"
"ITC/Clock ratio : %ld/%ld\n", "ITC/Clock ratio : %ld/%ld\n",
proc.num, proc.den, proc.num, proc.den, bus.num, bus.den, itc.num, itc.den);
bus.num, bus.den,
itc.num, itc.den);
return p - page; return p - page;
} }
...@@ -665,7 +654,7 @@ tr_info(char *page) ...@@ -665,7 +654,7 @@ tr_info(char *page)
u64 tr_buffer[4]; u64 tr_buffer[4];
pal_vm_info_1_u_t vm_info_1; pal_vm_info_1_u_t vm_info_1;
pal_vm_info_2_u_t vm_info_2; pal_vm_info_2_u_t vm_info_2;
int i, j; u64 i, j;
u64 max[3], pgm; u64 max[3], pgm;
struct ifa_reg { struct ifa_reg {
u64 valid:1; u64 valid:1;
...@@ -711,7 +700,7 @@ tr_info(char *page) ...@@ -711,7 +700,7 @@ tr_info(char *page)
status = ia64_pal_tr_read(j, i, tr_buffer, &tr_valid); status = ia64_pal_tr_read(j, i, tr_buffer, &tr_valid);
if (status != 0) { if (status != 0) {
printk(KERN_ERR "palinfo: pal call failed on tr[%d:%d]=%ld\n", printk(KERN_ERR "palinfo: pal call failed on tr[%lu:%lu]=%ld\n",
i, j, status); i, j, status);
continue; continue;
} }
...@@ -725,33 +714,28 @@ tr_info(char *page) ...@@ -725,33 +714,28 @@ tr_info(char *page)
rid_reg = (struct rid_reg *)&tr_buffer[3]; rid_reg = (struct rid_reg *)&tr_buffer[3];
pgm = -1 << (itir_reg->ps - 12); pgm = -1 << (itir_reg->ps - 12);
p += sprintf(p, "%cTR%d: av=%d pv=%d dv=%d mv=%d\n" \ p += sprintf(p,
"\tppn : 0x%lx\n" \ "%cTR%lu: av=%d pv=%d dv=%d mv=%d\n"
"\tvpn : 0x%lx\n" \ "\tppn : 0x%lx\n"
"\tvpn : 0x%lx\n"
"\tps : ", "\tps : ",
"ID"[i], j,
"ID"[i],
j,
tr_valid.pal_tr_valid_s.access_rights_valid, tr_valid.pal_tr_valid_s.access_rights_valid,
tr_valid.pal_tr_valid_s.priv_level_valid, tr_valid.pal_tr_valid_s.priv_level_valid,
tr_valid.pal_tr_valid_s.dirty_bit_valid, tr_valid.pal_tr_valid_s.dirty_bit_valid,
tr_valid.pal_tr_valid_s.mem_attr_valid, tr_valid.pal_tr_valid_s.mem_attr_valid,
(gr_reg->ppn & pgm)<< 12, (gr_reg->ppn & pgm)<< 12, (ifa_reg->vpn & pgm)<< 12);
(ifa_reg->vpn & pgm)<< 12);
p = bitvector_process(p, 1<< itir_reg->ps); p = bitvector_process(p, 1<< itir_reg->ps);
p += sprintf(p, "\n\tpl : %d\n" \ p += sprintf(p,
"\tar : %d\n" \ "\n\tpl : %d\n"
"\trid : %x\n" \ "\tar : %d\n"
"\tp : %d\n" \ "\trid : %x\n"
"\tma : %d\n" \ "\tp : %d\n"
"\tma : %d\n"
"\td : %d\n", "\td : %d\n",
gr_reg->pl, gr_reg->pl, gr_reg->ar, rid_reg->rid, gr_reg->p, gr_reg->ma,
gr_reg->ar,
rid_reg->rid,
gr_reg->p,
gr_reg->ma,
gr_reg->d); gr_reg->d);
} }
} }
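The pgm mask computed above turns the log2 page size from the itir into an alignment mask for the ppn/vpn fields, which PAL reports in 4KB units. A standalone sketch of the arithmetic, with invented values; note ~0UL << n is the well-defined spelling of the kernel's -1 << n:

#include <stdio.h>

int main(void)
{
	unsigned long ps = 16;		/* translation covers a 64KB page (1 << 16) */
	unsigned long ppn = 0x12345;	/* ppn field, in 4KB (1 << 12) units */
	unsigned long pgm = ~0UL << (ps - 12);	/* clears the sub-page ppn bits */

	/* same computation as (gr_reg->ppn & pgm) << 12 above: */
	printf("page base = 0x%lx\n", (ppn & pgm) << 12);	/* 0x12340000 */
	return 0;
}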
...@@ -776,7 +760,7 @@ static palinfo_entry_t palinfo_entries[]={ ...@@ -776,7 +760,7 @@ static palinfo_entry_t palinfo_entries[]={
{ "tr_info", tr_info, } { "tr_info", tr_info, }
}; };
#define NR_PALINFO_ENTRIES (sizeof(palinfo_entries)/sizeof(palinfo_entry_t)) #define NR_PALINFO_ENTRIES (int) ARRAY_SIZE(palinfo_entries)
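ARRAY_SIZE replaces the open-coded sizeof quotient here and in several files below; its result is a size_t, hence the (int) cast wherever it meets a signed loop counter. A minimal standalone illustration (the macro body mirrors what <linux/kernel.h> provides):

#include <stdio.h>

#define ARRAY_SIZE(x)	(sizeof(x) / sizeof((x)[0]))

static const char *entries[] = { "version_info", "vm_info", "tr_info" };

int main(void)
{
	/* sizeof yields size_t, so cast when comparing against a signed index: */
	for (int i = 0; i < (int) ARRAY_SIZE(entries); ++i)
		printf("%s\n", entries[i]);
	return 0;
}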
/* /*
* this array is used to keep track of the proc entries we create. This is * this array is used to keep track of the proc entries we create. This is
......
...@@ -364,7 +364,8 @@ typedef struct { ...@@ -364,7 +364,8 @@ typedef struct {
#define PFM_CMD_IDX(cmd) (cmd) #define PFM_CMD_IDX(cmd) (cmd)
#define PFM_CMD_IS_VALID(cmd) ((PFM_CMD_IDX(cmd) >= 0) && (PFM_CMD_IDX(cmd) < PFM_CMD_COUNT) \ #define PFM_CMD_IS_VALID(cmd) ((PFM_CMD_IDX(cmd) >= 0) \
&& (PFM_CMD_IDX(cmd) < (int) PFM_CMD_COUNT) \
&& pfm_cmd_tab[PFM_CMD_IDX(cmd)].cmd_func != NULL) && pfm_cmd_tab[PFM_CMD_IDX(cmd)].cmd_func != NULL)
#define PFM_CMD_USE_PID(cmd) ((pfm_cmd_tab[PFM_CMD_IDX(cmd)].cmd_flags & PFM_CMD_PID) != 0) #define PFM_CMD_USE_PID(cmd) ((pfm_cmd_tab[PFM_CMD_IDX(cmd)].cmd_flags & PFM_CMD_PID) != 0)
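The extra (int) cast matters because PFM_CMD_COUNT is now an ARRAY_SIZE() expression of type size_t: comparing a signed cmd against it silently promotes cmd to unsigned, which is exactly the class of bug GCC 3.4 warns about. A standalone illustration:

#include <stdio.h>

int main(void)
{
	int cmd = -1;
	size_t count = 34;	/* stand-in for PFM_CMD_COUNT */

	/* cmd is promoted to unsigned: -1 becomes SIZE_MAX, so this is false
	   (and GCC 3.4 warns about the signed/unsigned comparison): */
	if (cmd < count)
		printf("unreachable\n");

	/* the kernel's fix: keep the comparison in signed arithmetic */
	if (cmd >= 0 && cmd < (int) count)
		printf("valid command\n");
	else
		printf("invalid command\n");
	return 0;
}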
...@@ -726,8 +727,7 @@ pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long ad ...@@ -726,8 +727,7 @@ pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long ad
static unsigned long static unsigned long
pfm_smpl_entry_size(unsigned long *which, unsigned long size) pfm_smpl_entry_size(unsigned long *which, unsigned long size)
{ {
unsigned long res = 0; unsigned long i, res = 0;
int i;
for (i=0; i < size; i++, which++) res += hweight64(*which); for (i=0; i < size; i++, which++) res += hweight64(*which);
...@@ -2172,11 +2172,11 @@ pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, stru ...@@ -2172,11 +2172,11 @@ pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, stru
* never leaves the current CPU and the state * never leaves the current CPU and the state
* is shared by all processes running on it * is shared by all processes running on it
*/ */
for (i=0; i < pmu_conf.num_ibrs; i++) { for (i=0; i < (int) pmu_conf.num_ibrs; i++) {
ia64_set_ibr(i, 0UL); ia64_set_ibr(i, 0UL);
} }
ia64_srlz_i(); ia64_srlz_i();
for (i=0; i < pmu_conf.num_dbrs; i++) { for (i=0; i < (int) pmu_conf.num_dbrs; i++) {
ia64_set_dbr(i, 0UL); ia64_set_dbr(i, 0UL);
} }
ia64_srlz_d(); ia64_srlz_d();
...@@ -2518,7 +2518,7 @@ static pfm_cmd_desc_t pfm_cmd_tab[]={ ...@@ -2518,7 +2518,7 @@ static pfm_cmd_desc_t pfm_cmd_tab[]={
/* 33 */{ pfm_write_dbrs, PFM_CMD_PID|PFM_CMD_CTX|PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, sizeof(pfarg_dbreg_t)} /* 33 */{ pfm_write_dbrs, PFM_CMD_PID|PFM_CMD_CTX|PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, sizeof(pfarg_dbreg_t)}
#endif #endif
}; };
#define PFM_CMD_COUNT (sizeof(pfm_cmd_tab)/sizeof(pfm_cmd_desc_t)) #define PFM_CMD_COUNT ARRAY_SIZE(pfm_cmd_tab)
static int static int
check_task_state(struct task_struct *task) check_task_state(struct task_struct *task)
...@@ -3131,7 +3131,7 @@ pfm_overflow_handler(int mode, struct task_struct *task, pfm_context_t *ctx, u64 ...@@ -3131,7 +3131,7 @@ pfm_overflow_handler(int mode, struct task_struct *task, pfm_context_t *ctx, u64
return 0x1UL; return 0x1UL;
} }
static void static irqreturn_t
pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs) pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
{ {
u64 pmc0; u64 pmc0;
...@@ -3146,7 +3146,7 @@ pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs) ...@@ -3146,7 +3146,7 @@ pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
if (pfm_alternate_intr_handler) { if (pfm_alternate_intr_handler) {
(*pfm_alternate_intr_handler->handler)(irq, arg, regs); (*pfm_alternate_intr_handler->handler)(irq, arg, regs);
put_cpu(); put_cpu();
return; return IRQ_HANDLED;
} }
/* /*
...@@ -3171,7 +3171,7 @@ pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs) ...@@ -3171,7 +3171,7 @@ pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
printk(KERN_DEBUG "perfmon: Spurious overflow interrupt: process %d has " printk(KERN_DEBUG "perfmon: Spurious overflow interrupt: process %d has "
"no PFM context\n", task->pid); "no PFM context\n", task->pid);
put_cpu(); put_cpu();
return; return IRQ_HANDLED;
} }
/* /*
...@@ -3199,6 +3199,7 @@ pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs) ...@@ -3199,6 +3199,7 @@ pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
pfm_stats[smp_processor_id()].pfm_spurious_ovfl_intr_count++; pfm_stats[smp_processor_id()].pfm_spurious_ovfl_intr_count++;
} }
put_cpu_no_resched(); put_cpu_no_resched();
return IRQ_HANDLED;
} }
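All three exits now report IRQ_HANDLED, per the 2.5 interrupt-handler convention: handlers return an irqreturn_t so the core can detect unhandled (screaming) interrupt lines. The converted shape, reduced to a sketch — my_device_raised_irq() is a hypothetical helper, not kernel API:

static irqreturn_t
my_interrupt_handler (int irq, void *arg, struct pt_regs *regs)
{
	if (!my_device_raised_irq(arg))
		return IRQ_NONE;	/* not ours; lets the core flag bad lines */

	/* ... acknowledge and service the device ... */

	return IRQ_HANDLED;
}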
/* for debug only */ /* for debug only */
...@@ -3452,11 +3453,11 @@ pfm_load_regs (struct task_struct *task) ...@@ -3452,11 +3453,11 @@ pfm_load_regs (struct task_struct *task)
* in the next version of perfmon. * in the next version of perfmon.
*/ */
if (ctx->ctx_fl_using_dbreg) { if (ctx->ctx_fl_using_dbreg) {
for (i=0; i < pmu_conf.num_ibrs; i++) { for (i=0; i < (int) pmu_conf.num_ibrs; i++) {
ia64_set_ibr(i, t->ibr[i]); ia64_set_ibr(i, t->ibr[i]);
} }
ia64_srlz_i(); ia64_srlz_i();
for (i=0; i < pmu_conf.num_dbrs; i++) { for (i=0; i < (int) pmu_conf.num_dbrs; i++) {
ia64_set_dbr(i, t->dbr[i]); ia64_set_dbr(i, t->dbr[i]);
} }
ia64_srlz_d(); ia64_srlz_d();
...@@ -3467,7 +3468,7 @@ pfm_load_regs (struct task_struct *task) ...@@ -3467,7 +3468,7 @@ pfm_load_regs (struct task_struct *task)
* this path cannot be used in SMP * this path cannot be used in SMP
*/ */
if (owner == task) { if (owner == task) {
if (atomic_read(&ctx->ctx_last_cpu) != smp_processor_id()) if ((unsigned int) atomic_read(&ctx->ctx_last_cpu) != smp_processor_id())
DBprintk(("invalid last_cpu=%d for [%d]\n", DBprintk(("invalid last_cpu=%d for [%d]\n",
atomic_read(&ctx->ctx_last_cpu), task->pid)); atomic_read(&ctx->ctx_last_cpu), task->pid));
...@@ -3741,7 +3742,7 @@ pfm_flush_regs (struct task_struct *task) ...@@ -3741,7 +3742,7 @@ pfm_flush_regs (struct task_struct *task)
* *
*/ */
if (atomic_read(&ctx->ctx_last_cpu) != smp_processor_id()) if ((unsigned int) atomic_read(&ctx->ctx_last_cpu) != smp_processor_id())
printk(KERN_DEBUG "perfmon: [%d] last_cpu=%d\n", printk(KERN_DEBUG "perfmon: [%d] last_cpu=%d\n",
task->pid, atomic_read(&ctx->ctx_last_cpu)); task->pid, atomic_read(&ctx->ctx_last_cpu));
......
...@@ -123,8 +123,8 @@ show_regs (struct pt_regs *regs) ...@@ -123,8 +123,8 @@ show_regs (struct pt_regs *regs)
if (user_mode(regs)) { if (user_mode(regs)) {
/* print the stacked registers */ /* print the stacked registers */
unsigned long val, sof, *bsp, ndirty; unsigned long val, *bsp, ndirty;
int i, is_nat = 0; int i, sof, is_nat = 0;
sof = regs->cr_ifs & 0x7f; /* size of frame */ sof = regs->cr_ifs & 0x7f; /* size of frame */
ndirty = (regs->loadrs >> 19); ndirty = (regs->loadrs >> 19);
...@@ -379,6 +379,7 @@ copy_thread (int nr, unsigned long clone_flags, ...@@ -379,6 +379,7 @@ copy_thread (int nr, unsigned long clone_flags,
# define THREAD_FLAGS_TO_SET 0 # define THREAD_FLAGS_TO_SET 0
p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR) p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR)
| THREAD_FLAGS_TO_SET); | THREAD_FLAGS_TO_SET);
p->thread.last_fph_cpu = -1;
#ifdef CONFIG_IA32_SUPPORT #ifdef CONFIG_IA32_SUPPORT
/* /*
* If we're cloning an IA32 task then save the IA32 extra * If we're cloning an IA32 task then save the IA32 extra
......
...@@ -202,17 +202,16 @@ static unsigned long ...@@ -202,17 +202,16 @@ static unsigned long
get_rnat (struct pt_regs *pt, struct switch_stack *sw, get_rnat (struct pt_regs *pt, struct switch_stack *sw,
unsigned long *krbs, unsigned long *urnat_addr) unsigned long *krbs, unsigned long *urnat_addr)
{ {
unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr, kmask = ~0UL; unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr, umask = 0UL;
unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift; unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
long num_regs; long num_regs;
kbsp = (unsigned long *) sw->ar_bspstore; kbsp = (unsigned long *) sw->ar_bspstore;
ubspstore = (unsigned long *) pt->ar_bspstore; ubspstore = (unsigned long *) pt->ar_bspstore;
/* /*
* First, figure out which bit number slot 0 in user-land maps * First, figure out which bit number slot 0 in user-land maps to in the kernel
* to in the kernel rnat. Do this by figuring out how many * rnat. Do this by figuring out how many register slots we're beyond the user's
* register slots we're beyond the user's backingstore and * backingstore and then computing the equivalent address in kernel space.
* then computing the equivalent address in kernel space.
*/ */
num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1); num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs); slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
...@@ -222,8 +221,8 @@ get_rnat (struct pt_regs *pt, struct switch_stack *sw, ...@@ -222,8 +221,8 @@ get_rnat (struct pt_regs *pt, struct switch_stack *sw,
if (ubspstore + 63 > urnat_addr) { if (ubspstore + 63 > urnat_addr) {
/* some bits need to be merged in from pt->ar_rnat */ /* some bits need to be merged in from pt->ar_rnat */
kmask = ~((1UL << ia64_rse_slot_num(ubspstore)) - 1); umask = ((1UL << ia64_rse_slot_num(ubspstore)) - 1);
urnat = (pt->ar_rnat & ~kmask); urnat = (pt->ar_rnat & umask);
} }
if (rnat0_kaddr >= kbsp) { if (rnat0_kaddr >= kbsp) {
rnat0 = sw->ar_rnat; rnat0 = sw->ar_rnat;
...@@ -235,7 +234,7 @@ get_rnat (struct pt_regs *pt, struct switch_stack *sw, ...@@ -235,7 +234,7 @@ get_rnat (struct pt_regs *pt, struct switch_stack *sw,
} else if (rnat1_kaddr > krbs) { } else if (rnat1_kaddr > krbs) {
rnat1 = *rnat1_kaddr; rnat1 = *rnat1_kaddr;
} }
urnat |= ((rnat1 << (63 - shift)) | (rnat0 >> shift)) & kmask; urnat |= ((rnat1 << (63 - shift)) | (rnat0 >> shift)) & ~umask;
return urnat; return urnat;
} }
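The kmask -> umask rewrite inverts the convention: umask now has 1-bits exactly where the NaT bits live in pt->ar_rnat (the slots below the user backing store), and its complement selects the bits reconstructed from the kernel RBS. The merge, as standalone arithmetic with invented values:

#include <stdio.h>

int main(void)
{
	unsigned long pt_ar_rnat = 0x00000000000000aaUL;	/* NaT bits kept in pt->ar_rnat */
	unsigned long kernel_rnat = 0x5555555555555500UL;	/* NaT bits rebuilt from the kernel RBS */
	unsigned long slot = 8;					/* ia64_rse_slot_num(ubspstore) */
	unsigned long umask = (1UL << slot) - 1;

	/* same merge as get_rnat(): low bits from pt, the rest from the RBS */
	unsigned long urnat = (pt_ar_rnat & umask) | (kernel_rnat & ~umask);
	printf("urnat = %016lx\n", urnat);
	return 0;
}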
...@@ -246,17 +245,19 @@ static void ...@@ -246,17 +245,19 @@ static void
put_rnat (struct pt_regs *pt, struct switch_stack *sw, put_rnat (struct pt_regs *pt, struct switch_stack *sw,
unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat) unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat)
{ {
unsigned long rnat0 = 0, rnat1 = 0, rnat = 0, *slot0_kaddr, kmask = ~0UL, mask; unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift; unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift, slot, ndirty;
long num_regs; long num_regs, nbits;
ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
nbits = ndirty % 63;
kbsp = (unsigned long *) sw->ar_bspstore; kbsp = (unsigned long *) sw->ar_bspstore;
ubspstore = (unsigned long *) pt->ar_bspstore; ubspstore = (unsigned long *) pt->ar_bspstore;
/* /*
* First, figure out which bit number slot 0 in user-land maps * First, figure out which bit number slot 0 in user-land maps to in the kernel
* to in the kernel rnat. Do this by figuring out how many * rnat. Do this by figuring out how many register slots we're beyond the user's
* register slots we're beyond the user's backingstore and * backingstore and then computing the equivalent address in kernel space.
* then computing the equivalent address in kernel space.
*/ */
num_regs = (long) ia64_rse_num_regs(ubspstore, urnat_addr + 1); num_regs = (long) ia64_rse_num_regs(ubspstore, urnat_addr + 1);
slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs); slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
...@@ -264,29 +265,37 @@ put_rnat (struct pt_regs *pt, struct switch_stack *sw, ...@@ -264,29 +265,37 @@ put_rnat (struct pt_regs *pt, struct switch_stack *sw,
rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr); rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
rnat0_kaddr = rnat1_kaddr - 64; rnat0_kaddr = rnat1_kaddr - 64;
printk("%s: ubspstore=%p urnat_addr=%p\n", __FUNCTION__, ubspstore, urnat_addr);
if (ubspstore + 63 > urnat_addr) { if (ubspstore + 63 > urnat_addr) {
/* some bits need to be placed in pt->ar_rnat: */ /* some bits need to be placed in pt->ar_rnat: */
kmask = ~((1UL << ia64_rse_slot_num(ubspstore)) - 1); slot = ia64_rse_slot_num(ubspstore);
pt->ar_rnat = (pt->ar_rnat & kmask) | (rnat & ~kmask); umask = ((1UL << slot) - 1);
pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
nbits -= slot;
if (nbits <= 0)
return;
} }
mask = (1UL << nbits) - 1;
/* /*
* Note: Section 11.1 of the EAS guarantees that bit 63 of an * Note: Section 11.1 of the EAS guarantees that bit 63 of an
* rnat slot is ignored, so we don't have to clear it here. * rnat slot is ignored, so we don't have to clear it here.
*/ */
rnat0 = (urnat << shift); rnat0 = (urnat << shift);
mask = ~0UL << shift; m = mask << shift;
printk("%s: rnat0=%016lx, m=%016lx, rnat0_kaddr=%p kbsp=%p\n", __FUNCTION__, rnat0, m, rnat0_kaddr, kbsp);
if (rnat0_kaddr >= kbsp) { if (rnat0_kaddr >= kbsp) {
sw->ar_rnat = (sw->ar_rnat & ~mask) | (rnat0 & mask); sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
} else if (rnat0_kaddr > krbs) { } else if (rnat0_kaddr > krbs) {
*rnat0_kaddr = ((*rnat0_kaddr & ~mask) | (rnat0 & mask)); *rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));
} }
rnat1 = (urnat >> (63 - shift)); rnat1 = (urnat >> (63 - shift));
mask = ~0UL >> (63 - shift); m = mask >> (63 - shift);
printk("%s: rnat1=%016lx, m=%016lx, rnat1_kaddr=%p kbsp=%p\n", __FUNCTION__, rnat1, m, rnat1_kaddr, kbsp);
if (rnat1_kaddr >= kbsp) { if (rnat1_kaddr >= kbsp) {
sw->ar_rnat = (sw->ar_rnat & ~mask) | (rnat1 & mask); sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
} else if (rnat1_kaddr > krbs) { } else if (rnat1_kaddr > krbs) {
*rnat1_kaddr = ((*rnat1_kaddr & ~mask) | (rnat1 & mask)); *rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
} }
} }
...@@ -612,10 +621,8 @@ ia64_sync_fph (struct task_struct *task) ...@@ -612,10 +621,8 @@ ia64_sync_fph (struct task_struct *task)
task->thread.last_fph_cpu = -1; /* force reload */ task->thread.last_fph_cpu = -1; /* force reload */
memset(&task->thread.fph, 0, sizeof(task->thread.fph)); memset(&task->thread.fph, 0, sizeof(task->thread.fph));
} }
#ifndef CONFIG_SMP
if (ia64_get_fpu_owner() == task) if (ia64_get_fpu_owner() == task)
ia64_set_fpu_owner(0); ia64_set_fpu_owner(0);
#endif
psr->dfh = 1; psr->dfh = 1;
} }
...@@ -704,7 +711,9 @@ access_uarea (struct task_struct *child, unsigned long addr, unsigned long *data ...@@ -704,7 +711,9 @@ access_uarea (struct task_struct *child, unsigned long addr, unsigned long *data
case PT_R4: case PT_R5: case PT_R6: case PT_R7: case PT_R4: case PT_R5: case PT_R6: case PT_R7:
if (write_access) { if (write_access) {
/* read NaT bit first: */ /* read NaT bit first: */
ret = unw_get_gr(&info, (addr - PT_R4)/8 + 4, data, &nat); unsigned long dummy;
ret = unw_get_gr(&info, (addr - PT_R4)/8 + 4, &dummy, &nat);
if (ret < 0) if (ret < 0)
return ret; return ret;
} }
......
...@@ -38,7 +38,7 @@ static salinfo_entry_t salinfo_entries[]={ ...@@ -38,7 +38,7 @@ static salinfo_entry_t salinfo_entries[]={
{ "itc_drift", IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT, }, { "itc_drift", IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT, },
}; };
#define NR_SALINFO_ENTRIES (sizeof(salinfo_entries)/sizeof(salinfo_entry_t)) #define NR_SALINFO_ENTRIES ARRAY_SIZE(salinfo_entries)
/* /*
* One for each feature and one more for the directory entry... * One for each feature and one more for the directory entry...
......
...@@ -59,6 +59,7 @@ unsigned long ia64_cycles_per_usec; ...@@ -59,6 +59,7 @@ unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param; struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info; struct screen_info screen_info;
unsigned long ia64_max_cacheline_size;
unsigned long ia64_iobase; /* virtual address for I/O accesses */ unsigned long ia64_iobase; /* virtual address for I/O accesses */
struct io_space io_space[MAX_IO_SPACES]; struct io_space io_space[MAX_IO_SPACES];
unsigned int num_io_spaces; unsigned int num_io_spaces;
...@@ -501,7 +502,7 @@ show_cpuinfo (struct seq_file *m, void *v) ...@@ -501,7 +502,7 @@ show_cpuinfo (struct seq_file *m, void *v)
memcpy(features, " standard", 10); memcpy(features, " standard", 10);
cp = features; cp = features;
sep = 0; sep = 0;
for (i = 0; i < sizeof(feature_bits)/sizeof(feature_bits[0]); ++i) { for (i = 0; i < (int) ARRAY_SIZE(feature_bits); ++i) {
if (mask & feature_bits[i].mask) { if (mask & feature_bits[i].mask) {
if (sep) if (sep)
*cp++ = sep; *cp++ = sep;
...@@ -632,6 +633,39 @@ setup_per_cpu_areas (void) ...@@ -632,6 +633,39 @@ setup_per_cpu_areas (void)
/* start_kernel() requires this... */ /* start_kernel() requires this... */
} }
static void
get_max_cacheline_size (void)
{
unsigned long line_size, max = 1;
u64 l, levels, unique_caches;
pal_cache_config_info_t cci;
s64 status;
status = ia64_pal_cache_summary(&levels, &unique_caches);
if (status != 0) {
printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
__FUNCTION__, status);
max = SMP_CACHE_BYTES;
goto out;
}
for (l = 0; l < levels; ++l) {
status = ia64_pal_cache_config_info(l, /* cache_type (data_or_unified)= */ 2,
&cci);
if (status != 0) {
printk(KERN_ERR
"%s: ia64_pal_cache_config_info(l=%lu) failed (status=%ld)\n",
__FUNCTION__, l, status);
max = SMP_CACHE_BYTES;
/* cci is stale after a failed PAL call; don't derive a line size from it: */
continue;
}
line_size = 1 << cci.pcci_line_size;
if (line_size > max)
max = line_size;
}
out:
if (max > ia64_max_cacheline_size)
ia64_max_cacheline_size = max;
}
/* /*
* cpu_init() initializes state that is per-CPU. This function acts * cpu_init() initializes state that is per-CPU. This function acts
...@@ -675,6 +709,8 @@ cpu_init (void) ...@@ -675,6 +709,8 @@ cpu_init (void)
cpu_info->node_data = get_node_data_ptr(); cpu_info->node_data = get_node_data_ptr();
#endif #endif
get_max_cacheline_size();
/* /*
* We can't pass "local_cpu_data" to identify_cpu() because we haven't called * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
* ia64_mmu_init() yet. And we can't call ia64_mmu_init() first because it * ia64_mmu_init() yet. And we can't call ia64_mmu_init() first because it
......
...@@ -528,7 +528,7 @@ ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall) ...@@ -528,7 +528,7 @@ ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
else else
errno = -errno; errno = -errno;
} }
} else if (scr->pt.r10 != -1) } else if ((long) scr->pt.r10 != -1)
/* /*
* A system call has to be restarted only if one of the error codes * A system call has to be restarted only if one of the error codes
* ERESTARTNOHAND, ERESTARTSYS, or ERESTARTNOINTR is returned. If r10 * ERESTARTNOHAND, ERESTARTSYS, or ERESTARTNOINTR is returned. If r10
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
* SMP Support * SMP Support
* *
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com> * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
* Copyright (C) 1999, 2001 David Mosberger-Tang <davidm@hpl.hp.com> * Copyright (C) 1999, 2001, 2003 David Mosberger-Tang <davidm@hpl.hp.com>
* *
* Lots of stuff stolen from arch/alpha/kernel/smp.c * Lots of stuff stolen from arch/alpha/kernel/smp.c
* *
...@@ -87,7 +87,7 @@ stop_this_cpu (void) ...@@ -87,7 +87,7 @@ stop_this_cpu (void)
cpu_halt(); cpu_halt();
} }
void irqreturn_t
handle_IPI (int irq, void *dev_id, struct pt_regs *regs) handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
{ {
int this_cpu = get_cpu(); int this_cpu = get_cpu();
...@@ -147,10 +147,11 @@ handle_IPI (int irq, void *dev_id, struct pt_regs *regs) ...@@ -147,10 +147,11 @@ handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
mb(); /* Order data access and bit testing. */ mb(); /* Order data access and bit testing. */
} }
put_cpu(); put_cpu();
return IRQ_HANDLED;
} }
/* /*
* Called with preemption disabled * Called with preemption disabled.
*/ */
static inline void static inline void
send_IPI_single (int dest_cpu, int op) send_IPI_single (int dest_cpu, int op)
...@@ -160,12 +161,12 @@ send_IPI_single (int dest_cpu, int op) ...@@ -160,12 +161,12 @@ send_IPI_single (int dest_cpu, int op)
} }
/* /*
* Called with preemption disabled * Called with preemption disabled.
*/ */
static inline void static inline void
send_IPI_allbutself (int op) send_IPI_allbutself (int op)
{ {
int i; unsigned int i;
for (i = 0; i < NR_CPUS; i++) { for (i = 0; i < NR_CPUS; i++) {
if (cpu_online(i) && i != smp_processor_id()) if (cpu_online(i) && i != smp_processor_id())
...@@ -174,7 +175,7 @@ send_IPI_allbutself (int op) ...@@ -174,7 +175,7 @@ send_IPI_allbutself (int op)
} }
/* /*
* Called with preemption disabled * Called with preemption disabled.
*/ */
static inline void static inline void
send_IPI_all (int op) send_IPI_all (int op)
...@@ -187,7 +188,7 @@ send_IPI_all (int op) ...@@ -187,7 +188,7 @@ send_IPI_all (int op)
} }
/* /*
* Called with preemption disabled * Called with preemption disabled.
*/ */
static inline void static inline void
send_IPI_self (int op) send_IPI_self (int op)
...@@ -196,7 +197,7 @@ send_IPI_self (int op) ...@@ -196,7 +197,7 @@ send_IPI_self (int op)
} }
/* /*
* Called with preemption disabled * Called with preemption disabled.
*/ */
void void
smp_send_reschedule (int cpu) smp_send_reschedule (int cpu)
......
...@@ -192,6 +192,7 @@ ia64_sync_itc (unsigned int master) ...@@ -192,6 +192,7 @@ ia64_sync_itc (unsigned int master)
{ {
long i, delta, adj, adjust_latency = 0, done = 0; long i, delta, adj, adjust_latency = 0, done = 0;
unsigned long flags, rt, master_time_stamp, bound; unsigned long flags, rt, master_time_stamp, bound;
extern void ia64_cpu_local_tick (void);
#if DEBUG_ITC_SYNC #if DEBUG_ITC_SYNC
struct { struct {
long rt; /* roundtrip time */ long rt; /* roundtrip time */
...@@ -246,6 +247,16 @@ ia64_sync_itc (unsigned int master) ...@@ -246,6 +247,16 @@ ia64_sync_itc (unsigned int master)
printk(KERN_INFO "CPU %d: synchronized ITC with CPU %u (last diff %ld cycles, " printk(KERN_INFO "CPU %d: synchronized ITC with CPU %u (last diff %ld cycles, "
"maxerr %lu cycles)\n", smp_processor_id(), master, delta, rt); "maxerr %lu cycles)\n", smp_processor_id(), master, delta, rt);
/*
* Check whether we sync'd the itc ahead of the next timer interrupt. If so, just
* reset it.
*/
if (time_after(ia64_get_itc(), local_cpu_data->itm_next)) {
Dprintk("CPU %d: oops, jumped a timer tick; resetting timer.\n",
smp_processor_id());
ia64_cpu_local_tick();
}
} }
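The new check relies on time_after(), which stays correct when the free-running counter wraps because it compares via a signed difference rather than raw magnitude. In spirit (the real macro lives in the kernel's timer headers):

#include <stdio.h>

/* the kernel's wrap-safe comparison, in spirit: */
#define time_after(a, b)	((long)(b) - (long)(a) < 0)

int main(void)
{
	unsigned long b = ~0UL - 5;	/* counter just before wraparound */
	unsigned long a = b + 10;	/* 10 ticks later, after the wrap */

	/* a naive a > b fails here, time_after() does not: */
	printf("naive: %d, time_after: %d\n", a > b, time_after(a, b));
	return 0;
}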
/* /*
...@@ -311,15 +322,6 @@ smp_callin (void) ...@@ -311,15 +322,6 @@ smp_callin (void)
*/ */
Dprintk("Going to syncup ITC with BP.\n"); Dprintk("Going to syncup ITC with BP.\n");
ia64_sync_itc(0); ia64_sync_itc(0);
/*
* Make sure we didn't sync the itc ahead of the next
* timer interrupt, if so, just reset it.
*/
if (time_after(ia64_get_itc(),local_cpu_data->itm_next)) {
Dprintk("oops, jumped a timer.\n");
ia64_cpu_local_tick();
}
} }
/* /*
......
...@@ -83,11 +83,26 @@ gettimeoffset (void) ...@@ -83,11 +83,26 @@ gettimeoffset (void)
return (elapsed_cycles*local_cpu_data->nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT; return (elapsed_cycles*local_cpu_data->nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT;
} }
static inline void
set_normalized_timespec (struct timespec *ts, time_t sec, long nsec)
{
while (nsec >= NSEC_PER_SEC) {
nsec -= NSEC_PER_SEC;
++sec;
}
while (nsec < 0) {
nsec += NSEC_PER_SEC;
--sec;
}
ts->tv_sec = sec;
ts->tv_nsec = nsec;
}
void void
do_settimeofday (struct timeval *tv) do_settimeofday (struct timeval *tv)
{ {
time_t sec = tv->tv_sec; time_t wtm_sec, sec = tv->tv_sec;
long nsec = tv->tv_usec * 1000; long wtm_nsec, nsec = tv->tv_usec * 1000;
write_seqlock_irq(&xtime_lock); write_seqlock_irq(&xtime_lock);
{ {
...@@ -99,13 +114,12 @@ do_settimeofday (struct timeval *tv) ...@@ -99,13 +114,12 @@ do_settimeofday (struct timeval *tv)
*/ */
nsec -= gettimeoffset(); nsec -= gettimeoffset();
while (nsec < 0) { wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
nsec += 1000000000; wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
sec--;
} set_normalized_timespec(&xtime, sec, nsec);
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
xtime.tv_sec = sec;
xtime.tv_nsec = nsec;
time_adjust = 0; /* stop active adjtime() */ time_adjust = 0; /* stop active adjtime() */
time_status |= STA_UNSYNC; time_status |= STA_UNSYNC;
time_maxerror = NTP_PHASE_LIMIT; time_maxerror = NTP_PHASE_LIMIT;
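The wall_to_monotonic bookkeeping preserves the invariant that xtime + wall_to_monotonic never jumps when wall time is set: the offset absorbs exactly the step applied to xtime. Reduced to whole seconds:

#include <stdio.h>

int main(void)
{
	long xtime = 1000, wtm = -1000;	/* monotonic = xtime + wtm = 0 at boot */
	long new_time = 5000;		/* settimeofday() steps wall time ahead */

	/* same adjustment as do_settimeofday(): */
	wtm += xtime - new_time;
	xtime = new_time;

	printf("monotonic still %ld\n", xtime + wtm);	/* unchanged: 0 */
	return 0;
}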
...@@ -166,8 +180,8 @@ do_gettimeofday (struct timeval *tv) ...@@ -166,8 +180,8 @@ do_gettimeofday (struct timeval *tv)
usec = (nsec + offset) / 1000; usec = (nsec + offset) / 1000;
while (unlikely(usec >= 1000000)) { while (unlikely(usec >= USEC_PER_SEC)) {
usec -= 1000000; usec -= USEC_PER_SEC;
++sec; ++sec;
} }
...@@ -175,8 +189,8 @@ do_gettimeofday (struct timeval *tv) ...@@ -175,8 +189,8 @@ do_gettimeofday (struct timeval *tv)
tv->tv_usec = usec; tv->tv_usec = usec;
} }
static void static irqreturn_t
timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
{ {
unsigned long new_itm; unsigned long new_itm;
...@@ -231,12 +245,13 @@ timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) ...@@ -231,12 +245,13 @@ timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
ia64_set_itm(new_itm); ia64_set_itm(new_itm);
/* double check, in case we got hit by a (slow) PMI: */ /* double check, in case we got hit by a (slow) PMI: */
} while (time_after_eq(ia64_get_itc(), new_itm)); } while (time_after_eq(ia64_get_itc(), new_itm));
return IRQ_HANDLED;
} }
/* /*
* Encapsulate access to the itm structure for SMP. * Encapsulate access to the itm structure for SMP.
*/ */
void __init void
ia64_cpu_local_tick (void) ia64_cpu_local_tick (void)
{ {
int cpu = smp_processor_id(); int cpu = smp_processor_id();
...@@ -281,7 +296,7 @@ ia64_init_itm (void) ...@@ -281,7 +296,7 @@ ia64_init_itm (void)
if (status != 0) { if (status != 0) {
/* invent "random" values */ /* invent "random" values */
printk(KERN_ERR printk(KERN_ERR
"SAL/PAL failed to obtain frequency info---inventing reasonably values\n"); "SAL/PAL failed to obtain frequency info---inventing reasonable values\n");
platform_base_freq = 100000000; platform_base_freq = 100000000;
itc_ratio.num = 3; itc_ratio.num = 3;
itc_ratio.den = 1; itc_ratio.den = 1;
...@@ -305,8 +320,8 @@ ia64_init_itm (void) ...@@ -305,8 +320,8 @@ ia64_init_itm (void)
local_cpu_data->proc_freq = (platform_base_freq*proc_ratio.num)/proc_ratio.den; local_cpu_data->proc_freq = (platform_base_freq*proc_ratio.num)/proc_ratio.den;
local_cpu_data->itc_freq = itc_freq; local_cpu_data->itc_freq = itc_freq;
local_cpu_data->cyc_per_usec = (itc_freq + 500000) / 1000000; local_cpu_data->cyc_per_usec = (itc_freq + USEC_PER_SEC/2) / USEC_PER_SEC;
local_cpu_data->nsec_per_cyc = ((1000000000UL<<IA64_NSEC_PER_CYC_SHIFT) local_cpu_data->nsec_per_cyc = ((NSEC_PER_SEC<<IA64_NSEC_PER_CYC_SHIFT)
+ itc_freq/2)/itc_freq; + itc_freq/2)/itc_freq;
/* Setup the CPU local timer tick */ /* Setup the CPU local timer tick */
...@@ -323,6 +338,12 @@ void __init ...@@ -323,6 +338,12 @@ void __init
time_init (void) time_init (void)
{ {
register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction); register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
efi_gettimeofday((struct timeval *) &xtime); efi_gettimeofday(&xtime);
ia64_init_itm(); ia64_init_itm();
/*
* Initialize wall_to_monotonic such that adding it to xtime will yield zero, the
* tv_nsec field must be normalized (i.e., 0 <= nsec < NSEC_PER_SEC).
*/
set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
} }
...@@ -682,7 +682,7 @@ finish_prologue (struct unw_state_record *sr) ...@@ -682,7 +682,7 @@ finish_prologue (struct unw_state_record *sr)
* First, resolve implicit register save locations (see Section "11.4.2.3 Rules * First, resolve implicit register save locations (see Section "11.4.2.3 Rules
* for Using Unwind Descriptors", rule 3): * for Using Unwind Descriptors", rule 3):
*/ */
for (i = 0; i < (int) (sizeof(unw.save_order)/sizeof(unw.save_order[0])); ++i) { for (i = 0; i < (int) ARRAY_SIZE(unw.save_order); ++i) {
reg = sr->curr.reg + unw.save_order[i]; reg = sr->curr.reg + unw.save_order[i];
if (reg->where == UNW_WHERE_GR_SAVE) { if (reg->where == UNW_WHERE_GR_SAVE) {
reg->where = UNW_WHERE_GR; reg->where = UNW_WHERE_GR;
...@@ -1214,13 +1214,13 @@ script_new (unsigned long ip) ...@@ -1214,13 +1214,13 @@ script_new (unsigned long ip)
spin_unlock(&unw.lock); spin_unlock(&unw.lock);
/* /*
* XXX We'll deadlock here if we interrupt a thread that is * We'd deadlock here if we interrupted a thread that is holding a read lock on
* holding a read lock on script->lock. A try_write_lock() * script->lock. Thus, if the write_trylock() fails, we simply bail out. The
* might be mighty handy here... Alternatively, we could * alternative would be to disable interrupts whenever we hold a read-lock, but
* disable interrupts whenever we hold a read-lock, but that * that seems silly.
* seems silly.
*/ */
write_lock(&script->lock); if (!write_trylock(&script->lock))
return NULL;
spin_lock(&unw.lock); spin_lock(&unw.lock);
{ {
......
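The same trylock-and-bail pattern applies anywhere an interrupt-level path may need a lock the interrupted context could already hold for reading; failing fast and retrying later beats deadlocking. A sketch of the shape, not the unwinder itself:

static struct script *
try_build (struct script *script)
{
	/*
	 * write_lock() would spin forever if this CPU was interrupted
	 * while holding script->lock for reading; give up instead and
	 * let the caller fall back to the slow path.
	 */
	if (!write_trylock(&script->lock))
		return NULL;

	/* ... populate the script under the write lock ... */
	write_unlock(&script->lock);
	return script;
}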
...@@ -51,84 +51,79 @@ __ia64_memset_c_io (unsigned long dst, unsigned long c, long count) ...@@ -51,84 +51,79 @@ __ia64_memset_c_io (unsigned long dst, unsigned long c, long count)
#ifdef CONFIG_IA64_GENERIC #ifdef CONFIG_IA64_GENERIC
#undef __ia64_inb
#undef __ia64_inw
#undef __ia64_inl
#undef __ia64_outb
#undef __ia64_outw
#undef __ia64_outl
#undef __ia64_readb
#undef __ia64_readw
#undef __ia64_readl
#undef __ia64_readq
#undef __ia64_writeb
#undef __ia64_writew
#undef __ia64_writel
#undef __ia64_writeq
unsigned int unsigned int
ia64_inb (unsigned long port) __ia64_inb (unsigned long port)
{ {
return __ia64_inb(port); return ___ia64_inb(port);
} }
unsigned int unsigned int
ia64_inw (unsigned long port) __ia64_inw (unsigned long port)
{ {
return __ia64_inw(port); return ___ia64_inw(port);
} }
unsigned int unsigned int
ia64_inl (unsigned long port) __ia64_inl (unsigned long port)
{ {
return __ia64_inl(port); return ___ia64_inl(port);
} }
void void
ia64_outb (unsigned char val, unsigned long port) __ia64_outb (unsigned char val, unsigned long port)
{ {
__ia64_outb(val, port); ___ia64_outb(val, port);
} }
void void
ia64_outw (unsigned short val, unsigned long port) __ia64_outw (unsigned short val, unsigned long port)
{ {
__ia64_outw(val, port); ___ia64_outw(val, port);
} }
void void
ia64_outl (unsigned int val, unsigned long port) __ia64_outl (unsigned int val, unsigned long port)
{ {
__ia64_outl(val, port); ___ia64_outl(val, port);
} }
unsigned char unsigned char
ia64_readb (void *addr) __ia64_readb (void *addr)
{ {
return __ia64_readb (addr); return ___ia64_readb (addr);
} }
unsigned short unsigned short
ia64_readw (void *addr) __ia64_readw (void *addr)
{ {
return __ia64_readw (addr); return ___ia64_readw (addr);
} }
unsigned int unsigned int
ia64_readl (void *addr) __ia64_readl (void *addr)
{ {
return __ia64_readl (addr); return ___ia64_readl (addr);
} }
unsigned long unsigned long
ia64_readq (void *addr) __ia64_readq (void *addr)
{ {
return __ia64_readq (addr); return ___ia64_readq (addr);
} }
/* define aliases: */
asm (".global __ia64_inb, __ia64_inw, __ia64_inl");
asm ("__ia64_inb = ia64_inb");
asm ("__ia64_inw = ia64_inw");
asm ("__ia64_inl = ia64_inl");
asm (".global __ia64_outb, __ia64_outw, __ia64_outl");
asm ("__ia64_outb = ia64_outb");
asm ("__ia64_outw = ia64_outw");
asm ("__ia64_outl = ia64_outl");
asm (".global __ia64_readb, __ia64_readw, __ia64_readl, __ia64_readq");
asm ("__ia64_readb = ia64_readb");
asm ("__ia64_readw = ia64_readw");
asm ("__ia64_readl = ia64_readl");
asm ("__ia64_readq = ia64_readq");
#endif /* CONFIG_IA64_GENERIC */ #endif /* CONFIG_IA64_GENERIC */
...@@ -5,7 +5,10 @@ ...@@ -5,7 +5,10 @@
* I/O TLBs (aka DMA address translation hardware). * I/O TLBs (aka DMA address translation hardware).
* Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com> * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
* Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com> * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
* Copyright (C) 2000, 2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* *
* 03/05/07 davidm Switch from PCI-DMA to generic device DMA API.
* 00/12/13 davidm Rename to swiotlb.c and add mark_clean() to avoid * 00/12/13 davidm Rename to swiotlb.c and add mark_clean() to avoid
* unnecessary i-cache flushing. * unnecessary i-cache flushing.
*/ */
...@@ -92,7 +95,7 @@ __setup("swiotlb=", setup_io_tlb_npages); ...@@ -92,7 +95,7 @@ __setup("swiotlb=", setup_io_tlb_npages);
void void
swiotlb_init (void) swiotlb_init (void)
{ {
int i; unsigned long i;
/* /*
* Get IO TLB memory from the low pages * Get IO TLB memory from the low pages
...@@ -121,7 +124,7 @@ swiotlb_init (void) ...@@ -121,7 +124,7 @@ swiotlb_init (void)
* Allocates bounce buffer and returns its kernel virtual address. * Allocates bounce buffer and returns its kernel virtual address.
*/ */
static void * static void *
map_single (struct pci_dev *hwdev, char *buffer, size_t size, int direction) map_single (struct device *hwdev, char *buffer, size_t size, int dir)
{ {
unsigned long flags; unsigned long flags;
char *dma_addr; char *dma_addr;
...@@ -161,7 +164,7 @@ map_single (struct pci_dev *hwdev, char *buffer, size_t size, int direction) ...@@ -161,7 +164,7 @@ map_single (struct pci_dev *hwdev, char *buffer, size_t size, int direction)
if (io_tlb_list[index] >= nslots) { if (io_tlb_list[index] >= nslots) {
int count = 0; int count = 0;
for (i = index; i < index + nslots; i++) for (i = index; i < (int) (index + nslots); i++)
io_tlb_list[i] = 0; io_tlb_list[i] = 0;
for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1)
&& io_tlb_list[i]; i--) && io_tlb_list[i]; i--)
...@@ -195,7 +198,7 @@ map_single (struct pci_dev *hwdev, char *buffer, size_t size, int direction) ...@@ -195,7 +198,7 @@ map_single (struct pci_dev *hwdev, char *buffer, size_t size, int direction)
* needed when we sync the memory. Then we sync the buffer if needed. * needed when we sync the memory. Then we sync the buffer if needed.
*/ */
io_tlb_orig_addr[index] = buffer; io_tlb_orig_addr[index] = buffer;
if (direction == PCI_DMA_TODEVICE || direction == PCI_DMA_BIDIRECTIONAL) if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
memcpy(dma_addr, buffer, size); memcpy(dma_addr, buffer, size);
return dma_addr; return dma_addr;
...@@ -205,7 +208,7 @@ map_single (struct pci_dev *hwdev, char *buffer, size_t size, int direction) ...@@ -205,7 +208,7 @@ map_single (struct pci_dev *hwdev, char *buffer, size_t size, int direction)
* dma_addr is the kernel virtual address of the bounce buffer to unmap. * dma_addr is the kernel virtual address of the bounce buffer to unmap.
*/ */
static void static void
unmap_single (struct pci_dev *hwdev, char *dma_addr, size_t size, int direction) unmap_single (struct device *hwdev, char *dma_addr, size_t size, int dir)
{ {
unsigned long flags; unsigned long flags;
int i, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; int i, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
...@@ -215,7 +218,7 @@ unmap_single (struct pci_dev *hwdev, char *dma_addr, size_t size, int direction) ...@@ -215,7 +218,7 @@ unmap_single (struct pci_dev *hwdev, char *dma_addr, size_t size, int direction)
/* /*
* First, sync the memory before unmapping the entry * First, sync the memory before unmapping the entry
*/ */
if ((direction == PCI_DMA_FROMDEVICE) || (direction == PCI_DMA_BIDIRECTIONAL)) if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
/* /*
* bounce... copy the data back into the original buffer and delete the * bounce... copy the data back into the original buffer and delete the
* bounce buffer. * bounce buffer.
...@@ -250,49 +253,46 @@ unmap_single (struct pci_dev *hwdev, char *dma_addr, size_t size, int direction) ...@@ -250,49 +253,46 @@ unmap_single (struct pci_dev *hwdev, char *dma_addr, size_t size, int direction)
} }
static void static void
sync_single (struct pci_dev *hwdev, char *dma_addr, size_t size, int direction) sync_single (struct device *hwdev, char *dma_addr, size_t size, int dir)
{ {
int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
char *buffer = io_tlb_orig_addr[index]; char *buffer = io_tlb_orig_addr[index];
/* /*
* bounce... copy the data back into/from the original buffer * bounce... copy the data back into/from the original buffer
* XXX How do you handle PCI_DMA_BIDIRECTIONAL here? * XXX How do you handle DMA_BIDIRECTIONAL here?
*/ */
if (direction == PCI_DMA_FROMDEVICE) if (dir == DMA_FROM_DEVICE)
memcpy(buffer, dma_addr, size); memcpy(buffer, dma_addr, size);
else if (direction == PCI_DMA_TODEVICE) else if (dir == DMA_TO_DEVICE)
memcpy(dma_addr, buffer, size); memcpy(dma_addr, buffer, size);
else else
BUG(); BUG();
} }
void * void *
swiotlb_alloc_consistent (struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle) swiotlb_alloc_coherent (struct device *hwdev, size_t size, dma_addr_t *dma_handle, int flags)
{ {
unsigned long pci_addr; unsigned long dev_addr;
int gfp = GFP_ATOMIC;
void *ret; void *ret;
/* /* XXX fix me: the DMA API should pass us an explicit DMA mask instead: */
* Alloc_consistent() is defined to return memory < 4GB, no matter what the DMA flags |= GFP_DMA;
* mask says.
*/ ret = (void *)__get_free_pages(flags, get_order(size));
gfp |= GFP_DMA; /* XXX fix me: should change this to GFP_32BIT or ZONE_32BIT */
ret = (void *)__get_free_pages(gfp, get_order(size));
if (!ret) if (!ret)
return NULL; return NULL;
memset(ret, 0, size); memset(ret, 0, size);
pci_addr = virt_to_phys(ret); dev_addr = virt_to_phys(ret);
if (hwdev && (pci_addr & ~hwdev->dma_mask) != 0) if (hwdev && hwdev->dma_mask && (dev_addr & ~*hwdev->dma_mask) != 0)
panic("swiotlb_alloc_consistent: allocated memory is out of range for PCI device"); panic("swiotlb_alloc_consistent: allocated memory is out of range for device");
*dma_handle = pci_addr; *dma_handle = dev_addr;
return ret; return ret;
} }
void void
swiotlb_free_consistent (struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle) swiotlb_free_coherent (struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
{ {
free_pages((unsigned long) vaddr, get_order(size)); free_pages((unsigned long) vaddr, get_order(size));
} }
...@@ -305,34 +305,34 @@ swiotlb_free_consistent (struct pci_dev *hwdev, size_t size, void *vaddr, dma_ad ...@@ -305,34 +305,34 @@ swiotlb_free_consistent (struct pci_dev *hwdev, size_t size, void *vaddr, dma_ad
* swiotlb_unmap_single or swiotlb_dma_sync_single is performed. * swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
*/ */
dma_addr_t dma_addr_t
swiotlb_map_single (struct pci_dev *hwdev, void *ptr, size_t size, int direction) swiotlb_map_single (struct device *hwdev, void *ptr, size_t size, int dir)
{ {
unsigned long pci_addr = virt_to_phys(ptr); unsigned long dev_addr = virt_to_phys(ptr);
if (direction == PCI_DMA_NONE) if (dir == DMA_NONE)
BUG(); BUG();
/* /*
* Check if the PCI device can DMA to ptr... if so, just return ptr * Check if the PCI device can DMA to ptr... if so, just return ptr
*/ */
if ((pci_addr & ~hwdev->dma_mask) == 0) if (hwdev && hwdev->dma_mask && (dev_addr & ~*hwdev->dma_mask) == 0)
/* /*
* Device is capable of DMA'ing to the buffer... just return the PCI * Device is capable of DMA'ing to the buffer... just return the PCI
* address of ptr * address of ptr
*/ */
return pci_addr; return dev_addr;
/* /*
* get a bounce buffer: * get a bounce buffer:
*/ */
pci_addr = virt_to_phys(map_single(hwdev, ptr, size, direction)); dev_addr = virt_to_phys(map_single(hwdev, ptr, size, dir));
/* /*
* Ensure that the address returned is DMA'ble: * Ensure that the address returned is DMA'ble:
*/ */
if ((pci_addr & ~hwdev->dma_mask) != 0) if (hwdev && hwdev->dma_mask && (dev_addr & ~*hwdev->dma_mask) != 0)
panic("map_single: bounce buffer is not DMA'ble"); panic("map_single: bounce buffer is not DMA'ble");
return pci_addr; return dev_addr;
} }
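The bypass test used throughout this file is (addr & ~mask) == 0: the buffer can be handed to the device directly only if no physical-address bit lies above the device's DMA mask; otherwise a bounce buffer is substituted. A standalone illustration:

#include <stdio.h>

static int addr_fits(unsigned long long addr, unsigned long long mask)
{
	/* true iff no address bit lies above the device's DMA mask */
	return (addr & ~mask) == 0;
}

int main(void)
{
	unsigned long long mask32 = 0xffffffffULL;	/* 32-bit-capable device */

	printf("%d\n", addr_fits(0x00000000fee00000ULL, mask32));	/* 1: map directly */
	printf("%d\n", addr_fits(0x0000000100000000ULL, mask32));	/* 0: bounce */
	return 0;
}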
/* /*
...@@ -363,15 +363,15 @@ mark_clean (void *addr, size_t size) ...@@ -363,15 +363,15 @@ mark_clean (void *addr, size_t size)
* device wrote there. * device wrote there.
*/ */
void void
swiotlb_unmap_single (struct pci_dev *hwdev, dma_addr_t pci_addr, size_t size, int direction) swiotlb_unmap_single (struct device *hwdev, dma_addr_t dev_addr, size_t size, int dir)
{ {
char *dma_addr = phys_to_virt(pci_addr); char *dma_addr = phys_to_virt(dev_addr);
if (direction == PCI_DMA_NONE) if (dir == DMA_NONE)
BUG(); BUG();
if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
unmap_single(hwdev, dma_addr, size, direction); unmap_single(hwdev, dma_addr, size, dir);
else if (direction == PCI_DMA_FROMDEVICE) else if (dir == DMA_FROM_DEVICE)
mark_clean(dma_addr, size); mark_clean(dma_addr, size);
} }
...@@ -385,15 +385,15 @@ swiotlb_unmap_single (struct pci_dev *hwdev, dma_addr_t pci_addr, size_t size, i ...@@ -385,15 +385,15 @@ swiotlb_unmap_single (struct pci_dev *hwdev, dma_addr_t pci_addr, size_t size, i
* again owns the buffer. * again owns the buffer.
*/ */
void void
swiotlb_sync_single (struct pci_dev *hwdev, dma_addr_t pci_addr, size_t size, int direction) swiotlb_sync_single (struct device *hwdev, dma_addr_t dev_addr, size_t size, int dir)
{ {
char *dma_addr = phys_to_virt(pci_addr); char *dma_addr = phys_to_virt(dev_addr);
if (direction == PCI_DMA_NONE) if (dir == DMA_NONE)
BUG(); BUG();
if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
sync_single(hwdev, dma_addr, size, direction); sync_single(hwdev, dma_addr, size, dir);
else if (direction == PCI_DMA_FROMDEVICE) else if (dir == DMA_FROM_DEVICE)
mark_clean(dma_addr, size); mark_clean(dma_addr, size);
} }
...@@ -412,23 +412,22 @@ swiotlb_sync_single (struct pci_dev *hwdev, dma_addr_t pci_addr, size_t size, in ...@@ -412,23 +412,22 @@ swiotlb_sync_single (struct pci_dev *hwdev, dma_addr_t pci_addr, size_t size, in
* Device ownership issues as mentioned above for swiotlb_map_single are the same here. * Device ownership issues as mentioned above for swiotlb_map_single are the same here.
*/ */
int int
swiotlb_map_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction) swiotlb_map_sg (struct device *hwdev, struct scatterlist *sg, int nelems, int dir)
{ {
void *addr; void *addr;
unsigned long pci_addr; unsigned long dev_addr;
int i; int i;
if (direction == PCI_DMA_NONE) if (dir == DMA_NONE)
BUG(); BUG();
for (i = 0; i < nelems; i++, sg++) { for (i = 0; i < nelems; i++, sg++) {
addr = SG_ENT_VIRT_ADDRESS(sg); addr = SG_ENT_VIRT_ADDRESS(sg);
pci_addr = virt_to_phys(addr); dev_addr = virt_to_phys(addr);
if ((pci_addr & ~hwdev->dma_mask) != 0) if (hwdev && hwdev->dma_mask && (dev_addr & ~*hwdev->dma_mask) != 0)
sg->dma_address = (dma_addr_t) sg->dma_address = (dma_addr_t) map_single(hwdev, addr, sg->length, dir);
map_single(hwdev, addr, sg->length, direction);
else else
sg->dma_address = pci_addr; sg->dma_address = dev_addr;
sg->dma_length = sg->length; sg->dma_length = sg->length;
} }
return nelems; return nelems;
...@@ -439,17 +438,17 @@ swiotlb_map_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int d ...@@ -439,17 +438,17 @@ swiotlb_map_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int d
* here are the same as for swiotlb_unmap_single() above. * here are the same as for swiotlb_unmap_single() above.
*/ */
void void
swiotlb_unmap_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction) swiotlb_unmap_sg (struct device *hwdev, struct scatterlist *sg, int nelems, int dir)
{ {
int i; int i;
if (direction == PCI_DMA_NONE) if (dir == DMA_NONE)
BUG(); BUG();
for (i = 0; i < nelems; i++, sg++) for (i = 0; i < nelems; i++, sg++)
if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg)) if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
unmap_single(hwdev, (void *) sg->dma_address, sg->dma_length, direction); unmap_single(hwdev, (void *) sg->dma_address, sg->dma_length, dir);
else if (direction == PCI_DMA_FROMDEVICE) else if (dir == DMA_FROM_DEVICE)
mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length); mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
} }
...@@ -461,16 +460,16 @@ swiotlb_unmap_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int ...@@ -461,16 +460,16 @@ swiotlb_unmap_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int
* usage. * usage.
*/ */
void void
swiotlb_sync_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction) swiotlb_sync_sg (struct device *hwdev, struct scatterlist *sg, int nelems, int dir)
{ {
int i; int i;
if (direction == PCI_DMA_NONE) if (dir == DMA_NONE)
BUG(); BUG();
for (i = 0; i < nelems; i++, sg++) for (i = 0; i < nelems; i++, sg++)
if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg)) if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
sync_single(hwdev, (void *) sg->dma_address, sg->dma_length, direction); sync_single(hwdev, (void *) sg->dma_address, sg->dma_length, dir);
} }
/* /*
...@@ -479,7 +478,7 @@ swiotlb_sync_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int ...@@ -479,7 +478,7 @@ swiotlb_sync_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int
* you would pass 0x00ffffff as the mask to this function. * you would pass 0x00ffffff as the mask to this function.
*/ */
int int
swiotlb_pci_dma_supported (struct pci_dev *hwdev, u64 mask) swiotlb_dma_supported (struct device *hwdev, u64 mask)
{ {
return 1; return 1;
} }
...@@ -491,6 +490,6 @@ EXPORT_SYMBOL(swiotlb_map_sg); ...@@ -491,6 +490,6 @@ EXPORT_SYMBOL(swiotlb_map_sg);
EXPORT_SYMBOL(swiotlb_unmap_sg); EXPORT_SYMBOL(swiotlb_unmap_sg);
EXPORT_SYMBOL(swiotlb_sync_single); EXPORT_SYMBOL(swiotlb_sync_single);
EXPORT_SYMBOL(swiotlb_sync_sg); EXPORT_SYMBOL(swiotlb_sync_sg);
EXPORT_SYMBOL(swiotlb_alloc_consistent); EXPORT_SYMBOL(swiotlb_alloc_coherent);
EXPORT_SYMBOL(swiotlb_free_consistent); EXPORT_SYMBOL(swiotlb_free_coherent);
EXPORT_SYMBOL(swiotlb_pci_dma_supported); EXPORT_SYMBOL(swiotlb_dma_supported);
...@@ -48,13 +48,13 @@ check_pgt_cache (void) ...@@ -48,13 +48,13 @@ check_pgt_cache (void)
low = pgt_cache_water[0]; low = pgt_cache_water[0];
high = pgt_cache_water[1]; high = pgt_cache_water[1];
if (pgtable_cache_size > high) { if (pgtable_cache_size > (u64) high) {
do { do {
if (pgd_quicklist) if (pgd_quicklist)
free_page((unsigned long)pgd_alloc_one_fast(0)); free_page((unsigned long)pgd_alloc_one_fast(0));
if (pmd_quicklist) if (pmd_quicklist)
free_page((unsigned long)pmd_alloc_one_fast(0, 0)); free_page((unsigned long)pmd_alloc_one_fast(0, 0));
} while (pgtable_cache_size > low); } while (pgtable_cache_size > (u64) low);
} }
} }
...@@ -406,7 +406,7 @@ mem_init (void) ...@@ -406,7 +406,7 @@ mem_init (void)
* any drivers that may need the PCI DMA interface are initialized or bootmem has * any drivers that may need the PCI DMA interface are initialized or bootmem has
* been freed. * been freed.
*/ */
platform_pci_dma_init(); platform_dma_init();
#endif #endif
#ifndef CONFIG_DISCONTIGMEM #ifndef CONFIG_DISCONTIGMEM
...@@ -445,7 +445,7 @@ mem_init (void) ...@@ -445,7 +445,7 @@ mem_init (void)
num_pgt_pages = nr_free_pages() / PTRS_PER_PGD + NUM_TASKS; num_pgt_pages = nr_free_pages() / PTRS_PER_PGD + NUM_TASKS;
if (num_pgt_pages > nr_free_pages() / 10) if (num_pgt_pages > nr_free_pages() / 10)
num_pgt_pages = nr_free_pages() / 10; num_pgt_pages = nr_free_pages() / 10;
if (num_pgt_pages > pgt_cache_water[1]) if (num_pgt_pages > (u64) pgt_cache_water[1])
pgt_cache_water[1] = num_pgt_pages; pgt_cache_water[1] = num_pgt_pages;
/* install the gate page in the global page table: */ /* install the gate page in the global page table: */
......
...@@ -34,8 +34,10 @@ ...@@ -34,8 +34,10 @@
#ifdef CONFIG_IA64_SGI_SN1 #ifdef CONFIG_IA64_SGI_SN1
#define MACHVEC_PLATFORM_NAME sn1 #define MACHVEC_PLATFORM_NAME sn1
#define MACHVEC_PLATFORM_HEADER <asm/machvec_sn1.h>
#elif defined(CONFIG_IA64_SGI_SN2) #elif defined(CONFIG_IA64_SGI_SN2)

#define MACHVEC_PLATFORM_NAME sn2 #define MACHVEC_PLATFORM_NAME sn2
#define MACHVEC_PLATFORM_HEADER <asm/machvec_sn2.h>
#else #else
#error "unknown platform" #error "unknown platform"
#endif #endif
......
...@@ -193,7 +193,7 @@ main (int argc, char **argv) ...@@ -193,7 +193,7 @@ main (int argc, char **argv)
printf ("/*\n * DO NOT MODIFY\n *\n * This file was generated by " printf ("/*\n * DO NOT MODIFY\n *\n * This file was generated by "
"arch/ia64/tools/print_offsets.\n *\n */\n\n"); "arch/ia64/tools/print_offsets.\n *\n */\n\n");
for (i = 0; i < sizeof (tab) / sizeof (tab[0]); ++i) for (i = 0; i < (int) (sizeof (tab) / sizeof (tab[0])); ++i)
{ {
if (tab[i].name[0] == '\0') if (tab[i].name[0] == '\0')
printf ("\n"); printf ("\n");
......
...@@ -56,11 +56,16 @@ ia64_atomic_sub (int i, atomic_t *v) ...@@ -56,11 +56,16 @@ ia64_atomic_sub (int i, atomic_t *v)
} }
#define atomic_add_return(i,v) \ #define atomic_add_return(i,v) \
((__builtin_constant_p(i) && \ ({ \
( (i == 1) || (i == 4) || (i == 8) || (i == 16) \ int __ia64_aar_i = (i); \
|| (i == -1) || (i == -4) || (i == -8) || (i == -16))) \ (__builtin_constant_p(i) \
? ia64_fetch_and_add(i, &(v)->counter) \ && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
: ia64_atomic_add(i, v)) || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
|| (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
|| (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
: ia64_atomic_add(__ia64_aar_i, v); \
})
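The temporary __ia64_aar_i inside the statement expression guarantees i is evaluated exactly once while leaving __builtin_constant_p(i) free to fold at compile time; the old macro re-evaluated i in every constant test and again in the add. A self-contained sketch of the hazard the rewrite avoids (GNU C, hypothetical macros):

	#include <stdio.h>

	#define BAD_TWICE(i)	((i) + (i))			/* evaluates i twice */
	#define GOOD_ONCE(i)	({ int __t = (i); __t + __t; })	/* evaluates i once */

	int main(void)
	{
		int n = 0, m = 0;
		(void) BAD_TWICE(n++);		/* n ends up 2 -- surprising */
		(void) GOOD_ONCE(m++);		/* m ends up 1, like a function call */
		printf("%d %d\n", n, m);	/* prints "2 1" */
		return 0;
	}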
/* /*
* Atomically add I to V and return TRUE if the resulting value is * Atomically add I to V and return TRUE if the resulting value is
...@@ -72,13 +77,17 @@ atomic_add_negative (int i, atomic_t *v) ...@@ -72,13 +77,17 @@ atomic_add_negative (int i, atomic_t *v)
return atomic_add_return(i, v) < 0; return atomic_add_return(i, v) < 0;
} }
#define atomic_sub_return(i,v) \ #define atomic_sub_return(i,v) \
((__builtin_constant_p(i) && \ ({ \
( (i == 1) || (i == 4) || (i == 8) || (i == 16) \ int __ia64_asr_i = (i); \
|| (i == -1) || (i == -4) || (i == -8) || (i == -16))) \ (__builtin_constant_p(i) \
? ia64_fetch_and_add(-(i), &(v)->counter) \ && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
: ia64_atomic_sub(i, v)) || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
|| (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
|| (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
: ia64_atomic_sub(__ia64_asr_i, v); \
})
#define atomic_dec_return(v) atomic_sub_return(1, (v)) #define atomic_dec_return(v) atomic_sub_return(1, (v))
#define atomic_inc_return(v) atomic_add_return(1, (v)) #define atomic_inc_return(v) atomic_add_return(1, (v))
......
...@@ -453,9 +453,9 @@ find_next_bit (void *addr, unsigned long size, unsigned long offset) ...@@ -453,9 +453,9 @@ find_next_bit (void *addr, unsigned long size, unsigned long offset)
#define __clear_bit(nr, addr) clear_bit(nr, addr) #define __clear_bit(nr, addr) clear_bit(nr, addr)
#define ext2_set_bit test_and_set_bit #define ext2_set_bit test_and_set_bit
#define ext2_set_atomic(l,n,a) test_and_set_bit(n,a) #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a)
#define ext2_clear_bit test_and_clear_bit #define ext2_clear_bit test_and_clear_bit
#define ext2_clear_atomic(l,n,a) test_and_clear_bit(n,a) #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a)
#define ext2_test_bit test_bit #define ext2_test_bit test_bit
#define ext2_find_first_zero_bit find_first_zero_bit #define ext2_find_first_zero_bit find_first_zero_bit
#define ext2_find_next_zero_bit find_next_zero_bit #define ext2_find_next_zero_bit find_next_zero_bit
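The renamed ext2_set_bit_atomic/ext2_clear_bit_atomic take an extra lock argument so architectures without atomic bit operations can serialize bitmap updates; ia64's test_and_set_bit is already atomic, so the lock is simply ignored. The call shape on the fs side looks roughly like this (a hedged sketch; the lock helper is hypothetical):

	spinlock_t *lock = sb_bgl_lock(sbi, group);	/* hypothetical helper */
	if (ext2_set_bit_atomic(lock, bit, bh->b_data))
		/* bit already set: block was already allocated */ ;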
......
#include <asm-generic/dma-mapping.h> #ifndef _ASM_IA64_DMA_MAPPING_H
#define _ASM_IA64_DMA_MAPPING_H
/*
* Copyright (C) 2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#define dma_alloc_coherent platform_dma_alloc_coherent
#define dma_alloc_noncoherent platform_dma_alloc_coherent /* coherent mem. is cheap */
#define dma_free_coherent platform_dma_free_coherent
#define dma_free_noncoherent platform_dma_free_coherent
#define dma_map_single platform_dma_map_single
#define dma_map_sg platform_dma_map_sg
#define dma_unmap_single platform_dma_unmap_single
#define dma_unmap_sg platform_dma_unmap_sg
#define dma_sync_single platform_dma_sync_single
#define dma_sync_sg platform_dma_sync_sg
#define dma_map_page(dev, pg, off, size, dir) \
dma_map_single(dev, page_address(pg) + (off), (size), (dir))
#define dma_unmap_page(dev, dma_addr, size, dir) \
dma_unmap_single(dev, dma_addr, size, dir)
/*
* Rest of this file is part of the "Advanced DMA API". Use at your own risk.
* See Documentation/DMA-API.txt for details.
*/
#define dma_sync_single_range(dev, dma_handle, offset, size, dir) \
dma_sync_single(dev, dma_handle, size, dir)
#define dma_supported platform_dma_supported
static inline int
dma_set_mask (struct device *dev, u64 mask)
{
if (!dev->dma_mask || !dma_supported(dev, mask))
return -EIO;
*dev->dma_mask = mask;
return 0;
}
static inline int
dma_get_cache_alignment (void)
{
extern int ia64_max_cacheline_size;
return ia64_max_cacheline_size;
}
static inline void
dma_cache_sync (void *vaddr, size_t size, enum dma_data_direction dir)
{
/*
* IA-64 is cache-coherent, so this is mostly a no-op. However, we do need to
* ensure that dma_cache_sync() enforces order, hence the mb().
*/
mb();
}
#define dma_is_consistent(dma_handle) (1) /* all we do is coherent memory... */
#endif /* _ASM_IA64_DMA_MAPPING_H */
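Every macro above bottoms out in the machine vector, so one driver source works unchanged over swiotlb, sba_iommu, and the SN DMA engines. A hedged sketch of the canonical streaming-DMA pattern built on this header (dev, len, and the surrounding driver are hypothetical):

	/* dir values come from enum dma_data_direction in <linux/dma-mapping.h> */
	void *buf = kmalloc(len, GFP_KERNEL);
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	/* ... point the hardware at 'handle' and run the transfer ... */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	kfree(buf);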
...@@ -453,8 +453,6 @@ struct ia32_modify_ldt_ldt_s { ...@@ -453,8 +453,6 @@ struct ia32_modify_ldt_ldt_s {
struct linux_binprm; struct linux_binprm;
extern void ia32_gdt_init (void); extern void ia32_gdt_init (void);
extern int ia32_setup_frame1 (int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs);
extern void ia32_init_addr_space (struct pt_regs *regs); extern void ia32_init_addr_space (struct pt_regs *regs);
extern int ia32_setup_arg_pages (struct linux_binprm *bprm); extern int ia32_setup_arg_pages (struct linux_binprm *bprm);
extern int ia32_exception (struct pt_regs *regs, unsigned long isr); extern int ia32_exception (struct pt_regs *regs, unsigned long isr);
...@@ -476,4 +474,8 @@ extern void ia32_load_segment_descriptors (struct task_struct *task); ...@@ -476,4 +474,8 @@ extern void ia32_load_segment_descriptors (struct task_struct *task);
#endif /* !CONFIG_IA32_SUPPORT */ #endif /* !CONFIG_IA32_SUPPORT */
/* Declare this unconditionally, so we don't get warnings for unreachable code. */
extern int ia32_setup_frame1 (int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs);
#endif /* _ASM_IA64_IA32_H */ #endif /* _ASM_IA64_IA32_H */
...@@ -13,7 +13,7 @@ ...@@ -13,7 +13,7 @@
* over and over again with slight variations and possibly making a * over and over again with slight variations and possibly making a
* mistake somewhere. * mistake somewhere.
* *
* Copyright (C) 1998-2002 Hewlett-Packard Co * Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com> * David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com> * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
* Copyright (C) 1999 Don Dugger <don.dugger@intel.com> * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
...@@ -111,6 +111,21 @@ __ia64_mk_io_addr (unsigned long port) ...@@ -111,6 +111,21 @@ __ia64_mk_io_addr (unsigned long port)
return (void *) (space->mmio_base | offset); return (void *) (space->mmio_base | offset);
} }
#define __ia64_inb ___ia64_inb
#define __ia64_inw ___ia64_inw
#define __ia64_inl ___ia64_inl
#define __ia64_outb ___ia64_outb
#define __ia64_outw ___ia64_outw
#define __ia64_outl ___ia64_outl
#define __ia64_readb ___ia64_readb
#define __ia64_readw ___ia64_readw
#define __ia64_readl ___ia64_readl
#define __ia64_readq ___ia64_readq
#define __ia64_writeb ___ia64_writeb
#define __ia64_writew ___ia64_writew
#define __ia64_writel ___ia64_writel
#define __ia64_writeq ___ia64_writeq
/* /*
* For the in/out routines, we need to do "mf.a" _after_ doing the I/O access to ensure * For the in/out routines, we need to do "mf.a" _after_ doing the I/O access to ensure
* that the access has completed before executing other I/O accesses. Since we're doing * that the access has completed before executing other I/O accesses. Since we're doing
...@@ -120,7 +135,7 @@ __ia64_mk_io_addr (unsigned long port) ...@@ -120,7 +135,7 @@ __ia64_mk_io_addr (unsigned long port)
*/ */
static inline unsigned int static inline unsigned int
__ia64_inb (unsigned long port) ___ia64_inb (unsigned long port)
{ {
volatile unsigned char *addr = __ia64_mk_io_addr(port); volatile unsigned char *addr = __ia64_mk_io_addr(port);
unsigned char ret; unsigned char ret;
...@@ -131,7 +146,7 @@ __ia64_inb (unsigned long port) ...@@ -131,7 +146,7 @@ __ia64_inb (unsigned long port)
} }
static inline unsigned int static inline unsigned int
__ia64_inw (unsigned long port) ___ia64_inw (unsigned long port)
{ {
volatile unsigned short *addr = __ia64_mk_io_addr(port); volatile unsigned short *addr = __ia64_mk_io_addr(port);
unsigned short ret; unsigned short ret;
...@@ -142,7 +157,7 @@ __ia64_inw (unsigned long port) ...@@ -142,7 +157,7 @@ __ia64_inw (unsigned long port)
} }
static inline unsigned int static inline unsigned int
__ia64_inl (unsigned long port) ___ia64_inl (unsigned long port)
{ {
volatile unsigned int *addr = __ia64_mk_io_addr(port); volatile unsigned int *addr = __ia64_mk_io_addr(port);
unsigned int ret; unsigned int ret;
...@@ -153,7 +168,7 @@ __ia64_inl (unsigned long port) ...@@ -153,7 +168,7 @@ __ia64_inl (unsigned long port)
} }
static inline void static inline void
__ia64_outb (unsigned char val, unsigned long port) ___ia64_outb (unsigned char val, unsigned long port)
{ {
volatile unsigned char *addr = __ia64_mk_io_addr(port); volatile unsigned char *addr = __ia64_mk_io_addr(port);
...@@ -162,7 +177,7 @@ __ia64_outb (unsigned char val, unsigned long port) ...@@ -162,7 +177,7 @@ __ia64_outb (unsigned char val, unsigned long port)
} }
static inline void static inline void
__ia64_outw (unsigned short val, unsigned long port) ___ia64_outw (unsigned short val, unsigned long port)
{ {
volatile unsigned short *addr = __ia64_mk_io_addr(port); volatile unsigned short *addr = __ia64_mk_io_addr(port);
...@@ -171,7 +186,7 @@ __ia64_outw (unsigned short val, unsigned long port) ...@@ -171,7 +186,7 @@ __ia64_outw (unsigned short val, unsigned long port)
} }
static inline void static inline void
__ia64_outl (unsigned int val, unsigned long port) ___ia64_outl (unsigned int val, unsigned long port)
{ {
volatile unsigned int *addr = __ia64_mk_io_addr(port); volatile unsigned int *addr = __ia64_mk_io_addr(port);
...@@ -184,17 +199,8 @@ __insb (unsigned long port, void *dst, unsigned long count) ...@@ -184,17 +199,8 @@ __insb (unsigned long port, void *dst, unsigned long count)
{ {
unsigned char *dp = dst; unsigned char *dp = dst;
if (platform_inb == __ia64_inb) {
volatile unsigned char *addr = __ia64_mk_io_addr(port);
__ia64_mf_a();
while (count--)
*dp++ = *addr;
__ia64_mf_a();
} else
while (count--) while (count--)
*dp++ = platform_inb(port); *dp++ = platform_inb(port);
return;
} }
static inline void static inline void
...@@ -202,17 +208,8 @@ __insw (unsigned long port, void *dst, unsigned long count) ...@@ -202,17 +208,8 @@ __insw (unsigned long port, void *dst, unsigned long count)
{ {
unsigned short *dp = dst; unsigned short *dp = dst;
if (platform_inw == __ia64_inw) {
volatile unsigned short *addr = __ia64_mk_io_addr(port);
__ia64_mf_a();
while (count--)
*dp++ = *addr;
__ia64_mf_a();
} else
while (count--) while (count--)
*dp++ = platform_inw(port); *dp++ = platform_inw(port);
return;
} }
static inline void static inline void
...@@ -220,17 +217,8 @@ __insl (unsigned long port, void *dst, unsigned long count) ...@@ -220,17 +217,8 @@ __insl (unsigned long port, void *dst, unsigned long count)
{ {
unsigned int *dp = dst; unsigned int *dp = dst;
if (platform_inl == __ia64_inl) {
volatile unsigned int *addr = __ia64_mk_io_addr(port);
__ia64_mf_a();
while (count--)
*dp++ = *addr;
__ia64_mf_a();
} else
while (count--) while (count--)
*dp++ = platform_inl(port); *dp++ = platform_inl(port);
return;
} }
static inline void static inline void
...@@ -238,16 +226,8 @@ __outsb (unsigned long port, const void *src, unsigned long count) ...@@ -238,16 +226,8 @@ __outsb (unsigned long port, const void *src, unsigned long count)
{ {
const unsigned char *sp = src; const unsigned char *sp = src;
if (platform_outb == __ia64_outb) {
volatile unsigned char *addr = __ia64_mk_io_addr(port);
while (count--)
*addr = *sp++;
__ia64_mf_a();
} else
while (count--) while (count--)
platform_outb(*sp++, port); platform_outb(*sp++, port);
return;
} }
static inline void static inline void
...@@ -255,16 +235,8 @@ __outsw (unsigned long port, const void *src, unsigned long count) ...@@ -255,16 +235,8 @@ __outsw (unsigned long port, const void *src, unsigned long count)
{ {
const unsigned short *sp = src; const unsigned short *sp = src;
if (platform_outw == __ia64_outw) {
volatile unsigned short *addr = __ia64_mk_io_addr(port);
while (count--)
*addr = *sp++;
__ia64_mf_a();
} else
while (count--) while (count--)
platform_outw(*sp++, port); platform_outw(*sp++, port);
return;
} }
static inline void static inline void
...@@ -272,16 +244,8 @@ __outsl (unsigned long port, void *src, unsigned long count) ...@@ -272,16 +244,8 @@ __outsl (unsigned long port, void *src, unsigned long count)
{ {
const unsigned int *sp = src; const unsigned int *sp = src;
if (platform_outl == __ia64_outl) {
volatile unsigned int *addr = __ia64_mk_io_addr(port);
while (count--)
*addr = *sp++;
__ia64_mf_a();
} else
while (count--) while (count--)
platform_outl(*sp++, port); platform_outl(*sp++, port);
return;
} }
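With the fast paths gone, every element of a string-I/O transfer goes through the platform hook, so machvec overrides see ins/outs traffic too; the price is one indirect call per element. Usage is unchanged — e.g. the classic IDE-style data-port pattern (port number hypothetical):

	unsigned short sector[256];
	/* push one 512-byte sector out the data port, 16 bits at a time */
	outsw(0x1f0, sector, 256);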
/* /*
...@@ -318,25 +282,25 @@ __outsl (unsigned long port, void *src, unsigned long count) ...@@ -318,25 +282,25 @@ __outsl (unsigned long port, void *src, unsigned long count)
* hopefully it'll stay that way). * hopefully it'll stay that way).
*/ */
static inline unsigned char static inline unsigned char
__ia64_readb (void *addr) ___ia64_readb (void *addr)
{ {
return *(volatile unsigned char *)addr; return *(volatile unsigned char *)addr;
} }
static inline unsigned short static inline unsigned short
__ia64_readw (void *addr) ___ia64_readw (void *addr)
{ {
return *(volatile unsigned short *)addr; return *(volatile unsigned short *)addr;
} }
static inline unsigned int static inline unsigned int
__ia64_readl (void *addr) ___ia64_readl (void *addr)
{ {
return *(volatile unsigned int *) addr; return *(volatile unsigned int *) addr;
} }
static inline unsigned long static inline unsigned long
__ia64_readq (void *addr) ___ia64_readq (void *addr)
{ {
return *(volatile unsigned long *) addr; return *(volatile unsigned long *) addr;
} }
......
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
* Copyright (C) 1999 Silicon Graphics, Inc. * Copyright (C) 1999 Silicon Graphics, Inc.
* Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com> * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
* Copyright (C) Vijay Chander <vijay@engr.sgi.com> * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
* Copyright (C) 1999-2001 Hewlett-Packard Co. * Copyright (C) 1999-2001, 2003 Hewlett-Packard Co.
* David Mosberger-Tang <davidm@hpl.hp.com> * David Mosberger-Tang <davidm@hpl.hp.com>
*/ */
#ifndef _ASM_IA64_MACHVEC_H #ifndef _ASM_IA64_MACHVEC_H
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
#include <linux/types.h> #include <linux/types.h>
/* forward declarations: */ /* forward declarations: */
struct pci_dev; struct device;
struct pt_regs; struct pt_regs;
struct scatterlist; struct scatterlist;
struct irq_desc; struct irq_desc;
...@@ -33,17 +33,17 @@ typedef struct irq_desc *ia64_mv_irq_desc (unsigned int); ...@@ -33,17 +33,17 @@ typedef struct irq_desc *ia64_mv_irq_desc (unsigned int);
typedef u8 ia64_mv_irq_to_vector (u8); typedef u8 ia64_mv_irq_to_vector (u8);
typedef unsigned int ia64_mv_local_vector_to_irq (u8 vector); typedef unsigned int ia64_mv_local_vector_to_irq (u8 vector);
/* PCI-DMA interface: */ /* DMA-mapping interface: */
typedef void ia64_mv_pci_dma_init (void); typedef void ia64_mv_dma_init (void);
typedef void *ia64_mv_pci_alloc_consistent (struct pci_dev *, size_t, dma_addr_t *); typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, int);
typedef void ia64_mv_pci_free_consistent (struct pci_dev *, size_t, void *, dma_addr_t); typedef void ia64_mv_dma_free_coherent (struct device *, size_t, void *, dma_addr_t);
typedef dma_addr_t ia64_mv_pci_map_single (struct pci_dev *, void *, size_t, int); typedef dma_addr_t ia64_mv_dma_map_single (struct device *, void *, size_t, int);
typedef void ia64_mv_pci_unmap_single (struct pci_dev *, dma_addr_t, size_t, int); typedef void ia64_mv_dma_unmap_single (struct device *, dma_addr_t, size_t, int);
typedef int ia64_mv_pci_map_sg (struct pci_dev *, struct scatterlist *, int, int); typedef int ia64_mv_dma_map_sg (struct device *, struct scatterlist *, int, int);
typedef void ia64_mv_pci_unmap_sg (struct pci_dev *, struct scatterlist *, int, int); typedef void ia64_mv_dma_unmap_sg (struct device *, struct scatterlist *, int, int);
typedef void ia64_mv_pci_dma_sync_single (struct pci_dev *, dma_addr_t, size_t, int); typedef void ia64_mv_dma_sync_single (struct device *, dma_addr_t, size_t, int);
typedef void ia64_mv_pci_dma_sync_sg (struct pci_dev *, struct scatterlist *, int, int); typedef void ia64_mv_dma_sync_sg (struct device *, struct scatterlist *, int, int);
typedef int ia64_mv_pci_dma_supported (struct pci_dev *, u64); typedef int ia64_mv_dma_supported (struct device *, u64);
/* /*
* WARNING: The legacy I/O space is _architected_. Platforms are * WARNING: The legacy I/O space is _architected_. Platforms are
...@@ -66,6 +66,7 @@ typedef unsigned int ia64_mv_readl_t (void *); ...@@ -66,6 +66,7 @@ typedef unsigned int ia64_mv_readl_t (void *);
typedef unsigned long ia64_mv_readq_t (void *); typedef unsigned long ia64_mv_readq_t (void *);
extern void machvec_noop (void); extern void machvec_noop (void);
extern void machvec_memory_fence (void);
# if defined (CONFIG_IA64_HP_SIM) # if defined (CONFIG_IA64_HP_SIM)
# include <asm/machvec_hpsim.h> # include <asm/machvec_hpsim.h>
...@@ -92,16 +93,16 @@ extern void machvec_noop (void); ...@@ -92,16 +93,16 @@ extern void machvec_noop (void);
# define platform_log_print ia64_mv.log_print # define platform_log_print ia64_mv.log_print
# define platform_send_ipi ia64_mv.send_ipi # define platform_send_ipi ia64_mv.send_ipi
# define platform_global_tlb_purge ia64_mv.global_tlb_purge # define platform_global_tlb_purge ia64_mv.global_tlb_purge
# define platform_pci_dma_init ia64_mv.dma_init # define platform_dma_init ia64_mv.dma_init
# define platform_pci_alloc_consistent ia64_mv.alloc_consistent # define platform_dma_alloc_coherent ia64_mv.dma_alloc_coherent
# define platform_pci_free_consistent ia64_mv.free_consistent # define platform_dma_free_coherent ia64_mv.dma_free_coherent
# define platform_pci_map_single ia64_mv.map_single # define platform_dma_map_single ia64_mv.dma_map_single
# define platform_pci_unmap_single ia64_mv.unmap_single # define platform_dma_unmap_single ia64_mv.dma_unmap_single
# define platform_pci_map_sg ia64_mv.map_sg # define platform_dma_map_sg ia64_mv.dma_map_sg
# define platform_pci_unmap_sg ia64_mv.unmap_sg # define platform_dma_unmap_sg ia64_mv.dma_unmap_sg
# define platform_pci_dma_sync_single ia64_mv.sync_single # define platform_dma_sync_single ia64_mv.dma_sync_single
# define platform_pci_dma_sync_sg ia64_mv.sync_sg # define platform_dma_sync_sg ia64_mv.dma_sync_sg
# define platform_pci_dma_supported ia64_mv.dma_supported # define platform_dma_supported ia64_mv.dma_supported
# define platform_irq_desc ia64_mv.irq_desc # define platform_irq_desc ia64_mv.irq_desc
# define platform_irq_to_vector ia64_mv.irq_to_vector # define platform_irq_to_vector ia64_mv.irq_to_vector
# define platform_local_vector_to_irq ia64_mv.local_vector_to_irq # define platform_local_vector_to_irq ia64_mv.local_vector_to_irq
...@@ -133,16 +134,16 @@ struct ia64_machine_vector { ...@@ -133,16 +134,16 @@ struct ia64_machine_vector {
ia64_mv_log_print_t *log_print; ia64_mv_log_print_t *log_print;
ia64_mv_send_ipi_t *send_ipi; ia64_mv_send_ipi_t *send_ipi;
ia64_mv_global_tlb_purge_t *global_tlb_purge; ia64_mv_global_tlb_purge_t *global_tlb_purge;
ia64_mv_pci_dma_init *dma_init; ia64_mv_dma_init *dma_init;
ia64_mv_pci_alloc_consistent *alloc_consistent; ia64_mv_dma_alloc_coherent *dma_alloc_coherent;
ia64_mv_pci_free_consistent *free_consistent; ia64_mv_dma_free_coherent *dma_free_coherent;
ia64_mv_pci_map_single *map_single; ia64_mv_dma_map_single *dma_map_single;
ia64_mv_pci_unmap_single *unmap_single; ia64_mv_dma_unmap_single *dma_unmap_single;
ia64_mv_pci_map_sg *map_sg; ia64_mv_dma_map_sg *dma_map_sg;
ia64_mv_pci_unmap_sg *unmap_sg; ia64_mv_dma_unmap_sg *dma_unmap_sg;
ia64_mv_pci_dma_sync_single *sync_single; ia64_mv_dma_sync_single *dma_sync_single;
ia64_mv_pci_dma_sync_sg *sync_sg; ia64_mv_dma_sync_sg *dma_sync_sg;
ia64_mv_pci_dma_supported *dma_supported; ia64_mv_dma_supported *dma_supported;
ia64_mv_irq_desc *irq_desc; ia64_mv_irq_desc *irq_desc;
ia64_mv_irq_to_vector *irq_to_vector; ia64_mv_irq_to_vector *irq_to_vector;
ia64_mv_local_vector_to_irq *local_vector_to_irq; ia64_mv_local_vector_to_irq *local_vector_to_irq;
...@@ -170,16 +171,16 @@ struct ia64_machine_vector { ...@@ -170,16 +171,16 @@ struct ia64_machine_vector {
platform_log_print, \ platform_log_print, \
platform_send_ipi, \ platform_send_ipi, \
platform_global_tlb_purge, \ platform_global_tlb_purge, \
platform_pci_dma_init, \ platform_dma_init, \
platform_pci_alloc_consistent, \ platform_dma_alloc_coherent, \
platform_pci_free_consistent, \ platform_dma_free_coherent, \
platform_pci_map_single, \ platform_dma_map_single, \
platform_pci_unmap_single, \ platform_dma_unmap_single, \
platform_pci_map_sg, \ platform_dma_map_sg, \
platform_pci_unmap_sg, \ platform_dma_unmap_sg, \
platform_pci_dma_sync_single, \ platform_dma_sync_single, \
platform_pci_dma_sync_sg, \ platform_dma_sync_sg, \
platform_pci_dma_supported, \ platform_dma_supported, \
platform_irq_desc, \ platform_irq_desc, \
platform_irq_to_vector, \ platform_irq_to_vector, \
platform_local_vector_to_irq, \ platform_local_vector_to_irq, \
...@@ -205,16 +206,16 @@ extern void machvec_init (const char *name); ...@@ -205,16 +206,16 @@ extern void machvec_init (const char *name);
/* /*
* Declare default routines which aren't declared anywhere else: * Declare default routines which aren't declared anywhere else:
*/ */
extern ia64_mv_pci_dma_init swiotlb_init; extern ia64_mv_dma_init swiotlb_init;
extern ia64_mv_pci_alloc_consistent swiotlb_alloc_consistent; extern ia64_mv_dma_alloc_coherent swiotlb_alloc_coherent;
extern ia64_mv_pci_free_consistent swiotlb_free_consistent; extern ia64_mv_dma_free_coherent swiotlb_free_coherent;
extern ia64_mv_pci_map_single swiotlb_map_single; extern ia64_mv_dma_map_single swiotlb_map_single;
extern ia64_mv_pci_unmap_single swiotlb_unmap_single; extern ia64_mv_dma_unmap_single swiotlb_unmap_single;
extern ia64_mv_pci_map_sg swiotlb_map_sg; extern ia64_mv_dma_map_sg swiotlb_map_sg;
extern ia64_mv_pci_unmap_sg swiotlb_unmap_sg; extern ia64_mv_dma_unmap_sg swiotlb_unmap_sg;
extern ia64_mv_pci_dma_sync_single swiotlb_sync_single; extern ia64_mv_dma_sync_single swiotlb_sync_single;
extern ia64_mv_pci_dma_sync_sg swiotlb_sync_sg; extern ia64_mv_dma_sync_sg swiotlb_sync_sg;
extern ia64_mv_pci_dma_supported swiotlb_pci_dma_supported; extern ia64_mv_dma_supported swiotlb_dma_supported;
/* /*
* Define default versions so we can extend machvec for new platforms without having * Define default versions so we can extend machvec for new platforms without having
...@@ -247,35 +248,35 @@ extern ia64_mv_pci_dma_supported swiotlb_pci_dma_supported; ...@@ -247,35 +248,35 @@ extern ia64_mv_pci_dma_supported swiotlb_pci_dma_supported;
#ifndef platform_global_tlb_purge #ifndef platform_global_tlb_purge
# define platform_global_tlb_purge ia64_global_tlb_purge /* default to architected version */ # define platform_global_tlb_purge ia64_global_tlb_purge /* default to architected version */
#endif #endif
#ifndef platform_pci_dma_init #ifndef platform_dma_init
# define platform_pci_dma_init swiotlb_init # define platform_dma_init swiotlb_init
#endif #endif
#ifndef platform_pci_alloc_consistent #ifndef platform_dma_alloc_coherent
# define platform_pci_alloc_consistent swiotlb_alloc_consistent # define platform_dma_alloc_coherent swiotlb_alloc_coherent
#endif #endif
#ifndef platform_pci_free_consistent #ifndef platform_dma_free_coherent
# define platform_pci_free_consistent swiotlb_free_consistent # define platform_dma_free_coherent swiotlb_free_coherent
#endif #endif
#ifndef platform_pci_map_single #ifndef platform_dma_map_single
# define platform_pci_map_single swiotlb_map_single # define platform_dma_map_single swiotlb_map_single
#endif #endif
#ifndef platform_pci_unmap_single #ifndef platform_dma_unmap_single
# define platform_pci_unmap_single swiotlb_unmap_single # define platform_dma_unmap_single swiotlb_unmap_single
#endif #endif
#ifndef platform_pci_map_sg #ifndef platform_dma_map_sg
# define platform_pci_map_sg swiotlb_map_sg # define platform_dma_map_sg swiotlb_map_sg
#endif #endif
#ifndef platform_pci_unmap_sg #ifndef platform_dma_unmap_sg
# define platform_pci_unmap_sg swiotlb_unmap_sg # define platform_dma_unmap_sg swiotlb_unmap_sg
#endif #endif
#ifndef platform_pci_dma_sync_single #ifndef platform_dma_sync_single
# define platform_pci_dma_sync_single swiotlb_sync_single # define platform_dma_sync_single swiotlb_sync_single
#endif #endif
#ifndef platform_pci_dma_sync_sg #ifndef platform_dma_sync_sg
# define platform_pci_dma_sync_sg swiotlb_sync_sg # define platform_dma_sync_sg swiotlb_sync_sg
#endif #endif
#ifndef platform_pci_dma_supported #ifndef platform_dma_supported
# define platform_pci_dma_supported swiotlb_pci_dma_supported # define platform_dma_supported swiotlb_dma_supported
#endif #endif
#ifndef platform_irq_desc #ifndef platform_irq_desc
# define platform_irq_desc __ia64_irq_desc # define platform_irq_desc __ia64_irq_desc
......
...@@ -2,13 +2,13 @@ ...@@ -2,13 +2,13 @@
#define _ASM_IA64_MACHVEC_HPZX1_h #define _ASM_IA64_MACHVEC_HPZX1_h
extern ia64_mv_setup_t dig_setup; extern ia64_mv_setup_t dig_setup;
extern ia64_mv_pci_alloc_consistent sba_alloc_consistent; extern ia64_mv_dma_alloc_coherent sba_alloc_coherent;
extern ia64_mv_pci_free_consistent sba_free_consistent; extern ia64_mv_dma_free_coherent sba_free_coherent;
extern ia64_mv_pci_map_single sba_map_single; extern ia64_mv_dma_map_single sba_map_single;
extern ia64_mv_pci_unmap_single sba_unmap_single; extern ia64_mv_dma_unmap_single sba_unmap_single;
extern ia64_mv_pci_map_sg sba_map_sg; extern ia64_mv_dma_map_sg sba_map_sg;
extern ia64_mv_pci_unmap_sg sba_unmap_sg; extern ia64_mv_dma_unmap_sg sba_unmap_sg;
extern ia64_mv_pci_dma_supported sba_dma_supported; extern ia64_mv_dma_supported sba_dma_supported;
/* /*
* This stuff has dual use! * This stuff has dual use!
...@@ -19,15 +19,15 @@ extern ia64_mv_pci_dma_supported sba_dma_supported; ...@@ -19,15 +19,15 @@ extern ia64_mv_pci_dma_supported sba_dma_supported;
*/ */
#define platform_name "hpzx1" #define platform_name "hpzx1"
#define platform_setup dig_setup #define platform_setup dig_setup
#define platform_pci_dma_init ((ia64_mv_pci_dma_init *) machvec_noop) #define platform_dma_init ((ia64_mv_dma_init *) machvec_noop)
#define platform_pci_alloc_consistent sba_alloc_consistent #define platform_dma_alloc_coherent sba_alloc_coherent
#define platform_pci_free_consistent sba_free_consistent #define platform_dma_free_coherent sba_free_coherent
#define platform_pci_map_single sba_map_single #define platform_dma_map_single sba_map_single
#define platform_pci_unmap_single sba_unmap_single #define platform_dma_unmap_single sba_unmap_single
#define platform_pci_map_sg sba_map_sg #define platform_dma_map_sg sba_map_sg
#define platform_pci_unmap_sg sba_unmap_sg #define platform_dma_unmap_sg sba_unmap_sg
#define platform_pci_dma_sync_single ((ia64_mv_pci_dma_sync_single *) machvec_noop) #define platform_dma_sync_single ((ia64_mv_dma_sync_single *) machvec_memory_fence)
#define platform_pci_dma_sync_sg ((ia64_mv_pci_dma_sync_sg *) machvec_noop) #define platform_dma_sync_sg ((ia64_mv_dma_sync_sg *) machvec_memory_fence)
#define platform_pci_dma_supported sba_dma_supported #define platform_dma_supported sba_dma_supported
#endif /* _ASM_IA64_MACHVEC_HPZX1_h */ #endif /* _ASM_IA64_MACHVEC_HPZX1_h */
#define __MACHVEC_HDR(n) <asm/machvec_##n##.h>
#define __MACHVEC_EXPAND(n) __MACHVEC_HDR(n)
#define MACHVEC_PLATFORM_HEADER __MACHVEC_EXPAND(MACHVEC_PLATFORM_NAME)
#include <asm/machvec.h> #include <asm/machvec.h>
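The two-level expansion is the standard idiom for forcing MACHVEC_PLATFORM_NAME to be macro-expanded before token pasting; calling __MACHVEC_HDR directly would paste the literal text MACHVEC_PLATFORM_NAME. Trace for the dig platform at the top of this patch:

	MACHVEC_PLATFORM_HEADER
	  -> __MACHVEC_EXPAND(MACHVEC_PLATFORM_NAME)
	  -> __MACHVEC_HDR(dig)		/* argument expanded first */
	  -> <asm/machvec_dig.h>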
extern ia64_mv_send_ipi_t ia64_send_ipi; extern ia64_mv_send_ipi_t ia64_send_ipi;
......
...@@ -44,14 +44,14 @@ extern ia64_mv_inl_t sn1_inl; ...@@ -44,14 +44,14 @@ extern ia64_mv_inl_t sn1_inl;
extern ia64_mv_outb_t sn1_outb; extern ia64_mv_outb_t sn1_outb;
extern ia64_mv_outw_t sn1_outw; extern ia64_mv_outw_t sn1_outw;
extern ia64_mv_outl_t sn1_outl; extern ia64_mv_outl_t sn1_outl;
extern ia64_mv_pci_alloc_consistent sn1_pci_alloc_consistent; extern ia64_mv_dma_alloc_coherent sn1_dma_alloc_coherent;
extern ia64_mv_pci_free_consistent sn1_pci_free_consistent; extern ia64_mv_dma_free_coherent sn1_dma_free_coherent;
extern ia64_mv_pci_map_single sn1_pci_map_single; extern ia64_mv_dma_map_single sn1_dma_map_single;
extern ia64_mv_pci_unmap_single sn1_pci_unmap_single; extern ia64_mv_dma_unmap_single sn1_dma_unmap_single;
extern ia64_mv_pci_map_sg sn1_pci_map_sg; extern ia64_mv_dma_map_sg sn1_dma_map_sg;
extern ia64_mv_pci_unmap_sg sn1_pci_unmap_sg; extern ia64_mv_dma_unmap_sg sn1_dma_unmap_sg;
extern ia64_mv_pci_dma_sync_single sn1_pci_dma_sync_single; extern ia64_mv_dma_sync_single sn1_dma_sync_single;
extern ia64_mv_pci_dma_sync_sg sn1_pci_dma_sync_sg; extern ia64_mv_dma_sync_sg sn1_dma_sync_sg;
/* /*
* This stuff has dual use! * This stuff has dual use!
...@@ -72,14 +72,14 @@ extern ia64_mv_pci_dma_sync_sg sn1_pci_dma_sync_sg; ...@@ -72,14 +72,14 @@ extern ia64_mv_pci_dma_sync_sg sn1_pci_dma_sync_sg;
#define platform_outb sn1_outb #define platform_outb sn1_outb
#define platform_outw sn1_outw #define platform_outw sn1_outw
#define platform_outl sn1_outl #define platform_outl sn1_outl
#define platform_pci_dma_init machvec_noop #define platform_dma_init machvec_noop
#define platform_pci_alloc_consistent sn1_pci_alloc_consistent #define platform_dma_alloc_coherent sn1_dma_alloc_coherent
#define platform_pci_free_consistent sn1_pci_free_consistent #define platform_dma_free_coherent sn1_dma_free_coherent
#define platform_pci_map_single sn1_pci_map_single #define platform_dma_map_single sn1_dma_map_single
#define platform_pci_unmap_single sn1_pci_unmap_single #define platform_dma_unmap_single sn1_dma_unmap_single
#define platform_pci_map_sg sn1_pci_map_sg #define platform_dma_map_sg sn1_dma_map_sg
#define platform_pci_unmap_sg sn1_pci_unmap_sg #define platform_dma_unmap_sg sn1_dma_unmap_sg
#define platform_pci_dma_sync_single sn1_pci_dma_sync_single #define platform_dma_sync_single sn1_dma_sync_single
#define platform_pci_dma_sync_sg sn1_pci_dma_sync_sg #define platform_dma_sync_sg sn1_dma_sync_sg
#endif /* _ASM_IA64_MACHVEC_SN1_h */ #endif /* _ASM_IA64_MACHVEC_SN1_h */
...@@ -51,15 +51,15 @@ extern ia64_mv_readb_t __sn_readb; ...@@ -51,15 +51,15 @@ extern ia64_mv_readb_t __sn_readb;
extern ia64_mv_readw_t __sn_readw; extern ia64_mv_readw_t __sn_readw;
extern ia64_mv_readl_t __sn_readl; extern ia64_mv_readl_t __sn_readl;
extern ia64_mv_readq_t __sn_readq; extern ia64_mv_readq_t __sn_readq;
extern ia64_mv_pci_alloc_consistent sn_pci_alloc_consistent; extern ia64_mv_dma_alloc_coherent sn_dma_alloc_coherent;
extern ia64_mv_pci_free_consistent sn_pci_free_consistent; extern ia64_mv_dma_free_coherent sn_dma_free_coherent;
extern ia64_mv_pci_map_single sn_pci_map_single; extern ia64_mv_dma_map_single sn_dma_map_single;
extern ia64_mv_pci_unmap_single sn_pci_unmap_single; extern ia64_mv_dma_unmap_single sn_dma_unmap_single;
extern ia64_mv_pci_map_sg sn_pci_map_sg; extern ia64_mv_dma_map_sg sn_dma_map_sg;
extern ia64_mv_pci_unmap_sg sn_pci_unmap_sg; extern ia64_mv_dma_unmap_sg sn_dma_unmap_sg;
extern ia64_mv_pci_dma_sync_single sn_pci_dma_sync_single; extern ia64_mv_dma_sync_single sn_dma_sync_single;
extern ia64_mv_pci_dma_sync_sg sn_pci_dma_sync_sg; extern ia64_mv_dma_sync_sg sn_dma_sync_sg;
extern ia64_mv_pci_dma_supported sn_pci_dma_supported; extern ia64_mv_dma_supported sn_dma_supported;
/* /*
* This stuff has dual use! * This stuff has dual use!
...@@ -88,15 +88,15 @@ extern ia64_mv_pci_dma_supported sn_pci_dma_supported; ...@@ -88,15 +88,15 @@ extern ia64_mv_pci_dma_supported sn_pci_dma_supported;
#define platform_irq_desc sn_irq_desc #define platform_irq_desc sn_irq_desc
#define platform_irq_to_vector sn_irq_to_vector #define platform_irq_to_vector sn_irq_to_vector
#define platform_local_vector_to_irq sn_local_vector_to_irq #define platform_local_vector_to_irq sn_local_vector_to_irq
#define platform_pci_dma_init machvec_noop #define platform_dma_init machvec_noop
#define platform_pci_alloc_consistent sn_pci_alloc_consistent #define platform_dma_alloc_coherent sn_dma_alloc_coherent
#define platform_pci_free_consistent sn_pci_free_consistent #define platform_dma_free_coherent sn_dma_free_coherent
#define platform_pci_map_single sn_pci_map_single #define platform_dma_map_single sn_dma_map_single
#define platform_pci_unmap_single sn_pci_unmap_single #define platform_dma_unmap_single sn_dma_unmap_single
#define platform_pci_map_sg sn_pci_map_sg #define platform_dma_map_sg sn_dma_map_sg
#define platform_pci_unmap_sg sn_pci_unmap_sg #define platform_dma_unmap_sg sn_dma_unmap_sg
#define platform_pci_dma_sync_single sn_pci_dma_sync_single #define platform_dma_sync_single sn_dma_sync_single
#define platform_pci_dma_sync_sg sn_pci_dma_sync_sg #define platform_dma_sync_sg sn_dma_sync_sg
#define platform_pci_dma_supported sn_pci_dma_supported #define platform_dma_supported sn_dma_supported
#endif /* _ASM_IA64_MACHVEC_SN2_H */ #endif /* _ASM_IA64_MACHVEC_SN2_H */
...@@ -11,7 +11,10 @@ ...@@ -11,7 +11,10 @@
#define _ASM_IA64_MCA_H #define _ASM_IA64_MCA_H
#if !defined(__ASSEMBLY__) #if !defined(__ASSEMBLY__)
#include <linux/interrupt.h>
#include <linux/types.h> #include <linux/types.h>
#include <asm/param.h> #include <asm/param.h>
#include <asm/sal.h> #include <asm/sal.h>
#include <asm/processor.h> #include <asm/processor.h>
...@@ -129,10 +132,10 @@ extern void ia64_os_mca_dispatch_end(void); ...@@ -129,10 +132,10 @@ extern void ia64_os_mca_dispatch_end(void);
extern void ia64_mca_ucmc_handler(void); extern void ia64_mca_ucmc_handler(void);
extern void ia64_monarch_init_handler(void); extern void ia64_monarch_init_handler(void);
extern void ia64_slave_init_handler(void); extern void ia64_slave_init_handler(void);
extern void ia64_mca_rendez_int_handler(int,void *,struct pt_regs *); extern irqreturn_t ia64_mca_rendez_int_handler(int,void *,struct pt_regs *);
extern void ia64_mca_wakeup_int_handler(int,void *,struct pt_regs *); extern irqreturn_t ia64_mca_wakeup_int_handler(int,void *,struct pt_regs *);
extern void ia64_mca_cmc_int_handler(int,void *,struct pt_regs *); extern irqreturn_t ia64_mca_cmc_int_handler(int,void *,struct pt_regs *);
extern void ia64_mca_cpe_int_handler(int,void *,struct pt_regs *); extern irqreturn_t ia64_mca_cpe_int_handler(int,void *,struct pt_regs *);
extern int ia64_log_print(int,prfunc_t); extern int ia64_log_print(int,prfunc_t);
extern void ia64_mca_cmc_vector_setup(void); extern void ia64_mca_cmc_vector_setup(void);
extern int ia64_mca_check_errors(void); extern int ia64_mca_check_errors(void);
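The irqreturn_t conversion (new in 2.5.69) lets the core detect stuck or unhandled interrupt lines: each handler now reports whether the interrupt was really its own. A hedged sketch of the new handler shape (the device check is hypothetical):

	#include <linux/interrupt.h>	/* irqreturn_t, IRQ_HANDLED, IRQ_NONE */

	static irqreturn_t example_int_handler(int irq, void *dev_id, struct pt_regs *regs)
	{
		if (!device_raised_event(dev_id))	/* hypothetical check */
			return IRQ_NONE;		/* not ours */
		/* ... acknowledge and service the event ... */
		return IRQ_HANDLED;
	}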
......
...@@ -47,18 +47,7 @@ pcibios_penalize_isa_irq (int irq) ...@@ -47,18 +47,7 @@ pcibios_penalize_isa_irq (int irq)
#define HAVE_ARCH_PCI_MWI 1 #define HAVE_ARCH_PCI_MWI 1
extern int pcibios_prep_mwi (struct pci_dev *); extern int pcibios_prep_mwi (struct pci_dev *);
/* #include <asm-generic/pci-dma-compat.h>
* Dynamic DMA mapping API. See Documentation/DMA-mapping.txt for details.
*/
#define pci_alloc_consistent platform_pci_alloc_consistent
#define pci_free_consistent platform_pci_free_consistent
#define pci_map_single platform_pci_map_single
#define pci_unmap_single platform_pci_unmap_single
#define pci_map_sg platform_pci_map_sg
#define pci_unmap_sg platform_pci_unmap_sg
#define pci_dma_sync_single platform_pci_dma_sync_single
#define pci_dma_sync_sg platform_pci_dma_sync_sg
#define pci_dma_supported platform_pci_dma_supported
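The removed pci_* defines aren't lost: <asm-generic/pci-dma-compat.h> re-creates them as inline wrappers that forward to the generic dma_* API on &pdev->dev, so existing drivers keep compiling. The shim looks roughly like this (a hedged paraphrase, not the verbatim header):

	static inline dma_addr_t
	pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
	{
		return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev,
				      ptr, size, (enum dma_data_direction) direction);
	}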
/* pci_unmap_{single,page} is not a nop, thus... */ /* pci_unmap_{single,page} is not a nop, thus... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
...@@ -74,18 +63,12 @@ extern int pcibios_prep_mwi (struct pci_dev *); ...@@ -74,18 +63,12 @@ extern int pcibios_prep_mwi (struct pci_dev *);
#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
(((PTR)->LEN_NAME) = (VAL)) (((PTR)->LEN_NAME) = (VAL))
#define pci_map_page(dev,pg,off,size,dir) \
pci_map_single((dev), page_address(pg) + (off), (size), (dir))
#define pci_unmap_page(dev,dma_addr,size,dir) \
pci_unmap_single((dev), (dma_addr), (size), (dir))
/* The ia64 platform always supports 64-bit addressing. */ /* The ia64 platform always supports 64-bit addressing. */
#define pci_dac_dma_supported(pci_dev, mask) (1) #define pci_dac_dma_supported(pci_dev, mask) (1)
#define pci_dac_page_to_dma(dev,pg,off,dir) ((dma_addr_t) page_to_bus(pg) + (off)) #define pci_dac_page_to_dma(dev,pg,off,dir) ((dma_addr_t) page_to_bus(pg) + (off))
#define pci_dac_dma_to_page(dev,dma_addr) (virt_to_page(bus_to_virt(dma_addr))) #define pci_dac_dma_to_page(dev,dma_addr) (virt_to_page(bus_to_virt(dma_addr)))
#define pci_dac_dma_to_offset(dev,dma_addr) ((dma_addr) & ~PAGE_MASK) #define pci_dac_dma_to_offset(dev,dma_addr) ((dma_addr) & ~PAGE_MASK)
#define pci_dac_dma_sync_single(dev,dma_addr,len,dir) do { /* nothing */ } while (0) #define pci_dac_dma_sync_single(dev,dma_addr,len,dir) do { mb(); } while (0)
/* Return the index of the PCI controller for device PDEV. */ /* Return the index of the PCI controller for device PDEV. */
#define pci_controller_num(PDEV) (0) #define pci_controller_num(PDEV) (0)
......
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
#include <linux/compiler.h> #include <linux/compiler.h>
/* /*
* Copyright (C) 2002 Hewlett-Packard Co * Copyright (C) 2002-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com> * David Mosberger-Tang <davidm@hpl.hp.com>
*/ */
...@@ -35,6 +35,8 @@ extern unsigned long __per_cpu_offset[NR_CPUS]; ...@@ -35,6 +35,8 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(var##__per_cpu) #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(var##__per_cpu)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var##__per_cpu) #define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var##__per_cpu)
extern void setup_per_cpu_areas (void);
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
#endif /* _ASM_IA64_PERCPU_H */ #endif /* _ASM_IA64_PERCPU_H */
...@@ -38,6 +38,14 @@ ...@@ -38,6 +38,14 @@
*/ */
#define TASK_SIZE (current->thread.task_size) #define TASK_SIZE (current->thread.task_size)
/*
* MM_VM_SIZE(mm) gives the maximum address (plus 1) which may contain a mapping for
* address-space MM. Note that with 32-bit tasks, this is still DEFAULT_TASK_SIZE,
* because the kernel may have installed helper-mappings above TASK_SIZE. For example,
* for x86 emulation, the LDT and GDT are mapped above TASK_SIZE.
*/
#define MM_VM_SIZE(mm) DEFAULT_TASK_SIZE
/* /*
* This decides where the kernel will search for a free chunk of vm * This decides where the kernel will search for a free chunk of vm
* space during mmap's. * space during mmap's.
......
...@@ -212,24 +212,27 @@ extern void ia64_load_extra (struct task_struct *task); ...@@ -212,24 +212,27 @@ extern void ia64_load_extra (struct task_struct *task);
# define PERFMON_IS_SYSWIDE() (0) # define PERFMON_IS_SYSWIDE() (0)
#endif #endif
#define IA64_HAS_EXTRA_STATE(t) \
((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID) \
|| IS_IA32_PROCESS(ia64_task_regs(t)) || PERFMON_IS_SYSWIDE())
#define __switch_to(prev,next,last) do { \ #define __switch_to(prev,next,last) do { \
if (((prev)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)) \ struct task_struct *__fpu_owner = ia64_get_fpu_owner(); \
|| IS_IA32_PROCESS(ia64_task_regs(prev)) || PERFMON_IS_SYSWIDE()) \ if (IA64_HAS_EXTRA_STATE(prev)) \
ia64_save_extra(prev); \ ia64_save_extra(prev); \
if (((next)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)) \ if (IA64_HAS_EXTRA_STATE(next)) \
|| IS_IA32_PROCESS(ia64_task_regs(next)) || PERFMON_IS_SYSWIDE()) \
ia64_load_extra(next); \ ia64_load_extra(next); \
ia64_psr(ia64_task_regs(next))->dfh = \
!(__fpu_owner == (next) && ((next)->thread.last_fph_cpu == smp_processor_id())); \
(last) = ia64_switch_to((next)); \ (last) = ia64_switch_to((next)); \
} while (0) } while (0)
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
/* /*
* In the SMP case, we save the fph state when context-switching * In the SMP case, we save the fph state when context-switching away from a thread that
* away from a thread that modified fph. This way, when the thread * modified fph. This way, when the thread gets scheduled on another CPU, the CPU can
* gets scheduled on another CPU, the CPU can pick up the state from * pick up the state from task->thread.fph, avoiding the complication of having to fetch
* task->thread.fph, avoiding the complication of having to fetch * the latest fph state from another CPU. In other words: eager save, lazy restore.
* the latest fph state from another CPU.
*/ */
# define switch_to(prev,next,last) do { \ # define switch_to(prev,next,last) do { \
if (ia64_psr(ia64_task_regs(prev))->mfh) { \ if (ia64_psr(ia64_task_regs(prev))->mfh) { \
...@@ -238,22 +241,10 @@ extern void ia64_load_extra (struct task_struct *task); ...@@ -238,22 +241,10 @@ extern void ia64_load_extra (struct task_struct *task);
__ia64_save_fpu((prev)->thread.fph); \ __ia64_save_fpu((prev)->thread.fph); \
(prev)->thread.last_fph_cpu = smp_processor_id(); \ (prev)->thread.last_fph_cpu = smp_processor_id(); \
} \ } \
if ((next)->thread.flags & IA64_THREAD_FPH_VALID) { \ __switch_to(prev, next, last); \
if (((next)->thread.last_fph_cpu == smp_processor_id()) \
&& (ia64_get_fpu_owner() == next)) \
{ \
ia64_psr(ia64_task_regs(next))->dfh = 0; \
ia64_psr(ia64_task_regs(next))->mfh = 0; \
} else \
ia64_psr(ia64_task_regs(next))->dfh = 1; \
} \
__switch_to(prev,next,last); \
} while (0)
#else
# define switch_to(prev,next,last) do { \
ia64_psr(ia64_task_regs(next))->dfh = (ia64_get_fpu_owner() != (next)); \
__switch_to(prev,next,last); \
} while (0) } while (0)
#else
# define switch_to(prev,next,last) __switch_to(prev, next, last)
#endif #endif
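The dfh (disabled-fph) bit is what makes the restore lazy: __switch_to sets it unless this CPU provably still holds the incoming task's f32-f127, and the first fph access then traps so the kernel can reload from task->thread.fph. A hedged userspace model of that ownership test (all names hypothetical):

	struct task { int last_fph_cpu; };
	static struct task *fpu_owner[NR_CPUS];	/* last task to load fph, per CPU */

	/* Nonzero if 'next' must fault-and-restore before touching fph. */
	static int fph_restore_needed(struct task *next, int cpu)
	{
		return !(fpu_owner[cpu] == next && next->last_fph_cpu == cpu);
	}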
/* /*
......
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
* *
* Copyright (C) 1999 VA Linux Systems * Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com> * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
* Copyright (C) 1999, 2002 Hewlett-Packard Co. * Copyright (C) 1999, 2002-2003 Hewlett-Packard Co.
* David Mosberger-Tang <davidm@hpl.hp.com> * David Mosberger-Tang <davidm@hpl.hp.com>
* Stephane Eranian <eranian@hpl.hp.com> * Stephane Eranian <eranian@hpl.hp.com>
*/ */
...@@ -21,12 +21,12 @@ ...@@ -21,12 +21,12 @@
#include <asm/system.h> #include <asm/system.h>
#define EFI_SUCCESS 0 #define EFI_SUCCESS 0
#define EFI_LOAD_ERROR (1L | (1L << 63)) #define EFI_LOAD_ERROR ( 1 | (1UL << 63))
#define EFI_INVALID_PARAMETER (2L | (1L << 63)) #define EFI_INVALID_PARAMETER ( 2 | (1UL << 63))
#define EFI_UNSUPPORTED (3L | (1L << 63)) #define EFI_UNSUPPORTED ( 3 | (1UL << 63))
#define EFI_BAD_BUFFER_SIZE (4L | (1L << 63)) #define EFI_BAD_BUFFER_SIZE ( 4 | (1UL << 63))
#define EFI_BUFFER_TOO_SMALL (5L | (1L << 63)) #define EFI_BUFFER_TOO_SMALL ( 5 | (1UL << 63))
#define EFI_NOT_FOUND (14L | (1L << 63)) #define EFI_NOT_FOUND (14 | (1UL << 63))
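The rewritten constants stop shifting into the sign bit of a signed long: 1L << 63 overflows long (and warns under GCC v3.4), whereas 1UL << 63 is well defined and matches efi_status_t, itself an unsigned long. Side by side:

	unsigned long bad  = (1L | (1L  << 63));	/* shifts into the sign bit */
	unsigned long good = ( 1 | (1UL << 63));	/* unsigned: well defined */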
typedef unsigned long efi_status_t; typedef unsigned long efi_status_t;
typedef u8 efi_bool_t; typedef u8 efi_bool_t;
...@@ -260,7 +260,7 @@ efi_guid_unparse(efi_guid_t *guid, char *out) ...@@ -260,7 +260,7 @@ efi_guid_unparse(efi_guid_t *guid, char *out)
extern void efi_init (void); extern void efi_init (void);
extern void efi_map_pal_code (void); extern void efi_map_pal_code (void);
extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg); extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
extern void efi_gettimeofday (struct timeval *tv); extern void efi_gettimeofday (struct timespec *ts);
extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */ extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */
extern u64 efi_get_iobase (void); extern u64 efi_get_iobase (void);
extern u32 efi_mem_type (unsigned long phys_addr); extern u32 efi_mem_type (unsigned long phys_addr);
......