Commit a0863130 authored by Linus Torvalds

Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] kprobes arch consolidation build fix
  [IA64] update efi region debugging to use MB, GB and TB as well as KB
  [IA64] use dev_printk in video quirk
  [IA64] remove remaining __FUNCTION__ occurrences
  [IA64] remove unnecessary nfs includes from sys_ia32.c
  [IA64] remove CONFIG_SMP ifdef in ia64_send_ipi()
  [IA64] arch_ptrace() cleanup
  [IA64] remove duplicate code from arch_ptrace()
  [IA64] convert sys_ptrace to arch_ptrace
  [IA64] remove find_thread_for_addr()
  [IA64] do not sync RBS when changing PT_AR_BSP or PT_CFM
  [IA64] access user RBS directly
parents b8815026 45e18c22
...@@ -71,7 +71,7 @@ hwsw_init (void) ...@@ -71,7 +71,7 @@ hwsw_init (void)
#ifdef CONFIG_IA64_GENERIC #ifdef CONFIG_IA64_GENERIC
/* Better to have normal DMA than panic */ /* Better to have normal DMA than panic */
printk(KERN_WARNING "%s: Failed to initialize software I/O TLB," printk(KERN_WARNING "%s: Failed to initialize software I/O TLB,"
" reverting to hpzx1 platform vector\n", __FUNCTION__); " reverting to hpzx1 platform vector\n", __func__);
machvec_init("hpzx1"); machvec_init("hpzx1");
#else #else
panic("Unable to initialize software I/O TLB services"); panic("Unable to initialize software I/O TLB services");
......
...@@ -529,7 +529,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint) ...@@ -529,7 +529,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
base_mask = RESMAP_MASK(bits_wanted); base_mask = RESMAP_MASK(bits_wanted);
mask = base_mask << bitshiftcnt; mask = base_mask << bitshiftcnt;
DBG_RES("%s() o %ld %p", __FUNCTION__, o, res_ptr); DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
for(; res_ptr < res_end ; res_ptr++) for(; res_ptr < res_end ; res_ptr++)
{ {
DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr); DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr);
...@@ -679,7 +679,7 @@ sba_alloc_range(struct ioc *ioc, size_t size) ...@@ -679,7 +679,7 @@ sba_alloc_range(struct ioc *ioc, size_t size)
#endif #endif
DBG_RES("%s(%x) %d -> %lx hint %x/%x\n", DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
__FUNCTION__, size, pages_needed, pide, __func__, size, pages_needed, pide,
(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map), (uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
ioc->res_bitshift ); ioc->res_bitshift );
...@@ -722,8 +722,8 @@ sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size) ...@@ -722,8 +722,8 @@ sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
m = RESMAP_MASK(bits_not_wanted) << (pide & (BITS_PER_LONG - 1)); m = RESMAP_MASK(bits_not_wanted) << (pide & (BITS_PER_LONG - 1));
bits_not_wanted = 0; bits_not_wanted = 0;
DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __FUNCTION__, (uint) iova, size, DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __func__, (uint) iova, size,
bits_not_wanted, m, pide, res_ptr, *res_ptr); bits_not_wanted, m, pide, res_ptr, *res_ptr);
ASSERT(m != 0); ASSERT(m != 0);
ASSERT(bits_not_wanted); ASSERT(bits_not_wanted);
...@@ -940,8 +940,7 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir) ...@@ -940,8 +940,7 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir)
iovp = (dma_addr_t) pide << iovp_shift; iovp = (dma_addr_t) pide << iovp_shift;
DBG_RUN("%s() 0x%p -> 0x%lx\n", DBG_RUN("%s() 0x%p -> 0x%lx\n", __func__, addr, (long) iovp | offset);
__FUNCTION__, addr, (long) iovp | offset);
pdir_start = &(ioc->pdir_base[pide]); pdir_start = &(ioc->pdir_base[pide]);
...@@ -1029,8 +1028,7 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir) ...@@ -1029,8 +1028,7 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
#endif #endif
offset = iova & ~iovp_mask; offset = iova & ~iovp_mask;
DBG_RUN("%s() iovp 0x%lx/%x\n", DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);
__FUNCTION__, (long) iova, size);
iova ^= offset; /* clear offset bits */ iova ^= offset; /* clear offset bits */
size += offset; size += offset;
...@@ -1404,7 +1402,7 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di ...@@ -1404,7 +1402,7 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
struct scatterlist *sg; struct scatterlist *sg;
#endif #endif
DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents); DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
ioc = GET_IOC(dev); ioc = GET_IOC(dev);
ASSERT(ioc); ASSERT(ioc);
...@@ -1468,7 +1466,7 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di ...@@ -1468,7 +1466,7 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
#endif #endif
ASSERT(coalesced == filled); ASSERT(coalesced == filled);
DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled); DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);
return filled; return filled;
} }
...@@ -1491,7 +1489,7 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in ...@@ -1491,7 +1489,7 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in
#endif #endif
DBG_RUN_SG("%s() START %d entries, %p,%x\n", DBG_RUN_SG("%s() START %d entries, %p,%x\n",
__FUNCTION__, nents, sba_sg_address(sglist), sglist->length); __func__, nents, sba_sg_address(sglist), sglist->length);
#ifdef ASSERT_PDIR_SANITY #ifdef ASSERT_PDIR_SANITY
ioc = GET_IOC(dev); ioc = GET_IOC(dev);
...@@ -1509,7 +1507,7 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in ...@@ -1509,7 +1507,7 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in
nents--; nents--;
} }
DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents); DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
#ifdef ASSERT_PDIR_SANITY #ifdef ASSERT_PDIR_SANITY
spin_lock_irqsave(&ioc->res_lock, flags); spin_lock_irqsave(&ioc->res_lock, flags);
...@@ -1546,7 +1544,7 @@ ioc_iova_init(struct ioc *ioc) ...@@ -1546,7 +1544,7 @@ ioc_iova_init(struct ioc *ioc)
ioc->iov_size = ~ioc->imask + 1; ioc->iov_size = ~ioc->imask + 1;
DBG_INIT("%s() hpa %p IOV base 0x%lx mask 0x%lx (%dMB)\n", DBG_INIT("%s() hpa %p IOV base 0x%lx mask 0x%lx (%dMB)\n",
__FUNCTION__, ioc->ioc_hpa, ioc->ibase, ioc->imask, __func__, ioc->ioc_hpa, ioc->ibase, ioc->imask,
ioc->iov_size >> 20); ioc->iov_size >> 20);
switch (iovp_size) { switch (iovp_size) {
...@@ -1569,7 +1567,7 @@ ioc_iova_init(struct ioc *ioc) ...@@ -1569,7 +1567,7 @@ ioc_iova_init(struct ioc *ioc)
memset(ioc->pdir_base, 0, ioc->pdir_size); memset(ioc->pdir_base, 0, ioc->pdir_size);
DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __FUNCTION__, DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __func__,
iovp_size >> 10, ioc->pdir_base, ioc->pdir_size); iovp_size >> 10, ioc->pdir_base, ioc->pdir_size);
ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base); ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base);
...@@ -1612,7 +1610,7 @@ ioc_iova_init(struct ioc *ioc) ...@@ -1612,7 +1610,7 @@ ioc_iova_init(struct ioc *ioc)
prefetch_spill_page = virt_to_phys(addr); prefetch_spill_page = virt_to_phys(addr);
DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __FUNCTION__, prefetch_spill_page); DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __func__, prefetch_spill_page);
} }
/* /*
** Set all the PDIR entries valid w/ the spill page as the target ** Set all the PDIR entries valid w/ the spill page as the target
...@@ -1641,7 +1639,7 @@ ioc_resource_init(struct ioc *ioc) ...@@ -1641,7 +1639,7 @@ ioc_resource_init(struct ioc *ioc)
/* resource map size dictated by pdir_size */ /* resource map size dictated by pdir_size */
ioc->res_size = ioc->pdir_size / PDIR_ENTRY_SIZE; /* entries */ ioc->res_size = ioc->pdir_size / PDIR_ENTRY_SIZE; /* entries */
ioc->res_size >>= 3; /* convert bit count to byte count */ ioc->res_size >>= 3; /* convert bit count to byte count */
DBG_INIT("%s() res_size 0x%x\n", __FUNCTION__, ioc->res_size); DBG_INIT("%s() res_size 0x%x\n", __func__, ioc->res_size);
ioc->res_map = (char *) __get_free_pages(GFP_KERNEL, ioc->res_map = (char *) __get_free_pages(GFP_KERNEL,
get_order(ioc->res_size)); get_order(ioc->res_size));
...@@ -1664,7 +1662,7 @@ ioc_resource_init(struct ioc *ioc) ...@@ -1664,7 +1662,7 @@ ioc_resource_init(struct ioc *ioc)
| prefetch_spill_page); | prefetch_spill_page);
#endif #endif
DBG_INIT("%s() res_map %x %p\n", __FUNCTION__, DBG_INIT("%s() res_map %x %p\n", __func__,
ioc->res_size, (void *) ioc->res_map); ioc->res_size, (void *) ioc->res_map);
} }
...@@ -1767,7 +1765,7 @@ ioc_init(u64 hpa, void *handle) ...@@ -1767,7 +1765,7 @@ ioc_init(u64 hpa, void *handle)
iovp_size = (1 << iovp_shift); iovp_size = (1 << iovp_shift);
iovp_mask = ~(iovp_size - 1); iovp_mask = ~(iovp_size - 1);
DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __FUNCTION__, DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __func__,
PAGE_SIZE >> 10, iovp_size >> 10); PAGE_SIZE >> 10, iovp_size >> 10);
if (!ioc->name) { if (!ioc->name) {
...@@ -2137,7 +2135,7 @@ sba_page_override(char *str) ...@@ -2137,7 +2135,7 @@ sba_page_override(char *str)
break; break;
default: default:
printk("%s: unknown/unsupported iommu page size %ld\n", printk("%s: unknown/unsupported iommu page size %ld\n",
__FUNCTION__, page_size); __func__, page_size);
} }
return 1; return 1;
......
...@@ -222,7 +222,7 @@ simeth_probe1(void) ...@@ -222,7 +222,7 @@ simeth_probe1(void)
} }
if ((rc = assign_irq_vector(AUTO_ASSIGN)) < 0) if ((rc = assign_irq_vector(AUTO_ASSIGN)) < 0)
panic("%s: out of interrupt vectors!\n", __FUNCTION__); panic("%s: out of interrupt vectors!\n", __func__);
dev->irq = rc; dev->irq = rc;
/* /*
......
...@@ -1000,7 +1000,7 @@ simrs_init (void) ...@@ -1000,7 +1000,7 @@ simrs_init (void)
if (!state->irq) { if (!state->irq) {
if ((rc = assign_irq_vector(AUTO_ASSIGN)) < 0) if ((rc = assign_irq_vector(AUTO_ASSIGN)) < 0)
panic("%s: out of interrupt vectors!\n", panic("%s: out of interrupt vectors!\n",
__FUNCTION__); __func__);
state->irq = rc; state->irq = rc;
ia64_ssc_connect_irq(KEYBOARD_INTR, state->irq); ia64_ssc_connect_irq(KEYBOARD_INTR, state->irq);
} }
......
...@@ -32,13 +32,8 @@ ...@@ -32,13 +32,8 @@
#include <linux/shm.h> #include <linux/shm.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/uio.h> #include <linux/uio.h>
#include <linux/nfs_fs.h> #include <linux/socket.h>
#include <linux/quota.h> #include <linux/quota.h>
#include <linux/sunrpc/svc.h>
#include <linux/nfsd/nfsd.h>
#include <linux/nfsd/cache.h>
#include <linux/nfsd/xdr.h>
#include <linux/nfsd/syscall.h>
#include <linux/poll.h> #include <linux/poll.h>
#include <linux/eventpoll.h> #include <linux/eventpoll.h>
#include <linux/personality.h> #include <linux/personality.h>
......
...@@ -155,7 +155,7 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data) ...@@ -155,7 +155,7 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
if (val == DIE_INIT_MONARCH_LEAVE) if (val == DIE_INIT_MONARCH_LEAVE)
ia64_mca_printk(KERN_NOTICE ia64_mca_printk(KERN_NOTICE
"%s: kdump not configured\n", "%s: kdump not configured\n",
__FUNCTION__); __func__);
return NOTIFY_DONE; return NOTIFY_DONE;
} }
......
...@@ -379,8 +379,8 @@ efi_get_pal_addr (void) ...@@ -379,8 +379,8 @@ efi_get_pal_addr (void)
* a dedicated ITR for the PAL code. * a dedicated ITR for the PAL code.
*/ */
if ((vaddr & mask) == (KERNEL_START & mask)) { if ((vaddr & mask) == (KERNEL_START & mask)) {
printk(KERN_INFO "%s: no need to install ITR for " printk(KERN_INFO "%s: no need to install ITR for PAL code\n",
"PAL code\n", __FUNCTION__); __func__);
continue; continue;
} }
...@@ -399,7 +399,7 @@ efi_get_pal_addr (void) ...@@ -399,7 +399,7 @@ efi_get_pal_addr (void)
return __va(md->phys_addr); return __va(md->phys_addr);
} }
printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n", printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n",
__FUNCTION__); __func__);
return NULL; return NULL;
} }
...@@ -543,12 +543,30 @@ efi_init (void) ...@@ -543,12 +543,30 @@ efi_init (void)
for (i = 0, p = efi_map_start; p < efi_map_end; for (i = 0, p = efi_map_start; p < efi_map_end;
++i, p += efi_desc_size) ++i, p += efi_desc_size)
{ {
const char *unit;
unsigned long size;
md = p; md = p;
printk("mem%02u: type=%u, attr=0x%lx, " size = md->num_pages << EFI_PAGE_SHIFT;
"range=[0x%016lx-0x%016lx) (%luMB)\n",
if ((size >> 40) > 0) {
size >>= 40;
unit = "TB";
} else if ((size >> 30) > 0) {
size >>= 30;
unit = "GB";
} else if ((size >> 20) > 0) {
size >>= 20;
unit = "MB";
} else {
size >>= 10;
unit = "KB";
}
printk("mem%02d: type=%2u, attr=0x%016lx, "
"range=[0x%016lx-0x%016lx) (%4lu%s)\n",
i, md->type, md->attribute, md->phys_addr, i, md->type, md->attribute, md->phys_addr,
md->phys_addr + efi_md_size(md), md->phys_addr + efi_md_size(md), size, unit);
md->num_pages >> (20 - EFI_PAGE_SHIFT));
} }
} }
#endif #endif
......
...@@ -534,7 +534,7 @@ iosapic_reassign_vector (int irq) ...@@ -534,7 +534,7 @@ iosapic_reassign_vector (int irq)
if (iosapic_intr_info[irq].count) { if (iosapic_intr_info[irq].count) {
new_irq = create_irq(); new_irq = create_irq();
if (new_irq < 0) if (new_irq < 0)
panic("%s: out of interrupt vectors!\n", __FUNCTION__); panic("%s: out of interrupt vectors!\n", __func__);
printk(KERN_INFO "Reassigning vector %d to %d\n", printk(KERN_INFO "Reassigning vector %d to %d\n",
irq_to_vector(irq), irq_to_vector(new_irq)); irq_to_vector(irq), irq_to_vector(new_irq));
memcpy(&iosapic_intr_info[new_irq], &iosapic_intr_info[irq], memcpy(&iosapic_intr_info[new_irq], &iosapic_intr_info[irq],
...@@ -599,7 +599,7 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery, ...@@ -599,7 +599,7 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery,
index = find_iosapic(gsi); index = find_iosapic(gsi);
if (index < 0) { if (index < 0) {
printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n", printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n",
__FUNCTION__, gsi); __func__, gsi);
return -ENODEV; return -ENODEV;
} }
...@@ -608,7 +608,7 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery, ...@@ -608,7 +608,7 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery,
rte = iosapic_alloc_rte(); rte = iosapic_alloc_rte();
if (!rte) { if (!rte) {
printk(KERN_WARNING "%s: cannot allocate memory\n", printk(KERN_WARNING "%s: cannot allocate memory\n",
__FUNCTION__); __func__);
return -ENOMEM; return -ENOMEM;
} }
...@@ -625,7 +625,7 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery, ...@@ -625,7 +625,7 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery,
(info->trigger != trigger || info->polarity != polarity)){ (info->trigger != trigger || info->polarity != polarity)){
printk (KERN_WARNING printk (KERN_WARNING
"%s: cannot override the interrupt\n", "%s: cannot override the interrupt\n",
__FUNCTION__); __func__);
return -EINVAL; return -EINVAL;
} }
rte->refcnt++; rte->refcnt++;
...@@ -647,7 +647,7 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery, ...@@ -647,7 +647,7 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery,
if (idesc->chip != &no_irq_type) if (idesc->chip != &no_irq_type)
printk(KERN_WARNING printk(KERN_WARNING
"%s: changing vector %d from %s to %s\n", "%s: changing vector %d from %s to %s\n",
__FUNCTION__, irq_to_vector(irq), __func__, irq_to_vector(irq),
idesc->chip->name, irq_type->name); idesc->chip->name, irq_type->name);
idesc->chip = irq_type; idesc->chip = irq_type;
} }
...@@ -920,7 +920,7 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi, ...@@ -920,7 +920,7 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
case ACPI_INTERRUPT_INIT: case ACPI_INTERRUPT_INIT:
irq = create_irq(); irq = create_irq();
if (irq < 0) if (irq < 0)
panic("%s: out of interrupt vectors!\n", __FUNCTION__); panic("%s: out of interrupt vectors!\n", __func__);
vector = irq_to_vector(irq); vector = irq_to_vector(irq);
delivery = IOSAPIC_INIT; delivery = IOSAPIC_INIT;
break; break;
...@@ -931,7 +931,7 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi, ...@@ -931,7 +931,7 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
mask = 1; mask = 1;
break; break;
default: default:
printk(KERN_ERR "%s: invalid int type 0x%x\n", __FUNCTION__, printk(KERN_ERR "%s: invalid int type 0x%x\n", __func__,
int_type); int_type);
return -1; return -1;
} }
...@@ -996,7 +996,7 @@ iosapic_system_init (int system_pcat_compat) ...@@ -996,7 +996,7 @@ iosapic_system_init (int system_pcat_compat)
*/ */
printk(KERN_INFO printk(KERN_INFO
"%s: Disabling PC-AT compatible 8259 interrupts\n", "%s: Disabling PC-AT compatible 8259 interrupts\n",
__FUNCTION__); __func__);
outb(0xff, 0xA1); outb(0xff, 0xA1);
outb(0xff, 0x21); outb(0xff, 0x21);
} }
...@@ -1011,7 +1011,7 @@ iosapic_alloc (void) ...@@ -1011,7 +1011,7 @@ iosapic_alloc (void)
if (!iosapic_lists[index].addr) if (!iosapic_lists[index].addr)
return index; return index;
printk(KERN_WARNING "%s: failed to allocate iosapic\n", __FUNCTION__); printk(KERN_WARNING "%s: failed to allocate iosapic\n", __func__);
return -1; return -1;
} }
...@@ -1109,14 +1109,14 @@ iosapic_remove (unsigned int gsi_base) ...@@ -1109,14 +1109,14 @@ iosapic_remove (unsigned int gsi_base)
index = find_iosapic(gsi_base); index = find_iosapic(gsi_base);
if (index < 0) { if (index < 0) {
printk(KERN_WARNING "%s: No IOSAPIC for GSI base %u\n", printk(KERN_WARNING "%s: No IOSAPIC for GSI base %u\n",
__FUNCTION__, gsi_base); __func__, gsi_base);
goto out; goto out;
} }
if (iosapic_lists[index].rtes_inuse) { if (iosapic_lists[index].rtes_inuse) {
err = -EBUSY; err = -EBUSY;
printk(KERN_WARNING "%s: IOSAPIC for GSI base %u is busy\n", printk(KERN_WARNING "%s: IOSAPIC for GSI base %u is busy\n",
__FUNCTION__, gsi_base); __func__, gsi_base);
goto out; goto out;
} }
...@@ -1137,7 +1137,7 @@ map_iosapic_to_node(unsigned int gsi_base, int node) ...@@ -1137,7 +1137,7 @@ map_iosapic_to_node(unsigned int gsi_base, int node)
index = find_iosapic(gsi_base); index = find_iosapic(gsi_base);
if (index < 0) { if (index < 0) {
printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n", printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n",
__FUNCTION__, gsi_base); __func__, gsi_base);
return; return;
} }
iosapic_lists[index].node = node; iosapic_lists[index].node = node;
......
...@@ -507,7 +507,7 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs) ...@@ -507,7 +507,7 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
if (unlikely(irq < 0)) { if (unlikely(irq < 0)) {
printk(KERN_ERR "%s: Unexpected interrupt " printk(KERN_ERR "%s: Unexpected interrupt "
"vector %d on CPU %d is not mapped " "vector %d on CPU %d is not mapped "
"to any IRQ!\n", __FUNCTION__, vector, "to any IRQ!\n", __func__, vector,
smp_processor_id()); smp_processor_id());
} else } else
generic_handle_irq(irq); generic_handle_irq(irq);
...@@ -572,7 +572,7 @@ void ia64_process_pending_intr(void) ...@@ -572,7 +572,7 @@ void ia64_process_pending_intr(void)
if (unlikely(irq < 0)) { if (unlikely(irq < 0)) {
printk(KERN_ERR "%s: Unexpected interrupt " printk(KERN_ERR "%s: Unexpected interrupt "
"vector %d on CPU %d not being mapped " "vector %d on CPU %d not being mapped "
"to any IRQ!!\n", __FUNCTION__, vector, "to any IRQ!!\n", __func__, vector,
smp_processor_id()); smp_processor_id());
} else { } else {
vectors_in_migration[irq]=0; vectors_in_migration[irq]=0;
...@@ -666,11 +666,7 @@ ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect) ...@@ -666,11 +666,7 @@ ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
unsigned long ipi_data; unsigned long ipi_data;
unsigned long phys_cpu_id; unsigned long phys_cpu_id;
#ifdef CONFIG_SMP
phys_cpu_id = cpu_physical_id(cpu); phys_cpu_id = cpu_physical_id(cpu);
#else
phys_cpu_id = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
#endif
/* /*
* cpu number is in 8bit ID and 8bit EID * cpu number is in 8bit ID and 8bit EID
......
...@@ -838,7 +838,7 @@ static int __kprobes post_kprobes_handler(struct pt_regs *regs) ...@@ -838,7 +838,7 @@ static int __kprobes post_kprobes_handler(struct pt_regs *regs)
return 1; return 1;
} }
int __kprobes kprobes_fault_handler(struct pt_regs *regs, int trapnr) int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{ {
struct kprobe *cur = kprobe_running(); struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
......
...@@ -413,8 +413,8 @@ ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe) ...@@ -413,8 +413,8 @@ ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe)
IA64_LOG_INDEX_INC(sal_info_type); IA64_LOG_INDEX_INC(sal_info_type);
IA64_LOG_UNLOCK(sal_info_type); IA64_LOG_UNLOCK(sal_info_type);
if (irq_safe) { if (irq_safe) {
IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. " IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. Record length = %ld\n",
"Record length = %ld\n", __FUNCTION__, sal_info_type, total_len); __func__, sal_info_type, total_len);
} }
*buffer = (u8 *) log_buffer; *buffer = (u8 *) log_buffer;
return total_len; return total_len;
...@@ -518,7 +518,7 @@ ia64_mca_cpe_int_handler (int cpe_irq, void *arg) ...@@ -518,7 +518,7 @@ ia64_mca_cpe_int_handler (int cpe_irq, void *arg)
static DEFINE_SPINLOCK(cpe_history_lock); static DEFINE_SPINLOCK(cpe_history_lock);
IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n", IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
__FUNCTION__, cpe_irq, smp_processor_id()); __func__, cpe_irq, smp_processor_id());
/* SAL spec states this should run w/ interrupts enabled */ /* SAL spec states this should run w/ interrupts enabled */
local_irq_enable(); local_irq_enable();
...@@ -594,7 +594,7 @@ ia64_mca_register_cpev (int cpev) ...@@ -594,7 +594,7 @@ ia64_mca_register_cpev (int cpev)
} }
IA64_MCA_DEBUG("%s: corrected platform error " IA64_MCA_DEBUG("%s: corrected platform error "
"vector %#x registered\n", __FUNCTION__, cpev); "vector %#x registered\n", __func__, cpev);
} }
#endif /* CONFIG_ACPI */ #endif /* CONFIG_ACPI */
...@@ -621,12 +621,11 @@ ia64_mca_cmc_vector_setup (void) ...@@ -621,12 +621,11 @@ ia64_mca_cmc_vector_setup (void)
cmcv.cmcv_vector = IA64_CMC_VECTOR; cmcv.cmcv_vector = IA64_CMC_VECTOR;
ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval); ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
IA64_MCA_DEBUG("%s: CPU %d corrected " IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x registered.\n",
"machine check vector %#x registered.\n", __func__, smp_processor_id(), IA64_CMC_VECTOR);
__FUNCTION__, smp_processor_id(), IA64_CMC_VECTOR);
IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n", IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
__FUNCTION__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV)); __func__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
} }
/* /*
...@@ -651,9 +650,8 @@ ia64_mca_cmc_vector_disable (void *dummy) ...@@ -651,9 +650,8 @@ ia64_mca_cmc_vector_disable (void *dummy)
cmcv.cmcv_mask = 1; /* Mask/disable interrupt */ cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval); ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
IA64_MCA_DEBUG("%s: CPU %d corrected " IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x disabled.\n",
"machine check vector %#x disabled.\n", __func__, smp_processor_id(), cmcv.cmcv_vector);
__FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
} }
/* /*
...@@ -678,9 +676,8 @@ ia64_mca_cmc_vector_enable (void *dummy) ...@@ -678,9 +676,8 @@ ia64_mca_cmc_vector_enable (void *dummy)
cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */ cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval); ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
IA64_MCA_DEBUG("%s: CPU %d corrected " IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x enabled.\n",
"machine check vector %#x enabled.\n", __func__, smp_processor_id(), cmcv.cmcv_vector);
__FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
} }
/* /*
...@@ -767,7 +764,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg) ...@@ -767,7 +764,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
local_irq_save(flags); local_irq_save(flags);
if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", get_irq_regs(), if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", get_irq_regs(),
(long)&nd, 0, 0) == NOTIFY_STOP) (long)&nd, 0, 0) == NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__); ia64_mca_spin(__func__);
ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE; ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
/* Register with the SAL monarch that the slave has /* Register with the SAL monarch that the slave has
...@@ -777,7 +774,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg) ...@@ -777,7 +774,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", get_irq_regs(), if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", get_irq_regs(),
(long)&nd, 0, 0) == NOTIFY_STOP) (long)&nd, 0, 0) == NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__); ia64_mca_spin(__func__);
/* Wait for the monarch cpu to exit. */ /* Wait for the monarch cpu to exit. */
while (monarch_cpu != -1) while (monarch_cpu != -1)
...@@ -785,7 +782,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg) ...@@ -785,7 +782,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", get_irq_regs(), if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", get_irq_regs(),
(long)&nd, 0, 0) == NOTIFY_STOP) (long)&nd, 0, 0) == NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__); ia64_mca_spin(__func__);
ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE; ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
/* Enable all interrupts */ /* Enable all interrupts */
...@@ -1230,7 +1227,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw, ...@@ -1230,7 +1227,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0) if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0)
== NOTIFY_STOP) == NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__); ia64_mca_spin(__func__);
ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA; ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
if (sos->monarch) { if (sos->monarch) {
...@@ -1246,7 +1243,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw, ...@@ -1246,7 +1243,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
ia64_mca_wakeup_all(); ia64_mca_wakeup_all();
if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, (long)&nd, 0, 0) if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, (long)&nd, 0, 0)
== NOTIFY_STOP) == NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__); ia64_mca_spin(__func__);
} else { } else {
while (cpu_isset(cpu, mca_cpu)) while (cpu_isset(cpu, mca_cpu))
cpu_relax(); /* spin until monarch wakes us */ cpu_relax(); /* spin until monarch wakes us */
...@@ -1276,7 +1273,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw, ...@@ -1276,7 +1273,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
} }
if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover) if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
== NOTIFY_STOP) == NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__); ia64_mca_spin(__func__);
if (atomic_dec_return(&mca_count) > 0) { if (atomic_dec_return(&mca_count) > 0) {
...@@ -1328,7 +1325,7 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg) ...@@ -1328,7 +1325,7 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg)
static DEFINE_SPINLOCK(cmc_history_lock); static DEFINE_SPINLOCK(cmc_history_lock);
IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n", IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
__FUNCTION__, cmc_irq, smp_processor_id()); __func__, cmc_irq, smp_processor_id());
/* SAL spec states this should run w/ interrupts enabled */ /* SAL spec states this should run w/ interrupts enabled */
local_irq_enable(); local_irq_enable();
...@@ -1614,7 +1611,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw, ...@@ -1614,7 +1611,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
*/ */
if (!sos->monarch && atomic_add_return(1, &slaves) == num_online_cpus()) { if (!sos->monarch && atomic_add_return(1, &slaves) == num_online_cpus()) {
mprintk(KERN_WARNING "%s: Promoting cpu %d to monarch.\n", mprintk(KERN_WARNING "%s: Promoting cpu %d to monarch.\n",
__FUNCTION__, cpu); __func__, cpu);
atomic_dec(&slaves); atomic_dec(&slaves);
sos->monarch = 1; sos->monarch = 1;
} }
...@@ -1626,7 +1623,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw, ...@@ -1626,7 +1623,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
*/ */
if (sos->monarch && atomic_add_return(1, &monarchs) > 1) { if (sos->monarch && atomic_add_return(1, &monarchs) > 1) {
mprintk(KERN_WARNING "%s: Demoting cpu %d to slave.\n", mprintk(KERN_WARNING "%s: Demoting cpu %d to slave.\n",
__FUNCTION__, cpu); __func__, cpu);
atomic_dec(&monarchs); atomic_dec(&monarchs);
sos->monarch = 0; sos->monarch = 0;
} }
...@@ -1637,15 +1634,15 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw, ...@@ -1637,15 +1634,15 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
cpu_relax(); /* spin until monarch enters */ cpu_relax(); /* spin until monarch enters */
if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, (long)&nd, 0, 0) if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, (long)&nd, 0, 0)
== NOTIFY_STOP) == NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__); ia64_mca_spin(__func__);
if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, (long)&nd, 0, 0) if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, (long)&nd, 0, 0)
== NOTIFY_STOP) == NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__); ia64_mca_spin(__func__);
while (monarch_cpu != -1) while (monarch_cpu != -1)
cpu_relax(); /* spin until monarch leaves */ cpu_relax(); /* spin until monarch leaves */
if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, (long)&nd, 0, 0) if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, (long)&nd, 0, 0)
== NOTIFY_STOP) == NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__); ia64_mca_spin(__func__);
mprintk("Slave on cpu %d returning to normal service.\n", cpu); mprintk("Slave on cpu %d returning to normal service.\n", cpu);
set_curr_task(cpu, previous_current); set_curr_task(cpu, previous_current);
ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE; ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
...@@ -1656,7 +1653,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw, ...@@ -1656,7 +1653,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
monarch_cpu = cpu; monarch_cpu = cpu;
if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, (long)&nd, 0, 0) if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, (long)&nd, 0, 0)
== NOTIFY_STOP) == NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__); ia64_mca_spin(__func__);
/* /*
* Wait for a bit. On some machines (e.g., HP's zx2000 and zx6000, INIT can be * Wait for a bit. On some machines (e.g., HP's zx2000 and zx6000, INIT can be
...@@ -1673,10 +1670,10 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw, ...@@ -1673,10 +1670,10 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
*/ */
if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, (long)&nd, 0, 0) if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, (long)&nd, 0, 0)
== NOTIFY_STOP) == NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__); ia64_mca_spin(__func__);
if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, (long)&nd, 0, 0) if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, (long)&nd, 0, 0)
== NOTIFY_STOP) == NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__); ia64_mca_spin(__func__);
mprintk("\nINIT dump complete. Monarch on cpu %d returning to normal service.\n", cpu); mprintk("\nINIT dump complete. Monarch on cpu %d returning to normal service.\n", cpu);
atomic_dec(&monarchs); atomic_dec(&monarchs);
set_curr_task(cpu, previous_current); set_curr_task(cpu, previous_current);
...@@ -1884,7 +1881,7 @@ ia64_mca_init(void) ...@@ -1884,7 +1881,7 @@ ia64_mca_init(void)
.priority = 0/* we need to notified last */ .priority = 0/* we need to notified last */
}; };
IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__); IA64_MCA_DEBUG("%s: begin\n", __func__);
/* Clear the Rendez checkin flag for all cpus */ /* Clear the Rendez checkin flag for all cpus */
for(i = 0 ; i < NR_CPUS; i++) for(i = 0 ; i < NR_CPUS; i++)
...@@ -1928,7 +1925,7 @@ ia64_mca_init(void) ...@@ -1928,7 +1925,7 @@ ia64_mca_init(void)
return; return;
} }
IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __FUNCTION__); IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __func__);
ia64_mc_info.imi_mca_handler = ia64_tpa(mca_hldlr_ptr->fp); ia64_mc_info.imi_mca_handler = ia64_tpa(mca_hldlr_ptr->fp);
/* /*
...@@ -1949,7 +1946,7 @@ ia64_mca_init(void) ...@@ -1949,7 +1946,7 @@ ia64_mca_init(void)
return; return;
} }
IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __FUNCTION__, IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __func__,
ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp)); ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp));
/* /*
...@@ -1961,7 +1958,7 @@ ia64_mca_init(void) ...@@ -1961,7 +1958,7 @@ ia64_mca_init(void)
ia64_mc_info.imi_slave_init_handler = ia64_tpa(init_hldlr_ptr_slave->fp); ia64_mc_info.imi_slave_init_handler = ia64_tpa(init_hldlr_ptr_slave->fp);
ia64_mc_info.imi_slave_init_handler_size = 0; ia64_mc_info.imi_slave_init_handler_size = 0;
IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __FUNCTION__, IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __func__,
ia64_mc_info.imi_monarch_init_handler); ia64_mc_info.imi_monarch_init_handler);
/* Register the os init handler with SAL */ /* Register the os init handler with SAL */
...@@ -1982,7 +1979,7 @@ ia64_mca_init(void) ...@@ -1982,7 +1979,7 @@ ia64_mca_init(void)
return; return;
} }
IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __FUNCTION__); IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __func__);
/* /*
* Configure the CMCI/P vector and handler. Interrupts for CMC are * Configure the CMCI/P vector and handler. Interrupts for CMC are
...@@ -2042,7 +2039,7 @@ ia64_mca_late_init(void) ...@@ -2042,7 +2039,7 @@ ia64_mca_late_init(void)
cmc_polling_enabled = 0; cmc_polling_enabled = 0;
schedule_work(&cmc_enable_work); schedule_work(&cmc_enable_work);
IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __FUNCTION__); IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __func__);
#ifdef CONFIG_ACPI #ifdef CONFIG_ACPI
/* Setup the CPEI/P vector and handler */ /* Setup the CPEI/P vector and handler */
...@@ -2065,17 +2062,17 @@ ia64_mca_late_init(void) ...@@ -2065,17 +2062,17 @@ ia64_mca_late_init(void)
ia64_cpe_irq = irq; ia64_cpe_irq = irq;
ia64_mca_register_cpev(cpe_vector); ia64_mca_register_cpev(cpe_vector);
IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n", IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n",
__FUNCTION__); __func__);
return 0; return 0;
} }
printk(KERN_ERR "%s: Failed to find irq for CPE " printk(KERN_ERR "%s: Failed to find irq for CPE "
"interrupt handler, vector %d\n", "interrupt handler, vector %d\n",
__FUNCTION__, cpe_vector); __func__, cpe_vector);
} }
/* If platform doesn't support CPEI, get the timer going. */ /* If platform doesn't support CPEI, get the timer going. */
if (cpe_poll_enabled) { if (cpe_poll_enabled) {
ia64_mca_cpe_poll(0UL); ia64_mca_cpe_poll(0UL);
IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __FUNCTION__); IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __func__);
} }
} }
#endif #endif
......
...@@ -493,7 +493,7 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings, ...@@ -493,7 +493,7 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
mod->arch.opd->sh_addralign = 8; mod->arch.opd->sh_addralign = 8;
mod->arch.opd->sh_size = fdescs * sizeof(struct fdesc); mod->arch.opd->sh_size = fdescs * sizeof(struct fdesc);
DEBUGP("%s: core.plt=%lx, init.plt=%lx, got=%lx, fdesc=%lx\n", DEBUGP("%s: core.plt=%lx, init.plt=%lx, got=%lx, fdesc=%lx\n",
__FUNCTION__, mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size, __func__, mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size,
mod->arch.got->sh_size, mod->arch.opd->sh_size); mod->arch.got->sh_size, mod->arch.opd->sh_size);
return 0; return 0;
} }
...@@ -585,7 +585,7 @@ get_plt (struct module *mod, const struct insn *insn, uint64_t value, int *okp) ...@@ -585,7 +585,7 @@ get_plt (struct module *mod, const struct insn *insn, uint64_t value, int *okp)
#if ARCH_MODULE_DEBUG #if ARCH_MODULE_DEBUG
if (plt_target(plt) != target_ip) { if (plt_target(plt) != target_ip) {
printk("%s: mistargeted PLT: wanted %lx, got %lx\n", printk("%s: mistargeted PLT: wanted %lx, got %lx\n",
__FUNCTION__, target_ip, plt_target(plt)); __func__, target_ip, plt_target(plt));
*okp = 0; *okp = 0;
return 0; return 0;
} }
...@@ -703,7 +703,7 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend, ...@@ -703,7 +703,7 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
if (r_type == R_IA64_PCREL21BI) { if (r_type == R_IA64_PCREL21BI) {
if (!is_internal(mod, val)) { if (!is_internal(mod, val)) {
printk(KERN_ERR "%s: %s reloc against non-local symbol (%lx)\n", printk(KERN_ERR "%s: %s reloc against non-local symbol (%lx)\n",
__FUNCTION__, reloc_name[r_type], val); __func__, reloc_name[r_type], val);
return -ENOEXEC; return -ENOEXEC;
} }
format = RF_INSN21B; format = RF_INSN21B;
...@@ -737,7 +737,7 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend, ...@@ -737,7 +737,7 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
case R_IA64_LDXMOV: case R_IA64_LDXMOV:
if (gp_addressable(mod, val)) { if (gp_addressable(mod, val)) {
/* turn "ld8" into "mov": */ /* turn "ld8" into "mov": */
DEBUGP("%s: patching ld8 at %p to mov\n", __FUNCTION__, location); DEBUGP("%s: patching ld8 at %p to mov\n", __func__, location);
ia64_patch((u64) location, 0x1fff80fe000UL, 0x10000000000UL); ia64_patch((u64) location, 0x1fff80fe000UL, 0x10000000000UL);
} }
return 0; return 0;
...@@ -771,7 +771,7 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend, ...@@ -771,7 +771,7 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
if (!ok) if (!ok)
return -ENOEXEC; return -ENOEXEC;
DEBUGP("%s: [%p]<-%016lx = %s(%lx)\n", __FUNCTION__, location, val, DEBUGP("%s: [%p]<-%016lx = %s(%lx)\n", __func__, location, val,
reloc_name[r_type] ? reloc_name[r_type] : "?", sym->st_value + addend); reloc_name[r_type] ? reloc_name[r_type] : "?", sym->st_value + addend);
switch (format) { switch (format) {
...@@ -807,7 +807,7 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind ...@@ -807,7 +807,7 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
Elf64_Shdr *target_sec; Elf64_Shdr *target_sec;
int ret; int ret;
DEBUGP("%s: applying section %u (%u relocs) to %u\n", __FUNCTION__, DEBUGP("%s: applying section %u (%u relocs) to %u\n", __func__,
relsec, n, sechdrs[relsec].sh_info); relsec, n, sechdrs[relsec].sh_info);
target_sec = sechdrs + sechdrs[relsec].sh_info; target_sec = sechdrs + sechdrs[relsec].sh_info;
...@@ -835,7 +835,7 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind ...@@ -835,7 +835,7 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
gp = mod->core_size / 2; gp = mod->core_size / 2;
gp = (uint64_t) mod->module_core + ((gp + 7) & -8); gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
mod->arch.gp = gp; mod->arch.gp = gp;
DEBUGP("%s: placing gp at 0x%lx\n", __FUNCTION__, gp); DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
} }
for (i = 0; i < n; i++) { for (i = 0; i < n; i++) {
...@@ -903,7 +903,7 @@ register_unwind_table (struct module *mod) ...@@ -903,7 +903,7 @@ register_unwind_table (struct module *mod)
init = start + num_core; init = start + num_core;
} }
DEBUGP("%s: name=%s, gp=%lx, num_init=%lu, num_core=%lu\n", __FUNCTION__, DEBUGP("%s: name=%s, gp=%lx, num_init=%lu, num_core=%lu\n", __func__,
mod->name, mod->arch.gp, num_init, num_core); mod->name, mod->arch.gp, num_init, num_core);
/* /*
...@@ -912,13 +912,13 @@ register_unwind_table (struct module *mod) ...@@ -912,13 +912,13 @@ register_unwind_table (struct module *mod)
if (num_core > 0) { if (num_core > 0) {
mod->arch.core_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp, mod->arch.core_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
core, core + num_core); core, core + num_core);
DEBUGP("%s: core: handle=%p [%p-%p)\n", __FUNCTION__, DEBUGP("%s: core: handle=%p [%p-%p)\n", __func__,
mod->arch.core_unw_table, core, core + num_core); mod->arch.core_unw_table, core, core + num_core);
} }
if (num_init > 0) { if (num_init > 0) {
mod->arch.init_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp, mod->arch.init_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
init, init + num_init); init, init + num_init);
DEBUGP("%s: init: handle=%p [%p-%p)\n", __FUNCTION__, DEBUGP("%s: init: handle=%p [%p-%p)\n", __func__,
mod->arch.init_unw_table, init, init + num_init); mod->arch.init_unw_table, init, init + num_init);
} }
} }
...@@ -926,7 +926,7 @@ register_unwind_table (struct module *mod) ...@@ -926,7 +926,7 @@ register_unwind_table (struct module *mod)
int int
module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod) module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod)
{ {
DEBUGP("%s: init: entry=%p\n", __FUNCTION__, mod->init); DEBUGP("%s: init: entry=%p\n", __func__, mod->init);
if (mod->arch.unwind) if (mod->arch.unwind)
register_unwind_table(mod); register_unwind_table(mod);
return 0; return 0;
......
...@@ -227,12 +227,12 @@ ...@@ -227,12 +227,12 @@
#ifdef PFM_DEBUGGING #ifdef PFM_DEBUGGING
#define DPRINT(a) \ #define DPRINT(a) \
do { \ do { \
if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \ if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
} while (0) } while (0)
#define DPRINT_ovfl(a) \ #define DPRINT_ovfl(a) \
do { \ do { \
if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \ if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
} while (0) } while (0)
#endif #endif
......
...@@ -24,12 +24,12 @@ MODULE_LICENSE("GPL"); ...@@ -24,12 +24,12 @@ MODULE_LICENSE("GPL");
#ifdef DEFAULT_DEBUG #ifdef DEFAULT_DEBUG
#define DPRINT(a) \ #define DPRINT(a) \
do { \ do { \
if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \ if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d ", __func__, __LINE__, smp_processor_id()); printk a; } \
} while (0) } while (0)
#define DPRINT_ovfl(a) \ #define DPRINT_ovfl(a) \
do { \ do { \
if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \ if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d ", __func__, __LINE__, smp_processor_id()); printk a; } \
} while (0) } while (0)
#else #else
......
This diff is collapsed.
...@@ -690,7 +690,7 @@ get_model_name(__u8 family, __u8 model) ...@@ -690,7 +690,7 @@ get_model_name(__u8 family, __u8 model)
if (overflow++ == 0) if (overflow++ == 0)
printk(KERN_ERR printk(KERN_ERR
"%s: Table overflow. Some processor model information will be missing\n", "%s: Table overflow. Some processor model information will be missing\n",
__FUNCTION__); __func__);
return "Unknown"; return "Unknown";
} }
...@@ -785,7 +785,7 @@ get_max_cacheline_size (void) ...@@ -785,7 +785,7 @@ get_max_cacheline_size (void)
status = ia64_pal_cache_summary(&levels, &unique_caches); status = ia64_pal_cache_summary(&levels, &unique_caches);
if (status != 0) { if (status != 0) {
printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n", printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
__FUNCTION__, status); __func__, status);
max = SMP_CACHE_BYTES; max = SMP_CACHE_BYTES;
/* Safest setup for "flush_icache_range()" */ /* Safest setup for "flush_icache_range()" */
ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT; ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
...@@ -798,7 +798,7 @@ get_max_cacheline_size (void) ...@@ -798,7 +798,7 @@ get_max_cacheline_size (void)
if (status != 0) { if (status != 0) {
printk(KERN_ERR printk(KERN_ERR
"%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n", "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n",
__FUNCTION__, l, status); __func__, l, status);
max = SMP_CACHE_BYTES; max = SMP_CACHE_BYTES;
/* The safest setup for "flush_icache_range()" */ /* The safest setup for "flush_icache_range()" */
cci.pcci_stride = I_CACHE_STRIDE_SHIFT; cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
...@@ -814,7 +814,7 @@ get_max_cacheline_size (void) ...@@ -814,7 +814,7 @@ get_max_cacheline_size (void)
if (status != 0) { if (status != 0) {
printk(KERN_ERR printk(KERN_ERR
"%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n", "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n",
__FUNCTION__, l, status); __func__, l, status);
/* The safest setup for "flush_icache_range()" */ /* The safest setup for "flush_icache_range()" */
cci.pcci_stride = I_CACHE_STRIDE_SHIFT; cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
} }
......
...@@ -28,7 +28,7 @@ extern int die_if_kernel(char *str, struct pt_regs *regs, long err); ...@@ -28,7 +28,7 @@ extern int die_if_kernel(char *str, struct pt_regs *regs, long err);
#undef DEBUG_UNALIGNED_TRAP #undef DEBUG_UNALIGNED_TRAP
#ifdef DEBUG_UNALIGNED_TRAP #ifdef DEBUG_UNALIGNED_TRAP
# define DPRINT(a...) do { printk("%s %u: ", __FUNCTION__, __LINE__); printk (a); } while (0) # define DPRINT(a...) do { printk("%s %u: ", __func__, __LINE__); printk (a); } while (0)
# define DDUMP(str,vp,len) dump(str, vp, len) # define DDUMP(str,vp,len) dump(str, vp, len)
static void static void
...@@ -674,7 +674,7 @@ emulate_load_updates (update_t type, load_store_t ld, struct pt_regs *regs, unsi ...@@ -674,7 +674,7 @@ emulate_load_updates (update_t type, load_store_t ld, struct pt_regs *regs, unsi
* just in case. * just in case.
*/ */
if (ld.x6_op == 1 || ld.x6_op == 3) { if (ld.x6_op == 1 || ld.x6_op == 3) {
printk(KERN_ERR "%s: register update on speculative load, error\n", __FUNCTION__); printk(KERN_ERR "%s: register update on speculative load, error\n", __func__);
if (die_if_kernel("unaligned reference on speculative load with register update\n", if (die_if_kernel("unaligned reference on speculative load with register update\n",
regs, 30)) regs, 30))
return; return;
...@@ -1104,7 +1104,7 @@ emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs ...@@ -1104,7 +1104,7 @@ emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs
*/ */
if (ld.x6_op == 1 || ld.x6_op == 3) if (ld.x6_op == 1 || ld.x6_op == 3)
printk(KERN_ERR "%s: register update on speculative load pair, error\n", printk(KERN_ERR "%s: register update on speculative load pair, error\n",
__FUNCTION__); __func__);
setreg(ld.r3, ifa, 0, regs); setreg(ld.r3, ifa, 0, regs);
} }
......
This diff is collapsed.
...@@ -26,7 +26,7 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap) ...@@ -26,7 +26,7 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
if (!user_mode(regs)) { if (!user_mode(regs)) {
/* kprobe_running() needs smp_processor_id() */ /* kprobe_running() needs smp_processor_id() */
preempt_disable(); preempt_disable();
if (kprobe_running() && kprobes_fault_handler(regs, trap)) if (kprobe_running() && kprobe_fault_handler(regs, trap))
ret = 1; ret = 1;
preempt_enable(); preempt_enable();
} }
......
...@@ -714,7 +714,7 @@ int arch_add_memory(int nid, u64 start, u64 size) ...@@ -714,7 +714,7 @@ int arch_add_memory(int nid, u64 start, u64 size)
if (ret) if (ret)
printk("%s: Problem encountered in __add_pages() as ret=%d\n", printk("%s: Problem encountered in __add_pages() as ret=%d\n",
__FUNCTION__, ret); __func__, ret);
return ret; return ret;
} }
......
...@@ -63,7 +63,7 @@ static void __devinit pci_fixup_video(struct pci_dev *pdev) ...@@ -63,7 +63,7 @@ static void __devinit pci_fixup_video(struct pci_dev *pdev)
pci_read_config_word(pdev, PCI_COMMAND, &config); pci_read_config_word(pdev, PCI_COMMAND, &config);
if (config & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) { if (config & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW; pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW;
printk(KERN_DEBUG "Boot video device is %s\n", pci_name(pdev)); dev_printk(KERN_DEBUG, &pdev->dev, "Boot video device\n");
} }
} }
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_video); DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_video);
...@@ -765,7 +765,7 @@ static void __init set_pci_cacheline_size(void) ...@@ -765,7 +765,7 @@ static void __init set_pci_cacheline_size(void)
status = ia64_pal_cache_summary(&levels, &unique_caches); status = ia64_pal_cache_summary(&levels, &unique_caches);
if (status != 0) { if (status != 0) {
printk(KERN_ERR "%s: ia64_pal_cache_summary() failed " printk(KERN_ERR "%s: ia64_pal_cache_summary() failed "
"(status=%ld)\n", __FUNCTION__, status); "(status=%ld)\n", __func__, status);
return; return;
} }
...@@ -773,7 +773,7 @@ static void __init set_pci_cacheline_size(void) ...@@ -773,7 +773,7 @@ static void __init set_pci_cacheline_size(void)
/* cache_type (data_or_unified)= */ 2, &cci); /* cache_type (data_or_unified)= */ 2, &cci);
if (status != 0) { if (status != 0) {
printk(KERN_ERR "%s: ia64_pal_cache_config_info() failed " printk(KERN_ERR "%s: ia64_pal_cache_config_info() failed "
"(status=%ld)\n", __FUNCTION__, status); "(status=%ld)\n", __func__, status);
return; return;
} }
pci_cache_line_size = (1 << cci.pcci_line_size) / 4; pci_cache_line_size = (1 << cci.pcci_line_size) / 4;
......
...@@ -37,7 +37,7 @@ static irqreturn_t hub_eint_handler(int irq, void *arg) ...@@ -37,7 +37,7 @@ static irqreturn_t hub_eint_handler(int irq, void *arg)
(u64) nasid, 0, 0, 0, 0, 0, 0); (u64) nasid, 0, 0, 0, 0, 0, 0);
if ((int)ret_stuff.v0) if ((int)ret_stuff.v0)
panic("%s: Fatal %s Error", __FUNCTION__, panic("%s: Fatal %s Error", __func__,
((nasid & 1) ? "TIO" : "HUBII")); ((nasid & 1) ? "TIO" : "HUBII"));
if (!(nasid & 1)) /* Not a TIO, handle CRB errors */ if (!(nasid & 1)) /* Not a TIO, handle CRB errors */
...@@ -48,7 +48,7 @@ static irqreturn_t hub_eint_handler(int irq, void *arg) ...@@ -48,7 +48,7 @@ static irqreturn_t hub_eint_handler(int irq, void *arg)
(u64) nasid, 0, 0, 0, 0, 0, 0); (u64) nasid, 0, 0, 0, 0, 0, 0);
if ((int)ret_stuff.v0) if ((int)ret_stuff.v0)
panic("%s: Fatal TIO Error", __FUNCTION__); panic("%s: Fatal TIO Error", __func__);
} else } else
bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid))); bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid)));
......
...@@ -133,7 +133,7 @@ sn_get_bussoft_ptr(struct pci_bus *bus) ...@@ -133,7 +133,7 @@ sn_get_bussoft_ptr(struct pci_bus *bus)
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
printk(KERN_ERR "%s: " printk(KERN_ERR "%s: "
"acpi_get_vendor_resource() failed (0x%x) for: ", "acpi_get_vendor_resource() failed (0x%x) for: ",
__FUNCTION__, status); __func__, status);
acpi_ns_print_node_pathname(handle, NULL); acpi_ns_print_node_pathname(handle, NULL);
printk("\n"); printk("\n");
return NULL; return NULL;
...@@ -145,7 +145,7 @@ sn_get_bussoft_ptr(struct pci_bus *bus) ...@@ -145,7 +145,7 @@ sn_get_bussoft_ptr(struct pci_bus *bus)
sizeof(struct pcibus_bussoft *)) { sizeof(struct pcibus_bussoft *)) {
printk(KERN_ERR printk(KERN_ERR
"%s: Invalid vendor data length %d\n", "%s: Invalid vendor data length %d\n",
__FUNCTION__, vendor->byte_length); __func__, vendor->byte_length);
kfree(buffer.pointer); kfree(buffer.pointer);
return NULL; return NULL;
} }
...@@ -184,7 +184,7 @@ sn_extract_device_info(acpi_handle handle, struct pcidev_info **pcidev_info, ...@@ -184,7 +184,7 @@ sn_extract_device_info(acpi_handle handle, struct pcidev_info **pcidev_info,
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
printk(KERN_ERR printk(KERN_ERR
"%s: acpi_get_vendor_resource() failed (0x%x) for: ", "%s: acpi_get_vendor_resource() failed (0x%x) for: ",
__FUNCTION__, status); __func__, status);
acpi_ns_print_node_pathname(handle, NULL); acpi_ns_print_node_pathname(handle, NULL);
printk("\n"); printk("\n");
return 1; return 1;
...@@ -196,7 +196,7 @@ sn_extract_device_info(acpi_handle handle, struct pcidev_info **pcidev_info, ...@@ -196,7 +196,7 @@ sn_extract_device_info(acpi_handle handle, struct pcidev_info **pcidev_info,
sizeof(struct pci_devdev_info *)) { sizeof(struct pci_devdev_info *)) {
printk(KERN_ERR printk(KERN_ERR
"%s: Invalid vendor data length: %d for: ", "%s: Invalid vendor data length: %d for: ",
__FUNCTION__, vendor->byte_length); __func__, vendor->byte_length);
acpi_ns_print_node_pathname(handle, NULL); acpi_ns_print_node_pathname(handle, NULL);
printk("\n"); printk("\n");
ret = 1; ret = 1;
...@@ -205,7 +205,7 @@ sn_extract_device_info(acpi_handle handle, struct pcidev_info **pcidev_info, ...@@ -205,7 +205,7 @@ sn_extract_device_info(acpi_handle handle, struct pcidev_info **pcidev_info,
pcidev_ptr = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL); pcidev_ptr = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
if (!pcidev_ptr) if (!pcidev_ptr)
panic("%s: Unable to alloc memory for pcidev_info", __FUNCTION__); panic("%s: Unable to alloc memory for pcidev_info", __func__);
memcpy(&addr, vendor->byte_data, sizeof(struct pcidev_info *)); memcpy(&addr, vendor->byte_data, sizeof(struct pcidev_info *));
pcidev_prom_ptr = __va(addr); pcidev_prom_ptr = __va(addr);
...@@ -214,7 +214,7 @@ sn_extract_device_info(acpi_handle handle, struct pcidev_info **pcidev_info, ...@@ -214,7 +214,7 @@ sn_extract_device_info(acpi_handle handle, struct pcidev_info **pcidev_info,
/* Get the IRQ info */ /* Get the IRQ info */
irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL); irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
if (!irq_info) if (!irq_info)
panic("%s: Unable to alloc memory for sn_irq_info", __FUNCTION__); panic("%s: Unable to alloc memory for sn_irq_info", __func__);
if (pcidev_ptr->pdi_sn_irq_info) { if (pcidev_ptr->pdi_sn_irq_info) {
irq_info_prom = __va(pcidev_ptr->pdi_sn_irq_info); irq_info_prom = __va(pcidev_ptr->pdi_sn_irq_info);
...@@ -249,10 +249,10 @@ get_host_devfn(acpi_handle device_handle, acpi_handle rootbus_handle) ...@@ -249,10 +249,10 @@ get_host_devfn(acpi_handle device_handle, acpi_handle rootbus_handle)
status = acpi_get_parent(child, &parent); status = acpi_get_parent(child, &parent);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
printk(KERN_ERR "%s: acpi_get_parent() failed " printk(KERN_ERR "%s: acpi_get_parent() failed "
"(0x%x) for: ", __FUNCTION__, status); "(0x%x) for: ", __func__, status);
acpi_ns_print_node_pathname(child, NULL); acpi_ns_print_node_pathname(child, NULL);
printk("\n"); printk("\n");
panic("%s: Unable to find host devfn\n", __FUNCTION__); panic("%s: Unable to find host devfn\n", __func__);
} }
if (parent == rootbus_handle) if (parent == rootbus_handle)
break; break;
...@@ -260,7 +260,7 @@ get_host_devfn(acpi_handle device_handle, acpi_handle rootbus_handle) ...@@ -260,7 +260,7 @@ get_host_devfn(acpi_handle device_handle, acpi_handle rootbus_handle)
} }
if (!child) { if (!child) {
printk(KERN_ERR "%s: Unable to find root bus for: ", printk(KERN_ERR "%s: Unable to find root bus for: ",
__FUNCTION__); __func__);
acpi_ns_print_node_pathname(device_handle, NULL); acpi_ns_print_node_pathname(device_handle, NULL);
printk("\n"); printk("\n");
BUG(); BUG();
...@@ -269,10 +269,10 @@ get_host_devfn(acpi_handle device_handle, acpi_handle rootbus_handle) ...@@ -269,10 +269,10 @@ get_host_devfn(acpi_handle device_handle, acpi_handle rootbus_handle)
status = acpi_evaluate_integer(child, METHOD_NAME__ADR, NULL, &adr); status = acpi_evaluate_integer(child, METHOD_NAME__ADR, NULL, &adr);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
printk(KERN_ERR "%s: Unable to get _ADR (0x%x) for: ", printk(KERN_ERR "%s: Unable to get _ADR (0x%x) for: ",
__FUNCTION__, status); __func__, status);
acpi_ns_print_node_pathname(child, NULL); acpi_ns_print_node_pathname(child, NULL);
printk("\n"); printk("\n");
panic("%s: Unable to find host devfn\n", __FUNCTION__); panic("%s: Unable to find host devfn\n", __func__);
} }
slot = (adr >> 16) & 0xffff; slot = (adr >> 16) & 0xffff;
...@@ -308,7 +308,7 @@ find_matching_device(acpi_handle handle, u32 lvl, void *context, void **rv) ...@@ -308,7 +308,7 @@ find_matching_device(acpi_handle handle, u32 lvl, void *context, void **rv)
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
printk(KERN_ERR printk(KERN_ERR
"%s: acpi_get_parent() failed (0x%x) for: ", "%s: acpi_get_parent() failed (0x%x) for: ",
__FUNCTION__, status); __func__, status);
acpi_ns_print_node_pathname(handle, NULL); acpi_ns_print_node_pathname(handle, NULL);
printk("\n"); printk("\n");
return AE_OK; return AE_OK;
...@@ -318,7 +318,7 @@ find_matching_device(acpi_handle handle, u32 lvl, void *context, void **rv) ...@@ -318,7 +318,7 @@ find_matching_device(acpi_handle handle, u32 lvl, void *context, void **rv)
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
printk(KERN_ERR printk(KERN_ERR
"%s: Failed to find _BBN in parent of: ", "%s: Failed to find _BBN in parent of: ",
__FUNCTION__); __func__);
acpi_ns_print_node_pathname(handle, NULL); acpi_ns_print_node_pathname(handle, NULL);
printk("\n"); printk("\n");
return AE_OK; return AE_OK;
...@@ -358,14 +358,14 @@ sn_acpi_get_pcidev_info(struct pci_dev *dev, struct pcidev_info **pcidev_info, ...@@ -358,14 +358,14 @@ sn_acpi_get_pcidev_info(struct pci_dev *dev, struct pcidev_info **pcidev_info,
if (segment != pci_domain_nr(dev)) { if (segment != pci_domain_nr(dev)) {
printk(KERN_ERR printk(KERN_ERR
"%s: Segment number mismatch, 0x%lx vs 0x%x for: ", "%s: Segment number mismatch, 0x%lx vs 0x%x for: ",
__FUNCTION__, segment, pci_domain_nr(dev)); __func__, segment, pci_domain_nr(dev));
acpi_ns_print_node_pathname(rootbus_handle, NULL); acpi_ns_print_node_pathname(rootbus_handle, NULL);
printk("\n"); printk("\n");
return 1; return 1;
} }
} else { } else {
printk(KERN_ERR "%s: Unable to get __SEG from: ", printk(KERN_ERR "%s: Unable to get __SEG from: ",
__FUNCTION__); __func__);
acpi_ns_print_node_pathname(rootbus_handle, NULL); acpi_ns_print_node_pathname(rootbus_handle, NULL);
printk("\n"); printk("\n");
return 1; return 1;
...@@ -386,7 +386,7 @@ sn_acpi_get_pcidev_info(struct pci_dev *dev, struct pcidev_info **pcidev_info, ...@@ -386,7 +386,7 @@ sn_acpi_get_pcidev_info(struct pci_dev *dev, struct pcidev_info **pcidev_info,
if (!pcidev_match.handle) { if (!pcidev_match.handle) {
printk(KERN_ERR printk(KERN_ERR
"%s: Could not find matching ACPI device for %s.\n", "%s: Could not find matching ACPI device for %s.\n",
__FUNCTION__, pci_name(dev)); __func__, pci_name(dev));
return 1; return 1;
} }
...@@ -422,7 +422,7 @@ sn_acpi_slot_fixup(struct pci_dev *dev) ...@@ -422,7 +422,7 @@ sn_acpi_slot_fixup(struct pci_dev *dev)
if (sn_acpi_get_pcidev_info(dev, &pcidev_info, &sn_irq_info)) { if (sn_acpi_get_pcidev_info(dev, &pcidev_info, &sn_irq_info)) {
panic("%s: Failure obtaining pcidev_info for %s\n", panic("%s: Failure obtaining pcidev_info for %s\n",
__FUNCTION__, pci_name(dev)); __func__, pci_name(dev));
} }
if (pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE]) { if (pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE]) {
...@@ -463,7 +463,7 @@ sn_acpi_bus_fixup(struct pci_bus *bus) ...@@ -463,7 +463,7 @@ sn_acpi_bus_fixup(struct pci_bus *bus)
printk(KERN_ERR printk(KERN_ERR
"%s: 0x%04x:0x%02x Unable to " "%s: 0x%04x:0x%02x Unable to "
"obtain prom_bussoft_ptr\n", "obtain prom_bussoft_ptr\n",
__FUNCTION__, pci_domain_nr(bus), bus->number); __func__, pci_domain_nr(bus), bus->number);
return; return;
} }
sn_common_bus_fixup(bus, prom_bussoft_ptr); sn_common_bus_fixup(bus, prom_bussoft_ptr);
......
...@@ -364,7 +364,7 @@ void sn_bus_store_sysdata(struct pci_dev *dev) ...@@ -364,7 +364,7 @@ void sn_bus_store_sysdata(struct pci_dev *dev)
element = kzalloc(sizeof(struct sysdata_el), GFP_KERNEL); element = kzalloc(sizeof(struct sysdata_el), GFP_KERNEL);
if (!element) { if (!element) {
dev_dbg(&dev->dev, "%s: out of memory!\n", __FUNCTION__); dev_dbg(&dev->dev, "%s: out of memory!\n", __func__);
return; return;
} }
element->sysdata = SN_PCIDEV_INFO(dev); element->sysdata = SN_PCIDEV_INFO(dev);
......
...@@ -209,11 +209,11 @@ sn_io_slot_fixup(struct pci_dev *dev) ...@@ -209,11 +209,11 @@ sn_io_slot_fixup(struct pci_dev *dev)
pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL); pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
if (!pcidev_info) if (!pcidev_info)
panic("%s: Unable to alloc memory for pcidev_info", __FUNCTION__); panic("%s: Unable to alloc memory for pcidev_info", __func__);
sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL); sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
if (!sn_irq_info) if (!sn_irq_info)
panic("%s: Unable to alloc memory for sn_irq_info", __FUNCTION__); panic("%s: Unable to alloc memory for sn_irq_info", __func__);
/* Call to retrieve pci device information needed by kernel. */ /* Call to retrieve pci device information needed by kernel. */
status = sal_get_pcidev_info((u64) pci_domain_nr(dev), status = sal_get_pcidev_info((u64) pci_domain_nr(dev),
......
...@@ -100,7 +100,7 @@ sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata, ...@@ -100,7 +100,7 @@ sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata,
if (!newbuf) { if (!newbuf) {
mutex_unlock(&sn_oemdata_mutex); mutex_unlock(&sn_oemdata_mutex);
printk(KERN_ERR "%s: unable to extend sn_oemdata\n", printk(KERN_ERR "%s: unable to extend sn_oemdata\n",
__FUNCTION__); __func__);
return 1; return 1;
} }
vfree(*sn_oemdata); vfree(*sn_oemdata);
......
...@@ -116,7 +116,7 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size, ...@@ -116,7 +116,7 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size,
*dma_handle = provider->dma_map_consistent(pdev, phys_addr, size, *dma_handle = provider->dma_map_consistent(pdev, phys_addr, size,
SN_DMA_ADDR_PHYS); SN_DMA_ADDR_PHYS);
if (!*dma_handle) { if (!*dma_handle) {
printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__); printk(KERN_ERR "%s: out of ATEs\n", __func__);
free_pages((unsigned long)cpuaddr, get_order(size)); free_pages((unsigned long)cpuaddr, get_order(size));
return NULL; return NULL;
} }
...@@ -179,7 +179,7 @@ dma_addr_t sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size, ...@@ -179,7 +179,7 @@ dma_addr_t sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
phys_addr = __pa(cpu_addr); phys_addr = __pa(cpu_addr);
dma_addr = provider->dma_map(pdev, phys_addr, size, SN_DMA_ADDR_PHYS); dma_addr = provider->dma_map(pdev, phys_addr, size, SN_DMA_ADDR_PHYS);
if (!dma_addr) { if (!dma_addr) {
printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__); printk(KERN_ERR "%s: out of ATEs\n", __func__);
return 0; return 0;
} }
return dma_addr; return dma_addr;
...@@ -266,7 +266,7 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries, ...@@ -266,7 +266,7 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries,
SN_DMA_ADDR_PHYS); SN_DMA_ADDR_PHYS);
if (!sg->dma_address) { if (!sg->dma_address) {
printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__); printk(KERN_ERR "%s: out of ATEs\n", __func__);
/* /*
* Free any successfully allocated entries. * Free any successfully allocated entries.
......
...@@ -88,7 +88,7 @@ tioca_gart_init(struct tioca_kernel *tioca_kern) ...@@ -88,7 +88,7 @@ tioca_gart_init(struct tioca_kernel *tioca_kern)
break; break;
default: default:
printk(KERN_ERR "%s: Invalid CA_APERATURE_SIZE " printk(KERN_ERR "%s: Invalid CA_APERATURE_SIZE "
"0x%lx\n", __FUNCTION__, (ulong) CA_APERATURE_SIZE); "0x%lx\n", __func__, (ulong) CA_APERATURE_SIZE);
return -1; return -1;
} }
...@@ -124,7 +124,7 @@ tioca_gart_init(struct tioca_kernel *tioca_kern) ...@@ -124,7 +124,7 @@ tioca_gart_init(struct tioca_kernel *tioca_kern)
if (!tmp) { if (!tmp) {
printk(KERN_ERR "%s: Could not allocate " printk(KERN_ERR "%s: Could not allocate "
"%lu bytes (order %d) for GART\n", "%lu bytes (order %d) for GART\n",
__FUNCTION__, __func__,
tioca_kern->ca_gart_size, tioca_kern->ca_gart_size,
get_order(tioca_kern->ca_gart_size)); get_order(tioca_kern->ca_gart_size));
return -ENOMEM; return -ENOMEM;
...@@ -341,7 +341,7 @@ tioca_dma_d48(struct pci_dev *pdev, u64 paddr) ...@@ -341,7 +341,7 @@ tioca_dma_d48(struct pci_dev *pdev, u64 paddr)
if (node_upper > 64) { if (node_upper > 64) {
printk(KERN_ERR "%s: coretalk addr 0x%p node id out " printk(KERN_ERR "%s: coretalk addr 0x%p node id out "
"of range\n", __FUNCTION__, (void *)ct_addr); "of range\n", __func__, (void *)ct_addr);
return 0; return 0;
} }
...@@ -349,7 +349,7 @@ tioca_dma_d48(struct pci_dev *pdev, u64 paddr) ...@@ -349,7 +349,7 @@ tioca_dma_d48(struct pci_dev *pdev, u64 paddr)
if (node_upper != (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)) { if (node_upper != (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)) {
printk(KERN_ERR "%s: coretalk upper node (%u) " printk(KERN_ERR "%s: coretalk upper node (%u) "
"mismatch with ca_agp_dma_addr_extn (%lu)\n", "mismatch with ca_agp_dma_addr_extn (%lu)\n",
__FUNCTION__, __func__,
node_upper, (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)); node_upper, (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT));
return 0; return 0;
} }
...@@ -597,7 +597,7 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont ...@@ -597,7 +597,7 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
if (is_shub1() && sn_sal_rev() < 0x0406) { if (is_shub1() && sn_sal_rev() < 0x0406) {
printk printk
(KERN_ERR "%s: SGI prom rev 4.06 or greater required " (KERN_ERR "%s: SGI prom rev 4.06 or greater required "
"for tioca support\n", __FUNCTION__); "for tioca support\n", __func__);
return NULL; return NULL;
} }
...@@ -651,7 +651,7 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont ...@@ -651,7 +651,7 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
printk(KERN_WARNING printk(KERN_WARNING
"%s: Unable to get irq %d. " "%s: Unable to get irq %d. "
"Error interrupts won't be routed for TIOCA bus %d\n", "Error interrupts won't be routed for TIOCA bus %d\n",
__FUNCTION__, SGI_TIOCA_ERROR, __func__, SGI_TIOCA_ERROR,
(int)tioca_common->ca_common.bs_persist_busnum); (int)tioca_common->ca_common.bs_persist_busnum);
sn_set_err_irq_affinity(SGI_TIOCA_ERROR); sn_set_err_irq_affinity(SGI_TIOCA_ERROR);
......
...@@ -494,7 +494,7 @@ tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir) ...@@ -494,7 +494,7 @@ tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
if (&map->ce_dmamap_list == &ce_kern->ce_dmamap_list) { if (&map->ce_dmamap_list == &ce_kern->ce_dmamap_list) {
printk(KERN_WARNING printk(KERN_WARNING
"%s: %s - no map found for bus_addr 0x%lx\n", "%s: %s - no map found for bus_addr 0x%lx\n",
__FUNCTION__, pci_name(pdev), bus_addr); __func__, pci_name(pdev), bus_addr);
} else if (--map->refcnt == 0) { } else if (--map->refcnt == 0) {
for (i = 0; i < map->ate_count; i++) { for (i = 0; i < map->ate_count; i++) {
map->ate_shadow[i] = 0; map->ate_shadow[i] = 0;
...@@ -1030,7 +1030,7 @@ tioce_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont ...@@ -1030,7 +1030,7 @@ tioce_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
"%s: Unable to get irq %d. " "%s: Unable to get irq %d. "
"Error interrupts won't be routed for " "Error interrupts won't be routed for "
"TIOCE bus %04x:%02x\n", "TIOCE bus %04x:%02x\n",
__FUNCTION__, SGI_PCIASIC_ERROR, __func__, SGI_PCIASIC_ERROR,
tioce_common->ce_pcibus.bs_persist_segment, tioce_common->ce_pcibus.bs_persist_segment,
tioce_common->ce_pcibus.bs_persist_busnum); tioce_common->ce_pcibus.bs_persist_busnum);
......
...@@ -117,7 +117,7 @@ struct arch_specific_insn { ...@@ -117,7 +117,7 @@ struct arch_specific_insn {
unsigned short slot; unsigned short slot;
}; };
extern int kprobes_fault_handler(struct pt_regs *regs, int trapnr); extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
extern int kprobe_exceptions_notify(struct notifier_block *self, extern int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data); unsigned long val, void *data);
......
...@@ -233,8 +233,6 @@ struct switch_stack { ...@@ -233,8 +233,6 @@ struct switch_stack {
#include <asm/current.h> #include <asm/current.h>
#include <asm/page.h> #include <asm/page.h>
#define __ARCH_SYS_PTRACE 1
/* /*
* We use the ia64_psr(regs)->ri to determine which of the three * We use the ia64_psr(regs)->ri to determine which of the three
* instructions in bundle (16 bytes) took the sample. Generate * instructions in bundle (16 bytes) took the sample. Generate
...@@ -314,6 +312,13 @@ struct switch_stack { ...@@ -314,6 +312,13 @@ struct switch_stack {
#define arch_ptrace_attach(child) \ #define arch_ptrace_attach(child) \
ptrace_attach_sync_user_rbs(child) ptrace_attach_sync_user_rbs(child)
#define arch_has_single_step() (1)
extern void user_enable_single_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);
#define arch_has_block_step() (1)
extern void user_enable_block_step(struct task_struct *);
#endif /* !__KERNEL__ */ #endif /* !__KERNEL__ */
/* pt_all_user_regs is used for PTRACE_GETREGS PTRACE_SETREGS */ /* pt_all_user_regs is used for PTRACE_GETREGS PTRACE_SETREGS */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment