Commit 6ee56a96 authored by David Mosberger

Merge tiger.hpl.hp.com:/data1/bk/vanilla/linux-2.5

into tiger.hpl.hp.com:/data1/bk/lia64/to-linus-2.5
parents c9c8b0d8 0a912921
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 4
EXTRAVERSION =-rc3
EXTRAVERSION =
NAME=Feisty Dunnart
# *DOCUMENTATION*
......
@@ -2152,10 +2152,6 @@ static inline void check_timer(void)
{
int pin1, pin2;
int vector;
unsigned int ver;
ver = apic_read(APIC_LVR);
ver = GET_APIC_VERSION(ver);
/*
* get/set the timer IRQ vector:
@@ -2169,17 +2165,11 @@ static inline void check_timer(void)
* mode for the 8259A whenever interrupts are routed
* through I/O APICs. Also IRQ0 has to be enabled in
* the 8259A which implies the virtual wire has to be
* disabled in the local APIC. Finally timer interrupts
* need to be acknowledged manually in the 8259A for
* do_slow_timeoffset() and for the i82489DX when using
* the NMI watchdog.
* disabled in the local APIC.
*/
apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
init_8259A(1);
if (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver))
timer_ack = 1;
else
timer_ack = !cpu_has_tsc;
enable_8259A_irq(0);
pin1 = find_isa_irq_pin(0, mp_INT);
@@ -2197,8 +2187,7 @@ static inline void check_timer(void)
disable_8259A_irq(0);
setup_nmi();
enable_8259A_irq(0);
if (check_nmi_watchdog() < 0);
timer_ack = !cpu_has_tsc;
check_nmi_watchdog();
}
return;
}
@@ -2221,8 +2210,7 @@ static inline void check_timer(void)
add_pin_to_irq(0, 0, pin2);
if (nmi_watchdog == NMI_IO_APIC) {
setup_nmi();
if (check_nmi_watchdog() < 0);
timer_ack = !cpu_has_tsc;
check_nmi_watchdog();
}
return;
}
......
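
A note on the two check_timer() hunks above: the removed form `if (check_nmi_watchdog() < 0);` ends the `if` with a stray semicolon, so the `timer_ack = !cpu_has_tsc;` on the next line ran unconditionally; the replacement is a plain `check_nmi_watchdog();` call. A minimal, self-contained sketch of that pitfall, using hypothetical probe()/fallback() helpers rather than the kernel code:

#include <stdio.h>

static int probe(void)     { return 0; }            /* pretend the check passed */
static void fallback(void) { puts("fallback taken"); }

int main(void)
{
	/* Buggy form: the ';' closes the if with an empty body, so the
	 * call below is not conditional at all. */
	if (probe() < 0);
		fallback();              /* runs even though probe() succeeded */

	/* Intended form: fall back only on failure. */
	if (probe() < 0)
		fallback();
	return 0;
}
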
@@ -221,7 +221,7 @@ static long __pmac g5_fw_enable(struct device_node* node, long param, long value
mb();
k2_skiplist[1] = NULL;
} else {
k2_skiplist[0] = pdev;
k2_skiplist[1] = pdev;
mb();
MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_FW_CLK_ENABLE);
}
......
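
The g5_fw_enable() hunk above switches the disable path to k2_skiplist[1], the same slot the enable path clears, and it preserves an ordering that pmac_pci.c relies on (its comment below: "When a device in K2 is powered down, we die on config cycle accesses"): the skiplist entry is published, then mb(), then the FireWire clock is gated; on re-enable the entry is cleared only after an mb(), once the clock is back. A minimal userspace sketch of that publish/unpublish order, with simplified types and C11 atomics standing in for the kernel's mb():

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct dev { int bus, devfn; };

/* One slot per device whose clock may be gated (K2 tracks two). */
static _Atomic(struct dev *) skiplist[2];

static void clock_off(struct dev *d) { (void)d; /* gate the cell's clock (stub) */ }
static void clock_on(struct dev *d)  { (void)d; /* ungate it again (stub) */ }

static void power_down(struct dev *d, int slot)
{
	/* Publish the entry before the clock goes away, so a config access
	 * racing with us either sees a live device or sees the skiplist
	 * entry -- never a dead device with no entry. */
	atomic_store(&skiplist[slot], d);
	clock_off(d);
}

static void power_up(struct dev *d, int slot)
{
	clock_on(d);
	/* Unpublish only once the device answers config cycles again. */
	atomic_store(&skiplist[slot], NULL);
}

int main(void)
{
	struct dev fw = { .bus = 1, .devfn = 0 };

	power_down(&fw, 1);
	printf("slot 1 skipped: %d\n", atomic_load(&skiplist[1]) != NULL);
	power_up(&fw, 1);
	return 0;
}
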
@@ -152,7 +152,6 @@ static int __pmac macrisc_read_config(struct pci_bus *bus, unsigned int devfn,
struct pci_controller *hose;
struct device_node *busdn;
unsigned long addr;
int i;
if (bus->self)
busdn = pci_device_to_OF_node(bus->self);
@@ -164,24 +163,6 @@ static int __pmac macrisc_read_config(struct pci_bus *bus, unsigned int devfn,
if (hose == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
/*
* When a device in K2 is powered down, we die on config
* cycle accesses. Fix that here.
*/
for (i=0; i<2; i++)
if (k2_skiplist[i] && k2_skiplist[i]->bus == bus &&
k2_skiplist[i]->devfn == devfn) {
switch (len) {
case 1:
*val = 0xff; break;
case 2:
*val = 0xffff; break;
default:
*val = 0xfffffffful; break;
}
return PCIBIOS_SUCCESSFUL;
}
addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
@@ -209,7 +190,6 @@ static int __pmac macrisc_write_config(struct pci_bus *bus, unsigned int devfn,
struct pci_controller *hose;
struct device_node *busdn;
unsigned long addr;
int i;
if (bus->self)
busdn = pci_device_to_OF_node(bus->self);
@@ -221,15 +201,6 @@ static int __pmac macrisc_write_config(struct pci_bus *bus, unsigned int devfn,
if (hose == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
/*
* When a device in K2 is powered down, we die on config
* cycle accesses. Fix that here.
*/
for (i=0; i<2; i++)
if (k2_skiplist[i] && k2_skiplist[i]->bus == bus &&
k2_skiplist[i]->devfn == devfn)
return PCIBIOS_SUCCESSFUL;
addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
@@ -265,6 +236,17 @@ static struct pci_ops macrisc_pci_ops =
* implement self-view of the HT host yet
*/
static int skip_k2_device(struct pci_bus *bus, unsigned int devfn)
{
int i;
for (i=0; i<2; i++)
if (k2_skiplist[i] && k2_skiplist[i]->bus == bus &&
k2_skiplist[i]->devfn == devfn)
return 1;
return 0;
}
#define U3_HT_CFA0(devfn, off) \
((((unsigned long)devfn) << 8) | offset)
#define U3_HT_CFA1(bus, devfn, off) \
@@ -305,6 +287,24 @@ static int __pmac u3_ht_read_config(struct pci_bus *bus, unsigned int devfn,
addr = u3_ht_cfg_access(hose, bus->number, devfn, offset);
if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
/*
* When a device in K2 is powered down, we die on config
* cycle accesses. Fix that here. We may ultimately want
* to cache the config space for those instead of returning
* 0xffffffff's to make life easier to HW detection tools
*/
if (skip_k2_device(bus, devfn)) {
switch (len) {
case 1:
*val = 0xff; break;
case 2:
*val = 0xffff; break;
default:
*val = 0xfffffffful; break;
}
return PCIBIOS_SUCCESSFUL;
}
/*
* Note: the caller has already checked that offset is
* suitably aligned and that len is 1, 2 or 4.
@@ -343,6 +343,13 @@ static int __pmac u3_ht_write_config(struct pci_bus *bus, unsigned int devfn,
addr = u3_ht_cfg_access(hose, bus->number, devfn, offset);
if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
/*
* When a device in K2 is powered down, we die on config
* cycle accesses. Fix that here.
*/
if (skip_k2_device(bus, devfn))
return PCIBIOS_SUCCESSFUL;
/*
* Note: the caller has already checked that offset is
* suitably aligned and that len is 1, 2 or 4.
......
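
The pmac_pci.c hunks above fold the duplicated skiplist test out of macrisc_read_config()/macrisc_write_config() into a single skip_k2_device() helper and apply it in the U3 HyperTransport accessors instead: reads to a gated device fake the all-ones value an absent PCI device would return, sized to the access width, and writes simply report success. A self-contained sketch of that pattern, with simplified types standing in for struct pci_bus and the PCIBIOS_* codes:

#include <stdint.h>
#include <stdio.h>

struct pci_dev { int bus; unsigned int devfn; };

static struct pci_dev *k2_skiplist[2];

/* Nonzero if (bus, devfn) names a device whose clock is gated. */
static int skip_k2_device(int bus, unsigned int devfn)
{
	int i;

	for (i = 0; i < 2; i++)
		if (k2_skiplist[i] && k2_skiplist[i]->bus == bus &&
		    k2_skiplist[i]->devfn == devfn)
			return 1;
	return 0;
}

/* Config reads to a skipped device return the all-ones pattern an empty
 * slot would, sized to the access width; a write would just claim
 * success without touching the hardware. */
static int cfg_read(int bus, unsigned int devfn, int len, uint32_t *val)
{
	if (skip_k2_device(bus, devfn)) {
		switch (len) {
		case 1:  *val = 0xff;        break;
		case 2:  *val = 0xffff;      break;
		default: *val = 0xffffffffu; break;
		}
		return 0;                    /* PCIBIOS_SUCCESSFUL */
	}
	*val = 0;                            /* real config cycle elided */
	return 0;
}

int main(void)
{
	static struct pci_dev fw = { 1, 0 };
	uint32_t v;

	k2_skiplist[1] = &fw;
	cfg_read(1, 0, 2, &v);
	printf("0x%04x\n", (unsigned)v);     /* prints 0xffff */
	return 0;
}
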
@@ -69,7 +69,7 @@ static struct pci_device_id pcnet32_pci_tbl[] = {
MODULE_DEVICE_TABLE (pci, pcnet32_pci_tbl);
int cards_found __initdata;
static int cards_found;
/*
* VLB I/O addresses
......
@@ -187,6 +187,7 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
switch (ed->type) {
case PIPE_CONTROL:
if (ohci->ed_controltail == NULL) {
WARN_ON (ohci->hc_control & OHCI_CTRL_CLE);
writel (ed->dma, &ohci->regs->ed_controlhead);
} else {
ohci->ed_controltail->ed_next = ed;
@@ -203,6 +204,7 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
case PIPE_BULK:
if (ohci->ed_bulktail == NULL) {
WARN_ON (ohci->hc_control & OHCI_CTRL_BLE);
writel (ed->dma, &ohci->regs->ed_bulkhead);
} else {
ohci->ed_bulktail->ed_next = ed;
@@ -271,27 +273,56 @@ static void periodic_unlink (struct ohci_hcd *ohci, struct ed *ed)
* just the link to the ed is unlinked.
* the link from the ed still points to another operational ed or 0
* so the HC can eventually finish the processing of the unlinked ed
* (assuming it already started that, which needn't be true).
*
* ED_UNLINK is a transient state: the HC may still see this ED, but soon
* it won't. ED_SKIP means the HC will finish its current transaction,
* but won't start anything new. The TD queue may still grow; device
* drivers don't know about this HCD-internal state.
*
* When the HC can't see the ED, something changes ED_UNLINK to one of:
*
* - ED_OPER: when there's any request queued, the ED gets rescheduled
* immediately. HC should be working on them.
*
* - ED_IDLE: when there's no TD queue. there's no reason for the HC
* to care about this ED; safe to disable the endpoint.
*
* When finish_unlinks() runs later, after SOF interrupt, it will often
* complete one or more URB unlinks before making that state change.
*/
static void ed_deschedule (struct ohci_hcd *ohci, struct ed *ed)
{
ed->hwINFO |= ED_SKIP;
wmb ();
ed->state = ED_UNLINK;
/* To deschedule something from the control or bulk list, just
* clear CLE/BLE and wait. There's no safe way to scrub out list
* head/current registers until later, and "later" isn't very
* tightly specified. Figure 6-5 and Section 6.4.2.2 show how
* the HC is reading the ED queues (while we modify them).
*
* For now, ed_schedule() is "later". It might be good paranoia
* to scrub those registers in finish_unlinks(), in case of bugs
* that make the HC try to use them.
*/
switch (ed->type) {
case PIPE_CONTROL:
/* remove ED from the HC's list: */
if (ed->ed_prev == NULL) {
if (!ed->hwNextED) {
ohci->hc_control &= ~OHCI_CTRL_CLE;
writel (ohci->hc_control, &ohci->regs->control);
writel (0, &ohci->regs->ed_controlcurrent);
// post those pci writes
(void) readl (&ohci->regs->control);
}
// a readl() later syncs CLE with the HC
} else
writel (le32_to_cpup (&ed->hwNextED),
&ohci->regs->ed_controlhead);
} else {
ed->ed_prev->ed_next = ed->ed_next;
ed->ed_prev->hwNextED = ed->hwNextED;
}
/* remove ED from the HCD's list: */
if (ohci->ed_controltail == ed) {
ohci->ed_controltail = ed->ed_prev;
if (ohci->ed_controltail)
@@ -302,20 +333,20 @@ static void ed_deschedule (struct ohci_hcd *ohci, struct ed *ed)
break;
case PIPE_BULK:
/* remove ED from the HC's list: */
if (ed->ed_prev == NULL) {
if (!ed->hwNextED) {
ohci->hc_control &= ~OHCI_CTRL_BLE;
writel (ohci->hc_control, &ohci->regs->control);
writel (0, &ohci->regs->ed_bulkcurrent);
// post those pci writes
(void) readl (&ohci->regs->control);
}
// a readl() later syncs BLE with the HC
} else
writel (le32_to_cpup (&ed->hwNextED),
&ohci->regs->ed_bulkhead);
} else {
ed->ed_prev->ed_next = ed->ed_next;
ed->ed_prev->hwNextED = ed->hwNextED;
}
/* remove ED from the HCD's list: */
if (ohci->ed_bulktail == ed) {
ohci->ed_bulktail = ed->ed_prev;
if (ohci->ed_bulktail)
@@ -426,32 +457,32 @@ static struct ed *ed_get (
/* request unlinking of an endpoint from an operational HC.
* put the ep on the rm_list
* real work is done at the next start frame (SF) hardware interrupt
* caller guarantees HCD is running, so hardware access is safe,
* and that ed->state is ED_OPER
*/
static void start_ed_unlink (struct ohci_hcd *ohci, struct ed *ed)
{
ed->hwINFO |= ED_DEQUEUE;
ed->state = ED_UNLINK;
ed_deschedule (ohci, ed);
/* SF interrupt might get delayed; record the frame counter value that
* indicates when the HC isn't looking at it, so concurrent unlinks
* behave. frame_no wraps every 2^16 msec, and changes right before
* SF is triggered.
*/
ed->tick = OHCI_FRAME_NO(ohci->hcca) + 1;
/* rm_list is just singly linked, for simplicity */
ed->ed_next = ohci->ed_rm_list;
ed->ed_prev = 0;
ohci->ed_rm_list = ed;
/* enable SOF interrupt */
if (HCD_IS_RUNNING (ohci->hcd.state)) {
writel (OHCI_INTR_SF, &ohci->regs->intrstatus);
writel (OHCI_INTR_SF, &ohci->regs->intrenable);
// flush those pci writes
// flush those writes, and get latest HCCA contents
(void) readl (&ohci->regs->control);
}
/* SF interrupt might get delayed; record the frame counter value that
* indicates when the HC isn't looking at it, so concurrent unlinks
* behave. frame_no wraps every 2^16 msec, and changes right before
* SF is triggered.
*/
ed->tick = OHCI_FRAME_NO(ohci->hcca) + 1;
}
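
The comment in start_ed_unlink() above records OHCI_FRAME_NO(ohci->hcca) + 1 because the SF interrupt can be delayed and frame_no wraps every 2^16 msec; later processing only trusts the ED once the controller has moved past the frame in which the unlink was requested. A small sketch of a wrap-safe "has this tick passed?" test on a 16-bit counter (the tick_before name and exact form here are illustrative, not necessarily the driver's):

#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "a is strictly before b" for a 16-bit frame counter that
 * rolls over every 2^16 ms: interpret the difference as signed. */
static int tick_before(uint16_t a, uint16_t b)
{
	return (int16_t)(a - b) < 0;
}

int main(void)
{
	uint16_t now   = 0xfffe;                 /* about to wrap */
	uint16_t tick  = (uint16_t)(now + 1);    /* unlink recorded here */
	uint16_t later = (uint16_t)(now + 3);    /* 3 ms on: counter is 0x0001 */

	printf("%d %d\n",
	       tick_before(tick, later),         /* 1: the tick has passed */
	       tick_before(later, tick));        /* 0 */
	return 0;
}
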
/*-------------------------------------------------------------------------*
@@ -794,8 +825,6 @@ ed_halted (struct ohci_hcd *ohci, struct td *td, int cc, struct td *rev)
next->next_dl_td = rev;
rev = next;
if (ed->hwTailP == cpu_to_le32 (next->td_dma))
ed->hwTailP = next->hwNextTD;
ed->hwHeadP = next->hwNextTD | toggle;
}
@@ -922,6 +951,10 @@ finish_unlinks (struct ohci_hcd *ohci, u16 tick, struct pt_regs *regs)
/* unlink urbs as requested, but rescan the list after
* we call a completion since it might have unlinked
* another (earlier) urb
*
* When we get here, the HC doesn't see this ed. But it
* must not be rescheduled until all completed URBs have
* been given back to the driver.
*/
rescan_this:
completed = 0;
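
The comment above explains why finish_unlinks() rescans from the top after each completion: the completion callback may unlink another, earlier URB on the same ED. A standalone sketch of that restart-the-scan pattern on a plain singly linked list (the types and the give_back() side effect are invented for illustration):

#include <stdio.h>
#include <stdlib.h>

struct req { struct req *next; int cancelled, id; };

static struct req *head;

/* Giving a request back runs caller code that may cancel *other*
 * requests on the same list, so no saved iterator can be trusted
 * afterwards. */
static void give_back(struct req *r)
{
	printf("gave back %d\n", r->id);
	if (r->id == 3 && head)
		head->cancelled = 1;   /* callback cancels an earlier request */
	free(r);
}

static void finish_cancelled(void)
{
rescan:
	for (struct req **pp = &head; *pp; pp = &(*pp)->next) {
		struct req *r = *pp;

		if (!r->cancelled)
			continue;
		*pp = r->next;         /* unlink first, then call back */
		give_back(r);
		goto rescan;           /* the list may have changed under us */
	}
}

int main(void)
{
	for (int id = 3; id >= 1; id--) {
		struct req *r = calloc(1, sizeof *r);
		r->id = id;
		r->cancelled = (id == 3);
		r->next = head;
		head = r;
	}
	finish_cancelled();            /* gives back 3, then the newly cancelled 1 */
	while (head) { struct req *n = head->next; free(head); head = n; }
	return 0;
}
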
@@ -941,12 +974,7 @@ finish_unlinks (struct ohci_hcd *ohci, u16 tick, struct pt_regs *regs)
continue;
}
/* patch pointers hc uses ... tail, if we're removing
* an otherwise active td, and whatever td pointer
* points to this td
*/
if (ed->hwTailP == cpu_to_le32 (td->td_dma))
ed->hwTailP = td->hwNextTD;
/* patch pointer hc uses */
savebits = *prev & ~cpu_to_le32 (TD_MASK);
*prev = td->hwNextTD | savebits;
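
In the hunk just above, TD_MASK selects the address bits of a hardware link word; the low bits carry control/toggle flags that must survive the splice, hence savebits = *prev & ~TD_MASK before the pointer is overwritten. A tiny sketch of that unlink-while-preserving-flag-bits trick (LINK_MASK and the values are illustrative):

#include <stdint.h>
#include <stdio.h>

#define LINK_MASK 0xfffffff0u            /* address bits of a link word */

/* Splice out the element prev_link points at: keep prev's low flag
 * bits, replace only the address bits with the victim's successor. */
static uint32_t splice(uint32_t prev_link, uint32_t next_link)
{
	uint32_t savebits = prev_link & ~LINK_MASK;

	return (next_link & LINK_MASK) | savebits;
}

int main(void)
{
	uint32_t prev = 0x00001000u | 0x2u;  /* points at victim, flag bit set */
	uint32_t next = 0x00002000u;         /* victim's hwNextTD */

	printf("0x%08x\n", (unsigned)splice(prev, next));   /* 0x00002002 */
	return 0;
}
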
@@ -965,9 +993,10 @@ finish_unlinks (struct ohci_hcd *ohci, u16 tick, struct pt_regs *regs)
/* ED's now officially unlinked, hc doesn't see */
ed->state = ED_IDLE;
ed->hwINFO &= ~(ED_SKIP | ED_DEQUEUE);
ed->hwHeadP &= ~ED_H;
ed->hwNextED = 0;
wmb ();
ed->hwINFO &= ~(ED_SKIP | ED_DEQUEUE);
/* but if there's work queued, reschedule */
if (!list_empty (&ed->td_list)) {
@@ -1040,7 +1069,7 @@ dl_done_list (struct ohci_hcd *ohci, struct td *td, struct pt_regs *regs)
finish_urb (ohci, urb, regs);
/* clean schedule: unlink EDs that are no longer busy */
if (list_empty (&ed->td_list))
if (list_empty (&ed->td_list) && ed->state == ED_OPER)
start_ed_unlink (ohci, ed);
/* ... reenabling halted EDs only after fault cleanup */
else if ((ed->hwINFO & (ED_SKIP | ED_DEQUEUE)) == ED_SKIP) {
......
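
Pulling together the ohci-q.c comments above (ed_schedule, ed_deschedule, start_ed_unlink, finish_unlinks): an ED is ED_OPER while the controller may see it, goes to ED_UNLINK with ED_SKIP set while it is being taken off the hardware lists, and after the SOF that proves the controller is done it becomes either ED_IDLE (empty TD queue) or ED_OPER again (new work queued meanwhile). A compact sketch of that state machine, with hypothetical helper names; it mirrors the comments, not the driver's exact code:

#include <stdio.h>

enum ed_state { ED_IDLE, ED_OPER, ED_UNLINK };

struct ed {
	enum ed_state state;
	int skip;          /* stands in for the ED_SKIP bit in hwINFO */
	int tds_queued;    /* stands in for !list_empty(&ed->td_list) */
};

/* Ask the HC to stop looking at this ED; real work happens at SOF. */
static void start_unlink(struct ed *ed)
{
	if (ed->state != ED_OPER)
		return;                     /* cf. the dl_done_list() guard */
	ed->skip = 1;                       /* HC finishes its current TD only */
	ed->state = ED_UNLINK;
}

/* Runs after the SOF interrupt, once the HC provably can't see the ED. */
static void finish_unlink(struct ed *ed)
{
	if (ed->state != ED_UNLINK)
		return;
	ed->skip = 0;
	if (ed->tds_queued)
		ed->state = ED_OPER;        /* reschedule: more work arrived */
	else
		ed->state = ED_IDLE;        /* safe to disable the endpoint */
}

int main(void)
{
	struct ed ed = { .state = ED_OPER, .tds_queued = 0 };

	start_unlink(&ed);
	ed.tds_queued = 1;                  /* a new URB shows up meanwhile */
	finish_unlink(&ed);
	printf("state=%d skip=%d\n", ed.state, ed.skip);   /* ED_OPER, 0 */
	return 0;
}
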
@@ -71,7 +71,7 @@ static int bad_range(struct zone *zone, struct page *page)
static void bad_page(const char *function, struct page *page)
{
printk("Bad page state at %s (in process '%s')\n", function, current->comm);
printk("Bad page state at %s (in process '%s', page %p)\n", function, current->comm, page);
printk("flags:0x%08lx mapping:%p mapped:%d count:%d\n",
page->flags, page->mapping,
page_mapped(page), page_count(page));
......