Commit 1d0df486 authored by Bjorn Helgaas

Merge branches 'pci/host-rcar', 'pci/hotplug', 'pci/iommu', 'pci/misc' and 'pci/msi' into next

* pci/host-rcar:
  PCI: rcar: Remove rcar_pcie_setup_window() resource argument
  PCI: rcar: Cleanup style and formatting
  PCI: rcar: Use correct initial HW settings
  PCI: rcar: Remove redundant config accessor register number checks

* pci/hotplug:
  PCI: cpqphp: Remove unnecessary null test before debugfs_remove()
  PCI: pciehp: Clear Data Link Layer State Changed during init
  PCI: pciehp: Remove struct controller.no_cmd_complete
  PCI: pciehp: Remove assumptions about which commands cause completion events
  PCI: pciehp: Compute timeout from hotplug command start time
  PCI: pciehp: Wait for hotplug command completion lazily
  PCI: pciehp: Make pcie_wait_cmd() self-contained
  PCI: Prevent NULL dereference during pciehp probe

* pci/iommu:
  PCI: Add bridge DMA alias quirk for Intel 82801 bridge

* pci/misc:
  ACPI / PCI: Fix sysfs acpi_index and label errors
  PCI/portdrv: Remove warning about invalid IRQ for hot-added PCIe ports

* pci/msi:
  PCI/MSI: Cache Multiple Message Capable in struct msi_desc
  PCI/MSI: Remove unused msi_enabled_mask()
  PCI/MSI: Add internal msix_clear_and_set_ctrl() function
@@ -216,7 +216,6 @@ void cpqhp_create_debugfs_files(struct controller *ctrl)
 
 void cpqhp_remove_debugfs_files(struct controller *ctrl)
 {
-	if (ctrl->dentry)
-		debugfs_remove(ctrl->dentry);
+	debugfs_remove(ctrl->dentry);
 	ctrl->dentry = NULL;
 }
......
@@ -92,9 +92,10 @@ struct controller {
 	struct slot *slot;
 	wait_queue_head_t queue;	/* sleep & wake process */
 	u32 slot_cap;
+	u32 slot_ctrl;
 	struct timer_list poll_timer;
+	unsigned long cmd_started;	/* jiffies */
 	unsigned int cmd_busy:1;
-	unsigned int no_cmd_complete:1;
 	unsigned int link_active_reporting:1;
 	unsigned int notification_enabled:1;
 	unsigned int power_fault_detected;
......
@@ -255,6 +255,13 @@ static int pciehp_probe(struct pcie_device *dev)
 	else if (pciehp_acpi_slot_detection_check(dev->port))
 		goto err_out_none;
 
+	if (!dev->port->subordinate) {
+		/* Can happen if we run out of bus numbers during probe */
+		dev_err(&dev->device,
+			"Hotplug bridge without secondary bus, ignoring\n");
+		goto err_out_none;
+	}
+
 	ctrl = pcie_init(dev);
 	if (!ctrl) {
 		dev_err(&dev->device, "Controller initialization failed\n");
......
@@ -104,11 +104,10 @@ static inline void pciehp_free_irq(struct controller *ctrl)
 		free_irq(ctrl->pcie->irq, ctrl);
 }
 
-static int pcie_poll_cmd(struct controller *ctrl)
+static int pcie_poll_cmd(struct controller *ctrl, int timeout)
 {
 	struct pci_dev *pdev = ctrl_dev(ctrl);
 	u16 slot_status;
-	int timeout = 1000;
 
 	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
 	if (slot_status & PCI_EXP_SLTSTA_CC) {
@@ -129,18 +128,52 @@ static int pcie_poll_cmd(struct controller *ctrl)
 	return 0;	/* timeout */
 }
 
-static void pcie_wait_cmd(struct controller *ctrl, int poll)
+static void pcie_wait_cmd(struct controller *ctrl)
 {
 	unsigned int msecs = pciehp_poll_mode ? 2500 : 1000;
-	unsigned long timeout = msecs_to_jiffies(msecs);
+	unsigned long duration = msecs_to_jiffies(msecs);
+	unsigned long cmd_timeout = ctrl->cmd_started + duration;
+	unsigned long now, timeout;
 	int rc;
 
-	if (poll)
-		rc = pcie_poll_cmd(ctrl);
+	/*
+	 * If the controller does not generate notifications for command
+	 * completions, we never need to wait between writes.
+	 */
+	if (NO_CMD_CMPL(ctrl))
+		return;
+
+	if (!ctrl->cmd_busy)
+		return;
+
+	/*
+	 * Even if the command has already timed out, we want to call
+	 * pcie_poll_cmd() so it can clear PCI_EXP_SLTSTA_CC.
+	 */
+	now = jiffies;
+	if (time_before_eq(cmd_timeout, now))
+		timeout = 1;
 	else
+		timeout = cmd_timeout - now;
+
+	if (ctrl->slot_ctrl & PCI_EXP_SLTCTL_HPIE &&
+	    ctrl->slot_ctrl & PCI_EXP_SLTCTL_CCIE)
 		rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout);
+	else
+		rc = pcie_poll_cmd(ctrl, timeout);
+
+	/*
+	 * Controllers with errata like Intel CF118 don't generate
+	 * completion notifications unless the power/indicator/interlock
+	 * control bits are changed.  On such controllers, we'll emit this
+	 * timeout message when we wait for completion of commands that
+	 * don't change those bits, e.g., commands that merely enable
+	 * interrupts.
+	 */
 	if (!rc)
-		ctrl_dbg(ctrl, "Command not completed in 1000 msec\n");
+		ctrl_info(ctrl, "Timeout on hotplug command %#010x (issued %u msec ago)\n",
+			  ctrl->slot_ctrl,
+			  jiffies_to_msecs(now - ctrl->cmd_started));
 }
 
 /**
@@ -152,34 +185,12 @@ static void pcie_wait_cmd(struct controller *ctrl, int poll)
 static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
 {
 	struct pci_dev *pdev = ctrl_dev(ctrl);
-	u16 slot_status;
 	u16 slot_ctrl;
 
 	mutex_lock(&ctrl->ctrl_lock);
 
-	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
-	if (slot_status & PCI_EXP_SLTSTA_CC) {
-		pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
-					   PCI_EXP_SLTSTA_CC);
-		if (!ctrl->no_cmd_complete) {
-			/*
-			 * After 1 sec and CMD_COMPLETED still not set, just
-			 * proceed forward to issue the next command according
-			 * to spec. Just print out the error message.
-			 */
-			ctrl_dbg(ctrl, "CMD_COMPLETED not clear after 1 sec\n");
-		} else if (!NO_CMD_CMPL(ctrl)) {
-			/*
-			 * This controller seems to notify of command completed
-			 * event even though it supports none of power
-			 * controller, attention led, power led and EMI.
-			 */
-			ctrl_dbg(ctrl, "Unexpected CMD_COMPLETED. Need to wait for command completed event\n");
-			ctrl->no_cmd_complete = 0;
-		} else {
-			ctrl_dbg(ctrl, "Unexpected CMD_COMPLETED. Maybe the controller is broken\n");
-		}
-	}
+	/* Wait for any previous command that might still be in progress */
+	pcie_wait_cmd(ctrl);
 
 	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
 	slot_ctrl &= ~mask;
@@ -187,22 +198,9 @@ static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
 	ctrl->cmd_busy = 1;
 	smp_mb();
 	pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, slot_ctrl);
+	ctrl->cmd_started = jiffies;
+	ctrl->slot_ctrl = slot_ctrl;
 
-	/*
-	 * Wait for command completion.
-	 */
-	if (!ctrl->no_cmd_complete) {
-		int poll = 0;
-		/*
-		 * if hotplug interrupt is not enabled or command
-		 * completed interrupt is not enabled, we need to poll
-		 * command completed event.
-		 */
-		if (!(slot_ctrl & PCI_EXP_SLTCTL_HPIE) ||
-		    !(slot_ctrl & PCI_EXP_SLTCTL_CCIE))
-			poll = 1;
-		pcie_wait_cmd(ctrl, poll);
-	}
 	mutex_unlock(&ctrl->ctrl_lock);
 }
@@ -773,15 +771,6 @@ struct controller *pcie_init(struct pcie_device *dev)
 	mutex_init(&ctrl->ctrl_lock);
 	init_waitqueue_head(&ctrl->queue);
 	dbg_ctrl(ctrl);
-	/*
-	 * Controller doesn't notify of command completion if the "No
-	 * Command Completed Support" bit is set in Slot Capability
-	 * register or the controller supports none of power
-	 * controller, attention led, power led and EMI.
-	 */
-	if (NO_CMD_CMPL(ctrl) ||
-	    !(POWER_CTRL(ctrl) | ATTN_LED(ctrl) | PWR_LED(ctrl) | EMI(ctrl)))
-		ctrl->no_cmd_complete = 1;
 
 	/* Check if Data Link Layer Link Active Reporting is implemented */
 	pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &link_cap);
@@ -794,7 +783,7 @@ struct controller *pcie_init(struct pcie_device *dev)
 	pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
 		PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
 		PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
-		PCI_EXP_SLTSTA_CC);
+		PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC);
 
 	/* Disable software notification */
 	pcie_disable_notification(ctrl);
......
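Note: the pcie_wait_cmd() rework above boils down to "remember when the last command was written, make the next command wait only for whatever is left of the one-second budget, and still poll once if that budget has already expired". Below is a minimal standalone C sketch of that pattern, not kernel code: the helpers issue_cmd()/wait_prev_cmd(), the CLOCK_MONOTONIC timing, and the printf() placeholders are illustrative assumptions; the driver itself uses jiffies, wait_event_timeout() and pcie_poll_cmd() as shown in the hunks.

/*
 * Illustrative userspace sketch of "compute timeout from command start
 * time"; hypothetical helpers, not the pciehp implementation.
 */
#include <stdio.h>
#include <time.h>

#define CMD_BUDGET_MS 1000		/* budget per hotplug command */

static struct timespec cmd_started;	/* when the last command was issued */
static int cmd_busy;			/* a command is still outstanding */

static long elapsed_ms(struct timespec since)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - since.tv_sec) * 1000 +
	       (now.tv_nsec - since.tv_nsec) / 1000000;
}

/* Lazily wait for the previous command before issuing a new one. */
static void wait_prev_cmd(void)
{
	long remaining;

	if (!cmd_busy)
		return;		/* nothing outstanding, no wait at all */

	remaining = CMD_BUDGET_MS - elapsed_ms(cmd_started);
	if (remaining < 1)
		remaining = 1;	/* already late: still poll once so the
				   completion status gets cleared */

	printf("waiting up to %ld ms for previous command\n", remaining);
	/* ... wait or poll for completion for 'remaining' ms ... */
	cmd_busy = 0;
}

static void issue_cmd(unsigned short slot_ctrl)
{
	wait_prev_cmd();
	printf("issuing command %#06x\n", slot_ctrl);
	/* ... the real code writes the Slot Control register here ... */
	clock_gettime(CLOCK_MONOTONIC, &cmd_started);
	cmd_busy = 1;
}

int main(void)
{
	issue_cmd(0x1000);	/* first command: no wait at all */
	issue_cmd(0x1031);	/* second command: waits only as needed */
	return 0;
}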
@@ -149,15 +149,14 @@ static void msi_set_enable(struct pci_dev *dev, int enable)
 	pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
 }
 
-static void msix_set_enable(struct pci_dev *dev, int enable)
+static void msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u16 set)
 {
-	u16 control;
+	u16 ctrl;
 
-	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
-	control &= ~PCI_MSIX_FLAGS_ENABLE;
-	if (enable)
-		control |= PCI_MSIX_FLAGS_ENABLE;
-	pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
+	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
+	ctrl &= ~clear;
+	ctrl |= set;
+	pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl);
 }
 
 static inline __attribute_const__ u32 msi_mask(unsigned x)
@@ -168,16 +167,6 @@ static inline __attribute_const__ u32 msi_mask(unsigned x)
 	return (1 << (1 << x)) - 1;
 }
 
-static inline __attribute_const__ u32 msi_capable_mask(u16 control)
-{
-	return msi_mask((control >> 1) & 7);
-}
-
-static inline __attribute_const__ u32 msi_enabled_mask(u16 control)
-{
-	return msi_mask((control >> 4) & 7);
-}
-
 /*
  * PCI 2.3 does not specify mask bits for each MSI interrupt. Attempting to
  * mask all MSI interrupts by clearing the MSI enable bit does not work
@@ -460,7 +449,8 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
 
 	arch_restore_msi_irqs(dev);
 	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
-	msi_mask_irq(entry, msi_capable_mask(control), entry->masked);
+	msi_mask_irq(entry, msi_mask(entry->msi_attrib.multi_cap),
+		     entry->masked);
 	control &= ~PCI_MSI_FLAGS_QSIZE;
 	control |= (entry->msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE;
 	pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
@@ -469,26 +459,23 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
 static void __pci_restore_msix_state(struct pci_dev *dev)
 {
 	struct msi_desc *entry;
-	u16 control;
 
 	if (!dev->msix_enabled)
 		return;
 	BUG_ON(list_empty(&dev->msi_list));
 	entry = list_first_entry(&dev->msi_list, struct msi_desc, list);
-	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
 
 	/* route the table */
 	pci_intx_for_msi(dev, 0);
-	control |= PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL;
-	pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
+	msix_clear_and_set_ctrl(dev, 0,
+				PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL);
 
 	arch_restore_msi_irqs(dev);
 	list_for_each_entry(entry, &dev->msi_list, list) {
 		msix_mask_irq(entry, entry->masked);
 	}
 
-	control &= ~PCI_MSIX_FLAGS_MASKALL;
-	pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
+	msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
 }
 
 void pci_restore_msi_state(struct pci_dev *dev)
@@ -626,6 +613,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
 	entry->msi_attrib.maskbit	= !!(control & PCI_MSI_FLAGS_MASKBIT);
 	entry->msi_attrib.default_irq	= dev->irq;	/* Save IOAPIC IRQ */
 	entry->msi_attrib.pos		= dev->msi_cap;
+	entry->msi_attrib.multi_cap	= (control & PCI_MSI_FLAGS_QMASK) >> 1;
 
 	if (control & PCI_MSI_FLAGS_64BIT)
 		entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
@@ -634,7 +622,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
 	/* All MSIs are unmasked by default, Mask them all */
 	if (entry->msi_attrib.maskbit)
 		pci_read_config_dword(dev, entry->mask_pos, &entry->masked);
-	mask = msi_capable_mask(control);
+	mask = msi_mask(entry->msi_attrib.multi_cap);
 	msi_mask_irq(entry, mask, mask);
 
 	list_add_tail(&entry->list, &dev->msi_list);
@@ -743,12 +731,10 @@ static int msix_capability_init(struct pci_dev *dev,
 	u16 control;
 	void __iomem *base;
 
-	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
-
 	/* Ensure MSI-X is disabled while it is set up */
-	control &= ~PCI_MSIX_FLAGS_ENABLE;
-	pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
+	msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
 
+	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
 	/* Request & Map MSI-X table region */
 	base = msix_map_region(dev, msix_table_size(control));
 	if (!base)
@@ -767,8 +753,8 @@ static int msix_capability_init(struct pci_dev *dev,
 	 * MSI-X registers. We need to mask all the vectors to prevent
 	 * interrupts coming in before they're fully set up.
 	 */
-	control |= PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE;
-	pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
+	msix_clear_and_set_ctrl(dev, 0,
+				PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE);
 
 	msix_program_entries(dev, entries);
@@ -780,8 +766,7 @@ static int msix_capability_init(struct pci_dev *dev,
 	pci_intx_for_msi(dev, 0);
 	dev->msix_enabled = 1;
 
-	control &= ~PCI_MSIX_FLAGS_MASKALL;
-	pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
+	msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
 
 	return 0;
@@ -882,7 +867,6 @@ void pci_msi_shutdown(struct pci_dev *dev)
 {
 	struct msi_desc *desc;
 	u32 mask;
-	u16 ctrl;
 
 	if (!pci_msi_enable || !dev || !dev->msi_enabled)
 		return;
@@ -895,8 +879,7 @@ void pci_msi_shutdown(struct pci_dev *dev)
 	dev->msi_enabled = 0;
 
 	/* Return the device with MSI unmasked as initial states */
-	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &ctrl);
-	mask = msi_capable_mask(ctrl);
+	mask = msi_mask(desc->msi_attrib.multi_cap);
 	/* Keep cached state to be restored */
 	arch_msi_mask_irq(desc, mask, ~mask);
@@ -1001,7 +984,7 @@ void pci_msix_shutdown(struct pci_dev *dev)
 		arch_msix_mask_irq(entry, 1);
 	}
 
-	msix_set_enable(dev, 0);
+	msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
 	pci_intx_for_msi(dev, 1);
 	dev->msix_enabled = 0;
 }
@@ -1065,7 +1048,7 @@ void pci_msi_init_pci_dev(struct pci_dev *dev)
 
 	dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
 	if (dev->msix_cap)
-		msix_set_enable(dev, 0);
+		msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
 }
 
 /**
......
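Note: msix_clear_and_set_ctrl(), introduced above, is a plain read-modify-write of the MSI-X Message Control word. The standalone sketch below shows the same clear-then-set pattern against a mock register variable instead of PCI config space; the two flag values match PCI_MSIX_FLAGS_ENABLE and PCI_MSIX_FLAGS_MASKALL, everything else is illustrative.

/* Standalone sketch of the clear-then-set pattern used by
 * msix_clear_and_set_ctrl(); the "register" here is just a variable. */
#include <stdio.h>
#include <stdint.h>

#define MSIX_ENABLE  0x8000	/* PCI_MSIX_FLAGS_ENABLE */
#define MSIX_MASKALL 0x4000	/* PCI_MSIX_FLAGS_MASKALL */

static uint16_t msix_ctrl;	/* stand-in for the config-space word */

static void clear_and_set(uint16_t clear, uint16_t set)
{
	uint16_t ctrl = msix_ctrl;	/* read */

	ctrl &= ~clear;			/* clear the requested bits */
	ctrl |= set;			/* set the requested bits */
	msix_ctrl = ctrl;		/* write back */
}

int main(void)
{
	clear_and_set(MSIX_ENABLE, 0);			/* disable */
	clear_and_set(0, MSIX_ENABLE | MSIX_MASKALL);	/* enable, mask all */
	clear_and_set(MSIX_MASKALL, 0);			/* unmask */
	printf("final ctrl = %#06x\n", msix_ctrl);	/* prints 0x8000 */
	return 0;
}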
@@ -161,8 +161,8 @@ enum acpi_attr_enum {
 static void dsm_label_utf16s_to_utf8s(union acpi_object *obj, char *buf)
 {
 	int len;
-	len = utf16s_to_utf8s((const wchar_t *)obj->string.pointer,
-			      obj->string.length,
+	len = utf16s_to_utf8s((const wchar_t *)obj->buffer.pointer,
+			      obj->buffer.length,
 			      UTF16_LITTLE_ENDIAN,
 			      buf, PAGE_SIZE);
 	buf[len] = '\n';
@@ -187,16 +187,22 @@ static int dsm_get_label(struct device *dev, char *buf,
 	tmp = obj->package.elements;
 	if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 2 &&
 	    tmp[0].type == ACPI_TYPE_INTEGER &&
-	    tmp[1].type == ACPI_TYPE_STRING) {
+	    (tmp[1].type == ACPI_TYPE_STRING ||
+	     tmp[1].type == ACPI_TYPE_BUFFER)) {
 		/*
 		 * The second string element is optional even when
 		 * this _DSM is implemented; when not implemented,
 		 * this entry must return a null string.
 		 */
-		if (attr == ACPI_ATTR_INDEX_SHOW)
+		if (attr == ACPI_ATTR_INDEX_SHOW) {
 			scnprintf(buf, PAGE_SIZE, "%llu\n", tmp->integer.value);
-		else if (attr == ACPI_ATTR_LABEL_SHOW)
-			dsm_label_utf16s_to_utf8s(tmp + 1, buf);
+		} else if (attr == ACPI_ATTR_LABEL_SHOW) {
+			if (tmp[1].type == ACPI_TYPE_STRING)
+				scnprintf(buf, PAGE_SIZE, "%s\n",
+					  tmp[1].string.pointer);
+			else if (tmp[1].type == ACPI_TYPE_BUFFER)
+				dsm_label_utf16s_to_utf8s(tmp + 1, buf);
+		}
 		len = strlen(buf) > 0 ? strlen(buf) : -1;
 	}
......
@@ -203,10 +203,6 @@ static int pcie_portdrv_probe(struct pci_dev *dev,
 	     (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)))
 		return -ENODEV;
 
-	if (!dev->irq && dev->pin) {
-		dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; check vendor BIOS\n",
-			 dev->vendor, dev->device);
-	}
 	status = pcie_port_device_register(dev);
 	if (status)
 		return status;
......
@@ -3405,6 +3405,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ASMEDIA, 0x1080,
 DECLARE_PCI_FIXUP_HEADER(0x10e3, 0x8113, quirk_use_pcie_bridge_dma_alias);
 /* ITE 8892, https://bugzilla.kernel.org/show_bug.cgi?id=73551 */
 DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8892, quirk_use_pcie_bridge_dma_alias);
+/* Intel 82801, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c49 */
+DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias);
 
 static struct pci_dev *pci_func_0_dma_source(struct pci_dev *dev)
 {
......
@@ -25,7 +25,8 @@ void write_msi_msg(unsigned int irq, struct msi_msg *msg);
 struct msi_desc {
 	struct {
 		__u8	is_msix	: 1;
-		__u8	multiple: 3;	/* log2 number of messages */
+		__u8	multiple: 3;	/* log2 num of messages allocated */
+		__u8	multi_cap : 3;	/* log2 num of messages supported */
 		__u8	maskbit	: 1;	/* mask-pending bit supported ? */
 		__u8	is_64	: 1;	/* Address size: 0=32bit 1=64bit */
 		__u8	pos;		/* Location of the msi capability */
......
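Note: multi_cap caches the Multiple Message Capable field (MSI Message Control bits 3:1), and msi_mask() turns that log2 vector count into a per-vector mask. The standalone check below works through that arithmetic for an assumed example Message Control value of 0x0086; msi_mask() is restated here for illustration, including the usual shift-overflow guard that is not visible in the hunks above.

/* Worked example: Multiple Message Capable -> vector count -> mask. */
#include <stdio.h>
#include <stdint.h>

#define PCI_MSI_FLAGS_QMASK 0x000e	/* Multiple Message Capable, bits 3:1 */

/* Mirror of the kernel's msi_mask(); the >= 5 check avoids a 32-bit shift. */
static uint32_t msi_mask(unsigned int x)
{
	return (x >= 5) ? 0xffffffff : (1U << (1U << x)) - 1;
}

int main(void)
{
	uint16_t control = 0x0086;	/* assumed example Message Control */
	unsigned int multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1;

	/* multi_cap = 3 -> 2^3 = 8 vectors -> mask 0xff (bits 0-7) */
	printf("multi_cap=%u vectors=%u mask=%#x\n",
	       multi_cap, 1U << multi_cap, msi_mask(multi_cap));
	return 0;
}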