Commit bf71d0e1 authored by Linus Torvalds

Merge tag 'stable/for-linus-3.6-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen

Pull Xen bug-fixes from Konrad Rzeszutek Wilk:
 * Fix for a TLB flushing regression introduced in v3.6
 * Fix Xen-SWIOTLB not using the proper DMA mask - the device had a
   64-bit mask, but on a 32-bit kernel coherent pages need to be
   allocated from a 32-bit pool.
 * When trying to re-use P2M nodes we had an off-by-one error and
   triggered a BUG_ON check with a specific CONFIG_ option.
 * When doing FLR in the Xen-PCI-backend we would first do the FLR and
   then save the PCI configuration space. We needed to do it the other
   way around.

* tag 'stable/for-linus-3.6-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
  xen/pciback: Fix proper FLR steps.
  xen: Use correct masking in xen_swiotlb_alloc_coherent.
  xen: fix logical error in tlb flushing
  xen/p2m: Fix one-off error in checking the P2M tree directory.
parents f8b9cf0f 80ba77df
@@ -1283,7 +1283,7 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
         cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
         args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
-        if (start != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
+        if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
                 args->op.cmd = MMUEXT_INVLPG_MULTI;
                 args->op.arg1.linear_addr = start;
         }
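
The hunk above fixes the test that chooses between a single-page invalidation and a full flush: with the v3.6 flush interface the "flush everything" sentinel is passed in end, so comparing start against TLB_FLUSH_ALL could wrongly pick a single-page INVLPG for a full-flush request. A minimal standalone sketch of that range check (the PAGE_SIZE and TLB_FLUSH_ALL values and the helper name are illustrative assumptions, not the kernel code):

#include <stdio.h>

#define PAGE_SIZE     4096UL
#define TLB_FLUSH_ALL (~0UL)    /* sentinel meaning "flush everything" */

/* A full flush is requested with end == TLB_FLUSH_ALL, so the sentinel
 * must be compared against end, not start -- the logic the fix restores. */
static int single_page_flush(unsigned long start, unsigned long end)
{
        return end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE;
}

int main(void)
{
        printf("%d\n", single_page_flush(0x1000, 0x2000));   /* 1: one page, invalidate it */
        printf("%d\n", single_page_flush(0, TLB_FLUSH_ALL)); /* 0: needs a full TLB flush */
        return 0;
}
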
@@ -599,7 +599,7 @@ bool __init early_can_reuse_p2m_middle(unsigned long set_pfn, unsigned long set_
         if (p2m_index(set_pfn))
                 return false;
-        for (pfn = 0; pfn <= MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) {
+        for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) {
                 topidx = p2m_top_index(pfn);
                 if (!p2m_top[topidx])
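
MAX_DOMAIN_PAGES is an exclusive upper bound on the PFN range, so looping with <= indexed one P2M top-level entry past the end of the tree directory, which is what could trip the BUG_ON under the CONFIG_ option mentioned in the pull message. A tiny standalone illustration of that kind of off-by-one (array name and size are made up for the example):

#include <stdio.h>

#define MAX_ENTRIES 4   /* exclusive bound: valid indices are 0 .. MAX_ENTRIES-1 */

int main(void)
{
        int table[MAX_ENTRIES] = { 10, 20, 30, 40 };
        unsigned int i;

        /* Using "i <= MAX_ENTRIES" here would read table[MAX_ENTRIES],
         * one element past the end -- the same off-by-one the p2m fix removes. */
        for (i = 0; i < MAX_ENTRIES; i++)
                printf("table[%u] = %d\n", i, table[i]);

        return 0;
}
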
@@ -232,7 +232,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                 return ret;
         if (hwdev && hwdev->coherent_dma_mask)
-                dma_mask = hwdev->coherent_dma_mask;
+                dma_mask = dma_alloc_coherent_mask(hwdev, flags);
         phys = virt_to_phys(ret);
         dev_addr = xen_phys_to_bus(phys);
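
The change above derives the mask from dma_alloc_coherent_mask(hwdev, flags) instead of using the device's coherent_dma_mask directly, so a device advertising 64-bit addressing still gets its coherent buffer allocated from (and checked against) a 32-bit pool on a 32-bit kernel. A rough userspace sketch of the clamping idea, using a hypothetical coherent_alloc_mask() helper (not the kernel API):

#include <stdio.h>
#include <stdint.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* Illustrative only: clamp the device's mask when the allocation has to
 * come from a 32-bit pool, as on a 32-bit kernel. */
static uint64_t coherent_alloc_mask(uint64_t dev_mask, int need_32bit_pool)
{
        return need_32bit_pool ? (dev_mask & DMA_BIT_MASK(32)) : dev_mask;
}

int main(void)
{
        /* A 64-bit capable device still gets a 32-bit mask for this allocation. */
        printf("0x%llx\n",
               (unsigned long long)coherent_alloc_mask(DMA_BIT_MASK(64), 1));
        return 0;
}
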
@@ -353,16 +353,16 @@ static int __devinit pcistub_init_device(struct pci_dev *dev)
         if (err)
                 goto config_release;
-        dev_dbg(&dev->dev, "reseting (FLR, D3, etc) the device\n");
-        __pci_reset_function_locked(dev);
         /* We need the device active to save the state. */
         dev_dbg(&dev->dev, "save state of device\n");
         pci_save_state(dev);
         dev_data->pci_saved_state = pci_store_saved_state(dev);
         if (!dev_data->pci_saved_state)
                 dev_err(&dev->dev, "Could not store PCI conf saved state!\n");
+        else {
+                dev_dbg(&dev->dev, "reseting (FLR, D3, etc) the device\n");
+                __pci_reset_function_locked(dev);
+        }
         /* Now disable the device (this also ensures some private device
          * data is setup before we export)
          */
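
The reordering above saves the PCI configuration space while the device is still active and only then issues the reset, and it skips the reset entirely if the snapshot could not be stored, since there would be nothing to restore afterwards. A hypothetical sketch of that ordering with made-up stand-ins save_config_space() and reset_function() (not the real PCI helpers):

#include <stdio.h>

static int save_config_space(void)
{
        printf("saving config space while the device is still active\n");
        return 1;               /* pretend the snapshot was stored */
}

static void reset_function(void)
{
        printf("issuing FLR/D3 reset\n");
}

int main(void)
{
        /* Save first, then reset -- and only reset if there is a saved
         * snapshot to restore from later.  Resetting first (the old order)
         * would capture the post-reset, default config space instead. */
        if (save_config_space())
                reset_function();
        else
                printf("no saved state, skipping the reset\n");
        return 0;
}
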