Commit 032fd3e5 authored by Linus Torvalds

Merge tag 'for-linus-4.7b-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen bug fixes from David Vrabel:

 - fix x86 PV dom0 crash during early boot on some hardware

 - fix two pciback bugs affecting certain devices

 - fix potential overflow when clearing page tables in x86 PV

* tag 'for-linus-4.7b-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen-pciback: return proper values during BAR sizing
  x86/xen: avoid m2p lookup when setting early page table entries
  xen/pciback: Fix conf_space read/write overlap check.
  x86/xen: fix upper bound of pmd loop in xen_cleanhighmap()
  xen/balloon: Fix declared-but-not-defined warning
parents d05be0d7 d2bd05d8
@@ -1113,7 +1113,7 @@ static void __init xen_cleanhighmap(unsigned long vaddr,
 	/* NOTE: The loop is more greedy than the cleanup_highmap variant.
 	 * We include the PMD passed in on _both_ boundaries. */
-	for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE));
+	for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
 			pmd++, vaddr += PMD_SIZE) {
 		if (pmd_none(*pmd))
 			continue;
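
The loop-bound fix above comes down to C pointer arithmetic: level2_kernel_pgt is a table of pmd_t entries occupying a single page, so pmd + PAGE_SIZE advances PAGE_SIZE entries (PAGE_SIZE * sizeof(pmd_t) bytes) and lets the walk run far past the table, while pmd + PTRS_PER_PMD stops exactly at its end. A minimal user-space sketch of the difference, with the x86-64 sizes assumed (8-byte entries, 4 KiB pages):

#include <stdio.h>

int main(void)
{
	unsigned long pmd_entry_size = 8;	/* sizeof(pmd_t) on x86-64 (assumed) */
	unsigned long page_size = 4096;		/* PAGE_SIZE */
	unsigned long ptrs_per_pmd = page_size / pmd_entry_size;	/* PTRS_PER_PMD = 512 */

	/* pmd + PAGE_SIZE moves the pointer PAGE_SIZE entries forward: */
	printf("bound with PAGE_SIZE:    %lu bytes past the base\n",
	       page_size * pmd_entry_size);	/* 32768: eight pages, far past the one-page table */

	/* pmd + PTRS_PER_PMD moves it exactly one page of entries forward: */
	printf("bound with PTRS_PER_PMD: %lu bytes past the base\n",
	       ptrs_per_pmd * pmd_entry_size);	/* 4096: the end of the table */
	return 0;
}
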
@@ -1551,41 +1551,6 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
 #endif
 }
 
-#ifdef CONFIG_X86_32
-static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
-{
-	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
-	if (pte_val_ma(*ptep) & _PAGE_PRESENT)
-		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
-			       pte_val_ma(pte));
-
-	return pte;
-}
-#else /* CONFIG_X86_64 */
-static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
-{
-	unsigned long pfn;
-
-	if (xen_feature(XENFEAT_writable_page_tables) ||
-	    xen_feature(XENFEAT_auto_translated_physmap) ||
-	    xen_start_info->mfn_list >= __START_KERNEL_map)
-		return pte;
-
-	/*
-	 * Pages belonging to the initial p2m list mapped outside the default
-	 * address range must be mapped read-only. This region contains the
-	 * page tables for mapping the p2m list, too, and page tables MUST be
-	 * mapped read-only.
-	 */
-	pfn = pte_pfn(pte);
-	if (pfn >= xen_start_info->first_p2m_pfn &&
-	    pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
-		pte = __pte_ma(pte_val_ma(pte) & ~_PAGE_RW);
-
-	return pte;
-}
-#endif /* CONFIG_X86_64 */
-
 /*
  * Init-time set_pte while constructing initial pagetables, which
  * doesn't allow RO page table pages to be remapped RW.
@@ -1600,13 +1565,37 @@ static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
  * so always write the PTE directly and rely on Xen trapping and
  * emulating any updates as necessary.
  */
-static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
+__visible pte_t xen_make_pte_init(pteval_t pte)
 {
-	if (pte_mfn(pte) != INVALID_P2M_ENTRY)
-		pte = mask_rw_pte(ptep, pte);
-	else
-		pte = __pte_ma(0);
+#ifdef CONFIG_X86_64
+	unsigned long pfn;
+
+	/*
+	 * Pages belonging to the initial p2m list mapped outside the default
+	 * address range must be mapped read-only. This region contains the
+	 * page tables for mapping the p2m list, too, and page tables MUST be
+	 * mapped read-only.
+	 */
+	pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
+	if (xen_start_info->mfn_list < __START_KERNEL_map &&
+	    pfn >= xen_start_info->first_p2m_pfn &&
+	    pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
+		pte &= ~_PAGE_RW;
+#endif
+	pte = pte_pfn_to_mfn(pte);
+	return native_make_pte(pte);
+}
+PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);
+
+static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
+{
+#ifdef CONFIG_X86_32
+	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
+	if (pte_mfn(pte) != INVALID_P2M_ENTRY
+	    && pte_val_ma(*ptep) & _PAGE_PRESENT)
+		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
+			       pte_val_ma(pte));
+#endif
 	native_set_pte(ptep, pte);
 }
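
Note on the hunk above: the read-only restriction for the initial p2m window is now applied in xen_make_pte_init(), on the raw pteval_t while its frame field still holds a pfn, immediately before pte_pfn_to_mfn() translates it. The removed mask_rw_pte() ran later, on an already-translated pte, so recovering the pfn for the range check needed an m2p lookup; doing the check before translation is what the "avoid m2p lookup" change refers to. A standalone sketch of the pfn-window test, with illustrative constants rather than the kernel's definitions:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PTE_PFN_MASK	0x000ffffffffff000ULL	/* frame bits of a pte (x86-64 layout assumed) */
#define _PAGE_RW	0x002ULL

/* Clear _PAGE_RW if the frame lies inside the initial p2m window. */
static uint64_t mask_initial_p2m(uint64_t pte, uint64_t first_p2m_pfn,
				 uint64_t nr_p2m_frames)
{
	uint64_t pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;

	if (pfn >= first_p2m_pfn && pfn < first_p2m_pfn + nr_p2m_frames)
		pte &= ~_PAGE_RW;	/* page tables must stay read-only */
	return pte;
}

int main(void)
{
	/* pfn 0x1234 falls inside an assumed p2m window [0x1200, 0x1300). */
	uint64_t pte = (0x1234ULL << PAGE_SHIFT) | _PAGE_RW | 0x001ULL /* present */;

	printf("0x%llx\n", (unsigned long long)mask_initial_p2m(pte, 0x1200, 0x100));
	return 0;
}
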
@@ -2407,6 +2396,7 @@ static void __init xen_post_allocator_init(void)
 	pv_mmu_ops.alloc_pud = xen_alloc_pud;
 	pv_mmu_ops.release_pud = xen_release_pud;
 #endif
+	pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte);
 
 #ifdef CONFIG_X86_64
 	pv_mmu_ops.write_cr3 = &xen_write_cr3;
@@ -2455,7 +2445,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
 	.pte_val = PV_CALLEE_SAVE(xen_pte_val),
 	.pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
 
-	.make_pte = PV_CALLEE_SAVE(xen_make_pte),
+	.make_pte = PV_CALLEE_SAVE(xen_make_pte_init),
 	.make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
 
 #ifdef CONFIG_X86_PAE
......
@@ -151,8 +151,6 @@ static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
 static void balloon_process(struct work_struct *work);
 static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
 
-static void release_memory_resource(struct resource *resource);
-
 /* When ballooning out (allocating memory to return to Xen) we don't really
    want the kernel to try too hard since that can trigger the oom killer. */
 #define GFP_BALLOON \
@@ -248,6 +246,19 @@ static enum bp_state update_schedule(enum bp_state state)
 }
 
 #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
+static void release_memory_resource(struct resource *resource)
+{
+	if (!resource)
+		return;
+
+	/*
+	 * No need to reset region to identity mapped since we now
+	 * know that no I/O can be in this region
+	 */
+	release_resource(resource);
+	kfree(resource);
+}
+
 static struct resource *additional_memory_resource(phys_addr_t size)
 {
 	struct resource *res;
@@ -286,19 +297,6 @@ static struct resource *additional_memory_resource(phys_addr_t size)
 	return res;
 }
 
-static void release_memory_resource(struct resource *resource)
-{
-	if (!resource)
-		return;
-
-	/*
-	 * No need to reset region to identity mapped since we now
-	 * know that no I/O can be in this region
-	 */
-	release_resource(resource);
-	kfree(resource);
-}
-
 static enum bp_state reserve_additional_memory(void)
 {
 	long credit;
......
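The balloon change above is pure code motion: the definition of release_memory_resource() moves ahead of its first caller inside the CONFIG_XEN_BALLOON_MEMORY_HOTPLUG block, so the file-scope prototype can go away. Previously, with that option disabled, the prototype survived while no definition or caller did, which is the pattern gcc's -Wunused-function reports as a static function declared but never defined. A minimal, assumed reproduction of the warning pattern (not taken from the driver):

/* demo.c -- compile with: gcc -Wall -c demo.c (FEATURE not defined) */
static void helper(void);		/* file-scope prototype survives unconditionally */

#ifdef FEATURE
static void helper(void)		/* definition and caller only exist with FEATURE */
{
}

void feature_entry(void)
{
	helper();
}
#endif
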
@@ -183,8 +183,7 @@ int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
 		field_start = OFFSET(cfg_entry);
 		field_end = OFFSET(cfg_entry) + field->size;
 
-		if ((req_start >= field_start && req_start < field_end)
-		    || (req_end > field_start && req_end <= field_end)) {
+		if (req_end > field_start && field_end > req_start) {
 			err = conf_space_read(dev, cfg_entry, field_start,
 					      &tmp_val);
 			if (err)
@@ -230,8 +229,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
 		field_start = OFFSET(cfg_entry);
 		field_end = OFFSET(cfg_entry) + field->size;
 
-		if ((req_start >= field_start && req_start < field_end)
-		    || (req_end > field_start && req_end <= field_end)) {
+		if (req_end > field_start && field_end > req_start) {
 			tmp_val = 0;
 
 			err = xen_pcibk_config_read(dev, field_start,
......
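The new condition in both pciback hunks is the standard overlap test for half-open ranges: [req_start, req_end) and [field_start, field_end) intersect exactly when each range starts before the other ends. The old pair of clauses only tested whether one of the request's endpoints landed inside the field, so a request that strictly contains a field matched neither clause and the field was silently skipped. A small check of the two predicates on such a case, with hypothetical offsets:

#include <stdio.h>

int main(void)
{
	/* A 4-byte access at offset 0 fully covers a 2-byte field at offset 1. */
	unsigned int req_start = 0, req_end = 4;
	unsigned int field_start = 1, field_end = 3;

	int old_hit = (req_start >= field_start && req_start < field_end) ||
		      (req_end > field_start && req_end <= field_end);
	int new_hit = req_end > field_start && field_end > req_start;

	printf("old: %d, new: %d\n", old_hit, new_hit);	/* prints "old: 0, new: 1" */
	return 0;
}
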
@@ -145,7 +145,7 @@ static int rom_write(struct pci_dev *dev, int offset, u32 value, void *data)
 	/* A write to obtain the length must happen as a 32-bit write.
 	 * This does not (yet) support writing individual bytes
 	 */
-	if (value == ~PCI_ROM_ADDRESS_ENABLE)
+	if ((value | ~PCI_ROM_ADDRESS_MASK) == ~0U)
 		bar->which = 1;
 	else {
 		u32 tmpval;
@@ -225,38 +225,42 @@ static inline void read_dev_bar(struct pci_dev *dev,
 			   (PCI_BASE_ADDRESS_SPACE_MEMORY |
 			    PCI_BASE_ADDRESS_MEM_TYPE_64))) {
 			bar_info->val = res[pos - 1].start >> 32;
-			bar_info->len_val = res[pos - 1].end >> 32;
+			bar_info->len_val = -resource_size(&res[pos - 1]) >> 32;
 			return;
 		}
 	}
 
+	if (!res[pos].flags ||
+	    (res[pos].flags & (IORESOURCE_DISABLED | IORESOURCE_UNSET |
+			       IORESOURCE_BUSY)))
+		return;
+
 	bar_info->val = res[pos].start |
 			(res[pos].flags & PCI_REGION_FLAG_MASK);
-	bar_info->len_val = resource_size(&res[pos]);
+	bar_info->len_val = -resource_size(&res[pos]) |
+			    (res[pos].flags & PCI_REGION_FLAG_MASK);
 }
 
 static void *bar_init(struct pci_dev *dev, int offset)
 {
-	struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
+	struct pci_bar_info *bar = kzalloc(sizeof(*bar), GFP_KERNEL);
 
 	if (!bar)
 		return ERR_PTR(-ENOMEM);
 
 	read_dev_bar(dev, bar, offset, ~0);
-	bar->which = 0;
 
 	return bar;
 }
 
 static void *rom_init(struct pci_dev *dev, int offset)
 {
-	struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
+	struct pci_bar_info *bar = kzalloc(sizeof(*bar), GFP_KERNEL);
 
 	if (!bar)
 		return ERR_PTR(-ENOMEM);
 
 	read_dev_bar(dev, bar, offset, ~PCI_ROM_ADDRESS_ENABLE);
-	bar->which = 0;
 
 	return bar;
 }
......
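Background for the BAR hunks: a guest sizes a BAR by writing all-ones and reading the register back; the device keeps the low flag bits and returns zeros in the address bits it cannot decode, so for a BAR of size S the read-back is -S in the address bits OR'd with the flags. That is why len_val is now built from -resource_size() plus the flag bits instead of the plain size, why the upper half of a 64-bit BAR reports the upper 32 bits of -size, and why rom_write() accepts a sizing write whenever all address bits (PCI_ROM_ADDRESS_MASK) are set, regardless of the enable bit. A sketch of the encoding with made-up numbers (a 64 KiB, 32-bit memory BAR and a 0xf flag mask assumed):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t size  = 0x10000;	/* 64 KiB memory BAR (assumed) */
	uint32_t flags = 0x0;		/* low flag bits: 32-bit, non-prefetchable memory */

	/* Value the emulated read-back should return after a sizing write: */
	uint32_t len_val = -size | flags;		/* 0xffff0000 */

	/* How a guest decodes it: mask the flag bits, invert, add one. */
	uint32_t decoded = ~(len_val & ~0xfU) + 1;	/* 0x10000 */

	printf("len_val=0x%08x decoded=0x%x\n",
	       (unsigned int)len_val, (unsigned int)decoded);
	return 0;
}
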