Commit 08ddddda authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Jason Gunthorpe

mm/hmm: check the device private page owner in hmm_range_fault()

hmm_range_fault() will succeed for any kind of device private memory, even
if it doesn't belong to the calling entity.  While nouveau has some crude
checks for that, they are broken because they assume nouveau is the only
user of device private memory.  Fix this by passing in an expected pgmap
owner in the hmm_range_fault structure.

If a device_private page is found and doesn't match the owner then it is
treated as a non-present and non-faultable page.

This prevents a bug in amdgpu, where it doesn't know how to handle
device_private pages, but hmm_range_fault would return them anyhow.

Fixes: 4ef589dc ("mm/hmm/devmem: device memory hotplug using ZONE_DEVICE")
Link: https://lore.kernel.org/r/20200316193216.920734-5-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 17ffdc48
...@@ -672,12 +672,6 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm, ...@@ -672,12 +672,6 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
return ret; return ret;
} }
static inline bool
nouveau_dmem_page(struct nouveau_drm *drm, struct page *page)
{
return is_device_private_page(page) && drm->dmem == page_to_dmem(page);
}
void void
nouveau_dmem_convert_pfn(struct nouveau_drm *drm, nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
struct hmm_range *range) struct hmm_range *range)
...@@ -696,12 +690,6 @@ nouveau_dmem_convert_pfn(struct nouveau_drm *drm, ...@@ -696,12 +690,6 @@ nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
if (!is_device_private_page(page)) if (!is_device_private_page(page))
continue; continue;
if (!nouveau_dmem_page(drm, page)) {
WARN(1, "Some unknown device memory !\n");
range->pfns[i] = 0;
continue;
}
addr = nouveau_dmem_page_addr(page); addr = nouveau_dmem_page_addr(page);
range->pfns[i] &= ((1UL << range->pfn_shift) - 1); range->pfns[i] &= ((1UL << range->pfn_shift) - 1);
range->pfns[i] |= (addr >> PAGE_SHIFT) << range->pfn_shift; range->pfns[i] |= (addr >> PAGE_SHIFT) << range->pfn_shift;
......
...@@ -132,6 +132,7 @@ enum hmm_pfn_value_e { ...@@ -132,6 +132,7 @@ enum hmm_pfn_value_e {
* @pfn_flags_mask: allows to mask pfn flags so that only default_flags matter * @pfn_flags_mask: allows to mask pfn flags so that only default_flags matter
* @pfn_shifts: pfn shift value (should be <= PAGE_SHIFT) * @pfn_shifts: pfn shift value (should be <= PAGE_SHIFT)
* @valid: pfns array did not change since it has been fill by an HMM function * @valid: pfns array did not change since it has been fill by an HMM function
* @dev_private_owner: owner of device private pages
*/ */
struct hmm_range { struct hmm_range {
struct mmu_interval_notifier *notifier; struct mmu_interval_notifier *notifier;
...@@ -144,6 +145,7 @@ struct hmm_range { ...@@ -144,6 +145,7 @@ struct hmm_range {
uint64_t default_flags; uint64_t default_flags;
uint64_t pfn_flags_mask; uint64_t pfn_flags_mask;
uint8_t pfn_shift; uint8_t pfn_shift;
void *dev_private_owner;
}; };
/* /*
......
...@@ -218,6 +218,14 @@ int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr, ...@@ -218,6 +218,14 @@ int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
unsigned long end, uint64_t *pfns, pmd_t pmd); unsigned long end, uint64_t *pfns, pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
 * A swap entry only counts as a device private entry for this walk when
 * the backing pgmap's owner matches the owner the caller supplied in
 * range->dev_private_owner; foreign device-private memory is ignored.
 */
static inline bool hmm_is_device_private_entry(struct hmm_range *range,
		swp_entry_t entry)
{
	if (!is_device_private_entry(entry))
		return false;
	return range->dev_private_owner ==
		device_private_entry_to_page(entry)->pgmap->owner;
}
static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte) static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{ {
if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte)) if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
...@@ -256,7 +264,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr, ...@@ -256,7 +264,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
* Never fault in device private pages pages, but just report * Never fault in device private pages pages, but just report
* the PFN even if not present. * the PFN even if not present.
*/ */
if (is_device_private_entry(entry)) { if (hmm_is_device_private_entry(range, entry)) {
*pfn = hmm_device_entry_from_pfn(range, *pfn = hmm_device_entry_from_pfn(range,
swp_offset(entry)); swp_offset(entry));
*pfn |= range->flags[HMM_PFN_VALID]; *pfn |= range->flags[HMM_PFN_VALID];
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment