// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

struct hmm_vma_walk {
	struct hmm_range	*range;
	struct dev_pagemap	*pgmap;
	unsigned long		last;
	unsigned int		flags;
};

static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
			    bool write_fault, uint64_t *pfn)
{
	unsigned int flags = FAULT_FLAG_REMOTE;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	vm_fault_t ret;

	if (!vma)
		goto err;

	if (write_fault)
		flags |= FAULT_FLAG_WRITE;

	ret = handle_mm_fault(vma, addr, flags);
	if (ret & VM_FAULT_ERROR)
		goto err;

	return -EBUSY;

err:
	*pfn = range->values[HMM_PFN_ERROR];
	return -EFAULT;
}

static int hmm_pfns_fill(unsigned long addr, unsigned long end,
		struct hmm_range *range, enum hmm_pfn_value_e value)
{
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[value];

	return 0;
}

/*
 * hmm_vma_walk_hole_() - handle a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not?
 * @write_fault: write fault?
 * @walk: mm_walk structure
 * Return: 0 on success, -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
			      bool fault, bool write_fault,
			      struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	hmm_vma_walk->last = addr;
	i = (addr - range->start) >> PAGE_SHIFT;

	if (write_fault && walk->vma && !(walk->vma->vm_flags & VM_WRITE))
		return -EPERM;

	for (; addr < end; addr += PAGE_SIZE, i++) {
		pfns[i] = range->values[HMM_PFN_NONE];
		if (fault || write_fault) {
			int ret;

			ret = hmm_vma_do_fault(walk, addr, write_fault,
					       &pfns[i]);
			if (ret != -EBUSY)
				return ret;
		}
	}

	return (fault || write_fault) ? -EBUSY : 0;
}

static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				      uint64_t pfns, uint64_t cpu_flags,
				      bool *fault, bool *write_fault)
{
	struct hmm_range *range = hmm_vma_walk->range;

	if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT)
		return;

	/*
	 * We consider not only the individual per-page request but also the
	 * default flags requested for the whole range. The API can be used in
	 * two ways: in the first, the HMM user coalesces multiple page faults
	 * into one request and sets the flags per pfn for those faults; in the
	 * second, the HMM user wants to pre-fault a range with specific flags.
	 * For the latter it would be a waste to have the user pre-fill the pfn
	 * array with a default flags value, so the defaults are folded in here
	 * (see the usage sketch after this function).
	 */
	pfns = (pfns & range->pfn_flags_mask) | range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;
	/* If this is device memory then only fault if explicitly requested */
	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
		/* Do we fault on device memory? */
		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
			*write_fault = pfns & range->flags[HMM_PFN_WRITE];
			*fault = true;
		}
		return;
	}

	/* If CPU page table is not valid then we need to fault */
	*fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
	/* Need to write fault? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
		*write_fault = true;
		*fault = true;
	}
}
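
/*
 * Illustrative sketch (not compiled as part of this file) of the two request
 * styles folded together above.  The index "i" and a pre-sized pfns[] array
 * are assumed to be set up by the driver; the other fields are the real
 * struct hmm_range members used in this file.
 *
 *	// 1) Pre-fault the whole range for read/write without pre-filling
 *	//    the pfn array; only default_flags drives the fault decision:
 *	range->default_flags = range->flags[HMM_PFN_VALID] |
 *			       range->flags[HMM_PFN_WRITE];
 *	range->pfn_flags_mask = 0;
 *
 *	// 2) Coalesce individual faults: let every per-pfn bit through and
 *	//    mark only the entries that actually need faulting:
 *	range->default_flags = 0;
 *	range->pfn_flags_mask = -1ULL;
 *	range->pfns[i] = range->flags[HMM_PFN_VALID] |
 *			 range->flags[HMM_PFN_WRITE];
 */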

static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				 const uint64_t *pfns, unsigned long npages,
				 uint64_t cpu_flags, bool *fault,
				 bool *write_fault)
{
	unsigned long i;

	if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT) {
		*fault = *write_fault = false;
		return;
	}

	*fault = *write_fault = false;
	for (i = 0; i < npages; ++i) {
		hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
				   fault, write_fault);
		if ((*write_fault))
			return;
	}
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     __always_unused int depth, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
			     0, &fault, &write_fault);
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		unsigned long end, uint64_t *pfns, pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	bool fault, write_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);

	if (fault || write_fault)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
		if (pmd_devmap(pmd)) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
		}
		pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
	}
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	hmm_vma_walk->last = end;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		unsigned long end, uint64_t *pfns, pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

	*pfn = range->values[HMM_PFN_NONE];
	fault = write_fault = false;

	if (pte_none(pte)) {
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0,
				   &fault, &write_fault);
		if (fault || write_fault)
			goto fault;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		/*
		 * This is a special swap entry: ignore migration, handle
		 * device-private memory, and report anything else as an error.
		 */
		if (is_device_private_entry(entry)) {
			cpu_flags = range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_DEVICE_PRIVATE];
			cpu_flags |= is_write_device_private_entry(entry) ?
				range->flags[HMM_PFN_WRITE] : 0;
			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
					   &fault, &write_fault);
			if (fault || write_fault)
				goto fault;
			*pfn = hmm_device_entry_from_pfn(range,
					    swp_offset(entry));
			*pfn |= cpu_flags;
			return 0;
		}

		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0, &fault,
				   &write_fault);
		if (!fault && !write_fault)
			return 0;

		if (!non_swap_entry(entry))
			goto fault;

		if (is_migration_entry(entry)) {
			pte_unmap(ptep);
			hmm_vma_walk->last = addr;
			migration_entry_wait(walk->mm, pmdp, addr);
			return -EBUSY;
		}

		/* Report error for everything else */
		pte_unmap(ptep);
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags, &fault,
			   &write_fault);
	if (fault || write_fault)
		goto fault;

	if (pte_devmap(pte)) {
		hmm_vma_walk->pgmap = get_dev_pagemap(pte_pfn(pte),
					      hmm_vma_walk->pgmap);
		if (unlikely(!hmm_vma_walk->pgmap)) {
			pte_unmap(ptep);
			return -EBUSY;
		}
	}

	/*
	 * Since each architecture defines a struct page for the zero page, just
	 * fall through and treat it like a normal page.
	 */
	if (pte_special(pte) && !is_zero_pfn(pte_pfn(pte))) {
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0, &fault,
				   &write_fault);
		if (fault || write_fault) {
			pte_unmap(ptep);
			return -EFAULT;
		}
		*pfn = range->values[HMM_PFN_SPECIAL];
		return 0;
	}

	*pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = &range->pfns[(start - range->start) >> PAGE_SHIFT];
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	unsigned long addr = start;
	bool fault, write_fault;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, -1, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     0, &fault, &write_fault);
		if (fault || write_fault) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(walk->mm, pmdp);
			return -EBUSY;
		}
		return hmm_pfns_fill(start, end, range, HMM_PFN_NONE);
	}

	if (!pmd_present(pmd)) {
		hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0, &fault,
				     &write_fault);
		if (fault || write_fault)
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take the pmd lock here: even if some other thread
		 * is splitting the huge pmd, we will get that event through the
		 * mmu_notifier callback.
		 *
		 * So just read the pmd value again, check that it is still a
		 * transparent huge or device mapping, and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		return hmm_vma_handle_pmd(walk, addr, end, pfns, pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. either none,
	 * migration, huge or transparent huge. At this point it is either a
	 * valid pmd entry pointing to a pte directory or a bad pmd that will
	 * not recover.
	 */
	if (pmd_bad(pmd)) {
		hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0, &fault,
				     &write_fault);
		if (fault || write_fault)
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	ptep = pte_offset_map(pmdp, addr);
	for (; addr < end; addr += PAGE_SIZE, ptep++, pfns++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, pfns);
		if (r) {
			/* hmm_vma_handle_pte() did pte_unmap() */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	if (hmm_vma_walk->pgmap) {
		/*
		 * We do put_dev_pagemap() here and not in hmm_vma_handle_pte()
		 * so that we can take advantage of the get_dev_pagemap()
		 * optimization, which will not re-take a reference on a pgmap
		 * if we already have one.
		 */
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}

#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return pud_write(pud) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
		struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start;
	pud_t pud;
	int ret = 0;
	spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);

	if (!ptl)
		return 0;

	/* Normally we don't want to split the huge page */
	walk->action = ACTION_CONTINUE;

	pud = READ_ONCE(*pudp);
	if (pud_none(pud)) {
		spin_unlock(ptl);
		return hmm_vma_walk_hole(start, end, -1, walk);
	}

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		uint64_t *pfns, cpu_flags;
		bool fault, write_fault;

		if (!pud_present(pud)) {
			spin_unlock(ptl);
			return hmm_vma_walk_hole(start, end, -1, walk);
		}

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     cpu_flags, &fault, &write_fault);
		if (fault || write_fault) {
			spin_unlock(ptl);
			return hmm_vma_walk_hole_(addr, end, fault, write_fault,
						  walk);
		}

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap)) {
				ret = -EBUSY;
				goto out_unlock;
			}
			pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				  cpu_flags;
		}
		if (hmm_vma_walk->pgmap) {
			put_dev_pagemap(hmm_vma_walk->pgmap);
			hmm_vma_walk->pgmap = NULL;
		}
		hmm_vma_walk->last = end;
		goto out_unlock;
	}

	/* Ask for the PUD to be split */
	walk->action = ACTION_SUBTREE;

out_unlock:
	spin_unlock(ptl);
	return ret;
}
#else
#define hmm_vma_walk_pud	NULL
#endif

#ifdef CONFIG_HUGETLB_PAGE
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
	unsigned long addr = start, i, pfn;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t orig_pfn, cpu_flags;
	bool fault, write_fault;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> PAGE_SHIFT;
	orig_pfn = range->pfns[i];
	range->pfns[i] = range->values[HMM_PFN_NONE];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
	fault = write_fault = false;
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);
	if (fault || write_fault) {
		spin_unlock(ptl);
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
	}

	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
		range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				 cpu_flags;
	hmm_vma_walk->last = end;
	spin_unlock(ptl);
	return 0;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static int hmm_vma_walk_test(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;

	/*
	 * Skip vma ranges that don't have struct page backing them or map I/O
	 * devices directly.
	 *
	 * If the vma does not allow read access, then assume that it does not
	 * allow write access either. HMM does not support architectures that
	 * allow write without read.
	 */
	if ((vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP)) ||
	    !(vma->vm_flags & VM_READ)) {
		bool fault, write_fault;

		/*
		 * Check to see if a fault is requested for any page in the
		 * range.
		 */
		hmm_range_need_fault(hmm_vma_walk, range->pfns +
					((start - range->start) >> PAGE_SHIFT),
					(end - start) >> PAGE_SHIFT,
					0, &fault, &write_fault);
		if (fault || write_fault)
			return -EFAULT;

		hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
		hmm_vma_walk->last = end;

		/* Skip this vma and continue processing the next vma. */
		return 1;
	}

	return 0;
}

static const struct mm_walk_ops hmm_walk_ops = {
	.pud_entry	= hmm_vma_walk_pud,
	.pmd_entry	= hmm_vma_walk_pmd,
	.pte_hole	= hmm_vma_walk_hole,
	.hugetlb_entry	= hmm_vma_walk_hugetlb_entry,
	.test_walk	= hmm_vma_walk_test,
};

/**
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range:	range being faulted
 * @flags:	HMM_FAULT_* flags
 *
 * Return: the number of valid pages in range->pfns[] (from range start
 * address), which may be zero.  On error one of the following status codes
 * can be returned:
 *
 * -EINVAL:	Invalid arguments, or the mm or a virtual address lies in an
 *		invalid vma (e.g., a device file vma).
 * -ENOMEM:	Out of memory.
 * -EPERM:	Invalid permission (e.g., asking for write access on a
 *		read-only range).
 * -EBUSY:	The range has been invalidated and the caller needs to wait for
 *		the invalidation to finish.
 * -EFAULT:	Invalid range (i.e., either there is no valid vma or it is
 *		illegal to access that range).
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs
 * and the caller does not ask for migration.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 */
long hmm_range_fault(struct hmm_range *range, unsigned int flags)
{
	struct hmm_vma_walk hmm_vma_walk = {
		.range = range,
		.last = range->start,
		.flags = flags,
	};
	struct mm_struct *mm = range->notifier->mm;
	int ret;

	lockdep_assert_held(&mm->mmap_sem);

	do {
		/* If the range is no longer valid, force a retry. */
		if (mmu_interval_check_retry(range->notifier,
					     range->notifier_seq))
			return -EBUSY;
		ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
				      &hmm_walk_ops, &hmm_vma_walk);
	} while (ret == -EBUSY);

	if (ret)
		return ret;
	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_fault);
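
/*
 * A minimal usage sketch (not part of this file's build) of the retry loop a
 * driver is expected to wrap around hmm_range_fault().  The interval notifier
 * "notifier", the driver-defined flag/value arrays, pfn shift, pfns[] array
 * and "driver_lock" are hypothetical and assumed to be set up by a caller
 * that already holds a reference on the mm; everything else uses the kernel
 * APIs referenced in this file.
 *
 *	struct hmm_range range = {
 *		.notifier	= notifier,
 *		.start		= start,
 *		.end		= end,
 *		.pfns		= pfns,
 *		.flags		= driver_hmm_flags,
 *		.values		= driver_hmm_values,
 *		.pfn_shift	= driver_pfn_shift,
 *	};
 *	long ret;
 *
 *again:
 *	range.notifier_seq = mmu_interval_read_begin(notifier);
 *	down_read(&notifier->mm->mmap_sem);
 *	ret = hmm_range_fault(&range, 0);
 *	up_read(&notifier->mm->mmap_sem);
 *	if (ret < 0) {
 *		if (ret == -EBUSY)
 *			goto again;
 *		return ret;
 *	}
 *
 *	mutex_lock(&driver_lock);
 *	if (mmu_interval_read_retry(notifier, range.notifier_seq)) {
 *		mutex_unlock(&driver_lock);
 *		goto again;
 *	}
 *	// range.pfns[] is now stable; program the device page tables here.
 *	mutex_unlock(&driver_lock);
 */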