// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/memory.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/kfence.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/ptdump.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/kfence.h>

#define NO_BLOCK_MAPPINGS	BIT(0)
#define NO_CONT_MAPPINGS	BIT(1)
#define NO_EXEC_MAPPINGS	BIT(2)	/* assumes FEAT_HPDS is not used */

int idmap_t0sz __ro_after_init;

#if VA_BITS > 48
u64 vabits_actual __ro_after_init = VA_BITS_MIN;
EXPORT_SYMBOL(vabits_actual);
#endif

u64 kimage_vaddr __ro_after_init = (u64)&_text;
EXPORT_SYMBOL(kimage_vaddr);

u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);

u32 __boot_cpu_mode[] = { BOOT_CPU_MODE_EL2, BOOT_CPU_MODE_EL1 };

/*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with MMU turned off.
 */
long __section(".mmuoff.data.write") __early_cpu_boot_status;

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

static DEFINE_SPINLOCK(swapper_pgdir_lock);
static DEFINE_MUTEX(fixmap_lock);

void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgd_t *fixmap_pgdp;

	spin_lock(&swapper_pgdir_lock);
	fixmap_pgdp = pgd_set_fixmap(__pa_symbol(pgdp));
	WRITE_ONCE(*fixmap_pgdp, pgd);
	/*
	 * We need dsb(ishst) here to ensure the page-table-walker sees
	 * our new entry before set_p?d() returns. The fixmap's
	 * flush_tlb_kernel_range() via clear_fixmap() does this for us.
	 */
	pgd_clear_fixmap();
	spin_unlock(&swapper_pgdir_lock);
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_is_map_memory(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

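/*
 * Allocate a zeroed page from memblock for use as an early page table and
 * return its physical address.
 */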
static phys_addr_t __init early_pgtable_alloc(int shift)
{
	phys_addr_t phys;
	void *ptr;

	phys = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0,
					 MEMBLOCK_ALLOC_NOLEAKTRACE);
	if (!phys)
		panic("Failed to allocate page table page\n");

	/*
	 * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
	 * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
	 * any level of table.
	 */
	ptr = pte_set_fixmap(phys);

	memset(ptr, 0, PAGE_SIZE);

	/*
	 * Implicit barriers also ensure the zeroed page is visible to the page
	 * table walker
	 */
	pte_clear_fixmap();

	return phys;
}

bool pgattr_change_is_safe(u64 old, u64 new)
{
	/*
	 * The following mapping attributes may be updated in live
	 * kernel mappings without the need for break-before-make.
	 */
	pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;

	/* creating or taking down mappings is always safe */
	if (!pte_valid(__pte(old)) || !pte_valid(__pte(new)))
		return true;

	/* A live entry's pfn should not change */
	if (pte_pfn(__pte(old)) != pte_pfn(__pte(new)))
		return false;

	/* live contiguous mappings may not be manipulated at all */
	if ((old | new) & PTE_CONT)
		return false;

	/* Transitioning from Non-Global to Global is unsafe */
	if (old & ~new & PTE_NG)
		return false;

	/*
	 * Changing the memory type between Normal and Normal-Tagged is safe
	 * since Tagged is considered a permission attribute from the
	 * mismatched attribute aliases perspective.
	 */
	if (((old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
	     (old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)) &&
	    ((new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
	     (new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)))
		mask |= PTE_ATTRINDX_MASK;

	return ((old ^ new) & ~mask) == 0;
}

static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot)
{
	pte_t *ptep;

	ptep = pte_set_fixmap_offset(pmdp, addr);
	do {
		pte_t old_pte = READ_ONCE(*ptep);

		set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot));

		/*
		 * After the PTE entry has been populated once, we
		 * only allow updates to the permission attributes.
		 */
		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte),
					      READ_ONCE(pte_val(*ptep))));

		phys += PAGE_SIZE;
	} while (ptep++, addr += PAGE_SIZE, addr != end);

	pte_clear_fixmap();
}

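/*
 * Populate the PTE level for [addr, end), allocating a PTE table first if the
 * PMD entry is empty and using contiguous (CONT_PTE) mappings where the
 * alignment of the range allows it.
 */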
static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(int),
				int flags)
{
	unsigned long next;
	pmd_t pmd = READ_ONCE(*pmdp);

	BUG_ON(pmd_sect(pmd));
	if (pmd_none(pmd)) {
		pmdval_t pmdval = PMD_TYPE_TABLE | PMD_TABLE_UXN;
		phys_addr_t pte_phys;

		if (flags & NO_EXEC_MAPPINGS)
			pmdval |= PMD_TABLE_PXN;
		BUG_ON(!pgtable_alloc);
		pte_phys = pgtable_alloc(PAGE_SHIFT);
		__pmd_populate(pmdp, pte_phys, pmdval);
		pmd = READ_ONCE(*pmdp);
	}
	BUG_ON(pmd_bad(pmd));

	do {
		pgprot_t __prot = prot;

		next = pte_cont_addr_end(addr, end);

		/* use a contiguous mapping if the range is suitably aligned */
		if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) &&
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);

		init_pte(pmdp, addr, next, phys, __prot);

		phys += next - addr;
	} while (addr = next, addr != end);
}

static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot,
		     phys_addr_t (*pgtable_alloc)(int), int flags)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_set_fixmap_offset(pudp, addr);
	do {
		pmd_t old_pmd = READ_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);

		/* try section mapping first */
		if (((addr | next | phys) & ~PMD_MASK) == 0 &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
			pmd_set_huge(pmdp, phys, prot);

			/*
			 * After the PMD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
						      READ_ONCE(pmd_val(*pmdp))));
		} else {
			alloc_init_cont_pte(pmdp, addr, next, phys, prot,
					    pgtable_alloc, flags);

			BUG_ON(pmd_val(old_pmd) != 0 &&
			       pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));
		}
		phys += next - addr;
	} while (pmdp++, addr = next, addr != end);

	pmd_clear_fixmap();
}

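/*
 * Populate the PMD level for [addr, end), allocating a PMD table first if the
 * PUD entry is empty and using contiguous (CONT_PMD) mappings where the
 * alignment of the range allows it.
 */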
static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(int), int flags)
{
	unsigned long next;
	pud_t pud = READ_ONCE(*pudp);

	/*
	 * Check for initial section mappings in the pgd/pud.
	 */
	BUG_ON(pud_sect(pud));
	if (pud_none(pud)) {
		pudval_t pudval = PUD_TYPE_TABLE | PUD_TABLE_UXN;
		phys_addr_t pmd_phys;

		if (flags & NO_EXEC_MAPPINGS)
			pudval |= PUD_TABLE_PXN;
		BUG_ON(!pgtable_alloc);
		pmd_phys = pgtable_alloc(PMD_SHIFT);
		__pud_populate(pudp, pmd_phys, pudval);
		pud = READ_ONCE(*pudp);
	}
	BUG_ON(pud_bad(pud));

	do {
		pgprot_t __prot = prot;

		next = pmd_cont_addr_end(addr, end);

		/* use a contiguous mapping if the range is suitably aligned */
		if ((((addr | next | phys) & ~CONT_PMD_MASK) == 0) &&
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);

		init_pmd(pudp, addr, next, phys, __prot, pgtable_alloc, flags);

		phys += next - addr;
	} while (addr = next, addr != end);
}

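/*
 * Populate the PUD level for [addr, end), putting down a 1GB block mapping
 * where the granule supports it and the range is suitably aligned.
 */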
static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(int),
			   int flags)
{
	unsigned long next;
	pud_t *pudp;
	p4d_t *p4dp = p4d_offset(pgdp, addr);
	p4d_t p4d = READ_ONCE(*p4dp);

	if (p4d_none(p4d)) {
		p4dval_t p4dval = P4D_TYPE_TABLE | P4D_TABLE_UXN;
		phys_addr_t pud_phys;

		if (flags & NO_EXEC_MAPPINGS)
			p4dval |= P4D_TABLE_PXN;
		BUG_ON(!pgtable_alloc);
		pud_phys = pgtable_alloc(PUD_SHIFT);
		__p4d_populate(p4dp, pud_phys, p4dval);
		p4d = READ_ONCE(*p4dp);
	}
	BUG_ON(p4d_bad(p4d));

	pudp = pud_set_fixmap_offset(p4dp, addr);
	do {
		pud_t old_pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (pud_sect_supported() &&
		   ((addr | next | phys) & ~PUD_MASK) == 0 &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
			pud_set_huge(pudp, phys, prot);

			/*
			 * After the PUD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
						      READ_ONCE(pud_val(*pudp))));
		} else {
			alloc_init_cont_pmd(pudp, addr, next, phys, prot,
					    pgtable_alloc, flags);

			BUG_ON(pud_val(old_pud) != 0 &&
			       pud_val(old_pud) != READ_ONCE(pud_val(*pudp)));
		}
		phys += next - addr;
	} while (pudp++, addr = next, addr != end);

	pud_clear_fixmap();
}

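/*
 * Install a mapping of [virt, virt + size) to phys in the given page table,
 * walking down from the PGD and allocating intermediate tables as needed.
 */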
static void __create_pgd_mapping_locked(pgd_t *pgdir, phys_addr_t phys,
					unsigned long virt, phys_addr_t size,
					pgprot_t prot,
					phys_addr_t (*pgtable_alloc)(int),
					int flags)
{
	unsigned long addr, end, next;
	pgd_t *pgdp = pgd_offset_pgd(pgdir, virt);

	/*
	 * If the virtual and physical address don't have the same offset
	 * within a page, we cannot map the region as the caller expects.
	 */
	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
		return;

	phys &= PAGE_MASK;
	addr = virt & PAGE_MASK;
	end = PAGE_ALIGN(virt + size);

	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgdp, addr, next, phys, prot, pgtable_alloc,
			       flags);
		phys += next - addr;
	} while (pgdp++, addr = next, addr != end);
}

static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
				 unsigned long virt, phys_addr_t size,
				 pgprot_t prot,
				 phys_addr_t (*pgtable_alloc)(int),
				 int flags)
{
	mutex_lock(&fixmap_lock);
	__create_pgd_mapping_locked(pgdir, phys, virt, size, prot,
				    pgtable_alloc, flags);
	mutex_unlock(&fixmap_lock);
}

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
extern __alias(__create_pgd_mapping_locked)
void create_kpti_ng_temp_pgd(pgd_t *pgdir, phys_addr_t phys, unsigned long virt,
			     phys_addr_t size, pgprot_t prot,
			     phys_addr_t (*pgtable_alloc)(int), int flags);
#endif

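/*
 * Allocate a zeroed page table page from the page allocator and return its
 * physical address.
 */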
static phys_addr_t __pgd_pgtable_alloc(int shift)
{
	void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL);
	BUG_ON(!ptr);

	/* Ensure the zeroed page is visible to the page table walker */
	dsb(ishst);
	return __pa(ptr);
}

static phys_addr_t pgd_pgtable_alloc(int shift)
{
	phys_addr_t pa = __pgd_pgtable_alloc(shift);
	struct ptdesc *ptdesc = page_ptdesc(phys_to_page(pa));

	/*
	 * Call proper page table ctor in case later we need to
	 * call core mm functions like apply_to_page_range() on
	 * this pre-allocated page table.
	 *
	 * We don't select ARCH_ENABLE_SPLIT_PMD_PTLOCK if pmd is
	 * folded, and if so pagetable_pte_ctor() becomes nop.
	 */
	if (shift == PAGE_SHIFT)
		BUG_ON(!pagetable_pte_ctor(ptdesc));
	else if (shift == PMD_SHIFT)
		BUG_ON(!pagetable_pmd_ctor(ptdesc));

	return pa;
}

/*
 * This function can only be used to modify existing table entries,
 * without allocating new levels of table. Note that this permits the
 * creation of new section or page entries.
 */
void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
				   phys_addr_t size, pgprot_t prot)
{
	if (virt < PAGE_OFFSET) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
			     NO_CONT_MAPPINGS);
}

void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only)
{
	int flags = 0;

	BUG_ON(mm == &init_mm);

	if (page_mappings_only)
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
			     pgd_pgtable_alloc, flags);
}

static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
				phys_addr_t size, pgprot_t prot)
{
	if (virt < PAGE_OFFSET) {
		pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
			     NO_CONT_MAPPINGS);

	/* flush the TLBs after updating live kernel mappings */
	flush_tlb_kernel_range(virt, virt + size);
}

static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start,
				  phys_addr_t end, pgprot_t prot, int flags)
{
	__create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start,
			     prot, early_pgtable_alloc, flags);
}

void __init mark_linear_text_alias_ro(void)
{
	/*
	 * Remove the write permissions from the linear alias of .text/.rodata
	 */
	update_mapping_prot(__pa_symbol(_stext), (unsigned long)lm_alias(_stext),
			    (unsigned long)__init_begin - (unsigned long)_stext,
			    PAGE_KERNEL_RO);
}

#ifdef CONFIG_KFENCE

bool __ro_after_init kfence_early_init = !!CONFIG_KFENCE_SAMPLE_INTERVAL;

/* early_param() will be parsed before map_mem() below. */
static int __init parse_kfence_early_init(char *arg)
{
	int val;

	if (get_option(&arg, &val))
		kfence_early_init = !!val;
	return 0;
}
early_param("kfence.sample_interval", parse_kfence_early_init);

static phys_addr_t __init arm64_kfence_alloc_pool(void)
{
	phys_addr_t kfence_pool;

	if (!kfence_early_init)
		return 0;

	kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
	if (!kfence_pool) {
		pr_err("failed to allocate kfence pool\n");
		kfence_early_init = false;
		return 0;
	}

	/* Temporarily mark as NOMAP. */
	memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);

	return kfence_pool;
}

static void __init arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp)
{
	if (!kfence_pool)
		return;

	/* KFENCE pool needs page-level mapping. */
	__map_memblock(pgdp, kfence_pool, kfence_pool + KFENCE_POOL_SIZE,
			pgprot_tagged(PAGE_KERNEL),
			NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
	memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
	__kfence_pool = phys_to_virt(kfence_pool);
}
#else /* CONFIG_KFENCE */

static inline phys_addr_t arm64_kfence_alloc_pool(void) { return 0; }
static inline void arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) { }

#endif /* CONFIG_KFENCE */

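/*
 * Map all memblock memory into the linear region. The linear alias of the
 * kernel's text/rodata is mapped non-executable here; its write permission
 * is removed later by mark_linear_text_alias_ro().
 */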
static void __init map_mem(pgd_t *pgdp)
{
	static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
	phys_addr_t kernel_start = __pa_symbol(_stext);
	phys_addr_t kernel_end = __pa_symbol(__init_begin);
	phys_addr_t start, end;
	phys_addr_t early_kfence_pool;
	int flags = NO_EXEC_MAPPINGS;
	u64 i;

	/*
	 * Setting hierarchical PXNTable attributes on table entries covering
	 * the linear region is only possible if it is guaranteed that no table
	 * entries at any level are being shared between the linear region and
	 * the vmalloc region. Check whether this is true for the PGD level, in
	 * which case it is guaranteed to be true for all other levels as well.
	 */
	BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));

	early_kfence_pool = arm64_kfence_alloc_pool();

	if (can_set_direct_map())
		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	/*
	 * Take care not to create a writable alias for the
	 * read-only text and rodata sections of the kernel image.
	 * So temporarily mark them as NOMAP to skip mappings in
	 * the following for-loop
	 */
	memblock_mark_nomap(kernel_start, kernel_end - kernel_start);

	/* map all the memory banks */
	for_each_mem_range(i, &start, &end) {
		if (start >= end)
			break;
		/*
		 * The linear map must allow allocation tags reading/writing
		 * if MTE is present. Otherwise, it has the same attributes as
		 * PAGE_KERNEL.
		 */
		__map_memblock(pgdp, start, end, pgprot_tagged(PAGE_KERNEL),
			       flags);
	}

	/*
	 * Map the linear alias of the [_stext, __init_begin) interval
	 * as non-executable now, and remove the write permission in
	 * mark_linear_text_alias_ro() below (which will be called after
	 * alternative patching has completed). This makes the contents
	 * of the region accessible to subsystems such as hibernate,
	 * but protects it from inadvertent modification or execution.
	 * Note that contiguous mappings cannot be remapped in this way,
	 * so we should avoid them here.
	 */
	__map_memblock(pgdp, kernel_start, kernel_end,
		       PAGE_KERNEL, NO_CONT_MAPPINGS);
	memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
	arm64_kfence_map_pool(early_kfence_pool, pgdp);
}

void mark_rodata_ro(void)
{
	unsigned long section_size;

	/*
	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
	 * to cover NOTES and EXCEPTION_TABLE.
	 */
	section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
	update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata,
			    section_size, PAGE_KERNEL_RO);

	debug_checkwx();
}

static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
				      pgprot_t prot, struct vm_struct *vma,
				      int flags, unsigned long vm_flags)
{
	phys_addr_t pa_start = __pa_symbol(va_start);
	unsigned long size = va_end - va_start;

	BUG_ON(!PAGE_ALIGNED(pa_start));
	BUG_ON(!PAGE_ALIGNED(size));

	__create_pgd_mapping(pgdp, pa_start, (unsigned long)va_start, size, prot,
			     early_pgtable_alloc, flags);

	if (!(vm_flags & VM_NO_GUARD))
		size += PAGE_SIZE;

	vma->addr	= va_start;
	vma->phys_addr	= pa_start;
	vma->size	= size;
	vma->flags	= VM_MAP | vm_flags;
	vma->caller	= __builtin_return_address(0);

	vm_area_add_early(vma);
}

static pgprot_t kernel_exec_prot(void)
{
	return rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
}

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static int __init map_entry_trampoline(void)
{
	int i;

	pgprot_t prot = kernel_exec_prot();
	phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);

	/* The trampoline is always mapped and can therefore be global */
	pgprot_val(prot) &= ~PTE_NG;

	/* Map only the text into the trampoline page table */
	memset(tramp_pg_dir, 0, PGD_SIZE);
	__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS,
			     entry_tramp_text_size(), prot,
			     __pgd_pgtable_alloc, NO_BLOCK_MAPPINGS);

	/* Map both the text and data into the kernel page table */
	for (i = 0; i < DIV_ROUND_UP(entry_tramp_text_size(), PAGE_SIZE); i++)
		__set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i,
			     pa_start + i * PAGE_SIZE, prot);

	if (IS_ENABLED(CONFIG_RELOCATABLE))
		__set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i,
			     pa_start + i * PAGE_SIZE, PAGE_KERNEL_RO);

	return 0;
}
core_initcall(map_entry_trampoline);
#endif

/*
 * Open coded check for BTI, only for use to determine configuration
 * for early mappings for before the cpufeature code has run.
 */
static bool arm64_early_this_cpu_has_bti(void)
{
	u64 pfr1;

	if (!IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
		return false;

	pfr1 = __read_sysreg_by_encoding(SYS_ID_AA64PFR1_EL1);
	return cpuid_feature_extract_unsigned_field(pfr1,
						    ID_AA64PFR1_EL1_BT_SHIFT);
}

/*
 * Create fine-grained mappings for the kernel.
 */
static void __init map_kernel(pgd_t *pgdp)
{
	static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_inittext,
				vmlinux_initdata, vmlinux_data;

	/*
	 * External debuggers may need to write directly to the text
	 * mapping to install SW breakpoints. Allow this (only) when
	 * explicitly requested with rodata=off.
	 */
	pgprot_t text_prot = kernel_exec_prot();

	/*
	 * If we have a CPU that supports BTI and a kernel built for
	 * BTI then mark the kernel executable text as guarded pages
	 * now so we don't have to rewrite the page tables later.
	 */
	if (arm64_early_this_cpu_has_bti())
		text_prot = __pgprot_modify(text_prot, PTE_GP, PTE_GP);

	/*
	 * Only rodata will be remapped with different permissions later on,
	 * all other segments are allowed to use contiguous mappings.
	 */
	map_kernel_segment(pgdp, _stext, _etext, text_prot, &vmlinux_text, 0,
			   VM_NO_GUARD);
	map_kernel_segment(pgdp, __start_rodata, __inittext_begin, PAGE_KERNEL,
			   &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
	map_kernel_segment(pgdp, __inittext_begin, __inittext_end, text_prot,
			   &vmlinux_inittext, 0, VM_NO_GUARD);
	map_kernel_segment(pgdp, __initdata_begin, __initdata_end, PAGE_KERNEL,
			   &vmlinux_initdata, 0, VM_NO_GUARD);
	map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);

	fixmap_copy(pgdp);
	kasan_copy_shadow(pgdp);
}

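/*
 * Map the kernel's .idmap.text region into idmap_pg_dir and, with KPTI
 * enabled, add a read-write mapping of the G-to-nG synchronization flag.
 */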
static void __init create_idmap(void)
{
	u64 start = __pa_symbol(__idmap_text_start);
	u64 size = __pa_symbol(__idmap_text_end) - start;
	pgd_t *pgd = idmap_pg_dir;
	u64 pgd_phys;

	/* check if we need an additional level of translation */
	if (VA_BITS < 48 && idmap_t0sz < (64 - VA_BITS_MIN)) {
		pgd_phys = early_pgtable_alloc(PAGE_SHIFT);
		set_pgd(&idmap_pg_dir[start >> VA_BITS],
			__pgd(pgd_phys | P4D_TYPE_TABLE));
		pgd = __va(pgd_phys);
	}
	__create_pgd_mapping(pgd, start, start, size, PAGE_KERNEL_ROX,
			     early_pgtable_alloc, 0);

	if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
		extern u32 __idmap_kpti_flag;
		u64 pa = __pa_symbol(&__idmap_kpti_flag);

		/*
		 * The KPTI G-to-nG conversion code needs a read-write mapping
		 * of its synchronization flag in the ID map.
		 */
		__create_pgd_mapping(pgd, pa, pa, sizeof(u32), PAGE_KERNEL,
				     early_pgtable_alloc, 0);
	}
}

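/*
 * paging_init() sets up the kernel page tables: the kernel segments, the
 * linear map and the ID map, then switches TTBR1 over to swapper_pg_dir.
 */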
void __init paging_init(void)
{
	pgd_t *pgdp = pgd_set_fixmap(__pa_symbol(swapper_pg_dir));
	extern pgd_t init_idmap_pg_dir[];

	idmap_t0sz = 63UL - __fls(__pa_symbol(_end) | GENMASK(VA_BITS_MIN - 1, 0));

	map_kernel(pgdp);
	map_mem(pgdp);

	pgd_clear_fixmap();

	cpu_replace_ttbr1(lm_alias(swapper_pg_dir), init_idmap_pg_dir);
	init_mm.pgd = swapper_pg_dir;

	memblock_phys_free(__pa_symbol(init_pg_dir),
			   __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));

	memblock_allow_resize();

	create_idmap();
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void free_hotplug_page_range(struct page *page, size_t size,
				    struct vmem_altmap *altmap)
{
	if (altmap) {
		vmem_altmap_free(altmap, size >> PAGE_SHIFT);
	} else {
		WARN_ON(PageReserved(page));
		free_pages((unsigned long)page_address(page), get_order(size));
	}
}

static void free_hotplug_pgtable_page(struct page *page)
{
	free_hotplug_page_range(page, PAGE_SIZE, NULL);
}

static bool pgtable_range_aligned(unsigned long start, unsigned long end,
				  unsigned long floor, unsigned long ceiling,
				  unsigned long mask)
{
	start &= mask;
	if (start < floor)
		return false;

	if (ceiling) {
		ceiling &= mask;
		if (!ceiling)
			return false;
	}

	if (end - 1 > ceiling - 1)
		return false;
	return true;
}

static void unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr,
				    unsigned long end, bool free_mapped,
				    struct vmem_altmap *altmap)
{
	pte_t *ptep, pte;

	do {
		ptep = pte_offset_kernel(pmdp, addr);
		pte = READ_ONCE(*ptep);
		if (pte_none(pte))
			continue;

		WARN_ON(!pte_present(pte));
		pte_clear(&init_mm, addr, ptep);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
		if (free_mapped)
			free_hotplug_page_range(pte_page(pte),
						PAGE_SIZE, altmap);
	} while (addr += PAGE_SIZE, addr < end);
}

static void unmap_hotplug_pmd_range(pud_t *pudp, unsigned long addr,
				    unsigned long end, bool free_mapped,
				    struct vmem_altmap *altmap)
{
	unsigned long next;
	pmd_t *pmdp, pmd;

	do {
		next = pmd_addr_end(addr, end);
		pmdp = pmd_offset(pudp, addr);
		pmd = READ_ONCE(*pmdp);
		if (pmd_none(pmd))
			continue;

		WARN_ON(!pmd_present(pmd));
		if (pmd_sect(pmd)) {
			pmd_clear(pmdp);

			/*
			 * One TLBI should be sufficient here as the PMD_SIZE
			 * range is mapped with a single block entry.
			 */
			flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
			if (free_mapped)
				free_hotplug_page_range(pmd_page(pmd),
							PMD_SIZE, altmap);
			continue;
		}
		WARN_ON(!pmd_table(pmd));
		unmap_hotplug_pte_range(pmdp, addr, next, free_mapped, altmap);
	} while (addr = next, addr < end);
}

static void unmap_hotplug_pud_range(p4d_t *p4dp, unsigned long addr,
				    unsigned long end, bool free_mapped,
				    struct vmem_altmap *altmap)
{
	unsigned long next;
	pud_t *pudp, pud;

	do {
		next = pud_addr_end(addr, end);
		pudp = pud_offset(p4dp, addr);
		pud = READ_ONCE(*pudp);
		if (pud_none(pud))
			continue;

		WARN_ON(!pud_present(pud));
		if (pud_sect(pud)) {
			pud_clear(pudp);

			/*
			 * One TLBI should be sufficient here as the PUD_SIZE
			 * range is mapped with a single block entry.
			 */
			flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
			if (free_mapped)
				free_hotplug_page_range(pud_page(pud),
							PUD_SIZE, altmap);
			continue;
		}
		WARN_ON(!pud_table(pud));
		unmap_hotplug_pmd_range(pudp, addr, next, free_mapped, altmap);
	} while (addr = next, addr < end);
}

static void unmap_hotplug_p4d_range(pgd_t *pgdp, unsigned long addr,
				    unsigned long end, bool free_mapped,
				    struct vmem_altmap *altmap)
{
	unsigned long next;
	p4d_t *p4dp, p4d;

	do {
		next = p4d_addr_end(addr, end);
		p4dp = p4d_offset(pgdp, addr);
		p4d = READ_ONCE(*p4dp);
		if (p4d_none(p4d))
			continue;

		WARN_ON(!p4d_present(p4d));
		unmap_hotplug_pud_range(p4dp, addr, next, free_mapped, altmap);
	} while (addr = next, addr < end);
}

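/*
 * Unmap the kernel mapping for [addr, end), optionally freeing the mapped
 * pages (or releasing them to the altmap when one is provided).
 */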
static void unmap_hotplug_range(unsigned long addr, unsigned long end,
				bool free_mapped, struct vmem_altmap *altmap)
{
	unsigned long next;
	pgd_t *pgdp, pgd;

	/*
	 * altmap can only be used as vmemmap mapping backing memory.
	 * In case the backing memory itself is not being freed, then
	 * altmap is irrelevant. Warn about this inconsistency when
	 * encountered.
	 */
	WARN_ON(!free_mapped && altmap);

	do {
		next = pgd_addr_end(addr, end);
		pgdp = pgd_offset_k(addr);
		pgd = READ_ONCE(*pgdp);
		if (pgd_none(pgd))
			continue;

		WARN_ON(!pgd_present(pgd));
		unmap_hotplug_p4d_range(pgdp, addr, next, free_mapped, altmap);
	} while (addr = next, addr < end);
}

static void free_empty_pte_table(pmd_t *pmdp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	pte_t *ptep, pte;
	unsigned long i, start = addr;

	do {
		ptep = pte_offset_kernel(pmdp, addr);
		pte = READ_ONCE(*ptep);

		/*
		 * This is just a sanity check here which verifies that
		 * pte clearing has been done by earlier unmap loops.
		 */
		WARN_ON(!pte_none(pte));
	} while (addr += PAGE_SIZE, addr < end);

	if (!pgtable_range_aligned(start, end, floor, ceiling, PMD_MASK))
		return;

	/*
	 * Check whether we can free the pte page if the rest of the
	 * entries are empty. Overlap with other regions have been
	 * handled by the floor/ceiling check.
	 */
	ptep = pte_offset_kernel(pmdp, 0UL);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		if (!pte_none(READ_ONCE(ptep[i])))
			return;
	}

	pmd_clear(pmdp);
	__flush_tlb_kernel_pgtable(start);
	free_hotplug_pgtable_page(virt_to_page(ptep));
}

static void free_empty_pmd_table(pud_t *pudp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	pmd_t *pmdp, pmd;
	unsigned long i, next, start = addr;

	do {
		next = pmd_addr_end(addr, end);
		pmdp = pmd_offset(pudp, addr);
		pmd = READ_ONCE(*pmdp);
		if (pmd_none(pmd))
			continue;

		WARN_ON(!pmd_present(pmd) || !pmd_table(pmd) || pmd_sect(pmd));
		free_empty_pte_table(pmdp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);

	if (CONFIG_PGTABLE_LEVELS <= 2)
		return;

	if (!pgtable_range_aligned(start, end, floor, ceiling, PUD_MASK))
		return;

	/*
	 * Check whether we can free the pmd page if the rest of the
	 * entries are empty. Overlap with other regions have been
	 * handled by the floor/ceiling check.
	 */
	pmdp = pmd_offset(pudp, 0UL);
	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_none(READ_ONCE(pmdp[i])))
			return;
	}

	pud_clear(pudp);
	__flush_tlb_kernel_pgtable(start);
	free_hotplug_pgtable_page(virt_to_page(pmdp));
}

static void free_empty_pud_table(p4d_t *p4dp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	pud_t *pudp, pud;
	unsigned long i, next, start = addr;

	do {
		next = pud_addr_end(addr, end);
		pudp = pud_offset(p4dp, addr);
		pud = READ_ONCE(*pudp);
		if (pud_none(pud))
			continue;

		WARN_ON(!pud_present(pud) || !pud_table(pud) || pud_sect(pud));
		free_empty_pmd_table(pudp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);

	if (CONFIG_PGTABLE_LEVELS <= 3)
		return;

	if (!pgtable_range_aligned(start, end, floor, ceiling, PGDIR_MASK))
		return;

	/*
	 * Check whether we can free the pud page if the rest of the
	 * entries are empty. Overlap with other regions have been
	 * handled by the floor/ceiling check.
	 */
	pudp = pud_offset(p4dp, 0UL);
	for (i = 0; i < PTRS_PER_PUD; i++) {
		if (!pud_none(READ_ONCE(pudp[i])))
			return;
	}

	p4d_clear(p4dp);
	__flush_tlb_kernel_pgtable(start);
	free_hotplug_pgtable_page(virt_to_page(pudp));
}

static void free_empty_p4d_table(pgd_t *pgdp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	unsigned long next;
	p4d_t *p4dp, p4d;

	do {
		next = p4d_addr_end(addr, end);
		p4dp = p4d_offset(pgdp, addr);
		p4d = READ_ONCE(*p4dp);
		if (p4d_none(p4d))
			continue;

		WARN_ON(!p4d_present(p4d));
		free_empty_pud_table(p4dp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);
}

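/*
 * Free page table pages that no longer map anything in [addr, end), bounded
 * by floor/ceiling so tables shared with neighbouring regions are preserved.
 */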
static void free_empty_tables(unsigned long addr, unsigned long end,
			      unsigned long floor, unsigned long ceiling)
{
	unsigned long next;
	pgd_t *pgdp, pgd;

	do {
		next = pgd_addr_end(addr, end);
		pgdp = pgd_offset_k(addr);
		pgd = READ_ONCE(*pgdp);
		if (pgd_none(pgd))
			continue;

		WARN_ON(!pgd_present(pgd));
		free_empty_p4d_table(pgdp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);
}
#endif

void __meminit vmemmap_set_pmd(pmd_t *pmdp, void *p, int node,
			       unsigned long addr, unsigned long next)
{
	pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
}

int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
				unsigned long addr, unsigned long next)
{
	vmemmap_verify((pte_t *)pmdp, node, addr, next);
	return 1;
}

int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap)
{
	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));

	if (!IS_ENABLED(CONFIG_ARM64_4K_PAGES))
		return vmemmap_populate_basepages(start, end, node, altmap);
	else
		return vmemmap_populate_hugepages(start, end, node, altmap);
}

#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));

	unmap_hotplug_range(start, end, true, altmap);
	free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
{
	pud_t new_pud = pfn_pud(__phys_to_pfn(phys), mk_pud_sect_prot(prot));

	/* Only allow permission changes for now */
	if (!pgattr_change_is_safe(READ_ONCE(pud_val(*pudp)),
				   pud_val(new_pud)))
		return 0;

	VM_BUG_ON(phys & ~PUD_MASK);
	set_pud(pudp, new_pud);
	return 1;
}

int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
{
	pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), mk_pmd_sect_prot(prot));

	/* Only allow permission changes for now */
	if (!pgattr_change_is_safe(READ_ONCE(pmd_val(*pmdp)),
				   pmd_val(new_pmd)))
		return 0;

	VM_BUG_ON(phys & ~PMD_MASK);
	set_pmd(pmdp, new_pmd);
	return 1;
}

int pud_clear_huge(pud_t *pudp)
{
	if (!pud_sect(READ_ONCE(*pudp)))
		return 0;
	pud_clear(pudp);
	return 1;
}

int pmd_clear_huge(pmd_t *pmdp)
{
	if (!pmd_sect(READ_ONCE(*pmdp)))
		return 0;
	pmd_clear(pmdp);
	return 1;
}

int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
{
	pte_t *table;
	pmd_t pmd;

	pmd = READ_ONCE(*pmdp);

	if (!pmd_table(pmd)) {
		VM_WARN_ON(1);
		return 1;
	}

	table = pte_offset_kernel(pmdp, addr);
	pmd_clear(pmdp);
	__flush_tlb_kernel_pgtable(addr);
	pte_free_kernel(NULL, table);
	return 1;
}

int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
{
	pmd_t *table;
	pmd_t *pmdp;
	pud_t pud;
	unsigned long next, end;

	pud = READ_ONCE(*pudp);

	if (!pud_table(pud)) {
		VM_WARN_ON(1);
		return 1;
	}

	table = pmd_offset(pudp, addr);
	pmdp = table;
	next = addr;
	end = addr + PUD_SIZE;
	do {
		pmd_free_pte_page(pmdp, next);
	} while (pmdp++, next += PMD_SIZE, next != end);

	pud_clear(pudp);
	__flush_tlb_kernel_pgtable(addr);
	pmd_free(NULL, table);
	return 1;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
{
	unsigned long end = start + size;

	WARN_ON(pgdir != init_mm.pgd);
	WARN_ON((start < PAGE_OFFSET) || (end > PAGE_END));

	unmap_hotplug_range(start, end, false, NULL);
	free_empty_tables(start, end, PAGE_OFFSET, PAGE_END);
}

struct range arch_get_mappable_range(void)
{
	struct range mhp_range;
	u64 start_linear_pa = __pa(_PAGE_OFFSET(vabits_actual));
	u64 end_linear_pa = __pa(PAGE_END - 1);

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		/*
		 * Check for a wrap, it is possible because of randomized linear
		 * mapping the start physical address is actually bigger than
		 * the end physical address. In this case set start to zero
		 * because [0, end_linear_pa] range must still be able to cover
		 * all addressable physical addresses.
		 */
		if (start_linear_pa > end_linear_pa)
			start_linear_pa = 0;
	}

	WARN_ON(start_linear_pa > end_linear_pa);

	/*
	 * Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)]
	 * accommodating both its ends but excluding PAGE_END. Max physical
	 * range which can be mapped inside this linear mapping range, must
	 * also be derived from its end points.
	 */
	mhp_range.start = start_linear_pa;
	mhp_range.end =  end_linear_pa;

	return mhp_range;
}

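/*
 * Hot-add the [start, start + size) range: create its linear mapping and
 * hand the pages over to the core mm via __add_pages().
 */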
int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	int ret, flags = NO_EXEC_MAPPINGS;

	VM_BUG_ON(!mhp_range_allowed(start, size, true));

	if (can_set_direct_map())
		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
			     size, params->pgprot, __pgd_pgtable_alloc,
			     flags);

	memblock_clear_nomap(start, size);

	ret = __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT,
			   params);
	if (ret)
		__remove_pgd_mapping(swapper_pg_dir,
				     __phys_to_virt(start), size);
	else {
		max_pfn = PFN_UP(start + size);
		max_low_pfn = max_pfn;
	}

	return ret;
}

void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	__remove_pgd_mapping(swapper_pg_dir, __phys_to_virt(start), size);
}

/*
 * This memory hotplug notifier helps prevent boot memory from being
 * inadvertently removed as it blocks pfn range offlining process in
 * __offline_pages(). Hence this prevents both offlining as well as
 * removal process for boot memory which is initially always online.
 * In future if and when boot memory could be removed, this notifier
 * should be dropped and free_hotplug_page_range() should handle any
 * reserved pages allocated during boot.
 */
static int prevent_bootmem_remove_notifier(struct notifier_block *nb,
					   unsigned long action, void *data)
{
	struct mem_section *ms;
	struct memory_notify *arg = data;
	unsigned long end_pfn = arg->start_pfn + arg->nr_pages;
	unsigned long pfn = arg->start_pfn;

	if ((action != MEM_GOING_OFFLINE) && (action != MEM_OFFLINE))
		return NOTIFY_OK;

	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long start = PFN_PHYS(pfn);
		unsigned long end = start + (1UL << PA_SECTION_SHIFT);

		ms = __pfn_to_section(pfn);
		if (!early_section(ms))
			continue;

		if (action == MEM_GOING_OFFLINE) {
			/*
			 * Boot memory removal is not supported. Prevent
			 * it via blocking any attempted offline request
			 * for the boot memory and just report it.
			 */
			pr_warn("Boot memory [%lx %lx] offlining attempted\n", start, end);
			return NOTIFY_BAD;
		} else if (action == MEM_OFFLINE) {
			/*
			 * This should have never happened. Boot memory
			 * offlining should have been prevented by this
			 * very notifier. Probably some memory removal
			 * procedure might have changed which would then
			 * require further debug.
			 */
			pr_err("Boot memory [%lx %lx] offlined\n", start, end);

			/*
			 * Core memory hotplug does not process a return
			 * code from the notifier for MEM_OFFLINE events.
			 * The error condition has been reported. Return
			 * from here as if ignored.
			 */
			return NOTIFY_DONE;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block prevent_bootmem_remove_nb = {
	.notifier_call = prevent_bootmem_remove_notifier,
};

/*
 * This ensures that boot memory sections on the platform are online
 * from early boot. Memory sections could not be prevented from being
 * offlined, unless for some reason they are not online to begin with.
 * This helps validate the basic assumption on which the above memory
 * event notifier works to prevent boot memory section offlining and
 * its possible removal.
 */
static void validate_bootmem_online(void)
{
	phys_addr_t start, end, addr;
	struct mem_section *ms;
	u64 i;

	/*
	 * Scanning across all memblock might be expensive
	 * on some big memory systems. Hence enable this
	 * validation only with DEBUG_VM.
	 */
	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	for_each_mem_range(i, &start, &end) {
		for (addr = start; addr < end; addr += (1UL << PA_SECTION_SHIFT)) {
			ms = __pfn_to_section(PHYS_PFN(addr));

			/*
			 * All memory ranges in the system at this point
			 * should have been marked as early sections.
			 */
			WARN_ON(!early_section(ms));

			/*
			 * Memory notifier mechanism here to prevent boot
			 * memory offlining depends on the fact that each
			 * early section memory on the system is initially
			 * online. Otherwise a given memory section which
			 * is already offline will be overlooked and can
			 * be removed completely. Call out such sections.
			 */
			if (!online_section(ms))
				pr_err("Boot memory [%llx %llx] is offline, can be removed\n",
					addr, addr + (1UL << PA_SECTION_SHIFT));
		}
	}
}

static int __init prevent_bootmem_remove_init(void)
{
	int ret = 0;

	if (!IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
		return ret;

	validate_bootmem_online();
	ret = register_memory_notifier(&prevent_bootmem_remove_nb);
	if (ret)
		pr_err("%s: Notifier registration failed %d\n", __func__, ret);

	return ret;
}
early_initcall(prevent_bootmem_remove_init);
#endif

pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) {
		/*
		 * Break-before-make (BBM) is required for all user space mappings
		 * when the permission changes from executable to non-executable
		 * in cases where cpu is affected with errata #2645198.
		 */
		if (pte_user_exec(READ_ONCE(*ptep)))
			return ptep_clear_flush(vma, addr, ptep);
	}
	return ptep_get_and_clear(vma->vm_mm, addr, ptep);
}

void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
			     pte_t old_pte, pte_t pte)
{
	set_pte_at(vma->vm_mm, addr, ptep, pte);
}