Commit 17f8fc19 authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "We've got a smattering of changes all over the place which we've
  accrued since -rc1. To my knowledge, there aren't any pending issues at
  the moment, but there's still plenty of time for something else to
  crop up...

  Summary:

   - Fix booting a 52-bit-VA-aware kernel on Qualcomm Amberwing

   - Fix pfn_valid() not to reject all ZONE_DEVICE memory

   - Fix memory tagging setup for hotplugged memory regions

   - Fix KASAN tagging in page_alloc() when DEBUG_VIRTUAL is enabled

   - Fix accidental truncation of CPU PMU event counters

   - Fix error code initialisation when failing probe of DMC620 PMU

   - Fix return value initialisation for sve-ptrace selftest

   - Drop broken support for CMDLINE_EXTEND"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  perf/arm_dmc620_pmu: Fix error return code in dmc620_pmu_device_probe()
  arm64: mm: remove unused __cpu_uses_extended_idmap[_level()]
  arm64: mm: use a 48-bit ID map when possible on 52-bit VA builds
  arm64: perf: Fix 64-bit event counter read truncation
  arm64/mm: Fix __enable_mmu() for new TGRAN range values
  kselftest: arm64: Fix exit code of sve-ptrace
  arm64: mte: Map hotplugged memory as Normal Tagged
  arm64: kasan: fix page_alloc tagging with DEBUG_VIRTUAL
  arm64/mm: Reorganize pfn_valid()
  arm64/mm: Fix pfn_valid() for ZONE_DEVICE based memory
  arm64/mm: Drop THP conditionality from FORCE_MAX_ZONEORDER
  arm64/mm: Drop redundant ARCH_WANT_HUGE_PMD_SHARE
  arm64: Drop support for CMDLINE_EXTEND
  arm64: cpufeatures: Fix handling of CONFIG_CMDLINE for idreg overrides
parents 6bf8819f c8e38668
arch/arm64/Kconfig
@@ -1055,8 +1055,6 @@ config HW_PERF_EVENTS
 config SYS_SUPPORTS_HUGETLBFS
 	def_bool y
 
-config ARCH_WANT_HUGE_PMD_SHARE
-
 config ARCH_HAS_CACHE_LINE_SIZE
 	def_bool y
@@ -1157,8 +1155,8 @@ config XEN
 config FORCE_MAX_ZONEORDER
 	int
-	default "14" if (ARM64_64K_PAGES && TRANSPARENT_HUGEPAGE)
-	default "12" if (ARM64_16K_PAGES && TRANSPARENT_HUGEPAGE)
+	default "14" if ARM64_64K_PAGES
+	default "12" if ARM64_16K_PAGES
 	default "11"
 	help
 	  The kernel memory allocator divides physically contiguous memory
@@ -1855,12 +1853,6 @@ config CMDLINE_FROM_BOOTLOADER
 	  the boot loader doesn't provide any, the default kernel command
 	  string provided in CMDLINE will be used.
 
-config CMDLINE_EXTEND
-	bool "Extend bootloader kernel arguments"
-	help
-	  The command-line arguments provided by the boot loader will be
-	  appended to the default kernel command string.
-
 config CMDLINE_FORCE
 	bool "Always use the default kernel command string"
 	help
...
arch/arm64/include/asm/memory.h
@@ -328,6 +328,11 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define ARCH_PFN_OFFSET		((unsigned long)PHYS_PFN_OFFSET)
 
 #if !defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_DEBUG_VIRTUAL)
+#define page_to_virt(x)	({						\
+	__typeof__(x) __page = x;					\
+	void *__addr = __va(page_to_phys(__page));			\
+	(void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\
+})
 #define virt_to_page(x)	pfn_to_page(virt_to_pfn(x))
 #else
 #define page_to_virt(x)	({						\
...
arch/arm64/include/asm/mmu_context.h
@@ -63,23 +63,6 @@ static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
 extern u64 idmap_t0sz;
 extern u64 idmap_ptrs_per_pgd;
 
-static inline bool __cpu_uses_extended_idmap(void)
-{
-	if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52))
-		return false;
-
-	return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS));
-}
-
-/*
- * True if the extended ID map requires an extra level of translation table
- * to be configured.
- */
-static inline bool __cpu_uses_extended_idmap_level(void)
-{
-	return ARM64_HW_PGTABLE_LEVELS(64 - idmap_t0sz) > CONFIG_PGTABLE_LEVELS;
-}
-
 /*
  * Ensure TCR.T0SZ is set to the provided value.
  */
...
arch/arm64/include/asm/pgtable-prot.h
@@ -66,7 +66,6 @@ extern bool arm64_use_ng_mappings;
 #define _PAGE_DEFAULT		(_PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
 
 #define PAGE_KERNEL		__pgprot(PROT_NORMAL)
-#define PAGE_KERNEL_TAGGED	__pgprot(PROT_NORMAL_TAGGED)
 #define PAGE_KERNEL_RO		__pgprot((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY)
 #define PAGE_KERNEL_ROX		__pgprot((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY)
 #define PAGE_KERNEL_EXEC	__pgprot(PROT_NORMAL & ~PTE_PXN)
...
arch/arm64/include/asm/pgtable.h
@@ -486,6 +486,9 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd)
 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
 #define pgprot_device(prot) \
 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
+#define pgprot_tagged(prot) \
+	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED))
+#define pgprot_mhp	pgprot_tagged
 /*
  * DMA allocations for non-coherent devices use what the Arm architecture calls
  * "Normal non-cacheable" memory, which permits speculation, unaligned accesses
...
arch/arm64/include/asm/sysreg.h
@@ -796,6 +796,11 @@
 #define ID_AA64MMFR0_PARANGE_48		0x5
 #define ID_AA64MMFR0_PARANGE_52		0x6
 
+#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT	0x0
+#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE	0x1
+#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN	0x2
+#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX	0x7
+
 #ifdef CONFIG_ARM64_PA_BITS_52
 #define ID_AA64MMFR0_PARANGE_MAX	ID_AA64MMFR0_PARANGE_52
 #else
@@ -961,14 +966,17 @@
 #define ID_PFR1_PROGMOD_SHIFT		0
 
 #if defined(CONFIG_ARM64_4K_PAGES)
 #define ID_AA64MMFR0_TGRAN_SHIFT		ID_AA64MMFR0_TGRAN4_SHIFT
-#define ID_AA64MMFR0_TGRAN_SUPPORTED	ID_AA64MMFR0_TGRAN4_SUPPORTED
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN	ID_AA64MMFR0_TGRAN4_SUPPORTED
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX	0x7
 #elif defined(CONFIG_ARM64_16K_PAGES)
 #define ID_AA64MMFR0_TGRAN_SHIFT		ID_AA64MMFR0_TGRAN16_SHIFT
-#define ID_AA64MMFR0_TGRAN_SUPPORTED	ID_AA64MMFR0_TGRAN16_SUPPORTED
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN	ID_AA64MMFR0_TGRAN16_SUPPORTED
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX	0xF
 #elif defined(CONFIG_ARM64_64K_PAGES)
 #define ID_AA64MMFR0_TGRAN_SHIFT		ID_AA64MMFR0_TGRAN64_SHIFT
-#define ID_AA64MMFR0_TGRAN_SUPPORTED	ID_AA64MMFR0_TGRAN64_SUPPORTED
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN	ID_AA64MMFR0_TGRAN64_SUPPORTED
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX	0x7
 #endif
 
 #define MVFR2_FPMISC_SHIFT		4
...
arch/arm64/kernel/head.S
@@ -319,7 +319,7 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
 	 */
 	adrp	x5, __idmap_text_end
 	clz	x5, x5
-	cmp	x5, TCR_T0SZ(VA_BITS)	// default T0SZ small enough?
+	cmp	x5, TCR_T0SZ(VA_BITS_MIN) // default T0SZ small enough?
 	b.ge	1f			// .. then skip VA range extension
 
 	adr_l	x6, idmap_t0sz
@@ -655,8 +655,10 @@ SYM_FUNC_END(__secondary_too_slow)
 SYM_FUNC_START(__enable_mmu)
 	mrs	x2, ID_AA64MMFR0_EL1
 	ubfx	x2, x2, #ID_AA64MMFR0_TGRAN_SHIFT, 4
-	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
-	b.ne	__no_granule_support
+	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED_MIN
+	b.lt	__no_granule_support
+	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED_MAX
+	b.gt	__no_granule_support
 	update_early_cpu_boot_status 0, x2, x3
 	adrp	x2, idmap_pg_dir
 	phys_to_ttbr x1, x1
...
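The ubfx above zero-extends a 4-bit ID register field, so the b.lt/b.gt pair implements an inclusive range check: any TGRAN value outside [TGRAN_SUPPORTED_MIN, TGRAN_SUPPORTED_MAX], including the 0xF "not implemented" encoding, now takes the __no_granule_support path, where the old b.ne accepted only a single value. A stand-alone C sketch of the same check (the 4 KiB-page shift and bounds below are assumptions for illustration, mirroring the sysreg.h hunk above):

	#include <stdio.h>

	/* assumed: TGran4 lives at bits [31:28] of ID_AA64MMFR0_EL1 */
	#define ID_AA64MMFR0_TGRAN_SHIFT		28
	#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN	0x0
	#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX	0x7

	static int granule_supported(unsigned long long mmfr0)
	{
		unsigned int tg = (mmfr0 >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf;

		/* same inclusive range check as the b.lt/b.gt pair above */
		return tg >= ID_AA64MMFR0_TGRAN_SUPPORTED_MIN &&
		       tg <= ID_AA64MMFR0_TGRAN_SUPPORTED_MAX;
	}

	int main(void)
	{
		printf("%d\n", granule_supported(0xfULL << 28));	/* 0: not implemented */
		printf("%d\n", granule_supported(0x1ULL << 28));	/* 1: new range value */
		return 0;
	}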
arch/arm64/kernel/idreg-override.c
@@ -163,33 +163,36 @@ static __init void __parse_cmdline(const char *cmdline, bool parse_aliases)
 	} while (1);
 }
 
-static __init void parse_cmdline(void)
+static __init const u8 *get_bootargs_cmdline(void)
 {
-	if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
-		const u8 *prop;
-		void *fdt;
-		int node;
+	const u8 *prop;
+	void *fdt;
+	int node;
 
-		fdt = get_early_fdt_ptr();
-		if (!fdt)
-			goto out;
+	fdt = get_early_fdt_ptr();
+	if (!fdt)
+		return NULL;
 
-		node = fdt_path_offset(fdt, "/chosen");
-		if (node < 0)
-			goto out;
+	node = fdt_path_offset(fdt, "/chosen");
+	if (node < 0)
+		return NULL;
 
-		prop = fdt_getprop(fdt, node, "bootargs", NULL);
-		if (!prop)
-			goto out;
+	prop = fdt_getprop(fdt, node, "bootargs", NULL);
+	if (!prop)
+		return NULL;
 
-		__parse_cmdline(prop, true);
+	return strlen(prop) ? prop : NULL;
+}
 
-		if (!IS_ENABLED(CONFIG_CMDLINE_EXTEND))
-			return;
-	}
+static __init void parse_cmdline(void)
+{
+	const u8 *prop = get_bootargs_cmdline();
 
-out:
-	__parse_cmdline(CONFIG_CMDLINE, true);
+	if (IS_ENABLED(CONFIG_CMDLINE_FORCE) || !prop)
+		__parse_cmdline(CONFIG_CMDLINE, true);
+
+	if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && prop)
+		__parse_cmdline(prop, true);
 }
 
 /* Keep checkers quiet */
...
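The net behaviour of the rewrite: with CMDLINE_FORCE, only CONFIG_CMDLINE is parsed; otherwise a non-empty bootargs property wins and CONFIG_CMDLINE is parsed only as the fallback (the CMDLINE_EXTEND append mode is gone, matching the Kconfig removal above). A toy model of the new control flow, runnable in isolation (the parameters here are stand-ins for the Kconfig option and firmware state, not kernel APIs):

	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	/* toy model of the new parse_cmdline() decision */
	static void parse_cmdline(bool cmdline_force, const char *bootargs,
				  const char *config_cmdline)
	{
		const char *prop = (bootargs && strlen(bootargs)) ? bootargs : NULL;

		if (cmdline_force || !prop)
			printf("parse: %s\n", config_cmdline);

		if (!cmdline_force && prop)
			printf("parse: %s\n", prop);
	}

	int main(void)
	{
		parse_cmdline(false, "console=ttyAMA0", "loglevel=7"); /* bootargs wins */
		parse_cmdline(false, "",                "loglevel=7"); /* fallback */
		parse_cmdline(true,  "console=ttyAMA0", "loglevel=7"); /* forced */
		return 0;
	}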
arch/arm64/kernel/perf_event.c
@@ -460,7 +460,7 @@ static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
 	return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
 }
 
-static inline u32 armv8pmu_read_evcntr(int idx)
+static inline u64 armv8pmu_read_evcntr(int idx)
 {
 	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
...
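The type change is the whole fix: armv8pmu_read_evcntr() feeds a 64-bit counter value back to its caller, and the old u32 return type silently dropped the upper half on PMUs using 64-bit event counters. A minimal stand-alone illustration of that truncation (the values are hypothetical, not taken from the driver):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* a 64-bit PMU counter value with bits above 31 set */
		uint64_t counter = 0x100000001ULL;

		/* returning it through a u32-typed helper drops the high word... */
		uint32_t truncated = (uint32_t)counter;

		/* ...so the caller sees 0x1 instead of 0x100000001 */
		printf("full=%#llx truncated=%#x\n",
		       (unsigned long long)counter, truncated);
		return 0;
	}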
arch/arm64/kvm/reset.c
@@ -311,16 +311,18 @@ int kvm_set_ipa_limit(void)
 	}
 
 	switch (cpuid_feature_extract_unsigned_field(mmfr0, tgran_2)) {
-	default:
-	case 1:
+	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE:
 		kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
 		return -EINVAL;
-	case 0:
+	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT:
 		kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n");
 		break;
-	case 2:
+	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX:
 		kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n");
 		break;
+	default:
+		kvm_err("Unsupported value for TGRAN_2, giving up\n");
+		return -EINVAL;
 	}
 
 	kvm_ipa_limit = id_aa64mmfr0_parange_to_phys_shift(parange);
...
arch/arm64/mm/init.c
@@ -219,17 +219,40 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 int pfn_valid(unsigned long pfn)
 {
-	phys_addr_t addr = pfn << PAGE_SHIFT;
+	phys_addr_t addr = PFN_PHYS(pfn);
 
-	if ((addr >> PAGE_SHIFT) != pfn)
+	/*
+	 * Ensure the upper PAGE_SHIFT bits are clear in the
+	 * pfn. Else it might lead to false positives when
+	 * some of the upper bits are set, but the lower bits
+	 * match a valid pfn.
+	 */
+	if (PHYS_PFN(addr) != pfn)
 		return 0;
 
 #ifdef CONFIG_SPARSEMEM
+{
+	struct mem_section *ms;
+
 	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
 		return 0;
 
-	if (!valid_section(__pfn_to_section(pfn)))
+	ms = __pfn_to_section(pfn);
+	if (!valid_section(ms))
 		return 0;
+
+	/*
+	 * ZONE_DEVICE memory does not have the memblock entries.
+	 * memblock_is_map_memory() check for ZONE_DEVICE based
+	 * addresses will always fail. Even the normal hotplugged
+	 * memory will never have MEMBLOCK_NOMAP flag set in their
+	 * memblock entries. Skip memblock search for all non early
+	 * memory sections covering all of hotplug memory including
+	 * both normal and ZONE_DEVICE based.
+	 */
+	if (!early_section(ms))
+		return pfn_section_valid(ms, pfn);
+}
 #endif
 	return memblock_is_map_memory(addr);
 }
...
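The round-trip comparison at the top works because PFN_PHYS() shifts the pfn left by PAGE_SHIFT within a fixed-width phys_addr_t, so any of the pfn's top PAGE_SHIFT bits fall off and cannot survive the shift back. A stand-alone worked example (assumes 4 KiB pages, so PAGE_SHIFT == 12, and a 64-bit physical address, purely for illustration):

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12	/* assumed: 4 KiB pages */

	int main(void)
	{
		/* a bogus pfn with one of its top PAGE_SHIFT bits set */
		uint64_t pfn = (1ULL << 53) | 0x1234;

		uint64_t addr = pfn << PAGE_SHIFT;	/* bit 53 shifts out past bit 63 */

		/* the shift back no longer matches, so such a pfn is rejected */
		printf("%s\n", (addr >> PAGE_SHIFT) == pfn ? "valid" : "rejected");
		return 0;
	}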
arch/arm64/mm/mmu.c
@@ -40,7 +40,7 @@
 #define NO_BLOCK_MAPPINGS	BIT(0)
 #define NO_CONT_MAPPINGS	BIT(1)
 
-u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
+u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN);
 u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
 
 u64 __section(".mmuoff.data.write") vabits_actual;
@@ -512,7 +512,8 @@ static void __init map_mem(pgd_t *pgdp)
 		 * if MTE is present. Otherwise, it has the same attributes as
 		 * PAGE_KERNEL.
 		 */
-		__map_memblock(pgdp, start, end, PAGE_KERNEL_TAGGED, flags);
+		__map_memblock(pgdp, start, end, pgprot_tagged(PAGE_KERNEL),
+			       flags);
 	}
 
 	/*
...
drivers/firmware/efi/libstub/arm64-stub.c
@@ -24,7 +24,7 @@ efi_status_t check_platform_features(void)
 		return EFI_SUCCESS;
 
 	tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf;
-	if (tg != ID_AA64MMFR0_TGRAN_SUPPORTED) {
+	if (tg < ID_AA64MMFR0_TGRAN_SUPPORTED_MIN || tg > ID_AA64MMFR0_TGRAN_SUPPORTED_MAX) {
 		if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
 			efi_err("This 64 KB granular kernel is not supported by your CPU\n");
 		else
...
drivers/perf/arm_dmc620_pmu.c
@@ -681,6 +681,7 @@ static int dmc620_pmu_device_probe(struct platform_device *pdev)
 	if (!name) {
 		dev_err(&pdev->dev,
 			  "Create name failed, PMU @%pa\n", &res->start);
+		ret = -ENOMEM;
 		goto out_teardown_dev;
 	}
 
...
include/linux/pgtable.h
@@ -904,6 +904,10 @@ static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
 #define pgprot_device pgprot_noncached
 #endif
 
+#ifndef pgprot_mhp
+#define pgprot_mhp(prot)	(prot)
+#endif
+
 #ifdef CONFIG_MMU
 #ifndef pgprot_modify
 #define pgprot_modify pgprot_modify
...
mm/memory_hotplug.c
@@ -1072,7 +1072,7 @@ static int online_memory_block(struct memory_block *mem, void *arg)
  */
 int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
 {
-	struct mhp_params params = { .pgprot = PAGE_KERNEL };
+	struct mhp_params params = { .pgprot = pgprot_mhp(PAGE_KERNEL) };
 	u64 start, size;
 	bool new_node = false;
 	int ret;
...
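Taken together with the asm/pgtable.h and include/linux/pgtable.h hunks, this lets an architecture transparently pick the page protection for hotplugged memory: arm64 aliases pgprot_mhp to pgprot_tagged, and every other architecture falls through to the identity default. A toy model of the override pattern (the types and bit values are invented solely to make the effect visible, not kernel definitions):

	#include <stdio.h>

	typedef unsigned long pgprot_t;			/* toy stand-in */

	#define pgprot_tagged(prot)	((prot) | 0x1UL)	/* invented "tagged" attribute bit */
	#define pgprot_mhp		pgprot_tagged		/* arm64-style override */

	#ifndef pgprot_mhp
	#define pgprot_mhp(prot)	(prot)			/* generic identity fallback */
	#endif

	int main(void)
	{
		pgprot_t page_kernel = 0x2UL;	/* invented PAGE_KERNEL bits */

		/* mm/memory_hotplug.c now does: .pgprot = pgprot_mhp(PAGE_KERNEL) */
		printf("%#lx\n", pgprot_mhp(page_kernel));	/* 0x3 with the override */
		return 0;
	}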
tools/testing/selftests/arm64/fp/sve-ptrace.c
@@ -332,5 +332,5 @@ int main(void)
 	ksft_print_cnts();
 
-	return 0;
+	return ret;
 }
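The selftest change is the usual exit-status discipline: main() has been accumulating a result in ret, and returning the literal 0 reported success even after failures. In miniature (do_test() is a hypothetical stand-in for the real SVE ptrace checks):

	#include <stdlib.h>

	/* hypothetical stand-in for the real test body */
	static int do_test(void)
	{
		return -1;	/* pretend the test failed */
	}

	int main(void)
	{
		int ret = EXIT_SUCCESS;

		if (do_test() < 0)
			ret = EXIT_FAILURE;

		/* returning the literal 0 here would mask the failure */
		return ret;
	}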