Commit 8e620b04 authored by Catalin Marinas

arm64: Distinguish between user and kernel XN bits

On AArch64, the meaning of the XN bit has changed to UXN (user execute-never). The PXN
(privileged execute-never) bit must be set separately to prevent kernel execution. Without
the PXN bit set, a mapping remains executable at kernel level, so the CPU may speculatively
fetch instructions from it, including from device memory. This patch ensures that all the
mappings that the kernel must not execute from (including user mappings) have the PXN bit set.
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 77b67063
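For reference, the following is a minimal user-space C sketch (not kernel code) of how the split execute-never bits behave after this change: UXN (bit 54) gates user (EL0) execution and PXN (bit 53) gates kernel (EL1) execution, so kernel text clears only PXN, user-executable pages clear only UXN, and device mappings set both. The demo_* helpers and the main() harness are illustrative assumptions, not part of the kernel API; only the bit positions and the protection combinations come from the hunks below.

/*
 * Stand-alone illustration of the PXN/UXN split in an AArch64 page
 * descriptor. Bit positions match PTE_PXN/PTE_UXN below; the helper
 * names are invented for this demo and do not exist in the kernel.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pteval_t;

#define PTE_PXN	((pteval_t)1 << 53)	/* Privileged eXecute Never */
#define PTE_UXN	((pteval_t)1 << 54)	/* User eXecute Never */

/* Executable at EL0 (user) only if UXN is clear. */
static int demo_user_exec(pteval_t pte)   { return !(pte & PTE_UXN); }
/* Executable at EL1 (kernel) only if PXN is clear. */
static int demo_kernel_exec(pteval_t pte) { return !(pte & PTE_PXN); }

int main(void)
{
	pteval_t kernel_text = PTE_UXN;			/* as in PAGE_KERNEL_EXEC: UXN set, PXN clear */
	pteval_t user_exec   = PTE_PXN;			/* as in PAGE_SHARED_EXEC: PXN set, UXN clear */
	pteval_t device      = PTE_PXN | PTE_UXN;	/* as in PROT_DEVICE_nGnRE: never executable */

	printf("kernel text: user=%d kernel=%d\n", demo_user_exec(kernel_text), demo_kernel_exec(kernel_text));
	printf("user exec  : user=%d kernel=%d\n", demo_user_exec(user_exec), demo_kernel_exec(user_exec));
	printf("device     : user=%d kernel=%d\n", demo_user_exec(device), demo_kernel_exec(device));
	return 0;
}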
@@ -222,7 +222,7 @@ extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot
 extern void __iounmap(volatile void __iomem *addr);
 #define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY)
-#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_XN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
 #define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC))
 #define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
@@ -38,7 +38,8 @@
 #define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
 #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
 #define PMD_SECT_NG (_AT(pmdval_t, 1) << 11)
-#define PMD_SECT_XN (_AT(pmdval_t, 1) << 54)
+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 53)
+#define PMD_SECT_UXN (_AT(pmdval_t, 1) << 54)
 /*
  * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
@@ -57,7 +58,8 @@
 #define PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
 #define PTE_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
 #define PTE_NG (_AT(pteval_t, 1) << 11) /* nG */
-#define PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
+#define PTE_PXN (_AT(pteval_t, 1) << 53) /* Privileged XN */
+#define PTE_UXN (_AT(pteval_t, 1) << 54) /* User XN */
 /*
  * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
@@ -62,23 +62,23 @@ extern pgprot_t pgprot_default;
 #define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))
-#define PAGE_NONE _MOD_PROT(pgprot_default, PTE_NG | PTE_XN | PTE_RDONLY)
-#define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN)
-#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG)
-#define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
-#define PAGE_COPY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_RDONLY)
-#define PAGE_READONLY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
-#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_RDONLY)
-#define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_XN | PTE_DIRTY)
-#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_DIRTY)
-#define __PAGE_NONE __pgprot(_PAGE_DEFAULT | PTE_NG | PTE_XN | PTE_RDONLY)
-#define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN)
-#define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG)
-#define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
-#define __PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_RDONLY)
-#define __PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
-#define __PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_RDONLY)
+#define PAGE_NONE _MOD_PROT(pgprot_default, PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
+#define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define PAGE_COPY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
+#define PAGE_READONLY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
+#define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY)
+#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY)
+#define __PAGE_NONE __pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
+#define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define __PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
+#define __PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define __PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
 #endif /* __ASSEMBLY__ */
@@ -130,10 +130,10 @@ extern struct page *empty_zero_page;
 #define pte_young(pte) (pte_val(pte) & PTE_AF)
 #define pte_special(pte) (pte_val(pte) & PTE_SPECIAL)
 #define pte_write(pte) (!(pte_val(pte) & PTE_RDONLY))
-#define pte_exec(pte) (!(pte_val(pte) & PTE_XN))
+#define pte_exec(pte) (!(pte_val(pte) & PTE_UXN))
 #define pte_present_exec_user(pte) \
-	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_XN)) == \
+	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == \
 	 (PTE_VALID | PTE_USER))
 #define PTE_BIT_FUNC(fn,op) \
@@ -262,7 +262,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-	const pteval_t mask = PTE_USER | PTE_XN | PTE_RDONLY;
+	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY;
 	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
 	return pte;
 }