Commit 1728ab54, authored by Jarkko Sakkinen, committed by Borislav Petkov

x86/sgx: Add a page reclaimer

Just like normal RAM, there is a limited amount of enclave memory available
and overcommitting it is a very valuable tool to reduce resource use.
Introduce a simple reclaim mechanism for enclave pages.

In contrast to normal page reclaim, the kernel cannot directly access
enclave memory.  To get around this, the SGX architecture provides a set of
functions to help.  Among other things, these functions copy enclave memory
to and from normal memory, encrypting it and protecting its integrity in
the process.

Implement a page reclaimer by using these functions. It picks victim pages in
LRU fashion from all the enclaves running in the system.  A new kernel
thread (ksgxswapd) reclaims pages in the background based on watermarks,
similar to normal kswapd.

All enclave pages can be reclaimed, architecturally.  But, there are some
limits to this, such as the special SECS metadata page which must be
reclaimed last.  The page version array (used to mitigate replaying old
reclaimed pages) is also architecturally reclaimable, but not yet
implemented.  The end result is that the vast majority of enclave pages are
currently reclaimable.
Co-developed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Jethro Beekman <jethro@fortanix.com>
Link: https://lkml.kernel.org/r/20201112220135.165028-22-jarkko@kernel.org
parent 2adcba79
......@@ -17,13 +17,24 @@ u32 sgx_misc_reserved_mask;
static int sgx_open(struct inode *inode, struct file *file)
{
struct sgx_encl *encl;
int ret;
encl = kzalloc(sizeof(*encl), GFP_KERNEL);
if (!encl)
return -ENOMEM;
kref_init(&encl->refcount);
xa_init(&encl->page_array);
mutex_init(&encl->lock);
INIT_LIST_HEAD(&encl->va_pages);
INIT_LIST_HEAD(&encl->mm_list);
spin_lock_init(&encl->mm_lock);
ret = init_srcu_struct(&encl->srcu);
if (ret) {
kfree(encl);
return ret;
}
file->private_data = encl;
......@@ -33,31 +44,37 @@ static int sgx_open(struct inode *inode, struct file *file)
static int sgx_release(struct inode *inode, struct file *file)
{
struct sgx_encl *encl = file->private_data;
struct sgx_encl_page *entry;
unsigned long index;
xa_for_each(&encl->page_array, index, entry) {
if (entry->epc_page) {
sgx_free_epc_page(entry->epc_page);
encl->secs_child_cnt--;
entry->epc_page = NULL;
struct sgx_encl_mm *encl_mm;
/*
* Drain the remaining mm_list entries. At this point the list contains
* entries for processes, which have closed the enclave file but have
* not exited yet. The processes, which have exited, are gone from the
* list by sgx_mmu_notifier_release().
*/
for ( ; ; ) {
spin_lock(&encl->mm_lock);
if (list_empty(&encl->mm_list)) {
encl_mm = NULL;
} else {
encl_mm = list_first_entry(&encl->mm_list,
struct sgx_encl_mm, list);
list_del_rcu(&encl_mm->list);
}
kfree(entry);
}
spin_unlock(&encl->mm_lock);
xa_destroy(&encl->page_array);
/* The enclave is no longer mapped by any mm. */
if (!encl_mm)
break;
if (!encl->secs_child_cnt && encl->secs.epc_page) {
sgx_free_epc_page(encl->secs.epc_page);
encl->secs.epc_page = NULL;
synchronize_srcu(&encl->srcu);
mmu_notifier_unregister(&encl_mm->mmu_notifier, encl_mm->mm);
kfree(encl_mm);
}
/* Detect EPC page leaks. */
WARN_ON_ONCE(encl->secs_child_cnt);
WARN_ON_ONCE(encl->secs.epc_page);
kfree(encl);
kref_put(&encl->refcount, sgx_encl_release);
return 0;
}
......@@ -70,6 +87,10 @@ static int sgx_mmap(struct file *file, struct vm_area_struct *vma)
if (ret)
return ret;
ret = sgx_encl_mm_add(encl, vma->vm_mm);
if (ret)
return ret;
vma->vm_ops = &sgx_vm_ops;
vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
vma->vm_private_data = encl;
......
This diff is collapsed.
......@@ -19,11 +19,18 @@
#include <linux/xarray.h>
#include "sgx.h"
/* 'desc' bits holding the offset in the VA (version array) page. */
#define SGX_ENCL_PAGE_VA_OFFSET_MASK GENMASK_ULL(11, 3)
/* 'desc' bit marking that the page is being reclaimed. */
#define SGX_ENCL_PAGE_BEING_RECLAIMED BIT(3)
/*
 * Per-page bookkeeping for one enclave page.  'desc' is a packed field:
 * it carries the page's enclave address (it is keyed into encl->page_array
 * via PFN_DOWN(desc)) plus the flag/offset bits defined by the masks above.
 * NOTE(review): SGX_ENCL_PAGE_BEING_RECLAIMED overlaps bit 3 of the VA
 * offset mask — presumably the two uses are mutually exclusive in time;
 * confirm against the reclaimer code.
 */
struct sgx_encl_page {
unsigned long desc; /* enclave address + flag bits (see masks above) */
unsigned long vm_max_prot_bits; /* max VM protection bits for mappings — assumed from SECINFO; TODO confirm */
struct sgx_epc_page *epc_page; /* backing EPC page; presumably NULL while reclaimed — confirm */
struct sgx_encl *encl; /* owning enclave */
struct sgx_va_page *va_page; /* VA page holding this page's version slot — assumed; confirm */
};
enum sgx_encl_flags {
......@@ -33,6 +40,13 @@ enum sgx_encl_flags {
SGX_ENCL_INITIALIZED = BIT(3),
};
/*
 * Tracks one mm_struct that maps the enclave.  Entries live on
 * encl->mm_list under encl->mm_lock (removed with list_del_rcu(); readers
 * are protected by encl->srcu) and are torn down either from the MMU
 * notifier release path when the process exits, or drained by
 * sgx_release() when the enclave file is closed.
 */
struct sgx_encl_mm {
struct sgx_encl *encl; /* back-pointer to the enclave */
struct mm_struct *mm; /* address space mapping the enclave */
struct list_head list; /* node in encl->mm_list */
struct mmu_notifier mmu_notifier; /* unregistered in sgx_release() */
};
struct sgx_encl {
unsigned long base;
unsigned long size;
......@@ -44,6 +58,30 @@ struct sgx_encl {
struct sgx_encl_page secs;
unsigned long attributes;
unsigned long attributes_mask;
cpumask_t cpumask;
struct file *backing;
struct kref refcount;
struct list_head va_pages;
unsigned long mm_list_version;
struct list_head mm_list;
spinlock_t mm_lock;
struct srcu_struct srcu;
};
/* Number of version slots provided by a single VA page. */
#define SGX_VA_SLOT_COUNT 512
/*
 * A Version Array (VA) page: an EPC page whose slots hold version numbers
 * for reclaimed enclave pages, used to prevent replay of old reclaimed
 * page contents (see the commit description).  Slot occupancy is tracked
 * in 'slots' via sgx_alloc_va_slot()/sgx_free_va_slot().
 */
struct sgx_va_page {
struct sgx_epc_page *epc_page; /* the backing EPC page */
DECLARE_BITMAP(slots, SGX_VA_SLOT_COUNT); /* which slots are in use */
struct list_head list; /* node in encl->va_pages */
};
/*
 * Reference to one page of the enclave's shmem backing storage together
 * with its metadata location; obtained with sgx_encl_get_backing() and
 * released with sgx_encl_put_backing().
 */
struct sgx_backing {
pgoff_t page_index; /* index of the page in the backing file */
struct page *contents; /* page holding the encrypted page contents */
struct page *pcmd; /* page holding the metadata — presumably SGX "Paging Crypto MetaData"; confirm vs. SDM */
unsigned long pcmd_offset; /* byte offset of this page's metadata within 'pcmd' */
};
extern const struct vm_operations_struct sgx_vm_ops;
......@@ -65,4 +103,17 @@ static inline int sgx_encl_find(struct mm_struct *mm, unsigned long addr,
int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
unsigned long end, unsigned long vm_flags);
void sgx_encl_release(struct kref *ref);
int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm);
int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
struct sgx_backing *backing);
void sgx_encl_put_backing(struct sgx_backing *backing, bool do_write);
int sgx_encl_test_and_clear_young(struct mm_struct *mm,
struct sgx_encl_page *page);
struct sgx_epc_page *sgx_alloc_va_page(void);
unsigned int sgx_alloc_va_slot(struct sgx_va_page *va_page);
void sgx_free_va_slot(struct sgx_va_page *va_page, unsigned int offset);
bool sgx_va_page_full(struct sgx_va_page *va_page);
#endif /* _X86_ENCL_H */
......@@ -16,20 +16,77 @@
#include "encl.h"
#include "encls.h"
/*
 * sgx_encl_grow() - account for one more enclave page, allocating a fresh
 * Version Array (VA) page whenever the previous one has run out of slots
 * (one new VA page per SGX_VA_SLOT_COUNT enclave pages).
 *
 * Return: a newly allocated VA page, which the caller must link into
 * encl->va_pages (done under encl->lock at the call sites); NULL when the
 * tail VA page still has free slots; or an ERR_PTR() on allocation failure.
 */
static struct sgx_va_page *sgx_encl_grow(struct sgx_encl *encl)
{
struct sgx_va_page *va_page = NULL;
void *err;
/* The VA-offset field of 'desc' must be able to index every slot. */
BUILD_BUG_ON(SGX_VA_SLOT_COUNT !=
(SGX_ENCL_PAGE_VA_OFFSET_MASK >> 3) + 1);
if (!(encl->page_cnt % SGX_VA_SLOT_COUNT)) {
va_page = kzalloc(sizeof(*va_page), GFP_KERNEL);
if (!va_page)
return ERR_PTR(-ENOMEM);
va_page->epc_page = sgx_alloc_va_page();
if (IS_ERR(va_page->epc_page)) {
err = ERR_CAST(va_page->epc_page);
kfree(va_page);
return err;
}
/*
 * NOTE(review): looks redundant with the enclosing if-condition;
 * possibly intended as a canary for a concurrent page_cnt update
 * (callers take encl->lock only after this returns) — confirm
 * intent before removing.
 */
WARN_ON_ONCE(encl->page_cnt % SGX_VA_SLOT_COUNT);
}
encl->page_cnt++;
return va_page;
}
/*
 * Undo the accounting done by sgx_encl_grow(): drop the page count and,
 * when a VA page had been allocated for this growth step, unlink and
 * release it again.  @va_page may be NULL (no VA page was needed).
 */
static void sgx_encl_shrink(struct sgx_encl *encl, struct sgx_va_page *va_page)
{
	encl->page_cnt--;

	if (!va_page)
		return;

	sgx_free_epc_page(va_page->epc_page);
	list_del(&va_page->list);
	kfree(va_page);
}
static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs)
{
struct sgx_epc_page *secs_epc;
struct sgx_va_page *va_page;
struct sgx_pageinfo pginfo;
struct sgx_secinfo secinfo;
unsigned long encl_size;
struct file *backing;
long ret;
va_page = sgx_encl_grow(encl);
if (IS_ERR(va_page))
return PTR_ERR(va_page);
else if (va_page)
list_add(&va_page->list, &encl->va_pages);
/* else the tail page of the VA page list had free slots. */
/* The extra page goes to SECS. */
encl_size = secs->size + PAGE_SIZE;
secs_epc = __sgx_alloc_epc_page();
if (IS_ERR(secs_epc))
return PTR_ERR(secs_epc);
backing = shmem_file_setup("SGX backing", encl_size + (encl_size >> 5),
VM_NORESERVE);
if (IS_ERR(backing)) {
ret = PTR_ERR(backing);
goto err_out_shrink;
}
encl->backing = backing;
secs_epc = sgx_alloc_epc_page(&encl->secs, true);
if (IS_ERR(secs_epc)) {
ret = PTR_ERR(secs_epc);
goto err_out_backing;
}
encl->secs.epc_page = secs_epc;
......@@ -63,6 +120,13 @@ static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs)
sgx_free_epc_page(encl->secs.epc_page);
encl->secs.epc_page = NULL;
err_out_backing:
fput(encl->backing);
encl->backing = NULL;
err_out_shrink:
sgx_encl_shrink(encl, va_page);
return ret;
}
......@@ -228,21 +292,35 @@ static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long src,
{
struct sgx_encl_page *encl_page;
struct sgx_epc_page *epc_page;
struct sgx_va_page *va_page;
int ret;
encl_page = sgx_encl_page_alloc(encl, offset, secinfo->flags);
if (IS_ERR(encl_page))
return PTR_ERR(encl_page);
epc_page = __sgx_alloc_epc_page();
epc_page = sgx_alloc_epc_page(encl_page, true);
if (IS_ERR(epc_page)) {
kfree(encl_page);
return PTR_ERR(epc_page);
}
va_page = sgx_encl_grow(encl);
if (IS_ERR(va_page)) {
ret = PTR_ERR(va_page);
goto err_out_free;
}
mmap_read_lock(current->mm);
mutex_lock(&encl->lock);
/*
* Adding to encl->va_pages must be done under encl->lock. Ditto for
* deleting (via sgx_encl_shrink()) in the error path.
*/
if (va_page)
list_add(&va_page->list, &encl->va_pages);
/*
* Insert prior to EADD in case of OOM. EADD modifies MRENCLAVE, i.e.
* can't be gracefully unwound, while failure on EADD/EXTEND is limited
......@@ -273,6 +351,7 @@ static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long src,
goto err_out;
}
sgx_mark_page_reclaimable(encl_page->epc_page);
mutex_unlock(&encl->lock);
mmap_read_unlock(current->mm);
return ret;
......@@ -281,9 +360,11 @@ static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long src,
xa_erase(&encl->page_array, PFN_DOWN(encl_page->desc));
err_out_unlock:
sgx_encl_shrink(encl, va_page);
mutex_unlock(&encl->lock);
mmap_read_unlock(current->mm);
err_out_free:
sgx_free_epc_page(epc_page);
kfree(encl_page);
......
This diff is collapsed.
......@@ -15,9 +15,17 @@
#define SGX_MAX_EPC_SECTIONS 8
#define SGX_EEXTEND_BLOCK_SIZE 256
#define SGX_NR_TO_SCAN 16
#define SGX_NR_LOW_PAGES 32
#define SGX_NR_HIGH_PAGES 64
/* Pages, which are being tracked by the page reclaimer. */
#define SGX_EPC_PAGE_RECLAIMER_TRACKED BIT(0)
/*
 * Metadata for one EPC (Enclave Page Cache) page.
 */
struct sgx_epc_page {
unsigned int section; /* index of the owning sgx_epc_section */
unsigned int flags; /* e.g. SGX_EPC_PAGE_RECLAIMER_TRACKED */
struct sgx_encl_page *owner; /* enclave page backed by this EPC page; set via sgx_alloc_epc_page(owner, ...) */
struct list_head list; /* linkage for the section's page_list/laundry_list and reclaimer lists */
};
......@@ -33,6 +41,7 @@ struct sgx_epc_section {
struct list_head page_list;
struct list_head laundry_list;
struct sgx_epc_page *pages;
unsigned long free_cnt;
spinlock_t lock;
};
......@@ -61,4 +70,8 @@ static inline void *sgx_get_epc_virt_addr(struct sgx_epc_page *page)
struct sgx_epc_page *__sgx_alloc_epc_page(void);
void sgx_free_epc_page(struct sgx_epc_page *page);
void sgx_mark_page_reclaimable(struct sgx_epc_page *page);
int sgx_unmark_page_reclaimable(struct sgx_epc_page *page);
struct sgx_epc_page *sgx_alloc_epc_page(void *owner, bool reclaim);
#endif /* _X86_SGX_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment