// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-20 Intel Corporation. */

#include <linux/freezer.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>

#include "driver.h"
#include "encl.h"
#include "encls.h"

struct sgx_epc_section sgx_epc_sections[SGX_MAX_EPC_SECTIONS];
static int sgx_nr_epc_sections;
static struct task_struct *ksgxd_tsk;
static DECLARE_WAIT_QUEUE_HEAD(ksgxd_waitq);

/*
 * These variables are part of the state of the reclaimer, and must be accessed
 * with sgx_reclaimer_lock acquired.
 */
static LIST_HEAD(sgx_active_page_list);

static DEFINE_SPINLOCK(sgx_reclaimer_lock);

/*
 * Reset dirty EPC pages to uninitialized state. Laundry can be left with SECS
 * pages whose child pages blocked EREMOVE.
 */
static void sgx_sanitize_section(struct sgx_epc_section *section)
{
	struct sgx_epc_page *page;
	LIST_HEAD(dirty);
	int ret;

	/* init_laundry_list is thread-local, no need for a lock: */
	while (!list_empty(&section->init_laundry_list)) {
		if (kthread_should_stop())
			return;

		/* needed for access to ->page_list: */
		spin_lock(&section->lock);

		page = list_first_entry(&section->init_laundry_list,
					struct sgx_epc_page, list);

		ret = __eremove(sgx_get_epc_virt_addr(page));
		if (!ret)
			list_move(&page->list, &section->page_list);
		else
			list_move_tail(&page->list, &dirty);

		spin_unlock(&section->lock);
		cond_resched();
	}

	list_splice(&dirty, &section->init_laundry_list);
}

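/*
 * A page is a reclaim candidate only if no mm that maps the enclave has
 * touched it since the last scan: the accessed bit is tested and cleared in
 * every mm, and a single hit keeps the page on the active list.
 */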
static bool sgx_reclaimer_age(struct sgx_epc_page *epc_page)
{
	struct sgx_encl_page *page = epc_page->owner;
	struct sgx_encl *encl = page->encl;
	struct sgx_encl_mm *encl_mm;
	bool ret = true;
	int idx;

	idx = srcu_read_lock(&encl->srcu);

	list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
		if (!mmget_not_zero(encl_mm->mm))
			continue;

		mmap_read_lock(encl_mm->mm);
		ret = !sgx_encl_test_and_clear_young(encl_mm->mm, page);
		mmap_read_unlock(encl_mm->mm);
		mmput_async(encl_mm->mm);

		if (!ret)
			break;
	}

	srcu_read_unlock(&encl->srcu, idx);

	return ret;
}

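/*
 * Zap the PTEs for the reclaimed page in every mm that maps the enclave,
 * retrying if the mm list changes underneath us, and then mark the EPC page
 * blocked with EBLOCK so that no new TLB mappings can be created for it.
 */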
static void sgx_reclaimer_block(struct sgx_epc_page *epc_page)
{
	struct sgx_encl_page *page = epc_page->owner;
	unsigned long addr = page->desc & PAGE_MASK;
	struct sgx_encl *encl = page->encl;
	unsigned long mm_list_version;
	struct sgx_encl_mm *encl_mm;
	struct vm_area_struct *vma;
	int idx, ret;

	do {
		mm_list_version = encl->mm_list_version;

		/* Pairs with smp_wmb() in sgx_encl_mm_add(). */
		smp_rmb();

		idx = srcu_read_lock(&encl->srcu);

		list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
			if (!mmget_not_zero(encl_mm->mm))
				continue;

			mmap_read_lock(encl_mm->mm);

			ret = sgx_encl_find(encl_mm->mm, addr, &vma);
			if (!ret && encl == vma->vm_private_data)
				zap_vma_ptes(vma, addr, PAGE_SIZE);

			mmap_read_unlock(encl_mm->mm);
			mmput_async(encl_mm->mm);
		}

		srcu_read_unlock(&encl->srcu, idx);
	} while (unlikely(encl->mm_list_version != mm_list_version));

	mutex_lock(&encl->lock);

	ret = __eblock(sgx_get_epc_virt_addr(epc_page));
	if (encls_failed(ret))
		ENCLS_WARN(ret, "EBLOCK");

	mutex_unlock(&encl->lock);
}

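/*
 * EWB encrypts the page contents into the backing page and writes the matching
 * PCMD metadata, binding both to the given Version Array (VA) slot so that the
 * page can later be verified when it is loaded back into the EPC.
 */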
static int __sgx_encl_ewb(struct sgx_epc_page *epc_page, void *va_slot,
			  struct sgx_backing *backing)
{
	struct sgx_pageinfo pginfo;
	int ret;

	pginfo.addr = 0;
	pginfo.secs = 0;

	pginfo.contents = (unsigned long)kmap_atomic(backing->contents);
	pginfo.metadata = (unsigned long)kmap_atomic(backing->pcmd) +
			  backing->pcmd_offset;

	ret = __ewb(&pginfo, sgx_get_epc_virt_addr(epc_page), va_slot);

	kunmap_atomic((void *)(unsigned long)(pginfo.metadata -
					      backing->pcmd_offset));
	kunmap_atomic((void *)(unsigned long)pginfo.contents);

	return ret;
}

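/*
 * The callback body is intentionally empty: the interrupt itself forces any
 * logical CPU executing inside the enclave to exit (AEX), which is all that
 * is needed before EWB can succeed.
 */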
static void sgx_ipi_cb(void *info)
{
}

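/*
 * Build the set of CPUs that may be executing in one of the enclave's mms,
 * i.e. the CPUs that must be interrupted if the EWB slow path is taken.
 */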
static const cpumask_t *sgx_encl_ewb_cpumask(struct sgx_encl *encl)
{
	cpumask_t *cpumask = &encl->cpumask;
	struct sgx_encl_mm *encl_mm;
	int idx;

	/*
	 * Can race with sgx_encl_mm_add(), but ETRACK has already been
	 * executed, which means that the CPUs running in the new mm will
	 * enter the enclave with a fresh epoch.
	 */
	cpumask_clear(cpumask);

	idx = srcu_read_lock(&encl->srcu);

	list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
		if (!mmget_not_zero(encl_mm->mm))
			continue;

		cpumask_or(cpumask, cpumask, mm_cpumask(encl_mm->mm));
		mmput_async(encl_mm->mm);
	}

	srcu_read_unlock(&encl->srcu, idx);

	return cpumask;
}

/*
 * Swap the page to regular memory after it has been transformed to the blocked
 * state with EBLOCK, which means that it can no longer be referenced (no new
 * TLB entries).
 *
 * The first trial just tries to write the page assuming that some other thread
 * has reset the count of threads inside the enclave by using ETRACK, and the
 * previous thread count has been zeroed out. The second trial calls ETRACK
 * before EWB. If that fails we kick all the HW threads out, and then do EWB,
 * which should be guaranteed to succeed.
 */
static void sgx_encl_ewb(struct sgx_epc_page *epc_page,
			 struct sgx_backing *backing)
{
	struct sgx_encl_page *encl_page = epc_page->owner;
	struct sgx_encl *encl = encl_page->encl;
	struct sgx_va_page *va_page;
	unsigned int va_offset;
	void *va_slot;
	int ret;

	encl_page->desc &= ~SGX_ENCL_PAGE_BEING_RECLAIMED;

	va_page = list_first_entry(&encl->va_pages, struct sgx_va_page,
				   list);
	va_offset = sgx_alloc_va_slot(va_page);
	va_slot = sgx_get_epc_virt_addr(va_page->epc_page) + va_offset;
	if (sgx_va_page_full(va_page))
		list_move_tail(&va_page->list, &encl->va_pages);

	ret = __sgx_encl_ewb(epc_page, va_slot, backing);
	if (ret == SGX_NOT_TRACKED) {
		ret = __etrack(sgx_get_epc_virt_addr(encl->secs.epc_page));
		if (encls_failed(ret))
			ENCLS_WARN(ret, "ETRACK");

		ret = __sgx_encl_ewb(epc_page, va_slot, backing);
		if (ret == SGX_NOT_TRACKED) {
			/*
			 * Slow path, send IPIs to kick cpus out of the
			 * enclave.  Note, it's imperative that the cpu
			 * mask is generated *after* ETRACK, else we'll
			 * miss cpus that entered the enclave between
			 * generating the mask and incrementing epoch.
			 */
			on_each_cpu_mask(sgx_encl_ewb_cpumask(encl),
					 sgx_ipi_cb, NULL, 1);
			ret = __sgx_encl_ewb(epc_page, va_slot, backing);
		}
	}

	if (ret) {
		if (encls_failed(ret))
			ENCLS_WARN(ret, "EWB");

		sgx_free_va_slot(va_page, va_offset);
	} else {
		encl_page->desc |= va_offset;
		encl_page->va_page = va_page;
	}
}

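/*
 * Write the reclaimed page back to the enclave's backing storage. If this was
 * the last child page of an initialized enclave, the SECS page is written back
 * and freed as well.
 */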
static void sgx_reclaimer_write(struct sgx_epc_page *epc_page,
				struct sgx_backing *backing)
{
	struct sgx_encl_page *encl_page = epc_page->owner;
	struct sgx_encl *encl = encl_page->encl;
	struct sgx_backing secs_backing;
	int ret;

	mutex_lock(&encl->lock);

	sgx_encl_ewb(epc_page, backing);
	encl_page->epc_page = NULL;
	encl->secs_child_cnt--;

	if (!encl->secs_child_cnt && test_bit(SGX_ENCL_INITIALIZED, &encl->flags)) {
		ret = sgx_encl_get_backing(encl, PFN_DOWN(encl->size),
					   &secs_backing);
		if (ret)
			goto out;

		sgx_encl_ewb(encl->secs.epc_page, &secs_backing);

		sgx_free_epc_page(encl->secs.epc_page);
		encl->secs.epc_page = NULL;

		sgx_encl_put_backing(&secs_backing, true);
	}

out:
	mutex_unlock(&encl->lock);
}

/*
 * Take a fixed number of pages from the head of the active page pool and
 * reclaim them to the enclave's private shmem files. Skip pages that have
 * been accessed since the last scan, and move them to the tail of the active
 * page pool so that the pool is scanned in an LRU-like fashion.
 *
 * Batch process a chunk of pages (at the moment 16) in order to reduce the
 * number of IPIs and ETRACKs potentially required. sgx_encl_ewb() already
 * spreads the cost somewhat with its three-stage EWB pipeline (EWB, ETRACK +
 * EWB and IPI + EWB), but not sufficiently. Reclaiming one page at a time
 * would also be problematic as it would increase lock contention too much,
 * which would halt forward progress.
 */
static void sgx_reclaim_pages(void)
{
	struct sgx_epc_page *chunk[SGX_NR_TO_SCAN];
	struct sgx_backing backing[SGX_NR_TO_SCAN];
	struct sgx_epc_section *section;
	struct sgx_encl_page *encl_page;
	struct sgx_epc_page *epc_page;
	pgoff_t page_index;
	int cnt = 0;
	int ret;
	int i;

	spin_lock(&sgx_reclaimer_lock);
	for (i = 0; i < SGX_NR_TO_SCAN; i++) {
		if (list_empty(&sgx_active_page_list))
			break;

		epc_page = list_first_entry(&sgx_active_page_list,
					    struct sgx_epc_page, list);
		list_del_init(&epc_page->list);
		encl_page = epc_page->owner;

		if (kref_get_unless_zero(&encl_page->encl->refcount) != 0)
			chunk[cnt++] = epc_page;
		else
			/* The owner is freeing the page. No need to add the
			 * page back to the list of reclaimable pages.
			 */
			epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;
	}
	spin_unlock(&sgx_reclaimer_lock);

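	/*
	 * Age each candidate and look up its backing storage. Pages that are
	 * still young, or whose backing cannot be obtained, are returned to
	 * the tail of the active list and dropped from the chunk.
	 */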
	for (i = 0; i < cnt; i++) {
		epc_page = chunk[i];
		encl_page = epc_page->owner;

		if (!sgx_reclaimer_age(epc_page))
			goto skip;

		page_index = PFN_DOWN(encl_page->desc - encl_page->encl->base);
		ret = sgx_encl_get_backing(encl_page->encl, page_index, &backing[i]);
		if (ret)
			goto skip;

		mutex_lock(&encl_page->encl->lock);
		encl_page->desc |= SGX_ENCL_PAGE_BEING_RECLAIMED;
		mutex_unlock(&encl_page->encl->lock);
		continue;

skip:
		spin_lock(&sgx_reclaimer_lock);
		list_add_tail(&epc_page->list, &sgx_active_page_list);
		spin_unlock(&sgx_reclaimer_lock);

		kref_put(&encl_page->encl->refcount, sgx_encl_release);
		chunk[i] = NULL;
	}

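	/*
	 * Block every surviving candidate first, then write them all back;
	 * processing the chunk in phases keeps the number of IPIs and ETRACKs
	 * down, as described in the comment above sgx_reclaim_pages().
	 */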
	for (i = 0; i < cnt; i++) {
		epc_page = chunk[i];
		if (epc_page)
			sgx_reclaimer_block(epc_page);
	}

	for (i = 0; i < cnt; i++) {
		epc_page = chunk[i];
		if (!epc_page)
			continue;

		encl_page = epc_page->owner;
		sgx_reclaimer_write(epc_page, &backing[i]);
		sgx_encl_put_backing(&backing[i], true);

		kref_put(&encl_page->encl->refcount, sgx_encl_release);
		epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;

		section = &sgx_epc_sections[epc_page->section];
		spin_lock(&section->lock);
		list_add_tail(&epc_page->list, &section->page_list);
		section->free_cnt++;
		spin_unlock(&section->lock);
	}
}

static unsigned long sgx_nr_free_pages(void)
{
	unsigned long cnt = 0;
	int i;

	for (i = 0; i < sgx_nr_epc_sections; i++)
		cnt += sgx_epc_sections[i].free_cnt;

	return cnt;
}

static bool sgx_should_reclaim(unsigned long watermark)
{
	return sgx_nr_free_pages() < watermark &&
	       !list_empty(&sgx_active_page_list);
}

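/*
 * ksgxd sanitizes the EPC at boot (e.g. after a kexec) and then reclaims pages
 * in the background: allocations wake it once free pages fall below
 * SGX_NR_LOW_PAGES, and it keeps reclaiming while the free count stays below
 * SGX_NR_HIGH_PAGES and reclaimable pages exist.
 */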
static int ksgxd(void *p)
{
	int i;

	set_freezable();

	/*
	 * Sanitize pages in order to recover from kexec(). The 2nd pass is
	 * required for SECS pages, whose child pages blocked EREMOVE.
	 */
	for (i = 0; i < sgx_nr_epc_sections; i++)
		sgx_sanitize_section(&sgx_epc_sections[i]);

	for (i = 0; i < sgx_nr_epc_sections; i++) {
		sgx_sanitize_section(&sgx_epc_sections[i]);

		/* Should never happen. */
		if (!list_empty(&sgx_epc_sections[i].init_laundry_list))
			WARN(1, "EPC section %d has unsanitized pages.\n", i);
	}

	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;

		wait_event_freezable(ksgxd_waitq,
				     kthread_should_stop() ||
				     sgx_should_reclaim(SGX_NR_HIGH_PAGES));

		if (sgx_should_reclaim(SGX_NR_HIGH_PAGES))
			sgx_reclaim_pages();

		cond_resched();
	}

	return 0;
}

static bool __init sgx_page_reclaimer_init(void)
{
	struct task_struct *tsk;

	tsk = kthread_run(ksgxd, NULL, "ksgxd");
	if (IS_ERR(tsk))
		return false;

	ksgxd_tsk = tsk;

	return true;
}

static struct sgx_epc_page *__sgx_alloc_epc_page_from_section(struct sgx_epc_section *section)
{
	struct sgx_epc_page *page;

	spin_lock(&section->lock);

	if (list_empty(&section->page_list)) {
		spin_unlock(&section->lock);
		return NULL;
	}

	page = list_first_entry(&section->page_list, struct sgx_epc_page, list);
	list_del_init(&page->list);
	section->free_cnt--;

	spin_unlock(&section->lock);
	return page;
}

/**
 * __sgx_alloc_epc_page() - Allocate an EPC page
 *
 * Iterate through the EPC sections and hand out a free EPC page to the caller.
 * When a page is no longer needed it must be released with sgx_free_epc_page().
 *
 * Return:
 *   an EPC page,
 *   -errno on error
 */
struct sgx_epc_page *__sgx_alloc_epc_page(void)
{
	struct sgx_epc_section *section;
	struct sgx_epc_page *page;
	int i;

	for (i = 0; i < sgx_nr_epc_sections; i++) {
		section = &sgx_epc_sections[i];

		page = __sgx_alloc_epc_page_from_section(section);
		if (page)
			return page;
	}

	return ERR_PTR(-ENOMEM);
}

/**
 * sgx_mark_page_reclaimable() - Mark a page as reclaimable
 * @page:	EPC page
 *
 * Mark a page as reclaimable and add it to the active page list. Pages
 * are automatically removed from the active list when freed.
 */
void sgx_mark_page_reclaimable(struct sgx_epc_page *page)
{
	spin_lock(&sgx_reclaimer_lock);
	page->flags |= SGX_EPC_PAGE_RECLAIMER_TRACKED;
	list_add_tail(&page->list, &sgx_active_page_list);
	spin_unlock(&sgx_reclaimer_lock);
}

/**
 * sgx_unmark_page_reclaimable() - Remove a page from the reclaim list
 * @page:	EPC page
 *
 * Clear the reclaimable flag and remove the page from the active page list.
 *
 * Return:
 *   0 on success,
 *   -EBUSY if the page is in the process of being reclaimed
 */
int sgx_unmark_page_reclaimable(struct sgx_epc_page *page)
{
	spin_lock(&sgx_reclaimer_lock);
	if (page->flags & SGX_EPC_PAGE_RECLAIMER_TRACKED) {
		/* The page is being reclaimed. */
		if (list_empty(&page->list)) {
			spin_unlock(&sgx_reclaimer_lock);
			return -EBUSY;
		}

		list_del(&page->list);
		page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;
	}
	spin_unlock(&sgx_reclaimer_lock);

	return 0;
}

/**
 * sgx_alloc_epc_page() - Allocate an EPC page
 * @owner:	the owner of the EPC page
 * @reclaim:	reclaim pages if necessary
 *
 * Iterate through the EPC sections and hand out a free EPC page to the caller.
 * When a page is no longer needed it must be released with sgx_free_epc_page().
 * If @reclaim is set to true, directly reclaim pages when we are out of pages.
 * No mm locks may be held when @reclaim is set to true.
 *
 * Finally, wake up ksgxd when the number of free pages goes below the low
 * watermark before returning to the caller.
 *
 * Return:
 *   an EPC page,
 *   -errno on error
 */
struct sgx_epc_page *sgx_alloc_epc_page(void *owner, bool reclaim)
{
	struct sgx_epc_page *page;

	for ( ; ; ) {
		page = __sgx_alloc_epc_page();
		if (!IS_ERR(page)) {
			page->owner = owner;
			break;
		}

		if (list_empty(&sgx_active_page_list))
			return ERR_PTR(-ENOMEM);

		if (!reclaim) {
			page = ERR_PTR(-EBUSY);
			break;
		}

		if (signal_pending(current)) {
			page = ERR_PTR(-ERESTARTSYS);
			break;
		}

		sgx_reclaim_pages();
		cond_resched();
	}

	if (sgx_should_reclaim(SGX_NR_LOW_PAGES))
		wake_up(&ksgxd_waitq);

	return page;
}

/**
 * sgx_free_epc_page() - Free an EPC page
 * @page:	an EPC page
 *
 * Call EREMOVE for an EPC page and insert it back to the list of free pages.
 */
void sgx_free_epc_page(struct sgx_epc_page *page)
{
	struct sgx_epc_section *section = &sgx_epc_sections[page->section];
	int ret;

	WARN_ON_ONCE(page->flags & SGX_EPC_PAGE_RECLAIMER_TRACKED);

	ret = __eremove(sgx_get_epc_virt_addr(page));
	if (WARN_ONCE(ret, "EREMOVE returned %d (0x%x)", ret, ret))
		return;

	spin_lock(&section->lock);
	list_add_tail(&page->list, &section->page_list);
	section->free_cnt++;
	spin_unlock(&section->lock);
}

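/*
 * Map an EPC section reported by CPUID, allocate its page metadata, and queue
 * every page on the init laundry list so that ksgxd sanitizes it with EREMOVE
 * before it is handed out.
 */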
static bool __init sgx_setup_epc_section(u64 phys_addr, u64 size,
					 unsigned long index,
					 struct sgx_epc_section *section)
{
	unsigned long nr_pages = size >> PAGE_SHIFT;
	unsigned long i;

	section->virt_addr = memremap(phys_addr, size, MEMREMAP_WB);
	if (!section->virt_addr)
		return false;

	section->pages = vmalloc(nr_pages * sizeof(struct sgx_epc_page));
	if (!section->pages) {
		memunmap(section->virt_addr);
		return false;
	}

	section->phys_addr = phys_addr;
	spin_lock_init(&section->lock);
	INIT_LIST_HEAD(&section->page_list);
	INIT_LIST_HEAD(&section->init_laundry_list);

	for (i = 0; i < nr_pages; i++) {
		section->pages[i].section = index;
		section->pages[i].flags = 0;
		section->pages[i].owner = NULL;
		list_add_tail(&section->pages[i].list, &section->init_laundry_list);
	}

	section->free_cnt = nr_pages;
	return true;
}

/*
 * A section metric is concatenated in a way that @low bits 12-31 define the
 * bits 12-31 of the metric and @high bits 0-19 define the bits 32-51 of the
 * metric.
 */
static inline u64 __init sgx_calc_section_metric(u64 low, u64 high)
{
	return (low & GENMASK_ULL(31, 12)) +
	       ((high & GENMASK_ULL(19, 0)) << 32);
}

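/*
 * Example of sgx_calc_section_metric() (illustrative CPUID values only):
 * low = 0x80200000, high = 0x0 yields a base of 0x80200000, and
 * low = 0x05d80000, high = 0x0 yields a section size of 0x05d80000 bytes.
 */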
static bool __init sgx_page_cache_init(void)
{
	u32 eax, ebx, ecx, edx, type;
	u64 pa, size;
	int i;

	for (i = 0; i < ARRAY_SIZE(sgx_epc_sections); i++) {
		cpuid_count(SGX_CPUID, i + SGX_CPUID_EPC, &eax, &ebx, &ecx, &edx);

		type = eax & SGX_CPUID_EPC_MASK;
		if (type == SGX_CPUID_EPC_INVALID)
			break;

		if (type != SGX_CPUID_EPC_SECTION) {
			pr_err_once("Unknown EPC section type: %u\n", type);
			break;
		}

		pa   = sgx_calc_section_metric(eax, ebx);
		size = sgx_calc_section_metric(ecx, edx);

		pr_info("EPC section 0x%llx-0x%llx\n", pa, pa + size - 1);

		if (!sgx_setup_epc_section(pa, size, i, &sgx_epc_sections[i])) {
			pr_err("No free memory for an EPC section\n");
			break;
		}

		sgx_nr_epc_sections++;
	}

	if (!sgx_nr_epc_sections) {
		pr_err("There are zero EPC sections.\n");
		return false;
	}

	return true;
}

static void __init sgx_init(void)
{
	int ret;
	int i;

	if (!cpu_feature_enabled(X86_FEATURE_SGX))
		return;

	if (!sgx_page_cache_init())
		return;

	if (!sgx_page_reclaimer_init())
		goto err_page_cache;

	ret = sgx_drv_init();
	if (ret)
		goto err_kthread;

	return;

err_kthread:
	kthread_stop(ksgxd_tsk);

err_page_cache:
	for (i = 0; i < sgx_nr_epc_sections; i++) {
		vfree(sgx_epc_sections[i].pages);
		memunmap(sgx_epc_sections[i].virt_addr);
	}
}

device_initcall(sgx_init);