// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)     "AMD-Vi: " fmt

#include <linux/refcount.h>
#include <linux/mmu_notifier.h>
#include <linux/amd-iommu.h>
#include <linux/mm_types.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/gfp.h>

#include "amd_iommu.h"

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Joerg Roedel <jroedel@suse.de>");

#define MAX_DEVICES             0x10000
#define PRI_QUEUE_SIZE          512

struct pri_queue {
        atomic_t inflight;
        bool finish;
        int status;
};

struct pasid_state {
        struct list_head list;                  /* For global state-list */
        refcount_t count;                       /* Reference count */
        unsigned mmu_notifier_count;            /* Counting nested mmu_notifier
                                                   calls */
        struct mm_struct *mm;                   /* mm_struct for the faults */
        struct mmu_notifier mn;                 /* mmu_notifier handle */
        struct pri_queue pri[PRI_QUEUE_SIZE];   /* PRI tag states */
        struct device_state *device_state;      /* Link to our device_state */
        u32 pasid;                              /* PASID index */
        bool invalid;                           /* Used during setup and
                                                   teardown of the pasid */
        spinlock_t lock;                        /* Protect pri_queues and
                                                   mmu_notifier_count */
        wait_queue_head_t wq;                   /* To wait for count == 0 */
};

struct device_state {
        struct list_head list;
        u16 devid;
        atomic_t count;
        struct pci_dev *pdev;
        struct pasid_state **states;
        struct iommu_domain *domain;
        int pasid_levels;
        int max_pasids;
        amd_iommu_invalid_ppr_cb inv_ppr_cb;
        amd_iommu_invalidate_ctx inv_ctx_cb;
        spinlock_t lock;
        wait_queue_head_t wq;
};

struct fault {
        struct work_struct work;
        struct device_state *dev_state;
        struct pasid_state *state;
        struct mm_struct *mm;
        u64 address;
        u16 devid;
        u32 pasid;
        u16 tag;
        u16 finish;
        u16 flags;
};

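/*
 * Lifecycle of a PPR fault (summary comment, added for orientation):
 * ppr_notifier() runs in atomic context, allocates a struct fault and
 * queues it on iommu_wq; do_fault() resolves it in process context via
 * handle_mm_fault(); finish_pri_tag() sends the PPR response back to
 * the device once the last in-flight fault for the tag has completed.
 */
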
static LIST_HEAD(state_list);
static DEFINE_SPINLOCK(state_lock);

static struct workqueue_struct *iommu_wq;

static void free_pasid_states(struct device_state *dev_state);

static u16 device_id(struct pci_dev *pdev)
{
        u16 devid;

        devid = pdev->bus->number;
        devid = (devid << 8) | pdev->devfn;

        return devid;
}

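/*
 * Example: bus 0x3a, devfn 0x08 (device 1, function 0) yields devid
 * 0x3a08 - the same 16-bit requester ID that iommu_fault->device_id
 * carries when the IOMMU reports a PPR fault.
 */
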
static struct device_state *__get_device_state(u16 devid)
{
        struct device_state *dev_state;

        list_for_each_entry(dev_state, &state_list, list) {
                if (dev_state->devid == devid)
                        return dev_state;
        }

        return NULL;
}

static struct device_state *get_device_state(u16 devid)
{
        struct device_state *dev_state;
        unsigned long flags;

        spin_lock_irqsave(&state_lock, flags);
        dev_state = __get_device_state(devid);
        if (dev_state != NULL)
                atomic_inc(&dev_state->count);
        spin_unlock_irqrestore(&state_lock, flags);

        return dev_state;
}

static void free_device_state(struct device_state *dev_state)
{
        struct iommu_group *group;

        /*
         * First detach the device from its domain - no more PRI requests
         * will arrive from that device after it is unbound from the
         * IOMMUv2 domain.
         */
        group = iommu_group_get(&dev_state->pdev->dev);
        if (WARN_ON(!group))
                return;

        iommu_detach_group(dev_state->domain, group);

        iommu_group_put(group);

        /* Everything is down now, free the IOMMUv2 domain */
        iommu_domain_free(dev_state->domain);

        /* Finally get rid of the device-state */
        kfree(dev_state);
}

static void put_device_state(struct device_state *dev_state)
{
        if (atomic_dec_and_test(&dev_state->count))
                wake_up(&dev_state->wq);
}

/* Must be called under dev_state->lock */
static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
                                                  u32 pasid, bool alloc)
{
        struct pasid_state **root, **ptr;
        int level, index;

        level = dev_state->pasid_levels;
        root  = dev_state->states;

        while (true) {

                index = (pasid >> (9 * level)) & 0x1ff;
                ptr   = &root[index];

                if (level == 0)
                        break;

                if (*ptr == NULL) {
                        if (!alloc)
                                return NULL;

                        *ptr = (void *)get_zeroed_page(GFP_ATOMIC);
                        if (*ptr == NULL)
                                return NULL;
                }

                root   = (struct pasid_state **)*ptr;
                level -= 1;
        }

        return ptr;
}

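/*
 * Example: with pasid_levels == 2, looking up PASID 0x12345 walks
 * index (0x12345 >> 18) & 0x1ff = 0 in the root table, then
 * (0x12345 >> 9) & 0x1ff = 0x91 in the level-1 table, and finally
 * returns &table[0x145] in the leaf. Each table is one zeroed page
 * holding 512 pointers.
 */
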
static int set_pasid_state(struct device_state *dev_state,
                           struct pasid_state *pasid_state,
                           u32 pasid)
{
        struct pasid_state **ptr;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&dev_state->lock, flags);
        ptr = __get_pasid_state_ptr(dev_state, pasid, true);

        ret = -ENOMEM;
        if (ptr == NULL)
                goto out_unlock;

        ret = -ENOMEM;
        if (*ptr != NULL)
                goto out_unlock;

        *ptr = pasid_state;

        ret = 0;

out_unlock:
        spin_unlock_irqrestore(&dev_state->lock, flags);

        return ret;
}

static void clear_pasid_state(struct device_state *dev_state, u32 pasid)
{
        struct pasid_state **ptr;
        unsigned long flags;

        spin_lock_irqsave(&dev_state->lock, flags);
        ptr = __get_pasid_state_ptr(dev_state, pasid, true);

        if (ptr == NULL)
                goto out_unlock;

        *ptr = NULL;

out_unlock:
        spin_unlock_irqrestore(&dev_state->lock, flags);
}

static struct pasid_state *get_pasid_state(struct device_state *dev_state,
                                           u32 pasid)
{
        struct pasid_state **ptr, *ret = NULL;
        unsigned long flags;

        spin_lock_irqsave(&dev_state->lock, flags);
        ptr = __get_pasid_state_ptr(dev_state, pasid, false);

        if (ptr == NULL)
                goto out_unlock;

        ret = *ptr;
        if (ret)
                refcount_inc(&ret->count);

out_unlock:
        spin_unlock_irqrestore(&dev_state->lock, flags);

        return ret;
}

static void free_pasid_state(struct pasid_state *pasid_state)
{
        kfree(pasid_state);
}

static void put_pasid_state(struct pasid_state *pasid_state)
{
        if (refcount_dec_and_test(&pasid_state->count))
                wake_up(&pasid_state->wq);
}

static void put_pasid_state_wait(struct pasid_state *pasid_state)
{
        refcount_dec(&pasid_state->count);
        wait_event(pasid_state->wq, !refcount_read(&pasid_state->count));
        free_pasid_state(pasid_state);
}

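/*
 * Teardown idiom: put_pasid_state_wait() drops the caller's reference
 * and then sleeps until all concurrent holders have dropped theirs
 * (every final put wakes the waitqueue) before freeing the state.
 * device_state teardown in amd_iommu_free_device() uses the same
 * count-to-zero pattern.
 */
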
static void unbind_pasid(struct pasid_state *pasid_state)
{
        struct iommu_domain *domain;

        domain = pasid_state->device_state->domain;

        /*
         * Mark pasid_state as invalid; no more faults will be added to the
         * work queue after this is visible everywhere.
         */
        pasid_state->invalid = true;

        /* Make sure this is visible */
        smp_wmb();

        /* After this the device/pasid can't access the mm anymore */
        amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);

        /* Make sure no more pending faults are in the queue */
        flush_workqueue(iommu_wq);
}

static void free_pasid_states_level1(struct pasid_state **tbl)
{
        int i;

        for (i = 0; i < 512; ++i) {
                if (tbl[i] == NULL)
                        continue;

                free_page((unsigned long)tbl[i]);
        }
}

static void free_pasid_states_level2(struct pasid_state **tbl)
{
        struct pasid_state **ptr;
        int i;

        for (i = 0; i < 512; ++i) {
                if (tbl[i] == NULL)
                        continue;

                ptr = (struct pasid_state **)tbl[i];
                free_pasid_states_level1(ptr);
        }
}

static void free_pasid_states(struct device_state *dev_state)
{
        struct pasid_state *pasid_state;
        int i;

        for (i = 0; i < dev_state->max_pasids; ++i) {
                pasid_state = get_pasid_state(dev_state, i);
                if (pasid_state == NULL)
                        continue;

                put_pasid_state(pasid_state);

                /*
                 * This will call the mn_release function and unbind
                 * the PASID.
                 */
                mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

                put_pasid_state_wait(pasid_state); /* Reference taken in
                                                      amd_iommu_bind_pasid */

                /* Drop reference taken in amd_iommu_bind_pasid */
                put_device_state(dev_state);
        }

        if (dev_state->pasid_levels == 2)
                free_pasid_states_level2(dev_state->states);
        else if (dev_state->pasid_levels == 1)
                free_pasid_states_level1(dev_state->states);
        else
                BUG_ON(dev_state->pasid_levels != 0);

        free_page((unsigned long)dev_state->states);
}

static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
{
        return container_of(mn, struct pasid_state, mn);
}

static void mn_invalidate_range(struct mmu_notifier *mn,
                                struct mm_struct *mm,
                                unsigned long start, unsigned long end)
{
        struct pasid_state *pasid_state;
        struct device_state *dev_state;

        pasid_state = mn_to_state(mn);
        dev_state   = pasid_state->device_state;

        if ((start ^ (end - 1)) < PAGE_SIZE)
                amd_iommu_flush_page(dev_state->domain, pasid_state->pasid,
                                     start);
        else
                amd_iommu_flush_tlb(dev_state->domain, pasid_state->pasid);
}

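/*
 * Note on the flush heuristic above: (start ^ (end - 1)) < PAGE_SIZE
 * is true iff start and end - 1 agree in all bits above the page
 * offset, i.e. the invalidated range lies within one page. E.g.
 * start = 0x1000, end = 0x2000: 0x1000 ^ 0x1fff = 0xfff < PAGE_SIZE,
 * so a single page flush suffices; larger ranges flush the whole TLB
 * for the PASID.
 */
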
static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
        struct pasid_state *pasid_state;
        struct device_state *dev_state;
        bool run_inv_ctx_cb;

        might_sleep();

        pasid_state    = mn_to_state(mn);
        dev_state      = pasid_state->device_state;
        run_inv_ctx_cb = !pasid_state->invalid;

        if (run_inv_ctx_cb && dev_state->inv_ctx_cb)
                dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);

        unbind_pasid(pasid_state);
}

static const struct mmu_notifier_ops iommu_mn = {
        .release          = mn_release,
        .invalidate_range = mn_invalidate_range,
};

static void set_pri_tag_status(struct pasid_state *pasid_state,
                               u16 tag, int status)
{
        unsigned long flags;

        spin_lock_irqsave(&pasid_state->lock, flags);
        pasid_state->pri[tag].status = status;
        spin_unlock_irqrestore(&pasid_state->lock, flags);
}

static void finish_pri_tag(struct device_state *dev_state,
                           struct pasid_state *pasid_state,
                           u16 tag)
{
        unsigned long flags;

        spin_lock_irqsave(&pasid_state->lock, flags);
        if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
            pasid_state->pri[tag].finish) {
                amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
                                       pasid_state->pri[tag].status, tag);
                pasid_state->pri[tag].finish = false;
                pasid_state->pri[tag].status = PPR_SUCCESS;
        }
        spin_unlock_irqrestore(&pasid_state->lock, flags);
}

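/*
 * The inflight/finish protocol: ppr_notifier() increments
 * pri[tag].inflight for each fault it queues and latches the device's
 * request for a response in pri[tag].finish. Only the work item that
 * drops inflight to zero sends the completion, so a tag shared by
 * several queued faults is answered exactly once.
 */
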
static void handle_fault_error(struct fault *fault)
{
        int status;

        if (!fault->dev_state->inv_ppr_cb) {
                set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
                return;
        }

        status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
                                              fault->pasid,
                                              fault->address,
                                              fault->flags);
        switch (status) {
        case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
                set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
                break;
        case AMD_IOMMU_INV_PRI_RSP_INVALID:
                set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
                break;
        case AMD_IOMMU_INV_PRI_RSP_FAIL:
                set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
                break;
        default:
                BUG();
        }
}

static bool access_error(struct vm_area_struct *vma, struct fault *fault)
{
        unsigned long requested = 0;

        if (fault->flags & PPR_FAULT_EXEC)
                requested |= VM_EXEC;

        if (fault->flags & PPR_FAULT_READ)
                requested |= VM_READ;

        if (fault->flags & PPR_FAULT_WRITE)
                requested |= VM_WRITE;

        return (requested & ~vma->vm_flags) != 0;
}

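/*
 * Example: a device write (PPR_FAULT_WRITE) against a read-only VMA
 * sets requested = VM_WRITE; VM_WRITE & ~vma->vm_flags is then
 * non-zero, so do_fault() skips handle_mm_fault() and completes the
 * request through handle_fault_error() instead.
 */
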
static void do_fault(struct work_struct *work)
{
        struct fault *fault = container_of(work, struct fault, work);
        struct vm_area_struct *vma;
        vm_fault_t ret = VM_FAULT_ERROR;
        unsigned int flags = 0;
        struct mm_struct *mm;
        u64 address;

        mm = fault->state->mm;
        address = fault->address;

        if (fault->flags & PPR_FAULT_USER)
                flags |= FAULT_FLAG_USER;
        if (fault->flags & PPR_FAULT_WRITE)
                flags |= FAULT_FLAG_WRITE;
        flags |= FAULT_FLAG_REMOTE;

        mmap_read_lock(mm);
        vma = find_extend_vma(mm, address);
        if (!vma || address < vma->vm_start)
                /* failed to get a vma in the right range */
                goto out;

        /* Check if we have the right permissions on the vma */
        if (access_error(vma, fault))
                goto out;

        ret = handle_mm_fault(vma, address, flags, NULL);
out:
        mmap_read_unlock(mm);

        if (ret & VM_FAULT_ERROR)
                /* failed to service fault */
                handle_fault_error(fault);

        finish_pri_tag(fault->dev_state, fault->state, fault->tag);

        put_pasid_state(fault->state);

        kfree(fault);
}

static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
{
        struct amd_iommu_fault *iommu_fault;
        struct pasid_state *pasid_state;
        struct device_state *dev_state;
        struct pci_dev *pdev = NULL;
        unsigned long flags;
        struct fault *fault;
        bool finish;
        u16 tag, devid;
        int ret;

        iommu_fault = data;
        tag         = iommu_fault->tag & 0x1ff;
        finish      = (iommu_fault->tag >> 9) & 1;

        devid = iommu_fault->device_id;
        pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
                                           devid & 0xff);
        if (!pdev)
                return -ENODEV;

        ret = NOTIFY_DONE;

        /* In a kdump kernel the pci dev is not initialized yet -> send INVALID */
        if (amd_iommu_is_attach_deferred(NULL, &pdev->dev)) {
                amd_iommu_complete_ppr(pdev, iommu_fault->pasid,
                                       PPR_INVALID, tag);
                goto out;
        }

        dev_state = get_device_state(iommu_fault->device_id);
        if (dev_state == NULL)
                goto out;

        pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
        if (pasid_state == NULL || pasid_state->invalid) {
                /* We know the device but not the PASID -> send INVALID */
                amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
                                       PPR_INVALID, tag);
                goto out_drop_state;
        }

        spin_lock_irqsave(&pasid_state->lock, flags);
        atomic_inc(&pasid_state->pri[tag].inflight);
        if (finish)
                pasid_state->pri[tag].finish = true;
        spin_unlock_irqrestore(&pasid_state->lock, flags);

        fault = kzalloc(sizeof(*fault), GFP_ATOMIC);
        if (fault == NULL) {
                /* We are OOM - send success and let the device re-fault */
                finish_pri_tag(dev_state, pasid_state, tag);
                goto out_drop_state;
        }

        fault->dev_state = dev_state;
        fault->address   = iommu_fault->address;
        fault->state     = pasid_state;
        fault->tag       = tag;
        fault->finish    = finish;
        fault->pasid     = iommu_fault->pasid;
        fault->flags     = iommu_fault->flags;
        INIT_WORK(&fault->work, do_fault);

        queue_work(iommu_wq, &fault->work);

        ret = NOTIFY_OK;

out_drop_state:

        if (ret != NOTIFY_OK && pasid_state)
                put_pasid_state(pasid_state);

        put_device_state(dev_state);

out:
        return ret;
}

static struct notifier_block ppr_nb = {
        .notifier_call = ppr_notifier,
};

int amd_iommu_bind_pasid(struct pci_dev *pdev, u32 pasid,
                         struct task_struct *task)
{
        struct pasid_state *pasid_state;
        struct device_state *dev_state;
        struct mm_struct *mm;
        u16 devid;
        int ret;

        might_sleep();

        if (!amd_iommu_v2_supported())
                return -ENODEV;

        devid     = device_id(pdev);
        dev_state = get_device_state(devid);

        if (dev_state == NULL)
                return -EINVAL;

        ret = -EINVAL;
        if (pasid >= dev_state->max_pasids)
                goto out;

        ret = -ENOMEM;
        pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
        if (pasid_state == NULL)
                goto out;

        refcount_set(&pasid_state->count, 1);
        init_waitqueue_head(&pasid_state->wq);
        spin_lock_init(&pasid_state->lock);

        mm                        = get_task_mm(task);
        pasid_state->mm           = mm;
        pasid_state->device_state = dev_state;
        pasid_state->pasid        = pasid;
        pasid_state->invalid      = true; /* Mark as valid only if we are
                                             done with setting up the pasid */
        pasid_state->mn.ops       = &iommu_mn;

        if (pasid_state->mm == NULL)
                goto out_free;

        mmu_notifier_register(&pasid_state->mn, mm);

        ret = set_pasid_state(dev_state, pasid_state, pasid);
        if (ret)
                goto out_unregister;

        ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
                                        __pa(pasid_state->mm->pgd));
        if (ret)
                goto out_clear_state;

        /* Now we are ready to handle faults */
        pasid_state->invalid = false;

        /*
         * Drop the reference to the mm_struct here. We rely on the
         * mmu_notifier release call-back to inform us when the mm
         * is going away.
         */
        mmput(mm);

        return 0;

out_clear_state:
        clear_pasid_state(dev_state, pasid);

out_unregister:
        mmu_notifier_unregister(&pasid_state->mn, mm);
        mmput(mm);

out_free:
        free_pasid_state(pasid_state);

out:
        put_device_state(dev_state);

        return ret;
}
EXPORT_SYMBOL(amd_iommu_bind_pasid);

void amd_iommu_unbind_pasid(struct pci_dev *pdev, u32 pasid)
{
        struct pasid_state *pasid_state;
        struct device_state *dev_state;
        u16 devid;

        might_sleep();

        if (!amd_iommu_v2_supported())
                return;

        devid = device_id(pdev);
        dev_state = get_device_state(devid);
        if (dev_state == NULL)
                return;

        if (pasid >= dev_state->max_pasids)
                goto out;

        pasid_state = get_pasid_state(dev_state, pasid);
        if (pasid_state == NULL)
                goto out;
        /*
         * Drop the reference taken here. We are safe because we still hold
         * the reference taken in the amd_iommu_bind_pasid function.
         */
        put_pasid_state(pasid_state);

        /* Clear the pasid state so that the pasid can be re-used */
        clear_pasid_state(dev_state, pasid_state->pasid);

        /*
         * Call mmu_notifier_unregister to drop our reference
         * to pasid_state->mm
         */
        mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

        put_pasid_state_wait(pasid_state); /* Reference taken in
                                              amd_iommu_bind_pasid */
out:
        /* Drop reference taken in this function */
        put_device_state(dev_state);

        /* Drop reference taken in amd_iommu_bind_pasid */
        put_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_unbind_pasid);

int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
{
        struct device_state *dev_state;
        struct iommu_group *group;
        unsigned long flags;
        int ret, tmp;
        u16 devid;

        might_sleep();

        /*
         * When memory encryption is active the device is likely not in a
         * direct-mapped domain. Forbid using IOMMUv2 functionality for now.
         */
        if (mem_encrypt_active())
                return -ENODEV;

        if (!amd_iommu_v2_supported())
                return -ENODEV;

        if (pasids <= 0 || pasids > (PASID_MASK + 1))
                return -EINVAL;

        devid = device_id(pdev);

        dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
        if (dev_state == NULL)
                return -ENOMEM;

        spin_lock_init(&dev_state->lock);
        init_waitqueue_head(&dev_state->wq);
        dev_state->pdev  = pdev;
        dev_state->devid = devid;

        tmp = pasids;

        /*
         * Compute how many table levels are needed for this many PASIDs;
         * each level resolves 9 bits (512 entries per page). E.g. up to
         * 512 PASIDs fit in a single leaf page (0 extra levels), while
         * the full 2^20 PASID space needs two levels above the leaves.
         */
        for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
                dev_state->pasid_levels += 1;

        atomic_set(&dev_state->count, 1);
        dev_state->max_pasids = pasids;

        ret = -ENOMEM;
        dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
        if (dev_state->states == NULL)
                goto out_free_dev_state;

        dev_state->domain = iommu_domain_alloc(&pci_bus_type);
        if (dev_state->domain == NULL)
                goto out_free_states;

        amd_iommu_domain_direct_map(dev_state->domain);

        ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
        if (ret)
                goto out_free_domain;

        group = iommu_group_get(&pdev->dev);
        if (!group) {
                ret = -EINVAL;
                goto out_free_domain;
        }

        ret = iommu_attach_group(dev_state->domain, group);
        if (ret != 0)
                goto out_drop_group;

        iommu_group_put(group);

        spin_lock_irqsave(&state_lock, flags);

        if (__get_device_state(devid) != NULL) {
                spin_unlock_irqrestore(&state_lock, flags);
                ret = -EBUSY;
                goto out_free_domain;
        }

        list_add_tail(&dev_state->list, &state_list);

        spin_unlock_irqrestore(&state_lock, flags);

        return 0;

out_drop_group:
        iommu_group_put(group);

out_free_domain:
        iommu_domain_free(dev_state->domain);

out_free_states:
        free_page((unsigned long)dev_state->states);

out_free_dev_state:
        kfree(dev_state);

        return ret;
}
EXPORT_SYMBOL(amd_iommu_init_device);

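/*
 * Typical consumer flow, as a hedged sketch (the error handling shown
 * is illustrative, not taken from an in-tree user):
 *
 *      ret = amd_iommu_init_device(pdev, 16);  // room for 16 PASIDs
 *      if (ret)
 *              return ret;
 *
 *      ret = amd_iommu_bind_pasid(pdev, pasid, current);
 *      if (ret) {
 *              amd_iommu_free_device(pdev);
 *              return ret;
 *      }
 *
 *      // ... device issues PRI/ATS traffic tagged with the PASID ...
 *
 *      amd_iommu_unbind_pasid(pdev, pasid);
 *      amd_iommu_free_device(pdev);
 */
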
void amd_iommu_free_device(struct pci_dev *pdev)
{
        struct device_state *dev_state;
        unsigned long flags;
        u16 devid;

        if (!amd_iommu_v2_supported())
                return;

        devid = device_id(pdev);

        spin_lock_irqsave(&state_lock, flags);

        dev_state = __get_device_state(devid);
        if (dev_state == NULL) {
                spin_unlock_irqrestore(&state_lock, flags);
                return;
        }

        list_del(&dev_state->list);

        spin_unlock_irqrestore(&state_lock, flags);

        /* Get rid of any remaining pasid states */
        free_pasid_states(dev_state);

        put_device_state(dev_state);
        /*
         * Wait until the last reference is dropped before freeing
         * the device state.
         */
        wait_event(dev_state->wq, !atomic_read(&dev_state->count));
        free_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_free_device);

int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
                                 amd_iommu_invalid_ppr_cb cb)
{
        struct device_state *dev_state;
        unsigned long flags;
        u16 devid;
        int ret;

        if (!amd_iommu_v2_supported())
                return -ENODEV;

        devid = device_id(pdev);

        spin_lock_irqsave(&state_lock, flags);

        ret = -EINVAL;
        dev_state = __get_device_state(devid);
        if (dev_state == NULL)
                goto out_unlock;

        dev_state->inv_ppr_cb = cb;

        ret = 0;

out_unlock:
        spin_unlock_irqrestore(&state_lock, flags);

        return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb);

int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
                                    amd_iommu_invalidate_ctx cb)
{
        struct device_state *dev_state;
        unsigned long flags;
        u16 devid;
        int ret;

        if (!amd_iommu_v2_supported())
                return -ENODEV;

        devid = device_id(pdev);

        spin_lock_irqsave(&state_lock, flags);

        ret = -EINVAL;
        dev_state = __get_device_state(devid);
        if (dev_state == NULL)
                goto out_unlock;

        dev_state->inv_ctx_cb = cb;

        ret = 0;

out_unlock:
        spin_unlock_irqrestore(&state_lock, flags);

        return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);

static int __init amd_iommu_v2_init(void)
{
        int ret;

        pr_info("AMD IOMMUv2 driver by Joerg Roedel <jroedel@suse.de>\n");

        if (!amd_iommu_v2_supported()) {
                pr_info("AMD IOMMUv2 functionality not available on this system\n");
                /*
                 * Load anyway to provide the symbols to other modules
                 * which may use AMD IOMMUv2 optionally.
                 */
                return 0;
        }

        ret = -ENOMEM;
        iommu_wq = alloc_workqueue("amd_iommu_v2", WQ_MEM_RECLAIM, 0);
        if (iommu_wq == NULL)
                goto out;

        amd_iommu_register_ppr_notifier(&ppr_nb);

        return 0;

out:
        return ret;
}

static void __exit amd_iommu_v2_exit(void)
{
        struct device_state *dev_state;
        int i;

        if (!amd_iommu_v2_supported())
                return;

        amd_iommu_unregister_ppr_notifier(&ppr_nb);

        flush_workqueue(iommu_wq);

        /*
         * The loop below might call flush_workqueue(), so call
         * destroy_workqueue() after it.
         */
        for (i = 0; i < MAX_DEVICES; ++i) {
                dev_state = get_device_state(i);

                if (dev_state == NULL)
                        continue;

                WARN_ON_ONCE(1);

                put_device_state(dev_state);
                amd_iommu_free_device(dev_state->pdev);
        }

        destroy_workqueue(iommu_wq);
}

module_init(amd_iommu_v2_init);
module_exit(amd_iommu_v2_exit);