/*
 * Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/mmu_notifier.h>
#include <linux/amd-iommu.h>
#include <linux/mm_types.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/iommu.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/gfp.h>

#include "amd_iommu_types.h"
#include "amd_iommu_proto.h"

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Joerg Roedel <jroedel@suse.de>");

#define MAX_DEVICES		0x10000
#define PRI_QUEUE_SIZE		512
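
/*
 * Per-tag state for Page Request Interface (PRI) requests: inflight
 * counts outstanding work items for the tag, finish marks whether a
 * completion must be sent, and status carries the response code.
 */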
struct pri_queue {
	atomic_t inflight;
	bool finish;
	int status;
};

struct pasid_state {
	struct list_head list;			/* For global state-list */
	atomic_t count;				/* Reference count */
	unsigned mmu_notifier_count;		/* Counting nested mmu_notifier
						   calls */
	struct mm_struct *mm;			/* mm_struct for the faults */
	struct mmu_notifier mn;			/* mmu_notifier handle */
	struct pri_queue pri[PRI_QUEUE_SIZE];	/* PRI tag states */
	struct device_state *device_state;	/* Link to our device_state */
	int pasid;				/* PASID index */
	bool invalid;				/* Used during setup and
						   teardown of the pasid */
	spinlock_t lock;			/* Protect pri_queues and
						   mmu_notifier_count */
	wait_queue_head_t wq;			/* To wait for count == 0 */
};

struct device_state {
	struct list_head list;
	u16 devid;
	atomic_t count;
	struct pci_dev *pdev;
	struct pasid_state **states;
	struct iommu_domain *domain;
	int pasid_levels;
	int max_pasids;
	amd_iommu_invalid_ppr_cb inv_ppr_cb;
	amd_iommu_invalidate_ctx inv_ctx_cb;
	spinlock_t lock;
	wait_queue_head_t wq;
};

struct fault {
	struct work_struct work;
	struct device_state *dev_state;
	struct pasid_state *state;
	struct mm_struct *mm;
	u64 address;
	u16 devid;
	u16 pasid;
	u16 tag;
	u16 finish;
	u16 flags;
};

static LIST_HEAD(state_list);
static spinlock_t state_lock;

static struct workqueue_struct *iommu_wq;

static void free_pasid_states(struct device_state *dev_state);
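
/* Build the PCI requester id: bus number in bits 15:8, devfn in bits 7:0 */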
static u16 device_id(struct pci_dev *pdev)
{
	u16 devid;

	devid = pdev->bus->number;
	devid = (devid << 8) | pdev->devfn;

	return devid;
}

static struct device_state *__get_device_state(u16 devid)
{
	struct device_state *dev_state;

	list_for_each_entry(dev_state, &state_list, list) {
		if (dev_state->devid == devid)
			return dev_state;
	}

	return NULL;
}

static struct device_state *get_device_state(u16 devid)
{
	struct device_state *dev_state;
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	dev_state = __get_device_state(devid);
	if (dev_state != NULL)
		atomic_inc(&dev_state->count);
	spin_unlock_irqrestore(&state_lock, flags);

	return dev_state;
}

static void free_device_state(struct device_state *dev_state)
{
	struct iommu_group *group;

	/*
	 * First detach device from domain - No more PRI requests will arrive
	 * from that device after it is unbound from the IOMMUv2 domain.
	 */
	group = iommu_group_get(&dev_state->pdev->dev);
	if (WARN_ON(!group))
		return;

	iommu_detach_group(dev_state->domain, group);

	iommu_group_put(group);

	/* Everything is down now, free the IOMMUv2 domain */
	iommu_domain_free(dev_state->domain);

	/* Finally get rid of the device-state */
	kfree(dev_state);
}

static void put_device_state(struct device_state *dev_state)
{
	if (atomic_dec_and_test(&dev_state->count))
		wake_up(&dev_state->wq);
}
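
/*
 * pasid_state pointers are kept in a small radix-tree-like table: each
 * level resolves nine bits of the PASID, so every table is one zeroed
 * page holding 512 pointers, mirroring the layout of CPU page tables.
 */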
/* Must be called under dev_state->lock */
static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
						  int pasid, bool alloc)
{
	struct pasid_state **root, **ptr;
	int level, index;

	level = dev_state->pasid_levels;
	root  = dev_state->states;

	while (true) {

		index = (pasid >> (9 * level)) & 0x1ff;
		ptr   = &root[index];

		if (level == 0)
			break;

		if (*ptr == NULL) {
			if (!alloc)
				return NULL;

			*ptr = (void *)get_zeroed_page(GFP_ATOMIC);
			if (*ptr == NULL)
				return NULL;
		}

		root   = (struct pasid_state **)*ptr;
		level -= 1;
	}

	return ptr;
}

static int set_pasid_state(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   int pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	ret = -ENOMEM;
	if (ptr == NULL)
		goto out_unlock;

	ret = -ENOMEM;
	if (*ptr != NULL)
		goto out_unlock;

	*ptr = pasid_state;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}

static void clear_pasid_state(struct device_state *dev_state, int pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	if (ptr == NULL)
		goto out_unlock;

	*ptr = NULL;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);
}

static struct pasid_state *get_pasid_state(struct device_state *dev_state,
					   int pasid)
{
	struct pasid_state **ptr, *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, false);

	if (ptr == NULL)
		goto out_unlock;

	ret = *ptr;
	if (ret)
		atomic_inc(&ret->count);

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}

static void free_pasid_state(struct pasid_state *pasid_state)
{
	kfree(pasid_state);
}

static void put_pasid_state(struct pasid_state *pasid_state)
{
	if (atomic_dec_and_test(&pasid_state->count))
		wake_up(&pasid_state->wq);
}

static void put_pasid_state_wait(struct pasid_state *pasid_state)
{
	atomic_dec(&pasid_state->count);
	wait_event(pasid_state->wq, !atomic_read(&pasid_state->count));
	free_pasid_state(pasid_state);
}
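
/*
 * Tear down a PASID binding: once pasid_state->invalid is visible no
 * new faults are queued, the GCR3 entry is cleared so the device loses
 * access to the mm, and the workqueue is flushed to drain faults that
 * were already queued.
 */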
static void unbind_pasid(struct pasid_state *pasid_state)
{
	struct iommu_domain *domain;

	domain = pasid_state->device_state->domain;

	/*
	 * Mark pasid_state as invalid; no more faults will be added to the
	 * work queue after this is visible everywhere.
	 */
	pasid_state->invalid = true;

	/* Make sure this is visible */
	smp_wmb();

	/* After this the device/pasid can't access the mm anymore */
	amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);

	/* Make sure no more pending faults are in the queue */
	flush_workqueue(iommu_wq);
}
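
/* Free the page-sized tables of the PASID state tree, level by level */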
static void free_pasid_states_level1(struct pasid_state **tbl)
{
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		free_page((unsigned long)tbl[i]);
	}
}

static void free_pasid_states_level2(struct pasid_state **tbl)
{
	struct pasid_state **ptr;
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		ptr = (struct pasid_state **)tbl[i];
		free_pasid_states_level1(ptr);
	}
}

static void free_pasid_states(struct device_state *dev_state)
{
	struct pasid_state *pasid_state;
	int i;

	for (i = 0; i < dev_state->max_pasids; ++i) {
		pasid_state = get_pasid_state(dev_state, i);
		if (pasid_state == NULL)
			continue;

		put_pasid_state(pasid_state);

		/*
		 * This will call the mn_release function and
		 * unbind the PASID
		 */
		mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

		put_pasid_state_wait(pasid_state); /* Reference taken in
						      amd_iommu_bind_pasid */

		/* Drop reference taken in amd_iommu_bind_pasid */
		put_device_state(dev_state);
	}

	if (dev_state->pasid_levels == 2)
		free_pasid_states_level2(dev_state->states);
	else if (dev_state->pasid_levels == 1)
		free_pasid_states_level1(dev_state->states);
	else
		BUG_ON(dev_state->pasid_levels != 0);

	free_page((unsigned long)dev_state->states);
}

static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
{
	return container_of(mn, struct pasid_state, mn);
}
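
/*
 * mmu_notifier callbacks: mirror CPU-side TLB invalidations to the
 * IOMMU so the device's view of the address space stays coherent.
 */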
static void __mn_flush_page(struct mmu_notifier *mn,
			    unsigned long address)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;

	pasid_state = mn_to_state(mn);
	dev_state   = pasid_state->device_state;

	amd_iommu_flush_page(dev_state->domain, pasid_state->pasid, address);
}

static int mn_clear_flush_young(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long start,
				unsigned long end)
{
	for (; start < end; start += PAGE_SIZE)
		__mn_flush_page(mn, start);

	return 0;
}

static void mn_invalidate_range(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;

	pasid_state = mn_to_state(mn);
	dev_state   = pasid_state->device_state;

	/*
	 * Flush a single page when the range fits within one page,
	 * otherwise flush the whole TLB for this PASID.
	 */
	if ((start ^ (end - 1)) < PAGE_SIZE)
		amd_iommu_flush_page(dev_state->domain, pasid_state->pasid,
				     start);
	else
		amd_iommu_flush_tlb(dev_state->domain, pasid_state->pasid);
}

static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	bool run_inv_ctx_cb;

	might_sleep();

	pasid_state    = mn_to_state(mn);
	dev_state      = pasid_state->device_state;
	run_inv_ctx_cb = !pasid_state->invalid;

	if (run_inv_ctx_cb && dev_state->inv_ctx_cb)
		dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);

	unbind_pasid(pasid_state);
}

static const struct mmu_notifier_ops iommu_mn = {
	.flags			= MMU_INVALIDATE_DOES_NOT_BLOCK,
	.release		= mn_release,
	.clear_flush_young	= mn_clear_flush_young,
	.invalidate_range	= mn_invalidate_range,
};

static void set_pri_tag_status(struct pasid_state *pasid_state,
			       u16 tag, int status)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	pasid_state->pri[tag].status = status;
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}

static void finish_pri_tag(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   u16 tag)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
	    pasid_state->pri[tag].finish) {
		amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
				       pasid_state->pri[tag].status, tag);
		pasid_state->pri[tag].finish = false;
		pasid_state->pri[tag].status = PPR_SUCCESS;
	}
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}
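
/*
 * A fault could not be handled; ask the device driver's registered
 * invalid-PPR callback how to respond, defaulting to PPR_INVALID.
 */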
static void handle_fault_error(struct fault *fault)
{
	int status;

	if (!fault->dev_state->inv_ppr_cb) {
		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
		return;
	}

	status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
					      fault->pasid,
					      fault->address,
					      fault->flags);
	switch (status) {
	case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
		set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
		break;
	case AMD_IOMMU_INV_PRI_RSP_INVALID:
		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
		break;
	case AMD_IOMMU_INV_PRI_RSP_FAIL:
		set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
		break;
	default:
		BUG();
	}
}
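
/* Check that the faulting access is allowed by the vma's permissions */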
static bool access_error(struct vm_area_struct *vma, struct fault *fault)
{
	unsigned long requested = 0;

	if (fault->flags & PPR_FAULT_EXEC)
		requested |= VM_EXEC;

	if (fault->flags & PPR_FAULT_READ)
		requested |= VM_READ;

	if (fault->flags & PPR_FAULT_WRITE)
		requested |= VM_WRITE;

	return (requested & ~vma->vm_flags) != 0;
}

static void do_fault(struct work_struct *work)
{
	struct fault *fault = container_of(work, struct fault, work);
	struct vm_area_struct *vma;
	int ret = VM_FAULT_ERROR;
	unsigned int flags = 0;
	struct mm_struct *mm;
	u64 address;

	mm = fault->state->mm;
	address = fault->address;

	if (fault->flags & PPR_FAULT_USER)
		flags |= FAULT_FLAG_USER;
	if (fault->flags & PPR_FAULT_WRITE)
		flags |= FAULT_FLAG_WRITE;
	flags |= FAULT_FLAG_REMOTE;

	down_read(&mm->mmap_sem);
	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		/* failed to get a vma in the right range */
		goto out;

	/* Check if we have the right permissions on the vma */
	if (access_error(vma, fault))
		goto out;

	ret = handle_mm_fault(vma, address, flags);
out:
	up_read(&mm->mmap_sem);

	if (ret & VM_FAULT_ERROR)
		/* failed to service fault */
		handle_fault_error(fault);

	finish_pri_tag(fault->dev_state, fault->state, fault->tag);

	put_pasid_state(fault->state);

	kfree(fault);
}
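
/*
 * Called from the IOMMU's PPR log notifier chain, potentially in atomic
 * context: allocate with GFP_ATOMIC and defer the actual fault handling
 * to the iommu_wq workqueue.
 */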
static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
{
	struct amd_iommu_fault *iommu_fault;
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	unsigned long flags;
	struct fault *fault;
	bool finish;
	u16 tag, devid;
	int ret;
	struct iommu_dev_data *dev_data;
	struct pci_dev *pdev = NULL;

	iommu_fault = data;
	tag         = iommu_fault->tag & 0x1ff;
	finish      = (iommu_fault->tag >> 9) & 1;

	devid = iommu_fault->device_id;
	pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
					   devid & 0xff);
	if (!pdev)
		return -ENODEV;
	dev_data = get_dev_data(&pdev->dev);

	/* In kdump kernel pci dev is not initialized yet -> send INVALID */
	ret = NOTIFY_DONE;
	if (translation_pre_enabled(amd_iommu_rlookup_table[devid])
		&& dev_data->defer_attach) {
		amd_iommu_complete_ppr(pdev, iommu_fault->pasid,
				       PPR_INVALID, tag);
		goto out;
	}

	dev_state = get_device_state(iommu_fault->device_id);
	if (dev_state == NULL)
		goto out;

	pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
	if (pasid_state == NULL || pasid_state->invalid) {
		/* We know the device but not the PASID -> send INVALID */
		amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
				       PPR_INVALID, tag);
		goto out_drop_state;
	}

	spin_lock_irqsave(&pasid_state->lock, flags);
	atomic_inc(&pasid_state->pri[tag].inflight);
	if (finish)
		pasid_state->pri[tag].finish = true;
	spin_unlock_irqrestore(&pasid_state->lock, flags);

	fault = kzalloc(sizeof(*fault), GFP_ATOMIC);
	if (fault == NULL) {
		/* We are OOM - send success and let the device re-fault */
		finish_pri_tag(dev_state, pasid_state, tag);
		goto out_drop_state;
	}

	fault->dev_state = dev_state;
	fault->address   = iommu_fault->address;
	fault->state     = pasid_state;
	fault->tag       = tag;
	fault->finish    = finish;
	fault->pasid     = iommu_fault->pasid;
	fault->flags     = iommu_fault->flags;
	INIT_WORK(&fault->work, do_fault);

	queue_work(iommu_wq, &fault->work);

	ret = NOTIFY_OK;

out_drop_state:
	if (ret != NOTIFY_OK && pasid_state)
		put_pasid_state(pasid_state);

	put_device_state(dev_state);

out:
	return ret;
}

static struct notifier_block ppr_nb = {
	.notifier_call = ppr_notifier,
};
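
/*
 * Typical call sequence for a device driver using this API (a sketch;
 * error handling omitted, the surrounding driver is hypothetical):
 *
 *	ret = amd_iommu_init_device(pdev, num_pasids);
 *	ret = amd_iommu_bind_pasid(pdev, pasid, current);
 *	... device issues PASID-tagged DMA and page requests ...
 *	amd_iommu_unbind_pasid(pdev, pasid);
 *	amd_iommu_free_device(pdev);
 */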
int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
			 struct task_struct *task)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	struct mm_struct *mm;
	u16 devid;
	int ret;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid     = device_id(pdev);
	dev_state = get_device_state(devid);

	if (dev_state == NULL)
		return -EINVAL;

	ret = -EINVAL;
	if (pasid < 0 || pasid >= dev_state->max_pasids)
		goto out;

	ret = -ENOMEM;
	pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
	if (pasid_state == NULL)
		goto out;

	atomic_set(&pasid_state->count, 1);
	init_waitqueue_head(&pasid_state->wq);
	spin_lock_init(&pasid_state->lock);

	mm                        = get_task_mm(task);
	pasid_state->mm           = mm;
	pasid_state->device_state = dev_state;
	pasid_state->pasid        = pasid;
	pasid_state->invalid      = true; /* Mark as valid only if we are
					     done with setting up the pasid */
	pasid_state->mn.ops       = &iommu_mn;

	if (pasid_state->mm == NULL)
		goto out_free;

	mmu_notifier_register(&pasid_state->mn, mm);

	ret = set_pasid_state(dev_state, pasid_state, pasid);
	if (ret)
		goto out_unregister;

	ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
					__pa(pasid_state->mm->pgd));
	if (ret)
		goto out_clear_state;

	/* Now we are ready to handle faults */
	pasid_state->invalid = false;

	/*
	 * Drop the reference to the mm_struct here. We rely on the
	 * mmu_notifier release call-back to inform us when the mm
	 * is going away.
	 */
	mmput(mm);

	return 0;

out_clear_state:
	clear_pasid_state(dev_state, pasid);

out_unregister:
	mmu_notifier_unregister(&pasid_state->mn, mm);
	mmput(mm);

out_free:
	free_pasid_state(pasid_state);

out:
	put_device_state(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_bind_pasid);

void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	u16 devid;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return;

	devid = device_id(pdev);
	dev_state = get_device_state(devid);
	if (dev_state == NULL)
		return;

	if (pasid < 0 || pasid >= dev_state->max_pasids)
		goto out;

	pasid_state = get_pasid_state(dev_state, pasid);
	if (pasid_state == NULL)
		goto out;
	/*
	 * Drop reference taken here. We are safe because we still hold
	 * the reference taken in the amd_iommu_bind_pasid function.
	 */
	put_pasid_state(pasid_state);

	/* Clear the pasid state so that the pasid can be re-used */
	clear_pasid_state(dev_state, pasid_state->pasid);

	/*
	 * Call mmu_notifier_unregister to drop our reference
	 * to pasid_state->mm
	 */
	mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

	put_pasid_state_wait(pasid_state); /* Reference taken in
					      amd_iommu_bind_pasid */
out:
	/* Drop reference taken in this function */
	put_device_state(dev_state);

	/* Drop reference taken in amd_iommu_bind_pasid */
	put_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_unbind_pasid);
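
/*
 * Set up per-device IOMMUv2 state: allocate the root of the PASID state
 * table, create a direct-mapped domain with v2 paging enabled for the
 * requested number of PASIDs, and attach the device's IOMMU group to it.
 */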
int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
{
	struct device_state *dev_state;
	struct iommu_group *group;
	unsigned long flags;
	int ret, tmp;
	u16 devid;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	if (pasids <= 0 || pasids > (PASID_MASK + 1))
		return -EINVAL;

	devid = device_id(pdev);

	dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
	if (dev_state == NULL)
		return -ENOMEM;

	spin_lock_init(&dev_state->lock);
	init_waitqueue_head(&dev_state->wq);
	dev_state->pdev  = pdev;
	dev_state->devid = devid;

	tmp = pasids;
	for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
		dev_state->pasid_levels += 1;

	atomic_set(&dev_state->count, 1);
	dev_state->max_pasids = pasids;

	ret = -ENOMEM;
	dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
	if (dev_state->states == NULL)
		goto out_free_dev_state;

	dev_state->domain = iommu_domain_alloc(&pci_bus_type);
	if (dev_state->domain == NULL)
		goto out_free_states;

	amd_iommu_domain_direct_map(dev_state->domain);

	ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
	if (ret)
		goto out_free_domain;

	group = iommu_group_get(&pdev->dev);
	if (!group) {
		ret = -EINVAL;
		goto out_free_domain;
	}

	ret = iommu_attach_group(dev_state->domain, group);
	if (ret != 0)
		goto out_drop_group;

	iommu_group_put(group);

	spin_lock_irqsave(&state_lock, flags);

	if (__get_device_state(devid) != NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		ret = -EBUSY;
		goto out_free_domain;
	}

	list_add_tail(&dev_state->list, &state_list);

	spin_unlock_irqrestore(&state_lock, flags);

	return 0;

out_drop_group:
	iommu_group_put(group);

out_free_domain:
	iommu_domain_free(dev_state->domain);

out_free_states:
	free_page((unsigned long)dev_state->states);

out_free_dev_state:
	kfree(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_init_device);

void amd_iommu_free_device(struct pci_dev *pdev)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;

	if (!amd_iommu_v2_supported())
		return;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	dev_state = __get_device_state(devid);
	if (dev_state == NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		return;
	}

	list_del(&dev_state->list);

	spin_unlock_irqrestore(&state_lock, flags);

	/* Get rid of any remaining pasid states */
	free_pasid_states(dev_state);

	put_device_state(dev_state);
	/*
	 * Wait until the last reference is dropped before freeing
	 * the device state.
	 */
	wait_event(dev_state->wq, !atomic_read(&dev_state->count));
	free_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_free_device);

int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
				 amd_iommu_invalid_ppr_cb cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(devid);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ppr_cb = cb;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb);

int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
				    amd_iommu_invalidate_ctx cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(devid);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ctx_cb = cb;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);

static int __init amd_iommu_v2_init(void)
{
	int ret;

	pr_info("AMD IOMMUv2 driver by Joerg Roedel <jroedel@suse.de>\n");

	if (!amd_iommu_v2_supported()) {
		pr_info("AMD IOMMUv2 functionality not available on this system\n");
		/*
		 * Load anyway to provide the symbols to other modules
		 * which may use AMD IOMMUv2 optionally.
		 */
		return 0;
	}

	spin_lock_init(&state_lock);

	ret = -ENOMEM;
	iommu_wq = alloc_workqueue("amd_iommu_v2", WQ_MEM_RECLAIM, 0);
	if (iommu_wq == NULL)
		goto out;

	amd_iommu_register_ppr_notifier(&ppr_nb);

	return 0;

out:
	return ret;
}

static void __exit amd_iommu_v2_exit(void)
{
	struct device_state *dev_state;
	int i;

	if (!amd_iommu_v2_supported())
		return;

	amd_iommu_unregister_ppr_notifier(&ppr_nb);

	flush_workqueue(iommu_wq);

	/*
	 * The loop below might call flush_workqueue(), so call
	 * destroy_workqueue() after it
	 */
	for (i = 0; i < MAX_DEVICES; ++i) {
		dev_state = get_device_state(i);

		if (dev_state == NULL)
			continue;

		WARN_ON_ONCE(1);

		put_device_state(dev_state);
		amd_iommu_free_device(dev_state->pdev);
	}

	destroy_workqueue(iommu_wq);
}

module_init(amd_iommu_v2_init);
module_exit(amd_iommu_v2_exit);