/*
 * Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/mmu_notifier.h>
#include <linux/amd-iommu.h>
#include <linux/mm_types.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/iommu.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/gfp.h>

#include "amd_iommu_types.h"
#include "amd_iommu_proto.h"

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Joerg Roedel <joerg.roedel@amd.com>");

#define MAX_DEVICES		0x10000
#define PRI_QUEUE_SIZE		512
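
/*
 * PPR requests carry a 9-bit tag (see the "tag & 0x1ff" extraction in
 * ppr_notifier() below), so at most 512 tag states are tracked per
 * PASID.
 */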

/* Per-tag PPR state; the fields are driven by finish_pri_tag() */
struct pri_queue {
	atomic_t inflight;
	bool finish;
	int status;
};

struct pasid_state {
	struct list_head list;			/* For global state-list */
	atomic_t count;				/* Reference count */
	unsigned mmu_notifier_count;		/* Counting nested mmu_notifier
						   calls */
	struct mm_struct *mm;			/* mm_struct for the faults */
	struct mmu_notifier mn;			/* mmu_notifier handle */
	struct pri_queue pri[PRI_QUEUE_SIZE];	/* PRI tag states */
	struct device_state *device_state;	/* Link to our device_state */
	int pasid;				/* PASID index */
	bool invalid;				/* Used during setup and
						   teardown of the pasid */
	spinlock_t lock;			/* Protect pri_queues and
						   mmu_notifier_count */
	wait_queue_head_t wq;			/* To wait for count == 0 */
};

struct device_state {
	struct list_head list;
	u16 devid;
	atomic_t count;
	struct pci_dev *pdev;
	struct pasid_state **states;
	struct iommu_domain *domain;
	int pasid_levels;
	int max_pasids;
	amd_iommu_invalid_ppr_cb inv_ppr_cb;
	amd_iommu_invalidate_ctx inv_ctx_cb;
	spinlock_t lock;
	wait_queue_head_t wq;
};

struct fault {
	struct work_struct work;
	struct device_state *dev_state;
	struct pasid_state *state;
	u64 address;
	u16 pasid;
	u16 tag;
	u16 finish;
	u16 flags;
};

static LIST_HEAD(state_list);
static spinlock_t state_lock;

static struct workqueue_struct *iommu_wq;

static void free_pasid_states(struct device_state *dev_state);
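
/*
 * The IOMMU addresses a PCI function by its 16-bit requester ID:
 * bus number in the high byte, devfn in the low byte. device_id()
 * below folds a pci_dev into that form.
 */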

static u16 device_id(struct pci_dev *pdev)
{
	u16 devid;

	devid = pdev->bus->number;
	devid = (devid << 8) | pdev->devfn;

	return devid;
}

static struct device_state *__get_device_state(u16 devid)
{
	struct device_state *dev_state;

	list_for_each_entry(dev_state, &state_list, list) {
		if (dev_state->devid == devid)
			return dev_state;
	}

	return NULL;
}

static struct device_state *get_device_state(u16 devid)
{
	struct device_state *dev_state;
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	dev_state = __get_device_state(devid);
	if (dev_state != NULL)
		atomic_inc(&dev_state->count);
	spin_unlock_irqrestore(&state_lock, flags);

	return dev_state;
}

static void free_device_state(struct device_state *dev_state)
{
	/*
	 * First detach device from domain - No more PRI requests will arrive
	 * from that device after it is unbound from the IOMMUv2 domain.
	 */
	iommu_detach_device(dev_state->domain, &dev_state->pdev->dev);

	/* Everything is down now, free the IOMMUv2 domain */
	iommu_domain_free(dev_state->domain);

	/* Finally get rid of the device-state */
	kfree(dev_state);
}

static void put_device_state(struct device_state *dev_state)
{
	if (atomic_dec_and_test(&dev_state->count))
		wake_up(&dev_state->wq);
}

/* Must be called under dev_state->lock */
static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
						  int pasid, bool alloc)
{
	struct pasid_state **root, **ptr;
	int level, index;

	level = dev_state->pasid_levels;
	root = dev_state->states;

	while (true) {

		index = (pasid >> (9 * level)) & 0x1ff;
		ptr = &root[index];

		if (level == 0)
			break;

		if (*ptr == NULL) {
			if (!alloc)
				return NULL;

			*ptr = (void *)get_zeroed_page(GFP_ATOMIC);
			if (*ptr == NULL)
				return NULL;
		}

		root = (struct pasid_state **)*ptr;
		level -= 1;
	}

	return ptr;
}
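
/*
 * Worked example for the walk above: with pasid_levels == 1, PASID
 * 0x1234 is looked up via index (0x1234 >> 9) & 0x1ff == 0x09 in the
 * root page, then index 0x1234 & 0x1ff == 0x034 in the level-0 table.
 */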

static int set_pasid_state(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   int pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	ret = -ENOMEM;
	if (ptr == NULL)
		goto out_unlock;

	ret = -EBUSY;
	if (*ptr != NULL)
		goto out_unlock;

	*ptr = pasid_state;
	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}

static void clear_pasid_state(struct device_state *dev_state, int pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	if (ptr == NULL)
		goto out_unlock;

	*ptr = NULL;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);
}

static struct pasid_state *get_pasid_state(struct device_state *dev_state,
					   int pasid)
{
	struct pasid_state **ptr, *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, false);

	if (ptr == NULL)
		goto out_unlock;

	ret = *ptr;
	if (ret)
		atomic_inc(&ret->count);

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}

static void free_pasid_state(struct pasid_state *pasid_state)
{
	kfree(pasid_state);
}

static void put_pasid_state(struct pasid_state *pasid_state)
{
	if (atomic_dec_and_test(&pasid_state->count))
		wake_up(&pasid_state->wq);
}

static void put_pasid_state_wait(struct pasid_state *pasid_state)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&pasid_state->wq, &wait, TASK_UNINTERRUPTIBLE);

	if (!atomic_dec_and_test(&pasid_state->count))
		schedule();

	finish_wait(&pasid_state->wq, &wait);
	free_pasid_state(pasid_state);
}

static void unbind_pasid(struct pasid_state *pasid_state)
{
	struct iommu_domain *domain;

	domain = pasid_state->device_state->domain;

	/*
	 * Mark pasid_state as invalid; no more faults will be added to the
	 * work queue after this is visible everywhere.
	 */
	pasid_state->invalid = true;

	/* Make sure this is visible */
	smp_wmb();

	/* After this the device/pasid can't access the mm anymore */
	amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);

	/* Make sure no more pending faults are in the queue */
	flush_workqueue(iommu_wq);
}

static void free_pasid_states_level1(struct pasid_state **tbl)
{
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		free_page((unsigned long)tbl[i]);
	}
}

static void free_pasid_states_level2(struct pasid_state **tbl)
{
	struct pasid_state **ptr;
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		ptr = (struct pasid_state **)tbl[i];
		free_pasid_states_level1(ptr);

		/* Also free the intermediate table itself */
		free_page((unsigned long)tbl[i]);
	}
}

static void free_pasid_states(struct device_state *dev_state)
{
	struct pasid_state *pasid_state;
	int i;

	for (i = 0; i < dev_state->max_pasids; ++i) {
		pasid_state = get_pasid_state(dev_state, i);
		if (pasid_state == NULL)
			continue;

		put_pasid_state(pasid_state);

		/*
		 * This will call the mn_release function and
		 * unbind the PASID
		 */
		mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

		put_pasid_state_wait(pasid_state); /* Reference taken in
						      amd_iommu_bind_pasid */

		/* Drop reference taken in amd_iommu_bind_pasid */
		put_device_state(dev_state);
	}

	if (dev_state->pasid_levels == 2)
		free_pasid_states_level2(dev_state->states);
	else if (dev_state->pasid_levels == 1)
		free_pasid_states_level1(dev_state->states);
	else if (dev_state->pasid_levels != 0)
		BUG();

	free_page((unsigned long)dev_state->states);
}

static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
{
	return container_of(mn, struct pasid_state, mn);
}

static void __mn_flush_page(struct mmu_notifier *mn,
			    unsigned long address)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;

	pasid_state = mn_to_state(mn);
	dev_state = pasid_state->device_state;

	amd_iommu_flush_page(dev_state->domain, pasid_state->pasid, address);
}

static int mn_clear_flush_young(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long start,
				unsigned long end)
{
	for (; start < end; start += PAGE_SIZE)
		__mn_flush_page(mn, start);

	return 0;
}

static void mn_invalidate_page(struct mmu_notifier *mn,
			       struct mm_struct *mm,
			       unsigned long address)
{
	__mn_flush_page(mn, address);
}

static void mn_invalidate_range(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;

	pasid_state = mn_to_state(mn);
	dev_state = pasid_state->device_state;

	if ((start ^ (end - 1)) < PAGE_SIZE)
		amd_iommu_flush_page(dev_state->domain, pasid_state->pasid,
				     start);
	else
		amd_iommu_flush_tlb(dev_state->domain, pasid_state->pasid);
}
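
/*
 * The test above exploits that start and (end - 1) XOR to a value
 * below PAGE_SIZE exactly when they agree in all bits from PAGE_SHIFT
 * upwards, i.e. when the whole range lies in a single page: then one
 * page flush suffices, otherwise the per-PASID TLB is flushed.
 */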

static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	bool run_inv_ctx_cb;

	might_sleep();

	pasid_state = mn_to_state(mn);
	dev_state = pasid_state->device_state;
	run_inv_ctx_cb = !pasid_state->invalid;

	if (run_inv_ctx_cb && pasid_state->device_state->inv_ctx_cb)
		dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);

	unbind_pasid(pasid_state);
}
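
/*
 * The mmu_notifier ops below mirror CPU-side TLB maintenance into the
 * IOMMU: whenever the kernel changes or tears down mappings of the
 * bound mm, the matching IOTLB entries of the PASID are flushed so the
 * device cannot keep using stale translations.
 */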

static struct mmu_notifier_ops iommu_mn = {
	.release		= mn_release,
	.clear_flush_young	= mn_clear_flush_young,
	.invalidate_page	= mn_invalidate_page,
	.invalidate_range	= mn_invalidate_range,
};

static void set_pri_tag_status(struct pasid_state *pasid_state,
			       u16 tag, int status)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	pasid_state->pri[tag].status = status;
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}

static void finish_pri_tag(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   u16 tag)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
	    pasid_state->pri[tag].finish) {
		amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
				       pasid_state->pri[tag].status, tag);
		pasid_state->pri[tag].finish = false;
		pasid_state->pri[tag].status = PPR_SUCCESS;
	}
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}
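
/*
 * Note the completion protocol implemented above: several queued
 * faults may share one PPR tag, so the completion message is sent only
 * when the last in-flight fault of the tag has finished and the device
 * asked for a response (the finish flag was seen in ppr_notifier()).
 */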

static void handle_fault_error(struct fault *fault)
{
	int status;

	if (!fault->dev_state->inv_ppr_cb) {
		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
		return;
	}

	status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
					      fault->pasid,
					      fault->address,
					      fault->flags);
	switch (status) {
	case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
		set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
		break;
	case AMD_IOMMU_INV_PRI_RSP_INVALID:
		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
		break;
	case AMD_IOMMU_INV_PRI_RSP_FAIL:
		set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
		break;
	default:
		BUG();
	}
}

static void do_fault(struct work_struct *work)
{
	struct fault *fault = container_of(work, struct fault, work);
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	u64 address;
	int ret, write;

	write = !!(fault->flags & PPR_FAULT_WRITE);

	mm = fault->state->mm;
	address = fault->address;

	down_read(&mm->mmap_sem);
	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start) {
		/* failed to get a vma in the right range */
		up_read(&mm->mmap_sem);
		handle_fault_error(fault);
		goto out;
	}

	ret = handle_mm_fault(mm, vma, address, write);
	if (ret & VM_FAULT_ERROR) {
		/* failed to service fault */
		up_read(&mm->mmap_sem);
		handle_fault_error(fault);
		goto out;
	}

	up_read(&mm->mmap_sem);

out:
	finish_pri_tag(fault->dev_state, fault->state, fault->tag);

	put_pasid_state(fault->state);

	kfree(fault);
}

static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
{
	struct amd_iommu_fault *iommu_fault;
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	unsigned long flags;
	struct fault *fault;
	bool finish;
	u16 tag;
	int ret;

	iommu_fault = data;
	tag = iommu_fault->tag & 0x1ff;
	finish = (iommu_fault->tag >> 9) & 1;
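
	/*
	 * Tag layout as used here: bits 0-8 select one of the 512 pri[]
	 * slots; bit 9 is treated as a "response expected" flag - only
	 * when it was set does finish_pri_tag() send a completion
	 * message back to the device.
	 */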

	ret = NOTIFY_DONE;
	dev_state = get_device_state(iommu_fault->device_id);
	if (dev_state == NULL)
		goto out;

	pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
	if (pasid_state == NULL || pasid_state->invalid) {
		/* We know the device but not the PASID -> send INVALID */
		amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
				       PPR_INVALID, tag);
		goto out_drop_state;
	}

	spin_lock_irqsave(&pasid_state->lock, flags);
	atomic_inc(&pasid_state->pri[tag].inflight);
	if (finish)
		pasid_state->pri[tag].finish = true;
	spin_unlock_irqrestore(&pasid_state->lock, flags);

	fault = kzalloc(sizeof(*fault), GFP_ATOMIC);
	if (fault == NULL) {
		/* We are OOM - send success and let the device re-fault */
		finish_pri_tag(dev_state, pasid_state, tag);
		goto out_drop_state;
	}

	fault->dev_state = dev_state;
	fault->address = iommu_fault->address;
	fault->state = pasid_state;
	fault->tag = tag;
	fault->finish = finish;
	fault->pasid = iommu_fault->pasid;
	fault->flags = iommu_fault->flags;
	INIT_WORK(&fault->work, do_fault);

	queue_work(iommu_wq, &fault->work);

	ret = NOTIFY_OK;

out_drop_state:
	if (ret != NOTIFY_OK && pasid_state)
		put_pasid_state(pasid_state);

	put_device_state(dev_state);

out:
	return ret;
}

static struct notifier_block ppr_nb = {
	.notifier_call = ppr_notifier,
};

int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
			 struct task_struct *task)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	struct mm_struct *mm;
	u16 devid;
	int ret;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);
	dev_state = get_device_state(devid);

	if (dev_state == NULL)
		return -EINVAL;

	ret = -EINVAL;
	if (pasid < 0 || pasid >= dev_state->max_pasids)
		goto out;

	ret = -ENOMEM;
	pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
	if (pasid_state == NULL)
		goto out;

	atomic_set(&pasid_state->count, 1);
	init_waitqueue_head(&pasid_state->wq);
	spin_lock_init(&pasid_state->lock);

	mm = get_task_mm(task);
	pasid_state->mm = mm;
	pasid_state->device_state = dev_state;
	pasid_state->pasid = pasid;
	pasid_state->invalid = true; /* Mark as valid only if we are
					done with setting up the pasid */
	pasid_state->mn.ops = &iommu_mn;

	if (pasid_state->mm == NULL)
		goto out_free;

	mmu_notifier_register(&pasid_state->mn, mm);

	ret = set_pasid_state(dev_state, pasid_state, pasid);
	if (ret)
		goto out_unregister;

	ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
					__pa(pasid_state->mm->pgd));
	if (ret)
		goto out_clear_state;

	/* Now we are ready to handle faults */
	pasid_state->invalid = false;

	/*
	 * Drop the reference to the mm_struct here. We rely on the
	 * mmu_notifier release call-back to inform us when the mm
	 * is going away.
	 */
	mmput(mm);

	return 0;

out_clear_state:
	clear_pasid_state(dev_state, pasid);

out_unregister:
	mmu_notifier_unregister(&pasid_state->mn, mm);
	mmput(mm);

out_free:
	free_pasid_state(pasid_state);

out:
	put_device_state(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_bind_pasid);

void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	u16 devid;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return;

	devid = device_id(pdev);
	dev_state = get_device_state(devid);
	if (dev_state == NULL)
		return;

	if (pasid < 0 || pasid >= dev_state->max_pasids)
		goto out;

	pasid_state = get_pasid_state(dev_state, pasid);
	if (pasid_state == NULL)
		goto out;
	/*
	 * Drop reference taken here. We are safe because we still hold
	 * the reference taken in the amd_iommu_bind_pasid function.
	 */
	put_pasid_state(pasid_state);

	/* Clear the pasid state so that the pasid can be re-used */
	clear_pasid_state(dev_state, pasid_state->pasid);

	/*
	 * Call mmu_notifier_unregister to drop our reference
	 * to pasid_state->mm
	 */
	mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

	put_pasid_state_wait(pasid_state); /* Reference taken in
					      amd_iommu_bind_pasid */
out:
	/* Drop reference taken in this function */
	put_device_state(dev_state);

	/* Drop reference taken in amd_iommu_bind_pasid */
	put_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_unbind_pasid);

int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
{
	struct device_state *dev_state;
	unsigned long flags;
	int ret, tmp;
	u16 devid;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	if (pasids <= 0 || pasids > (PASID_MASK + 1))
		return -EINVAL;

	devid = device_id(pdev);

	dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
	if (dev_state == NULL)
		return -ENOMEM;

	spin_lock_init(&dev_state->lock);
	init_waitqueue_head(&dev_state->wq);
	dev_state->pdev = pdev;
	dev_state->devid = devid;

	tmp = pasids;
	for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
		dev_state->pasid_levels += 1;
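
	/*
	 * Example: pasids == 65536 leaves pasid_levels == 1, since
	 * (65536 - 1) & ~0x1ff is non-zero on the first pass and
	 * (128 - 1) & ~0x1ff is zero after one shift by 9. One root
	 * page of 512 pointers then reaches 512 * 512 PASIDs through
	 * a single level of intermediate tables.
	 */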

	atomic_set(&dev_state->count, 1);
	dev_state->max_pasids = pasids;

	ret = -ENOMEM;
	dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
	if (dev_state->states == NULL)
		goto out_free_dev_state;

	dev_state->domain = iommu_domain_alloc(&pci_bus_type);
	if (dev_state->domain == NULL)
		goto out_free_states;

	amd_iommu_domain_direct_map(dev_state->domain);

	ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
	if (ret)
		goto out_free_domain;

	ret = iommu_attach_device(dev_state->domain, &pdev->dev);
	if (ret != 0)
		goto out_free_domain;

	spin_lock_irqsave(&state_lock, flags);

	if (__get_device_state(devid) != NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		ret = -EBUSY;
		goto out_free_domain;
	}

	list_add_tail(&dev_state->list, &state_list);

	spin_unlock_irqrestore(&state_lock, flags);

	return 0;

out_free_domain:
	iommu_domain_free(dev_state->domain);

out_free_states:
	free_page((unsigned long)dev_state->states);

out_free_dev_state:
	kfree(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_init_device);

void amd_iommu_free_device(struct pci_dev *pdev)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;

	if (!amd_iommu_v2_supported())
		return;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	dev_state = __get_device_state(devid);
	if (dev_state == NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		return;
	}

	list_del(&dev_state->list);

	spin_unlock_irqrestore(&state_lock, flags);

	/* Get rid of any remaining pasid states */
	free_pasid_states(dev_state);

	put_device_state(dev_state);
	/*
	 * Wait until the last reference is dropped before freeing
	 * the device state.
	 */
	wait_event(dev_state->wq, !atomic_read(&dev_state->count));
	free_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_free_device);
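
/*
 * Typical driver-facing call sequence (a sketch only; error handling
 * omitted):
 *
 *	amd_iommu_init_device(pdev, pasids);
 *	amd_iommu_bind_pasid(pdev, pasid, task);
 *	... device issues PRI/PASID traffic on behalf of task ...
 *	amd_iommu_unbind_pasid(pdev, pasid);
 *	amd_iommu_free_device(pdev);
 */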

int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
				 amd_iommu_invalid_ppr_cb cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(devid);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ppr_cb = cb;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb);

int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
				    amd_iommu_invalidate_ctx cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(devid);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ctx_cb = cb;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);

static int __init amd_iommu_v2_init(void)
{
	int ret;

	pr_info("AMD IOMMUv2 driver by Joerg Roedel <joerg.roedel@amd.com>\n");

	if (!amd_iommu_v2_supported()) {
		pr_info("AMD IOMMUv2 functionality not available on this system\n");

		/*
		 * Load anyway to provide the symbols to other modules
		 * which may use AMD IOMMUv2 optionally.
		 */
		return 0;
	}

	spin_lock_init(&state_lock);

	ret = -ENOMEM;
	iommu_wq = create_workqueue("amd_iommu_v2");
	if (iommu_wq == NULL)
		goto out;

	amd_iommu_register_ppr_notifier(&ppr_nb);

	return 0;

out:
	return ret;
}

static void __exit amd_iommu_v2_exit(void)
{
	struct device_state *dev_state;
	int i;

	if (!amd_iommu_v2_supported())
		return;

	amd_iommu_unregister_ppr_notifier(&ppr_nb);

	flush_workqueue(iommu_wq);

	/*
	 * The loop below might call flush_workqueue(), so call
	 * destroy_workqueue() after it
	 */
	for (i = 0; i < MAX_DEVICES; ++i) {
		dev_state = get_device_state(i);

		if (dev_state == NULL)
			continue;

		WARN_ON_ONCE(1);

		put_device_state(dev_state);
		amd_iommu_free_device(dev_state->pdev);
	}

	destroy_workqueue(iommu_wq);
}

module_init(amd_iommu_v2_init);
module_exit(amd_iommu_v2_exit);