// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2015 Intel Corporation.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>
 */
8 #include <linux/intel-iommu.h>
9 #include <linux/mmu_notifier.h>
10 #include <linux/sched.h>
11 #include <linux/sched/mm.h>
12 #include <linux/slab.h>
13 #include <linux/intel-svm.h>
14 #include <linux/rculist.h>
15 #include <linux/pci.h>
16 #include <linux/pci-ats.h>
17 #include <linux/dmar.h>
18 #include <linux/interrupt.h>
19 #include <linux/mm_types.h>
20 #include <linux/xarray.h>
21 #include <linux/ioasid.h>
23 #include <asm/fpu/api.h>
24 #include <trace/events/intel_iommu.h>
28 #include "../iommu-sva-lib.h"
30 static irqreturn_t prq_event_thread(int irq, void *d);
31 static void intel_svm_drain_prq(struct device *dev, u32 pasid);
32 #define to_intel_svm_dev(handle) container_of(handle, struct intel_svm_dev, sva)
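/*
 * Global xarray mapping a PASID to its private data, a struct intel_svm.
 * Bind adds an entry, the page request handler and the lookup helpers read
 * it, and the entry is erased when the last device is unbound from the PASID.
 */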
static DEFINE_XARRAY_ALLOC(pasid_private_array);
static int pasid_private_add(ioasid_t pasid, void *priv)
{
	return xa_alloc(&pasid_private_array, &pasid, priv,
			XA_LIMIT(pasid, pasid), GFP_ATOMIC);
}

static void pasid_private_remove(ioasid_t pasid)
{
	xa_erase(&pasid_private_array, pasid);
}

static void *pasid_private_find(ioasid_t pasid)
{
	return xa_load(&pasid_private_array, pasid);
}
static struct intel_svm_dev *
svm_lookup_device_by_sid(struct intel_svm *svm, u16 sid)
{
	struct intel_svm_dev *sdev = NULL, *t;

	rcu_read_lock();
	list_for_each_entry_rcu(t, &svm->devs, list) {
		if (t->sid == sid) {
			sdev = t;
			break;
		}
	}
	rcu_read_unlock();

	return sdev;
}
static struct intel_svm_dev *
svm_lookup_device_by_dev(struct intel_svm *svm, struct device *dev)
{
	struct intel_svm_dev *sdev = NULL, *t;

	rcu_read_lock();
	list_for_each_entry_rcu(t, &svm->devs, list) {
		if (t->dev == dev) {
			sdev = t;
			break;
		}
	}
	rcu_read_unlock();

	return sdev;
}
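/*
 * Set up page request handling for @iommu: allocate the page request queue
 * and the I/O page fault queue, hook up the threaded PRQ interrupt, and
 * program the queue address/head/tail registers.
 */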
85 int intel_svm_enable_prq(struct intel_iommu *iommu)
87 struct iopf_queue *iopfq;
91 pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
93 pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
97 iommu->prq = page_address(pages);
99 irq = dmar_alloc_hwirq(DMAR_UNITS_SUPPORTED + iommu->seq_id, iommu->node, iommu);
101 pr_err("IOMMU: %s: Failed to create IRQ vector for page request queue\n",
108 snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name),
109 "dmar%d-iopfq", iommu->seq_id);
110 iopfq = iopf_queue_alloc(iommu->iopfq_name);
112 pr_err("IOMMU: %s: Failed to allocate iopf queue\n", iommu->name);
116 iommu->iopf_queue = iopfq;
118 snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id);
120 ret = request_threaded_irq(irq, NULL, prq_event_thread, IRQF_ONESHOT,
121 iommu->prq_name, iommu);
123 pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n",
127 dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
128 dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
129 dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER);
131 init_completion(&iommu->prq_complete);
136 iopf_queue_free(iommu->iopf_queue);
137 iommu->iopf_queue = NULL;
139 dmar_free_hwirq(irq);
142 free_pages((unsigned long)iommu->prq, PRQ_ORDER);
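/*
 * Reverse intel_svm_enable_prq(): clear the queue registers and release the
 * PRQ interrupt, the I/O page fault queue and the queue memory.
 */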
148 int intel_svm_finish_prq(struct intel_iommu *iommu)
150 dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
151 dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
152 dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL);
155 free_irq(iommu->pr_irq, iommu);
156 dmar_free_hwirq(iommu->pr_irq);
160 if (iommu->iopf_queue) {
161 iopf_queue_free(iommu->iopf_queue);
162 iommu->iopf_queue = NULL;
165 free_pages((unsigned long)iommu->prq, PRQ_ORDER);
171 static inline bool intel_svm_capable(struct intel_iommu *iommu)
173 return iommu->flags & VTD_FLAG_SVM_CAPABLE;
176 void intel_svm_check(struct intel_iommu *iommu)
178 if (!pasid_supported(iommu))
181 if (cpu_feature_enabled(X86_FEATURE_GBPAGES) &&
182 !cap_fl1gp_support(iommu->cap)) {
183 pr_err("%s SVM disabled, incompatible 1GB page capability\n",
188 if (cpu_feature_enabled(X86_FEATURE_LA57) &&
189 !cap_5lp_support(iommu->cap)) {
190 pr_err("%s SVM disabled, incompatible paging mode\n",
195 iommu->flags |= VTD_FLAG_SVM_CAPABLE;
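/*
 * Flush the PASID-based IOTLB entries for one device and, when ATS is
 * enabled, the matching device TLB entries as well.
 */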
198 static void __flush_svm_range_dev(struct intel_svm *svm,
199 struct intel_svm_dev *sdev,
200 unsigned long address,
201 unsigned long pages, int ih)
203 struct device_domain_info *info = get_domain_info(sdev->dev);
208 qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih);
209 if (info->ats_enabled)
210 qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
211 svm->pasid, sdev->qdep, address,
212 order_base_2(pages));
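/*
 * PASID-based invalidation works on an aligned, power-of-two number of
 * pages. Round the requested range up and issue one flush per naturally
 * aligned chunk.
 */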
215 static void intel_flush_svm_range_dev(struct intel_svm *svm,
216 struct intel_svm_dev *sdev,
217 unsigned long address,
218 unsigned long pages, int ih)
220 unsigned long shift = ilog2(__roundup_pow_of_two(pages));
221 unsigned long align = (1ULL << (VTD_PAGE_SHIFT + shift));
222 unsigned long start = ALIGN_DOWN(address, align);
223 unsigned long end = ALIGN(address + (pages << VTD_PAGE_SHIFT), align);
225 while (start < end) {
226 __flush_svm_range_dev(svm, sdev, start, align >> VTD_PAGE_SHIFT, ih);
231 static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
232 unsigned long pages, int ih)
234 struct intel_svm_dev *sdev;
237 list_for_each_entry_rcu(sdev, &svm->devs, list)
238 intel_flush_svm_range_dev(svm, sdev, address, pages, ih);
242 /* Pages have been freed at this point */
243 static void intel_invalidate_range(struct mmu_notifier *mn,
244 struct mm_struct *mm,
245 unsigned long start, unsigned long end)
247 struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
249 intel_flush_svm_range(svm, start,
250 (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
253 static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
255 struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
256 struct intel_svm_dev *sdev;
258 /* This might end up being called from exit_mmap(), *before* the page
259 * tables are cleared. And __mmu_notifier_release() will delete us from
260 * the list of notifiers so that our invalidate_range() callback doesn't
261 * get called when the page tables are cleared. So we need to protect
262 * against hardware accessing those page tables.
264 * We do it by clearing the entry in the PASID table and then flushing
265 * the IOTLB and the PASID table caches. This might upset hardware;
266 * perhaps we'll want to point the PASID to a dummy PGD (like the zero
267 * page) so that we end up taking a fault that the hardware really
	 * *has* to handle gracefully without affecting other processes.
	 */
271 list_for_each_entry_rcu(sdev, &svm->devs, list)
272 intel_pasid_tear_down_entry(sdev->iommu, sdev->dev,
278 static const struct mmu_notifier_ops intel_mmuops = {
279 .release = intel_mm_release,
280 .invalidate_range = intel_invalidate_range,
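/* pasid_mutex serializes SVM bind/unbind operations and PASID lookups. */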
283 static DEFINE_MUTEX(pasid_mutex);
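/*
 * Resolve @pasid to its struct intel_svm and the intel_svm_dev that matches
 * @dev. The caller must hold pasid_mutex.
 */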
285 static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid,
286 struct intel_svm **rsvm,
287 struct intel_svm_dev **rsdev)
289 struct intel_svm_dev *sdev = NULL;
290 struct intel_svm *svm;
292 /* The caller should hold the pasid_mutex lock */
293 if (WARN_ON(!mutex_is_locked(&pasid_mutex)))
296 if (pasid == INVALID_IOASID || pasid >= PASID_MAX)
299 svm = pasid_private_find(pasid);
	/*
	 * If we found an svm for the PASID, there must be at least one
	 * device bound to it.
	 */
310 if (WARN_ON(list_empty(&svm->devs)))
312 sdev = svm_lookup_device_by_dev(svm, dev);
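/*
 * Bind a guest PASID to a device: set up nested translation (the guest
 * first-level page table on top of the host second-level) for data->hpasid
 * and record the binding in the PASID private data.
 */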
321 int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
322 struct iommu_gpasid_bind_data *data)
324 struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
325 struct intel_svm_dev *sdev = NULL;
326 struct dmar_domain *dmar_domain;
327 struct device_domain_info *info;
328 struct intel_svm *svm = NULL;
329 unsigned long iflags;
332 if (WARN_ON(!iommu) || !data)
335 if (data->format != IOMMU_PASID_FORMAT_INTEL_VTD)
338 /* IOMMU core ensures argsz is more than the start of the union */
339 if (data->argsz < offsetofend(struct iommu_gpasid_bind_data, vendor.vtd))
342 /* Make sure no undefined flags are used in vendor data */
343 if (data->vendor.vtd.flags & ~(IOMMU_SVA_VTD_GPASID_LAST - 1))
346 if (!dev_is_pci(dev))
349 /* VT-d supports devices with full 20 bit PASIDs only */
350 if (pci_max_pasids(to_pci_dev(dev)) != PASID_MAX)
	/*
	 * We only check the host PASID range; we have no knowledge with which
	 * to check the guest PASID range.
	 */
357 if (data->hpasid <= 0 || data->hpasid >= PASID_MAX)
360 info = get_domain_info(dev);
364 dmar_domain = to_dmar_domain(domain);
366 mutex_lock(&pasid_mutex);
367 ret = pasid_to_svm_sdev(dev, data->hpasid, &svm, &sdev);
		/*
		 * Do not allow multiple bindings of the same device-PASID since
		 * there is only one SL page table per PASID. We may revisit this
		 * once sharing a PGD across domains is supported.
		 */
377 dev_warn_ratelimited(dev, "Already bound with PASID %u\n",
	/* We come here when the PASID has never been bound to a device. */
385 svm = kzalloc(sizeof(*svm), GFP_KERNEL);
		/*
		 * REVISIT: the upper layer/VFIO can track the host process that
		 * binds the PASID. ioasid_set = mm might be sufficient for vfio
		 * to check PASID VMM ownership. We can drop the following line
		 * once the VFIO and IOASID set check is in place.
		 */
395 svm->mm = get_task_mm(current);
396 svm->pasid = data->hpasid;
397 if (data->flags & IOMMU_SVA_GPASID_VAL) {
398 svm->gpasid = data->gpasid;
399 svm->flags |= SVM_FLAG_GUEST_PASID;
401 pasid_private_add(data->hpasid, svm);
402 INIT_LIST_HEAD_RCU(&svm->devs);
405 sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
411 sdev->sid = PCI_DEVID(info->bus, info->devfn);
414 /* Only count users if device has aux domains */
415 if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
418 /* Set up device context entry for PASID if not enabled already */
419 ret = intel_iommu_enable_pasid(iommu, sdev->dev);
421 dev_err_ratelimited(dev, "Failed to enable PASID capability\n");
	/*
	 * The PASID table is per device for better security. Therefore, for
	 * each bind of a new device, even with an existing PASID, we need to
	 * call the nested mode setup function here.
	 */
431 spin_lock_irqsave(&iommu->lock, iflags);
432 ret = intel_pasid_setup_nested(iommu, dev,
433 (pgd_t *)(uintptr_t)data->gpgd,
434 data->hpasid, &data->vendor.vtd, dmar_domain,
436 spin_unlock_irqrestore(&iommu->lock, iflags);
438 dev_err_ratelimited(dev, "Failed to set up PASID %llu in nested mode, Err %d\n",
		/*
		 * The PASID entry should be in a cleared state if nested mode
		 * setup failed, so we only need to clear the IOASID tracking
		 * data so that the free call will succeed.
		 */
449 svm->flags |= SVM_FLAG_GUEST_MODE;
451 init_rcu_head(&sdev->rcu);
452 list_add_rcu(&sdev->list, &svm->devs);
454 if (!IS_ERR_OR_NULL(svm) && list_empty(&svm->devs)) {
455 pasid_private_remove(data->hpasid);
459 mutex_unlock(&pasid_mutex);
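/*
 * Undo intel_svm_bind_gpasid(): tear down the PASID entry, drain any pending
 * page requests and drop the device from the PASID's device list.
 */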
463 int intel_svm_unbind_gpasid(struct device *dev, u32 pasid)
465 struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
466 struct intel_svm_dev *sdev;
467 struct intel_svm *svm;
473 mutex_lock(&pasid_mutex);
474 ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev);
479 if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
482 list_del_rcu(&sdev->list);
483 intel_pasid_tear_down_entry(iommu, dev,
485 intel_svm_drain_prq(dev, svm->pasid);
486 kfree_rcu(sdev, rcu);
488 if (list_empty(&svm->devs)) {
		/*
		 * We do not free the IOASID here because the IOMMU driver did
		 * not allocate it. Unlike native SVM, the IOASID for guest use
		 * was allocated prior to the bind call. In any case, if the
		 * free call comes before the unbind, the IOMMU driver will get
		 * notified and perform the cleanup.
		 */
498 pasid_private_remove(pasid);
504 mutex_unlock(&pasid_mutex);
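/*
 * Propagate a PASID value into mm->pasid and refresh the PASID MSR on every
 * CPU that is currently running one of the mm's tasks.
 */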
508 static void _load_pasid(void *unused)
513 static void load_pasid(struct mm_struct *mm, u32 pasid)
515 mutex_lock(&mm->context.lock);
517 /* Synchronize with READ_ONCE in update_pasid(). */
518 smp_store_release(&mm->pasid, pasid);
520 /* Update PASID MSR on all CPUs running the mm's tasks. */
521 on_each_cpu_mask(mm_cpumask(mm), _load_pasid, NULL, true);
523 mutex_unlock(&mm->context.lock);
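/*
 * Allocate a PASID for @mm, limited by what the device (for PCI, its PASID
 * capability) or the IOMMU can address.
 */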
526 static int intel_svm_alloc_pasid(struct device *dev, struct mm_struct *mm,
529 ioasid_t max_pasid = dev_is_pci(dev) ?
530 pci_max_pasids(to_pci_dev(dev)) : intel_pasid_max_id;
532 return iommu_sva_alloc_pasid(mm, PASID_MIN, max_pasid - 1);
535 static void intel_svm_free_pasid(struct mm_struct *mm)
537 iommu_sva_free_pasid(mm);
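/*
 * Bind @mm to a device for native SVA: find or allocate the per-PASID
 * struct intel_svm, register an MMU notifier (unless this is a
 * supervisor-mode bind), set up the first-level PASID table entry and add
 * the device to the PASID's device list. Called with pasid_mutex held.
 */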
540 static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu,
542 struct mm_struct *mm,
545 struct device_domain_info *info = get_domain_info(dev);
546 unsigned long iflags, sflags;
547 struct intel_svm_dev *sdev;
548 struct intel_svm *svm;
551 svm = pasid_private_find(mm->pasid);
553 svm = kzalloc(sizeof(*svm), GFP_KERNEL);
555 return ERR_PTR(-ENOMEM);
557 svm->pasid = mm->pasid;
560 INIT_LIST_HEAD_RCU(&svm->devs);
562 if (!(flags & SVM_FLAG_SUPERVISOR_MODE)) {
563 svm->notifier.ops = &intel_mmuops;
564 ret = mmu_notifier_register(&svm->notifier, mm);
571 ret = pasid_private_add(svm->pasid, svm);
573 if (svm->notifier.ops)
574 mmu_notifier_unregister(&svm->notifier, mm);
580 /* Find the matching device in svm list */
581 sdev = svm_lookup_device_by_dev(svm, dev);
587 sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
595 sdev->did = FLPT_DEFAULT_DID;
596 sdev->sid = PCI_DEVID(info->bus, info->devfn);
598 sdev->pasid = svm->pasid;
600 init_rcu_head(&sdev->rcu);
601 if (info->ats_enabled) {
603 sdev->qdep = info->ats_qdep;
604 if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
608 /* Setup the pasid table: */
609 sflags = (flags & SVM_FLAG_SUPERVISOR_MODE) ?
610 PASID_FLAG_SUPERVISOR_MODE : 0;
611 sflags |= cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0;
612 spin_lock_irqsave(&iommu->lock, iflags);
613 ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, mm->pasid,
614 FLPT_DEFAULT_DID, sflags);
615 spin_unlock_irqrestore(&iommu->lock, iflags);
	/* The newly allocated pasid is loaded into the mm. */
621 if (!(flags & SVM_FLAG_SUPERVISOR_MODE) && list_empty(&svm->devs))
622 load_pasid(mm, svm->pasid);
624 list_add_rcu(&sdev->list, &svm->devs);
631 if (list_empty(&svm->devs)) {
632 if (svm->notifier.ops)
633 mmu_notifier_unregister(&svm->notifier, mm);
634 pasid_private_remove(mm->pasid);
641 /* Caller must hold pasid_mutex */
642 static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
644 struct intel_svm_dev *sdev;
645 struct intel_iommu *iommu;
646 struct intel_svm *svm;
647 struct mm_struct *mm;
650 iommu = device_to_iommu(dev, NULL, NULL);
654 ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev);
662 list_del_rcu(&sdev->list);
663 /* Flush the PASID cache and IOTLB for this device.
664 * Note that we do depend on the hardware *not* using
665 * the PASID any more. Just as we depend on other
666 * devices never using PASIDs that they have no right
667 * to use. We have a *shared* PASID table, because it's
668 * large and has to be physically contiguous. So it's
669 * hard to be as defensive as we might like. */
670 intel_pasid_tear_down_entry(iommu, dev,
672 intel_svm_drain_prq(dev, svm->pasid);
673 kfree_rcu(sdev, rcu);
675 if (list_empty(&svm->devs)) {
676 if (svm->notifier.ops) {
677 mmu_notifier_unregister(&svm->notifier, mm);
678 /* Clear mm's pasid. */
679 load_pasid(mm, PASID_DISABLED);
681 pasid_private_remove(svm->pasid);
682 /* We mandate that no page faults may be outstanding
683 * for the PASID when intel_svm_unbind_mm() is called.
684 * If that is not obeyed, subtle errors will happen.
685 * Let's make them less subtle... */
686 memset(svm, 0x6b, sizeof(*svm));
	/* Drop a reference to the PASID and free it when no references remain. */
691 intel_svm_free_pasid(mm);
697 /* Page request queue descriptor */
698 struct page_req_dsc {
703 u64 priv_data_present:1;
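/*
 * An address is canonical when its upper bits are a sign-extension of bit
 * __VIRTUAL_MASK_SHIFT, i.e. the value is valid for the CPU's implemented
 * virtual address width.
 */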
726 static bool is_canonical_address(u64 addr)
728 int shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
729 long saddr = (long) addr;
731 return (((saddr << shift) >> shift) == saddr);
/**
 * intel_svm_drain_prq - Drain page requests and responses for a pasid
 * @dev: target device
 * @pasid: pasid for draining
 *
 * Drain all pending page requests and responses related to @pasid in both
 * software and hardware. This is supposed to be called after the device
 * driver has stopped DMA, the pasid entry has been cleared, and both IOTLB
 * and DevTLB have been invalidated.
 *
 * It waits until all pending page requests for @pasid in the page fault
 * queue are completed by the prq handling thread. It then follows the steps
 * described in VT-d spec CH7.10 to drain all page requests and page
 * responses pending in the hardware.
 */
749 static void intel_svm_drain_prq(struct device *dev, u32 pasid)
751 struct device_domain_info *info;
752 struct dmar_domain *domain;
753 struct intel_iommu *iommu;
754 struct qi_desc desc[3];
755 struct pci_dev *pdev;
760 info = get_domain_info(dev);
761 if (WARN_ON(!info || !dev_is_pci(dev)))
764 if (!info->pri_enabled)
768 domain = info->domain;
769 pdev = to_pci_dev(dev);
770 sid = PCI_DEVID(info->bus, info->devfn);
771 did = domain->iommu_did[iommu->seq_id];
772 qdep = pci_ats_queue_depth(pdev);
	/*
	 * Check and wait until all pending page requests in the queue are
	 * handled by the prq handling thread.
	 */
779 reinit_completion(&iommu->prq_complete);
780 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
781 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
782 while (head != tail) {
783 struct page_req_dsc *req;
785 req = &iommu->prq[head / sizeof(*req)];
786 if (!req->pasid_present || req->pasid != pasid) {
787 head = (head + sizeof(*req)) & PRQ_RING_MASK;
791 wait_for_completion(&iommu->prq_complete);
795 iopf_queue_flush_dev(dev);
	/*
	 * Perform steps described in VT-d spec CH7.10 to drain page
	 * requests and responses in hardware.
	 */
801 memset(desc, 0, sizeof(desc));
802 desc[0].qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
805 desc[1].qw0 = QI_EIOTLB_PASID(pasid) |
807 QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
809 desc[2].qw0 = QI_DEV_EIOTLB_PASID(pasid) |
810 QI_DEV_EIOTLB_SID(sid) |
811 QI_DEV_EIOTLB_QDEP(qdep) |
813 QI_DEV_IOTLB_PFSID(info->pfsid);
815 reinit_completion(&iommu->prq_complete);
816 qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN);
817 if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
818 wait_for_completion(&iommu->prq_complete);
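/* Translate the access bits of a page request into IOMMU_FAULT_PERM_* flags. */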
823 static int prq_to_iommu_prot(struct page_req_dsc *req)
828 prot |= IOMMU_FAULT_PERM_READ;
830 prot |= IOMMU_FAULT_PERM_WRITE;
832 prot |= IOMMU_FAULT_PERM_EXEC;
834 prot |= IOMMU_FAULT_PERM_PRIV;
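/*
 * Translate a page request descriptor into a generic iommu_fault_event and
 * hand it to the fault reporting framework via iommu_report_device_fault().
 */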
839 static int intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev,
840 struct page_req_dsc *desc)
842 struct iommu_fault_event event;
844 if (!dev || !dev_is_pci(dev))
847 /* Fill in event data for device specific processing */
848 memset(&event, 0, sizeof(struct iommu_fault_event));
849 event.fault.type = IOMMU_FAULT_PAGE_REQ;
850 event.fault.prm.addr = (u64)desc->addr << VTD_PAGE_SHIFT;
851 event.fault.prm.pasid = desc->pasid;
852 event.fault.prm.grpid = desc->prg_index;
853 event.fault.prm.perm = prq_to_iommu_prot(desc);
856 event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
857 if (desc->pasid_present) {
858 event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
859 event.fault.prm.flags |= IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
861 if (desc->priv_data_present) {
		/*
		 * Set the last-page-in-group bit if private data is present:
		 * a page response is then required, just as it is for LPIG.
		 * iommu_report_device_fault() doesn't understand this
		 * vendor-specific requirement, so we set last_page as a
		 * workaround.
		 */
868 event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
869 event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA;
870 event.fault.prm.private_data[0] = desc->priv_data[0];
871 event.fault.prm.private_data[1] = desc->priv_data[1];
872 } else if (dmar_latency_enabled(iommu, DMAR_LATENCY_PRQ)) {
		/*
		 * If the private data fields are not used by hardware, use
		 * them to monitor the PRQ handling latency.
		 */
877 event.fault.prm.private_data[0] = ktime_to_ns(ktime_get());
880 return iommu_report_device_fault(dev, &event);
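/*
 * A malformed page request cannot be serviced. Log it and, if the descriptor
 * requires a response (LPIG or private data present), send a page group
 * response carrying @result.
 */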
883 static void handle_bad_prq_event(struct intel_iommu *iommu,
884 struct page_req_dsc *req, int result)
888 pr_err("%s: Invalid page request: %08llx %08llx\n",
889 iommu->name, ((unsigned long long *)req)[0],
890 ((unsigned long long *)req)[1]);
	/*
	 * Per VT-d spec. v3.0 ch7.7, system software must respond with a
	 * page group response if private data is present (PDP) or the
	 * last page in group (LPIG) bit is set. This is an additional
	 * VT-d requirement beyond the PCI ATS spec.
	 */
899 if (!req->lpig && !req->priv_data_present)
902 desc.qw0 = QI_PGRP_PASID(req->pasid) |
903 QI_PGRP_DID(req->rid) |
904 QI_PGRP_PASID_P(req->pasid_present) |
905 QI_PGRP_PDP(req->priv_data_present) |
906 QI_PGRP_RESP_CODE(result) |
908 desc.qw1 = QI_PGRP_IDX(req->prg_index) |
909 QI_PGRP_LPIG(req->lpig);
911 if (req->priv_data_present) {
912 desc.qw2 = req->priv_data[0];
913 desc.qw3 = req->priv_data[1];
919 qi_submit_sync(iommu, &desc, 1, 0);
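/*
 * Threaded handler for the page request queue interrupt. It walks the ring
 * from head to tail, sanity-checks each descriptor, reports it as an I/O
 * page fault, advances the head register, and finally recovers from a queue
 * overflow if one is flagged.
 */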
922 static irqreturn_t prq_event_thread(int irq, void *d)
924 struct intel_svm_dev *sdev = NULL;
925 struct intel_iommu *iommu = d;
926 struct intel_svm *svm = NULL;
927 struct page_req_dsc *req;
928 int head, tail, handled;
	/*
	 * Clear PPR bit before reading head/tail registers, to ensure that
	 * we get a new interrupt if needed.
	 */
935 writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);
937 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
938 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
939 handled = (head != tail);
940 while (head != tail) {
941 req = &iommu->prq[head / sizeof(*req)];
942 address = (u64)req->addr << VTD_PAGE_SHIFT;
944 if (unlikely(!req->pasid_present)) {
945 pr_err("IOMMU: %s: Page request without PASID\n",
950 handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
954 if (unlikely(!is_canonical_address(address))) {
955 pr_err("IOMMU: %s: Address is not canonical\n",
960 if (unlikely(req->pm_req && (req->rd_req | req->wr_req))) {
961 pr_err("IOMMU: %s: Page request in Privilege Mode\n",
966 if (unlikely(req->exe_req && req->rd_req)) {
967 pr_err("IOMMU: %s: Execution request not supported\n",
972 if (!svm || svm->pasid != req->pasid) {
			/*
			 * It can't go away, because the driver is not permitted
			 * to unbind the mm while any page faults are outstanding.
			 */
977 svm = pasid_private_find(req->pasid);
978 if (IS_ERR_OR_NULL(svm) || (svm->flags & SVM_FLAG_SUPERVISOR_MODE))
982 if (!sdev || sdev->sid != req->rid) {
983 sdev = svm_lookup_device_by_sid(svm, req->rid);
988 sdev->prq_seq_number++;
		/*
		 * If the prq is to be handled outside the iommu driver via a
		 * receiver of the fault notifiers, we skip the page response
		 * here.
		 */
994 if (intel_svm_prq_report(iommu, sdev->dev, req))
995 handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
997 trace_prq_report(iommu, sdev->dev, req->qw_0, req->qw_1,
998 req->priv_data[0], req->priv_data[1],
999 sdev->prq_seq_number);
1001 head = (head + sizeof(*req)) & PRQ_RING_MASK;
1004 dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);
	/*
	 * Clear the page request overflow bit and wake up all threads that
	 * are waiting for the completion of this handling.
	 */
1010 if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
1011 pr_info_ratelimited("IOMMU: %s: PRQ overflow detected\n",
1013 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
1014 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
1016 iopf_queue_discard_partial(iommu->iopf_queue);
1017 writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
1018 pr_info_ratelimited("IOMMU: %s: PRQ overflow cleared",
1023 if (!completion_done(&iommu->prq_complete))
1024 complete(&iommu->prq_complete);
1026 return IRQ_RETVAL(handled);
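/*
 * SVA bind entry point: validate any SVM_FLAG_SUPERVISOR_MODE request
 * against the IOMMU capabilities, allocate a PASID for @mm and bind it to
 * @dev via intel_svm_bind_mm(), all under pasid_mutex.
 */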
1029 struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
1031 struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
1032 unsigned int flags = 0;
1033 struct iommu_sva *sva;
1037 flags = *(unsigned int *)drvdata;
1039 if (flags & SVM_FLAG_SUPERVISOR_MODE) {
1040 if (!ecap_srs(iommu->ecap)) {
1041 dev_err(dev, "%s: Supervisor PASID not supported\n",
1043 return ERR_PTR(-EOPNOTSUPP);
1047 dev_err(dev, "%s: Supervisor PASID with user provided mm\n",
1049 return ERR_PTR(-EINVAL);
1055 mutex_lock(&pasid_mutex);
1056 ret = intel_svm_alloc_pasid(dev, mm, flags);
1058 mutex_unlock(&pasid_mutex);
1059 return ERR_PTR(ret);
1062 sva = intel_svm_bind_mm(iommu, dev, mm, flags);
1063 if (IS_ERR_OR_NULL(sva))
1064 intel_svm_free_pasid(mm);
1065 mutex_unlock(&pasid_mutex);
1070 void intel_svm_unbind(struct iommu_sva *sva)
1072 struct intel_svm_dev *sdev = to_intel_svm_dev(sva);
1074 mutex_lock(&pasid_mutex);
1075 intel_svm_unbind_mm(sdev->dev, sdev->pasid);
1076 mutex_unlock(&pasid_mutex);
1079 u32 intel_svm_get_pasid(struct iommu_sva *sva)
1081 struct intel_svm_dev *sdev;
1084 mutex_lock(&pasid_mutex);
1085 sdev = to_intel_svm_dev(sva);
1086 pasid = sdev->pasid;
1087 mutex_unlock(&pasid_mutex);
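/*
 * Send the response for a page request that was previously reported via
 * iommu_report_device_fault(). The PASID in the response must be valid and
 * bound to @dev (and, for guest mode, owned by the caller's mm); a page
 * group response descriptor is submitted to hardware when LPIG or private
 * data require one.
 */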
1092 int intel_svm_page_response(struct device *dev,
1093 struct iommu_fault_event *evt,
1094 struct iommu_page_response *msg)
1096 struct iommu_fault_page_request *prm;
1097 struct intel_svm_dev *sdev = NULL;
1098 struct intel_svm *svm = NULL;
1099 struct intel_iommu *iommu;
1100 bool private_present;
1107 if (!dev || !dev_is_pci(dev))
1110 iommu = device_to_iommu(dev, &bus, &devfn);
1117 mutex_lock(&pasid_mutex);
1119 prm = &evt->fault.prm;
1120 sid = PCI_DEVID(bus, devfn);
1121 pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
1122 private_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA;
1123 last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
1125 if (!pasid_present) {
1130 if (prm->pasid == 0 || prm->pasid >= PASID_MAX) {
1135 ret = pasid_to_svm_sdev(dev, prm->pasid, &svm, &sdev);
	/*
	 * For responses from userspace, we need to make sure that the
	 * pasid has been bound to its mm.
	 */
1145 if (svm->flags & SVM_FLAG_GUEST_MODE) {
1146 struct mm_struct *mm;
1148 mm = get_task_mm(current);
1154 if (mm != svm->mm) {
	/*
	 * Per VT-d spec. v3.0 ch7.7, system software must respond
	 * with page group response if private data is present (PDP)
	 * or last page in group (LPIG) bit is set. This is an
	 * additional VT-d requirement beyond PCI ATS spec.
	 */
1169 if (last_page || private_present) {
1170 struct qi_desc desc;
1172 desc.qw0 = QI_PGRP_PASID(prm->pasid) | QI_PGRP_DID(sid) |
1173 QI_PGRP_PASID_P(pasid_present) |
1174 QI_PGRP_PDP(private_present) |
1175 QI_PGRP_RESP_CODE(msg->code) |
1177 desc.qw1 = QI_PGRP_IDX(prm->grpid) | QI_PGRP_LPIG(last_page);
1181 if (private_present) {
1182 desc.qw2 = prm->private_data[0];
1183 desc.qw3 = prm->private_data[1];
1184 } else if (prm->private_data[0]) {
1185 dmar_latency_update(iommu, DMAR_LATENCY_PRQ,
1186 ktime_to_ns(ktime_get()) - prm->private_data[0]);
1189 qi_submit_sync(iommu, &desc, 1, 0);
1192 mutex_unlock(&pasid_mutex);