1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright © 2015 Intel Corporation.
5 * Authors: David Woodhouse <dwmw2@infradead.org>
8 #include <linux/intel-iommu.h>
9 #include <linux/mmu_notifier.h>
10 #include <linux/sched.h>
11 #include <linux/sched/mm.h>
12 #include <linux/slab.h>
13 #include <linux/intel-svm.h>
14 #include <linux/rculist.h>
15 #include <linux/pci.h>
16 #include <linux/pci-ats.h>
17 #include <linux/dmar.h>
18 #include <linux/interrupt.h>
19 #include <linux/mm_types.h>
20 #include <linux/ioasid.h>
22 #include <asm/fpu/api.h>
26 static irqreturn_t prq_event_thread(int irq, void *d);
27 static void intel_svm_drain_prq(struct device *dev, u32 pasid);
31 int intel_svm_enable_prq(struct intel_iommu *iommu)
36 pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
38 pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
42 iommu->prq = page_address(pages);
44 irq = dmar_alloc_hwirq(DMAR_UNITS_SUPPORTED + iommu->seq_id, iommu->node, iommu);
46 pr_err("IOMMU: %s: Failed to create IRQ vector for page request queue\n",
50 free_pages((unsigned long)iommu->prq, PRQ_ORDER);
56 snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id);
58 ret = request_threaded_irq(irq, NULL, prq_event_thread, IRQF_ONESHOT,
59 iommu->prq_name, iommu);
61 pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n",
67 dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
68 dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
69 dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER);
71 init_completion(&iommu->prq_complete);
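/*
 * The three writes above reset the page request queue head and tail
 * pointers and program DMAR_PQA_REG with the physical base of the queue;
 * the low-order bits of PQA encode the queue size as 2^PRQ_ORDER pages.
 */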
76 int intel_svm_finish_prq(struct intel_iommu *iommu)
78 dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
79 dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
80 dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL);
83 free_irq(iommu->pr_irq, iommu);
84 dmar_free_hwirq(iommu->pr_irq);
88 free_pages((unsigned long)iommu->prq, PRQ_ORDER);
94 static inline bool intel_svm_capable(struct intel_iommu *iommu)
96 return iommu->flags & VTD_FLAG_SVM_CAPABLE;
99 void intel_svm_check(struct intel_iommu *iommu)
101 if (!pasid_supported(iommu))
104 if (cpu_feature_enabled(X86_FEATURE_GBPAGES) &&
105 !cap_fl1gp_support(iommu->cap)) {
106 pr_err("%s SVM disabled, incompatible 1GB page capability\n",
111 if (cpu_feature_enabled(X86_FEATURE_LA57) &&
112 !cap_5lp_support(iommu->cap)) {
113 pr_err("%s SVM disabled, incompatible paging mode\n",
118 iommu->flags |= VTD_FLAG_SVM_CAPABLE;
121 static void intel_flush_svm_range_dev(struct intel_svm *svm, struct intel_svm_dev *sdev,
122 unsigned long address, unsigned long pages, int ih)
127 desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
128 QI_EIOTLB_DID(sdev->did) |
129 QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
133 int mask = ilog2(__roundup_pow_of_two(pages));
135 desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
136 QI_EIOTLB_DID(sdev->did) |
137 QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
139 desc.qw1 = QI_EIOTLB_ADDR(address) |
145 qi_submit_sync(svm->iommu, &desc, 1, 0);
147 if (sdev->dev_iotlb) {
148 desc.qw0 = QI_DEV_EIOTLB_PASID(svm->pasid) |
149 QI_DEV_EIOTLB_SID(sdev->sid) |
150 QI_DEV_EIOTLB_QDEP(sdev->qdep) |
153 desc.qw1 = QI_DEV_EIOTLB_ADDR(-1ULL >> 1) |
155 } else if (pages > 1) {
156 /* The least significant zero bit indicates the size. So,
157 * for example, an "address" value of 0x12345f000 will
158 * flush from 0x123440000 to 0x12347ffff (256KiB). */
159 unsigned long last = address + ((unsigned long)(pages - 1) << VTD_PAGE_SHIFT);
160 unsigned long mask = __rounddown_pow_of_two(address ^ last);
162 desc.qw1 = QI_DEV_EIOTLB_ADDR((address & ~mask) |
163 (mask - 1)) | QI_DEV_EIOTLB_SIZE;
165 desc.qw1 = QI_DEV_EIOTLB_ADDR(address);
169 qi_submit_sync(svm->iommu, &desc, 1, 0);
173 static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
174 unsigned long pages, int ih)
176 struct intel_svm_dev *sdev;
179 list_for_each_entry_rcu(sdev, &svm->devs, list)
180 intel_flush_svm_range_dev(svm, sdev, address, pages, ih);
184 /* Pages have been freed at this point */
185 static void intel_invalidate_range(struct mmu_notifier *mn,
186 struct mm_struct *mm,
187 unsigned long start, unsigned long end)
189 struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
191 intel_flush_svm_range(svm, start,
192 (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
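/*
 * The expression above converts the byte range [start, end) into a VT-d
 * page count, rounding up.  For example, start = 0x1000 and end = 0x3001
 * cover three 4KiB pages: (0x2001 + 0xfff) >> 12 == 3.
 */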
195 static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
197 struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
198 struct intel_svm_dev *sdev;
200 /* This might end up being called from exit_mmap(), *before* the page
201 * tables are cleared. And __mmu_notifier_release() will delete us from
202 * the list of notifiers so that our invalidate_range() callback doesn't
203 * get called when the page tables are cleared. So we need to protect
204 * against hardware accessing those page tables.
206 * We do it by clearing the entry in the PASID table and then flushing
207 * the IOTLB and the PASID table caches. This might upset hardware;
208 * perhaps we'll want to point the PASID to a dummy PGD (like the zero
209 * page) so that we end up taking a fault that the hardware really
210 * *has* to handle gracefully without affecting other processes.
213 list_for_each_entry_rcu(sdev, &svm->devs, list)
214 intel_pasid_tear_down_entry(svm->iommu, sdev->dev,
220 static const struct mmu_notifier_ops intel_mmuops = {
221 .release = intel_mm_release,
222 .invalidate_range = intel_invalidate_range,
225 static DEFINE_MUTEX(pasid_mutex);
226 static LIST_HEAD(global_svm_list);
228 #define for_each_svm_dev(sdev, svm, d) \
229 list_for_each_entry((sdev), &(svm)->devs, list) \
230 if ((d) != (sdev)->dev) {} else
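/*
 * Usage sketch: iterate only the entries in svm->devs that belong to a
 * given struct device:
 *
 *	for_each_svm_dev(sdev, svm, dev) {
 *		... only reached when sdev->dev == dev ...
 *	}
 *
 * The "{} else" form makes the caller's statement the else branch, so a
 * following else cannot accidentally bind to the macro's internal if.
 */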
232 static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid,
233 struct intel_svm **rsvm,
234 struct intel_svm_dev **rsdev)
236 struct intel_svm_dev *d, *sdev = NULL;
237 struct intel_svm *svm;
239 /* The caller should hold the pasid_mutex lock */
240 if (WARN_ON(!mutex_is_locked(&pasid_mutex)))
243 if (pasid == INVALID_IOASID || pasid >= PASID_MAX)
246 svm = ioasid_find(NULL, pasid, NULL);
254 * If we found svm for the PASID, there must be at least one device
257 if (WARN_ON(list_empty(&svm->devs)))
261 list_for_each_entry_rcu(d, &svm->devs, list) {
276 int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
277 struct iommu_gpasid_bind_data *data)
279 struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
280 struct intel_svm_dev *sdev = NULL;
281 struct dmar_domain *dmar_domain;
282 struct intel_svm *svm = NULL;
285 if (WARN_ON(!iommu) || !data)
288 if (data->version != IOMMU_GPASID_BIND_VERSION_1 ||
289 data->format != IOMMU_PASID_FORMAT_INTEL_VTD)
292 if (!dev_is_pci(dev))
295 /* VT-d supports devices with full 20-bit PASIDs only */
296 if (pci_max_pasids(to_pci_dev(dev)) != PASID_MAX)
300 * We only check the host PASID range; we have no way to check
303 if (data->hpasid <= 0 || data->hpasid >= PASID_MAX)
306 dmar_domain = to_dmar_domain(domain);
308 mutex_lock(&pasid_mutex);
309 ret = pasid_to_svm_sdev(dev, data->hpasid, &svm, &sdev);
315 * Do not allow multiple bindings of the same device-PASID since
316 * there is only one SL page table per PASID. We may revisit
317 * this once sharing a PGD across domains is supported.
319 dev_warn_ratelimited(dev, "Already bound with PASID %u\n",
326 /* We come here when the PASID has never been bound to a device. */
327 svm = kzalloc(sizeof(*svm), GFP_KERNEL);
332 /* REVISIT: the upper layer/VFIO can track the host process that
333 * binds the PASID. ioasid_set = mm might be sufficient for vfio to
334 * check PASID VMM ownership. We can drop the following line
335 * once the VFIO and IOASID set check is in place.
337 svm->mm = get_task_mm(current);
338 svm->pasid = data->hpasid;
339 if (data->flags & IOMMU_SVA_GPASID_VAL) {
340 svm->gpasid = data->gpasid;
341 svm->flags |= SVM_FLAG_GUEST_PASID;
343 ioasid_set_data(data->hpasid, svm);
344 INIT_LIST_HEAD_RCU(&svm->devs);
347 sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
354 /* Only count users if device has aux domains */
355 if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
358 /* Set up device context entry for PASID if not enabled already */
359 ret = intel_iommu_enable_pasid(iommu, sdev->dev);
361 dev_err_ratelimited(dev, "Failed to enable PASID capability\n");
367 * The PASID table is per-device for better security. Therefore, for
368 * each bind of a new device, even with an existing PASID, we need to
369 * call the nested mode setup function here.
371 spin_lock(&iommu->lock);
372 ret = intel_pasid_setup_nested(iommu, dev,
373 (pgd_t *)(uintptr_t)data->gpgd,
374 data->hpasid, &data->vtd, dmar_domain,
376 spin_unlock(&iommu->lock);
378 dev_err_ratelimited(dev, "Failed to set up PASID %llu in nested mode, Err %d\n",
381 * The PASID entry should be in the cleared state if nested mode
382 * setup failed, so we only need to clear the IOASID tracking
383 * data so that the free call will succeed.
389 svm->flags |= SVM_FLAG_GUEST_MODE;
391 init_rcu_head(&sdev->rcu);
392 list_add_rcu(&sdev->list, &svm->devs);
394 if (!IS_ERR_OR_NULL(svm) && list_empty(&svm->devs)) {
395 ioasid_set_data(data->hpasid, NULL);
399 mutex_unlock(&pasid_mutex);
403 int intel_svm_unbind_gpasid(struct device *dev, u32 pasid)
405 struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
406 struct intel_svm_dev *sdev;
407 struct intel_svm *svm;
413 mutex_lock(&pasid_mutex);
414 ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev);
419 if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
422 list_del_rcu(&sdev->list);
423 intel_pasid_tear_down_entry(iommu, dev,
425 intel_svm_drain_prq(dev, svm->pasid);
426 kfree_rcu(sdev, rcu);
428 if (list_empty(&svm->devs)) {
430 * We do not free the IOASID here because the
431 * IOMMU driver did not allocate it.
432 * Unlike native SVM, an IOASID for guest use is
433 * allocated prior to the bind call.
434 * In any case, if the free call comes before
435 * the unbind, the IOMMU driver will be notified
436 * and perform the cleanup.
438 ioasid_set_data(pasid, NULL);
444 mutex_unlock(&pasid_mutex);
448 static void _load_pasid(void *unused)
453 static void load_pasid(struct mm_struct *mm, u32 pasid)
455 mutex_lock(&mm->context.lock);
457 /* Synchronize with READ_ONCE in update_pasid(). */
458 smp_store_release(&mm->pasid, pasid);
460 /* Update PASID MSR on all CPUs running the mm's tasks. */
461 on_each_cpu_mask(mm_cpumask(mm), _load_pasid, NULL, true);
463 mutex_unlock(&mm->context.lock);
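/*
 * mm->pasid is published with release semantics, then on_each_cpu_mask()
 * runs _load_pasid() on every CPU in mm_cpumask(mm) and waits for it to
 * finish, so CPUs currently running this mm refresh their per-CPU PASID
 * state.
 */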
466 /* Caller must hold pasid_mutex, mm reference */
468 intel_svm_bind_mm(struct device *dev, unsigned int flags,
469 struct svm_dev_ops *ops,
470 struct mm_struct *mm, struct intel_svm_dev **sd)
472 struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
473 struct device_domain_info *info;
474 struct intel_svm_dev *sdev;
475 struct intel_svm *svm = NULL;
479 if (!iommu || dmar_disabled)
482 if (!intel_svm_capable(iommu))
485 if (dev_is_pci(dev)) {
486 pasid_max = pci_max_pasids(to_pci_dev(dev));
492 /* Binding a supervisor PASID should have mm = NULL */
493 if (flags & SVM_FLAG_SUPERVISOR_MODE) {
494 if (!ecap_srs(iommu->ecap) || mm) {
495 pr_err("Supervisor PASID with user provided mm.\n");
500 if (!(flags & SVM_FLAG_PRIVATE_PASID)) {
503 list_for_each_entry(t, &global_svm_list, list) {
504 if (t->mm != mm || (t->flags & SVM_FLAG_PRIVATE_PASID))
508 if (svm->pasid >= pasid_max) {
510 "Limited PASID width. Cannot use existing PASID %d\n",
516 /* Find the matching device in svm list */
517 for_each_svm_dev(sdev, svm, dev) {
518 if (sdev->ops != ops) {
530 sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
537 ret = intel_iommu_enable_pasid(iommu, dev);
543 info = get_domain_info(dev);
544 sdev->did = FLPT_DEFAULT_DID;
545 sdev->sid = PCI_DEVID(info->bus, info->devfn);
546 if (info->ats_enabled) {
548 sdev->qdep = info->ats_qdep;
549 if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
553 /* Finish the setup now that we know we're keeping it */
556 init_rcu_head(&sdev->rcu);
559 svm = kzalloc(sizeof(*svm), GFP_KERNEL);
567 if (pasid_max > intel_pasid_max_id)
568 pasid_max = intel_pasid_max_id;
570 /* Do not use PASID 0, reserved for RID to PASID */
571 svm->pasid = ioasid_alloc(NULL, PASID_MIN,
573 if (svm->pasid == INVALID_IOASID) {
579 svm->notifier.ops = &intel_mmuops;
582 INIT_LIST_HEAD_RCU(&svm->devs);
583 INIT_LIST_HEAD(&svm->list);
586 ret = mmu_notifier_register(&svm->notifier, mm);
588 ioasid_free(svm->pasid);
595 spin_lock(&iommu->lock);
596 ret = intel_pasid_setup_first_level(iommu, dev,
597 mm ? mm->pgd : init_mm.pgd,
598 svm->pasid, FLPT_DEFAULT_DID,
599 (mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
600 (cpu_feature_enabled(X86_FEATURE_LA57) ?
601 PASID_FLAG_FL5LP : 0));
602 spin_unlock(&iommu->lock);
605 mmu_notifier_unregister(&svm->notifier, mm);
606 ioasid_free(svm->pasid);
612 list_add_tail(&svm->list, &global_svm_list);
614 /* The newly allocated pasid is loaded to the mm. */
615 load_pasid(mm, svm->pasid);
619 * Binding a new device with an existing PASID, we need to set up
622 spin_lock(&iommu->lock);
623 ret = intel_pasid_setup_first_level(iommu, dev,
624 mm ? mm->pgd : init_mm.pgd,
625 svm->pasid, FLPT_DEFAULT_DID,
626 (mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
627 (cpu_feature_enabled(X86_FEATURE_LA57) ?
628 PASID_FLAG_FL5LP : 0));
629 spin_unlock(&iommu->lock);
635 list_add_rcu(&sdev->list, &svm->devs);
637 sdev->pasid = svm->pasid;
646 /* Caller must hold pasid_mutex */
647 static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
649 struct intel_svm_dev *sdev;
650 struct intel_iommu *iommu;
651 struct intel_svm *svm;
654 iommu = device_to_iommu(dev, NULL, NULL);
658 ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev);
665 list_del_rcu(&sdev->list);
666 /* Flush the PASID cache and IOTLB for this device.
667 * Note that we do depend on the hardware *not* using
668 * the PASID any more. Just as we depend on other
669 * devices never using PASIDs that they have no right
670 * to use. We have a *shared* PASID table, because it's
671 * large and has to be physically contiguous. So it's
672 * hard to be as defensive as we might like. */
673 intel_pasid_tear_down_entry(iommu, dev,
675 intel_svm_drain_prq(dev, svm->pasid);
676 kfree_rcu(sdev, rcu);
678 if (list_empty(&svm->devs)) {
679 ioasid_free(svm->pasid);
681 mmu_notifier_unregister(&svm->notifier, svm->mm);
682 /* Clear mm's pasid. */
683 load_pasid(svm->mm, PASID_DISABLED);
685 list_del(&svm->list);
686 /* We mandate that no page faults may be outstanding
687 * for the PASID when intel_svm_unbind_mm() is called.
688 * If that is not obeyed, subtle errors will happen.
689 * Let's make them less subtle... */
690 memset(svm, 0x6b, sizeof(*svm));
699 /* Page request queue descriptor */
700 struct page_req_dsc {
705 u64 priv_data_present:1;
728 #define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x20)
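/*
 * Each page request descriptor is 32 bytes (0x20), so with a queue of
 * (0x1000 << PRQ_ORDER) bytes the mask both wraps the head/tail byte
 * offsets at the ring size and keeps them descriptor-aligned.  For
 * example, with PRQ_ORDER == 0 the queue is 4KiB and PRQ_RING_MASK is
 * 0xfe0.
 */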
730 static bool access_error(struct vm_area_struct *vma, struct page_req_dsc *req)
732 unsigned long requested = 0;
735 requested |= VM_EXEC;
738 requested |= VM_READ;
741 requested |= VM_WRITE;
743 return (requested & ~vma->vm_flags) != 0;
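/*
 * Example: an execute request (req->exe_req) against a VMA mapped
 * without VM_EXEC leaves VM_EXEC set in (requested & ~vma->vm_flags),
 * so the request is rejected as an access error.
 */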
746 static bool is_canonical_address(u64 addr)
748 int shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
749 long saddr = (long) addr;
751 return (((saddr << shift) >> shift) == saddr);
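/*
 * With 4-level paging __VIRTUAL_MASK_SHIFT is 47, so shift is 16 and the
 * check demands that bits 63:47 are a sign extension of bit 47.  For
 * example, 0x00007ffffffff000 and 0xffff800000000000 are canonical,
 * while 0x0000800000000000 is not.
 */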
755 * intel_svm_drain_prq - Drain page requests and responses for a pasid
756 * @dev: target device
757 * @pasid: pasid for draining
759 * Drain all pending page requests and responses related to @pasid in both
760 * software and hardware. This is supposed to be called after the device
761 * driver has stopped DMA, the pasid entry has been cleared, and both IOTLB
762 * and DevTLB have been invalidated.
764 * It waits until all pending page requests for @pasid in the page fault
765 * queue are completed by the prq handling thread. It then follows the
766 * steps described in VT-d spec CH7.10 to drain all page requests and
767 * page responses pending in the hardware.
769 static void intel_svm_drain_prq(struct device *dev, u32 pasid)
771 struct device_domain_info *info;
772 struct dmar_domain *domain;
773 struct intel_iommu *iommu;
774 struct qi_desc desc[3];
775 struct pci_dev *pdev;
780 info = get_domain_info(dev);
781 if (WARN_ON(!info || !dev_is_pci(dev)))
784 if (!info->pri_enabled)
788 domain = info->domain;
789 pdev = to_pci_dev(dev);
790 sid = PCI_DEVID(info->bus, info->devfn);
791 did = domain->iommu_did[iommu->seq_id];
792 qdep = pci_ats_queue_depth(pdev);
795 * Check and wait until all pending page requests in the queue are
796 * handled by the prq handling thread.
799 reinit_completion(&iommu->prq_complete);
800 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
801 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
802 while (head != tail) {
803 struct page_req_dsc *req;
805 req = &iommu->prq[head / sizeof(*req)];
806 if (!req->pasid_present || req->pasid != pasid) {
807 head = (head + sizeof(*req)) & PRQ_RING_MASK;
811 wait_for_completion(&iommu->prq_complete);
816 * Perform steps described in VT-d spec CH7.10 to drain page
817 * requests and responses in hardware.
819 memset(desc, 0, sizeof(desc));
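/*
 * Three descriptors are submitted in one batch: desc[0] is an
 * invalidation wait descriptor, desc[1] a PASID-based IOTLB invalidation
 * and desc[2] a PASID-based device-TLB invalidation.  The
 * QI_OPT_WAIT_DRAIN option asks the hardware to drain pending page
 * requests and responses before the submission is reported complete.
 */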
820 desc[0].qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
823 desc[1].qw0 = QI_EIOTLB_PASID(pasid) |
825 QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
827 desc[2].qw0 = QI_DEV_EIOTLB_PASID(pasid) |
828 QI_DEV_EIOTLB_SID(sid) |
829 QI_DEV_EIOTLB_QDEP(qdep) |
831 QI_DEV_IOTLB_PFSID(info->pfsid);
833 reinit_completion(&iommu->prq_complete);
834 qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN);
835 if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
836 wait_for_completion(&iommu->prq_complete);
841 static int prq_to_iommu_prot(struct page_req_dsc *req)
846 prot |= IOMMU_FAULT_PERM_READ;
848 prot |= IOMMU_FAULT_PERM_WRITE;
850 prot |= IOMMU_FAULT_PERM_EXEC;
852 prot |= IOMMU_FAULT_PERM_PRIV;
858 intel_svm_prq_report(struct device *dev, struct page_req_dsc *desc)
860 struct iommu_fault_event event;
862 if (!dev || !dev_is_pci(dev))
865 /* Fill in event data for device specific processing */
866 memset(&event, 0, sizeof(struct iommu_fault_event));
867 event.fault.type = IOMMU_FAULT_PAGE_REQ;
868 event.fault.prm.addr = desc->addr;
869 event.fault.prm.pasid = desc->pasid;
870 event.fault.prm.grpid = desc->prg_index;
871 event.fault.prm.perm = prq_to_iommu_prot(desc);
874 event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
875 if (desc->pasid_present) {
876 event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
877 event.fault.prm.flags |= IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
879 if (desc->priv_data_present) {
881 * Set the last-page-in-group bit if private data is present:
882 * a page response is then required, just as it is for LPIG.
883 * iommu_report_device_fault() doesn't understand this vendor-
884 * specific requirement, so we set last_page as a workaround.
886 event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
887 event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA;
888 memcpy(event.fault.prm.private_data, desc->priv_data,
889 sizeof(desc->priv_data));
892 return iommu_report_device_fault(dev, &event);
895 static irqreturn_t prq_event_thread(int irq, void *d)
897 struct intel_svm_dev *sdev = NULL;
898 struct intel_iommu *iommu = d;
899 struct intel_svm *svm = NULL;
900 int head, tail, handled = 0;
902 /* Clear PPR bit before reading head/tail registers, to
903 * ensure that we get a new interrupt if needed. */
904 writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);
906 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
907 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
908 while (head != tail) {
909 struct vm_area_struct *vma;
910 struct page_req_dsc *req;
918 req = &iommu->prq[head / sizeof(*req)];
920 result = QI_RESP_FAILURE;
921 address = (u64)req->addr << VTD_PAGE_SHIFT;
922 if (!req->pasid_present) {
923 pr_err("%s: Page request without PASID: %08llx %08llx\n",
924 iommu->name, ((unsigned long long *)req)[0],
925 ((unsigned long long *)req)[1]);
929 if (!svm || svm->pasid != req->pasid) {
931 svm = ioasid_find(NULL, req->pasid, NULL);
932 /* It *can't* go away, because the driver is not permitted
933 * to unbind the mm while any page faults are outstanding.
934 * So we only need RCU to protect the internal idr code. */
936 if (IS_ERR_OR_NULL(svm)) {
937 pr_err("%s: Page request for invalid PASID %d: %08llx %08llx\n",
938 iommu->name, req->pasid, ((unsigned long long *)req)[0],
939 ((unsigned long long *)req)[1]);
944 if (!sdev || sdev->sid != req->rid) {
945 struct intel_svm_dev *t;
949 list_for_each_entry_rcu(t, &svm->devs, list) {
950 if (t->sid == req->rid) {
958 result = QI_RESP_INVALID;
959 /* Since we're using init_mm.pgd directly, we should never take
960 * any faults on kernel addresses. */
964 /* If address is not canonical, return invalid response */
965 if (!is_canonical_address(address))
969 * If the prq is to be handled outside the iommu driver, via a
970 * receiver of the fault notifiers, we skip the page response here.
972 if (svm->flags & SVM_FLAG_GUEST_MODE) {
973 if (sdev && !intel_svm_prq_report(sdev->dev, req))
979 /* If the mm is already defunct, don't handle faults. */
980 if (!mmget_not_zero(svm->mm))
983 mmap_read_lock(svm->mm);
984 vma = find_extend_vma(svm->mm, address);
985 if (!vma || address < vma->vm_start)
988 if (access_error(vma, req))
991 ret = handle_mm_fault(vma, address,
992 req->wr_req ? FAULT_FLAG_WRITE : 0,
994 if (ret & VM_FAULT_ERROR)
997 result = QI_RESP_SUCCESS;
999 mmap_read_unlock(svm->mm);
1003 if (sdev && sdev->ops && sdev->ops->fault_cb) {
1004 int rwxp = (req->rd_req << 3) | (req->wr_req << 2) |
1005 (req->exe_req << 1) | (req->pm_req);
1006 sdev->ops->fault_cb(sdev->dev, req->pasid, req->addr,
1007 req->priv_data, rwxp, result);
1009 /* We get here in the error case where the PASID lookup failed,
1010  * and these can be NULL. Do not use them below this point! */
1014 if (req->lpig || req->priv_data_present) {
1016 * Per VT-d spec. v3.0 ch7.7, system software must
1017 * respond with page group response if private data
1018 * is present (PDP) or last page in group (LPIG) bit
1019 * is set. This is an additional VT-d feature beyond
1022 resp.qw0 = QI_PGRP_PASID(req->pasid) |
1023 QI_PGRP_DID(req->rid) |
1024 QI_PGRP_PASID_P(req->pasid_present) |
1025 QI_PGRP_PDP(req->pasid_present) |
1026 QI_PGRP_RESP_CODE(result) |
1028 resp.qw1 = QI_PGRP_IDX(req->prg_index) |
1029 QI_PGRP_LPIG(req->lpig);
1031 if (req->priv_data_present)
1032 memcpy(&resp.qw2, req->priv_data,
1033 sizeof(req->priv_data));
1036 qi_submit_sync(iommu, &resp, 1, 0);
1039 head = (head + sizeof(*req)) & PRQ_RING_MASK;
1042 dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);
1045 * Clear the page request overflow bit and wake up all threads that
1046 * are waiting for the completion of this handling.
1048 if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO)
1049 writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
1051 if (!completion_done(&iommu->prq_complete))
1052 complete(&iommu->prq_complete);
1054 return IRQ_RETVAL(handled);
1057 #define to_intel_svm_dev(handle) container_of(handle, struct intel_svm_dev, sva)
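/*
 * The generic struct iommu_sva handle handed out by intel_svm_bind() is
 * embedded in struct intel_svm_dev as the "sva" member, so container_of()
 * recovers the driver-private sdev from the handle.
 */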
1059 intel_svm_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
1061 struct iommu_sva *sva = ERR_PTR(-EINVAL);
1062 struct intel_svm_dev *sdev = NULL;
1063 unsigned int flags = 0;
1067 * TODO: Consolidate with generic iommu-sva bind after it is merged.
1068 * It will require shared SVM data structures, i.e. combine io_mm
1069 * and intel_svm etc.
1072 flags = *(unsigned int *)drvdata;
1073 mutex_lock(&pasid_mutex);
1074 ret = intel_svm_bind_mm(dev, flags, NULL, mm, &sdev);
1080 WARN(!sdev, "SVM bind succeeded with no sdev!\n");
1082 mutex_unlock(&pasid_mutex);
1087 void intel_svm_unbind(struct iommu_sva *sva)
1089 struct intel_svm_dev *sdev;
1091 mutex_lock(&pasid_mutex);
1092 sdev = to_intel_svm_dev(sva);
1093 intel_svm_unbind_mm(sdev->dev, sdev->pasid);
1094 mutex_unlock(&pasid_mutex);
1097 u32 intel_svm_get_pasid(struct iommu_sva *sva)
1099 struct intel_svm_dev *sdev;
1102 mutex_lock(&pasid_mutex);
1103 sdev = to_intel_svm_dev(sva);
1104 pasid = sdev->pasid;
1105 mutex_unlock(&pasid_mutex);
1110 int intel_svm_page_response(struct device *dev,
1111 struct iommu_fault_event *evt,
1112 struct iommu_page_response *msg)
1114 struct iommu_fault_page_request *prm;
1115 struct intel_svm_dev *sdev = NULL;
1116 struct intel_svm *svm = NULL;
1117 struct intel_iommu *iommu;
1118 bool private_present;
1125 if (!dev || !dev_is_pci(dev))
1128 iommu = device_to_iommu(dev, &bus, &devfn);
1135 mutex_lock(&pasid_mutex);
1137 prm = &evt->fault.prm;
1138 sid = PCI_DEVID(bus, devfn);
1139 pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
1140 private_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA;
1141 last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
1143 if (!pasid_present) {
1148 if (prm->pasid == 0 || prm->pasid >= PASID_MAX) {
1153 ret = pasid_to_svm_sdev(dev, prm->pasid, &svm, &sdev);
1160 * For responses from userspace, we need to make sure that the
1161 * PASID has been bound to its mm.
1163 if (svm->flags & SVM_FLAG_GUEST_MODE) {
1164 struct mm_struct *mm;
1166 mm = get_task_mm(current);
1172 if (mm != svm->mm) {
1182 * Per VT-d spec. v3.0 ch7.7, system software must respond
1183 * with page group response if private data is present (PDP)
1184 * or last page in group (LPIG) bit is set. This is an
1185 * additional VT-d requirement beyond PCI ATS spec.
1187 if (last_page || private_present) {
1188 struct qi_desc desc;
1190 desc.qw0 = QI_PGRP_PASID(prm->pasid) | QI_PGRP_DID(sid) |
1191 QI_PGRP_PASID_P(pasid_present) |
1192 QI_PGRP_PDP(private_present) |
1193 QI_PGRP_RESP_CODE(msg->code) |
1195 desc.qw1 = QI_PGRP_IDX(prm->grpid) | QI_PGRP_LPIG(last_page);
1198 if (private_present)
1199 memcpy(&desc.qw2, prm->private_data,
1200 sizeof(prm->private_data));
1202 qi_submit_sync(iommu, &desc, 1, 0);
1205 mutex_unlock(&pasid_mutex);