iommu: Change iommu_iotlb_gather to use iommu_pages_list
authorJason Gunthorpe <jgg@nvidia.com>
Tue, 8 Apr 2025 16:53:59 +0000 (13:53 -0300)
committerJoerg Roedel <jroedel@suse.de>
Thu, 17 Apr 2025 14:22:42 +0000 (16:22 +0200)
This converts the remaining places using lists of pages to the new API.

The Intel free path was shared with its gather path, so it is converted at
the same time.

Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Tested-by: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/11-v4-c8663abbb606+3f7-iommu_pages_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
drivers/iommu/dma-iommu.c
drivers/iommu/intel/iommu.c
include/linux/iommu.h

index a775e4d..0af1ab3 100644 (file)
@@ -105,7 +105,7 @@ early_param("iommu.forcedac", iommu_dma_forcedac_setup);
 struct iova_fq_entry {
        unsigned long iova_pfn;
        unsigned long pages;
-       struct list_head freelist;
+       struct iommu_pages_list freelist;
        u64 counter; /* Flush counter when this entry was added */
 };
 
@@ -192,7 +192,7 @@ static void fq_flush_timeout(struct timer_list *t)
 
 static void queue_iova(struct iommu_dma_cookie *cookie,
                unsigned long pfn, unsigned long pages,
-               struct list_head *freelist)
+               struct iommu_pages_list *freelist)
 {
        struct iova_fq *fq;
        unsigned long flags;
@@ -231,7 +231,7 @@ static void queue_iova(struct iommu_dma_cookie *cookie,
        fq->entries[idx].iova_pfn = pfn;
        fq->entries[idx].pages    = pages;
        fq->entries[idx].counter  = atomic64_read(&cookie->fq_flush_start_cnt);
-       list_splice(freelist, &fq->entries[idx].freelist);
+       iommu_pages_list_splice(freelist, &fq->entries[idx].freelist);
 
        spin_unlock_irqrestore(&fq->lock, flags);
 
@@ -289,7 +289,8 @@ static void iommu_dma_init_one_fq(struct iova_fq *fq, size_t fq_size)
        spin_lock_init(&fq->lock);
 
        for (i = 0; i < fq_size; i++)
-               INIT_LIST_HEAD(&fq->entries[i].freelist);
+               fq->entries[i].freelist =
+                       IOMMU_PAGES_LIST_INIT(fq->entries[i].freelist);
 }
 
 static int iommu_dma_init_fq_single(struct iommu_dma_cookie *cookie)
index e66fd91..0de9580 100644 (file)
@@ -894,18 +894,16 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
    The 'pte' argument is the *parent* PTE, pointing to the page that is to
    be freed. */
 static void dma_pte_list_pagetables(struct dmar_domain *domain,
-                                   int level, struct dma_pte *pte,
-                                   struct list_head *freelist)
+                                   int level, struct dma_pte *parent_pte,
+                                   struct iommu_pages_list *freelist)
 {
-       struct page *pg;
+       struct dma_pte *pte = phys_to_virt(dma_pte_addr(parent_pte));
 
-       pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
-       list_add_tail(&pg->lru, freelist);
+       iommu_pages_list_add(freelist, pte);
 
        if (level == 1)
                return;
 
-       pte = page_address(pg);
        do {
                if (dma_pte_present(pte) && !dma_pte_superpage(pte))
                        dma_pte_list_pagetables(domain, level - 1, pte, freelist);
@@ -916,7 +914,7 @@ static void dma_pte_list_pagetables(struct dmar_domain *domain,
 static void dma_pte_clear_level(struct dmar_domain *domain, int level,
                                struct dma_pte *pte, unsigned long pfn,
                                unsigned long start_pfn, unsigned long last_pfn,
-                               struct list_head *freelist)
+                               struct iommu_pages_list *freelist)
 {
        struct dma_pte *first_pte = NULL, *last_pte = NULL;
 
@@ -961,7 +959,8 @@ next:
    the page tables, and may have cached the intermediate levels. The
    pages can only be freed after the IOTLB flush has been done. */
 static void domain_unmap(struct dmar_domain *domain, unsigned long start_pfn,
-                        unsigned long last_pfn, struct list_head *freelist)
+                        unsigned long last_pfn,
+                        struct iommu_pages_list *freelist)
 {
        if (WARN_ON(!domain_pfn_supported(domain, last_pfn)) ||
            WARN_ON(start_pfn > last_pfn))
@@ -973,8 +972,7 @@ static void domain_unmap(struct dmar_domain *domain, unsigned long start_pfn,
 
        /* free pgd */
        if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
-               struct page *pgd_page = virt_to_page(domain->pgd);
-               list_add_tail(&pgd_page->lru, freelist);
+               iommu_pages_list_add(freelist, domain->pgd);
                domain->pgd = NULL;
        }
 }
@@ -1449,7 +1447,8 @@ void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
 static void domain_exit(struct dmar_domain *domain)
 {
        if (domain->pgd) {
-               LIST_HEAD(freelist);
+               struct iommu_pages_list freelist =
+                       IOMMU_PAGES_LIST_INIT(freelist);
 
                domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw), &freelist);
                iommu_put_pages_list(&freelist);
@@ -3603,7 +3602,8 @@ static void intel_iommu_tlb_sync(struct iommu_domain *domain,
                                 struct iommu_iotlb_gather *gather)
 {
        cache_tag_flush_range(to_dmar_domain(domain), gather->start,
-                             gather->end, list_empty(&gather->freelist));
+                             gather->end,
+                             iommu_pages_list_empty(&gather->freelist));
        iommu_put_pages_list(&gather->freelist);
 }
 
index 6dd3a22..3fb6216 100644 (file)
@@ -375,7 +375,7 @@ struct iommu_iotlb_gather {
        unsigned long           start;
        unsigned long           end;
        size_t                  pgsize;
-       struct list_head        freelist;
+       struct iommu_pages_list freelist;
        bool                    queued;
 };
 
@@ -864,7 +864,7 @@ static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
 {
        *gather = (struct iommu_iotlb_gather) {
                .start  = ULONG_MAX,
-               .freelist = LIST_HEAD_INIT(gather->freelist),
+               .freelist = IOMMU_PAGES_LIST_INIT(gather->freelist),
        };
 }