#define _io_pte_entry(pn, prot) ((_PAGE_PFN_MASK & ((pn) << _PAGE_PFN_SHIFT)) | (prot))
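For reference, _io_pte_entry() packs a page frame number into the PFN field of a RISC-V page-table entry (bits 10..53 in the Sv39/Sv48/Sv57 formats) and ORs in the protection bits. A standalone sketch of that packing, using local stand-in constants rather than the kernel's _PAGE_* definitions:

/*
 * Illustration only: the constants below mirror the PTE layout but are
 * stand-ins, not the kernel's _PAGE_PFN_MASK/_PAGE_PFN_SHIFT/_PAGE_TABLE.
 */
#include <stdint.h>
#include <stdio.h>

#define PTE_PFN_SHIFT	10
#define PTE_PFN_MASK	(((1ULL << 44) - 1) << PTE_PFN_SHIFT)	/* bits 10..53 */
#define PTE_VALID	(1ULL << 0)				/* stand-in for _PAGE_TABLE */

static uint64_t io_pte_entry(uint64_t pfn, uint64_t prot)
{
	/* Same arithmetic as _io_pte_entry(): shift the PFN in, OR the prot bits. */
	return (PTE_PFN_MASK & (pfn << PTE_PFN_SHIFT)) | prot;
}

int main(void)
{
	/* Non-leaf entry pointing at a hypothetical next-level table page. */
	uint64_t pte = io_pte_entry(0x12345, PTE_VALID);

	printf("pte = %#llx\n", (unsigned long long)pte);	/* 0x48d1401 */
	return 0;
}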
static void riscv_iommu_pte_free(struct riscv_iommu_domain *domain,
- unsigned long pte, struct list_head *freelist)
+ unsigned long pte,
+ struct iommu_pages_list *freelist)
{
unsigned long *ptr;
int i;
/* ... recursive clearing of the child PTEs omitted ... */
if (freelist)
- list_add_tail(&virt_to_page(ptr)->lru, freelist);
+ iommu_pages_list_add(freelist, ptr);
else
iommu_free_pages(ptr);
}
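The hunk above is the core of the conversion: instead of chaining struct page::lru entries onto a plain list_head, freed page-table pages are queued on an iommu_pages_list and, once the IOTLB has been invalidated, handed back in one batch (the counterpart helper for that final step is iommu_put_pages_list() from drivers/iommu/iommu-pages.h). A minimal sketch of the gather-then-free pattern, assuming those helpers and a hypothetical inval_all() callback in place of the driver's IOTLB invalidation:

/*
 * Sketch of the deferred-free pattern, assuming the helpers declared in
 * drivers/iommu/iommu-pages.h. inval_all() is a hypothetical stand-in for
 * the driver's IOTLB invalidation; it is not part of the real API.
 */
#include "iommu-pages.h"

static void example_discard_table(void *table_page, void (*inval_all)(void))
{
	struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist);

	/* Queue the page backing the table instead of freeing it right away. */
	iommu_pages_list_add(&freelist, table_page);

	if (!iommu_pages_list_empty(&freelist)) {
		/* Devices may still walk the old table: invalidate first. */
		inval_all();
		/* Only now is it safe to hand the pages back to the allocator. */
		iommu_put_pages_list(&freelist);
	}
}

The hunks below apply the same conversion in the driver's map path, where stale non-leaf tables collected while remapping are flushed with a single all-levels invalidation before their pages are released.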
unsigned long *ptr;
unsigned long pte, old, pte_prot;
int rc = 0;
- LIST_HEAD(freelist);
+ struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist);
if (!(prot & IOMMU_WRITE))
pte_prot = _PAGE_BASE | _PAGE_READ;
/* ... mapping loop omitted ... */

*mapped = size;
- if (!list_empty(&freelist)) {
+ if (!iommu_pages_list_empty(&freelist)) {
/*
* In 1.0 spec version, the smallest scope we can use to
* invalidate all levels of page table (i.e. leaf and non-leaf)