vdpa: Make use of PFN_PHYS/PFN_UP/PFN_DOWN helper macros
author		Cai Huoqing <caihuoqing@baidu.com>
		Mon, 2 Aug 2021 01:37:17 +0000 (09:37 +0800)
committer	Michael S. Tsirkin <mst@redhat.com>
		Mon, 6 Sep 2021 06:25:16 +0000 (02:25 -0400)
It's a nice refactor to make use of the PFN_PHYS/PFN_UP/PFN_DOWN helper
macros: replace the open-coded '<< PAGE_SHIFT' and '>> PAGE_SHIFT'
conversions so the intent of each byte/page-frame-number conversion is
explicit.
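
For context, these helpers live in include/linux/pfn.h; their definitions
(condensed here, as in mainline at the time of this patch) are:

	#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)	/* bytes -> pages, rounding up */
	#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)			/* bytes -> pages, rounding down */
	#define PFN_PHYS(x)	((phys_addr_t)(x) << PAGE_SHIFT)	/* pfn -> physical address */

Note that PFN_PHYS() also casts to phys_addr_t before shifting, which
avoids truncation on 32-bit builds where a pfn shifted left in unsigned
long arithmetic could overflow.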

Signed-off-by: Cai Huoqing <caihuoqing@baidu.com>
Link: https://lore.kernel.org/r/20210802013717.851-1-caihuoqing@baidu.com
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 9479f7f..42c998c 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -507,15 +507,15 @@ static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
        unsigned long pfn, pinned;
 
        while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
-               pinned = map->size >> PAGE_SHIFT;
-               for (pfn = map->addr >> PAGE_SHIFT;
+               pinned = PFN_DOWN(map->size);
+               for (pfn = PFN_DOWN(map->addr);
                     pinned > 0; pfn++, pinned--) {
                        page = pfn_to_page(pfn);
                        if (map->perm & VHOST_ACCESS_WO)
                                set_page_dirty_lock(page);
                        unpin_user_page(page);
                }
-               atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
+               atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
                vhost_iotlb_map_free(iotlb, map);
        }
 }
@@ -577,7 +577,7 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
        if (r)
                vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
        else
-               atomic64_add(size >> PAGE_SHIFT, &dev->mm->pinned_vm);
+               atomic64_add(PFN_DOWN(size), &dev->mm->pinned_vm);
 
        return r;
 }
@@ -631,7 +631,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
        if (msg->perm & VHOST_ACCESS_WO)
                gup_flags |= FOLL_WRITE;
 
-       npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
+       npages = PFN_UP(msg->size + (iova & ~PAGE_MASK));
        if (!npages) {
                ret = -EINVAL;
                goto free;
@@ -639,7 +639,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
 
        mmap_read_lock(dev->mm);
 
-       lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+       lock_limit = PFN_DOWN(rlimit(RLIMIT_MEMLOCK));
        if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
                ret = -ENOMEM;
                goto unlock;
@@ -673,9 +673,9 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
 
                        if (last_pfn && (this_pfn != last_pfn + 1)) {
                                /* Pin a contiguous chunk of memory */
-                               csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
+                               csize = PFN_PHYS(last_pfn - map_pfn + 1);
                                ret = vhost_vdpa_map(v, iova, csize,
-                                                    map_pfn << PAGE_SHIFT,
+                                                    PFN_PHYS(map_pfn),
                                                     msg->perm);
                                if (ret) {
                                        /*
@@ -699,13 +699,13 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
                        last_pfn = this_pfn;
                }
 
-               cur_base += pinned << PAGE_SHIFT;
+               cur_base += PFN_PHYS(pinned);
                npages -= pinned;
        }
 
        /* Pin the rest chunk */
-       ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
-                            map_pfn << PAGE_SHIFT, msg->perm);
+       ret = vhost_vdpa_map(v, iova, PFN_PHYS(last_pfn - map_pfn + 1),
+                            PFN_PHYS(map_pfn), msg->perm);
 out:
        if (ret) {
                if (nchunks) {
@@ -945,7 +945,7 @@ static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
 
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
-                           notify.addr >> PAGE_SHIFT, PAGE_SIZE,
+                           PFN_DOWN(notify.addr), PAGE_SIZE,
                            vma->vm_page_prot))
                return VM_FAULT_SIGBUS;
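
One non-obvious substitution above is PFN_UP(msg->size + (iova & ~PAGE_MASK))
replacing PAGE_ALIGN(...) >> PAGE_SHIFT. The two are exactly equivalent: the
low bits that PAGE_MASK clears are discarded by the right shift anyway. A
minimal userspace sketch checking this, with PAGE_SHIFT stubbed to a 4 KiB
page size purely for illustration:

	#include <assert.h>

	/* Stub values for illustration only; the kernel defines these. */
	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PAGE_MASK	(~(PAGE_SIZE - 1))

	#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
	#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

	int main(void)
	{
		for (unsigned long x = 0; x < 3 * PAGE_SIZE; x += 511) {
			/* Bits cleared by PAGE_MASK are shifted out anyway,
			 * so both expressions yield the same page count. */
			assert(PFN_UP(x) == (PAGE_ALIGN(x) >> PAGE_SHIFT));
		}
		return 0;
	}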