diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index a2a8226..80a7887 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -36,7 +36,6 @@
 static const struct dev_pagemap_ops dmirror_devmem_ops;
 static const struct mmu_interval_notifier_ops dmirror_min_ops;
 static dev_t dmirror_dev;
-static struct page *dmirror_zero_page;
 
 struct dmirror_device;
 
@@ -214,6 +213,14 @@ static bool dmirror_interval_invalidate(struct mmu_interval_notifier *mni,
 {
        struct dmirror *dmirror = container_of(mni, struct dmirror, notifier);
 
+       /*
+        * Ignore invalidation callbacks for device private pages since
+        * the invalidation is handled as part of the migration process.
+        */
+       if (range->event == MMU_NOTIFY_MIGRATE &&
+           range->migrate_pgmap_owner == dmirror->mdevice)
+               return true;
+
        if (mmu_notifier_range_blockable(range))
                mutex_lock(&dmirror->mutex);
        else if (!mutex_trylock(&dmirror->mutex))
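
The early return above works because migrate_vma_setup() marks the invalidation it raises as MMU_NOTIFY_MIGRATE and records the caller's pgmap_owner in the range, letting a driver tell its own migrations apart from unrelated invalidations. A minimal sketch of the same pattern outside this test; struct my_driver, its notifier/device members, and my_invalidate() are hypothetical names, not from this file:

#include <linux/mmu_notifier.h>

/*
 * Sketch only: skip invalidation callbacks raised by our own
 * device-private migrations; everything else still tears down
 * the device mappings.
 */
static bool my_invalidate(struct mmu_interval_notifier *mni,
			  const struct mmu_notifier_range *range,
			  unsigned long cur_seq)
{
	struct my_driver *drv = container_of(mni, struct my_driver, notifier);

	if (range->event == MMU_NOTIFY_MIGRATE &&
	    range->migrate_pgmap_owner == drv->device)
		return true;

	mmu_interval_set_seq(mni, cur_seq);
	/* ... invalidate device mappings for [range->start, range->end) ... */
	return true;
}
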
@@ -452,6 +459,22 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
        unsigned long pfn_last;
        void *ptr;
 
+       devmem = kzalloc(sizeof(*devmem), GFP_KERNEL);
+       if (!devmem)
+               return false;
+
+       res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE,
+                                     "hmm_dmirror");
+       if (IS_ERR(res))
+               goto err_devmem;
+
+       devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
+       devmem->pagemap.range.start = res->start;
+       devmem->pagemap.range.end = res->end;
+       devmem->pagemap.nr_range = 1;
+       devmem->pagemap.ops = &dmirror_devmem_ops;
+       devmem->pagemap.owner = mdevice;
+
        mutex_lock(&mdevice->devmem_lock);
 
        if (mdevice->devmem_count == mdevice->devmem_capacity) {
@@ -464,33 +487,18 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
                                sizeof(new_chunks[0]) * new_capacity,
                                GFP_KERNEL);
                if (!new_chunks)
-                       goto err;
+                       goto err_release;
                mdevice->devmem_capacity = new_capacity;
                mdevice->devmem_chunks = new_chunks;
        }
 
-       res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE,
-                                       "hmm_dmirror");
-       if (IS_ERR(res))
-               goto err;
-
-       devmem = kzalloc(sizeof(*devmem), GFP_KERNEL);
-       if (!devmem)
-               goto err_release;
-
-       devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
-       devmem->pagemap.res = *res;
-       devmem->pagemap.ops = &dmirror_devmem_ops;
-       devmem->pagemap.owner = mdevice;
-
        ptr = memremap_pages(&devmem->pagemap, numa_node_id());
        if (IS_ERR(ptr))
-               goto err_free;
+               goto err_release;
 
        devmem->mdevice = mdevice;
-       pfn_first = devmem->pagemap.res.start >> PAGE_SHIFT;
-       pfn_last = pfn_first +
-               (resource_size(&devmem->pagemap.res) >> PAGE_SHIFT);
+       pfn_first = devmem->pagemap.range.start >> PAGE_SHIFT;
+       pfn_last = pfn_first + (range_len(&devmem->pagemap.range) >> PAGE_SHIFT);
        mdevice->devmem_chunks[mdevice->devmem_count++] = devmem;
 
        mutex_unlock(&mdevice->devmem_lock);
@@ -517,12 +525,12 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
 
        return true;
 
-err_free:
-       kfree(devmem);
 err_release:
-       release_mem_region(res->start, resource_size(res));
-err:
        mutex_unlock(&mdevice->devmem_lock);
+       release_mem_region(devmem->pagemap.range.start, range_len(&devmem->pagemap.range));
+err_devmem:
+       kfree(devmem);
+
        return false;
 }
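
For reference, with dev_pagemap now carrying a struct range plus nr_range instead of an embedded struct resource, sizes come from range_len(), and doing the kzalloc() and request_free_mem_region() before taking devmem_lock lets the error paths unwind in strict reverse order. A hedged sketch of the same sequence; MY_CHUNK_SIZE, my_pagemap_ops and my_alloc_chunk() are placeholders:

#include <linux/memremap.h>
#include <linux/ioport.h>

/*
 * Sketch: reserve a free physical range and hand it to memremap_pages()
 * as device-private memory. Returns the kernel mapping or an ERR_PTR().
 */
static void *my_alloc_chunk(struct dev_pagemap *pgmap, void *owner)
{
	struct resource *res;

	res = request_free_mem_region(&iomem_resource, MY_CHUNK_SIZE,
				      "my_driver");
	if (IS_ERR(res))
		return res;

	pgmap->type = MEMORY_DEVICE_PRIVATE;
	pgmap->range.start = res->start;
	pgmap->range.end = res->end;
	pgmap->nr_range = 1;
	pgmap->ops = &my_pagemap_ops;
	pgmap->owner = owner;

	return memremap_pages(pgmap, numa_node_id());
}

The first pfn of such a chunk is pgmap->range.start >> PAGE_SHIFT and it spans range_len(&pgmap->range) >> PAGE_SHIFT pages, matching the pfn_first/pfn_last computation above.
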
 
@@ -585,15 +593,6 @@ static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args,
                 */
                spage = migrate_pfn_to_page(*src);
 
-               /*
-                * Don't migrate device private pages from our own driver or
-                * others. For our own we would do a device private memory copy
-                * not a migration and for others, we would need to fault the
-                * other device's page into system memory first.
-                */
-               if (spage && is_zone_device_page(spage))
-                       continue;
-
                dpage = dmirror_devmem_alloc_page(mdevice);
                if (!dpage)
                        continue;
@@ -702,7 +701,8 @@ static int dmirror_migrate(struct dmirror *dmirror,
                args.dst = dst_pfns;
                args.start = addr;
                args.end = next;
-               args.src_owner = NULL;
+               args.pgmap_owner = dmirror->mdevice;
+               args.flags = MIGRATE_VMA_SELECT_SYSTEM;
                ret = migrate_vma_setup(&args);
                if (ret)
                        goto out;
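
With the new interface, source-page selection moves out of the driver's copy loop and into migrate_vma_setup(): MIGRATE_VMA_SELECT_SYSTEM collects only ordinary system pages (which is why the explicit is_zone_device_page() skip could be deleted above), while pgmap_owner is what the MMU_NOTIFY_MIGRATE filter earlier in this patch matches against. A minimal one-page usage sketch; my_migrate_one() and struct my_device are hypothetical:

/* Sketch: migrate a single page of system memory toward device memory. */
static int my_migrate_one(struct vm_area_struct *vma, unsigned long addr,
			  struct my_device *mdev)
{
	unsigned long src_pfn = 0, dst_pfn = 0;
	struct migrate_vma args = {
		.vma		= vma,
		.start		= addr,
		.end		= addr + PAGE_SIZE,
		.src		= &src_pfn,
		.dst		= &dst_pfn,
		.pgmap_owner	= mdev,	/* matched by the notifier filter */
		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
	};
	int ret;

	ret = migrate_vma_setup(&args);
	if (ret)
		return ret;
	/* ... allocate a device page, copy, set dst_pfn ... */
	migrate_vma_pages(&args);
	migrate_vma_finalize(&args);
	return 0;
}
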
@@ -766,6 +766,10 @@ static void dmirror_mkentry(struct dmirror *dmirror, struct hmm_range *range,
                *perm |= HMM_DMIRROR_PROT_WRITE;
        else
                *perm |= HMM_DMIRROR_PROT_READ;
+       if (hmm_pfn_to_map_order(entry) + PAGE_SHIFT == PMD_SHIFT)
+               *perm |= HMM_DMIRROR_PROT_PMD;
+       else if (hmm_pfn_to_map_order(entry) + PAGE_SHIFT == PUD_SHIFT)
+               *perm |= HMM_DMIRROR_PROT_PUD;
 }
 
 static bool dmirror_snapshot_invalidate(struct mmu_interval_notifier *mni,
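
hmm_pfn_to_map_order() reports the power-of-two order of the CPU mapping backing an hmm_range_fault() entry, so order + PAGE_SHIFT equal to PMD_SHIFT or PUD_SHIFT identifies a huge PMD mapping (2MB with 4K pages on x86-64) or a PUD mapping (1GB) respectively. Spelled out as a tiny helper; my_mapping_size() is illustrative:

/* Sketch: bytes covered by the mapping behind one hmm_range_fault() entry. */
static unsigned long my_mapping_size(unsigned long hmm_pfn)
{
	/* order is 0 for a PTE, PMD_SHIFT - PAGE_SHIFT for a huge PMD, ... */
	return PAGE_SIZE << hmm_pfn_to_map_order(hmm_pfn);
}
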
@@ -987,7 +991,7 @@ static void dmirror_devmem_free(struct page *page)
 }
 
 static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
-                                               struct dmirror_device *mdevice)
+                                                     struct dmirror *dmirror)
 {
        const unsigned long *src = args->src;
        unsigned long *dst = args->dst;
@@ -1009,6 +1013,7 @@ static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
                        continue;
 
                lock_page(dpage);
+               xa_erase(&dmirror->pt, addr >> PAGE_SHIFT);
                copy_highpage(dpage, spage);
                *dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
                if (*src & MIGRATE_PFN_WRITE)
@@ -1017,15 +1022,6 @@ static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
        return 0;
 }
 
-static void dmirror_devmem_fault_finalize_and_map(struct migrate_vma *args,
-                                                 struct dmirror *dmirror)
-{
-       /* Invalidate the device's page table mapping. */
-       mutex_lock(&dmirror->mutex);
-       dmirror_do_update(dmirror, args->start, args->end);
-       mutex_unlock(&dmirror->mutex);
-}
-
 static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf)
 {
        struct migrate_vma args;
@@ -1049,16 +1045,21 @@ static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf)
        args.end = args.start + PAGE_SIZE;
        args.src = &src_pfns;
        args.dst = &dst_pfns;
-       args.src_owner = dmirror->mdevice;
+       args.pgmap_owner = dmirror->mdevice;
+       args.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
 
        if (migrate_vma_setup(&args))
                return VM_FAULT_SIGBUS;
 
-       ret = dmirror_devmem_fault_alloc_and_copy(&args, dmirror->mdevice);
+       ret = dmirror_devmem_fault_alloc_and_copy(&args, dmirror);
        if (ret)
                return ret;
        migrate_vma_pages(&args);
-       dmirror_devmem_fault_finalize_and_map(&args, dmirror);
+       /*
+        * No device finalize step is needed since
+        * dmirror_devmem_fault_alloc_and_copy() will have already
+        * invalidated the device page table.
+        */
        migrate_vma_finalize(&args);
        return 0;
 }
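
On the fault side, invalidating the device page table is folded into the copy loop (the xa_erase() above), which is what makes the separate finalize-and-map step removable; the faulting device-private page itself is selected by MIGRATE_VMA_SELECT_DEVICE_PRIVATE together with a matching pgmap_owner. A condensed sketch of such a handler; the my_* names are hypothetical:

/* Sketch: CPU-fault handler that migrates one device-private page back. */
static vm_fault_t my_migrate_to_ram(struct vm_fault *vmf)
{
	unsigned long src_pfn = 0, dst_pfn = 0;
	struct my_driver *drv = my_page_to_driver(vmf->page);
	struct migrate_vma args = {
		.vma		= vmf->vma,
		.start		= vmf->address,
		.end		= vmf->address + PAGE_SIZE,
		.src		= &src_pfn,
		.dst		= &dst_pfn,
		.pgmap_owner	= drv->device,
		.flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
	};

	if (migrate_vma_setup(&args))
		return VM_FAULT_SIGBUS;
	/*
	 * Allocate a system page, copy the data, and drop the driver's
	 * mirror entry here, before migrate_vma_pages(); no later device
	 * finalize step is needed.
	 */
	migrate_vma_pages(&args);
	migrate_vma_finalize(&args);
	return 0;
}
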
@@ -1099,8 +1100,8 @@ static void dmirror_device_remove(struct dmirror_device *mdevice)
                                mdevice->devmem_chunks[i];
 
                        memunmap_pages(&devmem->pagemap);
-                       release_mem_region(devmem->pagemap.res.start,
-                                          resource_size(&devmem->pagemap.res));
+                       release_mem_region(devmem->pagemap.range.start,
+                                          range_len(&devmem->pagemap.range));
                        kfree(devmem);
                }
                kfree(mdevice->devmem_chunks);
@@ -1125,17 +1126,6 @@ static int __init hmm_dmirror_init(void)
                        goto err_chrdev;
        }
 
-       /*
-        * Allocate a zero page to simulate a reserved page of device private
-        * memory which is always zero. The zero_pfn page isn't used just to
-        * make the code here simpler (i.e., we need a struct page for it).
-        */
-       dmirror_zero_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);
-       if (!dmirror_zero_page) {
-               ret = -ENOMEM;
-               goto err_chrdev;
-       }
-
        pr_info("HMM test module loaded. This is only for testing HMM.\n");
        return 0;
 
@@ -1151,8 +1141,6 @@ static void __exit hmm_dmirror_exit(void)
 {
        int id;
 
-       if (dmirror_zero_page)
-               __free_page(dmirror_zero_page);
        for (id = 0; id < DMIRROR_NDEVICES; id++)
                dmirror_device_remove(dmirror_devices + id);
        unregister_chrdev_region(dmirror_dev, DMIRROR_NDEVICES);