+	if (pvmw->flags & PVMW_SYNC) {
+		/* Use the stricter lookup */
+		pvmw->pte = pte_offset_map_lock(pvmw->vma->vm_mm, pvmw->pmd,
+						pvmw->address, &pvmw->ptl);
+		*ptlp = pvmw->ptl;
+		return !!pvmw->pte;
+	}
+
+	/*
+	 * It is important to return the ptl corresponding to pte,
+	 * in case *pvmw->pmd changes underneath us; so we need to
+	 * return it even when choosing not to lock, in case caller
+	 * proceeds to loop over next ptes, and finds a match later.
+	 * Though, in most cases, page lock already protects this.
+	 */
+	pvmw->pte = pte_offset_map_nolock(pvmw->vma->vm_mm, pvmw->pmd,
+					  pvmw->address, ptlp);
+	if (!pvmw->pte)
+		return false;
+
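+	/* Read the pte value once; the checks below act on this snapshot. */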
+	ptent = ptep_get(pvmw->pte);
+
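+	/* Migration entries are non-present swap ptes; nothing else can match. */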
+	if (pvmw->flags & PVMW_MIGRATION) {
+		if (!is_swap_pte(ptent))
+			return false;
+	} else if (is_swap_pte(ptent)) {
+		swp_entry_t entry;
+		/*
+		 * Handle un-addressable ZONE_DEVICE memory.
+		 *
+		 * We get here when we are trying to unmap a private
+		 * device page from the process address space. Such a
+		 * page is not CPU accessible and thus is mapped as
+		 * a special swap entry, nonetheless it still does
+		 * count as a valid regular mapping for the page
+		 * (and is accounted as such in the page's map count).
+		 *
+		 * So handle this special case as if it were a normal
+		 * page mapping, i.e. lock the CPU page table and return true.
+		 *
+		 * For more details on device private memory see HMM
+		 * (include/linux/hmm.h or mm/hmm.c).
+		 */
+		entry = pte_to_swp_entry(ptent);
+		if (!is_device_private_entry(entry) &&
+		    !is_device_exclusive_entry(entry))
+			return false;
+	} else if (!pte_present(ptent)) {
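+		/* Not a swap entry and not present: an empty (none) pte, nothing to match. */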
+		return false;