[PATCH] mm: fix a race condition under SMC (self-modifying code) + COW
diff --git a/mm/memory.c b/mm/memory.c
index 92a3ebd..160f5b5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1577,7 +1577,14 @@ gotten:
                entry = mk_pte(new_page, vma->vm_page_prot);
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
                lazy_mmu_prot_update(entry);
-               ptep_establish(vma, address, page_table, entry);
+               /*
+                * Clear the pte entry and flush it first, before updating the
+                * pte with the new entry. This avoids a race condition seen
+                * when one thread is doing SMC (self-modifying code) and
+                * another thread is doing COW.
+                */
+               ptep_clear_flush(vma, address, page_table);
+               set_pte_at(mm, address, page_table, entry);
                update_mmu_cache(vma, address, entry);
                lru_cache_add_active(new_page);
                page_add_new_anon_rmap(new_page, vma, address);
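
For illustration, here is a minimal sketch of the ordering the hunk above
enforces (replace_pte_flushed() is a hypothetical helper, not part of this
patch): the stale translation is cleared and flushed from the TLB before the
new pte is made visible, so a thread executing self-modifying code cannot run
through the old mapping while COW installs the copied page.

    /*
     * Hypothetical helper illustrating the fix: clear and flush the old
     * pte first, then install the new one, so no CPU can use a stale
     * translation in the window between the two steps.
     */
    static inline void replace_pte_flushed(struct mm_struct *mm,
                                           struct vm_area_struct *vma,
                                           unsigned long address,
                                           pte_t *ptep, pte_t entry)
    {
            ptep_clear_flush(vma, address, ptep);  /* clear pte + flush TLB */
            set_pte_at(mm, address, ptep, entry);  /* publish the new pte */
    }
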
@@ -2255,6 +2262,54 @@ oom:
        return VM_FAULT_OOM;
 }
 
+/*
+ * do_no_pfn() tries to create a new page mapping for a page without
+ * a struct page backing it.
+ *
+ * As this is called only for pages that do not currently exist, we
+ * do not need to flush old virtual caches or the TLB.
+ *
+ * We enter with non-exclusive mmap_sem (to exclude vma changes,
+ * but allow concurrent faults), and pte mapped but not yet locked.
+ * We return with mmap_sem still held, but pte unmapped and unlocked.
+ *
+ * It is expected that the ->nopfn handler always returns the same pfn
+ * for a given virtual mapping.
+ *
+ * Mark this `noinline' to prevent it from bloating the main pagefault code.
+ */
+static noinline int do_no_pfn(struct mm_struct *mm, struct vm_area_struct *vma,
+                    unsigned long address, pte_t *page_table, pmd_t *pmd,
+                    int write_access)
+{
+       spinlock_t *ptl;
+       pte_t entry;
+       unsigned long pfn;
+       int ret = VM_FAULT_MINOR;
+
+       pte_unmap(page_table);
+       BUG_ON(!(vma->vm_flags & VM_PFNMAP));
+       BUG_ON(is_cow_mapping(vma->vm_flags));
+
+       pfn = vma->vm_ops->nopfn(vma, address & PAGE_MASK);
+       if (pfn == NOPFN_OOM)
+               return VM_FAULT_OOM;
+       if (pfn == NOPFN_SIGBUS)
+               return VM_FAULT_SIGBUS;
+
+       page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
+
+       /* Only go through if we didn't race with anybody else... */
+       if (pte_none(*page_table)) {
+               entry = pfn_pte(pfn, vma->vm_page_prot);
+               if (write_access)
+                       entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+               set_pte_at(mm, address, page_table, entry);
+       }
+       pte_unmap_unlock(page_table, ptl);
+       return ret;
+}
+
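
To show how the new hook might be wired up, here is a minimal sketch of a
driver implementing ->nopfn for a physically contiguous region without struct
pages. struct mydev_state, its fields, and mydev_nopfn() are hypothetical; the
driver's mmap handler is assumed to have set VM_PFNMAP and vm_private_data
(do_no_pfn() BUGs if VM_PFNMAP is missing).

    /* Hypothetical driver state: one physically contiguous region. */
    struct mydev_state {
            unsigned long base_pfn;         /* first pfn of the region */
            unsigned long nr_pages;         /* region length in pages */
    };

    static unsigned long mydev_nopfn(struct vm_area_struct *vma,
                                     unsigned long address)
    {
            struct mydev_state *dev = vma->vm_private_data;
            unsigned long pgoff = (address - vma->vm_start) >> PAGE_SHIFT;

            if (pgoff >= dev->nr_pages)
                    return NOPFN_SIGBUS;

            /* Same pfn for a given address on every call, as required. */
            return dev->base_pfn + pgoff;
    }

    static struct vm_operations_struct mydev_vm_ops = {
            .nopfn  = mydev_nopfn,
    };
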
 /*
  * Fault of a previously existing named mapping. Repopulate the pte
  * from the encoded file_pte if possible. This enables swappable
@@ -2317,11 +2372,17 @@ static inline int handle_pte_fault(struct mm_struct *mm,
        old_entry = entry = *pte;
        if (!pte_present(entry)) {
                if (pte_none(entry)) {
-                       if (!vma->vm_ops || !vma->vm_ops->nopage)
-                               return do_anonymous_page(mm, vma, address,
-                                       pte, pmd, write_access);
-                       return do_no_page(mm, vma, address,
-                                       pte, pmd, write_access);
+                       if (vma->vm_ops) {
+                               if (vma->vm_ops->nopage)
+                                       return do_no_page(mm, vma, address,
+                                                         pte, pmd,
+                                                         write_access);
+                               if (unlikely(vma->vm_ops->nopfn))
+                                       return do_no_pfn(mm, vma, address, pte,
+                                                        pmd, write_access);
+                       }
+                       return do_anonymous_page(mm, vma, address,
+                                                pte, pmd, write_access);
                }
                if (pte_file(entry))
                        return do_file_page(mm, vma, address,
@@ -2550,3 +2611,56 @@ int in_gate_area_no_task(unsigned long addr)
 }
 
 #endif /* __HAVE_ARCH_GATE_AREA */
+
+/*
+ * Access another process's address space.
+ * Source/target buffer must be in kernel space.
+ * Do not walk the page table directly; use get_user_pages().
+ */
+int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
+{
+       struct mm_struct *mm;
+       struct vm_area_struct *vma;
+       struct page *page;
+       void *old_buf = buf;
+
+       mm = get_task_mm(tsk);
+       if (!mm)
+               return 0;
+
+       down_read(&mm->mmap_sem);
+       /* ignore errors, just check how much was successfully transferred */
+       while (len) {
+               int bytes, ret, offset;
+               void *maddr;
+
+               ret = get_user_pages(tsk, mm, addr, 1,
+                               write, 1, &page, &vma);
+               if (ret <= 0)
+                       break;
+
+               bytes = len;
+               offset = addr & (PAGE_SIZE-1);
+               if (bytes > PAGE_SIZE-offset)
+                       bytes = PAGE_SIZE-offset;
+
+               maddr = kmap(page);
+               if (write) {
+                       copy_to_user_page(vma, page, addr,
+                                         maddr + offset, buf, bytes);
+                       set_page_dirty_lock(page);
+               } else {
+                       copy_from_user_page(vma, page, addr,
+                                           buf, maddr + offset, bytes);
+               }
+               kunmap(page);
+               page_cache_release(page);
+               len -= bytes;
+               buf += bytes;
+               addr += bytes;
+       }
+       up_read(&mm->mmap_sem);
+       mmput(mm);
+
+       return buf - old_buf;
+}
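
As a usage sketch, a ptrace-style word read can be layered on top of
access_process_vm(); read_task_word() below is hypothetical and not part of
this patch.

    /* Hypothetical caller: read one word from another task's memory. */
    static int read_task_word(struct task_struct *child, unsigned long addr,
                              unsigned long *word)
    {
            int copied;

            copied = access_process_vm(child, addr, word, sizeof(*word), 0);
            return copied == sizeof(*word) ? 0 : -EIO;
    }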