diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 3151c87..41c634f 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -303,30 +303,24 @@ static unsigned long dev_pagemap_mapping_shift(struct page *page,
 /*
  * Schedule a process for later kill.
  * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
- * TBD would GFP_NOIO be enough?
  */
 static void add_to_kill(struct task_struct *tsk, struct page *p,
                       struct vm_area_struct *vma,
-                      struct list_head *to_kill,
-                      struct to_kill **tkc)
+                      struct list_head *to_kill)
 {
        struct to_kill *tk;
 
-       if (*tkc) {
-               tk = *tkc;
-               *tkc = NULL;
-       } else {
-               tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
-               if (!tk) {
-                       pr_err("Memory failure: Out of memory while machine check handling\n");
-                       return;
-               }
+       tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
+       if (!tk) {
+               pr_err("Memory failure: Out of memory while machine check handling\n");
+               return;
        }
+
        tk->addr = page_address_in_vma(p, vma);
        if (is_zone_device_page(p))
                tk->size_shift = dev_pagemap_mapping_shift(p, vma);
        else
-               tk->size_shift = compound_order(compound_head(p)) + PAGE_SHIFT;
+               tk->size_shift = page_shift(compound_head(p));
 
        /*
         * Send SIGKILL if "tk->addr == -EFAULT". Also, as
@@ -345,6 +339,7 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
                kfree(tk);
                return;
        }
+
        get_task_struct(tsk);
        tk->tsk = tsk;
        list_add_tail(&tk->nd, to_kill);
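
The two hunks above are one simplification: add_to_kill() no longer receives
a preallocated "tkc" slot and instead always kmalloc()s with GFP_ATOMIC,
which is safe under the read_lock(&tasklist_lock) its callers hold. The
size_shift change is behavior-preserving: page_shift() is the existing
helper from include/linux/mm.h, which reads roughly as follows (quoted from
memory of the mainline definition, not from this tree):

	static inline unsigned int page_shift(struct page *page)
	{
		return PAGE_SHIFT + compound_order(page);
	}

so "compound_order(compound_head(p)) + PAGE_SHIFT" and
"page_shift(compound_head(p))" compute the same value.
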
@@ -436,7 +431,7 @@ static struct task_struct *task_early_kill(struct task_struct *tsk,
  * Collect processes when the error hit an anonymous page.
  */
 static void collect_procs_anon(struct page *page, struct list_head *to_kill,
-                             struct to_kill **tkc, int force_early)
+                               int force_early)
 {
        struct vm_area_struct *vma;
        struct task_struct *tsk;
@@ -461,7 +456,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
                        if (!page_mapped_in_vma(page, vma))
                                continue;
                        if (vma->vm_mm == t->mm)
-                               add_to_kill(t, page, vma, to_kill, tkc);
+                               add_to_kill(t, page, vma, to_kill);
                }
        }
        read_unlock(&tasklist_lock);
@@ -472,7 +467,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
  * Collect processes when the error hit a file mapped page.
  */
 static void collect_procs_file(struct page *page, struct list_head *to_kill,
-                             struct to_kill **tkc, int force_early)
+                               int force_early)
 {
        struct vm_area_struct *vma;
        struct task_struct *tsk;
@@ -496,7 +491,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
                         * to be informed of all such data corruptions.
                         */
                        if (vma->vm_mm == t->mm)
-                               add_to_kill(t, page, vma, to_kill, tkc);
+                               add_to_kill(t, page, vma, to_kill);
                }
        }
        read_unlock(&tasklist_lock);
@@ -505,26 +500,17 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
 
 /*
  * Collect the processes who have the corrupted page mapped to kill.
- * This is done in two steps for locking reasons.
- * First preallocate one tokill structure outside the spin locks,
- * so that we can kill at least one process reasonably reliable.
  */
 static void collect_procs(struct page *page, struct list_head *tokill,
                                int force_early)
 {
-       struct to_kill *tk;
-
        if (!page->mapping)
                return;
 
-       tk = kmalloc(sizeof(struct to_kill), GFP_NOIO);
-       if (!tk)
-               return;
        if (PageAnon(page))
-               collect_procs_anon(page, tokill, &tk, force_early);
+               collect_procs_anon(page, tokill, force_early);
        else
-               collect_procs_file(page, tokill, &tk, force_early);
-       kfree(tk);
+               collect_procs_file(page, tokill, force_early);
 }
 
 static const char *action_name[] = {
@@ -1490,7 +1476,7 @@ static void memory_failure_work_func(struct work_struct *work)
                if (!gotten)
                        break;
                if (entry.flags & MF_SOFT_OFFLINE)
-                       soft_offline_page(pfn_to_page(entry.pfn), entry.flags);
+                       soft_offline_page(entry.pfn, entry.flags);
                else
                        memory_failure(entry.pfn, entry.flags);
        }
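
The work-queue drain now passes the raw pfn straight through. The matching
prototype change (declared in include/linux/mm.h in mainline; assumed to be
the same in this tree) is simply:

	extern int soft_offline_page(unsigned long pfn, int flags);

and the other callers, such as the madvise(MADV_SOFT_OFFLINE) injection
path, likewise switch to passing a pfn instead of calling pfn_to_page()
first.
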
@@ -1871,7 +1857,7 @@ static int soft_offline_free_page(struct page *page)
 
 /**
  * soft_offline_page - Soft offline a page.
- * @page: page to offline
+ * @pfn: pfn to soft-offline
  * @flags: flags. Same as memory_failure().
  *
  * Returns 0 on success, otherwise negated errno.
@@ -1891,18 +1877,17 @@ static int soft_offline_free_page(struct page *page)
  * This is not a 100% solution for all memory, but tries to be
  * ``good enough'' for the majority of memory.
  */
-int soft_offline_page(struct page *page, int flags)
+int soft_offline_page(unsigned long pfn, int flags)
 {
        int ret;
-       unsigned long pfn = page_to_pfn(page);
+       struct page *page;
 
-       if (is_zone_device_page(page)) {
-               pr_debug_ratelimited("soft_offline: %#lx page is device page\n",
-                               pfn);
-               if (flags & MF_COUNT_INCREASED)
-                       put_page(page);
+       if (!pfn_valid(pfn))
+               return -ENXIO;
+       /* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
+       page = pfn_to_online_page(pfn);
+       if (!page)
                return -EIO;
-       }
 
        if (PageHWPoison(page)) {
                pr_info("soft offline: %#lx page already poisoned\n", pfn);
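
Taking a pfn lets soft_offline_page() validate its input before ever
touching a struct page, which is what removes the ZONE_DEVICE special case:
pfn_to_online_page() (from include/linux/memory_hotplug.h) returns NULL
unless the pfn's memory section is online, and ZONE_DEVICE memory is never
onlined. Callers get correspondingly simpler; a sketch of the sysfs trigger
after this change (modeled on the drivers/base/memory.c counterpart in
mainline, whose pfn_valid()/pfn_to_online_page() checks moved in here;
details are assumptions):

	static ssize_t soft_offline_page_store(struct device *dev,
					       struct device_attribute *attr,
					       const char *buf, size_t count)
	{
		int ret;
		u64 pfn;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (kstrtoull(buf, 0, &pfn) < 0)
			return -EINVAL;
		pfn >>= PAGE_SHIFT;
		/* pfn validity checks now happen inside soft_offline_page() */
		ret = soft_offline_page(pfn, 0);
		return ret == 0 ? count : ret;
	}
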