Merge tag 'io_uring-5.15-2021-09-11' of git://git.kernel.dk/linux-block
diff --git a/kernel/kexec.c b/kernel/kexec.c
index c82c6c0..b5e40f0 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
 
 #include "kexec_internal.h"
 
-static int copy_user_segment_list(struct kimage *image,
-                                 unsigned long nr_segments,
-                                 struct kexec_segment __user *segments)
-{
-       int ret;
-       size_t segment_bytes;
-
-       /* Read in the segments */
-       image->nr_segments = nr_segments;
-       segment_bytes = nr_segments * sizeof(*segments);
-       ret = copy_from_user(image->segment, segments, segment_bytes);
-       if (ret)
-               ret = -EFAULT;
-
-       return ret;
-}
-
 static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,
                             unsigned long nr_segments,
-                            struct kexec_segment __user *segments,
+                            struct kexec_segment *segments,
                             unsigned long flags)
 {
        int ret;
@@ -58,10 +41,8 @@ static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,
                return -ENOMEM;
 
        image->start = entry;
-
-       ret = copy_user_segment_list(image, nr_segments, segments);
-       if (ret)
-               goto out_free_image;
+       image->nr_segments = nr_segments;
+       memcpy(image->segment, segments, nr_segments * sizeof(*segments));
 
        if (kexec_on_panic) {
                /* Enable special crash kernel control page alloc policy. */
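With copy_user_segment_list() gone, kimage_alloc_init() now expects a kernel-space segment array; the user copy happens once in the syscall entry points. The unchecked multiplication feeding the memcpy() is safe because kexec_load_check() has already rejected nr_segments > KEXEC_SEGMENT_MAX (16). A minimal sketch of the resulting contract, with a hypothetical helper name that is not part of the patch:

    #include <linux/kexec.h>
    #include <linux/string.h>

    /*
     * Sketch only: callers pass an already-copied, kernel-space array whose
     * length was validated against KEXEC_SEGMENT_MAX, so a plain memcpy()
     * bounded by 16 * sizeof(struct kexec_segment) is sufficient.
     */
    static void copy_segment_list_sketch(struct kimage *image,
                                         unsigned long nr_segments,
                                         const struct kexec_segment *segments)
    {
            image->nr_segments = nr_segments;
            memcpy(image->segment, segments, nr_segments * sizeof(*segments));
    }
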
@@ -104,12 +85,23 @@ out_free_image:
 }
 
 static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
-               struct kexec_segment __user *segments, unsigned long flags)
+               struct kexec_segment *segments, unsigned long flags)
 {
        struct kimage **dest_image, *image;
        unsigned long i;
        int ret;
 
+       /*
+        * Because we write directly to the reserved memory region when loading
+        * crash kernels, we need a mutex here to prevent multiple crash kernels
+        * from attempting to load simultaneously, and to prevent a crash kernel
+        * from loading over the top of an in-use crash kernel.
+        *
+        * KISS: always take the mutex.
+        */
+       if (!mutex_trylock(&kexec_mutex))
+               return -EBUSY;
+
        if (flags & KEXEC_ON_CRASH) {
                dest_image = &kexec_crash_image;
                if (kexec_crash_image)
@@ -121,7 +113,8 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
        if (nr_segments == 0) {
                /* Uninstall image */
                kimage_free(xchg(dest_image, NULL));
-               return 0;
+               ret = 0;
+               goto out_unlock;
        }
        if (flags & KEXEC_ON_CRASH) {
                /*
@@ -134,7 +127,7 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
 
        ret = kimage_alloc_init(&image, entry, nr_segments, segments, flags);
        if (ret)
-               return ret;
+               goto out_unlock;
 
        if (flags & KEXEC_PRESERVE_CONTEXT)
                image->preserve_context = 1;
@@ -171,6 +164,8 @@ out:
                arch_kexec_protect_crashkres();
 
        kimage_free(image);
+out_unlock:
+       mutex_unlock(&kexec_mutex);
        return ret;
 }
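The locking comment moved from the two syscall entry points down into do_kexec_load(), so the lock is taken exactly once around the shared load path. The shape is the usual trylock-or-fail pattern with a single unlock label; a reduced sketch under hypothetical names (demo_lock stands in for kexec_mutex):

    #include <linux/errno.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(demo_lock);         /* stand-in for kexec_mutex */

    static int demo_step(void)              /* stand-in for the real work */
    {
            return 0;
    }

    static int demo_guarded_load(void)
    {
            int ret;

            /* Refuse to sleep on the lock: concurrent loaders get -EBUSY. */
            if (!mutex_trylock(&demo_lock))
                    return -EBUSY;

            ret = demo_step();
            if (ret)
                    goto out_unlock;

            ret = demo_step();

    out_unlock:
            mutex_unlock(&demo_lock);
            return ret;
    }
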
 
@@ -236,7 +231,8 @@ static inline int kexec_load_check(unsigned long nr_segments,
 SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
                struct kexec_segment __user *, segments, unsigned long, flags)
 {
-       int result;
+       struct kexec_segment *ksegments;
+       unsigned long result;
 
        result = kexec_load_check(nr_segments, flags);
        if (result)
@@ -247,20 +243,12 @@ SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
                ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
                return -EINVAL;
 
-       /* Because we write directly to the reserved memory
-        * region when loading crash kernels we need a mutex here to
-        * prevent multiple crash  kernels from attempting to load
-        * simultaneously, and to prevent a crash kernel from loading
-        * over the top of a in use crash kernel.
-        *
-        * KISS: always take the mutex.
-        */
-       if (!mutex_trylock(&kexec_mutex))
-               return -EBUSY;
+       ksegments = memdup_user(segments, nr_segments * sizeof(ksegments[0]));
+       if (IS_ERR(ksegments))
+               return PTR_ERR(ksegments);
 
-       result = do_kexec_load(entry, nr_segments, segments, flags);
-
-       mutex_unlock(&kexec_mutex);
+       result = do_kexec_load(entry, nr_segments, ksegments, flags);
+       kfree(ksegments);
 
        return result;
 }
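memdup_user() replaces the removed open-coded allocate-and-copy sequence: it returns either a kmalloc'd copy of the user buffer or an ERR_PTR() (-EFAULT or -ENOMEM), never NULL, so the caller pairs IS_ERR()/PTR_ERR() with a later kfree(). A minimal usage sketch with a hypothetical function name:

    #include <linux/err.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    /* Sketch only: the standard memdup_user() calling convention. */
    static long demo_take_user_buffer(const void __user *uptr, size_t len)
    {
            void *kbuf;
            long ret = 0;

            kbuf = memdup_user(uptr, len);  /* never NULL; ERR_PTR() on failure */
            if (IS_ERR(kbuf))
                    return PTR_ERR(kbuf);

            /* ... operate on the kernel-space copy ... */

            kfree(kbuf);
            return ret;
    }
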
@@ -272,7 +260,7 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
                       compat_ulong_t, flags)
 {
        struct compat_kexec_segment in;
-       struct kexec_segment out, __user *ksegments;
+       struct kexec_segment *ksegments;
        unsigned long i, result;
 
        result = kexec_load_check(nr_segments, flags);
@@ -285,37 +273,28 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
        if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
                return -EINVAL;
 
-       ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
+       ksegments = kmalloc_array(nr_segments, sizeof(ksegments[0]),
+                       GFP_KERNEL);
+       if (!ksegments)
+               return -ENOMEM;
+
        for (i = 0; i < nr_segments; i++) {
                result = copy_from_user(&in, &segments[i], sizeof(in));
-               if (result)
-                       return -EFAULT;
+               if (result) {
+                       result = -EFAULT;
+                       goto fail;
+               }
 
-               out.buf   = compat_ptr(in.buf);
-               out.bufsz = in.bufsz;
-               out.mem   = in.mem;
-               out.memsz = in.memsz;
-
-               result = copy_to_user(&ksegments[i], &out, sizeof(out));
-               if (result)
-                       return -EFAULT;
+               ksegments[i].buf   = compat_ptr(in.buf);
+               ksegments[i].bufsz = in.bufsz;
+               ksegments[i].mem   = in.mem;
+               ksegments[i].memsz = in.memsz;
        }
 
-       /* Because we write directly to the reserved memory
-        * region when loading crash kernels we need a mutex here to
-        * prevent multiple crash  kernels from attempting to load
-        * simultaneously, and to prevent a crash kernel from loading
-        * over the top of a in use crash kernel.
-        *
-        * KISS: always take the mutex.
-        */
-       if (!mutex_trylock(&kexec_mutex))
-               return -EBUSY;
-
        result = do_kexec_load(entry, nr_segments, ksegments, flags);
 
-       mutex_unlock(&kexec_mutex);
-
+fail:
+       kfree(ksegments);
        return result;
 }
 #endif
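The compat path no longer round-trips through a compat_alloc_user_space() staging area: it widens each 32-bit compat_kexec_segment directly into a native kexec_segment (compat_ptr() turns the 32-bit buf into a proper user pointer) and then shares do_kexec_load() with the native syscall. A standalone sketch of that conversion, using a hypothetical helper that is not part of the patch:

    #include <linux/compat.h>
    #include <linux/kexec.h>
    #include <linux/slab.h>
    #include <linux/uaccess.h>

    /* Sketch only: widen compat (32-bit) segment descriptors to native. */
    static struct kexec_segment *demo_copy_compat_segments(
                    struct compat_kexec_segment __user *usegs,
                    unsigned long nr, long *err)
    {
            struct compat_kexec_segment in;
            struct kexec_segment *out;
            unsigned long i;

            out = kmalloc_array(nr, sizeof(*out), GFP_KERNEL);
            if (!out) {
                    *err = -ENOMEM;
                    return NULL;
            }

            for (i = 0; i < nr; i++) {
                    if (copy_from_user(&in, &usegs[i], sizeof(in))) {
                            kfree(out);
                            *err = -EFAULT;
                            return NULL;
                    }
                    out[i].buf   = compat_ptr(in.buf);  /* 32-bit -> __user ptr */
                    out[i].bufsz = in.bufsz;
                    out[i].mem   = in.mem;
                    out[i].memsz = in.memsz;
            }
            return out;
    }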