diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index e4f3f5f..8386038 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -5,6 +5,7 @@
  * Copyright (c) 2002 Jean-Francois Dive <jef@linuxbe.org>
  * Copyright (c) 2007 Nokia Siemens Networks
  * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2019 Google LLC
  *
  * Updated RFC4106 AES-GCM testing.
  *    Authors: Aidan O'Mahony (aidan.o.mahony@intel.com)
@@ -26,6 +27,8 @@
 #include <linux/err.h>
 #include <linux/fips.h>
 #include <linux/module.h>
+#include <linux/once.h>
+#include <linux/random.h>
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include <linux/string.h>
@@ -41,6 +44,16 @@ static bool notests;
 module_param(notests, bool, 0644);
 MODULE_PARM_DESC(notests, "disable crypto self-tests");
 
+#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+static bool noextratests;
+module_param(noextratests, bool, 0644);
+MODULE_PARM_DESC(noextratests, "disable expensive crypto self-tests");
+
+static unsigned int fuzz_iterations = 100;
+module_param(fuzz_iterations, uint, 0644);
+MODULE_PARM_DESC(fuzz_iterations, "number of fuzz test iterations");
+#endif
+
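(With CONFIG_CRYPTO_MANAGER_EXTRA_TESTS enabled, and assuming the mainline build layout where testmgr.o is linked into cryptomgr.ko, these knobs surface as cryptomgr module parameters, e.g. booting with cryptomgr.noextratests=1 or cryptomgr.fuzz_iterations=1000.)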
 #ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
 
 /* a perfect nop */
@@ -58,18 +71,6 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
  */
 #define XBUFSIZE       8
 
-/*
- * Indexes into the xbuf to simulate cross-page access.
- */
-#define IDX1           32
-#define IDX2           32400
-#define IDX3           1511
-#define IDX4           8193
-#define IDX5           22222
-#define IDX6           17101
-#define IDX7           27333
-#define IDX8           3000
-
 /*
 * Used by test_cipher()
 */
@@ -136,9 +137,6 @@ struct alg_test_desc {
        } suite;
 };
 
-static const unsigned int IDX[8] = {
-       IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 };
-
 static void hexdump(unsigned char *buf, unsigned int len)
 {
        print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
@@ -146,12 +144,12 @@ static void hexdump(unsigned char *buf, unsigned int len)
                        buf, len, false);
 }
 
-static int testmgr_alloc_buf(char *buf[XBUFSIZE])
+static int __testmgr_alloc_buf(char *buf[XBUFSIZE], int order)
 {
        int i;
 
        for (i = 0; i < XBUFSIZE; i++) {
-               buf[i] = (void *)__get_free_page(GFP_KERNEL);
+               buf[i] = (char *)__get_free_pages(GFP_KERNEL, order);
                if (!buf[i])
                        goto err_free_buf;
        }
@@ -160,1242 +158,1597 @@ static int testmgr_alloc_buf(char *buf[XBUFSIZE])
 
 err_free_buf:
        while (i-- > 0)
-               free_page((unsigned long)buf[i]);
+               free_pages((unsigned long)buf[i], order);
 
        return -ENOMEM;
 }
 
-static void testmgr_free_buf(char *buf[XBUFSIZE])
+static int testmgr_alloc_buf(char *buf[XBUFSIZE])
+{
+       return __testmgr_alloc_buf(buf, 0);
+}
+
+static void __testmgr_free_buf(char *buf[XBUFSIZE], int order)
 {
        int i;
 
        for (i = 0; i < XBUFSIZE; i++)
-               free_page((unsigned long)buf[i]);
+               free_pages((unsigned long)buf[i], order);
 }
 
-static int ahash_guard_result(char *result, char c, int size)
+static void testmgr_free_buf(char *buf[XBUFSIZE])
 {
-       int i;
+       __testmgr_free_buf(buf, 0);
+}
 
-       for (i = 0; i < size; i++) {
-               if (result[i] != c)
-                       return -EINVAL;
-       }
+#define TESTMGR_POISON_BYTE    0xfe
+#define TESTMGR_POISON_LEN     16
 
-       return 0;
+static inline void testmgr_poison(void *addr, size_t len)
+{
+       memset(addr, TESTMGR_POISON_BYTE, len);
 }
 
-static int ahash_partial_update(struct ahash_request **preq,
-       struct crypto_ahash *tfm, const struct hash_testvec *template,
-       void *hash_buff, int k, int temp, struct scatterlist *sg,
-       const char *algo, char *result, struct crypto_wait *wait)
+/* Is the memory region still fully poisoned? */
+static inline bool testmgr_is_poison(const void *addr, size_t len)
 {
-       char *state;
-       struct ahash_request *req;
-       int statesize, ret = -EINVAL;
-       static const unsigned char guard[] = { 0x00, 0xba, 0xad, 0x00 };
-       int digestsize = crypto_ahash_digestsize(tfm);
-
-       req = *preq;
-       statesize = crypto_ahash_statesize(
-                       crypto_ahash_reqtfm(req));
-       state = kmalloc(statesize + sizeof(guard), GFP_KERNEL);
-       if (!state) {
-               pr_err("alg: hash: Failed to alloc state for %s\n", algo);
-               goto out_nostate;
-       }
-       memcpy(state + statesize, guard, sizeof(guard));
-       memset(result, 1, digestsize);
-       ret = crypto_ahash_export(req, state);
-       WARN_ON(memcmp(state + statesize, guard, sizeof(guard)));
-       if (ret) {
-               pr_err("alg: hash: Failed to export() for %s\n", algo);
-               goto out;
-       }
-       ret = ahash_guard_result(result, 1, digestsize);
-       if (ret) {
-               pr_err("alg: hash: Failed, export used req->result for %s\n",
-                      algo);
-               goto out;
-       }
-       ahash_request_free(req);
-       req = ahash_request_alloc(tfm, GFP_KERNEL);
-       if (!req) {
-               pr_err("alg: hash: Failed to alloc request for %s\n", algo);
-               goto out_noreq;
-       }
-       ahash_request_set_callback(req,
-               CRYPTO_TFM_REQ_MAY_BACKLOG,
-               crypto_req_done, wait);
-
-       memcpy(hash_buff, template->plaintext + temp,
-               template->tap[k]);
-       sg_init_one(&sg[0], hash_buff, template->tap[k]);
-       ahash_request_set_crypt(req, sg, result, template->tap[k]);
-       ret = crypto_ahash_import(req, state);
-       if (ret) {
-               pr_err("alg: hash: Failed to import() for %s\n", algo);
-               goto out;
+       return memchr_inv(addr, TESTMGR_POISON_BYTE, len) == NULL;
+}
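These two helpers implement a simple canary pattern: a region is filled with TESTMGR_POISON_BYTE before an operation, and afterwards testmgr_is_poison() confirms the algorithm never wrote there. A minimal sketch of that round trip (buf and its use here are hypothetical, not taken from the patch):

	u8 buf[64];

	testmgr_poison(buf, sizeof(buf));
	/* ... run an operation that must not touch buf ... */
	if (!testmgr_is_poison(buf, sizeof(buf)))
		pr_err("operation wrote outside its output buffer\n");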
+
+/* flush type for hash algorithms */
+enum flush_type {
+       /* merge with update of previous buffer(s) */
+       FLUSH_TYPE_NONE = 0,
+
+       /* update with previous buffer(s) before doing this one */
+       FLUSH_TYPE_FLUSH,
+
+       /* likewise, but also export and re-import the intermediate state */
+       FLUSH_TYPE_REIMPORT,
+};
+
+/* finalization function for hash algorithms */
+enum finalization_type {
+       FINALIZATION_TYPE_FINAL,        /* use final() */
+       FINALIZATION_TYPE_FINUP,        /* use finup() */
+       FINALIZATION_TYPE_DIGEST,       /* use digest() */
+};
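For instance, the "import/export" config defined below marks both of its divisions FLUSH_TYPE_REIMPORT, so before each chunk is hashed any accumulated data is first passed to update() and the intermediate hash state is exported and re-imported.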
+
+#define TEST_SG_TOTAL  10000
+
+/**
+ * struct test_sg_division - description of a scatterlist entry
+ *
+ * This struct describes one entry of a scatterlist being constructed to check a
+ * crypto test vector.
+ *
+ * @proportion_of_total: length of this chunk relative to the total length,
+ *                      given as a proportion out of TEST_SG_TOTAL so that it
+ *                      scales to fit any test vector
+ * @offset: byte offset into a 2-page buffer at which this chunk will start
+ * @offset_relative_to_alignmask: if true, add the algorithm's alignmask to the
+ *                               @offset
+ * @flush_type: for hashes, whether an update() should be done now vs.
+ *             continuing to accumulate data
+ */
+struct test_sg_division {
+       unsigned int proportion_of_total;
+       unsigned int offset;
+       bool offset_relative_to_alignmask;
+       enum flush_type flush_type;
+};
+
+/**
+ * struct testvec_config - configuration for testing a crypto test vector
+ *
+ * This struct describes the data layout and other parameters with which each
+ * crypto test vector can be tested.
+ *
+ * @name: name of this config, logged for debugging purposes if a test fails
+ * @inplace: operate on the data in-place, if applicable for the algorithm type?
+ * @req_flags: extra request_flags, e.g. CRYPTO_TFM_REQ_MAY_SLEEP
+ * @src_divs: description of how to arrange the source scatterlist
+ * @dst_divs: description of how to arrange the dst scatterlist, if applicable
+ *           for the algorithm type.  Defaults to @src_divs if unset.
+ * @iv_offset: misalignment of the IV in the range [0..MAX_ALGAPI_ALIGNMASK+1],
+ *            where 0 is aligned to a 2*(MAX_ALGAPI_ALIGNMASK+1) byte boundary
+ * @iv_offset_relative_to_alignmask: if true, add the algorithm's alignmask to
+ *                                  the @iv_offset
+ * @finalization_type: what finalization function to use for hashes
+ */
+struct testvec_config {
+       const char *name;
+       bool inplace;
+       u32 req_flags;
+       struct test_sg_division src_divs[XBUFSIZE];
+       struct test_sg_division dst_divs[XBUFSIZE];
+       unsigned int iv_offset;
+       bool iv_offset_relative_to_alignmask;
+       enum finalization_type finalization_type;
+};
+
+#define TESTVEC_CONFIG_NAMELEN 192
+
+/*
+ * The following are the lists of testvec_configs to test for each algorithm
+ * type when the basic crypto self-tests are enabled, i.e. when
+ * CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is unset.  They aim to provide good test
+ * coverage, while keeping the test time much shorter than the full fuzz tests
+ * so that the basic tests can be enabled in a wider range of circumstances.
+ */
+
+/* Configs for skciphers and aeads */
+static const struct testvec_config default_cipher_testvec_configs[] = {
+       {
+               .name = "in-place",
+               .inplace = true,
+               .src_divs = { { .proportion_of_total = 10000 } },
+       }, {
+               .name = "out-of-place",
+               .src_divs = { { .proportion_of_total = 10000 } },
+       }, {
+               .name = "unaligned buffer, offset=1",
+               .src_divs = { { .proportion_of_total = 10000, .offset = 1 } },
+               .iv_offset = 1,
+       }, {
+               .name = "buffer aligned only to alignmask",
+               .src_divs = {
+                       {
+                               .proportion_of_total = 10000,
+                               .offset = 1,
+                               .offset_relative_to_alignmask = true,
+                       },
+               },
+               .iv_offset = 1,
+               .iv_offset_relative_to_alignmask = true,
+       }, {
+               .name = "two even aligned splits",
+               .src_divs = {
+                       { .proportion_of_total = 5000 },
+                       { .proportion_of_total = 5000 },
+               },
+       }, {
+               .name = "uneven misaligned splits, may sleep",
+               .req_flags = CRYPTO_TFM_REQ_MAY_SLEEP,
+               .src_divs = {
+                       { .proportion_of_total = 1900, .offset = 33 },
+                       { .proportion_of_total = 3300, .offset = 7  },
+                       { .proportion_of_total = 4800, .offset = 18 },
+               },
+               .iv_offset = 3,
+       }, {
+               .name = "misaligned splits crossing pages, inplace",
+               .inplace = true,
+               .src_divs = {
+                       {
+                               .proportion_of_total = 7500,
+                               .offset = PAGE_SIZE - 32
+                       }, {
+                               .proportion_of_total = 2500,
+                               .offset = PAGE_SIZE - 7
+                       },
+               },
        }
-       ret = ahash_guard_result(result, 1, digestsize);
-       if (ret) {
-               pr_err("alg: hash: Failed, import used req->result for %s\n",
-                      algo);
-               goto out;
+};
+
+static const struct testvec_config default_hash_testvec_configs[] = {
+       {
+               .name = "init+update+final aligned buffer",
+               .src_divs = { { .proportion_of_total = 10000 } },
+               .finalization_type = FINALIZATION_TYPE_FINAL,
+       }, {
+               .name = "init+finup aligned buffer",
+               .src_divs = { { .proportion_of_total = 10000 } },
+               .finalization_type = FINALIZATION_TYPE_FINUP,
+       }, {
+               .name = "digest aligned buffer",
+               .src_divs = { { .proportion_of_total = 10000 } },
+               .finalization_type = FINALIZATION_TYPE_DIGEST,
+       }, {
+               .name = "init+update+final misaligned buffer",
+               .src_divs = { { .proportion_of_total = 10000, .offset = 1 } },
+               .finalization_type = FINALIZATION_TYPE_FINAL,
+       }, {
+               .name = "digest buffer aligned only to alignmask",
+               .src_divs = {
+                       {
+                               .proportion_of_total = 10000,
+                               .offset = 1,
+                               .offset_relative_to_alignmask = true,
+                       },
+               },
+               .finalization_type = FINALIZATION_TYPE_DIGEST,
+       }, {
+               .name = "init+update+update+final two even splits",
+               .src_divs = {
+                       { .proportion_of_total = 5000 },
+                       {
+                               .proportion_of_total = 5000,
+                               .flush_type = FLUSH_TYPE_FLUSH,
+                       },
+               },
+               .finalization_type = FINALIZATION_TYPE_FINAL,
+       }, {
+               .name = "digest uneven misaligned splits, may sleep",
+               .req_flags = CRYPTO_TFM_REQ_MAY_SLEEP,
+               .src_divs = {
+                       { .proportion_of_total = 1900, .offset = 33 },
+                       { .proportion_of_total = 3300, .offset = 7  },
+                       { .proportion_of_total = 4800, .offset = 18 },
+               },
+               .finalization_type = FINALIZATION_TYPE_DIGEST,
+       }, {
+               .name = "digest misaligned splits crossing pages",
+               .src_divs = {
+                       {
+                               .proportion_of_total = 7500,
+                               .offset = PAGE_SIZE - 32,
+                       }, {
+                               .proportion_of_total = 2500,
+                               .offset = PAGE_SIZE - 7,
+                       },
+               },
+               .finalization_type = FINALIZATION_TYPE_DIGEST,
+       }, {
+               .name = "import/export",
+               .src_divs = {
+                       {
+                               .proportion_of_total = 6500,
+                               .flush_type = FLUSH_TYPE_REIMPORT,
+                       }, {
+                               .proportion_of_total = 3500,
+                               .flush_type = FLUSH_TYPE_REIMPORT,
+                       },
+               },
+               .finalization_type = FINALIZATION_TYPE_FINAL,
        }
-       ret = crypto_wait_req(crypto_ahash_update(req), wait);
-       if (ret)
-               goto out;
-       *preq = req;
-       ret = 0;
-       goto out_noreq;
-out:
-       ahash_request_free(req);
-out_noreq:
-       kfree(state);
-out_nostate:
-       return ret;
+};
+
+static unsigned int count_test_sg_divisions(const struct test_sg_division *divs)
+{
+       unsigned int remaining = TEST_SG_TOTAL;
+       unsigned int ndivs = 0;
+
+       do {
+               remaining -= divs[ndivs++].proportion_of_total;
+       } while (remaining);
+
+       return ndivs;
 }
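For example, with divs = { { .proportion_of_total = 5000 }, { .proportion_of_total = 5000 } } the loop subtracts 5000 twice and returns 2; the do/while relies on the proportions summing to exactly TEST_SG_TOTAL, which valid_sg_divisions() below checks.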
 
-enum hash_test {
-       HASH_TEST_DIGEST,
-       HASH_TEST_FINAL,
-       HASH_TEST_FINUP
-};
+static bool valid_sg_divisions(const struct test_sg_division *divs,
+                              unsigned int count, bool *any_flushes_ret)
+{
+       unsigned int total = 0;
+       unsigned int i;
 
-static int __test_hash(struct crypto_ahash *tfm,
-                      const struct hash_testvec *template, unsigned int tcount,
-                      enum hash_test test_type, const int align_offset)
+       for (i = 0; i < count && total != TEST_SG_TOTAL; i++) {
+               if (divs[i].proportion_of_total <= 0 ||
+                   divs[i].proportion_of_total > TEST_SG_TOTAL - total)
+                       return false;
+               total += divs[i].proportion_of_total;
+               if (divs[i].flush_type != FLUSH_TYPE_NONE)
+                       *any_flushes_ret = true;
+       }
+       return total == TEST_SG_TOTAL &&
+               memchr_inv(&divs[i], 0, (count - i) * sizeof(divs[0])) == NULL;
+}
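So a layout such as { { .proportion_of_total = 6000 }, { .proportion_of_total = 3000 } } is rejected: the two entries only sum to 9000, and the all-zero entry that follows fails the proportion_of_total check before the total can ever reach TEST_SG_TOTAL.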
+
+/*
+ * Check whether the given testvec_config is valid.  This isn't strictly needed
+ * since every testvec_config should be valid, but check anyway so that people
+ * don't unknowingly add broken configs that don't do what they wanted.
+ */
+static bool valid_testvec_config(const struct testvec_config *cfg)
 {
-       const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
-       size_t digest_size = crypto_ahash_digestsize(tfm);
-       unsigned int i, j, k, temp;
-       struct scatterlist sg[8];
-       char *result;
-       char *key;
-       struct ahash_request *req;
-       struct crypto_wait wait;
-       void *hash_buff;
-       char *xbuf[XBUFSIZE];
-       int ret = -ENOMEM;
+       bool any_flushes = false;
 
-       result = kmalloc(digest_size, GFP_KERNEL);
-       if (!result)
-               return ret;
-       key = kmalloc(MAX_KEYLEN, GFP_KERNEL);
-       if (!key)
-               goto out_nobuf;
-       if (testmgr_alloc_buf(xbuf))
-               goto out_nobuf;
+       if (cfg->name == NULL)
+               return false;
 
-       crypto_init_wait(&wait);
+       if (!valid_sg_divisions(cfg->src_divs, ARRAY_SIZE(cfg->src_divs),
+                               &any_flushes))
+               return false;
 
-       req = ahash_request_alloc(tfm, GFP_KERNEL);
-       if (!req) {
-               printk(KERN_ERR "alg: hash: Failed to allocate request for "
-                      "%s\n", algo);
-               goto out_noreq;
+       if (cfg->dst_divs[0].proportion_of_total) {
+               if (!valid_sg_divisions(cfg->dst_divs,
+                                       ARRAY_SIZE(cfg->dst_divs),
+                                       &any_flushes))
+                       return false;
+       } else {
+               if (memchr_inv(cfg->dst_divs, 0, sizeof(cfg->dst_divs)))
+                       return false;
+               /* defaults to dst_divs=src_divs */
        }
-       ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-                                  crypto_req_done, &wait);
 
-       j = 0;
-       for (i = 0; i < tcount; i++) {
-               if (template[i].np)
-                       continue;
+       if (cfg->iv_offset +
+           (cfg->iv_offset_relative_to_alignmask ? MAX_ALGAPI_ALIGNMASK : 0) >
+           MAX_ALGAPI_ALIGNMASK + 1)
+               return false;
 
-               ret = -EINVAL;
-               if (WARN_ON(align_offset + template[i].psize > PAGE_SIZE))
-                       goto out;
+       if (any_flushes && cfg->finalization_type == FINALIZATION_TYPE_DIGEST)
+               return false;
 
-               j++;
-               memset(result, 0, digest_size);
+       return true;
+}
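The last check reflects that a config using digest() cannot contain flush points: digest() hashes the entire message in a single call, so there is no intermediate state to flush or to export.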
 
-               hash_buff = xbuf[0];
-               hash_buff += align_offset;
+struct test_sglist {
+       char *bufs[XBUFSIZE];
+       struct scatterlist sgl[XBUFSIZE];
+       struct scatterlist sgl_saved[XBUFSIZE];
+       struct scatterlist *sgl_ptr;
+       unsigned int nents;
+};
 
-               memcpy(hash_buff, template[i].plaintext, template[i].psize);
-               sg_init_one(&sg[0], hash_buff, template[i].psize);
+static int init_test_sglist(struct test_sglist *tsgl)
+{
+       return __testmgr_alloc_buf(tsgl->bufs, 1 /* two pages per buffer */);
+}
 
-               if (template[i].ksize) {
-                       crypto_ahash_clear_flags(tfm, ~0);
-                       if (template[i].ksize > MAX_KEYLEN) {
-                               pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n",
-                                      j, algo, template[i].ksize, MAX_KEYLEN);
-                               ret = -EINVAL;
-                               goto out;
-                       }
-                       memcpy(key, template[i].key, template[i].ksize);
-                       ret = crypto_ahash_setkey(tfm, key, template[i].ksize);
-                       if (ret) {
-                               printk(KERN_ERR "alg: hash: setkey failed on "
-                                      "test %d for %s: ret=%d\n", j, algo,
-                                      -ret);
-                               goto out;
-                       }
-               }
+static void destroy_test_sglist(struct test_sglist *tsgl)
+{
+       return __testmgr_free_buf(tsgl->bufs, 1 /* two pages per buffer */);
+}
 
-               ahash_request_set_crypt(req, sg, result, template[i].psize);
-               switch (test_type) {
-               case HASH_TEST_DIGEST:
-                       ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
-                       if (ret) {
-                               pr_err("alg: hash: digest failed on test %d "
-                                      "for %s: ret=%d\n", j, algo, -ret);
-                               goto out;
-                       }
-                       break;
+/**
+ * build_test_sglist() - build a scatterlist for a crypto test
+ *
+ * @tsgl: the scatterlist to build.  @tsgl->bufs[] contains an array of 2-page
+ *       buffers which the scatterlist @tsgl->sgl[] will be made to point into.
+ * @divs: the layout specification on which the scatterlist will be based
+ * @alignmask: the algorithm's alignmask
+ * @total_len: the total length of the scatterlist to build in bytes
+ * @data: if non-NULL, the buffers will be filled with this data until it ends.
+ *       Otherwise the buffers will be poisoned.  In both cases, some bytes
+ *       past the end of each buffer will be poisoned to help detect overruns.
+ * @out_divs: if non-NULL, the test_sg_division to which each scatterlist entry
+ *           corresponds will be returned here.  This will match @divs except
+ *           that divisions resolving to a length of 0 are omitted as they are
+ *           not included in the scatterlist.
+ *
+ * Return: 0 or a -errno value
+ */
+static int build_test_sglist(struct test_sglist *tsgl,
+                            const struct test_sg_division *divs,
+                            const unsigned int alignmask,
+                            const unsigned int total_len,
+                            struct iov_iter *data,
+                            const struct test_sg_division *out_divs[XBUFSIZE])
+{
+       struct {
+               const struct test_sg_division *div;
+               size_t length;
+       } partitions[XBUFSIZE];
+       const unsigned int ndivs = count_test_sg_divisions(divs);
+       unsigned int len_remaining = total_len;
+       unsigned int i;
 
-               case HASH_TEST_FINAL:
-                       memset(result, 1, digest_size);
-                       ret = crypto_wait_req(crypto_ahash_init(req), &wait);
-                       if (ret) {
-                               pr_err("alg: hash: init failed on test %d "
-                                      "for %s: ret=%d\n", j, algo, -ret);
-                               goto out;
-                       }
-                       ret = ahash_guard_result(result, 1, digest_size);
-                       if (ret) {
-                               pr_err("alg: hash: init failed on test %d "
-                                      "for %s: used req->result\n", j, algo);
-                               goto out;
-                       }
-                       ret = crypto_wait_req(crypto_ahash_update(req), &wait);
-                       if (ret) {
-                               pr_err("alg: hash: update failed on test %d "
-                                      "for %s: ret=%d\n", j, algo, -ret);
-                               goto out;
-                       }
-                       ret = ahash_guard_result(result, 1, digest_size);
-                       if (ret) {
-                               pr_err("alg: hash: update failed on test %d "
-                                      "for %s: used req->result\n", j, algo);
-                               goto out;
-                       }
-                       ret = crypto_wait_req(crypto_ahash_final(req), &wait);
-                       if (ret) {
-                               pr_err("alg: hash: final failed on test %d "
-                                      "for %s: ret=%d\n", j, algo, -ret);
-                               goto out;
-                       }
-                       break;
+       BUILD_BUG_ON(ARRAY_SIZE(partitions) != ARRAY_SIZE(tsgl->sgl));
+       if (WARN_ON(ndivs > ARRAY_SIZE(partitions)))
+               return -EINVAL;
 
-               case HASH_TEST_FINUP:
-                       memset(result, 1, digest_size);
-                       ret = crypto_wait_req(crypto_ahash_init(req), &wait);
-                       if (ret) {
-                               pr_err("alg: hash: init failed on test %d "
-                                      "for %s: ret=%d\n", j, algo, -ret);
-                               goto out;
-                       }
-                       ret = ahash_guard_result(result, 1, digest_size);
-                       if (ret) {
-                               pr_err("alg: hash: init failed on test %d "
-                                      "for %s: used req->result\n", j, algo);
-                               goto out;
-                       }
-                       ret = crypto_wait_req(crypto_ahash_finup(req), &wait);
-                       if (ret) {
-                               pr_err("alg: hash: final failed on test %d "
-                                      "for %s: ret=%d\n", j, algo, -ret);
-                               goto out;
-                       }
-                       break;
-               }
+       /* Calculate the (div, length) pairs */
+       tsgl->nents = 0;
+       for (i = 0; i < ndivs; i++) {
+               unsigned int len_this_sg =
+                       min(len_remaining,
+                           (total_len * divs[i].proportion_of_total +
+                            TEST_SG_TOTAL / 2) / TEST_SG_TOTAL);
 
-               if (memcmp(result, template[i].digest,
-                          crypto_ahash_digestsize(tfm))) {
-                       printk(KERN_ERR "alg: hash: Test %d failed for %s\n",
-                              j, algo);
-                       hexdump(result, crypto_ahash_digestsize(tfm));
-                       ret = -EINVAL;
-                       goto out;
+               if (len_this_sg != 0) {
+                       partitions[tsgl->nents].div = &divs[i];
+                       partitions[tsgl->nents].length = len_this_sg;
+                       tsgl->nents++;
+                       len_remaining -= len_this_sg;
                }
        }
+       if (tsgl->nents == 0) {
+               partitions[tsgl->nents].div = &divs[0];
+               partitions[tsgl->nents].length = 0;
+               tsgl->nents++;
+       }
+       partitions[tsgl->nents - 1].length += len_remaining;
 
-       if (test_type)
-               goto out;
+       /* Set up the sgl entries and fill the data or poison */
+       sg_init_table(tsgl->sgl, tsgl->nents);
+       for (i = 0; i < tsgl->nents; i++) {
+               unsigned int offset = partitions[i].div->offset;
+               void *addr;
 
-       j = 0;
-       for (i = 0; i < tcount; i++) {
-               /* alignment tests are only done with continuous buffers */
-               if (align_offset != 0)
-                       break;
+               if (partitions[i].div->offset_relative_to_alignmask)
+                       offset += alignmask;
 
-               if (!template[i].np)
-                       continue;
+               while (offset + partitions[i].length + TESTMGR_POISON_LEN >
+                      2 * PAGE_SIZE) {
+                       if (WARN_ON(offset <= 0))
+                               return -EINVAL;
+                       offset /= 2;
+               }
 
-               j++;
-               memset(result, 0, digest_size);
+               addr = &tsgl->bufs[i][offset];
+               sg_set_buf(&tsgl->sgl[i], addr, partitions[i].length);
 
-               temp = 0;
-               sg_init_table(sg, template[i].np);
-               ret = -EINVAL;
-               for (k = 0; k < template[i].np; k++) {
-                       if (WARN_ON(offset_in_page(IDX[k]) +
-                                   template[i].tap[k] > PAGE_SIZE))
-                               goto out;
-                       sg_set_buf(&sg[k],
-                                  memcpy(xbuf[IDX[k] >> PAGE_SHIFT] +
-                                         offset_in_page(IDX[k]),
-                                         template[i].plaintext + temp,
-                                         template[i].tap[k]),
-                                  template[i].tap[k]);
-                       temp += template[i].tap[k];
-               }
-
-               if (template[i].ksize) {
-                       if (template[i].ksize > MAX_KEYLEN) {
-                               pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n",
-                                      j, algo, template[i].ksize, MAX_KEYLEN);
-                               ret = -EINVAL;
-                               goto out;
-                       }
-                       crypto_ahash_clear_flags(tfm, ~0);
-                       memcpy(key, template[i].key, template[i].ksize);
-                       ret = crypto_ahash_setkey(tfm, key, template[i].ksize);
-
-                       if (ret) {
-                               printk(KERN_ERR "alg: hash: setkey "
-                                      "failed on chunking test %d "
-                                      "for %s: ret=%d\n", j, algo, -ret);
-                               goto out;
-                       }
-               }
+               if (out_divs)
+                       out_divs[i] = partitions[i].div;
 
-               ahash_request_set_crypt(req, sg, result, template[i].psize);
-               ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
-               if (ret) {
-                       pr_err("alg: hash: digest failed on chunking test %d for %s: ret=%d\n",
-                              j, algo, -ret);
-                       goto out;
-               }
+               if (data) {
+                       size_t copy_len, copied;
 
-               if (memcmp(result, template[i].digest,
-                          crypto_ahash_digestsize(tfm))) {
-                       printk(KERN_ERR "alg: hash: Chunking test %d "
-                              "failed for %s\n", j, algo);
-                       hexdump(result, crypto_ahash_digestsize(tfm));
-                       ret = -EINVAL;
-                       goto out;
+                       copy_len = min(partitions[i].length, data->count);
+                       copied = copy_from_iter(addr, copy_len, data);
+                       if (WARN_ON(copied != copy_len))
+                               return -EINVAL;
+                       testmgr_poison(addr + copy_len, partitions[i].length +
+                                      TESTMGR_POISON_LEN - copy_len);
+               } else {
+                       testmgr_poison(addr, partitions[i].length +
+                                      TESTMGR_POISON_LEN);
                }
        }
 
-       /* partial update exercise */
-       j = 0;
-       for (i = 0; i < tcount; i++) {
-               /* alignment tests are only done with continuous buffers */
-               if (align_offset != 0)
-                       break;
+       sg_mark_end(&tsgl->sgl[tsgl->nents - 1]);
+       tsgl->sgl_ptr = tsgl->sgl;
+       memcpy(tsgl->sgl_saved, tsgl->sgl, tsgl->nents * sizeof(tsgl->sgl[0]));
+       return 0;
+}
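As a worked example of the rounding above (illustrative numbers only): with total_len = 155 and proportions of 1900, 3300 and 4800 out of TEST_SG_TOTAL, the per-entry lengths come out as (155*1900 + 5000)/10000 = 29, (155*3300 + 5000)/10000 = 51 and (155*4800 + 5000)/10000 = 74 bytes, and the 1 byte lost to rounding is folded into the last entry by the final len_remaining adjustment, giving 29 + 51 + 75 = 155.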
 
-               if (template[i].np < 2)
-                       continue;
+/*
+ * Verify that a scatterlist crypto operation produced the correct output.
+ *
+ * @tsgl: scatterlist containing the actual output
+ * @expected_output: buffer containing the expected output
+ * @len_to_check: length of @expected_output in bytes
+ * @unchecked_prefix_len: number of ignored bytes in @tsgl prior to real result
+ * @check_poison: verify that the poison bytes after each chunk are intact?
+ *
+ * Return: 0 if correct, -EINVAL if incorrect, -EOVERFLOW if buffer overrun.
+ */
+static int verify_correct_output(const struct test_sglist *tsgl,
+                                const char *expected_output,
+                                unsigned int len_to_check,
+                                unsigned int unchecked_prefix_len,
+                                bool check_poison)
+{
+       unsigned int i;
 
-               j++;
-               memset(result, 0, digest_size);
+       for (i = 0; i < tsgl->nents; i++) {
+               struct scatterlist *sg = &tsgl->sgl_ptr[i];
+               unsigned int len = sg->length;
+               unsigned int offset = sg->offset;
+               const char *actual_output;
 
-               ret = -EINVAL;
-               hash_buff = xbuf[0];
-               memcpy(hash_buff, template[i].plaintext,
-                       template[i].tap[0]);
-               sg_init_one(&sg[0], hash_buff, template[i].tap[0]);
-
-               if (template[i].ksize) {
-                       crypto_ahash_clear_flags(tfm, ~0);
-                       if (template[i].ksize > MAX_KEYLEN) {
-                               pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n",
-                                       j, algo, template[i].ksize, MAX_KEYLEN);
-                               ret = -EINVAL;
-                               goto out;
-                       }
-                       memcpy(key, template[i].key, template[i].ksize);
-                       ret = crypto_ahash_setkey(tfm, key, template[i].ksize);
-                       if (ret) {
-                               pr_err("alg: hash: setkey failed on test %d for %s: ret=%d\n",
-                                       j, algo, -ret);
-                               goto out;
+               if (unchecked_prefix_len) {
+                       if (unchecked_prefix_len >= len) {
+                               unchecked_prefix_len -= len;
+                               continue;
                        }
+                       offset += unchecked_prefix_len;
+                       len -= unchecked_prefix_len;
+                       unchecked_prefix_len = 0;
                }
+               len = min(len, len_to_check);
+               actual_output = page_address(sg_page(sg)) + offset;
+               if (memcmp(expected_output, actual_output, len) != 0)
+                       return -EINVAL;
+               if (check_poison &&
+                   !testmgr_is_poison(actual_output + len, TESTMGR_POISON_LEN))
+                       return -EOVERFLOW;
+               len_to_check -= len;
+               expected_output += len;
+       }
+       if (WARN_ON(len_to_check != 0))
+               return -EINVAL;
+       return 0;
+}
 
-               ahash_request_set_crypt(req, sg, result, template[i].tap[0]);
-               ret = crypto_wait_req(crypto_ahash_init(req), &wait);
-               if (ret) {
-                       pr_err("alg: hash: init failed on test %d for %s: ret=%d\n",
-                               j, algo, -ret);
-                       goto out;
-               }
-               ret = crypto_wait_req(crypto_ahash_update(req), &wait);
-               if (ret) {
-                       pr_err("alg: hash: update failed on test %d for %s: ret=%d\n",
-                               j, algo, -ret);
-                       goto out;
-               }
+static bool is_test_sglist_corrupted(const struct test_sglist *tsgl)
+{
+       unsigned int i;
 
-               temp = template[i].tap[0];
-               for (k = 1; k < template[i].np; k++) {
-                       ret = ahash_partial_update(&req, tfm, &template[i],
-                               hash_buff, k, temp, &sg[0], algo, result,
-                               &wait);
-                       if (ret) {
-                               pr_err("alg: hash: partial update failed on test %d for %s: ret=%d\n",
-                                       j, algo, -ret);
-                               goto out_noreq;
-                       }
-                       temp += template[i].tap[k];
-               }
-               ret = crypto_wait_req(crypto_ahash_final(req), &wait);
-               if (ret) {
-                       pr_err("alg: hash: final failed on test %d for %s: ret=%d\n",
-                               j, algo, -ret);
-                       goto out;
-               }
-               if (memcmp(result, template[i].digest,
-                          crypto_ahash_digestsize(tfm))) {
-                       pr_err("alg: hash: Partial Test %d failed for %s\n",
-                              j, algo);
-                       hexdump(result, crypto_ahash_digestsize(tfm));
-                       ret = -EINVAL;
-                       goto out;
-               }
+       for (i = 0; i < tsgl->nents; i++) {
+               if (tsgl->sgl[i].page_link != tsgl->sgl_saved[i].page_link)
+                       return true;
+               if (tsgl->sgl[i].offset != tsgl->sgl_saved[i].offset)
+                       return true;
+               if (tsgl->sgl[i].length != tsgl->sgl_saved[i].length)
+                       return true;
        }
-
-       ret = 0;
-
-out:
-       ahash_request_free(req);
-out_noreq:
-       testmgr_free_buf(xbuf);
-out_nobuf:
-       kfree(key);
-       kfree(result);
-       return ret;
+       return false;
 }
 
-static int test_hash(struct crypto_ahash *tfm,
-                    const struct hash_testvec *template,
-                    unsigned int tcount, enum hash_test test_type)
+struct cipher_test_sglists {
+       struct test_sglist src;
+       struct test_sglist dst;
+};
+
+static struct cipher_test_sglists *alloc_cipher_test_sglists(void)
 {
-       unsigned int alignmask;
-       int ret;
+       struct cipher_test_sglists *tsgls;
 
-       ret = __test_hash(tfm, template, tcount, test_type, 0);
-       if (ret)
-               return ret;
+       tsgls = kmalloc(sizeof(*tsgls), GFP_KERNEL);
+       if (!tsgls)
+               return NULL;
 
-       /* test unaligned buffers, check with one byte offset */
-       ret = __test_hash(tfm, template, tcount, test_type, 1);
-       if (ret)
-               return ret;
+       if (init_test_sglist(&tsgls->src) != 0)
+               goto fail_kfree;
+       if (init_test_sglist(&tsgls->dst) != 0)
+               goto fail_destroy_src;
 
-       alignmask = crypto_tfm_alg_alignmask(&tfm->base);
-       if (alignmask) {
-               /* Check if alignment mask for tfm is correctly set. */
-               ret = __test_hash(tfm, template, tcount, test_type,
-                                 alignmask + 1);
-               if (ret)
-                       return ret;
-       }
+       return tsgls;
 
-       return 0;
+fail_destroy_src:
+       destroy_test_sglist(&tsgls->src);
+fail_kfree:
+       kfree(tsgls);
+       return NULL;
 }
 
-static int __test_aead(struct crypto_aead *tfm, int enc,
-                      const struct aead_testvec *template, unsigned int tcount,
-                      const bool diff_dst, const int align_offset)
+static void free_cipher_test_sglists(struct cipher_test_sglists *tsgls)
 {
-       const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
-       unsigned int i, j, k, n, temp;
-       int ret = -ENOMEM;
-       char *q;
-       char *key;
-       struct aead_request *req;
-       struct scatterlist *sg;
-       struct scatterlist *sgout;
-       const char *e, *d;
-       struct crypto_wait wait;
-       unsigned int authsize, iv_len;
-       char *iv;
-       char *xbuf[XBUFSIZE];
-       char *xoutbuf[XBUFSIZE];
-       char *axbuf[XBUFSIZE];
-
-       iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
-       if (!iv)
-               return ret;
-       key = kmalloc(MAX_KEYLEN, GFP_KERNEL);
-       if (!key)
-               goto out_noxbuf;
-       if (testmgr_alloc_buf(xbuf))
-               goto out_noxbuf;
-       if (testmgr_alloc_buf(axbuf))
-               goto out_noaxbuf;
-       if (diff_dst && testmgr_alloc_buf(xoutbuf))
-               goto out_nooutbuf;
-
-       /* avoid "the frame size is larger than 1024 bytes" compiler warning */
-       sg = kmalloc(array3_size(sizeof(*sg), 8, (diff_dst ? 4 : 2)),
-                    GFP_KERNEL);
-       if (!sg)
-               goto out_nosg;
-       sgout = &sg[16];
-
-       if (diff_dst)
-               d = "-ddst";
-       else
-               d = "";
+       if (tsgls) {
+               destroy_test_sglist(&tsgls->src);
+               destroy_test_sglist(&tsgls->dst);
+               kfree(tsgls);
+       }
+}
 
-       if (enc == ENCRYPT)
-               e = "encryption";
-       else
-               e = "decryption";
+/* Build the src and dst scatterlists for an skcipher or AEAD test */
+static int build_cipher_test_sglists(struct cipher_test_sglists *tsgls,
+                                    const struct testvec_config *cfg,
+                                    unsigned int alignmask,
+                                    unsigned int src_total_len,
+                                    unsigned int dst_total_len,
+                                    const struct kvec *inputs,
+                                    unsigned int nr_inputs)
+{
+       struct iov_iter input;
+       int err;
 
-       crypto_init_wait(&wait);
+       iov_iter_kvec(&input, WRITE, inputs, nr_inputs, src_total_len);
+       err = build_test_sglist(&tsgls->src, cfg->src_divs, alignmask,
+                               cfg->inplace ?
+                                       max(dst_total_len, src_total_len) :
+                                       src_total_len,
+                               &input, NULL);
+       if (err)
+               return err;
 
-       req = aead_request_alloc(tfm, GFP_KERNEL);
-       if (!req) {
-               pr_err("alg: aead%s: Failed to allocate request for %s\n",
-                      d, algo);
-               goto out;
+       if (cfg->inplace) {
+               tsgls->dst.sgl_ptr = tsgls->src.sgl;
+               tsgls->dst.nents = tsgls->src.nents;
+               return 0;
        }
+       return build_test_sglist(&tsgls->dst,
+                                cfg->dst_divs[0].proportion_of_total ?
+                                       cfg->dst_divs : cfg->src_divs,
+                                alignmask, dst_total_len, NULL, NULL);
+}
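A hedged sketch of how a caller might drive this helper for a single-segment test vector; ptext, len, tsgls, cfg and alignmask are assumed locals rather than names taken from this patch:

	struct kvec input = {
		.iov_base = (void *)ptext,	/* the vector's input data */
		.iov_len  = len,
	};
	int err;

	/* same total length for src and dst, one input segment */
	err = build_cipher_test_sglists(tsgls, cfg, alignmask,
					len, len, &input, 1);
	if (err)
		return err;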
 
-       aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-                                 crypto_req_done, &wait);
+#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+static char *generate_random_sgl_divisions(struct test_sg_division *divs,
+                                          size_t max_divs, char *p, char *end,
+                                          bool gen_flushes)
+{
+       struct test_sg_division *div = divs;
+       unsigned int remaining = TEST_SG_TOTAL;
 
-       iv_len = crypto_aead_ivsize(tfm);
+       do {
+               unsigned int this_len;
 
-       for (i = 0, j = 0; i < tcount; i++) {
-               const char *input, *expected_output;
-               unsigned int inlen, outlen;
-               char *inbuf, *outbuf, *assocbuf;
+               if (div == &divs[max_divs - 1] || prandom_u32() % 2 == 0)
+                       this_len = remaining;
+               else
+                       this_len = 1 + (prandom_u32() % remaining);
+               div->proportion_of_total = this_len;
 
-               if (template[i].np)
-                       continue;
-               if (enc) {
-                       if (template[i].novrfy)
-                               continue;
-                       input = template[i].ptext;
-                       inlen = template[i].plen;
-                       expected_output = template[i].ctext;
-                       outlen = template[i].clen;
-               } else {
-                       input = template[i].ctext;
-                       inlen = template[i].clen;
-                       expected_output = template[i].ptext;
-                       outlen = template[i].plen;
+               if (prandom_u32() % 4 == 0)
+                       div->offset = (PAGE_SIZE - 128) + (prandom_u32() % 128);
+               else if (prandom_u32() % 2 == 0)
+                       div->offset = prandom_u32() % 32;
+               else
+                       div->offset = prandom_u32() % PAGE_SIZE;
+               if (prandom_u32() % 8 == 0)
+                       div->offset_relative_to_alignmask = true;
+
+               div->flush_type = FLUSH_TYPE_NONE;
+               if (gen_flushes) {
+                       switch (prandom_u32() % 4) {
+                       case 0:
+                               div->flush_type = FLUSH_TYPE_REIMPORT;
+                               break;
+                       case 1:
+                               div->flush_type = FLUSH_TYPE_FLUSH;
+                               break;
+                       }
                }
 
-               j++;
+               BUILD_BUG_ON(TEST_SG_TOTAL != 10000); /* for "%u.%u%%" */
+               p += scnprintf(p, end - p, "%s%u.%u%%@%s+%u%s",
+                              div->flush_type == FLUSH_TYPE_NONE ? "" :
+                              div->flush_type == FLUSH_TYPE_FLUSH ?
+                              "<flush> " : "<reimport> ",
+                              this_len / 100, this_len % 100,
+                              div->offset_relative_to_alignmask ?
+                                       "alignmask" : "",
+                              div->offset, this_len == remaining ? "" : ", ");
+               remaining -= this_len;
+               div++;
+       } while (remaining);
+
+       return p;
+}
 
-               /* some templates have no input data but they will
-                * touch input
-                */
-               inbuf = xbuf[0] + align_offset;
-               assocbuf = axbuf[0];
+/* Generate a random testvec_config for fuzz testing */
+static void generate_random_testvec_config(struct testvec_config *cfg,
+                                          char *name, size_t max_namelen)
+{
+       char *p = name;
+       char * const end = name + max_namelen;
 
-               ret = -EINVAL;
-               if (WARN_ON(align_offset + template[i].clen > PAGE_SIZE ||
-                           template[i].alen > PAGE_SIZE))
-                       goto out;
+       memset(cfg, 0, sizeof(*cfg));
 
-               memcpy(inbuf, input, inlen);
-               memcpy(assocbuf, template[i].assoc, template[i].alen);
-               if (template[i].iv)
-                       memcpy(iv, template[i].iv, iv_len);
-               else
-                       memset(iv, 0, iv_len);
+       cfg->name = name;
 
-               crypto_aead_clear_flags(tfm, ~0);
-               if (template[i].wk)
-                       crypto_aead_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+       p += scnprintf(p, end - p, "random:");
 
-               if (template[i].klen > MAX_KEYLEN) {
-                       pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
-                              d, j, algo, template[i].klen,
-                              MAX_KEYLEN);
-                       ret = -EINVAL;
-                       goto out;
-               }
-               memcpy(key, template[i].key, template[i].klen);
-
-               ret = crypto_aead_setkey(tfm, key, template[i].klen);
-               if (template[i].fail == !ret) {
-                       pr_err("alg: aead%s: setkey failed on test %d for %s: flags=%x\n",
-                              d, j, algo, crypto_aead_get_flags(tfm));
-                       goto out;
-               } else if (ret)
-                       continue;
+       if (prandom_u32() % 2 == 0) {
+               cfg->inplace = true;
+               p += scnprintf(p, end - p, " inplace");
+       }
 
-               authsize = template[i].clen - template[i].plen;
-               ret = crypto_aead_setauthsize(tfm, authsize);
-               if (ret) {
-                       pr_err("alg: aead%s: Failed to set authsize to %u on test %d for %s\n",
-                              d, authsize, j, algo);
-                       goto out;
-               }
+       if (prandom_u32() % 2 == 0) {
+               cfg->req_flags |= CRYPTO_TFM_REQ_MAY_SLEEP;
+               p += scnprintf(p, end - p, " may_sleep");
+       }
 
-               k = !!template[i].alen;
-               sg_init_table(sg, k + 1);
-               sg_set_buf(&sg[0], assocbuf, template[i].alen);
-               sg_set_buf(&sg[k], inbuf, template[i].clen);
-               outbuf = inbuf;
+       switch (prandom_u32() % 4) {
+       case 0:
+               cfg->finalization_type = FINALIZATION_TYPE_FINAL;
+               p += scnprintf(p, end - p, " use_final");
+               break;
+       case 1:
+               cfg->finalization_type = FINALIZATION_TYPE_FINUP;
+               p += scnprintf(p, end - p, " use_finup");
+               break;
+       default:
+               cfg->finalization_type = FINALIZATION_TYPE_DIGEST;
+               p += scnprintf(p, end - p, " use_digest");
+               break;
+       }
 
-               if (diff_dst) {
-                       sg_init_table(sgout, k + 1);
-                       sg_set_buf(&sgout[0], assocbuf, template[i].alen);
+       p += scnprintf(p, end - p, " src_divs=[");
+       p = generate_random_sgl_divisions(cfg->src_divs,
+                                         ARRAY_SIZE(cfg->src_divs), p, end,
+                                         (cfg->finalization_type !=
+                                          FINALIZATION_TYPE_DIGEST));
+       p += scnprintf(p, end - p, "]");
 
-                       outbuf = xoutbuf[0] + align_offset;
-                       sg_set_buf(&sgout[k], outbuf, template[i].clen);
-               }
+       if (!cfg->inplace && prandom_u32() % 2 == 0) {
+               p += scnprintf(p, end - p, " dst_divs=[");
+               p = generate_random_sgl_divisions(cfg->dst_divs,
+                                                 ARRAY_SIZE(cfg->dst_divs),
+                                                 p, end, false);
+               p += scnprintf(p, end - p, "]");
+       }
 
-               aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg, inlen,
-                                      iv);
+       if (prandom_u32() % 2 == 0) {
+               cfg->iv_offset = 1 + (prandom_u32() % MAX_ALGAPI_ALIGNMASK);
+               p += scnprintf(p, end - p, " iv_offset=%u", cfg->iv_offset);
+       }
 
-               aead_request_set_ad(req, template[i].alen);
+       WARN_ON_ONCE(!valid_testvec_config(cfg));
+}
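Given the scnprintf() calls above, a generated name might read something like "random: may_sleep use_digest src_divs=[100.0%@+3001]"; this is illustrative only, since the exact contents depend on the random draws.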
+#endif /* CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
 
-               ret = crypto_wait_req(enc ? crypto_aead_encrypt(req)
-                                     : crypto_aead_decrypt(req), &wait);
+static int check_nonfinal_hash_op(const char *op, int err,
+                                 u8 *result, unsigned int digestsize,
+                                 const char *driver, unsigned int vec_num,
+                                 const struct testvec_config *cfg)
+{
+       if (err) {
+               pr_err("alg: hash: %s %s() failed with err %d on test vector %u, cfg=\"%s\"\n",
+                      driver, op, err, vec_num, cfg->name);
+               return err;
+       }
+       if (!testmgr_is_poison(result, digestsize)) {
+               pr_err("alg: hash: %s %s() used result buffer on test vector %u, cfg=\"%s\"\n",
+                      driver, op, vec_num, cfg->name);
+               return -EINVAL;
+       }
+       return 0;
+}
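The poison check above encodes the rule that init() and update() must not write to req->result; only final(), finup() or digest() may produce output there, so after any non-final step the result buffer is expected to still be fully poisoned.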
 
-               switch (ret) {
-               case 0:
-                       if (template[i].novrfy) {
-                               /* verification was supposed to fail */
-                               pr_err("alg: aead%s: %s failed on test %d for %s: ret was 0, expected -EBADMSG\n",
-                                      d, e, j, algo);
-                               /* so really, we got a bad message */
-                               ret = -EBADMSG;
-                               goto out;
-                       }
-                       break;
-               case -EBADMSG:
-                       if (template[i].novrfy)
-                               /* verification failure was expected */
-                               continue;
-                       /* fall through */
-               default:
-                       pr_err("alg: aead%s: %s failed on test %d for %s: ret=%d\n",
-                              d, e, j, algo, -ret);
-                       goto out;
-               }
+static int test_hash_vec_cfg(const char *driver,
+                            const struct hash_testvec *vec,
+                            unsigned int vec_num,
+                            const struct testvec_config *cfg,
+                            struct ahash_request *req,
+                            struct test_sglist *tsgl,
+                            u8 *hashstate)
+{
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+       const unsigned int alignmask = crypto_ahash_alignmask(tfm);
+       const unsigned int digestsize = crypto_ahash_digestsize(tfm);
+       const unsigned int statesize = crypto_ahash_statesize(tfm);
+       const u32 req_flags = CRYPTO_TFM_REQ_MAY_BACKLOG | cfg->req_flags;
+       const struct test_sg_division *divs[XBUFSIZE];
+       DECLARE_CRYPTO_WAIT(wait);
+       struct kvec _input;
+       struct iov_iter input;
+       unsigned int i;
+       struct scatterlist *pending_sgl;
+       unsigned int pending_len;
+       u8 result[HASH_MAX_DIGESTSIZE + TESTMGR_POISON_LEN];
+       int err;
 
-               if (memcmp(outbuf, expected_output, outlen)) {
-                       pr_err("alg: aead%s: Test %d failed on %s for %s\n",
-                              d, j, e, algo);
-                       hexdump(outbuf, outlen);
-                       ret = -EINVAL;
-                       goto out;
+       /* Set the key, if specified */
+       if (vec->ksize) {
+               err = crypto_ahash_setkey(tfm, vec->key, vec->ksize);
+               if (err) {
+                       pr_err("alg: hash: %s setkey failed with err %d on test vector %u; flags=%#x\n",
+                              driver, err, vec_num,
+                              crypto_ahash_get_flags(tfm));
+                       return err;
                }
        }
 
-       for (i = 0, j = 0; i < tcount; i++) {
-               const char *input, *expected_output;
-               unsigned int inlen, outlen;
-
-               /* alignment tests are only done with continuous buffers */
-               if (align_offset != 0)
-                       break;
-
-               if (!template[i].np)
-                       continue;
-
-               if (enc) {
-                       if (template[i].novrfy)
-                               continue;
-                       input = template[i].ptext;
-                       inlen = template[i].plen;
-                       expected_output = template[i].ctext;
-                       outlen = template[i].clen;
-               } else {
-                       input = template[i].ctext;
-                       inlen = template[i].clen;
-                       expected_output = template[i].ptext;
-                       outlen = template[i].plen;
-               }
+       /* Build the scatterlist for the source data */
+       _input.iov_base = (void *)vec->plaintext;
+       _input.iov_len = vec->psize;
+       iov_iter_kvec(&input, WRITE, &_input, 1, vec->psize);
+       err = build_test_sglist(tsgl, cfg->src_divs, alignmask, vec->psize,
+                               &input, divs);
+       if (err) {
+               pr_err("alg: hash: %s: error preparing scatterlist for test vector %u, cfg=\"%s\"\n",
+                      driver, vec_num, cfg->name);
+               return err;
+       }
 
-               j++;
+       /* Do the actual hashing */
 
-               if (template[i].iv)
-                       memcpy(iv, template[i].iv, iv_len);
-               else
-                       memset(iv, 0, MAX_IVLEN);
+       testmgr_poison(req->__ctx, crypto_ahash_reqsize(tfm));
+       testmgr_poison(result, digestsize + TESTMGR_POISON_LEN);
 
-               crypto_aead_clear_flags(tfm, ~0);
-               if (template[i].wk)
-                       crypto_aead_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
-               if (template[i].klen > MAX_KEYLEN) {
-                       pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
-                              d, j, algo, template[i].klen, MAX_KEYLEN);
-                       ret = -EINVAL;
-                       goto out;
+       if (cfg->finalization_type == FINALIZATION_TYPE_DIGEST) {
+               /* Just using digest() */
+               ahash_request_set_callback(req, req_flags, crypto_req_done,
+                                          &wait);
+               ahash_request_set_crypt(req, tsgl->sgl, result, vec->psize);
+               err = crypto_wait_req(crypto_ahash_digest(req), &wait);
+               if (err) {
+                       pr_err("alg: hash: %s digest() failed with err %d on test vector %u, cfg=\"%s\"\n",
+                              driver, err, vec_num, cfg->name);
+                       return err;
                }
-               memcpy(key, template[i].key, template[i].klen);
-
-               ret = crypto_aead_setkey(tfm, key, template[i].klen);
-               if (template[i].fail == !ret) {
-                       pr_err("alg: aead%s: setkey failed on chunk test %d for %s: flags=%x\n",
-                              d, j, algo, crypto_aead_get_flags(tfm));
-                       goto out;
-               } else if (ret)
-                       continue;
+               goto result_ready;
+       }
 
-               authsize = template[i].clen - template[i].plen;
+       /* Using init(), zero or more update(), then final() or finup() */
 
-               ret = -EINVAL;
-               sg_init_table(sg, template[i].anp + template[i].np);
-               if (diff_dst)
-                       sg_init_table(sgout, template[i].anp + template[i].np);
+       ahash_request_set_callback(req, req_flags, crypto_req_done, &wait);
+       ahash_request_set_crypt(req, NULL, result, 0);
+       err = crypto_wait_req(crypto_ahash_init(req), &wait);
+       err = check_nonfinal_hash_op("init", err, result, digestsize,
+                                    driver, vec_num, cfg);
+       if (err)
+               return err;
 
-               ret = -EINVAL;
-               for (k = 0, temp = 0; k < template[i].anp; k++) {
-                       if (WARN_ON(offset_in_page(IDX[k]) +
-                                   template[i].atap[k] > PAGE_SIZE))
-                               goto out;
-                       sg_set_buf(&sg[k],
-                                  memcpy(axbuf[IDX[k] >> PAGE_SHIFT] +
-                                         offset_in_page(IDX[k]),
-                                         template[i].assoc + temp,
-                                         template[i].atap[k]),
-                                  template[i].atap[k]);
-                       if (diff_dst)
-                               sg_set_buf(&sgout[k],
-                                          axbuf[IDX[k] >> PAGE_SHIFT] +
-                                          offset_in_page(IDX[k]),
-                                          template[i].atap[k]);
-                       temp += template[i].atap[k];
-               }
-
-               for (k = 0, temp = 0; k < template[i].np; k++) {
-                       n = template[i].tap[k];
-                       if (k == template[i].np - 1 && !enc)
-                               n += authsize;
-
-                       if (WARN_ON(offset_in_page(IDX[k]) + n > PAGE_SIZE))
-                               goto out;
+       pending_sgl = NULL;
+       pending_len = 0;
+       for (i = 0; i < tsgl->nents; i++) {
+               if (divs[i]->flush_type != FLUSH_TYPE_NONE &&
+                   pending_sgl != NULL) {
+                       /* update() with the pending data */
+                       ahash_request_set_callback(req, req_flags,
+                                                  crypto_req_done, &wait);
+                       ahash_request_set_crypt(req, pending_sgl, result,
+                                               pending_len);
+                       err = crypto_wait_req(crypto_ahash_update(req), &wait);
+                       err = check_nonfinal_hash_op("update", err,
+                                                    result, digestsize,
+                                                    driver, vec_num, cfg);
+                       if (err)
+                               return err;
+                       pending_sgl = NULL;
+                       pending_len = 0;
+               }
+               if (divs[i]->flush_type == FLUSH_TYPE_REIMPORT) {
+                       /* Test ->export() and ->import() */
+                       testmgr_poison(hashstate + statesize,
+                                      TESTMGR_POISON_LEN);
+                       err = crypto_ahash_export(req, hashstate);
+                       err = check_nonfinal_hash_op("export", err,
+                                                    result, digestsize,
+                                                    driver, vec_num, cfg);
+                       if (err)
+                               return err;
+                       if (!testmgr_is_poison(hashstate + statesize,
+                                              TESTMGR_POISON_LEN)) {
+                               pr_err("alg: hash: %s export() overran state buffer on test vector %u, cfg=\"%s\"\n",
+                                      driver, vec_num, cfg->name);
+                               return -EOVERFLOW;
+                       }
 
-                       q = xbuf[IDX[k] >> PAGE_SHIFT] + offset_in_page(IDX[k]);
-                       memcpy(q, input + temp, n);
-                       sg_set_buf(&sg[template[i].anp + k], q, n);
+                       testmgr_poison(req->__ctx, crypto_ahash_reqsize(tfm));
+                       err = crypto_ahash_import(req, hashstate);
+                       err = check_nonfinal_hash_op("import", err,
+                                                    result, digestsize,
+                                                    driver, vec_num, cfg);
+                       if (err)
+                               return err;
+               }
+               if (pending_sgl == NULL)
+                       pending_sgl = &tsgl->sgl[i];
+               pending_len += tsgl->sgl[i].length;
+       }
+
+       ahash_request_set_callback(req, req_flags, crypto_req_done, &wait);
+       ahash_request_set_crypt(req, pending_sgl, result, pending_len);
+       if (cfg->finalization_type == FINALIZATION_TYPE_FINAL) {
+               /* finish with update() and final() */
+               err = crypto_wait_req(crypto_ahash_update(req), &wait);
+               err = check_nonfinal_hash_op("update", err, result, digestsize,
+                                            driver, vec_num, cfg);
+               if (err)
+                       return err;
+               err = crypto_wait_req(crypto_ahash_final(req), &wait);
+               if (err) {
+                       pr_err("alg: hash: %s final() failed with err %d on test vector %u, cfg=\"%s\"\n",
+                              driver, err, vec_num, cfg->name);
+                       return err;
+               }
+       } else {
+               /* finish with finup() */
+               err = crypto_wait_req(crypto_ahash_finup(req), &wait);
+               if (err) {
+                       pr_err("alg: hash: %s finup() failed with err %d on test vector %u, cfg=\"%s\"\n",
+                              driver, err, vec_num, cfg->name);
+                       return err;
+               }
+       }
 
-                       if (diff_dst) {
-                               q = xoutbuf[IDX[k] >> PAGE_SHIFT] +
-                                   offset_in_page(IDX[k]);
+result_ready:
+       /* Check that the algorithm produced the correct digest */
+       if (memcmp(result, vec->digest, digestsize) != 0) {
+               pr_err("alg: hash: %s test failed (wrong result) on test vector %u, cfg=\"%s\"\n",
+                      driver, vec_num, cfg->name);
+               return -EINVAL;
+       }
+       if (!testmgr_is_poison(&result[digestsize], TESTMGR_POISON_LEN)) {
+               pr_err("alg: hash: %s overran result buffer on test vector %u, cfg=\"%s\"\n",
+                      driver, vec_num, cfg->name);
+               return -EOVERFLOW;
+       }
 
-                               memset(q, 0, n);
+       return 0;
+}
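For orientation, here is a minimal editorial sketch (not part of this patch) of the one-shot digest() flow that the FINALIZATION_TYPE_DIGEST branch of test_hash_vec_cfg() exercises. The algorithm name "sha256" and the requirement that the data sit in linear (e.g. kmalloc'd) memory are assumptions of the sketch, not something the patch mandates:

	#include <crypto/hash.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>

	/* Hash @len bytes at @data (must be sg_init_one()-addressable, i.e. not
	 * vmalloc or stack memory) into @out, which must hold the digest size. */
	static int demo_ahash_digest(const void *data, unsigned int len, u8 *out)
	{
		struct crypto_ahash *tfm;
		struct ahash_request *req;
		struct scatterlist sg;
		DECLARE_CRYPTO_WAIT(wait);
		int err;

		tfm = crypto_alloc_ahash("sha256", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		req = ahash_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			crypto_free_ahash(tfm);
			return -ENOMEM;
		}

		sg_init_one(&sg, data, len);
		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   crypto_req_done, &wait);
		ahash_request_set_crypt(req, &sg, out, len);

		/* Wait synchronously, as the test code does via crypto_wait_req() */
		err = crypto_wait_req(crypto_ahash_digest(req), &wait);

		ahash_request_free(req);
		crypto_free_ahash(tfm);
		return err;
	}

The test additionally poisons the request context and a redzone past the digest so that uninitialized-state reuse and result-buffer overruns are caught; a normal caller like the sketch above relies on the API contract instead.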
 
-                               sg_set_buf(&sgout[template[i].anp + k], q, n);
-                       }
+static int test_hash_vec(const char *driver, const struct hash_testvec *vec,
+                        unsigned int vec_num, struct ahash_request *req,
+                        struct test_sglist *tsgl, u8 *hashstate)
+{
+       unsigned int i;
+       int err;
 
-                       if (k == template[i].np - 1 && enc)
-                               n += authsize;
-                       if (offset_in_page(q) + n < PAGE_SIZE)
-                               q[n] = 0;
+       for (i = 0; i < ARRAY_SIZE(default_hash_testvec_configs); i++) {
+               err = test_hash_vec_cfg(driver, vec, vec_num,
+                                       &default_hash_testvec_configs[i],
+                                       req, tsgl, hashstate);
+               if (err)
+                       return err;
+       }
 
-                       temp += n;
-               }
+#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+       if (!noextratests) {
+               struct testvec_config cfg;
+               char cfgname[TESTVEC_CONFIG_NAMELEN];
 
-               ret = crypto_aead_setauthsize(tfm, authsize);
-               if (ret) {
-                       pr_err("alg: aead%s: Failed to set authsize to %u on chunk test %d for %s\n",
-                              d, authsize, j, algo);
-                       goto out;
+               for (i = 0; i < fuzz_iterations; i++) {
+                       generate_random_testvec_config(&cfg, cfgname,
+                                                      sizeof(cfgname));
+                       err = test_hash_vec_cfg(driver, vec, vec_num, &cfg,
+                                               req, tsgl, hashstate);
+                       if (err)
+                               return err;
                }
+       }
+#endif
+       return 0;
+}
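The randomized pass in test_hash_vec() (and the analogous passes for AEADs and skciphers below) is only compiled in with CONFIG_CRYPTO_MANAGER_EXTRA_TESTS. Assuming testmgr keeps its usual home in the cryptomgr module, the number of random configurations per vector can be tuned, or the extra tests skipped entirely, from the kernel command line, for example:

	cryptomgr.fuzz_iterations=1000
	cryptomgr.noextratests=1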
 
-               if (enc) {
-                       if (WARN_ON(sg[template[i].anp + k - 1].offset +
-                                   sg[template[i].anp + k - 1].length +
-                                   authsize > PAGE_SIZE)) {
-                               ret = -EINVAL;
-                               goto out;
-                       }
+static int __alg_test_hash(const struct hash_testvec *vecs,
+                          unsigned int num_vecs, const char *driver,
+                          u32 type, u32 mask)
+{
+       struct crypto_ahash *tfm;
+       struct ahash_request *req = NULL;
+       struct test_sglist *tsgl = NULL;
+       u8 *hashstate = NULL;
+       unsigned int i;
+       int err;
 
-                       if (diff_dst)
-                               sgout[template[i].anp + k - 1].length +=
-                                       authsize;
-                       sg[template[i].anp + k - 1].length += authsize;
-               }
+       tfm = crypto_alloc_ahash(driver, type, mask);
+       if (IS_ERR(tfm)) {
+               pr_err("alg: hash: failed to allocate transform for %s: %ld\n",
+                      driver, PTR_ERR(tfm));
+               return PTR_ERR(tfm);
+       }
 
-               aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
-                                      inlen, iv);
+       req = ahash_request_alloc(tfm, GFP_KERNEL);
+       if (!req) {
+               pr_err("alg: hash: failed to allocate request for %s\n",
+                      driver);
+               err = -ENOMEM;
+               goto out;
+       }
 
-               aead_request_set_ad(req, template[i].alen);
+       tsgl = kmalloc(sizeof(*tsgl), GFP_KERNEL);
+       if (!tsgl || init_test_sglist(tsgl) != 0) {
+               pr_err("alg: hash: failed to allocate test buffers for %s\n",
+                      driver);
+               kfree(tsgl);
+               tsgl = NULL;
+               err = -ENOMEM;
+               goto out;
+       }
 
-               ret = crypto_wait_req(enc ? crypto_aead_encrypt(req)
-                                     : crypto_aead_decrypt(req), &wait);
+       hashstate = kmalloc(crypto_ahash_statesize(tfm) + TESTMGR_POISON_LEN,
+                           GFP_KERNEL);
+       if (!hashstate) {
+               pr_err("alg: hash: failed to allocate hash state buffer for %s\n",
+                      driver);
+               err = -ENOMEM;
+               goto out;
+       }
 
-               switch (ret) {
-               case 0:
-                       if (template[i].novrfy) {
-                               /* verification was supposed to fail */
-                               pr_err("alg: aead%s: %s failed on chunk test %d for %s: ret was 0, expected -EBADMSG\n",
-                                      d, e, j, algo);
-                               /* so really, we got a bad message */
-                               ret = -EBADMSG;
-                               goto out;
-                       }
-                       break;
-               case -EBADMSG:
-                       if (template[i].novrfy)
-                               /* verification failure was expected */
-                               continue;
-                       /* fall through */
-               default:
-                       pr_err("alg: aead%s: %s failed on chunk test %d for %s: ret=%d\n",
-                              d, e, j, algo, -ret);
+       for (i = 0; i < num_vecs; i++) {
+               err = test_hash_vec(driver, &vecs[i], i, req, tsgl, hashstate);
+               if (err)
                        goto out;
-               }
-
-               ret = -EINVAL;
-               for (k = 0, temp = 0; k < template[i].np; k++) {
-                       if (diff_dst)
-                               q = xoutbuf[IDX[k] >> PAGE_SHIFT] +
-                                   offset_in_page(IDX[k]);
-                       else
-                               q = xbuf[IDX[k] >> PAGE_SHIFT] +
-                                   offset_in_page(IDX[k]);
-
-                       n = template[i].tap[k];
-                       if (k == template[i].np - 1 && enc)
-                               n += authsize;
+       }
+       err = 0;
+out:
+       kfree(hashstate);
+       if (tsgl) {
+               destroy_test_sglist(tsgl);
+               kfree(tsgl);
+       }
+       ahash_request_free(req);
+       crypto_free_ahash(tfm);
+       return err;
+}
 
-                       if (memcmp(q, expected_output + temp, n)) {
-                               pr_err("alg: aead%s: Chunk test %d failed on %s at page %u for %s\n",
-                                      d, j, e, k, algo);
-                               hexdump(q, n);
-                               goto out;
-                       }
+static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
+                        u32 type, u32 mask)
+{
+       const struct hash_testvec *template = desc->suite.hash.vecs;
+       unsigned int tcount = desc->suite.hash.count;
+       unsigned int nr_unkeyed, nr_keyed;
+       int err;
 
-                       q += n;
-                       if (k == template[i].np - 1 && !enc) {
-                               if (!diff_dst && memcmp(q, input + temp + n,
-                                                       authsize))
-                                       n = authsize;
-                               else
-                                       n = 0;
-                       } else {
-                               for (n = 0; offset_in_page(q + n) && q[n]; n++)
-                                       ;
-                       }
-                       if (n) {
-                               pr_err("alg: aead%s: Result buffer corruption in chunk test %d on %s at page %u for %s: %u bytes:\n",
-                                      d, j, e, k, algo, n);
-                               hexdump(q, n);
-                               goto out;
-                       }
+       /*
+        * For OPTIONAL_KEY algorithms, we have to do all the unkeyed tests
+        * first, before setting a key on the tfm.  To make this easier, we
+        * require that the unkeyed test vectors (if any) are listed first.
+        */
 
-                       temp += template[i].tap[k];
+       for (nr_unkeyed = 0; nr_unkeyed < tcount; nr_unkeyed++) {
+               if (template[nr_unkeyed].ksize)
+                       break;
+       }
+       for (nr_keyed = 0; nr_unkeyed + nr_keyed < tcount; nr_keyed++) {
+               if (!template[nr_unkeyed + nr_keyed].ksize) {
+                       pr_err("alg: hash: test vectors for %s out of order, "
+                              "unkeyed ones must come first\n", desc->alg);
+                       return -EINVAL;
                }
        }
 
-       ret = 0;
+       err = 0;
+       if (nr_unkeyed) {
+               err = __alg_test_hash(template, nr_unkeyed, driver, type, mask);
+               template += nr_unkeyed;
+       }
 
-out:
-       aead_request_free(req);
-       kfree(sg);
-out_nosg:
-       if (diff_dst)
-               testmgr_free_buf(xoutbuf);
-out_nooutbuf:
-       testmgr_free_buf(axbuf);
-out_noaxbuf:
-       testmgr_free_buf(xbuf);
-out_noxbuf:
-       kfree(key);
-       kfree(iv);
-       return ret;
+       if (!err && nr_keyed)
+               err = __alg_test_hash(template, nr_keyed, driver, type, mask);
+
+       return err;
 }
 
-static int test_aead(struct crypto_aead *tfm, int enc,
-                    const struct aead_testvec *template, unsigned int tcount)
+static int test_aead_vec_cfg(const char *driver, int enc,
+                            const struct aead_testvec *vec,
+                            unsigned int vec_num,
+                            const struct testvec_config *cfg,
+                            struct aead_request *req,
+                            struct cipher_test_sglists *tsgls)
 {
-       unsigned int alignmask;
-       int ret;
+       struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+       const unsigned int alignmask = crypto_aead_alignmask(tfm);
+       const unsigned int ivsize = crypto_aead_ivsize(tfm);
+       const unsigned int authsize = vec->clen - vec->plen;
+       const u32 req_flags = CRYPTO_TFM_REQ_MAY_BACKLOG | cfg->req_flags;
+       const char *op = enc ? "encryption" : "decryption";
+       DECLARE_CRYPTO_WAIT(wait);
+       u8 _iv[3 * (MAX_ALGAPI_ALIGNMASK + 1) + MAX_IVLEN];
+       u8 *iv = PTR_ALIGN(&_iv[0], 2 * (MAX_ALGAPI_ALIGNMASK + 1)) +
+                cfg->iv_offset +
+                (cfg->iv_offset_relative_to_alignmask ? alignmask : 0);
+       struct kvec input[2];
+       int err;
 
-       /* test 'dst == src' case */
-       ret = __test_aead(tfm, enc, template, tcount, false, 0);
-       if (ret)
-               return ret;
+       /* Set the key */
+       if (vec->wk)
+               crypto_aead_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
+       else
+               crypto_aead_clear_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
+       err = crypto_aead_setkey(tfm, vec->key, vec->klen);
+       if (err) {
+               if (vec->fail) /* expectedly failed to set key? */
+                       return 0;
+               pr_err("alg: aead: %s setkey failed with err %d on test vector %u; flags=%#x\n",
+                      driver, err, vec_num, crypto_aead_get_flags(tfm));
+               return err;
+       }
+       if (vec->fail) {
+               pr_err("alg: aead: %s setkey unexpectedly succeeded on test vector %u\n",
+                      driver, vec_num);
+               return -EINVAL;
+       }
 
-       /* test 'dst != src' case */
-       ret = __test_aead(tfm, enc, template, tcount, true, 0);
-       if (ret)
-               return ret;
+       /* Set the authentication tag size */
+       err = crypto_aead_setauthsize(tfm, authsize);
+       if (err) {
+               pr_err("alg: aead: %s setauthsize failed with err %d on test vector %u\n",
+                      driver, err, vec_num);
+               return err;
+       }
 
-       /* test unaligned buffers, check with one byte offset */
-       ret = __test_aead(tfm, enc, template, tcount, true, 1);
-       if (ret)
-               return ret;
+       /* The IV must be copied to a buffer, as the algorithm may modify it */
+       if (WARN_ON(ivsize > MAX_IVLEN))
+               return -EINVAL;
+       if (vec->iv)
+               memcpy(iv, vec->iv, ivsize);
+       else
+               memset(iv, 0, ivsize);
+
+       /* Build the src/dst scatterlists */
+       input[0].iov_base = (void *)vec->assoc;
+       input[0].iov_len = vec->alen;
+       input[1].iov_base = enc ? (void *)vec->ptext : (void *)vec->ctext;
+       input[1].iov_len = enc ? vec->plen : vec->clen;
+       err = build_cipher_test_sglists(tsgls, cfg, alignmask,
+                                       vec->alen + (enc ? vec->plen :
+                                                    vec->clen),
+                                       vec->alen + (enc ? vec->clen :
+                                                    vec->plen),
+                                       input, 2);
+       if (err) {
+               pr_err("alg: aead: %s %s: error preparing scatterlists for test vector %u, cfg=\"%s\"\n",
+                      driver, op, vec_num, cfg->name);
+               return err;
+       }
 
-       alignmask = crypto_tfm_alg_alignmask(&tfm->base);
-       if (alignmask) {
-               /* Check if alignment mask for tfm is correctly set. */
-               ret = __test_aead(tfm, enc, template, tcount, true,
-                                 alignmask + 1);
-               if (ret)
-                       return ret;
+       /* Do the actual encryption or decryption */
+       testmgr_poison(req->__ctx, crypto_aead_reqsize(tfm));
+       aead_request_set_callback(req, req_flags, crypto_req_done, &wait);
+       aead_request_set_crypt(req, tsgls->src.sgl_ptr, tsgls->dst.sgl_ptr,
+                              enc ? vec->plen : vec->clen, iv);
+       aead_request_set_ad(req, vec->alen);
+       err = crypto_wait_req(enc ? crypto_aead_encrypt(req) :
+                             crypto_aead_decrypt(req), &wait);
+
+       aead_request_set_tfm(req, tfm); /* TODO: get rid of this */
+
+       if (err) {
+               if (err == -EBADMSG && vec->novrfy)
+                       return 0;
+               pr_err("alg: aead: %s %s failed with err %d on test vector %u, cfg=\"%s\"\n",
+                      driver, op, err, vec_num, cfg->name);
+               return err;
+       }
+       if (vec->novrfy) {
+               pr_err("alg: aead: %s %s unexpectedly succeeded on test vector %u, cfg=\"%s\"\n",
+                      driver, op, vec_num, cfg->name);
+               return -EINVAL;
+       }
+
+       /* Check that the algorithm didn't overwrite things it shouldn't have */
+       if (req->cryptlen != (enc ? vec->plen : vec->clen) ||
+           req->assoclen != vec->alen ||
+           req->iv != iv ||
+           req->src != tsgls->src.sgl_ptr ||
+           req->dst != tsgls->dst.sgl_ptr ||
+           crypto_aead_reqtfm(req) != tfm ||
+           req->base.complete != crypto_req_done ||
+           req->base.flags != req_flags ||
+           req->base.data != &wait) {
+               pr_err("alg: aead: %s %s corrupted request struct on test vector %u, cfg=\"%s\"\n",
+                      driver, op, vec_num, cfg->name);
+               if (req->cryptlen != (enc ? vec->plen : vec->clen))
+                       pr_err("alg: aead: changed 'req->cryptlen'\n");
+               if (req->assoclen != vec->alen)
+                       pr_err("alg: aead: changed 'req->assoclen'\n");
+               if (req->iv != iv)
+                       pr_err("alg: aead: changed 'req->iv'\n");
+               if (req->src != tsgls->src.sgl_ptr)
+                       pr_err("alg: aead: changed 'req->src'\n");
+               if (req->dst != tsgls->dst.sgl_ptr)
+                       pr_err("alg: aead: changed 'req->dst'\n");
+               if (crypto_aead_reqtfm(req) != tfm)
+                       pr_err("alg: aead: changed 'req->base.tfm'\n");
+               if (req->base.complete != crypto_req_done)
+                       pr_err("alg: aead: changed 'req->base.complete'\n");
+               if (req->base.flags != req_flags)
+                       pr_err("alg: aead: changed 'req->base.flags'\n");
+               if (req->base.data != &wait)
+                       pr_err("alg: aead: changed 'req->base.data'\n");
+               return -EINVAL;
+       }
+       if (is_test_sglist_corrupted(&tsgls->src)) {
+               pr_err("alg: aead: %s %s corrupted src sgl on test vector %u, cfg=\"%s\"\n",
+                      driver, op, vec_num, cfg->name);
+               return -EINVAL;
+       }
+       if (tsgls->dst.sgl_ptr != tsgls->src.sgl &&
+           is_test_sglist_corrupted(&tsgls->dst)) {
+               pr_err("alg: aead: %s %s corrupted dst sgl on test vector %u, cfg=\"%s\"\n",
+                      driver, op, vec_num, cfg->name);
+               return -EINVAL;
+       }
+
+       /* Check for the correct output (ciphertext or plaintext) */
+       err = verify_correct_output(&tsgls->dst, enc ? vec->ctext : vec->ptext,
+                                   enc ? vec->clen : vec->plen,
+                                   vec->alen, enc || !cfg->inplace);
+       if (err == -EOVERFLOW) {
+               pr_err("alg: aead: %s %s overran dst buffer on test vector %u, cfg=\"%s\"\n",
+                      driver, op, vec_num, cfg->name);
+               return err;
+       }
+       if (err) {
+               pr_err("alg: aead: %s %s test failed (wrong result) on test vector %u, cfg=\"%s\"\n",
+                      driver, op, vec_num, cfg->name);
+               return err;
        }
 
        return 0;
 }
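As a point of reference, a minimal editorial sketch (not from this patch) of the AEAD request layout that test_aead_vec_cfg() drives: the source scatterlist carries the associated data followed by the plaintext, and the destination must leave room for the associated data, the ciphertext, and the authentication tag. A caller-supplied transform and scatterlists are assumed here:

	#include <crypto/aead.h>
	#include <linux/err.h>

	/* Encrypt @ptlen bytes; @src = assoc data + plaintext, @dst must hold
	 * assoc data + ciphertext + tag (ptlen + crypto_aead_authsize(tfm)). */
	static int demo_aead_encrypt(struct crypto_aead *tfm,
				     struct scatterlist *src,
				     struct scatterlist *dst,
				     unsigned int assoclen, unsigned int ptlen,
				     u8 *iv)
	{
		DECLARE_CRYPTO_WAIT(wait);
		struct aead_request *req;
		int err;

		req = aead_request_alloc(tfm, GFP_KERNEL);
		if (!req)
			return -ENOMEM;

		aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					  crypto_req_done, &wait);
		aead_request_set_ad(req, assoclen);
		aead_request_set_crypt(req, src, dst, ptlen, iv);

		err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

		aead_request_free(req);
		return err;
	}

Decryption is the mirror image: cryptlen then covers ciphertext plus tag, and -EBADMSG signals authentication failure, which is exactly the outcome the vec->novrfy vectors above expect.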
 
-static int test_cipher(struct crypto_cipher *tfm, int enc,
-                      const struct cipher_testvec *template,
-                      unsigned int tcount)
+static int test_aead_vec(const char *driver, int enc,
+                        const struct aead_testvec *vec, unsigned int vec_num,
+                        struct aead_request *req,
+                        struct cipher_test_sglists *tsgls)
 {
-       const char *algo = crypto_tfm_alg_driver_name(crypto_cipher_tfm(tfm));
-       unsigned int i, j, k;
-       char *q;
-       const char *e;
-       const char *input, *result;
-       void *data;
-       char *xbuf[XBUFSIZE];
-       int ret = -ENOMEM;
+       unsigned int i;
+       int err;
 
-       if (testmgr_alloc_buf(xbuf))
-               goto out_nobuf;
+       if (enc && vec->novrfy)
+               return 0;
 
-       if (enc == ENCRYPT)
-               e = "encryption";
-       else
-               e = "decryption";
+       for (i = 0; i < ARRAY_SIZE(default_cipher_testvec_configs); i++) {
+               err = test_aead_vec_cfg(driver, enc, vec, vec_num,
+                                       &default_cipher_testvec_configs[i],
+                                       req, tsgls);
+               if (err)
+                       return err;
+       }
 
-       j = 0;
-       for (i = 0; i < tcount; i++) {
-               if (template[i].np)
-                       continue;
+#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+       if (!noextratests) {
+               struct testvec_config cfg;
+               char cfgname[TESTVEC_CONFIG_NAMELEN];
 
-               if (fips_enabled && template[i].fips_skip)
-                       continue;
+               for (i = 0; i < fuzz_iterations; i++) {
+                       generate_random_testvec_config(&cfg, cfgname,
+                                                      sizeof(cfgname));
+                       err = test_aead_vec_cfg(driver, enc, vec, vec_num,
+                                               &cfg, req, tsgls);
+                       if (err)
+                               return err;
+               }
+       }
+#endif
+       return 0;
+}
 
-               input  = enc ? template[i].ptext : template[i].ctext;
-               result = enc ? template[i].ctext : template[i].ptext;
-               j++;
+static int test_aead(const char *driver, int enc,
+                    const struct aead_test_suite *suite,
+                    struct aead_request *req,
+                    struct cipher_test_sglists *tsgls)
+{
+       unsigned int i;
+       int err;
 
-               ret = -EINVAL;
-               if (WARN_ON(template[i].len > PAGE_SIZE))
-                       goto out;
+       for (i = 0; i < suite->count; i++) {
+               err = test_aead_vec(driver, enc, &suite->vecs[i], i, req,
+                                   tsgls);
+               if (err)
+                       return err;
+       }
+       return 0;
+}
 
-               data = xbuf[0];
-               memcpy(data, input, template[i].len);
+static int alg_test_aead(const struct alg_test_desc *desc, const char *driver,
+                        u32 type, u32 mask)
+{
+       const struct aead_test_suite *suite = &desc->suite.aead;
+       struct crypto_aead *tfm;
+       struct aead_request *req = NULL;
+       struct cipher_test_sglists *tsgls = NULL;
+       int err;
 
-               crypto_cipher_clear_flags(tfm, ~0);
-               if (template[i].wk)
-                       crypto_cipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+       if (suite->count <= 0) {
+               pr_err("alg: aead: empty test suite for %s\n", driver);
+               return -EINVAL;
+       }
 
-               ret = crypto_cipher_setkey(tfm, template[i].key,
-                                          template[i].klen);
-               if (template[i].fail == !ret) {
-                       printk(KERN_ERR "alg: cipher: setkey failed "
-                              "on test %d for %s: flags=%x\n", j,
-                              algo, crypto_cipher_get_flags(tfm));
-                       goto out;
-               } else if (ret)
-                       continue;
+       tfm = crypto_alloc_aead(driver, type, mask);
+       if (IS_ERR(tfm)) {
+               pr_err("alg: aead: failed to allocate transform for %s: %ld\n",
+                      driver, PTR_ERR(tfm));
+               return PTR_ERR(tfm);
+       }
 
-               for (k = 0; k < template[i].len;
-                    k += crypto_cipher_blocksize(tfm)) {
-                       if (enc)
-                               crypto_cipher_encrypt_one(tfm, data + k,
-                                                         data + k);
-                       else
-                               crypto_cipher_decrypt_one(tfm, data + k,
-                                                         data + k);
-               }
+       req = aead_request_alloc(tfm, GFP_KERNEL);
+       if (!req) {
+               pr_err("alg: aead: failed to allocate request for %s\n",
+                      driver);
+               err = -ENOMEM;
+               goto out;
+       }
 
-               q = data;
-               if (memcmp(q, result, template[i].len)) {
-                       printk(KERN_ERR "alg: cipher: Test %d failed "
-                              "on %s for %s\n", j, e, algo);
-                       hexdump(q, template[i].len);
-                       ret = -EINVAL;
-                       goto out;
-               }
+       tsgls = alloc_cipher_test_sglists();
+       if (!tsgls) {
+               pr_err("alg: aead: failed to allocate test buffers for %s\n",
+                      driver);
+               err = -ENOMEM;
+               goto out;
        }
 
-       ret = 0;
+       err = test_aead(driver, ENCRYPT, suite, req, tsgls);
+       if (err)
+               goto out;
 
+       err = test_aead(driver, DECRYPT, suite, req, tsgls);
 out:
-       testmgr_free_buf(xbuf);
-out_nobuf:
-       return ret;
+       free_cipher_test_sglists(tsgls);
+       aead_request_free(req);
+       crypto_free_aead(tfm);
+       return err;
 }
 
-static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
-                          const struct cipher_testvec *template,
-                          unsigned int tcount,
-                          const bool diff_dst, const int align_offset)
+static int test_cipher(struct crypto_cipher *tfm, int enc,
+                      const struct cipher_testvec *template,
+                      unsigned int tcount)
 {
-       const char *algo =
-               crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm));
-       unsigned int i, j, k, n, temp;
+       const char *algo = crypto_tfm_alg_driver_name(crypto_cipher_tfm(tfm));
+       unsigned int i, j, k;
        char *q;
-       struct skcipher_request *req;
-       struct scatterlist sg[8];
-       struct scatterlist sgout[8];
-       const char *e, *d;
-       struct crypto_wait wait;
+       const char *e;
        const char *input, *result;
        void *data;
-       char iv[MAX_IVLEN];
        char *xbuf[XBUFSIZE];
-       char *xoutbuf[XBUFSIZE];
        int ret = -ENOMEM;
-       unsigned int ivsize = crypto_skcipher_ivsize(tfm);
 
        if (testmgr_alloc_buf(xbuf))
                goto out_nobuf;
 
-       if (diff_dst && testmgr_alloc_buf(xoutbuf))
-               goto out_nooutbuf;
-
-       if (diff_dst)
-               d = "-ddst";
-       else
-               d = "";
-
        if (enc == ENCRYPT)
                e = "encryption";
        else
                e = "decryption";
 
-       crypto_init_wait(&wait);
-
-       req = skcipher_request_alloc(tfm, GFP_KERNEL);
-       if (!req) {
-               pr_err("alg: skcipher%s: Failed to allocate request for %s\n",
-                      d, algo);
-               goto out;
-       }
-
-       skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-                                     crypto_req_done, &wait);
-
        j = 0;
        for (i = 0; i < tcount; i++) {
-               if (template[i].np && !template[i].also_non_np)
-                       continue;
 
                if (fips_enabled && template[i].fips_skip)
                        continue;
 
-               if (template[i].iv && !(template[i].generates_iv && enc))
-                       memcpy(iv, template[i].iv, ivsize);
-               else
-                       memset(iv, 0, MAX_IVLEN);
-
                input  = enc ? template[i].ptext : template[i].ctext;
                result = enc ? template[i].ctext : template[i].ptext;
                j++;
+
                ret = -EINVAL;
-               if (WARN_ON(align_offset + template[i].len > PAGE_SIZE))
+               if (WARN_ON(template[i].len > PAGE_SIZE))
                        goto out;
 
                data = xbuf[0];
-               data += align_offset;
                memcpy(data, input, template[i].len);
 
-               crypto_skcipher_clear_flags(tfm, ~0);
+               crypto_cipher_clear_flags(tfm, ~0);
                if (template[i].wk)
-                       crypto_skcipher_set_flags(tfm,
-                                                 CRYPTO_TFM_REQ_WEAK_KEY);
+                       crypto_cipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
 
-               ret = crypto_skcipher_setkey(tfm, template[i].key,
-                                            template[i].klen);
+               ret = crypto_cipher_setkey(tfm, template[i].key,
+                                          template[i].klen);
                if (template[i].fail == !ret) {
-                       pr_err("alg: skcipher%s: setkey failed on test %d for %s: flags=%x\n",
-                              d, j, algo, crypto_skcipher_get_flags(tfm));
+                       printk(KERN_ERR "alg: cipher: setkey failed "
+                              "on test %d for %s: flags=%x\n", j,
+                              algo, crypto_cipher_get_flags(tfm));
                        goto out;
                } else if (ret)
                        continue;
 
-               sg_init_one(&sg[0], data, template[i].len);
-               if (diff_dst) {
-                       data = xoutbuf[0];
-                       data += align_offset;
-                       sg_init_one(&sgout[0], data, template[i].len);
-               }
-
-               skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
-                                          template[i].len, iv);
-               ret = crypto_wait_req(enc ? crypto_skcipher_encrypt(req) :
-                                     crypto_skcipher_decrypt(req), &wait);
-
-               if (ret) {
-                       pr_err("alg: skcipher%s: %s failed on test %d for %s: ret=%d\n",
-                              d, e, j, algo, -ret);
-                       goto out;
+               for (k = 0; k < template[i].len;
+                    k += crypto_cipher_blocksize(tfm)) {
+                       if (enc)
+                               crypto_cipher_encrypt_one(tfm, data + k,
+                                                         data + k);
+                       else
+                               crypto_cipher_decrypt_one(tfm, data + k,
+                                                         data + k);
                }
 
                q = data;
                if (memcmp(q, result, template[i].len)) {
-                       pr_err("alg: skcipher%s: Test %d failed (invalid result) on %s for %s\n",
-                              d, j, e, algo);
+                       printk(KERN_ERR "alg: cipher: Test %d failed "
+                              "on %s for %s\n", j, e, algo);
                        hexdump(q, template[i].len);
                        ret = -EINVAL;
                        goto out;
                }
-
-               if (template[i].generates_iv && enc &&
-                   memcmp(iv, template[i].iv, crypto_skcipher_ivsize(tfm))) {
-                       pr_err("alg: skcipher%s: Test %d failed (invalid output IV) on %s for %s\n",
-                              d, j, e, algo);
-                       hexdump(iv, crypto_skcipher_ivsize(tfm));
-                       ret = -EINVAL;
-                       goto out;
-               }
        }
 
-       j = 0;
-       for (i = 0; i < tcount; i++) {
-               /* alignment tests are only done with continuous buffers */
-               if (align_offset != 0)
-                       break;
-
-               if (!template[i].np)
-                       continue;
-
-               if (fips_enabled && template[i].fips_skip)
-                       continue;
-
-               if (template[i].iv && !(template[i].generates_iv && enc))
-                       memcpy(iv, template[i].iv, ivsize);
-               else
-                       memset(iv, 0, MAX_IVLEN);
-
-               input  = enc ? template[i].ptext : template[i].ctext;
-               result = enc ? template[i].ctext : template[i].ptext;
-               j++;
-               crypto_skcipher_clear_flags(tfm, ~0);
-               if (template[i].wk)
-                       crypto_skcipher_set_flags(tfm,
-                                                 CRYPTO_TFM_REQ_WEAK_KEY);
-
-               ret = crypto_skcipher_setkey(tfm, template[i].key,
-                                            template[i].klen);
-               if (template[i].fail == !ret) {
-                       pr_err("alg: skcipher%s: setkey failed on chunk test %d for %s: flags=%x\n",
-                              d, j, algo, crypto_skcipher_get_flags(tfm));
-                       goto out;
-               } else if (ret)
-                       continue;
+       ret = 0;
 
-               temp = 0;
-               ret = -EINVAL;
-               sg_init_table(sg, template[i].np);
-               if (diff_dst)
-                       sg_init_table(sgout, template[i].np);
-               for (k = 0; k < template[i].np; k++) {
-                       if (WARN_ON(offset_in_page(IDX[k]) +
-                                   template[i].tap[k] > PAGE_SIZE))
-                               goto out;
+out:
+       testmgr_free_buf(xbuf);
+out_nobuf:
+       return ret;
+}
 
-                       q = xbuf[IDX[k] >> PAGE_SHIFT] + offset_in_page(IDX[k]);
+static int test_skcipher_vec_cfg(const char *driver, int enc,
+                                const struct cipher_testvec *vec,
+                                unsigned int vec_num,
+                                const struct testvec_config *cfg,
+                                struct skcipher_request *req,
+                                struct cipher_test_sglists *tsgls)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       const unsigned int alignmask = crypto_skcipher_alignmask(tfm);
+       const unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+       const u32 req_flags = CRYPTO_TFM_REQ_MAY_BACKLOG | cfg->req_flags;
+       const char *op = enc ? "encryption" : "decryption";
+       DECLARE_CRYPTO_WAIT(wait);
+       u8 _iv[3 * (MAX_ALGAPI_ALIGNMASK + 1) + MAX_IVLEN];
+       u8 *iv = PTR_ALIGN(&_iv[0], 2 * (MAX_ALGAPI_ALIGNMASK + 1)) +
+                cfg->iv_offset +
+                (cfg->iv_offset_relative_to_alignmask ? alignmask : 0);
+       struct kvec input;
+       int err;
 
-                       memcpy(q, input + temp, template[i].tap[k]);
+       /* Set the key */
+       if (vec->wk)
+               crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
+       else
+               crypto_skcipher_clear_flags(tfm,
+                                           CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
+       err = crypto_skcipher_setkey(tfm, vec->key, vec->klen);
+       if (err) {
+               if (vec->fail) /* expectedly failed to set key? */
+                       return 0;
+               pr_err("alg: skcipher: %s setkey failed with err %d on test vector %u; flags=%#x\n",
+                      driver, err, vec_num, crypto_skcipher_get_flags(tfm));
+               return err;
+       }
+       if (vec->fail) {
+               pr_err("alg: skcipher: %s setkey unexpectedly succeeded on test vector %u\n",
+                      driver, vec_num);
+               return -EINVAL;
+       }
 
-                       if (offset_in_page(q) + template[i].tap[k] < PAGE_SIZE)
-                               q[template[i].tap[k]] = 0;
+       /* The IV must be copied to a buffer, as the algorithm may modify it */
+       if (ivsize) {
+               if (WARN_ON(ivsize > MAX_IVLEN))
+                       return -EINVAL;
+               if (vec->generates_iv && !enc)
+                       memcpy(iv, vec->iv_out, ivsize);
+               else if (vec->iv)
+                       memcpy(iv, vec->iv, ivsize);
+               else
+                       memset(iv, 0, ivsize);
+       } else {
+               if (vec->generates_iv) {
+                       pr_err("alg: skcipher: %s has ivsize=0 but test vector %u generates IV!\n",
+                              driver, vec_num);
+                       return -EINVAL;
+               }
+               iv = NULL;
+       }
 
-                       sg_set_buf(&sg[k], q, template[i].tap[k]);
-                       if (diff_dst) {
-                               q = xoutbuf[IDX[k] >> PAGE_SHIFT] +
-                                   offset_in_page(IDX[k]);
+       /* Build the src/dst scatterlists */
+       input.iov_base = enc ? (void *)vec->ptext : (void *)vec->ctext;
+       input.iov_len = vec->len;
+       err = build_cipher_test_sglists(tsgls, cfg, alignmask,
+                                       vec->len, vec->len, &input, 1);
+       if (err) {
+               pr_err("alg: skcipher: %s %s: error preparing scatterlists for test vector %u, cfg=\"%s\"\n",
+                      driver, op, vec_num, cfg->name);
+               return err;
+       }
 
-                               sg_set_buf(&sgout[k], q, template[i].tap[k]);
+       /* Do the actual encryption or decryption */
+       testmgr_poison(req->__ctx, crypto_skcipher_reqsize(tfm));
+       skcipher_request_set_callback(req, req_flags, crypto_req_done, &wait);
+       skcipher_request_set_crypt(req, tsgls->src.sgl_ptr, tsgls->dst.sgl_ptr,
+                                  vec->len, iv);
+       err = crypto_wait_req(enc ? crypto_skcipher_encrypt(req) :
+                             crypto_skcipher_decrypt(req), &wait);
+       if (err) {
+               pr_err("alg: skcipher: %s %s failed with err %d on test vector %u, cfg=\"%s\"\n",
+                      driver, op, err, vec_num, cfg->name);
+               return err;
+       }
 
-                               memset(q, 0, template[i].tap[k]);
-                               if (offset_in_page(q) +
-                                   template[i].tap[k] < PAGE_SIZE)
-                                       q[template[i].tap[k]] = 0;
-                       }
+       /* Check that the algorithm didn't overwrite things it shouldn't have */
+       if (req->cryptlen != vec->len ||
+           req->iv != iv ||
+           req->src != tsgls->src.sgl_ptr ||
+           req->dst != tsgls->dst.sgl_ptr ||
+           crypto_skcipher_reqtfm(req) != tfm ||
+           req->base.complete != crypto_req_done ||
+           req->base.flags != req_flags ||
+           req->base.data != &wait) {
+               pr_err("alg: skcipher: %s %s corrupted request struct on test vector %u, cfg=\"%s\"\n",
+                      driver, op, vec_num, cfg->name);
+               if (req->cryptlen != vec->len)
+                       pr_err("alg: skcipher: changed 'req->cryptlen'\n");
+               if (req->iv != iv)
+                       pr_err("alg: skcipher: changed 'req->iv'\n");
+               if (req->src != tsgls->src.sgl_ptr)
+                       pr_err("alg: skcipher: changed 'req->src'\n");
+               if (req->dst != tsgls->dst.sgl_ptr)
+                       pr_err("alg: skcipher: changed 'req->dst'\n");
+               if (crypto_skcipher_reqtfm(req) != tfm)
+                       pr_err("alg: skcipher: changed 'req->base.tfm'\n");
+               if (req->base.complete != crypto_req_done)
+                       pr_err("alg: skcipher: changed 'req->base.complete'\n");
+               if (req->base.flags != req_flags)
+                       pr_err("alg: skcipher: changed 'req->base.flags'\n");
+               if (req->base.data != &wait)
+                       pr_err("alg: skcipher: changed 'req->base.data'\n");
+               return -EINVAL;
+       }
+       if (is_test_sglist_corrupted(&tsgls->src)) {
+               pr_err("alg: skcipher: %s %s corrupted src sgl on test vector %u, cfg=\"%s\"\n",
+                      driver, op, vec_num, cfg->name);
+               return -EINVAL;
+       }
+       if (tsgls->dst.sgl_ptr != tsgls->src.sgl &&
+           is_test_sglist_corrupted(&tsgls->dst)) {
+               pr_err("alg: skcipher: %s %s corrupted dst sgl on test vector %u, cfg=\"%s\"\n",
+                      driver, op, vec_num, cfg->name);
+               return -EINVAL;
+       }
+
+       /* Check for the correct output (ciphertext or plaintext) */
+       err = verify_correct_output(&tsgls->dst, enc ? vec->ctext : vec->ptext,
+                                   vec->len, 0, true);
+       if (err == -EOVERFLOW) {
+               pr_err("alg: skcipher: %s %s overran dst buffer on test vector %u, cfg=\"%s\"\n",
+                      driver, op, vec_num, cfg->name);
+               return err;
+       }
+       if (err) {
+               pr_err("alg: skcipher: %s %s test failed (wrong result) on test vector %u, cfg=\"%s\"\n",
+                      driver, op, vec_num, cfg->name);
+               return err;
+       }
 
-                       temp += template[i].tap[k];
-               }
+       /* If applicable, check that the algorithm generated the correct IV */
+       if (vec->iv_out && memcmp(iv, vec->iv_out, ivsize) != 0) {
+               pr_err("alg: skcipher: %s %s test failed (wrong output IV) on test vector %u, cfg=\"%s\"\n",
+                      driver, op, vec_num, cfg->name);
+               hexdump(iv, ivsize);
+               return -EINVAL;
+       }
 
-               skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
-                                          template[i].len, iv);
+       return 0;
+}
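For comparison with the checks above, a minimal editorial sketch of the plain skcipher call sequence that test_skcipher_vec_cfg() wraps; the transform name "cbc(aes)" and a kmalloc'd in-place buffer are assumptions of the sketch:

	#include <crypto/skcipher.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>

	static int demo_skcipher_encrypt(const u8 *key, unsigned int keylen,
					 void *buf, unsigned int len, u8 *iv)
	{
		struct crypto_skcipher *tfm;
		struct skcipher_request *req;
		struct scatterlist sg;
		DECLARE_CRYPTO_WAIT(wait);
		int err;

		tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_skcipher_setkey(tfm, key, keylen);
		if (err)
			goto out_free_tfm;

		req = skcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			err = -ENOMEM;
			goto out_free_tfm;
		}

		/* Encrypt @buf in place; @iv must be crypto_skcipher_ivsize()
		 * bytes and, as the test notes, may be modified by the
		 * algorithm. */
		sg_init_one(&sg, buf, len);
		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					      crypto_req_done, &wait);
		skcipher_request_set_crypt(req, &sg, &sg, len, iv);

		err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

		skcipher_request_free(req);
	out_free_tfm:
		crypto_free_skcipher(tfm);
		return err;
	}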
 
-               ret = crypto_wait_req(enc ? crypto_skcipher_encrypt(req) :
-                                     crypto_skcipher_decrypt(req), &wait);
+static int test_skcipher_vec(const char *driver, int enc,
+                            const struct cipher_testvec *vec,
+                            unsigned int vec_num,
+                            struct skcipher_request *req,
+                            struct cipher_test_sglists *tsgls)
+{
+       unsigned int i;
+       int err;
 
-               if (ret) {
-                       pr_err("alg: skcipher%s: %s failed on chunk test %d for %s: ret=%d\n",
-                              d, e, j, algo, -ret);
-                       goto out;
-               }
+       if (fips_enabled && vec->fips_skip)
+               return 0;
 
-               temp = 0;
-               ret = -EINVAL;
-               for (k = 0; k < template[i].np; k++) {
-                       if (diff_dst)
-                               q = xoutbuf[IDX[k] >> PAGE_SHIFT] +
-                                   offset_in_page(IDX[k]);
-                       else
-                               q = xbuf[IDX[k] >> PAGE_SHIFT] +
-                                   offset_in_page(IDX[k]);
+       for (i = 0; i < ARRAY_SIZE(default_cipher_testvec_configs); i++) {
+               err = test_skcipher_vec_cfg(driver, enc, vec, vec_num,
+                                           &default_cipher_testvec_configs[i],
+                                           req, tsgls);
+               if (err)
+                       return err;
+       }
 
-                       if (memcmp(q, result + temp, template[i].tap[k])) {
-                               pr_err("alg: skcipher%s: Chunk test %d failed on %s at page %u for %s\n",
-                                      d, j, e, k, algo);
-                               hexdump(q, template[i].tap[k]);
-                               goto out;
-                       }
+#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+       if (!noextratests) {
+               struct testvec_config cfg;
+               char cfgname[TESTVEC_CONFIG_NAMELEN];
 
-                       q += template[i].tap[k];
-                       for (n = 0; offset_in_page(q + n) && q[n]; n++)
-                               ;
-                       if (n) {
-                               pr_err("alg: skcipher%s: Result buffer corruption in chunk test %d on %s at page %u for %s: %u bytes:\n",
-                                      d, j, e, k, algo, n);
-                               hexdump(q, n);
-                               goto out;
-                       }
-                       temp += template[i].tap[k];
+               for (i = 0; i < fuzz_iterations; i++) {
+                       generate_random_testvec_config(&cfg, cfgname,
+                                                      sizeof(cfgname));
+                       err = test_skcipher_vec_cfg(driver, enc, vec, vec_num,
+                                                   &cfg, req, tsgls);
+                       if (err)
+                               return err;
                }
        }
+#endif
+       return 0;
+}
 
-       ret = 0;
+static int test_skcipher(const char *driver, int enc,
+                        const struct cipher_test_suite *suite,
+                        struct skcipher_request *req,
+                        struct cipher_test_sglists *tsgls)
+{
+       unsigned int i;
+       int err;
 
-out:
-       skcipher_request_free(req);
-       if (diff_dst)
-               testmgr_free_buf(xoutbuf);
-out_nooutbuf:
-       testmgr_free_buf(xbuf);
-out_nobuf:
-       return ret;
+       for (i = 0; i < suite->count; i++) {
+               err = test_skcipher_vec(driver, enc, &suite->vecs[i], i, req,
+                                       tsgls);
+               if (err)
+                       return err;
+       }
+       return 0;
 }
 
-static int test_skcipher(struct crypto_skcipher *tfm, int enc,
-                        const struct cipher_testvec *template,
-                        unsigned int tcount)
+static int alg_test_skcipher(const struct alg_test_desc *desc,
+                            const char *driver, u32 type, u32 mask)
 {
-       unsigned int alignmask;
-       int ret;
+       const struct cipher_test_suite *suite = &desc->suite.cipher;
+       struct crypto_skcipher *tfm;
+       struct skcipher_request *req = NULL;
+       struct cipher_test_sglists *tsgls = NULL;
+       int err;
 
-       /* test 'dst == src' case */
-       ret = __test_skcipher(tfm, enc, template, tcount, false, 0);
-       if (ret)
-               return ret;
+       if (suite->count <= 0) {
+               pr_err("alg: skcipher: empty test suite for %s\n", driver);
+               return -EINVAL;
+       }
 
-       /* test 'dst != src' case */
-       ret = __test_skcipher(tfm, enc, template, tcount, true, 0);
-       if (ret)
-               return ret;
+       tfm = crypto_alloc_skcipher(driver, type, mask);
+       if (IS_ERR(tfm)) {
+               pr_err("alg: skcipher: failed to allocate transform for %s: %ld\n",
+                      driver, PTR_ERR(tfm));
+               return PTR_ERR(tfm);
+       }
 
-       /* test unaligned buffers, check with one byte offset */
-       ret = __test_skcipher(tfm, enc, template, tcount, true, 1);
-       if (ret)
-               return ret;
+       req = skcipher_request_alloc(tfm, GFP_KERNEL);
+       if (!req) {
+               pr_err("alg: skcipher: failed to allocate request for %s\n",
+                      driver);
+               err = -ENOMEM;
+               goto out;
+       }
 
-       alignmask = crypto_tfm_alg_alignmask(&tfm->base);
-       if (alignmask) {
-               /* Check if alignment mask for tfm is correctly set. */
-               ret = __test_skcipher(tfm, enc, template, tcount, true,
-                                     alignmask + 1);
-               if (ret)
-                       return ret;
+       tsgls = alloc_cipher_test_sglists();
+       if (!tsgls) {
+               pr_err("alg: skcipher: failed to allocate test buffers for %s\n",
+                      driver);
+               err = -ENOMEM;
+               goto out;
        }
 
-       return 0;
+       err = test_skcipher(driver, ENCRYPT, suite, req, tsgls);
+       if (err)
+               goto out;
+
+       err = test_skcipher(driver, DECRYPT, suite, req, tsgls);
+out:
+       free_cipher_test_sglists(tsgls);
+       skcipher_request_free(req);
+       crypto_free_skcipher(tfm);
+       return err;
 }
 
 static int test_comp(struct crypto_comp *tfm,
@@ -1735,28 +2088,6 @@ out:
        return err;
 }
 
-static int alg_test_aead(const struct alg_test_desc *desc, const char *driver,
-                        u32 type, u32 mask)
-{
-       const struct aead_test_suite *suite = &desc->suite.aead;
-       struct crypto_aead *tfm;
-       int err;
-
-       tfm = crypto_alloc_aead(driver, type, mask);
-       if (IS_ERR(tfm)) {
-               printk(KERN_ERR "alg: aead: Failed to load transform for %s: "
-                      "%ld\n", driver, PTR_ERR(tfm));
-               return PTR_ERR(tfm);
-       }
-
-       err = test_aead(tfm, ENCRYPT, suite->vecs, suite->count);
-       if (!err)
-               err = test_aead(tfm, DECRYPT, suite->vecs, suite->count);
-
-       crypto_free_aead(tfm);
-       return err;
-}
-
 static int alg_test_cipher(const struct alg_test_desc *desc,
                           const char *driver, u32 type, u32 mask)
 {
@@ -1779,28 +2110,6 @@ static int alg_test_cipher(const struct alg_test_desc *desc,
        return err;
 }
 
-static int alg_test_skcipher(const struct alg_test_desc *desc,
-                            const char *driver, u32 type, u32 mask)
-{
-       const struct cipher_test_suite *suite = &desc->suite.cipher;
-       struct crypto_skcipher *tfm;
-       int err;
-
-       tfm = crypto_alloc_skcipher(driver, type, mask);
-       if (IS_ERR(tfm)) {
-               printk(KERN_ERR "alg: skcipher: Failed to load transform for "
-                      "%s: %ld\n", driver, PTR_ERR(tfm));
-               return PTR_ERR(tfm);
-       }
-
-       err = test_skcipher(tfm, ENCRYPT, suite->vecs, suite->count);
-       if (!err)
-               err = test_skcipher(tfm, DECRYPT, suite->vecs, suite->count);
-
-       crypto_free_skcipher(tfm);
-       return err;
-}
-
 static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
                         u32 type, u32 mask)
 {
@@ -1839,67 +2148,6 @@ static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
        return err;
 }
 
-static int __alg_test_hash(const struct hash_testvec *template,
-                          unsigned int tcount, const char *driver,
-                          u32 type, u32 mask)
-{
-       struct crypto_ahash *tfm;
-       int err;
-
-       tfm = crypto_alloc_ahash(driver, type, mask);
-       if (IS_ERR(tfm)) {
-               printk(KERN_ERR "alg: hash: Failed to load transform for %s: "
-                      "%ld\n", driver, PTR_ERR(tfm));
-               return PTR_ERR(tfm);
-       }
-
-       err = test_hash(tfm, template, tcount, HASH_TEST_DIGEST);
-       if (!err)
-               err = test_hash(tfm, template, tcount, HASH_TEST_FINAL);
-       if (!err)
-               err = test_hash(tfm, template, tcount, HASH_TEST_FINUP);
-       crypto_free_ahash(tfm);
-       return err;
-}
-
-static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
-                        u32 type, u32 mask)
-{
-       const struct hash_testvec *template = desc->suite.hash.vecs;
-       unsigned int tcount = desc->suite.hash.count;
-       unsigned int nr_unkeyed, nr_keyed;
-       int err;
-
-       /*
-        * For OPTIONAL_KEY algorithms, we have to do all the unkeyed tests
-        * first, before setting a key on the tfm.  To make this easier, we
-        * require that the unkeyed test vectors (if any) are listed first.
-        */
-
-       for (nr_unkeyed = 0; nr_unkeyed < tcount; nr_unkeyed++) {
-               if (template[nr_unkeyed].ksize)
-                       break;
-       }
-       for (nr_keyed = 0; nr_unkeyed + nr_keyed < tcount; nr_keyed++) {
-               if (!template[nr_unkeyed + nr_keyed].ksize) {
-                       pr_err("alg: hash: test vectors for %s out of order, "
-                              "unkeyed ones must come first\n", desc->alg);
-                       return -EINVAL;
-               }
-       }
-
-       err = 0;
-       if (nr_unkeyed) {
-               err = __alg_test_hash(template, nr_unkeyed, driver, type, mask);
-               template += nr_unkeyed;
-       }
-
-       if (!err && nr_keyed)
-               err = __alg_test_hash(template, nr_keyed, driver, type, mask);
-
-       return err;
-}
-
 static int alg_test_crc32c(const struct alg_test_desc *desc,
                           const char *driver, u32 type, u32 mask)
 {
@@ -1909,14 +2157,21 @@ static int alg_test_crc32c(const struct alg_test_desc *desc,
 
        err = alg_test_hash(desc, driver, type, mask);
        if (err)
-               goto out;
+               return err;
 
        tfm = crypto_alloc_shash(driver, type, mask);
        if (IS_ERR(tfm)) {
+               if (PTR_ERR(tfm) == -ENOENT) {
+                       /*
+                        * This crc32c implementation is only available through
+                        * the ahash API, not the shash API, so the remaining
+                        * part of the test is not applicable to it.
+                        */
+                       return 0;
+               }
                printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: "
                       "%ld\n", driver, PTR_ERR(tfm));
-               err = PTR_ERR(tfm);
-               goto out;
+               return PTR_ERR(tfm);
        }
 
        do {
@@ -1943,7 +2198,6 @@ static int alg_test_crc32c(const struct alg_test_desc *desc,
 
        crypto_free_shash(tfm);
 
-out:
        return err;
 }
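
[Editor's note — a minimal sketch, not part of the patch, of the error-handling idea the hunk above introduces: -ENOENT from crypto_alloc_shash() is taken to mean the crc32c driver has no shash interface at all, so the shash-specific half of the test is skipped rather than reported as a failure; any other error still fails the test.]

	/*
	 * Illustrative only: treat "no shash implementation" as
	 * "nothing left to test", everything else as a real failure.
	 */
	tfm = crypto_alloc_shash(driver, type, mask);
	if (IS_ERR(tfm)) {
		if (PTR_ERR(tfm) == -ENOENT)
			return 0;		/* ahash-only driver: skip the shash part */
		return PTR_ERR(tfm);		/* genuine allocation failure */
	}
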
 
@@ -2109,12 +2363,11 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
 
        if (vec->genkey) {
                /* Save party A's public key */
-               a_public = kzalloc(out_len_max, GFP_KERNEL);
+               a_public = kmemdup(sg_virt(req->dst), out_len_max, GFP_KERNEL);
                if (!a_public) {
                        err = -ENOMEM;
                        goto free_output;
                }
-               memcpy(a_public, sg_virt(req->dst), out_len_max);
        } else {
                /* Verify calculated public key */
                if (memcmp(vec->expected_a_public, sg_virt(req->dst),
@@ -2127,13 +2380,12 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
        }
 
        /* Calculate shared secret key by using counter part (b) public key. */
-       input_buf = kzalloc(vec->b_public_size, GFP_KERNEL);
+       input_buf = kmemdup(vec->b_public, vec->b_public_size, GFP_KERNEL);
        if (!input_buf) {
                err = -ENOMEM;
                goto free_output;
        }
 
-       memcpy(input_buf, vec->b_public, vec->b_public_size);
        sg_init_one(&src, input_buf, vec->b_public_size);
        sg_init_one(&dst, output_buf, out_len_max);
        kpp_request_set_input(req, &src, vec->b_public_size);
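
[Editor's note — the three hunks around here all make the same change: a kzalloc()+memcpy() pair collapses into a single kmemdup(), which allocates and copies in one call with the same NULL-on-failure semantics; the zeroing done by kzalloc() was redundant because the buffer is fully overwritten anyway. A minimal sketch with placeholder names buf/src/len:]

	/* Before: two calls; the kzalloc() zeroing is immediately overwritten */
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	memcpy(buf, src, len);

	/* After: one call, same -ENOMEM handling on failure */
	buf = kmemdup(src, len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
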
@@ -2149,12 +2401,11 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
 
        if (vec->genkey) {
                /* Save the shared secret obtained by party A */
-               a_ss = kzalloc(vec->expected_ss_size, GFP_KERNEL);
+               a_ss = kmemdup(sg_virt(req->dst), vec->expected_ss_size, GFP_KERNEL);
                if (!a_ss) {
                        err = -ENOMEM;
                        goto free_all;
                }
-               memcpy(a_ss, sg_virt(req->dst), vec->expected_ss_size);
 
                /*
                 * Calculate party B's shared secret by using party A's
@@ -2734,6 +2985,7 @@ static const struct alg_test_desc alg_test_descs[] = {
        }, {
                .alg = "crc32",
                .test = alg_test_hash,
+               .fips_allowed = 1,
                .suite = {
                        .hash = __VECS(crc32_tv_template)
                }
@@ -3650,18 +3902,10 @@ static const struct alg_test_desc alg_test_descs[] = {
        }
 };
 
-static bool alg_test_descs_checked;
-
-static void alg_test_descs_check_order(void)
+static void alg_check_test_descs_order(void)
 {
        int i;
 
-       /* only check once */
-       if (alg_test_descs_checked)
-               return;
-
-       alg_test_descs_checked = true;
-
        for (i = 1; i < ARRAY_SIZE(alg_test_descs); i++) {
                int diff = strcmp(alg_test_descs[i - 1].alg,
                                  alg_test_descs[i].alg);
@@ -3679,6 +3923,29 @@ static void alg_test_descs_check_order(void)
        }
 }
 
+static void alg_check_testvec_configs(void)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(default_cipher_testvec_configs); i++)
+               WARN_ON(!valid_testvec_config(
+                               &default_cipher_testvec_configs[i]));
+
+       for (i = 0; i < ARRAY_SIZE(default_hash_testvec_configs); i++)
+               WARN_ON(!valid_testvec_config(
+                               &default_hash_testvec_configs[i]));
+}
+
+static void testmgr_onetime_init(void)
+{
+       alg_check_test_descs_order();
+       alg_check_testvec_configs();
+
+#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+       pr_warn("alg: extra crypto tests enabled.  This is intended for developer use only.\n");
+#endif
+}
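
[Editor's note — testmgr_onetime_init() is invoked through DO_ONCE() from <linux/once.h> (added to the includes earlier in this patch); see the call replacing alg_test_descs_check_order() further down in alg_test(). That is why the hand-rolled alg_test_descs_checked flag could be removed. A minimal sketch of the call-site pattern, with a hypothetical caller name:]

	#include <linux/once.h>

	static void example_entry_point(void)
	{
		/* DO_ONCE() runs the init at most once; no manual "done" flag needed */
		DO_ONCE(testmgr_onetime_init);
		/* ... rest of the fast path ... */
	}
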
+
 static int alg_find_test(const char *alg)
 {
        int start = 0;
@@ -3715,7 +3982,7 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
                return 0;
        }
 
-       alg_test_descs_check_order();
+       DO_ONCE(testmgr_onetime_init);
 
        if ((type & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_CIPHER) {
                char nalg[CRYPTO_MAX_ALG_NAME];