cregit-Linux how code gets into the kernel

Release 4.11 crypto/testmgr.c

Directory: crypto
/*
 * Algorithm testing framework and tests.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2002 Jean-Francois Dive <jef@linuxbe.org>
 * Copyright (c) 2007 Nokia Siemens Networks
 * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Updated RFC4106 AES-GCM testing.
 *    Authors: Aidan O'Mahony (aidan.o.mahony@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/aead.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/fips.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <crypto/rng.h>
#include <crypto/drbg.h>
#include <crypto/akcipher.h>
#include <crypto/kpp.h>
#include <crypto/acompress.h>

#include "internal.h"


static bool notests;
module_param(notests, bool, 0644);
MODULE_PARM_DESC(notests, "disable crypto self-tests");

#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS

/* a perfect nop */

/*
 * Self-tests are compiled out (CONFIG_CRYPTO_MANAGER_DISABLE_TESTS):
 * report success for every algorithm without doing any work.
 */
int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
{
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Alexander Shishkin24100.00%1100.00%
Total24100.00%1100.00%

#else #include "testmgr.h" /* * Need slab memory for testing (size in number of pages). */ #define XBUFSIZE 8 /* * Indexes into the xbuf to simulate cross-page access. */ #define IDX1 32 #define IDX2 32400 #define IDX3 1511 #define IDX4 8193 #define IDX5 22222 #define IDX6 17101 #define IDX7 27333 #define IDX8 3000 /* * Used by test_cipher() */ #define ENCRYPT 1 #define DECRYPT 0 struct tcrypt_result { struct completion completion; int err; }; struct aead_test_suite { struct { struct aead_testvec *vecs; unsigned int count; } enc, dec; }; struct cipher_test_suite { struct { struct cipher_testvec *vecs; unsigned int count; } enc, dec; }; struct comp_test_suite { struct { struct comp_testvec *vecs; unsigned int count; } comp, decomp; }; struct hash_test_suite { struct hash_testvec *vecs; unsigned int count; }; struct cprng_test_suite { struct cprng_testvec *vecs; unsigned int count; }; struct drbg_test_suite { struct drbg_testvec *vecs; unsigned int count; }; struct akcipher_test_suite { struct akcipher_testvec *vecs; unsigned int count; }; struct kpp_test_suite { struct kpp_testvec *vecs; unsigned int count; }; struct alg_test_desc { const char *alg; int (*test)(const struct alg_test_desc *desc, const char *driver, u32 type, u32 mask); int fips_allowed; /* set if alg is allowed in fips mode */ union { struct aead_test_suite aead; struct cipher_test_suite cipher; struct comp_test_suite comp; struct hash_test_suite hash; struct cprng_test_suite cprng; struct drbg_test_suite drbg; struct akcipher_test_suite akcipher; struct kpp_test_suite kpp; } suite; }; static unsigned int IDX[8] = { IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 };
/* Dump @len bytes of @buf to the log, 16 bytes per row, for diagnostics. */
static void hexdump(unsigned char *buf, unsigned int len)
{
	print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, 16, 1,
		       buf, len, false);
}

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu34100.00%1100.00%
Total34100.00%1100.00%


/*
 * Asynchronous completion callback: record the final status in the
 * tcrypt_result hanging off @req and wake the waiting tester.
 */
static void tcrypt_complete(struct crypto_async_request *req, int err)
{
	struct tcrypt_result *result = req->data;

	/* A backlogged request signals -EINPROGRESS first; wait for the real one. */
	if (err == -EINPROGRESS)
		return;

	result->err = err;
	complete(&result->completion);
}

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu45100.00%1100.00%
Total45100.00%1100.00%


/*
 * Fill @buf with XBUFSIZE freshly allocated pages.
 * Returns 0 on success, or -ENOMEM with every page released again.
 */
static int testmgr_alloc_buf(char *buf[XBUFSIZE])
{
	int n;

	for (n = 0; n < XBUFSIZE; n++) {
		buf[n] = (void *)__get_free_page(GFP_KERNEL);
		if (!buf[n]) {
			/* Unwind the pages allocated so far. */
			while (n-- > 0)
				free_page((unsigned long)buf[n]);
			return -ENOMEM;
		}
	}

	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu84100.00%1100.00%
Total84100.00%1100.00%


/* Release all XBUFSIZE pages previously obtained via testmgr_alloc_buf(). */
static void testmgr_free_buf(char *buf[XBUFSIZE])
{
	int n;

	for (n = 0; n < XBUFSIZE; n++)
		free_page((unsigned long)buf[n]);
}

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu41100.00%1100.00%
Total41100.00%1100.00%


/*
 * Resolve the outcome of a possibly-asynchronous crypto call.
 * If @ret indicates the request was only queued (-EINPROGRESS/-EBUSY),
 * sleep until tcrypt_complete() fires, re-arm the completion for the
 * next operation and return the status it recorded; otherwise pass
 * @ret straight through.
 */
static int wait_async_op(struct tcrypt_result *tr, int ret)
{
	if (ret != -EINPROGRESS && ret != -EBUSY)
		return ret;

	wait_for_completion(&tr->completion);
	reinit_completion(&tr->completion);
	return tr->err;
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller4381.13%125.00%
Rabin Vincent713.21%125.00%
Wolfram Sang23.77%125.00%
Cristian Stoica11.89%125.00%
Total53100.00%4100.00%


static int ahash_partial_update(struct ahash_request **preq, struct crypto_ahash *tfm, struct hash_testvec *template, void *hash_buff, int k, int temp, struct scatterlist *sg, const char *algo, char *result, struct tcrypt_result *tresult) { char *state; struct ahash_request *req; int statesize, ret = -EINVAL; const char guard[] = { 0x00, 0xba, 0xad, 0x00 }; req = *preq; statesize = crypto_ahash_statesize( crypto_ahash_reqtfm(req)); state = kmalloc(statesize + sizeof(guard), GFP_KERNEL); if (!state) { pr_err("alt: hash: Failed to alloc state for %s\n", algo); goto out_nostate; } memcpy(state + statesize, guard, sizeof(guard)); ret = crypto_ahash_export(req, state); WARN_ON(memcmp(state + statesize, guard, sizeof(guard))); if (ret) { pr_err("alt: hash: Failed to export() for %s\n", algo); goto out; } ahash_request_free(req); req = ahash_request_alloc(tfm, GFP_KERNEL); if (!req) { pr_err("alg: hash: Failed to alloc request for %s\n", algo); goto out_noreq; } ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, tcrypt_complete, tresult); memcpy(hash_buff, template->plaintext + temp, template->tap[k]); sg_init_one(&sg[0], hash_buff, template->tap[k]); ahash_request_set_crypt(req, sg, result, template->tap[k]); ret = crypto_ahash_import(req, state); if (ret) { pr_err("alg: hash: Failed to import() for %s\n", algo); goto out; } ret = wait_async_op(tresult, crypto_ahash_update(req)); if (ret) goto out; *preq = req; ret = 0; goto out_noreq; out: ahash_request_free(req); out_noreq: kfree(state); out_nostate: return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Rui Y Wang18151.13%114.29%
Herbert Xu9326.27%342.86%
Jan Stancek5114.41%114.29%
Horia Geantă236.50%114.29%
Jussi Kivilinna61.69%114.29%
Total354100.00%7100.00%


/*
 * Run every vector in @template against @tfm at buffer alignment
 * @align_offset, in three passes:
 *   1. single-buffer vectors (np == 0), via digest() when @use_digest
 *      or init()/update()/final() otherwise;
 *   2. chunked vectors (np != 0) scattered across pages per tap[];
 *   3. partial-update vectors (np >= 2) exercising export()/import()
 *      between chunks via ahash_partial_update().
 * Passes 2 and 3 only run at align_offset == 0 (they need continuous
 * buffers). Returns 0 if all vectors pass, negative errno otherwise.
 *
 * Fixes vs. original: the "alt: hash:" and bare "hash:" log prefixes
 * are normalized to the "alg: hash:" convention used everywhere else.
 */
static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
		       unsigned int tcount, bool use_digest,
		       const int align_offset)
{
	const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
	size_t digest_size = crypto_ahash_digestsize(tfm);
	unsigned int i, j, k, temp;
	struct scatterlist sg[8];
	char *result;
	char *key;
	struct ahash_request *req;
	struct tcrypt_result tresult;
	void *hash_buff;
	char *xbuf[XBUFSIZE];
	int ret = -ENOMEM;

	result = kmalloc(digest_size, GFP_KERNEL);
	if (!result)
		return ret;
	key = kmalloc(MAX_KEYLEN, GFP_KERNEL);
	if (!key)
		goto out_nobuf;
	if (testmgr_alloc_buf(xbuf))
		goto out_nobuf;

	init_completion(&tresult.completion);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		printk(KERN_ERR "alg: hash: Failed to allocate request for "
		       "%s\n", algo);
		goto out_noreq;
	}
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   tcrypt_complete, &tresult);

	/* Pass 1: continuous-buffer vectors. */
	j = 0;
	for (i = 0; i < tcount; i++) {
		if (template[i].np)
			continue;

		ret = -EINVAL;
		if (WARN_ON(align_offset + template[i].psize > PAGE_SIZE))
			goto out;

		j++;
		memset(result, 0, digest_size);

		hash_buff = xbuf[0];
		hash_buff += align_offset;

		memcpy(hash_buff, template[i].plaintext, template[i].psize);
		sg_init_one(&sg[0], hash_buff, template[i].psize);

		if (template[i].ksize) {
			crypto_ahash_clear_flags(tfm, ~0);
			if (template[i].ksize > MAX_KEYLEN) {
				pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n",
				       j, algo, template[i].ksize, MAX_KEYLEN);
				ret = -EINVAL;
				goto out;
			}
			memcpy(key, template[i].key, template[i].ksize);
			ret = crypto_ahash_setkey(tfm, key, template[i].ksize);
			if (ret) {
				printk(KERN_ERR "alg: hash: setkey failed on "
				       "test %d for %s: ret=%d\n", j, algo,
				       -ret);
				goto out;
			}
		}

		ahash_request_set_crypt(req, sg, result, template[i].psize);
		if (use_digest) {
			ret = wait_async_op(&tresult, crypto_ahash_digest(req));
			if (ret) {
				pr_err("alg: hash: digest failed on test %d "
				       "for %s: ret=%d\n", j, algo, -ret);
				goto out;
			}
		} else {
			ret = wait_async_op(&tresult, crypto_ahash_init(req));
			if (ret) {
				pr_err("alg: hash: init failed on test %d "
				       "for %s: ret=%d\n", j, algo, -ret);
				goto out;
			}
			ret = wait_async_op(&tresult, crypto_ahash_update(req));
			if (ret) {
				pr_err("alg: hash: update failed on test %d "
				       "for %s: ret=%d\n", j, algo, -ret);
				goto out;
			}
			ret = wait_async_op(&tresult, crypto_ahash_final(req));
			if (ret) {
				pr_err("alg: hash: final failed on test %d "
				       "for %s: ret=%d\n", j, algo, -ret);
				goto out;
			}
		}

		if (memcmp(result, template[i].digest,
			   crypto_ahash_digestsize(tfm))) {
			printk(KERN_ERR "alg: hash: Test %d failed for %s\n",
			       j, algo);
			hexdump(result, crypto_ahash_digestsize(tfm));
			ret = -EINVAL;
			goto out;
		}
	}

	/* Pass 2: chunked (scattered) vectors. */
	j = 0;
	for (i = 0; i < tcount; i++) {
		/* alignment tests are only done with continuous buffers */
		if (align_offset != 0)
			break;

		if (!template[i].np)
			continue;

		j++;
		memset(result, 0, digest_size);

		temp = 0;
		sg_init_table(sg, template[i].np);
		ret = -EINVAL;
		for (k = 0; k < template[i].np; k++) {
			if (WARN_ON(offset_in_page(IDX[k]) +
				    template[i].tap[k] > PAGE_SIZE))
				goto out;
			/* Place each chunk at a cross-page IDX offset. */
			sg_set_buf(&sg[k],
				   memcpy(xbuf[IDX[k] >> PAGE_SHIFT] +
					  offset_in_page(IDX[k]),
					  template[i].plaintext + temp,
					  template[i].tap[k]),
				   template[i].tap[k]);
			temp += template[i].tap[k];
		}

		if (template[i].ksize) {
			if (template[i].ksize > MAX_KEYLEN) {
				pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n",
				       j, algo, template[i].ksize, MAX_KEYLEN);
				ret = -EINVAL;
				goto out;
			}
			crypto_ahash_clear_flags(tfm, ~0);
			memcpy(key, template[i].key, template[i].ksize);
			ret = crypto_ahash_setkey(tfm, key, template[i].ksize);

			if (ret) {
				printk(KERN_ERR "alg: hash: setkey "
				       "failed on chunking test %d "
				       "for %s: ret=%d\n", j, algo, -ret);
				goto out;
			}
		}

		ahash_request_set_crypt(req, sg, result, template[i].psize);
		ret = crypto_ahash_digest(req);
		switch (ret) {
		case 0:
			break;
		case -EINPROGRESS:
		case -EBUSY:
			wait_for_completion(&tresult.completion);
			reinit_completion(&tresult.completion);
			ret = tresult.err;
			if (!ret)
				break;
			/* fall through */
		default:
			printk(KERN_ERR "alg: hash: digest failed "
			       "on chunking test %d for %s: "
			       "ret=%d\n", j, algo, -ret);
			goto out;
		}

		if (memcmp(result, template[i].digest,
			   crypto_ahash_digestsize(tfm))) {
			printk(KERN_ERR "alg: hash: Chunking test %d "
			       "failed for %s\n", j, algo);
			hexdump(result, crypto_ahash_digestsize(tfm));
			ret = -EINVAL;
			goto out;
		}
	}

	/* Pass 3: partial update exercise (export/import between chunks). */
	j = 0;
	for (i = 0; i < tcount; i++) {
		/* alignment tests are only done with continuous buffers */
		if (align_offset != 0)
			break;

		if (template[i].np < 2)
			continue;

		j++;
		memset(result, 0, digest_size);

		ret = -EINVAL;
		hash_buff = xbuf[0];
		memcpy(hash_buff, template[i].plaintext,
		       template[i].tap[0]);
		sg_init_one(&sg[0], hash_buff, template[i].tap[0]);

		if (template[i].ksize) {
			crypto_ahash_clear_flags(tfm, ~0);
			if (template[i].ksize > MAX_KEYLEN) {
				pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n",
				       j, algo, template[i].ksize, MAX_KEYLEN);
				ret = -EINVAL;
				goto out;
			}
			memcpy(key, template[i].key, template[i].ksize);
			ret = crypto_ahash_setkey(tfm, key, template[i].ksize);
			if (ret) {
				pr_err("alg: hash: setkey failed on test %d for %s: ret=%d\n",
				       j, algo, -ret);
				goto out;
			}
		}

		ahash_request_set_crypt(req, sg, result, template[i].tap[0]);
		ret = wait_async_op(&tresult, crypto_ahash_init(req));
		if (ret) {
			pr_err("alg: hash: init failed on test %d for %s: ret=%d\n",
			       j, algo, -ret);
			goto out;
		}
		ret = wait_async_op(&tresult, crypto_ahash_update(req));
		if (ret) {
			pr_err("alg: hash: update failed on test %d for %s: ret=%d\n",
			       j, algo, -ret);
			goto out;
		}

		temp = template[i].tap[0];
		for (k = 1; k < template[i].np; k++) {
			ret = ahash_partial_update(&req, tfm, &template[i],
				hash_buff, k, temp, &sg[0], algo, result,
				&tresult);
			if (ret) {
				pr_err("alg: hash: partial update failed on test %d for %s: ret=%d\n",
				       j, algo, -ret);
				/* req was already freed inside the helper. */
				goto out_noreq;
			}
			temp += template[i].tap[k];
		}
		ret = wait_async_op(&tresult, crypto_ahash_final(req));
		if (ret) {
			pr_err("alg: hash: final failed on test %d for %s: ret=%d\n",
			       j, algo, -ret);
			goto out;
		}
		if (memcmp(result, template[i].digest,
			   crypto_ahash_digestsize(tfm))) {
			pr_err("alg: hash: Partial Test %d failed for %s\n",
			       j, algo);
			hexdump(result, crypto_ahash_digestsize(tfm));
			ret = -EINVAL;
			goto out;
		}
	}

	ret = 0;

out:
	ahash_request_free(req);
out_noreq:
	testmgr_free_buf(xbuf);
out_nobuf:
	kfree(key);
	kfree(result);
	return ret;
}

Contributors

PersonTokensPropCommitsCommitProp
Rui Y Wang104565.19%18.33%
Herbert Xu39324.52%433.33%
Horia Geantă1227.61%18.33%
David S. Miller130.81%18.33%
Andrew Lutomirski120.75%18.33%
Jussi Kivilinna80.50%18.33%
Rabin Vincent80.50%18.33%
Cristian Stoica10.06%18.33%
Wolfram Sang10.06%18.33%
Total1603100.00%12100.00%


/*
 * Run the hash test vectors at several buffer alignments: fully
 * aligned, one byte off, and (when the tfm declares an alignmask)
 * one byte past the alignment boundary. Returns 0 only if every
 * run succeeds.
 */
static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
		     unsigned int tcount, bool use_digest)
{
	int rc;
	unsigned int mask;

	rc = __test_hash(tfm, template, tcount, use_digest, 0);
	if (rc)
		return rc;

	/* Repeat with a one byte offset to exercise unaligned buffers. */
	rc = __test_hash(tfm, template, tcount, use_digest, 1);
	if (rc)
		return rc;

	mask = crypto_tfm_alg_alignmask(&tfm->base);
	if (!mask)
		return 0;

	/* Verify the alignment mask advertised by the tfm is honoured. */
	return __test_hash(tfm, template, tcount, use_digest, mask + 1);
}

Contributors

PersonTokensPropCommitsCommitProp
Jussi Kivilinna119100.00%1100.00%
Total119100.00%1100.00%


static int __test_aead(struct crypto_aead *tfm, int enc, struct aead_testvec *template, unsigned int tcount, const bool diff_dst, const int align_offset) { const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)); unsigned int i, j, k, n, temp; int ret = -ENOMEM; char *q; char *key; struct aead_request *req; struct scatterlist *sg; struct scatterlist *sgout; const char *e, *d; struct tcrypt_result result; unsigned int authsize, iv_len; void *input; void *output; void *assoc; char *iv; char *xbuf[XBUFSIZE]; char *xoutbuf[XBUFSIZE]; char *axbuf[XBUFSIZE]; iv = kzalloc(MAX_IVLEN, GFP_KERNEL); if (!iv) return ret; key = kmalloc(MAX_KEYLEN, GFP_KERNEL); if (!key) goto out_noxbuf; if (testmgr_alloc_buf(xbuf)) goto out_noxbuf; if (testmgr_alloc_buf(axbuf)) goto out_noaxbuf; if (diff_dst && testmgr_alloc_buf(xoutbuf)) goto out_nooutbuf; /* avoid "the frame size is larger than 1024 bytes" compiler warning */ sg = kmalloc(sizeof(*sg) * 8 * (diff_dst ? 4 : 2), GFP_KERNEL); if (!sg) goto out_nosg; sgout = &sg[16]; if (diff_dst) d = "-ddst"; else d = ""; if (enc