Contributors: 14

| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Herbert Xu | 694 | 45.81% | 13 | 32.50% |
| Tadeusz Struk | 444 | 29.31% | 2 | 5.00% |
| Andrzej Zaborowski | 109 | 7.19% | 1 | 2.50% |
| Corentin Labbe | 68 | 4.49% | 6 | 15.00% |
| Vitaly Chikunov | 60 | 3.96% | 1 | 2.50% |
| Ignat Korchagin | 39 | 2.57% | 1 | 2.50% |
| Steffen Klassert | 31 | 2.05% | 2 | 5.00% |
| Eric Biggers | 27 | 1.78% | 4 | 10.00% |
| James Morris | 17 | 1.12% | 4 | 10.00% |
| Gideon Israel Dsouza | 11 | 0.73% | 1 | 2.50% |
| Giovanni Cabiddu | 5 | 0.33% | 2 | 5.00% |
| Ondrej Mosnáček | 5 | 0.33% | 1 | 2.50% |
| Loc Ho | 3 | 0.20% | 1 | 2.50% |
| Thomas Gleixner | 2 | 0.13% | 1 | 2.50% |
| Total | 1515 | | 40 | |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Public Key Encryption
*
* Copyright (c) 2015, Intel Corporation
* Authors: Tadeusz Struk <tadeusz.struk@intel.com>
*/
#include <crypto/internal/akcipher.h>
#include <linux/cryptouser.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <net/netlink.h>
#include "internal.h"
#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
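
/*
 * Report akcipher algorithm details to user space over the crypto
 * netlink (CRYPTO_USER) interface.
 */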
static int __maybe_unused crypto_akcipher_report(
struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_akcipher rakcipher;
memset(&rakcipher, 0, sizeof(rakcipher));
strscpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
return nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER,
sizeof(rakcipher), &rakcipher);
}
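
/* Entry printed for akcipher algorithms in /proc/crypto. */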
static void crypto_akcipher_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
static void crypto_akcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
seq_puts(m, "type : akcipher\n");
}
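
/*
 * ->exit is called unconditionally here because this handler is only
 * installed (in crypto_akcipher_init_tfm() below) when alg->exit is set.
 */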
static void crypto_akcipher_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_akcipher *akcipher = __crypto_akcipher_tfm(tfm);
struct akcipher_alg *alg = crypto_akcipher_alg(akcipher);
alg->exit(akcipher);
}
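
/*
 * Initialise a new transform: install the exit handler if the algorithm
 * provides one, then run its optional ->init hook.
 */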
static int crypto_akcipher_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_akcipher *akcipher = __crypto_akcipher_tfm(tfm);
struct akcipher_alg *alg = crypto_akcipher_alg(akcipher);
if (alg->exit)
akcipher->base.exit = crypto_akcipher_exit_tfm;
if (alg->init)
return alg->init(akcipher);
return 0;
}
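
/* Release a template instance through its mandatory ->free callback. */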
static void crypto_akcipher_free_instance(struct crypto_instance *inst)
{
struct akcipher_instance *akcipher = akcipher_instance(inst);
akcipher->free(akcipher);
}
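
/*
 * Dump the per-algorithm usage counters (CONFIG_CRYPTO_STATS) to user
 * space via netlink.
 */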
static int __maybe_unused crypto_akcipher_report_stat(
struct sk_buff *skb, struct crypto_alg *alg)
{
struct akcipher_alg *akcipher = __crypto_akcipher_alg(alg);
struct crypto_istat_akcipher *istat;
struct crypto_stat_akcipher rakcipher;
istat = akcipher_get_stat(akcipher);
memset(&rakcipher, 0, sizeof(rakcipher));
strscpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
rakcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
rakcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
rakcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
rakcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
rakcipher.stat_sign_cnt = atomic64_read(&istat->sign_cnt);
rakcipher.stat_verify_cnt = atomic64_read(&istat->verify_cnt);
rakcipher.stat_err_cnt = atomic64_read(&istat->err_cnt);
return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER,
sizeof(rakcipher), &rakcipher);
}
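
/* Frontend describing the akcipher type to the generic crypto core. */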
static const struct crypto_type crypto_akcipher_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_akcipher_init_tfm,
.free = crypto_akcipher_free_instance,
#ifdef CONFIG_PROC_FS
.show = crypto_akcipher_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_akcipher_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
.report_stat = crypto_akcipher_report_stat,
#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
.type = CRYPTO_ALG_TYPE_AKCIPHER,
.tfmsize = offsetof(struct crypto_akcipher, base),
};
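
/*
 * Look up an akcipher algorithm by name and take a reference to it on
 * behalf of a template instance.
 */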
int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn,
struct crypto_instance *inst,
const char *name, u32 type, u32 mask)
{
spawn->base.frontend = &crypto_akcipher_type;
return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_akcipher);
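
/*
 * Allocate a transform handle for the named algorithm, e.g. "rsa".
 * Returns an ERR_PTR() value on failure.
 */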
struct crypto_akcipher *crypto_alloc_akcipher(const char *alg_name, u32 type,
u32 mask)
{
return crypto_alloc_tfm(alg_name, &crypto_akcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_akcipher);
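
/*
 * Example (a minimal sketch, not part of the original file): a caller
 * pairs crypto_alloc_akcipher() with crypto_free_akcipher() and checks
 * the result with IS_ERR():
 *
 *	struct crypto_akcipher *tfm;
 *
 *	tfm = crypto_alloc_akcipher("rsa", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_akcipher(tfm);
 */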
static void akcipher_prepare_alg(struct akcipher_alg *alg)
{
struct crypto_istat_akcipher *istat = akcipher_get_stat(alg);
struct crypto_alg *base = &alg->base;
base->cra_type = &crypto_akcipher_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_AKCIPHER;
if (IS_ENABLED(CONFIG_CRYPTO_STATS))
memset(istat, 0, sizeof(*istat));
}
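
/*
 * -ENOSYS stubs installed below for any operation a driver leaves NULL,
 * so the core never has to check for missing callbacks.
 */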
static int akcipher_default_op(struct akcipher_request *req)
{
return -ENOSYS;
}
static int akcipher_default_set_key(struct crypto_akcipher *tfm,
const void *key, unsigned int keylen)
{
return -ENOSYS;
}
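
/*
 * Register a public key cipher algorithm, filling in default stubs for
 * any unimplemented operations first.
 */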
int crypto_register_akcipher(struct akcipher_alg *alg)
{
struct crypto_alg *base = &alg->base;
if (!alg->sign)
alg->sign = akcipher_default_op;
if (!alg->verify)
alg->verify = akcipher_default_op;
if (!alg->encrypt)
alg->encrypt = akcipher_default_op;
if (!alg->decrypt)
alg->decrypt = akcipher_default_op;
if (!alg->set_priv_key)
alg->set_priv_key = akcipher_default_set_key;
akcipher_prepare_alg(alg);
return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_akcipher);
void crypto_unregister_akcipher(struct akcipher_alg *alg)
{
crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_akcipher);
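
/*
 * Register an akcipher template instance; a ->free callback is required
 * so the instance can be torn down again.
 */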
int akcipher_register_instance(struct crypto_template *tmpl,
struct akcipher_instance *inst)
{
if (WARN_ON(!inst->free))
return -EINVAL;
akcipher_prepare_alg(&inst->alg);
return crypto_register_instance(tmpl, akcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(akcipher_register_instance);
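
/*
 * Prepare a synchronous request: allocate the request and a bounce
 * buffer in a single allocation, copy the source data in, and use one
 * scatterlist for both source and destination (an in-place operation
 * when the caller supplied a destination buffer).
 */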
int crypto_akcipher_sync_prep(struct crypto_akcipher_sync_data *data)
{
unsigned int reqsize = crypto_akcipher_reqsize(data->tfm);
struct akcipher_request *req;
struct scatterlist *sg;
unsigned int mlen;
unsigned int len;
u8 *buf;
if (data->dst)
mlen = max(data->slen, data->dlen);
else
mlen = data->slen + data->dlen;
len = sizeof(*req) + reqsize + mlen;
if (len < mlen)
return -EOVERFLOW;
req = kzalloc(len, GFP_KERNEL);
if (!req)
return -ENOMEM;
data->req = req;
akcipher_request_set_tfm(req, data->tfm);
buf = (u8 *)(req + 1) + reqsize;
data->buf = buf;
memcpy(buf, data->src, data->slen);
sg = &data->sg;
sg_init_one(sg, buf, mlen);
akcipher_request_set_crypt(req, sg, data->dst ? sg : NULL,
data->slen, data->dlen);
crypto_init_wait(&data->cwait);
akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
crypto_req_done, &data->cwait);
return 0;
}
EXPORT_SYMBOL_GPL(crypto_akcipher_sync_prep);
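
/*
 * Wait for a synchronous request to finish, copy the result back to the
 * caller's buffer and free the request (zeroing it first, since it may
 * hold key-dependent data).
 */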
int crypto_akcipher_sync_post(struct crypto_akcipher_sync_data *data, int err)
{
err = crypto_wait_req(err, &data->cwait);
if (data->dst)
memcpy(data->dst, data->buf, data->dlen);
data->dlen = data->req->dst_len;
kfree_sensitive(data->req);
return err;
}
EXPORT_SYMBOL_GPL(crypto_akcipher_sync_post);
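
/*
 * One-call synchronous encrypt: prepare the request, run it, then wait
 * and copy the result back.
 */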
int crypto_akcipher_sync_encrypt(struct crypto_akcipher *tfm,
const void *src, unsigned int slen,
void *dst, unsigned int dlen)
{
struct crypto_akcipher_sync_data data = {
.tfm = tfm,
.src = src,
.dst = dst,
.slen = slen,
.dlen = dlen,
};
return crypto_akcipher_sync_prep(&data) ?:
crypto_akcipher_sync_post(&data,
crypto_akcipher_encrypt(data.req));
}
EXPORT_SYMBOL_GPL(crypto_akcipher_sync_encrypt);
int crypto_akcipher_sync_decrypt(struct crypto_akcipher *tfm,
const void *src, unsigned int slen,
void *dst, unsigned int dlen)
{
struct crypto_akcipher_sync_data data = {
.tfm = tfm,
.src = src,
.dst = dst,
.slen = slen,
.dlen = dlen,
};
return crypto_akcipher_sync_prep(&data) ?:
crypto_akcipher_sync_post(&data,
crypto_akcipher_decrypt(data.req)) ?:
data.dlen;
}
EXPORT_SYMBOL_GPL(crypto_akcipher_sync_decrypt);
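
/*
 * Example (a sketch, not part of the original file; assumes a tfm with
 * the public key already set): encrypting a buffer with the synchronous
 * helper:
 *
 *	u8 out[256];
 *	int err;
 *
 *	err = crypto_akcipher_sync_encrypt(tfm, in, in_len,
 *					   out, sizeof(out));
 *	if (err)
 *		return err;
 *
 * crypto_akcipher_sync_decrypt() is used the same way, but returns the
 * number of bytes written to the destination on success.
 */

/*
 * Glue for the signature (sig) API: store an akcipher transform in the
 * sig tfm's context and release it again on exit.
 */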
static void crypto_exit_akcipher_ops_sig(struct crypto_tfm *tfm)
{
struct crypto_akcipher **ctx = crypto_tfm_ctx(tfm);
crypto_free_akcipher(*ctx);
}
int crypto_init_akcipher_ops_sig(struct crypto_tfm *tfm)
{
struct crypto_akcipher **ctx = crypto_tfm_ctx(tfm);
struct crypto_alg *calg = tfm->__crt_alg;
struct crypto_akcipher *akcipher;
if (!crypto_mod_get(calg))
return -EAGAIN;
akcipher = crypto_create_tfm(calg, &crypto_akcipher_type);
if (IS_ERR(akcipher)) {
crypto_mod_put(calg);
return PTR_ERR(akcipher);
}
*ctx = akcipher;
tfm->exit = crypto_exit_akcipher_ops_sig;
return 0;
}
EXPORT_SYMBOL_GPL(crypto_init_akcipher_ops_sig);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic public key cipher type");