cregit-Linux how code gets into the kernel

Release 4.11 crypto/skcipher.c

Directory: crypto
/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
#include <net/netlink.h>

#include "internal.h"

/* Internal state flags kept in skcipher_walk::flags. */
enum {
	SKCIPHER_WALK_PHYS = 1 << 0,	/* async walk over physical pages; writes
					 * staged via walk->buffers */
	SKCIPHER_WALK_SLOW = 1 << 1,	/* current chunk bounced through an
					 * aligned temporary buffer */
	SKCIPHER_WALK_COPY = 1 << 2,	/* misaligned chunk staged in walk->page */
	SKCIPHER_WALK_DIFF = 1 << 3,	/* src and dst mapped separately */
	SKCIPHER_WALK_SLEEP = 1 << 4,	/* allocations may use GFP_KERNEL */
};


/*
 * Deferred write-back record used by async (physical) walks: data held in
 * @data (or the inline @buffer) still has to be copied out to @dst when
 * skcipher_walk_complete() runs.
 */
struct skcipher_walk_buffer {
	struct list_head entry;		/* link in walk->buffers */
	struct scatter_walk dst;	/* destination for the write-back */
	unsigned int len;		/* number of bytes to copy back */
	u8 *data;			/* out-of-line page, or NULL if inline */
	u8 buffer[];			/* inline bounce buffer */
};

static int skcipher_walk_next(struct skcipher_walk *walk);


static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr) { if (PageHighMem(scatterwalk_page(walk))) kunmap_atomic(vaddr); }

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu31100.00%1100.00%
Total31100.00%1100.00%


static inline void *skcipher_map(struct scatter_walk *walk) { struct page *page = scatterwalk_page(walk); return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) + offset_in_page(walk->offset); }

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu48100.00%1100.00%
Total48100.00%1100.00%


static inline void skcipher_map_src(struct skcipher_walk *walk) { walk->src.virt.addr = skcipher_map(&walk->in); }

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu28100.00%1100.00%
Total28100.00%1100.00%


static inline void skcipher_map_dst(struct skcipher_walk *walk) { walk->dst.virt.addr = skcipher_map(&walk->out); }

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu28100.00%1100.00%
Total28100.00%1100.00%


static inline void skcipher_unmap_src(struct skcipher_walk *walk) { skcipher_unmap(&walk->in, walk->src.virt.addr); }

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu28100.00%1100.00%
Total28100.00%1100.00%


static inline void skcipher_unmap_dst(struct skcipher_walk *walk) { skcipher_unmap(&walk->out, walk->dst.virt.addr); }

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu28100.00%1100.00%
Total28100.00%1100.00%


static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk) { return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC; }

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu23100.00%1100.00%
Total23100.00%1100.00%

/*
 * Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this
 * operation: if [start, start + len) would cross a page boundary, the
 * returned pointer is advanced to the start of the following page.
 */
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
	unsigned long last = (unsigned long)start + len - 1;
	u8 *last_page = (u8 *)(last & PAGE_MASK);

	return max(start, last_page);
}

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu50100.00%1100.00%
Total50100.00%1100.00%


static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize) { u8 *addr; addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1); addr = skcipher_get_spot(addr, bsize); scatterwalk_copychunks(addr, &walk->out, bsize, (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1); return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu78100.00%1100.00%
Total78100.00%1100.00%


int skcipher_walk_done(struct skcipher_walk *walk, int err) { unsigned int n = walk->nbytes - err; unsigned int nbytes; nbytes = walk->total - n; if (unlikely(err < 0)) { nbytes = 0; n = 0; } else if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS | SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY | SKCIPHER_WALK_DIFF)))) { unmap_src: skcipher_unmap_src(walk); } else if (walk->flags & SKCIPHER_WALK_DIFF) { skcipher_unmap_dst(walk); goto unmap_src; } else if (walk->flags & SKCIPHER_WALK_COPY) { skcipher_map_dst(walk); memcpy(walk->dst.virt.addr, walk->page, n); skcipher_unmap_dst(walk); } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) { if (WARN_ON(err)) { err = -EINVAL; nbytes = 0; } else n = skcipher_done_slow(walk, n); } if (err > 0) err = 0; walk->total = nbytes; walk->nbytes = nbytes; scatterwalk_advance(&walk->in, n); scatterwalk_advance(&walk->out, n); scatterwalk_done(&walk->in, 0, nbytes); scatterwalk_done(&walk->out, 1, nbytes); if (nbytes) { crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ? CRYPTO_TFM_REQ_MAY_SLEEP : 0); return skcipher_walk_next(walk); } /* Short-circuit for the common/fast path. */ if (!((unsigned long)walk->buffer | (unsigned long)walk->page)) goto out; if (walk->flags & SKCIPHER_WALK_PHYS) goto out; if (walk->iv != walk->oiv) memcpy(walk->oiv, walk->iv, walk->ivsize); if (walk->buffer != walk->page) kfree(walk->buffer); if (walk->page) free_page((unsigned long)walk->page); out: return err; }

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu376100.00%1100.00%
Total376100.00%1100.00%

EXPORT_SYMBOL_GPL(skcipher_walk_done);
void skcipher_walk_complete(struct skcipher_walk *walk, int err) { struct skcipher_walk_buffer *p, *tmp; list_for_each_entry_safe(p, tmp, &walk->buffers, entry) { u8 *data; if (err) goto done; data = p->data; if (!data) { data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1); data = skcipher_get_spot(data, walk->stride); } scatterwalk_copychunks(data, &p->dst, p->len, 1); if (offset_in_page(p->data) + p->len + walk->stride > PAGE_SIZE) free_page((unsigned long)p->data); done: list_del(&p->entry); kfree(p); } if (!err && walk->iv != walk->oiv) memcpy(walk->oiv, walk->iv, walk->ivsize); if (walk->buffer != walk->page) kfree(walk->buffer); if (walk->page) free_page((unsigned long)walk->page); }

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu20899.05%150.00%
Ard Biesheuvel20.95%150.00%
Total210100.00%2100.00%

EXPORT_SYMBOL_GPL(skcipher_walk_complete);
static void skcipher_queue_write(struct skcipher_walk *walk, struct skcipher_walk_buffer *p) { p->dst = walk->out; list_add_tail(&p->entry, &walk->buffers); }

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu37100.00%1100.00%
Total37100.00%1100.00%


static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize) { bool phys = walk->flags & SKCIPHER_WALK_PHYS; unsigned alignmask = walk->alignmask; struct skcipher_walk_buffer *p; unsigned a; unsigned n; u8 *buffer; void *v; if (!phys) { if (!walk->buffer) walk->buffer = walk->page; buffer = walk->buffer; if (buffer) goto ok; } /* Start with the minimum alignment of kmalloc. */ a = crypto_tfm_ctx_alignment() - 1; n = bsize; if (phys) { /* Calculate the minimum alignment of p->buffer. */ a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1; n += sizeof(*p); } /* Minimum size to align p->buffer by alignmask. */ n += alignmask & ~a; /* Minimum size to ensure p->buffer does not straddle a page. */ n += (bsize - 1) & ~(alignmask | a); v = kzalloc(n, skcipher_walk_gfp(walk)); if (!v) return skcipher_walk_done(walk, -ENOMEM); if (phys) { p = v; p->len = bsize; skcipher_queue_write(walk, p); buffer = p->buffer; } else { walk->buffer = v; buffer = v; } ok: walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1); walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize); walk->src.virt.addr = walk->dst.virt.addr; scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0); walk->nbytes = bsize; walk->flags |= SKCIPHER_WALK_SLOW; return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu30495.90%150.00%
Ard Biesheuvel134.10%150.00%
Total317100.00%2100.00%


static int skcipher_next_copy(struct skcipher_walk *walk) { struct skcipher_walk_buffer *p; u8 *tmp = walk->page; skcipher_map_src(walk); memcpy(tmp, walk->src.virt.addr, walk->nbytes); skcipher_unmap_src(walk); walk->src.virt.addr = tmp; walk->dst.virt.addr = tmp; if (!(walk->flags & SKCIPHER_WALK_PHYS)) return 0; p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk)); if (!p) return -ENOMEM; p->data = walk->page; p->len = walk->nbytes; skcipher_queue_write(walk, p); if (offset_in_page(walk->page) + walk->nbytes + walk->stride > PAGE_SIZE) walk->page = NULL; else walk->page += walk->nbytes; return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu16999.41%150.00%
Ard Biesheuvel10.59%150.00%
Total170100.00%2100.00%


static int skcipher_next_fast(struct skcipher_walk *walk) { unsigned long diff; walk->src.phys.page = scatterwalk_page(&walk->in); walk->src.phys.offset = offset_in_page(walk->in.offset); walk->dst.phys.page = scatterwalk_page(&walk->out); walk->dst.phys.offset = offset_in_page(walk->out.offset); if (walk->flags & SKCIPHER_WALK_PHYS) return 0; diff = walk->src.phys.offset - walk->dst.phys.offset; diff |= walk->src.virt.page - walk->dst.virt.page; skcipher_map_src(walk); walk->dst.virt.addr = walk->src.virt.addr; if (diff) { walk->flags |= SKCIPHER_WALK_DIFF; skcipher_map_dst(walk); } return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu169100.00%1100.00%
Total169100.00%1100.00%


static int skcipher_walk_next(struct skcipher_walk *walk) { unsigned int bsize; unsigned int n; int err; walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY | SKCIPHER_WALK_DIFF); n = walk->total; bsize = min(walk->stride, max(n, walk->blocksize)); n = scatterwalk_clamp(&walk->in, n); n = scatterwalk_clamp(&walk->out, n); if (unlikely(n < bsize)) { if (unlikely(walk->total < walk->blocksize)) return skcipher_walk_done(walk, -EINVAL); slow_path: err = skcipher_next_slow(walk, bsize); goto set_phys_lowmem; } if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) { if (!walk->page) { gfp_t gfp = skcipher_walk_gfp(walk); walk->page = (void *)__get_free_page(gfp); if (!walk->page) goto slow_path; } walk->nbytes = min_t(unsigned, n, PAGE_SIZE - offset_in_page(walk->page)); walk->flags |= SKCIPHER_WALK_COPY; err = skcipher_next_copy(walk); goto set_phys_lowmem; } walk->nbytes = n; return skcipher_next_fast(walk); set_phys_lowmem: if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) { walk->src.phys.page = virt_to_page(walk->src.virt.addr); walk->dst.phys.page = virt_to_page(walk->dst.virt.addr); walk->src.phys.offset &= PAGE_SIZE - 1; walk->dst.phys.offset &= PAGE_SIZE - 1; } return err; }

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu32499.69%150.00%
Ard Biesheuvel10.31%150.00%
Total325100.00%2100.00%

EXPORT_SYMBOL_GPL(skcipher_walk_next);
static int skcipher_copy_iv(struct skcipher_walk *walk) { unsigned a = crypto_tfm_ctx_alignment() - 1; unsigned alignmask = walk->alignmask; unsigned ivsize = walk->ivsize; unsigned bs = walk->stride; unsigned aligned_bs; unsigned size; u8 *iv; aligned_bs = ALIGN(bs, alignmask); /* Minimum size to align buffer by alignmask. */ size = alignmask & ~a; if (walk->flags & SKCIPHER_WALK_PHYS) size += ivsize; else { size += aligned_bs + ivsize; /* Minimum size to ensure buffer does not straddle a page. */ size += (bs - 1) & ~(alignmask | a); } walk->buffer = kmalloc(size, skcipher_walk_gfp(walk)); if (!walk->buffer) return -ENOMEM; iv = PTR_ALIGN(walk->buffer, alignmask + 1); iv = skcipher_get_spot(iv, bs) + aligned_bs; walk->iv = memcpy(iv, walk->iv, walk->ivsize); return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu17299.42%150.00%
Ard Biesheuvel10.58%150.00%
Total173100.00%2100.00%


static int skcipher_walk_first(struct skcipher_walk *walk) { walk->nbytes = 0; if (WARN_ON_ONCE(in_irq())) return -EDEADLK; if (unlikely(!walk->total)) return 0; walk->buffer = NULL; if (unlikely(((unsigned long)walk->iv & walk->alignmask))) { int err = skcipher_copy_iv(walk); if (err) return err; } walk->page = NULL; walk->nbytes = walk->total; return skcipher_walk_next(walk); }

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu104100.00%1100.00%
Total104100.00%1100.00%


static int skcipher_walk_skcipher(struct skcipher_walk *walk, struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); scatterwalk_start(&walk->in, req->src); scatterwalk_start(&walk->out, req->dst); walk->total = req->cryptlen; walk->iv = req->iv; walk->oiv = req->iv; walk->flags &= ~SKCIPHER_WALK_SLEEP; walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? SKCIPHER_WALK_SLEEP : 0; walk->blocksize = crypto_skcipher_blocksize(tfm); walk->stride = crypto_skcipher_walksize(tfm); walk->ivsize = crypto_skcipher_ivsize(tfm); walk->alignmask = crypto_skcipher_alignmask(tfm); return skcipher_walk_first(walk); }

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu13798.56%150.00%
Ard Biesheuvel21.44%150.00%
Total139100.00%2100.00%


int skcipher_walk_virt(struct skcipher_walk *walk, struct skcipher_request *req, bool atomic) { int err; walk->flags &= ~SKCIPHER_WALK_PHYS; err = skcipher_walk_skcipher(walk, req); walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0; return err; }

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu52100.00%1100.00%
Total52100.00%1100.00%

EXPORT_SYMBOL_GPL(skcipher_walk_virt);
void skcipher_walk_atomise(struct skcipher_walk *walk) { walk->flags &= ~SKCIPHER_WALK_SLEEP; }

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu17100.00%1100.00%
Total17100.00%1100.00%

EXPORT_SYMBOL_GPL(skcipher_walk_atomise);
int skcipher_walk_async(struct skcipher_walk *walk, struct skcipher_request *req) { walk->flags |= SKCIPHER_WALK_PHYS; INIT_LIST_HEAD(&walk->buffers); return skcipher_walk_skcipher(walk, req); }

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu37100.00%1100.00%
Total37100.00%1100.00%

EXPORT_SYMBOL_GPL(skcipher_walk_async);
static int skcipher_walk_aead_common(struct skcipher_walk *walk, struct aead_request *req, bool atomic) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); int err; walk->flags &= ~SKCIPHER_WALK_PHYS; scatterwalk_start(&walk->in, req->src); scatterwalk_start(&walk->out, req->dst); scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2); scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2); walk->iv = req->iv; walk->oiv = req->iv; if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) walk->flags |= SKCIPHER_WALK_SLEEP; else walk->flags &= ~SKCIPHER_WALK_SLEEP; walk->blocksize = crypto_aead_blocksize(tfm); walk->stride = crypto_aead_chunksize(tfm); walk->ivsize = crypto_aead_ivsize(tfm); walk->alignmask = crypto_aead_alignmask(tfm); err = skcipher_walk_first(walk); if (atomic) walk->flags &= ~SKCIPHER_WALK_SLEEP; return err; }

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu18495.83%250.00%
Ard Biesheuvel84.17%250.00%
Total192100.00%4100.00%


int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req, bool atomic) { walk->total = req->cryptlen; return skcipher_walk_aead_common(walk, req, atomic); }

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu36100.00%1100.00%
Total36100.00%1100.00%

EXPORT_SYMBOL_GPL(skcipher_walk_aead);
int skcipher_walk_aead_encrypt(struct skcipher_walk *walk, struct aead_request *req, bool atomic) { walk->total = req->cryptlen; return skcipher_walk_aead_common(walk, req, atomic); }

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu36100.00%1100.00%
Total36100.00%1100.00%

EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);
int skcipher_walk_aead_decrypt(struct skcipher_walk *walk, struct aead_request *req, bool atomic) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); walk->total = req->cryptlen - crypto_aead_authsize(tfm); return skcipher_walk_aead_common(walk, req, atomic); }

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu51100.00%1100.00%
Total51100.00%1100.00%

EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);
static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg) { if (alg->cra_type == &crypto_blkcipher_type) return sizeof(struct crypto_blkcipher *); if (alg->cra_type == &crypto_ablkcipher_type || alg->cra_type == &crypto_givcipher_type) return sizeof(struct crypto_ablkcipher *); return crypto_alg_extsize(alg); }

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu59100.00%2100.00%
Total59100.00%2100.00%


static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm); struct crypto_blkcipher *blkcipher = *ctx; int err; crypto_blkcipher_clear_flags(blkcipher,