[dm-devel] [PATCH v5 1/7] crypto: essiv - create wrapper template for ESSIV generation

Ard Biesheuvel ard.biesheuvel at linaro.org
Thu Jun 27 07:04:10 UTC 2019


On Wed, 26 Jun 2019 at 22:40, Ard Biesheuvel <ard.biesheuvel at linaro.org> wrote:
>
> Implement a template that wraps a (skcipher,cipher,shash) or
> (aead,cipher,shash) tuple so that we can consolidate the ESSIV handling
> in fscrypt and dm-crypt and move it into the crypto API. This will result
> in better test coverage, and will allow future changes to make the bare
> cipher interface internal to the crypto subsystem, in order to increase
> robustness of the API against misuse.
>
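
For reference, a user would instantiate this template by name; below is
a minimal sketch based on the essiv(<skcipher>,<cipher>,<shash>) naming
used by this version of the patch, with error handling trimmed. ESSIV
derives the IV by encrypting the sector number with a key that is
itself a hash of the bulk key, which is what the cipher and shash
members of the tuple provide.

    #include <crypto/skcipher.h>
    #include <linux/err.h>

    static int essiv_example_init(const u8 *key, unsigned int keylen,
                                  struct crypto_skcipher **out)
    {
            struct crypto_skcipher *tfm;
            int err;

            /* wrap cbc(aes), deriving the ESSIV key with SHA-256 */
            tfm = crypto_alloc_skcipher("essiv(cbc(aes),aes,sha256)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            /* a single setkey: the template hashes the key internally
             * to derive the ESSIV key for the inner cipher */
            err = crypto_skcipher_setkey(tfm, key, keylen);
            if (err) {
                    crypto_free_skcipher(tfm);
                    return err;
            }

            *out = tfm;
            return 0;
    }

The AEAD flavour works the same way via crypto_alloc_aead(), with a
name along the lines of essiv(authenc(hmac(sha256),cbc(aes)),aes,sha256).
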
> Signed-off-by: Ard Biesheuvel <ard.biesheuvel at linaro.org>
> ---
>  crypto/Kconfig  |   4 +
>  crypto/Makefile |   1 +
>  crypto/essiv.c  | 636 ++++++++++++++++++++
>  3 files changed, 641 insertions(+)
>
...
> diff --git a/crypto/essiv.c b/crypto/essiv.c
> new file mode 100644
> index 000000000000..fddf6dcc3823
> --- /dev/null
> +++ b/crypto/essiv.c
> @@ -0,0 +1,636 @@
...
> +static void essiv_aead_done(struct crypto_async_request *areq, int err)
> +{
> +       struct aead_request *req = areq->data;
> +       struct essiv_aead_request_ctx *rctx = aead_request_ctx(req);
> +
> +       /* a backlog notification means the request is not done yet */
> +       if (err != -EINPROGRESS)
> +               kfree(rctx->iv);
> +       aead_request_complete(req, err);
> +}
> +
> +static int essiv_aead_crypt(struct aead_request *req, bool enc)
> +{
> +       gfp_t gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL
> +                                                                : GFP_ATOMIC;
> +       struct crypto_aead *tfm = crypto_aead_reqtfm(req);
> +       const struct essiv_tfm_ctx *tctx = crypto_aead_ctx(tfm);
> +       struct essiv_aead_request_ctx *rctx = aead_request_ctx(req);
> +       struct aead_request *subreq = &rctx->aead_req;
> +       struct scatterlist *sg;
> +       int err;
> +
> +       crypto_cipher_encrypt_one(tctx->essiv_cipher, req->iv, req->iv);
> +
> +       /*
> +        * dm-crypt embeds the sector number and the IV in the AAD region, so
> +        * we have to copy the converted IV into the source scatterlist before
> +        * we pass it on. If the source and destination scatterlist pointers
> +        * are the same, we just update the IV copy in the AAD region in-place.
> +        * However, if they are different, the caller is not expecting us to
> +        * modify the memory described by the source scatterlist, and so we have
> +        * to do this little dance to create a new scatterlist that backs the
> +        * IV slot in the AAD region with a scratch buffer that we can freely
> +        * modify.
> +        */
> +       rctx->iv = NULL;
> +       if (req->src != req->dst) {
> +               int ivsize = crypto_aead_ivsize(tfm);
> +               int ssize = req->assoclen - ivsize;
> +               u8 *iv;
> +
> +               if (ssize < 0 || sg_nents_for_len(req->src, ssize) != 1)
> +                       return -EINVAL;
> +
> +               if (enc) {
> +                       rctx->iv = iv = kmemdup(req->iv, ivsize, gfp);

This allocation is not really needed: I'll enlarge the request ctx
struct instead, so the scratch IV can be incorporated as an anonymous
member.
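
Something along these lines (illustrative only - 'ivoffset' is a
made-up per-tfm field here, needed because the inner AEAD's request
ctx size is only known at instantiation time):

    /* reqsize is grown by ivsize when the instance is created, so
     * the scratch IV lives at a fixed offset past the inner request
     * ctx and no per-request allocation or free path is needed */
    u8 *iv = (u8 *)aead_request_ctx(req) + tctx->ivoffset;

    memcpy(iv, req->iv, ivsize);
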

> +                       if (!iv)
> +                               return -ENOMEM;
> +               } else {
> +                       /*
> +                        * On the decrypt path, the ahash executes before the
> +                        * skcipher gets a chance to clobber req->iv with its
> +                        * output IV, so just map the buffer directly.
> +                        */
> +                       iv = req->iv;
> +               }
> +
> +               sg_init_table(rctx->sg, 4);
> +               sg_set_page(rctx->sg, sg_page(req->src), ssize, req->src->offset);
> +               sg_set_buf(rctx->sg + 1, iv, ivsize);
> +               sg = scatterwalk_ffwd(rctx->sg + 2, req->src, req->assoclen);
> +               if (sg != rctx->sg + 2)
> +                       sg_chain(rctx->sg, 3, sg);
> +               sg = rctx->sg;
> +       } else {
> +               scatterwalk_map_and_copy(req->iv, req->dst,
> +                                        req->assoclen - crypto_aead_ivsize(tfm),
> +                                        crypto_aead_ivsize(tfm), 1);
> +               sg = req->src;
> +       }
> +
> +       aead_request_set_tfm(subreq, tctx->u.aead);
> +       aead_request_set_ad(subreq, req->assoclen);
> +       aead_request_set_callback(subreq, aead_request_flags(req),
> +                                 essiv_aead_done, req);
> +       aead_request_set_crypt(subreq, sg, req->dst, req->cryptlen, req->iv);
> +
> +       err = enc ? crypto_aead_encrypt(subreq) :
> +                   crypto_aead_decrypt(subreq);
> +
> +       /*
> +        * An in-flight (-EINPROGRESS) or backlogged (-EBUSY) request is
> +        * completed via essiv_aead_done(), which frees the scratch IV.
> +        */
> +       if (err != -EINPROGRESS && err != -EBUSY)
> +               kfree(rctx->iv);
> +
> +       return err;
> +}
> +
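
For reviewers following along, this is the AAD layout the comment in
essiv_aead_crypt() refers to, i.e. dm-crypt's authenc format (field
widths illustrative):

    |<-------- assoclen -------->|<------- cryptlen ------->|
    |  sector number  |    IV    |        ciphertext        |
    |<---- ssize ---->|<-ivsize->|

rctx->sg[0] covers the sector prefix, rctx->sg[1] re-points the IV
slot at a buffer we are allowed to modify, and the payload follows
either directly (when scatterwalk_ffwd() filled rctx->sg[2] in place)
or through the sg_chain() link when ffwd returned an entry inside
req->src.
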
...



