[dm-devel] [RFC PATCH v2] md/dm-crypt - reuse eboiv skcipher for IV generation

Milan Broz gmazyland at gmail.com
Thu Aug 8 11:53:11 UTC 2019


Hi,

On 07/08/2019 07:50, Ard Biesheuvel wrote:
> Instead of instantiating a separate cipher to perform the encryption
> needed to produce the IV, reuse the skcipher used for the block data
> and invoke it one additional time for each block to encrypt a zero
> vector and use the output as the IV.
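
Concretely, for sector number s the new code derives the IV as (a sketch of
the construction only; ENC denotes one extra pass through the data-path
skcipher with the data-path key):

    IV(s) = ENC(request IV = le64(s * sector_size), zero-padded to iv_size,
                plaintext  = iv_size bytes of zeroes)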
> 
> For CBC mode, this is equivalent to using the bare block cipher, but
> without the risk of ending up with a non-time-invariant implementation
> of AES even when the skcipher itself is time invariant (e.g., arm64
> without Crypto Extensions has a NEON-based time-invariant implementation
> of cbc(aes), but no time-invariant implementation of the core cipher
> other than aes-ti, which is not enabled by default).
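
To spell out the single-block CBC case: with plaintext P = 0 and the request
IV set to the sector's byte offset,

    C = E_K(P XOR IV) = E_K(le64(s * sector_size)),

which is exactly what the old code computed with crypto_cipher_encrypt_one()
on the separately allocated bare cipher (see the removed lines in the diff
below).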
> 
> This approach is a compromise between dm-crypt API flexibility and
> reducing dependence on parts of the crypto API that should not usually
> be exposed to other subsystems, such as the bare cipher API.
> 
> Signed-off-by: Ard Biesheuvel <ard.biesheuvel at linaro.org>

For now I have just a pair of test images here, but the checksums seem to be OK.

Tested-by: Milan Broz <gmazyland at gmail.com>

I talked with Mike already, so it should go through the DM tree now.

Thanks!
Milan


> ---
>  drivers/md/dm-crypt.c | 70 ++++++++++++++-----------------------------
>  1 file changed, 22 insertions(+), 48 deletions(-)
> 
> diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
> index d5216bcc4649..48cd76c88d77 100644
> --- a/drivers/md/dm-crypt.c
> +++ b/drivers/md/dm-crypt.c
> @@ -120,10 +120,6 @@ struct iv_tcw_private {
>  	u8 *whitening;
>  };
>  
> -struct iv_eboiv_private {
> -	struct crypto_cipher *tfm;
> -};
> -
>  /*
>   * Crypt: maps a linear range of a block device
>   * and encrypts / decrypts at the same time.
> @@ -163,7 +159,6 @@ struct crypt_config {
>  		struct iv_benbi_private benbi;
>  		struct iv_lmk_private lmk;
>  		struct iv_tcw_private tcw;
> -		struct iv_eboiv_private eboiv;
>  	} iv_gen_private;
>  	u64 iv_offset;
>  	unsigned int iv_size;
> @@ -847,65 +842,47 @@ static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv,
>  	return 0;
>  }
>  
> -static void crypt_iv_eboiv_dtr(struct crypt_config *cc)
> -{
> -	struct iv_eboiv_private *eboiv = &cc->iv_gen_private.eboiv;
> -
> -	crypto_free_cipher(eboiv->tfm);
> -	eboiv->tfm = NULL;
> -}
> -
>  static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti,
>  			    const char *opts)
>  {
> -	struct iv_eboiv_private *eboiv = &cc->iv_gen_private.eboiv;
> -	struct crypto_cipher *tfm;
> -
> -	tfm = crypto_alloc_cipher(cc->cipher, 0, 0);
> -	if (IS_ERR(tfm)) {
> -		ti->error = "Error allocating crypto tfm for EBOIV";
> -		return PTR_ERR(tfm);
> +	if (test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags)) {
> +		ti->error = "AEAD transforms not supported for EBOIV";
> +		return -EINVAL;
>  	}
>  
> -	if (crypto_cipher_blocksize(tfm) != cc->iv_size) {
> +	if (crypto_skcipher_blocksize(any_tfm(cc)) != cc->iv_size) {
>  		ti->error = "Block size of EBOIV cipher does "
>  			    "not match IV size of block cipher";
> -		crypto_free_cipher(tfm);
>  		return -EINVAL;
>  	}
>  
> -	eboiv->tfm = tfm;
>  	return 0;
>  }
>  
> -static int crypt_iv_eboiv_init(struct crypt_config *cc)
> +static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
> +			    struct dm_crypt_request *dmreq)
>  {
> -	struct iv_eboiv_private *eboiv = &cc->iv_gen_private.eboiv;
> +	u8 buf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(__le64));
> +	struct skcipher_request *req;
> +	struct scatterlist src, dst;
> +	DECLARE_CRYPTO_WAIT(wait);
>  	int err;
>  
> -	err = crypto_cipher_setkey(eboiv->tfm, cc->key, cc->key_size);
> -	if (err)
> -		return err;
> -
> -	return 0;
> -}
> -
> -static int crypt_iv_eboiv_wipe(struct crypt_config *cc)
> -{
> -	/* Called after cc->key is set to random key in crypt_wipe() */
> -	return crypt_iv_eboiv_init(cc);
> -}
> +	req = skcipher_request_alloc(any_tfm(cc), GFP_KERNEL | GFP_NOFS);
> +	if (!req)
> +		return -ENOMEM;
>  
> -static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
> -			    struct dm_crypt_request *dmreq)
> -{
> -	struct iv_eboiv_private *eboiv = &cc->iv_gen_private.eboiv;
> +	memset(buf, 0, cc->iv_size);
> +	*(__le64 *)buf = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
>  
> -	memset(iv, 0, cc->iv_size);
> -	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
> -	crypto_cipher_encrypt_one(eboiv->tfm, iv, iv);
> +	sg_init_one(&src, page_address(ZERO_PAGE(0)), cc->iv_size);
> +	sg_init_one(&dst, iv, cc->iv_size);
> +	skcipher_request_set_crypt(req, &src, &dst, cc->iv_size, buf);
> +	skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
> +	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
> +	skcipher_request_free(req);
>  
> -	return 0;
> +	return err;
>  }
>  
>  static const struct crypt_iv_operations crypt_iv_plain_ops = {
> @@ -962,9 +939,6 @@ static struct crypt_iv_operations crypt_iv_random_ops = {
>  
>  static struct crypt_iv_operations crypt_iv_eboiv_ops = {
>  	.ctr	   = crypt_iv_eboiv_ctr,
> -	.dtr	   = crypt_iv_eboiv_dtr,
> -	.init	   = crypt_iv_eboiv_init,
> -	.wipe	   = crypt_iv_eboiv_wipe,
>  	.generator = crypt_iv_eboiv_gen
>  };
>  
> 
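
For anyone who wants to experiment with the request pattern the new generator
relies on outside of dm-crypt, here is a minimal self-contained sketch. The
function eboiv_encrypt_zero() and its parameters are made up for illustration;
it simply mirrors crypt_iv_eboiv_gen() above and is not part of the patch:

/*
 * Illustrative only: derive an IV by encrypting one block of zeroes with
 * the given skcipher, using the sector's byte offset as the request IV.
 */
#include <crypto/algapi.h>		/* MAX_CIPHER_BLOCKSIZE */
#include <crypto/skcipher.h>
#include <linux/mm.h>			/* ZERO_PAGE(), page_address() */
#include <linux/scatterlist.h>
#include <linux/string.h>

static int eboiv_encrypt_zero(struct crypto_skcipher *tfm, u8 *out_iv,
			      unsigned int ivsize, u64 byte_offset)
{
	u8 reqiv[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(__le64));
	struct skcipher_request *req;
	struct scatterlist src, dst;
	DECLARE_CRYPTO_WAIT(wait);	/* on-stack completion for crypto_wait_req() */
	int err;

	req = skcipher_request_alloc(tfm, GFP_KERNEL | GFP_NOFS);
	if (!req)
		return -ENOMEM;

	/* Request IV: little-endian byte offset of the sector, zero padded. */
	memset(reqiv, 0, ivsize);
	*(__le64 *)reqiv = cpu_to_le64(byte_offset);

	/* Encrypt ivsize bytes of zeroes; the ciphertext becomes the IV. */
	sg_init_one(&src, page_address(ZERO_PAGE(0)), ivsize);
	sg_init_one(&dst, out_iv, ivsize);
	skcipher_request_set_crypt(req, &src, &dst, ivsize, reqiv);

	/* Run the request synchronously, even on an async implementation. */
	skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
	return err;
}

The DECLARE_CRYPTO_WAIT() initialisation matters if the skcipher
implementation happens to be asynchronous, since crypto_wait_req() will then
sleep on the embedded completion until crypto_req_done() fires.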



