NetBSD/sys/dev/cgd_crypto.c


/* $NetBSD: cgd_crypto.c,v 1.27 2020/07/25 22:14:35 riastradh Exp $ */
/*-
* Copyright (c) 2002 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Roland C. Dowdeswell.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Crypto Framework For cgd.c
*
* This framework is temporary and awaits a more complete
* kernel-wide crypto implementation.
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cgd_crypto.c,v 1.27 2020/07/25 22:14:35 riastradh Exp $");
#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/systm.h>
#include <dev/cgd_crypto.h>
#include <crypto/adiantum/adiantum.h>
#include <crypto/aes/aes.h>
#include <crypto/aes/aes_cbc.h>
#include <crypto/aes/aes_xts.h>
#include <crypto/blowfish/blowfish.h>
#include <crypto/des/des.h>
/*
* The general framework provides only one generic function.
* It takes the name of an algorithm and returns a struct cryptfuncs *
* for it. It is up to the initialisation routines of the algorithm
* to check key size and block size.
*/
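/*
 * Conventions, summarised from the implementations below: key
 * lengths are passed in bits; *blocksize is an in/out parameter,
 * also in bits, where (size_t)-1 on entry asks the cipher for its
 * default; and the blkno argument to a cipher function is the
 * sector number encoded as a single cipher-block-sized buffer.
 */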
static cfunc_init cgd_cipher_aes_cbc_init;
static cfunc_destroy cgd_cipher_aes_cbc_destroy;
static cfunc_cipher cgd_cipher_aes_cbc;
static cfunc_init cgd_cipher_aes_xts_init;
static cfunc_destroy cgd_cipher_aes_xts_destroy;
static cfunc_cipher cgd_cipher_aes_xts;
static cfunc_init cgd_cipher_3des_init;
static cfunc_destroy cgd_cipher_3des_destroy;
static cfunc_cipher cgd_cipher_3des_cbc;
static cfunc_init cgd_cipher_bf_init;
static cfunc_destroy cgd_cipher_bf_destroy;
static cfunc_cipher cgd_cipher_bf_cbc;
static cfunc_init cgd_cipher_adiantum_init;
static cfunc_destroy cgd_cipher_adiantum_destroy;
static cfunc_cipher cgd_cipher_adiantum_crypt;
static const struct cryptfuncs cf[] = {
{
.cf_name = "aes-xts",
.cf_init = cgd_cipher_aes_xts_init,
.cf_destroy = cgd_cipher_aes_xts_destroy,
.cf_cipher = cgd_cipher_aes_xts,
},
{
.cf_name = "aes-cbc",
.cf_init = cgd_cipher_aes_cbc_init,
.cf_destroy = cgd_cipher_aes_cbc_destroy,
.cf_cipher = cgd_cipher_aes_cbc,
},
{
.cf_name = "3des-cbc",
.cf_init = cgd_cipher_3des_init,
.cf_destroy = cgd_cipher_3des_destroy,
.cf_cipher = cgd_cipher_3des_cbc,
},
{
.cf_name = "blowfish-cbc",
.cf_init = cgd_cipher_bf_init,
.cf_destroy = cgd_cipher_bf_destroy,
.cf_cipher = cgd_cipher_bf_cbc,
},
{
.cf_name = "adiantum",
.cf_init = cgd_cipher_adiantum_init,
.cf_destroy = cgd_cipher_adiantum_destroy,
.cf_cipher = cgd_cipher_adiantum_crypt,
},
};
const struct cryptfuncs *
cryptfuncs_find(const char *alg)
{
for (size_t i = 0; i < __arraycount(cf); i++)
if (strcmp(cf[i].cf_name, alg) == 0)
return &cf[i];
return NULL;
}
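/*
 * Illustrative sketch only (not part of this file): how a consumer
 * such as cgd.c might drive this framework.  The variables key,
 * dst, src, secsize, and blkno_buf are hypothetical; the lookup,
 * init, cipher, destroy sequence follows the struct cryptfuncs
 * contract above.
 *
 *	const struct cryptfuncs *cf;
 *	size_t blocksize = (size_t)-1;	// take the cipher's default
 *	void *priv;
 *
 *	cf = cryptfuncs_find("aes-cbc");
 *	if (cf == NULL)
 *		return ENOENT;
 *	priv = cf->cf_init(256, key, &blocksize);	// keylen in bits
 *	if (priv == NULL)
 *		return EINVAL;
 *	cf->cf_cipher(priv, dst, src, secsize, blkno_buf,
 *	    CGD_CIPHER_ENCRYPT);
 *	cf->cf_destroy(priv);
 */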
/*
* AES Framework
*/
struct aes_privdata {
struct aesenc ap_enckey;
struct aesdec ap_deckey;
uint32_t ap_nrounds;
};
static void *
cgd_cipher_aes_cbc_init(size_t keylen, const void *key, size_t *blocksize)
{
struct aes_privdata *ap;
if (!blocksize)
return NULL;
if (keylen != 128 && keylen != 192 && keylen != 256)
return NULL;
if (*blocksize == (size_t)-1)
*blocksize = 128;
if (*blocksize != 128)
return NULL;
ap = kmem_zalloc(sizeof(*ap), KM_SLEEP);
switch (keylen) {
case 128:
aes_setenckey128(&ap->ap_enckey, key);
aes_setdeckey128(&ap->ap_deckey, key);
ap->ap_nrounds = AES_128_NROUNDS;
break;
case 192:
aes_setenckey192(&ap->ap_enckey, key);
aes_setdeckey192(&ap->ap_deckey, key);
ap->ap_nrounds = AES_192_NROUNDS;
break;
case 256:
aes_setenckey256(&ap->ap_enckey, key);
aes_setdeckey256(&ap->ap_deckey, key);
ap->ap_nrounds = AES_256_NROUNDS;
break;
}
return ap;
}
static void
cgd_cipher_aes_cbc_destroy(void *data)
{
struct aes_privdata *apd = data;
explicit_memset(apd, 0, sizeof(*apd));
kmem_free(apd, sizeof(*apd));
}
static void
cgd_cipher_aes_cbc(void *privdata, void *dst, const void *src, size_t nbytes,
const void *blkno, int dir)
{
struct aes_privdata *apd = privdata;
uint8_t iv[CGD_AES_BLOCK_SIZE] __aligned(CGD_AES_BLOCK_SIZE) = {0};
/* Compute the CBC IV as AES_k(blkno). */
aes_enc(&apd->ap_enckey, blkno, iv, apd->ap_nrounds);
switch (dir) {
case CGD_CIPHER_ENCRYPT:
aes_cbc_enc(&apd->ap_enckey, src, dst, nbytes, iv,
apd->ap_nrounds);
break;
case CGD_CIPHER_DECRYPT:
aes_cbc_dec(&apd->ap_deckey, src, dst, nbytes, iv,
apd->ap_nrounds);
break;
default:
panic("%s: unrecognised direction %d", __func__, dir);
}
}
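/*
 * A sketch (not code) of the per-sector IV scheme implemented
 * above: for sector number n presented as one AES block,
 *
 *	IV(n) = AES_k(n)
 *	C     = AES-CBC-Encrypt_k(IV(n), P)
 *
 * Encrypting the sector number instead of using it directly keeps
 * the IVs unpredictable without the key, which CBC needs for its
 * usual security guarantees.  The 3DES and Blowfish entries below
 * derive their IVs the same way with their own block ciphers.
 */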
/*
* AES-XTS
*/
struct aesxts {
struct aesenc ax_enckey;
struct aesdec ax_deckey;
struct aesenc ax_tweakkey;
uint32_t ax_nrounds;
};
static void *
cgd_cipher_aes_xts_init(size_t keylen, const void *xtskey, size_t *blocksize)
{
struct aesxts *ax;
const char *key, *key2; /* XTS key is made of two AES keys. */
if (!blocksize)
return NULL;
if (keylen != 256 && keylen != 512)
return NULL;
if (*blocksize == (size_t)-1)
*blocksize = 128;
if (*blocksize != 128)
return NULL;
ax = kmem_zalloc(sizeof(*ax), KM_SLEEP);
keylen /= 2;
key = xtskey;
key2 = key + keylen / CHAR_BIT;
switch (keylen) {
case 128:
aes_setenckey128(&ax->ax_enckey, key);
aes_setdeckey128(&ax->ax_deckey, key);
aes_setenckey128(&ax->ax_tweakkey, key2);
ax->ax_nrounds = AES_128_NROUNDS;
break;
case 256:
aes_setenckey256(&ax->ax_enckey, key);
aes_setdeckey256(&ax->ax_deckey, key);
aes_setenckey256(&ax->ax_tweakkey, key2);
ax->ax_nrounds = AES_256_NROUNDS;
break;
}
return ax;
}
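/*
 * Sketch of the key layout the init routine above expects: an
 * "aes-xts" key is two equal-size AES keys concatenated, e.g. for
 * keylen = 512,
 *
 *	bits   0..255  data key   -> ax_enckey/ax_deckey
 *	bits 256..511  tweak key  -> ax_tweakkey
 *
 * The initial tweak for a sector is AES_tweakkey(blkno); the
 * aes_xts_enc/aes_xts_dec primitives then step the tweak across
 * the successive 128-bit blocks of the sector.
 */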
static void
cgd_cipher_aes_xts_destroy(void *cookie)
{
struct aesxts *ax = cookie;
explicit_memset(ax, 0, sizeof(*ax));
kmem_free(ax, sizeof(*ax));
}
static void
cgd_cipher_aes_xts(void *cookie, void *dst, const void *src, size_t nbytes,
const void *blkno, int dir)
{
struct aesxts *ax = cookie;
uint8_t tweak[CGD_AES_BLOCK_SIZE];
/* Compute the initial tweak as AES_k(blkno). */
aes_enc(&ax->ax_tweakkey, blkno, tweak, ax->ax_nrounds);
switch (dir) {
case CGD_CIPHER_ENCRYPT:
aes_xts_enc(&ax->ax_enckey, src, dst, nbytes, tweak,
ax->ax_nrounds);
break;
case CGD_CIPHER_DECRYPT:
aes_xts_dec(&ax->ax_deckey, src, dst, nbytes, tweak,
ax->ax_nrounds);
break;
default:
panic("%s: unrecognised direction %d", __func__, dir);
}
}
/*
* 3DES Framework
*/
struct c3des_privdata {
des_key_schedule cp_key1;
des_key_schedule cp_key2;
des_key_schedule cp_key3;
};
static void *
cgd_cipher_3des_init(size_t keylen, const void *key, size_t *blocksize)
{
struct c3des_privdata *cp;
int error = 0;
des_cblock *block;
if (!blocksize)
return NULL;
if (*blocksize == (size_t)-1)
*blocksize = 64;
if (keylen != (DES_KEY_SZ * 3 * 8) || *blocksize != 64)
return NULL;
cp = kmem_zalloc(sizeof(*cp), KM_SLEEP);
block = __UNCONST(key);
error = des_key_sched(block, cp->cp_key1);
error |= des_key_sched(block + 1, cp->cp_key2);
error |= des_key_sched(block + 2, cp->cp_key3);
if (error) {
explicit_memset(cp, 0, sizeof(*cp));
kmem_free(cp, sizeof(*cp));
return NULL;
}
return cp;
}
static void
cgd_cipher_3des_destroy(void *data)
{
struct c3des_privdata *cp = data;
explicit_memset(cp, 0, sizeof(*cp));
kmem_free(cp, sizeof(*cp));
}
static void
cgd_cipher_3des_cbc(void *privdata, void *dst, const void *src, size_t nbytes,
const void *blkno, int dir)
{
struct c3des_privdata *cp = privdata;
des_cblock zero;
uint8_t iv[CGD_3DES_BLOCK_SIZE];
/* Compute the CBC IV as 3DES_k(blkno) = 3DES-CBC_k(iv=blkno, 0). */
memset(&zero, 0, sizeof(zero));
des_ede3_cbc_encrypt(blkno, iv, CGD_3DES_BLOCK_SIZE,
cp->cp_key1, cp->cp_key2, cp->cp_key3, &zero, /*encrypt*/1);
switch (dir) {
case CGD_CIPHER_ENCRYPT:
des_ede3_cbc_encrypt(src, dst, nbytes,
cp->cp_key1, cp->cp_key2, cp->cp_key3,
(des_cblock *)iv, /*encrypt*/1);
break;
case CGD_CIPHER_DECRYPT:
des_ede3_cbc_encrypt(src, dst, nbytes,
cp->cp_key1, cp->cp_key2, cp->cp_key3,
(des_cblock *)iv, /*encrypt*/0);
break;
default:
panic("%s: unrecognised direction %d", __func__, dir);
}
}
/*
* Blowfish Framework
*/
struct bf_privdata {
BF_KEY bp_key;
};
struct bf_encdata {
BF_KEY *be_key;
uint8_t be_iv[CGD_BF_BLOCK_SIZE];
};
static void *
cgd_cipher_bf_init(size_t keylen, const void *key, size_t *blocksize)
{
struct bf_privdata *bp;
if (!blocksize)
return NULL;
if (keylen < 40 || keylen > 448 || (keylen % 8 != 0))
return NULL;
if (*blocksize == (size_t)-1)
*blocksize = 64;
if (*blocksize != 64)
return NULL;
bp = kmem_zalloc(sizeof(*bp), KM_SLEEP);
BF_set_key(&bp->bp_key, keylen / 8, key);
return bp;
}
static void
cgd_cipher_bf_destroy(void *data)
2002-10-04 22:22:35 +04:00
{
struct bf_privdata *bp = data;
explicit_memset(bp, 0, sizeof(*bp));
kmem_free(bp, sizeof(*bp));
}
static void
cgd_cipher_bf_cbc(void *privdata, void *dst, const void *src, size_t nbytes,
const void *blkno, int dir)
{
struct bf_privdata *bp = privdata;
uint8_t zero[CGD_BF_BLOCK_SIZE], iv[CGD_BF_BLOCK_SIZE];
/* Compute the CBC IV as Blowfish_k(blkno) = BF_CBC_k(blkno, 0). */
memset(zero, 0, sizeof(zero));
BF_cbc_encrypt(blkno, iv, CGD_BF_BLOCK_SIZE, &bp->bp_key, zero,
/*encrypt*/1);
switch (dir) {
case CGD_CIPHER_ENCRYPT:
BF_cbc_encrypt(src, dst, nbytes, &bp->bp_key, iv,
/*encrypt*/1);
break;
case CGD_CIPHER_DECRYPT:
BF_cbc_encrypt(src, dst, nbytes, &bp->bp_key, iv,
/*encrypt*/0);
break;
default:
panic("%s: unrecognised direction %d", __func__, dir);
}
}
/*
* Adiantum
*/
static void *
cgd_cipher_adiantum_init(size_t keylen, const void *key, size_t *blocksize)
{
struct adiantum *A;
if (!blocksize)
return NULL;
if (keylen != 256)
return NULL;
if (*blocksize == (size_t)-1)
*blocksize = 128;
if (*blocksize != 128)
return NULL;
A = kmem_zalloc(sizeof(*A), KM_SLEEP);
adiantum_init(A, key);
return A;
}
static void
cgd_cipher_adiantum_destroy(void *cookie)
{
struct adiantum *A = cookie;
explicit_memset(A, 0, sizeof(*A));
kmem_free(A, sizeof(*A));
}
static void
cgd_cipher_adiantum_crypt(void *cookie, void *dst, const void *src,
size_t nbytes, const void *blkno, int dir)
{
/*
* Treat the block number as a 128-bit block.  This is more
* than twice as wide as any reasonable block number, but it
* doesn't hurt (it would be rounded up to a 128-bit input
* anyway).
*/
const unsigned tweaklen = 16;
struct adiantum *A = cookie;
switch (dir) {
case CGD_CIPHER_ENCRYPT:
adiantum_enc(dst, src, nbytes, blkno, tweaklen, A);
break;
case CGD_CIPHER_DECRYPT:
adiantum_dec(dst, src, nbytes, blkno, tweaklen, A);
break;
default:
panic("%s: unrecognised direction %d", __func__, dir);
}
}
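/*
 * Illustrative note (not code): with the "adiantum" entry, a whole
 * cgd sector is one wide-block message.  For a 512-byte sector
 * whose block number n is presented as the 16-byte tweak,
 *
 *	C = Adiantum-Encrypt_k(tweak = n, P)	// all 512 bytes at once
 *
 * so every plaintext bit of the sector affects every ciphertext
 * bit, unlike CBC, where a change propagates only forward within
 * the sector, or XTS, where it stays within one 16-byte block.
 */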