QCOM SDCC controller supports the Inline Crypto Engine (ICE).
This driver enables inline encryption/decryption
through ICE. The algorithms supported by ICE are XTS(AES)
and CBC(AES).
Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
---
Change in [v2]
* Added dm-inlinecrypt driver support
* squash the patch blk-crypto: Add additional algo modes for Inline
encryption and md: dm-crypt: Add additional algo modes for inline
encryption and added in this
Change in [v1]
* This patch was not included in [v1]
block/blk-crypto.c | 21 +++
drivers/md/Kconfig | 8 +
drivers/md/Makefile | 1 +
drivers/md/dm-inline-crypt.c | 316 +++++++++++++++++++++++++++++++++++
include/linux/blk-crypto.h | 3 +
5 files changed, 349 insertions(+)
create mode 100644 drivers/md/dm-inline-crypt.c
diff --git a/block/blk-crypto.c b/block/blk-crypto.c
index 4d760b092deb..e5bc3c7a405b 100644
--- a/block/blk-crypto.c
+++ b/block/blk-crypto.c
@@ -19,6 +19,12 @@
#include "blk-crypto-internal.h"
const struct blk_crypto_mode blk_crypto_modes[] = {
+ [BLK_ENCRYPTION_MODE_AES_128_XTS] = {
+ .name = "AES-128-XTS",
+ .cipher_str = "xts(aes)",
+ .keysize = 32,
+ .ivsize = 16,
+ },
[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
.name = "AES-256-XTS",
.cipher_str = "xts(aes)",
@@ -43,6 +49,18 @@ const struct blk_crypto_mode blk_crypto_modes[] = {
.keysize = 32,
.ivsize = 16,
},
+ [BLK_ENCRYPTION_MODE_AES_128_CBC] = {
+ .name = "AES-128-CBC",
+ .cipher_str = "cbc(aes)",
+ .keysize = 16,
+ .ivsize = 16,
+ },
+ [BLK_ENCRYPTION_MODE_AES_256_CBC] = {
+ .name = "AES-256-CBC",
+ .cipher_str = "cbc(aes)",
+ .keysize = 32,
+ .ivsize = 16,
+ },
};
/*
@@ -106,6 +124,7 @@ void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key,
bio->bi_crypt_context = bc;
}
+EXPORT_SYMBOL_GPL(bio_crypt_set_ctx);
void __bio_crypt_free_ctx(struct bio *bio)
{
@@ -356,6 +375,7 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
return 0;
}
+EXPORT_SYMBOL_GPL(blk_crypto_init_key);
bool blk_crypto_config_supported_natively(struct block_device *bdev,
const struct blk_crypto_config *cfg)
@@ -398,6 +418,7 @@ int blk_crypto_start_using_key(struct block_device *bdev,
return 0;
return blk_crypto_fallback_start_using_mode(key->crypto_cfg.crypto_mode);
}
+EXPORT_SYMBOL_GPL(blk_crypto_start_using_key);
/**
* blk_crypto_evict_key() - Evict a blk_crypto_key from a block_device
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 1e9db8e4acdf..272a6a3274bb 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -270,6 +270,14 @@ config DM_CRYPT
If unsure, say N.
+config DM_INLINE_CRYPT
+	tristate "Inline crypt target support"
+	depends on BLK_DEV_DM || COMPILE_TEST
+	select BLK_INLINE_ENCRYPTION
+	help
+	  This inline crypt device-mapper target allows you to create a
+	  device that transparently encrypts the data on it using an
+	  inline crypto hardware engine.
+
config DM_SNAPSHOT
tristate "Snapshot target"
depends on BLK_DEV_DM
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 476a214e4bdc..0e09b7665803 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -51,6 +51,7 @@ obj-$(CONFIG_DM_UNSTRIPED) += dm-unstripe.o
obj-$(CONFIG_DM_BUFIO) += dm-bufio.o
obj-$(CONFIG_DM_BIO_PRISON) += dm-bio-prison.o
obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
+obj-$(CONFIG_DM_INLINE_CRYPT) += dm-inline-crypt.o
obj-$(CONFIG_DM_DELAY) += dm-delay.o
obj-$(CONFIG_DM_DUST) += dm-dust.o
obj-$(CONFIG_DM_FLAKEY) += dm-flakey.o
diff --git a/drivers/md/dm-inline-crypt.c b/drivers/md/dm-inline-crypt.c
new file mode 100644
index 000000000000..e94f86a3a5e0
--- /dev/null
+++ b/drivers/md/dm-inline-crypt.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024, Qualcomm Innovation Center, Inc. All rights reserved
+ *
+ * Based on work by Israel Rukshin file: dm-crypt.c
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/crypto.h>
+#include <linux/blk-crypto.h>
+#include <linux/device-mapper.h>
+
+#define DM_MSG_PREFIX "inline-crypt"
+
+/* Per-target state for one inline-crypt mapping. */
+struct inlinecrypt_config {
+	struct dm_dev *dev;		/* underlying block device */
+	sector_t start;			/* first sector of the mapping on @dev */
+	u64 iv_offset;			/* added to the sector number to form the DUN */
+	unsigned int iv_size;		/* passed as dun_bytes to blk_crypto_init_key() — TODO confirm */
+	unsigned short sector_size;	/* encryption data-unit size in bytes */
+	unsigned char sector_shift;	/* log2(sector_size / 512); always 0 in this driver */
+	unsigned int key_size;		/* raw key length in bytes (hex string len / 2) */
+	enum blk_crypto_mode_num crypto_mode;	/* selected blk-crypto algorithm */
+	struct blk_crypto_key *blk_key;	/* key programmed into the inline engine */
+	u8 key[] __counted_by(key_size);	/* hex-decoded raw key material */
+};
+
+#define DM_CRYPT_DEFAULT_MAX_READ_SIZE 131072
+#define DM_CRYPT_DEFAULT_MAX_WRITE_SIZE 131072
+
+/*
+ * Largest I/O this target accepts per bio, in 512-byte sectors.
+ * Reads and writes have separate byte limits; the result is kept
+ * aligned to both the device and target block sizes.
+ */
+static unsigned int get_max_request_size(struct inlinecrypt_config *cc, bool wrt)
+{
+	unsigned int limit, align;
+
+	if (wrt) {
+		limit = DM_CRYPT_DEFAULT_MAX_WRITE_SIZE;
+		/* A write bio can carry at most BIO_MAX_VECS pages. */
+		if (unlikely(limit > BIO_MAX_VECS << PAGE_SHIFT))
+			limit = BIO_MAX_VECS << PAGE_SHIFT;
+	} else {
+		limit = DM_CRYPT_DEFAULT_MAX_READ_SIZE;
+	}
+
+	align = max(bdev_logical_block_size(cc->dev->bdev),
+		    (unsigned int)cc->sector_size);
+	limit = round_down(limit, align);
+	if (unlikely(!limit))
+		limit = align;
+
+	return limit >> SECTOR_SHIFT;
+}
+
+/*
+ * Map the requested cipher to a blk-crypto mode.
+ *
+ * The table line carries "cipher-chainmode-ivmode" (e.g. "aes-xts-plain64"),
+ * which the constructor rewrites as "chainmode(cipher)" — i.e. "xts(aes)" or
+ * "cbc(aes)".  The previous code only matched "xts(aes128)"-style strings
+ * that the constructor never produces, so every table load failed; accept
+ * the generated form and pick the AES variant from the supplied key length
+ * (XTS consumes a double-length key: 64 raw bytes for AES-256-XTS).
+ *
+ * Returns 0 on success, -EINVAL (with ti->error set) for an unsupported
+ * cipher.
+ */
+static int crypt_select_inline_crypt_mode(struct dm_target *ti, char *cipher,
+					  char *ivmode)
+{
+	struct inlinecrypt_config *cc = ti->private;
+
+	if (strcmp(cipher, "xts(aes128)") == 0) {
+		cc->crypto_mode = BLK_ENCRYPTION_MODE_AES_128_XTS;
+	} else if (strcmp(cipher, "xts(aes256)") == 0) {
+		cc->crypto_mode = BLK_ENCRYPTION_MODE_AES_256_XTS;
+	} else if (strcmp(cipher, "cbc(aes128)") == 0) {
+		cc->crypto_mode = BLK_ENCRYPTION_MODE_AES_128_CBC;
+	} else if (strcmp(cipher, "cbc(aes256)") == 0) {
+		cc->crypto_mode = BLK_ENCRYPTION_MODE_AES_256_CBC;
+	} else if (strcmp(cipher, "xts(aes)") == 0) {
+		/* XTS: 32 raw key bytes -> AES-128, 64 -> AES-256. */
+		cc->crypto_mode = (cc->key_size == 64) ?
+			BLK_ENCRYPTION_MODE_AES_256_XTS :
+			BLK_ENCRYPTION_MODE_AES_128_XTS;
+	} else if (strcmp(cipher, "cbc(aes)") == 0) {
+		/* CBC: 16 raw key bytes -> AES-128, 32 -> AES-256. */
+		cc->crypto_mode = (cc->key_size == 32) ?
+			BLK_ENCRYPTION_MODE_AES_256_CBC :
+			BLK_ENCRYPTION_MODE_AES_128_CBC;
+	} else {
+		ti->error = "Invalid cipher for inline_crypt";
+		return -EINVAL;
+	}
+
+	/* Handed to blk_crypto_init_key() as the DUN byte count. */
+	cc->iv_size = 4;
+
+	return 0;
+}
+
+/*
+ * Allocate a blk_crypto_key for cc->key and start using it on the
+ * underlying block device.  On success cc->blk_key is valid; on failure
+ * it is freed, reset to NULL, and an errno is returned.
+ */
+static int crypt_prepare_inline_crypt_key(struct inlinecrypt_config *cc)
+{
+	int ret;
+
+	cc->blk_key = kzalloc(sizeof(*cc->blk_key), GFP_KERNEL);
+	if (!cc->blk_key)
+		return -ENOMEM;
+
+	/*
+	 * NOTE(review): cc->iv_size (4) is passed in the dun_bytes slot and
+	 * cc->sector_size as the data-unit size — confirm both against the
+	 * blk_crypto_init_key() prototype for this kernel version.
+	 */
+	ret = blk_crypto_init_key(cc->blk_key, cc->key, cc->crypto_mode,
+				  cc->iv_size, cc->sector_size);
+	if (ret) {
+		DMERR("Failed to init inline encryption key");
+		goto bad_key;
+	}
+
+	/* Registers the key with the device (may enable a fallback path). */
+	ret = blk_crypto_start_using_key(cc->dev->bdev, cc->blk_key);
+	if (ret) {
+		DMERR("Failed to use inline encryption key");
+		goto bad_key;
+	}
+
+	return 0;
+bad_key:
+	/* kfree_sensitive() zeroizes the key material before freeing. */
+	kfree_sensitive(cc->blk_key);
+	cc->blk_key = NULL;
+	return ret;
+}
+
+/* Evict the key from the inline crypto hardware and scrub our copy. */
+static void crypt_destroy_inline_crypt_key(struct inlinecrypt_config *cc)
+{
+	if (!cc->blk_key)
+		return;
+
+	blk_crypto_evict_key(cc->dev->bdev, cc->blk_key);
+	kfree_sensitive(cc->blk_key);
+	cc->blk_key = NULL;
+}
+
+/*
+ * Remap @bio to the underlying device, attach the inline-encryption
+ * context (key + data-unit number), and submit it.
+ */
+static void crypt_inline_encrypt_submit(struct dm_target *ti, struct bio *bio)
+{
+	struct inlinecrypt_config *cc = ti->private;
+	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
+
+	bio_set_dev(bio, cc->dev->bdev);
+	if (bio_sectors(bio)) {
+		memset(dun, 0, sizeof(dun));
+		bio->bi_iter.bi_sector = cc->start +
+			dm_target_offset(ti, bio->bi_iter.bi_sector);
+		/*
+		 * The DUN is a CPU-native u64.  The previous le64_to_cpu()
+		 * here would byte-swap the sector number on big-endian
+		 * hosts, producing the wrong IV.
+		 */
+		dun[0] = bio->bi_iter.bi_sector + cc->iv_offset;
+		bio_crypt_set_ctx(bio, cc->blk_key, dun, GFP_KERNEL);
+	}
+
+	submit_bio_noacct(bio);
+}
+
+/*
+ * (Re)program the inline crypto key after cc->key changed.  Any
+ * previously programmed key is evicted first.
+ */
+static int inlinecrypt_setkey(struct inlinecrypt_config *cc)
+{
+	crypt_destroy_inline_crypt_key(cc);
+
+	/* The old unreachable "return 0;" after this return was removed. */
+	return crypt_prepare_inline_crypt_key(cc);
+}
+
+/*
+ * Decode the hex key from the table line into cc->key and program it.
+ * The caller's hex string is wiped afterwards so the key does not linger
+ * in a buffer we do not control (same convention as dm-crypt).
+ *
+ * Returns 0 on success, -EINVAL on a malformed key, or the error from
+ * inlinecrypt_setkey().
+ */
+static int inlinecrypt_set_key(struct inlinecrypt_config *cc, char *key)
+{
+	int r = -EINVAL;
+	int key_string_len = strlen(key);
+
+	/* Reject an empty key or a hex string with a dangling nibble. */
+	if (!cc->key_size || (key_string_len & 1))
+		goto out;
+
+	/* Decode key from its hex representation. */
+	if (hex2bin(cc->key, key, cc->key_size) < 0)
+		goto out;
+
+	r = inlinecrypt_setkey(cc);
+out:
+	/* Overwrite the caller-supplied hex key; length is preserved. */
+	memset(key, '0', key_string_len);
+
+	return r;
+}
+
+/*
+ * Destructor: evict the key, drop the device reference and free the
+ * context.  Safe to call with a partially constructed target.
+ */
+static void inlinecrypt_dtr(struct dm_target *ti)
+{
+	struct inlinecrypt_config *cc = ti->private;
+
+	ti->private = NULL;
+	if (!cc)
+		return;
+
+	crypt_destroy_inline_crypt_key(cc);
+	if (cc->dev)
+		dm_put_device(ti, cc->dev);
+	kfree_sensitive(cc);
+}
+
+/*
+ * Construct an inline-crypt mapping.
+ * Arguments: <cipher> <hex key> <iv_offset> <device path> <start sector>
+ *   e.g.: aes-xts-plain64 <hex key> 0 /dev/sda 0
+ *
+ * Fixes over the previous version: the return value of
+ * crypt_select_inline_crypt_mode() is now checked; the key-setting
+ * failure path goes through cleanup instead of a bare return (which
+ * leaked cc, the dm device and cipher_api — dm core does not call the
+ * dtr when the ctr fails); cipher_api is freed on every path; specific
+ * ti->error messages are no longer overwritten by a generic one.
+ */
+static int inlinecrypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+	struct inlinecrypt_config *cc;
+	char *cipher_api = NULL;
+	char *cipher, *chainmode;
+	unsigned long long tmpll;
+	char *ivmode;
+	int key_size;
+	char dummy;
+	int ret;
+
+	if (argc < 5) {
+		ti->error = "Not enough arguments";
+		return -EINVAL;
+	}
+
+	/* Two hex digits per raw key byte. */
+	key_size = strlen(argv[1]) >> 1;
+
+	cc = kzalloc(struct_size(cc, key, key_size), GFP_KERNEL);
+	if (!cc) {
+		ti->error = "Cannot allocate encryption context";
+		return -ENOMEM;
+	}
+	cc->key_size = key_size;
+	cc->sector_size = (1 << SECTOR_SHIFT);
+	cc->sector_shift = 0;
+
+	ti->private = cc;
+
+	ret = -EINVAL;
+	if ((sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) ||
+	    (tmpll & ((cc->sector_size >> SECTOR_SHIFT) - 1))) {
+		ti->error = "Invalid iv_offset sector";
+		goto bad;
+	}
+	cc->iv_offset = tmpll;
+
+	ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table),
+			    &cc->dev);
+	if (ret) {
+		ti->error = "Device lookup failed";
+		goto bad;
+	}
+
+	ret = -EINVAL;
+	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1 ||
+	    tmpll != (sector_t)tmpll) {
+		ti->error = "Invalid device sector";
+		goto bad;
+	}
+	cc->start = tmpll;
+
+	/* Split "cipher-chainmode-ivmode", e.g. "aes-xts-plain64". */
+	cipher = strsep(&argv[0], "-");
+	chainmode = strsep(&argv[0], "-");
+	ivmode = strsep(&argv[0], "-");
+	if (!cipher || !chainmode) {
+		ti->error = "Invalid cipher specification";
+		goto bad;
+	}
+
+	ret = -ENOMEM;
+	cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
+	if (!cipher_api) {
+		ti->error = "Cannot allocate cipher string";
+		goto bad;
+	}
+
+	ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
+		       "%s(%s)", chainmode, cipher);
+	if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) {
+		ti->error = "Cannot build cipher string";
+		ret = -ENOMEM;
+		goto bad;
+	}
+
+	ret = crypt_select_inline_crypt_mode(ti, cipher_api, ivmode);
+	if (ret < 0)
+		goto bad;
+
+	/* Initialize and set key. */
+	ret = inlinecrypt_set_key(cc, argv[1]);
+	if (ret < 0) {
+		ti->error = "Error decoding and setting key";
+		goto bad;
+	}
+
+	kfree(cipher_api);	/* only needed during mode selection */
+	return 0;
+bad:
+	kfree(cipher_api);
+	inlinecrypt_dtr(ti);
+	return ret;
+}
+
+/*
+ * Map incoming bios: remap flush/discard directly, split oversized bios,
+ * enforce data-unit alignment, then submit with the inline-crypt context.
+ */
+static int inlinecrypt_map(struct dm_target *ti, struct bio *bio)
+{
+	struct inlinecrypt_config *cc = ti->private;
+	unsigned int max_sectors;
+
+	/*
+	 * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues.
+	 * - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight
+	 * - for REQ_OP_DISCARD caller must use flush if IO ordering matters
+	 */
+	if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
+		     bio_op(bio) == REQ_OP_DISCARD)) {
+		bio_set_dev(bio, cc->dev->bdev);
+		if (bio_sectors(bio))
+			bio->bi_iter.bi_sector = cc->start +
+				dm_target_offset(ti, bio->bi_iter.bi_sector);
+		return DM_MAPIO_REMAPPED;
+	}
+
+	/*
+	 * Check if bio is too large, split as needed.
+	 */
+	max_sectors = get_max_request_size(cc, bio_data_dir(bio) == WRITE);
+	if (unlikely(bio_sectors(bio) > max_sectors))
+		dm_accept_partial_bio(bio, max_sectors);
+
+	/*
+	 * Ensure that bio is a multiple of the internal sector encryption
+	 * size and is aligned to this size as defined in IO hints.
+	 */
+	if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
+		return DM_MAPIO_KILL;
+
+	if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
+		return DM_MAPIO_KILL;
+
+	crypt_inline_encrypt_submit(ti, bio);
+	return DM_MAPIO_SUBMITTED;
+}
+
+/* Report the single underlying device span to the device-mapper core. */
+static int inlinecrypt_iterate_devices(struct dm_target *ti,
+				       iterate_devices_callout_fn fn, void *data)
+{
+	struct inlinecrypt_config *cc = ti->private;
+
+	return fn(ti, cc->dev, cc->start, ti->len, data);
+}
+
+/* device-mapper target registration; registered via module_dm() below. */
+static struct target_type inlinecrypt_target = {
+	.name = "inline-crypt",
+	.version = {1, 0, 0},
+	.module = THIS_MODULE,
+	.ctr = inlinecrypt_ctr,
+	.dtr = inlinecrypt_dtr,
+	.map = inlinecrypt_map,
+	.iterate_devices = inlinecrypt_iterate_devices,
+};
+module_dm(inlinecrypt);
+
+MODULE_AUTHOR("Md Sadre Alam <quic_mdalam@quicinc.com>");
+MODULE_DESCRIPTION(DM_NAME " target for inline encryption / decryption");
+MODULE_LICENSE("GPL");
diff --git a/include/linux/blk-crypto.h b/include/linux/blk-crypto.h
index 5e5822c18ee4..da503a05c5f6 100644
--- a/include/linux/blk-crypto.h
+++ b/include/linux/blk-crypto.h
@@ -10,10 +10,13 @@
enum blk_crypto_mode_num {
BLK_ENCRYPTION_MODE_INVALID,
+ BLK_ENCRYPTION_MODE_AES_128_XTS,
BLK_ENCRYPTION_MODE_AES_256_XTS,
BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV,
BLK_ENCRYPTION_MODE_ADIANTUM,
BLK_ENCRYPTION_MODE_SM4_XTS,
+ BLK_ENCRYPTION_MODE_AES_128_CBC,
+ BLK_ENCRYPTION_MODE_AES_256_CBC,
BLK_ENCRYPTION_MODE_MAX,
};
--
2.34.1
Hi The patch seems OK. Should it go in via the device mapper tree or the block layer tree? On Mon, 16 Sep 2024, Md Sadre Alam wrote: > +#define DM_CRYPT_DEFAULT_MAX_READ_SIZE 131072 > +#define DM_CRYPT_DEFAULT_MAX_WRITE_SIZE 131072 > + > +static unsigned int get_max_request_size(struct inlinecrypt_config *cc, bool wrt) > +{ > + unsigned int val, sector_align; > + > + val = !wrt ? DM_CRYPT_DEFAULT_MAX_READ_SIZE : DM_CRYPT_DEFAULT_MAX_WRITE_SIZE; > + if (wrt) { > + if (unlikely(val > BIO_MAX_VECS << PAGE_SHIFT)) > + val = BIO_MAX_VECS << PAGE_SHIFT; > + } > + sector_align = max(bdev_logical_block_size(cc->dev->bdev), (unsigned int)cc->sector_size); > + val = round_down(val, sector_align); > + if (unlikely(!val)) > + val = sector_align; > + return val >> SECTOR_SHIFT; > +} This piece of code was copied from the dm-crypt target. For dm-crypt, I was actually benchmarking the performance for various DM_CRYPT_DEFAULT_MAX_READ_SIZE and DM_CRYPT_DEFAULT_MAX_WRITE_SIZE values and I selected the values that resulted in the best performance. You should benchmark it too to find the optimal I/O size. Perhaps you find out that there is no need to split big requests and this piece of code can be dropped. > + /* Omit the key for now. */ > + DMEMIT("%s - %llu %s %llu", ctx->cipher_string, ctx->iv_offset, > + ctx->dev->name, (unsigned long long)ctx->start); What if someone reloads the table? I think you should display the key. dmsetup does not display the key if the "--showkeys" parameter is not specified. Mikulas
Hi, On Mon, Sep 16, 2024 at 02:27:39PM +0530, Md Sadre Alam wrote: > QCOM SDCC controller supports Inline Crypto Engine > This driver will enables inline encryption/decryption > for ICE. The algorithm supported by ICE are XTS(AES) > and CBC(AES). > > Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com> > --- > > Change in [v2] > > * Added dm-inlinecrypt driver support > > * squash the patch blk-crypto: Add additional algo modes for Inline > encryption and md: dm-crypt: Add additional algo modes for inline > encryption and added in this > > Change in [v1] > > * This patch was not included in [v1] > > block/blk-crypto.c | 21 +++ > drivers/md/Kconfig | 8 + > drivers/md/Makefile | 1 + > drivers/md/dm-inline-crypt.c | 316 +++++++++++++++++++++++++++++++++++ > include/linux/blk-crypto.h | 3 + > 5 files changed, 349 insertions(+) > create mode 100644 drivers/md/dm-inline-crypt.c Thanks for working on this! Android uses a similar device-mapper target called dm-default-key (https://android.googlesource.com/kernel/common/+/refs/heads/android-mainline/drivers/md/dm-default-key.c), and I've been looking for the best way to get the functionality upstream. The main challenge is that dm-default-key is integrated with fscrypt, such that if fscrypt encrypts the data, then the data isn't also encrypted with the block device key. There are also cases such as f2fs garbage collection in which filesystems read/write raw data without en/decryption by any key. So essentially a passthrough mode is supported on individual I/O requests. It looks like this patch not only does not support that, but it ignores the existence of fscrypt (or any other use of inline encryption by filesystems) entirely, and overrides any filesystem-provided key with the block device's. At the very least, this case would need to be explicitly not supported initially, i.e. dm-inlinecrypt would error out if the upper layer already provided a key. 
But I would like there to be an agreed-upon way to extend the code to support the pass-through mode, so that filesystem and block device level encryption work properly together. It can indeed be done with a device-mapper target (as Android does already), whether it's called "dm-inlinecrypt" or "dm-default-key", but not in a standalone way: pass-through requests also require an addition to struct bio and some support in block and filesystem code. Previously, people have said that supporting this functionality natively in the block layer would be a better fit than a dm target (e.g., see the thread https://lore.kernel.org/all/1658316391-13472-1-git-send-email-israelr@nvidia.com/T/#u). I'd appreciate people's feedback on which approach they'd prefer. Anyway, assuming the device-mapper target approach, I also have some other feedback on this patch: New algorithms should not be added in the same patch as a new dm target. Also, I do not see why there is any need for the new algorithms you are adding (AES-128-XTS, AES-128-CBC, and AES-256-CBC). XTS is preferable to CBC, and AES-256 is preferable to AES-128. AES-256-XTS is already supported, both by blk-crypto and by Qualcomm ICE. So you should just use AES-256-XTS. There are also a lot of miscellaneous issues with the proposed code. Missing ->io_hints and ->status methods, truncating IVs to 32 bits, unnecessarily using a custom algorithm name syntax that doesn't make the IV generation method explicit, unnecessarily splitting bios, incrementing IVs every 512 bytes instead of each sector, etc. I can comment on all of these in detail if you want, but to start it might be helpful to just check out dm-default-key (https://android.googlesource.com/kernel/common/+/refs/heads/android-mainline/drivers/md/dm-default-key.c) and maybe base your code on that, as it has handled these issues already. 
E.g., its syntax is aligned with dm-crypt's, it implements ->io_hints and ->status, and it calculates the maximum DUN correctly so that it can be determined correctly whether the inline encryption hardware can be used or not. Finally, the commit message needs to summarize what the patch does and what its motivation is. Currently it just talks about the Qualcomm ICE driver and doesn't actually say anything about dm-inlinecrypt. Yes, the motivation for dm-inlinecrypt involves being able to take advantage of inline encryption hardware such as Qualcomm ICE, but it's not explained. Thanks, - Eric
On Sat, Sep 21, 2024 at 11:55:19AM -0700, Eric Biggers wrote: > (https://android.googlesource.com/kernel/common/+/refs/heads/android-mainline/drivers/md/dm-default-key.c), > and I've been looking for the best way to get the functionality upstream. The > main challenge is that dm-default-key is integrated with fscrypt, such that if > fscrypt encrypts the data, then the data isn't also encrypted with the block > device key. There are also cases such as f2fs garbage collection in which > filesystems read/write raw data without en/decryption by any key. So > essentially a passthrough mode is supported on individual I/O requests. Adding a default key is not the job of a block remapping driver. You'll need to fit that into the file system and/or file system level helpers. > > It looks like this patch not only does not support that, but it ignores the > existence of fscrypt (or any other use of inline encryption by filesystems) > entirely, and overrides any filesystem-provided key with the block device's. At > the very least, this case would need to be explicitly not supported initially, > i.e. dm-inlinecrypt would error out if the upper layer already provided a key. I agree that we have an incompatibility here, but simply erroring out feels like the wrong way to approach the stacking. If a stacking driver consumes the inline encryption capability it must not advertise it to the upper layers.
On 9/24/24 03:44, Christoph Hellwig wrote: > On Sat, Sep 21, 2024 at 11:55:19AM -0700, Eric Biggers wrote: >> (https://android.googlesource.com/kernel/common/+/refs/heads/android-mainline/drivers/md/dm-default-key.c), >> and I've been looking for the best way to get the functionality upstream. The >> main challenge is that dm-default-key is integrated with fscrypt, such that if >> fscrypt encrypts the data, then the data isn't also encrypted with the block >> device key. There are also cases such as f2fs garbage collection in which >> filesystems read/write raw data without en/decryption by any key. So >> essentially a passthrough mode is supported on individual I/O requests. > Adding a default key is not the job of a block remapping driver. You'll > need to fit that into the file system and/or file system level helpers. fscrypt isn't the only thing that would use such functionality. If you put it in the filesystem layer then you're only serving fscrypt when there are other usecases that don't involve filesystems at all. Let's say I'm using LVM, so I've got a physical partition that stores a couple different virtual partitions. I can use dm-default-key both underneath the physical partition, and on top of some of the virtual partitions. In such a configuration, the virtual partitions with their own dm-default-key instance get encrypted with their own key and passed through the lower dm-default-key instance onto the hardware. Virtual partitions that lack their own dm-default-key are encrypted once by the lower dm-default-key instance. There's no filesystem involved here, and yet to avoid the cost of double-encryption we need the passthrough functionality of dm-default-key. This scenario is constrained entirely to the block layer. Other usecases involve loopback devices. This is a real scenario of something we do in userspace. I have a loopback file, with a partition table inside where some partitions are encrypted and others are not. 
I would like to store this loopback file in a filesystem that sits on top of a dm-crypt protected partition. With the current capabilities of the kernel, I'd have to double-encrypt. But with dm-default-key, I could encrypt just once. Unlike the previous case, this time there's a layer of filesystem between the block devices, but it still can't do anything: the filesystem that stores the loopback device can't do anything because it has no idea that any encryption is happening. fscrypt isn't being used, nor can it be used since the file is only partially encrypted, so the filesystem is unaware that the contents of the loopback file are encrypted. And the filesystem doesn't know that it's encrypted from below by its block device. So what can the filesystem do if, as far as it can tell, nothing is being encrypted? Best, Adrian
On Thu, Oct 17, 2024 at 11:26:49PM -0400, Adrian Vovk wrote: > Let's say I'm using LVM, so I've got a physical partition that stores a > couple different virtual partitions. I can use dm-default-key both > underneath the physical partition, and on top of some of the virtual > partitions. In such a configuration, the virtual partitions with their own > dm-default-key instance get encrypted with their own key and passed through > the lower dm-default-key instance onto the hardware. Virtual partitions that > lack their own dm-default-key are encrypted once by the lower dm-default-key > instance. There's no filesystem involved here, and yet to avoid the cost of > double-encryption we need the passthrough functionality of dm-default-key. > This scenario is constrained entirely to the block layer. So just run a target on each partition. > Other usecases involve loopback devices. This is a real scenario of > something we do in userspace. I have a loopback file, with a partition table > inside where some partitions are encrypted and others are not. I would like > to store this loopback file in a filesystem that sits on top of a dm-crypt > protected partition. With the current capabilities of the kernel, I'd have > to double-encrypt. But with dm-default-key, I could encrypt just once. Which would completely break the security model of the underlying file system, because it can't trust what you do in the loop device. This is the prime example of why allowing higher layers to skip encryption is a no-go.
On Tue, Sep 24, 2024 at 12:44:53AM -0700, Christoph Hellwig wrote: > On Sat, Sep 21, 2024 at 11:55:19AM -0700, Eric Biggers wrote: > > (https://android.googlesource.com/kernel/common/+/refs/heads/android-mainline/drivers/md/dm-default-key.c), > > and I've been looking for the best way to get the functionality upstream. The > > main challenge is that dm-default-key is integrated with fscrypt, such that if > > fscrypt encrypts the data, then the data isn't also encrypted with the block > > device key. There are also cases such as f2fs garbage collection in which > > filesystems read/write raw data without en/decryption by any key. So > > essentially a passthrough mode is supported on individual I/O requests. > > Adding a default key is not the job of a block remapping driver. You'll > need to fit that into the file system and/or file system level helpers. What about a block device ioctl, as was previously proposed (https://lore.kernel.org/linux-block/1658316391-13472-1-git-send-email-israelr@nvidia.com/T/#u)? > > It looks like this patch not only does not support that, but it ignores the > > existence of fscrypt (or any other use of inline encryption by filesystems) > > entirely, and overrides any filesystem-provided key with the block device's. At > > the very least, this case would need to be explicitly not supported initially, > > i.e. dm-inlinecrypt would error out if the upper layer already provided a key. > > I agree that we have an incompatibility here, but simply erroring out > feels like the wrong way to approach the stacking. If a stacking driver > consumes the inline encryption capability it must not advertise it to > the upper layers. Right, I missed that's actually already how it works. The crypto capabilities are only passed through if the target sets DM_TARGET_PASSES_CRYPTO. - Eric
On Tue, Sep 24, 2024 at 03:04:34PM -0700, Eric Biggers wrote: > What about a block device ioctl, as was previously proposed > (https://lore.kernel.org/linux-block/1658316391-13472-1-git-send-email-israelr@nvidia.com/T/#u)? No. This is a file system layer policy and needs to sit entirely above the block layer instead of breaking abstraction boundaries.
Hi Md, kernel test robot noticed the following build warnings: [auto build test WARNING on device-mapper-dm/for-next] [also build test WARNING on axboe-block/for-next linus/master song-md/md-next v6.11 next-20240917] [If your patch is applied to the wrong git tree, kindly drop us a note. And when submitting patch, we suggest to use '--base' as documented in https://git-scm.com/docs/git-format-patch#_base_tree_information] url: https://github.com/intel-lab-lkp/linux/commits/Md-Sadre-Alam/dm-inlinecrypt-Add-inline-encryption-support/20240916-170452 base: https://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm.git for-next patch link: https://lore.kernel.org/r/20240916085741.1636554-2-quic_mdalam%40quicinc.com patch subject: [PATCH v2 1/3] dm-inlinecrypt: Add inline encryption support config: csky-randconfig-r111-20240918 (https://download.01.org/0day-ci/archive/20240918/202409181233.1FrQNVtU-lkp@intel.com/config) compiler: csky-linux-gcc (GCC) 14.1.0 reproduce: (https://download.01.org/0day-ci/archive/20240918/202409181233.1FrQNVtU-lkp@intel.com/reproduce) If you fix the issue in a separate patch/commit (i.e. 
not just a new version of the same patch/commit), kindly add following tags | Reported-by: kernel test robot <lkp@intel.com> | Closes: https://lore.kernel.org/oe-kbuild-all/202409181233.1FrQNVtU-lkp@intel.com/ sparse warnings: (new ones prefixed by >>) >> drivers/md/dm-inline-crypt.c:120:26: sparse: sparse: cast to restricted __le64 drivers/md/dm-inline-crypt.c:214:32: sparse: sparse: self-comparison always evaluates to false vim +120 drivers/md/dm-inline-crypt.c 109 110 static void crypt_inline_encrypt_submit(struct dm_target *ti, struct bio *bio) 111 { 112 struct inlinecrypt_config *cc = ti->private; 113 u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE]; 114 115 bio_set_dev(bio, cc->dev->bdev); 116 if (bio_sectors(bio)) { 117 memset(dun, 0, BLK_CRYPTO_MAX_IV_SIZE); 118 bio->bi_iter.bi_sector = cc->start + 119 dm_target_offset(ti, bio->bi_iter.bi_sector); > 120 dun[0] = le64_to_cpu(bio->bi_iter.bi_sector + cc->iv_offset); 121 bio_crypt_set_ctx(bio, cc->blk_key, dun, GFP_KERNEL); 122 } 123 124 submit_bio_noacct(bio); 125 } 126 -- 0-DAY CI Kernel Test Service https://github.com/intel/lkp-tests/wiki
Hi Md, kernel test robot noticed the following build warnings: [auto build test WARNING on device-mapper-dm/for-next] [also build test WARNING on axboe-block/for-next linus/master song-md/md-next v6.11 next-20240916] [If your patch is applied to the wrong git tree, kindly drop us a note. And when submitting patch, we suggest to use '--base' as documented in https://git-scm.com/docs/git-format-patch#_base_tree_information] url: https://github.com/intel-lab-lkp/linux/commits/Md-Sadre-Alam/dm-inlinecrypt-Add-inline-encryption-support/20240916-170452 base: https://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm.git for-next patch link: https://lore.kernel.org/r/20240916085741.1636554-2-quic_mdalam%40quicinc.com patch subject: [PATCH v2 1/3] dm-inlinecrypt: Add inline encryption support config: x86_64-allyesconfig (https://download.01.org/0day-ci/archive/20240917/202409171440.qx2iOkY3-lkp@intel.com/config) compiler: clang version 18.1.8 (https://github.com/llvm/llvm-project 3b5b5c1ec4a3095ab096dd780e84d7ab81f3d7ff) reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240917/202409171440.qx2iOkY3-lkp@intel.com/reproduce) If you fix the issue in a separate patch/commit (i.e. 
not just a new version of the same patch/commit), kindly add following tags | Reported-by: kernel test robot <lkp@intel.com> | Closes: https://lore.kernel.org/oe-kbuild-all/202409171440.qx2iOkY3-lkp@intel.com/ All warnings (new ones prefixed by >>): >> drivers/md/dm-inline-crypt.c:198:6: warning: variable 'ret' is used uninitialized whenever 'if' condition is true [-Wsometimes-uninitialized] 198 | if ((sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) || | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 199 | (tmpll & ((cc->sector_size >> SECTOR_SHIFT) - 1))) { | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/md/dm-inline-crypt.c:250:9: note: uninitialized use occurs here 250 | return ret; | ^~~ drivers/md/dm-inline-crypt.c:198:2: note: remove the 'if' if its condition is always false 198 | if ((sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) || | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 199 | (tmpll & ((cc->sector_size >> SECTOR_SHIFT) - 1))) { | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 200 | ti->error = "Invalid iv_offset sector"; | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 201 | goto bad; | ~~~~~~~~~ 202 | } | ~ >> drivers/md/dm-inline-crypt.c:198:6: warning: variable 'ret' is used uninitialized whenever '||' condition is true [-Wsometimes-uninitialized] 198 | if ((sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) || | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/md/dm-inline-crypt.c:250:9: note: uninitialized use occurs here 250 | return ret; | ^~~ drivers/md/dm-inline-crypt.c:198:6: note: remove the '||' if its condition is always false 198 | if ((sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) || | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/md/dm-inline-crypt.c:178:9: note: initialize the variable 'ret' to silence this warning 178 | int ret; | ^ | = 0 2 warnings generated. 
vim +198 drivers/md/dm-inline-crypt.c 168 169 static int inlinecrypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) 170 { 171 struct inlinecrypt_config *cc; 172 char *cipher_api = NULL; 173 char *cipher, *chainmode; 174 unsigned long long tmpll; 175 char *ivmode; 176 int key_size; 177 char dummy; 178 int ret; 179 180 if (argc < 5) { 181 ti->error = "Not enough arguments"; 182 return -EINVAL; 183 } 184 185 key_size = strlen(argv[1]) >> 1; 186 187 cc = kzalloc(struct_size(cc, key, key_size), GFP_KERNEL); 188 if (!cc) { 189 ti->error = "Cannot allocate encryption context"; 190 return -ENOMEM; 191 } 192 cc->key_size = key_size; 193 cc->sector_size = (1 << SECTOR_SHIFT); 194 cc->sector_shift = 0; 195 196 ti->private = cc; 197 > 198 if ((sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) || 199 (tmpll & ((cc->sector_size >> SECTOR_SHIFT) - 1))) { 200 ti->error = "Invalid iv_offset sector"; 201 goto bad; 202 } 203 cc->iv_offset = tmpll; 204 205 ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), 206 &cc->dev); 207 if (ret) { 208 ti->error = "Device lookup failed"; 209 goto bad; 210 } 211 212 ret = -EINVAL; 213 if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1 || 214 tmpll != (sector_t)tmpll) { 215 ti->error = "Invalid device sector"; 216 goto bad; 217 } 218 219 cc->start = tmpll; 220 221 cipher = strsep(&argv[0], "-"); 222 chainmode = strsep(&argv[0], "-"); 223 ivmode = strsep(&argv[0], "-"); 224 225 cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL); 226 if (!cipher_api) 227 goto bad; 228 229 ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME, 230 "%s(%s)", chainmode, cipher); 231 if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) { 232 kfree(cipher_api); 233 ret = -ENOMEM; 234 goto bad; 235 } 236 237 ret = crypt_select_inline_crypt_mode(ti, cipher_api, ivmode); 238 239 /* Initialize and set key */ 240 ret = inlinecrypt_set_key(cc, argv[1]); 241 if (ret < 0) { 242 ti->error = "Error decoding and setting key"; 243 return ret; 244 } 245 246 return 0; 247 bad: 
248 ti->error = "Error in inlinecrypt mapping"; 249 inlinecrypt_dtr(ti); 250 return ret; 251 } 252 -- 0-DAY CI Kernel Test Service https://github.com/intel/lkp-tests/wiki
Hi Md, kernel test robot noticed the following build errors: [auto build test ERROR on device-mapper-dm/for-next] [also build test ERROR on axboe-block/for-next linus/master song-md/md-next v6.11 next-20240916] [If your patch is applied to the wrong git tree, kindly drop us a note. And when submitting patch, we suggest to use '--base' as documented in https://git-scm.com/docs/git-format-patch#_base_tree_information] url: https://github.com/intel-lab-lkp/linux/commits/Md-Sadre-Alam/dm-inlinecrypt-Add-inline-encryption-support/20240916-170452 base: https://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm.git for-next patch link: https://lore.kernel.org/r/20240916085741.1636554-2-quic_mdalam%40quicinc.com patch subject: [PATCH v2 1/3] dm-inlinecrypt: Add inline encryption support config: openrisc-randconfig-r062-20240917 (https://download.01.org/0day-ci/archive/20240917/202409171209.aEtxsPez-lkp@intel.com/config) compiler: or1k-linux-gcc (GCC) 14.1.0 reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240917/202409171209.aEtxsPez-lkp@intel.com/reproduce) If you fix the issue in a separate patch/commit (i.e. 
not just a new version of the same patch/commit), kindly add following tags | Reported-by: kernel test robot <lkp@intel.com> | Closes: https://lore.kernel.org/oe-kbuild-all/202409171209.aEtxsPez-lkp@intel.com/ All errors (new ones prefixed by >>): drivers/md/dm-inline-crypt.c: In function 'crypt_prepare_inline_crypt_key': >> drivers/md/dm-inline-crypt.c:81:15: error: implicit declaration of function 'blk_crypto_init_key' [-Wimplicit-function-declaration] 81 | ret = blk_crypto_init_key(cc->blk_key, cc->key, cc->crypto_mode, | ^~~~~~~~~~~~~~~~~~~ >> drivers/md/dm-inline-crypt.c:88:15: error: implicit declaration of function 'blk_crypto_start_using_key' [-Wimplicit-function-declaration] 88 | ret = blk_crypto_start_using_key(cc->dev->bdev, cc->blk_key); | ^~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/md/dm-inline-crypt.c: In function 'crypt_destroy_inline_crypt_key': >> drivers/md/dm-inline-crypt.c:104:17: error: implicit declaration of function 'blk_crypto_evict_key'; did you mean 'blk_crypto_register'? 
[-Wimplicit-function-declaration] 104 | blk_crypto_evict_key(cc->dev->bdev, cc->blk_key); | ^~~~~~~~~~~~~~~~~~~~ | blk_crypto_register drivers/md/dm-inline-crypt.c: In function 'crypt_inline_encrypt_submit': >> drivers/md/dm-inline-crypt.c:121:17: error: implicit declaration of function 'bio_crypt_set_ctx' [-Wimplicit-function-declaration] 121 | bio_crypt_set_ctx(bio, cc->blk_key, dun, GFP_KERNEL); | ^~~~~~~~~~~~~~~~~ vim +/blk_crypto_init_key +81 drivers/md/dm-inline-crypt.c 72 73 static int crypt_prepare_inline_crypt_key(struct inlinecrypt_config *cc) 74 { 75 int ret; 76 77 cc->blk_key = kzalloc(sizeof(*cc->blk_key), GFP_KERNEL); 78 if (!cc->blk_key) 79 return -ENOMEM; 80 > 81 ret = blk_crypto_init_key(cc->blk_key, cc->key, cc->crypto_mode, 82 cc->iv_size, cc->sector_size); 83 if (ret) { 84 DMERR("Failed to init inline encryption key"); 85 goto bad_key; 86 } 87 > 88 ret = blk_crypto_start_using_key(cc->dev->bdev, cc->blk_key); 89 if (ret) { 90 DMERR("Failed to use inline encryption key"); 91 goto bad_key; 92 } 93 94 return 0; 95 bad_key: 96 kfree_sensitive(cc->blk_key); 97 cc->blk_key = NULL; 98 return ret; 99 } 100 101 static void crypt_destroy_inline_crypt_key(struct inlinecrypt_config *cc) 102 { 103 if (cc->blk_key) { > 104 blk_crypto_evict_key(cc->dev->bdev, cc->blk_key); 105 kfree_sensitive(cc->blk_key); 106 cc->blk_key = NULL; 107 } 108 } 109 110 static void crypt_inline_encrypt_submit(struct dm_target *ti, struct bio *bio) 111 { 112 struct inlinecrypt_config *cc = ti->private; 113 u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE]; 114 115 bio_set_dev(bio, cc->dev->bdev); 116 if (bio_sectors(bio)) { 117 memset(dun, 0, BLK_CRYPTO_MAX_IV_SIZE); 118 bio->bi_iter.bi_sector = cc->start + 119 dm_target_offset(ti, bio->bi_iter.bi_sector); 120 dun[0] = le64_to_cpu(bio->bi_iter.bi_sector + cc->iv_offset); > 121 bio_crypt_set_ctx(bio, cc->blk_key, dun, GFP_KERNEL); 122 } 123 124 submit_bio_noacct(bio); 125 } 126 -- 0-DAY CI Kernel Test Service https://github.com/intel/lkp-tests/wiki
© 2016 - 2024 Red Hat, Inc.