drivers/mmc/host/sdhci-msm.c | 71 ++++++++++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+)
Enable Inline Crypto Engine (ICE) support for eMMC devices that operate
without Command Queue Engine (CQE). This allows hardware-accelerated
encryption and decryption for standard (non-CMDQ) requests.
This patch:
- Adds ICE register definitions for non-CMDQ crypto configuration
- Implements a per-request crypto setup via sdhci_msm_ice_cfg()
- Hooks into the request path via mmc_host_ops.request
- Initializes ICE hardware during CQE setup for compatible platforms
With this, non-CMDQ eMMC devices can benefit from inline encryption,
improving performance for encrypted I/O while maintaining compatibility
with existing CQE crypto support.
Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
---
Change in [v2]
* Moved NONCQ_CRYPTO_PARM and NONCQ_CRYPTO_DUN register definitions into
sdhci-msm.c
* Introduced use of GENMASK() and FIELD_PREP() macros for cleaner and more
maintainable bitfield handling in ICE configuration.
* Removed redundant if (!mrq || !cq_host) check from sdhci_msm_ice_cfg()
as both are guaranteed to be valid in the current call path.
* Added assignment of host->mmc_host_ops.request = sdhci_msm_request; to
integrate ICE configuration into the standard request path for non-CMDQ
eMMC devices.
* Removed sdhci_crypto_cfg() from sdhci.c and its invocation in sdhci_request()
Change in [v1]
* Added initial support for Inline Crypto Engine (ICE) on non-CMDQ eMMC
devices.
drivers/mmc/host/sdhci-msm.c | 71 ++++++++++++++++++++++++++++++++++++
1 file changed, 71 insertions(+)
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 4e5edbf2fc9b..483aadaca262 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -157,6 +157,18 @@
#define CQHCI_VENDOR_CFG1 0xA00
#define CQHCI_VENDOR_DIS_RST_ON_CQ_EN (0x3 << 13)
+/* non command queue crypto enable register */
+#define NONCQ_CRYPTO_PARM 0x70
+#define NONCQ_CRYPTO_DUN 0x74
+
+#define DISABLE_CRYPTO BIT(15)
+#define CRYPTO_GENERAL_ENABLE BIT(1)
+#define HC_VENDOR_SPECIFIC_FUNC4 0x260
+#define ICE_HCI_SUPPORT BIT(28)
+
+#define ICE_HCI_PARAM_CCI GENMASK(7, 0)
+#define ICE_HCI_PARAM_CE GENMASK(8, 8)
+
struct sdhci_msm_offset {
u32 core_hc_mode;
u32 core_mci_data_cnt;
@@ -1885,6 +1897,48 @@ static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
#ifdef CONFIG_MMC_CRYPTO
+static int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq,
+ u32 slot)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ struct mmc_host *mmc = msm_host->mmc;
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ unsigned int crypto_params = 0;
+ int key_index = 0;
+ bool bypass = true;
+ u64 dun = 0;
+
+ if (mrq->crypto_ctx) {
+ dun = mrq->crypto_ctx->bc_dun[0];
+ bypass = false;
+ key_index = mrq->crypto_key_slot;
+ }
+
+ crypto_params = FIELD_PREP(ICE_HCI_PARAM_CE, !bypass) |
+ FIELD_PREP(ICE_HCI_PARAM_CCI, key_index);
+
+ cqhci_writel(cq_host, crypto_params, NONCQ_CRYPTO_PARM);
+
+ if (mrq->crypto_ctx)
+ cqhci_writel(cq_host, lower_32_bits(dun), NONCQ_CRYPTO_DUN);
+
+ /* Ensure crypto configuration is written before proceeding */
+ wmb();
+
+ return 0;
+}
+
+static void sdhci_msm_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (mmc->caps2 & MMC_CAP2_CRYPTO)
+ sdhci_msm_ice_cfg(host, mrq, 0);
+
+ sdhci_request(mmc, mrq);
+}
+
static const struct blk_crypto_ll_ops sdhci_msm_crypto_ops; /* forward decl */
static int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host,
@@ -2131,6 +2185,8 @@ static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
struct cqhci_host *cq_host;
bool dma64;
u32 cqcfg;
+ u32 config;
+ u32 ice_cap;
int ret;
/*
@@ -2185,6 +2241,18 @@ static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
if (ret)
goto cleanup;
+ /* Initialize ICE for non-CMDQ eMMC devices */
+ config = sdhci_readl(host, HC_VENDOR_SPECIFIC_FUNC4);
+ config &= ~DISABLE_CRYPTO;
+ sdhci_writel(host, config, HC_VENDOR_SPECIFIC_FUNC4);
+ ice_cap = cqhci_readl(cq_host, CQHCI_CAP);
+ if (ice_cap & ICE_HCI_SUPPORT) {
+ config = cqhci_readl(cq_host, CQHCI_CFG);
+ config |= CRYPTO_GENERAL_ENABLE;
+ cqhci_writel(cq_host, config, CQHCI_CFG);
+ }
+ sdhci_msm_ice_enable(msm_host);
+
dev_info(&pdev->dev, "%s: CQE init: success\n",
mmc_hostname(host->mmc));
return ret;
@@ -2759,6 +2827,9 @@ static int sdhci_msm_probe(struct platform_device *pdev)
msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
+#ifdef CONFIG_MMC_CRYPTO
+ host->mmc_host_ops.request = sdhci_msm_request;
+#endif
/* Set the timeout value to max possible */
host->max_timeout_count = 0xF;
--
2.34.1
On Tue, Oct 14, 2025 at 03:05:03PM +0530, Md Sadre Alam wrote:
> Enable Inline Crypto Engine (ICE) support for eMMC devices that operate
> without Command Queue Engine (CQE).This allows hardware-accelerated
> encryption and decryption for standard (non-CMDQ) requests.
>
> This patch:
> - Adds ICE register definitions for non-CMDQ crypto configuration
> - Implements a per-request crypto setup via sdhci_msm_ice_cfg()
> - Hooks into the request path via mmc_host_ops.request
> - Initializes ICE hardware during CQE setup for compatible platforms
>
> With this, non-CMDQ eMMC devices can benefit from inline encryption,
> improving performance for encrypted I/O while maintaining compatibility
> with existing CQE crypto support.
>
> Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
How was this tested?
> #ifdef CONFIG_MMC_CRYPTO
>
> +static int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq,
> + u32 slot)
> +{
> + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
> + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
> + struct mmc_host *mmc = msm_host->mmc;
> + struct cqhci_host *cq_host = mmc->cqe_private;
> + unsigned int crypto_params = 0;
> + int key_index = 0;
> + bool bypass = true;
> + u64 dun = 0;
> +
> + if (mrq->crypto_ctx) {
> + dun = mrq->crypto_ctx->bc_dun[0];
> + bypass = false;
> + key_index = mrq->crypto_key_slot;
> + }
> +
> + crypto_params = FIELD_PREP(ICE_HCI_PARAM_CE, !bypass) |
> + FIELD_PREP(ICE_HCI_PARAM_CCI, key_index);
> +
> + cqhci_writel(cq_host, crypto_params, NONCQ_CRYPTO_PARM);
> +
> + if (mrq->crypto_ctx)
> + cqhci_writel(cq_host, lower_32_bits(dun), NONCQ_CRYPTO_DUN);
> +
> + /* Ensure crypto configuration is written before proceeding */
> + wmb();
> +
> + return 0;
> +}
This would probably be easier to read with separate code paths for
crypto_ctx != NULL and crypto_ctx == NULL. Also 'bypass' should be
inverted and renamed to 'crypto_enable' to match the bitfield. Or just
prepare the bitfield directly, without an intermediate variable.
> @@ -2131,6 +2185,8 @@ static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
> struct cqhci_host *cq_host;
> bool dma64;
> u32 cqcfg;
> + u32 config;
> + u32 ice_cap;
> int ret;
>
> /*
> @@ -2185,6 +2241,18 @@ static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
> if (ret)
> goto cleanup;
>
> + /* Initialize ICE for non-CMDQ eMMC devices */
> + config = sdhci_readl(host, HC_VENDOR_SPECIFIC_FUNC4);
> + config &= ~DISABLE_CRYPTO;
> + sdhci_writel(host, config, HC_VENDOR_SPECIFIC_FUNC4);
> + ice_cap = cqhci_readl(cq_host, CQHCI_CAP);
> + if (ice_cap & ICE_HCI_SUPPORT) {
> + config = cqhci_readl(cq_host, CQHCI_CFG);
> + config |= CRYPTO_GENERAL_ENABLE;
> + cqhci_writel(cq_host, config, CQHCI_CFG);
> + }
> + sdhci_msm_ice_enable(msm_host);
This is after __sdhci_add_host() was called, which is probably too late.
> +#ifdef CONFIG_MMC_CRYPTO
> + host->mmc_host_ops.request = sdhci_msm_request;
> +#endif
> /* Set the timeout value to max possible */
> host->max_timeout_count = 0xF;
A lot of the code in this patch also seems to actually run on
CQE-capable hosts. Can you explain? Why is it needed? Is there any
change in behavior on them?
- Eric
Hi,
On 10/17/2025 11:08 PM, Eric Biggers wrote:
> On Tue, Oct 14, 2025 at 03:05:03PM +0530, Md Sadre Alam wrote:
>> Enable Inline Crypto Engine (ICE) support for eMMC devices that operate
>> without Command Queue Engine (CQE).This allows hardware-accelerated
>> encryption and decryption for standard (non-CMDQ) requests.
>>
>> This patch:
>> - Adds ICE register definitions for non-CMDQ crypto configuration
>> - Implements a per-request crypto setup via sdhci_msm_ice_cfg()
>> - Hooks into the request path via mmc_host_ops.request
>> - Initializes ICE hardware during CQE setup for compatible platforms
>>
>> With this, non-CMDQ eMMC devices can benefit from inline encryption,
>> improving performance for encrypted I/O while maintaining compatibility
>> with existing CQE crypto support.
>>
>> Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
>
> How was this tested?
I tested this using fscrypt on a Phison eMMC device. However, since that
particular eMMC does not support CMDQ, inline encryption (ICE) was
bypassed during testing.
>
>> #ifdef CONFIG_MMC_CRYPTO
>>
>> +static int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq,
>> + u32 slot)
>> +{
>> + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
>> + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
>> + struct mmc_host *mmc = msm_host->mmc;
>> + struct cqhci_host *cq_host = mmc->cqe_private;
>> + unsigned int crypto_params = 0;
>> + int key_index = 0;
>> + bool bypass = true;
>> + u64 dun = 0;
>> +
>> + if (mrq->crypto_ctx) {
>> + dun = mrq->crypto_ctx->bc_dun[0];
>> + bypass = false;
>> + key_index = mrq->crypto_key_slot;
>> + }
>> +
>> + crypto_params = FIELD_PREP(ICE_HCI_PARAM_CE, !bypass) |
>> + FIELD_PREP(ICE_HCI_PARAM_CCI, key_index);
>> +
>> + cqhci_writel(cq_host, crypto_params, NONCQ_CRYPTO_PARM);
>> +
>> + if (mrq->crypto_ctx)
>> + cqhci_writel(cq_host, lower_32_bits(dun), NONCQ_CRYPTO_DUN);
>> +
>> + /* Ensure crypto configuration is written before proceeding */
>> + wmb();
>> +
>> + return 0;
>> +}
>
> This would probably be easier to read with separate code paths for
> crypto_ctx != NULL and crypto_ctx == NULL. Also 'bypass' should be
> inverted and renamed to 'crypto_enable' to match the bitfield. Or just
> prepare the bitfield directly, without an intermediate variable.
Thanks for the suggestion. I agree that separating the logic based
on crypto_ctx presence improves readability. I’ll refactor the function
to use distinct code paths for crypto_ctx != NULL and crypto_ctx ==
NULL, and rename bypass to crypto_enable to better reflect the bitfield
semantics. I’ll also remove the intermediate variable and prepare
crypto_params directly as recommended.
>
>> @@ -2131,6 +2185,8 @@ static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
>> struct cqhci_host *cq_host;
>> bool dma64;
>> u32 cqcfg;
>> + u32 config;
>> + u32 ice_cap;
>> int ret;
>>
>> /*
>> @@ -2185,6 +2241,18 @@ static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
>> if (ret)
>> goto cleanup;
>>
>> + /* Initialize ICE for non-CMDQ eMMC devices */
>> + config = sdhci_readl(host, HC_VENDOR_SPECIFIC_FUNC4);
>> + config &= ~DISABLE_CRYPTO;
>> + sdhci_writel(host, config, HC_VENDOR_SPECIFIC_FUNC4);
>> + ice_cap = cqhci_readl(cq_host, CQHCI_CAP);
>> + if (ice_cap & ICE_HCI_SUPPORT) {
>> + config = cqhci_readl(cq_host, CQHCI_CFG);
>> + config |= CRYPTO_GENERAL_ENABLE;
>> + cqhci_writel(cq_host, config, CQHCI_CFG);
>> + }
>> + sdhci_msm_ice_enable(msm_host);
>
> This is after __sdhci_add_host() was called, which is probably too late.
Ok, I’ll move the ICE initialization earlier in the probe flow, ideally
before __sdhci_add_host() is called.
>
>> +#ifdef CONFIG_MMC_CRYPTO
>> + host->mmc_host_ops.request = sdhci_msm_request;
>> +#endif
>> /* Set the timeout value to max possible */
>> host->max_timeout_count = 0xF;
>
> A lot of the code in this patch also seems to actually run on
> CQE-capable hosts. Can you explain? Why is it needed? Is there any
> change in behavior on them?
Thanks for raising this. You're right that some parts of the patch
interact with CQE-related structures, such as cqhci_host, even on
CQE-capable hosts. However, the intent is to reuse existing CQE
infrastructure (like register access helpers and memory-mapped regions)
to configure ICE for non-CMDQ requests.
Importantly, actual CQE functionality is only enabled if the eMMC device
advertises CMDQ support. For devices without CMDQ, the CQE engine
remains disabled, and the request path falls back to standard non-CMDQ
flow. In this case, we're simply leveraging the CQE register space to
program ICE parameters.
So while the code runs on CQE-capable hosts, there's no change in
behavior for CMDQ-enabled devices — the patch does not interfere with
CQE operation. It only enables ICE for non-CMDQ requests when supported
by the platform.
Thanks,
Alam.
On Wed, Oct 22, 2025 at 10:49:23AM +0530, Md Sadre Alam wrote:
> Hi,
>
> On 10/17/2025 11:08 PM, Eric Biggers wrote:
> > On Tue, Oct 14, 2025 at 03:05:03PM +0530, Md Sadre Alam wrote:
> > > Enable Inline Crypto Engine (ICE) support for eMMC devices that operate
> > > without Command Queue Engine (CQE).This allows hardware-accelerated
> > > encryption and decryption for standard (non-CMDQ) requests.
> > >
> > > This patch:
> > > - Adds ICE register definitions for non-CMDQ crypto configuration
> > > - Implements a per-request crypto setup via sdhci_msm_ice_cfg()
> > > - Hooks into the request path via mmc_host_ops.request
> > > - Initializes ICE hardware during CQE setup for compatible platforms
> > >
> > > With this, non-CMDQ eMMC devices can benefit from inline encryption,
> > > improving performance for encrypted I/O while maintaining compatibility
> > > with existing CQE crypto support.
> > >
> > > Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
> >
> > How was this tested?
> I tested this using fscrypt on a Phison eMMC device. However, since that
> particular eMMC does not support CMDQ, inline encryption (ICE) was bypassed
> during testing.
What do you mean by "inline encryption (ICE) was bypassed during
testing"?
> +static int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq,
> + u32 slot)
Could you also remove the unused 'slot' parameter from this function?
> > > @@ -2185,6 +2241,18 @@ static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
> > > if (ret)
> > > goto cleanup;
> > > + /* Initialize ICE for non-CMDQ eMMC devices */
> > > + config = sdhci_readl(host, HC_VENDOR_SPECIFIC_FUNC4);
> > > + config &= ~DISABLE_CRYPTO;
> > > + sdhci_writel(host, config, HC_VENDOR_SPECIFIC_FUNC4);
> > > + ice_cap = cqhci_readl(cq_host, CQHCI_CAP);
> > > + if (ice_cap & ICE_HCI_SUPPORT) {
> > > + config = cqhci_readl(cq_host, CQHCI_CFG);
> > > + config |= CRYPTO_GENERAL_ENABLE;
> > > + cqhci_writel(cq_host, config, CQHCI_CFG);
> > > + }
> > > + sdhci_msm_ice_enable(msm_host);
> >
> > This is after __sdhci_add_host() was called, which is probably too late.
> ok,I’ll move the ICE initialization earlier in the probe flow, ideally
> before __sdhci_add_host() is called.
> >
> > > +#ifdef CONFIG_MMC_CRYPTO
> > > + host->mmc_host_ops.request = sdhci_msm_request;
> > > +#endif
> > > /* Set the timeout value to max possible */
> > > host->max_timeout_count = 0xF;
> >
> > A lot of the code in this patch also seems to actually run on
> > CQE-capable hosts. Can you explain? Why is it needed? Is there any
> > change in behavior on them?
> Thanks for raising this. You're right that some parts of the patch interact
> with CQE-related structures, such as cqhci_host, even on CQE-capable hosts.
> However, the intent is to reuse existing CQE infrastructure (like register
> access helpers and memory-mapped regions) to configure ICE for non-CMDQ
> requests.
>
> Importantly, actual CQE functionality is only enabled if the eMMC device
> advertises CMDQ support. For devices without CMDQ, the CQE engine remains
> disabled, and the request path falls back to standard non-CMDQ flow. In this
> case, we're simply leveraging the CQE register space to program ICE
> parameters.
>
> So while the code runs on CQE-capable hosts, there's no change in behavior
> for CMDQ-enabled devices — the patch does not interfere with CQE operation.
> It only enables ICE for non-CMDQ requests when supported by the platform.
So, we're dealing only with hosts that do support a command queue, but
support eMMC devices either with or without using it?
Could you explain why sdhci_msm_ice_enable() is called twice: once from
sdhci_msm_cqe_add_host() and once from sdhci_msm_cqe_enable()?
- Eric
Hi,
On 10/22/2025 11:14 AM, Eric Biggers wrote:
> On Wed, Oct 22, 2025 at 10:49:23AM +0530, Md Sadre Alam wrote:
>> Hi,
>>
>> On 10/17/2025 11:08 PM, Eric Biggers wrote:
>>> On Tue, Oct 14, 2025 at 03:05:03PM +0530, Md Sadre Alam wrote:
>>>> Enable Inline Crypto Engine (ICE) support for eMMC devices that operate
>>>> without Command Queue Engine (CQE).This allows hardware-accelerated
>>>> encryption and decryption for standard (non-CMDQ) requests.
>>>>
>>>> This patch:
>>>> - Adds ICE register definitions for non-CMDQ crypto configuration
>>>> - Implements a per-request crypto setup via sdhci_msm_ice_cfg()
>>>> - Hooks into the request path via mmc_host_ops.request
>>>> - Initializes ICE hardware during CQE setup for compatible platforms
>>>>
>>>> With this, non-CMDQ eMMC devices can benefit from inline encryption,
>>>> improving performance for encrypted I/O while maintaining compatibility
>>>> with existing CQE crypto support.
>>>>
>>>> Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
>>>
>>> How was this tested?
>> I tested this using fscrypt on a Phison eMMC device. However, since that
>> particular eMMC does not support CMDQ, inline encryption (ICE) was bypassed
>> during testing.
>
> What do you mean by "inline encryption (ICE) was bypassed during
> testing"?
By "inline encryption (ICE) was bypassed during testing," I meant that
encryption was not working because ICE was only being enabled in the CQE
request path (cqhci_request). For eMMC devices that do not support CMDQ,
the mmc core sends requests via the legacy path (sdhci_request), where
ICE was not being configured.
>
>> +static int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq,
>> + u32 slot)
>
> Could you also remove the unused 'slot' parameter from this function?
Ok
>
>>>> @@ -2185,6 +2241,18 @@ static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
>>>> if (ret)
>>>> goto cleanup;
>>>> + /* Initialize ICE for non-CMDQ eMMC devices */
>>>> + config = sdhci_readl(host, HC_VENDOR_SPECIFIC_FUNC4);
>>>> + config &= ~DISABLE_CRYPTO;
>>>> + sdhci_writel(host, config, HC_VENDOR_SPECIFIC_FUNC4);
>>>> + ice_cap = cqhci_readl(cq_host, CQHCI_CAP);
>>>> + if (ice_cap & ICE_HCI_SUPPORT) {
>>>> + config = cqhci_readl(cq_host, CQHCI_CFG);
>>>> + config |= CRYPTO_GENERAL_ENABLE;
>>>> + cqhci_writel(cq_host, config, CQHCI_CFG);
>>>> + }
>>>> + sdhci_msm_ice_enable(msm_host);
>>>
>>> This is after __sdhci_add_host() was called, which is probably too late.
>> ok,I’ll move the ICE initialization earlier in the probe flow, ideally
>> before __sdhci_add_host() is called.
>>>
>>>> +#ifdef CONFIG_MMC_CRYPTO
>>>> + host->mmc_host_ops.request = sdhci_msm_request;
>>>> +#endif
>>>> /* Set the timeout value to max possible */
>>>> host->max_timeout_count = 0xF;
>>>
>>> A lot of the code in this patch also seems to actually run on
>>> CQE-capable hosts. Can you explain? Why is it needed? Is there any
>>> change in behavior on them?
>> Thanks for raising this. You're right that some parts of the patch interact
>> with CQE-related structures, such as cqhci_host, even on CQE-capable hosts.
>> However, the intent is to reuse existing CQE infrastructure (like register
>> access helpers and memory-mapped regions) to configure ICE for non-CMDQ
>> requests.
>>
>> Importantly, actual CQE functionality is only enabled if the eMMC device
>> advertises CMDQ support. For devices without CMDQ, the CQE engine remains
>> disabled, and the request path falls back to standard non-CMDQ flow. In this
>> case, we're simply leveraging the CQE register space to program ICE
>> parameters.
>>
>> So while the code runs on CQE-capable hosts, there's no change in behavior
>> for CMDQ-enabled devices — the patch does not interfere with CQE operation.
>> It only enables ICE for non-CMDQ requests when supported by the platform.
>
> So, we're dealing only with hosts that do support a command queue, but
> support eMMC devices either with or without using it?
There are two cases where ICE support is needed without CMDQ:
1) The eMMC device does not support CMDQ, but we still want to use ICE
for encryption/decryption.
2) The host intentionally disables CMDQ, even if the eMMC device
supports it, and wants to use ICE in the legacy (non-CMDQ) path.
This patch addresses the first case — enabling ICE for devices that lack
CMDQ support. I'm currently working on the host-side logic to support
the second case, and will submit that separately.
>
> Could you explain why sdhci_msm_ice_enable() is called twice: once from
> sdhci_msm_cqe_add_host() and once from sdhci_msm_cqe_enable()?
Thanks for pointing this out. sdhci_msm_ice_enable() is called twice
only when the eMMC device supports CMDQ — once during
sdhci_msm_cqe_add_host() and again in sdhci_msm_cqe_enable(). For
non-CMDQ devices, it is called only once.
Since the function primarily performs register configuration, the second
call effectively reprograms the same values and has no functional side
effects. That said, I’ll look into adding a condition to avoid redundant
configuration when ICE is already enabled, to make the flow cleaner.
Thanks,
Alam.
+ Abel, Eric
On Tue, 14 Oct 2025 at 11:35, Md Sadre Alam <quic_mdalam@quicinc.com> wrote:
>
> Enable Inline Crypto Engine (ICE) support for eMMC devices that operate
> without Command Queue Engine (CQE).This allows hardware-accelerated
> encryption and decryption for standard (non-CMDQ) requests.
>
> This patch:
> - Adds ICE register definitions for non-CMDQ crypto configuration
> - Implements a per-request crypto setup via sdhci_msm_ice_cfg()
> - Hooks into the request path via mmc_host_ops.request
> - Initializes ICE hardware during CQE setup for compatible platforms
>
> With this, non-CMDQ eMMC devices can benefit from inline encryption,
> improving performance for encrypted I/O while maintaining compatibility
> with existing CQE crypto support.
>
> Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
Before applying this I want to get confirmation from the people who
implemented the inline crypto engine support, hence I have added Abel
and Eric.
Kind regards
Uffe
> ---
>
> Change in [v2]
>
> * Moved NONCQ_CRYPTO_PARM and NONCQ_CRYPTO_DUN register definitions into
> sdhci-msm.c
>
> * Introduced use of GENMASK() and FIELD_PREP() macros for cleaner and more
> maintainable bitfield handling in ICE configuration.
>
> * Removed redundant if (!mrq || !cq_host) check from sdhci_msm_ice_cfg()
> as both are guaranteed to be valid in the current call path.
>
> * Added assignment of host->mmc_host_ops.request = sdhci_msm_request; to
> integrate ICE configuration into the standard request path for non-CMDQ
> eMMC devices.
>
> * Removed sdhci_crypto_cfg() from sdhci.c and its invocation in sdhci_request()
>
> Change in [v1]
>
> * Added initial support for Inline Crypto Engine (ICE) on non-CMDQ eMMC
> devices.
>
> drivers/mmc/host/sdhci-msm.c | 71 ++++++++++++++++++++++++++++++++++++
> 1 file changed, 71 insertions(+)
>
> diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
> index 4e5edbf2fc9b..483aadaca262 100644
> --- a/drivers/mmc/host/sdhci-msm.c
> +++ b/drivers/mmc/host/sdhci-msm.c
> @@ -157,6 +157,18 @@
> #define CQHCI_VENDOR_CFG1 0xA00
> #define CQHCI_VENDOR_DIS_RST_ON_CQ_EN (0x3 << 13)
>
> +/* non command queue crypto enable register*/
> +#define NONCQ_CRYPTO_PARM 0x70
> +#define NONCQ_CRYPTO_DUN 0x74
> +
> +#define DISABLE_CRYPTO BIT(15)
> +#define CRYPTO_GENERAL_ENABLE BIT(1)
> +#define HC_VENDOR_SPECIFIC_FUNC4 0x260
> +#define ICE_HCI_SUPPORT BIT(28)
> +
> +#define ICE_HCI_PARAM_CCI GENMASK(7, 0)
> +#define ICE_HCI_PARAM_CE GENMASK(8, 8)
> +
> struct sdhci_msm_offset {
> u32 core_hc_mode;
> u32 core_mci_data_cnt;
> @@ -1885,6 +1897,48 @@ static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
>
> #ifdef CONFIG_MMC_CRYPTO
>
> +static int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq,
> + u32 slot)
> +{
> + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
> + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
> + struct mmc_host *mmc = msm_host->mmc;
> + struct cqhci_host *cq_host = mmc->cqe_private;
> + unsigned int crypto_params = 0;
> + int key_index = 0;
> + bool bypass = true;
> + u64 dun = 0;
> +
> + if (mrq->crypto_ctx) {
> + dun = mrq->crypto_ctx->bc_dun[0];
> + bypass = false;
> + key_index = mrq->crypto_key_slot;
> + }
> +
> + crypto_params = FIELD_PREP(ICE_HCI_PARAM_CE, !bypass) |
> + FIELD_PREP(ICE_HCI_PARAM_CCI, key_index);
> +
> + cqhci_writel(cq_host, crypto_params, NONCQ_CRYPTO_PARM);
> +
> + if (mrq->crypto_ctx)
> + cqhci_writel(cq_host, lower_32_bits(dun), NONCQ_CRYPTO_DUN);
> +
> + /* Ensure crypto configuration is written before proceeding */
> + wmb();
> +
> + return 0;
> +}
> +
> +static void sdhci_msm_request(struct mmc_host *mmc, struct mmc_request *mrq)
> +{
> + struct sdhci_host *host = mmc_priv(mmc);
> +
> + if (mmc->caps2 & MMC_CAP2_CRYPTO)
> + sdhci_msm_ice_cfg(host, mrq, 0);
> +
> + sdhci_request(mmc, mrq);
> +}
> +
> static const struct blk_crypto_ll_ops sdhci_msm_crypto_ops; /* forward decl */
>
> static int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host,
> @@ -2131,6 +2185,8 @@ static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
> struct cqhci_host *cq_host;
> bool dma64;
> u32 cqcfg;
> + u32 config;
> + u32 ice_cap;
> int ret;
>
> /*
> @@ -2185,6 +2241,18 @@ static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
> if (ret)
> goto cleanup;
>
> + /* Initialize ICE for non-CMDQ eMMC devices */
> + config = sdhci_readl(host, HC_VENDOR_SPECIFIC_FUNC4);
> + config &= ~DISABLE_CRYPTO;
> + sdhci_writel(host, config, HC_VENDOR_SPECIFIC_FUNC4);
> + ice_cap = cqhci_readl(cq_host, CQHCI_CAP);
> + if (ice_cap & ICE_HCI_SUPPORT) {
> + config = cqhci_readl(cq_host, CQHCI_CFG);
> + config |= CRYPTO_GENERAL_ENABLE;
> + cqhci_writel(cq_host, config, CQHCI_CFG);
> + }
> + sdhci_msm_ice_enable(msm_host);
> +
> dev_info(&pdev->dev, "%s: CQE init: success\n",
> mmc_hostname(host->mmc));
> return ret;
> @@ -2759,6 +2827,9 @@ static int sdhci_msm_probe(struct platform_device *pdev)
>
> msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
>
> +#ifdef CONFIG_MMC_CRYPTO
> + host->mmc_host_ops.request = sdhci_msm_request;
> +#endif
> /* Set the timeout value to max possible */
> host->max_timeout_count = 0xF;
>
> --
> 2.34.1
>
On 14/10/2025 12:35, Md Sadre Alam wrote:
> Enable Inline Crypto Engine (ICE) support for eMMC devices that operate
> without Command Queue Engine (CQE).This allows hardware-accelerated
> encryption and decryption for standard (non-CMDQ) requests.
>
> This patch:
> - Adds ICE register definitions for non-CMDQ crypto configuration
> - Implements a per-request crypto setup via sdhci_msm_ice_cfg()
> - Hooks into the request path via mmc_host_ops.request
> - Initializes ICE hardware during CQE setup for compatible platforms
>
> With this, non-CMDQ eMMC devices can benefit from inline encryption,
> improving performance for encrypted I/O while maintaining compatibility
> with existing CQE crypto support.
>
> Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
Acked-by: Adrian Hunter <adrian.hunter@intel.com>
> ---
>
> Change in [v2]
>
> * Moved NONCQ_CRYPTO_PARM and NONCQ_CRYPTO_DUN register definitions into
> sdhci-msm.c
>
> * Introduced use of GENMASK() and FIELD_PREP() macros for cleaner and more
> maintainable bitfield handling in ICE configuration.
>
> * Removed redundant if (!mrq || !cq_host) check from sdhci_msm_ice_cfg()
> as both are guaranteed to be valid in the current call path.
>
> * Added assignment of host->mmc_host_ops.request = sdhci_msm_request; to
> integrate ICE configuration into the standard request path for non-CMDQ
> eMMC devices.
>
> * Removed sdhci_crypto_cfg() from sdhci.c and its invocation in sdhci_request()
>
> Change in [v1]
>
> * Added initial support for Inline Crypto Engine (ICE) on non-CMDQ eMMC
> devices.
>
> drivers/mmc/host/sdhci-msm.c | 71 ++++++++++++++++++++++++++++++++++++
> 1 file changed, 71 insertions(+)
>
> diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
> index 4e5edbf2fc9b..483aadaca262 100644
> --- a/drivers/mmc/host/sdhci-msm.c
> +++ b/drivers/mmc/host/sdhci-msm.c
> @@ -157,6 +157,18 @@
> #define CQHCI_VENDOR_CFG1 0xA00
> #define CQHCI_VENDOR_DIS_RST_ON_CQ_EN (0x3 << 13)
>
> +/* non command queue crypto enable register*/
> +#define NONCQ_CRYPTO_PARM 0x70
> +#define NONCQ_CRYPTO_DUN 0x74
> +
> +#define DISABLE_CRYPTO BIT(15)
> +#define CRYPTO_GENERAL_ENABLE BIT(1)
> +#define HC_VENDOR_SPECIFIC_FUNC4 0x260
> +#define ICE_HCI_SUPPORT BIT(28)
> +
> +#define ICE_HCI_PARAM_CCI GENMASK(7, 0)
> +#define ICE_HCI_PARAM_CE GENMASK(8, 8)
> +
> struct sdhci_msm_offset {
> u32 core_hc_mode;
> u32 core_mci_data_cnt;
> @@ -1885,6 +1897,48 @@ static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
>
> #ifdef CONFIG_MMC_CRYPTO
>
> +static int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq,
> + u32 slot)
> +{
> + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
> + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
> + struct mmc_host *mmc = msm_host->mmc;
> + struct cqhci_host *cq_host = mmc->cqe_private;
> + unsigned int crypto_params = 0;
> + int key_index = 0;
> + bool bypass = true;
> + u64 dun = 0;
> +
> + if (mrq->crypto_ctx) {
> + dun = mrq->crypto_ctx->bc_dun[0];
> + bypass = false;
> + key_index = mrq->crypto_key_slot;
> + }
> +
> + crypto_params = FIELD_PREP(ICE_HCI_PARAM_CE, !bypass) |
> + FIELD_PREP(ICE_HCI_PARAM_CCI, key_index);
> +
> + cqhci_writel(cq_host, crypto_params, NONCQ_CRYPTO_PARM);
> +
> + if (mrq->crypto_ctx)
> + cqhci_writel(cq_host, lower_32_bits(dun), NONCQ_CRYPTO_DUN);
> +
> + /* Ensure crypto configuration is written before proceeding */
> + wmb();
> +
> + return 0;
> +}
> +
> +static void sdhci_msm_request(struct mmc_host *mmc, struct mmc_request *mrq)
> +{
> + struct sdhci_host *host = mmc_priv(mmc);
> +
> + if (mmc->caps2 & MMC_CAP2_CRYPTO)
> + sdhci_msm_ice_cfg(host, mrq, 0);
> +
> + sdhci_request(mmc, mrq);
> +}
> +
> static const struct blk_crypto_ll_ops sdhci_msm_crypto_ops; /* forward decl */
>
> static int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host,
> @@ -2131,6 +2185,8 @@ static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
> struct cqhci_host *cq_host;
> bool dma64;
> u32 cqcfg;
> + u32 config;
> + u32 ice_cap;
> int ret;
>
> /*
> @@ -2185,6 +2241,18 @@ static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
> if (ret)
> goto cleanup;
>
> + /* Initialize ICE for non-CMDQ eMMC devices */
> + config = sdhci_readl(host, HC_VENDOR_SPECIFIC_FUNC4);
> + config &= ~DISABLE_CRYPTO;
> + sdhci_writel(host, config, HC_VENDOR_SPECIFIC_FUNC4);
> + ice_cap = cqhci_readl(cq_host, CQHCI_CAP);
> + if (ice_cap & ICE_HCI_SUPPORT) {
> + config = cqhci_readl(cq_host, CQHCI_CFG);
> + config |= CRYPTO_GENERAL_ENABLE;
> + cqhci_writel(cq_host, config, CQHCI_CFG);
> + }
> + sdhci_msm_ice_enable(msm_host);
> +
> dev_info(&pdev->dev, "%s: CQE init: success\n",
> mmc_hostname(host->mmc));
> return ret;
> @@ -2759,6 +2827,9 @@ static int sdhci_msm_probe(struct platform_device *pdev)
>
> msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
>
> +#ifdef CONFIG_MMC_CRYPTO
> + host->mmc_host_ops.request = sdhci_msm_request;
> +#endif
> /* Set the timeout value to max possible */
> host->max_timeout_count = 0xF;
>
© 2016 - 2026 Red Hat, Inc.