Enable Inline Crypto Engine (ICE) support for CQE-capable sdhci-msm
controllers when used with eMMC cards that do not support CQE.
This addresses the scenario where:
- The host controller supports CQE (and has CQHCI crypto infrastructure)
- The eMMC card does not support CQE
- Standard (non-CMDQ) requests need crypto support
This allows hardware-accelerated encryption and decryption for standard
requests on CQE-capable hardware by utilizing the existing CQHCI crypto
register space even when CQE functionality is not available due to card
limitations.
The implementation:
- Adds ICE register definitions for non-CQE crypto configuration
- Implements per-request crypto setup via sdhci_msm_ice_cfg()
- Hooks into the request path via mmc_host_ops.request for non-CQE requests
- Uses CQHCI register space (NONCQ_CRYPTO_PARM/DUN) for crypto configuration
With this, CQE-capable controllers can benefit from inline encryption
when paired with non-CQE cards, improving performance for encrypted I/O
while maintaining compatibility with existing CQE crypto support.
Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
Acked-by: Adrian Hunter <adrian.hunter@intel.com>
---
Change in [v5]
* Removed unused variable
* Added proper comment for sdhci_msm_request()
* Removed sdhci_msm_ice_enable(); it is already invoked during resume
Change in [v4]
* Moved ICE initialization for non cmdq into sdhci_msm_ice_cfg() and made
it conditional on mrq->crypto_ctx to enable lazy setup.
* Added msm_host->ice_init_done guard to prevent redundant initialization.
* Updated commit message
Change in [v3]
* Refactored logic to use separate code paths for crypto_ctx != NULL and
crypto_ctx == NULL to improve readability.
* Renamed bypass to crypto_enable to align with bitfield semantics.
* Removed slot variable
* Added ICE initialization sequence for non-CMDQ eMMC devices before
__sdhci_add_host()
Change in [v2]
* Moved NONCQ_CRYPTO_PARM and NONCQ_CRYPTO_DUN register definitions into
sdhci-msm.c
* Introduced use of GENMASK() and FIELD_PREP() macros for cleaner and more
maintainable bitfield handling in ICE configuration.
* Removed redundant if (!mrq || !cq_host) check from sdhci_msm_ice_cfg()
as both are guaranteed to be valid in the current call path.
* Added assignment of host->mmc_host_ops.request = sdhci_msm_request; to
integrate ICE configuration into the standard request path for non-CMDQ
eMMC devices.
* Removed sdhci_crypto_cfg() from sdhci.c and its invocation in sdhci_request()
Change in [v1]
* Added initial support for Inline Crypto Engine (ICE) on non-CMDQ eMMC
devices.
drivers/mmc/host/sdhci-msm.c | 101 +++++++++++++++++++++++++++++++++++
1 file changed, 101 insertions(+)
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 4e5edbf2fc9b..69c67242519c 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -157,6 +157,18 @@
#define CQHCI_VENDOR_CFG1 0xA00
#define CQHCI_VENDOR_DIS_RST_ON_CQ_EN (0x3 << 13)
+/* non command queue crypto enable registers */
+#define NONCQ_CRYPTO_PARM 0x70
+#define NONCQ_CRYPTO_DUN 0x74
+
+#define DISABLE_CRYPTO BIT(15)
+#define CRYPTO_GENERAL_ENABLE BIT(1)
+#define HC_VENDOR_SPECIFIC_FUNC4 0x260
+#define ICE_HCI_SUPPORT BIT(28)
+
+#define ICE_HCI_PARAM_CCI GENMASK(7, 0)
+#define ICE_HCI_PARAM_CE GENMASK(8, 8)
+
struct sdhci_msm_offset {
u32 core_hc_mode;
u32 core_mci_data_cnt;
@@ -300,6 +312,7 @@ struct sdhci_msm_host {
u32 dll_config;
u32 ddr_config;
bool vqmmc_enabled;
+ bool ice_init_done;
};
static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host)
@@ -2009,6 +2022,91 @@ static int sdhci_msm_ice_keyslot_evict(struct blk_crypto_profile *profile,
return qcom_ice_evict_key(msm_host->ice, slot);
}
+static void sdhci_msm_non_cqe_ice_init(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ struct mmc_host *mmc = msm_host->mmc;
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ u32 config;
+ u32 ice_cap;
+
+ config = sdhci_readl(host, HC_VENDOR_SPECIFIC_FUNC4);
+ config &= ~DISABLE_CRYPTO;
+ sdhci_writel(host, config, HC_VENDOR_SPECIFIC_FUNC4);
+ ice_cap = cqhci_readl(cq_host, CQHCI_CAP);
+ if (ice_cap & ICE_HCI_SUPPORT) {
+ config = cqhci_readl(cq_host, CQHCI_CFG);
+ config |= CRYPTO_GENERAL_ENABLE;
+ cqhci_writel(cq_host, config, CQHCI_CFG);
+ }
+}
+
+static int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ struct mmc_host *mmc = msm_host->mmc;
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ unsigned int crypto_params = 0;
+ int key_index;
+ bool crypto_enable;
+ u64 dun = 0;
+
+ if (mrq->crypto_ctx) {
+ if (!msm_host->ice_init_done) {
+ sdhci_msm_non_cqe_ice_init(host);
+ msm_host->ice_init_done = true;
+ }
+
+ crypto_enable = true;
+ dun = mrq->crypto_ctx->bc_dun[0];
+ key_index = mrq->crypto_key_slot;
+ crypto_params = FIELD_PREP(ICE_HCI_PARAM_CE, crypto_enable) |
+ FIELD_PREP(ICE_HCI_PARAM_CCI, key_index);
+
+ cqhci_writel(cq_host, crypto_params, NONCQ_CRYPTO_PARM);
+ cqhci_writel(cq_host, lower_32_bits(dun), NONCQ_CRYPTO_DUN);
+ } else {
+ cqhci_writel(cq_host, crypto_params, NONCQ_CRYPTO_PARM);
+ }
+
+ /* Ensure crypto configuration is written before proceeding */
+ wmb();
+
+ return 0;
+}
+
+/*
+ * sdhci_msm_request - Handle non-CQE MMC requests with crypto support
+ * @mmc: MMC host
+ * @mrq: MMC request
+ *
+ * This function is called for non-CQE requests only. The MMC block layer
+ * routes requests as follows:
+ *
+ * if (host->cqe_enabled)
+ * ret = mmc_blk_cqe_issue_rw_rq(mq, req); // → cqhci_request()
+ * else
+ * ret = mmc_blk_mq_issue_rw_rq(mq, req); // → sdhci_msm_request()
+ *
+ * For CQE requests, crypto is handled in cqhci_request() in
+ * drivers/mmc/host/cqhci-core.c using the existing CQE crypto infrastructure.
+ *
+ * For non-CQE requests, this function provides crypto support by configuring
+ * the ICE (Inline Crypto Engine) registers before passing the request to
+ * the standard SDHCI request handler.
+ */
+static void sdhci_msm_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (mmc->caps2 & MMC_CAP2_CRYPTO)
+ sdhci_msm_ice_cfg(host, mrq);
+
+ sdhci_request(mmc, mrq);
+}
+
static const struct blk_crypto_ll_ops sdhci_msm_crypto_ops = {
.keyslot_program = sdhci_msm_ice_keyslot_program,
.keyslot_evict = sdhci_msm_ice_keyslot_evict,
@@ -2759,6 +2857,9 @@ static int sdhci_msm_probe(struct platform_device *pdev)
msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
+#ifdef CONFIG_MMC_CRYPTO
+ host->mmc_host_ops.request = sdhci_msm_request;
+#endif
/* Set the timeout value to max possible */
host->max_timeout_count = 0xF;
--
2.34.1
On Wed, Nov 19, 2025 at 05:16:53PM +0530, Md Sadre Alam wrote:
> struct sdhci_msm_offset {
> u32 core_hc_mode;
> u32 core_mci_data_cnt;
> @@ -300,6 +312,7 @@ struct sdhci_msm_host {
> u32 dll_config;
> u32 ddr_config;
> bool vqmmc_enabled;
> + bool ice_init_done;
Rename to non_cqe_ice_init_done
> +static void sdhci_msm_non_cqe_ice_init(struct sdhci_host *host)
> +{
> + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
> + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
> + struct mmc_host *mmc = msm_host->mmc;
> + struct cqhci_host *cq_host = mmc->cqe_private;
> + u32 config;
> + u32 ice_cap;
> +
> + config = sdhci_readl(host, HC_VENDOR_SPECIFIC_FUNC4);
> + config &= ~DISABLE_CRYPTO;
> + sdhci_writel(host, config, HC_VENDOR_SPECIFIC_FUNC4);
> + ice_cap = cqhci_readl(cq_host, CQHCI_CAP);
> + if (ice_cap & ICE_HCI_SUPPORT) {
> + config = cqhci_readl(cq_host, CQHCI_CFG);
> + config |= CRYPTO_GENERAL_ENABLE;
> + cqhci_writel(cq_host, config, CQHCI_CFG);
> + }
> +}
Why would ICE_HCI_SUPPORT not be set here? When this is called, the
driver is already in the middle of processing an mmc_request with an
encryption context, due to the driver advertising that it supports
inline crypto earlier. If the hardware doesn't actually support inline
crypto, that has to be detected earlier. But I thought it already does.
So it's unclear what checking ICE_HCI_SUPPORT here is meant to achieve.
> +static int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq)
This should return void.
> + if (mrq->crypto_ctx) {
> + if (!msm_host->ice_init_done) {
> + sdhci_msm_non_cqe_ice_init(host);
> + msm_host->ice_init_done = true;
> + }
> +
> + crypto_enable = true;
> + dun = mrq->crypto_ctx->bc_dun[0];
> + key_index = mrq->crypto_key_slot;
> + crypto_params = FIELD_PREP(ICE_HCI_PARAM_CE, crypto_enable) |
> + FIELD_PREP(ICE_HCI_PARAM_CCI, key_index);
> +
> + cqhci_writel(cq_host, crypto_params, NONCQ_CRYPTO_PARM);
> + cqhci_writel(cq_host, lower_32_bits(dun), NONCQ_CRYPTO_DUN);
No need for the crypto_enable variable. Just use:
FIELD_PREP(ICE_HCI_PARAM_CE, 1).
Also no need for the dun variable. Just use:
cqhci_writel(cq_host, lower_32_bits(mrq->crypto_ctx->bc_dun[0]),
NONCQ_CRYPTO_DUN);
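Putting the void return, the rename suggested earlier, and these two
simplifications together, the helper would end up looking roughly like
this (untested sketch):

static void sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct cqhci_host *cq_host = msm_host->mmc->cqe_private;
	unsigned int crypto_params = 0;

	if (mrq->crypto_ctx) {
		if (!msm_host->non_cqe_ice_init_done) {
			sdhci_msm_non_cqe_ice_init(host);
			msm_host->non_cqe_ice_init_done = true;
		}

		/* CE = enable crypto, CCI = programmed keyslot index */
		crypto_params = FIELD_PREP(ICE_HCI_PARAM_CE, 1) |
				FIELD_PREP(ICE_HCI_PARAM_CCI, mrq->crypto_key_slot);
		cqhci_writel(cq_host, crypto_params, NONCQ_CRYPTO_PARM);
		cqhci_writel(cq_host, lower_32_bits(mrq->crypto_ctx->bc_dun[0]),
			     NONCQ_CRYPTO_DUN);
	} else {
		/* No encryption context: leave the CE bit clear for this request */
		cqhci_writel(cq_host, crypto_params, NONCQ_CRYPTO_PARM);
	}

	/* Ensure crypto configuration is written before proceeding */
	wmb();
}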
> + * For CQE requests, crypto is handled in cqhci_request() in
> + * drivers/mmc/host/cqhci-core.c using the existing CQE crypto infrastructure.
It's recommended to not reference file paths like this in kernel code,
since files are sometimes moved around.
- Eric
Hi,
On 11/21/2025 7:42 AM, Eric Biggers wrote:
> On Wed, Nov 19, 2025 at 05:16:53PM +0530, Md Sadre Alam wrote:
>> struct sdhci_msm_offset {
>> u32 core_hc_mode;
>> u32 core_mci_data_cnt;
>> @@ -300,6 +312,7 @@ struct sdhci_msm_host {
>> u32 dll_config;
>> u32 ddr_config;
>> bool vqmmc_enabled;
>> + bool ice_init_done;
>
> Rename to non_cqe_ice_init_done
Ok
>
>> +static void sdhci_msm_non_cqe_ice_init(struct sdhci_host *host)
>> +{
>> + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
>> + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
>> + struct mmc_host *mmc = msm_host->mmc;
>> + struct cqhci_host *cq_host = mmc->cqe_private;
>> + u32 config;
>> + u32 ice_cap;
>> +
>> + config = sdhci_readl(host, HC_VENDOR_SPECIFIC_FUNC4);
>> + config &= ~DISABLE_CRYPTO;
>> + sdhci_writel(host, config, HC_VENDOR_SPECIFIC_FUNC4);
>> + ice_cap = cqhci_readl(cq_host, CQHCI_CAP);
>> + if (ice_cap & ICE_HCI_SUPPORT) {
>> + config = cqhci_readl(cq_host, CQHCI_CFG);
>> + config |= CRYPTO_GENERAL_ENABLE;
>> + cqhci_writel(cq_host, config, CQHCI_CFG);
>> + }
>> +}
>
> Why would ICE_HCI_SUPPORT not be set here? When this is called, the
> driver is already in the middle of processing an mmc_request with an
> encryption context, due to the driver advertising that it supports
> inline crypto earlier. If the hardware doesn't actually support inline
> crypto, that has to be detected earlier. But I thought it already does.
> So it's unclear what checking ICE_HCI_SUPPORT here is meant to achieve.
You're right — by the time we reach sdhci_msm_request(), the driver has
already advertised inline crypto support and is processing a request
with an encryption context. The check for ICE_HCI_SUPPORT here seems
redundant given the existing capability detection.
I’ll drop this check in the next revision.
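With that check gone, the non-CQE ICE init would reduce to roughly the
following (untested sketch):

static void sdhci_msm_non_cqe_ice_init(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct cqhci_host *cq_host = msm_host->mmc->cqe_private;
	u32 config;

	/* Clear the vendor-specific crypto disable bit */
	config = sdhci_readl(host, HC_VENDOR_SPECIFIC_FUNC4);
	config &= ~DISABLE_CRYPTO;
	sdhci_writel(host, config, HC_VENDOR_SPECIFIC_FUNC4);

	/* ICE support was already established before MMC_CAP2_CRYPTO was set */
	config = cqhci_readl(cq_host, CQHCI_CFG);
	config |= CRYPTO_GENERAL_ENABLE;
	cqhci_writel(cq_host, config, CQHCI_CFG);
}

The CQHCI_CAP read and the ICE_HCI_SUPPORT define can then be dropped as
well.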
>
>> +static int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq)
>
> This should return void.
>
>> + if (mrq->crypto_ctx) {
>> + if (!msm_host->ice_init_done) {
>> + sdhci_msm_non_cqe_ice_init(host);
>> + msm_host->ice_init_done = true;
>> + }
>> +
>> + crypto_enable = true;
>> + dun = mrq->crypto_ctx->bc_dun[0];
>> + key_index = mrq->crypto_key_slot;
>> + crypto_params = FIELD_PREP(ICE_HCI_PARAM_CE, crypto_enable) |
>> + FIELD_PREP(ICE_HCI_PARAM_CCI, key_index);
>> +
>> + cqhci_writel(cq_host, crypto_params, NONCQ_CRYPTO_PARM);
>> + cqhci_writel(cq_host, lower_32_bits(dun), NONCQ_CRYPTO_DUN);
>
> No need for the crypto_enable variable. Just use:
>
> FIELD_PREP(ICE_HCI_PARAM_CE, 1).
ok
>
> Also no need for the dun variable. Just use:
>
> cqhci_writel(cq_host, lower_32_bits(mrq->crypto_ctx->bc_dun[0]),
> NONCQ_CRYPTO_DUN);
ok
>
>> + * For CQE requests, crypto is handled in cqhci_request() in
>> + * drivers/mmc/host/cqhci-core.c using the existing CQE crypto infrastructure.
>
> It's recommended to not reference file paths like this in kernel code,
> since files are sometimes moved around.
Sure, will rewrite the comment in the next revision.
Thanks,
Alam.
On 19/11/2025 13:46, Md Sadre Alam wrote:
> Enable Inline Crypto Engine (ICE) support for CQE-capable sdhci-msm
> controllers when used with eMMC cards that do not support CQE.
>
> This addresses the scenario where:
> - The host controller supports CQE (and has CQHCI crypto infrastructure)
> - The eMMC card does not support CQE
> - Standard (non-CMDQ) requests need crypto support
>
> This allows hardware-accelerated encryption and decryption for standard
> requests on CQE-capable hardware by utilizing the existing CQHCI crypto
> register space even when CQE functionality is not available due to card
> limitations.
>
> The implementation:
> - Adds ICE register definitions for non-CQE crypto configuration
> - Implements per-request crypto setup via sdhci_msm_ice_cfg()
> - Hooks into the request path via mmc_host_ops.request for non-CQE requests
> - Uses CQHCI register space (NONCQ_CRYPTO_PARM/DUN) for crypto configuration
>
> With this, CQE-capable controllers can benefit from inline encryption
> when paired with non-CQE cards, improving performance for encrypted I/O
> while maintaining compatibility with existing CQE crypto support.
>
> Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
> Acked-by: Adrian Hunter <adrian.hunter@intel.com>
> ---
>
> Change in [v5]
>
> * Removed unused variable
>
> * Added proper comment for sdhci_msm_request()
>
> * Removed sdhci_msm_ice_enable(); it is already invoked during resume
>
> Change in [v4]
>
> * Moved ICE initialization for non cmdq into sdhci_msm_ice_cfg() and made
> it conditional on mrq->crypto_ctx to enable lazy setup.
>
> * Added msm_host->ice_init_done guard to prevent redundant initialization.
>
> * Updated commit message
>
> Change in [v3]
>
> * Refactored logic to use separate code paths for crypto_ctx != NULL and
> crypto_ctx == NULL to improve readability.
>
> * Renamed bypass to crypto_enable to align with bitfield semantics.
>
> * Removed slot variable
>
> * Added ICE initialization sequence for non-CMDQ eMMC devices before
> __sdhci_add_host()
>
> Change in [v2]
>
> * Moved NONCQ_CRYPTO_PARM and NONCQ_CRYPTO_DUN register definitions into
> sdhci-msm.c
>
> * Introduced use of GENMASK() and FIELD_PREP() macros for cleaner and more
> maintainable bitfield handling in ICE configuration.
>
> * Removed redundant if (!mrq || !cq_host) check from sdhci_msm_ice_cfg()
> as both are guaranteed to be valid in the current call path.
>
> * Added assignment of host->mmc_host_ops.request = sdhci_msm_request; to
> integrate ICE configuration into the standard request path for non-CMDQ
> eMMC devices.
>
> * Removed sdhci_crypto_cfg() from sdhci.c and its invocation in sdhci_request()
>
> Change in [v1]
>
> * Added initial support for Inline Crypto Engine (ICE) on non-CMDQ eMMC
> devices.
>
> drivers/mmc/host/sdhci-msm.c | 101 +++++++++++++++++++++++++++++++++++
> 1 file changed, 101 insertions(+)
>
> diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
> index 4e5edbf2fc9b..69c67242519c 100644
> --- a/drivers/mmc/host/sdhci-msm.c
> +++ b/drivers/mmc/host/sdhci-msm.c
> @@ -157,6 +157,18 @@
> #define CQHCI_VENDOR_CFG1 0xA00
> #define CQHCI_VENDOR_DIS_RST_ON_CQ_EN (0x3 << 13)
>
> +/* non command queue crypto enable registers */
> +#define NONCQ_CRYPTO_PARM 0x70
> +#define NONCQ_CRYPTO_DUN 0x74
> +
> +#define DISABLE_CRYPTO BIT(15)
> +#define CRYPTO_GENERAL_ENABLE BIT(1)
> +#define HC_VENDOR_SPECIFIC_FUNC4 0x260
> +#define ICE_HCI_SUPPORT BIT(28)
> +
> +#define ICE_HCI_PARAM_CCI GENMASK(7, 0)
> +#define ICE_HCI_PARAM_CE GENMASK(8, 8)
> +
> struct sdhci_msm_offset {
> u32 core_hc_mode;
> u32 core_mci_data_cnt;
> @@ -300,6 +312,7 @@ struct sdhci_msm_host {
> u32 dll_config;
> u32 ddr_config;
> bool vqmmc_enabled;
> + bool ice_init_done;
> };
>
> static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host)
> @@ -2009,6 +2022,91 @@ static int sdhci_msm_ice_keyslot_evict(struct blk_crypto_profile *profile,
> return qcom_ice_evict_key(msm_host->ice, slot);
> }
>
> +static void sdhci_msm_non_cqe_ice_init(struct sdhci_host *host)
> +{
> + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
> + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
> + struct mmc_host *mmc = msm_host->mmc;
> + struct cqhci_host *cq_host = mmc->cqe_private;
> + u32 config;
> + u32 ice_cap;
> +
> + config = sdhci_readl(host, HC_VENDOR_SPECIFIC_FUNC4);
> + config &= ~DISABLE_CRYPTO;
> + sdhci_writel(host, config, HC_VENDOR_SPECIFIC_FUNC4);
> + ice_cap = cqhci_readl(cq_host, CQHCI_CAP);
> + if (ice_cap & ICE_HCI_SUPPORT) {
> + config = cqhci_readl(cq_host, CQHCI_CFG);
> + config |= CRYPTO_GENERAL_ENABLE;
> + cqhci_writel(cq_host, config, CQHCI_CFG);
> + }
> +}
> +
> +static int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq)
> +{
> + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
> + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
> + struct mmc_host *mmc = msm_host->mmc;
> + struct cqhci_host *cq_host = mmc->cqe_private;
> + unsigned int crypto_params = 0;
> + int key_index;
> + bool crypto_enable;
> + u64 dun = 0;
> +
> + if (mrq->crypto_ctx) {
> + if (!msm_host->ice_init_done) {
> + sdhci_msm_non_cqe_ice_init(host);
> + msm_host->ice_init_done = true;
> + }
> +
> + crypto_enable = true;
> + dun = mrq->crypto_ctx->bc_dun[0];
> + key_index = mrq->crypto_key_slot;
> + crypto_params = FIELD_PREP(ICE_HCI_PARAM_CE, crypto_enable) |
> + FIELD_PREP(ICE_HCI_PARAM_CCI, key_index);
> +
> + cqhci_writel(cq_host, crypto_params, NONCQ_CRYPTO_PARM);
> + cqhci_writel(cq_host, lower_32_bits(dun), NONCQ_CRYPTO_DUN);
> + } else {
> + cqhci_writel(cq_host, crypto_params, NONCQ_CRYPTO_PARM);
> + }
> +
> + /* Ensure crypto configuration is written before proceeding */
> + wmb();
> +
> + return 0;
> +}
> +
> +/*
> + * sdhci_msm_request - Handle non-CQE MMC requests with crypto support
> + * @mmc: MMC host
> + * @mrq: MMC request
> + *
> + * This function is called for non-CQE requests only. The MMC block layer
> + * routes requests as follows:
> + *
> + * if (host->cqe_enabled)
> + * ret = mmc_blk_cqe_issue_rw_rq(mq, req); // → cqhci_request()
> + * else
> + * ret = mmc_blk_mq_issue_rw_rq(mq, req); // → sdhci_msm_request()
> + *
> + * For CQE requests, crypto is handled in cqhci_request() in
> + * drivers/mmc/host/cqhci-core.c using the existing CQE crypto infrastructure.
> + *
> + * For non-CQE requests, this function provides crypto support by configuring
> + * the ICE (Inline Crypto Engine) registers before passing the request to
> + * the standard SDHCI request handler.
> + */
Kernel-style is not to put kernel-doc like comments on call-back
functions, since the functionality is defined by the upper layer,
and there is no point duplicating the information for every single
implementation.
> +static void sdhci_msm_request(struct mmc_host *mmc, struct mmc_request *mrq)
> +{
> + struct sdhci_host *host = mmc_priv(mmc);
> +
A simple comment here would suffice, say something like:
/* Only need to handle non-CQE crypto requests in this path */
> + if (mmc->caps2 & MMC_CAP2_CRYPTO)
> + sdhci_msm_ice_cfg(host, mrq);
> +
> + sdhci_request(mmc, mrq);
> +}
> +
> static const struct blk_crypto_ll_ops sdhci_msm_crypto_ops = {
> .keyslot_program = sdhci_msm_ice_keyslot_program,
> .keyslot_evict = sdhci_msm_ice_keyslot_evict,
> @@ -2759,6 +2857,9 @@ static int sdhci_msm_probe(struct platform_device *pdev)
>
> msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
>
> +#ifdef CONFIG_MMC_CRYPTO
> + host->mmc_host_ops.request = sdhci_msm_request;
> +#endif
> /* Set the timeout value to max possible */
> host->max_timeout_count = 0xF;
>
Hi,
On 11/20/2025 12:10 PM, Adrian Hunter wrote:
> On 19/11/2025 13:46, Md Sadre Alam wrote:
>> Enable Inline Crypto Engine (ICE) support for CQE-capable sdhci-msm
>> controllers when used with eMMC cards that do not support CQE.
>>
>> This addresses the scenario where:
>> - The host controller supports CQE (and has CQHCI crypto infrastructure)
>> - The eMMC card does not support CQE
>> - Standard (non-CMDQ) requests need crypto support
>>
>> This allows hardware-accelerated encryption and decryption for standard
>> requests on CQE-capable hardware by utilizing the existing CQHCI crypto
>> register space even when CQE functionality is not available due to card
>> limitations.
>>
>> The implementation:
>> - Adds ICE register definitions for non-CQE crypto configuration
>> - Implements per-request crypto setup via sdhci_msm_ice_cfg()
>> - Hooks into the request path via mmc_host_ops.request for non-CQE requests
>> - Uses CQHCI register space (NONCQ_CRYPTO_PARM/DUN) for crypto configuration
>>
>> With this, CQE-capable controllers can benefit from inline encryption
>> when paired with non-CQE cards, improving performance for encrypted I/O
>> while maintaining compatibility with existing CQE crypto support.
>>
>> Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
>> Acked-by: Adrian Hunter <adrian.hunter@intel.com>
>> ---
>>
>> Change in [v5]
>>
>> * Removed unused variable
>>
>> * Added proper comment for sdhci_msm_request()
>>
>> * Removed sdhci_msm_ice_enable(); it is already invoked during resume
>>
>> Change in [v4]
>>
>> * Moved ICE initialization for non cmdq into sdhci_msm_ice_cfg() and made
>> it conditional on mrq->crypto_ctx to enable lazy setup.
>>
>> * Added msm_host->ice_init_done guard to prevent redundant initialization.
>>
>> * Updated commit message
>>
>> Change in [v3]
>>
>> * Refactored logic to use separate code paths for crypto_ctx != NULL and
>> crypto_ctx == NULL to improve readability.
>>
>> * Renamed bypass to crypto_enable to align with bitfield semantics.
>>
>> * Removed slot variable
>>
>> * Added ICE initialization sequence for non-CMDQ eMMC devices before
>> __sdhci_add_host()
>>
>> Change in [v2]
>>
>> * Moved NONCQ_CRYPTO_PARM and NONCQ_CRYPTO_DUN register definitions into
>> sdhci-msm.c
>>
>> * Introduced use of GENMASK() and FIELD_PREP() macros for cleaner and more
>> maintainable bitfield handling in ICE configuration.
>>
>> * Removed redundant if (!mrq || !cq_host) check from sdhci_msm_ice_cfg()
>> as both are guaranteed to be valid in the current call path.
>>
>> * Added assignment of host->mmc_host_ops.request = sdhci_msm_request; to
>> integrate ICE configuration into the standard request path for non-CMDQ
>> eMMC devices.
>>
>> * Removed sdhci_crypto_cfg() from sdhci.c and its invocation in sdhci_request()
>>
>> Change in [v1]
>>
>> * Added initial support for Inline Crypto Engine (ICE) on non-CMDQ eMMC
>> devices.
>>
>> drivers/mmc/host/sdhci-msm.c | 101 +++++++++++++++++++++++++++++++++++
>> 1 file changed, 101 insertions(+)
>>
>> diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
>> index 4e5edbf2fc9b..69c67242519c 100644
>> --- a/drivers/mmc/host/sdhci-msm.c
>> +++ b/drivers/mmc/host/sdhci-msm.c
>> @@ -157,6 +157,18 @@
>> #define CQHCI_VENDOR_CFG1 0xA00
>> #define CQHCI_VENDOR_DIS_RST_ON_CQ_EN (0x3 << 13)
>>
>> +/* non command queue crypto enable registers */
>> +#define NONCQ_CRYPTO_PARM 0x70
>> +#define NONCQ_CRYPTO_DUN 0x74
>> +
>> +#define DISABLE_CRYPTO BIT(15)
>> +#define CRYPTO_GENERAL_ENABLE BIT(1)
>> +#define HC_VENDOR_SPECIFIC_FUNC4 0x260
>> +#define ICE_HCI_SUPPORT BIT(28)
>> +
>> +#define ICE_HCI_PARAM_CCI GENMASK(7, 0)
>> +#define ICE_HCI_PARAM_CE GENMASK(8, 8)
>> +
>> struct sdhci_msm_offset {
>> u32 core_hc_mode;
>> u32 core_mci_data_cnt;
>> @@ -300,6 +312,7 @@ struct sdhci_msm_host {
>> u32 dll_config;
>> u32 ddr_config;
>> bool vqmmc_enabled;
>> + bool ice_init_done;
>> };
>>
>> static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host)
>> @@ -2009,6 +2022,91 @@ static int sdhci_msm_ice_keyslot_evict(struct blk_crypto_profile *profile,
>> return qcom_ice_evict_key(msm_host->ice, slot);
>> }
>>
>> +static void sdhci_msm_non_cqe_ice_init(struct sdhci_host *host)
>> +{
>> + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
>> + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
>> + struct mmc_host *mmc = msm_host->mmc;
>> + struct cqhci_host *cq_host = mmc->cqe_private;
>> + u32 config;
>> + u32 ice_cap;
>> +
>> + config = sdhci_readl(host, HC_VENDOR_SPECIFIC_FUNC4);
>> + config &= ~DISABLE_CRYPTO;
>> + sdhci_writel(host, config, HC_VENDOR_SPECIFIC_FUNC4);
>> + ice_cap = cqhci_readl(cq_host, CQHCI_CAP);
>> + if (ice_cap & ICE_HCI_SUPPORT) {
>> + config = cqhci_readl(cq_host, CQHCI_CFG);
>> + config |= CRYPTO_GENERAL_ENABLE;
>> + cqhci_writel(cq_host, config, CQHCI_CFG);
>> + }
>> +}
>> +
>> +static int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq)
>> +{
>> + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
>> + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
>> + struct mmc_host *mmc = msm_host->mmc;
>> + struct cqhci_host *cq_host = mmc->cqe_private;
>> + unsigned int crypto_params = 0;
>> + int key_index;
>> + bool crypto_enable;
>> + u64 dun = 0;
>> +
>> + if (mrq->crypto_ctx) {
>> + if (!msm_host->ice_init_done) {
>> + sdhci_msm_non_cqe_ice_init(host);
>> + msm_host->ice_init_done = true;
>> + }
>> +
>> + crypto_enable = true;
>> + dun = mrq->crypto_ctx->bc_dun[0];
>> + key_index = mrq->crypto_key_slot;
>> + crypto_params = FIELD_PREP(ICE_HCI_PARAM_CE, crypto_enable) |
>> + FIELD_PREP(ICE_HCI_PARAM_CCI, key_index);
>> +
>> + cqhci_writel(cq_host, crypto_params, NONCQ_CRYPTO_PARM);
>> + cqhci_writel(cq_host, lower_32_bits(dun), NONCQ_CRYPTO_DUN);
>> + } else {
>> + cqhci_writel(cq_host, crypto_params, NONCQ_CRYPTO_PARM);
>> + }
>> +
>> + /* Ensure crypto configuration is written before proceeding */
>> + wmb();
>> +
>> + return 0;
>> +}
>> +
>> +/*
>> + * sdhci_msm_request - Handle non-CQE MMC requests with crypto support
>> + * @mmc: MMC host
>> + * @mrq: MMC request
>> + *
>> + * This function is called for non-CQE requests only. The MMC block layer
>> + * routes requests as follows:
>> + *
>> + * if (host->cqe_enabled)
>> + * ret = mmc_blk_cqe_issue_rw_rq(mq, req); // → cqhci_request()
>> + * else
>> + * ret = mmc_blk_mq_issue_rw_rq(mq, req); // → sdhci_msm_request()
>> + *
>> + * For CQE requests, crypto is handled in cqhci_request() in
>> + * drivers/mmc/host/cqhci-core.c using the existing CQE crypto infrastructure.
>> + *
>> + * For non-CQE requests, this function provides crypto support by configuring
>> + * the ICE (Inline Crypto Engine) registers before passing the request to
>> + * the standard SDHCI request handler.
>> + */
>
> Kernel-style is not to put kernel-doc like comments on call-back
> functions, since the functionality is defined by the upper layer,
> and there is no point duplicating the information for every single
> implementation.
Thanks for pointing this out. Will drop this comment from
sdhci_msm_request().
>
>> +static void sdhci_msm_request(struct mmc_host *mmc, struct mmc_request *mrq)
>> +{
>> + struct sdhci_host *host = mmc_priv(mmc);
>> +
>
> A simple comment here would suffice, say something like:
>
> /* Only need to handle non-CQE crypto requests in this path */
Ok, will add in the next revision.
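With that comment added, the request hook for the next revision would
look something like:

static void sdhci_msm_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host = mmc_priv(mmc);

	/* Only need to handle non-CQE crypto requests in this path */
	if (mmc->caps2 & MMC_CAP2_CRYPTO)
		sdhci_msm_ice_cfg(host, mrq);

	sdhci_request(mmc, mrq);
}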
>
Thanks,
Alam.