Provide iaa_crypto driver implementations for the newly added
crypto_acomp batch_compress() and batch_decompress() interfaces.
This allows swap modules such as zswap/zram to compress/decompress batches
of pages in parallel on systems with Intel IAA, by calling these APIs,
respectively:
crypto_acomp_batch_compress(...);
crypto_acomp_batch_decompress(...);
This enables the zswap_batch_store() compress-batching code to be developed
in a manner similar to the current single-page synchronous calls to:
crypto_acomp_compress(...);
crypto_acomp_decompress(...);
thereby facilitating an encapsulated and modular hand-off between the
kernel zswap/zram code and the crypto_acomp layer.
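
For illustration, a swap module might invoke the compress batching API
roughly as follows (a minimal sketch, not part of this patch: it assumes
"reqs", "wait", "pages" and "dsts" have already been set up against a
"deflate-iaa" acomp tfm, and relies on crypto_acomp_batch_compress()
taking the same arguments as the driver callback implemented below):

	unsigned int dlens[CRYPTO_BATCH_SIZE];
	int errors[CRYPTO_BATCH_SIZE], i;

	/* Compress up to CRYPTO_BATCH_SIZE pages in one batch. */
	crypto_acomp_batch_compress(reqs, &wait, pages, dsts,
				    dlens, errors, nr_pages);

	for (i = 0; i < nr_pages; ++i) {
		if (errors[i]) {
			/* e.g., fall back to the single-page path. */
		}
	}

crypto_acomp_batch_decompress() is symmetric, taking the compressed
buffers "srcs" and their lengths "slens" as input and the pages to
decompress into as output.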
Suggested-by: Yosry Ahmed <yosryahmed@google.com>
Suggested-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Kanchana P Sridhar <kanchana.p.sridhar@intel.com>
---
drivers/crypto/intel/iaa/iaa_crypto_main.c | 337 +++++++++++++++++++++
1 file changed, 337 insertions(+)
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
index 2edaecd42cc6..cbf147a3c3cb 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -1797,6 +1797,341 @@ static void compression_ctx_init(struct iaa_compression_ctx *ctx)
ctx->use_irq = use_irq;
}
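+
+/*
+ * Poll for completion of a request previously submitted in async-poll
+ * mode. Returns -EAGAIN if the hardware has not yet completed the
+ * descriptor; on completion, record the output size, update the stats,
+ * optionally verify the compressed output, then unmap the DMA buffers
+ * and free the idxd descriptor.
+ */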
+static int iaa_comp_poll(struct acomp_req *req)
+{
+ struct idxd_desc *idxd_desc;
+ struct idxd_device *idxd;
+ struct iaa_wq *iaa_wq;
+ struct pci_dev *pdev;
+ struct device *dev;
+ struct idxd_wq *wq;
+ bool compress_op;
+ int ret;
+
+ idxd_desc = req->base.data;
+ if (!idxd_desc)
+ return -EAGAIN;
+
+ compress_op = (idxd_desc->iax_hw->opcode == IAX_OPCODE_COMPRESS);
+ wq = idxd_desc->wq;
+ iaa_wq = idxd_wq_get_private(wq);
+ idxd = iaa_wq->iaa_device->idxd;
+ pdev = idxd->pdev;
+ dev = &pdev->dev;
+
+ ret = check_completion(dev, idxd_desc->iax_completion, true, true);
+ if (ret == -EAGAIN)
+ return ret;
+ if (ret)
+ goto out;
+
+ req->dlen = idxd_desc->iax_completion->output_size;
+
+ /* Update stats */
+ if (compress_op) {
+ update_total_comp_bytes_out(req->dlen);
+ update_wq_comp_bytes(wq, req->dlen);
+ } else {
+ update_total_decomp_bytes_in(req->slen);
+ update_wq_decomp_bytes(wq, req->slen);
+ }
+
+ if (iaa_verify_compress && compress_op) {
+ struct crypto_tfm *tfm = req->base.tfm;
+ dma_addr_t src_addr, dst_addr;
+ u32 compression_crc;
+
+ compression_crc = idxd_desc->iax_completion->crc;
+
+ dma_sync_sg_for_device(dev, req->dst, 1, DMA_FROM_DEVICE);
+ dma_sync_sg_for_device(dev, req->src, 1, DMA_TO_DEVICE);
+
+ src_addr = sg_dma_address(req->src);
+ dst_addr = sg_dma_address(req->dst);
+
+ ret = iaa_compress_verify(tfm, req, wq, src_addr, req->slen,
+ dst_addr, &req->dlen, compression_crc);
+ }
+out:
+ /* caller doesn't call crypto_wait_req, so no acomp_request_complete() */
+
+ dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
+ dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
+
+ idxd_free_desc(idxd_desc->wq, idxd_desc);
+
+ dev_dbg(dev, "%s: returning ret=%d\n", __func__, ret);
+
+ return ret;
+}
+
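+/*
+ * Set or clear CRYPTO_ACOMP_REQ_POLL on each request in a batch,
+ * depending on whether the driver will poll for completions.
+ */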
+static void iaa_set_req_poll(
+ struct acomp_req *reqs[],
+ int nr_reqs,
+ bool set_flag)
+{
+ int i;
+
+ for (i = 0; i < nr_reqs; ++i) {
+ if (set_flag)
+ reqs[i]->flags |= CRYPTO_ACOMP_REQ_POLL;
+ else
+ reqs[i]->flags &= ~CRYPTO_ACOMP_REQ_POLL;
+ }
+}
+
+/**
+ * iaa_comp_acompress_batch() - IAA compress batching for swap modules.
+ *
+ * @reqs: @nr_pages asynchronous compress requests.
+ * @wait: crypto_wait for synchronous acomp batch compress. If NULL, the
+ * completions will be processed asynchronously.
+ * @pages: Pages to be compressed by IAA in parallel.
+ * @dsts: Pre-allocated destination buffers to store results of IAA
+ * compression. Each element of @dsts must be of size "PAGE_SIZE * 2".
+ * @dlens: Will contain the compressed lengths.
+ * @errors: zero on successful compression of the corresponding
+ * req, or error code in case of error.
+ * @nr_pages: The number of pages, up to CRYPTO_BATCH_SIZE,
+ * to be compressed.
+ */
+static void iaa_comp_acompress_batch(
+ struct acomp_req *reqs[],
+ struct crypto_wait *wait,
+ struct page *pages[],
+ u8 *dsts[],
+ unsigned int dlens[],
+ int errors[],
+ int nr_pages)
+{
+ struct scatterlist inputs[CRYPTO_BATCH_SIZE];
+ struct scatterlist outputs[CRYPTO_BATCH_SIZE];
+ bool compressions_done = false;
+ bool poll = (async_mode && !use_irq);
+ int i;
+
+ BUG_ON(nr_pages > CRYPTO_BATCH_SIZE);
+ BUG_ON(!poll && !wait);
+
+ iaa_set_req_poll(reqs, nr_pages, poll);
+
+ /*
+ * Prepare and submit acomp_reqs to IAA. IAA will process these
+ * compress jobs in parallel if async-poll mode is enabled.
+ * If IAA is used in sync mode, the jobs will be processed sequentially
+ * using "wait".
+ */
+ for (i = 0; i < nr_pages; ++i) {
+ sg_init_table(&inputs[i], 1);
+ sg_set_page(&inputs[i], pages[i], PAGE_SIZE, 0);
+
+ /*
+ * Each dst buffer should be of size (PAGE_SIZE * 2).
+ * Reflect this in the sg_list.
+ */
+ sg_init_one(&outputs[i], dsts[i], PAGE_SIZE * 2);
+ acomp_request_set_params(reqs[i], &inputs[i],
+ &outputs[i], PAGE_SIZE, dlens[i]);
+
+ /*
+ * If poll is in effect, submit the request now, and poll for
+ * a completion status later, after all descriptors have been
+ * submitted. If polling is not enabled, submit the request
+ * and wait for it to complete, i.e., synchronously, before
+ * moving on to the next request.
+ */
+ if (poll) {
+ errors[i] = iaa_comp_acompress(reqs[i]);
+
+ if (errors[i] != -EINPROGRESS)
+ errors[i] = -EINVAL;
+ else
+ errors[i] = -EAGAIN;
+ } else {
+ acomp_request_set_callback(reqs[i],
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, wait);
+ errors[i] = crypto_wait_req(iaa_comp_acompress(reqs[i]),
+ wait);
+ if (!errors[i])
+ dlens[i] = reqs[i]->dlen;
+ }
+ }
+
+ /*
+ * If not doing async compressions, the batch has been processed at
+ * this point and we can return.
+ */
+ if (!poll)
+ goto reset_reqs_wait;
+
+ /*
+ * Poll for and process IAA compress job completions in an
+ * out-of-order manner.
+ */
+ while (!compressions_done) {
+ compressions_done = true;
+
+ for (i = 0; i < nr_pages; ++i) {
+ /*
+ * Skip, if the compression has already completed
+ * successfully or with an error.
+ */
+ if (errors[i] != -EAGAIN)
+ continue;
+
+ errors[i] = iaa_comp_poll(reqs[i]);
+
+ if (errors[i]) {
+ if (errors[i] == -EAGAIN)
+ compressions_done = false;
+ } else {
+ dlens[i] = reqs[i]->dlen;
+ }
+ }
+ }
+
+reset_reqs_wait:
+ /*
+ * For the same 'reqs[]' and 'wait' to be usable by
+ * iaa_comp_acompress()/iaa_comp_adecompress():
+ * Clear the CRYPTO_ACOMP_REQ_POLL bit on the acomp_reqs.
+ * Reset the crypto_wait "wait" callback to reqs[0].
+ */
+ iaa_set_req_poll(reqs, nr_pages, false);
+ acomp_request_set_callback(reqs[0],
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, wait);
+}
+
+/**
+ * iaa_comp_adecompress_batch() - IAA decompress batching for swap modules.
+ *
+ * @reqs: @nr_pages asynchronous decompress requests.
+ * @wait: crypto_wait for synchronous acomp batch decompress. If NULL, the
+ * completions will be processed asynchronously.
+ * @srcs: The src buffers to be decompressed by IAA in parallel.
+ * @pages: The pages to store the decompressed buffers.
+ * @slens: Compressed lengths of @srcs.
+ * @errors: zero on successful decompression of the corresponding
+ * req, or error code in case of error.
+ * @nr_pages: The number of pages, up to CRYPTO_BATCH_SIZE,
+ * to be decompressed.
+ */
+static void iaa_comp_adecompress_batch(
+ struct acomp_req *reqs[],
+ struct crypto_wait *wait,
+ u8 *srcs[],
+ struct page *pages[],
+ unsigned int slens[],
+ int errors[],
+ int nr_pages)
+{
+ struct scatterlist inputs[CRYPTO_BATCH_SIZE];
+ struct scatterlist outputs[CRYPTO_BATCH_SIZE];
+ unsigned int dlens[CRYPTO_BATCH_SIZE];
+ bool decompressions_done = false;
+ bool poll = (async_mode && !use_irq);
+ int i;
+
+ BUG_ON(nr_pages > CRYPTO_BATCH_SIZE);
+ BUG_ON(!poll && !wait);
+
+ iaa_set_req_poll(reqs, nr_pages, poll);
+
+ /*
+ * Prepare and submit acomp_reqs to IAA. IAA will process these
+ * decompress jobs in parallel if async-poll mode is enabled.
+ * If IAA is used in sync mode, the jobs will be processed sequentially
+ * using "wait".
+ */
+ for (i = 0; i < nr_pages; ++i) {
+ dlens[i] = PAGE_SIZE;
+ sg_init_one(&inputs[i], srcs[i], slens[i]);
+ sg_init_table(&outputs[i], 1);
+ sg_set_page(&outputs[i], pages[i], PAGE_SIZE, 0);
+ acomp_request_set_params(reqs[i], &inputs[i],
+ &outputs[i], slens[i], dlens[i]);
+ /*
+ * If poll is in effect, submit the request now, and poll for
+ * a completion status later, after all descriptors have been
+ * submitted. If polling is not enabled, submit the request
+ * and wait for it to complete, i.e., synchronously, before
+ * moving on to the next request.
+ */
+ if (poll) {
+ errors[i] = iaa_comp_adecompress(reqs[i]);
+
+ if (errors[i] != -EINPROGRESS)
+ errors[i] = -EINVAL;
+ else
+ errors[i] = -EAGAIN;
+ } else {
+ acomp_request_set_callback(reqs[i],
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, wait);
+ errors[i] = crypto_wait_req(iaa_comp_adecompress(reqs[i]),
+ wait);
+ if (!errors[i]) {
+ dlens[i] = reqs[i]->dlen;
+ BUG_ON(dlens[i] != PAGE_SIZE);
+ }
+ }
+ }
+
+ /*
+ * If not doing async decompressions, the batch has been processed at
+ * this point and we can return.
+ */
+ if (!poll)
+ goto reset_reqs_wait;
+
+ /*
+ * Poll for and process IAA decompress job completions in an
+ * out-of-order manner.
+ */
+ while (!decompressions_done) {
+ decompressions_done = true;
+
+ for (i = 0; i < nr_pages; ++i) {
+ /*
+ * Skip, if the decompression has already completed
+ * successfully or with an error.
+ */
+ if (errors[i] != -EAGAIN)
+ continue;
+
+ errors[i] = iaa_comp_poll(reqs[i]);
+
+ if (errors[i]) {
+ if (errors[i] == -EAGAIN)
+ decompressions_done = false;
+ } else {
+ dlens[i] = reqs[i]->dlen;
+ BUG_ON(dlens[i] != PAGE_SIZE);
+ }
+ }
+ }
+
+reset_reqs_wait:
+ /*
+ * For the same 'reqs[]' and 'wait' to be usable by
+ * iaa_comp_acompress()/iaa_comp_adecompress():
+ * Clear the CRYPTO_ACOMP_REQ_POLL bit on the acomp_reqs.
+ * Reset the crypto_wait "wait" callback to reqs[0].
+ */
+ iaa_set_req_poll(reqs, nr_pages, false);
+ acomp_request_set_callback(reqs[0],
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, wait);
+}
+
static int iaa_comp_init_fixed(struct crypto_acomp *acomp_tfm)
{
struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
@@ -1822,6 +2157,8 @@ static struct acomp_alg iaa_acomp_fixed_deflate = {
.compress = iaa_comp_acompress,
.decompress = iaa_comp_adecompress,
.dst_free = dst_free,
+ .batch_compress = iaa_comp_acompress_batch,
+ .batch_decompress = iaa_comp_adecompress_batch,
.base = {
.cra_name = "deflate",
.cra_driver_name = "deflate-iaa",
--
2.27.0
Hi Kanchana,
kernel test robot noticed the following build warnings:
[auto build test WARNING on 5a7056135bb69da2ce0a42eb8c07968c1331777b]
url: https://github.com/intel-lab-lkp/linux/commits/Kanchana-P-Sridhar/crypto-acomp-Define-two-new-interfaces-for-compress-decompress-batching/20241125-110412
base: 5a7056135bb69da2ce0a42eb8c07968c1331777b
patch link: https://lore.kernel.org/r/20241123070127.332773-4-kanchana.p.sridhar%40intel.com
patch subject: [PATCH v4 03/10] crypto: iaa - Implement batch_compress(), batch_decompress() API in iaa_crypto.
config: x86_64-rhel-9.4 (https://download.01.org/0day-ci/archive/20241126/202411261737.ozFff8Ym-lkp@intel.com/config)
compiler: gcc-12 (Debian 12.2.0-14) 12.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20241126/202411261737.ozFff8Ym-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202411261737.ozFff8Ym-lkp@intel.com/
All warnings (new ones prefixed by >>):
>> drivers/crypto/intel/iaa/iaa_crypto_main.c:1882: warning: This comment starts with '/**', but isn't a kernel-doc comment. Refer Documentation/doc-guide/kernel-doc.rst
* This API provides IAA compress batching functionality for use by swap
drivers/crypto/intel/iaa/iaa_crypto_main.c:2010: warning: This comment starts with '/**', but isn't a kernel-doc comment. Refer Documentation/doc-guide/kernel-doc.rst
* This API provides IAA decompress batching functionality for use by swap
vim +1882 drivers/crypto/intel/iaa/iaa_crypto_main.c
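
(The warnings arise because a comment opening with "/**" is parsed as
kernel-doc, which requires the first line to name the function being
documented. Per Documentation/doc-guide/kernel-doc.rst, a conformant
header takes roughly this shape; the short description here is only
illustrative:

	/**
	 * iaa_comp_acompress_batch() - IAA compress batching for swap modules.
	 * @reqs: @nr_pages asynchronous compress requests.
	 * ...
	 * @nr_pages: The number of pages, up to CRYPTO_BATCH_SIZE.
	 */

Alternatively, the comments could be demoted to plain "/*" comments.)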
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki