Provide iaa_crypto driver implementations for the newly added
crypto_acomp batch_compress() and batch_decompress() interfaces, using
acomp request chaining.

iaa_crypto also implements the new crypto_acomp get_batch_size() interface,
which returns an iaa_crypto driver-specific constant,
IAA_CRYPTO_MAX_BATCH_SIZE (currently set to 8U).

This allows swap modules such as zswap/zram to allocate the required
batching resources and then invoke fully asynchronous, parallel batch
compression/decompression of pages on systems with Intel IAA, by invoking
these APIs, respectively:

 crypto_acomp_batch_size(...);
 crypto_acomp_batch_compress(...);
 crypto_acomp_batch_decompress(...);

This enables zswap compress batching code to be developed in
a manner similar to the current single-page synchronous calls to:

 crypto_acomp_compress(...);
 crypto_acomp_decompress(...);

thereby facilitating an encapsulated and modular hand-off between the kernel
zswap/zram code and the crypto_acomp layer.
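
For illustration, here is a minimal caller-side sketch of the intended
hand-off. This is hypothetical zswap-style code, not part of this patch:
the acomp_ctx handle, the reqs/pages/dsts/dlens/errors arrays, nr_folios
and the fallback label are assumed to exist in the caller, and the sketch
assumes the crypto_acomp_batch_compress() wrapper takes the same arguments
as the driver batch_compress() callback added below:

 struct crypto_wait wait;
 unsigned int nr_pages;

 crypto_init_wait(&wait);

 /* Cap the batch at what the driver supports (8 for IAA). */
 nr_pages = min(nr_folios, crypto_acomp_batch_size(acomp_ctx->acomp));

 /*
  * One call compresses all nr_pages pages; per-page status is
  * returned in errors[], compressed lengths in dlens[].
  */
 if (!crypto_acomp_batch_compress(reqs, &wait, pages, dsts,
      dlens, errors, nr_pages))
  goto fallback_to_sequential_compression;
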
Since iaa_crypto supports acomp request chaining, also add
CRYPTO_ALG_REQ_CHAIN to the iaa_acomp_fixed_deflate algorithm's
cra_flags.

Suggested-by: Yosry Ahmed <yosryahmed@google.com>
Suggested-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Kanchana P Sridhar <kanchana.p.sridhar@intel.com>
---
drivers/crypto/intel/iaa/iaa_crypto.h | 9 +
drivers/crypto/intel/iaa/iaa_crypto_main.c | 395 ++++++++++++++++++++-
2 files changed, 403 insertions(+), 1 deletion(-)
diff --git a/drivers/crypto/intel/iaa/iaa_crypto.h b/drivers/crypto/intel/iaa/iaa_crypto.h
index 56985e395263..b3b67c44ec8a 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto.h
+++ b/drivers/crypto/intel/iaa/iaa_crypto.h
@@ -39,6 +39,15 @@
IAA_DECOMP_CHECK_FOR_EOB | \
IAA_DECOMP_STOP_ON_EOB)
+/*
+ * The maximum compress/decompress batch size for IAA's implementation of
+ * the crypto_acomp batch_compress() and batch_decompress() interfaces.
+ * The IAA compression algorithms should provide the crypto_acomp
+ * get_batch_size() interface through a function that returns this
+ * constant.
+ */
+#define IAA_CRYPTO_MAX_BATCH_SIZE 8U
+
/* Representation of IAA workqueue */
struct iaa_wq {
struct list_head list;
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
index 29d03df39fab..b51b0b4b9ac3 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -1807,6 +1807,396 @@ static void compression_ctx_init(struct iaa_compression_ctx *ctx)
ctx->use_irq = use_irq;
}
+static int iaa_comp_poll(struct acomp_req *req)
+{
+ struct idxd_desc *idxd_desc;
+ struct idxd_device *idxd;
+ struct iaa_wq *iaa_wq;
+ struct pci_dev *pdev;
+ struct device *dev;
+ struct idxd_wq *wq;
+ bool compress_op;
+ int ret;
+
+ idxd_desc = req->base.data;
+ if (!idxd_desc)
+ return -EAGAIN;
+
+ compress_op = (idxd_desc->iax_hw->opcode == IAX_OPCODE_COMPRESS);
+ wq = idxd_desc->wq;
+ iaa_wq = idxd_wq_get_private(wq);
+ idxd = iaa_wq->iaa_device->idxd;
+ pdev = idxd->pdev;
+ dev = &pdev->dev;
+
+ ret = check_completion(dev, idxd_desc->iax_completion, true, true);
+ if (ret == -EAGAIN)
+ return ret;
+ if (ret)
+ goto out;
+
+ req->dlen = idxd_desc->iax_completion->output_size;
+
+ /* Update stats */
+ if (compress_op) {
+ update_total_comp_bytes_out(req->dlen);
+ update_wq_comp_bytes(wq, req->dlen);
+ } else {
+ update_total_decomp_bytes_in(req->slen);
+ update_wq_decomp_bytes(wq, req->slen);
+ }
+
+ if (iaa_verify_compress && compress_op) {
+ struct crypto_tfm *tfm = req->base.tfm;
+ dma_addr_t src_addr, dst_addr;
+ u32 compression_crc;
+
+ compression_crc = idxd_desc->iax_completion->crc;
+
+ dma_sync_sg_for_device(dev, req->dst, 1, DMA_FROM_DEVICE);
+ dma_sync_sg_for_device(dev, req->src, 1, DMA_TO_DEVICE);
+
+ src_addr = sg_dma_address(req->src);
+ dst_addr = sg_dma_address(req->dst);
+
+ ret = iaa_compress_verify(tfm, req, wq, src_addr, req->slen,
+ dst_addr, &req->dlen, compression_crc);
+ }
+out:
+ /* caller doesn't call crypto_wait_req, so no acomp_request_complete() */
+
+ dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
+ dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
+
+ idxd_free_desc(idxd_desc->wq, idxd_desc);
+
+ dev_dbg(dev, "%s: returning ret=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static unsigned int iaa_comp_get_batch_size(void)
+{
+ return IAA_CRYPTO_MAX_BATCH_SIZE;
+}
+
+static void iaa_set_req_poll(
+ struct acomp_req *reqs[],
+ int nr_reqs,
+ bool set_flag)
+{
+ int i;
+
+ for (i = 0; i < nr_reqs; ++i) {
+ if (set_flag)
+ reqs[i]->flags |= CRYPTO_ACOMP_REQ_POLL;
+ else
+ reqs[i]->flags &= ~CRYPTO_ACOMP_REQ_POLL;
+ }
+}
+
+/**
+ * iaa_comp_acompress_batch() - Compress a batch of pages using IAA.
+ * @reqs: @nr_pages asynchronous compress requests.
+ * @wait: crypto_wait for acomp batch compress implemented using request
+ * chaining. Required if async_mode is "false". If async_mode is "true"
+ * and @wait is NULL, the completions will be processed by asynchronously
+ * polling the requests' completion statuses.
+ * @pages: Pages to be compressed by IAA.
+ * @dsts: Pre-allocated destination buffers to store results of IAA
+ * compression. Each element of @dsts must be of size "PAGE_SIZE * 2".
+ * @dlens: Will contain the compressed lengths.
+ * @errors: Zero on successful compression of the corresponding req, or an
+ * error code in case of error.
+ * @nr_pages: The number of pages, up to IAA_CRYPTO_MAX_BATCH_SIZE,
+ * to be compressed.
+ *
+ * This API provides IAA compress batching functionality for use by swap
+ * modules.
+ *
+ * Return: true if all compress requests complete successfully, false otherwise.
+ */
+static bool iaa_comp_acompress_batch(
+ struct acomp_req *reqs[],
+ struct crypto_wait *wait,
+ struct page *pages[],
+ u8 *dsts[],
+ unsigned int dlens[],
+ int errors[],
+ int nr_pages)
+{
+ struct scatterlist inputs[IAA_CRYPTO_MAX_BATCH_SIZE];
+ struct scatterlist outputs[IAA_CRYPTO_MAX_BATCH_SIZE];
+ bool compressions_done = false;
+ bool async = (async_mode && !use_irq);
+ bool async_poll = (async && !wait);
+ int i, err = 0;
+
+ BUG_ON(nr_pages > IAA_CRYPTO_MAX_BATCH_SIZE);
+ BUG_ON(!async && !wait);
+
+ if (async)
+ iaa_set_req_poll(reqs, nr_pages, true);
+ else
+ iaa_set_req_poll(reqs, nr_pages, false);
+
+ /*
+ * Prepare and submit acomp_reqs to IAA. IAA will process these
+ * compress jobs in parallel if async_mode is true.
+ */
+ for (i = 0; i < nr_pages; ++i) {
+ sg_init_table(&inputs[i], 1);
+ sg_set_page(&inputs[i], pages[i], PAGE_SIZE, 0);
+
+ /*
+ * Each dst buffer should be of size (PAGE_SIZE * 2).
+ * Reflect this in the sg_list.
+ */
+ sg_init_one(&outputs[i], dsts[i], PAGE_SIZE * 2);
+ acomp_request_set_params(reqs[i], &inputs[i],
+ &outputs[i], PAGE_SIZE, dlens[i]);
+
+ /*
+ * As long as the API is called with a valid "wait", chain the
+ * requests for synchronous/asynchronous compress ops.
+ * If async_mode is in effect, but the API is called with a
+ * NULL "wait", submit the requests first, and poll for
+ * their completion status later, after all descriptors have
+ * been submitted.
+ */
+ if (!async_poll) {
+ /* acomp request chaining. */
+ if (i)
+ acomp_request_chain(reqs[i], reqs[0]);
+ else
+ acomp_reqchain_init(reqs[0], 0, crypto_req_done,
+ wait);
+ } else {
+ errors[i] = iaa_comp_acompress(reqs[i]);
+
+ if (errors[i] != -EINPROGRESS) {
+ errors[i] = -EINVAL;
+ err = -EINVAL;
+ } else {
+ errors[i] = -EAGAIN;
+ }
+ }
+ }
+
+ if (!async_poll) {
+ if (async)
+ /* Process the request chain in parallel. */
+ err = crypto_wait_req(acomp_do_async_req_chain(reqs[0],
+ iaa_comp_acompress, iaa_comp_poll),
+ wait);
+ else
+ /* Process the request chain in series. */
+ err = crypto_wait_req(acomp_do_req_chain(reqs[0],
+ iaa_comp_acompress), wait);
+
+ for (i = 0; i < nr_pages; ++i) {
+ errors[i] = acomp_request_err(reqs[i]);
+ if (errors[i]) {
+ err = -EINVAL;
+ pr_debug("Request chaining req %d compress error %d\n", i, errors[i]);
+ } else {
+ dlens[i] = reqs[i]->dlen;
+ }
+ }
+
+ goto reset_reqs;
+ }
+
+ /*
+ * Asynchronously poll for and process IAA compress job completions.
+ */
+ while (!compressions_done) {
+ compressions_done = true;
+
+ for (i = 0; i < nr_pages; ++i) {
+ /*
+ * Skip, if the compression has already completed
+ * successfully or with an error.
+ */
+ if (errors[i] != -EAGAIN)
+ continue;
+
+ errors[i] = iaa_comp_poll(reqs[i]);
+
+ if (errors[i]) {
+ if (errors[i] == -EAGAIN)
+ compressions_done = false;
+ else
+ err = -EINVAL;
+ } else {
+ dlens[i] = reqs[i]->dlen;
+ }
+ }
+ }
+
+reset_reqs:
+ /*
+ * For the same 'reqs[]' to be usable by
+ * iaa_comp_acompress()/iaa_comp_adecompress(),
+ * clear the CRYPTO_ACOMP_REQ_POLL bit on all acomp_reqs, and the
+ * CRYPTO_TFM_REQ_CHAIN bit on reqs[0].
+ */
+ iaa_set_req_poll(reqs, nr_pages, false);
+ if (!async_poll)
+ acomp_reqchain_clear(reqs[0], wait);
+
+ return !err;
+}
+
+/**
+ * iaa_comp_adecompress_batch() - Decompress a batch of buffers using IAA.
+ * @reqs: @nr_pages asynchronous decompress requests.
+ * @wait: crypto_wait for acomp batch decompress implemented using request
+ * chaining. Required if async_mode is "false". If async_mode is "true"
+ * and @wait is NULL, the completions will be processed by asynchronously
+ * polling the requests' completion statuses.
+ * @srcs: The src buffers to be decompressed by IAA.
+ * @pages: The pages to store the decompressed buffers.
+ * @slens: Compressed lengths of @srcs.
+ * @errors: Zero on successful decompression of the corresponding req, or an
+ * error code in case of error.
+ * @nr_pages: The number of pages, up to IAA_CRYPTO_MAX_BATCH_SIZE,
+ * to be decompressed.
+ *
+ * This API provides IAA decompress batching functionality for use by swap
+ * modules.
+ *
+ * Return: true if all decompress requests complete successfully, false otherwise.
+ */
+static bool iaa_comp_adecompress_batch(
+ struct acomp_req *reqs[],
+ struct crypto_wait *wait,
+ u8 *srcs[],
+ struct page *pages[],
+ unsigned int slens[],
+ int errors[],
+ int nr_pages)
+{
+ struct scatterlist inputs[IAA_CRYPTO_MAX_BATCH_SIZE];
+ struct scatterlist outputs[IAA_CRYPTO_MAX_BATCH_SIZE];
+ unsigned int dlens[IAA_CRYPTO_MAX_BATCH_SIZE];
+ bool decompressions_done = false;
+ bool async = (async_mode && !use_irq);
+ bool async_poll = (async && !wait);
+ int i, err = 0;
+
+ BUG_ON(nr_pages > IAA_CRYPTO_MAX_BATCH_SIZE);
+ BUG_ON(!async && !wait);
+
+ if (async)
+ iaa_set_req_poll(reqs, nr_pages, true);
+ else
+ iaa_set_req_poll(reqs, nr_pages, false);
+
+ /*
+ * Prepare and submit acomp_reqs to IAA. IAA will process these
+ * decompress jobs in parallel if async_mode is true.
+ */
+ for (i = 0; i < nr_pages; ++i) {
+ dlens[i] = PAGE_SIZE;
+ sg_init_one(&inputs[i], srcs[i], slens[i]);
+ sg_init_table(&outputs[i], 1);
+ sg_set_page(&outputs[i], pages[i], PAGE_SIZE, 0);
+ acomp_request_set_params(reqs[i], &inputs[i],
+ &outputs[i], slens[i], dlens[i]);
+
+ /*
+ * As long as the API is called with a valid "wait", chain the
+ * requests for synchronous/asynchronous decompress ops.
+ * If async_mode is in effect, but the API is called with a
+ * NULL "wait", submit the requests first, and poll for
+ * their completion status later, after all descriptors have
+ * been submitted.
+ */
+ if (!async_poll) {
+ /* acomp request chaining. */
+ if (i)
+ acomp_request_chain(reqs[i], reqs[0]);
+ else
+ acomp_reqchain_init(reqs[0], 0, crypto_req_done,
+ wait);
+ } else {
+ errors[i] = iaa_comp_adecompress(reqs[i]);
+
+ if (errors[i] != -EINPROGRESS) {
+ errors[i] = -EINVAL;
+ err = -EINVAL;
+ } else {
+ errors[i] = -EAGAIN;
+ }
+ }
+ }
+
+ if (!async_poll) {
+ if (async)
+ /* Process the request chain in parallel. */
+ err = crypto_wait_req(acomp_do_async_req_chain(reqs[0],
+ iaa_comp_adecompress, iaa_comp_poll),
+ wait);
+ else
+ /* Process the request chain in series. */
+ err = crypto_wait_req(acomp_do_req_chain(reqs[0],
+ iaa_comp_adecompress), wait);
+
+ for (i = 0; i < nr_pages; ++i) {
+ errors[i] = acomp_request_err(reqs[i]);
+ if (errors[i]) {
+ err = -EINVAL;
+ pr_debug("Request chaining req %d decompress error %d\n", i, errors[i]);
+ } else {
+ dlens[i] = reqs[i]->dlen;
+ BUG_ON(dlens[i] != PAGE_SIZE);
+ }
+ }
+
+ goto reset_reqs;
+ }
+
+ /*
+ * Asynchronously poll for and process IAA decompress job completions.
+ */
+ while (!decompressions_done) {
+ decompressions_done = true;
+
+ for (i = 0; i < nr_pages; ++i) {
+ /*
+ * Skip, if the decompression has already completed
+ * successfully or with an error.
+ */
+ if (errors[i] != -EAGAIN)
+ continue;
+
+ errors[i] = iaa_comp_poll(reqs[i]);
+
+ if (errors[i]) {
+ if (errors[i] == -EAGAIN)
+ decompressions_done = false;
+ else
+ err = -EINVAL;
+ } else {
+ dlens[i] = reqs[i]->dlen;
+ BUG_ON(dlens[i] != PAGE_SIZE);
+ }
+ }
+ }
+
+reset_reqs:
+ /*
+ * For the same 'reqs[]' to be usable by
+ * iaa_comp_acompress()/iaa_comp_adecompress(),
+ * clear the CRYPTO_ACOMP_REQ_POLL bit on all acomp_reqs, and the
+ * CRYPTO_TFM_REQ_CHAIN bit on reqs[0].
+ */
+ iaa_set_req_poll(reqs, nr_pages, false);
+ if (!async_poll)
+ acomp_reqchain_clear(reqs[0], wait);
+
+ return !err;
+}
+
static int iaa_comp_init_fixed(struct crypto_acomp *acomp_tfm)
{
struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
@@ -1832,10 +2222,13 @@ static struct acomp_alg iaa_acomp_fixed_deflate = {
.compress = iaa_comp_acompress,
.decompress = iaa_comp_adecompress,
.dst_free = dst_free,
+ .get_batch_size = iaa_comp_get_batch_size,
+ .batch_compress = iaa_comp_acompress_batch,
+ .batch_decompress = iaa_comp_adecompress_batch,
.base = {
.cra_name = "deflate",
.cra_driver_name = "deflate-iaa",
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_REQ_CHAIN,
.cra_ctxsize = sizeof(struct iaa_compression_ctx),
.cra_module = THIS_MODULE,
.cra_priority = IAA_ALG_PRIORITY,
--
2.27.0
Hi Kanchana,

kernel test robot noticed the following build warnings:

[auto build test WARNING on 5555a83c82d66729e4abaf16ae28d6bd81f9a64a]

url:    https://github.com/intel-lab-lkp/linux/commits/Kanchana-P-Sridhar/crypto-acomp-Add-synchronous-asynchronous-acomp-request-chaining/20241221-143254
base:   5555a83c82d66729e4abaf16ae28d6bd81f9a64a
patch link:    https://lore.kernel.org/r/20241221063119.29140-5-kanchana.p.sridhar%40intel.com
patch subject: [PATCH v5 04/12] crypto: iaa - Implement batch_compress(), batch_decompress() API in iaa_crypto.
config: x86_64-allyesconfig (https://download.01.org/0day-ci/archive/20241222/202412221117.i9BKx0mV-lkp@intel.com/config)
compiler: clang version 19.1.3 (https://github.com/llvm/llvm-project ab51eccf88f5321e7c60591c5546b254b6afab99)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20241222/202412221117.i9BKx0mV-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202412221117.i9BKx0mV-lkp@intel.com/

All warnings (new ones prefixed by >>):

>> drivers/crypto/intel/iaa/iaa_crypto_main.c:1897: warning: This comment starts with '/**', but isn't a kernel-doc comment. Refer Documentation/doc-guide/kernel-doc.rst
    * This API provides IAA compress batching functionality for use by swap
   drivers/crypto/intel/iaa/iaa_crypto_main.c:2050: warning: This comment starts with '/**', but isn't a kernel-doc comment. Refer Documentation/doc-guide/kernel-doc.rst
    * This API provides IAA decompress batching functionality for use by swap

--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki