This patch improves the send error path, which wasn't handling all error
cases: some failures only rolled back the command descriptors and left the
scatterlist DMA mapping (and in one case the result mapping) in place. A new
unmap_sg label is added and the relevant gotos are updated to point to the
right labels, so that the error path unwinds everything it has set up.
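
For illustration only, below is a generic sketch of the stacked-unwind
pattern the labels now follow. It uses plain malloc()/free() stand-ins
rather than the driver's DMA helpers; the point is that each error goto
jumps to the label releasing everything set up before the failing step,
and the labels fall through from the newest resource to the oldest:

  #include <errno.h>
  #include <stdlib.h>

  /* Hypothetical example, not driver code. */
  static int setup_chain(void)
  {
  	void *first, *second, *third;
  	int ret;

  	first = malloc(16);		/* step 1 */
  	if (!first)
  		return -ENOMEM;

  	second = malloc(16);		/* step 2: on failure, undo step 1 */
  	if (!second) {
  		ret = -ENOMEM;
  		goto free_first;
  	}

  	third = malloc(16);		/* step 3: on failure, undo steps 2 and 1 */
  	if (!third) {
  		ret = -ENOMEM;
  		goto free_second;
  	}

  	/* Success: release everything here to keep the example leak-free. */
  	free(third);
  	free(second);
  	free(first);
  	return 0;

  free_second:
  	free(second);
  free_first:
  	free(first);
  	return ret;
  }

With the new unmap_sg label the driver follows the same structure: a command
descriptor or result mapping failure now also unmaps the scatterlist instead
of only rolling back the descriptors.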

Signed-off-by: Antoine Tenart <antoine.ten...@bootlin.com>
---
 drivers/crypto/inside-secure/safexcel_hash.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index bb2be12a8f4a..ef3e0c1c0f2c 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -279,7 +279,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
                                           sglen, len, ctx->base.ctxr_dma);
                if (IS_ERR(cdesc)) {
                        ret = PTR_ERR(cdesc);
-                       goto cdesc_rollback;
+                       goto unmap_sg;
                }
                n_cdesc++;
 
@@ -303,7 +303,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
                                         DMA_FROM_DEVICE);
        if (dma_mapping_error(priv->dev, req->result_dma)) {
                ret = -EINVAL;
-               goto cdesc_rollback;
+               goto unmap_sg;
        }
 
        /* Add a result descriptor */
@@ -324,6 +324,9 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
        return 0;
 
 unmap_result:
+       dma_unmap_single(priv->dev, req->result_dma, req->state_sz,
+                        DMA_FROM_DEVICE);
+unmap_sg:
        dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE);
 cdesc_rollback:
        for (i = 0; i < n_cdesc; i++)
-- 
2.14.3
