Add the same request parameter checks on the request (driver) side.

Fixes: b2866f473369 ("vhost/crypto: fix missed request check for copy mode")
Cc: roy.fan.zh...@intel.com
Cc: sta...@dpdk.org

Signed-off-by: Radu Nicolau <radu.nico...@intel.com>
---
 drivers/crypto/virtio/virtio_rxtx.c | 40 +++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)

diff --git a/drivers/crypto/virtio/virtio_rxtx.c b/drivers/crypto/virtio/virtio_rxtx.c
index 0cc904485c..afdb8fb406 100644
--- a/drivers/crypto/virtio/virtio_rxtx.c
+++ b/drivers/crypto/virtio/virtio_rxtx.c
@@ -193,6 +193,40 @@ virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
        return i;
 }
 
+/*
+ * Validate a cipher-only request before enqueue: the IV length must not
+ * exceed VIRTIO_CRYPTO_MAX_IV_SIZE and the source/destination data must
+ * fit in a default mbuf, with the destination able to hold at least the
+ * source data. Returns VIRTIO_CRYPTO_OK or VIRTIO_CRYPTO_BADMSG.
+ */
+static __rte_always_inline uint8_t
+virtqueue_crypto_check_cipher_request(struct virtio_crypto_cipher_data_req *req)
+{
+       if (likely((req->para.iv_len <= VIRTIO_CRYPTO_MAX_IV_SIZE) &&
+               (req->para.src_data_len <= RTE_MBUF_DEFAULT_BUF_SIZE) &&
+               (req->para.dst_data_len >= req->para.src_data_len) &&
+               (req->para.dst_data_len <= RTE_MBUF_DEFAULT_BUF_SIZE)))
+               return VIRTIO_CRYPTO_OK;
+       return VIRTIO_CRYPTO_BADMSG;
+}
+
+/*
+ * Validate an algorithm-chaining (cipher + hash) request before enqueue.
+ * Every length/offset is bounded by RTE_MBUF_DEFAULT_BUF_SIZE *before*
+ * it participates in any addition, so none of the uint32_t sums below
+ * can wrap around and defeat the range checks.
+ * Returns VIRTIO_CRYPTO_OK or VIRTIO_CRYPTO_BADMSG.
+ */
+static __rte_always_inline uint8_t
+virtqueue_crypto_check_chain_request(struct virtio_crypto_alg_chain_data_req *req)
+{
+       if (likely((req->para.iv_len <= VIRTIO_CRYPTO_MAX_IV_SIZE) &&
+               (req->para.src_data_len <= RTE_MBUF_DEFAULT_BUF_SIZE) &&
+               (req->para.dst_data_len >= req->para.src_data_len) &&
+               (req->para.dst_data_len <= RTE_MBUF_DEFAULT_BUF_SIZE) &&
+               (req->para.cipher_start_src_offset <
+                       RTE_MBUF_DEFAULT_BUF_SIZE) &&
+               (req->para.len_to_cipher <= RTE_MBUF_DEFAULT_BUF_SIZE) &&
+               (req->para.hash_start_src_offset <
+                       RTE_MBUF_DEFAULT_BUF_SIZE) &&
+               (req->para.len_to_hash <= RTE_MBUF_DEFAULT_BUF_SIZE) &&
+               /* bound hash_result_len before it is used in a sum below,
+                * otherwise a huge value wraps the uint32_t addition and
+                * slips past the destination-size check
+                */
+               (req->para.hash_result_len <= RTE_MBUF_DEFAULT_BUF_SIZE) &&
+               (req->para.cipher_start_src_offset + req->para.len_to_cipher <=
+                       req->para.src_data_len) &&
+               (req->para.hash_start_src_offset + req->para.len_to_hash <=
+                       req->para.src_data_len) &&
+               (req->para.dst_data_len + req->para.hash_result_len <=
+                       RTE_MBUF_DEFAULT_BUF_SIZE)))
+               return VIRTIO_CRYPTO_OK;
+       return VIRTIO_CRYPTO_BADMSG;
+}
+
 static inline int
 virtqueue_crypto_sym_pkt_header_arrange(
                struct rte_crypto_op *cop,
@@ -228,6 +262,9 @@ virtqueue_crypto_sym_pkt_header_arrange(
                                sym_op->cipher.data.offset);
                req_data->u.sym_req.u.cipher.para.dst_data_len =
                        req_data->u.sym_req.u.cipher.para.src_data_len;
+               if (virtqueue_crypto_check_cipher_request(
+                       &req_data->u.sym_req.u.cipher) != VIRTIO_CRYPTO_OK)
+                       return -1;
                break;
        case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
                req_data->u.sym_req.op_type =
@@ -265,6 +302,9 @@ virtqueue_crypto_sym_pkt_header_arrange(
                        VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)
                        req_data->u.sym_req.u.chain.para.hash_result_len =
                                chain_para->u.mac_param.hash_result_len;
+               if (virtqueue_crypto_check_chain_request(
+                       &req_data->u.sym_req.u.chain) != VIRTIO_CRYPTO_OK)
+                       return -1;
                break;
        default:
                return -1;
-- 
2.43.0

Reply via email to