Add RTE_ETH_RX_OFFLOAD_SCATTER to the advertised receive offload
capabilities. Validate in rx_queue_setup that the mbuf pool data
room is large enough when scatter is not enabled, following the
same pattern as the virtio driver.

Gate the multi-segment receive path on the scatter offload flag
and drop oversized packets when scatter is disabled.
Reject scatter with infinite_rx mode since the ring-based replay
path does not support multi-segment mbufs.

Signed-off-by: Stephen Hemminger <[email protected]>
---
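(Usage note, not part of the commit message: a minimal sketch of how an
application would opt in to the new offload at configure time. The
configure_with_scatter() helper, the queue size, and the pool argument
are illustrative, not part of this patch.)

#include <rte_ethdev.h>

static int
configure_with_scatter(uint16_t port_id, struct rte_mempool *mb_pool)
{
        struct rte_eth_conf conf = { 0 };
        struct rte_eth_dev_info info;
        int ret;

        ret = rte_eth_dev_info_get(port_id, &info);
        if (ret != 0)
                return ret;

        /* Request Rx scatter only if the PMD advertises it. */
        if (info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_SCATTER)
                conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;

        ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
        if (ret != 0)
                return ret;

        /* With scatter set, the data-room check added below is skipped
         * and oversized packets are chained across mbuf segments rather
         * than dropped. */
        return rte_eth_rx_queue_setup(port_id, 0, 512,
                                      rte_eth_dev_socket_id(port_id),
                                      NULL, mb_pool);
}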
 drivers/net/pcap/pcap_ethdev.c | 47 +++++++++++++++++++++++++++++++---
 1 file changed, 44 insertions(+), 3 deletions(-)
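
(A second illustrative sketch: building a pool that satisfies the new
single-segment check when scatter is left disabled. The pool name,
element count, and cache size are made up; "mtu" is whatever the
application configures on the port.)

#include <rte_mbuf.h>
#include <rte_ether.h>

static struct rte_mempool *
make_rx_pool(uint16_t mtu, int socket_id)
{
        /* Data room must cover headroom plus the MTU-derived frame size
         * when RTE_ETH_RX_OFFLOAD_SCATTER is not set. */
        uint16_t data_room = RTE_PKTMBUF_HEADROOM + mtu + RTE_ETHER_HDR_LEN;

        return rte_pktmbuf_pool_create("pcap_rx", 8192, 256, 0,
                                       data_room, socket_id);
}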

diff --git a/drivers/net/pcap/pcap_ethdev.c b/drivers/net/pcap/pcap_ethdev.c
index 8a2b5c1b4b..d8a924b0cd 100644
--- a/drivers/net/pcap/pcap_ethdev.c
+++ b/drivers/net/pcap/pcap_ethdev.c
@@ -79,6 +79,7 @@ struct pcap_rx_queue {
        uint16_t port_id;
        uint16_t queue_id;
        bool vlan_strip;
+       bool rx_scatter;
        bool timestamp_offloading;
        struct rte_mempool *mb_pool;
        struct queue_stat rx_stat;
@@ -112,6 +113,7 @@ struct pmd_internals {
        bool phy_mac;
        bool infinite_rx;
        bool vlan_strip;
+       bool rx_scatter;
        bool timestamp_offloading;
 };
 
@@ -342,14 +344,19 @@ eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                        /* pcap packet will fit in the mbuf, can copy it */
                        rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet, len);
                        mbuf->data_len = len;
-               } else {
-                       /* Try read jumbo frame into multi mbufs. */
+               } else if (pcap_q->rx_scatter) {
+                       /* Scatter into multi-segment mbufs. */
                        if (unlikely(eth_pcap_rx_jumbo(pcap_q->mb_pool,
                                                       mbuf, packet, len) == -1)) {
                                pcap_q->rx_stat.err_pkts++;
                                rte_pktmbuf_free(mbuf);
                                break;
                        }
+               } else {
+                       /* Packet too large and scatter not enabled, drop it. */
+                       pcap_q->rx_stat.err_pkts++;
+                       rte_pktmbuf_free(mbuf);
+                       continue;
                }
 
                mbuf->pkt_len = len;
@@ -907,6 +914,7 @@ eth_dev_configure(struct rte_eth_dev *dev)
        const struct rte_eth_rxmode *rxmode = &dev_conf->rxmode;
 
        internals->vlan_strip = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
+       internals->rx_scatter = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
        internals->timestamp_offloading = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP);
        return 0;
 }
@@ -927,7 +935,8 @@ eth_dev_info(struct rte_eth_dev *dev,
        dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
                RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
        dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
-               RTE_ETH_RX_OFFLOAD_TIMESTAMP;
+               RTE_ETH_RX_OFFLOAD_TIMESTAMP |
+               RTE_ETH_RX_OFFLOAD_SCATTER;
 
        return 0;
 }
@@ -1088,11 +1097,37 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
 {
        struct pmd_internals *internals = dev->data->dev_private;
        struct pcap_rx_queue *pcap_q = &internals->rx_queue[rx_queue_id];
+       uint16_t buf_size;
+       bool rx_scatter;
+
+       buf_size = rte_pktmbuf_data_room_size(mb_pool) - RTE_PKTMBUF_HEADROOM;
+       rx_scatter = !!(dev->data->dev_conf.rxmode.offloads &
+                       RTE_ETH_RX_OFFLOAD_SCATTER);
+
+       /*
+        * If Rx scatter is not enabled, verify that the mbuf data room
+        * can hold the largest received packet in a single segment.
+        * Use the MTU-derived frame size as the expected maximum, not
+        * snapshot_len which is a capture truncation limit rather than
+        * an expected packet size.
+        */
+       if (!rx_scatter) {
+               uint32_t max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN;
+
+               if (max_rx_pktlen > buf_size) {
+                       PMD_LOG(ERR,
+                               "Rx scatter is disabled and RxQ mbuf pool object size is too small "
+                               "(buf_size=%u, max_rx_pkt_len=%u)",
+                               buf_size, max_rx_pktlen);
+                       return -EINVAL;
+               }
+       }
 
        pcap_q->mb_pool = mb_pool;
        pcap_q->port_id = dev->data->port_id;
        pcap_q->queue_id = rx_queue_id;
        pcap_q->vlan_strip = internals->vlan_strip;
+       pcap_q->rx_scatter = rx_scatter;
        dev->data->rx_queues[rx_queue_id] = pcap_q;
        pcap_q->timestamp_offloading = internals->timestamp_offloading;
 
@@ -1105,6 +1140,12 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
                pcap_t **pcap;
                bool save_vlan_strip;
 
+               if (rx_scatter) {
+                       PMD_LOG(ERR,
+                               "Rx scatter is not supported with infinite_rx mode");
+                       return -EINVAL;
+               }
+
                pp = rte_eth_devices[pcap_q->port_id].process_private;
                pcap = &pp->rx_pcap[pcap_q->queue_id];
 
-- 
2.51.0
