From: Alan Robertson <alanrobertson...@gmail.com>

When forwarding traffic across a TDM network, the Ethernet header will
be replaced with an ML-PPP one, thereby reducing the size of the packet.

Signed-off-by: Alan Robertson <alan.robert...@att.com>
---
 lib/librte_sched/rte_sched.c | 41 ++++++++++++++++++++++++++++++++++++-----
 lib/librte_sched/rte_sched.h |  2 +-
 2 files changed, 37 insertions(+), 6 deletions(-)

diff --git a/lib/librte_sched/rte_sched.c b/lib/librte_sched/rte_sched.c
index 7252f850d..5c88f1b62 100644
--- a/lib/librte_sched/rte_sched.c
+++ b/lib/librte_sched/rte_sched.c
@@ -216,7 +216,7 @@ struct rte_sched_port {
        uint32_t n_pipes_per_subport;
        uint32_t rate;
        uint32_t mtu;
-       uint32_t frame_overhead;
+       int32_t frame_overhead;
        uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
        uint32_t n_pipe_profiles;
        uint32_t pipe_tc3_rate_max;
@@ -643,7 +643,14 @@ rte_sched_port_config(struct rte_sched_port_params *params)
        port->n_subports_per_port = params->n_subports_per_port;
        port->n_pipes_per_subport = params->n_pipes_per_subport;
        port->rate = params->rate;
-       port->mtu = params->mtu + params->frame_overhead;
+
+       /* Only add a positive overhead.  A negative overhead is used to
+        * accommodate downstream TDM devices, which won't have an Ethernet
+        * header; it obviously won't impact our local interface MTU size.
+        */
+       port->mtu = params->mtu;
+       if (params->frame_overhead > 0)
+               port->mtu += params->frame_overhead;
        port->frame_overhead = params->frame_overhead;
        memcpy(port->qsize, params->qsize, sizeof(params->qsize));
        port->n_pipe_profiles = params->n_pipe_profiles;
@@ -1613,13 +1620,21 @@ grinder_credits_check(struct rte_sched_port *port, uint32_t pos)
        struct rte_sched_pipe *pipe = grinder->pipe;
        struct rte_mbuf *pkt = grinder->pkt;
        uint32_t tc_index = grinder->tc_index;
-       uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
+       uint32_t pkt_len;
+       int32_t tpkt_len;
        uint32_t subport_tb_credits = subport->tb_credits;
        uint32_t subport_tc_credits = subport->tc_credits[tc_index];
        uint32_t pipe_tb_credits = pipe->tb_credits;
        uint32_t pipe_tc_credits = pipe->tc_credits[tc_index];
        int enough_credits;
 
+       /* Make sure the adjusted length cannot go negative.  To accommodate
+        * downstream TDM devices we may want to ignore the Ethernet header,
+        * so negative overhead values are allowed.
+        */
+       tpkt_len = pkt->pkt_len + port->frame_overhead;
+       pkt_len = (tpkt_len < 0) ? 1 : tpkt_len;
+
        /* Check queue credits */
        enough_credits = (pkt_len <= subport_tb_credits) &&
                (pkt_len <= subport_tc_credits) &&
@@ -1648,7 +1663,8 @@ grinder_credits_check(struct rte_sched_port *port, uint32_t pos)
        struct rte_sched_pipe *pipe = grinder->pipe;
        struct rte_mbuf *pkt = grinder->pkt;
        uint32_t tc_index = grinder->tc_index;
-       uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
+       uint32_t pkt_len;
+       int32_t tpkt_len;
        uint32_t subport_tb_credits = subport->tb_credits;
        uint32_t subport_tc_credits = subport->tc_credits[tc_index];
        uint32_t pipe_tb_credits = pipe->tb_credits;
@@ -1658,6 +1674,13 @@ grinder_credits_check(struct rte_sched_port *port, uint32_t pos)
        uint32_t pipe_tc_ov_credits = pipe_tc_ov_mask1[tc_index];
        int enough_credits;
 
+       /* Make sure the adjusted length cannot go negative.  To accommodate
+        * downstream TDM devices we may want to ignore the Ethernet header,
+        * so negative overhead values are allowed.
+        */
+       tpkt_len = pkt->pkt_len + port->frame_overhead;
+       pkt_len = (tpkt_len < 0) ? 1 : tpkt_len;
+
        /* Check pipe and subport credits */
        enough_credits = (pkt_len <= subport_tb_credits) &&
                (pkt_len <= subport_tc_credits) &&
@@ -1687,11 +1710,19 @@ grinder_schedule(struct rte_sched_port *port, uint32_t pos)
        struct rte_sched_grinder *grinder = port->grinder + pos;
        struct rte_sched_queue *queue = grinder->queue[grinder->qpos];
        struct rte_mbuf *pkt = grinder->pkt;
-       uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
+       uint32_t pkt_len;
+       int32_t tpkt_len;
 
        if (!grinder_credits_check(port, pos))
                return 0;
 
+       /* Make sure the adjusted length cannot go negative.  To accommodate
+        * downstream TDM devices we may want to ignore the Ethernet header,
+        * so negative overhead values are allowed.
+        */
+       tpkt_len = pkt->pkt_len + port->frame_overhead;
+       pkt_len = (tpkt_len < 0) ? 1 : tpkt_len;
+
        /* Advance port time */
        port->time += pkt_len;
 
diff --git a/lib/librte_sched/rte_sched.h b/lib/librte_sched/rte_sched.h
index e9c281726..63677eefc 100644
--- a/lib/librte_sched/rte_sched.h
+++ b/lib/librte_sched/rte_sched.h
@@ -219,7 +219,7 @@ struct rte_sched_port_params {
        uint32_t mtu;                    /**< Maximum Ethernet frame size
                                          * (measured in bytes).
                                          * Should not include the framing overhead. */
-       uint32_t frame_overhead;         /**< Framing overhead per packet
+       int32_t frame_overhead;          /**< Framing overhead per packet
                                          * (measured in bytes) */
        uint32_t n_subports_per_port;    /**< Number of subports */
        uint32_t n_pipes_per_subport;    /**< Number of pipes per subport */
-- 
2.11.0

Reply via email to