[PATCH v1 0/1] dts: Add the ability to bind ports to drivers
From: Jeremy Spewock Currently the DTS framework provides no way to bind ports on the SUT to different drivers, but many test suites require this ability. This change allows developers to bind to either os_driver or os_driver_for_dpdk, both of which they define in conf.yaml. This patch additionally binds to the driver for DPDK at the start of each build target setup, which means that test suites will run on the driver for DPDK unless otherwise specified. Jeremy Spewock (1): dts: bind to DPDK driver before running test suites dts/framework/remote_session/linux_session.py | 3 ++ dts/framework/remote_session/os_session.py| 6 dts/framework/testbed_model/sut_node.py | 34 +++ dts/tests/TestSuite_os_udp.py | 4 +++ dts/tests/TestSuite_smoke_tests.py| 6 ++-- 5 files changed, 49 insertions(+), 4 deletions(-) -- 2.42.0
[PATCH v1 1/1] dts: bind to DPDK driver before running test suites
From: Jeremy Spewock Modifies the current process so that we bind to os_driver_for_dpdk from the configuration file before running test suites and bind back to the os_driver afterwards. This allows test suites to assume that the ports are bound to a DPDK supported driver or bind to either driver as needed. Signed-off-by: Jeremy Spewock --- dts/framework/remote_session/linux_session.py | 3 ++ dts/framework/remote_session/os_session.py| 6 dts/framework/testbed_model/sut_node.py | 34 +++ dts/tests/TestSuite_os_udp.py | 4 +++ dts/tests/TestSuite_smoke_tests.py| 6 ++-- 5 files changed, 49 insertions(+), 4 deletions(-) diff --git a/dts/framework/remote_session/linux_session.py b/dts/framework/remote_session/linux_session.py index a3f1a6bf3b..7f2453c44c 100644 --- a/dts/framework/remote_session/linux_session.py +++ b/dts/framework/remote_session/linux_session.py @@ -199,3 +199,6 @@ def configure_port_ip_address( def configure_ipv4_forwarding(self, enable: bool) -> None: state = 1 if enable else 0 self.send_command(f"sysctl -w net.ipv4.ip_forward={state}", privileged=True) + +def probe_driver(self, driver_name: str) -> None: +self.send_command(f"modprobe {driver_name}", verify=True) diff --git a/dts/framework/remote_session/os_session.py b/dts/framework/remote_session/os_session.py index 8a709eac1c..719e815ac8 100644 --- a/dts/framework/remote_session/os_session.py +++ b/dts/framework/remote_session/os_session.py @@ -282,3 +282,9 @@ def configure_ipv4_forwarding(self, enable: bool) -> None: """ Enable IPv4 forwarding in the underlying OS. """ + +@abstractmethod +def probe_driver(self, driver_name: str) -> None: +""" +Load the module for the driver. +""" diff --git a/dts/framework/testbed_model/sut_node.py b/dts/framework/testbed_model/sut_node.py index 202aebfd06..5a7dd91cac 100644 --- a/dts/framework/testbed_model/sut_node.py +++ b/dts/framework/testbed_model/sut_node.py @@ -89,6 +89,7 @@ class SutNode(Node): _dpdk_version: str | None _node_info: NodeInfo | None _compiler_version: str | None +_path_to_devbind: PurePath | None def __init__(self, node_config: SutNodeConfiguration): super(SutNode, self).__init__(node_config) @@ -105,6 +106,7 @@ def __init__(self, node_config: SutNodeConfiguration): self._dpdk_version = None self._node_info = None self._compiler_version = None +self._path_to_devbind = None self._logger.info(f"Created node: {self.name}") @property @@ -155,6 +157,14 @@ def compiler_version(self) -> str: return "" return self._compiler_version +@property +def path_to_devbind(self) -> PurePath: +if self._path_to_devbind is None: +self._path_to_devbind = self.main_session.join_remote_path( +self._remote_dpdk_dir, "usertools", "dpdk-devbind.py" +) +return self._path_to_devbind + def get_build_target_info(self) -> BuildTargetInfo: return BuildTargetInfo( dpdk_version=self.dpdk_version, compiler_version=self.compiler_version @@ -176,6 +186,14 @@ def _set_up_build_target( self._configure_build_target(build_target_config) self._copy_dpdk_tarball() self._build_dpdk() +self.bind_ports_to_driver() + +def _tear_down_build_target(self) -> None: +""" +This method exists to be optionally overwritten by derived classes and +is not decorated so that the derived class doesn't have to use the decorator. 
+""" +self.bind_ports_to_driver(for_dpdk=False) def _configure_build_target( self, build_target_config: BuildTargetConfiguration @@ -389,3 +407,19 @@ def create_interactive_shell( return super().create_interactive_shell( shell_cls, timeout, privileged, str(eal_parameters) ) + +def bind_ports_to_driver(self, for_dpdk: bool = True) -> None: +"""Bind all ports on the SUT to a driver. + +Args: +for_dpdk: Boolean that, when True, binds ports to os_driver_for_dpdk +or, when False, binds to os_driver. Defaults to True. +""" +for port in self.ports: +driver = port.os_driver_for_dpdk if for_dpdk else port.os_driver +self.main_session.probe_driver(driver) +self.main_session.send_command( +f"{self.path_to_devbind} -b {driver} --force {port.pci}", +privileged=True, +verify=True, +) diff --git a/dts/tests/TestSuite_os_udp.py b/dts/tests/TestSuite_os_udp.py index 9b5f39711d..bf6b93deb5 100644 --- a/dts/tests/TestSuite_os_udp.py +++ b/dts/tests/TestSuite_os_udp.py @@ -19,6 +19,8 @@ def set_up_suite(self) -> None: Configure SUT ports and SUT to route traffic from if1 to if2.
[PATCH v2 0/2] dts: add context manager
From: Jeremy Spewock v2: * addresses the comments from version 1, adjusting documentation accordingly and condensing usage of the context manager. Jeremy Spewock (2): dts: add context manager for interactive shells dts: improve starting and stopping interactive shells dts/framework/remote_session/dpdk_shell.py| 9 +- .../remote_session/interactive_shell.py | 171 ++--- .../single_active_interactive_shell.py| 233 ++ dts/framework/remote_session/testpmd_shell.py | 9 +- .../testbed_model/traffic_generator/scapy.py | 2 + dts/tests/TestSuite_pmd_buffer_scatter.py | 26 +- dts/tests/TestSuite_smoke_tests.py| 4 +- 7 files changed, 284 insertions(+), 170 deletions(-) create mode 100644 dts/framework/remote_session/single_active_interactive_shell.py -- 2.45.2
[PATCH v2 1/2] dts: add context manager for interactive shells
From: Jeremy Spewock Interactive shells are managed in a way currently where they are closed and cleaned up at the time of garbage collection. Due to there being no guarantee of when this garbage collection happens in Python, there is no way to consistently know when an application will be closed without manually closing the application yourself when you are done with it. This doesn't cause a problem in cases where you can start another instance of the same application multiple times on a server, but this isn't the case for primary applications in DPDK. The introduction of primary applications, such as testpmd, adds a need for knowing previous instances of the application have been stopped and cleaned up before starting a new one, which the garbage collector does not provide. To solve this problem, a new class is added which acts as a base class for interactive shells that enforces that instances of the application be managed using a context manager. Using a context manager guarantees that once you leave the scope of the block where the application is being used for any reason, the application will be closed immediately. This avoids the possibility of the shell not being closed due to an exception being raised or user error. The interactive shell class then becomes shells that can be started/stopped manually or at the time of garbage collection rather than through a context manager. Signed-off-by: Jeremy Spewock Reviewed-by: Juraj Linkeš Reviewed-by: Patrick Robb Reviewed-by: Luca Vizzarro --- dts/framework/remote_session/dpdk_shell.py| 9 +- .../remote_session/interactive_shell.py | 160 ++- .../single_active_interactive_shell.py| 193 ++ dts/framework/remote_session/testpmd_shell.py | 7 +- .../testbed_model/traffic_generator/scapy.py | 2 + dts/tests/TestSuite_pmd_buffer_scatter.py | 26 +-- dts/tests/TestSuite_smoke_tests.py| 4 +- 7 files changed, 232 insertions(+), 169 deletions(-) create mode 100644 dts/framework/remote_session/single_active_interactive_shell.py diff --git a/dts/framework/remote_session/dpdk_shell.py b/dts/framework/remote_session/dpdk_shell.py index 296639f37d..950c6ca670 100644 --- a/dts/framework/remote_session/dpdk_shell.py +++ b/dts/framework/remote_session/dpdk_shell.py @@ -11,7 +11,9 @@ from pathlib import PurePath from framework.params.eal import EalParams -from framework.remote_session.interactive_shell import InteractiveShell +from framework.remote_session.single_active_interactive_shell import ( +SingleActiveInteractiveShell, +) from framework.settings import SETTINGS from framework.testbed_model.cpu import LogicalCoreCount, LogicalCoreList from framework.testbed_model.sut_node import SutNode @@ -60,7 +62,7 @@ def compute_eal_params( return params -class DPDKShell(InteractiveShell, ABC): +class DPDKShell(SingleActiveInteractiveShell, ABC): """The base class for managing DPDK-based interactive shells. This class shouldn't be instantiated directly, but instead be extended. @@ -79,7 +81,6 @@ def __init__( lcore_filter_specifier: LogicalCoreCount | LogicalCoreList = LogicalCoreCount(), ascending_cores: bool = True, append_prefix_timestamp: bool = True, -start_on_init: bool = True, app_params: EalParams = EalParams(), ) -> None: """Extends :meth:`~.interactive_shell.InteractiveShell.__init__`. 
@@ -95,7 +96,7 @@ def __init__( append_prefix_timestamp, ) -super().__init__(node, privileged, timeout, start_on_init, app_params) +super().__init__(node, privileged, timeout, app_params) def _update_real_path(self, path: PurePath) -> None: """Extends :meth:`~.interactive_shell.InteractiveShell._update_real_path`. diff --git a/dts/framework/remote_session/interactive_shell.py b/dts/framework/remote_session/interactive_shell.py index 254aa29f89..11dc8a0643 100644 --- a/dts/framework/remote_session/interactive_shell.py +++ b/dts/framework/remote_session/interactive_shell.py @@ -2,166 +2,32 @@ # Copyright(c) 2023 University of New Hampshire # Copyright(c) 2024 Arm Limited -"""Common functionality for interactive shell handling. +"""Interactive shell with manual stop/start functionality. -The base class, :class:`InteractiveShell`, is meant to be extended by subclasses that contain -functionality specific to that shell type. These subclasses will often modify things like -the prompt to expect or the arguments to pass into the application, but still utilize -the same method for sending a command and collecting output. How this output is handled however -is often application specific. If an application needs elevated privileges to start it is expected -that the method for gaining those privileges is provided when initializing the class. - -The :option:`--timeout` command line argument and the :envvar:`DTS_TIMEOUT` -environment variable configure the tim
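A minimal usage sketch of the context-manager flow introduced above; the constructor arguments shown here are illustrative rather than the full TestPmdShell signature:

from framework.remote_session.testpmd_shell import TestPmdShell


def run_forwarding(sut_node) -> None:
    """Start testpmd, forward packets, and guarantee the shell is closed on exit."""
    with TestPmdShell(sut_node) as testpmd:
        testpmd.start()
        # send and capture traffic here while the shell is guaranteed to be alive
    # Leaving the block, whether normally or via an exception, closes testpmd
    # immediately, so the next test case can safely start a new primary DPDK process.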
[PATCH v2 2/2] dts: improve starting and stopping interactive shells
From: Jeremy Spewock The InteractiveShell class currently relies on being cleaned up and shutdown at the time of garbage collection, but this cleanup of the class does no verification that the session is still running prior to cleanup. So, if a user were to call this method themselves prior to garbage collection, it would be called twice and throw an exception when the desired behavior is to do nothing since the session is already cleaned up. This is solved by using a weakref and a finalize class which achieves the same result of calling the method at garbage collection, but also ensures that it is called exactly once. Additionally, this fixes issues regarding starting a primary DPDK application while another is still cleaning up via a retry when starting interactive shells. It also adds catch for attempting to send a command to an interactive shell that is not running to create a more descriptive error message. Signed-off-by: Jeremy Spewock Reviewed-by: Luca Vizzarro Reviewed-by: Patrick Robb Reviewed-by: Juraj Linkeš --- .../remote_session/interactive_shell.py | 29 .../single_active_interactive_shell.py| 46 +-- dts/framework/remote_session/testpmd_shell.py | 2 +- 3 files changed, 64 insertions(+), 13 deletions(-) diff --git a/dts/framework/remote_session/interactive_shell.py b/dts/framework/remote_session/interactive_shell.py index 11dc8a0643..9ca285b604 100644 --- a/dts/framework/remote_session/interactive_shell.py +++ b/dts/framework/remote_session/interactive_shell.py @@ -9,6 +9,9 @@ collection. """ +import weakref +from typing import ClassVar + from .single_active_interactive_shell import SingleActiveInteractiveShell @@ -16,18 +19,26 @@ class InteractiveShell(SingleActiveInteractiveShell): """Adds manual start and stop functionality to interactive shells. Like its super-class, this class should not be instantiated directly and should instead be -extended. This class also provides an option for automated cleanup of the application through -the garbage collector. +extended. This class also provides an option for automated cleanup of the application using a +weakref and a finalize class. This finalize class allows for cleanup of the class at the time +of garbage collection and also ensures that cleanup only happens once. This way if a user +initiates the closing of the shell manually it is not repeated at the time of garbage +collection. """ +_finalizer: weakref.finalize +#: One attempt should be enough for shells which don't have to worry about other instances +#: closing before starting a new one. +_init_attempts: ClassVar[int] = 1 + def start_application(self) -> None: -"""Start the application.""" +"""Start the application. + +After the application has started, use :class:`weakref.finalize` to manage cleanup. 
+""" self._start_application() +self._finalizer = weakref.finalize(self, self._close) def close(self) -> None: -"""Properly free all resources.""" -self._close() - -def __del__(self) -> None: -"""Make sure the session is properly closed before deleting the object.""" -self.close() +"""Free all resources using :class:`weakref.finalize`.""" +self._finalizer() diff --git a/dts/framework/remote_session/single_active_interactive_shell.py b/dts/framework/remote_session/single_active_interactive_shell.py index 30c55d4703..38094c0fe2 100644 --- a/dts/framework/remote_session/single_active_interactive_shell.py +++ b/dts/framework/remote_session/single_active_interactive_shell.py @@ -27,6 +27,7 @@ from paramiko import Channel, channel # type: ignore[import-untyped] from typing_extensions import Self +from framework.exception import InteractiveCommandExecutionError from framework.logger import DTSLogger from framework.params import Params from framework.settings import SETTINGS @@ -45,6 +46,10 @@ class SingleActiveInteractiveShell(ABC): Interactive shells are started and stopped using a context manager. This allows for the start and cleanup of the application to happen at predictable times regardless of exceptions or interrupts. + +Attributes: +is_alive: :data:`True` if the application has started successfully, :data:`False` +otherwise. """ _node: Node @@ -57,6 +62,9 @@ class SingleActiveInteractiveShell(ABC): _privileged: bool _real_path: PurePath +#: The number of times to try starting the application before considering it a failure. +_init_attempts: ClassVar[int] = 5 + #: Prompt to expect at the end of output when sending a command. #: This is often overridden by subclasses. _default_prompt: ClassVar[str] = "" @@ -69,6 +77,8 @@ class SingleActiveInteractiveShell(ABC): #: Path to the executable to start the interactive application. path: ClassVar[Pu
[PATCH v1 0/3] dts: add test suite for dual VLANs
From: Jeremy Spewock This series ports over the implementation of the dual_vlan test suite in old DTS and refactors it, dropping some duplicated functionality as well as some features that are specific to certain NICs. One thing to note about this series is that it is tested and fully working on a Mellanox NIC running the mlx5_core driver, but in testing I did notice some strange behavior on a NIC running the bnxt_en driver. The Broadcom NIC worked for all test cases except for those involving VLAN insertion. In the presence of 2 VLAN headers it seems that the bnxt_en NIC drops the packet completely if you attempt to insert a 3rd. I originally thought this might be an MTU issue, but with MTUs of 2000 on the DUT and 9000 on the traffic generator the packet was still dropped. I believe VLAN insertion in the presence of no other VLAN headers works on this same NIC, as it was tested by Dean Marx. Jeremy Spewock (3): dts: fix Testpmd function for resetting VLAN insertion dts: add dual_vlan testing suite dts: add dual_vlan test suite to the yaml schema dts/framework/config/conf_yaml_schema.json| 3 +- dts/framework/remote_session/testpmd_shell.py | 2 +- dts/tests/TestSuite_dual_vlan.py | 281 ++ 3 files changed, 284 insertions(+), 2 deletions(-) create mode 100644 dts/tests/TestSuite_dual_vlan.py -- 2.45.2
[PATCH v1 1/3] dts: fix Testpmd function for resetting VLAN insertion
From: Jeremy Spewock The previous method would send the command `tx_vlan set ` when the correct command is `tx_vlan reset `. Fixes: a49d9da1e9a5 ("dts: add VLAN methods to testpmd shell") Cc: dm...@iol.unh.edu depends-on: patch-142103 ("dts: add VLAN methods to testpmd shell") Signed-off-by: Jeremy Spewock --- dts/framework/remote_session/testpmd_shell.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dts/framework/remote_session/testpmd_shell.py b/dts/framework/remote_session/testpmd_shell.py index 09d3bda5d6..a8b6a054b5 100644 --- a/dts/framework/remote_session/testpmd_shell.py +++ b/dts/framework/remote_session/testpmd_shell.py @@ -994,7 +994,7 @@ def tx_vlan_reset(self, port: int, verify: bool = True): InteractiveCommandExecutionError: If `verify` is :data:`True` and the insertion tag is not reset. """ -vlan_insert_output = self.send_command(f"tx_vlan set {port}") +vlan_insert_output = self.send_command(f"tx_vlan reset {port}") if verify: if "Please stop port" in vlan_insert_output or "Invalid port" in vlan_insert_output: self._logger.debug( -- 2.45.2
[PATCH v1 2/3] dts: add dual_vlan testing suite
From: Jeremy Spewock This patch ports over the functionality of the dual_vlan suite from old DTS to the new framework. This test suite exists to test the functionality of VLAN functions such as stripping, inserting, and filtering in the presence of two VLAN headers. There are some test cases which were left out in this refactored version including test cases that test the functionality of VLAN functions on a packet with only one VLAN header, as this is something that is tested in another test suite which is currently in development. Additionally, this series does not include test cases for testing the adjustment of TPID or extended VLAN ranges, as these things were included in the old test suite specifically for testing on Intel hardware and they are not universally supported on every NIC. There could be further reason to add these test cases in the future once the capabilities feature is fully implemented. Extended mode for VLANs seems to be exposed through offload capabilities of the port, but there doesn't seem to be anything as obvious for TPID modification. depends-on: patch-142103 ("dts: add VLAN methods to testpmd shell") Signed-off-by: Jeremy Spewock --- dts/tests/TestSuite_dual_vlan.py | 281 +++ 1 file changed, 281 insertions(+) create mode 100644 dts/tests/TestSuite_dual_vlan.py diff --git a/dts/tests/TestSuite_dual_vlan.py b/dts/tests/TestSuite_dual_vlan.py new file mode 100644 index 00..095e57bc56 --- /dev/null +++ b/dts/tests/TestSuite_dual_vlan.py @@ -0,0 +1,281 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2024 University of New Hampshire + +"""Dual VLAN functionality testing suite. + +The main objective of this test suite is to ensure that standard VLAN functions such as stripping, +filtering, and inserting all still carry out their expected behavior in the presence of a packet +which contains two VLAN headers. These functions should carry out said behavior not just in +isolation, but also when other VLAN functions are configured on the same port. In addition to this, +the priority attributes of VLAN headers should be unchanged in the case of multiple VLAN headers +existing on a single packet. +""" +import time +from enum import Flag, auto +from typing import ClassVar + +from scapy.layers.l2 import Dot1Q, Ether # type: ignore[import-untyped] +from scapy.packet import Packet, Raw # type: ignore[import-untyped] + +from framework.params.testpmd import SimpleForwardingModes +from framework.remote_session.testpmd_shell import TestPmdShell +from framework.test_suite import TestSuite + + +class TestDualVlan(TestSuite): +"""DPDK Dual VLAN test suite. + +This suite tests the behavior of VLAN functions and properties in the presence of two VLAN +headers. All VLAN functions which are tested in this suite are specified using the inner class +:class:`TestCaseOptions` and should have cases for configuring them in +:meth:`configure_testpmd` as well as cases for testing their behavior in +:meth:`verify_vlan_functions`. Every combination of VLAN functions being enabled should be +tested. Additionally, attributes of VLAN headers, such as priority, are tested to ensure they +are not modified in the case of two VLAN headers. +""" + +class TestCaseOptions(Flag): +"""Flag for specifying which VLAN functions to configure.""" + +#: +VLAN_STRIP = auto() +#: +VLAN_FILTER_INNER = auto() +#: +VLAN_FILTER_OUTER = auto() +#: +VLAN_INSERT = auto() + +#: ID to set on inner VLAN tags. +inner_vlan_tag: ClassVar[int] = 2 +#: ID to set on outer VLAN tags. 
+outer_vlan_tag: ClassVar[int] = 1 +#: ID to use when inserting VLAN tags. +vlan_insert_tag: ClassVar[int] = 3 +#: +rx_port: ClassVar[int] = 0 +#: +tx_port: ClassVar[int] = 1 + +def is_relevant_packet(self, pkt: Packet) -> bool: +"""Check if a packet was sent by functions in this suite. + +All functions in this test suite send packets with a payload that is packed with 20 "X" +characters. This method, therefore, can determine if the packet was sent by this test suite +by just checking to see if this payload exists on the received packet. + +Args: +pkt: Packet to check for relevancy. + +Returns: +:data:`True` if the packet contains the expected payload, :data:`False` otherwise. +""" +return hasattr(pkt, "load") and "X" * 20 in str(pkt.load) + +def pkt_payload_contains_layers(self, pkt: Packet, *expected_layers: Dot1Q) -> bool: +"""Verify that the payload of the packet matches `expected_layers`. + +The layers in the payload of `pkt` must match the type and the user-defined fields of the +layers in `expected_layers` in order. + +Args: +pkt: Packet to check the payload of. +*expected_layers: Layers expecte
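As an illustration of the packets this suite works with, a short standalone scapy sketch that builds a double-tagged frame using the tag values and the 20-character payload defined above:

from scapy.layers.l2 import Dot1Q, Ether
from scapy.packet import Raw

outer_vlan_tag = 1  # mirrors the class attribute above
inner_vlan_tag = 2  # mirrors the class attribute above

# Outer tag first, then inner tag, then the payload that is_relevant_packet() looks for.
pkt = Ether() / Dot1Q(vlan=outer_vlan_tag) / Dot1Q(vlan=inner_vlan_tag) / Raw(load="X" * 20)
pkt.show()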
[PATCH v1 3/3] dts: add dual_vlan test suite to the yaml schema
From: Jeremy Spewock Adds the test suite name to the yaml schema to allow for it to be run. Signed-off-by: Jeremy Spewock --- dts/framework/config/conf_yaml_schema.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dts/framework/config/conf_yaml_schema.json b/dts/framework/config/conf_yaml_schema.json index f02a310bb5..b8ad5b37b3 100644 --- a/dts/framework/config/conf_yaml_schema.json +++ b/dts/framework/config/conf_yaml_schema.json @@ -187,7 +187,8 @@ "enum": [ "hello_world", "os_udp", -"pmd_buffer_scatter" +"pmd_buffer_scatter", +"dual_vlan" ] }, "test_target": { -- 2.45.2
[PATCH v2 0/2] dts: add test suite for dual VLANs
From: Jeremy Spewock v2: * remove test cases that verify the ability to have 3 VLAN headers as this is a less practical case than only having 2. * add a test case that verifies a packet with only 1 VLAN header can have another inserted into it. * remove patch that fixes tx_vlan_reset method in testpmd as it is no longer used in this series. Jeremy Spewock (2): dts: add dual_vlan testing suite dts: add dual_vlan test suite to the yaml schema dts/framework/config/conf_yaml_schema.json | 3 +- dts/tests/TestSuite_dual_vlan.py | 276 + 2 files changed, 278 insertions(+), 1 deletion(-) create mode 100644 dts/tests/TestSuite_dual_vlan.py -- 2.45.2
[PATCH v2 1/2] dts: add dual_vlan testing suite
From: Jeremy Spewock This patch ports over the functionality of the dual_vlan suite from old DTS to the new framework. This test suite exists to test the functionality of VLAN functions such as stripping, inserting, and filtering in the presence of two VLAN headers. There are some test cases which were left out in this refactored version including test cases that test the functionality of VLAN functions on a packet with only one VLAN header, as this is something that is tested in another test suite which is currently in development. Additionally, this series does not include test cases for testing the adjustment of TPID or extended VLAN ranges, as these things were included in the old test suite specifically for testing on Intel hardware and they are not universally supported on every NIC. There could be further reason to add these test cases in the future once the capabilities feature is fully implemented. Extended mode for VLANs seems to be exposed through offload capabilities of the port, but there doesn't seem to be anything as obvious for TPID modification. depends-on: patch-142103 ("dts: add VLAN methods to testpmd shell") Signed-off-by: Jeremy Spewock --- dts/tests/TestSuite_dual_vlan.py | 276 +++ 1 file changed, 276 insertions(+) create mode 100644 dts/tests/TestSuite_dual_vlan.py diff --git a/dts/tests/TestSuite_dual_vlan.py b/dts/tests/TestSuite_dual_vlan.py new file mode 100644 index 00..623231c9b5 --- /dev/null +++ b/dts/tests/TestSuite_dual_vlan.py @@ -0,0 +1,276 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2024 University of New Hampshire + +"""Dual VLAN functionality testing suite. + +The main objective of this test suite is to ensure that standard VLAN functions such as stripping +and filtering both still carry out their expected behavior in the presence of a packet which +contains two VLAN headers. These functions should carry out said behavior not just in isolation, +but also when other VLAN functions are configured on the same port. In addition to this, the +priority attributes of VLAN headers should be unchanged in the case of multiple VLAN headers +existing on a single packet, and a packet with only a single VLAN header should be able to have one +additional VLAN inserted into it. +""" +import time +from enum import Flag, auto +from typing import ClassVar + +from scapy.layers.l2 import Dot1Q, Ether # type: ignore[import-untyped] +from scapy.packet import Packet, Raw # type: ignore[import-untyped] + +from framework.params.testpmd import SimpleForwardingModes +from framework.remote_session.testpmd_shell import TestPmdShell +from framework.test_suite import TestSuite + + +class TestDualVlan(TestSuite): +"""DPDK Dual VLAN test suite. + +This suite tests the behavior of VLAN functions and properties in the presence of two VLAN +headers. All VLAN functions which are tested in this suite are specified using the inner class +:class:`TestCaseOptions` and should have cases for configuring them in +:meth:`configure_testpmd` as well as cases for testing their behavior in +:meth:`verify_vlan_functions`. Every combination of VLAN functions being enabled should be +tested. Additionally, attributes of VLAN headers, such as priority, are tested to ensure they +are not modified in the case of two VLAN headers. +""" + +class TestCaseOptions(Flag): +"""Flag for specifying which VLAN functions to configure.""" + +#: +VLAN_STRIP = auto() +#: +VLAN_FILTER_INNER = auto() +#: +VLAN_FILTER_OUTER = auto() + +#: ID to set on inner VLAN tags. 
+inner_vlan_tag: ClassVar[int] = 2 +#: ID to set on outer VLAN tags. +outer_vlan_tag: ClassVar[int] = 1 +#: ID to use when inserting VLAN tags. +vlan_insert_tag: ClassVar[int] = 3 +#: +rx_port: ClassVar[int] = 0 +#: +tx_port: ClassVar[int] = 1 + +def is_relevant_packet(self, pkt: Packet) -> bool: +"""Check if a packet was sent by functions in this suite. + +All functions in this test suite send packets with a payload that is packed with 20 "X" +characters. This method, therefore, can determine if the packet was sent by this test suite +by just checking to see if this payload exists on the received packet. + +Args: +pkt: Packet to check for relevancy. + +Returns: +:data:`True` if the packet contains the expected payload, :data:`False` otherwise. +""" +return hasattr(pkt, "load") and "X" * 20 in str(pkt.load) + +def pkt_payload_contains_layers(self, pkt: Packet, *expected_layers: Dot1Q) -> bool: +"""Verify that the payload of the packet matches `expected_layers`. + +The layers in the payload of `pkt` must match the type and the user-defined fields of the +layers in `expected_layers` in order. + +Args: +pkt: Packet to check the pay
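A standalone scapy sketch of the kind of layer check described above; the helper name is illustrative and not the suite's actual method:

from scapy.layers.l2 import Dot1Q, Ether
from scapy.packet import Packet, Raw


def vlan_ids(pkt: Packet) -> list[int]:
    """Collect the VLAN IDs of every Dot1Q layer in order, outermost first."""
    ids, index = [], 1
    layer = pkt.getlayer(Dot1Q, index)
    while layer is not None:
        ids.append(layer.vlan)
        index += 1
        layer = pkt.getlayer(Dot1Q, index)
    return ids


received = Ether() / Dot1Q(vlan=1) / Dot1Q(vlan=2) / Raw(load="X" * 20)
assert vlan_ids(received) == [1, 2]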
[PATCH v2 2/2] dts: add dual_vlan test suite to the yaml schema
From: Jeremy Spewock Adds the test suite name to the yaml schema to allow for it to be run. Signed-off-by: Jeremy Spewock --- dts/framework/config/conf_yaml_schema.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dts/framework/config/conf_yaml_schema.json b/dts/framework/config/conf_yaml_schema.json index f02a310bb5..b8ad5b37b3 100644 --- a/dts/framework/config/conf_yaml_schema.json +++ b/dts/framework/config/conf_yaml_schema.json @@ -187,7 +187,8 @@ "enum": [ "hello_world", "os_udp", -"pmd_buffer_scatter" +"pmd_buffer_scatter", +"dual_vlan" ] }, "test_target": { -- 2.45.2
[PATCH v5 0/3] Improve interactive shell output gathering and logging
From: Jeremy Spewock v5: * rebased on main * pulled tags forward from previous versions since there has been no change to the series outside of rebases since then. Jeremy Spewock (3): dts: Improve output gathering in interactive shells dts: Add missing docstring from XML-RPC server dts: Improve logging for interactive shells dts/framework/exception.py| 66 --- dts/framework/remote_session/dpdk_shell.py| 3 +- .../single_active_interactive_shell.py| 58 +++- dts/framework/remote_session/testpmd_shell.py | 2 + .../testbed_model/traffic_generator/scapy.py | 50 +- 5 files changed, 138 insertions(+), 41 deletions(-) -- 2.45.2
[PATCH v5 1/3] dts: Improve output gathering in interactive shells
From: Jeremy Spewock The current implementation of consuming output from interactive shells relies on being able to find an expected prompt somewhere within the output buffer after sending the command. This is useful in situations where the prompt does not appear in the output itself, but in some practical cases (such as the starting of an XML-RPC server for scapy) the prompt exists in one of the commands sent to the shell and this can cause the command to exit early and creates a race condition between the server starting and the first command being sent to the server. This patch addresses this problem by searching for a line that strictly ends with the provided prompt, rather than one that simply contains it, so that the detection that a command is finished is more consistent. It also adds a catch to detect when a command times out before finding the prompt or the underlying SSH session dies so that the exception can be wrapped into a more explicit one and be more consistent with the non-interactive shells. Bugzilla ID: 1359 Fixes: 88489c0501af ("dts: add smoke tests") Signed-off-by: Jeremy Spewock Reviewed-by: Juraj Linkeš Reviewed-by: Luca Vizzarro Reviewed-by: Nicholas Pratte --- dts/framework/exception.py| 66 --- .../single_active_interactive_shell.py| 49 ++ 2 files changed, 79 insertions(+), 36 deletions(-) diff --git a/dts/framework/exception.py b/dts/framework/exception.py index 74fd2af3b6..f45f789825 100644 --- a/dts/framework/exception.py +++ b/dts/framework/exception.py @@ -51,26 +51,6 @@ class DTSError(Exception): severity: ClassVar[ErrorSeverity] = ErrorSeverity.GENERIC_ERR -class SSHTimeoutError(DTSError): -"""The SSH execution of a command timed out.""" - -#: -severity: ClassVar[ErrorSeverity] = ErrorSeverity.SSH_ERR -_command: str - -def __init__(self, command: str): -"""Define the meaning of the first argument. - -Args: -command: The executed command. -""" -self._command = command - -def __str__(self) -> str: -"""Add some context to the string representation.""" -return f"{self._command} execution timed out." - - class SSHConnectionError(DTSError): """An unsuccessful SSH connection.""" @@ -98,8 +78,42 @@ def __str__(self) -> str: return message -class SSHSessionDeadError(DTSError): -"""The SSH session is no longer alive.""" +class _SSHTimeoutError(DTSError): +"""The execution of a command via SSH timed out. + +This class is private and meant to be raised as its interactive and non-interactive variants. +""" + +#: +severity: ClassVar[ErrorSeverity] = ErrorSeverity.SSH_ERR +_command: str + +def __init__(self, command: str): +"""Define the meaning of the first argument. + +Args: +command: The executed command. +""" +self._command = command + +def __str__(self) -> str: +"""Add some context to the string representation.""" +return f"{self._command} execution timed out." + + +class SSHTimeoutError(_SSHTimeoutError): +"""The execution of a command on a non-interactive SSH session timed out.""" + + +class InteractiveSSHTimeoutError(_SSHTimeoutError): +"""The execution of a command on an interactive SSH session timed out.""" + + +class _SSHSessionDeadError(DTSError): +"""The SSH session is no longer alive. + +This class is private and meant to be raised as its interactive and non-interactive variants. +""" #: severity: ClassVar[ErrorSeverity] = ErrorSeverity.SSH_ERR @@ -118,6 +132,14 @@ def __str__(self) -> str: return f"SSH session with {self._host} has died." 
+class SSHSessionDeadError(_SSHSessionDeadError): +"""Non-interactive SSH session has died.""" + + +class InteractiveSSHSessionDeadError(_SSHSessionDeadError): +"""Interactive SSH session has died.""" + + + class ConfigurationError(DTSError): """An invalid configuration.""" diff --git a/dts/framework/remote_session/single_active_interactive_shell.py b/dts/framework/remote_session/single_active_interactive_shell.py index 38094c0fe2..0e5a04885f 100644 --- a/dts/framework/remote_session/single_active_interactive_shell.py +++ b/dts/framework/remote_session/single_active_interactive_shell.py @@ -27,7 +27,11 @@ from paramiko import Channel, channel # type: ignore[import-untyped] from typing_extensions import Self -from framework.exception import InteractiveCommandExecutionError +from framework.exception import ( +InteractiveCommandExecutionError, +InteractiveSSHSessionDeadError, +InteractiveSSHTimeoutError, +) from framework.logger import DTSLogger from framework.params import Params from framework.settings import SETTINGS @@ -71,7 +75,10 @@ class SingleActiveInteractiveShell(ABC): #: Extra characters to add to the end of every command #: before sending them. This is often overrid
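A simplified sketch of the stricter prompt detection described in the commit message (the names here are illustrative, not the framework's internals): a line must end with the prompt rather than merely contain it:

def output_complete(lines: list[str], prompt: str) -> bool:
    """Consider output complete only when the last collected line ends with the prompt."""
    return bool(lines) and lines[-1].strip().endswith(prompt)


# The prompt string appears inside the command that starts the scapy XML-RPC server,
# so a substring match would return prematurely; an ends-with match does not.
buffered = ["s.serve_forever()  # waiting for 'XMLRPC OK'", "server starting ..."]
assert not output_complete(buffered, "XMLRPC OK")
buffered.append("XMLRPC OK")
assert output_complete(buffered, "XMLRPC OK")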
[PATCH v5 2/3] dts: Add missing docstring from XML-RPC server
From: Jeremy Spewock When this XML-RPC server implementation was added, the docstring had to be shortened in order to reduce the chances of this race condition being encountered. Now that this race condition issue is resolved, the full docstring can be restored. Signed-off-by: Jeremy Spewock Reviewed-by: Juraj Linkeš Reviewed-by: Luca Vizzarro Reviewed-by: Nicholas Pratte --- .../testbed_model/traffic_generator/scapy.py | 46 ++- 1 file changed, 45 insertions(+), 1 deletion(-) diff --git a/dts/framework/testbed_model/traffic_generator/scapy.py b/dts/framework/testbed_model/traffic_generator/scapy.py index 7f0cc2bc18..08e1f4ae7e 100644 --- a/dts/framework/testbed_model/traffic_generator/scapy.py +++ b/dts/framework/testbed_model/traffic_generator/scapy.py @@ -128,9 +128,53 @@ def scapy_send_packets(xmlrpc_packets: list[xmlrpc.client.Binary], send_iface: s class QuittableXMLRPCServer(SimpleXMLRPCServer): -"""Basic XML-RPC server. +r"""Basic XML-RPC server. The server may be augmented by functions serializable by the :mod:`marshal` module. + +Example: +:: + +def hello_world(): +# to be sent to the XML-RPC server +print("Hello World!") + +# start the XML-RPC server on the remote node +# this is done by starting a Python shell on the remote node +from framework.remote_session import PythonShell +# the example assumes you're already connected to a tg_node +session = tg_node.create_interactive_shell(PythonShell, timeout=5, privileged=True) + +# then importing the modules needed to run the server +# and the modules for any functions later added to the server +session.send_command("import xmlrpc") +session.send_command("from xmlrpc.server import SimpleXMLRPCServer") + +# sending the source code of this class to the Python shell +from xmlrpc.server import SimpleXMLRPCServer +src = inspect.getsource(QuittableXMLRPCServer) +src = "\n".join([l for l in src.splitlines() if not l.isspace() and l != ""]) +spacing = "\n" * 4 +session.send_command(spacing + src + spacing) + +# then starting the server with: +command = "s = QuittableXMLRPCServer(('0.0.0.0', {listen_port}));s.serve_forever()" +session.send_command(command, "XMLRPC OK") + +# now the server is running on the remote node and we can add functions to it +# first connect to the server from the execution node +import xmlrpc.client +server_url = f"http://{tg_node.config.hostname}:8000"; +rpc_server_proxy = xmlrpc.client.ServerProxy(server_url) + +# get the function bytes to send +import marshal +function_bytes = marshal.dumps(hello_world.__code__) +rpc_server_proxy.add_rpc_function(hello_world.__name__, function_bytes) + +# now we can execute the function on the server +xmlrpc_binary_recv: xmlrpc.client.Binary = rpc_server_proxy.hello_world() +print(str(xmlrpc_binary_recv)) """ def __init__(self, *args, **kwargs): -- 2.45.2
[PATCH v5 3/3] dts: Improve logging for interactive shells
From: Jeremy Spewock The messages being logged by interactive shells currently are using the same logger as the node they were created from. Because of this, when sending interactive commands, the logs make no distinction between when you are sending a command directly to the host and when you are using an interactive shell on the host. This change adds names to interactive shells so that they are able to use their own loggers with distinct names. Signed-off-by: Jeremy Spewock Reviewed-by: Juraj Linkeš Tested-by: Nicholas Pratte Reviewed-by: Nicholas Pratte Reviewed-by: Luca Vizzarro --- dts/framework/remote_session/dpdk_shell.py | 3 ++- .../remote_session/single_active_interactive_shell.py| 9 +++-- dts/framework/remote_session/testpmd_shell.py| 2 ++ dts/framework/testbed_model/traffic_generator/scapy.py | 4 +++- 4 files changed, 14 insertions(+), 4 deletions(-) diff --git a/dts/framework/remote_session/dpdk_shell.py b/dts/framework/remote_session/dpdk_shell.py index 950c6ca670..c5f5c2d116 100644 --- a/dts/framework/remote_session/dpdk_shell.py +++ b/dts/framework/remote_session/dpdk_shell.py @@ -82,6 +82,7 @@ def __init__( ascending_cores: bool = True, append_prefix_timestamp: bool = True, app_params: EalParams = EalParams(), +name: str | None = None, ) -> None: """Extends :meth:`~.interactive_shell.InteractiveShell.__init__`. @@ -96,7 +97,7 @@ def __init__( append_prefix_timestamp, ) -super().__init__(node, privileged, timeout, app_params) +super().__init__(node, privileged, timeout, app_params, name) def _update_real_path(self, path: PurePath) -> None: """Extends :meth:`~.interactive_shell.InteractiveShell._update_real_path`. diff --git a/dts/framework/remote_session/single_active_interactive_shell.py b/dts/framework/remote_session/single_active_interactive_shell.py index 0e5a04885f..701d0c 100644 --- a/dts/framework/remote_session/single_active_interactive_shell.py +++ b/dts/framework/remote_session/single_active_interactive_shell.py @@ -32,7 +32,7 @@ InteractiveSSHSessionDeadError, InteractiveSSHTimeoutError, ) -from framework.logger import DTSLogger +from framework.logger import DTSLogger, get_dts_logger from framework.params import Params from framework.settings import SETTINGS from framework.testbed_model.node import Node @@ -92,6 +92,7 @@ def __init__( privileged: bool = False, timeout: float = SETTINGS.timeout, app_params: Params = Params(), +name: str | None = None, ) -> None: """Create an SSH channel during initialization. @@ -102,9 +103,13 @@ def __init__( shell. This timeout is for collecting output, so if reading from the buffer and no output is gathered within the timeout, an exception is thrown. app_params: The command line parameters to be passed to the application on startup. +name: Name for the interactive shell to use for logging. This name will be appended to +the name of the underlying node which it is running on. 
""" self._node = node -self._logger = node._logger +if name is None: +name = type(self).__name__ +self._logger = get_dts_logger(f"{node.name}.{name}") self._app_params = app_params self._privileged = privileged self._timeout = timeout diff --git a/dts/framework/remote_session/testpmd_shell.py b/dts/framework/remote_session/testpmd_shell.py index eda6eb320f..43e9f56517 100644 --- a/dts/framework/remote_session/testpmd_shell.py +++ b/dts/framework/remote_session/testpmd_shell.py @@ -604,6 +604,7 @@ def __init__( lcore_filter_specifier: LogicalCoreCount | LogicalCoreList = LogicalCoreCount(), ascending_cores: bool = True, append_prefix_timestamp: bool = True, +name: str | None = None, **app_params: Unpack[TestPmdParamsDict], ) -> None: """Overrides :meth:`~.dpdk_shell.DPDKShell.__init__`. Changes app_params to kwargs.""" @@ -615,6 +616,7 @@ def __init__( ascending_cores, append_prefix_timestamp, TestPmdParams(**app_params), +name, ) def start(self, verify: bool = True) -> None: diff --git a/dts/framework/testbed_model/traffic_generator/scapy.py b/dts/framework/testbed_model/traffic_generator/scapy.py index 08e1f4ae7e..13fc1107aa 100644 --- a/dts/framework/testbed_model/traffic_generator/scapy.py +++ b/dts/framework/testbed_model/traffic_generator/scapy.py @@ -261,7 +261,9 @@ def __init__(self, tg_node: Node, config: ScapyTrafficGeneratorConfig): self._tg_node.config.os == OS.linux ), "Linux is the only supported OS for scapy traffic generation" -self.session = PythonShell(self._tg_node, timeout=5, privileged=True) +self.session
[PATCH v3 0/4] dts: add dynamic queue configuration test suite
From: Jeremy Spewock v3: * rebase on rc3 Jeremy Spewock (4): dts: add send_packets to test suites and rework packet addressing dts: add port queue modification and forwarding stats to testpmd dts: add dynamic queue test suite dts: add dynamic queue conf to the yaml schema dts/framework/config/conf_yaml_schema.json| 3 +- dts/framework/remote_session/testpmd_shell.py | 233 +- dts/framework/test_suite.py | 74 +++-- dts/framework/testbed_model/tg_node.py| 9 + dts/tests/TestSuite_dynamic_queue_conf.py | 286 ++ 5 files changed, 581 insertions(+), 24 deletions(-) create mode 100644 dts/tests/TestSuite_dynamic_queue_conf.py -- 2.45.2
[PATCH v3 1/4] dts: add send_packets to test suites and rework packet addressing
From: Jeremy Spewock Currently the only method provided in the test suite class for sending packets sends a single packet and then captures the results. There is, in some cases, a need to send multiple packets at once while not really needing to capture any traffic received back. The method to do this exists in the traffic generator already, but this patch exposes the method to test suites. This patch also updates the _adjust_addresses method of test suites so that addresses of packets are only modified if the developer did not configure them beforehand. This allows developers to have more control over the content of their packets when sending them through the framework. Signed-off-by: Jeremy Spewock --- dts/framework/test_suite.py| 74 ++ dts/framework/testbed_model/tg_node.py | 9 2 files changed, 62 insertions(+), 21 deletions(-) diff --git a/dts/framework/test_suite.py b/dts/framework/test_suite.py index 694b2eba65..0b678ed62d 100644 --- a/dts/framework/test_suite.py +++ b/dts/framework/test_suite.py @@ -199,7 +199,7 @@ def send_packet_and_capture( Returns: A list of received packets. """ -packet = self._adjust_addresses(packet) +packet = self._adjust_addresses([packet])[0] return self.tg_node.send_packet_and_capture( packet, self._tg_port_egress, @@ -208,6 +208,18 @@ def send_packet_and_capture( duration, ) +def send_packets( +self, +packets: list[Packet], +) -> None: +"""Send packets using the traffic generator and do not capture received traffic. + +Args: +packets: Packets to send. +""" +packets = self._adjust_addresses(packets) +self.tg_node.send_packets(packets, self._tg_port_egress) + def get_expected_packet(self, packet: Packet) -> Packet: """Inject the proper L2/L3 addresses into `packet`. @@ -219,39 +231,59 @@ def get_expected_packet(self, packet: Packet) -> Packet: """ return self._adjust_addresses(packet, expected=True) -def _adjust_addresses(self, packet: Packet, expected: bool = False) -> Packet: +def _adjust_addresses(self, packets: list[Packet], expected: bool = False) -> list[Packet]: """L2 and L3 address additions in both directions. +Only missing addresses are added to packets, existing addresses will not be overridden. + Assumptions: Two links between SUT and TG, one link is TG -> SUT, the other SUT -> TG. Args: -packet: The packet to modify. +packets: The packets to modify. expected: If :data:`True`, the direction is SUT -> TG, otherwise the direction is TG -> SUT. 
""" -if expected: -# The packet enters the TG from SUT -# update l2 addresses -packet.src = self._sut_port_egress.mac_address -packet.dst = self._tg_port_ingress.mac_address +ret_packets = [] +for packet in packets: +default_pkt_src = type(packet)().src +default_pkt_dst = type(packet)().dst +default_pkt_payload_src = IP().src if hasattr(packet.payload, "src") else None +default_pkt_payload_dst = IP().dst if hasattr(packet.payload, "dst") else None +# If `expected` is :data:`True`, the packet enters the TG from SUT, otherwise the +# packet leaves the TG towards the SUT -# The packet is routed from TG egress to TG ingress -# update l3 addresses -packet.payload.src = self._tg_ip_address_egress.ip.exploded -packet.payload.dst = self._tg_ip_address_ingress.ip.exploded -else: -# The packet leaves TG towards SUT # update l2 addresses -packet.src = self._tg_port_egress.mac_address -packet.dst = self._sut_port_ingress.mac_address +if packet.src == default_pkt_src: +packet.src = ( +self._sut_port_egress.mac_address +if expected +else self._tg_port_egress.mac_address +) +if packet.dst == default_pkt_dst: +packet.dst = ( +self._tg_port_ingress.mac_address +if expected +else self._sut_port_ingress.mac_address +) + +# The packet is routed from TG egress to TG ingress regardless of if it is expected or +# not. -# The packet is routed from TG egress to TG ingress # update l3 addresses -packet.payload.src = self._tg_ip_address_egress.ip.exploded -packet.payload.dst = self._tg_ip_address_ingress.ip.exploded - -return Ether(packet.build()) +if ( +default_pkt_payload_src is not None +
[PATCH v3 2/4] dts: add port queue modification and forwarding stats to testpmd
From: Jeremy Spewock This patch adds methods for querying and modifying port queue state and configuration. In addition to this, it also adds the ability to capture the forwarding statistics that get outputted when you send the "stop" command in testpmd. Querying of port queue information is handled through a TextParser dataclass in case there is future need for using more of the output from the command used to query the information. Signed-off-by: Jeremy Spewock --- dts/framework/remote_session/testpmd_shell.py | 233 +- 1 file changed, 231 insertions(+), 2 deletions(-) diff --git a/dts/framework/remote_session/testpmd_shell.py b/dts/framework/remote_session/testpmd_shell.py index eda6eb320f..45b379c808 100644 --- a/dts/framework/remote_session/testpmd_shell.py +++ b/dts/framework/remote_session/testpmd_shell.py @@ -19,7 +19,7 @@ from dataclasses import dataclass, field from enum import Flag, auto from pathlib import PurePath -from typing import ClassVar +from typing import ClassVar, cast from typing_extensions import Self, Unpack @@ -541,6 +541,56 @@ class TestPmdPort(TextParser): ) +@dataclass +class TestPmdPortQueue(TextParser): +"""Dataclass representation of the common parts of the testpmd `show rxq/txq info` commands.""" + +#: +prefetch_threshold: int = field(metadata=TextParser.find_int(r"prefetch threshold: (\d+)")) +#: +host_threshold: int = field(metadata=TextParser.find_int(r"host threshold: (\d+)")) +#: +writeback_threshold: int = field(metadata=TextParser.find_int(r"writeback threshold: (\d+)")) +#: +free_threshold: int = field(metadata=TextParser.find_int(r"free threshold: (\d+)")) +#: +deferred_start: bool = field(metadata=TextParser.find("deferred start: on")) +#: The number of RXD/TXDs is just the ring size of the queue. +ring_size: int = field(metadata=TextParser.find_int(r"Number of (?:RXDs|TXDs): (\d+)")) +#: +is_queue_started: bool = field(metadata=TextParser.find("queue state: started")) +#: +burst_mode: str | None = field( +default=None, metadata=TextParser.find(r"Burst mode: ([^\r\n]+)") +) + + +@dataclass +class TestPmdTxPortQueue(TestPmdPortQueue): +"""Dataclass representation for testpmd `show txq info` command.""" + +#: +rs_threshold: int | None = field( +default=None, metadata=TextParser.find_int(r"RS threshold: (\d+)") +) + + +@dataclass +class TestPmdRxPortQueue(TestPmdPortQueue): +"""Dataclass representation for testpmd `show rxq info` command.""" + +#: +mempool: str | None = field(default=None, metadata=TextParser.find(r"Mempool: ([^\r\n]+)")) +#: +can_drop_packets: bool | None = field( +default=None, metadata=TextParser.find(r"drop packets: on") +) +#: +is_scattering_packets: bool | None = field( +default=None, metadata=TextParser.find(r"scattered packets: on") +) + + @dataclass class TestPmdPortStats(TextParser): """Port statistics.""" @@ -643,7 +693,7 @@ def start(self, verify: bool = True) -> None: "Not all ports came up after starting packet forwarding in testpmd." ) -def stop(self, verify: bool = True) -> None: +def stop(self, verify: bool = True) -> str: """Stop packet forwarding. Args: @@ -651,6 +701,9 @@ def stop(self, verify: bool = True) -> None: forwarding was stopped successfully or not started. If neither is found, it is considered an error. +Returns: +Output gathered from sending the stop command. + Raises: InteractiveCommandExecutionError: If `verify` is :data:`True` and the command to stop forwarding results in an error. 
@@ -663,6 +716,7 @@ def stop(self, verify: bool = True) -> None: ): self._logger.debug(f"Failed to stop packet forwarding: \n{stop_cmd_output}") raise InteractiveCommandExecutionError("Testpmd failed to stop packet forwarding.") +return stop_cmd_output def get_devices(self) -> list[TestPmdDevice]: """Get a list of device names that are known to testpmd. @@ -804,6 +858,181 @@ def show_port_stats(self, port_id: int) -> TestPmdPortStats: return TestPmdPortStats.parse(output) +def show_port_queue_info( +self, port_id: int, queue_id: int, is_rx_queue: bool +) -> TestPmdPortQueue: +"""Get the info for a queue on a given port. + +Args: +port_id: ID of the port where the queue resides. +queue_id: ID of the queue to query. +is_rx_queue: Whether to check an RX or TX queue. If :data:`True` an RX queue will be +queried, otherwise a TX queue will be queried. + +Raises: +InteractiveCommandExecutionError: If there is a failure when getting the info for the +queue. + +
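A hypothetical usage sketch of the new helpers; the suite class and test case below are illustrative, while the method names and the stop() return value come from the diff above:

from framework.remote_session.testpmd_shell import TestPmdShell
from framework.test_suite import TestSuite


class TestQueueInfoExample(TestSuite):
    """Hypothetical suite exercising the new queue query helpers."""

    def test_rx_queue_zero(self) -> None:
        """Check RX queue 0 on port 0 and log the forwarding stats returned by stop()."""
        with TestPmdShell(self.sut_node) as testpmd:
            rxq = testpmd.show_port_queue_info(port_id=0, queue_id=0, is_rx_queue=True)
            self.verify(rxq.is_queue_started, "RX queue 0 on port 0 is not started.")
            testpmd.start()
            stats_output = testpmd.stop()  # stop() now returns the forwarding statistics
            self._logger.info(stats_output)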
[PATCH v3 3/4] dts: add dynamic queue test suite
From: Jeremy Spewock This patch adds a new test suite that is designed to test the stopping and modification of port queues at runtime. Specifically, there are test cases that display the ports' ability to stop some queues but still send and receive traffic on others, as well as the ability to configure the ring size of the queue without blocking the traffic on other queues. Signed-off-by: Jeremy Spewock --- dts/tests/TestSuite_dynamic_queue_conf.py | 286 ++ 1 file changed, 286 insertions(+) create mode 100644 dts/tests/TestSuite_dynamic_queue_conf.py diff --git a/dts/tests/TestSuite_dynamic_queue_conf.py b/dts/tests/TestSuite_dynamic_queue_conf.py new file mode 100644 index 00..f5c667cdeb --- /dev/null +++ b/dts/tests/TestSuite_dynamic_queue_conf.py @@ -0,0 +1,286 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2024 University of New Hampshire + +"""Dynamic configuration of port queues test suite. + +This test suite tests the support of being able to either stop or reconfigure port queues at +runtime without stopping the entire device. Previously, to configure a DPDK ethdev, the application +first specifies how many Tx and Rx queues to include in the ethdev and then the application sets up +each queue individually. Only once all the queues have been set up can the application then start +the device, and at this point traffic can flow. If the device stops, this halts the flow of traffic on +all queues in the ethdev completely. Dynamic queue is a capability present on some NICs that +specifies whether the NIC is able to delay the configuration of queues on its port. This capability +allows for the support of stopping and reconfiguring queues on a port at runtime without stopping +the entire device. + +Support of this capability is shown by starting the Poll Mode Driver with multiple Rx and Tx queues +configured and stopping some prior to forwarding packets, then examining whether or not the stopped +ports and the unmodified ports were able to handle traffic. In addition to just stopping the ports, +the ports must also show that they support configuration changes on their queues at runtime without +stopping the entire device. This is shown by changing the ring size of the queues. + +If the Poll Mode Driver is able to stop some queues on a port and modify them then handle traffic +on the unmodified queues while the others are stopped, then it is the case that the device properly +supports dynamic configuration of its queues. +""" + +import random +from typing import Callable, ClassVar, MutableSet + +from scapy.layers.inet import IP # type: ignore[import-untyped] +from scapy.layers.l2 import Ether # type: ignore[import-untyped] +from scapy.packet import Raw # type: ignore[import-untyped] + +from framework.exception import InteractiveCommandExecutionError +from framework.params.testpmd import PortTopology, SimpleForwardingModes +from framework.remote_session.testpmd_shell import TestPmdShell +from framework.test_suite import TestSuite + + +def setup_and_teardown_test( +test_meth: Callable[ +["TestDynamicQueueConf", int, MutableSet, MutableSet, TestPmdShell, bool], None +], +) -> Callable[["TestDynamicQueueConf", bool], None]: +"""Decorator that provides a setup and teardown for testing methods. + +This decorator provides a method that sets up the environment for testing, runs the test +method, and then does a clean-up verification step after the queues are started again. 
The +decorated method will be provided with all the variables it should need to run testing +including: The ID of the port where the queues for testing reside, disjoint sets of IDs for +queues that are/aren't modified, a testpmd session to run testing with, and a flag that +indicates whether or not testing should be done on Rx or Tx queues. + +Args: +test_meth: The decorated method that tests configuration of port queues at runtime. +This method must have the following parameters in order: An int that represents a +port ID, a set of queues for testing, a set of unmodified queues, a testpmd +interactive shell, and a boolean that, when :data:`True`, does Rx testing, +otherwise does Tx testing. This method must also be a member of the +:class:`TestDynamicQueueConf` class. + +Returns: +A method that sets up the environment, runs the decorated method, then re-enables all +queues and validates they can still handle traffic. +""" + +def wrap(self: "TestDynamicQueueConf", is_rx_testing: bool) -> None: +"""Setup environment, run test function, then cleanup. + +Start a testpmd shell and stop ports for testing, then call the decorated function that +performs the testing. After the decorated function is finished running its testing, +start the stopped queues and send packets to validate that these ports can prope
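A stripped-down, self-contained illustration of the setup/teardown decorator pattern described above; the names here are illustrative and far simpler than the suite's real helpers:

from typing import Callable


def with_stopped_queues(test_meth: Callable[["ExampleSuite", list[int]], None]):
    """Stop some queues, run the decorated test body, then restart and verify."""

    def wrap(self: "ExampleSuite") -> None:
        stopped = self.stop_some_queues()      # setup
        try:
            test_meth(self, stopped)           # the decorated test body
        finally:
            self.restart_and_verify(stopped)   # teardown/verification

    return wrap


class ExampleSuite:
    def stop_some_queues(self) -> list[int]:
        return [1, 3]

    def restart_and_verify(self, queues: list[int]) -> None:
        print(f"restarted and re-verified queues {queues}")

    @with_stopped_queues
    def test_modify_ring_size(self, stopped_queues: list[int]) -> None:
        print(f"testing with queues {stopped_queues} stopped")


ExampleSuite().test_modify_ring_size()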
[PATCH v3 4/4] dts: add dynamic queue conf to the yaml schema
From: Jeremy Spewock Adds the ability to run the test suite using the yaml configuration file. Signed-off-by: Jeremy Spewock --- dts/framework/config/conf_yaml_schema.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dts/framework/config/conf_yaml_schema.json b/dts/framework/config/conf_yaml_schema.json index f02a310bb5..d83a2f51c5 100644 --- a/dts/framework/config/conf_yaml_schema.json +++ b/dts/framework/config/conf_yaml_schema.json @@ -187,7 +187,8 @@ "enum": [ "hello_world", "os_udp", -"pmd_buffer_scatter" +"pmd_buffer_scatter", +"dynamic_queue_conf" ] }, "test_target": { -- 2.45.2
[PATCH v3 0/2] dts: add test suite for dual VLANs
From: Jeremy Spewock v3: * rebase on rc3 Jeremy Spewock (2): dts: add dual_vlan testing suite dts: add dual_vlan test suite to the yaml schema dts/framework/config/conf_yaml_schema.json | 3 +- dts/tests/TestSuite_dual_vlan.py | 268 + 2 files changed, 270 insertions(+), 1 deletion(-) create mode 100644 dts/tests/TestSuite_dual_vlan.py -- 2.45.2
[PATCH v3 1/2] dts: add dual_vlan testing suite
From: Jeremy Spewock This patch ports over the functionality of the dual_vlan suite from old DTS to the new framework. This test suite exists to test the functionality of VLAN functions such as stripping, inserting, and filerting in the presence of two VLAN headers. There are some test cases which were left out in this refactored version including test cases that test the functionality of VLAN functions on a packet with only one VLAN header, as this is something that is tested in another test suite which is currently in development. Additionally, this series does not include test cases for testing the adjustment of TPID or extended VLAN ranges, as these things were included in the old test suite specifically for testing on Intel hardware and they are not universally supported on every NIC. There could be further reason to add these test cases in the future once the capabilities feature is fully implemented. Extended mode for VLANs seems to be exposed through offload capabilities of the port, but there doesn't seem to be anything as obvious for TPID modification. depends-on: patch-142103 ("dts: add VLAN methods to testpmd shell") Signed-off-by: Jeremy Spewock --- dts/tests/TestSuite_dual_vlan.py | 268 +++ 1 file changed, 268 insertions(+) create mode 100644 dts/tests/TestSuite_dual_vlan.py diff --git a/dts/tests/TestSuite_dual_vlan.py b/dts/tests/TestSuite_dual_vlan.py new file mode 100644 index 00..3a8a52afeb --- /dev/null +++ b/dts/tests/TestSuite_dual_vlan.py @@ -0,0 +1,268 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2024 University of New Hampshire + +"""Dual VLAN functionality testing suite. + +The main objective of this test suite is to ensure that standard VLAN functions such as stripping +and filtering both still carry out their expected behavior in the presence of a packet which +contains two VLAN headers. These functions should carry out said behavior not just in isolation, +but also when other VLAN functions are configured on the same port. In addition to this, the +priority attributes of VLAN headers should be unchanged in the case of multiple VLAN headers +existing on a single packet, and a packet with only a single VLAN header should be able to have one +additional VLAN inserted into it. +""" +from enum import Flag, auto +from typing import ClassVar + +from scapy.layers.l2 import Dot1Q, Ether # type: ignore[import-untyped] +from scapy.packet import Packet, Raw # type: ignore[import-untyped] + +from framework.params.testpmd import SimpleForwardingModes +from framework.remote_session.testpmd_shell import TestPmdShell +from framework.test_suite import TestSuite + + +class TestDualVlan(TestSuite): +"""DPDK Dual VLAN test suite. + +This suite tests the behavior of VLAN functions and properties in the presence of two VLAN +headers. All VLAN functions which are tested in this suite are specified using the inner class +:class:`TestCaseOptions` and should have cases for configuring them in +:meth:`configure_testpmd` as well as cases for testing their behavior in +:meth:`verify_vlan_functions`. Every combination of VLAN functions being enabled should be +tested. Additionally, attributes of VLAN headers, such as priority, are tested to ensure they +are not modified in the case of two VLAN headers. +""" + +class TestCaseOptions(Flag): +"""Flag for specifying which VLAN functions to configure.""" + +#: +VLAN_STRIP = auto() +#: +VLAN_FILTER_INNER = auto() +#: +VLAN_FILTER_OUTER = auto() + +#: ID to set on inner VLAN tags. 
+inner_vlan_tag: ClassVar[int] = 2 +#: ID to set on outer VLAN tags. +outer_vlan_tag: ClassVar[int] = 1 +#: ID to use when inserting VLAN tags. +vlan_insert_tag: ClassVar[int] = 3 +#: +rx_port: ClassVar[int] = 0 +#: +tx_port: ClassVar[int] = 1 + +def is_relevant_packet(self, pkt: Packet) -> bool: +"""Check if a packet was sent by functions in this suite. + +All functions in this test suite send packets with a payload that is packed with 20 "X" +characters. This method, therefore, can determine if the packet was sent by this test suite +by just checking to see if this payload exists on the received packet. + +Args: +pkt: Packet to check for relevancy. + +Returns: +:data:`True` if the packet contains the expected payload, :data:`False` otherwise. +""" +return hasattr(pkt, "load") and "X" * 20 in str(pkt.load) + +def pkt_payload_contains_layers(self, pkt: Packet, *expected_layers: Dot1Q) -> bool: +"""Verify that the payload of the packet matches `expected_layers`. + +The layers in the payload of `pkt` must match the type and the user-defined fields of the +layers in `expected_layers` in order. + +Args: +pkt: Packet to check the payload of. +
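For readers unfamiliar with how stacked VLAN headers look in scapy, here is a standalone sketch of the kind of layer and payload inspection the suite's checks rely on; the VLAN IDs are arbitrary examples, not the suite's values:

from scapy.layers.l2 import Dot1Q, Ether
from scapy.packet import Raw

# A double-tagged frame: outer VLAN 1, inner VLAN 2, padded payload.
pkt = Ether() / Dot1Q(vlan=1) / Dot1Q(vlan=2) / Raw("X" * 20)

# Walk every Dot1Q layer in order of appearance and collect its VLAN ID.
vlan_ids = []
index = 1
while (layer := pkt.getlayer(Dot1Q, index)) is not None:
    vlan_ids.append(layer.vlan)
    index += 1

assert vlan_ids == [1, 2]
# The relevancy check used by the suite: the padded payload is still present.
assert hasattr(pkt, "load") and "X" * 20 in str(pkt.load)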
[PATCH v3 2/2] dts: add dual_vlan test suite to the yaml schema
From: Jeremy Spewock Adds the test suite name to the yaml schema to allow for it to be run. Signed-off-by: Jeremy Spewock --- dts/framework/config/conf_yaml_schema.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dts/framework/config/conf_yaml_schema.json b/dts/framework/config/conf_yaml_schema.json index f02a310bb5..b8ad5b37b3 100644 --- a/dts/framework/config/conf_yaml_schema.json +++ b/dts/framework/config/conf_yaml_schema.json @@ -187,7 +187,8 @@ "enum": [ "hello_world", "os_udp", -"pmd_buffer_scatter" +"pmd_buffer_scatter", +"dual_vlan" ] }, "test_target": { -- 2.45.2
[PATCH v6 0/3] Improve interactive shell output gathering and logging
From: Jeremy Spewock v6: * Fix error catch for retries. This series changed the error that is thrown in the case of a timeout, but it was originally overlooked that the context manager patch added a catch that is looking for the old timeout error. This version fixes the patch by adjusting the error that is expected in the context manager patch to match what this series changes it to. Jeremy Spewock (3): dts: Improve output gathering in interactive shells dts: Add missing docstring from XML-RPC server dts: Improve logging for interactive shells dts/framework/exception.py| 66 --- dts/framework/remote_session/dpdk_shell.py| 3 +- .../single_active_interactive_shell.py| 60 - dts/framework/remote_session/testpmd_shell.py | 2 + .../testbed_model/traffic_generator/scapy.py | 50 +- 5 files changed, 139 insertions(+), 42 deletions(-) -- 2.45.2
[PATCH v6 1/3] dts: Improve output gathering in interactive shells
From: Jeremy Spewock The current implementation of consuming output from interactive shells relies on being able to find an expected prompt somewhere within the output buffer after sending the command. This is useful in situations where the prompt does not appear in the output itself, but in some practical cases (such as the starting of an XML-RPC server for scapy) the prompt exists in one of the commands sent to the shell and this can cause the command to exit early and creates a race condition between the server starting and the first command being sent to the server. This patch addresses this problem by searching for a line that strictly ends with the provided prompt, rather than one that simply contains it, so that the detection that a command is finished is more consistent. It also adds a catch to detect when a command times out before finding the prompt or the underlying SSH session dies so that the exception can be wrapped into a more explicit one and be more consistent with the non-interactive shells. Bugzilla ID: 1359 Fixes: 88489c0501af ("dts: add smoke tests") Signed-off-by: Jeremy Spewock Reviewed-by: Juraj Linkeš Reviewed-by: Luca Vizzarro Reviewed-by: Nicholas Pratte --- dts/framework/exception.py| 66 --- .../single_active_interactive_shell.py| 51 +- 2 files changed, 80 insertions(+), 37 deletions(-) diff --git a/dts/framework/exception.py b/dts/framework/exception.py index 74fd2af3b6..f45f789825 100644 --- a/dts/framework/exception.py +++ b/dts/framework/exception.py @@ -51,26 +51,6 @@ class DTSError(Exception): severity: ClassVar[ErrorSeverity] = ErrorSeverity.GENERIC_ERR -class SSHTimeoutError(DTSError): -"""The SSH execution of a command timed out.""" - -#: -severity: ClassVar[ErrorSeverity] = ErrorSeverity.SSH_ERR -_command: str - -def __init__(self, command: str): -"""Define the meaning of the first argument. - -Args: -command: The executed command. -""" -self._command = command - -def __str__(self) -> str: -"""Add some context to the string representation.""" -return f"{self._command} execution timed out." - - class SSHConnectionError(DTSError): """An unsuccessful SSH connection.""" @@ -98,8 +78,42 @@ def __str__(self) -> str: return message -class SSHSessionDeadError(DTSError): -"""The SSH session is no longer alive.""" +class _SSHTimeoutError(DTSError): +"""The execution of a command via SSH timed out. + +This class is private and meant to be raised as its interactive and non-interactive variants. +""" + +#: +severity: ClassVar[ErrorSeverity] = ErrorSeverity.SSH_ERR +_command: str + +def __init__(self, command: str): +"""Define the meaning of the first argument. + +Args: +command: The executed command. +""" +self._command = command + +def __str__(self) -> str: +"""Add some context to the string representation.""" +return f"{self._command} execution timed out." + + +class SSHTimeoutError(_SSHTimeoutError): +"""The execution of a command on a non-interactive SSH session timed out.""" + + +class InteractiveSSHTimeoutError(_SSHTimeoutError): +"""The execution of a command on an interactive SSH session timed out.""" + + +class _SSHSessionDeadError(DTSError): +"""The SSH session is no longer alive. + +This class is private and meant to be raised as its interactive and non-interactive variants. +""" #: severity: ClassVar[ErrorSeverity] = ErrorSeverity.SSH_ERR @@ -118,6 +132,14 @@ def __str__(self) -> str: return f"SSH session with {self._host} has died." 
+class SSHSessionDeadError(_SSHSessionDeadError): +"""Non-interactive SSH session has died.""" + + +class InteractiveSSHSessionDeadError(_SSHSessionDeadError): +"""Interactive SSH session has died.""" + + class ConfigurationError(DTSError): """An invalid configuration.""" diff --git a/dts/framework/remote_session/single_active_interactive_shell.py b/dts/framework/remote_session/single_active_interactive_shell.py index 38094c0fe2..38318aa764 100644 --- a/dts/framework/remote_session/single_active_interactive_shell.py +++ b/dts/framework/remote_session/single_active_interactive_shell.py @@ -27,7 +27,11 @@ from paramiko import Channel, channel # type: ignore[import-untyped] from typing_extensions import Self -from framework.exception import InteractiveCommandExecutionError +from framework.exception import ( +InteractiveCommandExecutionError, +InteractiveSSHSessionDeadError, +InteractiveSSHTimeoutError, +) from framework.logger import DTSLogger from framework.params import Params from framework.settings import SETTINGS @@ -71,7 +75,10 @@ class SingleActiveInteractiveShell(ABC): #: Extra characters to add to the end of every command #: before sending them. This is often overrid
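The stricter prompt detection described above can be illustrated with a tiny standalone sketch; the prompt string and sample lines are made up:

prompt = "testpmd> "


def is_prompt_line(line: str) -> bool:
    """Old behaviour accepted any line *containing* the prompt; the fix requires it at the end."""
    return line.rstrip().endswith(prompt.strip())


# A line that merely echoes the prompt mid-command no longer ends collection early...
assert not is_prompt_line('send_command("something that mentions testpmd> in passing", timeout=5)')
# ...while a real prompt line still does.
assert is_prompt_line("testpmd> ")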
[PATCH v6 2/3] dts: Add missing docstring from XML-RPC server
From: Jeremy Spewock When this XML-RPC server implementation was added, the docstring had to be shortened in order to reduce the chances of this race condition being encountered. Now that this race condition issue is resolved, the full docstring can be restored. Signed-off-by: Jeremy Spewock Reviewed-by: Juraj Linkeš Reviewed-by: Luca Vizzarro Reviewed-by: Nicholas Pratte --- .../testbed_model/traffic_generator/scapy.py | 46 ++- 1 file changed, 45 insertions(+), 1 deletion(-) diff --git a/dts/framework/testbed_model/traffic_generator/scapy.py b/dts/framework/testbed_model/traffic_generator/scapy.py index 7f0cc2bc18..08e1f4ae7e 100644 --- a/dts/framework/testbed_model/traffic_generator/scapy.py +++ b/dts/framework/testbed_model/traffic_generator/scapy.py @@ -128,9 +128,53 @@ def scapy_send_packets(xmlrpc_packets: list[xmlrpc.client.Binary], send_iface: s class QuittableXMLRPCServer(SimpleXMLRPCServer): -"""Basic XML-RPC server. +r"""Basic XML-RPC server. The server may be augmented by functions serializable by the :mod:`marshal` module. + +Example: +:: + +def hello_world(): +# to be sent to the XML-RPC server +print("Hello World!") + +# start the XML-RPC server on the remote node +# this is done by starting a Python shell on the remote node +from framework.remote_session import PythonShell +# the example assumes you're already connected to a tg_node +session = tg_node.create_interactive_shell(PythonShell, timeout=5, privileged=True) + +# then importing the modules needed to run the server +# and the modules for any functions later added to the server +session.send_command("import xmlrpc") +session.send_command("from xmlrpc.server import SimpleXMLRPCServer") + +# sending the source code of this class to the Python shell +from xmlrpc.server import SimpleXMLRPCServer +src = inspect.getsource(QuittableXMLRPCServer) +src = "\n".join([l for l in src.splitlines() if not l.isspace() and l != ""]) +spacing = "\n" * 4 +session.send_command(spacing + src + spacing) + +# then starting the server with: +command = "s = QuittableXMLRPCServer(('0.0.0.0', {listen_port}));s.serve_forever()" +session.send_command(command, "XMLRPC OK") + +# now the server is running on the remote node and we can add functions to it +# first connect to the server from the execution node +import xmlrpc.client +server_url = f"http://{tg_node.config.hostname}:8000"; +rpc_server_proxy = xmlrpc.client.ServerProxy(server_url) + +# get the function bytes to send +import marshal +function_bytes = marshal.dumps(hello_world.__code__) +rpc_server_proxy.add_rpc_function(hello_world.__name__, function_bytes) + +# now we can execute the function on the server +xmlrpc_binary_recv: xmlrpc.client.Binary = rpc_server_proxy.hello_world() +print(str(xmlrpc_binary_recv)) """ def __init__(self, *args, **kwargs): -- 2.45.2
[PATCH v6 3/3] dts: Improve logging for interactive shells
From: Jeremy Spewock The messages being logged by interactive shells currently are using the same logger as the node they were created from. Because of this, when sending interactive commands, the logs make no distinction between when you are sending a command directly to the host and when you are using an interactive shell on the host. This change adds names to interactive shells so that they are able to use their own loggers with distinct names. Signed-off-by: Jeremy Spewock Reviewed-by: Juraj Linkeš Tested-by: Nicholas Pratte Reviewed-by: Nicholas Pratte Reviewed-by: Luca Vizzarro --- dts/framework/remote_session/dpdk_shell.py | 3 ++- .../remote_session/single_active_interactive_shell.py| 9 +++-- dts/framework/remote_session/testpmd_shell.py| 2 ++ dts/framework/testbed_model/traffic_generator/scapy.py | 4 +++- 4 files changed, 14 insertions(+), 4 deletions(-) diff --git a/dts/framework/remote_session/dpdk_shell.py b/dts/framework/remote_session/dpdk_shell.py index 950c6ca670..c5f5c2d116 100644 --- a/dts/framework/remote_session/dpdk_shell.py +++ b/dts/framework/remote_session/dpdk_shell.py @@ -82,6 +82,7 @@ def __init__( ascending_cores: bool = True, append_prefix_timestamp: bool = True, app_params: EalParams = EalParams(), +name: str | None = None, ) -> None: """Extends :meth:`~.interactive_shell.InteractiveShell.__init__`. @@ -96,7 +97,7 @@ def __init__( append_prefix_timestamp, ) -super().__init__(node, privileged, timeout, app_params) +super().__init__(node, privileged, timeout, app_params, name) def _update_real_path(self, path: PurePath) -> None: """Extends :meth:`~.interactive_shell.InteractiveShell._update_real_path`. diff --git a/dts/framework/remote_session/single_active_interactive_shell.py b/dts/framework/remote_session/single_active_interactive_shell.py index 38318aa764..77a4dcefdf 100644 --- a/dts/framework/remote_session/single_active_interactive_shell.py +++ b/dts/framework/remote_session/single_active_interactive_shell.py @@ -32,7 +32,7 @@ InteractiveSSHSessionDeadError, InteractiveSSHTimeoutError, ) -from framework.logger import DTSLogger +from framework.logger import DTSLogger, get_dts_logger from framework.params import Params from framework.settings import SETTINGS from framework.testbed_model.node import Node @@ -92,6 +92,7 @@ def __init__( privileged: bool = False, timeout: float = SETTINGS.timeout, app_params: Params = Params(), +name: str | None = None, ) -> None: """Create an SSH channel during initialization. @@ -102,9 +103,13 @@ def __init__( shell. This timeout is for collecting output, so if reading from the buffer and no output is gathered within the timeout, an exception is thrown. app_params: The command line parameters to be passed to the application on startup. +name: Name for the interactive shell to use for logging. This name will be appended to +the name of the underlying node which it is running on. 
""" self._node = node -self._logger = node._logger +if name is None: +name = type(self).__name__ +self._logger = get_dts_logger(f"{node.name}.{name}") self._app_params = app_params self._privileged = privileged self._timeout = timeout diff --git a/dts/framework/remote_session/testpmd_shell.py b/dts/framework/remote_session/testpmd_shell.py index eda6eb320f..43e9f56517 100644 --- a/dts/framework/remote_session/testpmd_shell.py +++ b/dts/framework/remote_session/testpmd_shell.py @@ -604,6 +604,7 @@ def __init__( lcore_filter_specifier: LogicalCoreCount | LogicalCoreList = LogicalCoreCount(), ascending_cores: bool = True, append_prefix_timestamp: bool = True, +name: str | None = None, **app_params: Unpack[TestPmdParamsDict], ) -> None: """Overrides :meth:`~.dpdk_shell.DPDKShell.__init__`. Changes app_params to kwargs.""" @@ -615,6 +616,7 @@ def __init__( ascending_cores, append_prefix_timestamp, TestPmdParams(**app_params), +name, ) def start(self, verify: bool = True) -> None: diff --git a/dts/framework/testbed_model/traffic_generator/scapy.py b/dts/framework/testbed_model/traffic_generator/scapy.py index 08e1f4ae7e..13fc1107aa 100644 --- a/dts/framework/testbed_model/traffic_generator/scapy.py +++ b/dts/framework/testbed_model/traffic_generator/scapy.py @@ -261,7 +261,9 @@ def __init__(self, tg_node: Node, config: ScapyTrafficGeneratorConfig): self._tg_node.config.os == OS.linux ), "Linux is the only supported OS for scapy traffic generation" -self.session = PythonShell(self._tg_node, timeout=5, privileged=True) +self.session
[PATCH v1 0/1] dts: testpmd verbose parser
From: Jeremy Spewock This series adds a new text parser that is able to extract all verbose messages from testpmd output into a more organized data structure. Jeremy Spewock (1): dts: add text parser for testpmd verbose output dts/framework/parser.py | 30 dts/framework/remote_session/testpmd_shell.py | 146 +- dts/framework/utils.py| 1 + 3 files changed, 175 insertions(+), 2 deletions(-) -- 2.45.2
[PATCH v1 1/1] dts: add text parser for testpmd verbose output
From: Jeremy Spewock Multiple test suites from the old DTS framework rely on being able to consume and interpret the verbose output of testpmd. The new framework doesn't have an elegant way for handling the verbose output, but test suites are starting to be written that rely on it. This patch creates a TextParser class that can be used to extract the verbose information from any testpmd output and also adjusts the `stop` method of the shell to return all output that it collected. Signed-off-by: Jeremy Spewock --- One thing to note here is I don't love the regex in extract_verbose_output(). It works great when there is a bunch of verbose output in a row, but any chunk that isn't followed by another piece of verbose output will contain everything that comes after it in the match group. This could be solved by changing the regex to look ahead only for the next port X/queue Y line instead of also including the end of the string, and then having another alternate route which is solely dedicated to the last block of verbose output which greedily consumes everything until the end of ol_flags, but I didn't want to over complicate the regex since the text parser will extract the specific information it needs anyways. For reference, I was thinking it could be something like this: r"(port \d+/queue \d+:.*?(?=port \d+/queue \d+)|port \d+/queue \d+:.*ol_flags: [\w ]+)" but this has a lot of repition (some of which that could be ripped out with a simple variable) and it is a little more confusing to read I think. dts/framework/parser.py | 30 dts/framework/remote_session/testpmd_shell.py | 146 +- dts/framework/utils.py| 1 + 3 files changed, 175 insertions(+), 2 deletions(-) diff --git a/dts/framework/parser.py b/dts/framework/parser.py index 741dfff821..0b39025a48 100644 --- a/dts/framework/parser.py +++ b/dts/framework/parser.py @@ -160,6 +160,36 @@ def _find(text: str) -> Any: return ParserFn(TextParser_fn=_find) +@staticmethod +def find_all( +pattern: str | re.Pattern[str], +flags: re.RegexFlag = re.RegexFlag(0), +) -> ParserFn: +"""Makes a parser function that finds all of the regular expression matches in the text. + +If there are no matches found in the text than None will be returned, otherwise a list +containing all matches will be returned. Patterns that contain multiple groups will pack +the matches for each group into a tuple. + +Args: +pattern: The regular expression pattern. +flags: The regular expression flags. Ignored if the given pattern is already compiled. + +Returns: +A :class:`ParserFn` that can be used as metadata for a dataclass field. 
+""" +if isinstance(pattern, str): +pattern = re.compile(pattern, flags) + +def _find_all(text: str) -> list[str] | None: +m = pattern.findall(text) +if len(m) == 0: +return None + +return m + +return ParserFn(TextParser_fn=_find_all) + @staticmethod def find_int( pattern: str | re.Pattern[str], diff --git a/dts/framework/remote_session/testpmd_shell.py b/dts/framework/remote_session/testpmd_shell.py index 43e9f56517..9f09a98490 100644 --- a/dts/framework/remote_session/testpmd_shell.py +++ b/dts/framework/remote_session/testpmd_shell.py @@ -31,7 +31,7 @@ from framework.settings import SETTINGS from framework.testbed_model.cpu import LogicalCoreCount, LogicalCoreList from framework.testbed_model.sut_node import SutNode -from framework.utils import StrEnum +from framework.utils import REGEX_FOR_MAC_ADDRESS, StrEnum class TestPmdDevice: @@ -577,6 +577,128 @@ class TestPmdPortStats(TextParser): tx_bps: int = field(metadata=TextParser.find_int(r"Tx-bps:\s+(\d+)")) +class VerboseOLFlag(Flag): +"""Flag representing the OL flags of a packet from Testpmd verbose output.""" + +#: +RTE_MBUF_F_RX_RSS_HASH = auto() + +#: +RTE_MBUF_F_RX_L4_CKSUM_GOOD = auto() +#: +RTE_MBUF_F_RX_L4_CKSUM_BAD = auto() +#: +RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN = auto() + +#: +RTE_MBUF_F_RX_IP_CKSUM_GOOD = auto() +#: +RTE_MBUF_F_RX_IP_CKSUM_BAD = auto() +#: +RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN = auto() + +#: +RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD = auto() +#: +RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD = auto() +#: +RTE_MBUF_F_RX_OUTER_L4_CKSUM_UNKNOWN = auto() + +@classmethod +def from_str_list(cls, arr: list[str]) -> Self: +"""Makes an instance from a list containing the flag members. + +Args: +arr: A list of strings containing ol_flag values. + +Returns: +A new instance of the flag. +""" +flag = cls(0) +for name in cls.__members__: +if name in arr: +flag |= cls[name] +return flag + +
[PATCH v2 0/1] dts: testpmd verbose parser
From: Jeremy Spewock v2: * fix queue and port IDs types in the testpmd verbose output dataclass using find_int(). Jeremy Spewock (1): dts: add text parser for testpmd verbose output dts/framework/parser.py | 30 dts/framework/remote_session/testpmd_shell.py | 146 +- dts/framework/utils.py| 1 + 3 files changed, 175 insertions(+), 2 deletions(-) -- 2.45.2
[PATCH v2 1/1] dts: add text parser for testpmd verbose output
From: Jeremy Spewock Multiple test suites from the old DTS framework rely on being able to consume and interpret the verbose output of testpmd. The new framework doesn't have an elegant way for handling the verbose output, but test suites are starting to be written that rely on it. This patch creates a TextParser class that can be used to extract the verbose information from any testpmd output and also adjusts the `stop` method of the shell to return all output that it collected. Signed-off-by: Jeremy Spewock --- One thing to note here is I don't love the regex in extract_verbose_output(). It works great when there is a bunch of verbose output in a row, but any chunk that isn't followed by another piece of verbose output will contain everything that comes after it in the match group. This could be solved by changing the regex to look ahead only for the next port X/queue Y line instead of also including the end of the string, and then having another alternate route which is solely dedicated to the last block of verbose output which greedily consumes everything until the end of ol_flags, but I didn't want to over complicate the regex since the text parser will extract the specific information it needs anyways. For reference, I was thinking it could be something like this: r"(port \d+/queue \d+:.*?(?=port \d+/queue \d+)|port \d+/queue \d+:.*ol_flags: [\w ]+)" but this has a lot of repition (some of which that could be ripped out with a simple variable) and it is a little more confusing to read I think. dts/framework/parser.py | 30 dts/framework/remote_session/testpmd_shell.py | 146 +- dts/framework/utils.py| 1 + 3 files changed, 175 insertions(+), 2 deletions(-) diff --git a/dts/framework/parser.py b/dts/framework/parser.py index 741dfff821..0b39025a48 100644 --- a/dts/framework/parser.py +++ b/dts/framework/parser.py @@ -160,6 +160,36 @@ def _find(text: str) -> Any: return ParserFn(TextParser_fn=_find) +@staticmethod +def find_all( +pattern: str | re.Pattern[str], +flags: re.RegexFlag = re.RegexFlag(0), +) -> ParserFn: +"""Makes a parser function that finds all of the regular expression matches in the text. + +If there are no matches found in the text than None will be returned, otherwise a list +containing all matches will be returned. Patterns that contain multiple groups will pack +the matches for each group into a tuple. + +Args: +pattern: The regular expression pattern. +flags: The regular expression flags. Ignored if the given pattern is already compiled. + +Returns: +A :class:`ParserFn` that can be used as metadata for a dataclass field. 
+""" +if isinstance(pattern, str): +pattern = re.compile(pattern, flags) + +def _find_all(text: str) -> list[str] | None: +m = pattern.findall(text) +if len(m) == 0: +return None + +return m + +return ParserFn(TextParser_fn=_find_all) + @staticmethod def find_int( pattern: str | re.Pattern[str], diff --git a/dts/framework/remote_session/testpmd_shell.py b/dts/framework/remote_session/testpmd_shell.py index 43e9f56517..dedf1553cf 100644 --- a/dts/framework/remote_session/testpmd_shell.py +++ b/dts/framework/remote_session/testpmd_shell.py @@ -31,7 +31,7 @@ from framework.settings import SETTINGS from framework.testbed_model.cpu import LogicalCoreCount, LogicalCoreList from framework.testbed_model.sut_node import SutNode -from framework.utils import StrEnum +from framework.utils import REGEX_FOR_MAC_ADDRESS, StrEnum class TestPmdDevice: @@ -577,6 +577,128 @@ class TestPmdPortStats(TextParser): tx_bps: int = field(metadata=TextParser.find_int(r"Tx-bps:\s+(\d+)")) +class VerboseOLFlag(Flag): +"""Flag representing the OL flags of a packet from Testpmd verbose output.""" + +#: +RTE_MBUF_F_RX_RSS_HASH = auto() + +#: +RTE_MBUF_F_RX_L4_CKSUM_GOOD = auto() +#: +RTE_MBUF_F_RX_L4_CKSUM_BAD = auto() +#: +RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN = auto() + +#: +RTE_MBUF_F_RX_IP_CKSUM_GOOD = auto() +#: +RTE_MBUF_F_RX_IP_CKSUM_BAD = auto() +#: +RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN = auto() + +#: +RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD = auto() +#: +RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD = auto() +#: +RTE_MBUF_F_RX_OUTER_L4_CKSUM_UNKNOWN = auto() + +@classmethod +def from_str_list(cls, arr: list[str]) -> Self: +"""Makes an instance from a list containing the flag members. + +Args: +arr: A list of strings containing ol_flag values. + +Returns: +A new instance of the flag. +""" +flag = cls(0) +for name in cls.__members__: +if name in arr: +flag |= cls[name] +return flag + +
[RFC PATCH v1 0/3] dts: port over stats checks
From: Jeremy Spewock This series ports over the functionality of the stats_checks test suite from old DTS, but I left it as an RFC because the verification is different from other test suites that we have written, mainly because verifying the accuracy of the port statistics while accounting for noise on the wire is not the most straightforward task. The way I decided to differentiate noise from valid packets in this suite was to use the MAC addresses of the packets and the software packet types that are provided in the verbose output of testpmd. Another idea for how to do this that I tried was using packet checksums. I originally wanted to send packets with bad checksums and assume that noise on the wire would either have a valid checksum or no checksum at all, but this unfortunately only works for the RX side of verbose output as the TX side does not reflect the same checksum information. Jeremy Spewock (3): dts: add clearing port stats and verbose mode to testpmd dts: add port stats checks test suite dts: add stats checks to schema dts/framework/config/conf_yaml_schema.json| 3 +- dts/framework/remote_session/testpmd_shell.py | 62 +++ dts/tests/TestSuite_port_stats_checks.py | 156 ++ 3 files changed, 220 insertions(+), 1 deletion(-) create mode 100644 dts/tests/TestSuite_port_stats_checks.py -- 2.45.2
[RFC PATCH v1 1/3] dts: add clearing port stats and verbose mode to testpmd
From: Jeremy Spewock Methods currently exist for querying the statistics of a port in testpmd, but there weren't methods added for clearing the current statistics on a port. This patch adds methods that allow you to clear the statistics of a single port or all ports to account for situations where the user only wants the port statistics after a certain point and does not care about any existing prior values. This patch also contains methods for modifying the verbose level of testpmd so that users are able to utilize the extra information that it provides. Depends-on: patch-142762 ("dts: add text parser for testpmd verbose output") Signed-off-by: Jeremy Spewock --- dts/framework/remote_session/testpmd_shell.py | 62 +++ 1 file changed, 62 insertions(+) diff --git a/dts/framework/remote_session/testpmd_shell.py b/dts/framework/remote_session/testpmd_shell.py index dedf1553cf..cbea03464f 100644 --- a/dts/framework/remote_session/testpmd_shell.py +++ b/dts/framework/remote_session/testpmd_shell.py @@ -948,6 +948,68 @@ def extract_verbose_output(output: str) -> list[TestPmdVerboseOutput]: iter = re.finditer(r"(port \d+/queue \d+:.*?(?=port \d+/queue \d+|$))", output, re.S) return [TestPmdVerboseOutput.parse(s.group(0)) for s in iter] +def clear_port_stats(self, port_id: int, verify: bool = True) -> None: +"""Clear statistics of a given port. + +Args: +port_id: ID of the port to clear the statistics on. +verify: If :data:`True` the output of the command will be scanned to verify that it was +successful, otherwise failures will be ignored. Defaults to :data:`True`. + +Raises: +InteractiveCommandExecutionError: If `verify` is :data:`True` and testpmd fails to +clear the statistics of the given port. +""" +clear_output = self.send_command(f"clear port stats {port_id}") +if verify and f"NIC statistics for port {port_id} cleared" not in clear_output: +raise InteractiveCommandExecutionError( +f"Test pmd failed to set clear forwarding stats on port {port_id}" +) + +def clear_port_stats_all(self, verify: bool = True) -> None: +"""Clear the statistics of all ports that testpmd is aware of. + +Args: +verify: If :data:`True` the output of the command will be scanned to verify that all +ports had their statistics cleared, otherwise failures will be ignored. Defaults to +:data:`True`. + +Raises: +InteractiveCommandExecutionError: If `verify` is :data:`True` and testpmd fails to +clear the statistics of any of its ports. +""" +clear_output = self.send_command("clear port stats all") +if verify: +if type(self._app_params.ports) is list: +for port_id in range(len(self._app_params.ports)): +if f"NIC statistics for port {port_id} cleared" not in clear_output: +raise InteractiveCommandExecutionError( +f"Test pmd failed to set clear forwarding stats on port {port_id}" +) + +def set_verbose(self, level: int, verify: bool = True) -> None: +"""Set debug verbosity level. + +Args: +level: 0 - silent except for error +1 - fully verbose except for Tx packets +2 - fully verbose except for Rx packets +>2 - fully verbose +verify: if :data:`True` an additional command will be sent to verify that verbose level +is properly set. Defaults to :data:`True`. + +Raises: +InteractiveCommandExecutionError: If `verify` is :data:`True` and verbose level +is not correctly set. 
+""" +verbose_output = self.send_command(f"set verbose {level}") +if verify: +if "Change verbose level" not in verbose_output: +self._logger.debug(f"Failed to set verbose level to {level}: \n{verbose_output}") +raise InteractiveCommandExecutionError( +f"Testpmd failed to set verbose level to {level}." +) + def _close(self) -> None: """Overrides :meth:`~.interactive_shell.close`.""" self.stop() -- 2.45.2
[RFC PATCH v1 2/3] dts: add port stats checks test suite
From: Jeremy Spewock This patch adds a new test suite to DTS that validates the accuracy of the port statistics using testpmd. The functionality is tested by sending a packet of a fixed size to the SUT and verifying that the statistics for packets received, received bytes, packets sent, and sent bytes all update accordingly. Depends-on: patch-142762 ("dts: add text parser for testpmd verbose output") Signed-off-by: Jeremy Spewock --- dts/tests/TestSuite_port_stats_checks.py | 156 +++ 1 file changed, 156 insertions(+) create mode 100644 dts/tests/TestSuite_port_stats_checks.py diff --git a/dts/tests/TestSuite_port_stats_checks.py b/dts/tests/TestSuite_port_stats_checks.py new file mode 100644 index 00..71e1c7906f --- /dev/null +++ b/dts/tests/TestSuite_port_stats_checks.py @@ -0,0 +1,156 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2024 University of New Hampshire + +"""Port Statistics testing suite. + +This test suite tests the functionality of querying the statistics of a port and verifies that the +values provided in the statistics accurately reflect the traffic that has been handled on the port. +This is shown by sending a packet of a fixed size to the SUT and verifying that the number of RX +packets has increased by 1, the number of RX bytes has increased by the specified size, the number +of TX packets has also increased by 1 (since we expect the packet to be forwarded), and the number +of TX bytes has also increased by the same fixed amount. +""" + +from typing import ClassVar, Tuple + +from scapy.layers.inet import IP # type: ignore[import-untyped] +from scapy.layers.l2 import Ether # type: ignore[import-untyped] +from scapy.packet import Packet, Raw # type: ignore[import-untyped] + +from framework.params.testpmd import SimpleForwardingModes +from framework.remote_session.testpmd_shell import TestPmdShell, TestPmdVerboseOutput +from framework.test_suite import TestSuite + + +class TestPortStatsChecks(TestSuite): +"""DPDK Port statistics testing suite. + +Support for port statistics is tested by sending a packet of a fixed size denoted by +`total_packet_len` and verifying that the TX/RX packets of the TX/RX ports updated by exactly +1 and the TX/RX bytes of the TX/RX ports updated by exactly `total_packet_len`. This is done by +finding the total amount of packets that were sent/received which did not originate from this +test suite and taking the sum of the lengths of each of these "noise" packets and subtracting +it from the total values in the port statistics so that all that is left are relevant values. +""" + +#: Port where traffic will be received on the SUT. +recv_port: ClassVar[int] = 0 +#: Port where traffic will be sent from on the SUT. +send_port: ClassVar[int] = 1 + +#: +ip_header_len: ClassVar[int] = 20 +#: +ether_header_len: ClassVar[int] = 14 + +#: Length of the packet being sent including the IP and frame headers. +total_packet_len: ClassVar[int] = 100 +#: Packet to send during testing. +send_pkt: ClassVar[Packet] = ( +Ether() / IP() / Raw("X" * (total_packet_len - ip_header_len - ether_header_len)) +) + +def extract_noise_information( +self, verbose_out: list[TestPmdVerboseOutput] +) -> Tuple[int, int, int, int]: +"""Extract information about packets that were not sent by the framework in `verbose_out`. + +Extract the number of sent/received packets that did not originate from this test suite as +well as the sum of the lengths of said "noise" packets.
Note that received packets are only +examined on the port with the ID `self.recv_port` since these are the receive stats that +will be analyzed in this suite. Sent packets are also only examined on the port with the ID +`self.send_port`. + +Packets are considered to be "noise" when they don't match the expected structure of the +packets that are being sent by this test suite. Specifically, the source and destination +mac addresses as well as the software packet type are checked on packets received by +testpmd to ensure they match the proper addresses of the TG and SUT nodes. Packets that are +sent by testpmd however only check the source mac address and the software packet type. +This is because MAC forwarding mode adjusts both addresses, but only the source will belong +to the TG or SUT node. + +Args: +verbose_out: Parsed testpmd verbose output to collect the noise information from. + +Returns: +A tuple containing the total size of received noise in bytes, the number of received +noise packets, size of all noise packets sent by testpmd in bytes, and the number of +noise packets sent by testpmd. +""" +recv_noise_bytes = 0 +recv_noise_packets = 0 +sent_no
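The noise subtraction described above reduces to simple arithmetic once the verbose output has been classified; here is a standalone sketch with made-up numbers, whereas the real suite pulls these values from testpmd's port statistics and parsed verbose output:

# Totals reported by the port statistics on the receive port.
total_rx_packets, total_rx_bytes = 5, 420
# Packets/bytes in the verbose output whose MAC addresses or packet type
# did not match what this suite sends, i.e. noise on the wire.
noise_rx_packets, noise_rx_bytes = 4, 320

total_packet_len = 100  # the fixed size of the one packet the suite sent

assert total_rx_packets - noise_rx_packets == 1
assert total_rx_bytes - noise_rx_bytes == total_packet_len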
[RFC PATCH v1 3/3] dts: add stats checks to schema
From: Jeremy Spewock Adding the test suite to the yaml schema allows for users to specify it in their conf.yaml files and run the suite in their test runs. Signed-off-by: Jeremy Spewock --- dts/framework/config/conf_yaml_schema.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dts/framework/config/conf_yaml_schema.json b/dts/framework/config/conf_yaml_schema.json index f02a310bb5..8ecfa2a145 100644 --- a/dts/framework/config/conf_yaml_schema.json +++ b/dts/framework/config/conf_yaml_schema.json @@ -187,7 +187,8 @@ "enum": [ "hello_world", "os_udp", -"pmd_buffer_scatter" +"pmd_buffer_scatter", +"port_stats_checks" ] }, "test_target": { -- 2.45.2
[RFC PATCH v1 0/3] dts: pf_smoke port
From: Jeremy Spewock This series ports the functionality of the pf_smoke test suite from old DTS to the new framework. It is listed as an RFC mainly because it uses different verification steps than most other test suites by utilizing checksums to differentiate packets sent by the framework from ones that are just noise on the wire. It should be noted however that this will not work as expected on mlx5 due to the following bugzilla ticket: https://bugs.dpdk.org/show_bug.cgi?id=1514 Jeremy Spewock (3): dts: add ability to modify number of queues on a port to testpmd dts: add pf smoke testing suite dts: added pf_smoke_tests to yaml schema dts/framework/config/conf_yaml_schema.json| 3 +- dts/framework/remote_session/testpmd_shell.py | 36 + dts/tests/TestSuite_pf_smoke_tests.py | 129 ++ 3 files changed, 167 insertions(+), 1 deletion(-) create mode 100644 dts/tests/TestSuite_pf_smoke_tests.py -- 2.45.2
[RFC PATCH v1 1/3] dts: add ability to modify number of queues on a port to testpmd
From: Jeremy Spewock The ability to change the configuration of a port at runtime is a crucial aspect of DPDK. This patch adds both the steps required to modify the number of queues on a port at runtime and also the verification steps to ensure that the command behaved as expected. Depends-on: patch-142762 ("dts: add text parser for testpmd verbose output") Depends-on: patch-142696 ("dts: add VLAN methods to testpmd shell") Signed-off-by: Jeremy Spewock --- dts/framework/remote_session/testpmd_shell.py | 36 +++ 1 file changed, 36 insertions(+) diff --git a/dts/framework/remote_session/testpmd_shell.py b/dts/framework/remote_session/testpmd_shell.py index 6bde7f536f..6eb6360bf7 100644 --- a/dts/framework/remote_session/testpmd_shell.py +++ b/dts/framework/remote_session/testpmd_shell.py @@ -1191,6 +1191,42 @@ def set_verbose(self, level: int, verify: bool = True) -> None: f"Testpmd failed to set verbose level to {level}." ) +def set_num_queues_all(self, num_queues: int, is_rx: bool, verify: bool = True) -> None: +"""Modify the number of Rx/Tx queues configured on all ports. + +Args: +num_queues: Number of queues to set on all ports. +is_rx: If :data:`True` then the number of Rx queues will be modified, otherwise the +number of Tx queues will be modified. +verify: If :data:`True` then an additional command will be sent to check the info of +`port_id` and verify that the number of queues is equal to `num_queues`. + +Raises: +InteractiveCommandExecutionError: If `verify` is :data:`True` and testpmd failed to +update the number of queues on the ports. +""" +queue_type = "rxq" if is_rx else "txq" +self.port_stop_all(verify=verify) +port_config_output = self.send_command(f"port config all {queue_type} {num_queues}") +# ports have to be started before the output can be verified. +self.port_start_all(verify=verify) +if verify: +all_ports_modified = all( +queues == num_queues +for queues in map( +lambda info: info.rx_queues_num if is_rx else info.tx_queues_num, +self.show_port_info_all(), +) +) +if not all_ports_modified: +self._logger.debug( +f"Failed to set number of queues on all ports to " +f"{num_queues}:\n{port_config_output}" +) +raise InteractiveCommandExecutionError( +"Testpmd failed to update the number of queues on all ports." +) + def _close(self) -> None: """Overrides :meth:`~.interactive_shell.close`.""" self.stop() -- 2.45.2
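A hypothetical call site for the method added above, assuming an already open TestPmdShell instance named testpmd (sketch only):

# Reconfigure every port to 4 Rx and 4 Tx queues at runtime. Each call stops
# and restarts the ports internally and, with verify=True, raises
# InteractiveCommandExecutionError if the reported queue counts don't match.
testpmd.set_num_queues_all(4, is_rx=True)
testpmd.set_num_queues_all(4, is_rx=False)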
[RFC PATCH v1 2/3] dts: add pf smoke testing suite
From: Jeremy Spewock This patch adds a smoke testing suite for Physical Function features. The goal of this suite is to test some of the most basic features of DPDK on a physical function and bail out early if any of these features aren't supported as expected. Unlike DTS smoke tests, these ones are not included as a switch in the config file and thus are an additional test suite that developers can include alongside others at their own discretion. Depends-on: patch-142691 ("dts: add send_packets to test suites and rework packet addressing") Signed-off-by: Jeremy Spewock --- dts/tests/TestSuite_pf_smoke_tests.py | 129 ++ 1 file changed, 129 insertions(+) create mode 100644 dts/tests/TestSuite_pf_smoke_tests.py diff --git a/dts/tests/TestSuite_pf_smoke_tests.py b/dts/tests/TestSuite_pf_smoke_tests.py new file mode 100644 index 00..82c84c7c8d --- /dev/null +++ b/dts/tests/TestSuite_pf_smoke_tests.py @@ -0,0 +1,129 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2024 University of New Hampshire +"""Physical Function (PF) smoke testing suite. + +This test suite tests some of the more common DPDK functionality on a PF. Things such as +jumbroframes, Receive Side Scaling (RSS) functions, and being able to modify the number of queues +at runtime should all be supported by PMDs that are capable of running DPDK. Since this is a smoke +testing suite, it is considered a blocking suite that will stop following ones from running. +""" + +from typing import ClassVar + +from scapy.layers.inet import IP # type: ignore[import-untyped] +from scapy.layers.l2 import Ether # type: ignore[import-untyped] +from scapy.packet import Raw # type: ignore[import-untyped] + +from framework.exception import InteractiveCommandExecutionError, TestCaseVerifyError +from framework.params.testpmd import SimpleForwardingModes +from framework.remote_session.testpmd_shell import TestPmdShell, VerboseOLFlag +from framework.test_suite import TestSuite + + +class TestPfSmokeTests(TestSuite): +"""DPDK Physical Function Testing Suite. + +This test suite is designed to verify the basic functions of DPDK on a PF. The MTU of the ports +on the traffic generator are increased to 9000 to support jumboframes for one of the test +cases, and then reverted back to 1500 once the test suite is complete. Some functionality in +this test suite also relies on the ability of testpmd to recognize and flag invalid checksum +values in its verbose output. + +Attributes: +is_blocking: This test suite will block the execution of all other test suites +in the build target after it. 
+""" + +is_blocking: ClassVar[bool] = True +jumbo_frame_len: ClassVar[int] = 9000 +num_queues: int = 4 +rx_port: int = 0 + +def set_up_suite(self) -> None: +"""Increase the MTU of the traffic generator to support jumboframes.""" +for port_link in self._port_links: +self.tg_node.main_session.configure_port_mtu(self.jumbo_frame_len, port_link.tg_port) + +def test_jumbo_frame_support(self) -> None: +"""Verify that the PF is able to send and receive jumboframes.""" +with TestPmdShell( +self.sut_node, +max_pkt_len=self.jumbo_frame_len, +mbuf_size=[self.jumbo_frame_len + 128], +forward_mode=SimpleForwardingModes.mac, +) as testpmd: +testpmd.start() +# Take 26 bytes off the MTU size to account for Ethernet headers +payload_len = self.jumbo_frame_len - 26 +packet = Ether() / Raw("X" * payload_len) +recv = self.send_packet_and_capture(packet) +self.verify( +any(hasattr(p, "load") and "X" * 20 in str(p.load) for p in recv), +f"Jumboframe was not received even when MTU was set to {self.jumbo_frame_len}.", +) + +def test_rss_functionality(self) -> None: +"""Test that Receive Side Scaling functions are working as intended. + +The primary things to test in this case are that packets that are sent with different +destination IP addresses are handled by different queues and that the RSS hash of every +packet is unique. Verification of these functionalities is done by sending packets with +invalid checksums so that the packets sent by this test suite can be differentiated from +other packets sent to the same port. This makes the assumption that other packets sent to +the port will all have valid checksums. +""" +with TestPmdShell( +self.sut_node, +forward_mode=SimpleForwardingModes.rxonly, +rx_queues=self.num_queues, +tx_queues=self.num_queues, +) as testpmd: +testpmd.set_verbose(1) +send_pkts = [ +Ether() / IP(dst=f"192.168.0.{i+1}", chksum=0x0) for i in range(self.num_queues * 4)
[RFC PATCH v1 3/3] dts: added pf_smoke_tests to yaml schema
From: Jeremy Spewock Add the PF smoke testing suite to the yaml schema so that it can be specified in conf.yaml. Signed-off-by: Jeremy Spewock --- dts/framework/config/conf_yaml_schema.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dts/framework/config/conf_yaml_schema.json b/dts/framework/config/conf_yaml_schema.json index f02a310bb5..910134f9e4 100644 --- a/dts/framework/config/conf_yaml_schema.json +++ b/dts/framework/config/conf_yaml_schema.json @@ -187,7 +187,8 @@ "enum": [ "hello_world", "os_udp", -"pmd_buffer_scatter" +"pmd_buffer_scatter", +"pf_smoke_tests" ] }, "test_target": { -- 2.45.2
[PATCH v3 0/1] dts: testpmd verbose parser
From: Jeremy Spewock I have not yet explored the idea of caching verbose output from all commands rather than simply parsing whatever output is passed into the method, but I wanted to get what I have currently out in the meantime. v3: * Add more OL flags to include everything listed in rte_mbuf_core.h * Address comments in the previous version regarding formatting and adding a new class for packet types in verbose output * Changed the structure of verbose output gathering so that rather a block of verbose output that contains a list of packets in the burst it is just simply a list of packet information. This is done by taking the header information from the start of the burst and caching it to add to all packet info. Jeremy Spewock (1): dts: add text parser for testpmd verbose output dts/framework/parser.py | 30 ++ dts/framework/remote_session/testpmd_shell.py | 405 +- dts/framework/utils.py| 1 + 3 files changed, 434 insertions(+), 2 deletions(-) -- 2.45.2
[PATCH v3 1/1] dts: add text parser for testpmd verbose output
From: Jeremy Spewock Multiple test suites from the old DTS framework rely on being able to consume and interpret the verbose output of testpmd. The new framework doesn't have an elegant way for handling the verbose output, but test suites are starting to be written that rely on it. This patch creates a TextParser class that can be used to extract the verbose information from any testpmd output and also adjusts the `stop` method of the shell to return all output that it collected. Signed-off-by: Jeremy Spewock --- dts/framework/parser.py | 30 ++ dts/framework/remote_session/testpmd_shell.py | 405 +- dts/framework/utils.py| 1 + 3 files changed, 434 insertions(+), 2 deletions(-) diff --git a/dts/framework/parser.py b/dts/framework/parser.py index 741dfff821..0b39025a48 100644 --- a/dts/framework/parser.py +++ b/dts/framework/parser.py @@ -160,6 +160,36 @@ def _find(text: str) -> Any: return ParserFn(TextParser_fn=_find) +@staticmethod +def find_all( +pattern: str | re.Pattern[str], +flags: re.RegexFlag = re.RegexFlag(0), +) -> ParserFn: +"""Makes a parser function that finds all of the regular expression matches in the text. + +If there are no matches found in the text than None will be returned, otherwise a list +containing all matches will be returned. Patterns that contain multiple groups will pack +the matches for each group into a tuple. + +Args: +pattern: The regular expression pattern. +flags: The regular expression flags. Ignored if the given pattern is already compiled. + +Returns: +A :class:`ParserFn` that can be used as metadata for a dataclass field. +""" +if isinstance(pattern, str): +pattern = re.compile(pattern, flags) + +def _find_all(text: str) -> list[str] | None: +m = pattern.findall(text) +if len(m) == 0: +return None + +return m + +return ParserFn(TextParser_fn=_find_all) + @staticmethod def find_int( pattern: str | re.Pattern[str], diff --git a/dts/framework/remote_session/testpmd_shell.py b/dts/framework/remote_session/testpmd_shell.py index 43e9f56517..7d0b5a374c 100644 --- a/dts/framework/remote_session/testpmd_shell.py +++ b/dts/framework/remote_session/testpmd_shell.py @@ -31,7 +31,7 @@ from framework.settings import SETTINGS from framework.testbed_model.cpu import LogicalCoreCount, LogicalCoreList from framework.testbed_model.sut_node import SutNode -from framework.utils import StrEnum +from framework.utils import REGEX_FOR_MAC_ADDRESS, StrEnum class TestPmdDevice: @@ -577,6 +577,377 @@ class TestPmdPortStats(TextParser): tx_bps: int = field(metadata=TextParser.find_int(r"Tx-bps:\s+(\d+)")) +class OLFlag(Flag): +"""Flag representing the Packet Offload Features Flags in DPDK. + +Values in this class are taken from the definitions in the RTE MBUF core library in DPDK. +""" + +# RX flags +#: +RTE_MBUF_F_RX_RSS_HASH = auto() + +#: +RTE_MBUF_F_RX_L4_CKSUM_GOOD = auto() +#: +RTE_MBUF_F_RX_L4_CKSUM_BAD = auto() +#: +RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN = auto() +#: +RTE_MBUF_F_RX_L4_CKSUM_NONE = auto() + +#: +RTE_MBUF_F_RX_IP_CKSUM_GOOD = auto() +#: +RTE_MBUF_F_RX_IP_CKSUM_BAD = auto() +#: +RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN = auto() +#: +RTE_MBUF_F_RX_IP_CKSUM_NONE = auto() + +#: +RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD = auto() +#: +RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD = auto() +#: +RTE_MBUF_F_RX_OUTER_L4_CKSUM_UNKNOWN = auto() +#: +RTE_MBUF_F_RX_OUTER_L4_CKSUM_INVALID = auto() + +#: +RTE_MBUF_F_RX_VLAN = auto() +#: +RTE_MBUF_F_RX_FDIR = auto() +#: +RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD = auto() +#: +RTE_MBUF_F_RX_VLAN_STRIPPED = auto() +#: RX IEEE1588 L2 Ethernet PT Packet. 
+RTE_MBUF_F_RX_IEEE1588_PTP = auto() +#: RX IEEE1588 L2/L4 timestamped packet. +RTE_MBUF_F_RX_IEEE1588_TMST = auto() +#: FD id reported if FDIR match. +RTE_MBUF_F_RX_FDIR_ID = auto() +#: Flexible bytes reported if FDIR match. +RTE_MBUF_F_RX_FDIR_FLX = auto() +#: +RTE_MBUF_F_RX_QINQ_STRIPPED = auto() +#: +RTE_MBUF_F_RX_LRO = auto() +#: +RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED = auto() +#: +RTE_MBUF_F_RX_QINQ = auto() + +# TX flags +#: +RTE_MBUF_F_TX_OUTER_UDP_CKSUM = auto() +#: +RTE_MBUF_F_TX_UDP_SEG = auto() +#: +RTE_MBUF_F_TX_SEC_OFFLOAD = auto() +#: +RTE_MBUF_F_TX_MACSEC = auto() + +#: +RTE_MBUF_F_TX_TUNNEL_VXLAN = auto() +#: +RTE_MBUF_F_TX_TUNNEL_GRE = auto() +#: +RTE_MBUF_F_TX_TUNNEL_IPIP = auto() +#: +RTE_MBUF_F_TX_TUNNEL_GENEVE = auto() +#: +RTE_MBUF_F_TX_TUNNEL_MPLSINUDP = auto() +#: +RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE
[PATCH v1 0/1] dts: fix hugepage mounting
From: Jeremy Spewock Currently in the DTS framework there are some commands used for remounting hugepages that require super-user privileges but do not use them, causing them to throw errors whenever they are run as a non-root user. This patch fixes these problems by simply adding a flag that enables admin privileges for the commands that are missing them. Jeremy Spewock (1): dts: add admin privileges to hugepage mounting dts/framework/testbed_model/linux_session.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) -- 2.45.2
[PATCH v1 1/1] dts: add admin privileges to hugepage mounting
From: Jeremy Spewock There were two different commands in the hugepage mounting process that were not using super-user privileges; one for unmounting hugepages and another for re-mounting them. This patch adds the flag that enables enhanced permissions for both of these actions. Bugzilla ID: 1439 Fixes: b8bdc4c58f57 ("dts: replace pexpect with fabric") Cc: juraj.lin...@pantheon.tech Signed-off-by: Jeremy Spewock --- dts/framework/testbed_model/linux_session.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/dts/framework/testbed_model/linux_session.py b/dts/framework/testbed_model/linux_session.py index 99abc21353..544a665b83 100644 --- a/dts/framework/testbed_model/linux_session.py +++ b/dts/framework/testbed_model/linux_session.py @@ -123,12 +123,12 @@ def _get_numa_nodes(self) -> list[int]: def _mount_huge_pages(self) -> None: self._logger.info("Re-mounting Hugepages.") hugapge_fs_cmd = "awk '/hugetlbfs/ { print $2 }' /proc/mounts" -self.send_command(f"umount $({hugapge_fs_cmd})") +self.send_command(f"umount $({hugapge_fs_cmd})", privileged=True) result = self.send_command(hugapge_fs_cmd) if result.stdout == "": remote_mount_path = "/mnt/huge" -self.send_command(f"mkdir -p {remote_mount_path}") -self.send_command(f"mount -t hugetlbfs nodev {remote_mount_path}") +self.send_command(f"mkdir -p {remote_mount_path}", privileged=True) +self.send_command(f"mount -t hugetlbfs nodev {remote_mount_path}", privileged=True) def _supports_numa(self) -> bool: # the system supports numa if self._numa_nodes is non-empty and there are more -- 2.45.2
[PATCH 0/1] dts: add driver binding on TG
From: Jeremy Spewock Previously in DTS there was support for binding the ports of a node to different drivers on the SUT, but there was no option on the TG. Since there are likely to be some traffic generators in the future that would require different drivers to operate properly, this support is something that would likely be useful to have, and it is very simple to add. All that is done in this patch is moving functionality for copying the DPDK tarball onto a host out of the SUT node and into the generic Node class so that both the TG and the SUT can take advantage of DPDK tools. It should be noted however that the TG node still does not build DPDK as it likely wouldn't need the compiled binaries. Jeremy Spewock (1): dts: add binding to different drivers to TG node dts/framework/runner.py | 2 + dts/framework/testbed_model/node.py | 106 +++- dts/framework/testbed_model/sut_node.py | 86 +-- 3 files changed, 109 insertions(+), 85 deletions(-) -- 2.45.2
[PATCH 1/1] dts: add binding to different drivers to TG node
From: Jeremy Spewock The DTS framework in its current state supports binding ports to different drivers on the SUT node but not the TG node. The TG node already has the information that it needs about the different drivers that it has available in the configuration file, but it did not previously have access to the devbind script, so it did not use that information for anything. This patch moves the steps to copy the DPDK tarball into the node class rather than the SUT node class, and calls this function on the TG node as well as the SUT. It also moves the driver binding step into the Node class and triggers the same pattern of binding to ports that existed on the SUT on the TG. Bugzilla ID: 1420 Signed-off-by: Jeremy Spewock --- dts/framework/runner.py | 2 + dts/framework/testbed_model/node.py | 106 +++- dts/framework/testbed_model/sut_node.py | 86 +-- 3 files changed, 109 insertions(+), 85 deletions(-) diff --git a/dts/framework/runner.py b/dts/framework/runner.py index 6b6f6a05f5..ed9e58b172 100644 --- a/dts/framework/runner.py +++ b/dts/framework/runner.py @@ -484,6 +484,7 @@ def _run_build_target( try: sut_node.set_up_build_target(build_target_config) +tg_node.set_up_build_target(build_target_config) self._result.dpdk_version = sut_node.dpdk_version build_target_result.add_build_target_info(sut_node.get_build_target_info()) build_target_result.update_setup(Result.PASS) @@ -498,6 +499,7 @@ def _run_build_target( try: self._logger.set_stage(DtsStage.build_target_teardown) sut_node.tear_down_build_target() +tg_node.tear_down_build_target() build_target_result.update_teardown(Result.PASS) except Exception as e: self._logger.exception("Build target teardown failed.") diff --git a/dts/framework/testbed_model/node.py b/dts/framework/testbed_model/node.py index 12a40170ac..8e6181e424 100644 --- a/dts/framework/testbed_model/node.py +++ b/dts/framework/testbed_model/node.py @@ -13,11 +13,19 @@ The :func:`~Node.skip_setup` decorator can be used without subclassing. """ +import os +import tarfile from abc import ABC from ipaddress import IPv4Interface, IPv6Interface +from pathlib import PurePath from typing import Any, Callable, Union -from framework.config import OS, NodeConfiguration, TestRunConfiguration +from framework.config import ( +OS, +BuildTargetConfiguration, +NodeConfiguration, +TestRunConfiguration, +) from framework.exception import ConfigurationError from framework.logger import DTSLogger, get_dts_logger from framework.settings import SETTINGS @@ -58,8 +66,11 @@ class Node(ABC): lcores: list[LogicalCore] ports: list[Port] _logger: DTSLogger +_remote_tmp_dir: PurePath +__remote_dpdk_dir: PurePath | None _other_sessions: list[OSSession] _test_run_config: TestRunConfiguration +_path_to_devbind_script: PurePath | None def __init__(self, node_config: NodeConfiguration): """Connect to the node and gather info during initialization. @@ -88,6 +99,9 @@ def __init__(self, node_config: NodeConfiguration): self._other_sessions = [] self._init_ports() +self._remote_tmp_dir = self.main_session.get_remote_tmp_dir() +self.__remote_dpdk_dir = None +self._path_to_devbind_script = None def _init_ports(self) -> None: self.ports = [Port(self.name, port_config) for port_config in self.config.ports] @@ -95,6 +109,34 @@ def _init_ports(self) -> None: for port in self.ports: self.configure_port_state(port) +def _guess_dpdk_remote_dir(self) -> PurePath: +return self.main_session.guess_dpdk_remote_dir(self._remote_tmp_dir) + +@property +def _remote_dpdk_dir(self) -> PurePath: +"""The remote DPDK dir. 
+ +This internal property should be set after extracting the DPDK tarball. If it's not set, +that implies the DPDK setup step has been skipped, in which case we can guess where +a previous build was located. +""" +if self.__remote_dpdk_dir is None: +self.__remote_dpdk_dir = self._guess_dpdk_remote_dir() +return self.__remote_dpdk_dir + +@_remote_dpdk_dir.setter +def _remote_dpdk_dir(self, value: PurePath) -> None: +self.__remote_dpdk_dir = value + +@property +def path_to_devbind_script(self) -> PurePath: +"""The path to the dpdk-devbind.py script on the node.""" +if self._path_to_devbind_script is None: +self._path_to_devbind_script = self.main_session.join_remote_path( +self._remote_dpdk_dir, "usertools", "dpdk-devbind.py" +) +return self._path_to_devbind_script + def set_up_test_run(self, test_run_config: TestRunConfiguration) -> N
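For illustration, the lazy lookup of the devbind script that this patch moves into the Node class can be sketched on its own. The class below is a simplified stand-in, not the framework's Node; only the caching pattern and the join_remote_path call mirror the diff above.

from pathlib import PurePath


class NodeSketch:
    """Simplified stand-in showing the cached devbind-script lookup."""

    def __init__(self, main_session, remote_dpdk_dir: PurePath) -> None:
        self.main_session = main_session
        self._remote_dpdk_dir = remote_dpdk_dir
        self._path_to_devbind_script: PurePath | None = None

    @property
    def path_to_devbind_script(self) -> PurePath:
        # Resolved once and cached; because the property now lives in the
        # base Node class, both the SUT and the TG node can use it.
        if self._path_to_devbind_script is None:
            self._path_to_devbind_script = self.main_session.join_remote_path(
                self._remote_dpdk_dir, "usertools", "dpdk-devbind.py"
            )
        return self._path_to_devbind_script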
[PATCH v1 0/2] dts: port over port_control testing suite
From: Jeremy Spewock This series ports over most of the test coverage provided by the port_control testing suite in the Old DTS framework. The only functionality missing is testing port functions in a VM through QEMU and testing support for resetting ports. The QEMU cases were omitted because the new framework has no way of managing virtual machines and no interest in adding that capability has been expressed yet. The reset cases were omitted because not all devices support resetting ports, and devices do not appear to expose through testpmd whether they support it. Jeremy Spewock (2): dts: add methods for closing and restarting ports to testpmd dts: add port control testing suite dts/framework/config/conf_yaml_schema.json| 3 +- dts/framework/remote_session/testpmd_shell.py | 21 + dts/tests/TestSuite_port_control.py | 80 +++ 3 files changed, 103 insertions(+), 1 deletion(-) create mode 100644 dts/tests/TestSuite_port_control.py -- 2.45.2
[PATCH v1 1/2] dts: add methods for closing ports to testpmd
From: Jeremy Spewock Closing ports is a standard configuration feature that is available in testpmd, but the framework lacks the ability to access this command through the Testpmd API. This patch adds a method that performs this action and verifies the results of sending the command to allow developers to have more control over the state of the ports that testpmd is aware of. Depends-on: patch-142952 ("dts: add ability to start/stop testpmd ports") Signed-off-by: Jeremy Spewock --- dts/framework/remote_session/testpmd_shell.py | 21 +++ 1 file changed, 21 insertions(+) diff --git a/dts/framework/remote_session/testpmd_shell.py b/dts/framework/remote_session/testpmd_shell.py index ca24b28070..51593c61f5 100644 --- a/dts/framework/remote_session/testpmd_shell.py +++ b/dts/framework/remote_session/testpmd_shell.py @@ -805,6 +805,27 @@ def start_all_ports(self, verify: bool = True) -> None: self.ports_started = True +@requires_stopped_ports +def close_all_ports(self, verify: bool = True) -> None: +"""Close all ports. + +Args: +verify: If :data:`True` the output of the close command will be scanned in an attempt +to verify that all ports were closed successfully. Defaults to :data:`True`. + +Raises: +InteractiveCommandExecutionError: If `verify` is :data:`True` and at least one port +failed to close. +""" +port_close_output = self.send_command("port close all") +if verify: +if type(self._app_params.ports) is list: +num_ports = len(self._app_params.ports) +if not all( +f"Port {p_id} is closed" in port_close_output for p_id in range(num_ports) +): +raise InteractiveCommandExecutionError("Ports were not closed successfully.") + def show_port_info_all(self) -> list[TestPmdPort]: """Returns the information of all the ports. -- 2.45.2
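For illustration, a test case could drive the new method roughly as below. This is a sketch, not code from the series: the TestPmdShell constructor call follows the style used in the port_control suite later in this series, and the ports are stopped explicitly because testpmd only closes ports that are already stopped.

from framework.remote_session.testpmd_shell import TestPmdShell
from framework.test_suite import TestSuite


class ExampleCloseSuite(TestSuite):
    """Hypothetical suite exercising close_all_ports()."""

    def test_close(self) -> None:
        with TestPmdShell(self.sut_node) as testpmd:
            # Ports must be stopped before testpmd will close them.
            testpmd.stop_all_ports()
            # With verify=True (the default) the output of "port close all"
            # is scanned and an exception is raised if a port stayed open.
            testpmd.close_all_ports()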
[PATCH v1 2/2] dts: add port control testing suite
From: Jeremy Spewock This patch ports over the port_control test suite from the Old DTS framework and adapts the functionality to fit with the current testing framework. The test suite provides validation of basic port control functions such as starting, stopping, and closing ports. It should be noted that this test suite is not completely 1-to-1 with the one from Old DTS as it does exclude test cases that use QEMU for testing as this is not something we are looking to add to the framework in the near future. It also excludes test cases for resetting ports as this feature is something that is not supported by all devices and does not expose a capability regarding if it is through testpmd. Depends-on: patch-142949 ("dts: add ability to send/receive multiple packets") Signed-off-by: Jeremy Spewock --- dts/framework/config/conf_yaml_schema.json | 3 +- dts/tests/TestSuite_port_control.py| 80 ++ 2 files changed, 82 insertions(+), 1 deletion(-) create mode 100644 dts/tests/TestSuite_port_control.py diff --git a/dts/framework/config/conf_yaml_schema.json b/dts/framework/config/conf_yaml_schema.json index f02a310bb5..78cbd17dad 100644 --- a/dts/framework/config/conf_yaml_schema.json +++ b/dts/framework/config/conf_yaml_schema.json @@ -187,7 +187,8 @@ "enum": [ "hello_world", "os_udp", -"pmd_buffer_scatter" +"pmd_buffer_scatter", +"port_control" ] }, "test_target": { diff --git a/dts/tests/TestSuite_port_control.py b/dts/tests/TestSuite_port_control.py new file mode 100644 index 00..9e843512ab --- /dev/null +++ b/dts/tests/TestSuite_port_control.py @@ -0,0 +1,80 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2024 University of New Hampshire +"""Port Control Testing Suite. + +This test suite serves to show that ports within testpmd support basic configuration functions. +Things such as starting a port, stopping a port, and closing a port should all be supported by the +device. Additionally, after each of these configuration steps (outside of closing the port) it +should still be possible to start the port again and verify that the port is able to forward a +large amount of packets (1000 are sent in the test cases). +""" +from scapy.layers.l2 import Ether # type: ignore[import-untyped] +from scapy.packet import Packet, Raw # type: ignore[import-untyped] + +from framework.params.testpmd import SimpleForwardingModes +from framework.remote_session.testpmd_shell import TestPmdShell +from framework.test_suite import TestSuite + + +class TestPortControl(TestSuite): +"""DPDK Port Control Testing Suite.""" + +def send_packets_and_verify(self) -> None: +"""Send 1000 packets and verify that all packets were forwarded back. + +Packets sent are identical and are all ethernet frames with a payload of 30 "X" characters. +This payload is used to differentiate noise on the wire from packets sent by this +framework. +""" +payload = "X" * 30 +num_pakts = 1000 +send_p = Ether() / Raw(payload) +recv_pakts: list[Packet] = [] +# The scapy sniffer can only handle a little under 200 packets per 1000 at a time, so this +# is limited to 100 per burst. +for _ in range(int(num_pakts / 100)): +recv_pakts += self.send_packets_and_capture([send_p] * 100) +recv_pakts += self.send_packets_and_capture([send_p] * (num_pakts % 100)) +recv_pakts = [ +p +for p in recv_pakts +if ( +# Remove padding from the bytes. 
+hasattr(p, "load") +and p.load.decode("utf-8").replace("\x00", "") == payload +) +] +self.verify( +len(recv_pakts) == num_pakts, +f"Received {len(recv_pakts)} packets when {num_pakts} were expected.", +) + +def test_start_ports(self) -> None: +"""Ensure that the port can receive traffic after explicitly being started.""" +with TestPmdShell(self.sut_node, forward_mode=SimpleForwardingModes.mac) as testpmd: +testpmd.start_all_ports() +testpmd.start() +self.send_packets_and_verify() + +def test_stop_ports(self) -> None: +"""Verify that the link goes down after stopping ports. + +This case also verifies that the port can be started again and properly forward traffic +after being stopped. +""" +with TestPmdShell(self.sut_node, forward_mode=SimpleForwardingModes.mac) as testpmd: +testpmd.stop_all_ports() +self.verify( +all(not p.is_link_up for p in testpmd.show_port_info_all()), +"Failed to stop all ports.", +) +testpmd.start() +self.send_packets_and_verify() + +def test_close_ports(self) -> None: +"""Verify that ports can be closed and no
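The padding check used by send_packets_and_verify() above can be shown in isolation. The snippet below fabricates a "captured" list instead of sniffing real traffic; it only demonstrates how trailing NUL padding is stripped before the payload comparison.

from scapy.layers.l2 import Ether
from scapy.packet import Raw

payload = "X" * 30
# One frame as sent, one padded with NUL bytes as a NIC or sniffer might
# deliver it; both should count as matches.
captured = [Ether() / Raw(payload), Ether() / Raw(payload + "\x00" * 4)]
matches = [
    p
    for p in captured
    if hasattr(p, "load") and p.load.decode("utf-8").replace("\x00", "") == payload
]
assert len(matches) == len(captured)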
[RFC PATCH v1 0/5] dts: add VFs to the framework
From: Jeremy Spewock There currently is no method of creating or managing virtual functions (VFs) in the new DTS framework but there are multiple test suites in the old DTS framework that provide testing coverage using them. This patch adds the functionality to the framework that is needed to create and use VFs in test suites in the future. The series is marked as an RFC primarily because it is a new feature that has been a recent talking point on the DTS bugzilla. The code however is functional. Jeremy Spewock (5): dts: allow binding only a single port to a different driver dts: parameterize what ports the TG sends packets to dts: add class for virtual functions dts: add OS abstractions for creating virtual functions dts: add functions for managing VFs to Node dts/framework/test_suite.py | 38 -- dts/framework/testbed_model/linux_session.py | 36 +- dts/framework/testbed_model/node.py | 115 +-- dts/framework/testbed_model/os_session.py| 40 +++ dts/framework/testbed_model/port.py | 37 +- 5 files changed, 247 insertions(+), 19 deletions(-) -- 2.46.0
[RFC PATCH v1 1/5] dts: allow binding only a single port to a different driver
From: Jeremy Spewock Previously the DTS framework only included methods that bind all ports that the test run was aware of to either the DPDK driver or the OS driver. There are however some cases, like creating virtual functions, where you would want some ports bound to the OS driver and others bound to their DPDK driver. This patch adds the ability to bind individual drivers to their respective ports to solve this problem. Signed-off-by: Jeremy Spewock --- dts/framework/testbed_model/node.py | 21 - 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/dts/framework/testbed_model/node.py b/dts/framework/testbed_model/node.py index 8e6181e424..85d4eb1f7c 100644 --- a/dts/framework/testbed_model/node.py +++ b/dts/framework/testbed_model/node.py @@ -167,12 +167,12 @@ def set_up_build_target(self, build_target_config: BuildTargetConfiguration) -> the setup steps will be taken. """ self._copy_dpdk_tarball() -self.bind_ports_to_driver() +self.bind_all_ports_to_driver() def tear_down_build_target(self) -> None: """Reset DPDK variables and bind port driver to the OS driver.""" self.__remote_dpdk_dir = None -self.bind_ports_to_driver(for_dpdk=False) +self.bind_all_ports_to_driver(for_dpdk=False) def create_session(self, name: str) -> OSSession: """Create and return a new OS-aware remote session. @@ -317,7 +317,7 @@ def _copy_dpdk_tarball(self) -> None: # then extract to remote path self.main_session.extract_remote_tarball(remote_tarball_path, self._remote_dpdk_dir) -def bind_ports_to_driver(self, for_dpdk: bool = True) -> None: +def bind_all_ports_to_driver(self, for_dpdk: bool = True) -> None: """Bind all ports on the node to a driver. Args: @@ -325,12 +325,15 @@ def bind_ports_to_driver(self, for_dpdk: bool = True) -> None: If :data:`False`, binds to os_driver. """ for port in self.ports: -driver = port.os_driver_for_dpdk if for_dpdk else port.os_driver -self.main_session.send_command( -f"{self.path_to_devbind_script} -b {driver} --force {port.pci}", -privileged=True, -verify=True, -) +self._bind_port_to_driver(port, for_dpdk) + +def _bind_port_to_driver(self, port: Port, for_dpdk: bool = True) -> None: +driver = port.os_driver_for_dpdk if for_dpdk else port.os_driver +self.main_session.send_command( +f"{self.path_to_devbind_script} -b {driver} --force {port.pci}", +privileged=True, +verify=True, +) def create_session(node_config: NodeConfiguration, name: str, logger: DTSLogger) -> OSSession: -- 2.46.0
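For illustration, the split lets a caller re-bind one physical port without touching the rest, which is exactly what the VF patches later in this series need. The sketch below is hypothetical; _bind_port_to_driver() is internal to Node, and test suites would normally reach it through a higher-level helper such as create_virtual_functions().

from framework.testbed_model.node import Node


def rebind_single_port_example(node: Node) -> None:
    """Hypothetical helper showing per-port binding enabled by this patch."""
    pf_port = node.ports[0]
    # Bind only this port back to its kernel driver (os_driver), e.g. so
    # VFs can be created on it; the other ports keep their DPDK driver.
    node._bind_port_to_driver(pf_port, for_dpdk=False)
    # Return the port to its DPDK driver (os_driver_for_dpdk) afterwards.
    node._bind_port_to_driver(pf_port)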
[RFC PATCH v1 2/5] dts: parameterize what ports the TG sends packets to
From: Jeremy Spewock Previously in the DTS framework the helper methods in the TestSutie class designated ports as either ingress or egress ports and would wrap the methods of the traffic generator to allow packets to only flow to those designated ingress or egress ports. This is undesirable in some cases, such as when you have virtual functions on top of your port, where the TG ports can send to more than one SUT port since the framework limits where the TG is allowed to send packets. This patch solves this problem by creating optional parameters that allow the user to specify which port to gather the MAC addresses from when sending and receiving packets. Signed-off-by: Jeremy Spewock --- dts/framework/test_suite.py | 38 ++--- 1 file changed, 31 insertions(+), 7 deletions(-) diff --git a/dts/framework/test_suite.py b/dts/framework/test_suite.py index 694b2eba65..d5c0021503 100644 --- a/dts/framework/test_suite.py +++ b/dts/framework/test_suite.py @@ -185,6 +185,8 @@ def send_packet_and_capture( packet: Packet, filter_config: PacketFilteringConfig = PacketFilteringConfig(), duration: float = 1, +sut_ingress: Port | None = None, +sut_egress: Port | None = None, ) -> list[Packet]: """Send and receive `packet` using the associated TG. @@ -195,11 +197,19 @@ def send_packet_and_capture( packet: The packet to send. filter_config: The filter to use when capturing packets. duration: Capture traffic for this amount of time after sending `packet`. +sut_ingress: Optional port to use as the SUT ingress port. Defaults to +`self._sut_port_ingress`. +sut_egress: Optional port to use as the SUT egress port. Defaults to +`self._sut_port_egress` Returns: A list of received packets. """ -packet = self._adjust_addresses(packet) +if sut_ingress is None: +sut_ingress = self._sut_port_ingress +if sut_egress is None: +sut_egress = self._sut_port_egress +packet = self._adjust_addresses(packet, sut_ingress, sut_egress) return self.tg_node.send_packet_and_capture( packet, self._tg_port_egress, @@ -208,18 +218,30 @@ def send_packet_and_capture( duration, ) -def get_expected_packet(self, packet: Packet) -> Packet: +def get_expected_packet( +self, packet: Packet, sut_ingress: Port | None = None, sut_egress: Port | None = None +) -> Packet: """Inject the proper L2/L3 addresses into `packet`. Args: packet: The packet to modify. +sut_ingress: Optional port to use as the SUT ingress port. Defaults to +`self._sut_port_ingress`. +sut_egress: Optional port to use as the SUT egress port. Defaults to +`self._sut_port_egress`. Returns: `packet` with injected L2/L3 addresses. """ -return self._adjust_addresses(packet, expected=True) - -def _adjust_addresses(self, packet: Packet, expected: bool = False) -> Packet: +if sut_ingress is None: +sut_ingress = self._sut_port_ingress +if sut_egress is None: +sut_egress = self._sut_port_egress +return self._adjust_addresses(packet, sut_ingress, sut_egress, expected=True) + +def _adjust_addresses( +self, packet: Packet, sut_ingress_port: Port, sut_egress_port: Port, expected: bool = False +) -> Packet: """L2 and L3 address additions in both directions. Assumptions: @@ -229,11 +251,13 @@ def _adjust_addresses(self, packet: Packet, expected: bool = False) -> Packet: packet: The packet to modify. expected: If :data:`True`, the direction is SUT -> TG, otherwise the direction is TG -> SUT. +sut_ingress_port: The port to use as the Rx port on the SUT. +sut_egress_port: The port to use as the Tx port on the SUT. 
""" if expected: # The packet enters the TG from SUT # update l2 addresses -packet.src = self._sut_port_egress.mac_address +packet.src = sut_egress_port.mac_address packet.dst = self._tg_port_ingress.mac_address # The packet is routed from TG egress to TG ingress @@ -244,7 +268,7 @@ def _adjust_addresses(self, packet: Packet, expected: bool = False) -> Packet: # The packet leaves TG towards SUT # update l2 addresses packet.src = self._tg_port_egress.mac_address -packet.dst = self._sut_port_ingress.mac_address +packet.dst = sut_ingress_port.mac_address # The packet is routed from TG egress to TG ingress # update l3 addresses -- 2.46.0
[RFC PATCH v1 4/5] dts: add OS abstractions for creating virtual functions
From: Jeremy Spewock Virtual functions in the framework are created using SR-IOV. The process for doing this can vary depending on the operating system, so the commands to create VFs have to be abstracted into different classes based on the operating system. This patch adds the stubs for methods that create VFs and get the PCI addresses of all VFs on a port to the abstract class as well as a linux implementation for the methods. Bugzilla ID: 1500 Signed-off-by: Jeremy Spewock --- dts/framework/testbed_model/linux_session.py | 36 +- dts/framework/testbed_model/os_session.py| 40 2 files changed, 75 insertions(+), 1 deletion(-) diff --git a/dts/framework/testbed_model/linux_session.py b/dts/framework/testbed_model/linux_session.py index 99abc21353..48bf212f6a 100644 --- a/dts/framework/testbed_model/linux_session.py +++ b/dts/framework/testbed_model/linux_session.py @@ -15,7 +15,7 @@ from typing_extensions import NotRequired -from framework.exception import ConfigurationError, RemoteCommandExecutionError +from framework.exception import ConfigurationError, RemoteCommandExecutionError, InternalError from framework.utils import expand_range from .cpu import LogicalCore @@ -210,3 +210,37 @@ def configure_ipv4_forwarding(self, enable: bool) -> None: """Overrides :meth:`~.os_session.OSSession.configure_ipv4_forwarding`.""" state = 1 if enable else 0 self.send_command(f"sysctl -w net.ipv4.ip_forward={state}", privileged=True) + +def set_num_virtual_functions(self, num: int, pf_port: Port) -> bool: +"""Overrides :meth:`~.os_session.OSSession.set_num_virtual_functions`.""" +sys_bus_path = f"/sys/bus/pci/devices/{pf_port.pci}/sriov_numvfs".replace(":", "\\:") +curr_num_vfs = int(self.send_command(f"cat {sys_bus_path}").stdout) +if num > 0 and curr_num_vfs >= num: +self._logger.info( +f"{curr_num_vfs} VFs already configured on port {pf_port.identifier.pci} on node " +f"{pf_port.identifier.node}." +) +return False +elif num > 0 and curr_num_vfs > 0: +self._logger.error( +f"Not enough VFs configured on port {pf_port.identifier.pci} on node " +f"{pf_port.identifier.node}. Need {num} but only {curr_num_vfs} are configured. " +"DTS is unable to modify number of preexisting VFs." +) +raise InternalError("Failed to create VFs on port.") +self.send_command(f"echo {num} > {sys_bus_path}", privileged=True, verify=True) +return True + +def get_pci_addr_of_vfs(self, pf_port: Port) -> list[str]: +"""Overrides :meth:`~.os_session.OSSession.get_pci_addr_of_vfs`.""" +sys_bus_path = f"/sys/bus/pci/devices/{pf_port.pci}".replace(":", "\\:") +curr_num_vfs = int(self.send_command(f"cat {sys_bus_path}/sriov_numvfs").stdout) +if curr_num_vfs > 0: +pci_addrs = self.send_command( +'awk -F "PCI_SLOT_NAME=" "/PCI_SLOT_NAME=/ {print \\$2}" ' ++ f"{sys_bus_path}/virtfn*/uevent", +privileged=True, +) +return pci_addrs.stdout.splitlines() +else: +return [] diff --git a/dts/framework/testbed_model/os_session.py b/dts/framework/testbed_model/os_session.py index 79f56b289b..191fc3c0c8 100644 --- a/dts/framework/testbed_model/os_session.py +++ b/dts/framework/testbed_model/os_session.py @@ -395,3 +395,43 @@ def configure_ipv4_forwarding(self, enable: bool) -> None: Args: enable: If :data:`True`, enable the forwarding, otherwise disable it. """ + +@abstractmethod +def set_num_virtual_functions(self, num: int, pf_port: Port) -> bool: +"""Update the number of virtual functions (VFs) on a port. 
+ +It should be noted that, due to the nature of VFs, if there are already VFs that exist on +the physical function (PF) prior to calling this function, additional ones cannot be added. +The only way to add more VFs is to remove the existing ones and then set the desired amount. For +this reason, this method will handle creation in the following order: + +1. Use existing VFs on the PF if the number of existing VFs is greater than or equal to +`num` +2. Raise an exception noting that VFs cannot be created if the PF has some VFs already set +on it, but the total VFs that it has are less than `num`. +3. Create `num` VFs on the PF if there are none on it already + +Args: +num: The number of VFs to set on the port. +pf_port: The port to add the VFs to. + +Raises: +InternalError: If `pf_port` has less than `num` VFs configured on it +already. + +Returns: +:data:`True` if this method successfully created VFs, :data:`False` i
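For illustration, the sysfs mechanism that the Linux implementation wraps can be exercised directly. The PCI address below is a placeholder and the snippet needs root and SR-IOV capable hardware; it mirrors the sriov_numvfs write and the virtfn*/uevent parsing from the diff, using pathlib instead of remote shell commands.

from pathlib import Path

PF_PCI = "0000:17:00.0"  # placeholder physical-function address
device = Path(f"/sys/bus/pci/devices/{PF_PCI}")

# Writing to sriov_numvfs creates VFs on the PF (writing 0 removes them).
if int((device / "sriov_numvfs").read_text()) == 0:
    (device / "sriov_numvfs").write_text("2")

# Each VF shows up as a virtfn* symlink whose uevent file contains a
# PCI_SLOT_NAME=<address> line; this is what the awk call above extracts.
vf_addresses = [
    line.partition("=")[2]
    for virtfn in sorted(device.glob("virtfn*"))
    for line in (virtfn / "uevent").read_text().splitlines()
    if line.startswith("PCI_SLOT_NAME=")
]
print(vf_addresses)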
[RFC PATCH v1 3/5] dts: add class for virtual functions
From: Jeremy Spewock In DPDK applications virtual functions are treated the same as ports, but within the framework there are benefits to differentiating the two in order to add more metadata to VFs about where they originate from. For this reason this patch adds a new class for handling virtual functions that extends the Port class with some additional information about the VF. Bugzilla ID: 1500 Signed-off-by: Jeremy Spewock --- dts/framework/testbed_model/port.py | 37 - 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/dts/framework/testbed_model/port.py b/dts/framework/testbed_model/port.py index 817405bea4..c1d85fec2b 100644 --- a/dts/framework/testbed_model/port.py +++ b/dts/framework/testbed_model/port.py @@ -27,7 +27,7 @@ class PortIdentifier: pci: str -@dataclass(slots=True) +@dataclass class Port: """Physical port on a node. @@ -80,6 +80,41 @@ def pci(self) -> str: return self.identifier.pci +@dataclass +class VirtualFunction(Port): +"""Virtual Function (VF) on a port. + +DPDK applications often treat VFs the same as they do the physical ports (PFs) on the host. +For this reason VFs are represented in the framework as a type of port with some additional +metadata that allows the framework to more easily identify which device the VF belongs to as +well as where the VF originated from. + +Attributes: +created_by_framework: :data:`True` if this VF represents one that the DTS framework created +on the node, :data:`False` otherwise. +pf_port: The PF that this VF was created on/gathered from. +""" + +created_by_framework: bool = False +pf_port: Port | None = None + +def __init__( +self, node_name: str, config: PortConfig, created_by_framework: bool, pf_port: Port +) -> None: +"""Extends :meth:`Port.__init__` with VF specific metadata. + +Args: +node_name: The name of the node the VF resides on. +config: Configuration information about the VF. +created_by_framework: :data:`True` if DTS created this VF, otherwise :data:`False` if +this class represents a VF that was preexisting on the node. +pf_port: The PF that this VF was created on/gathered from. +""" +super().__init__(node_name, config) +self.created_by_framework = created_by_framework +self.pf_port = pf_port + + @dataclass(slots=True, frozen=True) class PortLink: """The physical, cabled connection between the ports. -- 2.46.0
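For illustration, constructing a VirtualFunction by hand might look as follows. The positional PortConfig fields are an assumption taken from how patch 5/5 of this series builds the object (node name, VF PCI address, DPDK driver, OS driver, peer node, peer PCI); they are not a documented signature.

from framework.config import PortConfig
from framework.testbed_model.port import Port, VirtualFunction


def vf_from_pf(pf_port: Port, vf_pci: str) -> VirtualFunction:
    """Hypothetical helper building a VF entry for an address found on a PF."""
    vf_config = PortConfig(
        pf_port.identifier.node,
        vf_pci,
        pf_port.os_driver_for_dpdk,
        pf_port.os_driver,
        pf_port.peer.node,
        pf_port.peer.pci,
    )
    # created_by_framework records whether DTS made the VF or merely found it.
    return VirtualFunction(pf_port.identifier.node, vf_config, True, pf_port)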
[RFC PATCH v1 5/5] dts: add functions for managing VFs to Node
From: Jeremy Spewock In order for test suites to create virtual functions there has to be functions in the API that developers can use. This patch adds the ability to create virtual functions to the Node API so that they are reachable within test suites. Bugzilla ID: 1500 Signed-off-by: Jeremy Spewock --- dts/framework/testbed_model/node.py | 96 - 1 file changed, 94 insertions(+), 2 deletions(-) diff --git a/dts/framework/testbed_model/node.py b/dts/framework/testbed_model/node.py index 85d4eb1f7c..101a8edfbc 100644 --- a/dts/framework/testbed_model/node.py +++ b/dts/framework/testbed_model/node.py @@ -14,6 +14,7 @@ """ import os +import re import tarfile from abc import ABC from ipaddress import IPv4Interface, IPv6Interface @@ -24,9 +25,10 @@ OS, BuildTargetConfiguration, NodeConfiguration, +PortConfig, TestRunConfiguration, ) -from framework.exception import ConfigurationError +from framework.exception import ConfigurationError, InternalError from framework.logger import DTSLogger, get_dts_logger from framework.settings import SETTINGS @@ -39,7 +41,7 @@ ) from .linux_session import LinuxSession from .os_session import OSSession -from .port import Port +from .port import Port, VirtualFunction class Node(ABC): @@ -335,6 +337,96 @@ def _bind_port_to_driver(self, port: Port, for_dpdk: bool = True) -> None: verify=True, ) +def create_virtual_functions( +self, num: int, pf_port: Port, dpdk_driver: str | None = None +) -> list[VirtualFunction]: +"""Create virtual functions (VFs) from a given physical function (PF) on the node. + +Virtual functions will be created if there are not any currently configured on `pf_port`. +If there are greater than or equal to `num` VFs already configured on `pf_port`, those will +be used instead of creating more. In order to create VFs, the PF must be bound to its +kernel driver. This method will handle binding `pf_port` and any other ports in the test +run that reside on the same device back to their OS drivers if this was not done already. +VFs gathered in this method will be bound to `driver` if one is provided, or the DPDK +driver for `pf_port` and then added to `self.ports`. + +Args: +num: The number of VFs to create. Must be greater than 0. +pf_port: The PF to create the VFs on. +dpdk_driver: Optional driver to bind the VFs to after they are created. Defaults to the +DPDK driver of `pf_port`. + +Raises: +InternalError: If `num` is less than or equal to 0. +""" +if num <= 0: +raise InternalError( +"Method for creating virtual functions received a non-positive value." 
+) +if not dpdk_driver: +dpdk_driver = pf_port.os_driver_for_dpdk +# Get any other port that is on the same device which DTS is aware of +all_device_ports = [ +p for p in self.ports if p.pci.split(".")[0] == pf_port.pci.split(".")[0] +] +# Ports must be bound to the kernel driver in order to create VFs from them +for port in all_device_ports: +self._bind_port_to_driver(port, False) +# Some PMDs require the interface being up in order to make VFs +self.configure_port_state(port) +created_vfs = self.main_session.set_num_virtual_functions(num, pf_port) +# We don't need more then `num` VFs from the list +vf_pcis = self.main_session.get_pci_addr_of_vfs(pf_port)[:num] +devbind_info = self.main_session.send_command( +f"{self.path_to_devbind_script} -s", privileged=True +).stdout + +ret = [] + +for pci in vf_pcis: +original_driver = re.search(f"{pci}.*drv=([\\d\\w-]*)", devbind_info) +os_driver = original_driver[1] if original_driver else pf_port.os_driver +vf_config = PortConfig( +self.name, pci, dpdk_driver, os_driver, pf_port.peer.node, pf_port.peer.pci +) +vf_port = VirtualFunction(self.name, vf_config, created_vfs, pf_port) +self.main_session.update_ports([vf_port]) +self._bind_port_to_driver(vf_port) +self.ports.append(vf_port) +ret.append(vf_port) +return ret + +def get_vfs_on_port(self, pf_port: Port) -> list[VirtualFunction]: +"""Get all virtual functions (VFs) that DTS is aware of on `pf_port`. + +Args: +pf_port: The port to search for the VFs on. + +Returns: +A list of VFs in the framework that were created/gathered from `pf_port`. +""" +return [p for p in self.ports if isinstance(p, VirtualFunction) and p.pf_port == pf_port] + +def remove_virtual_functions(self, pf_port: Port) -> None: +"""Remove
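For illustration, a test suite could tie the pieces of this series together roughly as below, assuming the usual set_up_suite/tear_down_suite hooks: it provisions VFs through the new Node API and then targets one of them with the per-call sut_ingress/sut_egress parameters from patch 2/5. This is a sketch; remove_virtual_functions() is truncated above, so its call here simply mirrors its name and the pf_port argument used by get_vfs_on_port().

from scapy.layers.inet import IP
from scapy.layers.l2 import Ether
from scapy.packet import Raw

from framework.test_suite import TestSuite


class ExampleVfSuite(TestSuite):
    """Hypothetical suite that provisions VFs before its test cases run."""

    def set_up_suite(self) -> None:
        self._pf_port = self.sut_node.ports[0]
        # Creates (or reuses) two VFs on the PF, binds them to the PF's
        # DPDK driver and appends them to self.sut_node.ports.
        self.vf_ports = self.sut_node.create_virtual_functions(2, self._pf_port)

    def test_vf_forwarding(self) -> None:
        vf = self.vf_ports[0]
        # A real test would also start a forwarding application (e.g. testpmd).
        packet = Ether() / IP() / Raw("X" * 30)
        # L2 addresses are taken from the VF instead of the suite's default
        # ingress/egress ports (patch 2/5 of this series).
        received = self.send_packet_and_capture(packet, sut_ingress=vf, sut_egress=vf)
        self.verify(len(received) > 0, "No packet forwarded back through the VF.")

    def tear_down_suite(self) -> None:
        # Only clean up VFs that DTS is tracking for this PF.
        if self.sut_node.get_vfs_on_port(self._pf_port):
            self.sut_node.remove_virtual_functions(self._pf_port)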
[RFC PATCH v2 0/5] dts: add VFs to the framework
From: Jeremy Spewock v2: * Accidentally left out a formatting fix in v1. Jeremy Spewock (5): dts: allow binding only a single port to a different driver dts: parameterize what ports the TG sends packets to dts: add class for virtual functions dts: add OS abstractions for creating virtual functions dts: add functions for managing VFs to Node dts/framework/test_suite.py | 38 -- dts/framework/testbed_model/linux_session.py | 40 ++- dts/framework/testbed_model/node.py | 115 +-- dts/framework/testbed_model/os_session.py| 40 +++ dts/framework/testbed_model/port.py | 37 +- 5 files changed, 251 insertions(+), 19 deletions(-) -- 2.46.0
[RFC PATCH v2 1/5] dts: allow binding only a single port to a different driver
From: Jeremy Spewock Previously the DTS framework only included methods that bind all ports that the test run was aware of to either the DPDK driver or the OS driver. There are however some cases, like creating virtual functions, where you would want some ports bound to the OS driver and others bound to their DPDK driver. This patch adds the ability to bind individual drivers to their respective ports to solve this problem. Signed-off-by: Jeremy Spewock --- dts/framework/testbed_model/node.py | 21 - 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/dts/framework/testbed_model/node.py b/dts/framework/testbed_model/node.py index 8e6181e424..85d4eb1f7c 100644 --- a/dts/framework/testbed_model/node.py +++ b/dts/framework/testbed_model/node.py @@ -167,12 +167,12 @@ def set_up_build_target(self, build_target_config: BuildTargetConfiguration) -> the setup steps will be taken. """ self._copy_dpdk_tarball() -self.bind_ports_to_driver() +self.bind_all_ports_to_driver() def tear_down_build_target(self) -> None: """Reset DPDK variables and bind port driver to the OS driver.""" self.__remote_dpdk_dir = None -self.bind_ports_to_driver(for_dpdk=False) +self.bind_all_ports_to_driver(for_dpdk=False) def create_session(self, name: str) -> OSSession: """Create and return a new OS-aware remote session. @@ -317,7 +317,7 @@ def _copy_dpdk_tarball(self) -> None: # then extract to remote path self.main_session.extract_remote_tarball(remote_tarball_path, self._remote_dpdk_dir) -def bind_ports_to_driver(self, for_dpdk: bool = True) -> None: +def bind_all_ports_to_driver(self, for_dpdk: bool = True) -> None: """Bind all ports on the node to a driver. Args: @@ -325,12 +325,15 @@ def bind_ports_to_driver(self, for_dpdk: bool = True) -> None: If :data:`False`, binds to os_driver. """ for port in self.ports: -driver = port.os_driver_for_dpdk if for_dpdk else port.os_driver -self.main_session.send_command( -f"{self.path_to_devbind_script} -b {driver} --force {port.pci}", -privileged=True, -verify=True, -) +self._bind_port_to_driver(port, for_dpdk) + +def _bind_port_to_driver(self, port: Port, for_dpdk: bool = True) -> None: +driver = port.os_driver_for_dpdk if for_dpdk else port.os_driver +self.main_session.send_command( +f"{self.path_to_devbind_script} -b {driver} --force {port.pci}", +privileged=True, +verify=True, +) def create_session(node_config: NodeConfiguration, name: str, logger: DTSLogger) -> OSSession: -- 2.46.0
[RFC PATCH v2 2/5] dts: parameterize what ports the TG sends packets to
From: Jeremy Spewock Previously in the DTS framework the helper methods in the TestSutie class designated ports as either ingress or egress ports and would wrap the methods of the traffic generator to allow packets to only flow to those designated ingress or egress ports. This is undesirable in some cases, such as when you have virtual functions on top of your port, where the TG ports can send to more than one SUT port since the framework limits where the TG is allowed to send packets. This patch solves this problem by creating optional parameters that allow the user to specify which port to gather the MAC addresses from when sending and receiving packets. Signed-off-by: Jeremy Spewock --- dts/framework/test_suite.py | 38 ++--- 1 file changed, 31 insertions(+), 7 deletions(-) diff --git a/dts/framework/test_suite.py b/dts/framework/test_suite.py index 694b2eba65..d5c0021503 100644 --- a/dts/framework/test_suite.py +++ b/dts/framework/test_suite.py @@ -185,6 +185,8 @@ def send_packet_and_capture( packet: Packet, filter_config: PacketFilteringConfig = PacketFilteringConfig(), duration: float = 1, +sut_ingress: Port | None = None, +sut_egress: Port | None = None, ) -> list[Packet]: """Send and receive `packet` using the associated TG. @@ -195,11 +197,19 @@ def send_packet_and_capture( packet: The packet to send. filter_config: The filter to use when capturing packets. duration: Capture traffic for this amount of time after sending `packet`. +sut_ingress: Optional port to use as the SUT ingress port. Defaults to +`self._sut_port_ingress`. +sut_egress: Optional port to use as the SUT egress port. Defaults to +`self._sut_port_egress` Returns: A list of received packets. """ -packet = self._adjust_addresses(packet) +if sut_ingress is None: +sut_ingress = self._sut_port_ingress +if sut_egress is None: +sut_egress = self._sut_port_egress +packet = self._adjust_addresses(packet, sut_ingress, sut_egress) return self.tg_node.send_packet_and_capture( packet, self._tg_port_egress, @@ -208,18 +218,30 @@ def send_packet_and_capture( duration, ) -def get_expected_packet(self, packet: Packet) -> Packet: +def get_expected_packet( +self, packet: Packet, sut_ingress: Port | None = None, sut_egress: Port | None = None +) -> Packet: """Inject the proper L2/L3 addresses into `packet`. Args: packet: The packet to modify. +sut_ingress: Optional port to use as the SUT ingress port. Defaults to +`self._sut_port_ingress`. +sut_egress: Optional port to use as the SUT egress port. Defaults to +`self._sut_port_egress`. Returns: `packet` with injected L2/L3 addresses. """ -return self._adjust_addresses(packet, expected=True) - -def _adjust_addresses(self, packet: Packet, expected: bool = False) -> Packet: +if sut_ingress is None: +sut_ingress = self._sut_port_ingress +if sut_egress is None: +sut_egress = self._sut_port_egress +return self._adjust_addresses(packet, sut_ingress, sut_egress, expected=True) + +def _adjust_addresses( +self, packet: Packet, sut_ingress_port: Port, sut_egress_port: Port, expected: bool = False +) -> Packet: """L2 and L3 address additions in both directions. Assumptions: @@ -229,11 +251,13 @@ def _adjust_addresses(self, packet: Packet, expected: bool = False) -> Packet: packet: The packet to modify. expected: If :data:`True`, the direction is SUT -> TG, otherwise the direction is TG -> SUT. +sut_ingress_port: The port to use as the Rx port on the SUT. +sut_egress_port: The port to use as the Tx port on the SUT. 
""" if expected: # The packet enters the TG from SUT # update l2 addresses -packet.src = self._sut_port_egress.mac_address +packet.src = sut_egress_port.mac_address packet.dst = self._tg_port_ingress.mac_address # The packet is routed from TG egress to TG ingress @@ -244,7 +268,7 @@ def _adjust_addresses(self, packet: Packet, expected: bool = False) -> Packet: # The packet leaves TG towards SUT # update l2 addresses packet.src = self._tg_port_egress.mac_address -packet.dst = self._sut_port_ingress.mac_address +packet.dst = sut_ingress_port.mac_address # The packet is routed from TG egress to TG ingress # update l3 addresses -- 2.46.0
[RFC PATCH v2 3/5] dts: add class for virtual functions
From: Jeremy Spewock In DPDK applications virtual functions are treated the same as ports, but within the framework there are benefits to differentiating the two in order to add more metadata to VFs about where they originate from. For this reason this patch adds a new class for handling virtual functions that extends the Port class with some additional information about the VF. Bugzilla ID: 1500 Signed-off-by: Jeremy Spewock --- dts/framework/testbed_model/port.py | 37 - 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/dts/framework/testbed_model/port.py b/dts/framework/testbed_model/port.py index 817405bea4..c1d85fec2b 100644 --- a/dts/framework/testbed_model/port.py +++ b/dts/framework/testbed_model/port.py @@ -27,7 +27,7 @@ class PortIdentifier: pci: str -@dataclass(slots=True) +@dataclass class Port: """Physical port on a node. @@ -80,6 +80,41 @@ def pci(self) -> str: return self.identifier.pci +@dataclass +class VirtualFunction(Port): +"""Virtual Function (VF) on a port. + +DPDK applications often treat VFs the same as they do the physical ports (PFs) on the host. +For this reason VFs are represented in the framework as a type of port with some additional +metadata that allows the framework to more easily identify which device the VF belongs to as +well as where the VF originated from. + +Attributes: +created_by_framework: :data:`True` if this VF represents one that the DTS framework created +on the node, :data:`False` otherwise. +pf_port: The PF that this VF was created on/gathered from. +""" + +created_by_framework: bool = False +pf_port: Port | None = None + +def __init__( +self, node_name: str, config: PortConfig, created_by_framework: bool, pf_port: Port +) -> None: +"""Extends :meth:`Port.__init__` with VF specific metadata. + +Args: +node_name: The name of the node the VF resides on. +config: Configuration information about the VF. +created_by_framework: :data:`True` if DTS created this VF, otherwise :data:`False` if +this class represents a VF that was preexisting on the node. +pf_port: The PF that this VF was created on/gathered from. +""" +super().__init__(node_name, config) +self.created_by_framework = created_by_framework +self.pf_port = pf_port + + @dataclass(slots=True, frozen=True) class PortLink: """The physical, cabled connection between the ports. -- 2.46.0
[RFC PATCH v2 4/5] dts: add OS abstractions for creating virtual functions
From: Jeremy Spewock Virtual functions in the framework are created using SR-IOV. The process for doing this can vary depending on the operating system, so the commands to create VFs have to be abstracted into different classes based on the operating system. This patch adds the stubs for methods that create VFs and get the PCI addresses of all VFs on a port to the abstract class as well as a linux implementation for the methods. Bugzilla ID: 1500 Signed-off-by: Jeremy Spewock --- dts/framework/testbed_model/linux_session.py | 40 +++- dts/framework/testbed_model/os_session.py| 40 2 files changed, 79 insertions(+), 1 deletion(-) diff --git a/dts/framework/testbed_model/linux_session.py b/dts/framework/testbed_model/linux_session.py index 99abc21353..738ddd7600 100644 --- a/dts/framework/testbed_model/linux_session.py +++ b/dts/framework/testbed_model/linux_session.py @@ -15,7 +15,11 @@ from typing_extensions import NotRequired -from framework.exception import ConfigurationError, RemoteCommandExecutionError +from framework.exception import ( +ConfigurationError, +InternalError, +RemoteCommandExecutionError, +) from framework.utils import expand_range from .cpu import LogicalCore @@ -210,3 +214,37 @@ def configure_ipv4_forwarding(self, enable: bool) -> None: """Overrides :meth:`~.os_session.OSSession.configure_ipv4_forwarding`.""" state = 1 if enable else 0 self.send_command(f"sysctl -w net.ipv4.ip_forward={state}", privileged=True) + +def set_num_virtual_functions(self, num: int, pf_port: Port) -> bool: +"""Overrides :meth:`~.os_session.OSSession.set_num_virtual_functions`.""" +sys_bus_path = f"/sys/bus/pci/devices/{pf_port.pci}/sriov_numvfs".replace(":", "\\:") +curr_num_vfs = int(self.send_command(f"cat {sys_bus_path}").stdout) +if num > 0 and curr_num_vfs >= num: +self._logger.info( +f"{curr_num_vfs} VFs already configured on port {pf_port.identifier.pci} on node " +f"{pf_port.identifier.node}." +) +return False +elif num > 0 and curr_num_vfs > 0: +self._logger.error( +f"Not enough VFs configured on port {pf_port.identifier.pci} on node " +f"{pf_port.identifier.node}. Need {num} but only {curr_num_vfs} are configured. " +"DTS is unable to modify number of preexisting VFs." +) +raise InternalError("Failed to create VFs on port.") +self.send_command(f"echo {num} > {sys_bus_path}", privileged=True, verify=True) +return True + +def get_pci_addr_of_vfs(self, pf_port: Port) -> list[str]: +"""Overrides :meth:`~.os_session.OSSession.get_pci_addr_of_vfs`.""" +sys_bus_path = f"/sys/bus/pci/devices/{pf_port.pci}".replace(":", "\\:") +curr_num_vfs = int(self.send_command(f"cat {sys_bus_path}/sriov_numvfs").stdout) +if curr_num_vfs > 0: +pci_addrs = self.send_command( +'awk -F "PCI_SLOT_NAME=" "/PCI_SLOT_NAME=/ {print \\$2}" ' ++ f"{sys_bus_path}/virtfn*/uevent", +privileged=True, +) +return pci_addrs.stdout.splitlines() +else: +return [] diff --git a/dts/framework/testbed_model/os_session.py b/dts/framework/testbed_model/os_session.py index 79f56b289b..191fc3c0c8 100644 --- a/dts/framework/testbed_model/os_session.py +++ b/dts/framework/testbed_model/os_session.py @@ -395,3 +395,43 @@ def configure_ipv4_forwarding(self, enable: bool) -> None: Args: enable: If :data:`True`, enable the forwarding, otherwise disable it. """ + +@abstractmethod +def set_num_virtual_functions(self, num: int, pf_port: Port) -> bool: +"""Update the number of virtual functions (VFs) on a port. 
+ +It should be noted that, due to the nature of VFs, if there are already VFs that exist on +the physical function (PF) prior to calling this function, additional ones cannot be added. +The only way to add more VFs is to remove the existing ones and then set the desired amount. For +this reason, this method will handle creation in the following order: + +1. Use existing VFs on the PF if the number of existing VFs is greater than or equal to +`num` +2. Raise an exception noting that VFs cannot be created if the PF has some VFs already set +on it, but the total VFs that it has are less than `num`. +3. Create `num` VFs on the PF if there are none on it already + +Args: +num: The number of VFs to set on the port. +pf_port: The port to add the VFs to. + +Raises: +InternalError: If `pf_port` has less than `num` VFs configured on it +already. + +Returns: +:data:`True` if this method successfully create
[RFC PATCH v2 5/5] dts: add functions for managing VFs to Node
From: Jeremy Spewock In order for test suites to create virtual functions there has to be functions in the API that developers can use. This patch adds the ability to create virtual functions to the Node API so that they are reachable within test suites. Bugzilla ID: 1500 Signed-off-by: Jeremy Spewock --- dts/framework/testbed_model/node.py | 96 - 1 file changed, 94 insertions(+), 2 deletions(-) diff --git a/dts/framework/testbed_model/node.py b/dts/framework/testbed_model/node.py index 85d4eb1f7c..101a8edfbc 100644 --- a/dts/framework/testbed_model/node.py +++ b/dts/framework/testbed_model/node.py @@ -14,6 +14,7 @@ """ import os +import re import tarfile from abc import ABC from ipaddress import IPv4Interface, IPv6Interface @@ -24,9 +25,10 @@ OS, BuildTargetConfiguration, NodeConfiguration, +PortConfig, TestRunConfiguration, ) -from framework.exception import ConfigurationError +from framework.exception import ConfigurationError, InternalError from framework.logger import DTSLogger, get_dts_logger from framework.settings import SETTINGS @@ -39,7 +41,7 @@ ) from .linux_session import LinuxSession from .os_session import OSSession -from .port import Port +from .port import Port, VirtualFunction class Node(ABC): @@ -335,6 +337,96 @@ def _bind_port_to_driver(self, port: Port, for_dpdk: bool = True) -> None: verify=True, ) +def create_virtual_functions( +self, num: int, pf_port: Port, dpdk_driver: str | None = None +) -> list[VirtualFunction]: +"""Create virtual functions (VFs) from a given physical function (PF) on the node. + +Virtual functions will be created if there are not any currently configured on `pf_port`. +If there are greater than or equal to `num` VFs already configured on `pf_port`, those will +be used instead of creating more. In order to create VFs, the PF must be bound to its +kernel driver. This method will handle binding `pf_port` and any other ports in the test +run that reside on the same device back to their OS drivers if this was not done already. +VFs gathered in this method will be bound to `driver` if one is provided, or the DPDK +driver for `pf_port` and then added to `self.ports`. + +Args: +num: The number of VFs to create. Must be greater than 0. +pf_port: The PF to create the VFs on. +dpdk_driver: Optional driver to bind the VFs to after they are created. Defaults to the +DPDK driver of `pf_port`. + +Raises: +InternalError: If `num` is less than or equal to 0. +""" +if num <= 0: +raise InternalError( +"Method for creating virtual functions received a non-positive value." 
+) +if not dpdk_driver: +dpdk_driver = pf_port.os_driver_for_dpdk +# Get any other port that is on the same device which DTS is aware of +all_device_ports = [ +p for p in self.ports if p.pci.split(".")[0] == pf_port.pci.split(".")[0] +] +# Ports must be bound to the kernel driver in order to create VFs from them +for port in all_device_ports: +self._bind_port_to_driver(port, False) +# Some PMDs require the interface being up in order to make VFs +self.configure_port_state(port) +created_vfs = self.main_session.set_num_virtual_functions(num, pf_port) +# We don't need more then `num` VFs from the list +vf_pcis = self.main_session.get_pci_addr_of_vfs(pf_port)[:num] +devbind_info = self.main_session.send_command( +f"{self.path_to_devbind_script} -s", privileged=True +).stdout + +ret = [] + +for pci in vf_pcis: +original_driver = re.search(f"{pci}.*drv=([\\d\\w-]*)", devbind_info) +os_driver = original_driver[1] if original_driver else pf_port.os_driver +vf_config = PortConfig( +self.name, pci, dpdk_driver, os_driver, pf_port.peer.node, pf_port.peer.pci +) +vf_port = VirtualFunction(self.name, vf_config, created_vfs, pf_port) +self.main_session.update_ports([vf_port]) +self._bind_port_to_driver(vf_port) +self.ports.append(vf_port) +ret.append(vf_port) +return ret + +def get_vfs_on_port(self, pf_port: Port) -> list[VirtualFunction]: +"""Get all virtual functions (VFs) that DTS is aware of on `pf_port`. + +Args: +pf_port: The port to search for the VFs on. + +Returns: +A list of VFs in the framework that were created/gathered from `pf_port`. +""" +return [p for p in self.ports if isinstance(p, VirtualFunction) and p.pf_port == pf_port] + +def remove_virtual_functions(self, pf_port: Port) -> None: +"""Remove
[RFC PATCH v2 0/5] dts: add VFs to the framework
From: Jeremy Spewock v2: * Accidentally left out a formatting fix in v1. Jeremy Spewock (5): dts: allow binding only a single port to a different driver dts: parameterize what ports the TG sends packets to dts: add class for virtual functions dts: add OS abstractions for creating virtual functions dts: add functions for managing VFs to Node dts/framework/test_suite.py | 38 -- dts/framework/testbed_model/linux_session.py | 40 ++- dts/framework/testbed_model/node.py | 115 +-- dts/framework/testbed_model/os_session.py| 40 +++ dts/framework/testbed_model/port.py | 37 +- 5 files changed, 251 insertions(+), 19 deletions(-) -- 2.46.0
[RFC PATCH v2 1/5] dts: allow binding only a single port to a different driver
From: Jeremy Spewock Previously the DTS framework only included methods that bind all ports that the test run was aware of to either the DPDK driver or the OS driver. There are however some cases, like creating virtual functions, where you would want some ports bound to the OS driver and others bound to their DPDK driver. This patch adds the ability to bind individual drivers to their respective ports to solve this problem. Signed-off-by: Jeremy Spewock --- dts/framework/testbed_model/node.py | 21 - 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/dts/framework/testbed_model/node.py b/dts/framework/testbed_model/node.py index 8e6181e424..85d4eb1f7c 100644 --- a/dts/framework/testbed_model/node.py +++ b/dts/framework/testbed_model/node.py @@ -167,12 +167,12 @@ def set_up_build_target(self, build_target_config: BuildTargetConfiguration) -> the setup steps will be taken. """ self._copy_dpdk_tarball() -self.bind_ports_to_driver() +self.bind_all_ports_to_driver() def tear_down_build_target(self) -> None: """Reset DPDK variables and bind port driver to the OS driver.""" self.__remote_dpdk_dir = None -self.bind_ports_to_driver(for_dpdk=False) +self.bind_all_ports_to_driver(for_dpdk=False) def create_session(self, name: str) -> OSSession: """Create and return a new OS-aware remote session. @@ -317,7 +317,7 @@ def _copy_dpdk_tarball(self) -> None: # then extract to remote path self.main_session.extract_remote_tarball(remote_tarball_path, self._remote_dpdk_dir) -def bind_ports_to_driver(self, for_dpdk: bool = True) -> None: +def bind_all_ports_to_driver(self, for_dpdk: bool = True) -> None: """Bind all ports on the node to a driver. Args: @@ -325,12 +325,15 @@ def bind_ports_to_driver(self, for_dpdk: bool = True) -> None: If :data:`False`, binds to os_driver. """ for port in self.ports: -driver = port.os_driver_for_dpdk if for_dpdk else port.os_driver -self.main_session.send_command( -f"{self.path_to_devbind_script} -b {driver} --force {port.pci}", -privileged=True, -verify=True, -) +self._bind_port_to_driver(port, for_dpdk) + +def _bind_port_to_driver(self, port: Port, for_dpdk: bool = True) -> None: +driver = port.os_driver_for_dpdk if for_dpdk else port.os_driver +self.main_session.send_command( +f"{self.path_to_devbind_script} -b {driver} --force {port.pci}", +privileged=True, +verify=True, +) def create_session(node_config: NodeConfiguration, name: str, logger: DTSLogger) -> OSSession: -- 2.46.0
[RFC PATCH v2 2/5] dts: parameterize what ports the TG sends packets to
From: Jeremy Spewock Previously in the DTS framework the helper methods in the TestSutie class designated ports as either ingress or egress ports and would wrap the methods of the traffic generator to allow packets to only flow to those designated ingress or egress ports. This is undesirable in some cases, such as when you have virtual functions on top of your port, where the TG ports can send to more than one SUT port since the framework limits where the TG is allowed to send packets. This patch solves this problem by creating optional parameters that allow the user to specify which port to gather the MAC addresses from when sending and receiving packets. Signed-off-by: Jeremy Spewock --- dts/framework/test_suite.py | 38 ++--- 1 file changed, 31 insertions(+), 7 deletions(-) diff --git a/dts/framework/test_suite.py b/dts/framework/test_suite.py index 694b2eba65..d5c0021503 100644 --- a/dts/framework/test_suite.py +++ b/dts/framework/test_suite.py @@ -185,6 +185,8 @@ def send_packet_and_capture( packet: Packet, filter_config: PacketFilteringConfig = PacketFilteringConfig(), duration: float = 1, +sut_ingress: Port | None = None, +sut_egress: Port | None = None, ) -> list[Packet]: """Send and receive `packet` using the associated TG. @@ -195,11 +197,19 @@ def send_packet_and_capture( packet: The packet to send. filter_config: The filter to use when capturing packets. duration: Capture traffic for this amount of time after sending `packet`. +sut_ingress: Optional port to use as the SUT ingress port. Defaults to +`self._sut_port_ingress`. +sut_egress: Optional port to use as the SUT egress port. Defaults to +`self._sut_port_egress` Returns: A list of received packets. """ -packet = self._adjust_addresses(packet) +if sut_ingress is None: +sut_ingress = self._sut_port_ingress +if sut_egress is None: +sut_egress = self._sut_port_egress +packet = self._adjust_addresses(packet, sut_ingress, sut_egress) return self.tg_node.send_packet_and_capture( packet, self._tg_port_egress, @@ -208,18 +218,30 @@ def send_packet_and_capture( duration, ) -def get_expected_packet(self, packet: Packet) -> Packet: +def get_expected_packet( +self, packet: Packet, sut_ingress: Port | None = None, sut_egress: Port | None = None +) -> Packet: """Inject the proper L2/L3 addresses into `packet`. Args: packet: The packet to modify. +sut_ingress: Optional port to use as the SUT ingress port. Defaults to +`self._sut_port_ingress`. +sut_egress: Optional port to use as the SUT egress port. Defaults to +`self._sut_port_egress`. Returns: `packet` with injected L2/L3 addresses. """ -return self._adjust_addresses(packet, expected=True) - -def _adjust_addresses(self, packet: Packet, expected: bool = False) -> Packet: +if sut_ingress is None: +sut_ingress = self._sut_port_ingress +if sut_egress is None: +sut_egress = self._sut_port_egress +return self._adjust_addresses(packet, sut_ingress, sut_egress, expected=True) + +def _adjust_addresses( +self, packet: Packet, sut_ingress_port: Port, sut_egress_port: Port, expected: bool = False +) -> Packet: """L2 and L3 address additions in both directions. Assumptions: @@ -229,11 +251,13 @@ def _adjust_addresses(self, packet: Packet, expected: bool = False) -> Packet: packet: The packet to modify. expected: If :data:`True`, the direction is SUT -> TG, otherwise the direction is TG -> SUT. +sut_ingress_port: The port to use as the Rx port on the SUT. +sut_egress_port: The port to use as the Tx port on the SUT. 
""" if expected: # The packet enters the TG from SUT # update l2 addresses -packet.src = self._sut_port_egress.mac_address +packet.src = sut_egress_port.mac_address packet.dst = self._tg_port_ingress.mac_address # The packet is routed from TG egress to TG ingress @@ -244,7 +268,7 @@ def _adjust_addresses(self, packet: Packet, expected: bool = False) -> Packet: # The packet leaves TG towards SUT # update l2 addresses packet.src = self._tg_port_egress.mac_address -packet.dst = self._sut_port_ingress.mac_address +packet.dst = sut_ingress_port.mac_address # The packet is routed from TG egress to TG ingress # update l3 addresses -- 2.46.0
[RFC PATCH v2 3/5] dts: add class for virtual functions
From: Jeremy Spewock In DPDK applications virtual functions are treated the same as ports, but within the framework there are benefits to differentiating the two in order to add more metadata to VFs about where they originate from. For this reason this patch adds a new class for handling virtual functions that extends the Port class with some additional information about the VF. Bugzilla ID: 1500 Signed-off-by: Jeremy Spewock --- dts/framework/testbed_model/port.py | 37 - 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/dts/framework/testbed_model/port.py b/dts/framework/testbed_model/port.py index 817405bea4..c1d85fec2b 100644 --- a/dts/framework/testbed_model/port.py +++ b/dts/framework/testbed_model/port.py @@ -27,7 +27,7 @@ class PortIdentifier: pci: str -@dataclass(slots=True) +@dataclass class Port: """Physical port on a node. @@ -80,6 +80,41 @@ def pci(self) -> str: return self.identifier.pci +@dataclass +class VirtualFunction(Port): +"""Virtual Function (VF) on a port. + +DPDK applications often treat VFs the same as they do the physical ports (PFs) on the host. +For this reason VFs are represented in the framework as a type of port with some additional +metadata that allows the framework to more easily identify which device the VF belongs to as +well as where the VF originated from. + +Attributes: +created_by_framework: :data:`True` if this VF represents one that the DTS framework created +on the node, :data:`False` otherwise. +pf_port: The PF that this VF was created on/gathered from. +""" + +created_by_framework: bool = False +pf_port: Port | None = None + +def __init__( +self, node_name: str, config: PortConfig, created_by_framework: bool, pf_port: Port +) -> None: +"""Extends :meth:`Port.__init__` with VF specific metadata. + +Args: +node_name: The name of the node the VF resides on. +config: Configuration information about the VF. +created_by_framework: :data:`True` if DTS created this VF, otherwise :data:`False` if +this class represents a VF that was preexisting on the node. +pf_port: The PF that this VF was created on/gathered from. +""" +super().__init__(node_name, config) +self.created_by_framework = created_by_framework +self.pf_port = pf_port + + @dataclass(slots=True, frozen=True) class PortLink: """The physical, cabled connection between the ports. -- 2.46.0
[RFC PATCH v2 4/5] dts: add OS abstractions for creating virtual functions
From: Jeremy Spewock Virtual functions in the framework are created using SR-IOV. The process for doing this can vary depending on the operating system, so the commands to create VFs have to be abstracted into different classes based on the operating system. This patch adds the stubs for methods that create VFs and get the PCI addresses of all VFs on a port to the abstract class as well as a linux implementation for the methods. Bugzilla ID: 1500 Signed-off-by: Jeremy Spewock --- dts/framework/testbed_model/linux_session.py | 40 +++- dts/framework/testbed_model/os_session.py| 40 2 files changed, 79 insertions(+), 1 deletion(-) diff --git a/dts/framework/testbed_model/linux_session.py b/dts/framework/testbed_model/linux_session.py index 99abc21353..738ddd7600 100644 --- a/dts/framework/testbed_model/linux_session.py +++ b/dts/framework/testbed_model/linux_session.py @@ -15,7 +15,11 @@ from typing_extensions import NotRequired -from framework.exception import ConfigurationError, RemoteCommandExecutionError +from framework.exception import ( +ConfigurationError, +InternalError, +RemoteCommandExecutionError, +) from framework.utils import expand_range from .cpu import LogicalCore @@ -210,3 +214,37 @@ def configure_ipv4_forwarding(self, enable: bool) -> None: """Overrides :meth:`~.os_session.OSSession.configure_ipv4_forwarding`.""" state = 1 if enable else 0 self.send_command(f"sysctl -w net.ipv4.ip_forward={state}", privileged=True) + +def set_num_virtual_functions(self, num: int, pf_port: Port) -> bool: +"""Overrides :meth:`~.os_session.OSSession.set_num_virtual_functions`.""" +sys_bus_path = f"/sys/bus/pci/devices/{pf_port.pci}/sriov_numvfs".replace(":", "\\:") +curr_num_vfs = int(self.send_command(f"cat {sys_bus_path}").stdout) +if num > 0 and curr_num_vfs >= num: +self._logger.info( +f"{curr_num_vfs} VFs already configured on port {pf_port.identifier.pci} on node " +f"{pf_port.identifier.node}." +) +return False +elif num > 0 and curr_num_vfs > 0: +self._logger.error( +f"Not enough VFs configured on port {pf_port.identifier.pci} on node " +f"{pf_port.identifier.node}. Need {num} but only {curr_num_vfs} are configured. " +"DTS is unable to modify number of preexisting VFs." +) +raise InternalError("Failed to create VFs on port.") +self.send_command(f"echo {num} > {sys_bus_path}", privileged=True, verify=True) +return True + +def get_pci_addr_of_vfs(self, pf_port: Port) -> list[str]: +"""Overrides :meth:`~.os_session.OSSession.get_pci_addr_of_vfs`.""" +sys_bus_path = f"/sys/bus/pci/devices/{pf_port.pci}".replace(":", "\\:") +curr_num_vfs = int(self.send_command(f"cat {sys_bus_path}/sriov_numvfs").stdout) +if curr_num_vfs > 0: +pci_addrs = self.send_command( +'awk -F "PCI_SLOT_NAME=" "/PCI_SLOT_NAME=/ {print \\$2}" ' ++ f"{sys_bus_path}/virtfn*/uevent", +privileged=True, +) +return pci_addrs.stdout.splitlines() +else: +return [] diff --git a/dts/framework/testbed_model/os_session.py b/dts/framework/testbed_model/os_session.py index 79f56b289b..191fc3c0c8 100644 --- a/dts/framework/testbed_model/os_session.py +++ b/dts/framework/testbed_model/os_session.py @@ -395,3 +395,43 @@ def configure_ipv4_forwarding(self, enable: bool) -> None: Args: enable: If :data:`True`, enable the forwarding, otherwise disable it. """ + +@abstractmethod +def set_num_virtual_functions(self, num: int, pf_port: Port) -> bool: +"""Update the number of virtual functions (VFs) on a port. 
+ +It should be noted that, due to the nature of VFs, if there are already VFs that exist on +the physical function (PF) prior to calling this function, additional ones cannot be added. +The only way to add more VFs is to remove the existing ones and then set the desired amount. For +this reason, this method will handle creation in the following order: + +1. Use existing VFs on the PF if the number of existing VFs is greater than or equal to +`num` +2. Throw an exception noting that VFs cannot be created if the PF has some VFs already set +on it, but the total VFs that it has are less than `num`. +3. Create `num` VFs on the PF if there are none on it already + +Args: +num: The number of VFs to set on the port. +pf_port: The port to add the VFs to. + +Raises: +InternalError: If `pf_port` already has some VFs configured on it, but fewer +than `num`. + +Returns: +:data:`True` if this method successfully created the VFs, :data:`False` if existing VFs on +the PF were used instead.
[RFC PATCH v2 5/5] dts: add functions for managing VFs to Node
From: Jeremy Spewock In order for test suites to create virtual functions there has to be functions in the API that developers can use. This patch adds the ability to create virtual functions to the Node API so that they are reachable within test suites. Bugzilla ID: 1500 Signed-off-by: Jeremy Spewock --- dts/framework/testbed_model/node.py | 96 - 1 file changed, 94 insertions(+), 2 deletions(-) diff --git a/dts/framework/testbed_model/node.py b/dts/framework/testbed_model/node.py index 85d4eb1f7c..101a8edfbc 100644 --- a/dts/framework/testbed_model/node.py +++ b/dts/framework/testbed_model/node.py @@ -14,6 +14,7 @@ """ import os +import re import tarfile from abc import ABC from ipaddress import IPv4Interface, IPv6Interface @@ -24,9 +25,10 @@ OS, BuildTargetConfiguration, NodeConfiguration, +PortConfig, TestRunConfiguration, ) -from framework.exception import ConfigurationError +from framework.exception import ConfigurationError, InternalError from framework.logger import DTSLogger, get_dts_logger from framework.settings import SETTINGS @@ -39,7 +41,7 @@ ) from .linux_session import LinuxSession from .os_session import OSSession -from .port import Port +from .port import Port, VirtualFunction class Node(ABC): @@ -335,6 +337,96 @@ def _bind_port_to_driver(self, port: Port, for_dpdk: bool = True) -> None: verify=True, ) +def create_virtual_functions( +self, num: int, pf_port: Port, dpdk_driver: str | None = None +) -> list[VirtualFunction]: +"""Create virtual functions (VFs) from a given physical function (PF) on the node. + +Virtual functions will be created if there are not any currently configured on `pf_port`. +If there are greater than or equal to `num` VFs already configured on `pf_port`, those will +be used instead of creating more. In order to create VFs, the PF must be bound to its +kernel driver. This method will handle binding `pf_port` and any other ports in the test +run that reside on the same device back to their OS drivers if this was not done already. +VFs gathered in this method will be bound to `driver` if one is provided, or the DPDK +driver for `pf_port` and then added to `self.ports`. + +Args: +num: The number of VFs to create. Must be greater than 0. +pf_port: The PF to create the VFs on. +dpdk_driver: Optional driver to bind the VFs to after they are created. Defaults to the +DPDK driver of `pf_port`. + +Raises: +InternalError: If `num` is less than or equal to 0. +""" +if num <= 0: +raise InternalError( +"Method for creating virtual functions received a non-positive value." 
+) +if not dpdk_driver: +dpdk_driver = pf_port.os_driver_for_dpdk +# Get any other port that is on the same device which DTS is aware of +all_device_ports = [ +p for p in self.ports if p.pci.split(".")[0] == pf_port.pci.split(".")[0] +] +# Ports must be bound to the kernel driver in order to create VFs from them +for port in all_device_ports: +self._bind_port_to_driver(port, False) +# Some PMDs require the interface being up in order to make VFs +self.configure_port_state(port) +created_vfs = self.main_session.set_num_virtual_functions(num, pf_port) +# We don't need more then `num` VFs from the list +vf_pcis = self.main_session.get_pci_addr_of_vfs(pf_port)[:num] +devbind_info = self.main_session.send_command( +f"{self.path_to_devbind_script} -s", privileged=True +).stdout + +ret = [] + +for pci in vf_pcis: +original_driver = re.search(f"{pci}.*drv=([\\d\\w-]*)", devbind_info) +os_driver = original_driver[1] if original_driver else pf_port.os_driver +vf_config = PortConfig( +self.name, pci, dpdk_driver, os_driver, pf_port.peer.node, pf_port.peer.pci +) +vf_port = VirtualFunction(self.name, vf_config, created_vfs, pf_port) +self.main_session.update_ports([vf_port]) +self._bind_port_to_driver(vf_port) +self.ports.append(vf_port) +ret.append(vf_port) +return ret + +def get_vfs_on_port(self, pf_port: Port) -> list[VirtualFunction]: +"""Get all virtual functions (VFs) that DTS is aware of on `pf_port`. + +Args: +pf_port: The port to search for the VFs on. + +Returns: +A list of VFs in the framework that were created/gathered from `pf_port`. +""" +return [p for p in self.ports if isinstance(p, VirtualFunction) and p.pf_port == pf_port] + +def remove_virtual_functions(self, pf_port: Port) -> None: +"""Remove
[RFC PATCH v3 0/5] dts: add VFs to the framework
From: Jeremy Spewock rfc-v3: * add missing depends-on tags to appropriate commits. * adjust some other small issues in commit bodies * add changes to fix name of function in os_udp Jeremy Spewock (5): dts: allow binding only a single port to a different driver dts: parameterize what ports the TG sends packets to dts: add class for virtual functions dts: add OS abstractions for creating virtual functions dts: add functions for managing VFs to Node dts/framework/test_suite.py | 38 -- dts/framework/testbed_model/linux_session.py | 40 ++- dts/framework/testbed_model/node.py | 115 +-- dts/framework/testbed_model/os_session.py| 40 +++ dts/framework/testbed_model/port.py | 37 +- dts/tests/TestSuite_os_udp.py| 4 +- 6 files changed, 253 insertions(+), 21 deletions(-) -- 2.46.0
[RFC PATCH v3 1/5] dts: allow binding only a single port to a different driver
From: Jeremy Spewock Previously the DTS framework only included methods that bind all ports that the test run was aware of to either the DPDK driver or the OS driver. There are, however, some cases, such as creating virtual functions, where some ports need to be bound to the OS driver while others stay bound to their DPDK driver. This patch adds the ability to bind individual ports to their respective drivers to solve this problem. Depends-on: patch-143101 ("dts: add binding to different drivers to TG node") Signed-off-by: Jeremy Spewock --- dts/framework/testbed_model/node.py | 21 - dts/tests/TestSuite_os_udp.py | 4 ++-- 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/dts/framework/testbed_model/node.py b/dts/framework/testbed_model/node.py index 8e6181e424..85d4eb1f7c 100644 --- a/dts/framework/testbed_model/node.py +++ b/dts/framework/testbed_model/node.py @@ -167,12 +167,12 @@ def set_up_build_target(self, build_target_config: BuildTargetConfiguration) -> the setup steps will be taken. """ self._copy_dpdk_tarball() -self.bind_ports_to_driver() +self.bind_all_ports_to_driver() def tear_down_build_target(self) -> None: """Reset DPDK variables and bind port driver to the OS driver.""" self.__remote_dpdk_dir = None -self.bind_ports_to_driver(for_dpdk=False) +self.bind_all_ports_to_driver(for_dpdk=False) def create_session(self, name: str) -> OSSession: """Create and return a new OS-aware remote session. @@ -317,7 +317,7 @@ def _copy_dpdk_tarball(self) -> None: # then extract to remote path self.main_session.extract_remote_tarball(remote_tarball_path, self._remote_dpdk_dir) -def bind_ports_to_driver(self, for_dpdk: bool = True) -> None: +def bind_all_ports_to_driver(self, for_dpdk: bool = True) -> None: """Bind all ports on the node to a driver. Args: @@ -325,12 +325,15 @@ def bind_ports_to_driver(self, for_dpdk: bool = True) -> None: If :data:`False`, binds to os_driver. """ for port in self.ports: -driver = port.os_driver_for_dpdk if for_dpdk else port.os_driver -self.main_session.send_command( -f"{self.path_to_devbind_script} -b {driver} --force {port.pci}", -privileged=True, -verify=True, -) +self._bind_port_to_driver(port, for_dpdk) + +def _bind_port_to_driver(self, port: Port, for_dpdk: bool = True) -> None: +driver = port.os_driver_for_dpdk if for_dpdk else port.os_driver +self.main_session.send_command( +f"{self.path_to_devbind_script} -b {driver} --force {port.pci}", +privileged=True, +verify=True, +) def create_session(node_config: NodeConfiguration, name: str, logger: DTSLogger) -> OSSession: diff --git a/dts/tests/TestSuite_os_udp.py b/dts/tests/TestSuite_os_udp.py index a78bd74139..5e9469bbac 100644 --- a/dts/tests/TestSuite_os_udp.py +++ b/dts/tests/TestSuite_os_udp.py @@ -23,7 +23,7 @@ def set_up_suite(self) -> None: Bind the SUT ports to the OS driver, configure the ports and configure the SUT to route traffic from if1 to if2. """ -self.sut_node.bind_ports_to_driver(for_dpdk=False) +self.sut_node.bind_all_ports_to_driver(for_dpdk=False) self.configure_testbed_ipv4() def test_os_udp(self) -> None: @@ -50,4 +50,4 @@ def tear_down_suite(self) -> None: """ self.configure_testbed_ipv4(restore=True) # Assume other suites will likely need dpdk driver -self.sut_node.bind_ports_to_driver(for_dpdk=True) +self.sut_node.bind_all_ports_to_driver(for_dpdk=True) -- 2.46.0
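To make the new per-port granularity concrete, here is a rough usage sketch, not part of the patch, assuming `node` is a framework Node whose ports are currently bound to their DPDK drivers. `_bind_port_to_driver` is the per-port helper added above and `bind_all_ports_to_driver` is the renamed whole-node method.

    # Bind only the physical function back to its kernel driver, e.g. so that
    # VFs can be created from it later in this series, while every other port
    # stays on its DPDK driver.
    pf_port = node.ports[0]
    node._bind_port_to_driver(pf_port, for_dpdk=False)

    # ... create VFs, run traffic, etc. ...

    # Return that one port to its DPDK driver afterwards.
    node._bind_port_to_driver(pf_port, for_dpdk=True)

    # The previous all-or-nothing behaviour is still available:
    node.bind_all_ports_to_driver(for_dpdk=False)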
[RFC PATCH v3 2/5] dts: parameterize what ports the TG sends packets to
From: Jeremy Spewock Previously in the DTS framework the helper methods in the TestSuite class designated ports as either ingress or egress ports and would wrap the methods of the traffic generator to allow packets to only flow to those designated ingress or egress ports. This is undesirable in some cases, such as when you have virtual functions on top of your port, where the TG ports can send to more than one SUT port. This patch solves this problem by creating optional parameters that allow the user to specify which port to gather the MAC addresses from when sending and receiving packets. Signed-off-by: Jeremy Spewock --- dts/framework/test_suite.py | 38 ++--- 1 file changed, 31 insertions(+), 7 deletions(-) diff --git a/dts/framework/test_suite.py b/dts/framework/test_suite.py index 694b2eba65..d5c0021503 100644 --- a/dts/framework/test_suite.py +++ b/dts/framework/test_suite.py @@ -185,6 +185,8 @@ def send_packet_and_capture( packet: Packet, filter_config: PacketFilteringConfig = PacketFilteringConfig(), duration: float = 1, +sut_ingress: Port | None = None, +sut_egress: Port | None = None, ) -> list[Packet]: """Send and receive `packet` using the associated TG. @@ -195,11 +197,19 @@ def send_packet_and_capture( packet: The packet to send. filter_config: The filter to use when capturing packets. duration: Capture traffic for this amount of time after sending `packet`. +sut_ingress: Optional port to use as the SUT ingress port. Defaults to +`self._sut_port_ingress`. +sut_egress: Optional port to use as the SUT egress port. Defaults to +`self._sut_port_egress` Returns: A list of received packets. """ -packet = self._adjust_addresses(packet) +if sut_ingress is None: +sut_ingress = self._sut_port_ingress +if sut_egress is None: +sut_egress = self._sut_port_egress +packet = self._adjust_addresses(packet, sut_ingress, sut_egress) return self.tg_node.send_packet_and_capture( packet, self._tg_port_egress, @@ -208,18 +218,30 @@ def send_packet_and_capture( duration, ) -def get_expected_packet(self, packet: Packet) -> Packet: +def get_expected_packet( +self, packet: Packet, sut_ingress: Port | None = None, sut_egress: Port | None = None +) -> Packet: """Inject the proper L2/L3 addresses into `packet`. Args: packet: The packet to modify. +sut_ingress: Optional port to use as the SUT ingress port. Defaults to +`self._sut_port_ingress`. +sut_egress: Optional port to use as the SUT egress port. Defaults to +`self._sut_port_egress`. Returns: `packet` with injected L2/L3 addresses. """ -return self._adjust_addresses(packet, expected=True) - -def _adjust_addresses(self, packet: Packet, expected: bool = False) -> Packet: +if sut_ingress is None: +sut_ingress = self._sut_port_ingress +if sut_egress is None: +sut_egress = self._sut_port_egress +return self._adjust_addresses(packet, sut_ingress, sut_egress, expected=True) + +def _adjust_addresses( +self, packet: Packet, sut_ingress_port: Port, sut_egress_port: Port, expected: bool = False +) -> Packet: """L2 and L3 address additions in both directions. Assumptions: @@ -229,11 +251,13 @@ def _adjust_addresses(self, packet: Packet, expected: bool = False) -> Packet: packet: The packet to modify. expected: If :data:`True`, the direction is SUT -> TG, otherwise the direction is TG -> SUT. +sut_ingress_port: The port to use as the Rx port on the SUT. +sut_egress_port: The port to use as the Tx port on the SUT. 
""" if expected: # The packet enters the TG from SUT # update l2 addresses -packet.src = self._sut_port_egress.mac_address +packet.src = sut_egress_port.mac_address packet.dst = self._tg_port_ingress.mac_address # The packet is routed from TG egress to TG ingress @@ -244,7 +268,7 @@ def _adjust_addresses(self, packet: Packet, expected: bool = False) -> Packet: # The packet leaves TG towards SUT # update l2 addresses packet.src = self._tg_port_egress.mac_address -packet.dst = self._sut_port_ingress.mac_address +packet.dst = sut_ingress_port.mac_address # The packet is routed from TG egress to TG ingress # update l3 addresses -- 2.46.0
[RFC PATCH v3 3/5] dts: add class for virtual functions
From: Jeremy Spewock In DPDK applications virtual functions are treated the same as ports, but within the framework there are benefits to differentiating the two in order to add more metadata to VFs about where they originate from. For this reason this patch adds a new class for handling virtual functions that extends the Port class with some additional information about the VF. Bugzilla ID: 1500 Signed-off-by: Jeremy Spewock --- dts/framework/testbed_model/port.py | 37 - 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/dts/framework/testbed_model/port.py b/dts/framework/testbed_model/port.py index 817405bea4..c1d85fec2b 100644 --- a/dts/framework/testbed_model/port.py +++ b/dts/framework/testbed_model/port.py @@ -27,7 +27,7 @@ class PortIdentifier: pci: str -@dataclass(slots=True) +@dataclass class Port: """Physical port on a node. @@ -80,6 +80,41 @@ def pci(self) -> str: return self.identifier.pci +@dataclass +class VirtualFunction(Port): +"""Virtual Function (VF) on a port. + +DPDK applications often treat VFs the same as they do the physical ports (PFs) on the host. +For this reason VFs are represented in the framework as a type of port with some additional +metadata that allows the framework to more easily identify which device the VF belongs to as +well as where the VF originated from. + +Attributes: +created_by_framework: :data:`True` if this VF represents one that the DTS framework created +on the node, :data:`False` otherwise. +pf_port: The PF that this VF was created on/gathered from. +""" + +created_by_framework: bool = False +pf_port: Port | None = None + +def __init__( +self, node_name: str, config: PortConfig, created_by_framework: bool, pf_port: Port +) -> None: +"""Extends :meth:`Port.__init__` with VF specific metadata. + +Args: +node_name: The name of the node the VF resides on. +config: Configuration information about the VF. +created_by_framework: :data:`True` if DTS created this VF, otherwise :data:`False` if +this class represents a VF that was preexisting on the node. +pf_port: The PF that this VF was created on/gathered from. +""" +super().__init__(node_name, config) +self.created_by_framework = created_by_framework +self.pf_port = pf_port + + @dataclass(slots=True, frozen=True) class PortLink: """The physical, cabled connection between the ports. -- 2.46.0
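A minimal construction sketch, not part of the patch, assuming Port takes (node_name, config) as implied by the super().__init__ call above, and that PortConfig's positional fields are (node, pci, os_driver_for_dpdk, os_driver, peer_node, peer_pci) as used by the call site in patch 5/5. The PCI addresses and driver names below are placeholders.

    from framework.config import PortConfig
    from framework.testbed_model.port import Port, VirtualFunction

    pf_config = PortConfig("sut1", "0000:17:00.0", "vfio-pci", "ice", "tg1", "0000:18:00.0")
    pf_port = Port("sut1", pf_config)

    vf_config = PortConfig("sut1", "0000:17:01.0", "vfio-pci", "iavf", "tg1", "0000:18:00.0")
    vf = VirtualFunction("sut1", vf_config, created_by_framework=True, pf_port=pf_port)

    # A VF is still a Port, so it can be used anywhere the framework expects one.
    assert isinstance(vf, Port)
    if vf.created_by_framework:
        pass  # e.g. the framework is responsible for removing this VF on teardown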
[RFC PATCH v3 4/5] dts: add OS abstractions for creating virtual functions
From: Jeremy Spewock Virtual functions in the framework are created using SR-IOV. The process for doing this can vary depending on the operating system, so the commands to create VFs have to be abstracted into different classes based on the operating system. This patch adds the stubs for methods that create VFs and get the PCI addresses of all VFs on a port to the abstract class as well as a linux implementation for the methods. Bugzilla ID: 1500 Signed-off-by: Jeremy Spewock --- dts/framework/testbed_model/linux_session.py | 40 +++- dts/framework/testbed_model/os_session.py| 40 2 files changed, 79 insertions(+), 1 deletion(-) diff --git a/dts/framework/testbed_model/linux_session.py b/dts/framework/testbed_model/linux_session.py index 99abc21353..738ddd7600 100644 --- a/dts/framework/testbed_model/linux_session.py +++ b/dts/framework/testbed_model/linux_session.py @@ -15,7 +15,11 @@ from typing_extensions import NotRequired -from framework.exception import ConfigurationError, RemoteCommandExecutionError +from framework.exception import ( +ConfigurationError, +InternalError, +RemoteCommandExecutionError, +) from framework.utils import expand_range from .cpu import LogicalCore @@ -210,3 +214,37 @@ def configure_ipv4_forwarding(self, enable: bool) -> None: """Overrides :meth:`~.os_session.OSSession.configure_ipv4_forwarding`.""" state = 1 if enable else 0 self.send_command(f"sysctl -w net.ipv4.ip_forward={state}", privileged=True) + +def set_num_virtual_functions(self, num: int, pf_port: Port) -> bool: +"""Overrides :meth:`~.os_session.OSSession.set_num_virtual_functions`.""" +sys_bus_path = f"/sys/bus/pci/devices/{pf_port.pci}/sriov_numvfs".replace(":", "\\:") +curr_num_vfs = int(self.send_command(f"cat {sys_bus_path}").stdout) +if num > 0 and curr_num_vfs >= num: +self._logger.info( +f"{curr_num_vfs} VFs already configured on port {pf_port.identifier.pci} on node " +f"{pf_port.identifier.node}." +) +return False +elif num > 0 and curr_num_vfs > 0: +self._logger.error( +f"Not enough VFs configured on port {pf_port.identifier.pci} on node " +f"{pf_port.identifier.node}. Need {num} but only {curr_num_vfs} are configured. " +"DTS is unable to modify number of preexisting VFs." +) +raise InternalError("Failed to create VFs on port.") +self.send_command(f"echo {num} > {sys_bus_path}", privileged=True, verify=True) +return True + +def get_pci_addr_of_vfs(self, pf_port: Port) -> list[str]: +"""Overrides :meth:`~.os_session.OSSession.get_pci_addr_of_vfs`.""" +sys_bus_path = f"/sys/bus/pci/devices/{pf_port.pci}".replace(":", "\\:") +curr_num_vfs = int(self.send_command(f"cat {sys_bus_path}/sriov_numvfs").stdout) +if curr_num_vfs > 0: +pci_addrs = self.send_command( +'awk -F "PCI_SLOT_NAME=" "/PCI_SLOT_NAME=/ {print \\$2}" ' ++ f"{sys_bus_path}/virtfn*/uevent", +privileged=True, +) +return pci_addrs.stdout.splitlines() +else: +return [] diff --git a/dts/framework/testbed_model/os_session.py b/dts/framework/testbed_model/os_session.py index 79f56b289b..191fc3c0c8 100644 --- a/dts/framework/testbed_model/os_session.py +++ b/dts/framework/testbed_model/os_session.py @@ -395,3 +395,43 @@ def configure_ipv4_forwarding(self, enable: bool) -> None: Args: enable: If :data:`True`, enable the forwarding, otherwise disable it. """ + +@abstractmethod +def set_num_virtual_functions(self, num: int, pf_port: Port) -> bool: +"""Update the number of virtual functions (VFs) on a port. 
+ +It should be noted that, due to the nature of VFs, if there are already VFs that exist on +the physical function (PF) prior to calling this function, additional ones cannot be added. +The only way to add more VFs is to remove the existing ones and then set the desired amount. For +this reason, this method will handle creation in the following order: + +1. Use existing VFs on the PF if the number of existing VFs is greater than or equal to +`num` +2. Throw an exception noting that VFs cannot be created if the PF has some VFs already set +on it, but the total VFs that it has are less than `num`. +3. Create `num` VFs on the PF if there are none on it already + +Args: +num: The number of VFs to set on the port. +pf_port: The port to add the VFs to. + +Raises: +InternalError: If `pf_port` already has some VFs configured on it, but fewer +than `num`. + +Returns: +:data:`True` if this method successfully created the VFs, :data:`False` if existing VFs on +the PF were used instead.
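A rough calling sketch for the two new session methods, not part of the patch, assuming `session` is a connected LinuxSession (such as node.main_session) and `pf` is a Port for the physical function.

    from framework.exception import InternalError

    try:
        created = session.set_num_virtual_functions(2, pf)
    except InternalError:
        # The PF already had some VFs, but fewer than requested; DTS refuses to
        # modify a preexisting VF configuration.
        raise

    if not created:
        print("Reusing the VFs that already existed on the PF.")

    # PCI addresses read from /sys/bus/pci/devices/<pf>/virtfn*/uevent
    vf_pcis = session.get_pci_addr_of_vfs(pf)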
[RFC PATCH v3 5/5] dts: add functions for managing VFs to Node
From: Jeremy Spewock In order for test suites to create virtual functions there has to be functions in the API that developers can use. This patch adds the ability to create virtual functions to the Node API so that they are reachable within test suites. Bugzilla ID: 1500 Depends-on: patch-143101 ("dts: add binding to different drivers to TG node") Signed-off-by: Jeremy Spewock --- dts/framework/testbed_model/node.py | 96 - 1 file changed, 94 insertions(+), 2 deletions(-) diff --git a/dts/framework/testbed_model/node.py b/dts/framework/testbed_model/node.py index 85d4eb1f7c..101a8edfbc 100644 --- a/dts/framework/testbed_model/node.py +++ b/dts/framework/testbed_model/node.py @@ -14,6 +14,7 @@ """ import os +import re import tarfile from abc import ABC from ipaddress import IPv4Interface, IPv6Interface @@ -24,9 +25,10 @@ OS, BuildTargetConfiguration, NodeConfiguration, +PortConfig, TestRunConfiguration, ) -from framework.exception import ConfigurationError +from framework.exception import ConfigurationError, InternalError from framework.logger import DTSLogger, get_dts_logger from framework.settings import SETTINGS @@ -39,7 +41,7 @@ ) from .linux_session import LinuxSession from .os_session import OSSession -from .port import Port +from .port import Port, VirtualFunction class Node(ABC): @@ -335,6 +337,96 @@ def _bind_port_to_driver(self, port: Port, for_dpdk: bool = True) -> None: verify=True, ) +def create_virtual_functions( +self, num: int, pf_port: Port, dpdk_driver: str | None = None +) -> list[VirtualFunction]: +"""Create virtual functions (VFs) from a given physical function (PF) on the node. + +Virtual functions will be created if there are not any currently configured on `pf_port`. +If there are greater than or equal to `num` VFs already configured on `pf_port`, those will +be used instead of creating more. In order to create VFs, the PF must be bound to its +kernel driver. This method will handle binding `pf_port` and any other ports in the test +run that reside on the same device back to their OS drivers if this was not done already. +VFs gathered in this method will be bound to `driver` if one is provided, or the DPDK +driver for `pf_port` and then added to `self.ports`. + +Args: +num: The number of VFs to create. Must be greater than 0. +pf_port: The PF to create the VFs on. +dpdk_driver: Optional driver to bind the VFs to after they are created. Defaults to the +DPDK driver of `pf_port`. + +Raises: +InternalError: If `num` is less than or equal to 0. +""" +if num <= 0: +raise InternalError( +"Method for creating virtual functions received a non-positive value." 
+) +if not dpdk_driver: +dpdk_driver = pf_port.os_driver_for_dpdk +# Get any other port that is on the same device which DTS is aware of +all_device_ports = [ +p for p in self.ports if p.pci.split(".")[0] == pf_port.pci.split(".")[0] +] +# Ports must be bound to the kernel driver in order to create VFs from them +for port in all_device_ports: +self._bind_port_to_driver(port, False) +# Some PMDs require the interface being up in order to make VFs +self.configure_port_state(port) +created_vfs = self.main_session.set_num_virtual_functions(num, pf_port) +# We don't need more then `num` VFs from the list +vf_pcis = self.main_session.get_pci_addr_of_vfs(pf_port)[:num] +devbind_info = self.main_session.send_command( +f"{self.path_to_devbind_script} -s", privileged=True +).stdout + +ret = [] + +for pci in vf_pcis: +original_driver = re.search(f"{pci}.*drv=([\\d\\w-]*)", devbind_info) +os_driver = original_driver[1] if original_driver else pf_port.os_driver +vf_config = PortConfig( +self.name, pci, dpdk_driver, os_driver, pf_port.peer.node, pf_port.peer.pci +) +vf_port = VirtualFunction(self.name, vf_config, created_vfs, pf_port) +self.main_session.update_ports([vf_port]) +self._bind_port_to_driver(vf_port) +self.ports.append(vf_port) +ret.append(vf_port) +return ret + +def get_vfs_on_port(self, pf_port: Port) -> list[VirtualFunction]: +"""Get all virtual functions (VFs) that DTS is aware of on `pf_port`. + +Args: +pf_port: The port to search for the VFs on. + +Returns: +A list of VFs in the framework that were created/gathered from `pf_port`. +""" +return [p for p in self.ports if isinstance(p, VirtualFunction) and p.pf_port == pf_port] + +
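A usage sketch from a test suite's point of view, based on assumptions rather than the patch itself: `self.sut_node` is the SUT Node, and remove_virtual_functions is the cleanup entry point whose body is truncated above.

    # Inside a test suite method.
    pf = self.sut_node.ports[0]

    # Create (or reuse) two VFs on the PF; they are bound to the PF's DPDK
    # driver and appended to self.sut_node.ports.
    vfs = self.sut_node.create_virtual_functions(2, pf)

    # The same VFs can be looked up later without keeping a reference around.
    assert self.sut_node.get_vfs_on_port(pf) == vfs

    # ... exercise the VFs ...

    # Remove the VFs gathered from this PF once the suite is done with them.
    self.sut_node.remove_virtual_functions(pf)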
[PATCH v1 0/1] dts: allow for updating MTU with testpmd
From: Jeremy Spewock There are mechanisms to update the MTU of ports in the framework already, but only when those ports are bound to their kernel drivers. This series adds the functionality needed within testpmd to change the MTU of ports on the SUT which are bound to their DPDK driver. Jeremy Spewock (1): dts: add methods for modifying MTU to testpmd shell dts/framework/remote_session/testpmd_shell.py | 27 +++ 1 file changed, 27 insertions(+) -- 2.46.0
[PATCH v1 1/1] dts: add methods for modifying MTU to testpmd shell
From: Jeremy Spewock There are methods within DTS currently that support updating the MTU of ports on a node, but the methods for doing this in a linux session rely on the ip command and the port being bound to the kernel driver. Since test suites are run while bound to the driver for DPDK, there needs to be a way to modify the value while bound to said driver as well. This is done by using testpmd to modify the MTU. Depends-on: patch-142952 ("dts: add ability to start/stop testpmd ports") Signed-off-by: Jeremy Spewock --- dts/framework/remote_session/testpmd_shell.py | 27 +++ 1 file changed, 27 insertions(+) diff --git a/dts/framework/remote_session/testpmd_shell.py b/dts/framework/remote_session/testpmd_shell.py index ca24b28070..0d2c972b8f 100644 --- a/dts/framework/remote_session/testpmd_shell.py +++ b/dts/framework/remote_session/testpmd_shell.py @@ -888,6 +888,33 @@ def show_port_stats(self, port_id: int) -> TestPmdPortStats: return TestPmdPortStats.parse(output) +@requires_stopped_ports +def set_port_mtu(self, port_id: int, mtu: int, verify: bool = True) -> None: +"""Change the MTU of a port using testpmd. + +Some PMDs require that the port be stopped before changing the MTU, and it does no harm to +stop the port before configuring in cases where it isn't required, so we first stop ports, +then update the MTU, then start the ports again afterwards. + +Args: +port_id: ID of the port to adjust the MTU on. +mtu: Desired value for the MTU to be set to. +verify: If `verify` is :data:`True` then the output will be scanned in an attempt to +verify that the mtu was properly set on the port. Defaults to :data:`True`. + +Raises: +InteractiveCommandExecutionError: If `verify` is :data:`True` and the MTU was not +properly updated on the port matching `port_id`. +""" +set_mtu_output = self.send_command(f"port config mtu {port_id} {mtu}") +if verify and (f"MTU: {mtu}" not in self.send_command(f"show port info {port_id}")): +self._logger.debug( +f"Failed to set mtu to {mtu} on port {port_id}." f" Output was:\n{set_mtu_output}" +) +raise InteractiveCommandExecutionError( +f"Test pmd failed to update mtu of port {port_id} to {mtu}" +) + def _close(self) -> None: """Overrides :meth:`~.interactive_shell.close`.""" self.stop() -- 2.46.0
[PATCH v2 0/1] dts: allow for updating MTU with testpmd
From: Jeremy Spewock v2: * allow for setting the MTU of all ports with testpmd. * update doc-string Jeremy Spewock (1): dts: add methods for modifying MTU to testpmd shell dts/framework/remote_session/testpmd_shell.py | 44 +++ 1 file changed, 44 insertions(+) -- 2.46.0
[PATCH v2 1/1] dts: add methods for modifying MTU to testpmd shell
From: Jeremy Spewock There are methods within DTS currently that support updating the MTU of ports on a node, but the methods for doing this in a linux session rely on the ip command and the port being bound to the kernel driver. Since test suites are run while bound to the driver for DPDK, there needs to be a way to modify the value while bound to said driver as well. This is done by using testpmd to modify the MTU. Depends-on: patch-142952 ("dts: add ability to start/stop testpmd ports") Signed-off-by: Jeremy Spewock --- dts/framework/remote_session/testpmd_shell.py | 44 +++ 1 file changed, 44 insertions(+) diff --git a/dts/framework/remote_session/testpmd_shell.py b/dts/framework/remote_session/testpmd_shell.py index ca24b28070..6891f63bef 100644 --- a/dts/framework/remote_session/testpmd_shell.py +++ b/dts/framework/remote_session/testpmd_shell.py @@ -888,6 +888,50 @@ def show_port_stats(self, port_id: int) -> TestPmdPortStats: return TestPmdPortStats.parse(output) +@requires_stopped_ports +def set_port_mtu(self, port_id: int, mtu: int, verify: bool = True) -> None: +"""Change the MTU of a port using testpmd. + +Some PMDs require that the port be stopped before changing the MTU, and it does no harm to +stop the port before configuring in cases where it isn't required, so ports are stopped +prior to changing their MTU. + +Args: +port_id: ID of the port to adjust the MTU on. +mtu: Desired value for the MTU to be set to. +verify: If `verify` is :data:`True` then the output will be scanned in an attempt to +verify that the mtu was properly set on the port. Defaults to :data:`True`. + +Raises: +InteractiveCommandExecutionError: If `verify` is :data:`True` and the MTU was not +properly updated on the port matching `port_id`. +""" +set_mtu_output = self.send_command(f"port config mtu {port_id} {mtu}") +if verify and (f"MTU: {mtu}" not in self.send_command(f"show port info {port_id}")): +self._logger.debug( +f"Failed to set mtu to {mtu} on port {port_id}." f" Output was:\n{set_mtu_output}" +) +raise InteractiveCommandExecutionError( +f"Test pmd failed to update mtu of port {port_id} to {mtu}" +) + +def set_port_mtu_all(self, mtu: int, verify: bool = True) -> None: +"""Change the MTU of all ports using testpmd. + +Runs :meth:`set_port_mtu` for every port that testpmd is aware of. + +Args: +mtu: Desired value for the MTU to be set to. +verify: Whether to verify that setting the MTU on each port was successful or not. +Defaults to :data:`True`. + +Raises: +InteractiveCommandExecutionError: If `verify` is :data:`True` and the MTU was not +properly updated on at least one port. +""" +for port_id in range(len(self._app_params.ports)): +self.set_port_mtu(port_id, mtu, verify) + def _close(self) -> None: """Overrides :meth:`~.interactive_shell.close`.""" self.stop() -- 2.46.0
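A short usage sketch, not part of the patch, assuming a test suite with self.sut_node and that TestPmdShell can be constructed from the node alone, as the scatter suite does. The @requires_stopped_ports decorator takes care of stopping and restarting ports around the MTU change.

    from framework.remote_session.testpmd_shell import TestPmdShell

    with TestPmdShell(self.sut_node) as testpmd:
        # Jumbo MTU on a single port while it is bound to its DPDK driver.
        testpmd.set_port_mtu(port_id=0, mtu=9000)
        # Or the same MTU on every port testpmd knows about.
        testpmd.set_port_mtu_all(mtu=9000, verify=True)
        testpmd.start()
        # ... send traffic that relies on the larger MTU ...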
[PATCH v8 0/1] dts: add second scatter test case
From: Jeremy Spewock v8: * update test suite to use newly submitted capabilities series * split the MTU update patch into its own series. * now that the --max-pkt-len bug is fixed on mlx in 24.07, no longer need to set MTU directly so this is also removed. Jeremy Spewock (1): dts: add test case that utilizes offload to pmd_buffer_scatter dts/tests/TestSuite_pmd_buffer_scatter.py | 47 +++ 1 file changed, 31 insertions(+), 16 deletions(-) -- 2.46.0
[PATCH v8 1/1] dts: add test case that utilizes offload to pmd_buffer_scatter
From: Jeremy Spewock Some NICs tested in DPDK allow for the scattering of packets without an offload and others enforce that you enable the scattered_rx offload in testpmd. The current version of the suite for testing support of scattering packets only tests the case where the NIC supports testing without the offload, so an expansion of coverage is needed to cover the second case as well. depends-on: series-32799 ("dts: add test skipping based on capabilities") Signed-off-by: Jeremy Spewock --- dts/tests/TestSuite_pmd_buffer_scatter.py | 47 +++ 1 file changed, 31 insertions(+), 16 deletions(-) diff --git a/dts/tests/TestSuite_pmd_buffer_scatter.py b/dts/tests/TestSuite_pmd_buffer_scatter.py index 64c48b0793..6704c04325 100644 --- a/dts/tests/TestSuite_pmd_buffer_scatter.py +++ b/dts/tests/TestSuite_pmd_buffer_scatter.py @@ -19,7 +19,7 @@ from scapy.layers.inet import IP # type: ignore[import-untyped] from scapy.layers.l2 import Ether # type: ignore[import-untyped] -from scapy.packet import Raw # type: ignore[import-untyped] +from scapy.packet import Packet, Raw # type: ignore[import-untyped] from scapy.utils import hexstr # type: ignore[import-untyped] from framework.params.testpmd import SimpleForwardingModes @@ -55,25 +55,25 @@ def set_up_suite(self) -> None: """Set up the test suite. Setup: -Increase the MTU of both ports on the traffic generator to 9000 -to support larger packet sizes. +The traffic generator needs to send and receive packets that are, at most, as large as +the mbuf size of the ports + 5 in each test case, so 9000 should more than suffice. """ self.tg_node.main_session.configure_port_mtu(9000, self._tg_port_egress) self.tg_node.main_session.configure_port_mtu(9000, self._tg_port_ingress) -def scatter_pktgen_send_packet(self, pktsize: int) -> str: +def scatter_pktgen_send_packet(self, pktsize: int) -> list[Packet]: """Generate and send a packet to the SUT then capture what is forwarded back. Generate an IP packet of a specific length and send it to the SUT, -then capture the resulting received packet and extract its payload. -The desired length of the packet is met by packing its payload +then capture the resulting received packets and filter them down to the ones that have the +correct layers. The desired length of the packet is met by packing its payload with the letter "X" in hexadecimal. Args: pktsize: Size of the packet to generate and send. Returns: -The payload of the received packet as a string. +The filtered down list of received packets. """ packet = Ether() / IP() / Raw() packet.getlayer(2).load = "" @@ -83,20 +83,27 @@ def scatter_pktgen_send_packet(self, pktsize: int) -> str: for X_in_hex in payload: packet.load += struct.pack("=B", int("%s%s" % (X_in_hex[0], X_in_hex[1]), 16)) received_packets = self.send_packet_and_capture(packet) +# filter down the list to packets that have the appropriate structure +received_packets = list( +filter(lambda p: Ether in p and IP in p and Raw in p, received_packets) +) self.verify(len(received_packets) > 0, "Did not receive any packets.") -load = hexstr(received_packets[0].getlayer(2), onlyhex=1) -return load +return received_packets -def pmd_scatter(self, mbsize: int) -> None: +def pmd_scatter(self, mbsize: int, enable_offload: bool = False) -> None: """Testpmd support of receiving and sending scattered multi-segment packets. Support for scattered packets is shown by sending 5 packets of differing length where the length of the packet is calculated by taking mbuf-size + an offset. 
The offsets used in the test are -1, 0, 1, 4, 5 respectively. +Args: +mbsize: Size to set memory buffers to when starting testpmd. +enable_offload: Whether or not to offload the scattering functionality in testpmd. + Test: -Start testpmd and run functional test with preset mbsize. +Start testpmd and run functional test with preset `mbsize`. """ with TestPmdShell( self.sut_node, @@ -105,16 +112,19 @@ def pmd_scatter(self, mbsize: int) -> None: mbuf_size=[mbsize], max_pkt_len=9000, tx_offloads=0x8000, +enable_scatter=True if enable_offload else None, ) as testpmd: testpmd.start() for offset in [-1, 0, 1, 4, 5]: -recv_payload = self.scatter_pktgen_send_packet(mbsize + offset) -self._logger.debug( -f"Payload of scattered packet after forwarding: \n{recv_payload}" -)
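The hunk above is truncated, but the intent of the enable_offload flag can be illustrated with a pair of hypothetical test cases. The method names and the use of the func_test decorator are assumptions, and the capability gating from the depends-on series is omitted; this is not the patch's actual code.

    # Inside the scatter test suite class.
    @func_test
    def test_scatter_mbuf_2048(self) -> None:
        """Exercise scattered Rx without the scattered_rx offload."""
        self.pmd_scatter(mbsize=2048)

    @func_test
    def test_scatter_mbuf_2048_with_offload(self) -> None:
        """Exercise scattered Rx with enable_scatter forced on in testpmd."""
        self.pmd_scatter(mbsize=2048, enable_offload=True)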
[RFC PATCH v1 4/5] dts: add methods for configuring offloads on a device in testpmd
From: Jeremy Spewock Testpmd offers methods to add and remove offloads from both ports and queues on ports, but there are not yet method bindings in the Testpmd API that the framework provides to reach them. This patch adds these bindings for future test cases/suites that require certain functionalities to be offloaded on the device. Signed-off-by: Jeremy Spewock --- dts/framework/remote_session/testpmd_shell.py | 229 +- 1 file changed, 228 insertions(+), 1 deletion(-) diff --git a/dts/framework/remote_session/testpmd_shell.py b/dts/framework/remote_session/testpmd_shell.py index 58b8995d21..383a3c48b8 100644 --- a/dts/framework/remote_session/testpmd_shell.py +++ b/dts/framework/remote_session/testpmd_shell.py @@ -32,7 +32,7 @@ from typing_extensions import TypeVarTuple -from framework.exception import InteractiveCommandExecutionError +from framework.exception import InteractiveCommandExecutionError, InternalError from framework.params.testpmd import SimpleForwardingModes, TestPmdParams from framework.params.types import TestPmdParamsDict from framework.parser import ParserFn, TextParser @@ -1314,6 +1314,54 @@ class TestPmdVerbosePacket(TextParser): l4_len: int | None = field(default=None, metadata=TextParser.find_int(r"l4_len=(\d+)")) +class TestPmdOffloadCapabilities(StrEnum): +"""Base class for offload capabilities of ports/queues in testpmd. + +This base class is primarily used to give the Rx and Tx variants a common parent type. +""" + +pass + + +class TestPmdRxOffloadCapabilities(TestPmdOffloadCapabilities): +"""Rx offload capabilities of ports/queues in testpmd.""" + +# *** Common attributes *** # +#: +all = auto() +#: +ipv4_cksum = auto() +#: +udp_cksum = auto() +#: +tcp_cksum = auto() +#: +outer_ipv4_cksum = auto() +#: +security = auto() +# *** End ***# +#: +vlan_strip = auto() +#: +tcp_lro = auto() +#: +qinq_strip = auto() +#: +macsec_strip = auto() +#: +vlan_filter = auto() +#: +vlan_extend = auto() +#: +scatter = auto() +#: +timestamp = auto() +#: +keep_crc = auto() +#: +rss_hash = auto() + + @dataclass class FlowRule: """Dataclass for setting flow rule parameters.""" @@ -1352,6 +1400,51 @@ def __str__(self) -> str: return ret +class TestPmdTxOffloadCapabilities(TestPmdOffloadCapabilities): +"""Tx offload capabilities of ports/queues in testpmd.""" + +# *** Common attributes *** # +#: +all = auto() +#: +ipv4_cksum = auto() +#: +udp_cksum = auto() +#: +tcp_cksum = auto() +#: +outer_ipv4_cksum = auto() +#: +security = auto() +# *** End *** # +#: +vlan_insert = auto() +#: +sctp_cksum = auto() +#: +tcp_tso = auto() +#: +udp_tso = auto() +#: +qinq_insert = auto() +#: +vxlan_tnl_tso = auto() +#: +gre_tnl_tso = auto() +#: +ipip_tnl_tso = auto() +#: +geneve_tnl_tso = auto() +#: +macsec_insert = auto() +#: +mt_lockfree = auto() +#: +multi_segs = auto() +#: +mbuf_fast_free = auto() + + class TestPmdShell(DPDKShell): """Testpmd interactive shell. @@ -1995,6 +2088,140 @@ def set_verbose(self, level: int, verify: bool = True) -> None: f"Testpmd failed to set verbose level to {level}." ) +@stop_then_start_port() +def _set_offload( +self, +port_id: int, +is_rx: bool, +offloads: OffloadCapability, +on: bool, +queue_id: int | None = None, +verify: bool = True, +) -> None: +"""Base method for configuring offloads on ports and queues. + +If `queue_id` is not specified then it is assumed that you want to set the offloads on the +port rather than a queue. 
+""" +for offload in type(offloads): +if offload not in offloads: +continue +port_type = "rx" if is_rx else "tx" +command = [ +"port", +f"{port_id}", +f"{port_type}_offload", +f"{offload.name}", +f"{'on' if on else 'off'}", +] +if queue_id is not None: +# If modifying queues the command is "port rxq ..." +command.insert(2, f"{port_type}q {queue_id}") +else: +# If modifying a port the command is "port config ..." +command.insert(1, "config") + +self.send_command(" ".join(command)) +if verify: +# verification of ports has to be done based on if it was applied to all queues or +# not because the "Per Port" line doesn't get modified until the port is started. +current_offload_conf: OffloadConfiguration = ( +self.s
[RFC PATCH v1 3/5] dts: add offload configuration querying to testpmd
From: Jeremy Spewock Testpmd offers methods for querying the runtime configuration of offloads on a device that are useful for verification, but bindings to reach these methods do not exist in the Testpmd API offered in the framework. This patch creates methods that can query this configuration and also generalizes the OffloadCapability class to allow it to account for parsing the configuration output as well since the flag values will be the same. Signed-off-by: Jeremy Spewock --- dts/framework/remote_session/testpmd_shell.py | 84 ++- 1 file changed, 81 insertions(+), 3 deletions(-) diff --git a/dts/framework/remote_session/testpmd_shell.py b/dts/framework/remote_session/testpmd_shell.py index cfb51e3acb..58b8995d21 100644 --- a/dts/framework/remote_session/testpmd_shell.py +++ b/dts/framework/remote_session/testpmd_shell.py @@ -680,21 +680,41 @@ def from_string(cls, line: str) -> Self: return flag @classmethod -def make_parser(cls, per_port: bool) -> ParserFn: +def from_list(cls, lines: list[str]) -> list[Self]: +"""Make a list of instances from a list of strings that contain flag names. + +The strings are expected to separate the flag names by whitespace. + +Args: +lines: The list of strings to parse. + +Returns: +A list of instances parsed from each string in `lines`. +""" +return [cls.from_string(line) for line in lines] + +@classmethod +def make_parser(cls, per_port: bool, find_multiple: bool = False) -> ParserFn: """Make a parser function. Args: per_port: If :data:`True`, will return capabilities per port. If :data:`False`, will return capabilities per queue. +find_multiple: If :data:`True`, will use :func:`TextParser.find_all` to find all +matches for the regex query and return a list of instances based on those matches. +If :data:`False`, will return a single instance of the flag based off a single +match. Returns: ParserFn: A dictionary for the `dataclasses.field` metadata argument containing a parser function that makes an instance of this flag from text. """ granularity = "Port" if per_port else "Queue" +parser_func = TextParser.find_all if find_multiple else TextParser.find +instance_func = cls.from_list if find_multiple else cls.from_string return TextParser.wrap( -TextParser.find(rf"Per {granularity}\s+:(.*)$", re.MULTILINE), -cls.from_string, +parser_func(rf"{granularity}[\s\[\]\d]+:(.*)$", re.MULTILINE), +instance_func, ) @@ -824,6 +844,38 @@ class TxOffloadCapabilities(OffloadCapabilities): per_port: TxOffloadCapability = field(metadata=TxOffloadCapability.make_parser(True)) +@dataclass +class OffloadConfiguration(TextParser): +"""The result of testpmd's ``show port rx/tx_offload configuration`` command.""" + +#: +port_id: int = field( +metadata=TextParser.find_int(r"Offloading Configuration of port (\d+) :") +) +#: Queue offload configurations. +queues: list[RxOffloadCapability] | list[TxOffloadCapability] +#: Port offload configuration. 
+port: RxOffloadCapability | TxOffloadCapability + + +@dataclass +class RxOffloadConfiguration(OffloadConfiguration): +"""Extends :class:`OffloadingConfiguration` with Rx specific functionality.""" +#: +queues: list[RxOffloadCapability] = field(metadata=RxOffloadCapability.make_parser(False, find_multiple=True)) +#: +port: RxOffloadCapability = field(metadata=RxOffloadCapability.make_parser(True)) + + +@dataclass +class TxOffloadConfiguration(OffloadConfiguration): +"""Extends :class:`OffloadingConfiguration` with Tx specific functionality.""" +#: +queues: list[TxOffloadCapability] = field(metadata=TxOffloadCapability.make_parser(False, find_multiple=True)) +#: +port: TxOffloadCapability = field(metadata=TxOffloadCapability.make_parser(True)) + + T = TypeVarTuple("T") # type: ignore[misc] @@ -1592,6 +1644,32 @@ def show_port_tx_offload_capabilities(self, port_id: int) -> TxOffloadCapabiliti offload_capabilities_out = self.send_command(command) return TxOffloadCapabilities.parse(offload_capabilities_out) +def show_port_rx_offload_configuration(self, port_id: int) -> RxOffloadConfiguration: +"""Get the Rx offload configuration on a given port. + +Args: +port_id: The ID of the port to query the configuration of. + +Returns: +An instance of :class:`RxOffloadConfiguration` containing the offload configuration of +the port. +""" +output = self.send_command(f"show port {port_id} rx_offload configuration") +return RxOffloadConfiguration.parse(output) + +def show_
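A rough sketch of how the new query methods are meant to be consumed, based on assumptions rather than the patch itself: `testpmd` is a running TestPmdShell, and the Tx variant cut off above is assumed to be symmetric to the Rx one.

    rx_conf = testpmd.show_port_rx_offload_configuration(0)

    # The port-wide configuration is a single flag value; queues are a list of them.
    if RxOffloadCapability.SCATTER in rx_conf.port:
        print(f"scatter is enabled port-wide on port {rx_conf.port_id}")

    for queue_id, queue_conf in enumerate(rx_conf.queues):
        if RxOffloadCapability.IPV4_CKSUM in queue_conf:
            print(f"queue {queue_id} has IPv4 checksum offload enabled")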
[RFC PATCH v1 1/5] dts: add TX offload capabilities
From: Jeremy Spewock The ability to query RX offloading capabilities of a device already exist, but there are situations in other test suites where skipping a test case/suite based on if a TX capability is missing is also desirable. This patch expands the RX offloading capabilities class to also allow for collecting TX offloading capabilities by creating a common parent class with parsing utility that is generalized to both. Signed-off-by: Jeremy Spewock --- dts/framework/remote_session/testpmd_shell.py | 382 +- 1 file changed, 287 insertions(+), 95 deletions(-) diff --git a/dts/framework/remote_session/testpmd_shell.py b/dts/framework/remote_session/testpmd_shell.py index e097a10751..49ce9c 100644 --- a/dts/framework/remote_session/testpmd_shell.py +++ b/dts/framework/remote_session/testpmd_shell.py @@ -661,55 +661,8 @@ class TestPmdPortStats(TextParser): tx_bps: int = field(metadata=TextParser.find_int(r"Tx-bps:\s+(\d+)")) - -class RxOffloadCapability(Flag): -"""Rx offload capabilities of a device.""" - -#: -RX_OFFLOAD_VLAN_STRIP = auto() -#: Device supports L3 checksum offload. -RX_OFFLOAD_IPV4_CKSUM = auto() -#: Device supports L4 checksum offload. -RX_OFFLOAD_UDP_CKSUM = auto() -#: Device supports L4 checksum offload. -RX_OFFLOAD_TCP_CKSUM = auto() -#: Device supports Large Receive Offload. -RX_OFFLOAD_TCP_LRO = auto() -#: Device supports QinQ (queue in queue) offload. -RX_OFFLOAD_QINQ_STRIP = auto() -#: Device supports inner packet L3 checksum. -RX_OFFLOAD_OUTER_IPV4_CKSUM = auto() -#: Device supports MACsec. -RX_OFFLOAD_MACSEC_STRIP = auto() -#: Device supports filtering of a VLAN Tag identifier. -RX_OFFLOAD_VLAN_FILTER = 1 << 9 -#: Device supports VLAN offload. -RX_OFFLOAD_VLAN_EXTEND = auto() -#: Device supports receiving segmented mbufs. -RX_OFFLOAD_SCATTER = 1 << 13 -#: Device supports Timestamp. -RX_OFFLOAD_TIMESTAMP = auto() -#: Device supports crypto processing while packet is received in NIC. -RX_OFFLOAD_SECURITY = auto() -#: Device supports CRC stripping. -RX_OFFLOAD_KEEP_CRC = auto() -#: Device supports L4 checksum offload. -RX_OFFLOAD_SCTP_CKSUM = auto() -#: Device supports inner packet L4 checksum. -RX_OFFLOAD_OUTER_UDP_CKSUM = auto() -#: Device supports RSS hashing. -RX_OFFLOAD_RSS_HASH = auto() -#: Device supports -RX_OFFLOAD_BUFFER_SPLIT = auto() -#: Device supports all checksum capabilities. -RX_OFFLOAD_CHECKSUM = RX_OFFLOAD_IPV4_CKSUM | RX_OFFLOAD_UDP_CKSUM | RX_OFFLOAD_TCP_CKSUM -#: Device supports all VLAN capabilities. -RX_OFFLOAD_VLAN = ( -RX_OFFLOAD_VLAN_STRIP -| RX_OFFLOAD_VLAN_FILTER -| RX_OFFLOAD_VLAN_EXTEND -| RX_OFFLOAD_QINQ_STRIP -) +class OffloadCapability(Flag): +"""Offload capabilities of a device.""" @classmethod def from_string(cls, line: str) -> Self: @@ -723,7 +676,7 @@ def from_string(cls, line: str) -> Self: """ flag = cls(0) for flag_name in line.split(): -flag |= cls[f"RX_OFFLOAD_{flag_name}"] +flag |= cls[flag_name] return flag @classmethod @@ -745,20 +698,132 @@ def make_parser(cls, per_port: bool) -> ParserFn: ) +class RxOffloadCapability(OffloadCapability): +"""Rx offload capabilities of a device.""" + +#: Device supports L3 checksum offload. +IPV4_CKSUM = auto() +#: Device supports L4 checksum offload. +UDP_CKSUM = auto() +#: Device supports L4 checksum offload. +TCP_CKSUM = auto() +#: Device supports inner packet L3 checksum. +OUTER_IPV4_CKSUM = auto() +#: Device supports crypto processing while packet is received in NIC. +SECURITY = auto() +#: +VLAN_STRIP = auto() +#: Device supports Large Receive Offload. 
+TCP_LRO = auto() +#: Device supports QinQ (queue in queue) offload. +QINQ_STRIP = auto() +#: Device supports MACsec. +MACSEC_STRIP = auto() +#: Device supports filtering of a VLAN Tag identifier. +VLAN_FILTER = 1 << 9 +#: Device supports VLAN offload. +VLAN_EXTEND = auto() +#: Device supports receiving segmented mbufs. +SCATTER = 1 << 13 +#: Device supports Timestamp. +TIMESTAMP = auto() +#: Device supports CRC stripping. +KEEP_CRC = auto() +#: Device supports L4 checksum offload. +SCTP_CKSUM = auto() +#: Device supports inner packet L4 checksum. +OUTER_UDP_CKSUM = auto() +#: Device supports RSS hashing. +RSS_HASH = auto() +#: Device supports +BUFFER_SPLIT = auto() +#: Device supports all checksum capabilities. +CHECKSUM = IPV4_CKSUM | UDP_CKSUM | TCP_CKSUM +#: Device supports all VLAN capabilities. +VLAN = VLAN_STRIP | VLAN_FILTER | VLAN_EXTEND | QINQ_STRIP + + +class TxOffloadCapability(OffloadCapability): +"""Tx offload capabilities of a device.""" + +
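The shared parsing pattern the new base class relies on can be shown with a small self-contained example; the class and member names below are illustrative stand-ins, not the patch's.

    from enum import Flag, auto

    class DemoOffload(Flag):
        """Stand-in for the OffloadCapability parsing pattern."""
        IPV4_CKSUM = auto()
        UDP_CKSUM = auto()
        SCATTER = auto()

        @classmethod
        def from_string(cls, line: str) -> "DemoOffload":
            flag = cls(0)
            for flag_name in line.split():
                flag |= cls[flag_name]  # member names now match testpmd output directly
            return flag

    parsed = DemoOffload.from_string("IPV4_CKSUM SCATTER")
    assert DemoOffload.SCATTER in parsed and DemoOffload.UDP_CKSUM not in parsed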
[RFC PATCH v1 0/5] dts: port over Rx/Tx offload suite
From: Jeremy Spewock This series ports over the functionality of the Rx/Tx offloading test suite from Old DTS. I marked this suite as an RFC since there are still some errors that I need to fix from the formatting script, but I felt that the commits were in a place where they could be viewed to understand the idea of what I was working on. There are a lot of dependencies for this series, I listed them below in the order that I applied them. depends-on: patch-142691 (“dts:add send_packets to test suites and rework packet addressing”) depends-on: series-32799 (“dts: add test skipping based on capabilities”) depends-on: patch-143033 (“dts: add text parser for testpmd verbose output”) depends-on: patch-143385 (“dts: add VLAN methods to testpmd shell”) depends-on: patch-143113 ("dts: add flow rule dataclass to testpmd shell") Jeremy Spewock (5): dts: add TX offload capabilities dts: add a distinction between port and queue offload capabilities dts: add offload configuration querying to testpmd dts: add methods for configuring offloads on a device in testpmd dts: add test suite for RX and TX offloads dts/framework/config/conf_yaml_schema.json| 3 +- dts/framework/remote_session/testpmd_shell.py | 908 +++--- dts/tests/TestSuite_pmd_buffer_scatter.py | 2 +- dts/tests/TestSuite_rxtx_offload.py | 622 4 files changed, 1414 insertions(+), 121 deletions(-) create mode 100644 dts/tests/TestSuite_rxtx_offload.py -- 2.46.0
[RFC PATCH v1 5/5] dts: add test suite for RX and TX offloads
From: Jeremy Spewock This patch adds a test suite that ports over and expands upon functionality provided in the RxTx test suite in Old DTS. This test suite provides convenience methods and decorators in an attempt to reduce code duplication when developers are tasked with testing the same offloaded functionality through 3 different mediums (passed on the command-line, configured on a port at runtime, and configured on queues at runtime). Signed-off-by: Jeremy Spewock --- dts/framework/config/conf_yaml_schema.json | 3 +- dts/tests/TestSuite_rxtx_offload.py| 622 + 2 files changed, 624 insertions(+), 1 deletion(-) create mode 100644 dts/tests/TestSuite_rxtx_offload.py diff --git a/dts/framework/config/conf_yaml_schema.json b/dts/framework/config/conf_yaml_schema.json index f02a310bb5..c1243ea5d8 100644 --- a/dts/framework/config/conf_yaml_schema.json +++ b/dts/framework/config/conf_yaml_schema.json @@ -187,7 +187,8 @@ "enum": [ "hello_world", "os_udp", -"pmd_buffer_scatter" +"pmd_buffer_scatter", +"rxtx_offload" ] }, "test_target": { diff --git a/dts/tests/TestSuite_rxtx_offload.py b/dts/tests/TestSuite_rxtx_offload.py new file mode 100644 index 00..c9ed04f29d --- /dev/null +++ b/dts/tests/TestSuite_rxtx_offload.py @@ -0,0 +1,622 @@ +"""Rx/Tx offload configuration suite. + +The goal of this suite is to test the support for three different methods of offloading different +capabilities using testpmd: On a queue, on a port, and on the command-line. Support for configuring +the capability through different means is tested alongside verifying the functionality of the +capability when offloaded. Each of the three methods of setting the capability should be tested +using the same criteria to monitor for differences between the methods. + +Testing consists of enabling the capability if it wasn't passed in through the command-line, +verifying that the capability performs its expected task, then, in the general case, disabling the +capability and verifying that the same result is not achieved in a default state. Some cases do not +check the base-case since their functionality is enabled by default without offloading the +capability (like invalid checksum verification, for example). + +There should be test cases for each of the 3 configuration strategies for every offload that is +tested. Additionally, there are two test cases that validate the ability to enable +every offload that a device supports on its port without actually testing the functionality of the +offload for both Rx and Tx. +""" + +import random +from typing import Callable, ClassVar, Protocol + +from scapy.layers.inet import IP, TCP, UDP +from scapy.layers.l2 import Dot1Q, Ether +from scapy.packet import Packet, Raw +from typing_extensions import Unpack + +from framework.exception import InteractiveCommandExecutionError, TestCaseVerifyError +from framework.params.types import TestPmdParamsDict +from framework.remote_session.testpmd_shell import ( +SimpleForwardingModes, +OffloadCapability, +RxOffloadCapability, +TxOffloadCapability, +TestPmdShell, +OLFlag, +TestPmdVerbosePacket, +NicCapability, +FlowRule +) +from framework.testbed_model.capability import requires +from framework.test_suite import TestSuite, func_test + + +class DecoratedFuncType(Protocol): +"""Protocol used to provide a useful typehint for methods that are decorated. + +Methods decorated by :meth:`TestRxtxOffload.setup_testpmd` are able to pass kwargs into the +:class:`TestPmdShell` upon creation and therefore have many non-obvious arguments.
This type +allows static type checkers the ability to unpack and expose all of those non-obvious +arguments. +""" + +def __call__( +self, +test: "TestRxtxOffload", +port_id: int, +no_set: bool = False, +modify_queues: bool = False, +no_invert: bool = False, +**kwargs: Unpack[TestPmdParamsDict], +) -> None: +"""Function stub to create callable type. + +Args: +test: Instance of the test suite class that the methods belong to. +port_id: ID of the port to set the offloads on. +no_set: Whether to enable the offload before testing or not. When :data:`True`, +the method will validate that the offload is already configured rather than +enabling it. This is used in test cases that enable the offload using the +command-line. Defaults to :data:`False`. +modify_queues: Whether to add offloads to individual queues or the entire port. +If :data:`True`, individual queues will be modified, otherwise the whole +port will. Defaults to :data:`False`. +no_invert: If :data:`True` skip testing behavior of testpmd without the offload +
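The typing technique behind DecoratedFuncType, a Protocol with a __call__ signature, can be demonstrated with a self-contained example unrelated to testpmd; the names below are illustrative only.

    from typing import Protocol

    class GreeterFunc(Protocol):
        """Anything matching this call signature satisfies the protocol."""
        def __call__(self, name: str, *, excited: bool = False) -> str: ...

    def make_greeter(prefix: str) -> GreeterFunc:
        # The closure keeps its full parameter information for static type
        # checkers instead of collapsing to an opaque Callable[..., str].
        def greet(name: str, *, excited: bool = False) -> str:
            return f"{prefix} {name}{'!' if excited else '.'}"
        return greet

    hello = make_greeter("Hello,")
    print(hello("DTS", excited=True))  # Hello, DTS!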
[RFC PATCH v1 2/5] dts: add a distinction between port and queue offload capabilities
From: Jeremy Spewock Currently in the framework offloading capabilities are collected at a device-level meaning that, if a capability is supported on either an entire port or individual queues on that port, it is marked as supported for the device. In some cases there is a want for being able to get the granularity of if an offload can be applied on queues rather than just on the device in general since all capabilities that are supported on queues are supported on ports, but not all capabilities that are supported on ports are supported on queues. This means that the less granular option of a combination of the two is still achievable by simply specifying that you require a port to be capable of an offload. This allows for granularity where needed, but generalization elsewhere. Signed-off-by: Jeremy Spewock --- dts/framework/remote_session/testpmd_shell.py | 259 +++--- dts/tests/TestSuite_pmd_buffer_scatter.py | 2 +- 2 files changed, 217 insertions(+), 44 deletions(-) diff --git a/dts/framework/remote_session/testpmd_shell.py b/dts/framework/remote_session/testpmd_shell.py index 49ce9c..cfb51e3acb 100644 --- a/dts/framework/remote_session/testpmd_shell.py +++ b/dts/framework/remote_session/testpmd_shell.py @@ -1975,12 +1975,21 @@ def get_capabilities( self.show_port_tx_offload_capabilities ) offload_capabilities = offload_capabilities_func(self.ports[0].id) +# Any offload that can be set on an individual queue can also be set on the whole port, +# but not every capability that can be set on the port can be set on each queue. self._update_capabilities_from_flag( supported_capabilities, unsupported_capabilities, capabilities_class, -offload_capabilities.per_port, -prefix=f"{offload_type}_OFFLOAD_" +offload_capabilities.per_port | offload_capabilities.per_queue, +prefix=f"PORT_{offload_type}_OFFLOAD_" +) +self._update_capabilities_from_flag( +supported_capabilities, +unsupported_capabilities, +capabilities_class, +offload_capabilities.per_queue, +prefix=f"QUEUE_{offload_type}_OFFLOAD_" ) return get_capabilities @@ -2089,167 +2098,331 @@ class NicCapability(NoAliasEnum): TestPmdShell.get_capabilities_rxq_info ) #: -RX_OFFLOAD_VLAN_STRIP: TestPmdShellCapabilityMethod = partial( +PORT_RX_OFFLOAD_VLAN_STRIP: TestPmdShellCapabilityMethod = partial( TestPmdShell.get_offload_capabilities_func(True) ) #: Device supports L3 checksum offload. -RX_OFFLOAD_IPV4_CKSUM: TestPmdShellCapabilityMethod = partial( +PORT_RX_OFFLOAD_IPV4_CKSUM: TestPmdShellCapabilityMethod = partial( TestPmdShell.get_offload_capabilities_func(True) ) #: Device supports L4 checksum offload. -RX_OFFLOAD_UDP_CKSUM: TestPmdShellCapabilityMethod = partial( +PORT_RX_OFFLOAD_UDP_CKSUM: TestPmdShellCapabilityMethod = partial( TestPmdShell.get_offload_capabilities_func(True) ) #: Device supports L4 checksum offload. -RX_OFFLOAD_TCP_CKSUM: TestPmdShellCapabilityMethod = partial( +PORT_RX_OFFLOAD_TCP_CKSUM: TestPmdShellCapabilityMethod = partial( TestPmdShell.get_offload_capabilities_func(True) ) #: Device supports Large Receive Offload. -RX_OFFLOAD_TCP_LRO: TestPmdShellCapabilityMethod = partial( +PORT_RX_OFFLOAD_TCP_LRO: TestPmdShellCapabilityMethod = partial( TestPmdShell.get_offload_capabilities_func(True) ) #: Device supports QinQ (queue in queue) offload. -RX_OFFLOAD_QINQ_STRIP: TestPmdShellCapabilityMethod = partial( +PORT_RX_OFFLOAD_QINQ_STRIP: TestPmdShellCapabilityMethod = partial( TestPmdShell.get_offload_capabilities_func(True) ) #: Device supports inner packet L3 checksum. 
-RX_OFFLOAD_OUTER_IPV4_CKSUM: TestPmdShellCapabilityMethod = partial( +PORT_RX_OFFLOAD_OUTER_IPV4_CKSUM: TestPmdShellCapabilityMethod = partial( TestPmdShell.get_offload_capabilities_func(True) ) #: Device supports MACsec. -RX_OFFLOAD_MACSEC_STRIP: TestPmdShellCapabilityMethod = partial( +PORT_RX_OFFLOAD_MACSEC_STRIP: TestPmdShellCapabilityMethod = partial( TestPmdShell.get_offload_capabilities_func(True) ) #: Device supports filtering of a VLAN Tag identifier. -RX_OFFLOAD_VLAN_FILTER: TestPmdShellCapabilityMethod = partial( +PORT_RX_OFFLOAD_VLAN_FILTER: TestPmdShellCapabilityMethod = partial( TestPmdShell.get_offload_capabilities_func(True) ) #: Device supports VLAN offload. -RX_OFFLOAD_VLAN_EXTEND: TestPmdShellCapabilityMethod = partial( +PORT_RX_OFFLOAD_VLAN_EXTEND: TestPmdShellCapabilityMethod = partial( TestPmdShell.get_offload_capabilities_func(
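The key mechanical change in this patch is that the PORT_-prefixed capabilities are built from the union of testpmd's per-port and per-queue flags, while the QUEUE_-prefixed ones come from the per-queue flags alone. Below is a minimal, self-contained sketch of that relationship using Python's enum.Flag; the flag members are illustrative stand-ins, not the framework's full RxOffloadCapability.

from enum import Flag, auto


class RxOffload(Flag):
    """Illustrative stand-in for the framework's RxOffloadCapability flag."""

    VLAN_STRIP = auto()
    IPV4_CKSUM = auto()
    SCATTER = auto()


# Hypothetical values parsed from "show port <id> rx_offload capabilities":
per_queue = RxOffload.VLAN_STRIP                     # settable on individual queues
per_port = RxOffload.IPV4_CKSUM | RxOffload.SCATTER  # settable only on the whole port

# Anything settable on a queue is also settable on the port, so port-level support
# is the union of the two flags, while queue-level support is per_queue alone.
port_capabilities = per_port | per_queue
queue_capabilities = per_queue

assert RxOffload.VLAN_STRIP in port_capabilities       # queue-capable implies port-capable
assert RxOffload.IPV4_CKSUM not in queue_capabilities  # port-only offloads stay port-only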
[PATCH v2 0/5] dts: port over Rx/Tx offload suite
From: Jeremy Spewock v2: * added fixes to allow for passing the formatting script * removed some unused classes in TestPmdShell There are a lot of dependencies for this series, I listed them below in the order that I applied them. depends-on: patch-142691 (“dts:add send_packets to test suites and rework packet addressing”) depends-on: series-32799 (“dts: add test skipping based on capabilities”) depends-on: patch-143033 (“dts: add text parser for testpmd verbose output”) depends-on: patch-143385 (“dts: add VLAN methods to testpmd shell”) depends-on: patch-143113 ("dts: add flow rule dataclass to testpmd shell") Jeremy Spewock (5): dts: add TX offload capabilities dts: add a distinction between port and queue offload capabilities dts: add offload configuration querying to testpmd dts: add methods for configuring offloads on a device in testpmd dts: add test suite for RX and TX offloads dts/framework/config/conf_yaml_schema.json| 3 +- dts/framework/remote_session/testpmd_shell.py | 842 +++--- dts/tests/TestSuite_pmd_buffer_scatter.py | 2 +- dts/tests/TestSuite_rxtx_offload.py | 627 + 4 files changed, 1350 insertions(+), 124 deletions(-) create mode 100644 dts/tests/TestSuite_rxtx_offload.py -- 2.46.0
[PATCH v2 1/5] dts: add TX offload capabilities
From: Jeremy Spewock The ability to query RX offloading capabilities of a device already exist, but there are situations in other test suites where skipping a test case/suite based on if a TX capability is missing is also desirable. This patch expands the RX offloading capabilities class to also allow for collecting TX offloading capabilities by creating a common parent class with parsing utility that is generalized to both. Signed-off-by: Jeremy Spewock --- dts/framework/remote_session/testpmd_shell.py | 395 +- 1 file changed, 297 insertions(+), 98 deletions(-) diff --git a/dts/framework/remote_session/testpmd_shell.py b/dts/framework/remote_session/testpmd_shell.py index 99f327a91b..13001d 100644 --- a/dts/framework/remote_session/testpmd_shell.py +++ b/dts/framework/remote_session/testpmd_shell.py @@ -21,7 +21,7 @@ from enum import Flag, auto from functools import partial from pathlib import PurePath -from typing import TYPE_CHECKING, Any, ClassVar, TypeAlias +from typing import TYPE_CHECKING, Any, ClassVar, TypeAlias, cast if TYPE_CHECKING: from enum import Enum as NoAliasEnum @@ -661,55 +661,8 @@ class TestPmdPortStats(TextParser): tx_bps: int = field(metadata=TextParser.find_int(r"Tx-bps:\s+(\d+)")) - -class RxOffloadCapability(Flag): -"""Rx offload capabilities of a device.""" - -#: -RX_OFFLOAD_VLAN_STRIP = auto() -#: Device supports L3 checksum offload. -RX_OFFLOAD_IPV4_CKSUM = auto() -#: Device supports L4 checksum offload. -RX_OFFLOAD_UDP_CKSUM = auto() -#: Device supports L4 checksum offload. -RX_OFFLOAD_TCP_CKSUM = auto() -#: Device supports Large Receive Offload. -RX_OFFLOAD_TCP_LRO = auto() -#: Device supports QinQ (queue in queue) offload. -RX_OFFLOAD_QINQ_STRIP = auto() -#: Device supports inner packet L3 checksum. -RX_OFFLOAD_OUTER_IPV4_CKSUM = auto() -#: Device supports MACsec. -RX_OFFLOAD_MACSEC_STRIP = auto() -#: Device supports filtering of a VLAN Tag identifier. -RX_OFFLOAD_VLAN_FILTER = 1 << 9 -#: Device supports VLAN offload. -RX_OFFLOAD_VLAN_EXTEND = auto() -#: Device supports receiving segmented mbufs. -RX_OFFLOAD_SCATTER = 1 << 13 -#: Device supports Timestamp. -RX_OFFLOAD_TIMESTAMP = auto() -#: Device supports crypto processing while packet is received in NIC. -RX_OFFLOAD_SECURITY = auto() -#: Device supports CRC stripping. -RX_OFFLOAD_KEEP_CRC = auto() -#: Device supports L4 checksum offload. -RX_OFFLOAD_SCTP_CKSUM = auto() -#: Device supports inner packet L4 checksum. -RX_OFFLOAD_OUTER_UDP_CKSUM = auto() -#: Device supports RSS hashing. -RX_OFFLOAD_RSS_HASH = auto() -#: Device supports -RX_OFFLOAD_BUFFER_SPLIT = auto() -#: Device supports all checksum capabilities. -RX_OFFLOAD_CHECKSUM = RX_OFFLOAD_IPV4_CKSUM | RX_OFFLOAD_UDP_CKSUM | RX_OFFLOAD_TCP_CKSUM -#: Device supports all VLAN capabilities. -RX_OFFLOAD_VLAN = ( -RX_OFFLOAD_VLAN_STRIP -| RX_OFFLOAD_VLAN_FILTER -| RX_OFFLOAD_VLAN_EXTEND -| RX_OFFLOAD_QINQ_STRIP -) +class OffloadCapability(Flag): +"""Offload capabilities of a device.""" @classmethod def from_string(cls, line: str) -> Self: @@ -723,7 +676,7 @@ def from_string(cls, line: str) -> Self: """ flag = cls(0) for flag_name in line.split(): -flag |= cls[f"RX_OFFLOAD_{flag_name}"] +flag |= cls[flag_name] return flag @classmethod @@ -745,20 +698,130 @@ def make_parser(cls, per_port: bool) -> ParserFn: ) +class RxOffloadCapability(OffloadCapability): +"""Rx offload capabilities of a device.""" + +#: Device supports L3 checksum offload. +IPV4_CKSUM = auto() +#: Device supports L4 checksum offload. 
+UDP_CKSUM = auto() +#: Device supports L4 checksum offload. +TCP_CKSUM = auto() +#: Device supports inner packet L3 checksum. +OUTER_IPV4_CKSUM = auto() +#: Device supports crypto processing while packet is received in NIC. +SECURITY = auto() +#: +VLAN_STRIP = auto() +#: Device supports Large Receive Offload. +TCP_LRO = auto() +#: Device supports QinQ (queue in queue) offload. +QINQ_STRIP = auto() +#: Device supports MACsec. +MACSEC_STRIP = auto() +#: Device supports filtering of a VLAN Tag identifier. +VLAN_FILTER = 1 << 9 +#: Device supports VLAN offload. +VLAN_EXTEND = auto() +#: Device supports receiving segmented mbufs. +SCATTER = 1 << 13 +#: Device supports Timestamp. +TIMESTAMP = auto() +#: Device supports CRC stripping. +KEEP_CRC = auto() +#: Device supports L4 checksum offload. +SCTP_CKSUM = auto() +#: Device supports inner packet L4 checksum. +OUTER_UDP_CKSUM = auto() +#: Device supports RSS hashing. +RSS_HASH = auto() +#: Device supports +BUFFER_SPLIT = auto() +#: Device support
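The generalization above hinges on the parent class's from_string() no longer injecting an RX_OFFLOAD_ prefix, so the same parsing utility serves both the Rx and Tx flag classes. A small, hedged sketch of that parsing step outside the framework; the flag members are illustrative.

from enum import Flag, auto


class OffloadFlag(Flag):
    """Illustrative stand-in for the shared OffloadCapability parent class."""

    IPV4_CKSUM = auto()
    UDP_CKSUM = auto()
    TCP_CKSUM = auto()

    @classmethod
    def from_string(cls, line: str) -> "OffloadFlag":
        """OR together every whitespace-separated flag name found in `line`."""
        flag = cls(0)
        for flag_name in line.split():
            flag |= cls[flag_name]
        return flag


# Only the text captured after "Per Port :" / "Per Queue :" is handed to from_string().
parsed = OffloadFlag.from_string("IPV4_CKSUM UDP_CKSUM")
assert parsed == OffloadFlag.IPV4_CKSUM | OffloadFlag.UDP_CKSUM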
[PATCH v2 2/5] dts: add a distinction between port and queue offload capabilities
From: Jeremy Spewock Currently in the framework offloading capabilities are collected at a device-level meaning that, if a capability is supported on either an entire port or individual queues on that port, it is marked as supported for the device. In some cases there is a want for being able to get the granularity of if an offload can be applied on queues rather than just on the device in general since all capabilities that are supported on queues are supported on ports, but not all capabilities that are supported on ports are supported on queues. This means that the less granular option of a combination of the two is still achievable by simply specifying that you require a port to be capable of an offload. This allows for granularity where needed, but generalization elsewhere. Signed-off-by: Jeremy Spewock --- dts/framework/remote_session/testpmd_shell.py | 257 +++--- dts/tests/TestSuite_pmd_buffer_scatter.py | 2 +- 2 files changed, 216 insertions(+), 43 deletions(-) diff --git a/dts/framework/remote_session/testpmd_shell.py b/dts/framework/remote_session/testpmd_shell.py index 13001d..df4ed7ce5c 100644 --- a/dts/framework/remote_session/testpmd_shell.py +++ b/dts/framework/remote_session/testpmd_shell.py @@ -1982,12 +1982,21 @@ def get_capabilities( # Cast to the generic type for mypy per_port = cast(OffloadCapability, offload_capabilities.per_port) per_queue = cast(OffloadCapability, offload_capabilities.per_queue) +# Any offload that can be set on an individual queue can also be set on the whole port, +# but not every capability that can be set on the port can be set on each queue. self._update_capabilities_from_flag( supported_capabilities, unsupported_capabilities, capabilities_class, per_port | per_queue, -prefix=f"{offload_type}_OFFLOAD_", +prefix=f"PORT_{offload_type}_OFFLOAD_", +) +self._update_capabilities_from_flag( +supported_capabilities, +unsupported_capabilities, +capabilities_class, +per_queue, +prefix=f"QUEUE_{offload_type}_OFFLOAD_", ) return get_capabilities @@ -2097,167 +2106,331 @@ class NicCapability(NoAliasEnum): TestPmdShell.get_capabilities_rxq_info ) #: -RX_OFFLOAD_VLAN_STRIP: TestPmdShellCapabilityMethod = partial( +PORT_RX_OFFLOAD_VLAN_STRIP: TestPmdShellCapabilityMethod = partial( TestPmdShell.get_offload_capabilities_func(True) ) #: Device supports L3 checksum offload. -RX_OFFLOAD_IPV4_CKSUM: TestPmdShellCapabilityMethod = partial( +PORT_RX_OFFLOAD_IPV4_CKSUM: TestPmdShellCapabilityMethod = partial( TestPmdShell.get_offload_capabilities_func(True) ) #: Device supports L4 checksum offload. -RX_OFFLOAD_UDP_CKSUM: TestPmdShellCapabilityMethod = partial( +PORT_RX_OFFLOAD_UDP_CKSUM: TestPmdShellCapabilityMethod = partial( TestPmdShell.get_offload_capabilities_func(True) ) #: Device supports L4 checksum offload. -RX_OFFLOAD_TCP_CKSUM: TestPmdShellCapabilityMethod = partial( +PORT_RX_OFFLOAD_TCP_CKSUM: TestPmdShellCapabilityMethod = partial( TestPmdShell.get_offload_capabilities_func(True) ) #: Device supports Large Receive Offload. -RX_OFFLOAD_TCP_LRO: TestPmdShellCapabilityMethod = partial( +PORT_RX_OFFLOAD_TCP_LRO: TestPmdShellCapabilityMethod = partial( TestPmdShell.get_offload_capabilities_func(True) ) #: Device supports QinQ (queue in queue) offload. -RX_OFFLOAD_QINQ_STRIP: TestPmdShellCapabilityMethod = partial( +PORT_RX_OFFLOAD_QINQ_STRIP: TestPmdShellCapabilityMethod = partial( TestPmdShell.get_offload_capabilities_func(True) ) #: Device supports inner packet L3 checksum. 
-RX_OFFLOAD_OUTER_IPV4_CKSUM: TestPmdShellCapabilityMethod = partial( +PORT_RX_OFFLOAD_OUTER_IPV4_CKSUM: TestPmdShellCapabilityMethod = partial( TestPmdShell.get_offload_capabilities_func(True) ) #: Device supports MACsec. -RX_OFFLOAD_MACSEC_STRIP: TestPmdShellCapabilityMethod = partial( +PORT_RX_OFFLOAD_MACSEC_STRIP: TestPmdShellCapabilityMethod = partial( TestPmdShell.get_offload_capabilities_func(True) ) #: Device supports filtering of a VLAN Tag identifier. -RX_OFFLOAD_VLAN_FILTER: TestPmdShellCapabilityMethod = partial( +PORT_RX_OFFLOAD_VLAN_FILTER: TestPmdShellCapabilityMethod = partial( TestPmdShell.get_offload_capabilities_func(True) ) #: Device supports VLAN offload. -RX_OFFLOAD_VLAN_EXTEND: TestPmdShellCapabilityMethod = partial( +PORT_RX_OFFLOAD_VLAN_EXTEND: TestPmdShellCapabilityMethod = partial( TestPmdShell.get_offload_capabilities_func(True) ) #: Device supports receiving seg
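With the split in place, a suite that only needs port-level support requires the PORT_-prefixed NicCapability member, while one that must toggle the offload on individual queues requires the QUEUE_-prefixed one. A hedged sketch of how that choice might look; the suite itself and the placement of the requires decorator on individual test cases are illustrative, not taken from this series.

from framework.remote_session.testpmd_shell import NicCapability
from framework.test_suite import TestSuite, func_test
from framework.testbed_model.capability import requires


class TestOffloadGranularityExample(TestSuite):
    """Illustrative suite showing the PORT_/QUEUE_ prefix choice."""

    @requires(NicCapability.PORT_RX_OFFLOAD_VLAN_STRIP)
    @func_test
    def test_port_level(self) -> None:
        """Skipped only if the port as a whole cannot strip VLAN tags."""

    @requires(NicCapability.QUEUE_RX_OFFLOAD_VLAN_STRIP)
    @func_test
    def test_queue_level(self) -> None:
        """Skipped unless VLAN stripping can be applied to individual queues."""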
[PATCH v2 3/5] dts: add offload configuration querying to testpmd
From: Jeremy Spewock Testpmd offers methods for querying the runtime configuration of offloads on a device that are useful for verification, but bindings to reach these methods do not exist in the Testpmd API offered in the framework. This patch creates methods that can query this configuration and also generalizes the OffloadCapability class to allow it to account for parsing the configuration output as well since the flag values will be the same. Signed-off-by: Jeremy Spewock --- dts/framework/remote_session/testpmd_shell.py | 92 ++- 1 file changed, 89 insertions(+), 3 deletions(-) diff --git a/dts/framework/remote_session/testpmd_shell.py b/dts/framework/remote_session/testpmd_shell.py index df4ed7ce5c..71859c63da 100644 --- a/dts/framework/remote_session/testpmd_shell.py +++ b/dts/framework/remote_session/testpmd_shell.py @@ -680,21 +680,45 @@ def from_string(cls, line: str) -> Self: return flag @classmethod -def make_parser(cls, per_port: bool) -> ParserFn: +def from_list(cls, lines: list[str]) -> list[Self]: +"""Make a list of instances from a list of strings that contain flag names. + +The strings are expected to separate the flag names by whitespace. + +Args: +lines: The list of strings to parse. + +Returns: +A list of instances parsed from each string in `lines`. +""" +return [cls.from_string(line) for line in lines] + +@classmethod +def make_parser(cls, per_port: bool, find_multiple: bool = False) -> ParserFn: """Make a parser function. Args: per_port: If :data:`True`, will return capabilities per port. If :data:`False`, will return capabilities per queue. +find_multiple: If :data:`True`, will use :func:`TextParser.find_all` to find all +matches for the regex query and return a list of instances based on those matches. +If :data:`False`, will return a single instance of the flag based off a single +match. Returns: ParserFn: A dictionary for the `dataclasses.field` metadata argument containing a parser function that makes an instance of this flag from text. """ granularity = "Port" if per_port else "Queue" +parser_func: Callable[..., ParserFn] | Callable[..., ParserFn] = ( +TextParser.find_all if find_multiple else TextParser.find +) +instance_func: Callable[..., list[OffloadCapability]] | Callable[..., OffloadCapability] = ( +cls.from_list if find_multiple else cls.from_string +) return TextParser.wrap( -TextParser.find(rf"Per {granularity}\s+:(.*)$", re.MULTILINE), -cls.from_string, +parser_func(rf"{granularity}[\s\[\]\d]+:(.*)$", re.MULTILINE), +instance_func, ) @@ -822,6 +846,42 @@ class TxOffloadCapabilities(OffloadCapabilities): per_port: TxOffloadCapability = field(metadata=TxOffloadCapability.make_parser(True)) +@dataclass +class OffloadConfiguration(TextParser): +"""The result of testpmd's ``show port rx/tx_offload configuration`` command.""" + +#: +port_id: int = field(metadata=TextParser.find_int(r"Offloading Configuration of port (\d+) :")) +#: Queue offload configurations. +queues: list[RxOffloadCapability] | list[TxOffloadCapability] +#: Port offload configuration. 
+port: RxOffloadCapability | TxOffloadCapability + + +@dataclass +class RxOffloadConfiguration(OffloadConfiguration): +"""Extends :class:`OffloadingConfiguration` with Rx specific functionality.""" + +#: +queues: list[RxOffloadCapability] = field( +metadata=RxOffloadCapability.make_parser(False, find_multiple=True) +) +#: +port: RxOffloadCapability = field(metadata=RxOffloadCapability.make_parser(True)) + + +@dataclass +class TxOffloadConfiguration(OffloadConfiguration): +"""Extends :class:`OffloadingConfiguration` with Tx specific functionality.""" + +#: +queues: list[TxOffloadCapability] = field( +metadata=TxOffloadCapability.make_parser(False, find_multiple=True) +) +#: +port: TxOffloadCapability = field(metadata=TxOffloadCapability.make_parser(True)) + + T = TypeVarTuple("T") # type: ignore[misc] @@ -1590,6 +1650,32 @@ def show_port_tx_offload_capabilities(self, port_id: int) -> TxOffloadCapabiliti offload_capabilities_out = self.send_command(command) return TxOffloadCapabilities.parse(offload_capabilities_out) +def show_port_rx_offload_configuration(self, port_id: int) -> RxOffloadConfiguration: +"""Get the Rx offload configuration on a given port. + +Args: +port_id: The ID of the port to query the configuration of. + +Returns: +An instance of :class:`RxOffloadConfiguration` containing the offload configurati
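A hedged usage sketch for the new query methods and dataclasses; it assumes an already-running TestPmdShell and checks where VLAN stripping is currently active on a port.

from framework.remote_session.testpmd_shell import RxOffloadCapability, TestPmdShell


def report_vlan_strip_state(testpmd: TestPmdShell, port_id: int = 0) -> None:
    """Print whether VLAN stripping is active on the port and on each of its queues."""
    conf = testpmd.show_port_rx_offload_configuration(port_id)

    # `port` holds the offloads currently active on the whole port...
    if RxOffloadCapability.VLAN_STRIP in conf.port:
        print(f"port {port_id}: VLAN stripping active at the port level")

    # ...while `queues` holds one parsed flag instance per queue, in queue-ID order.
    for queue_id, queue_conf in enumerate(conf.queues):
        state = "on" if RxOffloadCapability.VLAN_STRIP in queue_conf else "off"
        print(f"port {port_id} rxq {queue_id}: VLAN_STRIP {state}")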
[PATCH v2 4/5] dts: add methods for configuring offloads on a device in testpmd
From: Jeremy Spewock Testpmd offers methods to add and remove offloads from both ports and queues on ports, but there are not yet method bindings in the Testpmd API that the framework provides to reach them. This patch adds these bindings for future test cases/suites that require certain functionalities to be offloaded on the device. Signed-off-by: Jeremy Spewock --- dts/framework/remote_session/testpmd_shell.py | 142 +- 1 file changed, 141 insertions(+), 1 deletion(-) diff --git a/dts/framework/remote_session/testpmd_shell.py b/dts/framework/remote_session/testpmd_shell.py index 71859c63da..447d6a617d 100644 --- a/dts/framework/remote_session/testpmd_shell.py +++ b/dts/framework/remote_session/testpmd_shell.py @@ -32,7 +32,7 @@ from typing_extensions import TypeVarTuple -from framework.exception import InteractiveCommandExecutionError +from framework.exception import InteractiveCommandExecutionError, InternalError from framework.params.testpmd import SimpleForwardingModes, TestPmdParams from framework.params.types import TestPmdParamsDict from framework.parser import ParserFn, TextParser @@ -2002,6 +2002,146 @@ def set_verbose(self, level: int, verify: bool = True) -> None: f"Testpmd failed to set verbose level to {level}." ) +@stop_then_start_port() +def _set_offload( +self, +port_id: int, +is_rx: bool, +offloads: OffloadCapability, +on: bool, +queue_id: int | None = None, +verify: bool = True, +) -> None: +"""Base method for configuring offloads on ports and queues. + +If `queue_id` is not specified then it is assumed that you want to set the offloads on the +port rather than a queue. +""" +for offload in type(offloads): +if offload not in offloads or offload.name is None: +continue +port_type = "rx" if is_rx else "tx" +command = [ +"port", +f"{port_id}", +f"{port_type}_offload", +f"{offload.name.lower()}", +f"{'on' if on else 'off'}", +] +if queue_id is not None: +# If modifying queues the command is "port rxq ..." +command.insert(2, f"{port_type}q {queue_id}") +else: +# If modifying a port the command is "port config ..." +command.insert(1, "config") + +self.send_command(" ".join(command)) +if verify: +# verification of ports has to be done based on if it was applied to all queues or +# not because the "Per Port" line doesn't get modified until the port is started. +current_offload_conf: OffloadConfiguration = ( +self.show_port_rx_offload_configuration(port_id) +if is_rx +else self.show_port_tx_offload_configuration(port_id) +) +# Casting to the generic type is required for mypy +queues_capabilities = cast(list[OffloadCapability], current_offload_conf.queues) +if queue_id is not None and len(current_offload_conf.queues) < queue_id + 1: +raise InternalError(f"Queue {queue_id} does not exist in testpmd") +capability_is_set = ( +len(current_offload_conf.queues) > 0 +and (queue_id is not None and offload in queues_capabilities[queue_id]) +or all(offload in conf for conf in queues_capabilities) +) +if capability_is_set != on: +self._logger.debug( +f"Test pmd failed to modify capabilities on port {port_id}:\n" +f"{current_offload_conf.queues}" +) +raise InteractiveCommandExecutionError( +f"Test pmd failed to {'add' if on else 'remove'} capability {offload.name} " +f"{'to' if on else 'from'} port {port_id}." +) + +def set_port_offload( +self, +port_id: int, +is_rx: bool, +offload: OffloadCapability, +on: bool, +verify: bool = True, +) -> None: +"""Configure Rx/Tx offload on a port. + +Args: +port_id: The ID of the port to set configure the offload on. 
+is_rx: A flag that signifies which type of offload to set. If :data:`True` an Rx +offload will be set, otherwise a Tx offload will be set. +offload: The offload to set on the port. +on: If :data:`True` the specified offload will be set turned on, otherwise the offload +will be turned off. +verify: If :data:`True` an additional command will be sent to check the configuration +
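The command assembly in _set_offload produces two testpmd syntaxes: "port config <port> rx_offload <name> on|off" for a whole port and "port <port> rxq <queue> rx_offload <name> on|off" for a single queue. A standalone re-implementation of just that assembly step, for illustration; the real method goes on to run the command and verify the resulting configuration.

def build_offload_command(
    port_id: int, is_rx: bool, offload_name: str, on: bool, queue_id: int | None = None
) -> str:
    """Mirror the command assembly in _set_offload (illustration only)."""
    port_type = "rx" if is_rx else "tx"
    command = ["port", f"{port_id}", f"{port_type}_offload", offload_name.lower(), "on" if on else "off"]
    if queue_id is not None:
        command.insert(2, f"{port_type}q {queue_id}")  # per-queue: "port 0 rxq 1 rx_offload ..."
    else:
        command.insert(1, "config")                    # per-port:  "port config 0 rx_offload ..."
    return " ".join(command)


assert build_offload_command(0, True, "VLAN_STRIP", True) == "port config 0 rx_offload vlan_strip on"
assert build_offload_command(0, True, "VLAN_STRIP", False, queue_id=1) == "port 0 rxq 1 rx_offload vlan_strip off"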
[PATCH v2 5/5] dts: add test suite for RX and TX offloads
From: Jeremy Spewock This patch adds a test suite that ports over and expands upon functionality provided in the RxTx test suite in Old DTS. This test suite provides convenience methods and decorators in an attempt to reduce code duplication when developers are tasked with testing the same offloaded functionality through 3 different mediums (passed on the command-line, configured on a port at runtime, and configured on queues at runtime). Signed-off-by: Jeremy Spewock --- dts/framework/config/conf_yaml_schema.json | 3 +- dts/tests/TestSuite_rxtx_offload.py| 627 + 2 files changed, 629 insertions(+), 1 deletion(-) create mode 100644 dts/tests/TestSuite_rxtx_offload.py diff --git a/dts/framework/config/conf_yaml_schema.json b/dts/framework/config/conf_yaml_schema.json index f02a310bb5..c1243ea5d8 100644 --- a/dts/framework/config/conf_yaml_schema.json +++ b/dts/framework/config/conf_yaml_schema.json @@ -187,7 +187,8 @@ "enum": [ "hello_world", "os_udp", -"pmd_buffer_scatter" +"pmd_buffer_scatter", +"rxtx_offload" ] }, "test_target": { diff --git a/dts/tests/TestSuite_rxtx_offload.py b/dts/tests/TestSuite_rxtx_offload.py new file mode 100644 index 00..d994b44fc1 --- /dev/null +++ b/dts/tests/TestSuite_rxtx_offload.py @@ -0,0 +1,627 @@ +"""Rx/Tx offload configuration suite. + +The goal of this suite is to test the support for three different methods of offloading different +capabilities using testpmd: On a queue, on a port, and on the command-line. Support for configuring +the capability through different means is tested alongside verifying the functionality of the +capability when offloaded. Each of the three methods of setting the capability should be tested +using the same criteria to monitor for differences between the methods. + +Testing consists of enabling the capability if it wasn't passed in through the command-line, +verifying that the capability performs its expected task, then, in the general case, disabling the +capability and verifying that the same result is not achieved in a default state. Some cases do not +check the base case since their functionality is enabled by default without offloading the +capability (invalid checksum verification, for example). + +There should be test cases for each of the 3 configuration strategies for every offload that is +tested. Additionally, there are two test cases, one each for Rx and Tx, that validate the ability to enable +every offload that a device supports on its port without actually testing the functionality of the +offload. +""" + +import random +from typing import Callable, ClassVar, Protocol, TypeVar + +from scapy.layers.inet import IP, TCP, UDP # type: ignore[import-untyped] +from scapy.layers.l2 import Dot1Q, Ether # type: ignore[import-untyped] +from scapy.packet import Packet, Raw # type: ignore[import-untyped] +from typing_extensions import Unpack + +from framework.exception import InteractiveCommandExecutionError, TestCaseVerifyError +from framework.params.types import TestPmdParamsDict +from framework.remote_session.testpmd_shell import ( +FlowRule, +NicCapability, +OffloadCapability, +OLFlag, +RxOffloadCapability, +SimpleForwardingModes, +TestPmdShell, +TestPmdVerbosePacket, +TxOffloadCapability, +) +from framework.test_suite import TestSuite, func_test +from framework.testbed_model.capability import requires + +T = TypeVar("T") + + +class DecoratedFuncType(Protocol): +"""Protocol used to provide a useful typehint for methods that are decorated. 
+ +Methods decorated by :meth:`TestRxtxOffload.setup_testpmd` are able to pass kwargs into a :class:`TestPmdShell` upon creation and therefore have many non-obvious arguments. This type gives static type checkers the ability to unpack and expose all of those non-obvious arguments. """ + +def __call__( +self: T, +port_id: int, +no_set: bool = False, +modify_queues: bool = False, +no_invert: bool = False, +**kwargs: Unpack[TestPmdParamsDict], +) -> None: +"""Function stub to create callable type. + +Args: +test: Instance of the test suite class that the methods belong to. +port_id: ID of the port to set the offloads on. +no_set: Whether to enable the offload before testing or not. When :data:`True`, +the method will validate that the offload is already configured rather than +enabling it. This is used in test cases that enable the offload using the +command-line. Defaults to :data:`False`. +modify_queues: Whether to add offloads to individual queues or the entire port. +If :data:`True`, individual queues will be modified, otherwise the whole +port will. Defaults to :data:`False`. +
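The DecoratedFuncType idea is a general typing technique: a Protocol with an explicit __call__ signature describes what the decorated method accepts, so static checkers keep the keyword arguments visible after decoration. A toy example of the same technique outside the framework, with made-up names.

from typing import Callable, Protocol


class GreeterFunc(Protocol):
    """Callable type exposing the keyword arguments the decorator forwards."""

    def __call__(self, name: str, *, excited: bool = False) -> str: ...


def add_punctuation(func: Callable[[str], str]) -> GreeterFunc:
    """Toy decorator: checkers now see both `name` and `excited` on the result."""

    def wrapper(name: str, *, excited: bool = False) -> str:
        return func(name) + ("!" if excited else ".")

    return wrapper


@add_punctuation
def greet(name: str) -> str:
    return f"Hello {name}"


print(greet("DTS", excited=True))  # -> "Hello DTS!"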
[PATCH v1 1/1] dts: add send_packets to test suites and rework packet addressing
From: Jeremy Spewock Currently the only method provided in the test suite class for sending packets sends a single packet and then captures the results. There is, in some cases, a need to send multiple packets at once while not really needing to capture any traffic received back. The method to do this exists in the traffic generator already, but this patch exposes the method to test suites. This patch also updates the _adjust_addresses method of test suites so that addresses of packets are only modified if the developer did not configure them beforehand. This allows for developers to have more control over the content of their packets when sending them through the framework. Signed-off-by: Jeremy Spewock --- dts/framework/test_suite.py| 87 +++--- dts/framework/testbed_model/tg_node.py | 9 +++ 2 files changed, 75 insertions(+), 21 deletions(-) diff --git a/dts/framework/test_suite.py b/dts/framework/test_suite.py index 694b2eba65..11aaa0a93a 100644 --- a/dts/framework/test_suite.py +++ b/dts/framework/test_suite.py @@ -199,7 +199,7 @@ def send_packet_and_capture( Returns: A list of received packets. """ -packet = self._adjust_addresses(packet) +packet = self._adjust_addresses([packet])[0] return self.tg_node.send_packet_and_capture( packet, self._tg_port_egress, @@ -208,6 +208,18 @@ def send_packet_and_capture( duration, ) +def send_packets( +self, +packets: list[Packet], +) -> None: +"""Send packets using the traffic generator and do not capture received traffic. + +Args: +packets: Packets to send. +""" +packets = self._adjust_addresses(packets) +self.tg_node.send_packets(packets, self._tg_port_egress) + def get_expected_packet(self, packet: Packet) -> Packet: """Inject the proper L2/L3 addresses into `packet`. @@ -217,41 +229,74 @@ def get_expected_packet(self, packet: Packet) -> Packet: Returns: `packet` with injected L2/L3 addresses. """ -return self._adjust_addresses(packet, expected=True) +return self._adjust_addresses([packet], expected=True)[0] -def _adjust_addresses(self, packet: Packet, expected: bool = False) -> Packet: +def _adjust_addresses(self, packets: list[Packet], expected: bool = False) -> list[Packet]: """L2 and L3 address additions in both directions. +Packets in `packets` will be directly modified in this method. The returned list of packets +however will be copies of the modified packets in order to keep the two lists distinct. + +Only missing addresses are added to packets, existing addresses will not be overridden. If +any packet in `packets` has multiple IP layers (using GRE, for example) only the inner-most +IP layer will have its addresses adjusted. + Assumptions: Two links between SUT and TG, one link is TG -> SUT, the other SUT -> TG. Args: -packet: The packet to modify. +packets: The packets to modify. expected: If :data:`True`, the direction is SUT -> TG, otherwise the direction is TG -> SUT. + +Returns: +A list containing copies of all packets in `packets` after modification. """ -if expected: -# The packet enters the TG from SUT -# update l2 addresses -packet.src = self._sut_port_egress.mac_address -packet.dst = self._tg_port_ingress.mac_address +ret_packets = [] +for packet in packets: +# The fields parameter of a packet does not include fields of the payload, so this can +# only be the Ether src/dst. 
+pkt_src_is_unset = "src" not in packet.fields +pkt_dst_is_unset = "dst" not in packet.fields +num_ip_layers = packet.layers().count(IP) + +# Update the last IP layer if there are multiple to account for GRE addressing (the +# framework should be modifying the packet address instead of the tunnel). +l3_to_use = packet.getlayer(IP, num_ip_layers) +if num_ip_layers > 0: +ip_src_is_unset = "src" not in l3_to_use.fields +ip_dst_is_unset = "dst" not in l3_to_use.fields +else: +ip_src_is_unset = None +ip_dst_is_unset = None -# The packet is routed from TG egress to TG ingress -# update l3 addresses -packet.payload.src = self._tg_ip_address_egress.ip.exploded -packet.payload.dst = self._tg_ip_address_ingress.ip.exploded -else: -# The packet leaves TG towards SUT # update l2 addresses -packet.src = self._tg_port_egress.mac_address -packet.dst = self._sut_port_i
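The "only fill in what the developer left unset" check relies on two scapy behaviors: explicitly assigned fields show up in a layer's .fields dict, and getlayer(IP, n) returns the n-th IP layer, which lets the framework target the innermost header of a tunneled packet. A small demonstration of both; the addresses are arbitrary documentation values.

from scapy.layers.inet import IP
from scapy.layers.l2 import Ether

# Scapy only records explicitly assigned values in `.fields`, which is what the
# reworked _adjust_addresses uses to avoid overriding developer-set addresses.
default_pkt = Ether() / IP()
custom_pkt = Ether(dst="aa:bb:cc:dd:ee:ff") / IP(dst="192.0.2.1")

assert "dst" not in default_pkt.fields  # unset -> the framework may fill it in
assert "dst" in custom_pkt.fields       # set by the developer -> left alone

# With multiple IP layers (e.g. GRE), the innermost one is the one to adjust:
tunneled = Ether() / IP(dst="198.51.100.1") / IP(dst="203.0.113.1")
inner = tunneled.getlayer(IP, tunneled.layers().count(IP))
assert inner.dst == "203.0.113.1"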
[PATCH v1 0/1] dts: adjust packet addressing and sending
From: Jeremy Spewock This patch was originally part of the dynamic queue test suite series, but since other patches require it, this series breaks it out into an independent patch so that it can be prioritized. This patch is slightly different from the one in the dynamic queue series in that this version supports address updating on packets with multiple IP layers by changing how the framework checks whether or not the developer updated the addresses. Jeremy Spewock (1): dts: add send_packets to test suites and rework packet addressing dts/framework/test_suite.py| 87 +++--- dts/framework/testbed_model/tg_node.py | 9 +++ 2 files changed, 75 insertions(+), 21 deletions(-) -- 2.46.0
[PATCH v4 1/2] dts: add port queue modification and forwarding stats to testpmd
From: Jeremy Spewock This patch adds methods for querying and modifying port queue state and configuration. In addition to this, it also adds the ability to capture the forwarding statistics that get outputted when you send the "stop" command in testpmd. Querying of port queue information is handled through a TextParser dataclass in case there is future need for using more of the output from the command used to query the information. Signed-off-by: Jeremy Spewock --- dts/framework/remote_session/testpmd_shell.py | 233 +- 1 file changed, 231 insertions(+), 2 deletions(-) diff --git a/dts/framework/remote_session/testpmd_shell.py b/dts/framework/remote_session/testpmd_shell.py index 43e9f56517..b545040638 100644 --- a/dts/framework/remote_session/testpmd_shell.py +++ b/dts/framework/remote_session/testpmd_shell.py @@ -19,7 +19,7 @@ from dataclasses import dataclass, field from enum import Flag, auto from pathlib import PurePath -from typing import ClassVar +from typing import ClassVar, cast from typing_extensions import Self, Unpack @@ -541,6 +541,56 @@ class TestPmdPort(TextParser): ) +@dataclass +class TestPmdPortQueue(TextParser): +"""Dataclass representation of the common parts of the testpmd `show rxq/txq info` commands.""" + +#: +prefetch_threshold: int = field(metadata=TextParser.find_int(r"prefetch threshold: (\d+)")) +#: +host_threshold: int = field(metadata=TextParser.find_int(r"host threshold: (\d+)")) +#: +writeback_threshold: int = field(metadata=TextParser.find_int(r"writeback threshold: (\d+)")) +#: +free_threshold: int = field(metadata=TextParser.find_int(r"free threshold: (\d+)")) +#: +deferred_start: bool = field(metadata=TextParser.find("deferred start: on")) +#: The number of RXD/TXDs is just the ring size of the queue. +ring_size: int = field(metadata=TextParser.find_int(r"Number of (?:RXDs|TXDs): (\d+)")) +#: +is_queue_started: bool = field(metadata=TextParser.find("queue state: started")) +#: +burst_mode: str | None = field( +default=None, metadata=TextParser.find(r"Burst mode: ([^\r\n]+)") +) + + +@dataclass +class TestPmdTxPortQueue(TestPmdPortQueue): +"""Dataclass representation for testpmd `show txq info` command.""" + +#: +rs_threshold: int | None = field( +default=None, metadata=TextParser.find_int(r"RS threshold: (\d+)") +) + + +@dataclass +class TestPmdRxPortQueue(TestPmdPortQueue): +"""Dataclass representation for testpmd `show rxq info` command.""" + +#: +mempool: str | None = field(default=None, metadata=TextParser.find(r"Mempool: ([^\r\n]+)")) +#: +can_drop_packets: bool | None = field( +default=None, metadata=TextParser.find(r"drop packets: on") +) +#: +is_scattering_packets: bool | None = field( +default=None, metadata=TextParser.find(r"scattered packets: on") +) + + @dataclass class TestPmdPortStats(TextParser): """Port statistics.""" @@ -645,7 +695,7 @@ def start(self, verify: bool = True) -> None: "Not all ports came up after starting packet forwarding in testpmd." ) -def stop(self, verify: bool = True) -> None: +def stop(self, verify: bool = True) -> str: """Stop packet forwarding. Args: @@ -653,6 +703,9 @@ def stop(self, verify: bool = True) -> None: forwarding was stopped successfully or not started. If neither is found, it is considered an error. +Returns: +Output gathered from sending the stop command. + Raises: InteractiveCommandExecutionError: If `verify` is :data:`True` and the command to stop forwarding results in an error. 
@@ -665,6 +718,7 @@ def stop(self, verify: bool = True) -> None: ): self._logger.debug(f"Failed to stop packet forwarding: \n{stop_cmd_output}") raise InteractiveCommandExecutionError("Testpmd failed to stop packet forwarding.") +return stop_cmd_output def get_devices(self) -> list[TestPmdDevice]: """Get a list of device names that are known to testpmd. @@ -806,6 +860,181 @@ def show_port_stats(self, port_id: int) -> TestPmdPortStats: return TestPmdPortStats.parse(output) +def show_port_queue_info( +self, port_id: int, queue_id: int, is_rx_queue: bool +) -> TestPmdPortQueue: +"""Get the info for a queue on a given port. + +Args: +port_id: ID of the port where the queue resides. +queue_id: ID of the queue to query. +is_rx_queue: Whether to check an RX or TX queue. If :data:`True` an RX queue will be +queried, otherwise a TX queue will be queried. + +Raises: +InteractiveCommandExecutionError: If there is a failure when getting the info for the +queue. + +
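A hedged usage sketch for the additions above; it assumes an open TestPmdShell. The queue-info call returns the parsed dataclass, and stop() now hands back its raw output so the forwarding statistics testpmd prints on "stop" can be post-processed by the caller.

from framework.remote_session.testpmd_shell import TestPmdShell


def queue_info_and_forwarding_stats(testpmd: TestPmdShell) -> str:
    """Query Rx queue 0 of port 0, then return the output testpmd prints on `stop`."""
    rxq = testpmd.show_port_queue_info(port_id=0, queue_id=0, is_rx_queue=True)
    print(f"rxq 0: ring size {rxq.ring_size}, started: {rxq.is_queue_started}")

    testpmd.start()
    # ... traffic would be generated against the port under test here ...
    return testpmd.stop()  # raw output that includes the forwarding statistics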
[RFC v2 0/2] add DTS smoke tests
From: Jeremy Spewock This update to the RFC adds a few more things such as physical devices and virtual devices to the config, driver checks in the smoke tests, and interactive shell handling. Interactive shells are handled by creating two SSH connections upon connecting to the SUT, one of which is for normal non-interactive operations and another (using paramiko) that is reserved for interactive shells. This interactive session is stored in the main session of the SUT and can be piloted using the interactive handler. This handler creates a new channel off the session and provides methods for sending commands both blindly and by expecting output. Then, classes are made for individual DPDK applications that use one of these interactive handlers and know how to handle that application's specific behaviors and outputs. Old RFC: * http://patches.dpdk.org/project/dpdk/patch/20230413175415.7683-3-jspew...@iol.unh.edu/ Jeremy Spewock (1): dts: added paramiko to dependencies Jeremy Spewock (1): dts: add smoke tests dts/conf.yaml | 9 + dts/framework/config/__init__.py | 21 +++ dts/framework/config/conf_yaml_schema.json| 32 +++- dts/framework/dts.py | 19 ++- dts/framework/exception.py| 11 ++ dts/framework/remote_session/os_session.py| 6 +- .../remote_session/remote/__init__.py | 28 +++ dts/framework/test_result.py | 13 +- dts/framework/test_suite.py | 24 ++- dts/framework/testbed_model/__init__.py | 5 + .../interactive_apps/__init__.py | 6 + .../interactive_apps/interactive_command.py | 57 +++ .../interactive_apps/testpmd_driver.py| 24 +++ dts/framework/testbed_model/node.py | 2 + dts/framework/testbed_model/sut_node.py | 6 + dts/poetry.lock | 160 ++ dts/pyproject.toml| 1 + dts/tests/TestSuite_smoke_tests.py| 94 ++ 18 files changed, 472 insertions(+), 46 deletions(-) create mode 100644 dts/framework/testbed_model/interactive_apps/__init__.py create mode 100644 dts/framework/testbed_model/interactive_apps/interactive_command.py create mode 100644 dts/framework/testbed_model/interactive_apps/testpmd_driver.py create mode 100644 dts/tests/TestSuite_smoke_tests.py -- 2.40.1
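For readers unfamiliar with the paramiko side of this, the interactive handler described above boils down to keeping one SSH transport open, opening a fresh channel per application shell, and reading from that channel until expected output appears. A minimal, hedged sketch of the mechanism; the hostname and credentials are the placeholders from the sample conf.yaml, and the class names in the actual series differ.

import paramiko

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect("sut1.change.me.localhost", username="root", password="")

# One channel per interactive application, opened off the shared transport.
channel = client.get_transport().open_session()
channel.get_pty()
channel.invoke_shell()
stdin = channel.makefile_stdin("w")
stdout = channel.makefile("r")

# Sending "blindly" is just a write; "expecting output" is reading until a
# known prompt or marker shows up in the combined output stream.
stdin.write("echo ready\n")
stdin.flush()
while "ready" not in stdout.readline():
    pass
client.close()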
[RFC v2 1/2] dts: add smoke tests
From: Jeremy Spewock Adds a new test suite for running smoke tests that verify general configuration aspects of the system under test. If any of these tests fail, the DTS execution terminates as part of a "fail-fast" model. Signed-off-by: Jeremy Spewock --- dts/conf.yaml | 9 ++ dts/framework/config/__init__.py | 21 + dts/framework/config/conf_yaml_schema.json| 32 ++- dts/framework/dts.py | 19 +++- dts/framework/exception.py| 11 +++ dts/framework/remote_session/os_session.py| 6 +- .../remote_session/remote/__init__.py | 28 ++ dts/framework/test_result.py | 13 ++- dts/framework/test_suite.py | 24 - dts/framework/testbed_model/__init__.py | 5 + .../interactive_apps/__init__.py | 6 ++ .../interactive_apps/interactive_command.py | 57 +++ .../interactive_apps/testpmd_driver.py| 24 + dts/framework/testbed_model/node.py | 2 + dts/framework/testbed_model/sut_node.py | 6 ++ dts/tests/TestSuite_smoke_tests.py| 94 +++ 16 files changed, 348 insertions(+), 9 deletions(-) create mode 100644 dts/framework/testbed_model/interactive_apps/__init__.py create mode 100644 dts/framework/testbed_model/interactive_apps/interactive_command.py create mode 100644 dts/framework/testbed_model/interactive_apps/testpmd_driver.py create mode 100644 dts/tests/TestSuite_smoke_tests.py diff --git a/dts/conf.yaml b/dts/conf.yaml index a9bd8a3e..042ef954 100644 --- a/dts/conf.yaml +++ b/dts/conf.yaml @@ -10,13 +10,22 @@ executions: compiler_wrapper: ccache perf: false func: true +nics: #physical devices to be used for testing + - addresses: + - ":11:00.0" + - ":11:00.1" +driver: "i40e" +vdevs: #names of virtual devices to be used for testing + - "crypto_openssl" test_suites: + - smoke_tests - hello_world system_under_test: "SUT 1" nodes: - name: "SUT 1" hostname: sut1.change.me.localhost user: root +password: "" arch: x86_64 os: linux lcores: "" diff --git a/dts/framework/config/__init__.py b/dts/framework/config/__init__.py index ebb0823f..f3b8b6e3 100644 --- a/dts/framework/config/__init__.py +++ b/dts/framework/config/__init__.py @@ -106,6 +106,21 @@ def from_dict(d: dict) -> "NodeConfiguration": hugepages=hugepage_config, ) +@dataclass(slots=True, frozen=True) +class NICConfiguration: +addresses: list[str] +driver: str + +@staticmethod +def from_dict(d:dict) -> "NICConfiguration": +return NICConfiguration( +addresses=[addr for addr in d.get("addresses", [])], +driver=d.get("driver") +) +@staticmethod +def from_list(l:list[dict]) -> list["NICConfiguration"]: +return [] + [NICConfiguration.from_dict(x) for x in l] + @dataclass(slots=True, frozen=True) class BuildTargetConfiguration: @@ -157,6 +172,8 @@ class ExecutionConfiguration: func: bool test_suites: list[TestSuiteConfig] system_under_test: NodeConfiguration +nics: list[NICConfiguration] +vdevs: list[str] @staticmethod def from_dict(d: dict, node_map: dict) -> "ExecutionConfiguration": @@ -166,7 +183,9 @@ def from_dict(d: dict, node_map: dict) -> "ExecutionConfiguration": test_suites: list[TestSuiteConfig] = list( map(TestSuiteConfig.from_dict, d["test_suites"]) ) +nic_conf: NICConfiguration = NICConfiguration.from_list(d['nics']) sut_name = d["system_under_test"] +list_of_vdevs = d["vdevs"] assert sut_name in node_map, f"Unknown SUT {sut_name} in execution {d}" return ExecutionConfiguration( @@ -174,7 +193,9 @@ def from_dict(d: dict, node_map: dict) -> "ExecutionConfiguration": perf=d["perf"], func=d["func"], test_suites=test_suites, +nics=nic_conf, system_under_test=node_map[sut_name], +vdevs=list_of_vdevs ) diff --git a/dts/framework/config/conf_yaml_schema.json 
b/dts/framework/config/conf_yaml_schema.json index ca2d4a1e..603859de 100644 --- a/dts/framework/config/conf_yaml_schema.json +++ b/dts/framework/config/conf_yaml_schema.json @@ -40,6 +40,18 @@ "mscv" ] }, +"single_nic" : { + "type":"object", + "description": "an object that holds nic information", + "properties": { +"addresses": { + "type":"array", + "items": { +"type":"string" + } +} + } +}, "build_target": { "type": "object", "description": "Targets supported by DTS", @@ -97,7 +109,8 @@ "test_suite": { "type": "string", "enum": [ -"hello_world" +"hello_world", +"smoke_
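A hedged sketch of how the new nics and vdevs sections of conf.yaml map onto the configuration classes above, using the values from the sample config; the dictionary is hand-built here rather than loaded from YAML.

from framework.config import NICConfiguration

execution_dict = {
    "nics": [
        {"addresses": [":11:00.0", ":11:00.1"], "driver": "i40e"},
    ],
    "vdevs": ["crypto_openssl"],
}

nics = NICConfiguration.from_list(execution_dict["nics"])
assert nics[0].driver == "i40e"
assert len(nics[0].addresses) == 2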
[RFC v2 2/2] dts: added paramiko to dependencies
From: Jeremy Spewock added paramiko to the dependency files Signed-off-by: Jeremy Spewock --- dts/poetry.lock| 160 ++--- dts/pyproject.toml | 1 + 2 files changed, 124 insertions(+), 37 deletions(-) diff --git a/dts/poetry.lock b/dts/poetry.lock index 0b2a007d..d5b41550 100644 --- a/dts/poetry.lock +++ b/dts/poetry.lock @@ -1,20 +1,33 @@ [[package]] name = "attrs" -version = "22.1.0" +version = "23.1.0" description = "Classes Without Boilerplate" category = "main" optional = false -python-versions = ">=3.5" +python-versions = ">=3.7" [package.extras] -dev = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "mypy (>=0.900,!=0.940)", "pytest-mypy-plugins", "zope.interface", "furo", "sphinx", "sphinx-notfound-page", "pre-commit", "cloudpickle"] -docs = ["furo", "sphinx", "zope.interface", "sphinx-notfound-page"] -tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "mypy (>=0.900,!=0.940)", "pytest-mypy-plugins", "zope.interface", "cloudpickle"] -tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "mypy (>=0.900,!=0.940)", "pytest-mypy-plugins", "cloudpickle"] +cov = ["attrs", "coverage[toml] (>=5.3)"] +dev = ["attrs", "pre-commit"] +docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] +tests = ["attrs", "zope-interface"] +tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest-mypy-plugins", "pytest-xdist", "pytest (>=4.3.0)"] + +[[package]] +name = "bcrypt" +version = "4.0.1" +description = "Modern password hashing for your software and your servers" +category = "main" +optional = false +python-versions = ">=3.6" + +[package.extras] +tests = ["pytest (>=3.2.1,!=3.3.0)"] +typecheck = ["mypy"] [[package]] name = "black" -version = "22.10.0" +version = "22.12.0" description = "The uncompromising code formatter." category = "dev" optional = false @@ -33,6 +46,17 @@ d = ["aiohttp (>=3.7.4)"] jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] uvloop = ["uvloop (>=0.15.2)"] +[[package]] +name = "cffi" +version = "1.15.1" +description = "Foreign Function Interface for Python calling C code." +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +pycparser = "*" + [[package]] name = "click" version = "8.1.3" @@ -52,18 +76,39 @@ category = "dev" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +[[package]] +name = "cryptography" +version = "40.0.2" +description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." +category = "main" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +cffi = ">=1.12" + +[package.extras] +docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] +docstest = ["pyenchant (>=1.6.11)", "twine (>=1.12.0)", "sphinxcontrib-spelling (>=4.0.1)"] +pep8test = ["black", "ruff", "mypy", "check-manifest"] +sdist = ["setuptools-rust (>=0.11.4)"] +ssh = ["bcrypt (>=3.1.5)"] +test = ["pytest (>=6.2.0)", "pytest-shard (>=0.1.2)", "pytest-benchmark", "pytest-cov", "pytest-subtests", "pytest-xdist", "pretend", "iso8601"] +test-randomorder = ["pytest-randomly"] +tox = ["tox"] + [[package]] name = "isort" -version = "5.10.1" +version = "5.12.0" description = "A Python utility / library to sort Python imports." 
category = "dev" optional = false -python-versions = ">=3.6.1,<4.0" +python-versions = ">=3.8.0" [package.extras] -pipfile_deprecated_finder = ["pipreqs", "requirementslib"] -requirements_deprecated_finder = ["pipreqs", "pip-api"] -colors = ["colorama (>=0.4.3,<0.5.0)"] +colors = ["colorama (>=0.4.3)"] +requirements-deprecated-finder = ["pip-api", "pipreqs"] +pipfile-deprecated-finder = ["pip-shims (>=0.5.2)", "pipreqs", "requirementslib"] plugins = ["setuptools"] [[package]] @@ -87,7 +132,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" [[package]] name = "jsonschema" -version = "4.17.0" +version = "4.17.3" description = "An implementation of JSON Schema validation for Python" category = "main" optional = false @@ -129,15 +174,33 @@ reports = ["lxml"] [[package]] name = "mypy-extensions" -version = "0.4.3" -description = "Experimental type system extensions for programs checked with the mypy typechecker." +version = "1.0.0" +description = "Type system extensions for programs checked with the mypy type checker." category = "dev" optional = false -python-versions = "*" +python-versions = ">=3.5" + +[[package]] +name = "paramiko" +version = "3.1.0" +description = "SSH2 protocol library" +category = "main" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +bcrypt = ">=3.2" +cryptography = ">=3.3" +pynacl = ">=1.5" + +[package.extras] +all = ["pyasn1 (>=0.1.7)", "invoke (>=2.0)", "gssapi (>=1.4.1)", "pywin32 (>
[RFC v3 0/2] add DTS smoke tests
From: Jeremy Spewock This update to the RFC addresses comments received on the previous version about formatting and implementation. Things that are new to this patch series are: * New class similar to the RemoteSession that handles interactive remote sessions * A keepalive for the interactive SSH session * Proper paramiko error handling when connecting to the host * A factory within this new class for creating DPDK application drivers * An Enum that represents the different DPDK applications and their default paths * OS-agnostic path handling in the SmokeTests test suite * Stdout and stderr are combined for InteractiveShells * A way to move the stdout buffer pointer to the end to "empty" the buffer * Information gathering moved into respective classes (the test still exists in the test suite but this can now be removed assuming the new gathering method is sufficient) Previous RFCs: * v2: https://mails.dpdk.org/archives/dev/2023-May/267915.html * v1: https://mails.dpdk.org/archives/dev/2023-April/266580.html Jeremy Spewock (2): dts: add smoke tests dts: added paramiko to dependencies dts/conf.yaml | 8 + dts/framework/config/__init__.py | 88 ++ dts/framework/config/conf_yaml_schema.json| 32 +++- dts/framework/dts.py | 26 ++- dts/framework/exception.py| 12 ++ dts/framework/remote_session/__init__.py | 10 +- dts/framework/remote_session/os_session.py| 34 +++- dts/framework/remote_session/posix_session.py | 30 .../remote_session/remote/__init__.py | 12 ++ .../remote/interactive_remote_session.py | 113 + .../remote/interactive_shell.py | 98 +++ .../remote_session/remote/testpmd_shell.py| 58 +++ dts/framework/test_result.py | 38 - dts/framework/test_suite.py | 31 +++- dts/framework/testbed_model/node.py | 2 + dts/framework/testbed_model/sut_node.py | 110 +++- dts/poetry.lock | 160 ++ dts/pyproject.toml| 1 + dts/tests/TestSuite_smoke_tests.py| 109 19 files changed, 916 insertions(+), 56 deletions(-) create mode 100644 dts/framework/remote_session/remote/interactive_remote_session.py create mode 100644 dts/framework/remote_session/remote/interactive_shell.py create mode 100644 dts/framework/remote_session/remote/testpmd_shell.py create mode 100644 dts/tests/TestSuite_smoke_tests.py -- 2.40.1
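The enum-plus-factory idea in the list above can be pictured as a mapping from each DPDK application to its default path inside the build directory, with the factory joining that onto the remote build location before handing it to an interactive shell. A toy, hedged sketch; the enum members, paths, and function name are illustrative rather than the series' actual API.

from enum import Enum
from pathlib import PurePosixPath


class DPDKApp(Enum):
    """Illustrative enum of DPDK applications and their default in-build paths."""

    TESTPMD = PurePosixPath("app", "dpdk-testpmd")


def app_remote_path(app: DPDKApp, remote_build_dir: PurePosixPath) -> PurePosixPath:
    """Toy 'factory' step: resolve the path an interactive driver would launch."""
    return remote_build_dir / app.value


print(app_remote_path(DPDKApp.TESTPMD, PurePosixPath("/tmp/dpdk/build")))
# -> /tmp/dpdk/build/app/dpdk-testpmd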
[RFC v3 1/2] dts: add smoke tests
From: Jeremy Spewock Adds a new test suite for running smoke tests that verify general configuration aspects of the system under test. If any of these tests fail, the DTS execution terminates for that build target as part of a "fail-fast" model. Signed-off-by: Jeremy Spewock --- dts/conf.yaml | 8 ++ dts/framework/config/__init__.py | 88 ++ dts/framework/config/conf_yaml_schema.json| 32 - dts/framework/dts.py | 26 +++- dts/framework/exception.py| 12 ++ dts/framework/remote_session/__init__.py | 10 +- dts/framework/remote_session/os_session.py| 34 +- dts/framework/remote_session/posix_session.py | 30 + .../remote_session/remote/__init__.py | 12 ++ .../remote/interactive_remote_session.py | 113 ++ .../remote/interactive_shell.py | 98 +++ .../remote_session/remote/testpmd_shell.py| 58 + dts/framework/test_result.py | 38 +- dts/framework/test_suite.py | 31 - dts/framework/testbed_model/node.py | 2 + dts/framework/testbed_model/sut_node.py | 110 - dts/tests/TestSuite_smoke_tests.py| 109 + 17 files changed, 792 insertions(+), 19 deletions(-) create mode 100644 dts/framework/remote_session/remote/interactive_remote_session.py create mode 100644 dts/framework/remote_session/remote/interactive_shell.py create mode 100644 dts/framework/remote_session/remote/testpmd_shell.py create mode 100644 dts/tests/TestSuite_smoke_tests.py diff --git a/dts/conf.yaml b/dts/conf.yaml index a9bd8a3e..de537c06 100644 --- a/dts/conf.yaml +++ b/dts/conf.yaml @@ -10,7 +10,15 @@ executions: compiler_wrapper: ccache perf: false func: true +nics: #physical devices to be used for testing + - addresses: + - ":11:00.0" + - ":11:00.1" +driver: "i40e" +vdevs: #names of virtual devices to be used for testing + - "crypto_openssl" test_suites: + - smoke_tests - hello_world system_under_test: "SUT 1" nodes: diff --git a/dts/framework/config/__init__.py b/dts/framework/config/__init__.py index ebb0823f..a5840429 100644 --- a/dts/framework/config/__init__.py +++ b/dts/framework/config/__init__.py @@ -107,6 +107,46 @@ def from_dict(d: dict) -> "NodeConfiguration": ) +@dataclass(slots=True) +class NodeVersionInfo: +"""Class to hold important versions within the node. + +This class, unlike the NodeConfiguration class, cannot be generated at the start. +This is because we need to initialize a connection with the node before we can +collet the information needed in this class. Therefore, it cannot be a part of +the configuration class above. +""" + +os_name: str +os_version: str +kernel_version: str + +@staticmethod +def from_dict(d: dict): +return NodeVersionInfo( +os_name=d["os_name"], +os_version=d["os_version"], +kernel_version=d["kernel_version"], +) + + +@dataclass(slots=True, frozen=True) +class NICConfiguration: +addresses: list[str] +driver: str + +@staticmethod +def from_dict(d: dict) -> "NICConfiguration": +return NICConfiguration( +addresses=[addr for addr in d.get("addresses", [])], +driver=d.get("driver", ""), +) + +@staticmethod +def from_list(nics: list[dict]) -> list["NICConfiguration"]: +return [NICConfiguration.from_dict(x) for x in nics] + + @dataclass(slots=True, frozen=True) class BuildTargetConfiguration: arch: Architecture @@ -128,6 +168,24 @@ def from_dict(d: dict) -> "BuildTargetConfiguration": ) +@dataclass(slots=True) +class BuildTargetVersionInfo: +"""Class to hold important versions within the build target. + +This is very similar to the NodeVersionInfo class, it just instead holds information +for the build target. 
+""" + +dpdk_version: str +compiler_version: str + +@staticmethod +def from_dict(d: dict): +return BuildTargetVersionInfo( +dpdk_version=d["dpdk_version"], compiler_version=d["compiler_version"] +) + + class TestSuiteConfigDict(TypedDict): suite: str cases: list[str] @@ -157,6 +215,8 @@ class ExecutionConfiguration: func: bool test_suites: list[TestSuiteConfig] system_under_test: NodeConfiguration +nics: list[NICConfiguration] +vdevs: list[str] @staticmethod def from_dict(d: dict, node_map: dict) -> "ExecutionConfiguration": @@ -166,7 +226,9 @@ def from_dict(d: dict, node_map: dict) -> "ExecutionConfiguration": test_suites: list[TestSuiteConfig] = list( map(TestSuiteConfig.from_dict, d["test_suites"]) ) +nic_conf = NICConfiguration.from_list(
[RFC v3 2/2] dts: added paramiko to dependencies
From: Jeremy Spewock added paramiko to the dependency files Signed-off-by: Jeremy Spewock --- dts/poetry.lock| 160 ++--- dts/pyproject.toml | 1 + 2 files changed, 124 insertions(+), 37 deletions(-) diff --git a/dts/poetry.lock b/dts/poetry.lock index 0b2a007d..dfd9a240 100644 --- a/dts/poetry.lock +++ b/dts/poetry.lock @@ -1,20 +1,33 @@ [[package]] name = "attrs" -version = "22.1.0" +version = "23.1.0" description = "Classes Without Boilerplate" category = "main" optional = false -python-versions = ">=3.5" +python-versions = ">=3.7" [package.extras] -dev = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "mypy (>=0.900,!=0.940)", "pytest-mypy-plugins", "zope.interface", "furo", "sphinx", "sphinx-notfound-page", "pre-commit", "cloudpickle"] -docs = ["furo", "sphinx", "zope.interface", "sphinx-notfound-page"] -tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "mypy (>=0.900,!=0.940)", "pytest-mypy-plugins", "zope.interface", "cloudpickle"] -tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "mypy (>=0.900,!=0.940)", "pytest-mypy-plugins", "cloudpickle"] +cov = ["attrs", "coverage[toml] (>=5.3)"] +dev = ["attrs", "pre-commit"] +docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] +tests = ["attrs", "zope-interface"] +tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest-mypy-plugins", "pytest-xdist", "pytest (>=4.3.0)"] + +[[package]] +name = "bcrypt" +version = "4.0.1" +description = "Modern password hashing for your software and your servers" +category = "main" +optional = false +python-versions = ">=3.6" + +[package.extras] +tests = ["pytest (>=3.2.1,!=3.3.0)"] +typecheck = ["mypy"] [[package]] name = "black" -version = "22.10.0" +version = "22.12.0" description = "The uncompromising code formatter." category = "dev" optional = false @@ -33,6 +46,17 @@ d = ["aiohttp (>=3.7.4)"] jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] uvloop = ["uvloop (>=0.15.2)"] +[[package]] +name = "cffi" +version = "1.15.1" +description = "Foreign Function Interface for Python calling C code." +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +pycparser = "*" + [[package]] name = "click" version = "8.1.3" @@ -52,18 +76,39 @@ category = "dev" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +[[package]] +name = "cryptography" +version = "41.0.1" +description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." +category = "main" +optional = false +python-versions = ">=3.7" + +[package.dependencies] +cffi = ">=1.12" + +[package.extras] +docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] +docstest = ["pyenchant (>=1.6.11)", "twine (>=1.12.0)", "sphinxcontrib-spelling (>=4.0.1)"] +nox = ["nox"] +pep8test = ["black", "ruff", "mypy", "check-sdist"] +sdist = ["build"] +ssh = ["bcrypt (>=3.1.5)"] +test = ["pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist", "pretend"] +test-randomorder = ["pytest-randomly"] + [[package]] name = "isort" -version = "5.10.1" +version = "5.12.0" description = "A Python utility / library to sort Python imports." 
category = "dev" optional = false -python-versions = ">=3.6.1,<4.0" +python-versions = ">=3.8.0" [package.extras] -pipfile_deprecated_finder = ["pipreqs", "requirementslib"] -requirements_deprecated_finder = ["pipreqs", "pip-api"] -colors = ["colorama (>=0.4.3,<0.5.0)"] +colors = ["colorama (>=0.4.3)"] +requirements-deprecated-finder = ["pip-api", "pipreqs"] +pipfile-deprecated-finder = ["pip-shims (>=0.5.2)", "pipreqs", "requirementslib"] plugins = ["setuptools"] [[package]] @@ -87,7 +132,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" [[package]] name = "jsonschema" -version = "4.17.0" +version = "4.17.3" description = "An implementation of JSON Schema validation for Python" category = "main" optional = false @@ -129,15 +174,33 @@ reports = ["lxml"] [[package]] name = "mypy-extensions" -version = "0.4.3" -description = "Experimental type system extensions for programs checked with the mypy typechecker." +version = "1.0.0" +description = "Type system extensions for programs checked with the mypy type checker." category = "dev" optional = false -python-versions = "*" +python-versions = ">=3.5" + +[[package]] +name = "paramiko" +version = "3.2.0" +description = "SSH2 protocol library" +category = "main" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +bcrypt = ">=3.2" +cryptography = ">=3.3" +pynacl = ">=1.5" + +[package.extras] +all = ["pyasn1 (>=0.1.7)", "invoke (>=2.0)", "gssapi (>=1.4.1)", "pywin32 (>=2.1.8)"] +gssapi = ["pyasn1 (>=0.1.7)", "gssapi (>=1.4.1)", "pywin32 (>=2.1.8)"