
[ovs-dev,3/4] dpif-netdev: Add HXPS Tx queue mode

Message ID 20211124212400.70613-4-maxime.coquelin@redhat.com
State Changes Requested
Series dpif-netdev: Hash-based Tx packet steering

Checks

Context Check Description
ovsrobot/apply-robot success apply and check: success
ovsrobot/github-robot-_Build_and_Test success github build: passed

Commit Message

Maxime Coquelin Nov. 24, 2021, 9:23 p.m. UTC
This patch adds a new HXPS Tx mode that distributes the
traffic across all the Tx queues, regardless of the number
of PMD threads. It is useful for guests expecting traffic
to be distributed across all of their vCPUs.

The idea here is to re-use the 5-tuple hash of the packets,
already computed to build the flow batches (so it does not
provide flexibility on which fields are part of the hash).

There is also no user-configurable indirection table, given
the feature is transparent to the guest. The queue selection
is just a modulo operation between the packet hash and the
number of Tx queues.
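
For illustration, the selection boils down to a one-liner on the
output path (simplified sketch, using the same helpers as the patch
below):

    /* Pick the Tx queue from the packet's 5-tuple (RSS) hash. */
    tx_qid = dp_packet_get_rss_hash(packet) % netdev_n_txq(netdev);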

There are no (at least intentionally) functional changes
for the existing XPS and static modes. There should be no
noticeable performance change for these modes (only one
more branch in the hot path).

For the HXPS mode, performance may be impacted by the Tx
lock acquisition when multiple PMD threads are in use (as
in XPS mode) and by the second level of batching.

Regarding the batching, the existing Tx port output_pkts
is not modified. This means that at most NETDEV_MAX_BURST
packets can be batched across all the Tx queues. A second
level of batching is done in dp_netdev_pmd_flush_output_on_port(),
only for this HXPS mode.
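
For reference, that second level boils down to the following
(condensed from the dp_netdev_pmd_flush_output_on_port() hunk below;
'p' is the struct tx_port being flushed):

    int i, tx_qid;
    int n_txq = netdev_n_txq(p->port->netdev);

    /* Re-batch output_pkts per Tx queue based on the packet hash. */
    for (i = 0; i < dp_packet_batch_size(&p->output_pkts); i++) {
        struct dp_packet *packet = p->output_pkts.packets[i];

        tx_qid = dp_packet_get_rss_hash(packet) % n_txq;
        dp_packet_batch_add(&p->txq_pkts[tx_qid], packet);
    }

    /* Flush the batch of each Tx queue.  'true' tells netdev_send()
     * the queue may be shared between threads, so the Tx lock is
     * taken. */
    for (i = 0; i < n_txq; i++) {
        if (!dp_packet_batch_is_empty(&p->txq_pkts[i])) {
            netdev_send(p->port->netdev, i, &p->txq_pkts[i], true);
            dp_packet_batch_init(&p->txq_pkts[i]);
        }
    }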

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 Documentation/automake.mk           |  1 +
 Documentation/topics/dpdk/hxps.rst  | 51 +++++++++++++++++++
 Documentation/topics/dpdk/index.rst |  1 +
 lib/dpif-netdev.c                   | 78 +++++++++++++++++++++++------
 4 files changed, 117 insertions(+), 14 deletions(-)
 create mode 100644 Documentation/topics/dpdk/hxps.rst

Patch

diff --git a/Documentation/automake.mk b/Documentation/automake.mk
index 137cc57c5..c982207d5 100644
--- a/Documentation/automake.mk
+++ b/Documentation/automake.mk
@@ -33,6 +33,7 @@  DOC_SOURCE = \
 	Documentation/topics/datapath.rst \
 	Documentation/topics/design.rst \
 	Documentation/topics/dpdk/index.rst \
+	Documentation/topics/dpdk/hxps.rst \
 	Documentation/topics/dpdk/bridge.rst \
 	Documentation/topics/dpdk/jumbo-frames.rst \
 	Documentation/topics/dpdk/memory.rst \
diff --git a/Documentation/topics/dpdk/hxps.rst b/Documentation/topics/dpdk/hxps.rst
new file mode 100644
index 000000000..1395f88c3
--- /dev/null
+++ b/Documentation/topics/dpdk/hxps.rst
@@ -0,0 +1,51 @@ 
+..
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+      Convention for heading levels in Open vSwitch documentation:
+
+      =======  Heading 0 (reserved for the title in a document)
+      -------  Heading 1
+      ~~~~~~~  Heading 2
+      +++++++  Heading 3
+      '''''''  Heading 4
+
+      Avoid deeper levels because they do not render well.
+
+=============================
+Hash-based Tx packet steering
+=============================
+
+HXPS mode distributes the traffic across all the port transmit queues,
+regardless of the number of PMD threads. Queue selection is based on the
+5-tuple hash already computed to build the flow batches; the selected queue
+is the hash modulo the number of Tx queues of the port.
+
+HXPS may be used, for example, with vhost-user ports when the number of vCPUs
+and queues of the guest is greater than the number of PMD threads. Without
+HXPS, the Tx queues used would be limited to the number of PMD threads.
+
+Hash-based Tx packet steering may have an impact on performance, since Tx
+lock acquisition is required and a second level of batching is performed.
+
+This feature is disabled by default.
+
+Usage
+~~~~~
+
+To enable HXPS::
+
+    $ ovs-vsctl set Interface <iface> other_config:hxps=true
+
+To disable HXPS::
+
+    $ ovs-vsctl set Interface <iface> other_config:hxps=false
diff --git a/Documentation/topics/dpdk/index.rst b/Documentation/topics/dpdk/index.rst
index a5be5e344..ab6132357 100644
--- a/Documentation/topics/dpdk/index.rst
+++ b/Documentation/topics/dpdk/index.rst
@@ -39,3 +39,4 @@  DPDK Support
    /topics/dpdk/qos
    /topics/dpdk/jumbo-frames
    /topics/dpdk/memory
+   /topics/dpdk/hxps
diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 0407f30b3..966741751 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -388,12 +388,13 @@  struct dp_netdev_rxq {
 enum txq_mode {
     TXQ_MODE_STATIC,
     TXQ_MODE_XPS,
+    TXQ_MODE_HXPS,
 };
 
 /* A port in a netdev-based datapath. */
 struct dp_netdev_port {
     odp_port_t port_no;
-    enum txq_mode txq_mode;     /* static, XPS */
+    enum txq_mode txq_mode;     /* static, XPS, HXPS */
     bool need_reconfigure;      /* True if we should reconfigure netdev. */
     struct netdev *netdev;
     struct hmap_node node;      /* Node in dp_netdev's 'ports'. */
@@ -405,6 +406,7 @@  struct dp_netdev_port {
     bool emc_enabled;           /* If true EMC will be used. */
     char *type;                 /* Port type as requested by user. */
     char *rxq_affinity_list;    /* Requested affinity of rx queues. */
+    bool txq_hxps;              /* Tx HXPS mode will be used. */
 };
 
 static bool dp_netdev_flow_ref(struct dp_netdev_flow *);
@@ -440,6 +442,7 @@  struct tx_port {
     struct hmap_node node;
     long long flush_time;
     struct dp_packet_batch output_pkts;
+    struct dp_packet_batch *txq_pkts; /* Only for HXPS mode. */
     struct dp_netdev_rxq *output_pkts_rxqs[NETDEV_MAX_BURST];
 };
 
@@ -4435,6 +4438,7 @@  dpif_netdev_port_set_config(struct dpif *dpif, odp_port_t port_no,
     int error = 0;
     const char *affinity_list = smap_get(cfg, "pmd-rxq-affinity");
     bool emc_enabled = smap_get_bool(cfg, "emc-enable", true);
+    bool txq_hxps = smap_get_bool(cfg, "hxps", false);
 
     ovs_mutex_lock(&dp->port_mutex);
     error = get_port_by_number(dp, port_no, &port);
@@ -4476,19 +4480,28 @@  dpif_netdev_port_set_config(struct dpif *dpif, odp_port_t port_no,
     }
 
     /* Checking for RXq affinity changes. */
-    if (!netdev_is_pmd(port->netdev)
-        || nullable_string_is_equal(affinity_list, port->rxq_affinity_list)) {
-        goto unlock;
+    if (netdev_is_pmd(port->netdev)
+        && !nullable_string_is_equal(affinity_list, port->rxq_affinity_list)) {
+
+        error = dpif_netdev_port_set_rxq_affinity(port, affinity_list);
+        if (error) {
+            goto unlock;
+        }
+        free(port->rxq_affinity_list);
+        port->rxq_affinity_list = nullable_xstrdup(affinity_list);
+
+        dp_netdev_request_reconfigure(dp);
     }
 
-    error = dpif_netdev_port_set_rxq_affinity(port, affinity_list);
-    if (error) {
-        goto unlock;
+    if (txq_hxps != port->txq_hxps) {
+        port->txq_hxps = txq_hxps;
+        VLOG_INFO("%s: Txq HXPS mode has been %s.",
+                netdev_get_name(port->netdev),
+                (txq_hxps) ? "enabled" : "disabled");
+        dp_netdev_request_reconfigure(dp);
     }
-    free(port->rxq_affinity_list);
-    port->rxq_affinity_list = nullable_xstrdup(affinity_list);
 
-    dp_netdev_request_reconfigure(dp);
+
 unlock:
     ovs_mutex_unlock(&dp->port_mutex);
     return error;
@@ -4614,7 +4627,28 @@  dp_netdev_pmd_flush_output_on_port(struct dp_netdev_pmd_thread *pmd,
     output_cnt = dp_packet_batch_size(&p->output_pkts);
     ovs_assert(output_cnt > 0);
 
-    netdev_send(p->port->netdev, tx_qid, &p->output_pkts, concurrent_txqs);
+    if (p->port->txq_mode == TXQ_MODE_HXPS) {
+        int n_txq = netdev_n_txq(p->port->netdev);
+
+        /* Re-batch per Tx queue based on the packet hash. */
+        for (i = 0; i < output_cnt; i++) {
+            struct dp_packet *packet = p->output_pkts.packets[i];
+
+            tx_qid = dp_packet_get_rss_hash(packet) % n_txq;
+            dp_packet_batch_add(&p->txq_pkts[tx_qid], packet);
+        }
+
+        /* Flush the batch of each Tx queue. */
+        for (i = 0; i < n_txq; i++) {
+            if (dp_packet_batch_is_empty(&p->txq_pkts[i])) {
+                continue;
+            }
+            netdev_send(p->port->netdev, i, &p->txq_pkts[i], true);
+            dp_packet_batch_init(&p->txq_pkts[i]);
+        }
+    } else {
+        netdev_send(p->port->netdev, tx_qid, &p->output_pkts, concurrent_txqs);
+    }
     dp_packet_batch_init(&p->output_pkts);
 
     /* Update time of the next flush. */
@@ -5775,7 +5809,9 @@  reconfigure_datapath(struct dp_netdev *dp)
     HMAP_FOR_EACH (port, node, &dp->ports) {
         if (netdev_is_reconf_required(port->netdev)
             || ((port->txq_mode == TXQ_MODE_XPS)
-                != (netdev_n_txq(port->netdev) < wanted_txqs))) {
+                != (netdev_n_txq(port->netdev) < wanted_txqs))
+            || ((port->txq_mode == TXQ_MODE_HXPS)
+                != port->txq_hxps)) {
             port->need_reconfigure = true;
         }
     }
@@ -5810,8 +5846,13 @@  reconfigure_datapath(struct dp_netdev *dp)
             seq_change(dp->port_seq);
             port_destroy(port);
         } else {
-            port->txq_mode = (netdev_n_txq(port->netdev) < wanted_txqs) ?
-                TXQ_MODE_XPS : TXQ_MODE_STATIC;
+            if (port->txq_hxps && netdev_n_txq(port->netdev) > 1) {
+                port->txq_mode = TXQ_MODE_HXPS;
+            } else if (netdev_n_txq(port->netdev) < wanted_txqs) {
+                port->txq_mode = TXQ_MODE_XPS;
+            } else {
+                port->txq_mode = TXQ_MODE_STATIC;
+            }
         }
     }
 
@@ -6981,6 +7022,14 @@  dp_netdev_add_port_tx_to_pmd(struct dp_netdev_pmd_thread *pmd,
     tx->flush_time = 0LL;
     dp_packet_batch_init(&tx->output_pkts);
 
+    if (tx->port->txq_mode == TXQ_MODE_HXPS) {
+        int i, n_txq = netdev_n_txq(tx->port->netdev);
+        tx->txq_pkts = xzalloc(n_txq * sizeof *tx->txq_pkts);
+        for (i = 0; i < n_txq; i++) {
+            dp_packet_batch_init(&tx->txq_pkts[i]);
+        }
+    }
+
     hmap_insert(&pmd->tx_ports, &tx->node, hash_port_no(tx->port->port_no));
     pmd->need_reload = true;
 }
@@ -6993,6 +7042,7 @@  dp_netdev_del_port_tx_from_pmd(struct dp_netdev_pmd_thread *pmd,
     OVS_REQUIRES(pmd->port_mutex)
 {
     hmap_remove(&pmd->tx_ports, &tx->node);
+    free(tx->txq_pkts);
     free(tx);
     pmd->need_reload = true;
 }