From patchwork Thu Nov 5 07:42:14 2020
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Numan Siddique
X-Patchwork-Id: 1394768
Return-Path:
X-Original-To: incoming@patchwork.ozlabs.org
Delivered-To: patchwork-incoming@bilbo.ozlabs.org
Authentication-Results: ozlabs.org;
spf=pass (sender SPF authorized) smtp.mailfrom=openvswitch.org
(client-ip=140.211.166.136; helo=silver.osuosl.org;
envelope-from=ovs-dev-bounces@openvswitch.org; receiver=)
Authentication-Results: ozlabs.org;
dmarc=none (p=none dis=none) header.from=ovn.org
Received: from silver.osuosl.org (smtp3.osuosl.org [140.211.166.136])
(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
(No client certificate requested)
by ozlabs.org (Postfix) with ESMTPS id 4CRb8f1thqz9sTD
for ; Thu, 5 Nov 2020 18:42:41 +1100 (AEDT)
Received: from localhost (localhost [127.0.0.1])
by silver.osuosl.org (Postfix) with ESMTP id 3EF8D228D1;
Thu, 5 Nov 2020 07:42:39 +0000 (UTC)
X-Virus-Scanned: amavisd-new at osuosl.org
Received: from silver.osuosl.org ([127.0.0.1])
by localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)
with ESMTP id DOhb7Xq6+q+7; Thu, 5 Nov 2020 07:42:35 +0000 (UTC)
Received: from lists.linuxfoundation.org (lf-lists.osuosl.org [140.211.9.56])
by silver.osuosl.org (Postfix) with ESMTP id 487F72050D;
Thu, 5 Nov 2020 07:42:35 +0000 (UTC)
Received: from lf-lists.osuosl.org (localhost [127.0.0.1])
by lists.linuxfoundation.org (Postfix) with ESMTP id 2C3C3C0893;
Thu, 5 Nov 2020 07:42:35 +0000 (UTC)
X-Original-To: dev@openvswitch.org
Delivered-To: ovs-dev@lists.linuxfoundation.org
Received: from silver.osuosl.org (smtp3.osuosl.org [140.211.166.136])
by lists.linuxfoundation.org (Postfix) with ESMTP id 21F60C0889
for ; Thu, 5 Nov 2020 07:42:34 +0000 (UTC)
Received: from localhost (localhost [127.0.0.1])
by silver.osuosl.org (Postfix) with ESMTP id EFBFA20767
for ; Thu, 5 Nov 2020 07:42:33 +0000 (UTC)
X-Virus-Scanned: amavisd-new at osuosl.org
Received: from silver.osuosl.org ([127.0.0.1])
by localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)
with ESMTP id JRYBzfaSArif for ;
Thu, 5 Nov 2020 07:42:31 +0000 (UTC)
X-Greylist: domain auto-whitelisted by SQLgrey-1.7.6
Received: from relay9-d.mail.gandi.net (relay9-d.mail.gandi.net
[217.70.183.199])
by silver.osuosl.org (Postfix) with ESMTPS id 32E632050D
for ; Thu, 5 Nov 2020 07:42:31 +0000 (UTC)
X-Originating-IP: 115.99.213.209
Received: from nusiddiq.home.org.home.org (unknown [115.99.213.209])
(Authenticated sender: numans@ovn.org)
by relay9-d.mail.gandi.net (Postfix) with ESMTPSA id CBAB2FF806;
Thu, 5 Nov 2020 07:42:23 +0000 (UTC)
From: numans@ovn.org
To: dev@openvswitch.org
Date: Thu, 5 Nov 2020 13:12:14 +0530
Message-Id: <20201105074214.3793878-1-numans@ovn.org>
X-Mailer: git-send-email 2.28.0
In-Reply-To: <20201105074146.3793721-1-numans@ovn.org>
References: <20201105074146.3793721-1-numans@ovn.org>
MIME-Version: 1.0
Subject: [ovs-dev] [PATCH ovn v3 1/7] Add new table Load_Balancer in
Southbound database.
X-BeenThere: ovs-dev@openvswitch.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id:
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Errors-To: ovs-dev-bounces@openvswitch.org
Sender: "dev"
From: Numan Siddique
This patch adds a new table 'Load_Balancer' in SB DB and syncs the Load_Balancer table rows
from NB DB to SB DB. An upcoming patch will make use of this table for handling the
load balancer hairpin traffic.
Signed-off-by: Numan Siddique
---
northd/ovn-northd.c | 146 ++++++++++++++++++++++++++++++++++++++++++
ovn-sb.ovsschema | 27 +++++++-
ovn-sb.xml | 45 +++++++++++++
tests/ovn-northd.at | 87 +++++++++++++++++++++++++
utilities/ovn-sbctl.c | 3 +
5 files changed, 306 insertions(+), 2 deletions(-)
diff --git a/northd/ovn-northd.c b/northd/ovn-northd.c
index 684c2bd478..6bbe93befb 100644
--- a/northd/ovn-northd.c
+++ b/northd/ovn-northd.c
@@ -11872,6 +11872,141 @@ sync_dns_entries(struct northd_context *ctx, struct hmap *datapaths)
}
hmap_destroy(&dns_map);
}
+
+/*
+ * struct 'sync_lb_info' is used to sync the load balancer records between
+ * OVN Northbound db and Southbound db.
+ */
+struct sync_lb_info {
+ struct hmap_node hmap_node;
+ const struct nbrec_load_balancer *nlb; /* LB record in the NB db. */
+ const struct sbrec_load_balancer *slb; /* LB record in the SB db. */
+
+ /* Datapaths with which the LB entry is associated. */
+ const struct sbrec_datapath_binding **sbs;
+ size_t n_sbs;
+};
+
+static struct sync_lb_info *
+sync_lb_info_find(struct hmap *sync_lb_map, struct uuid *uuid)
+{
+ struct sync_lb_info *lb_info;
+ size_t hash = uuid_hash(uuid);
+ HMAP_FOR_EACH_WITH_HASH (lb_info, hmap_node, hash, sync_lb_map) {
+ if (uuid_equals(&lb_info->nlb->header_.uuid, uuid)) {
+ return lb_info;
+ }
+ }
+
+ return NULL;
+}
+
+static void
+sync_lb_entries(struct northd_context *ctx, struct hmap *datapaths)
+{
+ struct hmap lb_map = HMAP_INITIALIZER(&lb_map);
+ struct ovn_datapath *od;
+
+ /* Build a local hmap of lbs from the NB db. */
+ HMAP_FOR_EACH (od, key_node, datapaths) {
+ if (!od->nbs || !od->nbs->n_load_balancer) {
+ continue;
+ }
+
+ for (size_t i = 0; i < od->nbs->n_load_balancer; i++) {
+ struct sync_lb_info *lb_info =
+ sync_lb_info_find(
+ &lb_map, &od->nbs->load_balancer[i]->header_.uuid);
+
+ if (!lb_info) {
+ size_t hash = uuid_hash(
+ &od->nbs->load_balancer[i]->header_.uuid);
+ lb_info = xzalloc(sizeof *lb_info);
+ lb_info->nlb = od->nbs->load_balancer[i];
+ hmap_insert(&lb_map, &lb_info->hmap_node, hash);
+ }
+
+ lb_info->n_sbs++;
+ lb_info->sbs = xrealloc(lb_info->sbs,
+ lb_info->n_sbs * sizeof *lb_info->sbs);
+ lb_info->sbs[lb_info->n_sbs - 1] = od->sb;
+ }
+ }
+
+ /* Delete any stale SB load balancer rows. */
+ const struct sbrec_load_balancer *sbrec_lb, *next;
+ SBREC_LOAD_BALANCER_FOR_EACH_SAFE (sbrec_lb, next, ctx->ovnsb_idl) {
+ const char *nb_lb_uuid = smap_get(&sbrec_lb->external_ids, "lb_id");
+ struct uuid lb_uuid;
+ if (!nb_lb_uuid || !uuid_from_string(&lb_uuid, nb_lb_uuid)) {
+ sbrec_load_balancer_delete(sbrec_lb);
+ continue;
+ }
+
+ struct sync_lb_info *lb_info = sync_lb_info_find(&lb_map, &lb_uuid);
+ if (lb_info) {
+ lb_info->slb = sbrec_lb;
+ } else {
+ sbrec_load_balancer_delete(sbrec_lb);
+ }
+ }
+
+ /* Create SB Load balancer records if not present and sync
+ * the SB load balancer columns. */
+ struct sync_lb_info *lb_info;
+ HMAP_FOR_EACH (lb_info, hmap_node, &lb_map) {
+ if (!lb_info->slb) {
+ sbrec_lb = sbrec_load_balancer_insert(ctx->ovnsb_txn);
+ lb_info->slb = sbrec_lb;
+ char *lb_id = xasprintf(
+ UUID_FMT, UUID_ARGS(&lb_info->nlb->header_.uuid));
+ const struct smap external_ids =
+ SMAP_CONST1(&external_ids, "lb_id", lb_id);
+ sbrec_load_balancer_set_external_ids(sbrec_lb, &external_ids);
+ free(lb_id);
+ }
+
+ /* Set the datapaths and other columns. If nothing has changed, then
+ * this will be a no-op.
+ */
+ sbrec_load_balancer_set_datapaths(
+ lb_info->slb,
+ (struct sbrec_datapath_binding **)lb_info->sbs,
+ lb_info->n_sbs);
+
+ sbrec_load_balancer_set_name(lb_info->slb, lb_info->nlb->name);
+ sbrec_load_balancer_set_vips(lb_info->slb, &lb_info->nlb->vips);
+ sbrec_load_balancer_set_protocol(lb_info->slb, lb_info->nlb->protocol);
+ }
+
+ /* Set the list of associated load balancers to a logical switch
+ * datapath binding in the SB DB. */
+ HMAP_FOR_EACH (od, key_node, datapaths) {
+ if (!od->nbs || !od->nbs->n_load_balancer) {
+ continue;
+ }
+
+ const struct sbrec_load_balancer **lbs =
+ xmalloc(od->nbs->n_load_balancer * sizeof *lbs);
+ for (size_t i = 0; i < od->nbs->n_load_balancer; i++) {
+ lb_info = sync_lb_info_find(
+ &lb_map, &od->nbs->load_balancer[i]->header_.uuid);
+ ovs_assert(lb_info);
+ lbs[i] = lb_info->slb;
+ }
+
+ sbrec_datapath_binding_set_load_balancers(
+ od->sb, (struct sbrec_load_balancer **)lbs,
+ od->nbs->n_load_balancer);
+ free(lbs);
+ }
+
+ HMAP_FOR_EACH_POP (lb_info, hmap_node, &lb_map) {
+ free(lb_info->sbs);
+ free(lb_info);
+ }
+ hmap_destroy(&lb_map);
+}
static void
destroy_datapaths_and_ports(struct hmap *datapaths, struct hmap *ports,
@@ -12243,6 +12378,7 @@ ovnnb_db_run(struct northd_context *ctx,
sync_port_groups(ctx, &port_groups);
sync_meters(ctx);
sync_dns_entries(ctx, datapaths);
+ sync_lb_entries(ctx, datapaths);
destroy_ovn_lbs(&lbs);
hmap_destroy(&lbs);
@@ -13003,6 +13139,8 @@ main(int argc, char *argv[])
ovsdb_idl_add_table(ovnsb_idl_loop.idl, &sbrec_table_datapath_binding);
add_column_noalert(ovnsb_idl_loop.idl,
&sbrec_datapath_binding_col_tunnel_key);
+ add_column_noalert(ovnsb_idl_loop.idl,
+ &sbrec_datapath_binding_col_load_balancers);
add_column_noalert(ovnsb_idl_loop.idl,
&sbrec_datapath_binding_col_external_ids);
@@ -13170,6 +13308,14 @@ main(int argc, char *argv[])
add_column_noalert(ovnsb_idl_loop.idl,
&sbrec_service_monitor_col_external_ids);
+ ovsdb_idl_add_table(ovnsb_idl_loop.idl, &sbrec_table_load_balancer);
+ add_column_noalert(ovnsb_idl_loop.idl, &sbrec_load_balancer_col_datapaths);
+ add_column_noalert(ovnsb_idl_loop.idl, &sbrec_load_balancer_col_name);
+ add_column_noalert(ovnsb_idl_loop.idl, &sbrec_load_balancer_col_vips);
+ add_column_noalert(ovnsb_idl_loop.idl, &sbrec_load_balancer_col_protocol);
+ add_column_noalert(ovnsb_idl_loop.idl,
+ &sbrec_load_balancer_col_external_ids);
+
struct ovsdb_idl_index *sbrec_chassis_by_name
= chassis_index_create(ovnsb_idl_loop.idl);
diff --git a/ovn-sb.ovsschema b/ovn-sb.ovsschema
index d1c506a22c..7db6c6a4dd 100644
--- a/ovn-sb.ovsschema
+++ b/ovn-sb.ovsschema
@@ -1,7 +1,7 @@
{
"name": "OVN_Southbound",
- "version": "2.10.0",
- "cksum": "2548342632 22615",
+ "version": "2.11.0",
+ "cksum": "1470439925 23814",
"tables": {
"SB_Global": {
"columns": {
@@ -152,6 +152,11 @@
"type": {"key": {"type": "integer",
"minInteger": 1,
"maxInteger": 16777215}}},
+ "load_balancers": {"type": {"key": {"type": "uuid",
+ "refTable": "Load_Balancer",
+ "refType": "weak"},
+ "min": 0,
+ "max": "unlimited"}},
"external_ids": {
"type": {"key": "string", "value": "string",
"min": 0, "max": "unlimited"}}},
@@ -447,6 +452,24 @@
"type": {"key": "string", "value": "string",
"min": 0, "max": "unlimited"}}},
"indexes": [["logical_port", "ip", "port", "protocol"]],
+ "isRoot": true},
+ "Load_Balancer": {
+ "columns": {
+ "name": {"type": "string"},
+ "vips": {
+ "type": {"key": "string", "value": "string",
+ "min": 0, "max": "unlimited"}},
+ "protocol": {
+ "type": {"key": {"type": "string",
+ "enum": ["set", ["tcp", "udp", "sctp"]]},
+ "min": 0, "max": 1}},
+ "datapaths": {
+ "type": {"key": {"type": "uuid",
+ "refTable": "Datapath_Binding"},
+ "min": 1, "max": "unlimited"}},
+ "external_ids": {
+ "type": {"key": "string", "value": "string",
+ "min": 0, "max": "unlimited"}}},
"isRoot": true}
}
}
diff --git a/ovn-sb.xml b/ovn-sb.xml
index b1480f2186..bdd41c1f97 100644
--- a/ovn-sb.xml
+++ b/ovn-sb.xml
@@ -2497,6 +2497,12 @@ tcp.flags = RST;
constructed for each supported encapsulation.
+
+
+ Load balancers associated with the datapath.
+
+
+
Each row in is associated with some
@@ -4126,4 +4132,43 @@ tcp.flags = RST;
+
+
+
+ Each row represents a load balancer.
+
+
+
+ A name for the load balancer. This name has no special meaning or
+ purpose other than to provide convenience for human interaction with
+ the ovn-nb database.
+
+
+
+ A map of virtual IP addresses (and an optional port number with
+ :
as a separator) associated with this load balancer and
+ their corresponding endpoint IP addresses (and optional port numbers
+ with :
as separators) separated by commas.
+
+
+
+
+ Valid protocols are tcp
, udp
, or
+ sctp
. This column is useful when a port number is
+ provided as part of the vips
column. If this column is
+ empty and a port number is provided as part of vips
+ column, OVN assumes the protocol to be tcp
.
+
+
+
+
+ Datapaths to which this load balancer applies.
+
+
+
+
+ See External IDs at the beginning of this document.
+
+
+
diff --git a/tests/ovn-northd.at b/tests/ovn-northd.at
index 0bf20c1a70..872e88c9a4 100644
--- a/tests/ovn-northd.at
+++ b/tests/ovn-northd.at
@@ -1911,4 +1911,91 @@ AT_CHECK(
[ovn-nbctl --wait=sb set logical-switch-port lsp01 options:requested-tnl-key=2])
get_tunnel_keys
AT_CHECK([test $lsp02 = 3 && test $ls1 = 123])
+
+AT_CLEANUP
+
+AT_SETUP([ovn -- NB to SB load balancer sync])
+ovn_start
+
+check ovn-nbctl --wait=hv lb-add lb0 10.0.0.10:80 10.0.0.4:8080
+check_row_count nb:load_balancer 1
+
+echo
+echo "__file__:__line__: Check that there are no SB load balancer rows."
+check_row_count sb:load_balancer 0
+
+check ovn-nbctl ls-add sw0
+check ovn-nbctl --wait=hv ls-lb-add sw0 lb0
+sw0_sb_uuid=$(fetch_column datapath_binding _uuid external_ids:name=sw0)
+
+echo
+echo "__file__:__line__: Check that there is a SB load balancer row for lb0."
+check_row_count sb:load_balancer 1
+check_column "10.0.0.10:80=10.0.0.4:8080 tcp" sb:load_balancer vips,protocol name=lb0
+
+lb0_uuid=$(fetch_column sb:load_balancer _uuid name=lb0)
+
+echo
+echo "__file__:__line__: Check that SB lb0 has sw0 in datapaths column."
+
+check_column "$sw0_sb_uuid" sb:load_balancer datapaths name=lb0
+check_column "$lb0_uuid" sb:datapath_binding load_balancers external_ids:name=sw0
+
+check ovn-nbctl --wait=sb set load_balancer . vips:"10.0.0.20\:90"="20.0.0.4:8080,30.0.0.4:8080"
+
+echo
+echo "__file__:__line__: Check that SB lb0 has vips and protocol columns are set properly."
+
+check_column "10.0.0.10:80=10.0.0.4:8080 10.0.0.20:90=20.0.0.4:8080,30.0.0.4:8080 tcp" \
+sb:load_balancer vips,protocol name=lb0
+
+check ovn-nbctl lr-add lr0
+check ovn-nbctl --wait=sb lr-lb-add lr0 lb0
+
+echo
+echo "__file__:__line__: Check that SB lb0 has only sw0 in datapaths column."
+check_column "$sw0_sb_uuid" sb:load_balancer datapaths name=lb0
+
+check ovn-nbctl ls-add sw1
+check ovn-nbctl --wait=sb ls-lb-add sw1 lb0
+sw1_sb_uuid=$(fetch_column datapath_binding _uuid external_ids:name=sw1)
+
+echo
+echo "__file__:__line__: Check that SB lb0 has sw0 and sw1 in datapaths column."
+check_column "$sw0_sb_uuid $sw1_sb_uuid" sb:load_balancer datapaths name=lb0
+check_column "$lb0_uuid" sb:datapath_binding load_balancers external_ids:name=sw1
+
+check ovn-nbctl --wait=sb lb-add lb1 10.0.0.30:80 20.0.0.50:8080 udp
+check_row_count sb:load_balancer 1
+
+check ovn-nbctl --wait=sb lr-lb-add lr0 lb1
+check_row_count sb:load_balancer 1
+
+echo
+echo "__file__:__line__: Associate lb1 to sw1 and check that lb1 is created in SB DB."
+
+check ovn-nbctl --wait=sb ls-lb-add sw1 lb1
+check_row_count sb:load_balancer 2
+
+echo
+echo "__file__:__line__: Check that SB lb1 has vips and protocol columns are set properly."
+check_column "10.0.0.30:80=20.0.0.50:8080 udp" sb:load_balancer vips,protocol name=lb1
+
+lb1_uuid=$(fetch_column sb:load_balancer _uuid name=lb1)
+
+echo
+echo "__file__:__line__: Check that SB lb1 has sw1 in datapaths column."
+
+check_column "$sw1_sb_uuid" sb:load_balancer datapaths name=lb1
+
+echo
+echo "__file__:__line__: check that datapath sw1 has lb0 and lb1 set in the load_balancers column."
+check_column "$lb0_uuid $lb1_uuid" sb:datapath_binding load_balancers external_ids:name=sw1
+
+echo
+echo "__file__:__line__: Delete load balancer lb1 and check that datapath sw1's load_balancers are updated accordingly."
+
+ovn-nbctl --wait=sb lb-del lb1
+check_column "$lb0_uuid" sb:datapath_binding load_balancers external_ids:name=sw1
+
AT_CLEANUP
diff --git a/utilities/ovn-sbctl.c b/utilities/ovn-sbctl.c
index 85e448ec04..00c112c7e5 100644
--- a/utilities/ovn-sbctl.c
+++ b/utilities/ovn-sbctl.c
@@ -1441,6 +1441,9 @@ static const struct ctl_table_class tables[SBREC_N_TABLES] = {
[SBREC_TABLE_GATEWAY_CHASSIS].row_ids[0]
= {&sbrec_gateway_chassis_col_name, NULL, NULL},
+
+ [SBREC_TABLE_LOAD_BALANCER].row_ids[0]
+ = {&sbrec_load_balancer_col_name, NULL, NULL},
};
From patchwork Thu Nov 5 07:42:28 2020
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Numan Siddique
X-Patchwork-Id: 1394769
Return-Path:
X-Original-To: incoming@patchwork.ozlabs.org
Delivered-To: patchwork-incoming@bilbo.ozlabs.org
Authentication-Results: ozlabs.org;
spf=pass (sender SPF authorized) smtp.mailfrom=openvswitch.org
(client-ip=140.211.166.133; helo=hemlock.osuosl.org;
envelope-from=ovs-dev-bounces@openvswitch.org; receiver=)
Authentication-Results: ozlabs.org;
dmarc=none (p=none dis=none) header.from=ovn.org
Received: from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133])
(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
(No client certificate requested)
by ozlabs.org (Postfix) with ESMTPS id 4CRb9K57XPz9sSs
for ; Thu, 5 Nov 2020 18:43:16 +1100 (AEDT)
Received: from localhost (localhost [127.0.0.1])
by hemlock.osuosl.org (Postfix) with ESMTP id A0A6087082;
Thu, 5 Nov 2020 07:43:14 +0000 (UTC)
X-Virus-Scanned: amavisd-new at osuosl.org
Received: from hemlock.osuosl.org ([127.0.0.1])
by localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)
with ESMTP id hdCwyqAnP8-2; Thu, 5 Nov 2020 07:43:12 +0000 (UTC)
Received: from lists.linuxfoundation.org (lf-lists.osuosl.org [140.211.9.56])
by hemlock.osuosl.org (Postfix) with ESMTP id 5B67887051;
Thu, 5 Nov 2020 07:43:12 +0000 (UTC)
Received: from lf-lists.osuosl.org (localhost [127.0.0.1])
by lists.linuxfoundation.org (Postfix) with ESMTP id 4EC72C088B;
Thu, 5 Nov 2020 07:43:12 +0000 (UTC)
X-Original-To: dev@openvswitch.org
Delivered-To: ovs-dev@lists.linuxfoundation.org
Received: from whitealder.osuosl.org (smtp1.osuosl.org [140.211.166.138])
by lists.linuxfoundation.org (Postfix) with ESMTP id 2B876C0889
for ; Thu, 5 Nov 2020 07:43:10 +0000 (UTC)
Received: from localhost (localhost [127.0.0.1])
by whitealder.osuosl.org (Postfix) with ESMTP id 1A05C85EF5
for ; Thu, 5 Nov 2020 07:43:10 +0000 (UTC)
X-Virus-Scanned: amavisd-new at osuosl.org
Received: from whitealder.osuosl.org ([127.0.0.1])
by localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)
with ESMTP id 9xSd4trmshZX for ;
Thu, 5 Nov 2020 07:43:08 +0000 (UTC)
X-Greylist: domain auto-whitelisted by SQLgrey-1.7.6
Received: from relay1-d.mail.gandi.net (relay1-d.mail.gandi.net
[217.70.183.193])
by whitealder.osuosl.org (Postfix) with ESMTPS id BAE7985DA4
for ; Thu, 5 Nov 2020 07:43:07 +0000 (UTC)
X-Originating-IP: 115.99.213.209
Received: from nusiddiq.home.org.home.org (unknown [115.99.213.209])
(Authenticated sender: numans@ovn.org)
by relay1-d.mail.gandi.net (Postfix) with ESMTPSA id B358A24000A;
Thu, 5 Nov 2020 07:43:02 +0000 (UTC)
From: numans@ovn.org
To: dev@openvswitch.org
Date: Thu, 5 Nov 2020 13:12:28 +0530
Message-Id: <20201105074228.3793943-1-numans@ovn.org>
X-Mailer: git-send-email 2.28.0
In-Reply-To: <20201105074146.3793721-1-numans@ovn.org>
References: <20201105074146.3793721-1-numans@ovn.org>
MIME-Version: 1.0
Subject: [ovs-dev] [PATCH ovn v3 2/7] northd: Refactor load balancer vip
parsing.
X-BeenThere: ovs-dev@openvswitch.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id:
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Errors-To: ovs-dev-bounces@openvswitch.org
Sender: "dev"
From: Numan Siddique
Parsing of the load balancer VIPs is moved to a separate file - lib/lb.c.
ovn-northd makes use of these functions. Upcoming patch will make use of these
util functions for parsing SB Load_Balancers.
Signed-off-by: Numan Siddique
---
lib/automake.mk | 4 +-
lib/lb.c | 237 +++++++++++++++++++++++++++++++++++++++++
lib/lb.h | 79 ++++++++++++++
northd/ovn-northd.c | 250 ++++++--------------------------------------
4 files changed, 349 insertions(+), 221 deletions(-)
create mode 100644 lib/lb.c
create mode 100644 lib/lb.h
diff --git a/lib/automake.mk b/lib/automake.mk
index d38d5c50c7..250c7aefae 100644
--- a/lib/automake.mk
+++ b/lib/automake.mk
@@ -24,7 +24,9 @@ lib_libovn_la_SOURCES = \
lib/ovn-util.h \
lib/logical-fields.c \
lib/inc-proc-eng.c \
- lib/inc-proc-eng.h
+ lib/inc-proc-eng.h \
+ lib/lb.c \
+ lib/lb.h
nodist_lib_libovn_la_SOURCES = \
lib/ovn-dirs.c \
lib/ovn-nb-idl.c \
diff --git a/lib/lb.c b/lib/lb.c
new file mode 100644
index 0000000000..a61aab35cb
--- /dev/null
+++ b/lib/lb.c
@@ -0,0 +1,237 @@
+/* Copyright (c) 2020, Red Hat, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include
+
+#include "lb.h"
+#include "lib/ovn-nb-idl.h"
+#include "lib/ovn-sb-idl.h"
+#include "lib/ovn-util.h"
+
+/* OpenvSwitch lib includes. */
+#include "openvswitch/vlog.h"
+#include "lib/smap.h"
+
+VLOG_DEFINE_THIS_MODULE(lb);
+
+static struct ovn_lb *
+ovn_lb_create(const struct smap *vips)
+{
+ struct ovn_lb *lb = xzalloc(sizeof *lb);
+
+ lb->n_vips = smap_count(vips);
+ lb->vips = xcalloc(lb->n_vips, sizeof (struct lb_vip));
+ struct smap_node *node;
+ size_t n_vips = 0;
+
+ SMAP_FOR_EACH (node, vips) {
+ char *vip;
+ uint16_t port;
+ int addr_family;
+
+ if (!ip_address_and_port_from_lb_key(node->key, &vip, &port,
+ &addr_family)) {
+ continue;
+ }
+
+ lb->vips[n_vips].vip = vip;
+ lb->vips[n_vips].vip_port = port;
+ lb->vips[n_vips].addr_family = addr_family;
+ lb->vips[n_vips].vip_port_str = xstrdup(node->key);
+ lb->vips[n_vips].backend_ips = xstrdup(node->value);
+
+ char *tokstr = xstrdup(node->value);
+ char *save_ptr = NULL;
+ char *token;
+ size_t n_backends = 0;
+ /* Format for backend ips: "IP1:port1,IP2:port2,...". */
+ for (token = strtok_r(tokstr, ",", &save_ptr);
+ token != NULL;
+ token = strtok_r(NULL, ",", &save_ptr)) {
+ n_backends++;
+ }
+
+ free(tokstr);
+ tokstr = xstrdup(node->value);
+ save_ptr = NULL;
+
+ lb->vips[n_vips].n_backends = n_backends;
+ lb->vips[n_vips].backends = xcalloc(n_backends,
+ sizeof *lb->vips[n_vips].backends);
+ size_t i = 0;
+ for (token = strtok_r(tokstr, ",", &save_ptr);
+ token != NULL;
+ token = strtok_r(NULL, ",", &save_ptr)) {
+ char *backend_ip;
+ uint16_t backend_port;
+
+ if (!ip_address_and_port_from_lb_key(token, &backend_ip,
+ &backend_port,
+ &addr_family)) {
+ continue;
+ }
+
+ lb->vips[n_vips].backends[i].ip = backend_ip;
+ lb->vips[n_vips].backends[i].port = backend_port;
+ lb->vips[n_vips].backends[i].addr_family = addr_family;
+ i++;
+ }
+
+ free(tokstr);
+ n_vips++;
+ }
+
+ return lb;
+}
+
+struct ovn_lb *
+ovn_nb_lb_create(const struct nbrec_load_balancer *nbrec_lb,
+ struct hmap *ports, struct hmap *lbs,
+ void * (*ovn_port_find)(const struct hmap *ports,
+ const char *name))
+{
+ struct ovn_lb *lb = ovn_lb_create(&nbrec_lb->vips);
+ hmap_insert(lbs, &lb->hmap_node, uuid_hash(&nbrec_lb->header_.uuid));
+ lb->nlb = nbrec_lb;
+ lb->nb_lb = true;
+
+ for (size_t i = 0; i < lb->n_vips; i++) {
+ struct lb_vip *lb_vip = &lb->vips[i];
+
+ struct nbrec_load_balancer_health_check *lb_health_check = NULL;
+ if (nbrec_lb->protocol && !strcmp(nbrec_lb->protocol, "sctp")) {
+ if (nbrec_lb->n_health_check > 0) {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+ VLOG_WARN_RL(&rl,
+ "SCTP load balancers do not currently support "
+ "health checks. Not creating health checks for "
+ "load balancer " UUID_FMT,
+ UUID_ARGS(&nbrec_lb->header_.uuid));
+ }
+ } else {
+ for (size_t j = 0; j < nbrec_lb->n_health_check; j++) {
+ if (!strcmp(nbrec_lb->health_check[j]->vip,
+ lb_vip->vip_port_str)) {
+ lb_health_check = nbrec_lb->health_check[j];
+ break;
+ }
+ }
+ }
+
+ lb_vip->lb_health_check = lb_health_check;
+
+ for (size_t j = 0; j < lb_vip->n_backends; j++) {
+ struct lb_vip_backend *backend = &lb_vip->backends[j];
+
+ struct ovn_port *op = NULL;
+ char *svc_mon_src_ip = NULL;
+ const char *s = smap_get(&nbrec_lb->ip_port_mappings,
+ backend->ip);
+ if (s) {
+ char *port_name = xstrdup(s);
+ char *p = strstr(port_name, ":");
+ if (p) {
+ *p = 0;
+ p++;
+ op = ovn_port_find(ports, port_name);
+ svc_mon_src_ip = xstrdup(p);
+ }
+ free(port_name);
+ }
+
+ backend->op = op;
+ backend->svc_mon_src_ip = svc_mon_src_ip;
+ }
+ }
+
+ if (nbrec_lb->n_selection_fields) {
+ char *proto = NULL;
+ if (nbrec_lb->protocol && nbrec_lb->protocol[0]) {
+ proto = nbrec_lb->protocol;
+ }
+
+ struct ds sel_fields = DS_EMPTY_INITIALIZER;
+ for (size_t i = 0; i < lb->nlb->n_selection_fields; i++) {
+ char *field = lb->nlb->selection_fields[i];
+ if (!strcmp(field, "tp_src") && proto) {
+ ds_put_format(&sel_fields, "%s_src,", proto);
+ } else if (!strcmp(field, "tp_dst") && proto) {
+ ds_put_format(&sel_fields, "%s_dst,", proto);
+ } else {
+ ds_put_format(&sel_fields, "%s,", field);
+ }
+ }
+ ds_chomp(&sel_fields, ',');
+ lb->selection_fields = ds_steal_cstr(&sel_fields);
+ }
+
+ return lb;
+}
+
+struct ovn_lb *
+ovn_sb_lb_create(const struct sbrec_load_balancer *sbrec_lb)
+{
+ struct ovn_lb *lb = ovn_lb_create(&sbrec_lb->vips);
+ lb->slb = sbrec_lb;
+ lb->nb_lb = false;
+
+ return lb;
+}
+
+struct ovn_lb *
+ovn_lb_find(struct hmap *lbs, struct uuid *uuid)
+{
+ struct ovn_lb *lb;
+ size_t hash = uuid_hash(uuid);
+ HMAP_FOR_EACH_WITH_HASH (lb, hmap_node, hash, lbs) {
+ if (uuid_equals(&lb->nlb->header_.uuid, uuid)) {
+ return lb;
+ }
+ }
+
+ return NULL;
+}
+
+void
+ovn_lb_destroy(struct ovn_lb *lb)
+{
+ for (size_t i = 0; i < lb->n_vips; i++) {
+ free(lb->vips[i].vip);
+ free(lb->vips[i].backend_ips);
+ free(lb->vips[i].vip_port_str);
+
+ for (size_t j = 0; j < lb->vips[i].n_backends; j++) {
+ free(lb->vips[i].backends[j].ip);
+ free(lb->vips[i].backends[j].svc_mon_src_ip);
+ }
+
+ free(lb->vips[i].backends);
+ }
+ free(lb->vips);
+ if (lb->nb_lb) {
+ free(lb->selection_fields);
+ }
+}
+
+void
+ovn_lbs_destroy(struct hmap *lbs)
+{
+ struct ovn_lb *lb;
+ HMAP_FOR_EACH_POP (lb, hmap_node, lbs) {
+ ovn_lb_destroy(lb);
+ free(lb);
+ }
+ hmap_destroy(lbs);
+}
diff --git a/lib/lb.h b/lib/lb.h
new file mode 100644
index 0000000000..f29ad49ecd
--- /dev/null
+++ b/lib/lb.h
@@ -0,0 +1,79 @@
+/* Copyright (c) 2020, Red Hat, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef OVN_LIB_LB_H
+#define OVN_LIB_LB_H 1
+
+#include "openvswitch/hmap.h"
+
+struct nbrec_load_balancer;
+struct sbrec_load_balancer;
+struct ovn_port;
+struct uuid;
+
+struct ovn_lb {
+ struct hmap_node hmap_node;
+
+ bool nb_lb; /* NB load balancer or SB load balancer. */
+ union {
+ struct {
+ const struct nbrec_load_balancer *nlb; /* May be NULL. */
+ char *selection_fields;
+ };
+ const struct sbrec_load_balancer *slb; /* May be NULL. */
+ };
+
+ struct lb_vip *vips;
+ size_t n_vips;
+};
+
+struct lb_vip {
+ char *vip;
+ uint16_t vip_port;
+ int addr_family;
+ char *vip_port_str;
+
+ /* Backend information. */
+ char *backend_ips;
+ struct lb_vip_backend *backends;
+ size_t n_backends;
+
+ /* Valid only for NB load balancer. */
+ struct nbrec_load_balancer_health_check *lb_health_check;
+};
+
+struct lb_vip_backend {
+ char *ip;
+ uint16_t port;
+ int addr_family;
+
+ /* Valid only for NB load balancer. */
+ struct ovn_port *op; /* Logical port to which the ip belong to. */
+ bool health_check;
+ char *svc_mon_src_ip; /* Source IP to use for monitoring. */
+ const struct sbrec_service_monitor *sbrec_monitor;
+};
+
+struct ovn_lb *ovn_nb_lb_create(
+ const struct nbrec_load_balancer *nbrec_lb,
+ struct hmap *ports, struct hmap *lbs,
+ void * (*ovn_port_find)(const struct hmap *ports, const char *name));
+struct ovn_lb *ovn_sb_lb_create(const struct sbrec_load_balancer *sbrec_lb);
+struct ovn_lb * ovn_lb_find(struct hmap *lbs, struct uuid *uuid);
+void ovn_lb_destroy(struct ovn_lb *lb);
+void ovn_lbs_destroy(struct hmap *lbs);
+
+#endif /* OVN_LIB_LB_H */
diff --git a/northd/ovn-northd.c b/northd/ovn-northd.c
index 6bbe93befb..d39ae8c9ea 100644
--- a/northd/ovn-northd.c
+++ b/northd/ovn-northd.c
@@ -35,6 +35,7 @@
#include "lib/ovn-nb-idl.h"
#include "lib/ovn-sb-idl.h"
#include "lib/ovn-util.h"
+#include "lib/lb.h"
#include "ovn/actions.h"
#include "ovn/logical-fields.h"
#include "packets.h"
@@ -3320,53 +3321,6 @@ cleanup_sb_ha_chassis_groups(struct northd_context *ctx,
}
}
-struct ovn_lb {
- struct hmap_node hmap_node;
-
- const struct nbrec_load_balancer *nlb; /* May be NULL. */
- char *selection_fields;
- struct lb_vip *vips;
- size_t n_vips;
-};
-
-struct lb_vip {
- char *vip;
- uint16_t vip_port;
- int addr_family;
- char *backend_ips;
-
- bool health_check;
- struct lb_vip_backend *backends;
- size_t n_backends;
-};
-
-struct lb_vip_backend {
- char *ip;
- uint16_t port;
- int addr_family;
-
- struct ovn_port *op; /* Logical port to which the ip belong to. */
- bool health_check;
- char *svc_mon_src_ip; /* Source IP to use for monitoring. */
- const struct sbrec_service_monitor *sbrec_monitor;
-};
-
-
-static inline struct ovn_lb *
-ovn_lb_find(struct hmap *lbs, struct uuid *uuid)
-{
- struct ovn_lb *lb;
- size_t hash = uuid_hash(uuid);
- HMAP_FOR_EACH_WITH_HASH (lb, hmap_node, hash, lbs) {
- if (uuid_equals(&lb->nlb->header_.uuid, uuid)) {
- return lb;
- }
- }
-
- return NULL;
-}
-
-
struct service_monitor_info {
struct hmap_node hmap_node;
const struct sbrec_service_monitor *sbrec_mon;
@@ -3406,126 +3360,36 @@ create_or_get_service_mon(struct northd_context *ctx,
return mon_info;
}
-static struct ovn_lb *
-ovn_lb_create(struct northd_context *ctx, struct hmap *lbs,
- const struct nbrec_load_balancer *nbrec_lb,
- struct hmap *ports, struct hmap *monitor_map)
+static void
+ovn_lb_svc_create(struct northd_context *ctx, struct ovn_lb *lb,
+ struct hmap *monitor_map)
{
- struct ovn_lb *lb = xzalloc(sizeof *lb);
-
- size_t hash = uuid_hash(&nbrec_lb->header_.uuid);
- lb->nlb = nbrec_lb;
- hmap_insert(lbs, &lb->hmap_node, hash);
-
- lb->n_vips = smap_count(&nbrec_lb->vips);
- lb->vips = xcalloc(lb->n_vips, sizeof (struct lb_vip));
- struct smap_node *node;
- size_t n_vips = 0;
-
- SMAP_FOR_EACH (node, &nbrec_lb->vips) {
- char *vip;
- uint16_t port;
- int addr_family;
+ for (size_t i = 0; i < lb->n_vips; i++) {
+ struct lb_vip *lb_vip = &lb->vips[i];
- if (!ip_address_and_port_from_lb_key(node->key, &vip, &port,
- &addr_family)) {
+ if (!lb_vip->lb_health_check) {
continue;
}
- lb->vips[n_vips].vip = vip;
- lb->vips[n_vips].vip_port = port;
- lb->vips[n_vips].addr_family = addr_family;
- lb->vips[n_vips].backend_ips = xstrdup(node->value);
-
- struct nbrec_load_balancer_health_check *lb_health_check = NULL;
- if (nbrec_lb->protocol && !strcmp(nbrec_lb->protocol, "sctp")) {
- if (nbrec_lb->n_health_check > 0) {
- static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
- VLOG_WARN_RL(&rl,
- "SCTP load balancers do not currently support "
- "health checks. Not creating health checks for "
- "load balancer " UUID_FMT,
- UUID_ARGS(&nbrec_lb->header_.uuid));
- }
- } else {
- for (size_t i = 0; i < nbrec_lb->n_health_check; i++) {
- if (!strcmp(nbrec_lb->health_check[i]->vip, node->key)) {
- lb_health_check = nbrec_lb->health_check[i];
- break;
- }
- }
- }
-
- char *tokstr = xstrdup(node->value);
- char *save_ptr = NULL;
- char *token;
- size_t n_backends = 0;
- /* Format for a backend ips : IP1:port1,IP2:port2,...". */
- for (token = strtok_r(tokstr, ",", &save_ptr);
- token != NULL;
- token = strtok_r(NULL, ",", &save_ptr)) {
- n_backends++;
- }
-
- free(tokstr);
- tokstr = xstrdup(node->value);
- save_ptr = NULL;
-
- lb->vips[n_vips].n_backends = n_backends;
- lb->vips[n_vips].backends = xcalloc(n_backends,
- sizeof (struct lb_vip_backend));
- lb->vips[n_vips].health_check = lb_health_check ? true: false;
+ for (size_t j = 0; j < lb_vip->n_backends; j++) {
+ struct lb_vip_backend *backend = &lb_vip->backends[j];
- size_t i = 0;
- for (token = strtok_r(tokstr, ",", &save_ptr);
- token != NULL;
- token = strtok_r(NULL, ",", &save_ptr)) {
- char *backend_ip;
- uint16_t backend_port;
-
- if (!ip_address_and_port_from_lb_key(token, &backend_ip,
- &backend_port,
- &addr_family)) {
- continue;
- }
-
- /* Get the logical port to which this ip belongs to. */
- struct ovn_port *op = NULL;
- char *svc_mon_src_ip = NULL;
- const char *s = smap_get(&nbrec_lb->ip_port_mappings,
- backend_ip);
- if (s) {
- char *port_name = xstrdup(s);
- char *p = strstr(port_name, ":");
- if (p) {
- *p = 0;
- p++;
- op = ovn_port_find(ports, port_name);
- svc_mon_src_ip = xstrdup(p);
- }
- free(port_name);
- }
-
- lb->vips[n_vips].backends[i].ip = backend_ip;
- lb->vips[n_vips].backends[i].port = backend_port;
- lb->vips[n_vips].backends[i].addr_family = addr_family;
- lb->vips[n_vips].backends[i].op = op;
- lb->vips[n_vips].backends[i].svc_mon_src_ip = svc_mon_src_ip;
-
- if (lb_health_check && op && svc_mon_src_ip) {
- const char *protocol = nbrec_lb->protocol;
+ if (backend->op && backend->svc_mon_src_ip) {
+ backend->health_check = true;
+ const char *protocol = lb->nlb->protocol;
if (!protocol || !protocol[0]) {
protocol = "tcp";
}
- lb->vips[n_vips].backends[i].health_check = true;
+ backend->health_check = true;
struct service_monitor_info *mon_info =
- create_or_get_service_mon(ctx, monitor_map, backend_ip,
- op->nbsp->name, backend_port,
+ create_or_get_service_mon(ctx, monitor_map, backend->ip,
+ backend->op->nbsp->name,
+ backend->port,
protocol);
ovs_assert(mon_info);
sbrec_service_monitor_set_options(
- mon_info->sbrec_mon, &lb_health_check->options);
+ mon_info->sbrec_mon, &lb_vip->lb_health_check->options);
struct eth_addr ea;
if (!mon_info->sbrec_mon->src_mac ||
!eth_addr_from_string(mon_info->sbrec_mon->src_mac, &ea) ||
@@ -3535,72 +3399,24 @@ ovn_lb_create(struct northd_context *ctx, struct hmap *lbs,
}
if (!mon_info->sbrec_mon->src_ip ||
- strcmp(mon_info->sbrec_mon->src_ip, svc_mon_src_ip)) {
+ strcmp(mon_info->sbrec_mon->src_ip,
+ backend->svc_mon_src_ip)) {
sbrec_service_monitor_set_src_ip(mon_info->sbrec_mon,
- svc_mon_src_ip);
+ backend->svc_mon_src_ip);
}
- lb->vips[n_vips].backends[i].sbrec_monitor =
- mon_info->sbrec_mon;
+ lb_vip->backends[j].sbrec_monitor = mon_info->sbrec_mon;
mon_info->required = true;
- } else {
- lb->vips[n_vips].backends[i].health_check = false;
- }
-
- i++;
- }
-
- free(tokstr);
- n_vips++;
- }
-
- char *proto = NULL;
- if (nbrec_lb->protocol && nbrec_lb->protocol[0]) {
- proto = nbrec_lb->protocol;
- }
-
- if (lb->nlb->n_selection_fields) {
- struct ds sel_fields = DS_EMPTY_INITIALIZER;
- for (size_t i = 0; i < lb->nlb->n_selection_fields; i++) {
- char *field = lb->nlb->selection_fields[i];
- if (!strcmp(field, "tp_src") && proto) {
- ds_put_format(&sel_fields, "%s_src,", proto);
- } else if (!strcmp(field, "tp_dst") && proto) {
- ds_put_format(&sel_fields, "%s_dst,", proto);
- } else {
- ds_put_format(&sel_fields, "%s,", field);
}
}
- ds_chomp(&sel_fields, ',');
- lb->selection_fields = ds_steal_cstr(&sel_fields);
}
-
- return lb;
-}
-
-static void
-ovn_lb_destroy(struct ovn_lb *lb)
-{
- for (size_t i = 0; i < lb->n_vips; i++) {
- free(lb->vips[i].vip);
- free(lb->vips[i].backend_ips);
-
- for (size_t j = 0; j < lb->vips[i].n_backends; j++) {
- free(lb->vips[i].backends[j].ip);
- free(lb->vips[i].backends[j].svc_mon_src_ip);
- }
-
- free(lb->vips[i].backends);
- }
- free(lb->vips);
- free(lb->selection_fields);
}
static void build_lb_vip_ct_lb_actions(struct lb_vip *lb_vip,
struct ds *action,
char *selection_fields)
{
- if (lb_vip->health_check) {
+ if (lb_vip->lb_health_check) {
ds_put_cstr(action, "ct_lb(backends=");
size_t n_active_backends = 0;
@@ -3655,7 +3471,12 @@ build_ovn_lbs(struct northd_context *ctx, struct hmap *ports,
const struct nbrec_load_balancer *nbrec_lb;
NBREC_LOAD_BALANCER_FOR_EACH (nbrec_lb, ctx->ovnnb_idl) {
- ovn_lb_create(ctx, lbs, nbrec_lb, ports, &monitor_map);
+ ovn_nb_lb_create(nbrec_lb, ports, lbs, (void *)ovn_port_find);
+ }
+
+ struct ovn_lb *lb;
+ HMAP_FOR_EACH (lb, hmap_node, lbs) {
+ ovn_lb_svc_create(ctx, lb, &monitor_map);
}
struct service_monitor_info *mon_info;
@@ -3669,16 +3490,6 @@ build_ovn_lbs(struct northd_context *ctx, struct hmap *ports,
hmap_destroy(&monitor_map);
}
-static void
-destroy_ovn_lbs(struct hmap *lbs)
-{
- struct ovn_lb *lb;
- HMAP_FOR_EACH_POP (lb, hmap_node, lbs) {
- ovn_lb_destroy(lb);
- free(lb);
- }
-}
-
static bool
ovn_port_add_tnlid(struct ovn_port *op, uint32_t tunnel_key)
{
@@ -7028,7 +6839,7 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
struct ovn_lb *lb;
HMAP_FOR_EACH (lb, hmap_node, lbs) {
for (size_t i = 0; i < lb->n_vips; i++) {
- if (!lb->vips[i].health_check) {
+ if (!lb->vips[i].lb_health_check) {
continue;
}
@@ -12379,8 +12190,7 @@ ovnnb_db_run(struct northd_context *ctx,
sync_meters(ctx);
sync_dns_entries(ctx, datapaths);
sync_lb_entries(ctx, datapaths);
- destroy_ovn_lbs(&lbs);
- hmap_destroy(&lbs);
+ ovn_lbs_destroy(&lbs);
struct ovn_igmp_group *igmp_group, *next_igmp_group;
From patchwork Thu Nov 5 07:43:04 2020
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Numan Siddique
X-Patchwork-Id: 1394770
Return-Path:
X-Original-To: incoming@patchwork.ozlabs.org
Delivered-To: patchwork-incoming@bilbo.ozlabs.org
Authentication-Results: ozlabs.org;
spf=pass (sender SPF authorized) smtp.mailfrom=openvswitch.org
(client-ip=140.211.166.138; helo=whitealder.osuosl.org;
envelope-from=ovs-dev-bounces@openvswitch.org; receiver=)
Authentication-Results: ozlabs.org;
dmarc=none (p=none dis=none) header.from=ovn.org
Received: from whitealder.osuosl.org (smtp1.osuosl.org [140.211.166.138])
(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
(No client certificate requested)
by ozlabs.org (Postfix) with ESMTPS id 4CRbB85K4pz9sSs
for ; Thu, 5 Nov 2020 18:44:00 +1100 (AEDT)
Received: from localhost (localhost [127.0.0.1])
by whitealder.osuosl.org (Postfix) with ESMTP id 8112B85DB1;
Thu, 5 Nov 2020 07:43:58 +0000 (UTC)
X-Virus-Scanned: amavisd-new at osuosl.org
Received: from whitealder.osuosl.org ([127.0.0.1])
by localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)
with ESMTP id ArplxU+R-Jpk; Thu, 5 Nov 2020 07:43:52 +0000 (UTC)
Received: from lists.linuxfoundation.org (lf-lists.osuosl.org [140.211.9.56])
by whitealder.osuosl.org (Postfix) with ESMTP id 7CCA285DA4;
Thu, 5 Nov 2020 07:43:52 +0000 (UTC)
Received: from lf-lists.osuosl.org (localhost [127.0.0.1])
by lists.linuxfoundation.org (Postfix) with ESMTP id 71731C088B;
Thu, 5 Nov 2020 07:43:52 +0000 (UTC)
X-Original-To: dev@openvswitch.org
Delivered-To: ovs-dev@lists.linuxfoundation.org
Received: from whitealder.osuosl.org (smtp1.osuosl.org [140.211.166.138])
by lists.linuxfoundation.org (Postfix) with ESMTP id 0A127C0889
for ; Thu, 5 Nov 2020 07:43:51 +0000 (UTC)
Received: from localhost (localhost [127.0.0.1])
by whitealder.osuosl.org (Postfix) with ESMTP id ED4FE85DB1
for ; Thu, 5 Nov 2020 07:43:50 +0000 (UTC)
X-Virus-Scanned: amavisd-new at osuosl.org
Received: from whitealder.osuosl.org ([127.0.0.1])
by localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)
with ESMTP id tf0xy4QDUluV for ;
Thu, 5 Nov 2020 07:43:47 +0000 (UTC)
X-Greylist: domain auto-whitelisted by SQLgrey-1.7.6
Received: from relay3-d.mail.gandi.net (relay3-d.mail.gandi.net
[217.70.183.195])
by whitealder.osuosl.org (Postfix) with ESMTPS id 3943185DA4
for ; Thu, 5 Nov 2020 07:43:47 +0000 (UTC)
X-Originating-IP: 115.99.213.209
Received: from nusiddiq.home.org.home.org (unknown [115.99.213.209])
(Authenticated sender: numans@ovn.org)
by relay3-d.mail.gandi.net (Postfix) with ESMTPSA id 1DC3F6000D;
Thu, 5 Nov 2020 07:43:41 +0000 (UTC)
From: numans@ovn.org
To: dev@openvswitch.org
Date: Thu, 5 Nov 2020 13:13:04 +0530
Message-Id: <20201105074304.3794042-1-numans@ovn.org>
X-Mailer: git-send-email 2.28.0
In-Reply-To: <20201105074146.3793721-1-numans@ovn.org>
References: <20201105074146.3793721-1-numans@ovn.org>
MIME-Version: 1.0
Subject: [ovs-dev] [PATCH ovn v3 3/7] controller: Add load balancer hairpin
OF flows.
X-BeenThere: ovs-dev@openvswitch.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id:
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Errors-To: ovs-dev-bounces@openvswitch.org
Sender: "dev"
From: Numan Siddique
Presently, to handle load balancer hairpin traffic (traffic destined to the
load balancer VIP that is dnatted back to the backend which originated it), ovn-northd
adds a lot of logical flows to check this scenario. This patch attempts to reduce
these logical flows. Each ovn-controller will read the load balancers from
the newly added southbound Load_Balancer table and add the load balancer hairpin OF
flows in tables 68, 69 and 70. For example, if the below load balancer is configured:
10.0.0.10:80 = 10.0.0.4:8080, 10.0.0.5:8090, then the below flows are added:
table=68, ip.src = 10.0.0.4,ip.dst=10.0.0.4,tcp.dst=8080 actions=load:1->NXM_NX_REG10[7]
table=68, ip.src = 10.0.0.5,ip.dst=10.0.0.5,tcp.dst=8090 actions=load:1->NXM_NX_REG10[7]
table=69, ip.src = 10.0.0.4,ip.dst=10.0.0.10,tcp.src=8080 actions=load:1->NXM_NX_REG10[7]
table=69, ip.src = 10.0.0.5,ip.dst=10.0.0.10,tcp.src=8090 actions=load:1->NXM_NX_REG10[7]
table=70, ct.trk && ct.dnat && ct.nw_dst == 10.0.0.10. actions=ct(commit, zone=reg12, nat(src=10.0.0.5))
An upcoming patch will add OVN actions which do the lookup in these tables to handle the
hairpin traffic.
Signed-off-by: Numan Siddique
---
controller/lflow.c | 248 ++++++++++++++++++
controller/lflow.h | 6 +-
controller/ovn-controller.c | 27 +-
include/ovn/logical-fields.h | 3 +
tests/ovn.at | 469 +++++++++++++++++++++++++++++++++++
5 files changed, 751 insertions(+), 2 deletions(-)
diff --git a/controller/lflow.c b/controller/lflow.c
index f631679c3f..588c3233fb 100644
--- a/controller/lflow.c
+++ b/controller/lflow.c
@@ -26,6 +26,7 @@
#include "ovn-controller.h"
#include "ovn/actions.h"
#include "ovn/expr.h"
+#include "lib/lb.h"
#include "lib/ovn-l7.h"
#include "lib/ovn-sb-idl.h"
#include "lib/extend-table.h"
@@ -1138,6 +1139,208 @@ add_neighbor_flows(struct ovsdb_idl_index *sbrec_port_binding_by_name,
}
}
+static void
+add_lb_vip_hairpin_flows(struct ovn_lb *lb, struct lb_vip *lb_vip,
+ struct lb_vip_backend *lb_backend,
+ uint8_t lb_proto,
+ struct ovn_desired_flow_table *flow_table)
+{
+ uint64_t stub[1024 / 8];
+ struct ofpbuf ofpacts = OFPBUF_STUB_INITIALIZER(stub);
+
+ uint8_t value = 1;
+ put_load(&value, sizeof value, MFF_LOG_FLAGS,
+ MLF_LOOKUP_LB_HAIRPIN_BIT, 1, &ofpacts);
+
+ ovs_be32 vip4;
+ struct in6_addr vip6;
+
+ if (lb_vip->addr_family == AF_INET) {
+ ovs_assert(ip_parse(lb_vip->vip, &vip4));
+ } else {
+ ovs_assert(ipv6_parse(lb_vip->vip, &vip6));
+ }
+
+ struct match hairpin_match = MATCH_CATCHALL_INITIALIZER;
+ struct match hairpin_reply_match = MATCH_CATCHALL_INITIALIZER;
+
+ if (lb_vip->addr_family == AF_INET) {
+ ovs_be32 ip4;
+ ovs_assert(ip_parse(lb_backend->ip, &ip4));
+
+ match_set_dl_type(&hairpin_match, htons(ETH_TYPE_IP));
+ match_set_nw_src(&hairpin_match, ip4);
+ match_set_nw_dst(&hairpin_match, ip4);
+
+ match_set_dl_type(&hairpin_reply_match,
+ htons(ETH_TYPE_IP));
+ match_set_nw_src(&hairpin_reply_match, ip4);
+ match_set_nw_dst(&hairpin_reply_match, vip4);
+ } else {
+ struct in6_addr ip6;
+ ovs_assert(ipv6_parse(lb_backend->ip, &ip6));
+
+ match_set_dl_type(&hairpin_match, htons(ETH_TYPE_IPV6));
+ match_set_ipv6_src(&hairpin_match, &ip6);
+ match_set_ipv6_dst(&hairpin_match, &ip6);
+
+ match_set_dl_type(&hairpin_reply_match,
+ htons(ETH_TYPE_IPV6));
+ match_set_ipv6_src(&hairpin_reply_match, &ip6);
+ match_set_ipv6_dst(&hairpin_reply_match, &vip6);
+ }
+
+ if (lb_backend->port) {
+ match_set_nw_proto(&hairpin_match, lb_proto);
+ match_set_tp_dst(&hairpin_match, htons(lb_backend->port));
+
+ match_set_nw_proto(&hairpin_reply_match, lb_proto);
+ match_set_tp_src(&hairpin_reply_match,
+ htons(lb_backend->port));
+ }
+
+ for (size_t i = 0; i < lb->slb->n_datapaths; i++) {
+ match_set_metadata(&hairpin_match,
+ htonll(lb->slb->datapaths[i]->tunnel_key));
+ match_set_metadata(&hairpin_reply_match,
+ htonll(lb->slb->datapaths[i]->tunnel_key));
+
+ ofctrl_add_flow(flow_table, OFTABLE_CHK_LB_HAIRPIN, 100,
+ lb->slb->header_.uuid.parts[0], &hairpin_match,
+ &ofpacts, &lb->slb->header_.uuid);
+
+ ofctrl_add_flow(flow_table, OFTABLE_CHK_LB_HAIRPIN_REPLY, 100,
+ lb->slb->header_.uuid.parts[0],
+ &hairpin_reply_match,
+ &ofpacts, &lb->slb->header_.uuid);
+ }
+
+ ofpbuf_uninit(&ofpacts);
+}
+
+static void
+add_lb_ct_snat_vip_flows(struct ovn_lb *lb, struct lb_vip *lb_vip,
+ struct ovn_desired_flow_table *flow_table)
+{
+ ovs_be32 vip4;
+ struct in6_addr vip6;
+
+ if (lb_vip->addr_family == AF_INET) {
+ ovs_assert(ip_parse(lb_vip->vip, &vip4));
+ } else {
+ ovs_assert(ipv6_parse(lb_vip->vip, &vip6));
+ }
+
+ uint64_t stub[1024 / 8];
+ struct ofpbuf ofpacts = OFPBUF_STUB_INITIALIZER(stub);
+
+ struct ofpact_conntrack *ct = ofpact_put_CT(&ofpacts);
+ ct->recirc_table = NX_CT_RECIRC_NONE;
+ ct->zone_src.field = mf_from_id(MFF_LOG_SNAT_ZONE);
+ ct->zone_src.ofs = 0;
+ ct->zone_src.n_bits = 16;
+ ct->flags = NX_CT_F_COMMIT;
+ ct->alg = 0;
+
+ size_t nat_offset;
+ nat_offset = ofpacts.size;
+ ofpbuf_pull(&ofpacts, nat_offset);
+
+ struct ofpact_nat *nat = ofpact_put_NAT(&ofpacts);
+ nat->flags = NX_NAT_F_SRC;
+ nat->range_af = AF_UNSPEC;
+ if (lb_vip->addr_family == AF_INET) {
+ nat->range_af = AF_INET;
+ nat->range.addr.ipv4.min = vip4;
+ } else {
+ nat->range_af = AF_INET6;
+ nat->range.addr.ipv6.min = vip6;
+ }
+ ofpacts.header = ofpbuf_push_uninit(&ofpacts, nat_offset);
+ ofpact_finish(&ofpacts, &ct->ofpact);
+
+ struct match match = MATCH_CATCHALL_INITIALIZER;
+ if (lb_vip->addr_family == AF_INET) {
+ match_set_dl_type(&match, htons(ETH_TYPE_IP));
+ match_set_ct_nw_dst(&match, vip4);
+ } else {
+ match_set_dl_type(&match, htons(ETH_TYPE_IPV6));
+ match_set_ct_ipv6_dst(&match, &vip6);
+ }
+
+ uint32_t ct_state = OVS_CS_F_TRACKED | OVS_CS_F_DST_NAT;
+ match_set_ct_state_masked(&match, ct_state, ct_state);
+
+ for (size_t i = 0; i < lb->slb->n_datapaths; i++) {
+ match_set_metadata(&match,
+ htonll(lb->slb->datapaths[i]->tunnel_key));
+
+ ofctrl_add_flow(flow_table, OFTABLE_CT_SNAT_FOR_VIP, 100,
+ lb->slb->header_.uuid.parts[0],
+ &match, &ofpacts, &lb->slb->header_.uuid);
+ }
+
+ ofpbuf_uninit(&ofpacts);
+}
+
+static void
+consider_lb_hairpin_flows(const struct sbrec_load_balancer *sbrec_lb,
+ const struct hmap *local_datapaths,
+ struct ovn_desired_flow_table *flow_table)
+{
+ /* Check if we need to add flows or not. If there is one datapath
+ * in the local_datapaths, it means all the datapaths of the lb
+ * will be in the local_datapaths. */
+ size_t i;
+ for (i = 0; i < sbrec_lb->n_datapaths; i++) {
+ if (get_local_datapath(local_datapaths,
+ sbrec_lb->datapaths[i]->tunnel_key)) {
+ break;
+ }
+ }
+
+ if (i == sbrec_lb->n_datapaths) {
+ return;
+ }
+
+ struct ovn_lb *lb = ovn_sb_lb_create(sbrec_lb);
+ uint8_t lb_proto = IPPROTO_TCP;
+ if (lb->slb->protocol && lb->slb->protocol[0]) {
+ if (!strcmp(lb->slb->protocol, "udp")) {
+ lb_proto = IPPROTO_UDP;
+ } else if (!strcmp(lb->slb->protocol, "sctp")) {
+ lb_proto = IPPROTO_SCTP;
+ }
+ }
+
+ for (i = 0; i < lb->n_vips; i++) {
+ struct lb_vip *lb_vip = &lb->vips[i];
+
+ for (size_t j = 0; j < lb_vip->n_backends; j++) {
+ struct lb_vip_backend *lb_backend = &lb_vip->backends[j];
+ add_lb_vip_hairpin_flows(lb, lb_vip, lb_backend, lb_proto,
+ flow_table);
+ }
+
+ add_lb_ct_snat_vip_flows(lb, lb_vip, flow_table);
+ }
+
+ ovn_lb_destroy(lb);
+}
+
+/* Adds OpenFlow flows to flow tables for each Load balancer VIPs and
+ * backends to handle the load balanced hairpin traffic. */
+static void
+add_lb_hairpin_flows(const struct sbrec_load_balancer_table *lb_table,
+ const struct hmap *local_datapaths,
+ struct ovn_desired_flow_table *flow_table)
+{
+ const struct sbrec_load_balancer *lb;
+ SBREC_LOAD_BALANCER_TABLE_FOR_EACH (lb, lb_table) {
+ consider_lb_hairpin_flows(lb, local_datapaths, flow_table);
+ }
+}
+
/* Handles neighbor changes in mac_binding table. */
void
lflow_handle_changed_neighbors(
@@ -1197,6 +1400,8 @@ lflow_run(struct lflow_ctx_in *l_ctx_in, struct lflow_ctx_out *l_ctx_out)
add_neighbor_flows(l_ctx_in->sbrec_port_binding_by_name,
l_ctx_in->mac_binding_table, l_ctx_in->local_datapaths,
l_ctx_out->flow_table);
+ add_lb_hairpin_flows(l_ctx_in->lb_table, l_ctx_in->local_datapaths,
+ l_ctx_out->flow_table);
}
void
@@ -1256,6 +1461,15 @@ lflow_add_flows_for_datapath(const struct sbrec_datapath_binding *dp,
dhcp_opts_destroy(&dhcpv6_opts);
nd_ra_opts_destroy(&nd_ra_opts);
controller_event_opts_destroy(&controller_event_opts);
+
+ /* Add load balancer hairpin flows if the datapath has any load balancers
+ * associated. */
+ for (size_t i = 0; i < dp->n_load_balancers; i++) {
+ consider_lb_hairpin_flows(dp->load_balancers[i],
+ l_ctx_in->local_datapaths,
+ l_ctx_out->flow_table);
+ }
+
return handled;
}
@@ -1273,3 +1487,37 @@ lflow_handle_flows_for_lport(const struct sbrec_port_binding *pb,
return lflow_handle_changed_ref(REF_TYPE_PORTBINDING, pb_ref_name,
l_ctx_in, l_ctx_out, &changed);
}
+
+bool
+lflow_handle_changed_lbs(struct lflow_ctx_in *l_ctx_in,
+ struct lflow_ctx_out *l_ctx_out)
+{
+ const struct sbrec_load_balancer *lb;
+
+ SBREC_LOAD_BALANCER_TABLE_FOR_EACH_TRACKED (lb, l_ctx_in->lb_table) {
+ if (sbrec_load_balancer_is_deleted(lb)) {
+ VLOG_DBG("Remove hairpin flows for deleted load balancer "UUID_FMT,
+ UUID_ARGS(&lb->header_.uuid));
+ ofctrl_remove_flows(l_ctx_out->flow_table, &lb->header_.uuid);
+ }
+ }
+
+ SBREC_LOAD_BALANCER_TABLE_FOR_EACH_TRACKED (lb, l_ctx_in->lb_table) {
+ if (sbrec_load_balancer_is_deleted(lb)) {
+ continue;
+ }
+
+ if (!sbrec_load_balancer_is_new(lb)) {
+ VLOG_DBG("Remove hairpin flows for updated load balancer "UUID_FMT,
+ UUID_ARGS(&lb->header_.uuid));
+ ofctrl_remove_flows(l_ctx_out->flow_table, &lb->header_.uuid);
+ }
+
+ VLOG_DBG("Add load balancer hairpin flows for "UUID_FMT,
+ UUID_ARGS(&lb->header_.uuid));
+ consider_lb_hairpin_flows(lb, l_ctx_in->local_datapaths,
+ l_ctx_out->flow_table);
+ }
+
+ return true;
+}
diff --git a/controller/lflow.h b/controller/lflow.h
index 1251fb0f45..1225131deb 100644
--- a/controller/lflow.h
+++ b/controller/lflow.h
@@ -68,6 +68,9 @@ struct uuid;
#define OFTABLE_LOG_TO_PHY 65
#define OFTABLE_MAC_BINDING 66
#define OFTABLE_MAC_LOOKUP 67
+#define OFTABLE_CHK_LB_HAIRPIN 68
+#define OFTABLE_CHK_LB_HAIRPIN_REPLY 69
+#define OFTABLE_CT_SNAT_FOR_VIP 70
/* The number of tables for the ingress and egress pipelines. */
#define LOG_PIPELINE_LEN 24
@@ -132,6 +135,7 @@ struct lflow_ctx_in {
const struct sbrec_logical_flow_table *logical_flow_table;
const struct sbrec_multicast_group_table *mc_group_table;
const struct sbrec_chassis *chassis;
+ const struct sbrec_load_balancer_table *lb_table;
const struct hmap *local_datapaths;
const struct shash *addr_sets;
const struct shash *port_groups;
@@ -160,7 +164,7 @@ void lflow_handle_changed_neighbors(
const struct sbrec_mac_binding_table *,
const struct hmap *local_datapaths,
struct ovn_desired_flow_table *);
-
+bool lflow_handle_changed_lbs(struct lflow_ctx_in *, struct lflow_ctx_out *);
void lflow_destroy(void);
void lflow_cache_init(struct hmap *);
diff --git a/controller/ovn-controller.c b/controller/ovn-controller.c
index a06cae3ccb..4150b4cb1c 100644
--- a/controller/ovn-controller.c
+++ b/controller/ovn-controller.c
@@ -790,7 +790,8 @@ ctrl_register_ovs_idl(struct ovsdb_idl *ovs_idl)
SB_NODE(logical_flow, "logical_flow") \
SB_NODE(dhcp_options, "dhcp_options") \
SB_NODE(dhcpv6_options, "dhcpv6_options") \
- SB_NODE(dns, "dns")
+ SB_NODE(dns, "dns") \
+ SB_NODE(load_balancer, "load_balancer")
enum sb_engine_node {
#define SB_NODE(NAME, NAME_STR) SB_##NAME,
@@ -1682,6 +1683,10 @@ static void init_lflow_ctx(struct engine_node *node,
(struct sbrec_multicast_group_table *)EN_OVSDB_GET(
engine_get_input("SB_multicast_group", node));
+ struct sbrec_load_balancer_table *lb_table =
+ (struct sbrec_load_balancer_table *)EN_OVSDB_GET(
+ engine_get_input("SB_load_balancer", node));
+
const char *chassis_id = chassis_get_id();
const struct sbrec_chassis *chassis = NULL;
struct ovsdb_idl_index *sbrec_chassis_by_name =
@@ -1713,6 +1718,7 @@ static void init_lflow_ctx(struct engine_node *node,
l_ctx_in->logical_flow_table = logical_flow_table;
l_ctx_in->mc_group_table = multicast_group_table;
l_ctx_in->chassis = chassis;
+ l_ctx_in->lb_table = lb_table;
l_ctx_in->local_datapaths = &rt_data->local_datapaths;
l_ctx_in->addr_sets = addr_sets;
l_ctx_in->port_groups = port_groups;
@@ -2131,6 +2137,23 @@ flow_output_runtime_data_handler(struct engine_node *node,
return true;
}
+static bool
+flow_output_sb_load_balancer_handler(struct engine_node *node, void *data)
+{
+ struct ed_type_runtime_data *rt_data =
+ engine_get_input_data("runtime_data", node);
+
+ struct ed_type_flow_output *fo = data;
+ struct lflow_ctx_in l_ctx_in;
+ struct lflow_ctx_out l_ctx_out;
+ init_lflow_ctx(node, rt_data, fo, &l_ctx_in, &l_ctx_out);
+
+ bool handled = lflow_handle_changed_lbs(&l_ctx_in, &l_ctx_out);
+
+ engine_set_node_state(node, EN_UPDATED);
+ return handled;
+}
+
struct ovn_controller_exit_args {
bool *exiting;
bool *restart;
@@ -2327,6 +2350,8 @@ main(int argc, char *argv[])
engine_add_input(&en_flow_output, &en_sb_dhcp_options, NULL);
engine_add_input(&en_flow_output, &en_sb_dhcpv6_options, NULL);
engine_add_input(&en_flow_output, &en_sb_dns, NULL);
+ engine_add_input(&en_flow_output, &en_sb_load_balancer,
+ flow_output_sb_load_balancer_handler);
engine_add_input(&en_ct_zones, &en_ovs_open_vswitch, NULL);
engine_add_input(&en_ct_zones, &en_ovs_bridge, NULL);
diff --git a/include/ovn/logical-fields.h b/include/ovn/logical-fields.h
index ac6f2f909b..0fe5bc3bb4 100644
--- a/include/ovn/logical-fields.h
+++ b/include/ovn/logical-fields.h
@@ -57,6 +57,7 @@ enum mff_log_flags_bits {
MLF_LOCAL_ONLY_BIT = 4,
MLF_NESTED_CONTAINER_BIT = 5,
MLF_LOOKUP_MAC_BIT = 6,
+ MLF_LOOKUP_LB_HAIRPIN_BIT = 7,
};
/* MFF_LOG_FLAGS_REG flag assignments */
@@ -88,6 +89,8 @@ enum mff_log_flags {
/* Indicate that the lookup in the mac binding table was successful. */
MLF_LOOKUP_MAC = (1 << MLF_LOOKUP_MAC_BIT),
+
+ MLF_LOOKUP_LB_HAIRPIN = (1 << MLF_LOOKUP_LB_HAIRPIN_BIT),
};
/* OVN logical fields
diff --git a/tests/ovn.at b/tests/ovn.at
index 1c29cdf262..7d9201141e 100644
--- a/tests/ovn.at
+++ b/tests/ovn.at
@@ -22581,3 +22581,472 @@ AT_CHECK([test "$encap_rec_mvtep" == "$encap_rec_mvtep1"], [0], [])
OVN_CLEANUP([hv1])
AT_CLEANUP
+
+AT_SETUP([ovn -- Load Balancer LS hairpin OF flows])
+ovn_start
+
+net_add n1
+
+sim_add hv1
+as hv1
+ovs-vsctl add-br br-phys
+ovn_attach n1 br-phys 192.168.0.1
+ovs-vsctl -- add-port br-int hv1-vif1 -- \
+ set interface hv1-vif1 external-ids:iface-id=sw0-p1 \
+ options:tx_pcap=hv1/vif1-tx.pcap \
+ options:rxq_pcap=hv1/vif1-rx.pcap \
+ ofport-request=1
+ovs-vsctl -- add-port br-int hv1-vif2 -- \
+ set interface hv1-vif2 external-ids:iface-id=sw1-p1 \
+ options:tx_pcap=hv1/vif2-tx.pcap \
+ options:rxq_pcap=hv1/vif2-rx.pcap \
+ ofport-request=2
+
+sim_add hv2
+as hv2
+ovs-vsctl add-br br-phys
+ovn_attach n1 br-phys 192.168.0.2
+ovs-vsctl -- add-port br-int hv2-vif1 -- \
+ set interface hv2-vif1 external-ids:iface-id=sw0-p2 \
+ options:tx_pcap=hv2/vif1-tx.pcap \
+ options:rxq_pcap=hv2/vif1-rx.pcap \
+ ofport-request=1
+ovs-vsctl -- add-port br-int hv1-vif2 -- \
+ set interface hv1-vif2 external-ids:iface-id=sw1-p2 \
+ options:tx_pcap=hv1/vif2-tx.pcap \
+ options:rxq_pcap=hv1/vif2-rx.pcap \
+ ofport-request=2
+
+check ovn-nbctl --wait=hv ls-add sw0
+check ovn-nbctl lsp-add sw0 sw0-p1 -- lsp-set-addresses sw0-p1 00:00:00:00:00:01
+
+check ovn-nbctl ls-add sw1
+check ovn-nbctl lsp-add sw1 sw1-p1 -- lsp-set-addresses sw1-p1 00:00:00:00:01:01
+
+OVS_WAIT_UNTIL([test x$(ovn-nbctl lsp-get-up sw0-p1) = xup])
+OVS_WAIT_UNTIL([test x$(ovn-nbctl lsp-get-up sw1-p1) = xup])
+
+check ovn-nbctl lb-add lb-ipv4-tcp 88.88.88.88:8080 42.42.42.1:4041 tcp
+check ovn-nbctl lb-add lb-ipv4-udp 88.88.88.88:4040 42.42.42.1:2021 udp
+check ovn-nbctl lb-add lb-ipv6-tcp [[8800::0088]]:8080 [[4200::1]]:4041 tcp
+check ovn-nbctl --wait=hv lb-add lb-ipv6-udp [[8800::0088]]:4040 [[4200::1]]:2021 udp
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68], [0], [dnl
+NXST_FLOW reply (xid=0x8):
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=69], [0], [dnl
+NXST_FLOW reply (xid=0x8):
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=70], [0], [dnl
+NXST_FLOW reply (xid=0x8):
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=68], [0], [dnl
+NXST_FLOW reply (xid=0x8):
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69], [0], [dnl
+NXST_FLOW reply (xid=0x8):
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70], [0], [dnl
+NXST_FLOW reply (xid=0x8):
+])
+
+check ovn-nbctl --wait=hv ls-lb-add sw0 lb-ipv4-tcp
+
+OVS_WAIT_UNTIL(
+ [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 1]
+)
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | cut -d ' ' -f8-], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | cut -d ' ' -f8-], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | cut -d ' ' -f8-], [0], [dnl
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=68], [0], [dnl
+NXST_FLOW reply (xid=0x8):
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69], [0], [dnl
+NXST_FLOW reply (xid=0x8):
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70], [0], [dnl
+NXST_FLOW reply (xid=0x8):
+])
+
+check ovn-nbctl lb-add lb-ipv4-tcp 88.88.88.90:8080 42.42.42.42:4041,52.52.52.52:4042 tcp
+
+OVS_WAIT_UNTIL(
+ [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 3]
+)
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=88.88.88.90,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=88.88.88.90,tp_src=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | cut -d ' ' -f8-], [0], [dnl
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.90,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=68], [0], [dnl
+NXST_FLOW reply (xid=0x8):
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69], [0], [dnl
+NXST_FLOW reply (xid=0x8):
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70], [0], [dnl
+NXST_FLOW reply (xid=0x8):
+])
+
+check ovn-nbctl lsp-add sw0 sw0-p2
+# hv2 should bind sw0-p2 and it should install the LB hairpin flows.
+OVS_WAIT_UNTIL([test x$(ovn-nbctl lsp-get-up sw0-p2) = xup])
+
+OVS_WAIT_UNTIL(
+ [test $(as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 3]
+)
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=88.88.88.90,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=88.88.88.90,tp_src=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | cut -d ' ' -f8-], [0], [dnl
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.90,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+])
+
+check ovn-nbctl --wait=hv ls-lb-add sw0 lb-ipv4-udp
+
+OVS_WAIT_UNTIL(
+ [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 4]
+)
+
+OVS_WAIT_UNTIL(
+ [test $(as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 4]
+)
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=88.88.88.90,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=88.88.88.90,tp_src=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | cut -d ' ' -f8-], [0], [dnl
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.90,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=88.88.88.90,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=88.88.88.90,tp_src=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | cut -d ' ' -f8-], [0], [dnl
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.90,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+])
+
+check ovn-nbctl --wait=hv ls-lb-add sw0 lb-ipv6-tcp
+
+OVS_WAIT_UNTIL(
+ [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 5]
+)
+
+OVS_WAIT_UNTIL(
+ [test $(as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 5]
+)
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=88.88.88.90,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=88.88.88.90,tp_src=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,ct_state=+trk+dnat,ct_ipv6_dst=8800::88,ipv6,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.90,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=88.88.88.90,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=88.88.88.90,tp_src=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,ct_state=+trk+dnat,ct_ipv6_dst=8800::88,ipv6,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.90,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+])
+
+check ovn-nbctl --wait=hv ls-lb-add sw0 lb-ipv6-udp
+
+OVS_WAIT_UNTIL(
+ [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 6]
+)
+
+OVS_WAIT_UNTIL(
+ [test $(as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 6]
+)
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=88.88.88.90,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=88.88.88.90,tp_src=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,ct_state=+trk+dnat,ct_ipv6_dst=8800::88,ipv6,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.90,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=88.88.88.90,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=88.88.88.90,tp_src=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,ct_state=+trk+dnat,ct_ipv6_dst=8800::88,ipv6,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.90,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+])
+
+check ovn-nbctl --wait=hv ls-lb-add sw1 lb-ipv6-udp
+
+OVS_WAIT_UNTIL(
+ [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 7]
+)
+
+OVS_WAIT_UNTIL(
+ [test $(as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 7]
+)
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp6,metadata=0x2,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=88.88.88.90,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=88.88.88.90,tp_src=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp6,metadata=0x2,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,ct_state=+trk+dnat,ct_ipv6_dst=8800::88,ipv6,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+priority=100,ct_state=+trk+dnat,ct_ipv6_dst=8800::88,ipv6,metadata=0x2 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.90,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp6,metadata=0x2,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=88.88.88.90,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=88.88.88.90,tp_src=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp6,metadata=0x2,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,ct_state=+trk+dnat,ct_ipv6_dst=8800::88,ipv6,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+priority=100,ct_state=+trk+dnat,ct_ipv6_dst=8800::88,ipv6,metadata=0x2 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.90,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+])
+
+as hv2 ovs-vsctl del-port hv2-vif1
+OVS_WAIT_UNTIL([test x$(ovn-nbctl lsp-get-up sw0-p2) = xdown])
+
+# Trigger recompute on hv2 as sw0 will not be cleared from local_datapaths.
+as hv2 ovn-appctl -t ovn-controller recompute
+
+OVS_WAIT_UNTIL(
+ [test $(as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 0]
+)
+
+OVS_WAIT_UNTIL(
+ [test $(as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | wc -l) -eq 0]
+)
+
+OVS_WAIT_UNTIL(
+ [test $(as hv2 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | wc -l) -eq 0]
+)
+
+OVS_WAIT_UNTIL(
+ [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 7]
+)
+
+check ovn-nbctl --wait=hv lb-del lb-ipv4-tcp
+
+OVS_WAIT_UNTIL(
+ [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 4]
+)
+
+OVS_WAIT_UNTIL(
+ [test $(as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 0]
+)
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp6,metadata=0x2,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp6,metadata=0x2,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,ct_state=+trk+dnat,ct_ipv6_dst=8800::88,ipv6,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+priority=100,ct_state=+trk+dnat,ct_ipv6_dst=8800::88,ipv6,metadata=0x2 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+])
+
+check ovn-nbctl --wait=hv ls-del sw0
+check ovn-nbctl --wait=hv ls-del sw1
+
+OVS_WAIT_UNTIL(
+ [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 0]
+)
+
+OVS_WAIT_UNTIL(
+ [test $(as hv1 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | wc -l) -eq 0]
+)
+
+OVS_WAIT_UNTIL(
+ [test $(as hv1 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | wc -l) -eq 0]
+)
+
+OVS_WAIT_UNTIL(
+ [test $(as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 0]
+)
+
+OVS_WAIT_UNTIL(
+ [test $(as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | wc -l) -eq 0]
+)
+
+OVS_WAIT_UNTIL(
+ [test $(as hv2 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | wc -l) -eq 0]
+)
+
+OVN_CLEANUP([hv1], [hv2])
+AT_CLEANUP
From patchwork Thu Nov 5 07:43:44 2020
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Numan Siddique
X-Patchwork-Id: 1394772
Return-Path:
X-Original-To: incoming@patchwork.ozlabs.org
Delivered-To: patchwork-incoming@bilbo.ozlabs.org
Authentication-Results: ozlabs.org;
spf=pass (sender SPF authorized) smtp.mailfrom=openvswitch.org
(client-ip=140.211.166.136; helo=silver.osuosl.org;
envelope-from=ovs-dev-bounces@openvswitch.org; receiver=)
Authentication-Results: ozlabs.org;
dmarc=none (p=none dis=none) header.from=ovn.org
Received: from silver.osuosl.org (smtp3.osuosl.org [140.211.166.136])
(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
(No client certificate requested)
by ozlabs.org (Postfix) with ESMTPS id 4CRbBk5mWhz9sSs
for ; Thu, 5 Nov 2020 18:44:30 +1100 (AEDT)
Received: from localhost (localhost [127.0.0.1])
by silver.osuosl.org (Postfix) with ESMTP id F298522F26;
Thu, 5 Nov 2020 07:44:28 +0000 (UTC)
X-Virus-Scanned: amavisd-new at osuosl.org
Received: from silver.osuosl.org ([127.0.0.1])
by localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)
with ESMTP id SyQMHR+3wg22; Thu, 5 Nov 2020 07:44:16 +0000 (UTC)
Received: from lists.linuxfoundation.org (lf-lists.osuosl.org [140.211.9.56])
by silver.osuosl.org (Postfix) with ESMTP id 92D01228F1;
Thu, 5 Nov 2020 07:44:16 +0000 (UTC)
Received: from lf-lists.osuosl.org (localhost [127.0.0.1])
by lists.linuxfoundation.org (Postfix) with ESMTP id 76F0EC088B;
Thu, 5 Nov 2020 07:44:16 +0000 (UTC)
X-Original-To: dev@openvswitch.org
Delivered-To: ovs-dev@lists.linuxfoundation.org
Received: from fraxinus.osuosl.org (smtp4.osuosl.org [140.211.166.137])
by lists.linuxfoundation.org (Postfix) with ESMTP id ADF60C0889
for ; Thu, 5 Nov 2020 07:44:14 +0000 (UTC)
Received: from localhost (localhost [127.0.0.1])
by fraxinus.osuosl.org (Postfix) with ESMTP id 9A49285660
for ; Thu, 5 Nov 2020 07:44:14 +0000 (UTC)
X-Virus-Scanned: amavisd-new at osuosl.org
Received: from fraxinus.osuosl.org ([127.0.0.1])
by localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)
with ESMTP id PfkgN0T3TWKx for ;
Thu, 5 Nov 2020 07:44:12 +0000 (UTC)
X-Greylist: domain auto-whitelisted by SQLgrey-1.7.6
Received: from relay6-d.mail.gandi.net (relay6-d.mail.gandi.net
[217.70.183.198])
by fraxinus.osuosl.org (Postfix) with ESMTPS id 793888567A
for ; Thu, 5 Nov 2020 07:44:12 +0000 (UTC)
X-Originating-IP: 115.99.213.209
Received: from nusiddiq.home.org.home.org (unknown [115.99.213.209])
(Authenticated sender: numans@ovn.org)
by relay6-d.mail.gandi.net (Postfix) with ESMTPSA id AC28DC000C;
Thu, 5 Nov 2020 07:44:07 +0000 (UTC)
From: numans@ovn.org
To: dev@openvswitch.org
Date: Thu, 5 Nov 2020 13:13:44 +0530
Message-Id: <20201105074344.3794210-1-numans@ovn.org>
X-Mailer: git-send-email 2.28.0
In-Reply-To: <20201105074146.3793721-1-numans@ovn.org>
References: <20201105074146.3793721-1-numans@ovn.org>
MIME-Version: 1.0
Subject: [ovs-dev] [PATCH ovn v3 4/7] actions: Add new actions
chk_lb_hairpin, chk_lb_hairpin_reply and ct_snat_to_vip.
X-BeenThere: ovs-dev@openvswitch.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id:
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Errors-To: ovs-dev-bounces@openvswitch.org
Sender: "dev"
From: Numan Siddique
The chk_lb_hairpin action checks whether a packet destined to a load balancer VIP
is to be hairpinned back to the same destination — i.e., after load balancing the
destination IP matches the source IP — and, if so, sets the 1-bit destination register to 1.
The chk_lb_hairpin_reply action checks whether the packet is a reply to a hairpinned
packet and, if so, sets the 1-bit destination register to 1.
The ct_snat_to_vip action SNATs the source IP to the load balancer VIP if chk_lb_hairpin()
returned true.
These actions will be used by ovn-northd in the hairpin logical flows in an upcoming
patch, greatly reducing the number of hairpin logical flows required.
Signed-off-by: Numan Siddique
---
controller/lflow.c | 3 ++
include/ovn/actions.h | 15 ++++--
lib/actions.c | 116 ++++++++++++++++++++++++++++++++++++++----
ovn-sb.xml | 37 ++++++++++++++
tests/ovn.at | 39 ++++++++++++++
tests/test-ovn.c | 3 ++
utilities/ovn-trace.c | 65 ++++++++++++++++++++++-
7 files changed, 265 insertions(+), 13 deletions(-)
diff --git a/controller/lflow.c b/controller/lflow.c
index 588c3233fb..6a609b0139 100644
--- a/controller/lflow.c
+++ b/controller/lflow.c
@@ -698,6 +698,9 @@ add_matches_to_flow_table(const struct sbrec_logical_flow *lflow,
.output_ptable = output_ptable,
.mac_bind_ptable = OFTABLE_MAC_BINDING,
.mac_lookup_ptable = OFTABLE_MAC_LOOKUP,
+ .lb_hairpin_ptable = OFTABLE_CHK_LB_HAIRPIN,
+ .lb_hairpin_reply_ptable = OFTABLE_CHK_LB_HAIRPIN_REPLY,
+ .ct_snat_vip_ptable = OFTABLE_CT_SNAT_FOR_VIP,
};
ovnacts_encode(ovnacts->data, ovnacts->size, &ep, &ofpacts);
diff --git a/include/ovn/actions.h b/include/ovn/actions.h
index b4e5acabb9..630bbe79e4 100644
--- a/include/ovn/actions.h
+++ b/include/ovn/actions.h
@@ -83,7 +83,7 @@ struct ovn_extend_table;
OVNACT(PUT_DHCPV4_OPTS, ovnact_put_opts) \
OVNACT(PUT_DHCPV6_OPTS, ovnact_put_opts) \
OVNACT(SET_QUEUE, ovnact_set_queue) \
- OVNACT(DNS_LOOKUP, ovnact_dns_lookup) \
+ OVNACT(DNS_LOOKUP, ovnact_result) \
OVNACT(LOG, ovnact_log) \
OVNACT(PUT_ND_RA_OPTS, ovnact_put_opts) \
OVNACT(ND_NS, ovnact_nest) \
@@ -97,6 +97,9 @@ struct ovn_extend_table;
OVNACT(DHCP6_REPLY, ovnact_null) \
OVNACT(ICMP6_ERROR, ovnact_nest) \
OVNACT(REJECT, ovnact_nest) \
+ OVNACT(CHK_LB_HAIRPIN, ovnact_result) \
+ OVNACT(CHK_LB_HAIRPIN_REPLY, ovnact_result) \
+ OVNACT(CT_SNAT_TO_VIP, ovnact_null) \
/* enum ovnact_type, with a member OVNACT_ for each action. */
enum OVS_PACKED_ENUM ovnact_type {
@@ -338,8 +341,8 @@ struct ovnact_set_queue {
uint16_t queue_id;
};
-/* OVNACT_DNS_LOOKUP. */
-struct ovnact_dns_lookup {
+/* OVNACT_DNS_LOOKUP, OVNACT_CHK_LB_HAIRPIN, OVNACT_CHK_LB_HAIRPIN_REPLY. */
+struct ovnact_result {
struct ovnact ovnact;
struct expr_field dst; /* 1-bit destination field. */
};
@@ -727,6 +730,12 @@ struct ovnact_encode_params {
resubmit. */
uint8_t mac_lookup_ptable; /* OpenFlow table for
'lookup_arp'/'lookup_nd' to resubmit. */
+ uint8_t lb_hairpin_ptable; /* OpenFlow table for
+ * 'chk_lb_hairpin' to resubmit. */
+ uint8_t lb_hairpin_reply_ptable; /* OpenFlow table for
+ * 'chk_lb_hairpin_reply' to resubmit. */
+ uint8_t ct_snat_vip_ptable; /* OpenFlow table for
+ * 'ct_snat_to_vip' to resubmit. */
};
void ovnacts_encode(const struct ovnact[], size_t ovnacts_len,
diff --git a/lib/actions.c b/lib/actions.c
index 23e54ef2a6..015bcbc4dc 100644
--- a/lib/actions.c
+++ b/lib/actions.c
@@ -2655,13 +2655,14 @@ ovnact_set_queue_free(struct ovnact_set_queue *a OVS_UNUSED)
}
static void
-parse_dns_lookup(struct action_context *ctx, const struct expr_field *dst,
- struct ovnact_dns_lookup *dl)
+parse_ovnact_result(struct action_context *ctx, const char *name,
+ const char *prereq, const struct expr_field *dst,
+ struct ovnact_result *res)
{
- lexer_get(ctx->lexer); /* Skip dns_lookup. */
+ lexer_get(ctx->lexer); /* Skip action name. */
lexer_get(ctx->lexer); /* Skip '('. */
if (!lexer_match(ctx->lexer, LEX_T_RPAREN)) {
- lexer_error(ctx->lexer, "dns_lookup doesn't take any parameters");
+ lexer_error(ctx->lexer, "%s doesn't take any parameters", name);
return;
}
/* Validate that the destination is a 1-bit, modifiable field. */
@@ -2671,19 +2672,29 @@ parse_dns_lookup(struct action_context *ctx, const struct expr_field *dst,
free(error);
return;
}
- dl->dst = *dst;
- add_prerequisite(ctx, "udp");
+ res->dst = *dst;
+
+ if (prereq) {
+ add_prerequisite(ctx, prereq);
+ }
}
static void
-format_DNS_LOOKUP(const struct ovnact_dns_lookup *dl, struct ds *s)
+parse_dns_lookup(struct action_context *ctx, const struct expr_field *dst,
+ struct ovnact_result *dl)
+{
+ parse_ovnact_result(ctx, "dns_lookup", "udp", dst, dl);
+}
+
+static void
+format_DNS_LOOKUP(const struct ovnact_result *dl, struct ds *s)
{
expr_field_format(&dl->dst, s);
ds_put_cstr(s, " = dns_lookup();");
}
static void
-encode_DNS_LOOKUP(const struct ovnact_dns_lookup *dl,
+encode_DNS_LOOKUP(const struct ovnact_result *dl,
const struct ovnact_encode_params *ep OVS_UNUSED,
struct ofpbuf *ofpacts)
{
@@ -2700,7 +2711,7 @@ encode_DNS_LOOKUP(const struct ovnact_dns_lookup *dl,
static void
-ovnact_dns_lookup_free(struct ovnact_dns_lookup *dl OVS_UNUSED)
+ovnact_result_free(struct ovnact_result *dl OVS_UNUSED)
{
}
@@ -3472,6 +3483,83 @@ ovnact_fwd_group_free(struct ovnact_fwd_group *fwd_group)
free(fwd_group->child_ports);
}
+static void
+parse_chk_lb_hairpin(struct action_context *ctx, const struct expr_field *dst,
+ struct ovnact_result *res)
+{
+ parse_ovnact_result(ctx, "chk_lb_hairpin", NULL, dst, res);
+}
+
+static void
+parse_chk_lb_hairpin_reply(struct action_context *ctx,
+ const struct expr_field *dst,
+ struct ovnact_result *res)
+{
+ parse_ovnact_result(ctx, "chk_lb_hairpin_reply", NULL, dst, res);
+}
+
+
+static void
+format_CHK_LB_HAIRPIN(const struct ovnact_result *res, struct ds *s)
+{
+ expr_field_format(&res->dst, s);
+ ds_put_cstr(s, " = chk_lb_hairpin();");
+}
+
+static void
+format_CHK_LB_HAIRPIN_REPLY(const struct ovnact_result *res, struct ds *s)
+{
+ expr_field_format(&res->dst, s);
+ ds_put_cstr(s, " = chk_lb_hairpin_reply();");
+}
+
+static void
+encode_chk_lb_hairpin__(const struct ovnact_result *res,
+ uint8_t hairpin_table,
+ struct ofpbuf *ofpacts)
+{
+ struct mf_subfield dst = expr_resolve_field(&res->dst);
+ ovs_assert(dst.field);
+ put_load(0, MFF_LOG_FLAGS, MLF_LOOKUP_LB_HAIRPIN_BIT, 1, ofpacts);
+ emit_resubmit(ofpacts, hairpin_table);
+
+ struct ofpact_reg_move *orm = ofpact_put_REG_MOVE(ofpacts);
+ orm->dst = dst;
+ orm->src.field = mf_from_id(MFF_LOG_FLAGS);
+ orm->src.ofs = MLF_LOOKUP_LB_HAIRPIN_BIT;
+ orm->src.n_bits = 1;
+}
+
+static void
+encode_CHK_LB_HAIRPIN(const struct ovnact_result *res,
+ const struct ovnact_encode_params *ep,
+ struct ofpbuf *ofpacts)
+{
+ encode_chk_lb_hairpin__(res, ep->lb_hairpin_ptable, ofpacts);
+}
+
+static void
+encode_CHK_LB_HAIRPIN_REPLY(const struct ovnact_result *res,
+ const struct ovnact_encode_params *ep,
+ struct ofpbuf *ofpacts)
+{
+ encode_chk_lb_hairpin__(res, ep->lb_hairpin_reply_ptable, ofpacts);
+}
+
+static void
+format_CT_SNAT_TO_VIP(const struct ovnact_null *null OVS_UNUSED, struct ds *s)
+{
+ ds_put_cstr(s, "ct_snat_to_vip;");
+}
+
+static void
+encode_CT_SNAT_TO_VIP(const struct ovnact_null *null OVS_UNUSED,
+ const struct ovnact_encode_params *ep,
+ struct ofpbuf *ofpacts)
+{
+ emit_resubmit(ofpacts, ep->ct_snat_vip_ptable);
+}
+
/* Parses an assignment or exchange or put_dhcp_opts action. */
static void
parse_set_action(struct action_context *ctx)
@@ -3524,6 +3612,14 @@ parse_set_action(struct action_context *ctx)
&& lexer_lookahead(ctx->lexer) == LEX_T_LPAREN) {
parse_lookup_mac_bind_ip(ctx, &lhs, 128,
ovnact_put_LOOKUP_ND_IP(ctx->ovnacts));
+ } else if (!strcmp(ctx->lexer->token.s, "chk_lb_hairpin")
+ && lexer_lookahead(ctx->lexer) == LEX_T_LPAREN) {
+ parse_chk_lb_hairpin(ctx, &lhs,
+ ovnact_put_CHK_LB_HAIRPIN(ctx->ovnacts));
+ } else if (!strcmp(ctx->lexer->token.s, "chk_lb_hairpin_reply")
+ && lexer_lookahead(ctx->lexer) == LEX_T_LPAREN) {
+ parse_chk_lb_hairpin_reply(
+ ctx, &lhs, ovnact_put_CHK_LB_HAIRPIN_REPLY(ctx->ovnacts));
} else {
parse_assignment_action(ctx, false, &lhs);
}
@@ -3610,6 +3706,8 @@ parse_action(struct action_context *ctx)
ovnact_put_DHCP6_REPLY(ctx->ovnacts);
} else if (lexer_match_id(ctx->lexer, "reject")) {
parse_REJECT(ctx);
+ } else if (lexer_match_id(ctx->lexer, "ct_snat_to_vip")) {
+ ovnact_put_CT_SNAT_TO_VIP(ctx->ovnacts);
} else {
lexer_syntax_error(ctx->lexer, "expecting action");
}
diff --git a/ovn-sb.xml b/ovn-sb.xml
index bdd41c1f97..428996ebf5 100644
--- a/ovn-sb.xml
+++ b/ovn-sb.xml
@@ -2325,6 +2325,43 @@ tcp.flags = RST;
Delegation Router and managed IPv6 Prefix delegation state machine
+
+ R = chk_lb_hairpin();
+
+
+ This action checks if the packet under consideration was destined
+ to a load balancer VIP and it is hairpinned, i.e., after load
+ balancing the destination IP matches the source IP. If it is so,
+ then the 1-bit destination register R is set to 1.
+
+
+
+ R = chk_lb_hairpin_reply();
+
+
+ This action checks if the packet under consideration is from
+ one of the backend IP of a load balancer VIP and the destination IP
+ is the load balancer VIP. If it is so, then the 1-bit destination
+ register R is set to 1.
+
+
+
+ R = ct_snat_to_vip;
+
+
+ This action sends the packet through the SNAT zone to change the
+ source IP address of the packet to the load balancer VIP if the
+ original destination IP was load balancer VIP and commits the
+ connection. This action applies successfully only for the
+ hairpinned traffic i.e if the action chk_lb_hairpin
+ returned success. This action doesn't take any arguments and it
+ determines the SNAT IP internally.
+
+ The packet is not automatically sent to the next table. The caller
+ has to execute the next;
action explicitly after this
+ action to advance the packet to the next stage.
+
+
diff --git a/tests/ovn.at b/tests/ovn.at
index 7d9201141e..981b79b617 100644
--- a/tests/ovn.at
+++ b/tests/ovn.at
@@ -1716,6 +1716,45 @@ fwd_group(liveness="false", childports="eth0", "lsp1");
handle_dhcpv6_reply;
encodes as controller(userdata=00.00.00.13.00.00.00.00)
+# chk_lb_hairpin
+reg0[0] = chk_lb_hairpin();
+ encodes as set_field:0/0x80->reg10,resubmit(,68),move:NXM_NX_REG10[7]->NXM_NX_XXREG0[96]
+
+reg2[2] = chk_lb_hairpin();
+ encodes as set_field:0/0x80->reg10,resubmit(,68),move:NXM_NX_REG10[7]->NXM_NX_XXREG0[34]
+
+reg0 = chk_lb_hairpin();
+ Cannot use 32-bit field reg0[0..31] where 1-bit field is required.
+
+reg0[0] = chk_lb_hairpin(foo);
+ chk_lb_hairpin doesn't take any parameters
+
+chk_lb_hairpin;
+ Syntax error at `chk_lb_hairpin' expecting action.
+
+# chk_lb_hairpin_reply
+reg0[0] = chk_lb_hairpin_reply();
+ encodes as set_field:0/0x80->reg10,resubmit(,69),move:NXM_NX_REG10[7]->NXM_NX_XXREG0[96]
+
+reg2[2..3] = chk_lb_hairpin_reply();
+ Cannot use 2-bit field reg2[2..3] where 1-bit field is required.
+
+reg0 = chk_lb_hairpin_reply();
+ Cannot use 32-bit field reg0[0..31] where 1-bit field is required.
+
+reg0[0] = chk_lb_hairpin_reply(foo);
+ chk_lb_hairpin_reply doesn't take any parameters
+
+chk_lb_hairpin_reply;
+ Syntax error at `chk_lb_hairpin_reply' expecting action.
+
+# ct_snat_to_vip
+ct_snat_to_vip;
+ encodes as resubmit(,70)
+
+ct_snat_to_vip(foo);
+ Syntax error at `(' expecting `;'.
+
# Miscellaneous negative tests.
;
Syntax error at `;'.
diff --git a/tests/test-ovn.c b/tests/test-ovn.c
index 80d99b7a8b..6662ced54c 100644
--- a/tests/test-ovn.c
+++ b/tests/test-ovn.c
@@ -1342,6 +1342,9 @@ test_parse_actions(struct ovs_cmdl_context *ctx OVS_UNUSED)
.output_ptable = OFTABLE_SAVE_INPORT,
.mac_bind_ptable = OFTABLE_MAC_BINDING,
.mac_lookup_ptable = OFTABLE_MAC_LOOKUP,
+ .lb_hairpin_ptable = OFTABLE_CHK_LB_HAIRPIN,
+ .lb_hairpin_reply_ptable = OFTABLE_CHK_LB_HAIRPIN_REPLY,
+ .ct_snat_vip_ptable = OFTABLE_CT_SNAT_FOR_VIP,
};
struct ofpbuf ofpacts;
ofpbuf_init(&ofpacts, 0);
diff --git a/utilities/ovn-trace.c b/utilities/ovn-trace.c
index 29bf7a2084..5d92188ab2 100644
--- a/utilities/ovn-trace.c
+++ b/utilities/ovn-trace.c
@@ -1992,7 +1992,7 @@ execute_next(const struct ovnact_next *next,
static void
-execute_dns_lookup(const struct ovnact_dns_lookup *dl, struct flow *uflow,
+execute_dns_lookup(const struct ovnact_result *dl, struct flow *uflow,
struct ovs_list *super)
{
struct mf_subfield sf = expr_resolve_field(&dl->dst);
@@ -2224,6 +2224,57 @@ execute_ovnfield_load(const struct ovnact_load *load,
}
}
+static void
+execute_chk_lb_hairpin(const struct ovnact_result *dl, struct flow *uflow,
+ struct ovs_list *super)
+{
+ int family = (uflow->dl_type == htons(ETH_TYPE_IP) ? AF_INET
+ : uflow->dl_type == htons(ETH_TYPE_IPV6) ? AF_INET6
+ : AF_UNSPEC);
+ uint8_t res = 0;
+ if (family != AF_UNSPEC && uflow->ct_state & CS_DST_NAT) {
+ if (family == AF_INET) {
+ res = (uflow->nw_src == uflow->nw_dst) ? 1 : 0;
+ } else {
+ res = ipv6_addr_equals(&uflow->ipv6_src, &uflow->ipv6_dst) ? 1 : 0;
+ }
+ }
+
+ struct mf_subfield sf = expr_resolve_field(&dl->dst);
+ union mf_subvalue sv = { .u8_val = res };
+ mf_write_subfield_flow(&sf, &sv, uflow);
+
+ struct ds s = DS_EMPTY_INITIALIZER;
+ expr_field_format(&dl->dst, &s);
+ ovntrace_node_append(super, OVNTRACE_NODE_MODIFY,
+ "%s = %d", ds_cstr(&s), res);
+ ds_destroy(&s);
+}
+
+static void
+execute_chk_lb_hairpin_reply(const struct ovnact_result *dl,
+ struct flow *uflow,
+ struct ovs_list *super)
+{
+ struct mf_subfield sf = expr_resolve_field(&dl->dst);
+ union mf_subvalue sv = { .u8_val = 0 };
+ mf_write_subfield_flow(&sf, &sv, uflow);
+ ovntrace_node_append(super, OVNTRACE_NODE_ERROR,
+ "*** chk_lb_hairpin_reply action not implemented");
+ struct ds s = DS_EMPTY_INITIALIZER;
+ expr_field_format(&dl->dst, &s);
+ ovntrace_node_append(super, OVNTRACE_NODE_MODIFY,
+ "%s = 0", ds_cstr(&s));
+ ds_destroy(&s);
+}
+
+static void
+execute_ct_snat_to_vip(struct flow *uflow OVS_UNUSED, struct ovs_list *super)
+{
+ ovntrace_node_append(super, OVNTRACE_NODE_ERROR,
+ "*** ct_snat_to_vip action not implemented");
+}
+
static void
trace_actions(const struct ovnact *ovnacts, size_t ovnacts_len,
const struct ovntrace_datapath *dp, struct flow *uflow,
@@ -2440,6 +2491,18 @@ trace_actions(const struct ovnact *ovnacts, size_t ovnacts_len,
pipeline, super);
break;
+ case OVNACT_CHK_LB_HAIRPIN:
+ execute_chk_lb_hairpin(ovnact_get_CHK_LB_HAIRPIN(a), uflow, super);
+ break;
+
+ case OVNACT_CHK_LB_HAIRPIN_REPLY:
+ execute_chk_lb_hairpin_reply(ovnact_get_CHK_LB_HAIRPIN_REPLY(a),
+ uflow, super);
+ break;
+ case OVNACT_CT_SNAT_TO_VIP:
+ execute_ct_snat_to_vip(uflow, super);
+ break;
+
case OVNACT_TRIGGER_EVENT:
break;
From patchwork Thu Nov 5 07:44:09 2020
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Numan Siddique
X-Patchwork-Id: 1394774
Return-Path:
X-Original-To: incoming@patchwork.ozlabs.org
Delivered-To: patchwork-incoming@bilbo.ozlabs.org
Authentication-Results: ozlabs.org;
spf=pass (sender SPF authorized) smtp.mailfrom=openvswitch.org
(client-ip=140.211.166.137; helo=fraxinus.osuosl.org;
envelope-from=ovs-dev-bounces@openvswitch.org; receiver=)
Authentication-Results: ozlabs.org;
dmarc=none (p=none dis=none) header.from=ovn.org
Received: from fraxinus.osuosl.org (smtp4.osuosl.org [140.211.166.137])
(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
(No client certificate requested)
by ozlabs.org (Postfix) with ESMTPS id 4CRbC92ZXWz9sSs
for ; Thu, 5 Nov 2020 18:44:53 +1100 (AEDT)
Received: from localhost (localhost [127.0.0.1])
by fraxinus.osuosl.org (Postfix) with ESMTP id E876984461;
Thu, 5 Nov 2020 07:44:51 +0000 (UTC)
X-Virus-Scanned: amavisd-new at osuosl.org
Received: from fraxinus.osuosl.org ([127.0.0.1])
by localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)
with ESMTP id z6YJeNUjEtdn; Thu, 5 Nov 2020 07:44:49 +0000 (UTC)
Received: from lists.linuxfoundation.org (lf-lists.osuosl.org [140.211.9.56])
by fraxinus.osuosl.org (Postfix) with ESMTP id 7203284417;
Thu, 5 Nov 2020 07:44:49 +0000 (UTC)
Received: from lf-lists.osuosl.org (localhost [127.0.0.1])
by lists.linuxfoundation.org (Postfix) with ESMTP id 52E9CC1AD6;
Thu, 5 Nov 2020 07:44:49 +0000 (UTC)
X-Original-To: dev@openvswitch.org
Delivered-To: ovs-dev@lists.linuxfoundation.org
Received: from silver.osuosl.org (smtp3.osuosl.org [140.211.166.136])
by lists.linuxfoundation.org (Postfix) with ESMTP id C9D6EC088B
for ; Thu, 5 Nov 2020 07:44:47 +0000 (UTC)
Received: from localhost (localhost [127.0.0.1])
by silver.osuosl.org (Postfix) with ESMTP id C14682050D
for ; Thu, 5 Nov 2020 07:44:47 +0000 (UTC)
X-Virus-Scanned: amavisd-new at osuosl.org
Received: from silver.osuosl.org ([127.0.0.1])
by localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)
with ESMTP id eB97qwQQ8twt for ;
Thu, 5 Nov 2020 07:44:33 +0000 (UTC)
X-Greylist: domain auto-whitelisted by SQLgrey-1.7.6
Received: from relay11.mail.gandi.net (relay11.mail.gandi.net
[217.70.178.231])
by silver.osuosl.org (Postfix) with ESMTPS id 9A8CF22D10
for ; Thu, 5 Nov 2020 07:44:18 +0000 (UTC)
Received: from nusiddiq.home.org.home.org (unknown [115.99.213.209])
(Authenticated sender: numans@ovn.org)
by relay11.mail.gandi.net (Postfix) with ESMTPSA id 81531100004;
Thu, 5 Nov 2020 07:44:13 +0000 (UTC)
From: numans@ovn.org
To: dev@openvswitch.org
Date: Thu, 5 Nov 2020 13:14:09 +0530
Message-Id: <20201105074409.3794287-1-numans@ovn.org>
X-Mailer: git-send-email 2.28.0
In-Reply-To: <20201105074146.3793721-1-numans@ovn.org>
References: <20201105074146.3793721-1-numans@ovn.org>
MIME-Version: 1.0
Subject: [ovs-dev] [PATCH ovn v3 5/7] northd: Make use of new hairpin
actions.
X-BeenThere: ovs-dev@openvswitch.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id:
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Errors-To: ovs-dev-bounces@openvswitch.org
Sender: "dev"
From: Numan Siddique
This patch makes use of the new hairpin OVN actions - chk_lb_hairpin, chk_lb_hairpin_reply
and ct_snat_to_vip.
Suppose there are 'm' load balancers associated to a logical switch and each load balancer
has 'n' VIPs and each VIP has 'p' backends then ovn-northd adds (m * ((n * p) + n))
hairpin logical flows. After this patch, ovn-northd adds just 5 hairpin logical flows.
With this patch number of hairpin related OF flows on a chassis are almost the same as before,
but in a large scale deployment, this reduces memory consumption and load on ovn-northd and
SB DB ovsdb-servers.
Signed-off-by: Numan Siddique
---
northd/ovn-northd.8.xml | 65 +++++++++++-----
northd/ovn-northd.c | 159 ++++++++++++++--------------------------
tests/ovn-northd.at | 28 +++----
tests/ovn.at | 2 +-
4 files changed, 116 insertions(+), 138 deletions(-)
diff --git a/northd/ovn-northd.8.xml b/northd/ovn-northd.8.xml
index b37cecd381..8c0a4a98f5 100644
--- a/northd/ovn-northd.8.xml
+++ b/northd/ovn-northd.8.xml
@@ -718,24 +718,55 @@
Ingress Table 12: Pre-Hairpin
-
- For all configured load balancer VIPs a priority-2 flow that
- matches on traffic that needs to be hairpinned, i.e., after load
- balancing the destination IP matches the source IP, which sets
-
reg0[6] = 1
and executes ct_snat(VIP)
- to force replies to these packets to come back through OVN.
+ If the logical switch has load balancer(s) configured, then a
+ priority-100 flow is added with the match
ip && ct.trk && ct.dnat
to check if the
+ packet needs to be hairpinned (if after load balancing the destination
+ IP matches the source IP) or not by executing the action
+ reg0[6] = chk_lb_hairpin();
and advances the packet to
+ the next table.
+
+
+ -
+ If the logical switch has load balancer(s) configured, then a
+ priority-90 flow is added with the match
ip
to check if
+ the packet is a reply for a hairpinned connection or not by executing
+ the action reg0[6] = chk_lb_hairpin_reply();
and advances
+ the packet to the next table.
+
-
- For all configured load balancer VIPs a priority-1 flow that
- matches on replies to hairpinned traffic, i.e., destination IP is VIP,
- source IP is the backend IP and source L4 port is backend port, which
- sets
reg0[6] = 1
and executes ct_snat;
.
+ A priority-0 flow that simply moves traffic to the next table.
+
+
+ Ingress Table 13: Nat-Hairpin
+
+ -
+ If the logical switch has load balancer(s) configured, then a
+ priority-100 flow is added with the match
+
ip && (ct.new || ct.est) && ct.trk &&
+ ct.dnat && reg0[6] == 1
which hairpins the traffic by
+ NATting source IP to the load balancer VIP by executing the action
+ ct_snat_to_vip
and advances the packet to the next table.
+
+
+ -
+ If the logical switch has load balancer(s) configured, then a
+ priority-90 flow is added with the match
+
ip && reg0[6] == 1
which matches on the replies
+ of hairpinned traffic (i.e., destination IP is VIP,
+ source IP is the backend IP and source L4 port is backend port for L4
+ load balancers) and executes ct_snat
and advances the
+ packet to the next table.
+
+
-
A priority-0 flow that simply moves traffic to the next table.
- Ingress Table 13: Hairpin
+ Ingress Table 14: Hairpin
-
A priority-1 flow that hairpins traffic matched by non-default
@@ -748,7 +779,7 @@
- Ingress Table 14: ARP/ND responder
+ Ingress Table 15: ARP/ND responder
This table implements ARP/ND responder in a logical switch for known
@@ -1038,7 +1069,7 @@ output;
-
Ingress Table 15: DHCP option processing
+ Ingress Table 16: DHCP option processing
This table adds the DHCPv4 options to a DHCPv4 packet from the
@@ -1099,7 +1130,7 @@ next;
-
Ingress Table 16: DHCP responses
+ Ingress Table 17: DHCP responses
This table implements DHCP responder for the DHCP replies generated by
@@ -1180,7 +1211,7 @@ output;
-
Ingress Table 17 DNS Lookup
+ Ingress Table 18 DNS Lookup
This table looks up and resolves the DNS names to the corresponding
@@ -1209,7 +1240,7 @@ reg0[4] = dns_lookup(); next;
-
Ingress Table 18 DNS Responses
+ Ingress Table 19 DNS Responses
This table implements DNS responder for the DNS replies generated by
@@ -1244,7 +1275,7 @@ output;
-
Ingress table 19 External ports
+ Ingress table 20 External ports
Traffic from the external
logical ports enter the ingress
@@ -1287,7 +1318,7 @@ output;
-
Ingress Table 20 Destination Lookup
+ Ingress Table 21 Destination Lookup
This table implements switching behavior. It contains these logical
diff --git a/northd/ovn-northd.c b/northd/ovn-northd.c
index d39ae8c9ea..bf6c7c219d 100644
--- a/northd/ovn-northd.c
+++ b/northd/ovn-northd.c
@@ -150,14 +150,15 @@ enum ovn_stage {
PIPELINE_STAGE(SWITCH, IN, LB, 10, "ls_in_lb") \
PIPELINE_STAGE(SWITCH, IN, STATEFUL, 11, "ls_in_stateful") \
PIPELINE_STAGE(SWITCH, IN, PRE_HAIRPIN, 12, "ls_in_pre_hairpin") \
- PIPELINE_STAGE(SWITCH, IN, HAIRPIN, 13, "ls_in_hairpin") \
- PIPELINE_STAGE(SWITCH, IN, ARP_ND_RSP, 14, "ls_in_arp_rsp") \
- PIPELINE_STAGE(SWITCH, IN, DHCP_OPTIONS, 15, "ls_in_dhcp_options") \
- PIPELINE_STAGE(SWITCH, IN, DHCP_RESPONSE, 16, "ls_in_dhcp_response") \
- PIPELINE_STAGE(SWITCH, IN, DNS_LOOKUP, 17, "ls_in_dns_lookup") \
- PIPELINE_STAGE(SWITCH, IN, DNS_RESPONSE, 18, "ls_in_dns_response") \
- PIPELINE_STAGE(SWITCH, IN, EXTERNAL_PORT, 19, "ls_in_external_port") \
- PIPELINE_STAGE(SWITCH, IN, L2_LKUP, 20, "ls_in_l2_lkup") \
+ PIPELINE_STAGE(SWITCH, IN, NAT_HAIRPIN, 13, "ls_in_nat_hairpin") \
+ PIPELINE_STAGE(SWITCH, IN, HAIRPIN, 14, "ls_in_hairpin") \
+ PIPELINE_STAGE(SWITCH, IN, ARP_ND_RSP, 15, "ls_in_arp_rsp") \
+ PIPELINE_STAGE(SWITCH, IN, DHCP_OPTIONS, 16, "ls_in_dhcp_options") \
+ PIPELINE_STAGE(SWITCH, IN, DHCP_RESPONSE, 17, "ls_in_dhcp_response") \
+ PIPELINE_STAGE(SWITCH, IN, DNS_LOOKUP, 18, "ls_in_dns_lookup") \
+ PIPELINE_STAGE(SWITCH, IN, DNS_RESPONSE, 19, "ls_in_dns_response") \
+ PIPELINE_STAGE(SWITCH, IN, EXTERNAL_PORT, 20, "ls_in_external_port") \
+ PIPELINE_STAGE(SWITCH, IN, L2_LKUP, 21, "ls_in_l2_lkup") \
\
/* Logical switch egress stages. */ \
PIPELINE_STAGE(SWITCH, OUT, PRE_LB, 0, "ls_out_pre_lb") \
@@ -5740,84 +5741,6 @@ build_lb(struct ovn_datapath *od, struct hmap *lflows)
}
}
-static void
-build_lb_hairpin_rules(struct ovn_datapath *od, struct hmap *lflows,
- struct ovn_lb *lb, struct lb_vip *lb_vip,
- const char *ip_match, const char *proto)
-{
- if (lb_vip->n_backends == 0) {
- return;
- }
-
- struct ds action = DS_EMPTY_INITIALIZER;
- struct ds match_initiator = DS_EMPTY_INITIALIZER;
- struct ds match_reply = DS_EMPTY_INITIALIZER;
- struct ds proto_match = DS_EMPTY_INITIALIZER;
-
- /* Ingress Pre-Hairpin table.
- * - Priority 2: SNAT load balanced traffic that needs to be hairpinned:
- * - Both SRC and DST IP match backend->ip and destination port
- * matches backend->port.
- * - Priority 1: unSNAT replies to hairpinned load balanced traffic.
- * - SRC IP matches backend->ip, DST IP matches LB VIP and source port
- * matches backend->port.
- */
- ds_put_char(&match_reply, '(');
- for (size_t i = 0; i < lb_vip->n_backends; i++) {
- struct lb_vip_backend *backend = &lb_vip->backends[i];
-
- /* Packets that after load balancing have equal source and
- * destination IPs should be hairpinned.
- */
- if (lb_vip->vip_port) {
- ds_put_format(&proto_match, " && %s.dst == %"PRIu16,
- proto, backend->port);
- }
- ds_put_format(&match_initiator, "(%s.src == %s && %s.dst == %s%s)",
- ip_match, backend->ip, ip_match, backend->ip,
- ds_cstr(&proto_match));
-
- /* Replies to hairpinned traffic are originated by backend->ip:port. */
- ds_clear(&proto_match);
- if (lb_vip->vip_port) {
- ds_put_format(&proto_match, " && %s.src == %"PRIu16, proto,
- backend->port);
- }
- ds_put_format(&match_reply, "(%s.src == %s%s)", ip_match, backend->ip,
- ds_cstr(&proto_match));
- ds_clear(&proto_match);
-
- if (i < lb_vip->n_backends - 1) {
- ds_put_cstr(&match_initiator, " || ");
- ds_put_cstr(&match_reply, " || ");
- }
- }
- ds_put_char(&match_reply, ')');
-
- /* SNAT hairpinned initiator traffic so that the reply traffic is
- * also directed through OVN.
- */
- ds_put_format(&action, REGBIT_HAIRPIN " = 1; ct_snat(%s);",
- lb_vip->vip);
- ovn_lflow_add_with_hint(lflows, od, S_SWITCH_IN_PRE_HAIRPIN, 2,
- ds_cstr(&match_initiator), ds_cstr(&action),
- &lb->nlb->header_);
-
- /* Replies to hairpinned traffic are destined to the LB VIP. */
- ds_put_format(&match_reply, " && %s.dst == %s", ip_match, lb_vip->vip);
-
- /* UNSNAT replies for hairpinned traffic. */
- ovn_lflow_add_with_hint(lflows, od, S_SWITCH_IN_PRE_HAIRPIN, 1,
- ds_cstr(&match_reply),
- REGBIT_HAIRPIN " = 1; ct_snat;",
- &lb->nlb->header_);
-
- ds_destroy(&action);
- ds_destroy(&match_initiator);
- ds_destroy(&match_reply);
- ds_destroy(&proto_match);
-}
-
static void
build_lb_rules(struct ovn_datapath *od, struct hmap *lflows, struct ovn_lb *lb)
{
@@ -5862,12 +5785,6 @@ build_lb_rules(struct ovn_datapath *od, struct hmap *lflows, struct ovn_lb *lb)
ds_destroy(&match);
ds_destroy(&action);
-
- /* Also install flows that allow hairpinning of traffic (i.e., if
- * a load balancer VIP is DNAT-ed to a backend that happens to be
- * the source of the traffic).
- */
- build_lb_hairpin_rules(od, lflows, lb, lb_vip, ip_match, proto);
}
}
@@ -5914,24 +5831,53 @@ build_stateful(struct ovn_datapath *od, struct hmap *lflows, struct hmap *lbs)
ovs_assert(lb);
build_lb_rules(od, lflows, lb);
}
+}
- /* Ingress Pre-Hairpin table (Priority 0). Packets that don't need
- * hairpinning should continue processing.
+static void
+build_lb_hairpin(struct ovn_datapath *od, struct hmap *lflows)
+{
+ /* Ingress Pre-Hairpin/Nat-Hairpin/Hairpin tables (Priority 0).
+ * Packets that don't need hairpinning should continue processing.
*/
ovn_lflow_add(lflows, od, S_SWITCH_IN_PRE_HAIRPIN, 0, "1", "next;");
-
- /* Ingress Hairpin table.
- * - Priority 0: Packets that don't need hairpinning should continue
- * processing.
- * - Priority 1: Packets that were SNAT-ed for hairpinning should be
- * looped back (i.e., swap ETH addresses and send back on inport).
- */
- ovn_lflow_add(lflows, od, S_SWITCH_IN_HAIRPIN, 1, REGBIT_HAIRPIN " == 1",
- "eth.dst <-> eth.src;"
- "outport = inport;"
- "flags.loopback = 1;"
- "output;");
+ ovn_lflow_add(lflows, od, S_SWITCH_IN_NAT_HAIRPIN, 0, "1", "next;");
ovn_lflow_add(lflows, od, S_SWITCH_IN_HAIRPIN, 0, "1", "next;");
+
+ if (has_lb_vip(od)) {
+ /* Check if the packet needs to be hairpinned. */
+ ovn_lflow_add_with_hint(lflows, od, S_SWITCH_IN_PRE_HAIRPIN, 100,
+ "ip && ct.trk && ct.dnat",
+ REGBIT_HAIRPIN " = chk_lb_hairpin(); next;",
+ &od->nbs->header_);
+
+ /* Check if the packet is a reply of hairpinned traffic. */
+ ovn_lflow_add_with_hint(lflows, od, S_SWITCH_IN_PRE_HAIRPIN, 90, "ip",
+ REGBIT_HAIRPIN " = chk_lb_hairpin_reply(); "
+ "next;", &od->nbs->header_);
+
+ /* If packet needs to be hairpinned, snat the src ip with the VIP. */
+ ovn_lflow_add_with_hint(lflows, od, S_SWITCH_IN_NAT_HAIRPIN, 100,
+ "ip && (ct.new || ct.est) && ct.trk && ct.dnat"
+ " && "REGBIT_HAIRPIN " == 1",
+ "ct_snat_to_vip; next;",
+ &od->nbs->header_);
+
+ /* For the reply of hairpinned traffic, snat the src ip to the VIP. */
+ ovn_lflow_add_with_hint(lflows, od, S_SWITCH_IN_NAT_HAIRPIN, 90,
+ "ip && "REGBIT_HAIRPIN " == 1", "ct_snat;",
+ &od->nbs->header_);
+
+ /* Ingress Hairpin table.
+ * - Priority 1: Packets that were SNAT-ed for hairpinning should be
+ * looped back (i.e., swap ETH addresses and send back on inport).
+ */
+ ovn_lflow_add(lflows, od, S_SWITCH_IN_HAIRPIN, 1,
+ REGBIT_HAIRPIN " == 1",
+ "eth.dst <-> eth.src;"
+ "outport = inport;"
+ "flags.loopback = 1;"
+ "output;");
+ }
}
static void
@@ -6611,6 +6557,7 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
build_qos(od, lflows);
build_lb(od, lflows);
build_stateful(od, lflows, lbs);
+ build_lb_hairpin(od, lflows);
}
/* Build logical flows for the forwarding groups */
diff --git a/tests/ovn-northd.at b/tests/ovn-northd.at
index 872e88c9a4..b91d0a9f9e 100644
--- a/tests/ovn-northd.at
+++ b/tests/ovn-northd.at
@@ -1725,13 +1725,13 @@ action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implici
AT_CHECK([grep "ls_out_acl" sw0flows | grep pg0 | sort], [0], [dnl
table=5 (ls_out_acl ), priority=2003 , dnl
match=(outport == @pg0 && ip6 && udp), dnl
-action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=20); };)
+action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
])
AT_CHECK([grep "ls_out_acl" sw1flows | grep pg0 | sort], [0], [dnl
table=5 (ls_out_acl ), priority=2003 , dnl
match=(outport == @pg0 && ip6 && udp), dnl
-action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=20); };)
+action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
])
AS_BOX([2])
@@ -1746,19 +1746,19 @@ AT_CAPTURE_FILE([sw1flows2])
AT_CHECK([grep "ls_out_acl" sw0flows2 | grep pg0 | sort], [0], [dnl
table=5 (ls_out_acl ), priority=2002 , dnl
match=(outport == @pg0 && ip4 && udp), dnl
-action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=20); };)
+action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
table=5 (ls_out_acl ), priority=2003 , dnl
match=(outport == @pg0 && ip6 && udp), dnl
-action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=20); };)
+action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
])
AT_CHECK([grep "ls_out_acl" sw1flows2 | grep pg0 | sort], [0], [dnl
table=5 (ls_out_acl ), priority=2002 , dnl
match=(outport == @pg0 && ip4 && udp), dnl
-action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=20); };)
+action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
table=5 (ls_out_acl ), priority=2003 , dnl
match=(outport == @pg0 && ip6 && udp), dnl
-action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=20); };)
+action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
])
AS_BOX([3])
@@ -1777,16 +1777,16 @@ match=(reg0[[7]] == 1 && (outport == @pg0 && ip)), action=(reg0[[1]] = 1; next;)
match=(reg0[[8]] == 1 && (outport == @pg0 && ip)), action=(next;)
table=5 (ls_out_acl ), priority=2002 , dnl
match=((reg0[[10]] == 1) && outport == @pg0 && ip4 && udp), dnl
-action=(ct_commit { ct_label.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=20); };)
+action=(ct_commit { ct_label.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
table=5 (ls_out_acl ), priority=2002 , dnl
match=((reg0[[9]] == 1) && outport == @pg0 && ip4 && udp), dnl
-action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=20); };)
+action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
table=5 (ls_out_acl ), priority=2003 , dnl
match=((reg0[[10]] == 1) && outport == @pg0 && ip6 && udp), dnl
-action=(ct_commit { ct_label.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=20); };)
+action=(ct_commit { ct_label.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
table=5 (ls_out_acl ), priority=2003 , dnl
match=((reg0[[9]] == 1) && outport == @pg0 && ip6 && udp), dnl
-action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=20); };)
+action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
])
AT_CHECK([grep "ls_out_acl" sw1flows3 | grep pg0 | sort], [0], [dnl
@@ -1796,16 +1796,16 @@ match=(reg0[[7]] == 1 && (outport == @pg0 && ip)), action=(reg0[[1]] = 1; next;)
match=(reg0[[8]] == 1 && (outport == @pg0 && ip)), action=(next;)
table=5 (ls_out_acl ), priority=2002 , dnl
match=((reg0[[10]] == 1) && outport == @pg0 && ip4 && udp), dnl
-action=(ct_commit { ct_label.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=20); };)
+action=(ct_commit { ct_label.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
table=5 (ls_out_acl ), priority=2002 , dnl
match=((reg0[[9]] == 1) && outport == @pg0 && ip4 && udp), dnl
-action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=20); };)
+action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
table=5 (ls_out_acl ), priority=2003 , dnl
match=((reg0[[10]] == 1) && outport == @pg0 && ip6 && udp), dnl
-action=(ct_commit { ct_label.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=20); };)
+action=(ct_commit { ct_label.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
table=5 (ls_out_acl ), priority=2003 , dnl
match=((reg0[[9]] == 1) && outport == @pg0 && ip6 && udp), dnl
-action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=20); };)
+action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
])
AT_CLEANUP
diff --git a/tests/ovn.at b/tests/ovn.at
index 981b79b617..9206865db1 100644
--- a/tests/ovn.at
+++ b/tests/ovn.at
@@ -14987,7 +14987,7 @@ logical_port=ls1-lp_ext1`
# There should be a flow in hv2 to drop traffic from ls1-lp_ext1 destined
# to router mac.
AT_CHECK([as hv2 ovs-ofctl dump-flows br-int \
-table=27,dl_src=f0:00:00:00:00:03,dl_dst=a0:10:00:00:00:01 | \
+table=28,dl_src=f0:00:00:00:00:03,dl_dst=a0:10:00:00:00:01 | \
grep -c "actions=drop"], [0], [1
])
From patchwork Thu Nov 5 07:44:15 2020
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Numan Siddique
X-Patchwork-Id: 1394771
Return-Path:
X-Original-To: incoming@patchwork.ozlabs.org
Delivered-To: patchwork-incoming@bilbo.ozlabs.org
Authentication-Results: ozlabs.org;
spf=pass (sender SPF authorized) smtp.mailfrom=openvswitch.org
(client-ip=140.211.166.133; helo=hemlock.osuosl.org;
envelope-from=ovs-dev-bounces@openvswitch.org; receiver=)
Authentication-Results: ozlabs.org;
dmarc=none (p=none dis=none) header.from=ovn.org
Received: from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133])
(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
(No client certificate requested)
by ozlabs.org (Postfix) with ESMTPS id 4CRbBg3mDFz9sSs
for ; Thu, 5 Nov 2020 18:44:27 +1100 (AEDT)
Received: from localhost (localhost [127.0.0.1])
by hemlock.osuosl.org (Postfix) with ESMTP id E478F870A9;
Thu, 5 Nov 2020 07:44:25 +0000 (UTC)
X-Virus-Scanned: amavisd-new at osuosl.org
Received: from hemlock.osuosl.org ([127.0.0.1])
by localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)
with ESMTP id f0igo7ebuU1w; Thu, 5 Nov 2020 07:44:25 +0000 (UTC)
Received: from lists.linuxfoundation.org (lf-lists.osuosl.org [140.211.9.56])
by hemlock.osuosl.org (Postfix) with ESMTP id 675648704C;
Thu, 5 Nov 2020 07:44:25 +0000 (UTC)
Received: from lf-lists.osuosl.org (localhost [127.0.0.1])
by lists.linuxfoundation.org (Postfix) with ESMTP id 4F9C2C088B;
Thu, 5 Nov 2020 07:44:25 +0000 (UTC)
X-Original-To: dev@openvswitch.org
Delivered-To: ovs-dev@lists.linuxfoundation.org
Received: from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133])
by lists.linuxfoundation.org (Postfix) with ESMTP id BC20DC1AD4
for ; Thu, 5 Nov 2020 07:44:23 +0000 (UTC)
Received: from localhost (localhost [127.0.0.1])
by hemlock.osuosl.org (Postfix) with ESMTP id A4C8B870AE
for ; Thu, 5 Nov 2020 07:44:23 +0000 (UTC)
X-Virus-Scanned: amavisd-new at osuosl.org
Received: from hemlock.osuosl.org ([127.0.0.1])
by localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)
with ESMTP id zBycImrOmFwP for ;
Thu, 5 Nov 2020 07:44:23 +0000 (UTC)
X-Greylist: domain auto-whitelisted by SQLgrey-1.7.6
Received: from relay8-d.mail.gandi.net (relay8-d.mail.gandi.net
[217.70.183.201])
by hemlock.osuosl.org (Postfix) with ESMTPS id A9A1D870A5
for ; Thu, 5 Nov 2020 07:44:22 +0000 (UTC)
X-Originating-IP: 115.99.213.209
Received: from nusiddiq.home.org.home.org (unknown [115.99.213.209])
(Authenticated sender: numans@ovn.org)
by relay8-d.mail.gandi.net (Postfix) with ESMTPSA id 697EC1BF205;
Thu, 5 Nov 2020 07:44:19 +0000 (UTC)
From: numans@ovn.org
To: dev@openvswitch.org
Date: Thu, 5 Nov 2020 13:14:15 +0530
Message-Id: <20201105074415.3794341-1-numans@ovn.org>
X-Mailer: git-send-email 2.28.0
In-Reply-To: <20201105074146.3793721-1-numans@ovn.org>
References: <20201105074146.3793721-1-numans@ovn.org>
MIME-Version: 1.0
Subject: [ovs-dev] [PATCH ovn v3 6/7] ovn-detrace: Add SB Load Balancer
cookie handler.
X-BeenThere: ovs-dev@openvswitch.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id:
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Errors-To: ovs-dev-bounces@openvswitch.org
Sender: "dev"
From: Numan Siddique
Signed-off-by: Numan Siddique
---
utilities/ovn-detrace.in | 11 ++++++++++-
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/utilities/ovn-detrace.in b/utilities/ovn-detrace.in
index 1dd98df0aa..af42b5fc4e 100755
--- a/utilities/ovn-detrace.in
+++ b/utilities/ovn-detrace.in
@@ -328,6 +328,14 @@ class ChassisHandler(CookieHandlerByUUUID):
def print_record(self, chassis):
print_p('Chassis: %s' % (chassis_str([chassis])))
+class SBLoadBalancerHandler(CookieHandlerByUUUID):
+ def __init__(self, ovnsb_db):
+ super(SBLoadBalancerHandler, self).__init__(ovnsb_db, 'Load_Balancer')
+
+ def print_record(self, lb):
+ print_p('Load Balancer: %s protocol %s vips %s' % (
+ lb.name, lb.protocol, lb.vips))
+
class OvsInterfaceHandler(CookieHandler):
def __init__(self, ovs_db):
super(OvsInterfaceHandler, self).__init__(ovs_db, 'Interface')
@@ -452,7 +460,8 @@ def main():
PortBindingHandler(ovsdb_ovnsb),
MacBindingHandler(ovsdb_ovnsb),
MulticastGroupHandler(ovsdb_ovnsb),
- ChassisHandler(ovsdb_ovnsb)
+ ChassisHandler(ovsdb_ovnsb),
+ SBLoadBalancerHandler(ovsdb_ovnsb)
]
regex_cookie = re.compile(r'^.*cookie 0x([0-9a-fA-F]+)')
From patchwork Thu Nov 5 07:44:21 2020
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Numan Siddique
X-Patchwork-Id: 1394773
Return-Path:
X-Original-To: incoming@patchwork.ozlabs.org
Delivered-To: patchwork-incoming@bilbo.ozlabs.org
Authentication-Results: ozlabs.org;
spf=pass (sender SPF authorized) smtp.mailfrom=openvswitch.org
(client-ip=140.211.166.133; helo=hemlock.osuosl.org;
envelope-from=ovs-dev-bounces@openvswitch.org; receiver=)
Authentication-Results: ozlabs.org;
dmarc=none (p=none dis=none) header.from=ovn.org
Received: from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133])
(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
(No client certificate requested)
by ozlabs.org (Postfix) with ESMTPS id 4CRbBp1Thtz9sTL
for ; Thu, 5 Nov 2020 18:44:34 +1100 (AEDT)
Received: from localhost (localhost [127.0.0.1])
by hemlock.osuosl.org (Postfix) with ESMTP id C3BEB87093;
Thu, 5 Nov 2020 07:44:32 +0000 (UTC)
X-Virus-Scanned: amavisd-new at osuosl.org
Received: from hemlock.osuosl.org ([127.0.0.1])
by localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)
with ESMTP id nke7f5tvHIb3; Thu, 5 Nov 2020 07:44:31 +0000 (UTC)
Received: from lists.linuxfoundation.org (lf-lists.osuosl.org [140.211.9.56])
by hemlock.osuosl.org (Postfix) with ESMTP id 43FB2870A5;
Thu, 5 Nov 2020 07:44:31 +0000 (UTC)
Received: from lf-lists.osuosl.org (localhost [127.0.0.1])
by lists.linuxfoundation.org (Postfix) with ESMTP id 1CD7EC0889;
Thu, 5 Nov 2020 07:44:31 +0000 (UTC)
X-Original-To: dev@openvswitch.org
Delivered-To: ovs-dev@lists.linuxfoundation.org
Received: from fraxinus.osuosl.org (smtp4.osuosl.org [140.211.166.137])
by lists.linuxfoundation.org (Postfix) with ESMTP id DCA9AC0889
for ; Thu, 5 Nov 2020 07:44:29 +0000 (UTC)
Received: from localhost (localhost [127.0.0.1])
by fraxinus.osuosl.org (Postfix) with ESMTP id CAEAB8577F
for ; Thu, 5 Nov 2020 07:44:29 +0000 (UTC)
X-Virus-Scanned: amavisd-new at osuosl.org
Received: from fraxinus.osuosl.org ([127.0.0.1])
by localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)
with ESMTP id BU8zMmIg6UKi for ;
Thu, 5 Nov 2020 07:44:29 +0000 (UTC)
X-Greylist: domain auto-whitelisted by SQLgrey-1.7.6
Received: from relay1-d.mail.gandi.net (relay1-d.mail.gandi.net
[217.70.183.193])
by fraxinus.osuosl.org (Postfix) with ESMTPS id C3D9B856E8
for ; Thu, 5 Nov 2020 07:44:28 +0000 (UTC)
X-Originating-IP: 115.99.213.209
Received: from nusiddiq.home.org.home.org (unknown [115.99.213.209])
(Authenticated sender: numans@ovn.org)
by relay1-d.mail.gandi.net (Postfix) with ESMTPSA id A9218240005;
Thu, 5 Nov 2020 07:44:25 +0000 (UTC)
From: numans@ovn.org
To: dev@openvswitch.org
Date: Thu, 5 Nov 2020 13:14:21 +0530
Message-Id: <20201105074421.3794395-1-numans@ovn.org>
X-Mailer: git-send-email 2.28.0
In-Reply-To: <20201105074146.3793721-1-numans@ovn.org>
References: <20201105074146.3793721-1-numans@ovn.org>
MIME-Version: 1.0
Subject: [ovs-dev] [PATCH ovn v3 7/7] sbctl: Add Load Balancer support for
vflows option.
X-BeenThere: ovs-dev@openvswitch.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id:
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Errors-To: ovs-dev-bounces@openvswitch.org
Sender: "dev"
From: Numan Siddique
Signed-off-by: Numan Siddique
---
utilities/ovn-sbctl.c | 56 +++++++++++++++++++++++++++++++++++++++++++
1 file changed, 56 insertions(+)
diff --git a/utilities/ovn-sbctl.c b/utilities/ovn-sbctl.c
index 00c112c7e5..d3c37369d2 100644
--- a/utilities/ovn-sbctl.c
+++ b/utilities/ovn-sbctl.c
@@ -542,6 +542,11 @@ pre_get_info(struct ctl_context *ctx)
ovsdb_idl_add_column(ctx->idl, &sbrec_mac_binding_col_logical_port);
ovsdb_idl_add_column(ctx->idl, &sbrec_mac_binding_col_ip);
ovsdb_idl_add_column(ctx->idl, &sbrec_mac_binding_col_mac);
+
+ ovsdb_idl_add_column(ctx->idl, &sbrec_load_balancer_col_datapaths);
+ ovsdb_idl_add_column(ctx->idl, &sbrec_load_balancer_col_vips);
+ ovsdb_idl_add_column(ctx->idl, &sbrec_load_balancer_col_name);
+ ovsdb_idl_add_column(ctx->idl, &sbrec_load_balancer_col_protocol);
}
static struct cmd_show_table cmd_show_tables[] = {
@@ -1009,6 +1014,56 @@ cmd_lflow_list_chassis(struct ctl_context *ctx, struct vconn *vconn,
}
}
+static void
+cmd_lflow_list_load_balancers(struct ctl_context *ctx, struct vconn *vconn,
+ const struct sbrec_datapath_binding *datapath,
+ bool stats, bool print_uuid)
+{
+ const struct sbrec_load_balancer *lb;
+ const struct sbrec_load_balancer *lb_prev = NULL;
+ SBREC_LOAD_BALANCER_FOR_EACH (lb, ctx->idl) {
+ bool dp_found = false;
+ if (datapath) {
+ size_t i;
+ for (i = 0; i < lb->n_datapaths; i++) {
+ if (datapath == lb->datapaths[i]) {
+ dp_found = true;
+ break;
+ }
+ }
+ if (!dp_found) {
+ continue;
+ }
+ }
+
+ if (!lb_prev) {
+ printf("\nLoad Balancers:\n");
+ }
+
+ printf(" ");
+ print_uuid_part(&lb->header_.uuid, print_uuid);
+ printf("name=\"%s\", protocol=\"%s\", ", lb->name, lb->protocol);
+ if (!dp_found) {
+ for (size_t i = 0; i < lb->n_datapaths; i++) {
+ print_vflow_datapath_name(lb->datapaths[i], true);
+ }
+ }
+
+ printf("\n vips:\n");
+ struct smap_node *node;
+ SMAP_FOR_EACH (node, &lb->vips) {
+ printf(" %s = %s\n", node->key, node->value);
+ }
+ printf("\n");
+
+ if (vconn) {
+ sbctl_dump_openflow(vconn, &lb->header_.uuid, stats);
+ }
+
+ lb_prev = lb;
+ }
+}
+
static void
cmd_lflow_list(struct ctl_context *ctx)
{
@@ -1118,6 +1173,7 @@ cmd_lflow_list(struct ctl_context *ctx)
cmd_lflow_list_mac_bindings(ctx, vconn, datapath, stats, print_uuid);
cmd_lflow_list_mc_groups(ctx, vconn, datapath, stats, print_uuid);
cmd_lflow_list_chassis(ctx, vconn, stats, print_uuid);
+ cmd_lflow_list_load_balancers(ctx, vconn, datapath, stats, print_uuid);
}
vconn_close(vconn);