From patchwork Tue Oct 27 17:16:44 2020
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Numan Siddique
X-Patchwork-Id: 1388803
Return-Path:
X-Original-To: incoming@patchwork.ozlabs.org
Delivered-To: patchwork-incoming@bilbo.ozlabs.org
Authentication-Results: ozlabs.org;
spf=pass (sender SPF authorized) smtp.mailfrom=openvswitch.org
(client-ip=140.211.166.133; helo=hemlock.osuosl.org;
envelope-from=ovs-dev-bounces@openvswitch.org; receiver=)
Authentication-Results: ozlabs.org;
dmarc=none (p=none dis=none) header.from=ovn.org
Received: from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133])
(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
(No client certificate requested)
by ozlabs.org (Postfix) with ESMTPS id 4CLJKV5g1vz9sRk
for ; Wed, 28 Oct 2020 04:17:02 +1100 (AEDT)
Received: from localhost (localhost [127.0.0.1])
by hemlock.osuosl.org (Postfix) with ESMTP id 639D987300;
Tue, 27 Oct 2020 17:17:01 +0000 (UTC)
X-Virus-Scanned: amavisd-new at osuosl.org
Received: from hemlock.osuosl.org ([127.0.0.1])
by localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)
with ESMTP id mysVanvBpPVS; Tue, 27 Oct 2020 17:17:00 +0000 (UTC)
Received: from lists.linuxfoundation.org (lf-lists.osuosl.org [140.211.9.56])
by hemlock.osuosl.org (Postfix) with ESMTP id 318AD85348;
Tue, 27 Oct 2020 17:17:00 +0000 (UTC)
Received: from lf-lists.osuosl.org (localhost [127.0.0.1])
by lists.linuxfoundation.org (Postfix) with ESMTP id 1550AC0859;
Tue, 27 Oct 2020 17:17:00 +0000 (UTC)
X-Original-To: dev@openvswitch.org
Delivered-To: ovs-dev@lists.linuxfoundation.org
Received: from silver.osuosl.org (smtp3.osuosl.org [140.211.166.136])
by lists.linuxfoundation.org (Postfix) with ESMTP id 02AD8C0051
for ; Tue, 27 Oct 2020 17:16:59 +0000 (UTC)
Received: from localhost (localhost [127.0.0.1])
by silver.osuosl.org (Postfix) with ESMTP id E16FE20465
for ; Tue, 27 Oct 2020 17:16:58 +0000 (UTC)
X-Virus-Scanned: amavisd-new at osuosl.org
Received: from silver.osuosl.org ([127.0.0.1])
by localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)
with ESMTP id 9Fb3QWXKoHut for ;
Tue, 27 Oct 2020 17:16:56 +0000 (UTC)
X-Greylist: domain auto-whitelisted by SQLgrey-1.7.6
Received: from relay7-d.mail.gandi.net (relay7-d.mail.gandi.net
[217.70.183.200])
by silver.osuosl.org (Postfix) with ESMTPS id 5291820104
for ; Tue, 27 Oct 2020 17:16:56 +0000 (UTC)
X-Originating-IP: 115.99.168.200
Received: from nusiddiq.home.org.com (unknown [115.99.168.200])
(Authenticated sender: numans@ovn.org)
by relay7-d.mail.gandi.net (Postfix) with ESMTPSA id 5C94120014;
Tue, 27 Oct 2020 17:16:52 +0000 (UTC)
From: numans@ovn.org
To: dev@openvswitch.org
Date: Tue, 27 Oct 2020 22:46:44 +0530
Message-Id: <20201027171644.1180723-1-numans@ovn.org>
X-Mailer: git-send-email 2.26.2
In-Reply-To: <20201027171531.1178296-1-numans@ovn.org>
References: <20201027171531.1178296-1-numans@ovn.org>
MIME-Version: 1.0
Subject: [ovs-dev] [PATCH ovn v2 1/7] Add new table Load_Balancer in
Southbound database.
X-BeenThere: ovs-dev@openvswitch.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id:
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Errors-To: ovs-dev-bounces@openvswitch.org
Sender: "dev"
From: Numan Siddique
This patch adds a new table 'Load_Balancer' in SB DB and syncs the Load_Balancer table rows
from NB DB to SB DB. An upcoming patch will make use of this table for handling the
load balancer hairpin traffic.
Signed-off-by: Numan Siddique
---
northd/ovn-northd.c | 141 ++++++++++++++++++++++++++++++++++++++++++
ovn-sb.ovsschema | 27 +++++++-
ovn-sb.xml | 45 ++++++++++++++
tests/ovn-northd.at | 81 ++++++++++++++++++++++++
utilities/ovn-sbctl.c | 3 +
5 files changed, 295 insertions(+), 2 deletions(-)
diff --git a/northd/ovn-northd.c b/northd/ovn-northd.c
index c06139d75b..d11888d203 100644
--- a/northd/ovn-northd.c
+++ b/northd/ovn-northd.c
@@ -11860,6 +11860,136 @@ sync_dns_entries(struct northd_context *ctx, struct hmap *datapaths)
}
hmap_destroy(&dns_map);
}
+
+/*
+ * struct 'sync_lb_info' is used to sync the load balancer records between
+ * OVN Northbound db and Southbound db.
+ */
+struct sync_lb_info {
+ struct hmap_node hmap_node;
+ const struct nbrec_load_balancer *nlb; /* LB record in the NB db. */
+ const struct sbrec_load_balancer *slb; /* LB record in the SB db. */
+
+ /* Datapaths with which this LB entry is associated. */
+ const struct sbrec_datapath_binding **sbs;
+ size_t n_sbs;
+};
+
+static inline struct sync_lb_info *
+get_sync_lb_info_from_hmap(struct hmap *sync_lb_map, struct uuid *uuid)
+{
+ struct sync_lb_info *lb_info;
+ size_t hash = uuid_hash(uuid);
+ HMAP_FOR_EACH_WITH_HASH (lb_info, hmap_node, hash, sync_lb_map) {
+ if (uuid_equals(&lb_info->nlb->header_.uuid, uuid)) {
+ return lb_info;
+ }
+ }
+
+ return NULL;
+}
+
+static void
+sync_lb_entries(struct northd_context *ctx, struct hmap *datapaths)
+{
+ struct hmap lb_map = HMAP_INITIALIZER(&lb_map);
+ struct ovn_datapath *od;
+
+ HMAP_FOR_EACH (od, key_node, datapaths) {
+ if (!od->nbs || !od->nbs->n_load_balancer) {
+ continue;
+ }
+
+ for (size_t i = 0; i < od->nbs->n_load_balancer; i++) {
+ struct sync_lb_info *lb_info =
+ get_sync_lb_info_from_hmap(
+ &lb_map, &od->nbs->load_balancer[i]->header_.uuid);
+
+ if (!lb_info) {
+ size_t hash = uuid_hash(
+ &od->nbs->load_balancer[i]->header_.uuid);
+ lb_info = xzalloc(sizeof *lb_info);
+ lb_info->nlb = od->nbs->load_balancer[i];
+ hmap_insert(&lb_map, &lb_info->hmap_node, hash);
+ }
+
+ lb_info->n_sbs++;
+ lb_info->sbs = xrealloc(lb_info->sbs,
+ lb_info->n_sbs * sizeof *lb_info->sbs);
+ lb_info->sbs[lb_info->n_sbs - 1] = od->sb;
+ }
+ }
+
+ const struct sbrec_load_balancer *sbrec_lb, *next;
+ SBREC_LOAD_BALANCER_FOR_EACH_SAFE (sbrec_lb, next, ctx->ovnsb_idl) {
+ const char *nb_lb_uuid = smap_get(&sbrec_lb->external_ids, "lb_id");
+ struct uuid lb_uuid;
+ if (!nb_lb_uuid || !uuid_from_string(&lb_uuid, nb_lb_uuid)) {
+ sbrec_load_balancer_delete(sbrec_lb);
+ continue;
+ }
+
+ struct sync_lb_info *lb_info =
+ get_sync_lb_info_from_hmap(&lb_map, &lb_uuid);
+ if (lb_info) {
+ lb_info->slb = sbrec_lb;
+ } else {
+ sbrec_load_balancer_delete(sbrec_lb);
+ }
+ }
+
+ struct sync_lb_info *lb_info;
+ HMAP_FOR_EACH (lb_info, hmap_node, &lb_map) {
+ if (!lb_info->slb) {
+ sbrec_lb = sbrec_load_balancer_insert(ctx->ovnsb_txn);
+ lb_info->slb = sbrec_lb;
+ char *lb_id = xasprintf(
+ UUID_FMT, UUID_ARGS(&lb_info->nlb->header_.uuid));
+ const struct smap external_ids =
+ SMAP_CONST1(&external_ids, "lb_id", lb_id);
+ sbrec_load_balancer_set_external_ids(sbrec_lb, &external_ids);
+ free(lb_id);
+ }
+
+ /* Set the datapaths and other columns. If nothing has changed, then
+ * this will be a no-op.
+ */
+ sbrec_load_balancer_set_datapaths(
+ lb_info->slb,
+ (struct sbrec_datapath_binding **)lb_info->sbs,
+ lb_info->n_sbs);
+
+ sbrec_load_balancer_set_name(lb_info->slb, lb_info->nlb->name);
+ sbrec_load_balancer_set_vips(lb_info->slb, &lb_info->nlb->vips);
+ sbrec_load_balancer_set_protocol(lb_info->slb, lb_info->nlb->protocol);
+ }
+
+ HMAP_FOR_EACH (od, key_node, datapaths) {
+ if (!od->nbs || !od->nbs->n_load_balancer) {
+ continue;
+ }
+
+ const struct sbrec_load_balancer **lbs =
+ xmalloc(od->nbs->n_load_balancer * sizeof *lbs);
+ for (size_t i = 0; i < od->nbs->n_load_balancer; i++) {
+ lb_info = get_sync_lb_info_from_hmap(
+ &lb_map, &od->nbs->load_balancer[i]->header_.uuid);
+ ovs_assert(lb_info);
+ lbs[i] = lb_info->slb;
+ }
+
+ sbrec_datapath_binding_set_load_balancers(
+ od->sb, (struct sbrec_load_balancer **)lbs,
+ od->nbs->n_load_balancer);
+ free(lbs);
+ }
+
+ HMAP_FOR_EACH_POP (lb_info, hmap_node, &lb_map) {
+ free(lb_info->sbs);
+ free(lb_info);
+ }
+ hmap_destroy(&lb_map);
+}
static void
destroy_datapaths_and_ports(struct hmap *datapaths, struct hmap *ports,
@@ -12227,6 +12357,7 @@ ovnnb_db_run(struct northd_context *ctx,
sync_port_groups(ctx, &port_groups);
sync_meters(ctx);
sync_dns_entries(ctx, datapaths);
+ sync_lb_entries(ctx, datapaths);
destroy_ovn_lbs(&lbs);
hmap_destroy(&lbs);
@@ -12987,6 +13118,8 @@ main(int argc, char *argv[])
ovsdb_idl_add_table(ovnsb_idl_loop.idl, &sbrec_table_datapath_binding);
add_column_noalert(ovnsb_idl_loop.idl,
&sbrec_datapath_binding_col_tunnel_key);
+ add_column_noalert(ovnsb_idl_loop.idl,
+ &sbrec_datapath_binding_col_load_balancers);
add_column_noalert(ovnsb_idl_loop.idl,
&sbrec_datapath_binding_col_external_ids);
@@ -13154,6 +13287,14 @@ main(int argc, char *argv[])
add_column_noalert(ovnsb_idl_loop.idl,
&sbrec_service_monitor_col_external_ids);
+ ovsdb_idl_add_table(ovnsb_idl_loop.idl, &sbrec_table_load_balancer);
+ add_column_noalert(ovnsb_idl_loop.idl, &sbrec_load_balancer_col_datapaths);
+ add_column_noalert(ovnsb_idl_loop.idl, &sbrec_load_balancer_col_name);
+ add_column_noalert(ovnsb_idl_loop.idl, &sbrec_load_balancer_col_vips);
+ add_column_noalert(ovnsb_idl_loop.idl, &sbrec_load_balancer_col_protocol);
+ add_column_noalert(ovnsb_idl_loop.idl,
+ &sbrec_load_balancer_col_external_ids);
+
struct ovsdb_idl_index *sbrec_chassis_by_name
= chassis_index_create(ovnsb_idl_loop.idl);
diff --git a/ovn-sb.ovsschema b/ovn-sb.ovsschema
index d1c506a22c..7db6c6a4dd 100644
--- a/ovn-sb.ovsschema
+++ b/ovn-sb.ovsschema
@@ -1,7 +1,7 @@
{
"name": "OVN_Southbound",
- "version": "2.10.0",
- "cksum": "2548342632 22615",
+ "version": "2.11.0",
+ "cksum": "1470439925 23814",
"tables": {
"SB_Global": {
"columns": {
@@ -152,6 +152,11 @@
"type": {"key": {"type": "integer",
"minInteger": 1,
"maxInteger": 16777215}}},
+ "load_balancers": {"type": {"key": {"type": "uuid",
+ "refTable": "Load_Balancer",
+ "refType": "weak"},
+ "min": 0,
+ "max": "unlimited"}},
"external_ids": {
"type": {"key": "string", "value": "string",
"min": 0, "max": "unlimited"}}},
@@ -447,6 +452,24 @@
"type": {"key": "string", "value": "string",
"min": 0, "max": "unlimited"}}},
"indexes": [["logical_port", "ip", "port", "protocol"]],
+ "isRoot": true},
+ "Load_Balancer": {
+ "columns": {
+ "name": {"type": "string"},
+ "vips": {
+ "type": {"key": "string", "value": "string",
+ "min": 0, "max": "unlimited"}},
+ "protocol": {
+ "type": {"key": {"type": "string",
+ "enum": ["set", ["tcp", "udp", "sctp"]]},
+ "min": 0, "max": 1}},
+ "datapaths": {
+ "type": {"key": {"type": "uuid",
+ "refTable": "Datapath_Binding"},
+ "min": 1, "max": "unlimited"}},
+ "external_ids": {
+ "type": {"key": "string", "value": "string",
+ "min": 0, "max": "unlimited"}}},
"isRoot": true}
}
}
diff --git a/ovn-sb.xml b/ovn-sb.xml
index b1480f2186..bdd41c1f97 100644
--- a/ovn-sb.xml
+++ b/ovn-sb.xml
@@ -2497,6 +2497,12 @@ tcp.flags = RST;
constructed for each supported encapsulation.
+
+
+ Load balancers associated with the datapath.
+
+
+
Each row in is associated with some
@@ -4126,4 +4132,43 @@ tcp.flags = RST;
+
+
+
+ Each row represents a load balancer.
+
+
+
+ A name for the load balancer. This name has no special meaning or
+ purpose other than to provide convenience for human interaction with
+ the ovn-nb database.
+
+
+
+ A map of virtual IP addresses (and an optional port number with
+ :
as a separator) associated with this load balancer and
+ their corresponding endpoint IP addresses (and optional port numbers
+ with :
as separators) separated by commas.
+
+
+
+
+ Valid protocols are tcp
, udp
, or
+ sctp
. This column is useful when a port number is
+ provided as part of the vips
column. If this column is
+ empty and a port number is provided as part of vips
+ column, OVN assumes the protocol to be tcp
.
+
+
+
+
+ Datapaths to which this load balancer applies.
+
+
+
+
+ See External IDs at the beginning of this document.
+
+
+
diff --git a/tests/ovn-northd.at b/tests/ovn-northd.at
index e155e26f89..b1f454818e 100644
--- a/tests/ovn-northd.at
+++ b/tests/ovn-northd.at
@@ -2067,3 +2067,84 @@ action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implici
])
AT_CLEANUP
+
+AT_SETUP([ovn -- NB to SB load balancer sync])
+ovn_start
+
+ovn-nbctl --wait=hv lb-add lb0 10.0.0.10:80 10.0.0.4:8080
+
+AT_CHECK([ovn-nbctl --bare --columns _uuid list load_balancer | wc -l], [0], [dnl
+1
+])
+
+AT_CHECK([ovn-sbctl --bare --columns _uuid list load_balancer | wc -l], [0], [dnl
+0
+])
+
+ovn-nbctl ls-add sw0
+ovn-nbctl --wait=hv ls-lb-add sw0 lb0
+sw0_sb_uuid=$(ovn-sbctl --bare --columns _uuid list datapath sw0)
+AT_CHECK([ovn-sbctl --bare --columns name,vips,protocol list load_balancer], [0], [dnl
+lb0
+10.0.0.10:80=10.0.0.4:8080
+tcp
+])
+
+lb0_uuid=$(ovn-sbctl --bare --columns _uuid list load_balancer lb0)
+
+AT_CHECK([test $(ovn-sbctl --bare --columns datapaths list load_balancer) = $sw0_sb_uuid])
+AT_CHECK([test $(ovn-sbctl --bare --columns load_balancers list datapath sw0) = $lb0_uuid])
+
+ovn-nbctl --wait=sb set load_balancer . vips:"10.0.0.20\:90"="20.0.0.4:8080,30.0.0.4:8080"
+AT_CHECK([ovn-sbctl --bare --columns name,vips,protocol list load_balancer], [0], [dnl
+lb0
+10.0.0.10:80=10.0.0.4:8080 10.0.0.20:90=20.0.0.4:8080,30.0.0.4:8080
+tcp
+])
+
+ovn-nbctl lr-add lr0
+ovn-nbctl --wait=sb lr-lb-add lr0 lb0
+
+AT_CHECK([test $(ovn-sbctl --bare --columns datapaths list load_balancer) = $sw0_sb_uuid])
+
+ovn-nbctl ls-add sw1
+ovn-nbctl --wait=sb ls-lb-add sw1 lb0
+sw1_sb_uuid=$(ovn-sbctl --bare --columns _uuid list datapath sw1)
+
+for i in $sw0_sb_uuid $sw1_sb_uuid; do echo $i >> dp_ids; done
+for i in $(ovn-sbctl --bare --columns datapaths list load_balancer); do echo $i >> lb_dps; done
+
+cat dp_ids | sort > expout
+AT_CHECK([cat lb_dps | sort], [0], [expout])
+AT_CHECK([test $(ovn-sbctl --bare --columns load_balancers list datapath sw1) = $lb0_uuid])
+
+ovn-nbctl --wait=sb lb-add lb1 10.0.0.30:80 20.0.0.50:8080 udp
+
+AT_CHECK([ovn-sbctl --bare --columns _uuid list load_balancer | wc -l], [0], [dnl
+1
+])
+
+ovn-nbctl --wait=sb lr-lb-add lr0 lb1
+AT_CHECK([ovn-sbctl --bare --columns _uuid list load_balancer | wc -l], [0], [dnl
+1
+])
+
+ovn-nbctl --wait=sb ls-lb-add sw1 lb1
+AT_CHECK([ovn-sbctl --bare --columns name,vips,protocol list load_balancer lb1 ], [0], [dnl
+lb1
+10.0.0.30:80=20.0.0.50:8080
+udp
+])
+
+lb1_uuid=$(ovn-sbctl --bare --columns _uuid list load_balancer lb1)
+
+for i in $lb0_uuid $lb1_uuid; do echo $i >> lb_ids; done
+cat lb_ids | sort > expout
+
+for i in $(ovn-sbctl --bare --columns load_balancers list datapath_binding sw1); do echo $i >> sw1_lbs; done
+AT_CHECK([cat sw1_lbs | sort], [0], [expout])
+
+ovn-nbctl --wait=sb lb-del lb1
+AT_CHECK([test $(ovn-sbctl --bare --columns load_balancers list datapath_binding sw1) = $lb0_uuid])
+
+AT_CLEANUP
diff --git a/utilities/ovn-sbctl.c b/utilities/ovn-sbctl.c
index 85e448ec04..00c112c7e5 100644
--- a/utilities/ovn-sbctl.c
+++ b/utilities/ovn-sbctl.c
@@ -1441,6 +1441,9 @@ static const struct ctl_table_class tables[SBREC_N_TABLES] = {
[SBREC_TABLE_GATEWAY_CHASSIS].row_ids[0]
= {&sbrec_gateway_chassis_col_name, NULL, NULL},
+
+ [SBREC_TABLE_LOAD_BALANCER].row_ids[0]
+ = {&sbrec_load_balancer_col_name, NULL, NULL},
};
From patchwork Tue Oct 27 17:16:53 2020
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Numan Siddique
X-Patchwork-Id: 1388805
Return-Path:
X-Original-To: incoming@patchwork.ozlabs.org
Delivered-To: patchwork-incoming@bilbo.ozlabs.org
Authentication-Results: ozlabs.org;
spf=pass (sender SPF authorized) smtp.mailfrom=openvswitch.org
(client-ip=140.211.166.136; helo=silver.osuosl.org;
envelope-from=ovs-dev-bounces@openvswitch.org; receiver=)
Authentication-Results: ozlabs.org;
dmarc=none (p=none dis=none) header.from=ovn.org
Received: from silver.osuosl.org (smtp3.osuosl.org [140.211.166.136])
(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
(No client certificate requested)
by ozlabs.org (Postfix) with ESMTPS id 4CLJMh3mpsz9s0b
for ; Wed, 28 Oct 2020 04:18:56 +1100 (AEDT)
Received: from localhost (localhost [127.0.0.1])
by silver.osuosl.org (Postfix) with ESMTP id C2BC020764;
Tue, 27 Oct 2020 17:18:54 +0000 (UTC)
X-Virus-Scanned: amavisd-new at osuosl.org
Received: from silver.osuosl.org ([127.0.0.1])
by localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)
with ESMTP id b+feKCO9c2lh; Tue, 27 Oct 2020 17:18:46 +0000 (UTC)
Received: from lists.linuxfoundation.org (lf-lists.osuosl.org [140.211.9.56])
by silver.osuosl.org (Postfix) with ESMTP id 5F99E2001C;
Tue, 27 Oct 2020 17:18:46 +0000 (UTC)
Received: from lf-lists.osuosl.org (localhost [127.0.0.1])
by lists.linuxfoundation.org (Postfix) with ESMTP id 4F82FC0859;
Tue, 27 Oct 2020 17:18:46 +0000 (UTC)
X-Original-To: dev@openvswitch.org
Delivered-To: ovs-dev@lists.linuxfoundation.org
Received: from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133])
by lists.linuxfoundation.org (Postfix) with ESMTP id 3B8B8C0051
for ; Tue, 27 Oct 2020 17:18:45 +0000 (UTC)
Received: from localhost (localhost [127.0.0.1])
by hemlock.osuosl.org (Postfix) with ESMTP id 23E8087306
for ; Tue, 27 Oct 2020 17:18:45 +0000 (UTC)
X-Virus-Scanned: amavisd-new at osuosl.org
Received: from hemlock.osuosl.org ([127.0.0.1])
by localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)
with ESMTP id ycjt7oMkGluL for ;
Tue, 27 Oct 2020 17:18:42 +0000 (UTC)
X-Greylist: domain auto-whitelisted by SQLgrey-1.7.6
Received: from relay2-d.mail.gandi.net (relay2-d.mail.gandi.net
[217.70.183.194])
by hemlock.osuosl.org (Postfix) with ESMTPS id 7944F87303
for ; Tue, 27 Oct 2020 17:18:42 +0000 (UTC)
X-Originating-IP: 115.99.168.200
Received: from nusiddiq.home.org.com (unknown [115.99.168.200])
(Authenticated sender: numans@ovn.org)
by relay2-d.mail.gandi.net (Postfix) with ESMTPSA id DFE9C40012;
Tue, 27 Oct 2020 17:18:39 +0000 (UTC)
From: numans@ovn.org
To: dev@openvswitch.org
Date: Tue, 27 Oct 2020 22:46:53 +0530
Message-Id: <20201027171653.1180789-1-numans@ovn.org>
X-Mailer: git-send-email 2.26.2
In-Reply-To: <20201027171531.1178296-1-numans@ovn.org>
References: <20201027171531.1178296-1-numans@ovn.org>
MIME-Version: 1.0
Subject: [ovs-dev] [PATCH ovn v2 2/7] northd: Refactor load balancer vip
parsing.
X-BeenThere: ovs-dev@openvswitch.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id:
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Errors-To: ovs-dev-bounces@openvswitch.org
Sender: "dev"
From: Numan Siddique
Parsing of the load balancer VIPs is moved to a separate file - lib/lb.c.
ovn-northd makes use of these functions. Upcoming patch will make use of these
util functions for parsing SB Load_Balancers.
Signed-off-by: Numan Siddique
---
lib/automake.mk | 4 +-
lib/lb.c | 236 ++++++++++++++++++++++++++++++++++++
lib/lb.h | 77 ++++++++++++
lib/ovn-util.c | 28 +++++
lib/ovn-util.h | 2 +
northd/ovn-northd.c | 286 +++++---------------------------------------
6 files changed, 378 insertions(+), 255 deletions(-)
create mode 100644 lib/lb.c
create mode 100644 lib/lb.h
diff --git a/lib/automake.mk b/lib/automake.mk
index f3e9c8818b..430cd11fc6 100644
--- a/lib/automake.mk
+++ b/lib/automake.mk
@@ -23,7 +23,9 @@ lib_libovn_la_SOURCES = \
lib/ovn-util.h \
lib/logical-fields.c \
lib/inc-proc-eng.c \
- lib/inc-proc-eng.h
+ lib/inc-proc-eng.h \
+ lib/lb.c \
+ lib/lb.h
nodist_lib_libovn_la_SOURCES = \
lib/ovn-dirs.c \
lib/ovn-nb-idl.c \
diff --git a/lib/lb.c b/lib/lb.c
new file mode 100644
index 0000000000..db2d3d552f
--- /dev/null
+++ b/lib/lb.c
@@ -0,0 +1,236 @@
+/* Copyright (c) 2020, Red Hat, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include
+
+#include "lb.h"
+#include "lib/ovn-nb-idl.h"
+#include "lib/ovn-sb-idl.h"
+#include "lib/ovn-util.h"
+
+/* OpenvSwitch lib includes. */
+#include "openvswitch/vlog.h"
+#include "lib/smap.h"
+
+VLOG_DEFINE_THIS_MODULE(lb);
+
+static struct ovn_lb *
+ovn_lb_create(const struct smap *vips)
+{
+ struct ovn_lb *lb = xzalloc(sizeof *lb);
+
+ lb->n_vips = smap_count(vips);
+ lb->vips = xcalloc(lb->n_vips, sizeof (struct lb_vip));
+ struct smap_node *node;
+ size_t n_vips = 0;
+
+ SMAP_FOR_EACH (node, vips) {
+ char *vip;
+ uint16_t port;
+ int addr_family;
+
+ if (!ip_address_and_port_from_key(node->key, &vip, &port,
+ &addr_family)) {
+ continue;
+ }
+
+ lb->vips[n_vips].vip = vip;
+ lb->vips[n_vips].vip_port = port;
+ lb->vips[n_vips].addr_family = addr_family;
+ lb->vips[n_vips].vip_port_str = xstrdup(node->key);
+ lb->vips[n_vips].backend_ips = xstrdup(node->value);
+
+ char *tokstr = xstrdup(node->value);
+ char *save_ptr = NULL;
+ char *token;
+ size_t n_backends = 0;
+ /* Format of the backend ips: "IP1:port1,IP2:port2,...". */
+ for (token = strtok_r(tokstr, ",", &save_ptr);
+ token != NULL;
+ token = strtok_r(NULL, ",", &save_ptr)) {
+ n_backends++;
+ }
+
+ free(tokstr);
+ tokstr = xstrdup(node->value);
+ save_ptr = NULL;
+
+ lb->vips[n_vips].n_backends = n_backends;
+ lb->vips[n_vips].backends = xcalloc(n_backends,
+ sizeof (struct lb_vip_backend));
+
+ size_t i = 0;
+ for (token = strtok_r(tokstr, ",", &save_ptr);
+ token != NULL;
+ token = strtok_r(NULL, ",", &save_ptr)) {
+ char *backend_ip;
+ uint16_t backend_port;
+
+ if (!ip_address_and_port_from_key(token, &backend_ip,
+ &backend_port,
+ &addr_family)) {
+ continue;
+ }
+
+ lb->vips[n_vips].backends[i].ip = backend_ip;
+ lb->vips[n_vips].backends[i].port = backend_port;
+ lb->vips[n_vips].backends[i].addr_family = addr_family;
+ i++;
+ }
+
+ free(tokstr);
+ n_vips++;
+ }
+
+ return lb;
+}
+
+struct ovn_lb *
+ovn_nb_lb_create(const struct nbrec_load_balancer *nbrec_lb,
+ struct hmap *ports, struct hmap *lbs,
+ void * (*ovn_port_find)(const struct hmap *ports,
+ const char *name))
+{
+ struct ovn_lb *lb = ovn_lb_create(&nbrec_lb->vips);
+ hmap_insert(lbs, &lb->hmap_node, uuid_hash(&nbrec_lb->header_.uuid));
+ lb->nlb = nbrec_lb;
+ lb->nb_lb = true;
+
+ for (size_t i = 0; i < lb->n_vips; i++) {
+ struct lb_vip *lb_vip = &lb->vips[i];
+
+ struct nbrec_load_balancer_health_check *lb_health_check = NULL;
+ if (nbrec_lb->protocol && !strcmp(nbrec_lb->protocol, "sctp")) {
+ if (nbrec_lb->n_health_check > 0) {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+ VLOG_WARN_RL(&rl,
+ "SCTP load balancers do not currently support "
+ "health checks. Not creating health checks for "
+ "load balancer " UUID_FMT,
+ UUID_ARGS(&nbrec_lb->header_.uuid));
+ }
+ } else {
+ for (size_t j = 0; j < nbrec_lb->n_health_check; j++) {
+ if (!strcmp(nbrec_lb->health_check[j]->vip,
+ lb_vip->vip_port_str)) {
+ lb_health_check = nbrec_lb->health_check[j];
+ break;
+ }
+ }
+ }
+
+ lb_vip->lb_health_check = lb_health_check;
+
+ for (size_t j = 0; j < lb_vip->n_backends; j++) {
+ struct lb_vip_backend *backend = &lb_vip->backends[j];
+
+ struct ovn_port *op = NULL;
+ char *svc_mon_src_ip = NULL;
+ const char *s = smap_get(&nbrec_lb->ip_port_mappings,
+ backend->ip);
+ if (s) {
+ char *port_name = xstrdup(s);
+ char *p = strstr(port_name, ":");
+ if (p) {
+ *p = 0;
+ p++;
+ op = ovn_port_find(ports, port_name);
+ svc_mon_src_ip = xstrdup(p);
+ }
+ free(port_name);
+ }
+
+ backend->op = op;
+ backend->svc_mon_src_ip = svc_mon_src_ip;
+ }
+ }
+
+ if (nbrec_lb->n_selection_fields) {
+ char *proto = NULL;
+ if (nbrec_lb->protocol && nbrec_lb->protocol[0]) {
+ proto = nbrec_lb->protocol;
+ }
+
+ struct ds sel_fields = DS_EMPTY_INITIALIZER;
+ for (size_t i = 0; i < lb->nlb->n_selection_fields; i++) {
+ char *field = lb->nlb->selection_fields[i];
+ if (!strcmp(field, "tp_src") && proto) {
+ ds_put_format(&sel_fields, "%s_src,", proto);
+ } else if (!strcmp(field, "tp_dst") && proto) {
+ ds_put_format(&sel_fields, "%s_dst,", proto);
+ } else {
+ ds_put_format(&sel_fields, "%s,", field);
+ }
+ }
+ ds_chomp(&sel_fields, ',');
+ lb->selection_fields = ds_steal_cstr(&sel_fields);
+ }
+
+ return lb;
+}
+
+struct ovn_lb *
+ovn_sb_lb_create(const struct sbrec_load_balancer *sbrec_lb)
+{
+ struct ovn_lb *lb = ovn_lb_create(&sbrec_lb->vips);
+ lb->slb = sbrec_lb;
+ lb->nb_lb = false;
+
+ return lb;
+}
+
+struct ovn_lb *
+ovn_lb_find(struct hmap *lbs, struct uuid *uuid)
+{
+ struct ovn_lb *lb;
+ size_t hash = uuid_hash(uuid);
+ HMAP_FOR_EACH_WITH_HASH (lb, hmap_node, hash, lbs) {
+ if (uuid_equals(&lb->nlb->header_.uuid, uuid)) {
+ return lb;
+ }
+ }
+
+ return NULL;
+}
+
+void
+ovn_lb_destroy(struct ovn_lb *lb)
+{
+ for (size_t i = 0; i < lb->n_vips; i++) {
+ free(lb->vips[i].vip);
+ free(lb->vips[i].backend_ips);
+ free(lb->vips[i].vip_port_str);
+
+ for (size_t j = 0; j < lb->vips[i].n_backends; j++) {
+ free(lb->vips[i].backends[j].ip);
+ free(lb->vips[i].backends[j].svc_mon_src_ip);
+ }
+
+ free(lb->vips[i].backends);
+ }
+ free(lb->vips);
+ free(lb->selection_fields);
+}
+
+void
+ovn_lbs_destroy(struct hmap *lbs)
+{
+ struct ovn_lb *lb;
+ HMAP_FOR_EACH_POP (lb, hmap_node, lbs) {
+ ovn_lb_destroy(lb);
+ free(lb);
+ }
+ hmap_destroy(lbs);
+}
diff --git a/lib/lb.h b/lib/lb.h
new file mode 100644
index 0000000000..ffb3ba1fd1
--- /dev/null
+++ b/lib/lb.h
@@ -0,0 +1,77 @@
+/* Copyright (c) 2020, Red Hat, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef OVN_LIB_LB_H
+#define OVN_LIB_LB_H 1
+
+#include "openvswitch/hmap.h"
+
+struct nbrec_load_balancer;
+struct sbrec_load_balancer;
+struct ovn_port;
+struct uuid;
+
+struct ovn_lb {
+ struct hmap_node hmap_node;
+
+ bool nb_lb; /* NB load balancer or SB load balancer. */
+ union {
+ struct {
+ const struct nbrec_load_balancer *nlb; /* May be NULL. */
+ char *selection_fields;
+ };
+ const struct sbrec_load_balancer *slb; /* May be NULL. */
+ };
+
+ struct lb_vip *vips;
+ size_t n_vips;
+};
+
+struct lb_vip {
+ char *vip;
+ uint16_t vip_port;
+ int addr_family;
+ char *backend_ips;
+ char *vip_port_str;
+
+ /* Valid only for NB load balancer. */
+ struct nbrec_load_balancer_health_check *lb_health_check;
+ struct lb_vip_backend *backends;
+ size_t n_backends;
+};
+
+struct lb_vip_backend {
+ char *ip;
+ uint16_t port;
+ int addr_family;
+
+ /* Valid only for NB load balancer. */
+ struct ovn_port *op; /* Logical port to which the ip belongs. */
+ bool health_check;
+ char *svc_mon_src_ip; /* Source IP to use for monitoring. */
+ const struct sbrec_service_monitor *sbrec_monitor;
+};
+
+struct ovn_lb *ovn_nb_lb_create(
+ const struct nbrec_load_balancer *nbrec_lb,
+ struct hmap *ports, struct hmap *lbs,
+ void * (*ovn_port_find)(const struct hmap *ports, const char *name));
+struct ovn_lb *ovn_sb_lb_create(const struct sbrec_load_balancer *sbrec_lb);
+struct ovn_lb * ovn_lb_find(struct hmap *lbs, struct uuid *uuid);
+void ovn_lb_destroy(struct ovn_lb *lb);
+void ovn_lbs_destroy(struct hmap *lbs);
+
+#endif /* OVN_LIB_LB_H */
diff --git a/lib/ovn-util.c b/lib/ovn-util.c
index 1daf665037..02770bd55a 100644
--- a/lib/ovn-util.c
+++ b/lib/ovn-util.c
@@ -24,6 +24,7 @@
#include "ovn-sb-idl.h"
#include "unixctl.h"
#include
+#include "socket-util.h"
VLOG_DEFINE_THIS_MODULE(ovn_util);
@@ -667,3 +668,30 @@ ovn_smap_get_uint(const struct smap *smap, const char *key, unsigned int def)
return u_value;
}
+
+/* For a 'key' of the form "IP:port" or just "IP", sets 'port' and
+ * 'ip_address'. The caller must free() the memory allocated for
+ * 'ip_address'.
+ * Returns true if parsing of 'key' was successful, false otherwise.
+ */
+bool
+ip_address_and_port_from_key(const char *key, char **ip_address,
+ uint16_t *port, int *addr_family)
+{
+ struct sockaddr_storage ss;
+ if (!inet_parse_active(key, 0, &ss, false)) {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+ VLOG_WARN_RL(&rl, "bad ip address or port for key %s", key);
+ *ip_address = NULL;
+ *port = 0;
+ *addr_family = 0;
+ return false;
+ }
+
+ struct ds s = DS_EMPTY_INITIALIZER;
+ ss_format_address_nobracks(&ss, &s);
+ *ip_address = ds_steal_cstr(&s);
+ *port = ss_get_port(&ss);
+ *addr_family = ss.ss_family;
+ return true;
+}
diff --git a/lib/ovn-util.h b/lib/ovn-util.h
index 1d22a691f5..7fe53fd736 100644
--- a/lib/ovn-util.h
+++ b/lib/ovn-util.h
@@ -157,6 +157,8 @@ char *normalize_v46_prefix(const struct v46_ip *prefix, unsigned int plen);
unsigned int ovn_smap_get_uint(const struct smap *smap, const char *key,
unsigned int def);
+bool ip_address_and_port_from_key(const char *key, char **ip_address,
+ uint16_t *port, int *addr_family);
/* Returns a lowercase copy of orig.
* Caller must free the returned string.
*/
diff --git a/northd/ovn-northd.c b/northd/ovn-northd.c
index d11888d203..1da31caf3d 100644
--- a/northd/ovn-northd.c
+++ b/northd/ovn-northd.c
@@ -35,6 +35,7 @@
#include "lib/ovn-nb-idl.h"
#include "lib/ovn-sb-idl.h"
#include "lib/ovn-util.h"
+#include "lib/lb.h"
#include "ovn/actions.h"
#include "ovn/logical-fields.h"
#include "packets.h"
@@ -2525,10 +2526,6 @@ join_logical_ports(struct northd_context *ctx,
}
}
-static bool
-ip_address_and_port_from_lb_key(const char *key, char **ip_address,
- uint16_t *port, int *addr_family);
-
static void
get_router_load_balancer_ips(const struct ovn_datapath *od,
struct sset *all_ips_v4, struct sset *all_ips_v6)
@@ -2548,8 +2545,8 @@ get_router_load_balancer_ips(const struct ovn_datapath *od,
uint16_t port;
int addr_family;
- if (!ip_address_and_port_from_lb_key(node->key, &ip_address, &port,
- &addr_family)) {
+ if (!ip_address_and_port_from_key(node->key, &ip_address, &port,
+ &addr_family)) {
continue;
}
@@ -3309,53 +3306,6 @@ cleanup_sb_ha_chassis_groups(struct northd_context *ctx,
}
}
-struct ovn_lb {
- struct hmap_node hmap_node;
-
- const struct nbrec_load_balancer *nlb; /* May be NULL. */
- char *selection_fields;
- struct lb_vip *vips;
- size_t n_vips;
-};
-
-struct lb_vip {
- char *vip;
- uint16_t vip_port;
- int addr_family;
- char *backend_ips;
-
- bool health_check;
- struct lb_vip_backend *backends;
- size_t n_backends;
-};
-
-struct lb_vip_backend {
- char *ip;
- uint16_t port;
- int addr_family;
-
- struct ovn_port *op; /* Logical port to which the ip belong to. */
- bool health_check;
- char *svc_mon_src_ip; /* Source IP to use for monitoring. */
- const struct sbrec_service_monitor *sbrec_monitor;
-};
-
-
-static inline struct ovn_lb *
-ovn_lb_find(struct hmap *lbs, struct uuid *uuid)
-{
- struct ovn_lb *lb;
- size_t hash = uuid_hash(uuid);
- HMAP_FOR_EACH_WITH_HASH (lb, hmap_node, hash, lbs) {
- if (uuid_equals(&lb->nlb->header_.uuid, uuid)) {
- return lb;
- }
- }
-
- return NULL;
-}
-
-
struct service_monitor_info {
struct hmap_node hmap_node;
const struct sbrec_service_monitor *sbrec_mon;
@@ -3395,126 +3345,36 @@ create_or_get_service_mon(struct northd_context *ctx,
return mon_info;
}
-static struct ovn_lb *
-ovn_lb_create(struct northd_context *ctx, struct hmap *lbs,
- const struct nbrec_load_balancer *nbrec_lb,
- struct hmap *ports, struct hmap *monitor_map)
+static void
+ovn_lb_svc_create(struct northd_context *ctx, struct ovn_lb *lb,
+ struct hmap *monitor_map)
{
- struct ovn_lb *lb = xzalloc(sizeof *lb);
-
- size_t hash = uuid_hash(&nbrec_lb->header_.uuid);
- lb->nlb = nbrec_lb;
- hmap_insert(lbs, &lb->hmap_node, hash);
-
- lb->n_vips = smap_count(&nbrec_lb->vips);
- lb->vips = xcalloc(lb->n_vips, sizeof (struct lb_vip));
- struct smap_node *node;
- size_t n_vips = 0;
-
- SMAP_FOR_EACH (node, &nbrec_lb->vips) {
- char *vip;
- uint16_t port;
- int addr_family;
+ for (size_t i = 0; i < lb->n_vips; i++) {
+ struct lb_vip *lb_vip = &lb->vips[i];
- if (!ip_address_and_port_from_lb_key(node->key, &vip, &port,
- &addr_family)) {
+ if (!lb_vip->lb_health_check) {
continue;
}
- lb->vips[n_vips].vip = vip;
- lb->vips[n_vips].vip_port = port;
- lb->vips[n_vips].addr_family = addr_family;
- lb->vips[n_vips].backend_ips = xstrdup(node->value);
-
- struct nbrec_load_balancer_health_check *lb_health_check = NULL;
- if (nbrec_lb->protocol && !strcmp(nbrec_lb->protocol, "sctp")) {
- if (nbrec_lb->n_health_check > 0) {
- static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
- VLOG_WARN_RL(&rl,
- "SCTP load balancers do not currently support "
- "health checks. Not creating health checks for "
- "load balancer " UUID_FMT,
- UUID_ARGS(&nbrec_lb->header_.uuid));
- }
- } else {
- for (size_t i = 0; i < nbrec_lb->n_health_check; i++) {
- if (!strcmp(nbrec_lb->health_check[i]->vip, node->key)) {
- lb_health_check = nbrec_lb->health_check[i];
- break;
- }
- }
- }
-
- char *tokstr = xstrdup(node->value);
- char *save_ptr = NULL;
- char *token;
- size_t n_backends = 0;
- /* Format for a backend ips : IP1:port1,IP2:port2,...". */
- for (token = strtok_r(tokstr, ",", &save_ptr);
- token != NULL;
- token = strtok_r(NULL, ",", &save_ptr)) {
- n_backends++;
- }
-
- free(tokstr);
- tokstr = xstrdup(node->value);
- save_ptr = NULL;
-
- lb->vips[n_vips].n_backends = n_backends;
- lb->vips[n_vips].backends = xcalloc(n_backends,
- sizeof (struct lb_vip_backend));
- lb->vips[n_vips].health_check = lb_health_check ? true: false;
-
- size_t i = 0;
- for (token = strtok_r(tokstr, ",", &save_ptr);
- token != NULL;
- token = strtok_r(NULL, ",", &save_ptr)) {
- char *backend_ip;
- uint16_t backend_port;
-
- if (!ip_address_and_port_from_lb_key(token, &backend_ip,
- &backend_port,
- &addr_family)) {
- continue;
- }
+ for (size_t j = 0; j < lb_vip->n_backends; j++) {
+ struct lb_vip_backend *backend = &lb_vip->backends[j];
- /* Get the logical port to which this ip belongs to. */
- struct ovn_port *op = NULL;
- char *svc_mon_src_ip = NULL;
- const char *s = smap_get(&nbrec_lb->ip_port_mappings,
- backend_ip);
- if (s) {
- char *port_name = xstrdup(s);
- char *p = strstr(port_name, ":");
- if (p) {
- *p = 0;
- p++;
- op = ovn_port_find(ports, port_name);
- svc_mon_src_ip = xstrdup(p);
- }
- free(port_name);
- }
-
- lb->vips[n_vips].backends[i].ip = backend_ip;
- lb->vips[n_vips].backends[i].port = backend_port;
- lb->vips[n_vips].backends[i].addr_family = addr_family;
- lb->vips[n_vips].backends[i].op = op;
- lb->vips[n_vips].backends[i].svc_mon_src_ip = svc_mon_src_ip;
-
- if (lb_health_check && op && svc_mon_src_ip) {
- const char *protocol = nbrec_lb->protocol;
+ if (backend->op && backend->svc_mon_src_ip) {
+ backend->health_check = true;
+ const char *protocol = lb->nlb->protocol;
if (!protocol || !protocol[0]) {
protocol = "tcp";
}
- lb->vips[n_vips].backends[i].health_check = true;
+ backend->health_check = true;
struct service_monitor_info *mon_info =
- create_or_get_service_mon(ctx, monitor_map, backend_ip,
- op->nbsp->name, backend_port,
+ create_or_get_service_mon(ctx, monitor_map, backend->ip,
+ backend->op->nbsp->name,
+ backend->port,
protocol);
ovs_assert(mon_info);
sbrec_service_monitor_set_options(
- mon_info->sbrec_mon, &lb_health_check->options);
+ mon_info->sbrec_mon, &lb_vip->lb_health_check->options);
struct eth_addr ea;
if (!mon_info->sbrec_mon->src_mac ||
!eth_addr_from_string(mon_info->sbrec_mon->src_mac, &ea) ||
@@ -3524,72 +3384,24 @@ ovn_lb_create(struct northd_context *ctx, struct hmap *lbs,
}
if (!mon_info->sbrec_mon->src_ip ||
- strcmp(mon_info->sbrec_mon->src_ip, svc_mon_src_ip)) {
+ strcmp(mon_info->sbrec_mon->src_ip,
+ backend->svc_mon_src_ip)) {
sbrec_service_monitor_set_src_ip(mon_info->sbrec_mon,
- svc_mon_src_ip);
+ backend->svc_mon_src_ip);
}
- lb->vips[n_vips].backends[i].sbrec_monitor =
- mon_info->sbrec_mon;
+ lb_vip->backends[j].sbrec_monitor = mon_info->sbrec_mon;
mon_info->required = true;
- } else {
- lb->vips[n_vips].backends[i].health_check = false;
}
-
- i++;
}
-
- free(tokstr);
- n_vips++;
- }
-
- char *proto = NULL;
- if (nbrec_lb->protocol && nbrec_lb->protocol[0]) {
- proto = nbrec_lb->protocol;
}
-
- if (lb->nlb->n_selection_fields) {
- struct ds sel_fields = DS_EMPTY_INITIALIZER;
- for (size_t i = 0; i < lb->nlb->n_selection_fields; i++) {
- char *field = lb->nlb->selection_fields[i];
- if (!strcmp(field, "tp_src") && proto) {
- ds_put_format(&sel_fields, "%s_src,", proto);
- } else if (!strcmp(field, "tp_dst") && proto) {
- ds_put_format(&sel_fields, "%s_dst,", proto);
- } else {
- ds_put_format(&sel_fields, "%s,", field);
- }
- }
- ds_chomp(&sel_fields, ',');
- lb->selection_fields = ds_steal_cstr(&sel_fields);
- }
-
- return lb;
-}
-
-static void
-ovn_lb_destroy(struct ovn_lb *lb)
-{
- for (size_t i = 0; i < lb->n_vips; i++) {
- free(lb->vips[i].vip);
- free(lb->vips[i].backend_ips);
-
- for (size_t j = 0; j < lb->vips[i].n_backends; j++) {
- free(lb->vips[i].backends[j].ip);
- free(lb->vips[i].backends[j].svc_mon_src_ip);
- }
-
- free(lb->vips[i].backends);
- }
- free(lb->vips);
- free(lb->selection_fields);
}
static void build_lb_vip_ct_lb_actions(struct lb_vip *lb_vip,
struct ds *action,
char *selection_fields)
{
- if (lb_vip->health_check) {
+ if (lb_vip->lb_health_check) {
ds_put_cstr(action, "ct_lb(backends=");
size_t n_active_backends = 0;
@@ -3644,7 +3456,12 @@ build_ovn_lbs(struct northd_context *ctx, struct hmap *ports,
const struct nbrec_load_balancer *nbrec_lb;
NBREC_LOAD_BALANCER_FOR_EACH (nbrec_lb, ctx->ovnnb_idl) {
- ovn_lb_create(ctx, lbs, nbrec_lb, ports, &monitor_map);
+ ovn_nb_lb_create(nbrec_lb, ports, lbs, (void *)ovn_port_find);
+ }
+
+ struct ovn_lb *lb;
+ HMAP_FOR_EACH (lb, hmap_node, lbs) {
+ ovn_lb_svc_create(ctx, lb, &monitor_map);
}
struct service_monitor_info *mon_info;
@@ -3658,16 +3475,6 @@ build_ovn_lbs(struct northd_context *ctx, struct hmap *ports,
hmap_destroy(&monitor_map);
}
-static void
-destroy_ovn_lbs(struct hmap *lbs)
-{
- struct ovn_lb *lb;
- HMAP_FOR_EACH_POP (lb, hmap_node, lbs) {
- ovn_lb_destroy(lb);
- free(lb);
- }
-}
-
/* Updates the southbound Port_Binding table so that it contains the logical
* switch ports specified by the northbound database.
*
@@ -5030,34 +4837,6 @@ build_pre_acls(struct ovn_datapath *od, struct hmap *lflows)
}
}
-/* For a 'key' of the form "IP:port" or just "IP", sets 'port' and
- * 'ip_address'. The caller must free() the memory allocated for
- * 'ip_address'.
- * Returns true if parsing of 'key' was successful, false otherwise.
- */
-static bool
-ip_address_and_port_from_lb_key(const char *key, char **ip_address,
- uint16_t *port, int *addr_family)
-{
- struct sockaddr_storage ss;
- if (!inet_parse_active(key, 0, &ss, false)) {
- static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
- VLOG_WARN_RL(&rl, "bad ip address or port for load balancer key %s",
- key);
- *ip_address = NULL;
- *port = 0;
- *addr_family = 0;
- return false;
- }
-
- struct ds s = DS_EMPTY_INITIALIZER;
- ss_format_address_nobracks(&ss, &s);
- *ip_address = ds_steal_cstr(&s);
- *port = ss_get_port(&ss);
- *addr_family = ss.ss_family;
- return true;
-}
-
/*
* Returns true if logical switch is configured with DNS records, false
* otherwise.
@@ -7004,7 +6783,7 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
struct ovn_lb *lb;
HMAP_FOR_EACH (lb, hmap_node, lbs) {
for (size_t i = 0; i < lb->n_vips; i++) {
- if (!lb->vips[i].health_check) {
+ if (!lb->vips[i].lb_health_check) {
continue;
}
@@ -12358,8 +12137,7 @@ ovnnb_db_run(struct northd_context *ctx,
sync_meters(ctx);
sync_dns_entries(ctx, datapaths);
sync_lb_entries(ctx, datapaths);
- destroy_ovn_lbs(&lbs);
- hmap_destroy(&lbs);
+ ovn_lbs_destroy(&lbs);
struct ovn_igmp_group *igmp_group, *next_igmp_group;
From patchwork Tue Oct 27 17:18:41 2020
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Numan Siddique
X-Patchwork-Id: 1388806
Return-Path:
X-Original-To: incoming@patchwork.ozlabs.org
Delivered-To: patchwork-incoming@bilbo.ozlabs.org
Authentication-Results: ozlabs.org;
spf=pass (sender SPF authorized) smtp.mailfrom=openvswitch.org
(client-ip=140.211.166.137; helo=fraxinus.osuosl.org;
envelope-from=ovs-dev-bounces@openvswitch.org; receiver=)
Authentication-Results: ozlabs.org;
dmarc=none (p=none dis=none) header.from=ovn.org
Received: from fraxinus.osuosl.org (smtp4.osuosl.org [140.211.166.137])
(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
(No client certificate requested)
by ozlabs.org (Postfix) with ESMTPS id 4CLJNR0PC7z9sVT
for ; Wed, 28 Oct 2020 04:19:34 +1100 (AEDT)
Received: from localhost (localhost [127.0.0.1])
by fraxinus.osuosl.org (Postfix) with ESMTP id BDEBA84E77;
Tue, 27 Oct 2020 17:19:32 +0000 (UTC)
X-Virus-Scanned: amavisd-new at osuosl.org
Received: from fraxinus.osuosl.org ([127.0.0.1])
by localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)
with ESMTP id 6L_ndENfO-ii; Tue, 27 Oct 2020 17:19:29 +0000 (UTC)
Received: from lists.linuxfoundation.org (lf-lists.osuosl.org [140.211.9.56])
by fraxinus.osuosl.org (Postfix) with ESMTP id 7BD7685DCF;
Tue, 27 Oct 2020 17:19:29 +0000 (UTC)
Received: from lf-lists.osuosl.org (localhost [127.0.0.1])
by lists.linuxfoundation.org (Postfix) with ESMTP id 6B66BC0859;
Tue, 27 Oct 2020 17:19:29 +0000 (UTC)
X-Original-To: dev@openvswitch.org
Delivered-To: ovs-dev@lists.linuxfoundation.org
Received: from whitealder.osuosl.org (smtp1.osuosl.org [140.211.166.138])
by lists.linuxfoundation.org (Postfix) with ESMTP id 401DAC0051
for ; Tue, 27 Oct 2020 17:19:27 +0000 (UTC)
Received: from localhost (localhost [127.0.0.1])
by whitealder.osuosl.org (Postfix) with ESMTP id 239BF85230
for ; Tue, 27 Oct 2020 17:19:27 +0000 (UTC)
X-Virus-Scanned: amavisd-new at osuosl.org
Received: from whitealder.osuosl.org ([127.0.0.1])
by localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)
with ESMTP id LTCmSR8xS3P3 for ;
Tue, 27 Oct 2020 17:19:24 +0000 (UTC)
X-Greylist: domain auto-whitelisted by SQLgrey-1.7.6
Received: from relay10.mail.gandi.net (relay10.mail.gandi.net
[217.70.178.230])
by whitealder.osuosl.org (Postfix) with ESMTPS id 70F218690D
for ; Tue, 27 Oct 2020 17:19:23 +0000 (UTC)
Received: from nusiddiq.home.org.com (unknown [115.99.168.200])
(Authenticated sender: numans@ovn.org)
by relay10.mail.gandi.net (Postfix) with ESMTPSA id 110BE240003;
Tue, 27 Oct 2020 17:19:18 +0000 (UTC)
From: numans@ovn.org
To: dev@openvswitch.org
Date: Tue, 27 Oct 2020 22:48:41 +0530
Message-Id: <20201027171841.1180996-1-numans@ovn.org>
X-Mailer: git-send-email 2.26.2
In-Reply-To: <20201027171531.1178296-1-numans@ovn.org>
References: <20201027171531.1178296-1-numans@ovn.org>
MIME-Version: 1.0
Subject: [ovs-dev] [PATCH ovn v2 3/7] controller: Add load balancer hairpin
OF flows.
X-BeenThere: ovs-dev@openvswitch.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id:
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Errors-To: ovs-dev-bounces@openvswitch.org
Sender: "dev"
From: Numan Siddique
Presently to handle the load balancer hairpin traffic (the traffic destined to the
load balancer VIP is dnatted to the backend which originated the traffic), ovn-northd
adds a lot of logical flows to check this scenario. This patch attempts to reduce
these logical flows. Each ovn-controller will read the load balancers from
the newly added southbound Load_Balancer table and adds the load balancer hairpin OF
flows in tables 68, 69 and 70. For example, if the load balancer below is configured:
10.0.0.10:80 = 10.0.0.4:8080, 10.0.0.5:8090, then the following flows are added
table=68, ip.src = 10.0.0.4,ip.dst=10.0.0.4,tcp.dst=8080 actions=load:1->NXM_NX_REG9[7]
table=68, ip.src = 10.0.0.5,ip.dst=10.0.0.5,tcp.dst=8090 actions=load:1->NXM_NX_REG9[7]
table=69, ip.src = 10.0.0.4,ip.dst=10.0.0.10,tcp.src=8080 actions=load:1->NXM_NX_REG9[7]
table=69, ip.src = 10.0.0.5,ip.dst=10.0.0.10,tcp.src=8090 actions=load:1->NXM_NX_REG9[7]
table=70, ct.trk && ct.dnat && ct.nw_dst == 10.0.0.10. actions=ct(commit, zone=reg12, nat(src=10.0.0.5))
An upcoming patch will add OVN actions that perform the lookup in these tables to
handle the hairpin traffic.
Signed-off-by: Numan Siddique
---
controller/lflow.c | 253 +++++++++++++++++++
controller/lflow.h | 6 +-
controller/ovn-controller.c | 27 +-
include/ovn/logical-fields.h | 3 +
tests/ovn.at | 469 +++++++++++++++++++++++++++++++++++
5 files changed, 756 insertions(+), 2 deletions(-)
diff --git a/controller/lflow.c b/controller/lflow.c
index f631679c3f..657482626d 100644
--- a/controller/lflow.c
+++ b/controller/lflow.c
@@ -26,6 +26,7 @@
#include "ovn-controller.h"
#include "ovn/actions.h"
#include "ovn/expr.h"
+#include "lib/lb.h"
#include "lib/ovn-l7.h"
#include "lib/ovn-sb-idl.h"
#include "lib/extend-table.h"
@@ -1138,6 +1139,213 @@ add_neighbor_flows(struct ovsdb_idl_index *sbrec_port_binding_by_name,
}
}
+static void
+add_lb_vip_hairpin_flows(struct ovn_lb *lb, struct lb_vip *lb_vip,
+ struct lb_vip_backend *lb_backend,
+ uint8_t lb_proto,
+ struct ovn_desired_flow_table *flow_table)
+{
+ uint64_t stub[1024 / 8];
+ struct ofpbuf ofpacts = OFPBUF_STUB_INITIALIZER(stub);
+
+ uint8_t value = 1;
+ put_load(&value, sizeof value, MFF_LOG_FLAGS,
+ MLF_LOOKUP_LB_HAIRPIN_BIT, 1, &ofpacts);
+
+ ovs_be32 vip4;
+ struct in6_addr vip6;
+
+ if (lb_vip->addr_family == AF_INET) {
+ ovs_assert(ip_parse(lb_vip->vip, &vip4));
+ } else {
+ ovs_assert(ipv6_parse(lb_vip->vip, &vip6));
+ }
+
+ struct match hairpin_match = MATCH_CATCHALL_INITIALIZER;
+ struct match hairpin_reply_match = MATCH_CATCHALL_INITIALIZER;
+
+ if (lb_vip->addr_family == AF_INET) {
+ ovs_be32 ip4;
+ ovs_assert(ip_parse(lb_backend->ip, &ip4));
+
+ match_set_dl_type(&hairpin_match, htons(ETH_TYPE_IP));
+ match_set_nw_src(&hairpin_match, ip4);
+ match_set_nw_dst(&hairpin_match, ip4);
+
+ match_set_dl_type(&hairpin_reply_match,
+ htons(ETH_TYPE_IP));
+ match_set_nw_src(&hairpin_reply_match, ip4);
+ match_set_nw_dst(&hairpin_reply_match, vip4);
+ } else {
+ struct in6_addr ip6;
+ ovs_assert(ipv6_parse(lb_backend->ip, &ip6));
+
+ match_set_dl_type(&hairpin_match, htons(ETH_TYPE_IPV6));
+ match_set_ipv6_src(&hairpin_match, &ip6);
+ match_set_ipv6_dst(&hairpin_match, &ip6);
+
+ match_set_dl_type(&hairpin_reply_match,
+ htons(ETH_TYPE_IPV6));
+ match_set_ipv6_src(&hairpin_reply_match, &ip6);
+ match_set_ipv6_dst(&hairpin_reply_match, &vip6);
+ }
+
+ if (lb_backend->port) {
+ match_set_nw_proto(&hairpin_match, lb_proto);
+ match_set_tp_dst(&hairpin_match, htons(lb_backend->port));
+
+ match_set_nw_proto(&hairpin_reply_match, lb_proto);
+ match_set_tp_src(&hairpin_reply_match,
+ htons(lb_backend->port));
+ }
+
+ for (size_t i = 0; i < lb->slb->n_datapaths; i++) {
+ match_set_metadata(&hairpin_match,
+ htonll(lb->slb->datapaths[i]->tunnel_key));
+ match_set_metadata(&hairpin_reply_match,
+ htonll(lb->slb->datapaths[i]->tunnel_key));
+
+ ofctrl_add_flow(flow_table, OFTABLE_CHK_LB_HAIRPIN, 100,
+ lb->slb->header_.uuid.parts[0], &hairpin_match,
+ &ofpacts, &lb->slb->header_.uuid);
+
+ ofctrl_add_flow(flow_table, OFTABLE_CHK_LB_HAIRPIN_REPLY, 100,
+ lb->slb->header_.uuid.parts[0],
+ &hairpin_reply_match,
+ &ofpacts, &lb->slb->header_.uuid);
+ }
+
+ ofpbuf_uninit(&ofpacts);
+}
+
+static void
+add_lb_ct_snat_vip_flows(struct ovn_lb *lb, struct lb_vip *lb_vip,
+ struct ovn_desired_flow_table *flow_table)
+{
+ ovs_be32 vip4;
+ struct in6_addr vip6;
+
+ if (lb_vip->addr_family == AF_INET) {
+ ovs_assert(ip_parse(lb_vip->vip, &vip4));
+ } else {
+ ovs_assert(ipv6_parse(lb_vip->vip, &vip6));
+ }
+
+ uint64_t stub[1024 / 8];
+ struct ofpbuf ofpacts = OFPBUF_STUB_INITIALIZER(stub);
+
+ struct ofpact_conntrack *ct = ofpact_put_CT(&ofpacts);
+ ct->recirc_table = NX_CT_RECIRC_NONE;
+ ct->zone_src.field = mf_from_id(MFF_LOG_SNAT_ZONE);
+ ct->zone_src.ofs = 0;
+ ct->zone_src.n_bits = 16;
+ ct->flags = 0;
+ ct->alg = 0;
+
+ size_t nat_offset;
+ nat_offset = ofpacts.size;
+ ofpbuf_pull(&ofpacts, nat_offset);
+
+ struct ofpact_nat *nat = ofpact_put_NAT(&ofpacts);
+ nat->flags = NX_NAT_F_SRC;
+ nat->range_af = AF_UNSPEC;
+ if (lb_vip->addr_family == AF_INET) {
+ nat->range_af = AF_INET;
+ nat->range.addr.ipv4.min = vip4;
+ } else {
+ nat->range_af = AF_INET6;
+ nat->range.addr.ipv6.min = vip6;
+ }
+ ofpacts.header = ofpbuf_push_uninit(&ofpacts, nat_offset);
+ ct = ofpacts.header;
+ ct->flags |= NX_CT_F_COMMIT;
+
+ ofpact_finish(&ofpacts, &ct->ofpact);
+
+ struct match match = MATCH_CATCHALL_INITIALIZER;
+ if (lb_vip->addr_family == AF_INET) {
+ match_set_dl_type(&match, htons(ETH_TYPE_IP));
+ match_set_ct_nw_dst(&match, vip4);
+ } else {
+ match_set_dl_type(&match, htons(ETH_TYPE_IPV6));
+ match_set_ct_ipv6_dst(&match, &vip6);
+ }
+
+ uint32_t ct_state = OVS_CS_F_TRACKED | OVS_CS_F_DST_NAT;
+ match_set_ct_state_masked(&match, ct_state, ct_state);
+
+ for (size_t i = 0; i < lb->slb->n_datapaths; i++) {
+ match_set_metadata(&match,
+ htonll(lb->slb->datapaths[i]->tunnel_key));
+
+ ofctrl_add_flow(flow_table, OFTABLE_CT_SNAT_FOR_VIP, 100,
+ lb->slb->header_.uuid.parts[0],
+ &match, &ofpacts, &lb->slb->header_.uuid);
+ }
+
+ ofpbuf_uninit(&ofpacts);
+}
+
+static void
+consider_lb_hairpin_flows(const struct sbrec_load_balancer *sbrec_lb,
+ const struct hmap *local_datapaths,
+ struct ovn_desired_flow_table *flow_table)
+{
+ bool consider_lb = false;
+
+ /* Check if we need to add flows or not. If there is one datapath
+ * in the local_datapaths, it means all the datapaths of the lb
+ * will be in the local_datapaths. */
+ for (size_t i = 0; i < sbrec_lb->n_datapaths; i++) {
+ if (get_local_datapath(local_datapaths,
+ sbrec_lb->datapaths[i]->tunnel_key)) {
+ consider_lb = true;
+ break;
+ }
+ }
+
+ if (!consider_lb) {
+ return;
+ }
+
+ struct ovn_lb *lb = ovn_sb_lb_create(sbrec_lb);
+ uint8_t lb_proto = IPPROTO_TCP;
+ if (lb->slb->protocol && lb->slb->protocol[0]) {
+ if (!strcmp(lb->slb->protocol, "udp")) {
+ lb_proto = IPPROTO_UDP;
+ } else if (!strcmp(lb->slb->protocol, "sctp")) {
+ lb_proto = IPPROTO_SCTP;
+ }
+ }
+
+ for (size_t i = 0; i < lb->n_vips; i++) {
+ struct lb_vip *lb_vip = &lb->vips[i];
+
+ for (size_t j = 0; j < lb_vip->n_backends; j++) {
+ struct lb_vip_backend *lb_backend = &lb_vip->backends[j];
+ add_lb_vip_hairpin_flows(lb, lb_vip, lb_backend, lb_proto,
+ flow_table);
+ }
+
+ add_lb_ct_snat_vip_flows(lb, lb_vip, flow_table);
+ }
+
+ ovn_lb_destroy(lb);
+}
+
+/* Adds OpenFlow flows to flow tables for each Load balancer VIPs and
+ * backends to handle the load balanced hairpin traffic. */
+static void
+add_lb_hairpin_flows(const struct sbrec_load_balancer_table *lb_table,
+ const struct hmap *local_datapaths,
+ struct ovn_desired_flow_table *flow_table)
+{
+ const struct sbrec_load_balancer *lb;
+ SBREC_LOAD_BALANCER_TABLE_FOR_EACH (lb, lb_table) {
+ consider_lb_hairpin_flows(lb, local_datapaths, flow_table);
+ }
+}
+
/* Handles neighbor changes in mac_binding table. */
void
lflow_handle_changed_neighbors(
@@ -1197,6 +1405,8 @@ lflow_run(struct lflow_ctx_in *l_ctx_in, struct lflow_ctx_out *l_ctx_out)
add_neighbor_flows(l_ctx_in->sbrec_port_binding_by_name,
l_ctx_in->mac_binding_table, l_ctx_in->local_datapaths,
l_ctx_out->flow_table);
+ add_lb_hairpin_flows(l_ctx_in->lb_table, l_ctx_in->local_datapaths,
+ l_ctx_out->flow_table);
}
void
@@ -1256,6 +1466,15 @@ lflow_add_flows_for_datapath(const struct sbrec_datapath_binding *dp,
dhcp_opts_destroy(&dhcpv6_opts);
nd_ra_opts_destroy(&nd_ra_opts);
controller_event_opts_destroy(&controller_event_opts);
+
+ /* Add load balancer hairpin flows if the datapath has any load balancers
+ * associated. */
+ for (size_t i = 0; i < dp->n_load_balancers; i++) {
+ consider_lb_hairpin_flows(dp->load_balancers[i],
+ l_ctx_in->local_datapaths,
+ l_ctx_out->flow_table);
+ }
+
return handled;
}
@@ -1273,3 +1492,37 @@ lflow_handle_flows_for_lport(const struct sbrec_port_binding *pb,
return lflow_handle_changed_ref(REF_TYPE_PORTBINDING, pb_ref_name,
l_ctx_in, l_ctx_out, &changed);
}
+
+bool
+lflow_handle_changed_lbs(struct lflow_ctx_in *l_ctx_in,
+ struct lflow_ctx_out *l_ctx_out)
+{
+ const struct sbrec_load_balancer *lb;
+
+ SBREC_LOAD_BALANCER_TABLE_FOR_EACH_TRACKED (lb, l_ctx_in->lb_table) {
+ if (sbrec_load_balancer_is_deleted(lb)) {
+ VLOG_DBG("Remove hairpin flows for deleted load balancer "UUID_FMT,
+ UUID_ARGS(&lb->header_.uuid));
+ ofctrl_remove_flows(l_ctx_out->flow_table, &lb->header_.uuid);
+ }
+ }
+
+ SBREC_LOAD_BALANCER_TABLE_FOR_EACH_TRACKED (lb, l_ctx_in->lb_table) {
+ if (sbrec_load_balancer_is_deleted(lb)) {
+ continue;
+ }
+
+ if (!sbrec_load_balancer_is_new(lb)) {
+ VLOG_DBG("Remove hairpin flows for updated load balancer "UUID_FMT,
+ UUID_ARGS(&lb->header_.uuid));
+ ofctrl_remove_flows(l_ctx_out->flow_table, &lb->header_.uuid);
+ }
+
+ VLOG_DBG("Add load balancer hairpin flows for "UUID_FMT,
+ UUID_ARGS(&lb->header_.uuid));
+ consider_lb_hairpin_flows(lb, l_ctx_in->local_datapaths,
+ l_ctx_out->flow_table);
+ }
+
+ return true;
+}
diff --git a/controller/lflow.h b/controller/lflow.h
index 1251fb0f45..1225131deb 100644
--- a/controller/lflow.h
+++ b/controller/lflow.h
@@ -68,6 +68,9 @@ struct uuid;
#define OFTABLE_LOG_TO_PHY 65
#define OFTABLE_MAC_BINDING 66
#define OFTABLE_MAC_LOOKUP 67
+#define OFTABLE_CHK_LB_HAIRPIN 68
+#define OFTABLE_CHK_LB_HAIRPIN_REPLY 69
+#define OFTABLE_CT_SNAT_FOR_VIP 70
/* The number of tables for the ingress and egress pipelines. */
#define LOG_PIPELINE_LEN 24
@@ -132,6 +135,7 @@ struct lflow_ctx_in {
const struct sbrec_logical_flow_table *logical_flow_table;
const struct sbrec_multicast_group_table *mc_group_table;
const struct sbrec_chassis *chassis;
+ const struct sbrec_load_balancer_table *lb_table;
const struct hmap *local_datapaths;
const struct shash *addr_sets;
const struct shash *port_groups;
@@ -160,7 +164,7 @@ void lflow_handle_changed_neighbors(
const struct sbrec_mac_binding_table *,
const struct hmap *local_datapaths,
struct ovn_desired_flow_table *);
-
+bool lflow_handle_changed_lbs(struct lflow_ctx_in *, struct lflow_ctx_out *);
void lflow_destroy(void);
void lflow_cache_init(struct hmap *);
diff --git a/controller/ovn-controller.c b/controller/ovn-controller.c
index a06cae3ccb..4150b4cb1c 100644
--- a/controller/ovn-controller.c
+++ b/controller/ovn-controller.c
@@ -790,7 +790,8 @@ ctrl_register_ovs_idl(struct ovsdb_idl *ovs_idl)
SB_NODE(logical_flow, "logical_flow") \
SB_NODE(dhcp_options, "dhcp_options") \
SB_NODE(dhcpv6_options, "dhcpv6_options") \
- SB_NODE(dns, "dns")
+ SB_NODE(dns, "dns") \
+ SB_NODE(load_balancer, "load_balancer")
enum sb_engine_node {
#define SB_NODE(NAME, NAME_STR) SB_##NAME,
@@ -1682,6 +1683,10 @@ static void init_lflow_ctx(struct engine_node *node,
(struct sbrec_multicast_group_table *)EN_OVSDB_GET(
engine_get_input("SB_multicast_group", node));
+ struct sbrec_load_balancer_table *lb_table =
+ (struct sbrec_load_balancer_table *)EN_OVSDB_GET(
+ engine_get_input("SB_load_balancer", node));
+
const char *chassis_id = chassis_get_id();
const struct sbrec_chassis *chassis = NULL;
struct ovsdb_idl_index *sbrec_chassis_by_name =
@@ -1713,6 +1718,7 @@ static void init_lflow_ctx(struct engine_node *node,
l_ctx_in->logical_flow_table = logical_flow_table;
l_ctx_in->mc_group_table = multicast_group_table;
l_ctx_in->chassis = chassis;
+ l_ctx_in->lb_table = lb_table;
l_ctx_in->local_datapaths = &rt_data->local_datapaths;
l_ctx_in->addr_sets = addr_sets;
l_ctx_in->port_groups = port_groups;
@@ -2131,6 +2137,23 @@ flow_output_runtime_data_handler(struct engine_node *node,
return true;
}
+static bool
+flow_output_sb_load_balancer_handler(struct engine_node *node, void *data)
+{
+ struct ed_type_runtime_data *rt_data =
+ engine_get_input_data("runtime_data", node);
+
+ struct ed_type_flow_output *fo = data;
+ struct lflow_ctx_in l_ctx_in;
+ struct lflow_ctx_out l_ctx_out;
+ init_lflow_ctx(node, rt_data, fo, &l_ctx_in, &l_ctx_out);
+
+ bool handled = lflow_handle_changed_lbs(&l_ctx_in, &l_ctx_out);
+
+ engine_set_node_state(node, EN_UPDATED);
+ return handled;
+}
+
struct ovn_controller_exit_args {
bool *exiting;
bool *restart;
@@ -2327,6 +2350,8 @@ main(int argc, char *argv[])
engine_add_input(&en_flow_output, &en_sb_dhcp_options, NULL);
engine_add_input(&en_flow_output, &en_sb_dhcpv6_options, NULL);
engine_add_input(&en_flow_output, &en_sb_dns, NULL);
+ engine_add_input(&en_flow_output, &en_sb_load_balancer,
+ flow_output_sb_load_balancer_handler);
engine_add_input(&en_ct_zones, &en_ovs_open_vswitch, NULL);
engine_add_input(&en_ct_zones, &en_ovs_bridge, NULL);
diff --git a/include/ovn/logical-fields.h b/include/ovn/logical-fields.h
index ac6f2f909b..0fe5bc3bb4 100644
--- a/include/ovn/logical-fields.h
+++ b/include/ovn/logical-fields.h
@@ -57,6 +57,7 @@ enum mff_log_flags_bits {
MLF_LOCAL_ONLY_BIT = 4,
MLF_NESTED_CONTAINER_BIT = 5,
MLF_LOOKUP_MAC_BIT = 6,
+ MLF_LOOKUP_LB_HAIRPIN_BIT = 7,
};
/* MFF_LOG_FLAGS_REG flag assignments */
@@ -88,6 +89,8 @@ enum mff_log_flags {
/* Indicate that the lookup in the mac binding table was successful. */
MLF_LOOKUP_MAC = (1 << MLF_LOOKUP_MAC_BIT),
+
+ MLF_LOOKUP_LB_HAIRPIN = (1 << MLF_LOOKUP_LB_HAIRPIN_BIT),
};
/* OVN logical fields
diff --git a/tests/ovn.at b/tests/ovn.at
index 04b7a3df73..3679978612 100644
--- a/tests/ovn.at
+++ b/tests/ovn.at
@@ -22800,3 +22800,472 @@ AT_CHECK([test "$encap_rec_mvtep" == "$encap_rec_mvtep1"], [0], [])
OVN_CLEANUP([hv1])
AT_CLEANUP
+
+AT_SETUP([ovn -- Load Balancer LS hairpin OF flows])
+ovn_start
+
+net_add n1
+
+sim_add hv1
+as hv1
+ovs-vsctl add-br br-phys
+ovn_attach n1 br-phys 192.168.0.1
+ovs-vsctl -- add-port br-int hv1-vif1 -- \
+ set interface hv1-vif1 external-ids:iface-id=sw0-p1 \
+ options:tx_pcap=hv1/vif1-tx.pcap \
+ options:rxq_pcap=hv1/vif1-rx.pcap \
+ ofport-request=1
+ovs-vsctl -- add-port br-int hv1-vif2 -- \
+ set interface hv1-vif2 external-ids:iface-id=sw1-p1 \
+ options:tx_pcap=hv1/vif2-tx.pcap \
+ options:rxq_pcap=hv1/vif2-rx.pcap \
+ ofport-request=2
+
+sim_add hv2
+as hv2
+ovs-vsctl add-br br-phys
+ovn_attach n1 br-phys 192.168.0.2
+ovs-vsctl -- add-port br-int hv2-vif1 -- \
+ set interface hv2-vif1 external-ids:iface-id=sw0-p2 \
+ options:tx_pcap=hv2/vif1-tx.pcap \
+ options:rxq_pcap=hv2/vif1-rx.pcap \
+ ofport-request=1
+ovs-vsctl -- add-port br-int hv2-vif2 -- \
+ set interface hv2-vif2 external-ids:iface-id=sw1-p2 \
+ options:tx_pcap=hv2/vif2-tx.pcap \
+ options:rxq_pcap=hv2/vif2-rx.pcap \
+ ofport-request=2
+
+ovn-nbctl --wait=hv ls-add sw0
+ovn-nbctl lsp-add sw0 sw0-p1 -- lsp-set-addresses sw0-p1 00:00:00:00:00:01
+
+ovn-nbctl ls-add sw1
+ovn-nbctl lsp-add sw1 sw1-p1 -- lsp-set-addresses sw1-p1 00:00:00:00:01:01
+
+OVS_WAIT_UNTIL([test x$(ovn-nbctl lsp-get-up sw0-p1) = xup])
+OVS_WAIT_UNTIL([test x$(ovn-nbctl lsp-get-up sw1-p1) = xup])
+
+ovn-nbctl lb-add lb-ipv4-tcp 88.88.88.88:8080 42.42.42.1:4041 tcp
+ovn-nbctl lb-add lb-ipv4-udp 88.88.88.88:4040 42.42.42.1:2021 udp
+ovn-nbctl lb-add lb-ipv6-tcp [[8800::0088]]:8080 [[4200::1]]:4041 tcp
+ovn-nbctl --wait=hv lb-add lb-ipv6-udp [[8800::0088]]:4040 [[4200::1]]:2021 udp
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68], [0], [dnl
+NXST_FLOW reply (xid=0x8):
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=69], [0], [dnl
+NXST_FLOW reply (xid=0x8):
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=70], [0], [dnl
+NXST_FLOW reply (xid=0x8):
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=68], [0], [dnl
+NXST_FLOW reply (xid=0x8):
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69], [0], [dnl
+NXST_FLOW reply (xid=0x8):
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70], [0], [dnl
+NXST_FLOW reply (xid=0x8):
+])
+
+ovn-nbctl --wait=hv ls-lb-add sw0 lb-ipv4-tcp
+
+OVS_WAIT_UNTIL(
+ [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 1]
+)
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | cut -d ' ' -f8-], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | cut -d ' ' -f8-], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | cut -d ' ' -f8-], [0], [dnl
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=68], [0], [dnl
+NXST_FLOW reply (xid=0x8):
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69], [0], [dnl
+NXST_FLOW reply (xid=0x8):
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70], [0], [dnl
+NXST_FLOW reply (xid=0x8):
+])
+
+ovn-nbctl lb-add lb-ipv4-tcp 88.88.88.90:8080 42.42.42.42:4041,52.52.52.52:4042 tcp
+
+OVS_WAIT_UNTIL(
+ [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 3]
+)
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=88.88.88.90,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=88.88.88.90,tp_src=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | cut -d ' ' -f8-], [0], [dnl
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.90,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=68], [0], [dnl
+NXST_FLOW reply (xid=0x8):
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69], [0], [dnl
+NXST_FLOW reply (xid=0x8):
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70], [0], [dnl
+NXST_FLOW reply (xid=0x8):
+])
+
+ovn-nbctl lsp-add sw0 sw0-p2
+# hv2 should bind sw0-p2 and it should install the LB hairpin flows.
+OVS_WAIT_UNTIL([test x$(ovn-nbctl lsp-get-up sw0-p2) = xup])
+
+OVS_WAIT_UNTIL(
+ [test $(as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 3]
+)
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=88.88.88.90,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=88.88.88.90,tp_src=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | cut -d ' ' -f8-], [0], [dnl
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.90,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+])
+
+ovn-nbctl --wait=hv ls-lb-add sw0 lb-ipv4-udp
+
+OVS_WAIT_UNTIL(
+ [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 4]
+)
+
+OVS_WAIT_UNTIL(
+ [test $(as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 4]
+)
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=88.88.88.90,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=88.88.88.90,tp_src=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | cut -d ' ' -f8-], [0], [dnl
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.90,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=88.88.88.90,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=88.88.88.90,tp_src=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | cut -d ' ' -f8-], [0], [dnl
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.90,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+])
+
+ovn-nbctl --wait=hv ls-lb-add sw0 lb-ipv6-tcp
+
+OVS_WAIT_UNTIL(
+ [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 5]
+)
+
+OVS_WAIT_UNTIL(
+ [test $(as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 5]
+)
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=88.88.88.90,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=88.88.88.90,tp_src=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,ct_state=+trk+dnat,ct_ipv6_dst=8800::88,ipv6,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.90,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=88.88.88.90,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=88.88.88.90,tp_src=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,ct_state=+trk+dnat,ct_ipv6_dst=8800::88,ipv6,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.90,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+])
+
+ovn-nbctl --wait=hv ls-lb-add sw0 lb-ipv6-udp
+
+OVS_WAIT_UNTIL(
+ [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 6]
+)
+
+OVS_WAIT_UNTIL(
+ [test $(as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 6]
+)
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=88.88.88.90,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=88.88.88.90,tp_src=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,ct_state=+trk+dnat,ct_ipv6_dst=8800::88,ipv6,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.90,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=88.88.88.90,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=88.88.88.90,tp_src=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,ct_state=+trk+dnat,ct_ipv6_dst=8800::88,ipv6,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.90,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+])
+
+ovn-nbctl --wait=hv ls-lb-add sw1 lb-ipv6-udp
+
+OVS_WAIT_UNTIL(
+ [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 7]
+)
+
+OVS_WAIT_UNTIL(
+ [test $(as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 7]
+)
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp6,metadata=0x2,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=88.88.88.90,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=88.88.88.90,tp_src=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp6,metadata=0x2,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,ct_state=+trk+dnat,ct_ipv6_dst=8800::88,ipv6,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+priority=100,ct_state=+trk+dnat,ct_ipv6_dst=8800::88,ipv6,metadata=0x2 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.90,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp6,metadata=0x2,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=88.88.88.90,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=88.88.88.90,tp_src=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp6,metadata=0x2,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,ct_state=+trk+dnat,ct_ipv6_dst=8800::88,ipv6,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+priority=100,ct_state=+trk+dnat,ct_ipv6_dst=8800::88,ipv6,metadata=0x2 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.90,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+])
+
+as hv2 ovs-vsctl del-port hv2-vif1
+OVS_WAIT_UNTIL([test x$(ovn-nbctl lsp-get-up sw0-p2) = xdown])
+
+# Trigger recompute on hv2 as sw0 will not be cleared from local_datapaths.
+as hv2 ovn-appctl -t ovn-controller recompute
+
+OVS_WAIT_UNTIL(
+ [test $(as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 0]
+)
+
+OVS_WAIT_UNTIL(
+ [test $(as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | wc -l) -eq 0]
+)
+
+OVS_WAIT_UNTIL(
+ [test $(as hv2 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | wc -l) -eq 0]
+)
+
+OVS_WAIT_UNTIL(
+ [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 7]
+)
+
+ovn-nbctl --wait=hv lb-del lb-ipv4-tcp
+
+OVS_WAIT_UNTIL(
+ [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 4]
+)
+
+OVS_WAIT_UNTIL(
+ [test $(as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 0]
+)
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp6,metadata=0x2,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+priority=100,udp6,metadata=0x2,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,ct_state=+trk+dnat,ct_ipv6_dst=8800::88,ipv6,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+priority=100,ct_state=+trk+dnat,ct_ipv6_dst=8800::88,ipv6,metadata=0x2 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+])
+
+ovn-nbctl --wait=hv ls-del sw0
+ovn-nbctl --wait=hv ls-del sw1
+
+OVS_WAIT_UNTIL(
+ [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 0]
+)
+
+OVS_WAIT_UNTIL(
+ [test $(as hv1 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | wc -l) -eq 0]
+)
+
+OVS_WAIT_UNTIL(
+ [test $(as hv1 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | wc -l) -eq 0]
+)
+
+OVS_WAIT_UNTIL(
+ [test $(as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 0]
+)
+
+OVS_WAIT_UNTIL(
+ [test $(as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | wc -l) -eq 0]
+)
+
+OVS_WAIT_UNTIL(
+ [test $(as hv2 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | wc -l) -eq 0]
+)
+
+OVN_CLEANUP([hv1], [hv2])
+AT_CLEANUP
From patchwork Tue Oct 27 17:19:20 2020
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Numan Siddique
X-Patchwork-Id: 1388807
Return-Path:
X-Original-To: incoming@patchwork.ozlabs.org
Delivered-To: patchwork-incoming@bilbo.ozlabs.org
Authentication-Results: ozlabs.org;
spf=pass (sender SPF authorized) smtp.mailfrom=openvswitch.org
(client-ip=140.211.166.133; helo=hemlock.osuosl.org;
envelope-from=ovs-dev-bounces@openvswitch.org; receiver=)
Authentication-Results: ozlabs.org;
dmarc=none (p=none dis=none) header.from=ovn.org
Received: from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133])
(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
(No client certificate requested)
by ozlabs.org (Postfix) with ESMTPS id 4CLJNT4z2Rz9sVj
for ; Wed, 28 Oct 2020 04:19:37 +1100 (AEDT)
Received: from localhost (localhost [127.0.0.1])
by hemlock.osuosl.org (Postfix) with ESMTP id 1A2B58731E;
Tue, 27 Oct 2020 17:19:36 +0000 (UTC)
X-Virus-Scanned: amavisd-new at osuosl.org
Received: from hemlock.osuosl.org ([127.0.0.1])
by localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)
with ESMTP id 7opgNT+ATXqv; Tue, 27 Oct 2020 17:19:34 +0000 (UTC)
Received: from lists.linuxfoundation.org (lf-lists.osuosl.org [140.211.9.56])
by hemlock.osuosl.org (Postfix) with ESMTP id CBDDD87313;
Tue, 27 Oct 2020 17:19:34 +0000 (UTC)
Received: from lf-lists.osuosl.org (localhost [127.0.0.1])
by lists.linuxfoundation.org (Postfix) with ESMTP id BFCBCC0859;
Tue, 27 Oct 2020 17:19:34 +0000 (UTC)
X-Original-To: dev@openvswitch.org
Delivered-To: ovs-dev@lists.linuxfoundation.org
Received: from fraxinus.osuosl.org (smtp4.osuosl.org [140.211.166.137])
by lists.linuxfoundation.org (Postfix) with ESMTP id CC3D9C0051
for ; Tue, 27 Oct 2020 17:19:33 +0000 (UTC)
Received: from localhost (localhost [127.0.0.1])
by fraxinus.osuosl.org (Postfix) with ESMTP id C8748860CD
for ; Tue, 27 Oct 2020 17:19:33 +0000 (UTC)
X-Virus-Scanned: amavisd-new at osuosl.org
Received: from fraxinus.osuosl.org ([127.0.0.1])
by localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)
with ESMTP id iD6dTdD5-2K5 for ;
Tue, 27 Oct 2020 17:19:31 +0000 (UTC)
X-Greylist: domain auto-whitelisted by SQLgrey-1.7.6
Received: from relay7-d.mail.gandi.net (relay7-d.mail.gandi.net
[217.70.183.200])
by fraxinus.osuosl.org (Postfix) with ESMTPS id 74A3186090
for ; Tue, 27 Oct 2020 17:19:30 +0000 (UTC)
X-Originating-IP: 115.99.168.200
Received: from nusiddiq.home.org.com (unknown [115.99.168.200])
(Authenticated sender: numans@ovn.org)
by relay7-d.mail.gandi.net (Postfix) with ESMTPSA id B7E1120009;
Tue, 27 Oct 2020 17:19:27 +0000 (UTC)
From: numans@ovn.org
To: dev@openvswitch.org
Date: Tue, 27 Oct 2020 22:49:20 +0530
Message-Id: <20201027171920.1181126-1-numans@ovn.org>
X-Mailer: git-send-email 2.26.2
In-Reply-To: <20201027171531.1178296-1-numans@ovn.org>
References: <20201027171531.1178296-1-numans@ovn.org>
MIME-Version: 1.0
Subject: [ovs-dev] [PATCH ovn v2 4/7] actions: Add new actions
chk_lb_hairpin, chk_lb_hairpin_reply and ct_snat_to_vip.
X-BeenThere: ovs-dev@openvswitch.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id:
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Errors-To: ovs-dev-bounces@openvswitch.org
Sender: "dev"
From: Numan Siddique
The action - chk_lb_hairpin checks if the packet destined to a load balancer VIP
is to be hairpinned back to the same destination and if so, sets the destination register
bit to 1.
The action - chk_lb_hairpin_reply checks if the packet is a reply of the hairpinned
packet. If so, it sets the destination register bit to 1.
The action ct_snat_to_vip SNATs the source IP to the load balancer VIP if chk_lb_hairpin()
returned true.
These actions will be used by ovn-northd in the hairpin logical flows added in an upcoming patch.
This helps significantly reduce the number of hairpin logical flows.
Signed-off-by: Numan Siddique
---
controller/lflow.c | 3 ++
include/ovn/actions.h | 15 ++++--
lib/actions.c | 116 ++++++++++++++++++++++++++++++++++++++----
ovn-sb.xml | 37 ++++++++++++++
tests/ovn.at | 39 ++++++++++++++
tests/test-ovn.c | 3 ++
utilities/ovn-trace.c | 65 ++++++++++++++++++++++-
7 files changed, 265 insertions(+), 13 deletions(-)
diff --git a/controller/lflow.c b/controller/lflow.c
index 657482626d..588c72dc22 100644
--- a/controller/lflow.c
+++ b/controller/lflow.c
@@ -698,6 +698,9 @@ add_matches_to_flow_table(const struct sbrec_logical_flow *lflow,
.output_ptable = output_ptable,
.mac_bind_ptable = OFTABLE_MAC_BINDING,
.mac_lookup_ptable = OFTABLE_MAC_LOOKUP,
+ .lb_hairpin_ptable = OFTABLE_CHK_LB_HAIRPIN,
+ .lb_hairpin_reply_ptable = OFTABLE_CHK_LB_HAIRPIN_REPLY,
+ .ct_snat_vip_ptable = OFTABLE_CT_SNAT_FOR_VIP,
};
ovnacts_encode(ovnacts->data, ovnacts->size, &ep, &ofpacts);
diff --git a/include/ovn/actions.h b/include/ovn/actions.h
index b4e5acabb9..32f9c53dfc 100644
--- a/include/ovn/actions.h
+++ b/include/ovn/actions.h
@@ -83,7 +83,7 @@ struct ovn_extend_table;
OVNACT(PUT_DHCPV4_OPTS, ovnact_put_opts) \
OVNACT(PUT_DHCPV6_OPTS, ovnact_put_opts) \
OVNACT(SET_QUEUE, ovnact_set_queue) \
- OVNACT(DNS_LOOKUP, ovnact_dns_lookup) \
+ OVNACT(DNS_LOOKUP, ovnact_result) \
OVNACT(LOG, ovnact_log) \
OVNACT(PUT_ND_RA_OPTS, ovnact_put_opts) \
OVNACT(ND_NS, ovnact_nest) \
@@ -97,6 +97,9 @@ struct ovn_extend_table;
OVNACT(DHCP6_REPLY, ovnact_null) \
OVNACT(ICMP6_ERROR, ovnact_nest) \
OVNACT(REJECT, ovnact_nest) \
+ OVNACT(CHK_LB_HAIRPIN, ovnact_result) \
+ OVNACT(CHK_LB_HAIRPIN_REPLY, ovnact_result) \
+ OVNACT(CT_SNAT_TO_VIP, ovnact_null) \
/* enum ovnact_type, with a member OVNACT_ for each action. */
enum OVS_PACKED_ENUM ovnact_type {
@@ -338,8 +341,8 @@ struct ovnact_set_queue {
uint16_t queue_id;
};
-/* OVNACT_DNS_LOOKUP. */
-struct ovnact_dns_lookup {
+/* OVNACT_DNS_LOOKUP, OVNACT_CHK_LB_HAIRPIN, OVNACT_CHK_LB_HAIRPIN_REPLY. */
+struct ovnact_result {
struct ovnact ovnact;
struct expr_field dst; /* 1-bit destination field. */
};
@@ -727,6 +730,12 @@ struct ovnact_encode_params {
resubmit. */
uint8_t mac_lookup_ptable; /* OpenFlow table for
'lookup_arp'/'lookup_nd' to resubmit. */
+ uint8_t lb_hairpin_ptable; /* OpenFlow table for
+ * 'chk_lb_hairpin' to resubmit. */
+ uint8_t lb_hairpin_reply_ptable; /* OpenFlow table for
+ * 'chk_lb_hairpin_reply' to resubmit. */
+ uint8_t ct_snat_vip_ptable; /* OpenFlow table for
+ * 'ct_snat_to_vip' to resubmit. */
};
void ovnacts_encode(const struct ovnact[], size_t ovnacts_len,
diff --git a/lib/actions.c b/lib/actions.c
index 23e54ef2a6..015bcbc4dc 100644
--- a/lib/actions.c
+++ b/lib/actions.c
@@ -2655,13 +2655,14 @@ ovnact_set_queue_free(struct ovnact_set_queue *a OVS_UNUSED)
}
static void
-parse_dns_lookup(struct action_context *ctx, const struct expr_field *dst,
- struct ovnact_dns_lookup *dl)
+parse_ovnact_result(struct action_context *ctx, const char *name,
+ const char *prereq, const struct expr_field *dst,
+ struct ovnact_result *res)
{
- lexer_get(ctx->lexer); /* Skip dns_lookup. */
+ lexer_get(ctx->lexer); /* Skip action name. */
lexer_get(ctx->lexer); /* Skip '('. */
if (!lexer_match(ctx->lexer, LEX_T_RPAREN)) {
- lexer_error(ctx->lexer, "dns_lookup doesn't take any parameters");
+ lexer_error(ctx->lexer, "%s doesn't take any parameters", name);
return;
}
/* Validate that the destination is a 1-bit, modifiable field. */
@@ -2671,19 +2672,29 @@ parse_dns_lookup(struct action_context *ctx, const struct expr_field *dst,
free(error);
return;
}
- dl->dst = *dst;
- add_prerequisite(ctx, "udp");
+ res->dst = *dst;
+
+ if (prereq) {
+ add_prerequisite(ctx, prereq);
+ }
}
static void
-format_DNS_LOOKUP(const struct ovnact_dns_lookup *dl, struct ds *s)
+parse_dns_lookup(struct action_context *ctx, const struct expr_field *dst,
+ struct ovnact_result *dl)
+{
+ parse_ovnact_result(ctx, "dns_lookup", "udp", dst, dl);
+}
+
+static void
+format_DNS_LOOKUP(const struct ovnact_result *dl, struct ds *s)
{
expr_field_format(&dl->dst, s);
ds_put_cstr(s, " = dns_lookup();");
}
static void
-encode_DNS_LOOKUP(const struct ovnact_dns_lookup *dl,
+encode_DNS_LOOKUP(const struct ovnact_result *dl,
const struct ovnact_encode_params *ep OVS_UNUSED,
struct ofpbuf *ofpacts)
{
@@ -2700,7 +2711,7 @@ encode_DNS_LOOKUP(const struct ovnact_dns_lookup *dl,
static void
-ovnact_dns_lookup_free(struct ovnact_dns_lookup *dl OVS_UNUSED)
+ovnact_result_free(struct ovnact_result *dl OVS_UNUSED)
{
}
@@ -3472,6 +3483,83 @@ ovnact_fwd_group_free(struct ovnact_fwd_group *fwd_group)
free(fwd_group->child_ports);
}
+static void
+parse_chk_lb_hairpin(struct action_context *ctx, const struct expr_field *dst,
+ struct ovnact_result *res)
+{
+ parse_ovnact_result(ctx, "chk_lb_hairpin", NULL, dst, res);
+}
+
+static void
+parse_chk_lb_hairpin_reply(struct action_context *ctx,
+ const struct expr_field *dst,
+ struct ovnact_result *res)
+{
+ parse_ovnact_result(ctx, "chk_lb_hairpin_reply", NULL, dst, res);
+}
+
+
+static void
+format_CHK_LB_HAIRPIN(const struct ovnact_result *res, struct ds *s)
+{
+ expr_field_format(&res->dst, s);
+ ds_put_cstr(s, " = chk_lb_hairpin();");
+}
+
+static void
+format_CHK_LB_HAIRPIN_REPLY(const struct ovnact_result *res, struct ds *s)
+{
+ expr_field_format(&res->dst, s);
+ ds_put_cstr(s, " = chk_lb_hairpin_reply();");
+}
+
+static void
+encode_chk_lb_hairpin__(const struct ovnact_result *res,
+ uint8_t hairpin_table,
+ struct ofpbuf *ofpacts)
+{
+ struct mf_subfield dst = expr_resolve_field(&res->dst);
+ ovs_assert(dst.field);
+ put_load(0, MFF_LOG_FLAGS, MLF_LOOKUP_LB_HAIRPIN_BIT, 1, ofpacts);
+ emit_resubmit(ofpacts, hairpin_table);
+
+ struct ofpact_reg_move *orm = ofpact_put_REG_MOVE(ofpacts);
+ orm->dst = dst;
+ orm->src.field = mf_from_id(MFF_LOG_FLAGS);
+ orm->src.ofs = MLF_LOOKUP_LB_HAIRPIN_BIT;
+ orm->src.n_bits = 1;
+}
+
+static void
+encode_CHK_LB_HAIRPIN(const struct ovnact_result *res,
+ const struct ovnact_encode_params *ep,
+ struct ofpbuf *ofpacts)
+{
+ encode_chk_lb_hairpin__(res, ep->lb_hairpin_ptable, ofpacts);
+}
+
+static void
+encode_CHK_LB_HAIRPIN_REPLY(const struct ovnact_result *res,
+ const struct ovnact_encode_params *ep,
+ struct ofpbuf *ofpacts)
+{
+ encode_chk_lb_hairpin__(res, ep->lb_hairpin_reply_ptable, ofpacts);
+}
+
+static void
+format_CT_SNAT_TO_VIP(const struct ovnact_null *null OVS_UNUSED, struct ds *s)
+{
+ ds_put_cstr(s, "ct_snat_to_vip;");
+}
+
+static void
+encode_CT_SNAT_TO_VIP(const struct ovnact_null *null OVS_UNUSED,
+ const struct ovnact_encode_params *ep,
+ struct ofpbuf *ofpacts)
+{
+ emit_resubmit(ofpacts, ep->ct_snat_vip_ptable);
+}
+
/* Parses an assignment or exchange or put_dhcp_opts action. */
static void
parse_set_action(struct action_context *ctx)
@@ -3524,6 +3612,14 @@ parse_set_action(struct action_context *ctx)
&& lexer_lookahead(ctx->lexer) == LEX_T_LPAREN) {
parse_lookup_mac_bind_ip(ctx, &lhs, 128,
ovnact_put_LOOKUP_ND_IP(ctx->ovnacts));
+ } else if (!strcmp(ctx->lexer->token.s, "chk_lb_hairpin")
+ && lexer_lookahead(ctx->lexer) == LEX_T_LPAREN) {
+ parse_chk_lb_hairpin(ctx, &lhs,
+ ovnact_put_CHK_LB_HAIRPIN(ctx->ovnacts));
+ } else if (!strcmp(ctx->lexer->token.s, "chk_lb_hairpin_reply")
+ && lexer_lookahead(ctx->lexer) == LEX_T_LPAREN) {
+ parse_chk_lb_hairpin_reply(
+ ctx, &lhs, ovnact_put_CHK_LB_HAIRPIN_REPLY(ctx->ovnacts));
} else {
parse_assignment_action(ctx, false, &lhs);
}
@@ -3610,6 +3706,8 @@ parse_action(struct action_context *ctx)
ovnact_put_DHCP6_REPLY(ctx->ovnacts);
} else if (lexer_match_id(ctx->lexer, "reject")) {
parse_REJECT(ctx);
+ } else if (lexer_match_id(ctx->lexer, "ct_snat_to_vip")) {
+ ovnact_put_CT_SNAT_TO_VIP(ctx->ovnacts);
} else {
lexer_syntax_error(ctx->lexer, "expecting action");
}
diff --git a/ovn-sb.xml b/ovn-sb.xml
index bdd41c1f97..428996ebf5 100644
--- a/ovn-sb.xml
+++ b/ovn-sb.xml
@@ -2325,6 +2325,43 @@ tcp.flags = RST;
Delegation Router and managed IPv6 Prefix delegation state machine
+
+ R = chk_lb_hairpin();
+
+
+ This action checks if the packet under consideration was destined
+ to a load balancer VIP and it is hairpinned, i.e., after load
+ balancing the destination IP matches the source IP. If it is so,
+ then the 1-bit destination register R is set to 1.
+
+
+
+ R = chk_lb_hairpin_reply();
+
+
+ This action checks if the packet under consideration is from
+ one of the backend IP of a load balancer VIP and the destination IP
+ is the load balancer VIP. If it is so, then the 1-bit destination
+ register R is set to 1.
+
+
+
+ R = ct_snat_to_vip;
+
+
+ This action sends the packet through the SNAT zone to change the
+ source IP address of the packet to the load balancer VIP if the
+ original destination IP was load balancer VIP and commits the
+ connection. This action applies successfully only for the
+ hairpinned traffic i.e if the action chk_lb_hairpin
+ returned success. This action doesn't take any arguments and it
+ determines the SNAT IP internally.
+
+ The packet is not automatically sent to the next table. The caller
+ has to execute the next;
action explicitly after this
+ action to advance the packet to the next stage.
+
+
diff --git a/tests/ovn.at b/tests/ovn.at
index 3679978612..3dd195e1bc 100644
--- a/tests/ovn.at
+++ b/tests/ovn.at
@@ -1716,6 +1716,45 @@ fwd_group(liveness="false", childports="eth0", "lsp1");
handle_dhcpv6_reply;
encodes as controller(userdata=00.00.00.13.00.00.00.00)
+# chk_lb_hairpin
+reg0[0] = chk_lb_hairpin();
+ encodes as set_field:0/0x80->reg10,resubmit(,68),move:NXM_NX_REG10[7]->NXM_NX_XXREG0[96]
+
+reg2[2] = chk_lb_hairpin();
+ encodes as set_field:0/0x80->reg10,resubmit(,68),move:NXM_NX_REG10[7]->NXM_NX_XXREG0[34]
+
+reg0 = chk_lb_hairpin();
+ Cannot use 32-bit field reg0[0..31] where 1-bit field is required.
+
+reg0[0] = chk_lb_hairpin(foo);
+ chk_lb_hairpin doesn't take any parameters
+
+chk_lb_hairpin;
+ Syntax error at `chk_lb_hairpin' expecting action.
+
+# chk_lb_hairpin_reply
+reg0[0] = chk_lb_hairpin_reply();
+ encodes as set_field:0/0x80->reg10,resubmit(,69),move:NXM_NX_REG10[7]->NXM_NX_XXREG0[96]
+
+reg2[2..3] = chk_lb_hairpin_reply();
+ Cannot use 2-bit field reg2[2..3] where 1-bit field is required.
+
+reg0 = chk_lb_hairpin_reply();
+ Cannot use 32-bit field reg0[0..31] where 1-bit field is required.
+
+reg0[0] = chk_lb_hairpin_reply(foo);
+ chk_lb_hairpin_reply doesn't take any parameters
+
+chk_lb_hairpin_reply;
+ Syntax error at `chk_lb_hairpin_reply' expecting action.
+
+# ct_snat_to_vip
+ct_snat_to_vip;
+ encodes as resubmit(,70)
+
+ct_snat_to_vip(foo);
+ Syntax error at `(' expecting `;'.
+
# Miscellaneous negative tests.
;
Syntax error at `;'.
diff --git a/tests/test-ovn.c b/tests/test-ovn.c
index 80d99b7a8b..6662ced54c 100644
--- a/tests/test-ovn.c
+++ b/tests/test-ovn.c
@@ -1342,6 +1342,9 @@ test_parse_actions(struct ovs_cmdl_context *ctx OVS_UNUSED)
.output_ptable = OFTABLE_SAVE_INPORT,
.mac_bind_ptable = OFTABLE_MAC_BINDING,
.mac_lookup_ptable = OFTABLE_MAC_LOOKUP,
+ .lb_hairpin_ptable = OFTABLE_CHK_LB_HAIRPIN,
+ .lb_hairpin_reply_ptable = OFTABLE_CHK_LB_HAIRPIN_REPLY,
+ .ct_snat_vip_ptable = OFTABLE_CT_SNAT_FOR_VIP,
};
struct ofpbuf ofpacts;
ofpbuf_init(&ofpacts, 0);
diff --git a/utilities/ovn-trace.c b/utilities/ovn-trace.c
index 29bf7a2084..5d92188ab2 100644
--- a/utilities/ovn-trace.c
+++ b/utilities/ovn-trace.c
@@ -1992,7 +1992,7 @@ execute_next(const struct ovnact_next *next,
static void
-execute_dns_lookup(const struct ovnact_dns_lookup *dl, struct flow *uflow,
+execute_dns_lookup(const struct ovnact_result *dl, struct flow *uflow,
struct ovs_list *super)
{
struct mf_subfield sf = expr_resolve_field(&dl->dst);
@@ -2224,6 +2224,57 @@ execute_ovnfield_load(const struct ovnact_load *load,
}
}
+static void
+execute_chk_lb_hairpin(const struct ovnact_result *dl, struct flow *uflow,
+ struct ovs_list *super)
+{
+ int family = (uflow->dl_type == htons(ETH_TYPE_IP) ? AF_INET
+ : uflow->dl_type == htons(ETH_TYPE_IPV6) ? AF_INET6
+ : AF_UNSPEC);
+ uint8_t res = 0;
+ if (family != AF_UNSPEC && uflow->ct_state & CS_DST_NAT) {
+ if (family == AF_INET) {
+ res = (uflow->nw_src == uflow->nw_dst) ? 1 : 0;
+ } else {
+ res = ipv6_addr_equals(&uflow->ipv6_src, &uflow->ipv6_dst) ? 1 : 0;
+ }
+ }
+
+ struct mf_subfield sf = expr_resolve_field(&dl->dst);
+ union mf_subvalue sv = { .u8_val = res };
+ mf_write_subfield_flow(&sf, &sv, uflow);
+
+ struct ds s = DS_EMPTY_INITIALIZER;
+ expr_field_format(&dl->dst, &s);
+ ovntrace_node_append(super, OVNTRACE_NODE_MODIFY,
+ "%s = %d", ds_cstr(&s), res);
+ ds_destroy(&s);
+}
+
+static void
+execute_chk_lb_hairpin_reply(const struct ovnact_result *dl,
+ struct flow *uflow,
+ struct ovs_list *super)
+{
+ struct mf_subfield sf = expr_resolve_field(&dl->dst);
+ union mf_subvalue sv = { .u8_val = 0 };
+ mf_write_subfield_flow(&sf, &sv, uflow);
+ ovntrace_node_append(super, OVNTRACE_NODE_ERROR,
+ "*** chk_lb_hairpin_reply action not implemented");
+ struct ds s = DS_EMPTY_INITIALIZER;
+ expr_field_format(&dl->dst, &s);
+ ovntrace_node_append(super, OVNTRACE_NODE_MODIFY,
+ "%s = 0", ds_cstr(&s));
+ ds_destroy(&s);
+}
+
+static void
+execute_ct_snat_to_vip(struct flow *uflow OVS_UNUSED, struct ovs_list *super)
+{
+ ovntrace_node_append(super, OVNTRACE_NODE_ERROR,
+ "*** ct_snat_to_vip action not implemented");
+}
+
static void
trace_actions(const struct ovnact *ovnacts, size_t ovnacts_len,
const struct ovntrace_datapath *dp, struct flow *uflow,
@@ -2440,6 +2491,18 @@ trace_actions(const struct ovnact *ovnacts, size_t ovnacts_len,
pipeline, super);
break;
+ case OVNACT_CHK_LB_HAIRPIN:
+ execute_chk_lb_hairpin(ovnact_get_CHK_LB_HAIRPIN(a), uflow, super);
+ break;
+
+ case OVNACT_CHK_LB_HAIRPIN_REPLY:
+ execute_chk_lb_hairpin_reply(ovnact_get_CHK_LB_HAIRPIN_REPLY(a),
+ uflow, super);
+ break;
+ case OVNACT_CT_SNAT_TO_VIP:
+ execute_ct_snat_to_vip(uflow, super);
+ break;
+
case OVNACT_TRIGGER_EVENT:
break;
From patchwork Tue Oct 27 17:19:29 2020
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Numan Siddique
X-Patchwork-Id: 1388809
Return-Path:
X-Original-To: incoming@patchwork.ozlabs.org
Delivered-To: patchwork-incoming@bilbo.ozlabs.org
Authentication-Results: ozlabs.org;
spf=pass (sender SPF authorized) smtp.mailfrom=openvswitch.org
(client-ip=140.211.166.133; helo=hemlock.osuosl.org;
envelope-from=ovs-dev-bounces@openvswitch.org; receiver=)
Authentication-Results: ozlabs.org;
dmarc=none (p=none dis=none) header.from=ovn.org
Received: from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133])
(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
(No client certificate requested)
by ozlabs.org (Postfix) with ESMTPS id 4CLJP808R3z9sTr
for ; Wed, 28 Oct 2020 04:20:12 +1100 (AEDT)
Received: from localhost (localhost [127.0.0.1])
by hemlock.osuosl.org (Postfix) with ESMTP id 3029787376;
Tue, 27 Oct 2020 17:20:10 +0000 (UTC)
X-Virus-Scanned: amavisd-new at osuosl.org
Received: from hemlock.osuosl.org ([127.0.0.1])
by localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)
with ESMTP id 6dzkW-GoSgBU; Tue, 27 Oct 2020 17:20:02 +0000 (UTC)
Received: from lists.linuxfoundation.org (lf-lists.osuosl.org [140.211.9.56])
by hemlock.osuosl.org (Postfix) with ESMTP id 7FBBD87315;
Tue, 27 Oct 2020 17:20:00 +0000 (UTC)
Received: from lf-lists.osuosl.org (localhost [127.0.0.1])
by lists.linuxfoundation.org (Postfix) with ESMTP id 5F75FC08A1;
Tue, 27 Oct 2020 17:20:00 +0000 (UTC)
X-Original-To: dev@openvswitch.org
Delivered-To: ovs-dev@lists.linuxfoundation.org
Received: from whitealder.osuosl.org (smtp1.osuosl.org [140.211.166.138])
by lists.linuxfoundation.org (Postfix) with ESMTP id D2A3CC0859
for ; Tue, 27 Oct 2020 17:19:57 +0000 (UTC)
Received: from localhost (localhost [127.0.0.1])
by whitealder.osuosl.org (Postfix) with ESMTP id B0FF585230
for ; Tue, 27 Oct 2020 17:19:57 +0000 (UTC)
X-Virus-Scanned: amavisd-new at osuosl.org
Received: from whitealder.osuosl.org ([127.0.0.1])
by localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)
with ESMTP id rwtB2hiLPpp8 for ;
Tue, 27 Oct 2020 17:19:42 +0000 (UTC)
X-Greylist: domain auto-whitelisted by SQLgrey-1.7.6
Received: from relay8-d.mail.gandi.net (relay8-d.mail.gandi.net
[217.70.183.201])
by whitealder.osuosl.org (Postfix) with ESMTPS id 1F4A086915
for ; Tue, 27 Oct 2020 17:19:41 +0000 (UTC)
X-Originating-IP: 115.99.168.200
Received: from nusiddiq.home.org.com (unknown [115.99.168.200])
(Authenticated sender: numans@ovn.org)
by relay8-d.mail.gandi.net (Postfix) with ESMTPSA id D4C351BF211;
Tue, 27 Oct 2020 17:19:37 +0000 (UTC)
From: numans@ovn.org
To: dev@openvswitch.org
Date: Tue, 27 Oct 2020 22:49:29 +0530
Message-Id: <20201027171929.1181202-1-numans@ovn.org>
X-Mailer: git-send-email 2.26.2
In-Reply-To: <20201027171531.1178296-1-numans@ovn.org>
References: <20201027171531.1178296-1-numans@ovn.org>
MIME-Version: 1.0
Subject: [ovs-dev] [PATCH ovn v2 5/7] northd: Make use of new hairpin
actions.
X-BeenThere: ovs-dev@openvswitch.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id:
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Errors-To: ovs-dev-bounces@openvswitch.org
Sender: "dev"
From: Numan Siddique
This patch makes use of the new hairpin OVN actions - chk_lb_hairpin, chk_lb_hairpin_reply
and ct_snat_to_vip.
Suppose there are 'm' load balancers associated to a logical switch and each load balancer
has 'n' VIPs and each VIP has 'p' backends then ovn-northd adds (m * ((n * p) + n))
hairpin logical flows. After this patch, ovn-northd adds just 5 hairpin logical flows.
With this patch, the number of hairpin-related OF flows on a chassis is almost the same as before,
but in a large scale deployment, this reduces memory consumption and load on ovn-northd and
SB DB ovsdb-servers.
Signed-off-by: Numan Siddique
---
northd/ovn-northd.8.xml | 65 +++++++++++-----
northd/ovn-northd.c | 159 ++++++++++++++--------------------------
tests/ovn-northd.at | 28 +++----
tests/ovn.at | 36 ++++-----
4 files changed, 133 insertions(+), 155 deletions(-)
diff --git a/northd/ovn-northd.8.xml b/northd/ovn-northd.8.xml
index 9b96ce9a38..53ee4b58c0 100644
--- a/northd/ovn-northd.8.xml
+++ b/northd/ovn-northd.8.xml
@@ -718,24 +718,55 @@
Ingress Table 12: Pre-Hairpin
-
- For all configured load balancer VIPs a priority-2 flow that
- matches on traffic that needs to be hairpinned, i.e., after load
- balancing the destination IP matches the source IP, which sets
-
reg0[6] = 1
and executes ct_snat(VIP)
- to force replies to these packets to come back through OVN.
+ If the logical switch has load balancer(s) configured, then a
+ priority-100 flow is added with the match
+ ip && ct.trk && ct.dnat
to check if the
+ packet needs to be hairpinned (if after load balancing the destination
+ IP matches the source IP) or not by executing the action
+ reg0[6] = chk_lb_hairpin();
and advances the packet to
+ the next table.
+
+
+ -
+ If the logical switch has load balancer(s) configured, then a
+ priority-90 flow is added with the match
ip
to check if
+ the packet is a reply for a hairpinned connection or not by executing
+ the action reg0[6] = chk_lb_hairpin_reply();
and advances
+ the packet to the next table.
+
-
- For all configured load balancer VIPs a priority-1 flow that
- matches on replies to hairpinned traffic, i.e., destination IP is VIP,
- source IP is the backend IP and source L4 port is backend port, which
- sets
reg0[6] = 1
and executes ct_snat;
.
+ A priority-0 flow that simply moves traffic to the next table.
+
+
+ Ingress Table 13: Nat-Hairpin
+
+ -
+ If the logical switch has load balancer(s) configured, then a
+ priority-100 flow is added with the match
+
ip && (ct.new || ct.est) && ct.trk &&
+ ct.dnat && reg0[6] == 1
which hairpins the traffic by
+ NATting source IP to the load balancer VIP by executing the action
+ ct_snat_to_vip
and advances the packet to the next table.
+
+
+ -
+ If the logical switch has load balancer(s) configured, then a
+ priority-90 flow is added with the match
+
ip && reg0[6] == 1
which matches on the replies
+ of hairpinned traffic (i.e., destination IP is VIP,
+ source IP is the backend IP and source L4 port is backend port for L4
+ load balancers) and executes ct_snat
and advances the
+ packet to the next table.
+
+
-
A priority-0 flow that simply moves traffic to the next table.
- Ingress Table 13: Hairpin
+ Ingress Table 14: Hairpin
-
A priority-1 flow that hairpins traffic matched by non-default
@@ -748,7 +779,7 @@
- Ingress Table 14: ARP/ND responder
+ Ingress Table 15: ARP/ND responder
This table implements ARP/ND responder in a logical switch for known
@@ -1038,7 +1069,7 @@ output;
-
Ingress Table 15: DHCP option processing
+ Ingress Table 16: DHCP option processing
This table adds the DHCPv4 options to a DHCPv4 packet from the
@@ -1099,7 +1130,7 @@ next;
-
Ingress Table 16: DHCP responses
+ Ingress Table 17: DHCP responses
This table implements DHCP responder for the DHCP replies generated by
@@ -1180,7 +1211,7 @@ output;
-
Ingress Table 17 DNS Lookup
+ Ingress Table 18 DNS Lookup
This table looks up and resolves the DNS names to the corresponding
@@ -1209,7 +1240,7 @@ reg0[4] = dns_lookup(); next;
-
Ingress Table 18 DNS Responses
+ Ingress Table 19 DNS Responses
This table implements DNS responder for the DNS replies generated by
@@ -1244,7 +1275,7 @@ output;
-
Ingress table 19 External ports
+ Ingress table 20 External ports
Traffic from the external
logical ports enter the ingress
@@ -1287,7 +1318,7 @@ output;
-
Ingress Table 20 Destination Lookup
+ Ingress Table 21 Destination Lookup
This table implements switching behavior. It contains these logical
diff --git a/northd/ovn-northd.c b/northd/ovn-northd.c
index 1da31caf3d..70cfae52a4 100644
--- a/northd/ovn-northd.c
+++ b/northd/ovn-northd.c
@@ -150,14 +150,15 @@ enum ovn_stage {
PIPELINE_STAGE(SWITCH, IN, LB, 10, "ls_in_lb") \
PIPELINE_STAGE(SWITCH, IN, STATEFUL, 11, "ls_in_stateful") \
PIPELINE_STAGE(SWITCH, IN, PRE_HAIRPIN, 12, "ls_in_pre_hairpin") \
- PIPELINE_STAGE(SWITCH, IN, HAIRPIN, 13, "ls_in_hairpin") \
- PIPELINE_STAGE(SWITCH, IN, ARP_ND_RSP, 14, "ls_in_arp_rsp") \
- PIPELINE_STAGE(SWITCH, IN, DHCP_OPTIONS, 15, "ls_in_dhcp_options") \
- PIPELINE_STAGE(SWITCH, IN, DHCP_RESPONSE, 16, "ls_in_dhcp_response") \
- PIPELINE_STAGE(SWITCH, IN, DNS_LOOKUP, 17, "ls_in_dns_lookup") \
- PIPELINE_STAGE(SWITCH, IN, DNS_RESPONSE, 18, "ls_in_dns_response") \
- PIPELINE_STAGE(SWITCH, IN, EXTERNAL_PORT, 19, "ls_in_external_port") \
- PIPELINE_STAGE(SWITCH, IN, L2_LKUP, 20, "ls_in_l2_lkup") \
+ PIPELINE_STAGE(SWITCH, IN, NAT_HAIRPIN, 13, "ls_in_nat_hairpin") \
+ PIPELINE_STAGE(SWITCH, IN, HAIRPIN, 14, "ls_in_hairpin") \
+ PIPELINE_STAGE(SWITCH, IN, ARP_ND_RSP, 15, "ls_in_arp_rsp") \
+ PIPELINE_STAGE(SWITCH, IN, DHCP_OPTIONS, 16, "ls_in_dhcp_options") \
+ PIPELINE_STAGE(SWITCH, IN, DHCP_RESPONSE, 17, "ls_in_dhcp_response") \
+ PIPELINE_STAGE(SWITCH, IN, DNS_LOOKUP, 18, "ls_in_dns_lookup") \
+ PIPELINE_STAGE(SWITCH, IN, DNS_RESPONSE, 19, "ls_in_dns_response") \
+ PIPELINE_STAGE(SWITCH, IN, EXTERNAL_PORT, 20, "ls_in_external_port") \
+ PIPELINE_STAGE(SWITCH, IN, L2_LKUP, 21, "ls_in_l2_lkup") \
\
/* Logical switch egress stages. */ \
PIPELINE_STAGE(SWITCH, OUT, PRE_LB, 0, "ls_out_pre_lb") \
@@ -5690,84 +5691,6 @@ build_lb(struct ovn_datapath *od, struct hmap *lflows)
}
}
-static void
-build_lb_hairpin_rules(struct ovn_datapath *od, struct hmap *lflows,
- struct ovn_lb *lb, struct lb_vip *lb_vip,
- const char *ip_match, const char *proto)
-{
- if (lb_vip->n_backends == 0) {
- return;
- }
-
- struct ds action = DS_EMPTY_INITIALIZER;
- struct ds match_initiator = DS_EMPTY_INITIALIZER;
- struct ds match_reply = DS_EMPTY_INITIALIZER;
- struct ds proto_match = DS_EMPTY_INITIALIZER;
-
- /* Ingress Pre-Hairpin table.
- * - Priority 2: SNAT load balanced traffic that needs to be hairpinned:
- * - Both SRC and DST IP match backend->ip and destination port
- * matches backend->port.
- * - Priority 1: unSNAT replies to hairpinned load balanced traffic.
- * - SRC IP matches backend->ip, DST IP matches LB VIP and source port
- * matches backend->port.
- */
- ds_put_char(&match_reply, '(');
- for (size_t i = 0; i < lb_vip->n_backends; i++) {
- struct lb_vip_backend *backend = &lb_vip->backends[i];
-
- /* Packets that after load balancing have equal source and
- * destination IPs should be hairpinned.
- */
- if (lb_vip->vip_port) {
- ds_put_format(&proto_match, " && %s.dst == %"PRIu16,
- proto, backend->port);
- }
- ds_put_format(&match_initiator, "(%s.src == %s && %s.dst == %s%s)",
- ip_match, backend->ip, ip_match, backend->ip,
- ds_cstr(&proto_match));
-
- /* Replies to hairpinned traffic are originated by backend->ip:port. */
- ds_clear(&proto_match);
- if (lb_vip->vip_port) {
- ds_put_format(&proto_match, " && %s.src == %"PRIu16, proto,
- backend->port);
- }
- ds_put_format(&match_reply, "(%s.src == %s%s)", ip_match, backend->ip,
- ds_cstr(&proto_match));
- ds_clear(&proto_match);
-
- if (i < lb_vip->n_backends - 1) {
- ds_put_cstr(&match_initiator, " || ");
- ds_put_cstr(&match_reply, " || ");
- }
- }
- ds_put_char(&match_reply, ')');
-
- /* SNAT hairpinned initiator traffic so that the reply traffic is
- * also directed through OVN.
- */
- ds_put_format(&action, REGBIT_HAIRPIN " = 1; ct_snat(%s);",
- lb_vip->vip);
- ovn_lflow_add_with_hint(lflows, od, S_SWITCH_IN_PRE_HAIRPIN, 2,
- ds_cstr(&match_initiator), ds_cstr(&action),
- &lb->nlb->header_);
-
- /* Replies to hairpinned traffic are destined to the LB VIP. */
- ds_put_format(&match_reply, " && %s.dst == %s", ip_match, lb_vip->vip);
-
- /* UNSNAT replies for hairpinned traffic. */
- ovn_lflow_add_with_hint(lflows, od, S_SWITCH_IN_PRE_HAIRPIN, 1,
- ds_cstr(&match_reply),
- REGBIT_HAIRPIN " = 1; ct_snat;",
- &lb->nlb->header_);
-
- ds_destroy(&action);
- ds_destroy(&match_initiator);
- ds_destroy(&match_reply);
- ds_destroy(&proto_match);
-}
-
static void
build_lb_rules(struct ovn_datapath *od, struct hmap *lflows, struct ovn_lb *lb)
{
@@ -5812,12 +5735,6 @@ build_lb_rules(struct ovn_datapath *od, struct hmap *lflows, struct ovn_lb *lb)
ds_destroy(&match);
ds_destroy(&action);
-
- /* Also install flows that allow hairpinning of traffic (i.e., if
- * a load balancer VIP is DNAT-ed to a backend that happens to be
- * the source of the traffic).
- */
- build_lb_hairpin_rules(od, lflows, lb, lb_vip, ip_match, proto);
}
}
@@ -5864,24 +5781,53 @@ build_stateful(struct ovn_datapath *od, struct hmap *lflows, struct hmap *lbs)
ovs_assert(lb);
build_lb_rules(od, lflows, lb);
}
+}
- /* Ingress Pre-Hairpin table (Priority 0). Packets that don't need
- * hairpinning should continue processing.
+static void
+build_lb_hairpin(struct ovn_datapath *od, struct hmap *lflows)
+{
+ /* Ingress Pre-Hairpin/Nat-Hairpin/Hairpin tables (Priority 0).
+ * Packets that don't need hairpinning should continue processing.
*/
ovn_lflow_add(lflows, od, S_SWITCH_IN_PRE_HAIRPIN, 0, "1", "next;");
-
- /* Ingress Hairpin table.
- * - Priority 0: Packets that don't need hairpinning should continue
- * processing.
- * - Priority 1: Packets that were SNAT-ed for hairpinning should be
- * looped back (i.e., swap ETH addresses and send back on inport).
- */
- ovn_lflow_add(lflows, od, S_SWITCH_IN_HAIRPIN, 1, REGBIT_HAIRPIN " == 1",
- "eth.dst <-> eth.src;"
- "outport = inport;"
- "flags.loopback = 1;"
- "output;");
+ ovn_lflow_add(lflows, od, S_SWITCH_IN_NAT_HAIRPIN, 0, "1", "next;");
ovn_lflow_add(lflows, od, S_SWITCH_IN_HAIRPIN, 0, "1", "next;");
+
+ if (has_lb_vip(od)) {
+ /* Check if the packet needs to be hairpinned. */
+ ovn_lflow_add_with_hint(lflows, od, S_SWITCH_IN_PRE_HAIRPIN, 100,
+ "ip && ct.trk && ct.dnat",
+ REGBIT_HAIRPIN " = chk_lb_hairpin(); next;",
+ &od->nbs->header_);
+
+ /* Check if the packet is a reply of hairpinned traffic. */
+ ovn_lflow_add_with_hint(lflows, od, S_SWITCH_IN_PRE_HAIRPIN, 90, "ip",
+ REGBIT_HAIRPIN " = chk_lb_hairpin_reply(); "
+ "next;", &od->nbs->header_);
+
+ /* If packet needs to be hairpinned, snat the src ip with the VIP. */
+ ovn_lflow_add_with_hint(lflows, od, S_SWITCH_IN_NAT_HAIRPIN, 100,
+ "ip && (ct.new || ct.est) && ct.trk && ct.dnat"
+ " && "REGBIT_HAIRPIN " == 1",
+ "ct_snat_to_vip; next;",
+ &od->nbs->header_);
+
+ /* For the reply of hairpinned traffic, snat the src ip to the VIP. */
+ ovn_lflow_add_with_hint(lflows, od, S_SWITCH_IN_NAT_HAIRPIN, 90,
+ "ip && "REGBIT_HAIRPIN " == 1", "ct_snat;",
+ &od->nbs->header_);
+
+ /* Ingress Hairpin table.
+ * - Priority 1: Packets that were SNAT-ed for hairpinning should be
+ * looped back (i.e., swap ETH addresses and send back on inport).
+ */
+ ovn_lflow_add(lflows, od, S_SWITCH_IN_HAIRPIN, 1,
+ REGBIT_HAIRPIN " == 1",
+ "eth.dst <-> eth.src;"
+ "outport = inport;"
+ "flags.loopback = 1;"
+ "output;");
+ }
}
static void
@@ -6554,6 +6500,7 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
build_qos(od, lflows);
build_lb(od, lflows);
build_stateful(od, lflows, lbs);
+ build_lb_hairpin(od, lflows);
}
/* Build logical flows for the forwarding groups */
diff --git a/tests/ovn-northd.at b/tests/ovn-northd.at
index b1f454818e..a03cf90441 100644
--- a/tests/ovn-northd.at
+++ b/tests/ovn-northd.at
@@ -1997,13 +1997,13 @@ action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implici
AT_CHECK([ovn-sbctl lflow-list sw0 | grep "ls_out_acl" | grep pg0 | sort], [0], [dnl
table=5 (ls_out_acl ), priority=2003 , dnl
match=(outport == @pg0 && ip6 && udp), dnl
-action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=20); };)
+action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
])
AT_CHECK([ovn-sbctl lflow-list sw1 | grep "ls_out_acl" | grep pg0 | sort], [0], [dnl
table=5 (ls_out_acl ), priority=2003 , dnl
match=(outport == @pg0 && ip6 && udp), dnl
-action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=20); };)
+action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
])
ovn-nbctl acl-add pg0 to-lport 1002 "outport == @pg0 && ip4 && udp" reject
@@ -2011,19 +2011,19 @@ ovn-nbctl acl-add pg0 to-lport 1002 "outport == @pg0 && ip4 && udp" reject
AT_CHECK([ovn-sbctl lflow-list sw0 | grep "ls_out_acl" | grep pg0 | sort], [0], [dnl
table=5 (ls_out_acl ), priority=2002 , dnl
match=(outport == @pg0 && ip4 && udp), dnl
-action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=20); };)
+action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
table=5 (ls_out_acl ), priority=2003 , dnl
match=(outport == @pg0 && ip6 && udp), dnl
-action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=20); };)
+action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
])
AT_CHECK([ovn-sbctl lflow-list sw1 | grep "ls_out_acl" | grep pg0 | sort], [0], [dnl
table=5 (ls_out_acl ), priority=2002 , dnl
match=(outport == @pg0 && ip4 && udp), dnl
-action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=20); };)
+action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
table=5 (ls_out_acl ), priority=2003 , dnl
match=(outport == @pg0 && ip6 && udp), dnl
-action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=20); };)
+action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
])
ovn-nbctl --wait=sb acl-add pg0 to-lport 1001 "outport == @pg0 && ip" allow-related
@@ -2035,16 +2035,16 @@ match=(reg0[[7]] == 1 && (outport == @pg0 && ip)), action=(reg0[[1]] = 1; next;)
match=(reg0[[8]] == 1 && (outport == @pg0 && ip)), action=(next;)
table=5 (ls_out_acl ), priority=2002 , dnl
match=((reg0[[10]] == 1) && outport == @pg0 && ip4 && udp), dnl
-action=(ct_commit { ct_label.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=20); };)
+action=(ct_commit { ct_label.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
table=5 (ls_out_acl ), priority=2002 , dnl
match=((reg0[[9]] == 1) && outport == @pg0 && ip4 && udp), dnl
-action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=20); };)
+action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
table=5 (ls_out_acl ), priority=2003 , dnl
match=((reg0[[10]] == 1) && outport == @pg0 && ip6 && udp), dnl
-action=(ct_commit { ct_label.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=20); };)
+action=(ct_commit { ct_label.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
table=5 (ls_out_acl ), priority=2003 , dnl
match=((reg0[[9]] == 1) && outport == @pg0 && ip6 && udp), dnl
-action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=20); };)
+action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
])
AT_CHECK([ovn-sbctl lflow-list sw1 | grep "ls_out_acl" | grep pg0 | sort], [0], [dnl
@@ -2054,16 +2054,16 @@ match=(reg0[[7]] == 1 && (outport == @pg0 && ip)), action=(reg0[[1]] = 1; next;)
match=(reg0[[8]] == 1 && (outport == @pg0 && ip)), action=(next;)
table=5 (ls_out_acl ), priority=2002 , dnl
match=((reg0[[10]] == 1) && outport == @pg0 && ip4 && udp), dnl
-action=(ct_commit { ct_label.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=20); };)
+action=(ct_commit { ct_label.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
table=5 (ls_out_acl ), priority=2002 , dnl
match=((reg0[[9]] == 1) && outport == @pg0 && ip4 && udp), dnl
-action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=20); };)
+action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
table=5 (ls_out_acl ), priority=2003 , dnl
match=((reg0[[10]] == 1) && outport == @pg0 && ip6 && udp), dnl
-action=(ct_commit { ct_label.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=20); };)
+action=(ct_commit { ct_label.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
table=5 (ls_out_acl ), priority=2003 , dnl
match=((reg0[[9]] == 1) && outport == @pg0 && ip6 && udp), dnl
-action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=20); };)
+action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
])
AT_CLEANUP
diff --git a/tests/ovn.at b/tests/ovn.at
index 3dd195e1bc..5dc63a4bdd 100644
--- a/tests/ovn.at
+++ b/tests/ovn.at
@@ -14761,38 +14761,38 @@ logical_port=ls1-lp_ext1`
test "$chassis" = "$hv1_uuid"])
# There should be DHCPv4/v6 OF flows for the ls1-lp_ext1 port in hv1
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int | grep table=23 | \
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int | grep table=24 | \
grep controller | grep "0a.00.00.06" | grep reg14=0x$ln_public_key | \
wc -l], [0], [3
])
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int | grep table=23 | \
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int | grep table=24 | \
grep controller | grep tp_src=546 | grep \
"ae.70.00.00.00.00.00.00.00.00.00.00.00.00.00.06" | \
grep reg14=0x$ln_public_key | wc -l], [0], [1
])
# There should be no DHCPv4/v6 flows for ls1-lp_ext1 on hv2
-AT_CHECK([as hv2 ovs-ofctl dump-flows br-int | grep table=23 | \
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int | grep table=24 | \
grep controller | grep "0a.00.00.06" | wc -l], [0], [0
])
-AT_CHECK([as hv2 ovs-ofctl dump-flows br-int | grep table=23 | \
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int | grep table=24 | \
grep controller | grep tp_src=546 | grep \
"ae.70.00.00.00.00.00.00.00.00.00.00.00.00.00.06" | wc -l], [0], [0
])
# No DHCPv4/v6 flows for the external port - ls1-lp_ext2 - 10.0.0.7 in hv1 and
# hv2 as requested-chassis option is not set.
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int | grep table=23 | \
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int | grep table=24 | \
grep controller | grep "0a.00.00.07" | wc -l], [0], [0
])
-AT_CHECK([as hv2 ovs-ofctl dump-flows br-int | grep table=23 | \
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int | grep table=24 | \
grep controller | grep "0a.00.00.07" | wc -l], [0], [0
])
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int | grep table=23 | \
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int | grep table=24 | \
grep controller | grep tp_src=546 | grep \
"ae.70.00.00.00.00.00.00.00.00.00.00.00.00.00.07" | wc -l], [0], [0
])
-AT_CHECK([as hv2 ovs-ofctl dump-flows br-int | grep table=23 | \
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int | grep table=24 | \
grep controller | grep tp_src=546 | grep \
"ae.70.00.00.00.00.00.00.00.00.00.00.00.00.00.07" | wc -l], [0], [0
])
@@ -15044,21 +15044,21 @@ logical_port=ls1-lp_ext1`
test "$chassis" = "$hv2_uuid"])
# There should be OF flows for DHCP4/v6 for the ls1-lp_ext1 port in hv2
-AT_CHECK([as hv2 ovs-ofctl dump-flows br-int | grep table=23 | \
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int | grep table=24 | \
grep controller | grep "0a.00.00.06" | grep reg14=0x$ln_public_key | \
wc -l], [0], [3
])
-AT_CHECK([as hv2 ovs-ofctl dump-flows br-int | grep table=23 | \
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int | grep table=24 | \
grep controller | grep tp_src=546 | grep \
"ae.70.00.00.00.00.00.00.00.00.00.00.00.00.00.06" | \
grep reg14=0x$ln_public_key | wc -l], [0], [1
])
# There should be no DHCPv4/v6 flows for ls1-lp_ext1 on hv1
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int | grep table=23 | \
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int | grep table=24 | \
grep controller | grep "0a.00.00.06" | wc -l], [0], [0
])
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int | grep table=23 | \
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int | grep table=24 | \
grep controller | grep tp_src=546 | grep \
"ae.70.00.00.00.00.00.00.00.00.00.00.00.00.00.06" | \
grep reg14=0x$ln_public_key | wc -l], [0], [0
@@ -15324,7 +15324,7 @@ logical_port=ls1-lp_ext1`
# There should be a flow in hv2 to drop traffic from ls1-lp_ext1 destined
# to router mac.
AT_CHECK([as hv2 ovs-ofctl dump-flows br-int \
-table=27,dl_src=f0:00:00:00:00:03,dl_dst=a0:10:00:00:00:01 | \
+table=28,dl_src=f0:00:00:00:00:03,dl_dst=a0:10:00:00:00:01 | \
grep -c "actions=drop"], [0], [1
])
@@ -16595,9 +16595,9 @@ ovn-nbctl --wait=hv sync
ovn-sbctl dump-flows sw0 | grep ls_in_arp_rsp | grep bind_vport > lflows.txt
AT_CHECK([cat lflows.txt], [0], [dnl
- table=14(ls_in_arp_rsp ), priority=100 , match=(inport == "sw0-p1" && ((arp.op == 1 && arp.spa == 10.0.0.10 && arp.tpa == 10.0.0.10) || (arp.op == 2 && arp.spa == 10.0.0.10))), action=(bind_vport("sw0-vir", inport); next;)
- table=14(ls_in_arp_rsp ), priority=100 , match=(inport == "sw0-p2" && ((arp.op == 1 && arp.spa == 10.0.0.10 && arp.tpa == 10.0.0.10) || (arp.op == 2 && arp.spa == 10.0.0.10))), action=(bind_vport("sw0-vir", inport); next;)
- table=14(ls_in_arp_rsp ), priority=100 , match=(inport == "sw0-p3" && ((arp.op == 1 && arp.spa == 10.0.0.10 && arp.tpa == 10.0.0.10) || (arp.op == 2 && arp.spa == 10.0.0.10))), action=(bind_vport("sw0-vir", inport); next;)
+ table=15(ls_in_arp_rsp ), priority=100 , match=(inport == "sw0-p1" && ((arp.op == 1 && arp.spa == 10.0.0.10 && arp.tpa == 10.0.0.10) || (arp.op == 2 && arp.spa == 10.0.0.10))), action=(bind_vport("sw0-vir", inport); next;)
+ table=15(ls_in_arp_rsp ), priority=100 , match=(inport == "sw0-p2" && ((arp.op == 1 && arp.spa == 10.0.0.10 && arp.tpa == 10.0.0.10) || (arp.op == 2 && arp.spa == 10.0.0.10))), action=(bind_vport("sw0-vir", inport); next;)
+ table=15(ls_in_arp_rsp ), priority=100 , match=(inport == "sw0-p3" && ((arp.op == 1 && arp.spa == 10.0.0.10 && arp.tpa == 10.0.0.10) || (arp.op == 2 && arp.spa == 10.0.0.10))), action=(bind_vport("sw0-vir", inport); next;)
])
ovn-sbctl dump-flows lr0 | grep lr_in_arp_resolve | grep "reg0 == 10.0.0.10" \
@@ -16807,8 +16807,8 @@ ovn-nbctl --wait=hv set logical_switch_port sw0-vir options:virtual-ip=10.0.0.10
ovn-sbctl dump-flows sw0 | grep ls_in_arp_rsp | grep bind_vport > lflows.txt
AT_CHECK([cat lflows.txt], [0], [dnl
- table=14(ls_in_arp_rsp ), priority=100 , match=(inport == "sw0-p1" && ((arp.op == 1 && arp.spa == 10.0.0.10 && arp.tpa == 10.0.0.10) || (arp.op == 2 && arp.spa == 10.0.0.10))), action=(bind_vport("sw0-vir", inport); next;)
- table=14(ls_in_arp_rsp ), priority=100 , match=(inport == "sw0-p3" && ((arp.op == 1 && arp.spa == 10.0.0.10 && arp.tpa == 10.0.0.10) || (arp.op == 2 && arp.spa == 10.0.0.10))), action=(bind_vport("sw0-vir", inport); next;)
+ table=15(ls_in_arp_rsp ), priority=100 , match=(inport == "sw0-p1" && ((arp.op == 1 && arp.spa == 10.0.0.10 && arp.tpa == 10.0.0.10) || (arp.op == 2 && arp.spa == 10.0.0.10))), action=(bind_vport("sw0-vir", inport); next;)
+ table=15(ls_in_arp_rsp ), priority=100 , match=(inport == "sw0-p3" && ((arp.op == 1 && arp.spa == 10.0.0.10 && arp.tpa == 10.0.0.10) || (arp.op == 2 && arp.spa == 10.0.0.10))), action=(bind_vport("sw0-vir", inport); next;)
])
ovn-nbctl --wait=hv remove logical_switch_port sw0-vir options virtual-parents
From patchwork Tue Oct 27 17:19:39 2020
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Numan Siddique
X-Patchwork-Id: 1388810
Return-Path:
X-Original-To: incoming@patchwork.ozlabs.org
Delivered-To: patchwork-incoming@bilbo.ozlabs.org
Authentication-Results: ozlabs.org;
spf=pass (sender SPF authorized) smtp.mailfrom=openvswitch.org
(client-ip=140.211.166.136; helo=silver.osuosl.org;
envelope-from=ovs-dev-bounces@openvswitch.org; receiver=)
Authentication-Results: ozlabs.org;
dmarc=none (p=none dis=none) header.from=ovn.org
Received: from silver.osuosl.org (smtp3.osuosl.org [140.211.166.136])
(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
(No client certificate requested)
by ozlabs.org (Postfix) with ESMTPS id 4CLJPG1gwpz9sRK
for ; Wed, 28 Oct 2020 04:20:18 +1100 (AEDT)
Received: from localhost (localhost [127.0.0.1])
by silver.osuosl.org (Postfix) with ESMTP id 9F61E20104;
Tue, 27 Oct 2020 17:20:16 +0000 (UTC)
X-Virus-Scanned: amavisd-new at osuosl.org
Received: from silver.osuosl.org ([127.0.0.1])
by localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)
with ESMTP id mY2Xf3IcqxNT; Tue, 27 Oct 2020 17:20:09 +0000 (UTC)
Received: from lists.linuxfoundation.org (lf-lists.osuosl.org [140.211.9.56])
by silver.osuosl.org (Postfix) with ESMTP id CA3BB2E10B;
Tue, 27 Oct 2020 17:19:56 +0000 (UTC)
Received: from lf-lists.osuosl.org (localhost [127.0.0.1])
by lists.linuxfoundation.org (Postfix) with ESMTP id 954F6C08A1;
Tue, 27 Oct 2020 17:19:56 +0000 (UTC)
X-Original-To: dev@openvswitch.org
Delivered-To: ovs-dev@lists.linuxfoundation.org
Received: from silver.osuosl.org (smtp3.osuosl.org [140.211.166.136])
by lists.linuxfoundation.org (Postfix) with ESMTP id 91A13C0859
for ; Tue, 27 Oct 2020 17:19:54 +0000 (UTC)
Received: from localhost (localhost [127.0.0.1])
by silver.osuosl.org (Postfix) with ESMTP id 77EDF2E0F3
for ; Tue, 27 Oct 2020 17:19:54 +0000 (UTC)
X-Virus-Scanned: amavisd-new at osuosl.org
Received: from silver.osuosl.org ([127.0.0.1])
by localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)
with ESMTP id FeQN08dQYfek for ;
Tue, 27 Oct 2020 17:19:50 +0000 (UTC)
X-Greylist: domain auto-whitelisted by SQLgrey-1.7.6
Received: from relay7-d.mail.gandi.net (relay7-d.mail.gandi.net
[217.70.183.200])
by silver.osuosl.org (Postfix) with ESMTPS id 3E2BB2741E
for ; Tue, 27 Oct 2020 17:19:46 +0000 (UTC)
X-Originating-IP: 115.99.168.200
Received: from nusiddiq.home.org.com (unknown [115.99.168.200])
(Authenticated sender: numans@ovn.org)
by relay7-d.mail.gandi.net (Postfix) with ESMTPSA id 15C9A2000C;
Tue, 27 Oct 2020 17:19:43 +0000 (UTC)
From: numans@ovn.org
To: dev@openvswitch.org
Date: Tue, 27 Oct 2020 22:49:39 +0530
Message-Id: <20201027171939.1181265-1-numans@ovn.org>
X-Mailer: git-send-email 2.26.2
In-Reply-To: <20201027171531.1178296-1-numans@ovn.org>
References: <20201027171531.1178296-1-numans@ovn.org>
MIME-Version: 1.0
Subject: [ovs-dev] [PATCH ovn v2 6/7] ovn-detrace: Add SB Load Balancer
cookie handler.
X-BeenThere: ovs-dev@openvswitch.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id:
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Errors-To: ovs-dev-bounces@openvswitch.org
Sender: "dev"
From: Numan Siddique
Signed-off-by: Numan Siddique
---
utilities/ovn-detrace.in | 11 ++++++++++-
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/utilities/ovn-detrace.in b/utilities/ovn-detrace.in
index 4f8dd5f3d8..1214be6fa1 100755
--- a/utilities/ovn-detrace.in
+++ b/utilities/ovn-detrace.in
@@ -316,6 +316,14 @@ class ChassisHandler(CookieHandlerByUUUID):
def print_record(self, chassis):
print_p('Chassis: %s' % (chassis_str([chassis])))
+class SBLoadBalancerHandler(CookieHandlerByUUUID):
+ def __init__(self, ovnsb_db):
+ super(SBLoadBalancerHandler, self).__init__(ovnsb_db, 'Load_Balancer')
+
+ def print_record(self, lb):
+ print_p('Load Balancer: %s protocol %s vips %s' % (
+ lb.name, lb.protocol, lb.vips))
+
class OvsInterfaceHandler(CookieHandler):
def __init__(self, ovs_db):
super(OvsInterfaceHandler, self).__init__(ovs_db, 'Interface')
@@ -424,7 +432,8 @@ def main():
PortBindingHandler(ovsdb_ovnsb),
MacBindingHandler(ovsdb_ovnsb),
MulticastGroupHandler(ovsdb_ovnsb),
- ChassisHandler(ovsdb_ovnsb)
+ ChassisHandler(ovsdb_ovnsb),
+ SBLoadBalancerHandler(ovsdb_ovnsb)
]
regex_cookie = re.compile(r'^.*cookie 0x([0-9a-fA-F]+)')
From patchwork Tue Oct 27 17:19:45 2020
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Numan Siddique
X-Patchwork-Id: 1388808
Return-Path:
X-Original-To: incoming@patchwork.ozlabs.org
Delivered-To: patchwork-incoming@bilbo.ozlabs.org
Authentication-Results: ozlabs.org;
spf=pass (sender SPF authorized) smtp.mailfrom=openvswitch.org
(client-ip=140.211.166.137; helo=fraxinus.osuosl.org;
envelope-from=ovs-dev-bounces@openvswitch.org; receiver=)
Authentication-Results: ozlabs.org;
dmarc=none (p=none dis=none) header.from=ovn.org
Received: from fraxinus.osuosl.org (smtp4.osuosl.org [140.211.166.137])
(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
(No client certificate requested)
by ozlabs.org (Postfix) with ESMTPS id 4CLJNx4C40z9sTr
for ; Wed, 28 Oct 2020 04:20:01 +1100 (AEDT)
Received: from localhost (localhost [127.0.0.1])
by fraxinus.osuosl.org (Postfix) with ESMTP id 2FE7F862FB;
Tue, 27 Oct 2020 17:20:00 +0000 (UTC)
X-Virus-Scanned: amavisd-new at osuosl.org
Received: from fraxinus.osuosl.org ([127.0.0.1])
by localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)
with ESMTP id StqJmQae2v05; Tue, 27 Oct 2020 17:19:58 +0000 (UTC)
Received: from lists.linuxfoundation.org (lf-lists.osuosl.org [140.211.9.56])
by fraxinus.osuosl.org (Postfix) with ESMTP id AF43C862CA;
Tue, 27 Oct 2020 17:19:58 +0000 (UTC)
Received: from lf-lists.osuosl.org (localhost [127.0.0.1])
by lists.linuxfoundation.org (Postfix) with ESMTP id 88FF9C08A1;
Tue, 27 Oct 2020 17:19:58 +0000 (UTC)
X-Original-To: dev@openvswitch.org
Delivered-To: ovs-dev@lists.linuxfoundation.org
Received: from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133])
by lists.linuxfoundation.org (Postfix) with ESMTP id E18C5C0051
for ; Tue, 27 Oct 2020 17:19:56 +0000 (UTC)
Received: from localhost (localhost [127.0.0.1])
by hemlock.osuosl.org (Postfix) with ESMTP id D147E87328
for ; Tue, 27 Oct 2020 17:19:56 +0000 (UTC)
X-Virus-Scanned: amavisd-new at osuosl.org
Received: from hemlock.osuosl.org ([127.0.0.1])
by localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)
with ESMTP id c5b7kQcgLIWA for ;
Tue, 27 Oct 2020 17:19:56 +0000 (UTC)
X-Greylist: domain auto-whitelisted by SQLgrey-1.7.6
Received: from relay5-d.mail.gandi.net (relay5-d.mail.gandi.net
[217.70.183.197])
by hemlock.osuosl.org (Postfix) with ESMTPS id D11A787316
for ; Tue, 27 Oct 2020 17:19:55 +0000 (UTC)
X-Originating-IP: 115.99.168.200
Received: from nusiddiq.home.org.com (unknown [115.99.168.200])
(Authenticated sender: numans@ovn.org)
by relay5-d.mail.gandi.net (Postfix) with ESMTPSA id 91D521C000A;
Tue, 27 Oct 2020 17:19:51 +0000 (UTC)
From: numans@ovn.org
To: dev@openvswitch.org
Date: Tue, 27 Oct 2020 22:49:45 +0530
Message-Id: <20201027171945.1181332-1-numans@ovn.org>
X-Mailer: git-send-email 2.26.2
In-Reply-To: <20201027171531.1178296-1-numans@ovn.org>
References: <20201027171531.1178296-1-numans@ovn.org>
MIME-Version: 1.0
Subject: [ovs-dev] [PATCH ovn v2 7/7] sbctl: Add Load Balancer support for
vflows option.
X-BeenThere: ovs-dev@openvswitch.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id:
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Errors-To: ovs-dev-bounces@openvswitch.org
Sender: "dev"
From: Numan Siddique
Signed-off-by: Numan Siddique
---
utilities/ovn-sbctl.c | 55 +++++++++++++++++++++++++++++++++++++++++++
1 file changed, 55 insertions(+)
diff --git a/utilities/ovn-sbctl.c b/utilities/ovn-sbctl.c
index 00c112c7e5..5b593b38cb 100644
--- a/utilities/ovn-sbctl.c
+++ b/utilities/ovn-sbctl.c
@@ -542,6 +542,11 @@ pre_get_info(struct ctl_context *ctx)
ovsdb_idl_add_column(ctx->idl, &sbrec_mac_binding_col_logical_port);
ovsdb_idl_add_column(ctx->idl, &sbrec_mac_binding_col_ip);
ovsdb_idl_add_column(ctx->idl, &sbrec_mac_binding_col_mac);
+
+ ovsdb_idl_add_column(ctx->idl, &sbrec_load_balancer_col_datapaths);
+ ovsdb_idl_add_column(ctx->idl, &sbrec_load_balancer_col_vips);
+ ovsdb_idl_add_column(ctx->idl, &sbrec_load_balancer_col_name);
+ ovsdb_idl_add_column(ctx->idl, &sbrec_load_balancer_col_protocol);
}
static struct cmd_show_table cmd_show_tables[] = {
@@ -1009,6 +1014,55 @@ cmd_lflow_list_chassis(struct ctl_context *ctx, struct vconn *vconn,
}
}
+static void
+cmd_lflow_list_load_balancers(struct ctl_context *ctx, struct vconn *vconn,
+ const struct sbrec_datapath_binding *datapath,
+ bool stats, bool print_uuid)
+{
+ const struct sbrec_load_balancer *lb;
+ const struct sbrec_load_balancer *lb_prev = NULL;
+ SBREC_LOAD_BALANCER_FOR_EACH (lb, ctx->idl) {
+ bool dp_found = false;
+ if (datapath) {
+ for (size_t i = 0; i < lb->n_datapaths; i++) {
+ if (datapath == lb->datapaths[i]) {
+ dp_found = true;
+ break;
+ }
+ }
+ if (datapath && !dp_found) {
+ continue;
+ }
+ }
+
+ if (!lb_prev) {
+ printf("\nLoad Balancers:\n");
+ }
+
+ printf(" ");
+ print_uuid_part(&lb->header_.uuid, print_uuid);
+ printf("name=\"%s\", protocol=\"%s\", ", lb->name, lb->protocol);
+ if (!dp_found) {
+ for (size_t i = 0; i < lb->n_datapaths; i++) {
+ print_vflow_datapath_name(lb->datapaths[i], true);
+ }
+ }
+
+ printf("\n vips:\n");
+ struct smap_node *node;
+ SMAP_FOR_EACH (node, &lb->vips) {
+ printf(" %s = %s\n", node->key, node->value);
+ }
+ printf("\n");
+
+ if (vconn) {
+ sbctl_dump_openflow(vconn, &lb->header_.uuid, stats);
+ }
+
+ lb_prev = lb;
+ }
+}
+
static void
cmd_lflow_list(struct ctl_context *ctx)
{
@@ -1118,6 +1172,7 @@ cmd_lflow_list(struct ctl_context *ctx)
cmd_lflow_list_mac_bindings(ctx, vconn, datapath, stats, print_uuid);
cmd_lflow_list_mc_groups(ctx, vconn, datapath, stats, print_uuid);
cmd_lflow_list_chassis(ctx, vconn, stats, print_uuid);
+ cmd_lflow_list_load_balancers(ctx, vconn, datapath, stats, print_uuid);
}
vconn_close(vconn);