@@ -123,11 +123,6 @@ lflow_northd_handler(struct engine_node *node,
return false;
}
- /* Fall back to recompute if load balancers have changed. */
- if (northd_has_lbs_in_tracked_data(&northd_data->trk_data)) {
- return false;
- }
-
const struct engine_context *eng_ctx = engine_get_context();
struct lflow_data *lflow_data = data;
@@ -140,6 +135,12 @@ lflow_northd_handler(struct engine_node *node,
return false;
}
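+    /* Handle the tracked load balancer changes incrementally.  Returning
+     * "false" here makes the incremental engine fall back to a full
+     * recompute of the lflow node. */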
+ if (!lflow_handle_northd_lb_changes(eng_ctx->ovnsb_idl_txn,
+ &northd_data->trk_data.trk_lbs,
+ &lflow_input, lflow_data->lflow_table)) {
+ return false;
+ }
+
engine_set_node_state(node, EN_UPDATED);
return true;
}
@@ -21,6 +21,7 @@
/* OVN includes */
#include "lb.h"
+#include "lflow-mgr.h"
#include "lib/lb.h"
#include "northd.h"
@@ -33,6 +34,7 @@ ovn_lb_datapaths_create(const struct ovn_northd_lb *lb, size_t n_ls_datapaths,
lb_dps->lb = lb;
lb_dps->nb_ls_map = bitmap_allocate(n_ls_datapaths);
lb_dps->nb_lr_map = bitmap_allocate(n_lr_datapaths);
+ lb_dps->lflow_ref = lflow_ref_create();
return lb_dps;
}
@@ -42,6 +44,7 @@ ovn_lb_datapaths_destroy(struct ovn_lb_datapaths *lb_dps)
{
bitmap_free(lb_dps->nb_lr_map);
bitmap_free(lb_dps->nb_ls_map);
+ lflow_ref_destroy(lb_dps->lflow_ref);
free(lb_dps);
}
@@ -20,6 +20,8 @@
#include "openvswitch/hmap.h"
#include "uuid.h"
+struct lflow_ref;
+
struct ovn_lb_datapaths {
struct hmap_node hmap_node;
@@ -29,6 +31,30 @@ struct ovn_lb_datapaths {
size_t n_nb_lr;
unsigned long *nb_lr_map;
+
+    /* 'lflow_ref' is used to reference the logical flows generated for
+     * this load balancer.
+     *
+     * This data is initialized and destroyed by the en_northd node, but
+     * populated and used only by the en_lflow node. Ideally this data should
+     * be maintained as part of en_lflow's data (struct lflow_data): a hash
+     * index from the load balancer key to its lflows. However, that would
+     * be less efficient and more complex:
+     *
+     * 1. It would require an extra search (using the index) to find the
+     * lflows.
+     *
+     * 2. Building the index would need to be thread-safe, using either a
+     * global lock, which is obviously less efficient, or a hash-based lock
+     * array, which is more complex.
+     *
+     * Maintaining the lflow_ref here is more straightforward. The drawback
+     * is that this data belongs to the en_lflow node, so it must never be
+     * accessed from any other node.
+     */
+ struct lflow_ref *lflow_ref;
};
struct ovn_lb_datapaths *ovn_lb_datapaths_create(const struct ovn_northd_lb *,
@@ -378,7 +378,15 @@ struct lflow_ref_node {
/* The lflow. */
struct ovn_lflow *lflow;
- /* Index id of the datapath this lflow_ref_node belongs to. */
+    /* Indicates if the lflow was added with a dp_group, i.e. using the
+     * ovn_lflow_add_with_dp_group() macro. */
+    bool dpgrp_lflow;
+    /* The dp group bitmap and its length.  Valid only if dpgrp_lflow is
+     * true. */
+    unsigned long *dpgrp_bitmap;
+    size_t dpgrp_bitmap_len;
+
+ /* Index id of the datapath this lflow_ref_node belongs to.
+ * Valid only if dpgrp_lflow is false. */
size_t dp_index;
/* Indicates if the lflow_ref_node for an lflow - L(M, A) is linked
@@ -432,9 +440,19 @@ lflow_ref_unlink_lflows(struct lflow_ref *lflow_ref)
struct lflow_ref_node *lrn;
LIST_FOR_EACH (lrn, lflow_list_node, &lflow_ref->lflows_ref_list) {
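+        /* An lflow added with a dp group holds one datapath refcount per
+         * bit set in its bitmap, while an lflow added for a single
+         * datapath holds one refcount for that datapath's index. */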
- if (dec_dp_refcnt(&lrn->lflow->dp_refcnts_map,
- lrn->dp_index)) {
- bitmap_set0(lrn->lflow->dpg_bitmap, lrn->dp_index);
+ if (lrn->dpgrp_lflow) {
+ size_t index;
+ BITMAP_FOR_EACH_1 (index, lrn->dpgrp_bitmap_len,
+ lrn->dpgrp_bitmap) {
+ if (dec_dp_refcnt(&lrn->lflow->dp_refcnts_map, index)) {
+                    bitmap_set0(lrn->lflow->dpg_bitmap, index);
+ }
+ }
+ } else {
+ if (dec_dp_refcnt(&lrn->lflow->dp_refcnts_map,
+ lrn->dp_index)) {
+ bitmap_set0(lrn->lflow->dpg_bitmap, lrn->dp_index);
+ }
}
lrn->linked = false;
@@ -505,18 +523,26 @@ lflow_table_add_lflow(struct lflow_table *lflow_table,
io_port, ctrl_meter, stage_hint, where);
if (lflow_ref) {
- /* lflow referencing is only supported if 'od' is not NULL. */
- ovs_assert(od);
-
struct lflow_ref_node *lrn =
lflow_ref_node_find(&lflow_ref->lflow_ref_nodes, lflow, hash);
if (!lrn) {
lrn = xzalloc(sizeof *lrn);
lrn->lflow = lflow;
- lrn->dp_index = od->index;
+ lrn->dpgrp_lflow = !od;
+ if (lrn->dpgrp_lflow) {
+ lrn->dpgrp_bitmap = bitmap_clone(dp_bitmap, dp_bitmap_len);
+ lrn->dpgrp_bitmap_len = dp_bitmap_len;
+
+ size_t index;
+ BITMAP_FOR_EACH_1 (index, dp_bitmap_len, dp_bitmap) {
+ inc_dp_refcnt(&lflow->dp_refcnts_map, index);
+ }
+ } else {
+ lrn->dp_index = od->index;
+ inc_dp_refcnt(&lflow->dp_refcnts_map, lrn->dp_index);
+ }
ovs_list_insert(&lflow_ref->lflows_ref_list,
&lrn->lflow_list_node);
- inc_dp_refcnt(&lflow->dp_refcnts_map, lrn->dp_index);
ovs_list_insert(&lflow->referenced_by, &lrn->ref_list_node);
hmap_insert(&lflow_ref->lflow_ref_nodes, &lrn->ref_node, hash);
@@ -1233,5 +1259,8 @@ lflow_ref_node_destroy(struct lflow_ref_node *lrn,
}
ovs_list_remove(&lrn->lflow_list_node);
ovs_list_remove(&lrn->ref_list_node);
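+    /* Free the clone of the dp group bitmap taken when this ref node was
+     * created. */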
+ if (lrn->dpgrp_lflow) {
+ bitmap_free(lrn->dpgrp_bitmap);
+ }
free(lrn);
}
@@ -7467,7 +7467,7 @@ build_lb_rules_pre_stateful(struct lflow_table *lflows,
ovn_lflow_add_with_dp_group(
lflows, lb_dps->nb_ls_map, ods_size(ls_datapaths),
S_SWITCH_IN_PRE_STATEFUL, 120, ds_cstr(match), ds_cstr(action),
- &lb->nlb->header_, NULL);
+ &lb->nlb->header_, lb_dps->lflow_ref);
}
}
@@ -7912,7 +7912,7 @@ build_lb_rules(struct lflow_table *lflows, struct ovn_lb_datapaths *lb_dps,
}
build_lb_affinity_ls_flows(lflows, lb_dps, lb_vip, ls_datapaths,
- NULL);
+ lb_dps->lflow_ref);
unsigned long *dp_non_meter = NULL;
bool build_non_meter = false;
@@ -7936,7 +7936,7 @@ build_lb_rules(struct lflow_table *lflows, struct ovn_lb_datapaths *lb_dps,
lflows, od, S_SWITCH_IN_LB, priority,
ds_cstr(match), ds_cstr(action),
NULL, meter, &lb->nlb->header_,
- NULL);
+ lb_dps->lflow_ref);
}
}
if (!reject || build_non_meter) {
@@ -7944,7 +7944,7 @@ build_lb_rules(struct lflow_table *lflows, struct ovn_lb_datapaths *lb_dps,
lflows, dp_non_meter ? dp_non_meter : lb_dps->nb_ls_map,
ods_size(ls_datapaths), S_SWITCH_IN_LB, priority,
ds_cstr(match), ds_cstr(action), &lb->nlb->header_,
- NULL);
+ lb_dps->lflow_ref);
}
bitmap_free(dp_non_meter);
}
@@ -9365,7 +9365,7 @@ build_lswitch_arp_nd_service_monitor(const struct ovn_lb_datapaths *lb_dps,
S_SWITCH_IN_ARP_ND_RSP, 110,
ds_cstr(match), ds_cstr(actions),
&lb->nlb->header_,
- NULL);
+ lb_dps->lflow_ref);
}
}
}
@@ -11387,7 +11387,8 @@ build_lrouter_nat_flows_for_lb(
if (!od->n_l3dgw_ports) {
bitmap_set1(gw_dp_bitmap[type], index);
} else {
- build_distr_lrouter_nat_flows_for_lb(&ctx, type, od, NULL);
+ build_distr_lrouter_nat_flows_for_lb(&ctx, type, od,
+ lb_dps->lflow_ref);
}
if (lb->affinity_timeout) {
@@ -11408,17 +11409,17 @@ build_lrouter_nat_flows_for_lb(
* S_ROUTER_IN_DNAT stage. */
ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_UNSNAT, 120,
ds_cstr(&unsnat_match), "next;",
- &lb->nlb->header_, NULL);
+ &lb->nlb->header_, lb_dps->lflow_ref);
}
}
for (size_t type = 0; type < LROUTER_NAT_LB_FLOW_MAX; type++) {
build_gw_lrouter_nat_flows_for_lb(&ctx, type, lr_datapaths,
gw_dp_bitmap[type],
- NULL);
+ lb_dps->lflow_ref);
build_lb_affinity_lr_flows(lflows, lb, lb_vip, ds_cstr(match),
aff_action[type], aff_dp_bitmap[type],
- lr_datapaths, NULL);
+ lr_datapaths, lb_dps->lflow_ref);
}
ds_destroy(&unsnat_match);
@@ -11467,7 +11468,7 @@ build_lswitch_flows_for_lb(struct ovn_lb_datapaths *lb_dps,
od->nbs->copp,
meter_groups),
&lb->nlb->header_,
- NULL);
+ lb_dps->lflow_ref);
}
/* Ignore L4 port information in the key because fragmented packets
* may not have L4 information. The pre-stateful table will send
@@ -11517,7 +11518,7 @@ build_lrouter_defrag_flows_for_lb(struct ovn_lb_datapaths *lb_dps,
ovn_lflow_add_with_dp_group(
lflows, lb_dps->nb_lr_map, ods_size(lr_datapaths),
S_ROUTER_IN_DEFRAG, prio, ds_cstr(match), "ct_dnat;",
- &lb_dps->lb->nlb->header_, NULL);
+ &lb_dps->lb->nlb->header_, lb_dps->lflow_ref);
}
}
@@ -11559,7 +11560,7 @@ build_lrouter_flows_for_lb(struct ovn_lb_datapaths *lb_dps,
copp_meter_get(COPP_EVENT_ELB,
od->nbr->copp,
meter_groups),
- &lb->nlb->header_, NULL);
+ &lb->nlb->header_, lb_dps->lflow_ref);
}
}
@@ -11569,7 +11570,7 @@ build_lrouter_flows_for_lb(struct ovn_lb_datapaths *lb_dps,
ovn_lflow_add(lflows, od, S_ROUTER_OUT_SNAT, 120,
"flags.skip_snat_for_lb == 1 && ip", "next;",
- NULL);
+ lb_dps->lflow_ref);
}
}
}
@@ -16378,6 +16379,7 @@ void build_lflows(struct ovsdb_idl_txn *ovnsb_txn,
void
lflow_reset_northd_refs(struct lflow_input *lflow_input)
{
+ struct ovn_lb_datapaths *lb_dps;
struct ovn_port *op;
HMAP_FOR_EACH (op, key_node, lflow_input->ls_ports) {
@@ -16389,6 +16391,10 @@ lflow_reset_northd_refs(struct lflow_input *lflow_input)
lflow_ref_clear(op->lflow_ref);
lflow_ref_clear(op->stateful_lflow_ref);
}
+
+ HMAP_FOR_EACH (lb_dps, hmap_node, lflow_input->lb_datapaths_map) {
+ lflow_ref_clear(lb_dps->lflow_ref);
+ }
}
bool
@@ -16550,6 +16556,69 @@ lflow_handle_northd_port_changes(struct ovsdb_idl_txn *ovnsb_txn,
return true;
}
+bool
+lflow_handle_northd_lb_changes(struct ovsdb_idl_txn *ovnsb_txn,
+ struct tracked_lbs *trk_lbs,
+ struct lflow_input *lflow_input,
+ struct lflow_table *lflows)
+{
+ struct ovn_lb_datapaths *lb_dps;
+ struct hmapx_node *hmapx_node;
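+    /* Resync the lflows of each deleted LB: this unlinks its stale
+     * lflows and updates or removes the corresponding SB rows. */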
+ HMAPX_FOR_EACH (hmapx_node, &trk_lbs->deleted) {
+ lb_dps = hmapx_node->data;
+
+ lflow_ref_resync_flows(
+ lb_dps->lflow_ref, lflows, ovnsb_txn, lflow_input->ls_datapaths,
+ lflow_input->lr_datapaths,
+ lflow_input->ovn_internal_version_changed,
+ lflow_input->sbrec_logical_flow_table,
+ lflow_input->sbrec_logical_dp_group_table);
+ }
+
+ HMAPX_FOR_EACH (hmapx_node, &trk_lbs->crupdated) {
+ lb_dps = hmapx_node->data;
+
+        /* Unlink the old lflows. */
+ lflow_ref_unlink_lflows(lb_dps->lflow_ref);
+
+ /* Generate new lflows. */
+ struct ds match = DS_EMPTY_INITIALIZER;
+ struct ds actions = DS_EMPTY_INITIALIZER;
+
+ build_lswitch_arp_nd_service_monitor(lb_dps, lflow_input->ls_ports,
+ lflows, &actions,
+ &match);
+ build_lrouter_defrag_flows_for_lb(lb_dps, lflows,
+ lflow_input->lr_datapaths, &match);
+ build_lrouter_flows_for_lb(lb_dps, lflows,
+ lflow_input->meter_groups,
+ lflow_input->lr_datapaths,
+ lflow_input->lr_stateful_table,
+ lflow_input->features,
+ lflow_input->svc_monitor_map,
+ &match, &actions);
+ build_lswitch_flows_for_lb(lb_dps, lflows,
+ lflow_input->meter_groups,
+ lflow_input->ls_datapaths,
+ lflow_input->features,
+ lflow_input->svc_monitor_map,
+ &match, &actions);
+
+ ds_destroy(&match);
+ ds_destroy(&actions);
+
+ /* Sync the new flows to SB. */
+ lflow_ref_sync_lflows(lb_dps->lflow_ref, lflows, ovnsb_txn,
+ lflow_input->ls_datapaths,
+ lflow_input->lr_datapaths,
+ lflow_input->ovn_internal_version_changed,
+ lflow_input->sbrec_logical_flow_table,
+ lflow_input->sbrec_logical_dp_group_table);
+ }
+
+ return true;
+}
+
static bool
mirror_needs_update(const struct nbrec_mirror *nb_mirror,
const struct sbrec_mirror *sb_mirror)
@@ -672,6 +672,10 @@ bool lflow_handle_northd_port_changes(struct ovsdb_idl_txn *ovnsb_txn,
struct tracked_ovn_ports *,
struct lflow_input *,
struct lflow_table *lflows);
+bool lflow_handle_northd_lb_changes(struct ovsdb_idl_txn *ovnsb_txn,
+ struct tracked_lbs *,
+ struct lflow_input *,
+ struct lflow_table *lflows);
bool northd_handle_sb_port_binding_changes(
const struct sbrec_port_binding_table *, struct hmap *ls_ports,
struct hmap *lr_ports);
@@ -10519,7 +10519,7 @@ check ovn-nbctl --wait=sb lb-add lb1 10.0.0.10:80 10.0.0.3:80
check_engine_stats lb_data norecompute compute
check_engine_stats northd norecompute compute
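+# The lflow node now handles this load balancer change incrementally
+# ("compute") instead of triggering a full recompute.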
-check_engine_stats lflow recompute nocompute
+check_engine_stats lflow norecompute compute
check_engine_stats sync_to_sb_lb recompute nocompute
CHECK_NO_CHANGE_AFTER_RECOMPUTE(1)
@@ -10529,21 +10529,26 @@ check ovn-nbctl --wait=sb set load_balancer . ip_port_mappings:10.0.0.3=sw0-p1:1
check_engine_stats lb_data norecompute compute
check_engine_stats northd norecompute compute
check_engine_stats lr_stateful norecompute compute
-check_engine_stats lflow recompute nocompute
+check_engine_stats lflow norecompute compute
check_engine_stats sync_to_sb_lb recompute nocompute
+CHECK_NO_CHANGE_AFTER_RECOMPUTE
+check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats
check ovn-nbctl --wait=sb set load_balancer . options:foo=bar
check_engine_stats lb_data norecompute compute
check_engine_stats northd norecompute compute
check_engine_stats lr_stateful norecompute compute
-check_engine_stats lflow recompute nocompute
+check_engine_stats lflow norecompute compute
check_engine_stats sync_to_sb_lb recompute nocompute
+CHECK_NO_CHANGE_AFTER_RECOMPUTE
+check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats
check ovn-nbctl --wait=sb -- lb-add lb2 20.0.0.10:80 20.0.0.20:80 -- lb-add lb3 30.0.0.10:80 30.0.0.20:80
check_engine_stats lb_data norecompute compute
check_engine_stats northd norecompute compute
check_engine_stats lr_stateful norecompute compute
-check_engine_stats lflow recompute nocompute
+check_engine_stats lflow norecompute compute
check_engine_stats sync_to_sb_lb recompute nocompute
CHECK_NO_CHANGE_AFTER_RECOMPUTE(1)
@@ -10553,7 +10558,7 @@ check ovn-nbctl --wait=sb -- lb-del lb2 -- lb-del lb3
check_engine_stats lb_data norecompute compute
check_engine_stats northd norecompute compute
check_engine_stats lr_stateful norecompute compute
-check_engine_stats lflow recompute nocompute
+check_engine_stats lflow norecompute compute
check_engine_stats sync_to_sb_lb recompute nocompute
CHECK_NO_CHANGE_AFTER_RECOMPUTE(1)
@@ -10764,8 +10769,9 @@ check ovn-nbctl --wait=sb add load_balancer_group . load_Balancer $lb1_uuid
check_engine_stats lb_data norecompute compute
check_engine_stats northd norecompute compute
check_engine_stats lr_stateful norecompute compute
-check_engine_stats lflow recompute nocompute
+check_engine_stats lflow norecompute compute
check_engine_stats sync_to_sb_lb recompute nocompute
+CHECK_NO_CHANGE_AFTER_RECOMPUTE
check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats
check ovn-nbctl --wait=sb clear load_balancer_group . load_Balancer
@@ -10780,7 +10786,7 @@ check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats
check ovn-nbctl --wait=sb add load_balancer_group . load_Balancer $lb1_uuid
check_engine_stats lb_data norecompute compute
check_engine_stats northd norecompute compute
-check_engine_stats lflow recompute nocompute
+check_engine_stats lflow norecompute compute
check_engine_stats sync_to_sb_lb recompute nocompute
CHECK_NO_CHANGE_AFTER_RECOMPUTE
@@ -10789,6 +10795,7 @@ check ovn-nbctl --wait=sb add logical_switch sw0 load_balancer_group $lbg1_uuid
check_engine_stats lb_data norecompute compute
check_engine_stats northd norecompute compute
check_engine_stats lr_stateful norecompute compute
+check_engine_stats ls_stateful norecompute compute
check_engine_stats lflow recompute nocompute
check_engine_stats sync_to_sb_lb recompute compute
@@ -10797,6 +10804,7 @@ check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats
check ovn-nbctl --wait=sb set load_balancer . options:bar=foo
check_engine_stats lb_data norecompute compute
check_engine_stats northd norecompute compute
+check_engine_stats ls_stateful norecompute compute
check_engine_stats lflow recompute nocompute
check_engine_stats sync_to_sb_lb recompute compute
@@ -10806,6 +10814,7 @@ check ovn-nbctl --wait=sb set load_balancer lb1 vips:'"10.0.0.10:80"'='"10.0.0.1
check_engine_stats lb_data norecompute compute
check_engine_stats northd norecompute compute
check_engine_stats lr_stateful norecompute compute
+check_engine_stats ls_stateful norecompute compute
check_engine_stats lflow recompute nocompute
check_engine_stats sync_to_sb_lb recompute compute
CHECK_NO_CHANGE_AFTER_RECOMPUTE
@@ -10903,6 +10912,7 @@ check_engine_stats northd recompute nocompute
check_engine_stats lr_stateful recompute nocompute
check_engine_stats lflow recompute nocompute
check_engine_stats sync_to_sb_lb recompute compute
+CHECK_NO_CHANGE_AFTER_RECOMPUTE
# Add back lb group to logical switch and then delete it.
check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats
@@ -10912,6 +10922,7 @@ check_engine_stats northd norecompute compute
check_engine_stats lr_stateful norecompute compute
check_engine_stats lflow recompute nocompute
check_engine_stats sync_to_sb_lb recompute compute
+CHECK_NO_CHANGE_AFTER_RECOMPUTE
check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats
check ovn-nbctl --wait=sb clear logical_switch sw0 load_balancer_group -- \
@@ -10945,14 +10956,17 @@ check_engine_stats northd norecompute compute
check_engine_stats lr_stateful norecompute compute
check_engine_stats lflow norecompute nocompute
check_engine_stats sync_to_sb_lb norecompute nocompute
+CHECK_NO_CHANGE_AFTER_RECOMPUTE
check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats
check ovn-nbctl --wait=sb set load_balancer_group . load_balancer="$lb2_uuid,$lb3_uuid,$lb4_uuid"
check_engine_stats lb_data norecompute compute
check_engine_stats northd norecompute compute
+check_engine_stats ls_stateful norecompute compute
check_engine_stats lr_stateful norecompute compute
-check_engine_stats lflow recompute nocompute
+check_engine_stats lflow norecompute compute
check_engine_stats sync_to_sb_lb recompute nocompute
+CHECK_NO_CHANGE_AFTER_RECOMPUTE
check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats
check ovn-nbctl --wait=sb set logical_switch sw0 load_balancer_group=$lbg1_uuid
@@ -11144,6 +11158,207 @@ CHECK_NO_CHANGE_AFTER_RECOMPUTE
AT_CLEANUP
])
+OVN_FOR_EACH_NORTHD_NO_HV([
+AT_SETUP([Load balancer incremental processing for multiple LBs with same VIPs])
+ovn_start
+
+check ovn-nbctl ls-add sw0
+check ovn-nbctl ls-add sw1
+check ovn-nbctl lb-add lb1 10.0.0.10:80 10.0.0.3:80
+check ovn-nbctl --wait=sb lb-add lb2 10.0.0.10:80 10.0.0.3:80
+
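+# lb1 and lb2 have the same VIP and backend, so they should generate
+# identical logical flows that can share a single SB Logical_Flow row
+# via a datapath group.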
+check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats
+check ovn-nbctl --wait=sb ls-lb-add sw0 lb1
+check_engine_stats lflow recompute nocompute
+CHECK_NO_CHANGE_AFTER_RECOMPUTE
+
+lb_lflow_uuid=$(fetch_column Logical_flow _uuid match='"ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80"')
+sw0_uuid=$(fetch_column Datapath_Binding _uuid external_ids:name=sw0)
+
+lb_lflow_dp=$(ovn-sbctl --bare --columns logical_datapath list logical_flow $lb_lflow_uuid)
+AT_CHECK([test "$lb_lflow_dp" = "$sw0_uuid"])
+
+lb_lflow_dpgrp=$(ovn-sbctl --bare --columns logical_dp_group list logical_flow $lb_lflow_uuid)
+AT_CHECK([test "$lb_lflow_dpgrp" = ""])
+
+check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats
+check ovn-nbctl --wait=sb ls-lb-add sw1 lb2
+check_engine_stats lflow recompute nocompute
+CHECK_NO_CHANGE_AFTER_RECOMPUTE
+
+lb_lflow_dp=$(ovn-sbctl --bare --columns logical_datapath list logical_flow $lb_lflow_uuid)
+AT_CHECK([test "$lb_lflow_dp" = ""])
+
+lb_lflow_dpgrp=$(ovn-sbctl --bare --columns logical_dp_group list logical_flow $lb_lflow_uuid)
+AT_CHECK([test "$lb_lflow_dpgrp" != ""])
+
+check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats
+check ovn-nbctl --wait=sb clear load_balancer lb2 vips
+check_engine_stats lflow recompute nocompute
+CHECK_NO_CHANGE_AFTER_RECOMPUTE
+
+lb_lflow_dp=$(ovn-sbctl --bare --columns logical_datapath list logical_flow $lb_lflow_uuid)
+AT_CHECK([test "$lb_lflow_dp" = "$sw0_uuid"])
+
+lb_lflow_dpgrp=$(ovn-sbctl --bare --columns logical_dp_group list logical_flow $lb_lflow_uuid)
+AT_CHECK([test "$lb_lflow_dpgrp" = ""])
+
+# Add back the vip to lb2.
+check ovn-nbctl lb-add lb2 10.0.0.10:80 10.0.0.3:80
+
+# Create additional logical switches and associate lb1 to sw0, sw1 and sw2
+# and associate lb2 to sw3, sw4 and sw5
+check ovn-nbctl ls-add sw2
+check ovn-nbctl ls-add sw3
+check ovn-nbctl ls-add sw4
+check ovn-nbctl ls-add sw5
+check ovn-nbctl --wait=sb ls-lb-del sw1 lb2
+check ovn-nbctl ls-lb-add sw1 lb1
+check ovn-nbctl ls-lb-add sw2 lb1
+check ovn-nbctl ls-lb-add sw3 lb2
+check ovn-nbctl ls-lb-add sw4 lb2
+
+check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats
+check ovn-nbctl --wait=sb ls-lb-add sw5 lb2
+check_engine_stats lflow recompute nocompute
+CHECK_NO_CHANGE_AFTER_RECOMPUTE
+
+lb_lflow_dp=$(ovn-sbctl --bare --columns logical_datapath list logical_flow $lb_lflow_uuid)
+AT_CHECK([test "$lb_lflow_dp" = ""])
+
+lb_lflow_dpgrp=$(ovn-sbctl --bare --columns logical_dp_group list logical_flow $lb_lflow_uuid)
+AT_CHECK([test "$lb_lflow_dpgrp" != ""])
+
+sw1_uuid=$(fetch_column Datapath_Binding _uuid external_ids:name=sw1)
+sw2_uuid=$(fetch_column Datapath_Binding _uuid external_ids:name=sw2)
+sw3_uuid=$(fetch_column Datapath_Binding _uuid external_ids:name=sw3)
+sw4_uuid=$(fetch_column Datapath_Binding _uuid external_ids:name=sw4)
+sw5_uuid=$(fetch_column Datapath_Binding _uuid external_ids:name=sw5)
+
+dpgrp_dps=$(ovn-sbctl --bare --columns datapaths list logical_dp_group $lb_lflow_dpgrp)
+
+AT_CHECK([echo $dpgrp_dps | grep $sw0_uuid], [0], [ignore])
+AT_CHECK([echo $dpgrp_dps | grep $sw1_uuid], [0], [ignore])
+AT_CHECK([echo $dpgrp_dps | grep $sw2_uuid], [0], [ignore])
+AT_CHECK([echo $dpgrp_dps | grep $sw3_uuid], [0], [ignore])
+AT_CHECK([echo $dpgrp_dps | grep $sw4_uuid], [0], [ignore])
+AT_CHECK([echo $dpgrp_dps | grep $sw5_uuid], [0], [ignore])
+
+echo "dpgrp_dps - $dpgrp_dps"
+
+# Clear the vips for lb2. The LB logical flow's dp group should now
+# contain only the sw0, sw1 and sw2 uuids.
+
+check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats
+check ovn-nbctl --wait=sb clear load_balancer lb2 vips
+check_engine_stats lflow recompute nocompute
+CHECK_NO_CHANGE_AFTER_RECOMPUTE
+
+lb_lflow_dpgrp=$(ovn-sbctl --bare --columns logical_dp_group list logical_flow $lb_lflow_uuid)
+AT_CHECK([test "$lb_lflow_dpgrp" != ""])
+
+dpgrp_dps=$(ovn-sbctl --bare --columns datapaths list logical_dp_group $lb_lflow_dpgrp)
+
+AT_CHECK([echo $dpgrp_dps | grep $sw0_uuid], [0], [ignore])
+AT_CHECK([echo $dpgrp_dps | grep $sw1_uuid], [0], [ignore])
+AT_CHECK([echo $dpgrp_dps | grep $sw2_uuid], [0], [ignore])
+AT_CHECK([echo $dpgrp_dps | grep $sw3_uuid], [1], [ignore])
+AT_CHECK([echo $dpgrp_dps | grep $sw4_uuid], [1], [ignore])
+AT_CHECK([echo $dpgrp_dps | grep $sw5_uuid], [1], [ignore])
+
+# Clear the vips for lb1. The logical flow should be deleted.
+check ovn-nbctl --wait=sb clear load_balancer lb1 vips
+
+AT_CHECK([ovn-sbctl --bare --columns logical_datapath list logical_flow $lb_lflow_uuid], [1], [ignore], [ignore])
+
+lb_lflow_uuid=$(fetch_column Logical_flow _uuid match='"ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80"')
+AT_CHECK([test "$lb_lflow_uuid" = ""])
+
+
+# Now add back the vips, create another lb with the same vips and
+# associate it to sw0 and sw1.
+check ovn-nbctl lb-add lb1 10.0.0.10:80 10.0.0.3:80
+check ovn-nbctl lb-add lb2 10.0.0.10:80 10.0.0.3:80
+check ovn-nbctl --wait=sb lb-add lb3 10.0.0.10:80 10.0.0.3:80
+
+check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats
+
+check ovn-nbctl ls-lb-add sw0 lb3
+check ovn-nbctl --wait=sb ls-lb-add sw1 lb3
+check_engine_stats lflow recompute nocompute
+CHECK_NO_CHANGE_AFTER_RECOMPUTE
+
+lb_lflow_uuid=$(fetch_column Logical_flow _uuid match='"ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80"')
+
+lb_lflow_dp=$(ovn-sbctl --bare --columns logical_datapath list logical_flow $lb_lflow_uuid)
+AT_CHECK([test "$lb_lflow_dp" = ""])
+
+lb_lflow_dpgrp=$(ovn-sbctl --bare --columns logical_dp_group list logical_flow $lb_lflow_uuid)
+AT_CHECK([test "$lb_lflow_dpgrp" != ""])
+
+dpgrp_dps=$(ovn-sbctl --bare --columns datapaths list logical_dp_group $lb_lflow_dpgrp)
+
+AT_CHECK([echo $dpgrp_dps | grep $sw0_uuid], [0], [ignore])
+AT_CHECK([echo $dpgrp_dps | grep $sw1_uuid], [0], [ignore])
+AT_CHECK([echo $dpgrp_dps | grep $sw2_uuid], [0], [ignore])
+AT_CHECK([echo $dpgrp_dps | grep $sw3_uuid], [0], [ignore])
+AT_CHECK([echo $dpgrp_dps | grep $sw4_uuid], [0], [ignore])
+AT_CHECK([echo $dpgrp_dps | grep $sw5_uuid], [0], [ignore])
+
+# Now clear lb1 vips.
+# Since lb3 is associated with sw0 and sw1, the logical flow's dp group
+# should reference sw0 and sw1, but not sw2.
+check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats
+check ovn-nbctl --wait=sb clear load_balancer lb1 vips
+check_engine_stats lflow recompute nocompute
+CHECK_NO_CHANGE_AFTER_RECOMPUTE
+
+lb_lflow_dp=$(ovn-sbctl --bare --columns logical_datapath list logical_flow $lb_lflow_uuid)
+AT_CHECK([test "$lb_lflow_dp" = ""])
+
+lb_lflow_dpgrp=$(ovn-sbctl --bare --columns logical_dp_group list logical_flow $lb_lflow_uuid)
+AT_CHECK([test "$lb_lflow_dpgrp" != ""])
+
+dpgrp_dps=$(ovn-sbctl --bare --columns datapaths list logical_dp_group $lb_lflow_dpgrp)
+
+echo "dpgrp dps - $dpgrp_dps"
+
+AT_CHECK([echo $dpgrp_dps | grep $sw0_uuid], [0], [ignore])
+AT_CHECK([echo $dpgrp_dps | grep $sw1_uuid], [0], [ignore])
+AT_CHECK([echo $dpgrp_dps | grep $sw2_uuid], [1], [ignore])
+AT_CHECK([echo $dpgrp_dps | grep $sw3_uuid], [0], [ignore])
+AT_CHECK([echo $dpgrp_dps | grep $sw4_uuid], [0], [ignore])
+AT_CHECK([echo $dpgrp_dps | grep $sw5_uuid], [0], [ignore])
+
+# Now clear lb3 vips. The logical flow's dp group should reference only
+# sw3, sw4 and sw5, because lb2 is associated with them.
+
+check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats
+check ovn-nbctl --wait=sb clear load_balancer lb3 vips
+check_engine_stats lflow recompute nocompute
+CHECK_NO_CHANGE_AFTER_RECOMPUTE
+
+lb_lflow_dp=$(ovn-sbctl --bare --columns logical_datapath list logical_flow $lb_lflow_uuid)
+AT_CHECK([test "$lb_lflow_dp" = ""])
+
+lb_lflow_dpgrp=$(ovn-sbctl --bare --columns logical_dp_group list logical_flow $lb_lflow_uuid)
+AT_CHECK([test "$lb_lflow_dpgrp" != ""])
+
+dpgrp_dps=$(ovn-sbctl --bare --columns datapaths list logical_dp_group $lb_lflow_dpgrp)
+
+echo "dpgrp dps - $dpgrp_dps"
+
+AT_CHECK([echo $dpgrp_dps | grep $sw0_uuid], [1], [ignore])
+AT_CHECK([echo $dpgrp_dps | grep $sw1_uuid], [1], [ignore])
+AT_CHECK([echo $dpgrp_dps | grep $sw2_uuid], [1], [ignore])
+AT_CHECK([echo $dpgrp_dps | grep $sw3_uuid], [0], [ignore])
+AT_CHECK([echo $dpgrp_dps | grep $sw4_uuid], [0], [ignore])
+AT_CHECK([echo $dpgrp_dps | grep $sw5_uuid], [0], [ignore])
+
+AT_CLEANUP
+])
+
OVN_FOR_EACH_NORTHD_NO_HV([
AT_SETUP([Logical router incremental processing for NAT])