@@ -123,11 +123,6 @@ lflow_northd_handler(struct engine_node *node,
return false;
}
- /* Fall back to recompute if load balancers have changed. */
- if (northd_has_lbs_in_tracked_data(&northd_data->trk_data)) {
- return false;
- }
-
const struct engine_context *eng_ctx = engine_get_context();
struct lflow_data *lflow_data = data;
@@ -141,6 +136,12 @@ lflow_northd_handler(struct engine_node *node,
return false;
}
+ if (!lflow_handle_northd_lb_changes(
+ eng_ctx->ovnsb_idl_txn, &northd_data->trk_data.trk_lbs,
+ &lflow_input, lflow_data->lflow_table)) {
+ return false;
+ }
+
engine_set_node_state(node, EN_UPDATED);
return true;
}
@@ -23,7 +23,8 @@
/* OVN includes */
#include "lb.h"
-#include "lib/ovn-nb-idl.h"
+#include "lflow-mgr.h"
+#include "lib/lb.h"
#include "northd.h"
#include "ovn/lex.h"
@@ -563,6 +564,7 @@ ovn_lb_datapaths_create(const struct ovn_northd_lb *lb, size_t n_ls_datapaths,
lb_dps->lb = lb;
lb_dps->nb_ls_map = bitmap_allocate(n_ls_datapaths);
lb_dps->nb_lr_map = bitmap_allocate(n_lr_datapaths);
+ lb_dps->lflow_ref = lflow_ref_create();
return lb_dps;
}
@@ -572,6 +574,7 @@ ovn_lb_datapaths_destroy(struct ovn_lb_datapaths *lb_dps)
{
bitmap_free(lb_dps->nb_lr_map);
bitmap_free(lb_dps->nb_ls_map);
+ lflow_ref_destroy(lb_dps->lflow_ref);
free(lb_dps);
}
@@ -128,6 +128,7 @@ void ovn_lb_group_reinit(
const struct nbrec_load_balancer_group *,
const struct hmap *lbs);
+struct lflow_ref;
struct ovn_lb_datapaths {
struct hmap_node hmap_node;
@@ -137,6 +138,33 @@ struct ovn_lb_datapaths {
size_t n_nb_lr;
unsigned long *nb_lr_map;
+
+ /* Reference of lflows generated for this load balancer.
+ *
+ * This data is initialized and destroyed by the en_northd node, but
+ * populated and used only by the en_lflow node. Ideally this data should
+ * be maintained as part of en_lflow's data (struct lflow_data): a hash
+ * index from ovn_port key to lflows. However, it would be less efficient
+ * and more complex:
+ *
+ * 1. It would require an extra search (using the index) to find the
+ * lflows.
+ *
+ * 2. Building the index needs to be thread-safe, using either a global
+ * lock which is obviously less efficient, or hash-based lock array which
+ * is more complex.
+ *
+ * Maintaining the lflow_ref here is more straightforward. The drawback is
+ * that we need to keep in mind that this data belongs to en_lflow node,
+ * so never access it from any other nodes.
+ *
+ * 'lflow_ref' is used to reference logical flows generated for this
+ * load balancer.
+ *
+ * Note: lflow_ref is not thread safe. Only one thread should
+ * access ovn_lb_datapaths->lflow_ref at any given time.
+ */
+ struct lflow_ref *lflow_ref;
};
struct ovn_lb_datapaths *ovn_lb_datapaths_create(const struct ovn_northd_lb *,
@@ -535,7 +535,15 @@ struct lflow_ref_node {
/* The lflow. */
struct ovn_lflow *lflow;
- /* Index id of the datapath this lflow_ref_node belongs to. */
+ /* Indicates whether the lflow was added with a dp_group using the
+ * ovn_lflow_add_with_dp_group() macro. */
+ bool dpgrp_lflow;
+    /* dpgrp bitmap and bitmap length. Valid only if dpgrp_lflow is true. */
+ unsigned long *dpgrp_bitmap;
+ size_t dpgrp_bitmap_len;
+
+ /* Index id of the datapath this lflow_ref_node belongs to.
+ * Valid only if dpgrp_lflow is false. */
size_t dp_index;
/* Indicates if the lflow_ref_node for an lflow - L(M, A) is linked
@@ -573,7 +581,9 @@ lflow_ref_destroy(struct lflow_ref *lflow_ref)
/* Unlinks the lflows referenced by the 'lflow_ref'.
* For each lflow_ref_node (lrn) in the lflow_ref, it basically clears
- * the datapath id (lrn->dp_index) from the lrn->lflow's dpg bitmap.
+ * the datapath id (lrn->dp_index) or all the datapath id bits in the
+ * dp group bitmap (set when ovn_lflow_add_with_dp_group macro was used)
+ * from the lrn->lflow's dpg bitmap.
*/
void
lflow_ref_unlink_lflows(struct lflow_ref *lflow_ref)
@@ -581,9 +591,19 @@ lflow_ref_unlink_lflows(struct lflow_ref *lflow_ref)
struct lflow_ref_node *lrn;
HMAP_FOR_EACH (lrn, ref_node, &lflow_ref->lflow_ref_nodes) {
- if (dp_refcnt_release(&lrn->lflow->dp_refcnts_map,
- lrn->dp_index)) {
- bitmap_set0(lrn->lflow->dpg_bitmap, lrn->dp_index);
+ if (lrn->dpgrp_lflow) {
+ size_t index;
+ BITMAP_FOR_EACH_1 (index, lrn->dpgrp_bitmap_len,
+ lrn->dpgrp_bitmap) {
+ if (dp_refcnt_release(&lrn->lflow->dp_refcnts_map, index)) {
+ bitmap_set0(lrn->lflow->dpg_bitmap, index);
+ }
+ }
+ } else {
+ if (dp_refcnt_release(&lrn->lflow->dp_refcnts_map,
+ lrn->dp_index)) {
+ bitmap_set0(lrn->lflow->dpg_bitmap, lrn->dp_index);
+ }
}
lrn->linked = false;
@@ -673,17 +693,25 @@ lflow_table_add_lflow(struct lflow_table *lflow_table,
io_port, ctrl_meter, stage_hint, where);
if (lflow_ref) {
- /* lflow referencing is only supported if 'od' is not NULL. */
- ovs_assert(od);
-
struct lflow_ref_node *lrn =
lflow_ref_node_find(&lflow_ref->lflow_ref_nodes, lflow, hash);
if (!lrn) {
lrn = xzalloc(sizeof *lrn);
lrn->lflow = lflow;
lrn->lflow_ref = lflow_ref;
- lrn->dp_index = od->index;
- dp_refcnt_use(&lflow->dp_refcnts_map, lrn->dp_index);
+ lrn->dpgrp_lflow = !od;
+ if (lrn->dpgrp_lflow) {
+ lrn->dpgrp_bitmap = bitmap_clone(dp_bitmap, dp_bitmap_len);
+ lrn->dpgrp_bitmap_len = dp_bitmap_len;
+
+ size_t index;
+ BITMAP_FOR_EACH_1 (index, dp_bitmap_len, dp_bitmap) {
+ dp_refcnt_use(&lflow->dp_refcnts_map, index);
+ }
+ } else {
+ lrn->dp_index = od->index;
+ dp_refcnt_use(&lflow->dp_refcnts_map, lrn->dp_index);
+ }
ovs_list_insert(&lflow->referenced_by, &lrn->ref_list_node);
hmap_insert(&lflow_ref->lflow_ref_nodes, &lrn->ref_node, hash);
}
@@ -1416,5 +1444,8 @@ lflow_ref_node_destroy(struct lflow_ref_node *lrn)
{
hmap_remove(&lrn->lflow_ref->lflow_ref_nodes, &lrn->ref_node);
ovs_list_remove(&lrn->ref_list_node);
+ if (lrn->dpgrp_lflow) {
+ bitmap_free(lrn->dpgrp_bitmap);
+ }
free(lrn);
}
@@ -7446,7 +7446,7 @@ build_lb_rules_pre_stateful(struct lflow_table *lflows,
ovn_lflow_add_with_dp_group(
lflows, lb_dps->nb_ls_map, ods_size(ls_datapaths),
S_SWITCH_IN_PRE_STATEFUL, 120, ds_cstr(match), ds_cstr(action),
- &lb->nlb->header_, NULL);
+ &lb->nlb->header_, lb_dps->lflow_ref);
}
}
@@ -7893,7 +7893,7 @@ build_lb_rules(struct lflow_table *lflows, struct ovn_lb_datapaths *lb_dps,
}
build_lb_affinity_ls_flows(lflows, lb_dps, lb_vip, ls_datapaths,
- NULL);
+ lb_dps->lflow_ref);
unsigned long *dp_non_meter = NULL;
bool build_non_meter = false;
@@ -7917,7 +7917,7 @@ build_lb_rules(struct lflow_table *lflows, struct ovn_lb_datapaths *lb_dps,
lflows, od, S_SWITCH_IN_LB, priority,
ds_cstr(match), ds_cstr(action),
NULL, meter, &lb->nlb->header_,
- NULL);
+ lb_dps->lflow_ref);
}
}
if (!reject || build_non_meter) {
@@ -7925,7 +7925,7 @@ build_lb_rules(struct lflow_table *lflows, struct ovn_lb_datapaths *lb_dps,
lflows, dp_non_meter ? dp_non_meter : lb_dps->nb_ls_map,
ods_size(ls_datapaths), S_SWITCH_IN_LB, priority,
ds_cstr(match), ds_cstr(action), &lb->nlb->header_,
- NULL);
+ lb_dps->lflow_ref);
}
bitmap_free(dp_non_meter);
}
@@ -9283,12 +9283,13 @@ build_lswitch_arp_nd_responder_default(struct ovn_datapath *od,
/* Ingress table 19: ARP/ND responder for service monitor source ip.
* (priority 110)*/
static void
-build_lswitch_arp_nd_service_monitor(const struct ovn_northd_lb *lb,
+build_lswitch_arp_nd_service_monitor(const struct ovn_lb_datapaths *lb_dps,
const struct hmap *ls_ports,
struct lflow_table *lflows,
struct ds *actions,
struct ds *match)
{
+ const struct ovn_northd_lb *lb = lb_dps->lb;
for (size_t i = 0; i < lb->n_vips; i++) {
struct ovn_northd_lb_vip *lb_vip_nb = &lb->vips_nb[i];
if (!lb_vip_nb->lb_health_check) {
@@ -9352,7 +9353,7 @@ build_lswitch_arp_nd_service_monitor(const struct ovn_northd_lb *lb,
S_SWITCH_IN_ARP_ND_RSP, 110,
ds_cstr(match), ds_cstr(actions),
&lb->nlb->header_,
- NULL);
+ lb_dps->lflow_ref);
}
}
}
@@ -11377,7 +11378,8 @@ build_lrouter_nat_flows_for_lb(
if (!od->n_l3dgw_ports) {
bitmap_set1(gw_dp_bitmap[type], index);
} else {
- build_distr_lrouter_nat_flows_for_lb(&ctx, type, od, NULL);
+ build_distr_lrouter_nat_flows_for_lb(&ctx, type, od,
+ lb_dps->lflow_ref);
}
if (lb->affinity_timeout) {
@@ -11398,17 +11400,17 @@ build_lrouter_nat_flows_for_lb(
* S_ROUTER_IN_DNAT stage. */
ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_UNSNAT, 120,
ds_cstr(&unsnat_match), "next;",
- &lb->nlb->header_, NULL);
+ &lb->nlb->header_, lb_dps->lflow_ref);
}
}
for (size_t type = 0; type < LROUTER_NAT_LB_FLOW_MAX; type++) {
build_gw_lrouter_nat_flows_for_lb(&ctx, type, lr_datapaths,
gw_dp_bitmap[type],
- NULL);
+ lb_dps->lflow_ref);
build_lb_affinity_lr_flows(lflows, lb, lb_vip, ds_cstr(match),
aff_action[type], aff_dp_bitmap[type],
- lr_datapaths, NULL);
+ lr_datapaths, lb_dps->lflow_ref);
}
ds_destroy(&unsnat_match);
@@ -11457,7 +11459,7 @@ build_lswitch_flows_for_lb(struct ovn_lb_datapaths *lb_dps,
od->nbs->copp,
meter_groups),
&lb->nlb->header_,
- NULL);
+ lb_dps->lflow_ref);
}
/* Ignore L4 port information in the key because fragmented packets
* may not have L4 information. The pre-stateful table will send
@@ -11507,7 +11509,7 @@ build_lrouter_defrag_flows_for_lb(struct ovn_lb_datapaths *lb_dps,
ovn_lflow_add_with_dp_group(
lflows, lb_dps->nb_lr_map, ods_size(lr_datapaths),
S_ROUTER_IN_DEFRAG, prio, ds_cstr(match), "ct_dnat;",
- &lb_dps->lb->nlb->header_, NULL);
+ &lb_dps->lb->nlb->header_, lb_dps->lflow_ref);
}
}
@@ -11549,7 +11551,7 @@ build_lrouter_flows_for_lb(struct ovn_lb_datapaths *lb_dps,
copp_meter_get(COPP_EVENT_ELB,
od->nbr->copp,
meter_groups),
- &lb->nlb->header_, NULL);
+ &lb->nlb->header_, lb_dps->lflow_ref);
}
}
@@ -11559,7 +11561,7 @@ build_lrouter_flows_for_lb(struct ovn_lb_datapaths *lb_dps,
ovn_lflow_add(lflows, od, S_ROUTER_OUT_SNAT, 120,
"flags.skip_snat_for_lb == 1 && ip", "next;",
- NULL);
+ lb_dps->lflow_ref);
}
}
}
@@ -15951,8 +15953,10 @@ build_lflows_thread(void *arg)
struct ovn_port *op;
int bnum;
- /* Note: lflow_ref is not thread safe. Ensure that op->lflow_ref
- * is not accessed by multiple threads at the same time. */
+ /* Note: lflow_ref is not thread safe. Ensure that
+ * - op->lflow_ref
+ * - lb_dps->lflow_ref
+ * are not accessed by multiple threads at the same time. */
while (!stop_parallel_processing()) {
wait_for_work(control);
lsi = (struct lswitch_flow_build_info *) control->data;
@@ -16030,7 +16034,7 @@ build_lflows_thread(void *arg)
if (stop_parallel_processing()) {
return NULL;
}
- build_lswitch_arp_nd_service_monitor(lb_dps->lb,
+ build_lswitch_arp_nd_service_monitor(lb_dps,
lsi->ls_ports,
lsi->lflows,
&lsi->match,
@@ -16268,7 +16272,7 @@ build_lswitch_and_lrouter_flows(
stopwatch_stop(LFLOWS_PORTS_STOPWATCH_NAME, time_msec());
stopwatch_start(LFLOWS_LBS_STOPWATCH_NAME, time_msec());
HMAP_FOR_EACH (lb_dps, hmap_node, lb_dps_map) {
- build_lswitch_arp_nd_service_monitor(lb_dps->lb, lsi.ls_ports,
+ build_lswitch_arp_nd_service_monitor(lb_dps, lsi.ls_ports,
lsi.lflows, &lsi.actions,
&lsi.match);
build_lrouter_defrag_flows_for_lb(lb_dps, lsi.lflows,
@@ -16458,6 +16462,7 @@ void build_lflows(struct ovsdb_idl_txn *ovnsb_txn,
void
lflow_reset_northd_refs(struct lflow_input *lflow_input)
{
+ struct ovn_lb_datapaths *lb_dps;
struct ovn_port *op;
HMAP_FOR_EACH (op, key_node, lflow_input->ls_ports) {
@@ -16469,6 +16474,10 @@ lflow_reset_northd_refs(struct lflow_input *lflow_input)
lflow_ref_clear(op->lflow_ref);
lflow_ref_clear(op->stateful_lflow_ref);
}
+
+ HMAP_FOR_EACH (lb_dps, hmap_node, lflow_input->lb_datapaths_map) {
+ lflow_ref_clear(lb_dps->lflow_ref);
+ }
}
bool
@@ -16626,6 +16635,72 @@ lflow_handle_northd_port_changes(struct ovsdb_idl_txn *ovnsb_txn,
return true;
}
+bool
+lflow_handle_northd_lb_changes(struct ovsdb_idl_txn *ovnsb_txn,
+ struct tracked_lbs *trk_lbs,
+ struct lflow_input *lflow_input,
+ struct lflow_table *lflows)
+{
+ struct ovn_lb_datapaths *lb_dps;
+ struct hmapx_node *hmapx_node;
+ HMAPX_FOR_EACH (hmapx_node, &trk_lbs->deleted) {
+ lb_dps = hmapx_node->data;
+
+ lflow_ref_resync_flows(
+ lb_dps->lflow_ref, lflows, ovnsb_txn, lflow_input->ls_datapaths,
+ lflow_input->lr_datapaths,
+ lflow_input->ovn_internal_version_changed,
+ lflow_input->sbrec_logical_flow_table,
+ lflow_input->sbrec_logical_dp_group_table);
+ }
+
+ HMAPX_FOR_EACH (hmapx_node, &trk_lbs->crupdated) {
+ lb_dps = hmapx_node->data;
+
+ /* unlink old lflows. */
+ lflow_ref_unlink_lflows(lb_dps->lflow_ref);
+
+ /* Generate new lflows. */
+ struct ds match = DS_EMPTY_INITIALIZER;
+ struct ds actions = DS_EMPTY_INITIALIZER;
+
+ build_lswitch_arp_nd_service_monitor(lb_dps, lflow_input->ls_ports,
+ lflows, &actions,
+ &match);
+ build_lrouter_defrag_flows_for_lb(lb_dps, lflows,
+ lflow_input->lr_datapaths, &match);
+ build_lrouter_flows_for_lb(lb_dps, lflows,
+ lflow_input->meter_groups,
+ lflow_input->lr_datapaths,
+ lflow_input->lr_stateful_table,
+ lflow_input->features,
+ lflow_input->svc_monitor_map,
+ &match, &actions);
+ build_lswitch_flows_for_lb(lb_dps, lflows,
+ lflow_input->meter_groups,
+ lflow_input->ls_datapaths,
+ lflow_input->features,
+ lflow_input->svc_monitor_map,
+ &match, &actions);
+
+ ds_destroy(&match);
+ ds_destroy(&actions);
+
+ /* Sync the new flows to SB. */
+ bool handled = lflow_ref_sync_lflows(
+ lb_dps->lflow_ref, lflows, ovnsb_txn, lflow_input->ls_datapaths,
+ lflow_input->lr_datapaths,
+ lflow_input->ovn_internal_version_changed,
+ lflow_input->sbrec_logical_flow_table,
+ lflow_input->sbrec_logical_dp_group_table);
+ if (!handled) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
static bool
mirror_needs_update(const struct nbrec_mirror *nb_mirror,
const struct sbrec_mirror *sb_mirror)
@@ -685,6 +685,10 @@ bool lflow_handle_northd_port_changes(struct ovsdb_idl_txn *ovnsb_txn,
struct tracked_ovn_ports *,
struct lflow_input *,
struct lflow_table *lflows);
+bool lflow_handle_northd_lb_changes(struct ovsdb_idl_txn *ovnsb_txn,
+ struct tracked_lbs *,
+ struct lflow_input *,
+ struct lflow_table *lflows);
bool northd_handle_sb_port_binding_changes(
const struct sbrec_port_binding_table *, struct hmap *ls_ports,
struct hmap *lr_ports);
@@ -10604,7 +10604,7 @@ check ovn-nbctl --wait=sb lb-add lb1 10.0.0.10:80 10.0.0.3:80
check_engine_stats lb_data norecompute compute
check_engine_stats northd norecompute compute
-check_engine_stats lflow recompute nocompute
+check_engine_stats lflow norecompute compute
check_engine_stats sync_to_sb_lb recompute nocompute
CHECK_NO_CHANGE_AFTER_RECOMPUTE(1)
@@ -10614,21 +10614,26 @@ check ovn-nbctl --wait=sb set load_balancer . ip_port_mappings:10.0.0.3=sw0-p1:1
check_engine_stats lb_data norecompute compute
check_engine_stats northd norecompute compute
check_engine_stats lr_stateful norecompute compute
-check_engine_stats lflow recompute nocompute
+check_engine_stats lflow norecompute compute
check_engine_stats sync_to_sb_lb recompute nocompute
+CHECK_NO_CHANGE_AFTER_RECOMPUTE
+check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats
+check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats
check ovn-nbctl --wait=sb set load_balancer . options:foo=bar
check_engine_stats lb_data norecompute compute
check_engine_stats northd norecompute compute
check_engine_stats lr_stateful norecompute compute
-check_engine_stats lflow recompute nocompute
+check_engine_stats lflow norecompute compute
check_engine_stats sync_to_sb_lb recompute nocompute
+CHECK_NO_CHANGE_AFTER_RECOMPUTE
+check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats
check ovn-nbctl --wait=sb -- lb-add lb2 20.0.0.10:80 20.0.0.20:80 -- lb-add lb3 30.0.0.10:80 30.0.0.20:80
check_engine_stats lb_data norecompute compute
check_engine_stats northd norecompute compute
check_engine_stats lr_stateful norecompute compute
-check_engine_stats lflow recompute nocompute
+check_engine_stats lflow norecompute compute
check_engine_stats sync_to_sb_lb recompute nocompute
CHECK_NO_CHANGE_AFTER_RECOMPUTE(1)
@@ -10638,7 +10643,7 @@ check ovn-nbctl --wait=sb -- lb-del lb2 -- lb-del lb3
check_engine_stats lb_data norecompute compute
check_engine_stats northd norecompute compute
check_engine_stats lr_stateful norecompute compute
-check_engine_stats lflow recompute nocompute
+check_engine_stats lflow norecompute compute
check_engine_stats sync_to_sb_lb recompute nocompute
CHECK_NO_CHANGE_AFTER_RECOMPUTE(1)
@@ -10849,8 +10854,9 @@ check ovn-nbctl --wait=sb add load_balancer_group . load_Balancer $lb1_uuid
check_engine_stats lb_data norecompute compute
check_engine_stats northd norecompute compute
check_engine_stats lr_stateful norecompute compute
-check_engine_stats lflow recompute nocompute
+check_engine_stats lflow norecompute compute
check_engine_stats sync_to_sb_lb recompute nocompute
+CHECK_NO_CHANGE_AFTER_RECOMPUTE
check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats
check ovn-nbctl --wait=sb clear load_balancer_group . load_Balancer
@@ -10865,7 +10871,7 @@ check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats
check ovn-nbctl --wait=sb add load_balancer_group . load_Balancer $lb1_uuid
check_engine_stats lb_data norecompute compute
check_engine_stats northd norecompute compute
-check_engine_stats lflow recompute nocompute
+check_engine_stats lflow norecompute compute
check_engine_stats sync_to_sb_lb recompute nocompute
CHECK_NO_CHANGE_AFTER_RECOMPUTE
@@ -10874,6 +10880,7 @@ check ovn-nbctl --wait=sb add logical_switch sw0 load_balancer_group $lbg1_uuid
check_engine_stats lb_data norecompute compute
check_engine_stats northd norecompute compute
check_engine_stats lr_stateful norecompute compute
+check_engine_stats ls_stateful norecompute compute
check_engine_stats lflow recompute nocompute
check_engine_stats sync_to_sb_lb recompute compute
@@ -10882,6 +10889,7 @@ check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats
check ovn-nbctl --wait=sb set load_balancer . options:bar=foo
check_engine_stats lb_data norecompute compute
check_engine_stats northd norecompute compute
+check_engine_stats ls_stateful norecompute compute
check_engine_stats lflow recompute nocompute
check_engine_stats sync_to_sb_lb recompute compute
@@ -10891,6 +10899,7 @@ check ovn-nbctl --wait=sb set load_balancer lb1 vips:'"10.0.0.10:80"'='"10.0.0.1
check_engine_stats lb_data norecompute compute
check_engine_stats northd norecompute compute
check_engine_stats lr_stateful norecompute compute
+check_engine_stats ls_stateful norecompute compute
check_engine_stats lflow recompute nocompute
check_engine_stats sync_to_sb_lb recompute compute
CHECK_NO_CHANGE_AFTER_RECOMPUTE
@@ -10988,6 +10997,7 @@ check_engine_stats northd recompute nocompute
check_engine_stats lr_stateful recompute nocompute
check_engine_stats lflow recompute nocompute
check_engine_stats sync_to_sb_lb recompute compute
+CHECK_NO_CHANGE_AFTER_RECOMPUTE
# Add back lb group to logical switch and then delete it.
check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats
@@ -10997,6 +11007,7 @@ check_engine_stats northd norecompute compute
check_engine_stats lr_stateful norecompute compute
check_engine_stats lflow recompute nocompute
check_engine_stats sync_to_sb_lb recompute compute
+CHECK_NO_CHANGE_AFTER_RECOMPUTE
check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats
check ovn-nbctl --wait=sb clear logical_switch sw0 load_balancer_group -- \
@@ -11030,14 +11041,17 @@ check_engine_stats northd norecompute compute
check_engine_stats lr_stateful norecompute compute
check_engine_stats lflow norecompute nocompute
check_engine_stats sync_to_sb_lb norecompute nocompute
+CHECK_NO_CHANGE_AFTER_RECOMPUTE
check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats
check ovn-nbctl --wait=sb set load_balancer_group . load_balancer="$lb2_uuid,$lb3_uuid,$lb4_uuid"
check_engine_stats lb_data norecompute compute
check_engine_stats northd norecompute compute
+check_engine_stats ls_stateful norecompute compute
check_engine_stats lr_stateful norecompute compute
-check_engine_stats lflow recompute nocompute
+check_engine_stats lflow norecompute compute
check_engine_stats sync_to_sb_lb recompute nocompute
+CHECK_NO_CHANGE_AFTER_RECOMPUTE
check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats
check ovn-nbctl --wait=sb set logical_switch sw0 load_balancer_group=$lbg1_uuid