@@ -27,6 +27,8 @@ enum objdep_type {
OBJDEP_TYPE_PORTBINDING,
OBJDEP_TYPE_MC_GROUP,
OBJDEP_TYPE_TEMPLATE,
+ OBJDEP_TYPE_LFLOW,
+ OBJDEP_TYPE_LFLOW_OD,
OBJDEP_TYPE_MAX,
};
@@ -630,13 +630,10 @@ ovn_pipeline_from_name(const char *pipeline)
uint32_t
sbrec_logical_flow_hash(const struct sbrec_logical_flow *lf)
{
- const struct sbrec_datapath_binding *ld = lf->logical_datapath;
- uint32_t hash = ovn_logical_flow_hash(lf->table_id,
- ovn_pipeline_from_name(lf->pipeline),
- lf->priority, lf->match,
- lf->actions);
-
- return ld ? ovn_logical_flow_hash_datapath(&ld->header_.uuid, hash) : hash;
+ return ovn_logical_flow_hash(lf->table_id,
+ ovn_pipeline_from_name(lf->pipeline),
+ lf->priority, lf->match,
+ lf->actions);
}
uint32_t
@@ -33,7 +33,9 @@ northd_ovn_northd_SOURCES = \
northd/inc-proc-northd.c \
northd/inc-proc-northd.h \
northd/ipam.c \
- northd/ipam.h
+ northd/ipam.h \
+ northd/lflow-mgr.c \
+ northd/lflow-mgr.h
northd_ovn_northd_LDADD = \
lib/libovn.la \
$(OVSDB_LIBDIR)/libovsdb.la \
@@ -24,6 +24,7 @@
#include "en-ls-lb-acls.h"
#include "en-northd.h"
#include "en-meters.h"
+#include "lflow-mgr.h"
#include "lib/inc-proc-eng.h"
#include "northd.h"
@@ -58,6 +59,8 @@ lflow_get_input_data(struct engine_node *node,
EN_OVSDB_GET(engine_get_input("SB_multicast_group", node));
lflow_input->sbrec_igmp_group_table =
EN_OVSDB_GET(engine_get_input("SB_igmp_group", node));
+ lflow_input->sbrec_logical_dp_group_table =
+ EN_OVSDB_GET(engine_get_input("SB_logical_dp_group", node));
lflow_input->sbrec_mcast_group_by_name_dp =
engine_ovsdb_node_get_index(
@@ -91,8 +94,8 @@ void en_lflow_run(struct engine_node *node, void *data)
lflow_input.bfd_connections = &bfd_connections;
struct lflow_data *lflow_data = data;
- lflow_data_destroy(lflow_data);
- lflow_data_init(lflow_data);
+ lflow_table_clear(lflow_data->lflow_table);
+ lflow_table_init(lflow_data->lflow_table);
stopwatch_start(BUILD_LFLOWS_STOPWATCH_NAME, time_msec());
build_bfd_table(eng_ctx->ovnsb_idl_txn,
@@ -100,7 +103,8 @@ void en_lflow_run(struct engine_node *node, void *data)
lflow_input.sbrec_bfd_table,
lflow_input.lr_ports,
&bfd_connections);
- build_lflows(eng_ctx->ovnsb_idl_txn, &lflow_input, &lflow_data->lflows);
+ build_lflows(eng_ctx->ovnsb_idl_txn, &lflow_input,
+ lflow_data->lflow_table);
bfd_cleanup_connections(lflow_input.nbrec_bfd_table,
&bfd_connections);
hmap_destroy(&bfd_connections);
@@ -131,7 +135,7 @@ lflow_northd_handler(struct engine_node *node,
if (!lflow_handle_northd_port_changes(eng_ctx->ovnsb_idl_txn,
&northd_data->trk_northd_changes.trk_ovn_ports,
- &lflow_input, &lflow_data->lflows)) {
+ &lflow_input, lflow_data->lflow_table)) {
return false;
}
@@ -161,11 +165,14 @@ void *en_lflow_init(struct engine_node *node OVS_UNUSED,
struct engine_arg *arg OVS_UNUSED)
{
struct lflow_data *data = xmalloc(sizeof *data);
- lflow_data_init(data);
+ data->lflow_table = lflow_table_alloc();
+ lflow_table_init(data->lflow_table);
return data;
}
-void en_lflow_cleanup(void *data)
+void en_lflow_cleanup(void *data_)
{
- lflow_data_destroy(data);
+ struct lflow_data *data = data_;
+ lflow_table_destroy(data->lflow_table);
+ data->lflow_table = NULL;
}
@@ -9,6 +9,12 @@
#include "lib/inc-proc-eng.h"
+struct lflow_table;
+
+struct lflow_data {
+ struct lflow_table *lflow_table;
+};
+
void en_lflow_run(struct engine_node *node, void *data);
void *en_lflow_init(struct engine_node *node, struct engine_arg *arg);
void en_lflow_cleanup(void *data);
@@ -99,7 +99,8 @@ static unixctl_cb_func chassis_features_list;
SB_NODE(bfd, "bfd") \
SB_NODE(fdb, "fdb") \
SB_NODE(static_mac_binding, "static_mac_binding") \
- SB_NODE(chassis_template_var, "chassis_template_var")
+ SB_NODE(chassis_template_var, "chassis_template_var") \
+ SB_NODE(logical_dp_group, "logical_dp_group")
enum sb_engine_node {
#define SB_NODE(NAME, NAME_STR) SB_##NAME,
@@ -234,6 +235,7 @@ void inc_proc_northd_init(struct ovsdb_idl_loop *nb,
engine_add_input(&en_lflow, &en_sb_logical_flow, NULL);
engine_add_input(&en_lflow, &en_sb_multicast_group, NULL);
engine_add_input(&en_lflow, &en_sb_igmp_group, NULL);
+ engine_add_input(&en_lflow, &en_sb_logical_dp_group, NULL);
engine_add_input(&en_lflow, &en_northd, lflow_northd_handler);
engine_add_input(&en_lflow, &en_port_group, lflow_port_group_handler);
engine_add_input(&en_lflow, &en_lr_lb_nat_data, NULL);
new file mode 100644
@@ -0,0 +1,1167 @@
+/*
+ * Copyright (c) 2023, Red Hat, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <config.h>
+
+#include <getopt.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+/* OVS includes */
+#include "include/openvswitch/hmap.h"
+#include "include/openvswitch/thread.h"
+#include "lib/bitmap.h"
+#include "lib/uuidset.h"
+#include "openvswitch/util.h"
+#include "openvswitch/vlog.h"
+#include "ovs-thread.h"
+#include "stopwatch.h"
+
+/* OVN includes */
+#include "debug.h"
+#include "lflow-mgr.h"
+#include "lib/objdep.h"
+#include "lib/ovn-parallel-hmap.h"
+#include "northd.h"
+
+VLOG_DEFINE_THIS_MODULE(lflow_mgr);
+
+/* Static function declarations. */
+struct ovn_lflow;
+
+static void ovn_lflow_init(struct ovn_lflow *, struct ovn_datapath *od,
+ size_t dp_bitmap_len, enum ovn_stage stage,
+ uint16_t priority, char *match,
+ char *actions, char *io_port,
+ char *ctrl_meter, char *stage_hint,
+ const char *where, uint32_t hash);
+static struct ovn_lflow *ovn_lflow_find(const struct hmap *lflows,
+ enum ovn_stage stage,
+ uint16_t priority, const char *match,
+ const char *actions,
+ const char *ctrl_meter, uint32_t hash);
+static struct ovn_lflow * ovn_lflow_uuid_find(
+ const struct hmap *lflows_hash_map, const struct uuid *lflow_uuid);
+static void ovn_lflow_destroy(struct lflow_table *lflow_table,
+ struct ovn_lflow *lflow);
+static char *ovn_lflow_hint(const struct ovsdb_idl_row *row);
+
+static struct ovn_lflow *do_ovn_lflow_add(
+ struct lflow_table *, const struct ovn_datapath *,
+ const unsigned long *dp_bitmap, size_t dp_bitmap_len, uint32_t hash,
+ enum ovn_stage stage, uint16_t priority, const char *match,
+ const char *actions, const char *io_port,
+ const char *ctrl_meter,
+ const struct ovsdb_idl_row *stage_hint,
+ const char *where);
+
+
+static struct ovs_mutex *lflow_hash_lock(const struct hmap *lflow_table,
+ uint32_t hash);
+static void lflow_hash_unlock(struct ovs_mutex *hash_lock);
+
+static struct ovn_dp_group *ovn_dp_group_get(
+ struct hmap *dp_groups, size_t desired_n,
+ const unsigned long *desired_bitmap,
+ size_t bitmap_len);
+static struct ovn_dp_group *ovn_dp_group_create(
+ struct ovsdb_idl_txn *ovnsb_txn, struct hmap *dp_groups,
+ struct sbrec_logical_dp_group *, size_t desired_n,
+ const unsigned long *desired_bitmap,
+ size_t bitmap_len, bool is_switch,
+ const struct ovn_datapaths *ls_datapaths,
+ const struct ovn_datapaths *lr_datapaths);
+static struct ovn_dp_group *ovn_dp_group_get(
+ struct hmap *dp_groups, size_t desired_n,
+ const unsigned long *desired_bitmap,
+ size_t bitmap_len);
+static struct sbrec_logical_dp_group *ovn_sb_insert_or_update_logical_dp_group(
+ struct ovsdb_idl_txn *ovnsb_txn,
+ struct sbrec_logical_dp_group *,
+ const unsigned long *dpg_bitmap,
+ const struct ovn_datapaths *);
+static struct ovn_dp_group *ovn_dp_group_find(const struct hmap *dp_groups,
+ const unsigned long *dpg_bitmap,
+ size_t bitmap_len,
+ uint32_t hash);
+static void inc_ovn_dp_group_ref(struct ovn_dp_group *);
+static void dec_ovn_dp_group_ref(struct hmap *dp_groups,
+ struct ovn_dp_group *);
+static void ovn_dp_group_add_with_reference(struct ovn_lflow *,
+ const struct ovn_datapath *od,
+ const unsigned long *dp_bitmap,
+ size_t bitmap_len);
+
+static bool is_lflow_and_od_type_match(const struct ovn_datapath *,
+ struct ovn_lflow *);
+static void unlink_objres_lflows(struct resource_to_objects_node *,
+ const struct ovn_datapath *od,
+ struct lflow_table *,
+ struct objdep_mgr *);
+static void sync_lflows_from_objres(
+ struct resource_to_objects_node *, struct lflow_table *,
+ struct objdep_mgr *, struct ovsdb_idl_txn *,
+ const struct ovn_datapaths *ls_datapaths,
+ const struct ovn_datapaths *lr_datapaths,
+ bool ovn_internal_version_changed,
+ const struct sbrec_logical_flow_table *,
+ const struct sbrec_logical_dp_group_table *);
+static void sync_lflow_to_sb(struct ovn_lflow *,
+ struct ovsdb_idl_txn *ovnsb_txn,
+ struct lflow_table *,
+ const struct ovn_datapaths *ls_datapaths,
+ const struct ovn_datapaths *lr_datapaths,
+ bool ovn_internal_version_changed,
+ const struct sbrec_logical_flow *sbflow,
+ const struct sbrec_logical_dp_group_table *);
+
+extern int parallelization_state;
+extern thread_local size_t thread_lflow_counter;
+
+static bool lflow_hash_lock_initialized = false;
+/* The lflow_hash_lock is a mutex array that protects updates to the shared
+ * lflow table across threads when parallel lflow build and dp-group are both
+ * enabled. To avoid high contention between threads, a big array of mutexes
+ * is used instead of just one. This is possible because when parallel build
+ * is used we only use hmap_insert_fast() to update the hmap, which would not
+ * touch the bucket array but only the list in a single bucket. We only need to
+ * make sure that when adding lflows to the same hash bucket, the same lock is
+ * used, so that no two threads can add to the bucket at the same time. It is
+ * ok that the same lock is used to protect multiple buckets, so a fixed-size
+ * mutex array is used instead of 1-1 mapping to the hash buckets. This
+ * simplifies the implementation while effectively reducing lock contention
+ * because the chance of different threads contending for the same lock among
+ * the big number of locks is very low. */
+#define LFLOW_HASH_LOCK_MASK 0xFFFF
+static struct ovs_mutex lflow_hash_locks[LFLOW_HASH_LOCK_MASK + 1];
+
+/* Full thread safety analysis is not possible with hash locks, because
+ * they are taken conditionally based on the 'parallelization_state' and
+ * a flow hash. Also, the order in which two hash locks are taken is not
+ * predictable during the static analysis.
+ *
+ * Since the order of taking two locks depends on a random hash, to avoid
+ * ABBA deadlocks, no two hash locks can be nested. In that sense an array
+ * of hash locks is similar to a single mutex.
+ *
+ * Using a fake mutex to partially simulate thread safety restrictions, as
+ * if it were actually a single mutex.
+ *
+ * OVS_NO_THREAD_SAFETY_ANALYSIS below allows us to ignore conditional
+ * nature of the lock. Unlike other attributes, it applies to the
+ * implementation and not to the interface. So, we can define a function
+ * that acquires the lock without analysing the way it does that.
+ */
+extern struct ovs_mutex fake_hash_mutex;
+
+struct ovn_lflow {
+ struct hmap_node hmap_node;
+ struct hmap_node hash_node;
+ struct ovs_list list_node; /* For temporary list of lflows. Don't remove
+ at destroy. */
+
+ struct ovn_datapath *od; /* 'logical_datapath' in SB schema. */
+ unsigned long *dpg_bitmap; /* Bitmap of all datapaths by their 'index'.*/
+ enum ovn_stage stage;
+ uint16_t priority;
+ char *match;
+ char *actions;
+ char *io_port;
+ char *stage_hint;
+ char *ctrl_meter;
+ size_t n_ods; /* Number of datapaths referenced by 'od' and
+ * 'dpg_bitmap'. */
+ struct ovn_dp_group *dpg; /* Link to unique Sb datapath group. */
+
+ const char *where;
+
+ struct uuid sb_uuid; /* SB DB row uuid, specified by northd. */
+ struct uuid lflow_uuid;
+};
+
+struct lflow_table {
+ struct hmap match_map;
+ struct hmap hash_map;
+ struct hmap ls_dp_groups;
+ struct hmap lr_dp_groups;
+ ssize_t max_seen_lflow_size;
+};
+
+struct lflow_table *
+lflow_table_alloc(void)
+{
+ struct lflow_table *lflow_table = xzalloc(sizeof *lflow_table);
+ lflow_table->max_seen_lflow_size = 128;
+
+ return lflow_table;
+}
+
+void
+lflow_table_init(struct lflow_table *lflow_table)
+{
+ fast_hmap_size_for(&lflow_table->match_map,
+ lflow_table->max_seen_lflow_size);
+ fast_hmap_size_for(&lflow_table->hash_map,
+ lflow_table->max_seen_lflow_size);
+ ovn_dp_groups_init(&lflow_table->ls_dp_groups);
+ ovn_dp_groups_init(&lflow_table->lr_dp_groups);
+}
+
+void
+lflow_table_clear(struct lflow_table *lflow_table)
+{
+ struct ovn_lflow *lflow;
+ HMAP_FOR_EACH_SAFE (lflow, hmap_node, &lflow_table->match_map) {
+ ovn_lflow_destroy(lflow_table, lflow);
+ }
+ hmap_destroy(&lflow_table->match_map);
+ hmap_destroy(&lflow_table->hash_map);
+
+ ovn_dp_groups_destroy(&lflow_table->ls_dp_groups);
+ ovn_dp_groups_destroy(&lflow_table->lr_dp_groups);
+}
+
+void
+lflow_table_destroy(struct lflow_table *lflow_table)
+{
+ lflow_table_clear(lflow_table);
+ free(lflow_table);
+}
+
+void
+lflow_table_expand(struct lflow_table *lflow_table)
+{
+ hmap_expand(&lflow_table->match_map);
+ hmap_expand(&lflow_table->hash_map);
+
+ if (hmap_count(&lflow_table->match_map) >
+ lflow_table->max_seen_lflow_size) {
+ lflow_table->max_seen_lflow_size = hmap_count(&lflow_table->match_map);
+ }
+}
+
+void
+lflow_table_set_size(struct lflow_table *lflow_table, size_t size)
+{
+ lflow_table->match_map.n = size;
+ lflow_table->hash_map.n = size;
+}
+
+void
+lflow_table_sync_to_sb(struct lflow_table *lflow_table,
+ struct ovsdb_idl_txn *ovnsb_txn,
+ const struct ovn_datapaths *ls_datapaths,
+ const struct ovn_datapaths *lr_datapaths,
+ bool ovn_internal_version_changed,
+ const struct sbrec_logical_flow_table *sb_flow_table,
+ const struct sbrec_logical_dp_group_table *dpgrp_table)
+{
+ struct hmap lflows_temp = HMAP_INITIALIZER(&lflows_temp);
+ struct hmap *lflows = &lflow_table->match_map;
+ struct ovn_lflow *lflow;
+
+ /* Push changes to the Logical_Flow table to database. */
+ const struct sbrec_logical_flow *sbflow;
+ SBREC_LOGICAL_FLOW_TABLE_FOR_EACH_SAFE (sbflow, sb_flow_table) {
+ struct sbrec_logical_dp_group *dp_group = sbflow->logical_dp_group;
+ struct ovn_datapath *logical_datapath_od = NULL;
+ size_t i;
+
+ /* Find one valid datapath to get the datapath type. */
+ struct sbrec_datapath_binding *dp = sbflow->logical_datapath;
+ if (dp) {
+ logical_datapath_od = ovn_datapath_from_sbrec(
+ &ls_datapaths->datapaths,
+ &lr_datapaths->datapaths,
+ dp);
+ if (logical_datapath_od
+ && ovn_datapath_is_stale(logical_datapath_od)) {
+ logical_datapath_od = NULL;
+ }
+ }
+ for (i = 0; dp_group && i < dp_group->n_datapaths; i++) {
+ logical_datapath_od = ovn_datapath_from_sbrec(
+ &ls_datapaths->datapaths,
+ &lr_datapaths->datapaths,
+ dp_group->datapaths[i]);
+ if (logical_datapath_od
+ && !ovn_datapath_is_stale(logical_datapath_od)) {
+ break;
+ }
+ logical_datapath_od = NULL;
+ }
+
+ if (!logical_datapath_od) {
+ /* This lflow has no valid logical datapaths. */
+ sbrec_logical_flow_delete(sbflow);
+ continue;
+ }
+
+ enum ovn_pipeline pipeline
+ = !strcmp(sbflow->pipeline, "ingress") ? P_IN : P_OUT;
+
+ lflow = ovn_lflow_find(
+ lflows,
+ ovn_stage_build(ovn_datapath_get_type(logical_datapath_od),
+ pipeline, sbflow->table_id),
+ sbflow->priority, sbflow->match, sbflow->actions,
+ sbflow->controller_meter, sbflow->hash);
+ if (lflow) {
+ sync_lflow_to_sb(lflow, ovnsb_txn, lflow_table, ls_datapaths,
+ lr_datapaths, ovn_internal_version_changed,
+ sbflow, dpgrp_table);
+
+ hmap_remove(lflows, &lflow->hmap_node);
+ hmap_insert(&lflows_temp, &lflow->hmap_node,
+ hmap_node_hash(&lflow->hmap_node));
+ } else {
+ sbrec_logical_flow_delete(sbflow);
+ }
+ }
+
+ HMAP_FOR_EACH_SAFE (lflow, hmap_node, lflows) {
+ sync_lflow_to_sb(lflow, ovnsb_txn, lflow_table, ls_datapaths,
+ lr_datapaths, ovn_internal_version_changed,
+ NULL, dpgrp_table);
+
+ hmap_remove(lflows, &lflow->hmap_node);
+ hmap_insert(&lflows_temp, &lflow->hmap_node,
+ hmap_node_hash(&lflow->hmap_node));
+ }
+ hmap_swap(lflows, &lflows_temp);
+ hmap_destroy(&lflows_temp);
+}
+
+/* lflow ref mgr */
+struct lflow_ref {
+ char *res_name;
+ struct objdep_mgr objdep_mgr;
+ const struct ovn_datapath *od;
+};
+
+struct lflow_ref *
+lflow_ref_alloc(const char *res_name)
+{
+ struct lflow_ref *lflow_ref = xzalloc(sizeof *lflow_ref);
+ lflow_ref->res_name = xstrdup(res_name);
+ objdep_mgr_init(&lflow_ref->objdep_mgr);
+
+ return lflow_ref;
+}
+
+void
+lflow_ref_set_od(struct lflow_ref *lflow_ref, const struct ovn_datapath *od)
+{
+ lflow_ref->od = od;
+}
+
+void
+lflow_ref_destroy(struct lflow_ref *lflow_ref)
+{
+ free(lflow_ref->res_name);
+ objdep_mgr_destroy(&lflow_ref->objdep_mgr);
+ free(lflow_ref);
+}
+
+void
+lflow_ref_clear_lflows(struct lflow_ref *lflow_ref,
+ const struct ovn_datapath *od,
+ struct lflow_table *lflow_table)
+{
+ struct resource_to_objects_node *res_node = objdep_mgr_find_objs(
+ &lflow_ref->objdep_mgr, OBJDEP_TYPE_LFLOW, lflow_ref->res_name);
+
+ unlink_objres_lflows(res_node, od, lflow_table, &lflow_ref->objdep_mgr);
+}
+
+void
+lflow_ref_clear_and_sync_lflows(struct lflow_ref *lflow_ref,
+ const struct ovn_datapath *od,
+ struct lflow_table *lflow_table,
+ struct ovsdb_idl_txn *ovnsb_txn,
+ const struct ovn_datapaths *ls_datapaths,
+ const struct ovn_datapaths *lr_datapaths,
+ bool ovn_internal_version_changed,
+ const struct sbrec_logical_flow_table *sbflow_table,
+ const struct sbrec_logical_dp_group_table *dpgrp_table)
+{
+ struct resource_to_objects_node *res_node = objdep_mgr_find_objs(
+ &lflow_ref->objdep_mgr, OBJDEP_TYPE_LFLOW, lflow_ref->res_name);
+
+ unlink_objres_lflows(res_node, od, lflow_table, &lflow_ref->objdep_mgr);
+ sync_lflows_from_objres(res_node, lflow_table, &lflow_ref->objdep_mgr,
+ ovnsb_txn, ls_datapaths, lr_datapaths,
+ ovn_internal_version_changed, sbflow_table,
+ dpgrp_table);
+}
+
+void
+lflow_ref_sync_lflows_to_sb(struct lflow_ref *lflow_ref,
+ struct lflow_table *lflow_table,
+ struct ovsdb_idl_txn *ovnsb_txn,
+ const struct ovn_datapaths *ls_datapaths,
+ const struct ovn_datapaths *lr_datapaths,
+ bool ovn_internal_version_changed,
+ const struct sbrec_logical_flow_table *sbflow_table,
+ const struct sbrec_logical_dp_group_table *dpgrp_table)
+{
+ struct resource_to_objects_node *res_node = objdep_mgr_find_objs(
+ &lflow_ref->objdep_mgr, OBJDEP_TYPE_LFLOW, lflow_ref->res_name);
+
+ sync_lflows_from_objres(res_node, lflow_table, &lflow_ref->objdep_mgr,
+ ovnsb_txn, ls_datapaths, lr_datapaths,
+ ovn_internal_version_changed, sbflow_table,
+ dpgrp_table);
+}
+
+void
+lflow_table_add_lflow(struct lflow_table *lflow_table,
+ const struct ovn_datapath *od,
+ const unsigned long *dp_bitmap, size_t dp_bitmap_len,
+ enum ovn_stage stage, uint16_t priority,
+ const char *match, const char *actions,
+ const char *io_port, const char *ctrl_meter,
+ const struct ovsdb_idl_row *stage_hint,
+ const char *where,
+ struct lflow_ref *lflow_ref)
+ OVS_EXCLUDED(fake_hash_mutex)
+{
+ struct ovs_mutex *hash_lock;
+ uint32_t hash;
+
+ ovs_assert(!od ||
+ ovn_stage_to_datapath_type(stage) == ovn_datapath_get_type(od));
+
+ hash = ovn_logical_flow_hash(ovn_stage_get_table(stage),
+ ovn_stage_get_pipeline(stage),
+ priority, match,
+ actions);
+
+ hash_lock = lflow_hash_lock(&lflow_table->match_map, hash);
+ struct ovn_lflow *lflow =
+ do_ovn_lflow_add(lflow_table, od, dp_bitmap,
+ dp_bitmap_len, hash, stage,
+ priority, match, actions,
+ io_port, ctrl_meter, stage_hint, where);
+
+ if (lflow_ref) {
+ objdep_mgr_add(&lflow_ref->objdep_mgr, OBJDEP_TYPE_LFLOW,
+ lflow_ref->res_name, &lflow->lflow_uuid);
+
+ if (lflow_ref->od && lflow_ref->od != od) {
+ char uuid_s[UUID_LEN + 1];
+ sprintf(uuid_s, UUID_FMT, UUID_ARGS(&lflow->lflow_uuid));
+
+ struct uuid u = UUID_ZERO;
+ u.parts[0] = od->index;
+ objdep_mgr_add(&lflow_ref->objdep_mgr, OBJDEP_TYPE_LFLOW_OD,
+ uuid_s, &u);
+ }
+ }
+
+ lflow_hash_unlock(hash_lock);
+
+}
+
+void
+lflow_table_add_lflow_default_drop(struct lflow_table *lflow_table,
+ const struct ovn_datapath *od,
+ enum ovn_stage stage,
+ const char *where,
+ struct lflow_ref *lflow_ref)
+{
+ lflow_table_add_lflow(lflow_table, od, NULL, 0, stage, 0, "1",
+ debug_drop_action(), NULL, NULL, NULL,
+ where, lflow_ref);
+}
+
+/* Given a desired bitmap, finds a datapath group in 'dp_groups'. If it
+ * doesn't exist, creates a new one and adds it to 'dp_groups'.
+ * If 'sb_group' is provided, the function will try to re-use this group,
+ * either by taking it directly or by modifying it, if it's not already in use. */
+struct ovn_dp_group *
+ovn_dp_group_get_or_create(struct ovsdb_idl_txn *ovnsb_txn,
+ struct hmap *dp_groups,
+ struct sbrec_logical_dp_group *sb_group,
+ size_t desired_n,
+ const unsigned long *desired_bitmap,
+ size_t bitmap_len,
+ bool is_switch,
+ const struct ovn_datapaths *ls_datapaths,
+ const struct ovn_datapaths *lr_datapaths)
+{
+ struct ovn_dp_group *dpg;
+
+ dpg = ovn_dp_group_get(dp_groups, desired_n, desired_bitmap, bitmap_len);
+ if (dpg) {
+ return dpg;
+ }
+
+ return ovn_dp_group_create(ovnsb_txn, dp_groups, sb_group, desired_n,
+ desired_bitmap, bitmap_len, is_switch,
+ ls_datapaths, lr_datapaths);
+}
+
+void
+ovn_dp_groups_destroy(struct hmap *dp_groups)
+{
+ struct ovn_dp_group *dpg;
+ HMAP_FOR_EACH_POP (dpg, node, dp_groups) {
+ bitmap_free(dpg->bitmap);
+ free(dpg);
+ }
+ hmap_destroy(dp_groups);
+}
+
+
+void
+lflow_hash_lock_init(void)
+{
+ if (!lflow_hash_lock_initialized) {
+ for (size_t i = 0; i < LFLOW_HASH_LOCK_MASK + 1; i++) {
+ ovs_mutex_init(&lflow_hash_locks[i]);
+ }
+ lflow_hash_lock_initialized = true;
+ }
+}
+
+void
+lflow_hash_lock_destroy(void)
+{
+ if (lflow_hash_lock_initialized) {
+ for (size_t i = 0; i < LFLOW_HASH_LOCK_MASK + 1; i++) {
+ ovs_mutex_destroy(&lflow_hash_locks[i]);
+ }
+ }
+ lflow_hash_lock_initialized = false;
+}
+
+/* static functions. */
+static void
+ovn_lflow_init(struct ovn_lflow *lflow, struct ovn_datapath *od,
+ size_t dp_bitmap_len, enum ovn_stage stage, uint16_t priority,
+ char *match, char *actions, char *io_port, char *ctrl_meter,
+ char *stage_hint, const char *where, uint32_t hash)
+{
+ ovs_list_init(&lflow->list_node);
+ lflow->dpg_bitmap = bitmap_allocate(dp_bitmap_len);
+ lflow->od = od;
+ lflow->stage = stage;
+ lflow->priority = priority;
+ lflow->match = match;
+ lflow->actions = actions;
+ lflow->io_port = io_port;
+ lflow->stage_hint = stage_hint;
+ lflow->ctrl_meter = ctrl_meter;
+ lflow->dpg = NULL;
+ lflow->where = where;
+ lflow->sb_uuid = UUID_ZERO;
+ lflow->lflow_uuid = uuid_random();
+ lflow->lflow_uuid.parts[0] = hash;
+}
+
+static struct ovs_mutex *
+lflow_hash_lock(const struct hmap *lflow_table, uint32_t hash)
+ OVS_ACQUIRES(fake_hash_mutex)
+ OVS_NO_THREAD_SAFETY_ANALYSIS
+{
+ struct ovs_mutex *hash_lock = NULL;
+
+ if (parallelization_state == STATE_USE_PARALLELIZATION) {
+ hash_lock =
+ &lflow_hash_locks[hash & lflow_table->mask & LFLOW_HASH_LOCK_MASK];
+ ovs_mutex_lock(hash_lock);
+ }
+ return hash_lock;
+}
+
+static void
+lflow_hash_unlock(struct ovs_mutex *hash_lock)
+ OVS_RELEASES(fake_hash_mutex)
+ OVS_NO_THREAD_SAFETY_ANALYSIS
+{
+ if (hash_lock) {
+ ovs_mutex_unlock(hash_lock);
+ }
+}
+
+static bool
+ovn_lflow_equal(const struct ovn_lflow *a, enum ovn_stage stage,
+ uint16_t priority, const char *match,
+ const char *actions, const char *ctrl_meter)
+{
+ return (a->stage == stage
+ && a->priority == priority
+ && !strcmp(a->match, match)
+ && !strcmp(a->actions, actions)
+ && nullable_string_is_equal(a->ctrl_meter, ctrl_meter));
+}
+
+static struct ovn_lflow *
+ovn_lflow_find(const struct hmap *lflows,
+ enum ovn_stage stage, uint16_t priority,
+ const char *match, const char *actions,
+ const char *ctrl_meter, uint32_t hash)
+{
+ struct ovn_lflow *lflow;
+ HMAP_FOR_EACH_WITH_HASH (lflow, hmap_node, hash, lflows) {
+ if (ovn_lflow_equal(lflow, stage, priority, match, actions,
+ ctrl_meter)) {
+ return lflow;
+ }
+ }
+ return NULL;
+}
+
+static struct ovn_lflow *
+ovn_lflow_uuid_find(const struct hmap *lflows_hash_map,
+ const struct uuid *lflow_uuid)
+{
+ uint32_t hash = lflow_uuid->parts[0];
+ struct ovn_lflow *lflow;
+ HMAP_FOR_EACH_WITH_HASH (lflow, hash_node, hash, lflows_hash_map) {
+ if (uuid_equals(&lflow->lflow_uuid, lflow_uuid)) {
+ return lflow;
+ }
+ }
+ return NULL;
+}
+
+static char *
+ovn_lflow_hint(const struct ovsdb_idl_row *row)
+{
+ if (!row) {
+ return NULL;
+ }
+ return xasprintf("%08x", row->uuid.parts[0]);
+}
+
+static void
+ovn_lflow_destroy(struct lflow_table *lflow_table, struct ovn_lflow *lflow)
+{
+ if (lflow) {
+ if (lflow_table) {
+ hmap_remove(&lflow_table->match_map, &lflow->hmap_node);
+ hmap_remove(&lflow_table->hash_map, &lflow->hash_node);
+ }
+ bitmap_free(lflow->dpg_bitmap);
+ free(lflow->match);
+ free(lflow->actions);
+ free(lflow->io_port);
+ free(lflow->stage_hint);
+ free(lflow->ctrl_meter);
+ free(lflow);
+ }
+}
+
+static struct ovn_lflow *
+do_ovn_lflow_add(struct lflow_table *lflow_table,
+ const struct ovn_datapath *od,
+ const unsigned long *dp_bitmap, size_t dp_bitmap_len,
+ uint32_t hash, enum ovn_stage stage, uint16_t priority,
+ const char *match, const char *actions,
+ const char *io_port, const char *ctrl_meter,
+ const struct ovsdb_idl_row *stage_hint,
+ const char *where)
+ OVS_REQUIRES(fake_hash_mutex)
+{
+ struct ovn_lflow *old_lflow;
+ struct ovn_lflow *lflow;
+
+ size_t bitmap_len = od ? ods_size(od->datapaths) : dp_bitmap_len;
+ ovs_assert(bitmap_len);
+
+ old_lflow = ovn_lflow_find(&lflow_table->match_map, stage,
+ priority, match, actions, ctrl_meter, hash);
+ if (old_lflow) {
+ ovn_dp_group_add_with_reference(old_lflow, od, dp_bitmap,
+ bitmap_len);
+ return old_lflow;
+ }
+
+ lflow = xmalloc(sizeof *lflow);
+ /* While adding new logical flows we're not setting single datapath, but
+ * collecting a group. 'od' will be updated later for all flows with only
+ * one datapath in a group, so it could be hashed correctly. */
+ ovn_lflow_init(lflow, NULL, bitmap_len, stage, priority,
+ xstrdup(match), xstrdup(actions),
+ io_port ? xstrdup(io_port) : NULL,
+ nullable_xstrdup(ctrl_meter),
+ ovn_lflow_hint(stage_hint), where, hash);
+
+ ovn_dp_group_add_with_reference(lflow, od, dp_bitmap, bitmap_len);
+
+ if (parallelization_state != STATE_USE_PARALLELIZATION) {
+ hmap_insert(&lflow_table->match_map, &lflow->hmap_node, hash);
+ hmap_insert(&lflow_table->hash_map, &lflow->hash_node, hash);
+ } else {
+ hmap_insert_fast(&lflow_table->match_map, &lflow->hmap_node,
+ hash);
+ hmap_insert_fast(&lflow_table->hash_map, &lflow->hash_node,
+ hash);
+ thread_lflow_counter++;
+ }
+
+ return lflow;
+}
+
+static void
+sync_lflow_to_sb(struct ovn_lflow *lflow,
+ struct ovsdb_idl_txn *ovnsb_txn,
+ struct lflow_table *lflow_table,
+ const struct ovn_datapaths *ls_datapaths,
+ const struct ovn_datapaths *lr_datapaths,
+ bool ovn_internal_version_changed,
+ const struct sbrec_logical_flow *sbflow,
+ const struct sbrec_logical_dp_group_table *sb_dpgrp_table)
+{
+ struct sbrec_logical_dp_group *sbrec_dp_group = NULL;
+ struct ovn_dp_group *pre_sync_dpg = lflow->dpg;
+ struct ovn_datapath **datapaths_array;
+ struct hmap *dp_groups;
+ size_t n_datapaths;
+ bool is_switch;
+
+ if (ovn_stage_to_datapath_type(lflow->stage) == DP_SWITCH) {
+ n_datapaths = ods_size(ls_datapaths);
+ datapaths_array = ls_datapaths->array;
+ dp_groups = &lflow_table->ls_dp_groups;
+ is_switch = true;
+ } else {
+ n_datapaths = ods_size(lr_datapaths);
+ datapaths_array = lr_datapaths->array;
+ dp_groups = &lflow_table->lr_dp_groups;
+ is_switch = false;
+ }
+
+ lflow->n_ods = bitmap_count1(lflow->dpg_bitmap, n_datapaths);
+ ovs_assert(lflow->n_ods);
+
+ if (lflow->n_ods == 1) {
+ /* There is only one datapath, so it should be moved out of the
+ * group to a single 'od'. */
+ size_t index = bitmap_scan(lflow->dpg_bitmap, true, 0,
+ n_datapaths);
+
+ lflow->od = datapaths_array[index];
+ lflow->dpg = NULL;
+ } else {
+ lflow->od = NULL;
+ }
+
+ if (!sbflow) {
+ lflow->sb_uuid = uuid_random();
+ sbflow = sbrec_logical_flow_insert_persist_uuid(ovnsb_txn,
+ &lflow->sb_uuid);
+ const char *pipeline = ovn_stage_get_pipeline_name(lflow->stage);
+ uint8_t table = ovn_stage_get_table(lflow->stage);
+ sbrec_logical_flow_set_pipeline(sbflow, pipeline);
+ sbrec_logical_flow_set_table_id(sbflow, table);
+ sbrec_logical_flow_set_priority(sbflow, lflow->priority);
+ sbrec_logical_flow_set_match(sbflow, lflow->match);
+ sbrec_logical_flow_set_actions(sbflow, lflow->actions);
+ if (lflow->io_port) {
+ struct smap tags = SMAP_INITIALIZER(&tags);
+ smap_add(&tags, "in_out_port", lflow->io_port);
+ sbrec_logical_flow_set_tags(sbflow, &tags);
+ smap_destroy(&tags);
+ }
+ sbrec_logical_flow_set_controller_meter(sbflow, lflow->ctrl_meter);
+
+ /* Trim the source locator lflow->where, which looks something like
+ * "ovn/northd/northd.c:1234", down to just the part following the
+ * last slash, e.g. "northd.c:1234". */
+ const char *slash = strrchr(lflow->where, '/');
+#if _WIN32
+ const char *backslash = strrchr(lflow->where, '\\');
+ if (!slash || backslash > slash) {
+ slash = backslash;
+ }
+#endif
+ const char *where = slash ? slash + 1 : lflow->where;
+
+ struct smap ids = SMAP_INITIALIZER(&ids);
+ smap_add(&ids, "stage-name", ovn_stage_to_str(lflow->stage));
+ smap_add(&ids, "source", where);
+ if (lflow->stage_hint) {
+ smap_add(&ids, "stage-hint", lflow->stage_hint);
+ }
+ sbrec_logical_flow_set_external_ids(sbflow, &ids);
+ smap_destroy(&ids);
+
+ } else {
+ lflow->sb_uuid = sbflow->header_.uuid;
+ sbrec_dp_group = sbflow->logical_dp_group;
+
+ if (ovn_internal_version_changed) {
+ const char *stage_name = smap_get_def(&sbflow->external_ids,
+ "stage-name", "");
+ const char *stage_hint = smap_get_def(&sbflow->external_ids,
+ "stage-hint", "");
+ const char *source = smap_get_def(&sbflow->external_ids,
+ "source", "");
+
+ if (strcmp(stage_name, ovn_stage_to_str(lflow->stage))) {
+ sbrec_logical_flow_update_external_ids_setkey(
+ sbflow, "stage-name", ovn_stage_to_str(lflow->stage));
+ }
+ if (lflow->stage_hint) {
+ if (strcmp(stage_hint, lflow->stage_hint)) {
+ sbrec_logical_flow_update_external_ids_setkey(
+ sbflow, "stage-hint", lflow->stage_hint);
+ }
+ }
+ if (lflow->where) {
+
+ /* Trim the source locator lflow->where, which looks something
+ * like "ovn/northd/northd.c:1234", down to just the part
+ * following the last slash, e.g. "northd.c:1234". */
+ const char *slash = strrchr(lflow->where, '/');
+#if _WIN32
+ const char *backslash = strrchr(lflow->where, '\\');
+ if (!slash || backslash > slash) {
+ slash = backslash;
+ }
+#endif
+ const char *where = slash ? slash + 1 : lflow->where;
+
+ if (strcmp(source, where)) {
+ sbrec_logical_flow_update_external_ids_setkey(
+ sbflow, "source", where);
+ }
+ }
+ }
+ }
+
+ if (lflow->od) {
+ sbrec_logical_flow_set_logical_datapath(sbflow, lflow->od->sb);
+ sbrec_logical_flow_set_logical_dp_group(sbflow, NULL);
+ } else {
+ sbrec_logical_flow_set_logical_datapath(sbflow, NULL);
+ lflow->dpg = ovn_dp_group_get(dp_groups, lflow->n_ods,
+ lflow->dpg_bitmap,
+ n_datapaths);
+ if (lflow->dpg) {
+ /* Update the dpg's sb dp_group. */
+ lflow->dpg->dp_group = sbrec_logical_dp_group_table_get_for_uuid(
+ sb_dpgrp_table,
+ &lflow->dpg->dpg_uuid);
+ ovs_assert(lflow->dpg->dp_group);
+ } else {
+ lflow->dpg = ovn_dp_group_create(
+ ovnsb_txn, dp_groups, sbrec_dp_group,
+ lflow->n_ods, lflow->dpg_bitmap,
+ n_datapaths, is_switch,
+ ls_datapaths,
+ lr_datapaths);
+ }
+ sbrec_logical_flow_set_logical_dp_group(sbflow,
+ lflow->dpg->dp_group);
+ }
+
+ if (pre_sync_dpg != lflow->dpg) {
+ if (lflow->dpg) {
+ inc_ovn_dp_group_ref(lflow->dpg);
+ }
+ if (pre_sync_dpg) {
+ dec_ovn_dp_group_ref(dp_groups, pre_sync_dpg);
+ }
+ }
+}
+
+/* Looks up a datapath group in 'dp_groups' whose datapath bitmap is equal
+ * to 'dpg_bitmap' ('bitmap_len' bits).  'hash' must be the hash the group
+ * was inserted under.  Returns NULL if no such group exists. */
+static struct ovn_dp_group *
+ovn_dp_group_find(const struct hmap *dp_groups,
+                  const unsigned long *dpg_bitmap, size_t bitmap_len,
+                  uint32_t hash)
+{
+    struct ovn_dp_group *dpg;
+
+    HMAP_FOR_EACH_WITH_HASH (dpg, node, hash, dp_groups) {
+        if (bitmap_equal(dpg->bitmap, dpg_bitmap, bitmap_len)) {
+            return dpg;
+        }
+    }
+    return NULL;
+}
+
+/* Increments the reference count of datapath group 'dpg'. */
+static void
+inc_ovn_dp_group_ref(struct ovn_dp_group *dpg)
+{
+    dpg->refcnt++;
+}
+
+/* Decrements the reference count of datapath group 'dpg'.  When the count
+ * drops to zero, removes 'dpg' from 'dp_groups' and frees it.  (The SB
+ * Logical_DP_Group row itself is not deleted here.) */
+static void
+dec_ovn_dp_group_ref(struct hmap *dp_groups, struct ovn_dp_group *dpg)
+{
+    dpg->refcnt--;
+
+    if (!dpg->refcnt) {
+        hmap_remove(dp_groups, &dpg->node);
+        free(dpg->bitmap);
+        free(dpg);
+    }
+}
+
+/* Creates (when 'dp_group' is NULL) or updates a Southbound
+ * Logical_DP_Group row so that its datapath set matches the bits set in
+ * 'dpg_bitmap', resolved through 'datapaths'.  Returns the inserted or
+ * updated row. */
+static struct sbrec_logical_dp_group *
+ovn_sb_insert_or_update_logical_dp_group(
+    struct ovsdb_idl_txn *ovnsb_txn,
+    struct sbrec_logical_dp_group *dp_group,
+    const unsigned long *dpg_bitmap,
+    const struct ovn_datapaths *datapaths)
+{
+    const struct sbrec_datapath_binding **sb;
+    size_t n = 0, index;
+
+    /* Collect the SB datapath bindings for every bit set in the bitmap. */
+    sb = xmalloc(bitmap_count1(dpg_bitmap, ods_size(datapaths)) * sizeof *sb);
+    BITMAP_FOR_EACH_1 (index, ods_size(datapaths), dpg_bitmap) {
+        sb[n++] = datapaths->array[index]->sb;
+    }
+    if (!dp_group) {
+        /* Insert with a persisted, randomly generated UUID so callers can
+         * later re-resolve the row by UUID (see sync_lflow_to_sb()). */
+        struct uuid dpg_uuid = uuid_random();
+        dp_group = sbrec_logical_dp_group_insert_persist_uuid(
+            ovnsb_txn, &dpg_uuid);
+    }
+    sbrec_logical_dp_group_set_datapaths(
+        dp_group, (struct sbrec_datapath_binding **) sb, n);
+    free(sb);
+
+    return dp_group;
+}
+
+/* Returns the datapath group in 'dp_groups' whose bitmap equals
+ * 'desired_bitmap' ('desired_n' bits set), or NULL if there is none.
+ * Groups are hashed by their datapath count ('desired_n'). */
+static struct ovn_dp_group *
+ovn_dp_group_get(struct hmap *dp_groups, size_t desired_n,
+                 const unsigned long *desired_bitmap,
+                 size_t bitmap_len)
+{
+    uint32_t hash;
+
+    hash = hash_int(desired_n, 0);
+    return ovn_dp_group_find(dp_groups, desired_bitmap, bitmap_len, hash);
+}
+
+/* Creates a new datapath group and adds it to 'dp_groups'.
+ * If 'sb_group' is provided, function will try to re-use this group by
+ * either taking it directly, or by modifying, if it's not already in use.
+ * Caller should first call ovn_dp_group_get() before calling this function. */
+static struct ovn_dp_group *
+ovn_dp_group_create(struct ovsdb_idl_txn *ovnsb_txn,
+                    struct hmap *dp_groups,
+                    struct sbrec_logical_dp_group *sb_group,
+                    size_t desired_n,
+                    const unsigned long *desired_bitmap,
+                    size_t bitmap_len,
+                    bool is_switch,
+                    const struct ovn_datapaths *ls_datapaths,
+                    const struct ovn_datapaths *lr_datapaths)
+{
+    struct ovn_dp_group *dpg;
+
+    bool update_dp_group = false, can_modify = false;
+    unsigned long *dpg_bitmap;
+    size_t i, n = 0;
+
+    /* Rebuild the existing SB group's datapath set as a local bitmap,
+     * stopping early if any referenced datapath is unknown or stale. */
+    dpg_bitmap = sb_group ? bitmap_allocate(bitmap_len) : NULL;
+    for (i = 0; sb_group && i < sb_group->n_datapaths; i++) {
+        struct ovn_datapath *datapath_od;
+
+        datapath_od = ovn_datapath_from_sbrec(
+            ls_datapaths ? &ls_datapaths->datapaths : NULL,
+            lr_datapaths ? &lr_datapaths->datapaths : NULL,
+            sb_group->datapaths[i]);
+        if (!datapath_od || ovn_datapath_is_stale(datapath_od)) {
+            break;
+        }
+        bitmap_set1(dpg_bitmap, datapath_od->index);
+        n++;
+    }
+    if (!sb_group || i != sb_group->n_datapaths) {
+        /* No group or stale group.  Not going to be used. */
+        update_dp_group = true;
+        can_modify = true;
+    } else if (!bitmap_equal(dpg_bitmap, desired_bitmap, bitmap_len)) {
+        /* The group in Sb is different. */
+        update_dp_group = true;
+        /* We can modify existing group if it's not already in use. */
+        can_modify = !ovn_dp_group_find(dp_groups, dpg_bitmap,
+                                        bitmap_len, hash_int(n, 0));
+    }
+
+    bitmap_free(dpg_bitmap);
+
+    dpg = xzalloc(sizeof *dpg);
+    dpg->bitmap = bitmap_clone(desired_bitmap, bitmap_len);
+    if (!update_dp_group) {
+        /* SB group already matches the desired set; take it as-is. */
+        dpg->dp_group = sb_group;
+    } else {
+        dpg->dp_group = ovn_sb_insert_or_update_logical_dp_group(
+            ovnsb_txn,
+            can_modify ? sb_group : NULL,
+            desired_bitmap,
+            is_switch ? ls_datapaths : lr_datapaths);
+    }
+    /* Remember the SB row's UUID so the group can be re-resolved later. */
+    dpg->dpg_uuid = dpg->dp_group->header_.uuid;
+    hmap_insert(dp_groups, &dpg->node, hash_int(desired_n, 0));
+
+    return dpg;
+}
+
+/* Adds an OVN datapath to a datapath group of existing logical flow.
+ * Sets the bit for 'od' (if given) and/or ORs 'dp_bitmap' (if given) into
+ * 'lflow_ref''s datapath group bitmap.
+ * Version to use when hash bucket locking is NOT required or the
+ * corresponding hash lock is already taken. */
+static void
+ovn_dp_group_add_with_reference(struct ovn_lflow *lflow_ref,
+                                const struct ovn_datapath *od,
+                                const unsigned long *dp_bitmap,
+                                size_t bitmap_len)
+    OVS_REQUIRES(fake_hash_mutex)
+{
+    if (od) {
+        bitmap_set1(lflow_ref->dpg_bitmap, od->index);
+    }
+    if (dp_bitmap) {
+        bitmap_or(lflow_ref->dpg_bitmap, dp_bitmap, bitmap_len);
+    }
+}
+
+/* Returns true if 'lflow''s stage belongs to the same datapath type
+ * (switch or router) as 'od'. */
+static bool
+is_lflow_and_od_type_match(const struct ovn_datapath *od,
+                           struct ovn_lflow *lflow)
+{
+    enum ovn_datapath_type type = od->nbs ? DP_SWITCH : DP_ROUTER;
+    return ovn_stage_to_datapath_type(lflow->stage) == type;
+}
+
+/* Unlinks the lflows stored in the resource to object nodes for the
+ * datapath 'od' from the lflow dependency manager.
+ * It basically clears the datapath id of the 'od' for the lflows
+ * in the 'res_node'.
+ */
+static void
+unlink_objres_lflows(struct resource_to_objects_node *res_node,
+                     const struct ovn_datapath *od,
+                     struct lflow_table *lflow_table,
+                     struct objdep_mgr *lflowdep_mgr)
+{
+    if (!res_node) {
+        return;
+    }
+
+    struct object_to_resources_list_node *resource_list_node;
+    RESOURCE_FOR_EACH_OBJ (resource_list_node, res_node) {
+        const struct uuid *obj_uuid = &resource_list_node->obj_uuid;
+        struct ovn_lflow *lflow = ovn_lflow_uuid_find(
+            &lflow_table->hash_map, obj_uuid);
+        if (!lflow) {
+            continue;
+        }
+
+        /* Check if the lflow datapath type is the same as the od's. */
+        if (is_lflow_and_od_type_match(od, lflow)) {
+            bitmap_set0(lflow->dpg_bitmap, od->index);
+        } else {
+            /* The datapath type doesn't match.  Which means this lflow was
+             * added due to a resource in the other type.
+             * Eg. For every logical switch port whose lswitch is connected
+             * to a router, an lflow is added in the lr_in_arp_resolve stage.
+             * Get the datapath index of this router and clear it.
+             *
+             * OBJDEP_TYPE_LFLOW_OD type is used to store this lflow object to
+             * logical router resource linking (logical router index is stored
+             * in the uuid.parts[0]).
+             */
+            char uuid_s[UUID_LEN + 1];
+            /* Use snprintf so the write is bounded to the buffer even if
+             * UUID_FMT ever changes. */
+            snprintf(uuid_s, sizeof uuid_s, UUID_FMT,
+                     UUID_ARGS(&lflow->lflow_uuid));
+
+            struct resource_to_objects_node *lflow_od_res =
+                objdep_mgr_find_objs(lflowdep_mgr, OBJDEP_TYPE_LFLOW_OD,
+                                     uuid_s);
+            if (lflow_od_res) {
+                struct object_to_resources_list_node *r_node;
+                RESOURCE_FOR_EACH_OBJ (r_node, lflow_od_res) {
+                    size_t index = r_node->obj_uuid.parts[0];
+                    bitmap_set0(lflow->dpg_bitmap, index);
+                }
+            }
+        }
+    }
+}
+
+/* Syncs every lflow referenced by 'res_node' to the SB database.  An lflow
+ * whose datapath bitmap has become empty is instead deleted from the SB
+ * and from 'lflow_table', and its entry is removed from 'lflowdep_mgr'. */
+static void
+sync_lflows_from_objres(struct resource_to_objects_node *res_node,
+                        struct lflow_table *lflow_table,
+                        struct objdep_mgr *lflowdep_mgr,
+                        struct ovsdb_idl_txn *ovnsb_txn,
+                        const struct ovn_datapaths *ls_datapaths,
+                        const struct ovn_datapaths *lr_datapaths,
+                        bool ovn_internal_version_changed,
+                        const struct sbrec_logical_flow_table *sbflow_table,
+                        const struct sbrec_logical_dp_group_table *dpgrp_table)
+{
+    if (!res_node) {
+        return;
+    }
+
+    /* Collects the uuids of lflows deleted below so their dependency
+     * entries can be dropped after iteration. */
+    struct uuidset lflow_uuidset = UUIDSET_INITIALIZER(&lflow_uuidset);
+    struct object_to_resources_list_node *resource_list_node;
+    RESOURCE_FOR_EACH_OBJ (resource_list_node, res_node) {
+        const struct uuid *obj_uuid = &resource_list_node->obj_uuid;
+
+        struct ovn_lflow *lflow = ovn_lflow_uuid_find(
+            &lflow_table->hash_map, obj_uuid);
+        if (!lflow) {
+            continue;
+        }
+
+        const struct sbrec_logical_flow *sblflow =
+            sbrec_logical_flow_table_get_for_uuid(sbflow_table,
+                                                  &lflow->sb_uuid);
+
+        /* The bitmap length depends on whether the lflow's stage is a
+         * switch or a router stage. */
+        size_t n_datapaths;
+        if (ovn_stage_to_datapath_type(lflow->stage) == DP_SWITCH) {
+            n_datapaths = ods_size(ls_datapaths);
+        } else {
+            n_datapaths = ods_size(lr_datapaths);
+        }
+
+        size_t n_ods = bitmap_count1(lflow->dpg_bitmap, n_datapaths);
+
+        if (n_ods) {
+            sync_lflow_to_sb(lflow, ovnsb_txn, lflow_table, ls_datapaths,
+                             lr_datapaths, ovn_internal_version_changed,
+                             sblflow, dpgrp_table);
+        } else {
+            /* No datapaths left for this lflow: remove it entirely. */
+            if (sblflow) {
+                sbrec_logical_flow_delete(sblflow);
+                ovn_lflow_destroy(lflow_table, lflow);
+            } else {
+                VLOG_ERR("SB lflow "UUID_FMT" not found when "
+                         "deleting lflows for resource %s (type %d). "
+                         "This should not happen. Asserting",
+                         UUID_ARGS(&lflow->sb_uuid),
+                         resource_list_node->resource_node->res_name,
+                         resource_list_node->resource_node->type);
+                ovs_assert(sblflow);
+            }
+            uuidset_insert(&lflow_uuidset, obj_uuid);
+        }
+    }
+
+    /* Drop dependency-manager entries for the lflows deleted above. */
+    struct uuidset_node *unode;
+    UUIDSET_FOR_EACH (unode, &lflow_uuidset) {
+        objdep_mgr_remove_obj(lflowdep_mgr, &unode->uuid);
+    }
+    uuidset_destroy(&lflow_uuidset);
+}
new file mode 100644
@@ -0,0 +1,188 @@
+ /*
+ * Copyright (c) 2023, Red Hat, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef LFLOW_MGR_H
+#define LFLOW_MGR_H 1
+
+#include "include/openvswitch/hmap.h"
+#include "include/openvswitch/uuid.h"
+
+#include "northd.h"
+
+struct ovsdb_idl_txn;
+struct ovn_datapath;
+struct ovsdb_idl_row;
+
+/* lflow map which stores the logical flows. */
+struct lflow_table;
+struct lflow_table *lflow_table_alloc(void);
+void lflow_table_init(struct lflow_table *);
+void lflow_table_clear(struct lflow_table *);
+void lflow_table_destroy(struct lflow_table *);
+void lflow_table_expand(struct lflow_table *);
+void lflow_table_set_size(struct lflow_table *, size_t);
+/* Writes the lflow table's contents to the SB Logical_Flow (and
+ * Logical_DP_Group) tables. */
+void lflow_table_sync_to_sb(struct lflow_table *,
+                            struct ovsdb_idl_txn *ovnsb_txn,
+                            const struct ovn_datapaths *ls_datapaths,
+                            const struct ovn_datapaths *lr_datapaths,
+                            bool ovn_internal_version_changed,
+                            const struct sbrec_logical_flow_table *,
+                            const struct sbrec_logical_dp_group_table *);
+
+void lflow_hash_lock_init(void);
+void lflow_hash_lock_destroy(void);
+
+/* lflow_mgr manages logical flows for a resource (like logical port
+ * or datapath). */
+struct lflow_ref;
+
+/* Allocates an lflow manager for the resource named 'res_name'. */
+struct lflow_ref *lflow_ref_alloc(const char *res_name);
+void lflow_ref_set_od(struct lflow_ref *, const struct ovn_datapath *);
+void lflow_ref_destroy(struct lflow_ref *);
+/* Unlinks 'od' from the lflows tracked by this lflow_ref. */
+void lflow_ref_clear_lflows(struct lflow_ref *, const struct ovn_datapath *,
+                            struct lflow_table *);
+/* Like lflow_ref_clear_lflows(), but also syncs the affected lflows to
+ * the SB database. */
+void lflow_ref_clear_and_sync_lflows(struct lflow_ref *,
+                                const struct ovn_datapath *,
+                                struct lflow_table *lflow_table,
+                                struct ovsdb_idl_txn *ovnsb_txn,
+                                const struct ovn_datapaths *ls_datapaths,
+                                const struct ovn_datapaths *lr_datapaths,
+                                bool ovn_internal_version_changed,
+                                const struct sbrec_logical_flow_table *,
+                                const struct sbrec_logical_dp_group_table *);
+/* Syncs the lflows tracked by this lflow_ref to the SB database. */
+void lflow_ref_sync_lflows_to_sb(struct lflow_ref *,
+                             struct lflow_table *lflow_table,
+                             struct ovsdb_idl_txn *ovnsb_txn,
+                             const struct ovn_datapaths *ls_datapaths,
+                             const struct ovn_datapaths *lr_datapaths,
+                             bool ovn_internal_version_changed,
+                             const struct sbrec_logical_flow_table *,
+                             const struct sbrec_logical_dp_group_table *);
+
+
+void lflow_table_add_lflow(struct lflow_table *, const struct ovn_datapath *,
+ const unsigned long *dp_bitmap,
+ size_t dp_bitmap_len, enum ovn_stage stage,
+ uint16_t priority, const char *match,
+ const char *actions, const char *io_port,
+ const char *ctrl_meter,
+ const struct ovsdb_idl_row *stage_hint,
+ const char *where, struct lflow_ref *);
+void lflow_table_add_lflow_default_drop(struct lflow_table *,
+ const struct ovn_datapath *,
+ enum ovn_stage stage,
+ const char *where,
+ struct lflow_ref *);
+
+/* Adds a row with the specified contents to the Logical_Flow table. */
+#define ovn_lflow_add_with_hint__(LFLOW_TABLE, OD, STAGE, PRIORITY, MATCH, \
+ ACTIONS, IN_OUT_PORT, CTRL_METER, \
+ STAGE_HINT) \
+ lflow_table_add_lflow(LFLOW_TABLE, OD, NULL, 0, STAGE, PRIORITY, MATCH, \
+ ACTIONS, IN_OUT_PORT, CTRL_METER, STAGE_HINT, \
+ OVS_SOURCE_LOCATOR, NULL)
+
+#define ovn_lflow_add_with_lflow_ref_hint__(LFLOW_TABLE, OD, STAGE, PRIORITY, \
+ MATCH, ACTIONS, IN_OUT_PORT, \
+ CTRL_METER, STAGE_HINT, LFLOW_REF)\
+ lflow_table_add_lflow(LFLOW_TABLE, OD, NULL, 0, STAGE, PRIORITY, MATCH, \
+ ACTIONS, IN_OUT_PORT, CTRL_METER, STAGE_HINT, \
+ OVS_SOURCE_LOCATOR, LFLOW_REF)
+
+#define ovn_lflow_add_with_hint(LFLOW_TABLE, OD, STAGE, PRIORITY, MATCH, \
+ ACTIONS, STAGE_HINT) \
+ lflow_table_add_lflow(LFLOW_TABLE, OD, NULL, 0, STAGE, PRIORITY, MATCH, \
+ ACTIONS, NULL, NULL, STAGE_HINT, \
+ OVS_SOURCE_LOCATOR, NULL)
+
+#define ovn_lflow_add_with_lflow_ref_hint(LFLOW_TABLE, OD, STAGE, PRIORITY, \
+ MATCH, ACTIONS, STAGE_HINT, \
+ LFLOW_REF) \
+ lflow_table_add_lflow(LFLOW_TABLE, OD, NULL, 0, STAGE, PRIORITY, MATCH, \
+ ACTIONS, NULL, NULL, STAGE_HINT, \
+ OVS_SOURCE_LOCATOR, LFLOW_REF)
+
+#define ovn_lflow_add_with_dp_group(LFLOW_TABLE, DP_BITMAP, DP_BITMAP_LEN, \
+ STAGE, PRIORITY, MATCH, ACTIONS, \
+ STAGE_HINT) \
+ lflow_table_add_lflow(LFLOW_TABLE, NULL, DP_BITMAP, DP_BITMAP_LEN, STAGE, \
+ PRIORITY, MATCH, ACTIONS, NULL, NULL, STAGE_HINT, \
+ OVS_SOURCE_LOCATOR, NULL)
+
+#define ovn_lflow_add_default_drop(LFLOW_TABLE, OD, STAGE) \
+ lflow_table_add_lflow_default_drop(LFLOW_TABLE, OD, STAGE, \
+ OVS_SOURCE_LOCATOR, NULL)
+
+
+/* This macro is similar to ovn_lflow_add_with_hint, except that it requires
+ * the IN_OUT_PORT argument, which tells the lport name that appears in the
+ * MATCH, which helps ovn-controller to bypass lflows parsing when the lport is
+ * not local to the chassis. The criteria of the lport to be added using this
+ * argument:
+ *
+ * - For ingress pipeline, the lport that is used to match "inport".
+ * - For egress pipeline, the lport that is used to match "outport".
+ *
+ * For now, only LS pipelines should use this macro. */
+#define ovn_lflow_add_with_lport_and_hint(LFLOW_TABLE, OD, STAGE, PRIORITY, \
+                                          MATCH, ACTIONS, IN_OUT_PORT, \
+                                          STAGE_HINT, LFLOW_REF) \
+    lflow_table_add_lflow(LFLOW_TABLE, OD, NULL, 0, STAGE, PRIORITY, MATCH, \
+                          ACTIONS, IN_OUT_PORT, NULL, STAGE_HINT, \
+                          OVS_SOURCE_LOCATOR, LFLOW_REF)
+
+#define ovn_lflow_add(LFLOW_TABLE, OD, STAGE, PRIORITY, MATCH, ACTIONS) \
+ lflow_table_add_lflow(LFLOW_TABLE, OD, NULL, 0, STAGE, PRIORITY, MATCH, \
+ ACTIONS, NULL, NULL, NULL, OVS_SOURCE_LOCATOR, NULL)
+
+#define ovn_lflow_add_with_lflow_ref(LFLOW_TABLE, OD, STAGE, PRIORITY, MATCH, \
+ ACTIONS, LFLOW_REF) \
+ lflow_table_add_lflow(LFLOW_TABLE, OD, NULL, 0, STAGE, PRIORITY, MATCH, \
+ ACTIONS, NULL, NULL, NULL, OVS_SOURCE_LOCATOR, \
+ LFLOW_REF)
+
+#define ovn_lflow_metered(LFLOW_TABLE, OD, STAGE, PRIORITY, MATCH, ACTIONS, \
+ CTRL_METER) \
+ ovn_lflow_add_with_hint__(LFLOW_TABLE, OD, STAGE, PRIORITY, MATCH, \
+ ACTIONS, NULL, CTRL_METER, NULL)
+
+struct sbrec_logical_dp_group;
+
+/* A group of datapaths that a set of logical flows applies to, mirrored
+ * by a Southbound Logical_DP_Group row. */
+struct ovn_dp_group {
+    unsigned long *bitmap;      /* One bit per datapath index in the group. */
+    const struct sbrec_logical_dp_group *dp_group; /* SB row, if any. */
+    struct uuid dpg_uuid;       /* UUID of the SB row. */
+    struct hmap_node node;
+    size_t refcnt;              /* Reference count; group is freed at 0. */
+};
+
+static inline void
+ovn_dp_groups_init(struct hmap *dp_groups)
+{
+    hmap_init(dp_groups);
+}
+
+void ovn_dp_groups_destroy(struct hmap *dp_groups);
+struct ovn_dp_group *ovn_dp_group_get_or_create(
+    struct ovsdb_idl_txn *ovnsb_txn, struct hmap *dp_groups,
+    struct sbrec_logical_dp_group *sb_group,
+    size_t desired_n, const unsigned long *desired_bitmap,
+    size_t bitmap_len, bool is_switch,
+    const struct ovn_datapaths *ls_datapaths,
+    const struct ovn_datapaths *lr_datapaths);
+
+#endif /* LFLOW_MGR_H */
@@ -40,6 +40,7 @@
#include "lib/ovn-sb-idl.h"
#include "lib/ovn-util.h"
#include "lib/lb.h"
+#include "lflow-mgr.h"
#include "memory.h"
#include "northd.h"
#include "en-lb-data.h"
@@ -67,7 +68,7 @@
VLOG_DEFINE_THIS_MODULE(northd);
static bool controller_event_en;
-static bool lflow_hash_lock_initialized = false;
+
static bool check_lsp_is_up;
@@ -96,116 +97,6 @@ static bool default_acl_drop;
#define MAX_OVN_TAGS 4096
-/* Pipeline stages. */
-
-/* The two purposes for which ovn-northd uses OVN logical datapaths. */
-enum ovn_datapath_type {
- DP_SWITCH, /* OVN logical switch. */
- DP_ROUTER /* OVN logical router. */
-};
-
-/* Returns an "enum ovn_stage" built from the arguments.
- *
- * (It's better to use ovn_stage_build() for type-safety reasons, but inline
- * functions can't be used in enums or switch cases.) */
-#define OVN_STAGE_BUILD(DP_TYPE, PIPELINE, TABLE) \
- (((DP_TYPE) << 9) | ((PIPELINE) << 8) | (TABLE))
-
-/* A stage within an OVN logical switch or router.
- *
- * An "enum ovn_stage" indicates whether the stage is part of a logical switch
- * or router, whether the stage is part of the ingress or egress pipeline, and
- * the table within that pipeline. The first three components are combined to
- * form the stage's full name, e.g. S_SWITCH_IN_PORT_SEC_L2,
- * S_ROUTER_OUT_DELIVERY. */
-enum ovn_stage {
-#define PIPELINE_STAGES \
- /* Logical switch ingress stages. */ \
- PIPELINE_STAGE(SWITCH, IN, CHECK_PORT_SEC, 0, "ls_in_check_port_sec") \
- PIPELINE_STAGE(SWITCH, IN, APPLY_PORT_SEC, 1, "ls_in_apply_port_sec") \
- PIPELINE_STAGE(SWITCH, IN, LOOKUP_FDB , 2, "ls_in_lookup_fdb") \
- PIPELINE_STAGE(SWITCH, IN, PUT_FDB, 3, "ls_in_put_fdb") \
- PIPELINE_STAGE(SWITCH, IN, PRE_ACL, 4, "ls_in_pre_acl") \
- PIPELINE_STAGE(SWITCH, IN, PRE_LB, 5, "ls_in_pre_lb") \
- PIPELINE_STAGE(SWITCH, IN, PRE_STATEFUL, 6, "ls_in_pre_stateful") \
- PIPELINE_STAGE(SWITCH, IN, ACL_HINT, 7, "ls_in_acl_hint") \
- PIPELINE_STAGE(SWITCH, IN, ACL_EVAL, 8, "ls_in_acl_eval") \
- PIPELINE_STAGE(SWITCH, IN, ACL_ACTION, 9, "ls_in_acl_action") \
- PIPELINE_STAGE(SWITCH, IN, QOS_MARK, 10, "ls_in_qos_mark") \
- PIPELINE_STAGE(SWITCH, IN, QOS_METER, 11, "ls_in_qos_meter") \
- PIPELINE_STAGE(SWITCH, IN, LB_AFF_CHECK, 12, "ls_in_lb_aff_check") \
- PIPELINE_STAGE(SWITCH, IN, LB, 13, "ls_in_lb") \
- PIPELINE_STAGE(SWITCH, IN, LB_AFF_LEARN, 14, "ls_in_lb_aff_learn") \
- PIPELINE_STAGE(SWITCH, IN, PRE_HAIRPIN, 15, "ls_in_pre_hairpin") \
- PIPELINE_STAGE(SWITCH, IN, NAT_HAIRPIN, 16, "ls_in_nat_hairpin") \
- PIPELINE_STAGE(SWITCH, IN, HAIRPIN, 17, "ls_in_hairpin") \
- PIPELINE_STAGE(SWITCH, IN, ACL_AFTER_LB_EVAL, 18, \
- "ls_in_acl_after_lb_eval") \
- PIPELINE_STAGE(SWITCH, IN, ACL_AFTER_LB_ACTION, 19, \
- "ls_in_acl_after_lb_action") \
- PIPELINE_STAGE(SWITCH, IN, STATEFUL, 20, "ls_in_stateful") \
- PIPELINE_STAGE(SWITCH, IN, ARP_ND_RSP, 21, "ls_in_arp_rsp") \
- PIPELINE_STAGE(SWITCH, IN, DHCP_OPTIONS, 22, "ls_in_dhcp_options") \
- PIPELINE_STAGE(SWITCH, IN, DHCP_RESPONSE, 23, "ls_in_dhcp_response") \
- PIPELINE_STAGE(SWITCH, IN, DNS_LOOKUP, 24, "ls_in_dns_lookup") \
- PIPELINE_STAGE(SWITCH, IN, DNS_RESPONSE, 25, "ls_in_dns_response") \
- PIPELINE_STAGE(SWITCH, IN, EXTERNAL_PORT, 26, "ls_in_external_port") \
- PIPELINE_STAGE(SWITCH, IN, L2_LKUP, 27, "ls_in_l2_lkup") \
- PIPELINE_STAGE(SWITCH, IN, L2_UNKNOWN, 28, "ls_in_l2_unknown") \
- \
- /* Logical switch egress stages. */ \
- PIPELINE_STAGE(SWITCH, OUT, PRE_ACL, 0, "ls_out_pre_acl") \
- PIPELINE_STAGE(SWITCH, OUT, PRE_LB, 1, "ls_out_pre_lb") \
- PIPELINE_STAGE(SWITCH, OUT, PRE_STATEFUL, 2, "ls_out_pre_stateful") \
- PIPELINE_STAGE(SWITCH, OUT, ACL_HINT, 3, "ls_out_acl_hint") \
- PIPELINE_STAGE(SWITCH, OUT, ACL_EVAL, 4, "ls_out_acl_eval") \
- PIPELINE_STAGE(SWITCH, OUT, ACL_ACTION, 5, "ls_out_acl_action") \
- PIPELINE_STAGE(SWITCH, OUT, QOS_MARK, 6, "ls_out_qos_mark") \
- PIPELINE_STAGE(SWITCH, OUT, QOS_METER, 7, "ls_out_qos_meter") \
- PIPELINE_STAGE(SWITCH, OUT, STATEFUL, 8, "ls_out_stateful") \
- PIPELINE_STAGE(SWITCH, OUT, CHECK_PORT_SEC, 9, "ls_out_check_port_sec") \
- PIPELINE_STAGE(SWITCH, OUT, APPLY_PORT_SEC, 10, "ls_out_apply_port_sec") \
- \
- /* Logical router ingress stages. */ \
- PIPELINE_STAGE(ROUTER, IN, ADMISSION, 0, "lr_in_admission") \
- PIPELINE_STAGE(ROUTER, IN, LOOKUP_NEIGHBOR, 1, "lr_in_lookup_neighbor") \
- PIPELINE_STAGE(ROUTER, IN, LEARN_NEIGHBOR, 2, "lr_in_learn_neighbor") \
- PIPELINE_STAGE(ROUTER, IN, IP_INPUT, 3, "lr_in_ip_input") \
- PIPELINE_STAGE(ROUTER, IN, UNSNAT, 4, "lr_in_unsnat") \
- PIPELINE_STAGE(ROUTER, IN, DEFRAG, 5, "lr_in_defrag") \
- PIPELINE_STAGE(ROUTER, IN, LB_AFF_CHECK, 6, "lr_in_lb_aff_check") \
- PIPELINE_STAGE(ROUTER, IN, DNAT, 7, "lr_in_dnat") \
- PIPELINE_STAGE(ROUTER, IN, LB_AFF_LEARN, 8, "lr_in_lb_aff_learn") \
- PIPELINE_STAGE(ROUTER, IN, ECMP_STATEFUL, 9, "lr_in_ecmp_stateful") \
- PIPELINE_STAGE(ROUTER, IN, ND_RA_OPTIONS, 10, "lr_in_nd_ra_options") \
- PIPELINE_STAGE(ROUTER, IN, ND_RA_RESPONSE, 11, "lr_in_nd_ra_response") \
- PIPELINE_STAGE(ROUTER, IN, IP_ROUTING_PRE, 12, "lr_in_ip_routing_pre") \
- PIPELINE_STAGE(ROUTER, IN, IP_ROUTING, 13, "lr_in_ip_routing") \
- PIPELINE_STAGE(ROUTER, IN, IP_ROUTING_ECMP, 14, "lr_in_ip_routing_ecmp") \
- PIPELINE_STAGE(ROUTER, IN, POLICY, 15, "lr_in_policy") \
- PIPELINE_STAGE(ROUTER, IN, POLICY_ECMP, 16, "lr_in_policy_ecmp") \
- PIPELINE_STAGE(ROUTER, IN, ARP_RESOLVE, 17, "lr_in_arp_resolve") \
- PIPELINE_STAGE(ROUTER, IN, CHK_PKT_LEN, 18, "lr_in_chk_pkt_len") \
- PIPELINE_STAGE(ROUTER, IN, LARGER_PKTS, 19, "lr_in_larger_pkts") \
- PIPELINE_STAGE(ROUTER, IN, GW_REDIRECT, 20, "lr_in_gw_redirect") \
- PIPELINE_STAGE(ROUTER, IN, ARP_REQUEST, 21, "lr_in_arp_request") \
- \
- /* Logical router egress stages. */ \
- PIPELINE_STAGE(ROUTER, OUT, CHECK_DNAT_LOCAL, 0, \
- "lr_out_chk_dnat_local") \
- PIPELINE_STAGE(ROUTER, OUT, UNDNAT, 1, "lr_out_undnat") \
- PIPELINE_STAGE(ROUTER, OUT, POST_UNDNAT, 2, "lr_out_post_undnat") \
- PIPELINE_STAGE(ROUTER, OUT, SNAT, 3, "lr_out_snat") \
- PIPELINE_STAGE(ROUTER, OUT, POST_SNAT, 4, "lr_out_post_snat") \
- PIPELINE_STAGE(ROUTER, OUT, EGR_LOOP, 5, "lr_out_egr_loop") \
- PIPELINE_STAGE(ROUTER, OUT, DELIVERY, 6, "lr_out_delivery")
-
-#define PIPELINE_STAGE(DP_TYPE, PIPELINE, STAGE, TABLE, NAME) \
- S_##DP_TYPE##_##PIPELINE##_##STAGE \
- = OVN_STAGE_BUILD(DP_##DP_TYPE, P_##PIPELINE, TABLE),
- PIPELINE_STAGES
-#undef PIPELINE_STAGE
-};
/* Due to various hard-coded priorities need to implement ACLs, the
* northbound database supports a smaller range of ACL priorities than
@@ -390,51 +281,9 @@ enum ovn_stage {
#define ROUTE_PRIO_OFFSET_STATIC 1
#define ROUTE_PRIO_OFFSET_CONNECTED 2
-/* Returns an "enum ovn_stage" built from the arguments. */
-static enum ovn_stage
-ovn_stage_build(enum ovn_datapath_type dp_type, enum ovn_pipeline pipeline,
- uint8_t table)
-{
- return OVN_STAGE_BUILD(dp_type, pipeline, table);
-}
-
-/* Returns the pipeline to which 'stage' belongs. */
-static enum ovn_pipeline
-ovn_stage_get_pipeline(enum ovn_stage stage)
-{
- return (stage >> 8) & 1;
-}
-
-/* Returns the pipeline name to which 'stage' belongs. */
-static const char *
-ovn_stage_get_pipeline_name(enum ovn_stage stage)
-{
- return ovn_stage_get_pipeline(stage) == P_IN ? "ingress" : "egress";
-}
-
-/* Returns the table to which 'stage' belongs. */
-static uint8_t
-ovn_stage_get_table(enum ovn_stage stage)
-{
- return stage & 0xff;
-}
-
-/* Returns a string name for 'stage'. */
-static const char *
-ovn_stage_to_str(enum ovn_stage stage)
-{
- switch (stage) {
-#define PIPELINE_STAGE(DP_TYPE, PIPELINE, STAGE, TABLE, NAME) \
- case S_##DP_TYPE##_##PIPELINE##_##STAGE: return NAME;
- PIPELINE_STAGES
-#undef PIPELINE_STAGE
- default: return "<unknown>";
- }
-}
-
/* Returns the type of the datapath to which a flow with the given 'stage' may
* be added. */
-static enum ovn_datapath_type
+enum ovn_datapath_type
ovn_stage_to_datapath_type(enum ovn_stage stage)
{
switch (stage) {
@@ -679,13 +528,6 @@ ovn_datapath_destroy(struct hmap *datapaths, struct ovn_datapath *od)
}
}
-/* Returns 'od''s datapath type. */
-static enum ovn_datapath_type
-ovn_datapath_get_type(const struct ovn_datapath *od)
-{
- return od->nbs ? DP_SWITCH : DP_ROUTER;
-}
-
static struct ovn_datapath *
ovn_datapath_find_(const struct hmap *datapaths,
const struct uuid *uuid)
@@ -721,13 +563,7 @@ ovn_datapath_find_by_key(struct hmap *datapaths, uint32_t dp_key)
return NULL;
}
-static bool
-ovn_datapath_is_stale(const struct ovn_datapath *od)
-{
- return !od->nbr && !od->nbs;
-}
-
-static struct ovn_datapath *
+struct ovn_datapath *
ovn_datapath_from_sbrec(const struct hmap *ls_datapaths,
const struct hmap *lr_datapaths,
const struct sbrec_datapath_binding *sb)
@@ -1290,19 +1126,6 @@ struct ovn_port_routable_addresses {
size_t n_addrs;
};
-/* A node that maintains link between an object (such as an ovn_port) and
- * a lflow. */
-struct lflow_ref_node {
- /* This list follows different lflows referenced by the same object. List
- * head is, for example, ovn_port->lflows. */
- struct ovs_list lflow_list_node;
- /* This list follows different objects that reference the same lflow. List
- * head is ovn_lflow->referenced_by. */
- struct ovs_list ref_list_node;
- /* The lflow. */
- struct ovn_lflow *lflow;
-};
-
static bool lsp_can_be_inc_processed(const struct nbrec_logical_switch_port *);
static bool
@@ -1382,6 +1205,8 @@ ovn_port_set_nb(struct ovn_port *op,
init_mcast_port_info(&op->mcast_info, op->nbsp, op->nbrp);
}
+static bool lsp_is_router(const struct nbrec_logical_switch_port *nbsp);
+
static struct ovn_port *
ovn_port_create(struct hmap *ports, const char *key,
const struct nbrec_logical_switch_port *nbsp,
@@ -1400,12 +1225,22 @@ ovn_port_create(struct hmap *ports, const char *key,
op->l3dgw_port = op->cr_port = NULL;
hmap_insert(ports, &op->key_node, hash_string(op->key, 0));
- ovs_list_init(&op->lflows);
+ op->lflow_ref = lflow_ref_alloc(key);
+ op->lbnat_lflow_ref = lflow_ref_alloc(key);
+
return op;
}
static void
-ovn_port_destroy_orphan(struct ovn_port *port)
+ovn_port_set_od(struct ovn_port *op, struct ovn_datapath *od)
+{
+ op->od = od;
+ lflow_ref_set_od(op->lflow_ref, od);
+ lflow_ref_set_od(op->lbnat_lflow_ref, od);
+}
+
+static void
+ovn_port_cleanup(struct ovn_port *port)
{
if (port->tunnel_key) {
ovs_assert(port->od);
@@ -1415,6 +1250,8 @@ ovn_port_destroy_orphan(struct ovn_port *port)
destroy_lport_addresses(&port->lsp_addrs[i]);
}
free(port->lsp_addrs);
+ port->n_lsp_addrs = 0;
+ port->lsp_addrs = NULL;
if (port->peer) {
port->peer->peer = NULL;
@@ -1424,18 +1261,22 @@ ovn_port_destroy_orphan(struct ovn_port *port)
destroy_lport_addresses(&port->ps_addrs[i]);
}
free(port->ps_addrs);
+ port->ps_addrs = NULL;
+ port->n_ps_addrs = 0;
destroy_lport_addresses(&port->lrp_networks);
destroy_lport_addresses(&port->proxy_arp_addrs);
+}
+
+static void
+ovn_port_destroy_orphan(struct ovn_port *port)
+{
+ ovn_port_cleanup(port);
free(port->json_key);
free(port->key);
+ lflow_ref_destroy(port->lflow_ref);
+ lflow_ref_destroy(port->lbnat_lflow_ref);
- struct lflow_ref_node *l;
- LIST_FOR_EACH_SAFE (l, lflow_list_node, &port->lflows) {
- ovs_list_remove(&l->lflow_list_node);
- ovs_list_remove(&l->ref_list_node);
- free(l);
- }
free(port);
}
@@ -2364,7 +2205,7 @@ join_logical_ports(const struct sbrec_port_binding_table *sbrec_pb_table,
parse_lsp_addrs(op);
- op->od = od;
+ ovn_port_set_od(op, od);
if (op->has_unknown) {
od->has_unknown = true;
}
@@ -2415,7 +2256,7 @@ join_logical_ports(const struct sbrec_port_binding_table *sbrec_pb_table,
}
op->lrp_networks = lrp_networks;
- op->od = od;
+ ovn_port_set_od(op, od);
hmap_insert(&od->ports, &op->dp_node,
hmap_node_hash(&op->key_node));
@@ -2455,7 +2296,7 @@ join_logical_ports(const struct sbrec_port_binding_table *sbrec_pb_table,
}
crp->l3dgw_port = op;
op->cr_port = crp;
- crp->od = od;
+ ovn_port_set_od(crp, od);
free(redirect_name);
/* Add to l3dgw_ports in od, for later use during flow
@@ -3898,124 +3739,6 @@ build_lb_port_related_data(
build_lswitch_lbs_from_lrouter(lr_datapaths, lb_dps_map, lb_group_dps_map);
}
-
-struct ovn_dp_group {
- unsigned long *bitmap;
- struct sbrec_logical_dp_group *dp_group;
- struct hmap_node node;
-};
-
-static struct ovn_dp_group *
-ovn_dp_group_find(const struct hmap *dp_groups,
- const unsigned long *dpg_bitmap, size_t bitmap_len,
- uint32_t hash)
-{
- struct ovn_dp_group *dpg;
-
- HMAP_FOR_EACH_WITH_HASH (dpg, node, hash, dp_groups) {
- if (bitmap_equal(dpg->bitmap, dpg_bitmap, bitmap_len)) {
- return dpg;
- }
- }
- return NULL;
-}
-
-static struct sbrec_logical_dp_group *
-ovn_sb_insert_or_update_logical_dp_group(
- struct ovsdb_idl_txn *ovnsb_txn,
- struct sbrec_logical_dp_group *dp_group,
- const unsigned long *dpg_bitmap,
- const struct ovn_datapaths *datapaths)
-{
- const struct sbrec_datapath_binding **sb;
- size_t n = 0, index;
-
- sb = xmalloc(bitmap_count1(dpg_bitmap, ods_size(datapaths)) * sizeof *sb);
- BITMAP_FOR_EACH_1 (index, ods_size(datapaths), dpg_bitmap) {
- sb[n++] = datapaths->array[index]->sb;
- }
- if (!dp_group) {
- dp_group = sbrec_logical_dp_group_insert(ovnsb_txn);
- }
- sbrec_logical_dp_group_set_datapaths(
- dp_group, (struct sbrec_datapath_binding **) sb, n);
- free(sb);
-
- return dp_group;
-}
-
-/* Given a desired bitmap, finds a datapath group in 'dp_groups'. If it
- * doesn't exist, creates a new one and adds it to 'dp_groups'.
- * If 'sb_group' is provided, function will try to re-use this group by
- * either taking it directly, or by modifying, if it's not already in use. */
-static struct ovn_dp_group *
-ovn_dp_group_get_or_create(struct ovsdb_idl_txn *ovnsb_txn,
- struct hmap *dp_groups,
- struct sbrec_logical_dp_group *sb_group,
- size_t desired_n,
- const unsigned long *desired_bitmap,
- size_t bitmap_len,
- bool is_switch,
- const struct ovn_datapaths *ls_datapaths,
- const struct ovn_datapaths *lr_datapaths)
-{
- struct ovn_dp_group *dpg;
- uint32_t hash;
-
- hash = hash_int(desired_n, 0);
- dpg = ovn_dp_group_find(dp_groups, desired_bitmap, bitmap_len, hash);
- if (dpg) {
- return dpg;
- }
-
- bool update_dp_group = false, can_modify = false;
- unsigned long *dpg_bitmap;
- size_t i, n = 0;
-
- dpg_bitmap = sb_group ? bitmap_allocate(bitmap_len) : NULL;
- for (i = 0; sb_group && i < sb_group->n_datapaths; i++) {
- struct ovn_datapath *datapath_od;
-
- datapath_od = ovn_datapath_from_sbrec(
- ls_datapaths ? &ls_datapaths->datapaths : NULL,
- lr_datapaths ? &lr_datapaths->datapaths : NULL,
- sb_group->datapaths[i]);
- if (!datapath_od || ovn_datapath_is_stale(datapath_od)) {
- break;
- }
- bitmap_set1(dpg_bitmap, datapath_od->index);
- n++;
- }
- if (!sb_group || i != sb_group->n_datapaths) {
- /* No group or stale group. Not going to be used. */
- update_dp_group = true;
- can_modify = true;
- } else if (!bitmap_equal(dpg_bitmap, desired_bitmap, bitmap_len)) {
- /* The group in Sb is different. */
- update_dp_group = true;
- /* We can modify existing group if it's not already in use. */
- can_modify = !ovn_dp_group_find(dp_groups, dpg_bitmap,
- bitmap_len, hash_int(n, 0));
- }
-
- bitmap_free(dpg_bitmap);
-
- dpg = xzalloc(sizeof *dpg);
- dpg->bitmap = bitmap_clone(desired_bitmap, bitmap_len);
- if (!update_dp_group) {
- dpg->dp_group = sb_group;
- } else {
- dpg->dp_group = ovn_sb_insert_or_update_logical_dp_group(
- ovnsb_txn,
- can_modify ? sb_group : NULL,
- desired_bitmap,
- is_switch ? ls_datapaths : lr_datapaths);
- }
- hmap_insert(dp_groups, &dpg->node, hash);
-
- return dpg;
-}
-
struct sb_lb {
struct hmap_node hmap_node;
@@ -4873,28 +4596,20 @@ ovn_port_find_in_datapath(struct ovn_datapath *od, const char *name)
return NULL;
}
-static struct ovn_port *
-ls_port_create(struct ovsdb_idl_txn *ovnsb_txn, struct hmap *ls_ports,
- const char *key, const struct nbrec_logical_switch_port *nbsp,
- struct ovn_datapath *od, const struct sbrec_port_binding *sb,
- struct ovs_list *lflows,
- const struct sbrec_mirror_table *sbrec_mirror_table,
- const struct sbrec_chassis_table *sbrec_chassis_table,
- struct ovsdb_idl_index *sbrec_chassis_by_name,
- struct ovsdb_idl_index *sbrec_chassis_by_hostname)
-{
- struct ovn_port *op = ovn_port_create(ls_ports, key, nbsp, NULL,
- NULL);
+static bool
+ls_port_init(struct ovn_port *op, struct ovsdb_idl_txn *ovnsb_txn,
+ struct hmap *ls_ports, struct ovn_datapath *od,
+ const struct sbrec_port_binding *sb,
+ const struct sbrec_mirror_table *sbrec_mirror_table,
+ const struct sbrec_chassis_table *sbrec_chassis_table,
+ struct ovsdb_idl_index *sbrec_chassis_by_name,
+ struct ovsdb_idl_index *sbrec_chassis_by_hostname)
+{
+ ovn_port_set_od(op, od);
parse_lsp_addrs(op);
- op->od = od;
- hmap_insert(&od->ports, &op->dp_node, hmap_node_hash(&op->key_node));
- if (lflows) {
- ovs_list_splice(&op->lflows, lflows->next, lflows);
- }
-
/* Assign explicitly requested tunnel ids first. */
if (!ovn_port_assign_requested_tnl_id(sbrec_chassis_table, op)) {
- return NULL;
+ return false;
}
if (sb) {
op->sb = sb;
@@ -4911,14 +4626,57 @@ ls_port_create(struct ovsdb_idl_txn *ovnsb_txn, struct hmap *ls_ports,
}
/* Assign new tunnel ids where needed. */
if (!ovn_port_allocate_key(sbrec_chassis_table, ls_ports, op)) {
- return NULL;
+ return false;
}
ovn_port_update_sbrec(ovnsb_txn, sbrec_chassis_by_name,
sbrec_chassis_by_hostname, NULL, sbrec_mirror_table,
op, NULL, NULL);
+ return true;
+}
+
+static struct ovn_port *
+ls_port_create(struct ovsdb_idl_txn *ovnsb_txn, struct hmap *ls_ports,
+ const char *key, const struct nbrec_logical_switch_port *nbsp,
+ struct ovn_datapath *od, const struct sbrec_port_binding *sb,
+ const struct sbrec_mirror_table *sbrec_mirror_table,
+ const struct sbrec_chassis_table *sbrec_chassis_table,
+ struct ovsdb_idl_index *sbrec_chassis_by_name,
+ struct ovsdb_idl_index *sbrec_chassis_by_hostname)
+{
+ struct ovn_port *op = ovn_port_create(ls_ports, key, nbsp, NULL,
+ NULL);
+ hmap_insert(&od->ports, &op->dp_node, hmap_node_hash(&op->key_node));
+ if (!ls_port_init(op, ovnsb_txn, ls_ports, od, sb,
+ sbrec_mirror_table, sbrec_chassis_table,
+ sbrec_chassis_by_name, sbrec_chassis_by_hostname)) {
+ ovn_port_destroy(ls_ports, op);
+ return NULL;
+ }
+
return op;
}
+static bool
+ls_port_reinit(struct ovn_port *op, struct ovsdb_idl_txn *ovnsb_txn,
+ struct hmap *ls_ports,
+ const struct nbrec_logical_switch_port *nbsp,
+ const struct nbrec_logical_router_port *nbrp,
+ struct ovn_datapath *od,
+ const struct sbrec_port_binding *sb,
+ const struct sbrec_mirror_table *sbrec_mirror_table,
+ const struct sbrec_chassis_table *sbrec_chassis_table,
+ struct ovsdb_idl_index *sbrec_chassis_by_name,
+ struct ovsdb_idl_index *sbrec_chassis_by_hostname)
+{
+ ovn_port_cleanup(op);
+ op->sb = sb;
+ ovn_port_set_nb(op, nbsp, nbrp);
+ op->l3dgw_port = op->cr_port = NULL;
+ return ls_port_init(op, ovnsb_txn, ls_ports, od, sb,
+ sbrec_mirror_table, sbrec_chassis_table,
+ sbrec_chassis_by_name, sbrec_chassis_by_hostname);
+}
+
/* Returns true if the logical switch has changes which can be
* incrementally handled.
* Presently supports i-p for the below changes:
@@ -5058,7 +4816,7 @@ ls_handle_lsp_changes(struct ovsdb_idl_txn *ovnsb_idl_txn,
goto fail;
}
op = ls_port_create(ovnsb_idl_txn, &nd->ls_ports,
- new_nbsp->name, new_nbsp, od, NULL, NULL,
+ new_nbsp->name, new_nbsp, od, NULL,
ni->sbrec_mirror_table,
ni->sbrec_chassis_table,
ni->sbrec_chassis_by_name,
@@ -5089,19 +4847,15 @@ ls_handle_lsp_changes(struct ovsdb_idl_txn *ovnsb_idl_txn,
op->visited = true;
continue;
}
- struct ovs_list lflows = OVS_LIST_INITIALIZER(&lflows);
- ovs_list_splice(&lflows, op->lflows.next, &op->lflows);
- ovn_port_destroy(&nd->ls_ports, op);
- op = ls_port_create(ovnsb_idl_txn, &nd->ls_ports,
- new_nbsp->name, new_nbsp, od, sb, &lflows,
- ni->sbrec_mirror_table,
+ if (!ls_port_reinit(op, ovnsb_idl_txn, &nd->ls_ports,
+ new_nbsp, NULL,
+ od, sb, ni->sbrec_mirror_table,
ni->sbrec_chassis_table,
ni->sbrec_chassis_by_name,
- ni->sbrec_chassis_by_hostname);
- ovs_assert(ovs_list_is_empty(&lflows));
- if (!op) {
+ ni->sbrec_chassis_by_hostname)) {
goto fail;
}
+
add_op_to_northd_tracked_ports(&trk_ports->updated, op);
}
op->visited = true;
@@ -5979,170 +5733,7 @@ ovn_igmp_group_destroy(struct hmap *igmp_groups,
* function of most of the northbound database.
*/
-struct ovn_lflow {
- struct hmap_node hmap_node;
- struct ovs_list list_node; /* For temporary list of lflows. Don't remove
- at destroy. */
-
- struct ovn_datapath *od; /* 'logical_datapath' in SB schema. */
- unsigned long *dpg_bitmap; /* Bitmap of all datapaths by their 'index'.*/
- enum ovn_stage stage;
- uint16_t priority;
- char *match;
- char *actions;
- char *io_port;
- char *stage_hint;
- char *ctrl_meter;
- size_t n_ods; /* Number of datapaths referenced by 'od' and
- * 'dpg_bitmap'. */
- struct ovn_dp_group *dpg; /* Link to unique Sb datapath group. */
-
- struct ovs_list referenced_by; /* List of struct lflow_ref_node. */
- const char *where;
-
- struct uuid sb_uuid; /* SB DB row uuid, specified by northd. */
-};
-
-static void ovn_lflow_destroy(struct hmap *lflows, struct ovn_lflow *lflow);
-static struct ovn_lflow *ovn_lflow_find(const struct hmap *lflows,
- const struct ovn_datapath *od,
- enum ovn_stage stage,
- uint16_t priority, const char *match,
- const char *actions,
- const char *ctrl_meter, uint32_t hash);
-
-static char *
-ovn_lflow_hint(const struct ovsdb_idl_row *row)
-{
- if (!row) {
- return NULL;
- }
- return xasprintf("%08x", row->uuid.parts[0]);
-}
-
-static bool
-ovn_lflow_equal(const struct ovn_lflow *a, const struct ovn_datapath *od,
- enum ovn_stage stage, uint16_t priority, const char *match,
- const char *actions, const char *ctrl_meter)
-{
- return (a->od == od
- && a->stage == stage
- && a->priority == priority
- && !strcmp(a->match, match)
- && !strcmp(a->actions, actions)
- && nullable_string_is_equal(a->ctrl_meter, ctrl_meter));
-}
-
-enum {
- STATE_NULL, /* parallelization is off */
- STATE_INIT_HASH_SIZES, /* parallelization is on; hashes sizing needed */
- STATE_USE_PARALLELIZATION /* parallelization is on */
-};
-static int parallelization_state = STATE_NULL;
-
-static void
-ovn_lflow_init(struct ovn_lflow *lflow, struct ovn_datapath *od,
- size_t dp_bitmap_len, enum ovn_stage stage, uint16_t priority,
- char *match, char *actions, char *io_port, char *ctrl_meter,
- char *stage_hint, const char *where)
-{
- ovs_list_init(&lflow->list_node);
- ovs_list_init(&lflow->referenced_by);
- lflow->dpg_bitmap = bitmap_allocate(dp_bitmap_len);
- lflow->od = od;
- lflow->stage = stage;
- lflow->priority = priority;
- lflow->match = match;
- lflow->actions = actions;
- lflow->io_port = io_port;
- lflow->stage_hint = stage_hint;
- lflow->ctrl_meter = ctrl_meter;
- lflow->dpg = NULL;
- lflow->where = where;
- lflow->sb_uuid = UUID_ZERO;
-}
-
-/* The lflow_hash_lock is a mutex array that protects updates to the shared
- * lflow table across threads when parallel lflow build and dp-group are both
- * enabled. To avoid high contention between threads, a big array of mutexes
- * are used instead of just one. This is possible because when parallel build
- * is used we only use hmap_insert_fast() to update the hmap, which would not
- * touch the bucket array but only the list in a single bucket. We only need to
- * make sure that when adding lflows to the same hash bucket, the same lock is
- * used, so that no two threads can add to the bucket at the same time. It is
- * ok that the same lock is used to protect multiple buckets, so a fixed sized
- * mutex array is used instead of 1-1 mapping to the hash buckets. This
- * simplies the implementation while effectively reduces lock contention
- * because the chance that different threads contending the same lock amongst
- * the big number of locks is very low. */
-#define LFLOW_HASH_LOCK_MASK 0xFFFF
-static struct ovs_mutex lflow_hash_locks[LFLOW_HASH_LOCK_MASK + 1];
-
-static void
-lflow_hash_lock_init(void)
-{
- if (!lflow_hash_lock_initialized) {
- for (size_t i = 0; i < LFLOW_HASH_LOCK_MASK + 1; i++) {
- ovs_mutex_init(&lflow_hash_locks[i]);
- }
- lflow_hash_lock_initialized = true;
- }
-}
-
-static void
-lflow_hash_lock_destroy(void)
-{
- if (lflow_hash_lock_initialized) {
- for (size_t i = 0; i < LFLOW_HASH_LOCK_MASK + 1; i++) {
- ovs_mutex_destroy(&lflow_hash_locks[i]);
- }
- }
- lflow_hash_lock_initialized = false;
-}
-
-/* Full thread safety analysis is not possible with hash locks, because
- * they are taken conditionally based on the 'parallelization_state' and
- * a flow hash. Also, the order in which two hash locks are taken is not
- * predictable during the static analysis.
- *
- * Since the order of taking two locks depends on a random hash, to avoid
- * ABBA deadlocks, no two hash locks can be nested. In that sense an array
- * of hash locks is similar to a single mutex.
- *
- * Using a fake mutex to partially simulate thread safety restrictions, as
- * if it were actually a single mutex.
- *
- * OVS_NO_THREAD_SAFETY_ANALYSIS below allows us to ignore conditional
- * nature of the lock. Unlike other attributes, it applies to the
- * implementation and not to the interface. So, we can define a function
- * that acquires the lock without analysing the way it does that.
- */
-extern struct ovs_mutex fake_hash_mutex;
-
-static struct ovs_mutex *
-lflow_hash_lock(const struct hmap *lflow_map, uint32_t hash)
- OVS_ACQUIRES(fake_hash_mutex)
- OVS_NO_THREAD_SAFETY_ANALYSIS
-{
- struct ovs_mutex *hash_lock = NULL;
-
- if (parallelization_state == STATE_USE_PARALLELIZATION) {
- hash_lock =
- &lflow_hash_locks[hash & lflow_map->mask & LFLOW_HASH_LOCK_MASK];
- ovs_mutex_lock(hash_lock);
- }
- return hash_lock;
-}
-
-static void
-lflow_hash_unlock(struct ovs_mutex *hash_lock)
- OVS_RELEASES(fake_hash_mutex)
- OVS_NO_THREAD_SAFETY_ANALYSIS
-{
- if (hash_lock) {
- ovs_mutex_unlock(hash_lock);
- }
-}
+int parallelization_state = STATE_NULL;
/* This thread-local var is used for parallel lflow building when dp-groups is
@@ -6155,240 +5746,7 @@ lflow_hash_unlock(struct ovs_mutex *hash_lock)
* threads are collected to fix the lflow hmap's size (by the function
* fix_flow_map_size()).
* */
-static thread_local size_t thread_lflow_counter = 0;
-
-/* Adds an OVN datapath to a datapath group of existing logical flow.
- * Version to use when hash bucket locking is NOT required or the corresponding
- * hash lock is already taken. */
-static void
-ovn_dp_group_add_with_reference(struct ovn_lflow *lflow_ref,
- const struct ovn_datapath *od,
- const unsigned long *dp_bitmap,
- size_t bitmap_len)
- OVS_REQUIRES(fake_hash_mutex)
-{
- if (od) {
- bitmap_set1(lflow_ref->dpg_bitmap, od->index);
- }
- if (dp_bitmap) {
- bitmap_or(lflow_ref->dpg_bitmap, dp_bitmap, bitmap_len);
- }
-}
-
-/* This global variable collects the lflows generated by do_ovn_lflow_add().
- * start_collecting_lflows() will enable the lflow collection and the calls to
- * do_ovn_lflow_add (or the macros ovn_lflow_add_...) will add generated lflows
- * to the list. end_collecting_lflows() will disable it. */
-static thread_local struct ovs_list collected_lflows;
-static thread_local bool collecting_lflows = false;
-
-static void
-start_collecting_lflows(void)
-{
- ovs_assert(!collecting_lflows);
- ovs_list_init(&collected_lflows);
- collecting_lflows = true;
-}
-
-static void
-end_collecting_lflows(void)
-{
- ovs_assert(collecting_lflows);
- collecting_lflows = false;
-}
-
-/* Adds a row with the specified contents to the Logical_Flow table.
- * Version to use when hash bucket locking is NOT required. */
-static void
-do_ovn_lflow_add(struct hmap *lflow_map, const struct ovn_datapath *od,
- const unsigned long *dp_bitmap, size_t dp_bitmap_len,
- uint32_t hash, enum ovn_stage stage, uint16_t priority,
- const char *match, const char *actions, const char *io_port,
- const struct ovsdb_idl_row *stage_hint,
- const char *where, const char *ctrl_meter)
- OVS_REQUIRES(fake_hash_mutex)
-{
-
- struct ovn_lflow *old_lflow;
- struct ovn_lflow *lflow;
-
- size_t bitmap_len = od ? ods_size(od->datapaths) : dp_bitmap_len;
- ovs_assert(bitmap_len);
-
- if (collecting_lflows) {
- ovs_assert(od);
- ovs_assert(!dp_bitmap);
- } else {
- old_lflow = ovn_lflow_find(lflow_map, NULL, stage, priority, match,
- actions, ctrl_meter, hash);
- if (old_lflow) {
- ovn_dp_group_add_with_reference(old_lflow, od, dp_bitmap,
- bitmap_len);
- return;
- }
- }
-
- lflow = xmalloc(sizeof *lflow);
- /* While adding new logical flows we're not setting single datapath, but
- * collecting a group. 'od' will be updated later for all flows with only
- * one datapath in a group, so it could be hashed correctly. */
- ovn_lflow_init(lflow, NULL, bitmap_len, stage, priority,
- xstrdup(match), xstrdup(actions),
- io_port ? xstrdup(io_port) : NULL,
- nullable_xstrdup(ctrl_meter),
- ovn_lflow_hint(stage_hint), where);
-
- ovn_dp_group_add_with_reference(lflow, od, dp_bitmap, bitmap_len);
-
- if (parallelization_state != STATE_USE_PARALLELIZATION) {
- hmap_insert(lflow_map, &lflow->hmap_node, hash);
- } else {
- hmap_insert_fast(lflow_map, &lflow->hmap_node, hash);
- thread_lflow_counter++;
- }
-
- if (collecting_lflows) {
- ovs_list_insert(&collected_lflows, &lflow->list_node);
- }
-}
-
-/* Adds a row with the specified contents to the Logical_Flow table. */
-static void
-ovn_lflow_add_at(struct hmap *lflow_map, const struct ovn_datapath *od,
- const unsigned long *dp_bitmap, size_t dp_bitmap_len,
- enum ovn_stage stage, uint16_t priority,
- const char *match, const char *actions, const char *io_port,
- const char *ctrl_meter,
- const struct ovsdb_idl_row *stage_hint, const char *where)
- OVS_EXCLUDED(fake_hash_mutex)
-{
- struct ovs_mutex *hash_lock;
- uint32_t hash;
-
- ovs_assert(!od ||
- ovn_stage_to_datapath_type(stage) == ovn_datapath_get_type(od));
-
- hash = ovn_logical_flow_hash(ovn_stage_get_table(stage),
- ovn_stage_get_pipeline(stage),
- priority, match,
- actions);
-
- hash_lock = lflow_hash_lock(lflow_map, hash);
- do_ovn_lflow_add(lflow_map, od, dp_bitmap, dp_bitmap_len, hash, stage,
- priority, match, actions, io_port, stage_hint, where,
- ctrl_meter);
- lflow_hash_unlock(hash_lock);
-}
-
-static void
-__ovn_lflow_add_default_drop(struct hmap *lflow_map,
- struct ovn_datapath *od,
- enum ovn_stage stage,
- const char *where)
-{
- ovn_lflow_add_at(lflow_map, od, NULL, 0, stage, 0, "1",
- debug_drop_action(),
- NULL, NULL, NULL, where );
-}
-
-/* Adds a row with the specified contents to the Logical_Flow table. */
-#define ovn_lflow_add_with_hint__(LFLOW_MAP, OD, STAGE, PRIORITY, MATCH, \
- ACTIONS, IN_OUT_PORT, CTRL_METER, \
- STAGE_HINT) \
- ovn_lflow_add_at(LFLOW_MAP, OD, NULL, 0, STAGE, PRIORITY, MATCH, ACTIONS, \
- IN_OUT_PORT, CTRL_METER, STAGE_HINT, OVS_SOURCE_LOCATOR)
-
-#define ovn_lflow_add_with_hint(LFLOW_MAP, OD, STAGE, PRIORITY, MATCH, \
- ACTIONS, STAGE_HINT) \
- ovn_lflow_add_at(LFLOW_MAP, OD, NULL, 0, STAGE, PRIORITY, MATCH, ACTIONS, \
- NULL, NULL, STAGE_HINT, OVS_SOURCE_LOCATOR)
-
-#define ovn_lflow_add_with_dp_group(LFLOW_MAP, DP_BITMAP, DP_BITMAP_LEN, \
- STAGE, PRIORITY, MATCH, ACTIONS, \
- STAGE_HINT) \
- ovn_lflow_add_at(LFLOW_MAP, NULL, DP_BITMAP, DP_BITMAP_LEN, STAGE, \
- PRIORITY, MATCH, ACTIONS, NULL, NULL, STAGE_HINT, \
- OVS_SOURCE_LOCATOR)
-
-#define ovn_lflow_add_default_drop(LFLOW_MAP, OD, STAGE) \
- __ovn_lflow_add_default_drop(LFLOW_MAP, OD, STAGE, OVS_SOURCE_LOCATOR)
-
-
-/* This macro is similar to ovn_lflow_add_with_hint, except that it requires
- * the IN_OUT_PORT argument, which tells the lport name that appears in the
- * MATCH, which helps ovn-controller to bypass lflows parsing when the lport is
- * not local to the chassis. The critiera of the lport to be added using this
- * argument:
- *
- * - For ingress pipeline, the lport that is used to match "inport".
- * - For egress pipeline, the lport that is used to match "outport".
- *
- * For now, only LS pipelines should use this macro. */
-#define ovn_lflow_add_with_lport_and_hint(LFLOW_MAP, OD, STAGE, PRIORITY, \
- MATCH, ACTIONS, IN_OUT_PORT, \
- STAGE_HINT) \
- ovn_lflow_add_at(LFLOW_MAP, OD, NULL, 0, STAGE, PRIORITY, MATCH, ACTIONS, \
- IN_OUT_PORT, NULL, STAGE_HINT, OVS_SOURCE_LOCATOR)
-
-#define ovn_lflow_add(LFLOW_MAP, OD, STAGE, PRIORITY, MATCH, ACTIONS) \
- ovn_lflow_add_at(LFLOW_MAP, OD, NULL, 0, STAGE, PRIORITY, MATCH, ACTIONS, \
- NULL, NULL, NULL, OVS_SOURCE_LOCATOR)
-
-#define ovn_lflow_metered(LFLOW_MAP, OD, STAGE, PRIORITY, MATCH, ACTIONS, \
- CTRL_METER) \
- ovn_lflow_add_with_hint__(LFLOW_MAP, OD, STAGE, PRIORITY, MATCH, \
- ACTIONS, NULL, CTRL_METER, NULL)
-
-static struct ovn_lflow *
-ovn_lflow_find(const struct hmap *lflows, const struct ovn_datapath *od,
- enum ovn_stage stage, uint16_t priority,
- const char *match, const char *actions, const char *ctrl_meter,
- uint32_t hash)
-{
- struct ovn_lflow *lflow;
- HMAP_FOR_EACH_WITH_HASH (lflow, hmap_node, hash, lflows) {
- if (ovn_lflow_equal(lflow, od, stage, priority, match, actions,
- ctrl_meter)) {
- return lflow;
- }
- }
- return NULL;
-}
-
-static void
-ovn_lflow_destroy(struct hmap *lflows, struct ovn_lflow *lflow)
-{
- if (lflow) {
- if (lflows) {
- hmap_remove(lflows, &lflow->hmap_node);
- }
- bitmap_free(lflow->dpg_bitmap);
- free(lflow->match);
- free(lflow->actions);
- free(lflow->io_port);
- free(lflow->stage_hint);
- free(lflow->ctrl_meter);
- struct lflow_ref_node *l;
- LIST_FOR_EACH_SAFE (l, ref_list_node, &lflow->referenced_by) {
- ovs_list_remove(&l->lflow_list_node);
- ovs_list_remove(&l->ref_list_node);
- free(l);
- }
- free(lflow);
- }
-}
-
-static void
-link_ovn_port_to_lflows(struct ovn_port *op, struct ovs_list *lflows)
-{
- struct ovn_lflow *f;
- LIST_FOR_EACH (f, list_node, lflows) {
- struct lflow_ref_node *lfrn = xmalloc(sizeof *lfrn);
- lfrn->lflow = f;
- ovs_list_insert(&op->lflows, &lfrn->lflow_list_node);
- ovs_list_insert(&f->referenced_by, &lfrn->ref_list_node);
- }
-}
+thread_local size_t thread_lflow_counter = 0;
static bool
build_dhcpv4_action(struct ovn_port *op, ovs_be32 offer_ip,
@@ -6565,7 +5923,7 @@ build_dhcpv6_action(struct ovn_port *op, struct in6_addr *offer_ip,
* build_lswitch_lflows_admission_control() handles the port security.
*/
static void
-build_lswitch_port_sec_op(struct ovn_port *op, struct hmap *lflows,
+build_lswitch_port_sec_op(struct ovn_port *op, struct lflow_table *lflows,
struct ds *actions, struct ds *match)
{
ovs_assert(op->nbsp);
@@ -6582,13 +5940,13 @@ build_lswitch_port_sec_op(struct ovn_port *op, struct hmap *lflows,
ovn_lflow_add_with_lport_and_hint(
lflows, op->od, S_SWITCH_IN_CHECK_PORT_SEC,
100, ds_cstr(match), REGBIT_PORT_SEC_DROP" = 1; next;",
- op->key, &op->nbsp->header_);
+ op->key, &op->nbsp->header_, op->lflow_ref);
ds_clear(match);
ds_put_format(match, "outport == %s", op->json_key);
ovn_lflow_add_with_lport_and_hint(
lflows, op->od, S_SWITCH_IN_L2_UNKNOWN, 50, ds_cstr(match),
- debug_drop_action(), op->key, &op->nbsp->header_);
+ debug_drop_action(), op->key, &op->nbsp->header_, op->lflow_ref);
return;
}
@@ -6604,14 +5962,16 @@ build_lswitch_port_sec_op(struct ovn_port *op, struct hmap *lflows,
ovn_lflow_add_with_lport_and_hint(lflows, op->od,
S_SWITCH_IN_CHECK_PORT_SEC, 70,
ds_cstr(match), ds_cstr(actions),
- op->key, &op->nbsp->header_);
+ op->key, &op->nbsp->header_,
+ op->lflow_ref);
} else if (queue_id) {
ds_put_cstr(actions,
REGBIT_PORT_SEC_DROP" = check_in_port_sec(); next;");
ovn_lflow_add_with_lport_and_hint(lflows, op->od,
S_SWITCH_IN_CHECK_PORT_SEC, 70,
ds_cstr(match), ds_cstr(actions),
- op->key, &op->nbsp->header_);
+ op->key, &op->nbsp->header_,
+ op->lflow_ref);
if (!lsp_is_localnet(op->nbsp) && !op->od->n_localnet_ports) {
return;
@@ -6626,7 +5986,8 @@ build_lswitch_port_sec_op(struct ovn_port *op, struct hmap *lflows,
ovn_lflow_add_with_lport_and_hint(lflows, op->od,
S_SWITCH_OUT_APPLY_PORT_SEC, 100,
ds_cstr(match), ds_cstr(actions),
- op->key, &op->nbsp->header_);
+ op->key, &op->nbsp->header_,
+ op->lflow_ref);
} else if (op->od->n_localnet_ports) {
ds_put_format(match, "outport == %s && inport == %s",
op->od->localnet_ports[0]->json_key,
@@ -6635,14 +5996,15 @@ build_lswitch_port_sec_op(struct ovn_port *op, struct hmap *lflows,
S_SWITCH_OUT_APPLY_PORT_SEC, 110,
ds_cstr(match), ds_cstr(actions),
op->od->localnet_ports[0]->key,
- &op->od->localnet_ports[0]->nbsp->header_);
+ &op->od->localnet_ports[0]->nbsp->header_,
+ op->lflow_ref);
}
}
}
static void
build_lswitch_learn_fdb_op(
- struct ovn_port *op, struct hmap *lflows,
+ struct ovn_port *op, struct lflow_table *lflows,
struct ds *actions, struct ds *match)
{
ovs_assert(op->nbsp);
@@ -6657,7 +6019,8 @@ build_lswitch_learn_fdb_op(
ovn_lflow_add_with_lport_and_hint(lflows, op->od,
S_SWITCH_IN_LOOKUP_FDB, 100,
ds_cstr(match), ds_cstr(actions),
- op->key, &op->nbsp->header_);
+ op->key, &op->nbsp->header_,
+ op->lflow_ref);
ds_put_cstr(match, " && "REGBIT_LKUP_FDB" == 0");
ds_clear(actions);
@@ -6665,13 +6028,14 @@ build_lswitch_learn_fdb_op(
ovn_lflow_add_with_lport_and_hint(lflows, op->od, S_SWITCH_IN_PUT_FDB,
100, ds_cstr(match),
ds_cstr(actions), op->key,
- &op->nbsp->header_);
+ &op->nbsp->header_,
+ op->lflow_ref);
}
}
static void
build_lswitch_learn_fdb_od(
- struct ovn_datapath *od, struct hmap *lflows)
+ struct ovn_datapath *od, struct lflow_table *lflows)
{
ovs_assert(od->nbs);
ovn_lflow_add(lflows, od, S_SWITCH_IN_LOOKUP_FDB, 0, "1", "next;");
@@ -6685,7 +6049,7 @@ build_lswitch_learn_fdb_od(
* (priority 100). */
static void
build_lswitch_output_port_sec_od(struct ovn_datapath *od,
- struct hmap *lflows)
+ struct lflow_table *lflows)
{
ovs_assert(od->nbs);
ovn_lflow_add(lflows, od, S_SWITCH_OUT_CHECK_PORT_SEC, 100,
@@ -6703,7 +6067,7 @@ static void
skip_port_from_conntrack(const struct ovn_datapath *od, struct ovn_port *op,
bool has_stateful_acl, enum ovn_stage in_stage,
enum ovn_stage out_stage, uint16_t priority,
- struct hmap *lflows)
+ struct lflow_table *lflows)
{
/* Can't use ct() for router ports. Consider the following configuration:
* lp1(10.0.0.2) on hostA--ls1--lr0--ls2--lp2(10.0.1.2) on hostB, For a
@@ -6725,10 +6089,10 @@ skip_port_from_conntrack(const struct ovn_datapath *od, struct ovn_port *op,
ovn_lflow_add_with_lport_and_hint(lflows, od, in_stage, priority,
ingress_match, ingress_action,
- op->key, &op->nbsp->header_);
+ op->key, &op->nbsp->header_, NULL);
ovn_lflow_add_with_lport_and_hint(lflows, od, out_stage, priority,
egress_match, egress_action,
- op->key, &op->nbsp->header_);
+ op->key, &op->nbsp->header_, NULL);
free(ingress_match);
free(egress_match);
@@ -6737,7 +6101,7 @@ skip_port_from_conntrack(const struct ovn_datapath *od, struct ovn_port *op,
static void
build_stateless_filter(const struct ovn_datapath *od,
const struct nbrec_acl *acl,
- struct hmap *lflows)
+ struct lflow_table *lflows)
{
const char *action = REGBIT_ACL_STATELESS" = 1; next;";
if (!strcmp(acl->direction, "from-lport")) {
@@ -6758,7 +6122,7 @@ build_stateless_filter(const struct ovn_datapath *od,
static void
build_stateless_filters(const struct ovn_datapath *od,
const struct ls_port_group_table *ls_port_groups,
- struct hmap *lflows)
+ struct lflow_table *lflows)
{
for (size_t i = 0; i < od->nbs->n_acls; i++) {
const struct nbrec_acl *acl = od->nbs->acls[i];
@@ -6786,7 +6150,7 @@ build_stateless_filters(const struct ovn_datapath *od,
}
static void
-build_pre_acls(struct ovn_datapath *od, struct hmap *lflows)
+build_pre_acls(struct ovn_datapath *od, struct lflow_table *lflows)
{
/* Ingress and Egress Pre-ACL Table (Priority 0): Packets are
* allowed by default. */
@@ -6803,7 +6167,7 @@ build_pre_acls(struct ovn_datapath *od, struct hmap *lflows)
static void
build_ls_lbacls_rec_pre_acls(const struct ls_lbacls_record *ls_lbacls_rec,
const struct ls_port_group_table *ls_port_groups,
- struct hmap *lflows)
+ struct lflow_table *lflows)
{
const struct ovn_datapath *od = ls_lbacls_rec->od;
@@ -6922,7 +6286,7 @@ build_empty_lb_event_flow(struct ovn_lb_vip *lb_vip,
static void
build_interconn_mcast_snoop_flows(struct ovn_datapath *od,
const struct shash *meter_groups,
- struct hmap *lflows)
+ struct lflow_table *lflows)
{
struct mcast_switch_info *mcast_sw_info = &od->mcast_info.sw;
if (!mcast_sw_info->enabled
@@ -6956,7 +6320,7 @@ build_interconn_mcast_snoop_flows(struct ovn_datapath *od,
static void
build_pre_lb(struct ovn_datapath *od, const struct shash *meter_groups,
- struct hmap *lflows)
+ struct lflow_table *lflows)
{
/* Handle IGMP/MLD packets crossing AZs. */
build_interconn_mcast_snoop_flows(od, meter_groups, lflows);
@@ -6992,7 +6356,7 @@ build_pre_lb(struct ovn_datapath *od, const struct shash *meter_groups,
static void
build_ls_lbacls_rec_pre_lb(const struct ls_lbacls_record *ls_lbacls_rec,
- struct hmap *lflows)
+ struct lflow_table *lflows)
{
const struct ovn_datapath *od = ls_lbacls_rec->od;
@@ -7058,7 +6422,7 @@ build_ls_lbacls_rec_pre_lb(const struct ls_lbacls_record *ls_lbacls_rec,
static void
build_pre_stateful(struct ovn_datapath *od,
const struct chassis_features *features,
- struct hmap *lflows)
+ struct lflow_table *lflows)
{
/* Ingress and Egress pre-stateful Table (Priority 0): Packets are
* allowed by default. */
@@ -7090,7 +6454,7 @@ build_pre_stateful(struct ovn_datapath *od,
static void
build_acl_hints(const struct ls_lbacls_record *ls_lbacls_rec,
const struct chassis_features *features,
- struct hmap *lflows)
+ struct lflow_table *lflows)
{
const struct ovn_datapath *od = ls_lbacls_rec->od;
@@ -7259,7 +6623,7 @@ build_acl_log(struct ds *actions, const struct nbrec_acl *acl,
}
static void
-consider_acl(struct hmap *lflows, const struct ovn_datapath *od,
+consider_acl(struct lflow_table *lflows, const struct ovn_datapath *od,
const struct nbrec_acl *acl, bool has_stateful,
bool ct_masked_mark, const struct shash *meter_groups,
uint64_t max_acl_tier, struct ds *match, struct ds *actions)
@@ -7489,7 +6853,7 @@ ovn_update_ipv6_options(struct hmap *lr_ports)
static void
build_acl_action_lflows(const struct ls_lbacls_record *ls_lbacls_rec,
- struct hmap *lflows,
+ struct lflow_table *lflows,
const char *default_acl_action,
const struct shash *meter_groups,
struct ds *match,
@@ -7566,7 +6930,8 @@ build_acl_action_lflows(const struct ls_lbacls_record *ls_lbacls_rec,
}
static void
-build_acl_log_related_flows(const struct ovn_datapath *od, struct hmap *lflows,
+build_acl_log_related_flows(const struct ovn_datapath *od,
+ struct lflow_table *lflows,
const struct nbrec_acl *acl, bool has_stateful,
bool ct_masked_mark,
const struct shash *meter_groups,
@@ -7641,7 +7006,7 @@ build_acl_log_related_flows(const struct ovn_datapath *od, struct hmap *lflows,
static void
build_acls(const struct ls_lbacls_record *ls_lbacls_rec,
const struct chassis_features *features,
- struct hmap *lflows,
+ struct lflow_table *lflows,
const struct ls_port_group_table *ls_port_groups,
const struct shash *meter_groups)
{
@@ -7887,7 +7252,7 @@ build_acls(const struct ls_lbacls_record *ls_lbacls_rec,
}
static void
-build_qos(struct ovn_datapath *od, struct hmap *lflows) {
+build_qos(struct ovn_datapath *od, struct lflow_table *lflows) {
struct ds action = DS_EMPTY_INITIALIZER;
ovn_lflow_add(lflows, od, S_SWITCH_IN_QOS_MARK, 0, "1", "next;");
@@ -7948,7 +7313,7 @@ build_qos(struct ovn_datapath *od, struct hmap *lflows) {
}
static void
-build_lb_rules_pre_stateful(struct hmap *lflows,
+build_lb_rules_pre_stateful(struct lflow_table *lflows,
struct ovn_lb_datapaths *lb_dps,
bool ct_lb_mark,
const struct ovn_datapaths *ls_datapaths,
@@ -8050,7 +7415,8 @@ build_lb_rules_pre_stateful(struct hmap *lflows,
*
*/
static void
-build_lb_affinity_lr_flows(struct hmap *lflows, const struct ovn_northd_lb *lb,
+build_lb_affinity_lr_flows(struct lflow_table *lflows,
+ const struct ovn_northd_lb *lb,
struct ovn_lb_vip *lb_vip, char *new_lb_match,
char *lb_action, const unsigned long *dp_bitmap,
const struct ovn_datapaths *lr_datapaths)
@@ -8236,7 +7602,7 @@ build_lb_affinity_lr_flows(struct hmap *lflows, const struct ovn_northd_lb *lb,
*
*/
static void
-build_lb_affinity_ls_flows(struct hmap *lflows,
+build_lb_affinity_ls_flows(struct lflow_table *lflows,
struct ovn_lb_datapaths *lb_dps,
struct ovn_lb_vip *lb_vip,
const struct ovn_datapaths *ls_datapaths)
@@ -8379,7 +7745,7 @@ build_lb_affinity_ls_flows(struct hmap *lflows,
static void
build_lswitch_lb_affinity_default_flows(struct ovn_datapath *od,
- struct hmap *lflows)
+ struct lflow_table *lflows)
{
ovs_assert(od->nbs);
ovn_lflow_add(lflows, od, S_SWITCH_IN_LB_AFF_CHECK, 0, "1", "next;");
@@ -8388,7 +7754,7 @@ build_lswitch_lb_affinity_default_flows(struct ovn_datapath *od,
static void
build_lrouter_lb_affinity_default_flows(struct ovn_datapath *od,
- struct hmap *lflows)
+ struct lflow_table *lflows)
{
ovs_assert(od->nbr);
ovn_lflow_add(lflows, od, S_ROUTER_IN_LB_AFF_CHECK, 0, "1", "next;");
@@ -8396,7 +7762,7 @@ build_lrouter_lb_affinity_default_flows(struct ovn_datapath *od,
}
static void
-build_lb_rules(struct hmap *lflows, struct ovn_lb_datapaths *lb_dps,
+build_lb_rules(struct lflow_table *lflows, struct ovn_lb_datapaths *lb_dps,
const struct ovn_datapaths *ls_datapaths,
const struct chassis_features *features, struct ds *match,
struct ds *action, const struct shash *meter_groups,
@@ -8476,7 +7842,7 @@ build_lb_rules(struct hmap *lflows, struct ovn_lb_datapaths *lb_dps,
static void
build_stateful(struct ovn_datapath *od,
const struct chassis_features *features,
- struct hmap *lflows)
+ struct lflow_table *lflows)
{
const char *ct_block_action = features->ct_no_masked_label
? "ct_mark.blocked"
@@ -8526,7 +7892,7 @@ build_stateful(struct ovn_datapath *od,
static void
build_lb_hairpin(const struct ls_lbacls_record *ls_lbacls_rec,
- struct hmap *lflows)
+ struct lflow_table *lflows)
{
const struct ovn_datapath *od = ls_lbacls_rec->od;
@@ -8585,7 +7951,7 @@ build_lb_hairpin(const struct ls_lbacls_record *ls_lbacls_rec,
}
static void
-build_vtep_hairpin(struct ovn_datapath *od, struct hmap *lflows)
+build_vtep_hairpin(struct ovn_datapath *od, struct lflow_table *lflows)
{
if (!od->has_vtep_lports) {
/* There is no need in these flows if datapath has no vtep lports. */
@@ -8633,7 +7999,7 @@ build_vtep_hairpin(struct ovn_datapath *od, struct hmap *lflows)
/* Build logical flows for the forwarding groups */
static void
-build_fwd_group_lflows(struct ovn_datapath *od, struct hmap *lflows)
+build_fwd_group_lflows(struct ovn_datapath *od, struct lflow_table *lflows)
{
ovs_assert(od->nbs);
if (!od->nbs->n_forwarding_groups) {
@@ -8814,7 +8180,8 @@ build_lswitch_rport_arp_req_self_orig_flow(struct ovn_port *op,
uint32_t priority,
const struct ovn_datapath *od,
const struct lr_nat_record *lrnat_rec,
- struct hmap *lflows)
+ struct lflow_table *lflows,
+ struct lflow_ref *lflow_ref)
{
struct ds eth_src = DS_EMPTY_INITIALIZER;
struct ds match = DS_EMPTY_INITIALIZER;
@@ -8838,8 +8205,10 @@ build_lswitch_rport_arp_req_self_orig_flow(struct ovn_port *op,
ds_put_format(&match,
"eth.src == %s && (arp.op == 1 || rarp.op == 3 || nd_ns)",
ds_cstr(ð_src));
- ovn_lflow_add(lflows, od, S_SWITCH_IN_L2_LKUP, priority, ds_cstr(&match),
- "outport = \""MC_FLOOD_L2"\"; output;");
+ ovn_lflow_add_with_lflow_ref(lflows, od, S_SWITCH_IN_L2_LKUP, priority,
+ ds_cstr(&match),
+ "outport = \""MC_FLOOD_L2"\"; output;",
+ lflow_ref);
ds_destroy(ð_src);
ds_destroy(&match);
@@ -8907,8 +8276,9 @@ static void
build_lswitch_rport_arp_req_flow(const char *ips,
int addr_family, struct ovn_port *patch_op,
const struct ovn_datapath *od,
- uint32_t priority, struct hmap *lflows,
- const struct ovsdb_idl_row *stage_hint)
+ uint32_t priority, struct lflow_table *lflows,
+ const struct ovsdb_idl_row *stage_hint,
+ struct lflow_ref *lflow_ref)
{
struct ds match = DS_EMPTY_INITIALIZER;
struct ds actions = DS_EMPTY_INITIALIZER;
@@ -8922,14 +8292,17 @@ build_lswitch_rport_arp_req_flow(const char *ips,
ds_put_format(&actions, "clone {outport = %s; output; }; "
"outport = \""MC_FLOOD_L2"\"; output;",
patch_op->json_key);
- ovn_lflow_add_with_hint(lflows, od, S_SWITCH_IN_L2_LKUP,
- priority, ds_cstr(&match),
- ds_cstr(&actions), stage_hint);
+ ovn_lflow_add_with_lflow_ref_hint(lflows, od, S_SWITCH_IN_L2_LKUP,
+ priority, ds_cstr(&match),
+ ds_cstr(&actions), stage_hint,
+ lflow_ref);
} else {
ds_put_format(&actions, "outport = %s; output;", patch_op->json_key);
- ovn_lflow_add_with_hint(lflows, od, S_SWITCH_IN_L2_LKUP, priority,
- ds_cstr(&match), ds_cstr(&actions),
- stage_hint);
+ ovn_lflow_add_with_lflow_ref_hint(lflows, od, S_SWITCH_IN_L2_LKUP,
+ priority, ds_cstr(&match),
+ ds_cstr(&actions),
+ stage_hint,
+ lflow_ref);
}
ds_destroy(&match);
@@ -8947,7 +8320,7 @@ static void
build_lswitch_rport_arp_req_flows(struct ovn_port *op,
struct ovn_datapath *sw_od,
struct ovn_port *sw_op,
- struct hmap *lflows,
+ struct lflow_table *lflows,
const struct ovsdb_idl_row *stage_hint)
{
if (!op || !op->nbrp) {
@@ -8965,12 +8338,12 @@ build_lswitch_rport_arp_req_flows(struct ovn_port *op,
for (size_t i = 0; i < op->lrp_networks.n_ipv4_addrs; i++) {
build_lswitch_rport_arp_req_flow(
op->lrp_networks.ipv4_addrs[i].addr_s, AF_INET, sw_op, sw_od, 80,
- lflows, stage_hint);
+ lflows, stage_hint, sw_op->lflow_ref);
}
for (size_t i = 0; i < op->lrp_networks.n_ipv6_addrs; i++) {
build_lswitch_rport_arp_req_flow(
op->lrp_networks.ipv6_addrs[i].addr_s, AF_INET6, sw_op, sw_od, 80,
- lflows, stage_hint);
+ lflows, stage_hint, sw_op->lflow_ref);
}
}
@@ -8986,8 +8359,9 @@ build_lswitch_rport_arp_req_flows_for_lbnats(struct ovn_port *op,
const struct lr_lb_nat_data_record *lr_lbnat_rec,
const struct ovn_datapath *sw_od,
struct ovn_port *sw_op,
- struct hmap *lflows,
- const struct ovsdb_idl_row *stage_hint)
+ struct lflow_table *lflows,
+ const struct ovsdb_idl_row *stage_hint,
+ struct lflow_ref *lflow_ref)
{
if (!op || !op->nbrp) {
return;
@@ -9015,7 +8389,7 @@ build_lswitch_rport_arp_req_flows_for_lbnats(struct ovn_port *op,
lrouter_port_ipv4_reachable(op, ipv4_addr)) {
build_lswitch_rport_arp_req_flow(
ip_addr, AF_INET, sw_op, sw_od, 80, lflows,
- stage_hint);
+ stage_hint, lflow_ref);
}
}
SSET_FOR_EACH (ip_addr, &lr_lbnat_rec->lb_ips->ips_v6_reachable) {
@@ -9028,7 +8402,7 @@ build_lswitch_rport_arp_req_flows_for_lbnats(struct ovn_port *op,
lrouter_port_ipv6_reachable(op, &ipv6_addr)) {
build_lswitch_rport_arp_req_flow(
ip_addr, AF_INET6, sw_op, sw_od, 80, lflows,
- stage_hint);
+ stage_hint, lflow_ref);
}
}
}
@@ -9043,7 +8417,7 @@ build_lswitch_rport_arp_req_flows_for_lbnats(struct ovn_port *op,
if (sw_od->n_router_ports != sw_od->nbs->n_ports) {
build_lswitch_rport_arp_req_self_orig_flow(op, 75, sw_od,
lr_lbnat_rec->lrnat_rec,
- lflows);
+ lflows, lflow_ref);
}
for (size_t i = 0; i < lr_lbnat_rec->lrnat_rec->n_nat_entries; i++) {
@@ -9066,14 +8440,14 @@ build_lswitch_rport_arp_req_flows_for_lbnats(struct ovn_port *op,
nat->external_ip)) {
build_lswitch_rport_arp_req_flow(
nat->external_ip, AF_INET6, sw_op, sw_od, 80, lflows,
- stage_hint);
+ stage_hint, lflow_ref);
}
} else {
if (!sset_contains(&lr_lbnat_rec->lb_ips->ips_v4,
nat->external_ip)) {
build_lswitch_rport_arp_req_flow(
nat->external_ip, AF_INET, sw_op, sw_od, 80, lflows,
- stage_hint);
+ stage_hint, lflow_ref);
}
}
}
@@ -9084,7 +8458,7 @@ build_dhcpv4_options_flows(struct ovn_port *op,
struct lport_addresses *lsp_addrs,
struct ovn_port *inport, bool is_external,
const struct shash *meter_groups,
- struct hmap *lflows)
+ struct lflow_table *lflows)
{
struct ds match = DS_EMPTY_INITIALIZER;
@@ -9107,7 +8481,7 @@ build_dhcpv4_options_flows(struct ovn_port *op,
op->json_key);
}
- ovn_lflow_add_with_hint__(lflows, op->od,
+ ovn_lflow_add_with_lflow_ref_hint__(lflows, op->od,
S_SWITCH_IN_DHCP_OPTIONS, 100,
ds_cstr(&match),
ds_cstr(&options_action),
@@ -9115,7 +8489,8 @@ build_dhcpv4_options_flows(struct ovn_port *op,
copp_meter_get(COPP_DHCPV4_OPTS,
op->od->nbs->copp,
meter_groups),
- &op->nbsp->dhcpv4_options->header_);
+ &op->nbsp->dhcpv4_options->header_,
+ op->lflow_ref);
ds_clear(&match);
/* Allow ip4.src = OFFER_IP and
* ip4.dst = {SERVER_IP, 255.255.255.255} for the below
@@ -9135,7 +8510,7 @@ build_dhcpv4_options_flows(struct ovn_port *op,
op->json_key);
}
- ovn_lflow_add_with_hint__(lflows, op->od,
+ ovn_lflow_add_with_lflow_ref_hint__(lflows, op->od,
S_SWITCH_IN_DHCP_OPTIONS, 100,
ds_cstr(&match),
ds_cstr(&options_action),
@@ -9143,7 +8518,8 @@ build_dhcpv4_options_flows(struct ovn_port *op,
copp_meter_get(COPP_DHCPV4_OPTS,
op->od->nbs->copp,
meter_groups),
- &op->nbsp->dhcpv4_options->header_);
+ &op->nbsp->dhcpv4_options->header_,
+ op->lflow_ref);
ds_clear(&match);
/* If REGBIT_DHCP_OPTS_RESULT is set, it means the
@@ -9162,7 +8538,8 @@ build_dhcpv4_options_flows(struct ovn_port *op,
ovn_lflow_add_with_lport_and_hint(
lflows, op->od, S_SWITCH_IN_DHCP_RESPONSE, 100,
ds_cstr(&match), ds_cstr(&response_action), inport->key,
- &op->nbsp->dhcpv4_options->header_);
+ &op->nbsp->dhcpv4_options->header_,
+ op->lflow_ref);
ds_destroy(&options_action);
ds_destroy(&response_action);
ds_destroy(&ipv4_addr_match);
@@ -9189,7 +8566,8 @@ build_dhcpv4_options_flows(struct ovn_port *op,
ovn_lflow_add_with_lport_and_hint(
lflows, op->od, S_SWITCH_OUT_ACL_EVAL, 34000,
ds_cstr(&match),dhcp_actions, op->key,
- &op->nbsp->dhcpv4_options->header_);
+ &op->nbsp->dhcpv4_options->header_,
+ op->lflow_ref);
}
break;
}
@@ -9202,7 +8580,7 @@ build_dhcpv6_options_flows(struct ovn_port *op,
struct lport_addresses *lsp_addrs,
struct ovn_port *inport, bool is_external,
const struct shash *meter_groups,
- struct hmap *lflows)
+ struct lflow_table *lflows)
{
struct ds match = DS_EMPTY_INITIALIZER;
@@ -9224,7 +8602,7 @@ build_dhcpv6_options_flows(struct ovn_port *op,
op->json_key);
}
- ovn_lflow_add_with_hint__(lflows, op->od,
+ ovn_lflow_add_with_lflow_ref_hint__(lflows, op->od,
S_SWITCH_IN_DHCP_OPTIONS, 100,
ds_cstr(&match),
ds_cstr(&options_action),
@@ -9232,7 +8610,8 @@ build_dhcpv6_options_flows(struct ovn_port *op,
copp_meter_get(COPP_DHCPV6_OPTS,
op->od->nbs->copp,
meter_groups),
- &op->nbsp->dhcpv6_options->header_);
+ &op->nbsp->dhcpv6_options->header_,
+ op->lflow_ref);
/* If REGBIT_DHCP_OPTS_RESULT is set to 1, it means the
* put_dhcpv6_opts action is successful */
@@ -9240,7 +8619,7 @@ build_dhcpv6_options_flows(struct ovn_port *op,
ovn_lflow_add_with_lport_and_hint(
lflows, op->od, S_SWITCH_IN_DHCP_RESPONSE, 100,
ds_cstr(&match), ds_cstr(&response_action), inport->key,
- &op->nbsp->dhcpv6_options->header_);
+ &op->nbsp->dhcpv6_options->header_, op->lflow_ref);
ds_destroy(&options_action);
ds_destroy(&response_action);
@@ -9272,7 +8651,8 @@ build_dhcpv6_options_flows(struct ovn_port *op,
ovn_lflow_add_with_lport_and_hint(
lflows, op->od, S_SWITCH_OUT_ACL_EVAL, 34000,
ds_cstr(&match),dhcp6_actions, op->key,
- &op->nbsp->dhcpv6_options->header_);
+ &op->nbsp->dhcpv6_options->header_,
+ op->lflow_ref);
}
break;
}
@@ -9283,7 +8663,7 @@ build_dhcpv6_options_flows(struct ovn_port *op,
static void
build_drop_arp_nd_flows_for_unbound_router_ports(struct ovn_port *op,
const struct ovn_port *port,
- struct hmap *lflows)
+ struct lflow_table *lflows)
{
struct ds match = DS_EMPTY_INITIALIZER;
@@ -9303,7 +8683,7 @@ build_drop_arp_nd_flows_for_unbound_router_ports(struct ovn_port *op,
ovn_lflow_add_with_lport_and_hint(
lflows, op->od, S_SWITCH_IN_EXTERNAL_PORT, 100,
ds_cstr(&match), debug_drop_action(), port->key,
- &op->nbsp->header_);
+ &op->nbsp->header_, op->lflow_ref);
}
for (size_t l = 0; l < rp->lsp_addrs[k].n_ipv6_addrs; l++) {
ds_clear(&match);
@@ -9319,7 +8699,7 @@ build_drop_arp_nd_flows_for_unbound_router_ports(struct ovn_port *op,
ovn_lflow_add_with_lport_and_hint(
lflows, op->od, S_SWITCH_IN_EXTERNAL_PORT, 100,
ds_cstr(&match), debug_drop_action(), port->key,
- &op->nbsp->header_);
+ &op->nbsp->header_, op->lflow_ref);
}
ds_clear(&match);
@@ -9335,7 +8715,8 @@ build_drop_arp_nd_flows_for_unbound_router_ports(struct ovn_port *op,
100, ds_cstr(&match),
debug_drop_action(),
port->key,
- &op->nbsp->header_);
+ &op->nbsp->header_,
+ op->lflow_ref);
}
}
}
@@ -9350,7 +8731,7 @@ is_vlan_transparent(const struct ovn_datapath *od)
static void
build_lswitch_lflows_l2_unknown(struct ovn_datapath *od,
- struct hmap *lflows)
+ struct lflow_table *lflows)
{
/* Ingress table 25/26: Destination lookup for unknown MACs. */
if (od->has_unknown) {
@@ -9371,7 +8752,7 @@ static void
build_lswitch_lflows_pre_acl_and_acl(
struct ovn_datapath *od,
const struct chassis_features *features,
- struct hmap *lflows,
+ struct lflow_table *lflows,
const struct shash *meter_groups)
{
ovs_assert(od->nbs);
@@ -9387,7 +8768,7 @@ build_lswitch_lflows_pre_acl_and_acl(
* 100). */
static void
build_lswitch_lflows_admission_control(struct ovn_datapath *od,
- struct hmap *lflows)
+ struct lflow_table *lflows)
{
ovs_assert(od->nbs);
/* Logical VLANs not supported. */
@@ -9415,7 +8796,7 @@ build_lswitch_lflows_admission_control(struct ovn_datapath *od,
static void
build_lswitch_arp_nd_responder_skip_local(struct ovn_port *op,
- struct hmap *lflows,
+ struct lflow_table *lflows,
struct ds *match)
{
ovs_assert(op->nbsp);
@@ -9427,14 +8808,14 @@ build_lswitch_arp_nd_responder_skip_local(struct ovn_port *op,
ovn_lflow_add_with_lport_and_hint(lflows, op->od,
S_SWITCH_IN_ARP_ND_RSP, 100,
ds_cstr(match), "next;", op->key,
- &op->nbsp->header_);
+ &op->nbsp->header_, op->lflow_ref);
}
/* Ingress table 19: ARP/ND responder, reply for known IPs.
* (priority 50). */
static void
build_lswitch_arp_nd_responder_known_ips(struct ovn_port *op,
- struct hmap *lflows,
+ struct lflow_table *lflows,
const struct hmap *ls_ports,
const struct shash *meter_groups,
struct ds *actions,
@@ -9519,7 +8900,8 @@ build_lswitch_arp_nd_responder_known_ips(struct ovn_port *op,
S_SWITCH_IN_ARP_ND_RSP, 100,
ds_cstr(match),
ds_cstr(actions), vparent,
- &vp->nbsp->header_);
+ &vp->nbsp->header_,
+ op->lflow_ref);
}
free(tokstr);
@@ -9563,11 +8945,12 @@ build_lswitch_arp_nd_responder_known_ips(struct ovn_port *op,
"output;",
op->lsp_addrs[i].ea_s, op->lsp_addrs[i].ea_s,
op->lsp_addrs[i].ipv4_addrs[j].addr_s);
- ovn_lflow_add_with_hint(lflows, op->od,
- S_SWITCH_IN_ARP_ND_RSP, 50,
- ds_cstr(match),
- ds_cstr(actions),
- &op->nbsp->header_);
+ ovn_lflow_add_with_lflow_ref_hint(lflows, op->od,
+ S_SWITCH_IN_ARP_ND_RSP, 50,
+ ds_cstr(match),
+ ds_cstr(actions),
+ &op->nbsp->header_,
+ op->lflow_ref);
/* Do not reply to an ARP request from the port that owns
* the address (otherwise a DHCP client that ARPs to check
@@ -9586,7 +8969,8 @@ build_lswitch_arp_nd_responder_known_ips(struct ovn_port *op,
S_SWITCH_IN_ARP_ND_RSP,
100, ds_cstr(match),
"next;", op->key,
- &op->nbsp->header_);
+ &op->nbsp->header_,
+ op->lflow_ref);
}
/* For ND solicitations, we need to listen for both the
@@ -9616,15 +9000,16 @@ build_lswitch_arp_nd_responder_known_ips(struct ovn_port *op,
op->lsp_addrs[i].ipv6_addrs[j].addr_s,
op->lsp_addrs[i].ipv6_addrs[j].addr_s,
op->lsp_addrs[i].ea_s);
- ovn_lflow_add_with_hint__(lflows, op->od,
- S_SWITCH_IN_ARP_ND_RSP, 50,
- ds_cstr(match),
- ds_cstr(actions),
- NULL,
- copp_meter_get(COPP_ND_NA,
- op->od->nbs->copp,
- meter_groups),
- &op->nbsp->header_);
+ ovn_lflow_add_with_lflow_ref_hint__(lflows, op->od,
+ S_SWITCH_IN_ARP_ND_RSP, 50,
+ ds_cstr(match),
+ ds_cstr(actions),
+ NULL,
+ copp_meter_get(COPP_ND_NA,
+ op->od->nbs->copp,
+ meter_groups),
+ &op->nbsp->header_,
+ op->lflow_ref);
/* Do not reply to a solicitation from the port that owns
* the address (otherwise DAD detection will fail). */
@@ -9633,7 +9018,8 @@ build_lswitch_arp_nd_responder_known_ips(struct ovn_port *op,
S_SWITCH_IN_ARP_ND_RSP,
100, ds_cstr(match),
"next;", op->key,
- &op->nbsp->header_);
+ &op->nbsp->header_,
+ op->lflow_ref);
}
}
}
@@ -9679,8 +9065,12 @@ build_lswitch_arp_nd_responder_known_ips(struct ovn_port *op,
ea_s,
ea_s);
- ovn_lflow_add_with_hint(lflows, op->od, S_SWITCH_IN_ARP_ND_RSP,
- 30, ds_cstr(match), ds_cstr(actions), &op->nbsp->header_);
+ ovn_lflow_add_with_lflow_ref_hint(lflows, op->od,
+ S_SWITCH_IN_ARP_ND_RSP,
+ 30, ds_cstr(match),
+ ds_cstr(actions),
+ &op->nbsp->header_,
+ op->lflow_ref);
}
/* Add IPv6 NDP responses.
@@ -9723,15 +9113,16 @@ build_lswitch_arp_nd_responder_known_ips(struct ovn_port *op,
lsp_is_router(op->nbsp) ? "nd_na_router" : "nd_na",
ea_s,
ea_s);
- ovn_lflow_add_with_hint__(lflows, op->od,
- S_SWITCH_IN_ARP_ND_RSP, 30,
- ds_cstr(match),
- ds_cstr(actions),
- NULL,
- copp_meter_get(COPP_ND_NA,
- op->od->nbs->copp,
- meter_groups),
- &op->nbsp->header_);
+ ovn_lflow_add_with_lflow_ref_hint__(lflows, op->od,
+ S_SWITCH_IN_ARP_ND_RSP, 30,
+ ds_cstr(match),
+ ds_cstr(actions),
+ NULL,
+ copp_meter_get(COPP_ND_NA,
+ op->od->nbs->copp,
+ meter_groups),
+ &op->nbsp->header_,
+ op->lflow_ref);
ds_destroy(&ip6_dst_match);
ds_destroy(&nd_target_match);
}
@@ -9742,7 +9133,7 @@ build_lswitch_arp_nd_responder_known_ips(struct ovn_port *op,
* (priority 0)*/
static void
build_lswitch_arp_nd_responder_default(struct ovn_datapath *od,
- struct hmap *lflows)
+ struct lflow_table *lflows)
{
ovs_assert(od->nbs);
ovn_lflow_add(lflows, od, S_SWITCH_IN_ARP_ND_RSP, 0, "1", "next;");
@@ -9753,7 +9144,7 @@ build_lswitch_arp_nd_responder_default(struct ovn_datapath *od,
static void
build_lswitch_arp_nd_service_monitor(const struct ovn_northd_lb *lb,
const struct hmap *ls_ports,
- struct hmap *lflows,
+ struct lflow_table *lflows,
struct ds *actions,
struct ds *match)
{
@@ -9829,7 +9220,7 @@ build_lswitch_arp_nd_service_monitor(const struct ovn_northd_lb *lb,
* priority 100 flows. */
static void
build_lswitch_dhcp_options_and_response(struct ovn_port *op,
- struct hmap *lflows,
+ struct lflow_table *lflows,
const struct shash *meter_groups)
{
ovs_assert(op->nbsp);
@@ -9884,7 +9275,7 @@ build_lswitch_dhcp_options_and_response(struct ovn_port *op,
* (priority 0). */
static void
build_lswitch_dhcp_and_dns_defaults(struct ovn_datapath *od,
- struct hmap *lflows)
+ struct lflow_table *lflows)
{
ovs_assert(od->nbs);
ovn_lflow_add(lflows, od, S_SWITCH_IN_DHCP_OPTIONS, 0, "1", "next;");
@@ -9899,7 +9290,7 @@ build_lswitch_dhcp_and_dns_defaults(struct ovn_datapath *od,
*/
static void
build_lswitch_dns_lookup_and_response(struct ovn_datapath *od,
- struct hmap *lflows,
+ struct lflow_table *lflows,
const struct shash *meter_groups)
{
ovs_assert(od->nbs);
@@ -9930,7 +9321,7 @@ build_lswitch_dns_lookup_and_response(struct ovn_datapath *od,
* binding the external ports. */
static void
build_lswitch_external_port(struct ovn_port *op,
- struct hmap *lflows)
+ struct lflow_table *lflows)
{
ovs_assert(op->nbsp);
if (!lsp_is_external(op->nbsp)) {
@@ -9946,7 +9337,7 @@ build_lswitch_external_port(struct ovn_port *op,
* (priority 70 - 100). */
static void
build_lswitch_destination_lookup_bmcast(struct ovn_datapath *od,
- struct hmap *lflows,
+ struct lflow_table *lflows,
struct ds *actions,
const struct shash *meter_groups)
{
@@ -10039,7 +9430,7 @@ build_lswitch_destination_lookup_bmcast(struct ovn_datapath *od,
* (priority 90). */
static void
build_lswitch_ip_mcast_igmp_mld(struct ovn_igmp_group *igmp_group,
- struct hmap *lflows,
+ struct lflow_table *lflows,
struct ds *actions,
struct ds *match)
{
@@ -10119,7 +9510,8 @@ build_lswitch_ip_mcast_igmp_mld(struct ovn_igmp_group *igmp_group,
/* Ingress table 25: Destination lookup, unicast handling (priority 50), */
static void
-build_lswitch_ip_unicast_lookup(struct ovn_port *op, struct hmap *lflows,
+build_lswitch_ip_unicast_lookup(struct ovn_port *op,
+ struct lflow_table *lflows,
struct ds *actions, struct ds *match)
{
ovs_assert(op->nbsp);
@@ -10152,10 +9544,12 @@ build_lswitch_ip_unicast_lookup(struct ovn_port *op, struct hmap *lflows,
ds_clear(actions);
ds_put_format(actions, action, op->json_key);
- ovn_lflow_add_with_hint(lflows, op->od, S_SWITCH_IN_L2_LKUP,
- 50, ds_cstr(match),
- ds_cstr(actions),
- &op->nbsp->header_);
+ ovn_lflow_add_with_lflow_ref_hint(lflows, op->od,
+ S_SWITCH_IN_L2_LKUP,
+ 50, ds_cstr(match),
+ ds_cstr(actions),
+ &op->nbsp->header_,
+ op->lflow_ref);
} else if (!strcmp(op->nbsp->addresses[i], "unknown")) {
continue;
} else if (is_dynamic_lsp_address(op->nbsp->addresses[i])) {
@@ -10170,10 +9564,12 @@ build_lswitch_ip_unicast_lookup(struct ovn_port *op, struct hmap *lflows,
ds_clear(actions);
ds_put_format(actions, action, op->json_key);
- ovn_lflow_add_with_hint(lflows, op->od, S_SWITCH_IN_L2_LKUP,
- 50, ds_cstr(match),
- ds_cstr(actions),
- &op->nbsp->header_);
+ ovn_lflow_add_with_lflow_ref_hint(lflows, op->od,
+ S_SWITCH_IN_L2_LKUP,
+ 50, ds_cstr(match),
+ ds_cstr(actions),
+ &op->nbsp->header_,
+ op->lflow_ref);
} else if (!strcmp(op->nbsp->addresses[i], "router")) {
if (!op->peer || !op->peer->nbrp
|| !ovs_scan(op->peer->nbrp->mac,
@@ -10225,10 +9621,11 @@ build_lswitch_ip_unicast_lookup(struct ovn_port *op, struct hmap *lflows,
ds_clear(actions);
ds_put_format(actions, action, op->json_key);
- ovn_lflow_add_with_hint(lflows, op->od,
- S_SWITCH_IN_L2_LKUP, 50,
- ds_cstr(match), ds_cstr(actions),
- &op->nbsp->header_);
+ ovn_lflow_add_with_lflow_ref_hint(lflows, op->od,
+ S_SWITCH_IN_L2_LKUP, 50,
+ ds_cstr(match), ds_cstr(actions),
+ &op->nbsp->header_,
+ op->lflow_ref);
} else {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
@@ -10243,8 +9640,8 @@ build_lswitch_ip_unicast_lookup(struct ovn_port *op, struct hmap *lflows,
static void
build_lswitch_ip_unicast_lookup_for_nats(struct ovn_port *op,
const struct lr_lb_nat_data_record *lr_lbnat_rec,
- struct hmap *lflows, struct ds *match,
- struct ds *actions)
+ struct lflow_table *lflows, struct ds *match,
+ struct ds *actions, struct lflow_ref *lflow_ref)
{
ovs_assert(op->nbsp);
@@ -10276,11 +9673,12 @@ build_lswitch_ip_unicast_lookup_for_nats(struct ovn_port *op,
ds_clear(actions);
ds_put_format(actions, action, op->json_key);
- ovn_lflow_add_with_hint(lflows, op->od,
+ ovn_lflow_add_with_lflow_ref_hint(lflows, op->od,
S_SWITCH_IN_L2_LKUP, 50,
ds_cstr(match),
ds_cstr(actions),
- &op->nbsp->header_);
+ &op->nbsp->header_,
+ lflow_ref);
}
}
}
@@ -10520,7 +9918,7 @@ get_outport_for_routing_policy_nexthop(struct ovn_datapath *od,
}
static void
-build_routing_policy_flow(struct hmap *lflows, struct ovn_datapath *od,
+build_routing_policy_flow(struct lflow_table *lflows, struct ovn_datapath *od,
const struct hmap *lr_ports,
const struct nbrec_logical_router_policy *rule,
const struct ovsdb_idl_row *stage_hint)
@@ -10585,7 +9983,8 @@ build_routing_policy_flow(struct hmap *lflows, struct ovn_datapath *od,
}
static void
-build_ecmp_routing_policy_flows(struct hmap *lflows, struct ovn_datapath *od,
+build_ecmp_routing_policy_flows(struct lflow_table *lflows,
+ struct ovn_datapath *od,
const struct hmap *lr_ports,
const struct nbrec_logical_router_policy *rule,
uint16_t ecmp_group_id)
@@ -10721,7 +10120,7 @@ get_route_table_id(struct simap *route_tables, const char *route_table_name)
}
static void
-build_route_table_lflow(struct ovn_datapath *od, struct hmap *lflows,
+build_route_table_lflow(struct ovn_datapath *od, struct lflow_table *lflows,
struct nbrec_logical_router_port *lrp,
struct simap *route_tables)
{
@@ -11132,7 +10531,7 @@ find_static_route_outport(struct ovn_datapath *od, const struct hmap *lr_ports,
}
static void
-add_ecmp_symmetric_reply_flows(struct hmap *lflows,
+add_ecmp_symmetric_reply_flows(struct lflow_table *lflows,
struct ovn_datapath *od,
bool ct_masked_mark,
const char *port_ip,
@@ -11297,7 +10696,7 @@ add_ecmp_symmetric_reply_flows(struct hmap *lflows,
}
static void
-build_ecmp_route_flow(struct hmap *lflows, struct ovn_datapath *od,
+build_ecmp_route_flow(struct lflow_table *lflows, struct ovn_datapath *od,
bool ct_masked_mark, const struct hmap *lr_ports,
struct ecmp_groups_node *eg)
@@ -11384,12 +10783,12 @@ build_ecmp_route_flow(struct hmap *lflows, struct ovn_datapath *od,
}
static void
-add_route(struct hmap *lflows, struct ovn_datapath *od,
+add_route(struct lflow_table *lflows, struct ovn_datapath *od,
const struct ovn_port *op, const char *lrp_addr_s,
const char *network_s, int plen, const char *gateway,
bool is_src_route, const uint32_t rtb_id,
const struct ovsdb_idl_row *stage_hint, bool is_discard_route,
- int ofs)
+ int ofs, struct lflow_ref *lflow_ref)
{
bool is_ipv4 = strchr(network_s, '.') ? true : false;
struct ds match = DS_EMPTY_INITIALIZER;
@@ -11432,14 +10831,17 @@ add_route(struct hmap *lflows, struct ovn_datapath *od,
ds_put_format(&actions, "ip.ttl--; %s", ds_cstr(&common_actions));
}
- ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_IP_ROUTING, priority,
- ds_cstr(&match), ds_cstr(&actions),
- stage_hint);
+ ovn_lflow_add_with_lflow_ref_hint(lflows, od, S_ROUTER_IN_IP_ROUTING,
+ priority, ds_cstr(&match),
+ ds_cstr(&actions), stage_hint,
+ lflow_ref);
if (op && op->has_bfd) {
ds_put_format(&match, " && udp.dst == 3784");
- ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_ROUTING,
- priority + 1, ds_cstr(&match),
- ds_cstr(&common_actions), stage_hint);
+ ovn_lflow_add_with_lflow_ref_hint(lflows, op->od,
+ S_ROUTER_IN_IP_ROUTING,
+ priority + 1, ds_cstr(&match),
+ ds_cstr(&common_actions),
+ stage_hint, lflow_ref);
}
ds_destroy(&match);
ds_destroy(&common_actions);
@@ -11447,7 +10849,7 @@ add_route(struct hmap *lflows, struct ovn_datapath *od,
}
static void
-build_static_route_flow(struct hmap *lflows, struct ovn_datapath *od,
+build_static_route_flow(struct lflow_table *lflows, struct ovn_datapath *od,
const struct hmap *lr_ports,
const struct parsed_route *route_)
{
@@ -11473,7 +10875,7 @@ build_static_route_flow(struct hmap *lflows, struct ovn_datapath *od,
add_route(lflows, route_->is_discard_route ? od : out_port->od, out_port,
lrp_addr_s, prefix_s, route_->plen, route->nexthop,
route_->is_src_route, route_->route_table_id, &route->header_,
- route_->is_discard_route, ofs);
+ route_->is_discard_route, ofs, NULL);
free(prefix_s);
}
@@ -11536,7 +10938,7 @@ struct lrouter_nat_lb_flows_ctx {
int prio;
- struct hmap *lflows;
+ struct lflow_table *lflows;
const struct shash *meter_groups;
};
@@ -11667,7 +11069,7 @@ build_lrouter_nat_flows_for_lb(struct ovn_lb_vip *lb_vip,
struct ovn_northd_lb_vip *vips_nb,
const struct ovn_datapaths *lr_datapaths,
const struct lr_lb_nat_data_table *lr_lbnats,
- struct hmap *lflows,
+ struct lflow_table *lflows,
struct ds *match, struct ds *action,
const struct shash *meter_groups,
const struct chassis_features *features,
@@ -11836,7 +11238,7 @@ build_lrouter_nat_flows_for_lb(struct ovn_lb_vip *lb_vip,
static void
build_lswitch_flows_for_lb(struct ovn_lb_datapaths *lb_dps,
- struct hmap *lflows,
+ struct lflow_table *lflows,
const struct shash *meter_groups,
const struct ovn_datapaths *ls_datapaths,
const struct chassis_features *features,
@@ -11897,7 +11299,7 @@ build_lswitch_flows_for_lb(struct ovn_lb_datapaths *lb_dps,
*/
static void
build_lrouter_defrag_flows_for_lb(struct ovn_lb_datapaths *lb_dps,
- struct hmap *lflows,
+ struct lflow_table *lflows,
const struct ovn_datapaths *lr_datapaths,
struct ds *match)
{
@@ -11923,7 +11325,7 @@ build_lrouter_defrag_flows_for_lb(struct ovn_lb_datapaths *lb_dps,
static void
build_lrouter_flows_for_lb(struct ovn_lb_datapaths *lb_dps,
- struct hmap *lflows,
+ struct lflow_table *lflows,
const struct shash *meter_groups,
const struct ovn_datapaths *lr_datapaths,
const struct lr_lb_nat_data_table *lr_lbnats,
@@ -12081,7 +11483,7 @@ lrouter_dnat_and_snat_is_stateless(const struct nbrec_nat *nat)
*/
static inline void
lrouter_nat_add_ext_ip_match(const struct ovn_datapath *od,
- struct hmap *lflows, struct ds *match,
+ struct lflow_table *lflows, struct ds *match,
const struct nbrec_nat *nat,
bool is_v6, bool is_src, int cidr_bits)
{
@@ -12148,7 +11550,7 @@ build_lrouter_arp_flow(const struct ovn_datapath *od, struct ovn_port *op,
const char *ip_address, const char *eth_addr,
struct ds *extra_match, bool drop, uint16_t priority,
const struct ovsdb_idl_row *hint,
- struct hmap *lflows)
+ struct lflow_table *lflows)
{
struct ds match = DS_EMPTY_INITIALIZER;
struct ds actions = DS_EMPTY_INITIALIZER;
@@ -12198,7 +11600,8 @@ build_lrouter_nd_flow(const struct ovn_datapath *od, struct ovn_port *op,
const char *sn_ip_address, const char *eth_addr,
struct ds *extra_match, bool drop, uint16_t priority,
const struct ovsdb_idl_row *hint,
- struct hmap *lflows, const struct shash *meter_groups)
+ struct lflow_table *lflows,
+ const struct shash *meter_groups)
{
struct ds match = DS_EMPTY_INITIALIZER;
struct ds actions = DS_EMPTY_INITIALIZER;
@@ -12249,7 +11652,7 @@ build_lrouter_nd_flow(const struct ovn_datapath *od, struct ovn_port *op,
static void
build_lrouter_nat_arp_nd_flow(const struct ovn_datapath *od,
struct ovn_nat *nat_entry,
- struct hmap *lflows,
+ struct lflow_table *lflows,
const struct shash *meter_groups)
{
struct lport_addresses *ext_addrs = &nat_entry->ext_addrs;
@@ -12272,7 +11675,7 @@ build_lrouter_nat_arp_nd_flow(const struct ovn_datapath *od,
static void
build_lrouter_port_nat_arp_nd_flow(struct ovn_port *op,
struct ovn_nat *nat_entry,
- struct hmap *lflows,
+ struct lflow_table *lflows,
const struct shash *meter_groups)
{
struct lport_addresses *ext_addrs = &nat_entry->ext_addrs;
@@ -12346,7 +11749,7 @@ build_lrouter_drop_own_dest(struct ovn_port *op,
const struct lr_lb_nat_data_record *lr_lbnat_rec,
enum ovn_stage stage,
uint16_t priority, bool drop_snat_ip,
- struct hmap *lflows)
+ struct lflow_table *lflows)
{
struct ds match_ips = DS_EMPTY_INITIALIZER;
@@ -12411,7 +11814,7 @@ build_lrouter_drop_own_dest(struct ovn_port *op,
}
static void
-build_lrouter_force_snat_flows(struct hmap *lflows,
+build_lrouter_force_snat_flows(struct lflow_table *lflows,
const struct ovn_datapath *od,
const char *ip_version, const char *ip_addr,
const char *context)
@@ -12440,7 +11843,7 @@ build_lrouter_force_snat_flows(struct hmap *lflows,
static void
build_lrouter_force_snat_flows_op(struct ovn_port *op,
const struct lr_nat_record *lrnat_rec,
- struct hmap *lflows,
+ struct lflow_table *lflows,
struct ds *match, struct ds *actions)
{
ovs_assert(op->nbrp);
@@ -12512,7 +11915,7 @@ build_lrouter_force_snat_flows_op(struct ovn_port *op,
}
static void
-build_lrouter_bfd_flows(struct hmap *lflows, struct ovn_port *op,
+build_lrouter_bfd_flows(struct lflow_table *lflows, struct ovn_port *op,
const struct shash *meter_groups)
{
if (!op->has_bfd) {
@@ -12567,7 +11970,7 @@ build_lrouter_bfd_flows(struct hmap *lflows, struct ovn_port *op,
*/
static void
build_adm_ctrl_flows_for_lrouter(
- struct ovn_datapath *od, struct hmap *lflows)
+ struct ovn_datapath *od, struct lflow_table *lflows)
{
ovs_assert(od->nbr);
/* Logical VLANs not supported.
@@ -12611,7 +12014,7 @@ build_gateway_get_l2_hdr_size(struct ovn_port *op)
* function.
*/
static void OVS_PRINTF_FORMAT(9, 10)
-build_gateway_mtu_flow(struct hmap *lflows, struct ovn_port *op,
+build_gateway_mtu_flow(struct lflow_table *lflows, struct ovn_port *op,
enum ovn_stage stage, uint16_t prio_low,
uint16_t prio_high, struct ds *match,
struct ds *actions, const struct ovsdb_idl_row *hint,
@@ -12672,7 +12075,7 @@ consider_l3dgw_port_is_centralized(struct ovn_port *op)
*/
static void
build_adm_ctrl_flows_for_lrouter_port(
- struct ovn_port *op, struct hmap *lflows,
+ struct ovn_port *op, struct lflow_table *lflows,
struct ds *match, struct ds *actions)
{
ovs_assert(op->nbrp);
@@ -12726,7 +12129,7 @@ build_adm_ctrl_flows_for_lrouter_port(
* lflows for logical routers. */
static void
build_neigh_learning_flows_for_lrouter(
- struct ovn_datapath *od, struct hmap *lflows,
+ struct ovn_datapath *od, struct lflow_table *lflows,
struct ds *match, struct ds *actions,
const struct shash *meter_groups)
{
@@ -12857,7 +12260,7 @@ build_neigh_learning_flows_for_lrouter(
* for logical router ports. */
static void
build_neigh_learning_flows_for_lrouter_port(
- struct ovn_port *op, struct hmap *lflows,
+ struct ovn_port *op, struct lflow_table *lflows,
struct ds *match, struct ds *actions)
{
ovs_assert(op->nbrp);
@@ -12919,7 +12322,7 @@ build_neigh_learning_flows_for_lrouter_port(
* Adv (RA) options and response. */
static void
build_ND_RA_flows_for_lrouter_port(
- struct ovn_port *op, struct hmap *lflows,
+ struct ovn_port *op, struct lflow_table *lflows,
struct ds *match, struct ds *actions,
const struct shash *meter_groups)
{
@@ -13034,7 +12437,8 @@ build_ND_RA_flows_for_lrouter_port(
/* Logical router ingress table ND_RA_OPTIONS & ND_RA_RESPONSE: RS
* responder, by default goto next. (priority 0). */
static void
-build_ND_RA_flows_for_lrouter(struct ovn_datapath *od, struct hmap *lflows)
+build_ND_RA_flows_for_lrouter(struct ovn_datapath *od,
+ struct lflow_table *lflows)
{
ovs_assert(od->nbr);
ovn_lflow_add(lflows, od, S_ROUTER_IN_ND_RA_OPTIONS, 0, "1", "next;");
@@ -13045,7 +12449,7 @@ build_ND_RA_flows_for_lrouter(struct ovn_datapath *od, struct hmap *lflows)
* by default goto next. (priority 0). */
static void
build_ip_routing_pre_flows_for_lrouter(struct ovn_datapath *od,
- struct hmap *lflows)
+ struct lflow_table *lflows)
{
ovs_assert(od->nbr);
ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_ROUTING_PRE, 0, "1",
@@ -13073,21 +12477,23 @@ build_ip_routing_pre_flows_for_lrouter(struct ovn_datapath *od,
*/
static void
build_ip_routing_flows_for_lrp(
- struct ovn_port *op, struct hmap *lflows)
+ struct ovn_port *op, struct lflow_table *lflows)
{
ovs_assert(op->nbrp);
for (int i = 0; i < op->lrp_networks.n_ipv4_addrs; i++) {
add_route(lflows, op->od, op, op->lrp_networks.ipv4_addrs[i].addr_s,
op->lrp_networks.ipv4_addrs[i].network_s,
op->lrp_networks.ipv4_addrs[i].plen, NULL, false, 0,
- &op->nbrp->header_, false, ROUTE_PRIO_OFFSET_CONNECTED);
+ &op->nbrp->header_, false, ROUTE_PRIO_OFFSET_CONNECTED,
+ NULL);
}
for (int i = 0; i < op->lrp_networks.n_ipv6_addrs; i++) {
add_route(lflows, op->od, op, op->lrp_networks.ipv6_addrs[i].addr_s,
op->lrp_networks.ipv6_addrs[i].network_s,
op->lrp_networks.ipv6_addrs[i].plen, NULL, false, 0,
- &op->nbrp->header_, false, ROUTE_PRIO_OFFSET_CONNECTED);
+ &op->nbrp->header_, false, ROUTE_PRIO_OFFSET_CONNECTED,
+ NULL);
}
}
@@ -13101,7 +12507,8 @@ build_ip_routing_flows_for_lrp(
static void
build_ip_routing_flows_for_router_type_lsp(
struct ovn_port *op, const struct lr_lb_nat_data_table *lr_lbnats,
- const struct hmap *lr_ports, struct hmap *lflows)
+ const struct hmap *lr_ports, struct lflow_table *lflows,
+ struct lflow_ref *lflow_ref)
{
ovs_assert(op->nbsp);
if (!lsp_is_router(op->nbsp)) {
@@ -13136,7 +12543,8 @@ build_ip_routing_flows_for_router_type_lsp(
laddrs->ipv4_addrs[k].network_s,
laddrs->ipv4_addrs[k].plen, NULL, false, 0,
&peer->nbrp->header_, false,
- ROUTE_PRIO_OFFSET_CONNECTED);
+ ROUTE_PRIO_OFFSET_CONNECTED,
+ lflow_ref);
}
}
destroy_routable_addresses(&ra);
@@ -13148,7 +12556,7 @@ build_ip_routing_flows_for_router_type_lsp(
static void
build_static_route_flows_for_lrouter(
struct ovn_datapath *od, const struct chassis_features *features,
- struct hmap *lflows, const struct hmap *lr_ports,
+ struct lflow_table *lflows, const struct hmap *lr_ports,
const struct hmap *bfd_connections)
{
ovs_assert(od->nbr);
@@ -13212,7 +12620,7 @@ build_static_route_flows_for_lrouter(
*/
static void
build_mcast_lookup_flows_for_lrouter(
- struct ovn_datapath *od, struct hmap *lflows,
+ struct ovn_datapath *od, struct lflow_table *lflows,
struct ds *match, struct ds *actions)
{
ovs_assert(od->nbr);
@@ -13313,7 +12721,7 @@ build_mcast_lookup_flows_for_lrouter(
* advances to the next table for ARP/ND resolution. */
static void
build_ingress_policy_flows_for_lrouter(
- struct ovn_datapath *od, struct hmap *lflows,
+ struct ovn_datapath *od, struct lflow_table *lflows,
const struct hmap *lr_ports)
{
ovs_assert(od->nbr);
@@ -13347,7 +12755,7 @@ build_ingress_policy_flows_for_lrouter(
/* Local router ingress table ARP_RESOLVE: ARP Resolution. */
static void
build_arp_resolve_flows_for_lrouter(
- struct ovn_datapath *od, struct hmap *lflows)
+ struct ovn_datapath *od, struct lflow_table *lflows)
{
ovs_assert(od->nbr);
/* Multicast packets already have the outport set so just advance to
@@ -13365,10 +12773,12 @@ build_arp_resolve_flows_for_lrouter(
}
static void
-routable_addresses_to_lflows(struct hmap *lflows, struct ovn_port *router_port,
+routable_addresses_to_lflows(struct lflow_table *lflows,
+ struct ovn_port *router_port,
struct ovn_port *peer,
const struct lr_lb_nat_data_record *lr_lbnat_rec,
- struct ds *match, struct ds *actions)
+ struct ds *match, struct ds *actions,
+ struct lflow_ref *lflow_ref)
{
struct ovn_port_routable_addresses ra =
get_op_routable_addresses(router_port, lr_lbnat_rec);
@@ -13392,8 +12802,9 @@ routable_addresses_to_lflows(struct hmap *lflows, struct ovn_port *router_port,
ds_clear(actions);
ds_put_format(actions, "eth.dst = %s; next;", ra.laddrs[i].ea_s);
- ovn_lflow_add(lflows, peer->od, S_ROUTER_IN_ARP_RESOLVE, 100,
- ds_cstr(match), ds_cstr(actions));
+ ovn_lflow_add_with_lflow_ref(lflows, peer->od, S_ROUTER_IN_ARP_RESOLVE,
+ 100, ds_cstr(match), ds_cstr(actions),
+ lflow_ref);
}
destroy_routable_addresses(&ra);
}
@@ -13412,7 +12823,7 @@ routable_addresses_to_lflows(struct hmap *lflows, struct ovn_port *router_port,
static void
build_arp_resolve_flows_for_lrp(
struct ovn_port *op,
- struct hmap *lflows, struct ds *match, struct ds *actions)
+ struct lflow_table *lflows, struct ds *match, struct ds *actions)
{
ovs_assert(op->nbrp);
/* This is a logical router port. If next-hop IP address in
@@ -13486,7 +12897,7 @@ build_arp_resolve_flows_for_lrp(
/* This function adds ARP resolve flows related to a LSP. */
static void
build_arp_resolve_flows_for_lsp(
- struct ovn_port *op, struct hmap *lflows,
+ struct ovn_port *op, struct lflow_table *lflows,
const struct hmap *lr_ports,
struct ds *match, struct ds *actions)
{
@@ -13528,11 +12939,12 @@ build_arp_resolve_flows_for_lsp(
ds_clear(actions);
ds_put_format(actions, "eth.dst = %s; next;", ea_s);
- ovn_lflow_add_with_hint(lflows, peer->od,
+ ovn_lflow_add_with_lflow_ref_hint(lflows, peer->od,
S_ROUTER_IN_ARP_RESOLVE, 100,
ds_cstr(match),
ds_cstr(actions),
- &op->nbsp->header_);
+ &op->nbsp->header_,
+ op->lflow_ref);
}
}
@@ -13559,11 +12971,12 @@ build_arp_resolve_flows_for_lsp(
ds_clear(actions);
ds_put_format(actions, "eth.dst = %s; next;", ea_s);
- ovn_lflow_add_with_hint(lflows, peer->od,
+ ovn_lflow_add_with_lflow_ref_hint(lflows, peer->od,
S_ROUTER_IN_ARP_RESOLVE, 100,
ds_cstr(match),
ds_cstr(actions),
- &op->nbsp->header_);
+ &op->nbsp->header_,
+ op->lflow_ref);
}
}
}
@@ -13607,10 +13020,11 @@ build_arp_resolve_flows_for_lsp(
ds_clear(actions);
ds_put_format(actions, "eth.dst = %s; next;",
router_port->lrp_networks.ea_s);
- ovn_lflow_add_with_hint(lflows, peer->od,
+ ovn_lflow_add_with_lflow_ref_hint(lflows, peer->od,
S_ROUTER_IN_ARP_RESOLVE, 100,
ds_cstr(match), ds_cstr(actions),
- &op->nbsp->header_);
+ &op->nbsp->header_,
+ op->lflow_ref);
}
if (router_port->lrp_networks.n_ipv6_addrs) {
@@ -13623,10 +13037,11 @@ build_arp_resolve_flows_for_lsp(
ds_clear(actions);
ds_put_format(actions, "eth.dst = %s; next;",
router_port->lrp_networks.ea_s);
- ovn_lflow_add_with_hint(lflows, peer->od,
+ ovn_lflow_add_with_lflow_ref_hint(lflows, peer->od,
S_ROUTER_IN_ARP_RESOLVE, 100,
ds_cstr(match), ds_cstr(actions),
- &op->nbsp->header_);
+ &op->nbsp->header_,
+ op->lflow_ref);
}
}
}
@@ -13634,10 +13049,11 @@ build_arp_resolve_flows_for_lsp(
static void
build_arp_resolve_flows_for_lsp_routable_addresses(
- struct ovn_port *op, struct hmap *lflows,
+ struct ovn_port *op, struct lflow_table *lflows,
const struct hmap *lr_ports,
const struct lr_lb_nat_data_table *lr_lbnats,
- struct ds *match, struct ds *actions)
+ struct ds *match, struct ds *actions,
+ struct lflow_ref *lflow_ref)
{
if (!lsp_is_router(op->nbsp)) {
return;
@@ -13671,13 +13087,15 @@ build_arp_resolve_flows_for_lsp_routable_addresses(
lr_lbnat_rec = lr_lb_nat_data_table_find(lr_lbnats,
router_port->od->nbr);
routable_addresses_to_lflows(lflows, router_port, peer,
- lr_lbnat_rec, match, actions);
+ lr_lbnat_rec, match, actions,
+ lflow_ref);
}
}
}
static void
-build_icmperr_pkt_big_flows(struct ovn_port *op, int mtu, struct hmap *lflows,
+build_icmperr_pkt_big_flows(struct ovn_port *op, int mtu,
+ struct lflow_table *lflows,
const struct shash *meter_groups, struct ds *match,
struct ds *actions, enum ovn_stage stage,
struct ovn_port *outport)
@@ -13770,7 +13188,7 @@ build_icmperr_pkt_big_flows(struct ovn_port *op, int mtu, struct hmap *lflows,
static void
build_check_pkt_len_flows_for_lrp(struct ovn_port *op,
- struct hmap *lflows,
+ struct lflow_table *lflows,
const struct hmap *lr_ports,
const struct shash *meter_groups,
struct ds *match,
@@ -13820,7 +13238,7 @@ build_check_pkt_len_flows_for_lrp(struct ovn_port *op,
* */
static void
build_check_pkt_len_flows_for_lrouter(
- struct ovn_datapath *od, struct hmap *lflows,
+ struct ovn_datapath *od, struct lflow_table *lflows,
const struct hmap *lr_ports,
struct ds *match, struct ds *actions,
const struct shash *meter_groups)
@@ -13847,7 +13265,7 @@ build_check_pkt_len_flows_for_lrouter(
/* Logical router ingress table GW_REDIRECT: Gateway redirect. */
static void
build_gateway_redirect_flows_for_lrouter(
- struct ovn_datapath *od, struct hmap *lflows,
+ struct ovn_datapath *od, struct lflow_table *lflows,
struct ds *match, struct ds *actions)
{
ovs_assert(od->nbr);
@@ -13892,7 +13310,7 @@ build_gateway_redirect_flows_for_lrouter(
static void
build_lr_gateway_redirect_flows_for_nats(
const struct ovn_datapath *od, const struct lr_nat_record *lrnat_rec,
- struct hmap *lflows, struct ds *match, struct ds *actions)
+ struct lflow_table *lflows, struct ds *match, struct ds *actions)
{
ovs_assert(od->nbr);
for (size_t i = 0; i < od->n_l3dgw_ports; i++) {
@@ -13961,7 +13379,7 @@ build_lr_gateway_redirect_flows_for_nats(
* and sends an ARP/IPv6 NA request (priority 100). */
static void
build_arp_request_flows_for_lrouter(
- struct ovn_datapath *od, struct hmap *lflows,
+ struct ovn_datapath *od, struct lflow_table *lflows,
struct ds *match, struct ds *actions,
const struct shash *meter_groups)
{
@@ -14039,7 +13457,7 @@ build_arp_request_flows_for_lrouter(
*/
static void
build_egress_delivery_flows_for_lrouter_port(
- struct ovn_port *op, struct hmap *lflows,
+ struct ovn_port *op, struct lflow_table *lflows,
struct ds *match, struct ds *actions)
{
ovs_assert(op->nbrp);
@@ -14081,7 +13499,7 @@ build_egress_delivery_flows_for_lrouter_port(
static void
build_misc_local_traffic_drop_flows_for_lrouter(
- struct ovn_datapath *od, struct hmap *lflows)
+ struct ovn_datapath *od, struct lflow_table *lflows)
{
ovs_assert(od->nbr);
/* Allow IGMP and MLD packets (with TTL = 1) if the router is
@@ -14163,7 +13581,7 @@ build_misc_local_traffic_drop_flows_for_lrouter(
static void
build_dhcpv6_reply_flows_for_lrouter_port(
- struct ovn_port *op, struct hmap *lflows,
+ struct ovn_port *op, struct lflow_table *lflows,
struct ds *match)
{
ovs_assert(op->nbrp);
@@ -14183,7 +13601,7 @@ build_dhcpv6_reply_flows_for_lrouter_port(
static void
build_ipv6_input_flows_for_lrouter_port(
- struct ovn_port *op, struct hmap *lflows,
+ struct ovn_port *op, struct lflow_table *lflows,
struct ds *match, struct ds *actions,
const struct shash *meter_groups)
{
@@ -14352,7 +13770,7 @@ build_ipv6_input_flows_for_lrouter_port(
static void
build_lrouter_arp_nd_for_datapath(const struct ovn_datapath *od,
const struct lr_nat_record *lrnat_rec,
- struct hmap *lflows,
+ struct lflow_table *lflows,
const struct shash *meter_groups)
{
ovs_assert(od->nbr);
@@ -14404,7 +13822,7 @@ build_lrouter_arp_nd_for_datapath(const struct ovn_datapath *od,
/* Logical router ingress table 3: IP Input for IPv4. */
static void
build_lrouter_ipv4_ip_input(struct ovn_port *op,
- struct hmap *lflows,
+ struct lflow_table *lflows,
struct ds *match, struct ds *actions,
const struct shash *meter_groups)
{
@@ -14608,7 +14026,7 @@ build_lrouter_ipv4_ip_input(struct ovn_port *op,
/* Logical router ingress table 3: IP Input for IPv4. */
static void
build_lrouter_ipv4_ip_input_for_lbnats(struct ovn_port *op,
- struct hmap *lflows,
+ struct lflow_table *lflows,
const struct lr_lb_nat_data_record *lr_lbnat_rec,
struct ds *match, const struct shash *meter_groups)
{
@@ -14727,7 +14145,7 @@ build_lrouter_in_unsnat_match(const struct ovn_datapath *od,
}
static void
-build_lrouter_in_unsnat_stateless_flow(struct hmap *lflows,
+build_lrouter_in_unsnat_stateless_flow(struct lflow_table *lflows,
const struct ovn_datapath *od,
const struct nbrec_nat *nat,
struct ds *match,
@@ -14749,7 +14167,7 @@ build_lrouter_in_unsnat_stateless_flow(struct hmap *lflows,
}
static void
-build_lrouter_in_unsnat_in_czone_flow(struct hmap *lflows,
+build_lrouter_in_unsnat_in_czone_flow(struct lflow_table *lflows,
const struct ovn_datapath *od,
const struct nbrec_nat *nat,
struct ds *match, bool distributed_nat,
@@ -14783,7 +14201,7 @@ build_lrouter_in_unsnat_in_czone_flow(struct hmap *lflows,
}
static void
-build_lrouter_in_unsnat_flow(struct hmap *lflows,
+build_lrouter_in_unsnat_flow(struct lflow_table *lflows,
const struct ovn_datapath *od,
const struct nbrec_nat *nat, struct ds *match,
bool distributed_nat, bool is_v6,
@@ -14805,7 +14223,7 @@ build_lrouter_in_unsnat_flow(struct hmap *lflows,
}
static void
-build_lrouter_in_dnat_flow(struct hmap *lflows,
+build_lrouter_in_dnat_flow(struct lflow_table *lflows,
const struct ovn_datapath *od,
const struct lr_nat_record *lrnat_rec,
const struct nbrec_nat *nat, struct ds *match,
@@ -14877,7 +14295,7 @@ build_lrouter_in_dnat_flow(struct hmap *lflows,
}
static void
-build_lrouter_out_undnat_flow(struct hmap *lflows,
+build_lrouter_out_undnat_flow(struct lflow_table *lflows,
const struct ovn_datapath *od,
const struct nbrec_nat *nat, struct ds *match,
struct ds *actions, bool distributed_nat,
@@ -14928,7 +14346,7 @@ build_lrouter_out_undnat_flow(struct hmap *lflows,
}
static void
-build_lrouter_out_is_dnat_local(struct hmap *lflows,
+build_lrouter_out_is_dnat_local(struct lflow_table *lflows,
const struct ovn_datapath *od,
const struct nbrec_nat *nat, struct ds *match,
struct ds *actions, bool distributed_nat,
@@ -14959,7 +14377,7 @@ build_lrouter_out_is_dnat_local(struct hmap *lflows,
}
static void
-build_lrouter_out_snat_match(struct hmap *lflows,
+build_lrouter_out_snat_match(struct lflow_table *lflows,
const struct ovn_datapath *od,
const struct nbrec_nat *nat, struct ds *match,
bool distributed_nat, int cidr_bits, bool is_v6,
@@ -14988,7 +14406,7 @@ build_lrouter_out_snat_match(struct hmap *lflows,
}
static void
-build_lrouter_out_snat_stateless_flow(struct hmap *lflows,
+build_lrouter_out_snat_stateless_flow(struct lflow_table *lflows,
const struct ovn_datapath *od,
const struct nbrec_nat *nat,
struct ds *match, struct ds *actions,
@@ -15031,7 +14449,7 @@ build_lrouter_out_snat_stateless_flow(struct hmap *lflows,
}
static void
-build_lrouter_out_snat_in_czone_flow(struct hmap *lflows,
+build_lrouter_out_snat_in_czone_flow(struct lflow_table *lflows,
const struct ovn_datapath *od,
const struct nbrec_nat *nat,
struct ds *match,
@@ -15093,7 +14511,7 @@ build_lrouter_out_snat_in_czone_flow(struct hmap *lflows,
}
static void
-build_lrouter_out_snat_flow(struct hmap *lflows,
+build_lrouter_out_snat_flow(struct lflow_table *lflows,
const struct ovn_datapath *od,
const struct nbrec_nat *nat, struct ds *match,
struct ds *actions, bool distributed_nat,
@@ -15139,7 +14557,7 @@ build_lrouter_out_snat_flow(struct hmap *lflows,
}
static void
-build_lrouter_ingress_nat_check_pkt_len(struct hmap *lflows,
+build_lrouter_ingress_nat_check_pkt_len(struct lflow_table *lflows,
const struct nbrec_nat *nat,
const struct ovn_datapath *od,
bool is_v6, struct ds *match,
@@ -15211,7 +14629,7 @@ build_lrouter_ingress_nat_check_pkt_len(struct hmap *lflows,
}
static void
-build_lrouter_ingress_flow(struct hmap *lflows,
+build_lrouter_ingress_flow(struct lflow_table *lflows,
const struct ovn_datapath *od,
const struct nbrec_nat *nat, struct ds *match,
struct ds *actions, struct eth_addr mac,
@@ -15391,7 +14809,7 @@ lrouter_check_nat_entry(const struct ovn_datapath *od,
/* NAT, Defrag and load balancing. */
static void build_lr_nat_defrag_and_lb_default_flows(struct ovn_datapath *od,
- struct hmap *lflows)
+ struct lflow_table *lflows)
{
ovs_assert(od->nbr);
@@ -15416,7 +14834,8 @@ static void build_lr_nat_defrag_and_lb_default_flows(struct ovn_datapath *od,
static void
build_lrouter_nat_defrag_and_lb(
- const struct lr_lb_nat_data_record *lr_lbnat_rec, struct hmap *lflows,
+ const struct lr_lb_nat_data_record *lr_lbnat_rec,
+ struct lflow_table *lflows,
const struct hmap *ls_ports, const struct hmap *lr_ports,
struct ds *match, struct ds *actions,
const struct shash *meter_groups,
@@ -15799,23 +15218,22 @@ build_lsp_lflows_for_lbnats(struct ovn_port *lsp,
const struct lr_lb_nat_data_record *lr_lbnat_rec,
const struct lr_lb_nat_data_table *lr_lbnats,
const struct hmap *lr_ports,
- struct hmap *lflows,
+ struct lflow_table *lflows,
struct ds *match,
- struct ds *actions)
+ struct ds *actions,
+ struct lflow_ref *lflow_ref)
{
ovs_assert(lsp->nbsp);
- start_collecting_lflows();
build_lswitch_rport_arp_req_flows_for_lbnats(
lrp_peer, lr_lbnat_rec, lsp->od, lsp,
- lflows, &lsp->nbsp->header_);
+ lflows, &lsp->nbsp->header_, lflow_ref);
build_ip_routing_flows_for_router_type_lsp(lsp, lr_lbnats,
- lr_ports, lflows);
+ lr_ports, lflows,
+ lflow_ref);
build_arp_resolve_flows_for_lsp_routable_addresses(
- lsp, lflows, lr_ports, lr_lbnats, match, actions);
+ lsp, lflows, lr_ports, lr_lbnats, match, actions, lflow_ref);
build_lswitch_ip_unicast_lookup_for_nats(lsp, lr_lbnat_rec, lflows,
- match, actions);
- link_ovn_port_to_lflows(lsp, &collected_lflows);
- end_collecting_lflows();
+ match, actions, lflow_ref);
}
static void
@@ -15824,7 +15242,7 @@ build_lbnat_lflows_iterate_by_lsp(struct ovn_port *op,
const struct hmap *lr_ports,
struct ds *match,
struct ds *actions,
- struct hmap *lflows)
+ struct lflow_table *lflows)
{
ovs_assert(op->nbsp);
@@ -15838,7 +15256,7 @@ build_lbnat_lflows_iterate_by_lsp(struct ovn_port *op,
build_lsp_lflows_for_lbnats(op, op->peer, lr_lbnat_rec,
lr_lbnats, lr_ports, lflows,
- match, actions);
+ match, actions, op->lbnat_lflow_ref);
}
static void
@@ -15846,7 +15264,7 @@ build_lrp_lflows_for_lbnats(struct ovn_port *op,
const struct lr_lb_nat_data_record *lr_lbnat_rec,
const struct shash *meter_groups,
struct ds *match, struct ds *actions,
- struct hmap *lflows)
+ struct lflow_table *lflows)
{
/* Drop IP traffic destined to router owned IPs except if the IP is
* also a SNAT IP. Those are dropped later, in stage
@@ -15883,7 +15301,7 @@ build_lbnat_lflows_iterate_by_lrp(struct ovn_port *op,
const struct shash *meter_groups,
struct ds *match,
struct ds *actions,
- struct hmap *lflows)
+ struct lflow_table *lflows)
{
ovs_assert(op->nbrp);
@@ -15897,7 +15315,7 @@ build_lbnat_lflows_iterate_by_lrp(struct ovn_port *op,
static void
build_lr_lbnat_data_flows(const struct lr_lb_nat_data_record *lr_lbnat_rec,
- struct hmap *lflows,
+ struct lflow_table *lflows,
const struct hmap *ls_ports,
const struct hmap *lr_ports,
struct ds *match,
@@ -15920,7 +15338,7 @@ build_ls_lbacls_flows(const struct ls_lbacls_record *ls_lbacls_rec,
const struct ls_port_group_table *ls_pgs,
const struct chassis_features *features,
const struct shash *meter_groups,
- struct hmap *lflows)
+ struct lflow_table *lflows)
{
ovs_assert(ls_lbacls_rec->od);
@@ -15939,7 +15357,7 @@ struct lswitch_flow_build_info {
const struct ls_port_group_table *ls_port_groups;
const struct lr_lb_nat_data_table *lr_lbnats;
const struct ls_lbacls_table *ls_lbacls;
- struct hmap *lflows;
+ struct lflow_table *lflows;
struct hmap *igmp_groups;
const struct shash *meter_groups;
const struct hmap *lb_dps_map;
@@ -16022,10 +15440,9 @@ build_lswitch_and_lrouter_iterate_by_lsp(
const struct shash *meter_groups,
struct ds *match,
struct ds *actions,
- struct hmap *lflows)
+ struct lflow_table *lflows)
{
ovs_assert(op->nbsp);
- start_collecting_lflows();
/* Build Logical Switch Flows. */
build_lswitch_port_sec_op(op, lflows, actions, match);
@@ -16040,9 +15457,6 @@ build_lswitch_and_lrouter_iterate_by_lsp(
/* Build Logical Router Flows. */
build_arp_resolve_flows_for_lsp(op, lflows, lr_ports, match, actions);
-
- link_ovn_port_to_lflows(op, &collected_lflows);
- end_collecting_lflows();
}
/* Helper function to combine all lflow generation which is iterated by logical
@@ -16252,7 +15666,7 @@ noop_callback(struct worker_pool *pool OVS_UNUSED,
/* Do nothing */
}
-/* Fixes the hmap size (hmap->n) after parallel building the lflow_map when
+/* Fixes the hmap size (hmap->n) after parallel building the lflow_table when
* dp-groups is enabled, because in that case all threads are updating the
* global lflow hmap. Although the lflow_hash_lock prevents currently inserting
* to the same hash bucket, the hmap->n is updated currently by all threads and
@@ -16262,7 +15676,7 @@ noop_callback(struct worker_pool *pool OVS_UNUSED,
* after the worker threads complete the tasks in each iteration before any
* future operations on the lflow map. */
static void
-fix_flow_map_size(struct hmap *lflow_map,
+fix_flow_table_size(struct lflow_table *lflow_table,
struct lswitch_flow_build_info *lsiv,
size_t n_lsiv)
{
@@ -16270,7 +15684,7 @@ fix_flow_map_size(struct hmap *lflow_map,
for (size_t i = 0; i < n_lsiv; i++) {
total += lsiv[i].thread_lflow_counter;
}
- lflow_map->n = total;
+ lflow_table_set_size(lflow_table, total);
}
static void
@@ -16281,7 +15695,7 @@ build_lswitch_and_lrouter_flows(const struct ovn_datapaths *ls_datapaths,
const struct ls_port_group_table *ls_pgs,
const struct lr_lb_nat_data_table *lr_lbnats,
const struct ls_lbacls_table *ls_lbacls,
- struct hmap *lflows,
+ struct lflow_table *lflows,
struct hmap *igmp_groups,
const struct shash *meter_groups,
const struct hmap *lb_dps_map,
@@ -16328,7 +15742,7 @@ build_lswitch_and_lrouter_flows(const struct ovn_datapaths *ls_datapaths,
/* Run thread pool. */
run_pool_callback(build_lflows_pool, NULL, NULL, noop_callback);
- fix_flow_map_size(lflows, lsiv, build_lflows_pool->size);
+ fix_flow_table_size(lflows, lsiv, build_lflows_pool->size);
for (index = 0; index < build_lflows_pool->size; index++) {
ds_destroy(&lsiv[index].match);
@@ -16442,24 +15856,6 @@ build_lswitch_and_lrouter_flows(const struct ovn_datapaths *ls_datapaths,
free(svc_check_match);
}
-static ssize_t max_seen_lflow_size = 128;
-
-void
-lflow_data_init(struct lflow_data *data)
-{
- fast_hmap_size_for(&data->lflows, max_seen_lflow_size);
-}
-
-void
-lflow_data_destroy(struct lflow_data *data)
-{
- struct ovn_lflow *lflow;
- HMAP_FOR_EACH_SAFE (lflow, hmap_node, &data->lflows) {
- ovn_lflow_destroy(&data->lflows, lflow);
- }
- hmap_destroy(&data->lflows);
-}
-
void run_update_worker_pool(int n_threads)
{
/* If number of threads has been updated (or initially set),
@@ -16505,7 +15901,7 @@ create_sb_multicast_group(struct ovsdb_idl_txn *ovnsb_txn,
* constructing their contents based on the OVN_NB database. */
void build_lflows(struct ovsdb_idl_txn *ovnsb_txn,
struct lflow_input *input_data,
- struct hmap *lflows)
+ struct lflow_table *lflows)
{
struct hmap mcast_groups;
struct hmap igmp_groups;
@@ -16536,281 +15932,26 @@ void build_lflows(struct ovsdb_idl_txn *ovnsb_txn,
}
/* Parallel build may result in a suboptimal hash. Resize the
- * hash to a correct size before doing lookups */
-
- hmap_expand(lflows);
-
- if (hmap_count(lflows) > max_seen_lflow_size) {
- max_seen_lflow_size = hmap_count(lflows);
- }
-
- stopwatch_start(LFLOWS_DP_GROUPS_STOPWATCH_NAME, time_msec());
- /* Collecting all unique datapath groups. */
- struct hmap ls_dp_groups = HMAP_INITIALIZER(&ls_dp_groups);
- struct hmap lr_dp_groups = HMAP_INITIALIZER(&lr_dp_groups);
- struct hmap single_dp_lflows;
-
- /* Single dp_flows will never grow bigger than lflows,
- * thus the two hmaps will remain the same size regardless
- * of how many elements we remove from lflows and add to
- * single_dp_lflows.
- * Note - lflows is always sized for at least 128 flows.
- */
- fast_hmap_size_for(&single_dp_lflows, max_seen_lflow_size);
-
- struct ovn_lflow *lflow;
- HMAP_FOR_EACH_SAFE (lflow, hmap_node, lflows) {
- struct ovn_datapath **datapaths_array;
- size_t n_datapaths;
-
- if (ovn_stage_to_datapath_type(lflow->stage) == DP_SWITCH) {
- n_datapaths = ods_size(input_data->ls_datapaths);
- datapaths_array = input_data->ls_datapaths->array;
- } else {
- n_datapaths = ods_size(input_data->lr_datapaths);
- datapaths_array = input_data->lr_datapaths->array;
- }
-
- lflow->n_ods = bitmap_count1(lflow->dpg_bitmap, n_datapaths);
-
- ovs_assert(lflow->n_ods);
-
- if (lflow->n_ods == 1) {
- /* There is only one datapath, so it should be moved out of the
- * group to a single 'od'. */
- size_t index = bitmap_scan(lflow->dpg_bitmap, true, 0,
- n_datapaths);
-
- bitmap_set0(lflow->dpg_bitmap, index);
- lflow->od = datapaths_array[index];
-
- /* Logical flow should be re-hashed to allow lookups. */
- uint32_t hash = hmap_node_hash(&lflow->hmap_node);
- /* Remove from lflows. */
- hmap_remove(lflows, &lflow->hmap_node);
- hash = ovn_logical_flow_hash_datapath(&lflow->od->sb->header_.uuid,
- hash);
- /* Add to single_dp_lflows. */
- hmap_insert_fast(&single_dp_lflows, &lflow->hmap_node, hash);
- }
- }
-
- /* Merge multiple and single dp hashes. */
-
- fast_hmap_merge(lflows, &single_dp_lflows);
-
- hmap_destroy(&single_dp_lflows);
-
- stopwatch_stop(LFLOWS_DP_GROUPS_STOPWATCH_NAME, time_msec());
+ * lflow map to a correct size before doing lookups */
+ lflow_table_expand(lflows);
+
stopwatch_start(LFLOWS_TO_SB_STOPWATCH_NAME, time_msec());
-
- struct hmap lflows_temp = HMAP_INITIALIZER(&lflows_temp);
- /* Push changes to the Logical_Flow table to database. */
- const struct sbrec_logical_flow *sbflow;
- SBREC_LOGICAL_FLOW_TABLE_FOR_EACH_SAFE (sbflow,
- input_data->sbrec_logical_flow_table) {
- struct sbrec_logical_dp_group *dp_group = sbflow->logical_dp_group;
- struct ovn_datapath *logical_datapath_od = NULL;
- size_t i;
-
- /* Find one valid datapath to get the datapath type. */
- struct sbrec_datapath_binding *dp = sbflow->logical_datapath;
- if (dp) {
- logical_datapath_od = ovn_datapath_from_sbrec(
- &input_data->ls_datapaths->datapaths,
- &input_data->lr_datapaths->datapaths,
- dp);
- if (logical_datapath_od
- && ovn_datapath_is_stale(logical_datapath_od)) {
- logical_datapath_od = NULL;
- }
- }
- for (i = 0; dp_group && i < dp_group->n_datapaths; i++) {
- logical_datapath_od = ovn_datapath_from_sbrec(
- &input_data->ls_datapaths->datapaths,
- &input_data->lr_datapaths->datapaths,
- dp_group->datapaths[i]);
- if (logical_datapath_od
- && !ovn_datapath_is_stale(logical_datapath_od)) {
- break;
- }
- logical_datapath_od = NULL;
- }
-
- if (!logical_datapath_od) {
- /* This lflow has no valid logical datapaths. */
- sbrec_logical_flow_delete(sbflow);
- continue;
- }
-
- enum ovn_pipeline pipeline
- = !strcmp(sbflow->pipeline, "ingress") ? P_IN : P_OUT;
-
- lflow = ovn_lflow_find(
- lflows, dp_group ? NULL : logical_datapath_od,
- ovn_stage_build(ovn_datapath_get_type(logical_datapath_od),
- pipeline, sbflow->table_id),
- sbflow->priority, sbflow->match, sbflow->actions,
- sbflow->controller_meter, sbflow->hash);
- if (lflow) {
- struct hmap *dp_groups;
- size_t n_datapaths;
- bool is_switch;
-
- lflow->sb_uuid = sbflow->header_.uuid;
- is_switch = ovn_stage_to_datapath_type(lflow->stage) == DP_SWITCH;
- if (is_switch) {
- n_datapaths = ods_size(input_data->ls_datapaths);
- dp_groups = &ls_dp_groups;
- } else {
- n_datapaths = ods_size(input_data->lr_datapaths);
- dp_groups = &lr_dp_groups;
- }
- if (input_data->ovn_internal_version_changed) {
- const char *stage_name = smap_get_def(&sbflow->external_ids,
- "stage-name", "");
- const char *stage_hint = smap_get_def(&sbflow->external_ids,
- "stage-hint", "");
- const char *source = smap_get_def(&sbflow->external_ids,
- "source", "");
-
- if (strcmp(stage_name, ovn_stage_to_str(lflow->stage))) {
- sbrec_logical_flow_update_external_ids_setkey(sbflow,
- "stage-name", ovn_stage_to_str(lflow->stage));
- }
- if (lflow->stage_hint) {
- if (strcmp(stage_hint, lflow->stage_hint)) {
- sbrec_logical_flow_update_external_ids_setkey(sbflow,
- "stage-hint", lflow->stage_hint);
- }
- }
- if (lflow->where) {
- if (strcmp(source, lflow->where)) {
- sbrec_logical_flow_update_external_ids_setkey(sbflow,
- "source", lflow->where);
- }
- }
- }
-
- if (lflow->od) {
- sbrec_logical_flow_set_logical_datapath(sbflow, lflow->od->sb);
- sbrec_logical_flow_set_logical_dp_group(sbflow, NULL);
- } else {
- lflow->dpg = ovn_dp_group_get_or_create(
- ovnsb_txn, dp_groups, dp_group,
- lflow->n_ods, lflow->dpg_bitmap,
- n_datapaths, is_switch,
- input_data->ls_datapaths,
- input_data->lr_datapaths);
-
- sbrec_logical_flow_set_logical_datapath(sbflow, NULL);
- sbrec_logical_flow_set_logical_dp_group(sbflow,
- lflow->dpg->dp_group);
- }
-
- /* This lflow updated. Not needed anymore. */
- hmap_remove(lflows, &lflow->hmap_node);
- hmap_insert(&lflows_temp, &lflow->hmap_node,
- hmap_node_hash(&lflow->hmap_node));
- } else {
- sbrec_logical_flow_delete(sbflow);
- }
- }
-
- HMAP_FOR_EACH_SAFE (lflow, hmap_node, lflows) {
- const char *pipeline = ovn_stage_get_pipeline_name(lflow->stage);
- uint8_t table = ovn_stage_get_table(lflow->stage);
- struct hmap *dp_groups;
- size_t n_datapaths;
- bool is_switch;
-
- is_switch = ovn_stage_to_datapath_type(lflow->stage) == DP_SWITCH;
- if (is_switch) {
- n_datapaths = ods_size(input_data->ls_datapaths);
- dp_groups = &ls_dp_groups;
- } else {
- n_datapaths = ods_size(input_data->lr_datapaths);
- dp_groups = &lr_dp_groups;
- }
-
- lflow->sb_uuid = uuid_random();
- sbflow = sbrec_logical_flow_insert_persist_uuid(ovnsb_txn,
- &lflow->sb_uuid);
- if (lflow->od) {
- sbrec_logical_flow_set_logical_datapath(sbflow, lflow->od->sb);
- } else {
- lflow->dpg = ovn_dp_group_get_or_create(
- ovnsb_txn, dp_groups, NULL,
- lflow->n_ods, lflow->dpg_bitmap,
- n_datapaths, is_switch,
- input_data->ls_datapaths,
- input_data->lr_datapaths);
-
- sbrec_logical_flow_set_logical_dp_group(sbflow,
- lflow->dpg->dp_group);
- }
-
- sbrec_logical_flow_set_pipeline(sbflow, pipeline);
- sbrec_logical_flow_set_table_id(sbflow, table);
- sbrec_logical_flow_set_priority(sbflow, lflow->priority);
- sbrec_logical_flow_set_match(sbflow, lflow->match);
- sbrec_logical_flow_set_actions(sbflow, lflow->actions);
- if (lflow->io_port) {
- struct smap tags = SMAP_INITIALIZER(&tags);
- smap_add(&tags, "in_out_port", lflow->io_port);
- sbrec_logical_flow_set_tags(sbflow, &tags);
- smap_destroy(&tags);
- }
- sbrec_logical_flow_set_controller_meter(sbflow, lflow->ctrl_meter);
-
- /* Trim the source locator lflow->where, which looks something like
- * "ovn/northd/northd.c:1234", down to just the part following the
- * last slash, e.g. "northd.c:1234". */
- const char *slash = strrchr(lflow->where, '/');
-#if _WIN32
- const char *backslash = strrchr(lflow->where, '\\');
- if (!slash || backslash > slash) {
- slash = backslash;
- }
-#endif
- const char *where = slash ? slash + 1 : lflow->where;
-
- struct smap ids = SMAP_INITIALIZER(&ids);
- smap_add(&ids, "stage-name", ovn_stage_to_str(lflow->stage));
- smap_add(&ids, "source", where);
- if (lflow->stage_hint) {
- smap_add(&ids, "stage-hint", lflow->stage_hint);
- }
- sbrec_logical_flow_set_external_ids(sbflow, &ids);
- smap_destroy(&ids);
- hmap_remove(lflows, &lflow->hmap_node);
- hmap_insert(&lflows_temp, &lflow->hmap_node,
- hmap_node_hash(&lflow->hmap_node));
- }
- hmap_swap(lflows, &lflows_temp);
- hmap_destroy(&lflows_temp);
+ lflow_table_sync_to_sb(lflows, ovnsb_txn, input_data->ls_datapaths,
+ input_data->lr_datapaths,
+ input_data->ovn_internal_version_changed,
+ input_data->sbrec_logical_flow_table,
+ input_data->sbrec_logical_dp_group_table);
stopwatch_stop(LFLOWS_TO_SB_STOPWATCH_NAME, time_msec());
- struct ovn_dp_group *dpg;
- HMAP_FOR_EACH_POP (dpg, node, &ls_dp_groups) {
- bitmap_free(dpg->bitmap);
- free(dpg);
- }
- hmap_destroy(&ls_dp_groups);
- HMAP_FOR_EACH_POP (dpg, node, &lr_dp_groups) {
- bitmap_free(dpg->bitmap);
- free(dpg);
- }
- hmap_destroy(&lr_dp_groups);
/* Push changes to the Multicast_Group table to database. */
const struct sbrec_multicast_group *sbmc;
SBREC_MULTICAST_GROUP_TABLE_FOR_EACH_SAFE (sbmc,
input_data->sbrec_multicast_group_table) {
struct ovn_datapath *od = ovn_datapath_from_sbrec(
- &input_data->ls_datapaths->datapaths,
- &input_data->lr_datapaths->datapaths,
- sbmc->datapath);
+ &input_data->ls_datapaths->datapaths,
+ &input_data->lr_datapaths->datapaths,
+ sbmc->datapath);
if (!od || ovn_datapath_is_stale(od)) {
sbrec_multicast_group_delete(sbmc);
@@ -16850,120 +15991,11 @@ void build_lflows(struct ovsdb_idl_txn *ovnsb_txn,
hmap_destroy(&mcast_groups);
}
-static void
-sync_lsp_lflows_to_sb(struct ovsdb_idl_txn *ovnsb_txn,
- struct lflow_input *lflow_input,
- struct hmap *lflows,
- struct ovn_lflow *lflow)
-{
- size_t n_datapaths;
- struct ovn_datapath **datapaths_array;
- if (ovn_stage_to_datapath_type(lflow->stage) == DP_SWITCH) {
- n_datapaths = ods_size(lflow_input->ls_datapaths);
- datapaths_array = lflow_input->ls_datapaths->array;
- } else {
- n_datapaths = ods_size(lflow_input->lr_datapaths);
- datapaths_array = lflow_input->lr_datapaths->array;
- }
- uint32_t n_ods = bitmap_count1(lflow->dpg_bitmap, n_datapaths);
- ovs_assert(n_ods == 1);
- /* There is only one datapath, so it should be moved out of the
- * group to a single 'od'. */
- size_t index = bitmap_scan(lflow->dpg_bitmap, true, 0,
- n_datapaths);
-
- bitmap_set0(lflow->dpg_bitmap, index);
- lflow->od = datapaths_array[index];
-
- /* Logical flow should be re-hashed to allow lookups. */
- uint32_t hash = hmap_node_hash(&lflow->hmap_node);
- /* Remove from lflows. */
- hmap_remove(lflows, &lflow->hmap_node);
- hash = ovn_logical_flow_hash_datapath(&lflow->od->sb->header_.uuid,
- hash);
- /* Add back. */
- hmap_insert(lflows, &lflow->hmap_node, hash);
-
- /* Sync to SB. */
- const struct sbrec_logical_flow *sbflow;
- /* Note: uuid_random acquires a global mutex. If we parallelize the sync to
- * SB this may become a bottleneck. */
- lflow->sb_uuid = uuid_random();
- sbflow = sbrec_logical_flow_insert_persist_uuid(ovnsb_txn,
- &lflow->sb_uuid);
- const char *pipeline = ovn_stage_get_pipeline_name(lflow->stage);
- uint8_t table = ovn_stage_get_table(lflow->stage);
- sbrec_logical_flow_set_logical_datapath(sbflow, lflow->od->sb);
- sbrec_logical_flow_set_logical_dp_group(sbflow, NULL);
- sbrec_logical_flow_set_pipeline(sbflow, pipeline);
- sbrec_logical_flow_set_table_id(sbflow, table);
- sbrec_logical_flow_set_priority(sbflow, lflow->priority);
- sbrec_logical_flow_set_match(sbflow, lflow->match);
- sbrec_logical_flow_set_actions(sbflow, lflow->actions);
- if (lflow->io_port) {
- struct smap tags = SMAP_INITIALIZER(&tags);
- smap_add(&tags, "in_out_port", lflow->io_port);
- sbrec_logical_flow_set_tags(sbflow, &tags);
- smap_destroy(&tags);
- }
- sbrec_logical_flow_set_controller_meter(sbflow, lflow->ctrl_meter);
- /* Trim the source locator lflow->where, which looks something like
- * "ovn/northd/northd.c:1234", down to just the part following the
- * last slash, e.g. "northd.c:1234". */
- const char *slash = strrchr(lflow->where, '/');
-#if _WIN32
- const char *backslash = strrchr(lflow->where, '\\');
- if (!slash || backslash > slash) {
- slash = backslash;
- }
-#endif
- const char *where = slash ? slash + 1 : lflow->where;
-
- struct smap ids = SMAP_INITIALIZER(&ids);
- smap_add(&ids, "stage-name", ovn_stage_to_str(lflow->stage));
- smap_add(&ids, "source", where);
- if (lflow->stage_hint) {
- smap_add(&ids, "stage-hint", lflow->stage_hint);
- }
- sbrec_logical_flow_set_external_ids(sbflow, &ids);
- smap_destroy(&ids);
-}
-
-static bool
-delete_lflow_for_lsp(struct ovn_port *op, bool is_update,
- const struct sbrec_logical_flow_table *sb_lflow_table,
- struct hmap *lflows)
-{
- struct lflow_ref_node *lfrn;
- const char *operation = is_update ? "updated" : "deleted";
- LIST_FOR_EACH_SAFE (lfrn, lflow_list_node, &op->lflows) {
- VLOG_DBG("Deleting SB lflow "UUID_FMT" for %s port %s",
- UUID_ARGS(&lfrn->lflow->sb_uuid), operation, op->key);
-
- const struct sbrec_logical_flow *sblflow =
- sbrec_logical_flow_table_get_for_uuid(sb_lflow_table,
- &lfrn->lflow->sb_uuid);
- if (sblflow) {
- sbrec_logical_flow_delete(sblflow);
- } else {
- static struct vlog_rate_limit rl =
- VLOG_RATE_LIMIT_INIT(1, 1);
- VLOG_WARN_RL(&rl, "SB lflow "UUID_FMT" not found when handling "
- "%s port %s. Recompute.",
- UUID_ARGS(&lfrn->lflow->sb_uuid), operation, op->key);
- return false;
- }
-
- ovn_lflow_destroy(lflows, lfrn->lflow);
- }
- return true;
-}
-
bool
lflow_handle_northd_port_changes(struct ovsdb_idl_txn *ovnsb_txn,
struct tracked_ovn_ports *trk_ovn_ports,
struct lflow_input *lflow_input,
- struct hmap *lflows)
+ struct lflow_table *lflows)
{
struct hmapx_node *hmapx_node;
struct ovn_port *op;
@@ -16973,12 +16005,13 @@ lflow_handle_northd_port_changes(struct ovsdb_idl_txn *ovnsb_txn,
/* We don't support lflow handling for deleted logical router
* ports yet. */
ovs_assert(op->nbsp);
-
- if (!delete_lflow_for_lsp(op, false,
- lflow_input->sbrec_logical_flow_table,
- lflows)) {
- return false;
- }
+ lflow_ref_clear_and_sync_lflows(op->lflow_ref, op->od, lflows,
+ ovnsb_txn,
+ lflow_input->ls_datapaths,
+ lflow_input->lr_datapaths,
+ lflow_input->ovn_internal_version_changed,
+ lflow_input->sbrec_logical_flow_table,
+ lflow_input->sbrec_logical_dp_group_table);
/* No need to update SB multicast groups, thanks to weak
* references. */
@@ -16990,12 +16023,8 @@ lflow_handle_northd_port_changes(struct ovsdb_idl_txn *ovnsb_txn,
* ports yet. */
ovs_assert(op->nbsp);
- /* Delete old lflows. */
- if (!delete_lflow_for_lsp(op, true,
- lflow_input->sbrec_logical_flow_table,
- lflows)) {
- return false;
- }
+ /* Clear old lflows. */
+ lflow_ref_clear_lflows(op->lflow_ref, op->od, lflows);
/* Generate new lflows. */
struct ds match = DS_EMPTY_INITIALIZER;
@@ -17011,11 +16040,18 @@ lflow_handle_northd_port_changes(struct ovsdb_idl_txn *ovnsb_txn,
lr_lbnat_rec = lr_lb_nat_data_table_find(lflow_input->lr_lbnats,
op->peer->od->nbr);
ovs_assert(lr_lbnat_rec);
-
+ lflow_ref_clear_lflows(op->lbnat_lflow_ref, op->od, lflows);
build_lsp_lflows_for_lbnats(op, op->peer, lr_lbnat_rec,
lflow_input->lr_lbnats,
lflow_input->lr_ports,
- lflows, &match, &actions);
+ lflows, &match, &actions,
+ op->lbnat_lflow_ref);
+ lflow_ref_sync_lflows_to_sb(op->lbnat_lflow_ref, lflows, ovnsb_txn,
+ lflow_input->ls_datapaths,
+ lflow_input->lr_datapaths,
+ lflow_input->ovn_internal_version_changed,
+ lflow_input->sbrec_logical_flow_table,
+ lflow_input->sbrec_logical_dp_group_table);
}
ds_destroy(&match);
@@ -17025,11 +16061,12 @@ lflow_handle_northd_port_changes(struct ovsdb_idl_txn *ovnsb_txn,
* groups. */
/* Sync the new flows to SB. */
- struct lflow_ref_node *lfrn;
- LIST_FOR_EACH (lfrn, lflow_list_node, &op->lflows) {
- sync_lsp_lflows_to_sb(ovnsb_txn, lflow_input, lflows,
- lfrn->lflow);
- }
+ lflow_ref_sync_lflows_to_sb(op->lflow_ref, lflows, ovnsb_txn,
+ lflow_input->ls_datapaths,
+ lflow_input->lr_datapaths,
+ lflow_input->ovn_internal_version_changed,
+ lflow_input->sbrec_logical_flow_table,
+ lflow_input->sbrec_logical_dp_group_table);
}
HMAPX_FOR_EACH (hmapx_node, &trk_ovn_ports->created) {
@@ -17083,11 +16120,12 @@ lflow_handle_northd_port_changes(struct ovsdb_idl_txn *ovnsb_txn,
}
/* Sync the newly added flows to SB. */
- struct lflow_ref_node *lfrn;
- LIST_FOR_EACH (lfrn, lflow_list_node, &op->lflows) {
- sync_lsp_lflows_to_sb(ovnsb_txn, lflow_input, lflows,
- lfrn->lflow);
- }
+ lflow_ref_sync_lflows_to_sb(op->lflow_ref, lflows, ovnsb_txn,
+ lflow_input->ls_datapaths,
+ lflow_input->lr_datapaths,
+ lflow_input->ovn_internal_version_changed,
+ lflow_input->sbrec_logical_flow_table,
+ lflow_input->sbrec_logical_dp_group_table);
}
return true;
@@ -23,6 +23,7 @@
#include "northd/en-port-group.h"
#include "northd/ipam.h"
#include "openvswitch/hmap.h"
+#include "ovs-thread.h"
struct northd_input {
/* Northbound table references */
@@ -145,13 +146,6 @@ struct northd_data {
struct northd_tracked_data trk_northd_changes;
};
-struct lflow_data {
- struct hmap lflows;
-};
-
-void lflow_data_init(struct lflow_data *);
-void lflow_data_destroy(struct lflow_data *);
-
struct lr_nat_table;
struct lflow_input {
@@ -163,6 +157,7 @@ struct lflow_input {
const struct sbrec_logical_flow_table *sbrec_logical_flow_table;
const struct sbrec_multicast_group_table *sbrec_multicast_group_table;
const struct sbrec_igmp_group_table *sbrec_igmp_group_table;
+ const struct sbrec_logical_dp_group_table *sbrec_logical_dp_group_table;
/* Indexes */
struct ovsdb_idl_index *sbrec_mcast_group_by_name_dp;
@@ -182,6 +177,15 @@ struct lflow_input {
bool ovn_internal_version_changed;
};
+extern int parallelization_state;
+enum {
+ STATE_NULL, /* parallelization is off */
+ STATE_INIT_HASH_SIZES, /* parallelization is on; hashes sizing needed */
+ STATE_USE_PARALLELIZATION /* parallelization is on */
+};
+
+extern thread_local size_t thread_lflow_counter;
+
/*
* Multicast snooping and querier per datapath configuration.
*/
@@ -331,8 +335,181 @@ ods_size(const struct ovn_datapaths *datapaths)
return hmap_count(&datapaths->datapaths);
}
+struct ovn_datapath *ovn_datapath_from_sbrec(
+ const struct hmap *ls_datapaths, const struct hmap *lr_datapaths,
+ const struct sbrec_datapath_binding *);
+
+static inline bool
+ovn_datapath_is_stale(const struct ovn_datapath *od)
+{
+ return !od->nbr && !od->nbs;
+};
+
bool od_has_lb_vip(const struct ovn_datapath *od);
+/* Pipeline stages. */
+
+/* The two purposes for which ovn-northd uses OVN logical datapaths. */
+enum ovn_datapath_type {
+ DP_SWITCH, /* OVN logical switch. */
+ DP_ROUTER /* OVN logical router. */
+};
+
+/* Returns an "enum ovn_stage" built from the arguments.
+ *
+ * (It's better to use ovn_stage_build() for type-safety reasons, but inline
+ * functions can't be used in enums or switch cases.) */
+#define OVN_STAGE_BUILD(DP_TYPE, PIPELINE, TABLE) \
+ (((DP_TYPE) << 9) | ((PIPELINE) << 8) | (TABLE))
+
+/* A stage within an OVN logical switch or router.
+ *
+ * An "enum ovn_stage" indicates whether the stage is part of a logical switch
+ * or router, whether the stage is part of the ingress or egress pipeline, and
+ * the table within that pipeline. The first three components are combined to
+ * form the stage's full name, e.g. S_SWITCH_IN_PORT_SEC_L2,
+ * S_ROUTER_OUT_DELIVERY. */
+enum ovn_stage {
+#define PIPELINE_STAGES \
+ /* Logical switch ingress stages. */ \
+ PIPELINE_STAGE(SWITCH, IN, CHECK_PORT_SEC, 0, "ls_in_check_port_sec") \
+ PIPELINE_STAGE(SWITCH, IN, APPLY_PORT_SEC, 1, "ls_in_apply_port_sec") \
+ PIPELINE_STAGE(SWITCH, IN, LOOKUP_FDB , 2, "ls_in_lookup_fdb") \
+ PIPELINE_STAGE(SWITCH, IN, PUT_FDB, 3, "ls_in_put_fdb") \
+ PIPELINE_STAGE(SWITCH, IN, PRE_ACL, 4, "ls_in_pre_acl") \
+ PIPELINE_STAGE(SWITCH, IN, PRE_LB, 5, "ls_in_pre_lb") \
+ PIPELINE_STAGE(SWITCH, IN, PRE_STATEFUL, 6, "ls_in_pre_stateful") \
+ PIPELINE_STAGE(SWITCH, IN, ACL_HINT, 7, "ls_in_acl_hint") \
+ PIPELINE_STAGE(SWITCH, IN, ACL_EVAL, 8, "ls_in_acl_eval") \
+ PIPELINE_STAGE(SWITCH, IN, ACL_ACTION, 9, "ls_in_acl_action") \
+ PIPELINE_STAGE(SWITCH, IN, QOS_MARK, 10, "ls_in_qos_mark") \
+ PIPELINE_STAGE(SWITCH, IN, QOS_METER, 11, "ls_in_qos_meter") \
+ PIPELINE_STAGE(SWITCH, IN, LB_AFF_CHECK, 12, "ls_in_lb_aff_check") \
+ PIPELINE_STAGE(SWITCH, IN, LB, 13, "ls_in_lb") \
+ PIPELINE_STAGE(SWITCH, IN, LB_AFF_LEARN, 14, "ls_in_lb_aff_learn") \
+ PIPELINE_STAGE(SWITCH, IN, PRE_HAIRPIN, 15, "ls_in_pre_hairpin") \
+ PIPELINE_STAGE(SWITCH, IN, NAT_HAIRPIN, 16, "ls_in_nat_hairpin") \
+ PIPELINE_STAGE(SWITCH, IN, HAIRPIN, 17, "ls_in_hairpin") \
+ PIPELINE_STAGE(SWITCH, IN, ACL_AFTER_LB_EVAL, 18, \
+ "ls_in_acl_after_lb_eval") \
+ PIPELINE_STAGE(SWITCH, IN, ACL_AFTER_LB_ACTION, 19, \
+ "ls_in_acl_after_lb_action") \
+ PIPELINE_STAGE(SWITCH, IN, STATEFUL, 20, "ls_in_stateful") \
+ PIPELINE_STAGE(SWITCH, IN, ARP_ND_RSP, 21, "ls_in_arp_rsp") \
+ PIPELINE_STAGE(SWITCH, IN, DHCP_OPTIONS, 22, "ls_in_dhcp_options") \
+ PIPELINE_STAGE(SWITCH, IN, DHCP_RESPONSE, 23, "ls_in_dhcp_response") \
+ PIPELINE_STAGE(SWITCH, IN, DNS_LOOKUP, 24, "ls_in_dns_lookup") \
+ PIPELINE_STAGE(SWITCH, IN, DNS_RESPONSE, 25, "ls_in_dns_response") \
+ PIPELINE_STAGE(SWITCH, IN, EXTERNAL_PORT, 26, "ls_in_external_port") \
+ PIPELINE_STAGE(SWITCH, IN, L2_LKUP, 27, "ls_in_l2_lkup") \
+ PIPELINE_STAGE(SWITCH, IN, L2_UNKNOWN, 28, "ls_in_l2_unknown") \
+ \
+ /* Logical switch egress stages. */ \
+ PIPELINE_STAGE(SWITCH, OUT, PRE_ACL, 0, "ls_out_pre_acl") \
+ PIPELINE_STAGE(SWITCH, OUT, PRE_LB, 1, "ls_out_pre_lb") \
+ PIPELINE_STAGE(SWITCH, OUT, PRE_STATEFUL, 2, "ls_out_pre_stateful") \
+ PIPELINE_STAGE(SWITCH, OUT, ACL_HINT, 3, "ls_out_acl_hint") \
+ PIPELINE_STAGE(SWITCH, OUT, ACL_EVAL, 4, "ls_out_acl_eval") \
+ PIPELINE_STAGE(SWITCH, OUT, ACL_ACTION, 5, "ls_out_acl_action") \
+ PIPELINE_STAGE(SWITCH, OUT, QOS_MARK, 6, "ls_out_qos_mark") \
+ PIPELINE_STAGE(SWITCH, OUT, QOS_METER, 7, "ls_out_qos_meter") \
+ PIPELINE_STAGE(SWITCH, OUT, STATEFUL, 8, "ls_out_stateful") \
+ PIPELINE_STAGE(SWITCH, OUT, CHECK_PORT_SEC, 9, "ls_out_check_port_sec") \
+ PIPELINE_STAGE(SWITCH, OUT, APPLY_PORT_SEC, 10, "ls_out_apply_port_sec") \
+ \
+ /* Logical router ingress stages. */ \
+ PIPELINE_STAGE(ROUTER, IN, ADMISSION, 0, "lr_in_admission") \
+ PIPELINE_STAGE(ROUTER, IN, LOOKUP_NEIGHBOR, 1, "lr_in_lookup_neighbor") \
+ PIPELINE_STAGE(ROUTER, IN, LEARN_NEIGHBOR, 2, "lr_in_learn_neighbor") \
+ PIPELINE_STAGE(ROUTER, IN, IP_INPUT, 3, "lr_in_ip_input") \
+ PIPELINE_STAGE(ROUTER, IN, UNSNAT, 4, "lr_in_unsnat") \
+ PIPELINE_STAGE(ROUTER, IN, DEFRAG, 5, "lr_in_defrag") \
+ PIPELINE_STAGE(ROUTER, IN, LB_AFF_CHECK, 6, "lr_in_lb_aff_check") \
+ PIPELINE_STAGE(ROUTER, IN, DNAT, 7, "lr_in_dnat") \
+ PIPELINE_STAGE(ROUTER, IN, LB_AFF_LEARN, 8, "lr_in_lb_aff_learn") \
+ PIPELINE_STAGE(ROUTER, IN, ECMP_STATEFUL, 9, "lr_in_ecmp_stateful") \
+ PIPELINE_STAGE(ROUTER, IN, ND_RA_OPTIONS, 10, "lr_in_nd_ra_options") \
+ PIPELINE_STAGE(ROUTER, IN, ND_RA_RESPONSE, 11, "lr_in_nd_ra_response") \
+ PIPELINE_STAGE(ROUTER, IN, IP_ROUTING_PRE, 12, "lr_in_ip_routing_pre") \
+ PIPELINE_STAGE(ROUTER, IN, IP_ROUTING, 13, "lr_in_ip_routing") \
+ PIPELINE_STAGE(ROUTER, IN, IP_ROUTING_ECMP, 14, "lr_in_ip_routing_ecmp") \
+ PIPELINE_STAGE(ROUTER, IN, POLICY, 15, "lr_in_policy") \
+ PIPELINE_STAGE(ROUTER, IN, POLICY_ECMP, 16, "lr_in_policy_ecmp") \
+ PIPELINE_STAGE(ROUTER, IN, ARP_RESOLVE, 17, "lr_in_arp_resolve") \
+ PIPELINE_STAGE(ROUTER, IN, CHK_PKT_LEN, 18, "lr_in_chk_pkt_len") \
+ PIPELINE_STAGE(ROUTER, IN, LARGER_PKTS, 19, "lr_in_larger_pkts") \
+ PIPELINE_STAGE(ROUTER, IN, GW_REDIRECT, 20, "lr_in_gw_redirect") \
+ PIPELINE_STAGE(ROUTER, IN, ARP_REQUEST, 21, "lr_in_arp_request") \
+ \
+ /* Logical router egress stages. */ \
+ PIPELINE_STAGE(ROUTER, OUT, CHECK_DNAT_LOCAL, 0, \
+ "lr_out_chk_dnat_local") \
+ PIPELINE_STAGE(ROUTER, OUT, UNDNAT, 1, "lr_out_undnat") \
+ PIPELINE_STAGE(ROUTER, OUT, POST_UNDNAT, 2, "lr_out_post_undnat") \
+ PIPELINE_STAGE(ROUTER, OUT, SNAT, 3, "lr_out_snat") \
+ PIPELINE_STAGE(ROUTER, OUT, POST_SNAT, 4, "lr_out_post_snat") \
+ PIPELINE_STAGE(ROUTER, OUT, EGR_LOOP, 5, "lr_out_egr_loop") \
+ PIPELINE_STAGE(ROUTER, OUT, DELIVERY, 6, "lr_out_delivery")
+
+#define PIPELINE_STAGE(DP_TYPE, PIPELINE, STAGE, TABLE, NAME) \
+ S_##DP_TYPE##_##PIPELINE##_##STAGE \
+ = OVN_STAGE_BUILD(DP_##DP_TYPE, P_##PIPELINE, TABLE),
+ PIPELINE_STAGES
+#undef PIPELINE_STAGE
+};
+
+enum ovn_datapath_type ovn_stage_to_datapath_type(enum ovn_stage stage);
+
+
+/* Returns 'od''s datapath type. */
+static inline enum ovn_datapath_type
+ovn_datapath_get_type(const struct ovn_datapath *od)
+{
+ return od->nbs ? DP_SWITCH : DP_ROUTER;
+}
+
+/* Returns an "enum ovn_stage" built from the arguments. */
+static inline enum ovn_stage
+ovn_stage_build(enum ovn_datapath_type dp_type, enum ovn_pipeline pipeline,
+ uint8_t table)
+{
+ return OVN_STAGE_BUILD(dp_type, pipeline, table);
+}
+
+/* Returns the pipeline to which 'stage' belongs. */
+static inline enum ovn_pipeline
+ovn_stage_get_pipeline(enum ovn_stage stage)
+{
+ return (stage >> 8) & 1;
+}
+
+/* Returns the pipeline name to which 'stage' belongs. */
+static inline const char *
+ovn_stage_get_pipeline_name(enum ovn_stage stage)
+{
+ return ovn_stage_get_pipeline(stage) == P_IN ? "ingress" : "egress";
+}
+
+/* Returns the table to which 'stage' belongs. */
+static inline uint8_t
+ovn_stage_get_table(enum ovn_stage stage)
+{
+ return stage & 0xff;
+}
+
+/* Returns a string name for 'stage'. */
+static inline const char *
+ovn_stage_to_str(enum ovn_stage stage)
+{
+ switch (stage) {
+#define PIPELINE_STAGE(DP_TYPE, PIPELINE, STAGE, TABLE, NAME) \
+ case S_##DP_TYPE##_##PIPELINE##_##STAGE: return NAME;
+ PIPELINE_STAGES
+#undef PIPELINE_STAGE
+ default: return "<unknown>";
+ }
+}
+
/* A logical switch port or logical router port.
*
* In steady state, an ovn_port points to a northbound Logical_Switch_Port
@@ -423,8 +600,7 @@ struct ovn_port {
/* Temporarily used for traversing a list (or hmap) of ports. */
bool visited;
- /* List of struct lflow_ref_node that points to the lflows generated by
- * this ovn_port.
+ /* Reference of lflows generated for this ovn_port.
*
* This data is initialized and destroyed by the en_northd node, but
* populated and used only by the en_lflow node. Ideally this data should
@@ -442,8 +618,16 @@ struct ovn_port {
* Adding the list here is more straightforward. The drawback is that we
* need to keep in mind that this data belongs to en_lflow node, so never
* access it from any other nodes.
+ *
+ * 'lflow_ref' is used to reference generic logical flows generated for
+ * this ovn_port.
+ *
+ * 'lbnat_lflow_ref' is used for logical switch ports of type
+ * 'patch/router' to reference logical flows generated for this ovn_port
+ * from the 'lr_lb_nat_data_table' record of the peer port's datapath.
*/
- struct ovs_list lflows;
+ struct lflow_ref *lflow_ref;
+ struct lflow_ref *lbnat_lflow_ref;
};
void ovnnb_db_run(struct northd_input *input_data,
@@ -466,13 +650,15 @@ void northd_destroy(struct northd_data *data);
void northd_init(struct northd_data *data);
void northd_indices_create(struct northd_data *data,
struct ovsdb_idl *ovnsb_idl);
+
+struct lflow_table;
void build_lflows(struct ovsdb_idl_txn *ovnsb_txn,
struct lflow_input *input_data,
- struct hmap *lflows);
+ struct lflow_table *);
bool lflow_handle_northd_port_changes(struct ovsdb_idl_txn *ovnsb_txn,
struct tracked_ovn_ports *,
struct lflow_input *,
- struct hmap *lflows);
+ struct lflow_table *lflows);
bool northd_handle_sb_port_binding_changes(
const struct sbrec_port_binding_table *, struct hmap *ls_ports,
struct hmap *lr_ports);
@@ -848,6 +848,10 @@ main(int argc, char *argv[])
ovsdb_idl_omit_alert(ovnsb_idl_loop.idl,
&sbrec_port_group_columns[i]);
}
+ for (size_t i = 0; i < SBREC_LOGICAL_DP_GROUP_N_COLUMNS; i++) {
+ ovsdb_idl_omit_alert(ovnsb_idl_loop.idl,
+ &sbrec_logical_dp_group_columns[i]);
+ }
unixctl_command_register("sb-connection-status", "", 0, 0,
ovn_conn_show, ovnsb_idl_loop.idl);