From patchwork Wed Aug 9 16:37:46 2023
X-Patchwork-Submitter: Numan Siddique
X-Patchwork-Id: 1819459
From: numans@ovn.org
To: dev@openvswitch.org
Date: Wed, 9 Aug 2023 22:07:46 +0530
Message-Id: <20230809163746.1561622-1-numans@ovn.org>
X-Mailer: git-send-email 2.40.1
In-Reply-To: <20230809163616.1561383-1-numans@ovn.org>
References: <20230809163616.1561383-1-numans@ovn.org>
Subject: [ovs-dev] [PATCH ovn v5 11/14] northd: Use objdep mgr for datapath/lb to lflow references.
X-BeenThere: ovs-dev@openvswitch.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: ovs-dev-bounces@openvswitch.org Sender: "dev" From: Numan Siddique objdep_mgr is maintained in each 'ovn_datapath' and 'lb_datapaths' to store the references to the logical flows generated for each of these resources. The lflows related to load balancers for a datapath are stored in a separate objdep_type - OBJDEP_TYPE_LB and lflows which are common to both load balancers and conntrack related are stored in a separate objdep_type - OBJDEP_TYPE_CT_LB. This separation helps in incrementally processing the load balancer changes. Upcoming commit will make use of this to handle logical switch/router and load balancer changes incrementally in the lflow engine node. Signed-off-by: Numan Siddique --- lib/lb.c | 2 + lib/lb.h | 3 + lib/objdep.h | 4 + northd/northd.c | 1852 +++++++++++++++++++++++++++++++---------------- northd/northd.h | 2 + 5 files changed, 1219 insertions(+), 644 deletions(-) diff --git a/lib/lb.c b/lib/lb.c index d0d562b6fb..4a82222cfb 100644 --- a/lib/lb.c +++ b/lib/lb.c @@ -1082,6 +1082,7 @@ ovn_lb_datapaths_create(const struct ovn_northd_lb *lb, size_t n_ls_datapaths, lb_dps->nb_ls_map = bitmap_allocate(n_ls_datapaths); lb_dps->nb_lr_map = bitmap_allocate(n_lr_datapaths); + objdep_mgr_init(&lb_dps->lflow_dep_mgr); return lb_dps; } @@ -1104,6 +1105,7 @@ ovn_lb_datapaths_destroy(struct ovn_lb_datapaths *lb_dps) { bitmap_free(lb_dps->nb_lr_map); bitmap_free(lb_dps->nb_ls_map); + objdep_mgr_destroy(&lb_dps->lflow_dep_mgr); free(lb_dps); } diff --git a/lib/lb.h b/lib/lb.h index b8e3c1e8fb..e647cb02ca 100644 --- a/lib/lb.h +++ b/lib/lb.h @@ -22,6 +22,7 @@ #include "lib/smap.h" #include "openvswitch/hmap.h" #include "ovn-util.h" +#include "objdep.h" #include "sset.h" #include "uuid.h" @@ -176,6 +177,8 @@ struct ovn_lb_datapaths { size_t n_nb_lr; unsigned long *nb_lr_map; + + struct objdep_mgr lflow_dep_mgr; }; struct ovn_lb_datapaths *ovn_lb_datapaths_create(const struct ovn_northd_lb *, diff --git a/lib/objdep.h b/lib/objdep.h index d599128ea3..37f9bd82ea 100644 --- a/lib/objdep.h +++ b/lib/objdep.h @@ -28,6 +28,10 @@ enum objdep_type { OBJDEP_TYPE_MC_GROUP, OBJDEP_TYPE_TEMPLATE, OBJDEP_TYPE_LPORT, + OBJDEP_TYPE_OD, + OBJDEP_TYPE_LB, + OBJDEP_TYPE_CT_LB, + OBJDEP_TYPE_IGMP, OBJDEP_TYPE_LFLOW_OD, OBJDEP_TYPE_MAX, }; diff --git a/northd/northd.c b/northd/northd.c index daf5d93430..34150e5352 100644 --- a/northd/northd.c +++ b/northd/northd.c @@ -819,6 +819,7 @@ ovn_datapath_create(struct hmap *datapaths, const struct uuid *key, hmap_insert(datapaths, &od->key_node, uuid_hash(&od->key)); od->lr_group = NULL; hmap_init(&od->ports); + objdep_mgr_init(&od->lflow_dep_mgr); return od; } @@ -853,6 +854,7 @@ ovn_datapath_destroy(struct hmap *datapaths, struct ovn_datapath *od) ovn_ls_port_group_destroy(&od->nb_pgs); destroy_mcast_info_for_datapath(od); destroy_ports_for_datapath(od); + objdep_mgr_destroy(&od->lflow_dep_mgr); free(od); } } @@ -6040,6 +6042,8 @@ struct ovn_igmp_group { struct multicast_group mcgroup; struct ovs_list entries; /* List of SB entries for this group. 
*/ + + struct objdep_mgr lflow_dep_mgr; }; static uint32_t @@ -6096,7 +6100,7 @@ ovn_igmp_group_add(struct ovsdb_idl_index *sbrec_mcast_group_by_name_dp, } igmp_group->mcgroup.name = address_s; ovs_list_init(&igmp_group->entries); - + objdep_mgr_init(&igmp_group->lflow_dep_mgr); hmap_insert(igmp_groups, &igmp_group->hmap_node, ovn_igmp_group_hash(datapath, address)); ovs_list_push_back(&datapath->mcast_info.groups, @@ -6229,6 +6233,7 @@ ovn_igmp_group_destroy(struct hmap *igmp_groups, } hmap_remove(igmp_groups, &igmp_group->hmap_node); ovs_list_remove(&igmp_group->list_node); + objdep_mgr_destroy(&igmp_group->lflow_dep_mgr); free(igmp_group); } } @@ -6438,7 +6443,8 @@ ovn_dp_group_add_with_reference(struct ovn_lflow *lflow_ref, /* Adds a row with the specified contents to the Logical_Flow table. * Version to use when hash bucket locking is NOT required. */ static struct ovn_lflow * -do_ovn_lflow_add(struct lflow_data *lflow_data, const struct ovn_datapath *od, +do_ovn_lflow_add_with_od_lflow_ref(struct lflow_data *lflow_data, + const struct ovn_datapath *od, const unsigned long *dp_bitmap, size_t dp_bitmap_len, uint32_t hash, enum ovn_stage stage, uint16_t priority, const char *match, const char *actions, const char *io_port, @@ -6488,45 +6494,17 @@ do_ovn_lflow_add(struct lflow_data *lflow_data, const struct ovn_datapath *od, /* Adds a row with the specified contents to the Logical_Flow table. */ static void -ovn_lflow_add_at(struct lflow_data *lflow_data, const struct ovn_datapath *od, +ovn_lflow_add_at(struct lflow_data *lflow_data, + const struct ovn_datapath *od, const unsigned long *dp_bitmap, size_t dp_bitmap_len, enum ovn_stage stage, uint16_t priority, - const char *match, const char *actions, const char *io_port, - const char *ctrl_meter, - const struct ovsdb_idl_row *stage_hint, const char *where) - OVS_EXCLUDED(fake_hash_mutex) -{ - struct ovs_mutex *hash_lock; - uint32_t hash; - - ovs_assert(!od || - ovn_stage_to_datapath_type(stage) == ovn_datapath_get_type(od)); - - hash = ovn_logical_flow_hash(ovn_stage_get_table(stage), - ovn_stage_get_pipeline(stage), - priority, match, - actions); - - hash_lock = lflow_hash_lock(&lflow_data->lflows_match_map, hash); - do_ovn_lflow_add(lflow_data, od, dp_bitmap, dp_bitmap_len, hash, stage, - priority, match, actions, io_port, stage_hint, where, - ctrl_meter); - lflow_hash_unlock(hash_lock); -} - -/* Adds a row with the specified contents to the Logical_Flow table. 
*/ -static void -ovn_lflow_add_objdep_ref(struct lflow_data *lflow_data, - const struct ovn_datapath *od, - const unsigned long *dp_bitmap, size_t dp_bitmap_len, - enum ovn_stage stage, uint16_t priority, - const char *match, const char *actions, - const char *io_port, const char *ctrl_meter, - const struct ovsdb_idl_row *stage_hint, - const char *where, struct objdep_mgr *lflow_dep_mgr, - enum objdep_type objdep_type, - const char *res_name, - const struct ovn_datapath *res_od) + const char *match, const char *actions, + const char *io_port, const char *ctrl_meter, + const struct ovsdb_idl_row *stage_hint, + const char *where, struct objdep_mgr *lflow_dep_mgr, + enum objdep_type objdep_type, + const char *res_name, + const struct ovn_datapath *res_od) OVS_EXCLUDED(fake_hash_mutex) { struct ovs_mutex *hash_lock; @@ -6544,9 +6522,11 @@ ovn_lflow_add_objdep_ref(struct lflow_data *lflow_data, hash_lock = lflow_hash_lock(&lflow_data->lflows_match_map, hash); struct ovn_lflow *lflow = - do_ovn_lflow_add(lflow_data, od, dp_bitmap, dp_bitmap_len, hash, stage, - priority, match, actions, io_port, stage_hint, where, - ctrl_meter); + do_ovn_lflow_add_with_od_lflow_ref(lflow_data, od, dp_bitmap, + dp_bitmap_len, hash, stage, + priority, match, actions, + io_port, stage_hint, where, + ctrl_meter); objdep_mgr_add(lflow_dep_mgr, objdep_type, res_name, &lflow->lflow_uuid); @@ -6565,33 +6545,52 @@ static void __ovn_lflow_add_default_drop(struct lflow_data *lflow_data, struct ovn_datapath *od, enum ovn_stage stage, - const char *where) + const char *where, + struct objdep_mgr *lflow_dep_mgr, + enum objdep_type objdep_type, + const char *res_name, + const struct ovn_datapath *res_od) { ovn_lflow_add_at(lflow_data, od, NULL, 0, stage, 0, "1", - debug_drop_action(), NULL, NULL, NULL, where ); + debug_drop_action(), NULL, NULL, NULL, where, + lflow_dep_mgr, objdep_type, res_name, res_od); } /* Adds a row with the specified contents to the Logical_Flow table. 
*/ #define ovn_lflow_add_with_hint__(LFLOW_DATA, OD, STAGE, PRIORITY, MATCH, \ ACTIONS, IN_OUT_PORT, CTRL_METER, \ - STAGE_HINT) \ + STAGE_HINT, LFLOW_DEP_MGR, OBJDEP_TYPE, \ + RES_NAME, RES_OD) \ ovn_lflow_add_at(LFLOW_DATA, OD, NULL, 0, STAGE, PRIORITY, MATCH, ACTIONS,\ - IN_OUT_PORT, CTRL_METER, STAGE_HINT, OVS_SOURCE_LOCATOR) + IN_OUT_PORT, CTRL_METER, STAGE_HINT, OVS_SOURCE_LOCATOR, \ + LFLOW_DEP_MGR, OBJDEP_TYPE, RES_NAME, RES_OD) #define ovn_lflow_add_with_hint(LFLOW_DATA, OD, STAGE, PRIORITY, MATCH, \ - ACTIONS, STAGE_HINT) \ + ACTIONS, STAGE_HINT, LFLOW_DEP_MGR, \ + OBJDEP_TYPE, RES_NAME, RES_OD) \ ovn_lflow_add_at(LFLOW_DATA, OD, NULL, 0, STAGE, PRIORITY, MATCH, ACTIONS,\ - NULL, NULL, STAGE_HINT, OVS_SOURCE_LOCATOR) + NULL, NULL, STAGE_HINT, OVS_SOURCE_LOCATOR, \ + LFLOW_DEP_MGR, OBJDEP_TYPE, RES_NAME, RES_OD) + +#define ovn_lflow_add(LFLOW_DATA, OD, STAGE, PRIORITY, MATCH, \ + ACTIONS, LFLOW_DEP_MGR, OBJDEP_TYPE, RES_NAME, RES_OD) \ + ovn_lflow_add_at(LFLOW_DATA, OD, NULL, 0, STAGE, PRIORITY, MATCH, ACTIONS,\ + NULL, NULL, NULL, OVS_SOURCE_LOCATOR, \ + LFLOW_DEP_MGR, OBJDEP_TYPE, RES_NAME, RES_OD) #define ovn_lflow_add_with_dp_group(LFLOW_DATA, DP_BITMAP, DP_BITMAP_LEN, \ STAGE, PRIORITY, MATCH, ACTIONS, \ - STAGE_HINT) \ + STAGE_HINT, LFLOW_DEP_MGR, OBJDEP_TYPE, \ + RES_NAME, RES_OD) \ ovn_lflow_add_at(LFLOW_DATA, NULL, DP_BITMAP, DP_BITMAP_LEN, STAGE, \ PRIORITY, MATCH, ACTIONS, NULL, NULL, STAGE_HINT, \ - OVS_SOURCE_LOCATOR) + OVS_SOURCE_LOCATOR, LFLOW_DEP_MGR, OBJDEP_TYPE, \ + RES_NAME, RES_OD) -#define ovn_lflow_add_default_drop(LFLOW_DATA, OD, STAGE) \ - __ovn_lflow_add_default_drop(LFLOW_DATA, OD, STAGE, OVS_SOURCE_LOCATOR) +#define ovn_lflow_add_default_drop(LFLOW_DATA, OD, STAGE, LFLOW_DEP_MGR, \ + OBJDEP_TYPE, RES_NAME, RES_OD) \ + __ovn_lflow_add_default_drop(LFLOW_DATA, OD, STAGE, OVS_SOURCE_LOCATOR, \ + LFLOW_DEP_MGR, OBJDEP_TYPE, RES_NAME, RES_OD) /* This macro is similar to ovn_lflow_add_with_hint, except that it requires @@ -6606,28 +6605,50 @@ __ovn_lflow_add_default_drop(struct lflow_data *lflow_data, * For now, only LS pipelines should use this macro. */ #define ovn_lflow_add_with_lport_and_hint(LFLOW_DATA, OD, STAGE, PRIORITY, \ MATCH, ACTIONS, IN_OUT_PORT, \ - STAGE_HINT) \ + STAGE_HINT, LFLOW_DEP_MGR, \ + OBJDEP_TYPE, RES_NAME, RES_OD) \ + ovn_lflow_add_at(LFLOW_DATA, OD, NULL, 0, STAGE, PRIORITY, MATCH, ACTIONS,\ + IN_OUT_PORT, NULL, STAGE_HINT, OVS_SOURCE_LOCATOR, \ + LFLOW_DEP_MGR, OBJDEP_TYPE, RES_NAME, RES_OD) + +#define ovn_lflow_add_with_od_lflow_ref(LFLOW_DATA, OD, STAGE, PRIORITY, \ + MATCH, ACTIONS, RES_NAME) \ ovn_lflow_add_at(LFLOW_DATA, OD, NULL, 0, STAGE, PRIORITY, MATCH, ACTIONS,\ - IN_OUT_PORT, NULL, STAGE_HINT, OVS_SOURCE_LOCATOR) + NULL, NULL, NULL, OVS_SOURCE_LOCATOR, \ + &(OD)->lflow_dep_mgr, OBJDEP_TYPE_OD, RES_NAME, OD) -#define ovn_lflow_add(LFLOW_DATA, OD, STAGE, PRIORITY, MATCH, ACTIONS) \ +/* Adds an lflow and stores it in the lfow dep mgr for the lflows which + * are related to LB. */ +#define ovn_lflow_add_with_lb_lflow_ref(LFLOW_DATA, OD, STAGE, PRIORITY, \ + MATCH, ACTIONS, RES_NAME) \ ovn_lflow_add_at(LFLOW_DATA, OD, NULL, 0, STAGE, PRIORITY, MATCH, ACTIONS,\ - NULL, NULL, NULL, OVS_SOURCE_LOCATOR) + NULL, NULL, NULL, OVS_SOURCE_LOCATOR, \ + &(OD)->lflow_dep_mgr, OBJDEP_TYPE_LB, RES_NAME, OD) + +/* Adds an lflow and stores it in the lfow dep mgr for the lflows which + * are related to both conntrack (ACLs) and LB. 
*/ +#define ovn_lflow_add_with_ct_lb_lflow_ref(LFLOW_DATA, OD, STAGE, PRIORITY, \ + MATCH, ACTIONS, RES_NAME) \ + ovn_lflow_add_at(LFLOW_DATA, OD, NULL, 0, STAGE, PRIORITY, MATCH, ACTIONS,\ + NULL, NULL, NULL, OVS_SOURCE_LOCATOR, \ + &(OD)->lflow_dep_mgr, OBJDEP_TYPE_CT_LB, RES_NAME, OD) #define ovn_lflow_metered(LFLOW_DATA, OD, STAGE, PRIORITY, MATCH, ACTIONS, \ - CTRL_METER) \ + CTRL_METER, LFLOW_DEP_MGR, OBJDEP_TYPE, RES_NAME, \ + RES_OD) \ ovn_lflow_add_with_hint__(LFLOW_DATA, OD, STAGE, PRIORITY, MATCH, \ - ACTIONS, NULL, CTRL_METER, NULL) + ACTIONS, NULL, CTRL_METER, NULL, LFLOW_DEP_MGR, \ + OBJDEP_TYPE, RES_NAME, RES_OD) #define ovn_lflow_add_with_lport_lflow_ref(LFLOW_DATA, OD, STAGE, PRIORITY, \ MATCH, ACTIONS, IN_OUT_PORT, \ CTRL_METER, STAGE_HINT, \ LFLOW_DEP_MGR, LPORT_NAME, \ LPORT_OD) \ - ovn_lflow_add_objdep_ref(LFLOW_DATA, OD, NULL, 0, STAGE, PRIORITY, MATCH, \ - ACTIONS, IN_OUT_PORT, CTRL_METER, STAGE_HINT, \ - OVS_SOURCE_LOCATOR, LFLOW_DEP_MGR, \ - OBJDEP_TYPE_LPORT, LPORT_NAME, LPORT_OD) + ovn_lflow_add_at(LFLOW_DATA, OD, NULL, 0, STAGE, PRIORITY, MATCH, \ + ACTIONS, IN_OUT_PORT, CTRL_METER, STAGE_HINT, \ + OVS_SOURCE_LOCATOR, LFLOW_DEP_MGR, \ + OBJDEP_TYPE_LPORT, LPORT_NAME, LPORT_OD) static struct ovn_lflow * ovn_lflow_find(const struct hmap *lflows, @@ -7101,10 +7122,13 @@ build_lswitch_learn_fdb_od( struct ovn_datapath *od, struct lflow_data *lflows) { ovs_assert(od->nbs); - ovn_lflow_add(lflows, od, S_SWITCH_IN_LOOKUP_FDB, 0, "1", "next;"); - ovn_lflow_add(lflows, od, S_SWITCH_IN_PUT_FDB, 0, "1", "next;"); - ovn_lflow_add(lflows, od, S_SWITCH_IN_L2_LKUP, 0, "1", - "outport = get_fdb(eth.dst); next;"); + ovn_lflow_add_with_od_lflow_ref( + lflows, od, S_SWITCH_IN_LOOKUP_FDB, 0, "1", "next;", od->nbs->name); + ovn_lflow_add_with_od_lflow_ref( + lflows, od, S_SWITCH_IN_PUT_FDB, 0, "1", "next;", od->nbs->name); + ovn_lflow_add_with_od_lflow_ref( + lflows, od, S_SWITCH_IN_L2_LKUP, 0, "1", + "outport = get_fdb(eth.dst); next;", od->nbs->name); } /* Egress tables 8: Egress port security - IP (priority 0) @@ -7115,21 +7139,26 @@ build_lswitch_output_port_sec_od(struct ovn_datapath *od, struct lflow_data *lflows) { ovs_assert(od->nbs); - ovn_lflow_add(lflows, od, S_SWITCH_OUT_CHECK_PORT_SEC, 100, - "eth.mcast", REGBIT_PORT_SEC_DROP" = 0; next;"); - ovn_lflow_add(lflows, od, S_SWITCH_OUT_CHECK_PORT_SEC, 0, "1", - REGBIT_PORT_SEC_DROP" = check_out_port_sec(); next;"); + ovn_lflow_add_with_od_lflow_ref( + lflows, od, S_SWITCH_OUT_CHECK_PORT_SEC, 100, + "eth.mcast", REGBIT_PORT_SEC_DROP" = 0; next;", od->nbs->name); + ovn_lflow_add_with_od_lflow_ref( + lflows, od, S_SWITCH_OUT_CHECK_PORT_SEC, 0, "1", + REGBIT_PORT_SEC_DROP" = check_out_port_sec(); next;", od->nbs->name); - ovn_lflow_add(lflows, od, S_SWITCH_OUT_APPLY_PORT_SEC, 50, - REGBIT_PORT_SEC_DROP" == 1", debug_drop_action()); - ovn_lflow_add(lflows, od, S_SWITCH_OUT_APPLY_PORT_SEC, 0, - "1", "output;"); + ovn_lflow_add_with_od_lflow_ref( + lflows, od, S_SWITCH_OUT_APPLY_PORT_SEC, 50, + REGBIT_PORT_SEC_DROP" == 1", debug_drop_action(), od->nbs->name); + ovn_lflow_add_with_od_lflow_ref( + lflows, od, S_SWITCH_OUT_APPLY_PORT_SEC, 0, "1", "output;", + od->nbs->name); } static void skip_port_from_conntrack(struct ovn_datapath *od, struct ovn_port *op, enum ovn_stage in_stage, enum ovn_stage out_stage, - uint16_t priority, struct lflow_data *lflows) + uint16_t priority, struct lflow_data *lflows, + enum objdep_type objdep_type) { /* Can't use ct() for router ports. 
Consider the following configuration: * lp1(10.0.0.2) on hostA--ls1--lr0--ls2--lp2(10.0.1.2) on hostB, For a @@ -7151,10 +7180,14 @@ skip_port_from_conntrack(struct ovn_datapath *od, struct ovn_port *op, ovn_lflow_add_with_lport_and_hint(lflows, od, in_stage, priority, ingress_match, ingress_action, - op->key, &op->nbsp->header_); + op->key, &op->nbsp->header_, + &od->lflow_dep_mgr, objdep_type, + od->nbs->name, od); ovn_lflow_add_with_lport_and_hint(lflows, od, out_stage, priority, egress_match, egress_action, - op->key, &op->nbsp->header_); + op->key, &op->nbsp->header_, + &od->lflow_dep_mgr, objdep_type, + od->nbs->name, od); free(ingress_match); free(egress_match); @@ -7171,13 +7204,17 @@ build_stateless_filter(struct ovn_datapath *od, acl->priority + OVN_ACL_PRI_OFFSET, acl->match, action, - &acl->header_); + &acl->header_, + &od->lflow_dep_mgr, OBJDEP_TYPE_CT_LB, + od->nbs->name, od); } else { ovn_lflow_add_with_hint(lflows, od, S_SWITCH_OUT_PRE_ACL, acl->priority + OVN_ACL_PRI_OFFSET, acl->match, action, - &acl->header_); + &acl->header_, + &od->lflow_dep_mgr, OBJDEP_TYPE_CT_LB, + od->nbs->name, od); } } @@ -7212,14 +7249,18 @@ build_pre_acls(struct ovn_datapath *od, const struct hmap *port_groups, { /* Ingress and Egress Pre-ACL Table (Priority 0): Packets are * allowed by default. */ - ovn_lflow_add(lflows, od, S_SWITCH_IN_PRE_ACL, 0, "1", "next;"); - ovn_lflow_add(lflows, od, S_SWITCH_OUT_PRE_ACL, 0, "1", "next;"); + ovn_lflow_add_with_ct_lb_lflow_ref( + lflows, od, S_SWITCH_IN_PRE_ACL, 0, "1", "next;", od->nbs->name); + ovn_lflow_add_with_ct_lb_lflow_ref( + lflows, od, S_SWITCH_OUT_PRE_ACL, 0, "1", "next;", od->nbs->name); - ovn_lflow_add(lflows, od, S_SWITCH_IN_PRE_ACL, 110, - "eth.dst == $svc_monitor_mac", "next;"); + ovn_lflow_add_with_ct_lb_lflow_ref( + lflows, od, S_SWITCH_IN_PRE_ACL, 110, + "eth.dst == $svc_monitor_mac", "next;", od->nbs->name); - ovn_lflow_add(lflows, od, S_SWITCH_OUT_PRE_ACL, 110, - "eth.src == $svc_monitor_mac", "next;"); + ovn_lflow_add_with_ct_lb_lflow_ref( + lflows, od, S_SWITCH_OUT_PRE_ACL, 110, + "eth.src == $svc_monitor_mac", "next;", od->nbs->name); /* If there are any stateful ACL rules in this datapath, we may * send IP packets for some (allow) filters through the conntrack action, @@ -7228,13 +7269,13 @@ build_pre_acls(struct ovn_datapath *od, const struct hmap *port_groups, for (size_t i = 0; i < od->n_router_ports; i++) { skip_port_from_conntrack(od, od->router_ports[i], S_SWITCH_IN_PRE_ACL, S_SWITCH_OUT_PRE_ACL, - 110, lflows); + 110, lflows, OBJDEP_TYPE_CT_LB); } for (size_t i = 0; i < od->n_localnet_ports; i++) { skip_port_from_conntrack(od, od->localnet_ports[i], S_SWITCH_IN_PRE_ACL, S_SWITCH_OUT_PRE_ACL, - 110, lflows); + 110, lflows, OBJDEP_TYPE_CT_LB); } /* stateless filters always take precedence over stateful ACLs. */ @@ -7244,18 +7285,24 @@ build_pre_acls(struct ovn_datapath *od, const struct hmap *port_groups, * * Not to do conntrack on ND and ICMP destination * unreachable packets. 
*/ - ovn_lflow_add(lflows, od, S_SWITCH_IN_PRE_ACL, 110, - "nd || nd_rs || nd_ra || mldv1 || mldv2 || " - "(udp && udp.src == 546 && udp.dst == 547)", "next;"); - ovn_lflow_add(lflows, od, S_SWITCH_OUT_PRE_ACL, 110, - "nd || nd_rs || nd_ra || mldv1 || mldv2 || " - "(udp && udp.src == 546 && udp.dst == 547)", "next;"); + ovn_lflow_add_with_ct_lb_lflow_ref( + lflows, od, S_SWITCH_IN_PRE_ACL, 110, + "nd || nd_rs || nd_ra || mldv1 || mldv2 || " + "(udp && udp.src == 546 && udp.dst == 547)", "next;", + od->nbs->name); + ovn_lflow_add_with_ct_lb_lflow_ref( + lflows, od, S_SWITCH_OUT_PRE_ACL, 110, + "nd || nd_rs || nd_ra || mldv1 || mldv2 || " + "(udp && udp.src == 546 && udp.dst == 547)", "next;", + od->nbs->name); /* Do not send multicast packets to conntrack. */ - ovn_lflow_add(lflows, od, S_SWITCH_IN_PRE_ACL, 110, "eth.mcast", - "next;"); - ovn_lflow_add(lflows, od, S_SWITCH_OUT_PRE_ACL, 110, "eth.mcast", - "next;"); + ovn_lflow_add_with_ct_lb_lflow_ref( + lflows, od, S_SWITCH_IN_PRE_ACL, 110, "eth.mcast", "next;", + od->nbs->name); + ovn_lflow_add_with_ct_lb_lflow_ref( + lflows, od, S_SWITCH_OUT_PRE_ACL, 110, "eth.mcast", "next;", + od->nbs->name); /* Ingress and Egress Pre-ACL Table (Priority 100). * @@ -7265,10 +7312,13 @@ build_pre_acls(struct ovn_datapath *od, const struct hmap *port_groups, * * 'REGBIT_CONNTRACK_DEFRAG' is set to let the pre-stateful table send * it to conntrack for tracking and defragmentation. */ - ovn_lflow_add(lflows, od, S_SWITCH_IN_PRE_ACL, 100, "ip", - REGBIT_CONNTRACK_DEFRAG" = 1; next;"); - ovn_lflow_add(lflows, od, S_SWITCH_OUT_PRE_ACL, 100, "ip", - REGBIT_CONNTRACK_DEFRAG" = 1; next;"); + ovn_lflow_add_with_ct_lb_lflow_ref( + lflows, od, S_SWITCH_IN_PRE_ACL, 100, "ip", + REGBIT_CONNTRACK_DEFRAG" = 1; next;", od->nbs->name); + ovn_lflow_add_with_ct_lb_lflow_ref(lflows, od, S_SWITCH_OUT_PRE_ACL, + 100, "ip", + REGBIT_CONNTRACK_DEFRAG" = 1; next;", + od->nbs->name); } else if (od->has_lb_vip) { /* We'll build stateless filters if there are LB rules so that * the stateless flows are not tracked in pre-lb. */ @@ -7355,7 +7405,9 @@ build_interconn_mcast_snoop_flows(struct ovn_datapath *od, ovn_lflow_metered(lflows, od, S_SWITCH_OUT_PRE_LB, 120, match, "clone { igmp; }; next;", copp_meter_get(COPP_IGMP, od->nbs->copp, - meter_groups)); + meter_groups), + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbs->name, od); free(match); /* Punt MLD traffic to controller. */ @@ -7363,11 +7415,15 @@ build_interconn_mcast_snoop_flows(struct ovn_datapath *od, ovn_lflow_metered(lflows, od, S_SWITCH_OUT_PRE_LB, 120, match, "clone { igmp; }; next;", copp_meter_get(COPP_IGMP, od->nbs->copp, - meter_groups)); + meter_groups), + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbs->name, od); free(match); } } +/* Builds pre_lb lflows which are generic to a logical switch datapath + * and do not change if any load balancer is associated or disassociated. 
*/ static void build_pre_lb(struct ovn_datapath *od, const struct shash *meter_groups, struct lflow_data *lflows) @@ -7376,32 +7432,49 @@ build_pre_lb(struct ovn_datapath *od, const struct shash *meter_groups, build_interconn_mcast_snoop_flows(od, meter_groups, lflows); /* Do not send multicast packets to conntrack */ - ovn_lflow_add(lflows, od, S_SWITCH_IN_PRE_LB, 110, "eth.mcast", "next;"); - ovn_lflow_add(lflows, od, S_SWITCH_OUT_PRE_LB, 110, "eth.mcast", "next;"); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_PRE_LB, 110, + "eth.mcast", "next;", od->nbs->name); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_OUT_PRE_LB, 110, + "eth.mcast", "next;", od->nbs->name); /* Do not send ND packets to conntrack */ - ovn_lflow_add(lflows, od, S_SWITCH_IN_PRE_LB, 110, + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_PRE_LB, 110, "nd || nd_rs || nd_ra || mldv1 || mldv2", - "next;"); - ovn_lflow_add(lflows, od, S_SWITCH_OUT_PRE_LB, 110, + "next;", od->nbs->name); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_OUT_PRE_LB, 110, "nd || nd_rs || nd_ra || mldv1 || mldv2", - "next;"); + "next;", od->nbs->name); /* Do not send service monitor packets to conntrack. */ - ovn_lflow_add(lflows, od, S_SWITCH_IN_PRE_LB, 110, - "eth.dst == $svc_monitor_mac", "next;"); - ovn_lflow_add(lflows, od, S_SWITCH_OUT_PRE_LB, 110, - "eth.src == $svc_monitor_mac", "next;"); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_PRE_LB, 110, + "eth.dst == $svc_monitor_mac", "next;", od->nbs->name); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_OUT_PRE_LB, 110, + "eth.src == $svc_monitor_mac", "next;", od->nbs->name); /* Allow all packets to go to next tables by default. */ - ovn_lflow_add(lflows, od, S_SWITCH_IN_PRE_LB, 0, "1", "next;"); - ovn_lflow_add(lflows, od, S_SWITCH_OUT_PRE_LB, 0, "1", "next;"); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_PRE_LB, 0, + "1", "next;", od->nbs->name); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_OUT_PRE_LB, 0, + "1", "next;", od->nbs->name); for (size_t i = 0; i < od->n_router_ports; i++) { skip_port_from_conntrack(od, od->router_ports[i], S_SWITCH_IN_PRE_LB, S_SWITCH_OUT_PRE_LB, - 110, lflows); + 110, lflows, OBJDEP_TYPE_OD); } + + /* Do not sent statless flows via conntrack */ + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_PRE_LB, 110, + REGBIT_ACL_STATELESS" == 1", "next;", od->nbs->name); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_OUT_PRE_LB, 110, + REGBIT_ACL_STATELESS" == 1", "next;", od->nbs->name); +} + +/* Builds pre_lb lflows which are load balancer related and may change + * if a load balancer is associated or disassociated. */ +static void +build_pre_lb_lb_related(struct ovn_datapath *od, struct lflow_data *lflows) +{ /* Localnet ports have no need for going through conntrack, unless * the logical switch has a load balancer. 
Then, conntrack is necessary * so that traffic arriving via the localnet port can be load @@ -7411,16 +7484,10 @@ build_pre_lb(struct ovn_datapath *od, const struct shash *meter_groups, for (size_t i = 0; i < od->n_localnet_ports; i++) { skip_port_from_conntrack(od, od->localnet_ports[i], S_SWITCH_IN_PRE_LB, S_SWITCH_OUT_PRE_LB, - 110, lflows); + 110, lflows, OBJDEP_TYPE_LB); } } - /* Do not sent statless flows via conntrack */ - ovn_lflow_add(lflows, od, S_SWITCH_IN_PRE_LB, 110, - REGBIT_ACL_STATELESS" == 1", "next;"); - ovn_lflow_add(lflows, od, S_SWITCH_OUT_PRE_LB, 110, - REGBIT_ACL_STATELESS" == 1", "next;"); - /* 'REGBIT_CONNTRACK_NAT' is set to let the pre-stateful table send * packet to conntrack for defragmentation and possibly for unNATting. * @@ -7452,10 +7519,12 @@ build_pre_lb(struct ovn_datapath *od, const struct shash *meter_groups, * add a lflow to drop ct.inv packets. */ if (od->has_lb_vip) { - ovn_lflow_add(lflows, od, S_SWITCH_IN_PRE_LB, - 100, "ip", REGBIT_CONNTRACK_NAT" = 1; next;"); - ovn_lflow_add(lflows, od, S_SWITCH_OUT_PRE_LB, - 100, "ip", REGBIT_CONNTRACK_NAT" = 1; next;"); + ovn_lflow_add_with_lb_lflow_ref( + lflows, od, S_SWITCH_IN_PRE_LB, 100, "ip", + REGBIT_CONNTRACK_NAT" = 1; next;", od->nbs->name); + ovn_lflow_add_with_lb_lflow_ref( + lflows, od, S_SWITCH_OUT_PRE_LB, 100, "ip", + REGBIT_CONNTRACK_NAT" = 1; next;", od->nbs->name); } } @@ -7466,8 +7535,10 @@ build_pre_stateful(struct ovn_datapath *od, { /* Ingress and Egress pre-stateful Table (Priority 0): Packets are * allowed by default. */ - ovn_lflow_add(lflows, od, S_SWITCH_IN_PRE_STATEFUL, 0, "1", "next;"); - ovn_lflow_add(lflows, od, S_SWITCH_OUT_PRE_STATEFUL, 0, "1", "next;"); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_PRE_STATEFUL, 0, + "1", "next;", od->nbs->name); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_OUT_PRE_STATEFUL, 0, + "1", "next;", od->nbs->name); /* Note: priority-120 flows are added in build_lb_rules_pre_stateful(). */ @@ -7475,19 +7546,19 @@ build_pre_stateful(struct ovn_datapath *od, ? "ct_lb_mark;" : "ct_lb;"; - ovn_lflow_add(lflows, od, S_SWITCH_IN_PRE_STATEFUL, 110, - REGBIT_CONNTRACK_NAT" == 1", ct_lb_action); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_PRE_STATEFUL, 110, + REGBIT_CONNTRACK_NAT" == 1", ct_lb_action, od->nbs->name); - ovn_lflow_add(lflows, od, S_SWITCH_OUT_PRE_STATEFUL, 110, - REGBIT_CONNTRACK_NAT" == 1", ct_lb_action); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_OUT_PRE_STATEFUL, 110, + REGBIT_CONNTRACK_NAT" == 1", ct_lb_action, od->nbs->name); /* If REGBIT_CONNTRACK_DEFRAG is set as 1, then the packets should be * sent to conntrack for tracking and defragmentation. */ - ovn_lflow_add(lflows, od, S_SWITCH_IN_PRE_STATEFUL, 100, - REGBIT_CONNTRACK_DEFRAG" == 1", "ct_next;"); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_PRE_STATEFUL, 100, + REGBIT_CONNTRACK_DEFRAG" == 1", "ct_next;", od->nbs->name); - ovn_lflow_add(lflows, od, S_SWITCH_OUT_PRE_STATEFUL, 100, - REGBIT_CONNTRACK_DEFRAG" == 1", "ct_next;"); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_OUT_PRE_STATEFUL, 100, + REGBIT_CONNTRACK_DEFRAG" == 1", "ct_next;", od->nbs->name); } @@ -7520,9 +7591,11 @@ build_acl_hints(struct ovn_datapath *od, /* In any case, advance to the next stage. 
*/ if (!od->has_acls && !od->has_lb_vip) { - ovn_lflow_add(lflows, od, stage, UINT16_MAX, "1", "next;"); + ovn_lflow_add_with_ct_lb_lflow_ref(lflows, od, stage, UINT16_MAX, + "1", "next;", od->nbs->name); } else { - ovn_lflow_add(lflows, od, stage, 0, "1", "next;"); + ovn_lflow_add_with_ct_lb_lflow_ref(lflows, od, stage, 0, "1", + "next;", od->nbs->name); } if (!od->has_stateful_acl && !od->has_lb_vip) { @@ -7533,10 +7606,11 @@ build_acl_hints(struct ovn_datapath *od, * or drop ACLs. For allow ACLs, the connection must also be committed * to conntrack so we set REGBIT_ACL_HINT_ALLOW_NEW. */ - ovn_lflow_add(lflows, od, stage, 7, "ct.new && !ct.est", - REGBIT_ACL_HINT_ALLOW_NEW " = 1; " - REGBIT_ACL_HINT_DROP " = 1; " - "next;"); + ovn_lflow_add_with_ct_lb_lflow_ref( + lflows, od, stage, 7, "ct.new && !ct.est", + REGBIT_ACL_HINT_ALLOW_NEW " = 1; " + REGBIT_ACL_HINT_DROP " = 1; " + "next;", od->nbs->name); /* Already established connections in the "request" direction that * are already marked as "blocked" may hit either: @@ -7549,16 +7623,18 @@ build_acl_hints(struct ovn_datapath *od, match = features->ct_no_masked_label ? "!ct.new && ct.est && !ct.rpl && ct_mark.blocked == 1" : "!ct.new && ct.est && !ct.rpl && ct_label.blocked == 1"; - ovn_lflow_add(lflows, od, stage, 6, match, - REGBIT_ACL_HINT_ALLOW_NEW " = 1; " - REGBIT_ACL_HINT_DROP " = 1; " - "next;"); + ovn_lflow_add_with_ct_lb_lflow_ref( + lflows, od, stage, 6, match, + REGBIT_ACL_HINT_ALLOW_NEW " = 1; " + REGBIT_ACL_HINT_DROP " = 1; " + "next;", od->nbs->name); /* Not tracked traffic can either be allowed or dropped. */ - ovn_lflow_add(lflows, od, stage, 5, "!ct.trk", - REGBIT_ACL_HINT_ALLOW " = 1; " - REGBIT_ACL_HINT_DROP " = 1; " - "next;"); + ovn_lflow_add_with_ct_lb_lflow_ref( + lflows, od, stage, 5, "!ct.trk", + REGBIT_ACL_HINT_ALLOW " = 1; " + REGBIT_ACL_HINT_DROP " = 1; " + "next;", od->nbs->name); /* Already established connections in the "request" direction may hit * either: @@ -7571,23 +7647,24 @@ build_acl_hints(struct ovn_datapath *od, match = features->ct_no_masked_label ? "!ct.new && ct.est && !ct.rpl && ct_mark.blocked == 0" : "!ct.new && ct.est && !ct.rpl && ct_label.blocked == 0"; - ovn_lflow_add(lflows, od, stage, 4, match, - REGBIT_ACL_HINT_ALLOW " = 1; " - REGBIT_ACL_HINT_BLOCK " = 1; " - "next;"); + ovn_lflow_add_with_ct_lb_lflow_ref( + lflows, od, stage, 4, match, + REGBIT_ACL_HINT_ALLOW " = 1; " + REGBIT_ACL_HINT_BLOCK " = 1; next;", + od->nbs->name); /* Not established or established and already blocked connections may * hit drop ACLs. */ - ovn_lflow_add(lflows, od, stage, 3, "!ct.est", - REGBIT_ACL_HINT_DROP " = 1; " - "next;"); + ovn_lflow_add_with_ct_lb_lflow_ref( + lflows, od, stage, 3, "!ct.est", + REGBIT_ACL_HINT_DROP " = 1; next;", od->nbs->name); match = features->ct_no_masked_label ? "ct.est && ct_mark.blocked == 1" : "ct.est && ct_label.blocked == 1"; - ovn_lflow_add(lflows, od, stage, 2, match, - REGBIT_ACL_HINT_DROP " = 1; " - "next;"); + ovn_lflow_add_with_ct_lb_lflow_ref( + lflows, od, stage, 2, match, + REGBIT_ACL_HINT_DROP " = 1; next;", od->nbs->name); /* Established connections that were previously allowed might hit * drop ACLs in which case the connection must be committed with @@ -7596,9 +7673,9 @@ build_acl_hints(struct ovn_datapath *od, match = features->ct_no_masked_label ? 
"ct.est && ct_mark.blocked == 0" : "ct.est && ct_label.blocked == 0"; - ovn_lflow_add(lflows, od, stage, 1, match, + ovn_lflow_add_with_ct_lb_lflow_ref(lflows, od, stage, 1, match, REGBIT_ACL_HINT_BLOCK " = 1; " - "next;"); + "next;", od->nbs->name); } } @@ -7732,7 +7809,8 @@ consider_acl(struct lflow_data *lflows, struct ovn_datapath *od, ds_put_format(match, "(%s)", acl->match); ovn_lflow_add_with_hint(lflows, od, stage, priority, ds_cstr(match), ds_cstr(actions), - &acl->header_); + &acl->header_, &od->lflow_dep_mgr, + OBJDEP_TYPE_CT_LB, od->nbs->name, od); return; } @@ -7769,7 +7847,8 @@ consider_acl(struct lflow_data *lflows, struct ovn_datapath *od, ds_put_cstr(actions, "next;"); ovn_lflow_add_with_hint(lflows, od, stage, priority, ds_cstr(match), ds_cstr(actions), - &acl->header_); + &acl->header_, &od->lflow_dep_mgr, + OBJDEP_TYPE_CT_LB, od->nbs->name, od); /* Match on traffic in the request direction for an established * connection tracking entry that has not been marked for @@ -7791,7 +7870,8 @@ consider_acl(struct lflow_data *lflows, struct ovn_datapath *od, ds_put_cstr(actions, "next;"); ovn_lflow_add_with_hint(lflows, od, stage, priority, ds_cstr(match), ds_cstr(actions), - &acl->header_); + &acl->header_, &od->lflow_dep_mgr, + OBJDEP_TYPE_CT_LB, od->nbs->name, od); } else if (!strcmp(acl->action, "drop") || !strcmp(acl->action, "reject")) { /* The implementation of "drop" differs if stateful ACLs are in @@ -7808,7 +7888,8 @@ consider_acl(struct lflow_data *lflows, struct ovn_datapath *od, ds_put_cstr(actions, "next;"); ovn_lflow_add_with_hint(lflows, od, stage, priority, ds_cstr(match), ds_cstr(actions), - &acl->header_); + &acl->header_, &od->lflow_dep_mgr, + OBJDEP_TYPE_CT_LB, od->nbs->name, od); /* For an existing connection without ct_mark.blocked set, we've * encountered a policy change. 
ACLs previously allowed * this connection and we committed the connection tracking @@ -7829,7 +7910,8 @@ consider_acl(struct lflow_data *lflows, struct ovn_datapath *od, ct_blocked_match); ovn_lflow_add_with_hint(lflows, od, stage, priority, ds_cstr(match), ds_cstr(actions), - &acl->header_); + &acl->header_, &od->lflow_dep_mgr, + OBJDEP_TYPE_CT_LB, od->nbs->name, od); } } @@ -7996,18 +8078,20 @@ build_acl_action_lflows(struct ovn_datapath *od, struct lflow_data *lflows, for (size_t i = 0; i < ARRAY_SIZE(stages); i++) { enum ovn_stage stage = stages[i]; if (!od->has_acls) { - ovn_lflow_add(lflows, od, stage, 0, "1", "next;"); + ovn_lflow_add_with_ct_lb_lflow_ref(lflows, od, stage, 0, "1", + "next;", od->nbs->name); continue; } ds_truncate(actions, verdict_len); ds_put_cstr(actions, "next;"); - ovn_lflow_add(lflows, od, stage, 1000, - REGBIT_ACL_VERDICT_ALLOW " == 1", ds_cstr(actions)); + ovn_lflow_add_with_ct_lb_lflow_ref(lflows, od, stage, 1000, + REGBIT_ACL_VERDICT_ALLOW " == 1", ds_cstr(actions), + od->nbs->name); ds_truncate(actions, verdict_len); ds_put_cstr(actions, debug_implicit_drop_action()); - ovn_lflow_add(lflows, od, stage, 1000, + ovn_lflow_add_with_ct_lb_lflow_ref(lflows, od, stage, 1000, REGBIT_ACL_VERDICT_DROP " == 1", - ds_cstr(actions)); + ds_cstr(actions), od->nbs->name); bool ingress = ovn_stage_get_pipeline(stage) == P_IN; ds_truncate(actions, verdict_len); @@ -8023,11 +8107,14 @@ build_acl_action_lflows(struct ovn_datapath *od, struct lflow_data *lflows, ovn_lflow_metered(lflows, od, stage, 1000, REGBIT_ACL_VERDICT_REJECT " == 1", ds_cstr(actions), copp_meter_get(COPP_REJECT, od->nbs->copp, - meter_groups)); + meter_groups), + &od->lflow_dep_mgr, OBJDEP_TYPE_CT_LB, + od->nbs->name, od); ds_truncate(actions, verdict_len); ds_put_cstr(actions, default_acl_action); - ovn_lflow_add(lflows, od, stage, 0, "1", ds_cstr(actions)); + ovn_lflow_add_with_ct_lb_lflow_ref(lflows, od, stage, 0, "1", + ds_cstr(actions), od->nbs->name); struct ds tier_actions = DS_EMPTY_INITIALIZER; for (size_t j = 0; j < od->max_acl_tier; j++) { @@ -8038,8 +8125,10 @@ build_acl_action_lflows(struct ovn_datapath *od, struct lflow_data *lflows, "next(pipeline=%s,table=%d);", j + 1, ingress ? "ingress" : "egress", ovn_stage_get_table(stage) - 1); - ovn_lflow_add(lflows, od, stage, 500, ds_cstr(match), - ds_cstr(&tier_actions)); + ovn_lflow_add_with_ct_lb_lflow_ref(lflows, od, stage, 500, + ds_cstr(match), + ds_cstr(&tier_actions), + od->nbs->name); } ds_destroy(&tier_actions); } @@ -8104,7 +8193,8 @@ build_acl_log_related_flows(struct ovn_datapath *od, struct lflow_data *lflows, ovn_lflow_add_with_hint(lflows, od, log_related_stage, UINT16_MAX - 2, ds_cstr(match), ds_cstr(actions), - &acl->header_); + &acl->header_, &od->lflow_dep_mgr, + OBJDEP_TYPE_CT_LB, od->nbs->name, od); ds_clear(match); ds_put_format(match, "!ct.est && ct.rel && !ct.new%s && " @@ -8115,7 +8205,8 @@ build_acl_log_related_flows(struct ovn_datapath *od, struct lflow_data *lflows, ovn_lflow_add_with_hint(lflows, od, log_related_stage, UINT16_MAX - 2, ds_cstr(match), ds_cstr(actions), - &acl->header_); + &acl->header_, &od->lflow_dep_mgr, + OBJDEP_TYPE_CT_LB, od->nbs->name, od); } static void @@ -8142,21 +8233,31 @@ build_acls(struct ovn_datapath *od, const struct chassis_features *features, * are any stateful ACLs in this datapath. 
*/ if (!od->has_acls) { if (!od->has_lb_vip) { - ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL_EVAL, UINT16_MAX, "1", - "next;"); - ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL_EVAL, UINT16_MAX, "1", - "next;"); + ovn_lflow_add_with_ct_lb_lflow_ref( + lflows, od, S_SWITCH_IN_ACL_EVAL, UINT16_MAX, "1", "next;", + od->nbs->name); + ovn_lflow_add_with_ct_lb_lflow_ref( + lflows, od, S_SWITCH_OUT_ACL_EVAL, UINT16_MAX, "1", "next;", + od->nbs->name); } else { - ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL_EVAL, 0, "1", "next;"); - ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL_EVAL, 0, "1", "next;"); - } - ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL_AFTER_LB_EVAL, 0, "1", - "next;"); + ovn_lflow_add_with_ct_lb_lflow_ref( + lflows, od, S_SWITCH_IN_ACL_EVAL, 0, "1", "next;", + od->nbs->name); + ovn_lflow_add_with_ct_lb_lflow_ref( + lflows, od, S_SWITCH_OUT_ACL_EVAL, 0, "1", "next;", + od->nbs->name); + } + ovn_lflow_add_with_ct_lb_lflow_ref( + lflows, od, S_SWITCH_IN_ACL_AFTER_LB_EVAL, 0, "1", "next;", + od->nbs->name); } else { - ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL_EVAL, 0, "1", "next;"); - ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL_EVAL, 0, "1", "next;"); - ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL_AFTER_LB_EVAL, 0, "1", - "next;"); + ovn_lflow_add_with_ct_lb_lflow_ref( + lflows, od, S_SWITCH_IN_ACL_EVAL, 0, "1", "next;", od->nbs->name); + ovn_lflow_add_with_ct_lb_lflow_ref( + lflows, od, S_SWITCH_OUT_ACL_EVAL, 0, "1", "next;", od->nbs->name); + ovn_lflow_add_with_ct_lb_lflow_ref( + lflows, od, S_SWITCH_IN_ACL_AFTER_LB_EVAL, 0, "1", "next;", + od->nbs->name); } @@ -8185,22 +8286,24 @@ build_acls(struct ovn_datapath *od, const struct chassis_features *features, * uses "next;". */ ds_clear(&match); ds_put_format(&match, "ip && ct.est && %s == 1", ct_blocked_match); - ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL_EVAL, 1, + ovn_lflow_add_with_ct_lb_lflow_ref(lflows, od, S_SWITCH_IN_ACL_EVAL, 1, ds_cstr(&match), REGBIT_CONNTRACK_COMMIT" = 1; " - REGBIT_ACL_VERDICT_ALLOW" = 1; next;"); - ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL_EVAL, 1, - ds_cstr(&match), + REGBIT_ACL_VERDICT_ALLOW" = 1; next;", od->nbs->name); + ovn_lflow_add_with_ct_lb_lflow_ref(lflows, od, S_SWITCH_OUT_ACL_EVAL, + 1, ds_cstr(&match), REGBIT_CONNTRACK_COMMIT" = 1; " - REGBIT_ACL_VERDICT_ALLOW" = 1; next;"); + REGBIT_ACL_VERDICT_ALLOW" = 1; next;", od->nbs->name); const char *next_action = default_acl_drop ? "next;" : REGBIT_CONNTRACK_COMMIT" = 1; next;"; - ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL_EVAL, 1, "ip && !ct.est", - next_action); - ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL_EVAL, 1, "ip && !ct.est", - next_action); + ovn_lflow_add_with_ct_lb_lflow_ref( + lflows, od, S_SWITCH_IN_ACL_EVAL, 1, "ip && !ct.est", next_action, + od->nbs->name); + ovn_lflow_add_with_ct_lb_lflow_ref( + lflows, od, S_SWITCH_OUT_ACL_EVAL, 1, "ip && !ct.est", + next_action, od->nbs->name); /* Ingress and Egress ACL Table (Priority 65532). * @@ -8213,10 +8316,14 @@ build_acls(struct ovn_datapath *od, const struct chassis_features *features, ds_put_format(&match, "%s(ct.est && ct.rpl && %s == 1)", use_ct_inv_match ? 
"ct.inv || " : "", ct_blocked_match); - ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL_EVAL, UINT16_MAX - 3, - ds_cstr(&match), REGBIT_ACL_VERDICT_DROP " = 1; next;"); - ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL_EVAL, UINT16_MAX - 3, - ds_cstr(&match), REGBIT_ACL_VERDICT_DROP " = 1; next;"); + ovn_lflow_add_with_ct_lb_lflow_ref( + lflows, od, S_SWITCH_IN_ACL_EVAL, UINT16_MAX - 3, + ds_cstr(&match), REGBIT_ACL_VERDICT_DROP " = 1; next;", + od->nbs->name); + ovn_lflow_add_with_ct_lb_lflow_ref( + lflows, od, S_SWITCH_OUT_ACL_EVAL, UINT16_MAX - 3, + ds_cstr(&match), REGBIT_ACL_VERDICT_DROP " = 1; next;", + od->nbs->name); /* Ingress and Egress ACL Table (Priority 65535 - 3). * @@ -8232,14 +8339,16 @@ build_acls(struct ovn_datapath *od, const struct chassis_features *features, "ct.rpl && %s == 0", use_ct_inv_match ? " && !ct.inv" : "", ct_blocked_match); - ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL_EVAL, UINT16_MAX - 3, - ds_cstr(&match), REGBIT_ACL_HINT_DROP" = 0; " - REGBIT_ACL_HINT_BLOCK" = 0; " - REGBIT_ACL_HINT_ALLOW_REL" = 1; " - REGBIT_ACL_VERDICT_ALLOW" = 1; next;"); - ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL_EVAL, UINT16_MAX - 3, - ds_cstr(&match), - REGBIT_ACL_VERDICT_ALLOW " = 1; next;"); + ovn_lflow_add_with_ct_lb_lflow_ref( + lflows, od, S_SWITCH_IN_ACL_EVAL, UINT16_MAX - 3, + ds_cstr(&match), REGBIT_ACL_HINT_DROP" = 0; " + REGBIT_ACL_HINT_BLOCK" = 0; " + REGBIT_ACL_HINT_ALLOW_REL" = 1; " + REGBIT_ACL_VERDICT_ALLOW" = 1; next;", od->nbs->name); + ovn_lflow_add_with_ct_lb_lflow_ref( + lflows, od, S_SWITCH_OUT_ACL_EVAL, UINT16_MAX - 3, + ds_cstr(&match), REGBIT_ACL_VERDICT_ALLOW " = 1; next;", + od->nbs->name); /* Ingress and Egress ACL Table (Priority 65535). * @@ -8267,16 +8376,20 @@ build_acls(struct ovn_datapath *od, const struct chassis_features *features, ds_put_format(&match, "!ct.est && ct.rel && !ct.new%s && %s == 0", use_ct_inv_match ? " && !ct.inv" : "", ct_blocked_match); - ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL_EVAL, UINT16_MAX - 3, - ds_cstr(&match), ct_in_acl_action); - ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL_EVAL, UINT16_MAX - 3, - ds_cstr(&match), ct_out_acl_action); + ovn_lflow_add_with_ct_lb_lflow_ref( + lflows, od, S_SWITCH_IN_ACL_EVAL, UINT16_MAX - 3, + ds_cstr(&match), ct_in_acl_action, od->nbs->name); + ovn_lflow_add_with_ct_lb_lflow_ref(lflows, od, S_SWITCH_OUT_ACL_EVAL, + UINT16_MAX - 3, + ds_cstr(&match), ct_out_acl_action, + od->nbs->name); /* Reply and related traffic matched by an "allow-related" ACL * should be allowed in the ls_in_acl_after_lb stage too. */ - ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL_AFTER_LB_EVAL, - UINT16_MAX - 3, - REGBIT_ACL_HINT_ALLOW_REL" == 1", - REGBIT_ACL_VERDICT_ALLOW " = 1; next;"); + ovn_lflow_add_with_ct_lb_lflow_ref( + lflows, od, S_SWITCH_IN_ACL_AFTER_LB_EVAL, + UINT16_MAX - 3, + REGBIT_ACL_HINT_ALLOW_REL" == 1", + REGBIT_ACL_VERDICT_ALLOW " = 1; next;", od->nbs->name); } /* Ingress and Egress ACL Table (Priority 65532). @@ -8286,15 +8399,18 @@ build_acls(struct ovn_datapath *od, const struct chassis_features *features, * Also, don't send them to conntrack because session tracking * for these protocols is not working properly: * https://bugzilla.kernel.org/show_bug.cgi?id=11797. 
*/ - ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL_EVAL, UINT16_MAX - 3, - IPV6_CT_OMIT_MATCH, - REGBIT_ACL_VERDICT_ALLOW " = 1; next;"); - ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL_EVAL, UINT16_MAX - 3, - IPV6_CT_OMIT_MATCH, - REGBIT_ACL_VERDICT_ALLOW " = 1; next;"); - ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL_AFTER_LB_EVAL, UINT16_MAX - 3, - IPV6_CT_OMIT_MATCH, - REGBIT_ACL_VERDICT_ALLOW " = 1; next;"); + ovn_lflow_add_with_ct_lb_lflow_ref( + lflows, od, S_SWITCH_IN_ACL_EVAL, UINT16_MAX - 3, + IPV6_CT_OMIT_MATCH, REGBIT_ACL_VERDICT_ALLOW " = 1; next;", + od->nbs->name); + ovn_lflow_add_with_ct_lb_lflow_ref( + lflows, od, S_SWITCH_OUT_ACL_EVAL, UINT16_MAX - 3, + IPV6_CT_OMIT_MATCH, REGBIT_ACL_VERDICT_ALLOW " = 1; next;", + od->nbs->name); + ovn_lflow_add_with_ct_lb_lflow_ref( + lflows, od, S_SWITCH_IN_ACL_AFTER_LB_EVAL, UINT16_MAX - 3, + IPV6_CT_OMIT_MATCH, REGBIT_ACL_VERDICT_ALLOW " = 1; next;", + od->nbs->name); /* Ingress or Egress ACL Table (Various priorities). */ for (size_t i = 0; i < od->nbs->n_acls; i++) { @@ -8329,23 +8445,24 @@ build_acls(struct ovn_datapath *od, const struct chassis_features *features, has_stateful ? REGBIT_ACL_VERDICT_ALLOW" = 1; " "ct_commit; next;" : REGBIT_ACL_VERDICT_ALLOW" = 1; next;"; - ovn_lflow_add( + ovn_lflow_add_with_ct_lb_lflow_ref( lflows, od, S_SWITCH_OUT_ACL_EVAL, 34000, "udp.src == 53", - dns_actions); + dns_actions, od->nbs->name); } if (od->has_acls || od->has_lb_vip) { /* Add a 34000 priority flow to advance the service monitor reply * packets to skip applying ingress ACLs. */ - ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL_EVAL, 34000, - "eth.dst == $svc_monitor_mac", - REGBIT_ACL_VERDICT_ALLOW" = 1; next;"); + ovn_lflow_add_with_ct_lb_lflow_ref(lflows, od, S_SWITCH_IN_ACL_EVAL, + 34000, "eth.dst == $svc_monitor_mac", + REGBIT_ACL_VERDICT_ALLOW" = 1; next;", od->nbs->name); /* Add a 34000 priority flow to advance the service monitor packets * generated by ovn-controller to skip applying egress ACLs. 
*/ - ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL_EVAL, 34000, - "eth.src == $svc_monitor_mac", - REGBIT_ACL_VERDICT_ALLOW" = 1; next;"); + ovn_lflow_add_with_ct_lb_lflow_ref( + lflows, od, S_SWITCH_OUT_ACL_EVAL, 34000, + "eth.src == $svc_monitor_mac", + REGBIT_ACL_VERDICT_ALLOW" = 1; next;", od->nbs->name); } build_acl_action_lflows(od, lflows, default_acl_action, meter_groups, @@ -8359,10 +8476,14 @@ static void build_qos(struct ovn_datapath *od, struct lflow_data *lflows) { struct ds action = DS_EMPTY_INITIALIZER; - ovn_lflow_add(lflows, od, S_SWITCH_IN_QOS_MARK, 0, "1", "next;"); - ovn_lflow_add(lflows, od, S_SWITCH_OUT_QOS_MARK, 0, "1", "next;"); - ovn_lflow_add(lflows, od, S_SWITCH_IN_QOS_METER, 0, "1", "next;"); - ovn_lflow_add(lflows, od, S_SWITCH_OUT_QOS_METER, 0, "1", "next;"); + ovn_lflow_add_with_od_lflow_ref( + lflows, od, S_SWITCH_IN_QOS_MARK, 0, "1", "next;", od->nbs->name); + ovn_lflow_add_with_od_lflow_ref( + lflows, od, S_SWITCH_OUT_QOS_MARK, 0, "1", "next;", od->nbs->name); + ovn_lflow_add_with_od_lflow_ref( + lflows, od, S_SWITCH_IN_QOS_METER, 0, "1", "next;", od->nbs->name); + ovn_lflow_add_with_od_lflow_ref( + lflows, od, S_SWITCH_OUT_QOS_METER, 0, "1", "next;", od->nbs->name); for (size_t i = 0; i < od->nbs->n_qos_rules; i++) { struct nbrec_qos *qos = od->nbs->qos_rules[i]; @@ -8379,7 +8500,9 @@ build_qos(struct ovn_datapath *od, struct lflow_data *lflows) { ovn_lflow_add_with_hint(lflows, od, stage, qos->priority, qos->match, ds_cstr(&action), - &qos->header_); + &qos->header_, + &od->lflow_dep_mgr, + OBJDEP_TYPE_OD, od->nbs->name, od); } } @@ -8410,7 +8533,9 @@ build_qos(struct ovn_datapath *od, struct lflow_data *lflows) { ovn_lflow_add_with_hint(lflows, od, stage, qos->priority, qos->match, ds_cstr(&action), - &qos->header_); + &qos->header_, + &od->lflow_dep_mgr, + OBJDEP_TYPE_OD, od->nbs->name, od); } } ds_destroy(&action); @@ -8475,7 +8600,8 @@ build_lb_rules_pre_stateful(struct lflow_data *lflows, ovn_lflow_add_with_dp_group( lflows, lb_dps->nb_ls_map, ods_size(ls_datapaths), S_SWITCH_IN_PRE_STATEFUL, 120, ds_cstr(match), ds_cstr(action), - &lb->nlb->header_); + &lb->nlb->header_, &lb_dps->lflow_dep_mgr, + OBJDEP_TYPE_LB, lb->nlb->name, NULL); } } @@ -8523,7 +8649,8 @@ build_lb_affinity_lr_flows(struct lflow_data *lflows, const struct ovn_northd_lb *lb, struct ovn_lb_vip *lb_vip, char *new_lb_match, char *lb_action, const unsigned long *dp_bitmap, - const struct ovn_datapaths *lr_datapaths) + const struct ovn_datapaths *lr_datapaths, + struct objdep_mgr *lflow_dep_mgr) { if (!lb->affinity_timeout || bitmap_is_all_zeros(dp_bitmap, ods_size(lr_datapaths))) { @@ -8562,7 +8689,8 @@ build_lb_affinity_lr_flows(struct lflow_data *lflows, ovn_lflow_add_with_dp_group( lflows, dp_bitmap, ods_size(lr_datapaths), S_ROUTER_IN_LB_AFF_CHECK, - 100, new_lb_match, ds_cstr(&aff_check_action), &lb->nlb->header_); + 100, new_lb_match, ds_cstr(&aff_check_action), &lb->nlb->header_, + lflow_dep_mgr, OBJDEP_TYPE_LB, lb->nlb->name, NULL); /* Prepare common part of affinity LB and affinity learn action. */ ds_put_format(&aff_action, "%s = %s; ", reg_vip, lb_vip->vip_str); @@ -8644,12 +8772,14 @@ build_lb_affinity_lr_flows(struct lflow_data *lflows, ovn_lflow_add_with_dp_group( lflows, dp_bitmap, ods_size(lr_datapaths), S_ROUTER_IN_LB_AFF_LEARN, 100, ds_cstr(&aff_match_learn), - ds_cstr(&aff_action_learn), &lb->nlb->header_); + ds_cstr(&aff_action_learn), &lb->nlb->header_, + lflow_dep_mgr, OBJDEP_TYPE_LB, lb->nlb->name, NULL); /* Use already selected backend within affinity timeslot. 
*/ ovn_lflow_add_with_dp_group( lflows, dp_bitmap, ods_size(lr_datapaths), S_ROUTER_IN_DNAT, 150, - ds_cstr(&aff_match), ds_cstr(&aff_action), &lb->nlb->header_); + ds_cstr(&aff_match), ds_cstr(&aff_action), &lb->nlb->header_, + lflow_dep_mgr, OBJDEP_TYPE_LB, lb->nlb->name, NULL); ds_truncate(&aff_action, aff_action_len); ds_truncate(&aff_action_learn, aff_action_learn_len); @@ -8737,7 +8867,8 @@ build_lb_affinity_ls_flows(struct lflow_data *lflows, ovn_lflow_add_with_dp_group( lflows, lb_dps->nb_ls_map, ods_size(ls_datapaths), S_SWITCH_IN_LB_AFF_CHECK, 100, ds_cstr(&new_lb_match), aff_check, - &lb_dps->lb->nlb->header_); + &lb_dps->lb->nlb->header_, + &lb_dps->lflow_dep_mgr, OBJDEP_TYPE_LB, lb->nlb->name, NULL); ds_destroy(&new_lb_match); struct ds aff_action = DS_EMPTY_INITIALIZER; @@ -8827,13 +8958,15 @@ build_lb_affinity_ls_flows(struct lflow_data *lflows, ovn_lflow_add_with_dp_group( lflows, lb_dps->nb_ls_map, ods_size(ls_datapaths), S_SWITCH_IN_LB_AFF_LEARN, 100, ds_cstr(&aff_match_learn), - ds_cstr(&aff_action_learn), &lb->nlb->header_); + ds_cstr(&aff_action_learn), &lb->nlb->header_, + &lb_dps->lflow_dep_mgr, OBJDEP_TYPE_LB, lb->nlb->name, NULL); /* Use already selected backend within affinity timeslot. */ ovn_lflow_add_with_dp_group( lflows, lb_dps->nb_ls_map, ods_size(ls_datapaths), S_SWITCH_IN_LB, 150, ds_cstr(&aff_match), ds_cstr(&aff_action), - &lb->nlb->header_); + &lb->nlb->header_, + &lb_dps->lflow_dep_mgr, OBJDEP_TYPE_LB, lb->nlb->name, NULL); ds_truncate(&aff_action, aff_action_len); ds_truncate(&aff_action_learn, aff_action_learn_len); @@ -8852,8 +8985,10 @@ build_lswitch_lb_affinity_default_flows(struct ovn_datapath *od, struct lflow_data *lflows) { ovs_assert(od->nbs); - ovn_lflow_add(lflows, od, S_SWITCH_IN_LB_AFF_CHECK, 0, "1", "next;"); - ovn_lflow_add(lflows, od, S_SWITCH_IN_LB_AFF_LEARN, 0, "1", "next;"); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_LB_AFF_CHECK, 0, + "1", "next;", od->nbs->name); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_LB_AFF_LEARN, 0, + "1", "next;", od->nbs->name); } static void @@ -8861,8 +8996,10 @@ build_lrouter_lb_affinity_default_flows(struct ovn_datapath *od, struct lflow_data *lflows) { ovs_assert(od->nbr); - ovn_lflow_add(lflows, od, S_ROUTER_IN_LB_AFF_CHECK, 0, "1", "next;"); - ovn_lflow_add(lflows, od, S_ROUTER_IN_LB_AFF_LEARN, 0, "1", "next;"); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_LB_AFF_CHECK, 0, + "1", "next;", od->nbr->name); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_LB_AFF_LEARN, 0, + "1", "next;", od->nbr->name); } static void @@ -8930,14 +9067,18 @@ build_lb_rules(struct lflow_data *lflows, struct ovn_lb_datapaths *lb_dps, ovn_lflow_add_with_hint__( lflows, od, S_SWITCH_IN_LB, priority, ds_cstr(match), ds_cstr(action), - NULL, meter, &lb->nlb->header_); + NULL, meter, &lb->nlb->header_, + &lb_dps->lflow_dep_mgr, OBJDEP_TYPE_LB, + lb->nlb->name, NULL); } } if (!reject || build_non_meter) { ovn_lflow_add_with_dp_group( lflows, dp_non_meter ? dp_non_meter : lb_dps->nb_ls_map, ods_size(ls_datapaths), S_SWITCH_IN_LB, priority, - ds_cstr(match), ds_cstr(action), &lb->nlb->header_); + ds_cstr(match), ds_cstr(action), &lb->nlb->header_, + &lb_dps->lflow_dep_mgr, OBJDEP_TYPE_LB, + lb->nlb->name, NULL); } bitmap_free(dp_non_meter); } @@ -8955,9 +9096,12 @@ build_stateful(struct ovn_datapath *od, /* Ingress LB, Ingress and Egress stateful Table (Priority 0): Packets are * allowed by default. 
*/ - ovn_lflow_add(lflows, od, S_SWITCH_IN_LB, 0, "1", "next;"); - ovn_lflow_add(lflows, od, S_SWITCH_IN_STATEFUL, 0, "1", "next;"); - ovn_lflow_add(lflows, od, S_SWITCH_OUT_STATEFUL, 0, "1", "next;"); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_LB, 0, "1", + "next;", od->nbs->name); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_STATEFUL, 0, + "1", "next;", od->nbs->name); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_OUT_STATEFUL, 0, + "1", "next;", od->nbs->name); /* If REGBIT_CONNTRACK_COMMIT is set as 1 and * REGBIT_CONNTRACK_SET_LABEL is set to 1, then the packets should be @@ -8968,14 +9112,14 @@ build_stateful(struct ovn_datapath *od, ds_put_format(&actions, "ct_commit { %s = 0; " "ct_label.label = " REG_LABEL "; }; next;", ct_block_action); - ovn_lflow_add(lflows, od, S_SWITCH_IN_STATEFUL, 100, + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_STATEFUL, 100, REGBIT_CONNTRACK_COMMIT" == 1 && " REGBIT_ACL_LABEL" == 1", - ds_cstr(&actions)); - ovn_lflow_add(lflows, od, S_SWITCH_OUT_STATEFUL, 100, + ds_cstr(&actions), od->nbs->name); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_OUT_STATEFUL, 100, REGBIT_CONNTRACK_COMMIT" == 1 && " REGBIT_ACL_LABEL" == 1", - ds_cstr(&actions)); + ds_cstr(&actions), od->nbs->name); /* If REGBIT_CONNTRACK_COMMIT is set as 1, then the packets should be * committed to conntrack. We always set ct_mark.blocked to 0 here as @@ -8983,14 +9127,14 @@ build_stateful(struct ovn_datapath *od, * want to allow to continue. */ ds_clear(&actions); ds_put_format(&actions, "ct_commit { %s = 0; }; next;", ct_block_action); - ovn_lflow_add(lflows, od, S_SWITCH_IN_STATEFUL, 100, + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_STATEFUL, 100, REGBIT_CONNTRACK_COMMIT" == 1 && " REGBIT_ACL_LABEL" == 0", - ds_cstr(&actions)); - ovn_lflow_add(lflows, od, S_SWITCH_OUT_STATEFUL, 100, + ds_cstr(&actions), od->nbs->name); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_OUT_STATEFUL, 100, REGBIT_CONNTRACK_COMMIT" == 1 && " REGBIT_ACL_LABEL" == 0", - ds_cstr(&actions)); + ds_cstr(&actions), od->nbs->name); ds_destroy(&actions); } @@ -9000,9 +9144,12 @@ build_lb_hairpin(struct ovn_datapath *od, struct lflow_data *lflows) /* Ingress Pre-Hairpin/Nat-Hairpin/Hairpin tabled (Priority 0). * Packets that don't need hairpinning should continue processing. */ - ovn_lflow_add(lflows, od, S_SWITCH_IN_PRE_HAIRPIN, 0, "1", "next;"); - ovn_lflow_add(lflows, od, S_SWITCH_IN_NAT_HAIRPIN, 0, "1", "next;"); - ovn_lflow_add(lflows, od, S_SWITCH_IN_HAIRPIN, 0, "1", "next;"); + ovn_lflow_add_with_lb_lflow_ref(lflows, od, S_SWITCH_IN_PRE_HAIRPIN, 0, + "1", "next;", od->nbs->name); + ovn_lflow_add_with_lb_lflow_ref(lflows, od, S_SWITCH_IN_NAT_HAIRPIN, 0, + "1", "next;", od->nbs->name); + ovn_lflow_add_with_lb_lflow_ref(lflows, od, S_SWITCH_IN_HAIRPIN, 0, + "1", "next;", od->nbs->name); if (od->has_lb_vip) { /* Check if the packet needs to be hairpinned. @@ -9013,8 +9160,8 @@ build_lb_hairpin(struct ovn_datapath *od, struct lflow_data *lflows) lflows, od, S_SWITCH_IN_PRE_HAIRPIN, 100, "ip && ct.trk", REGBIT_HAIRPIN " = chk_lb_hairpin(); " REGBIT_HAIRPIN_REPLY " = chk_lb_hairpin_reply(); " - "next;", - &od->nbs->header_); + "next;", &od->nbs->header_, + &od->lflow_dep_mgr, OBJDEP_TYPE_LB, od->nbs->name, NULL); /* If packet needs to be hairpinned, snat the src ip with the VIP * for new sessions. 
*/ @@ -9022,7 +9169,9 @@ build_lb_hairpin(struct ovn_datapath *od, struct lflow_data *lflows) "ip && ct.new && ct.trk" " && "REGBIT_HAIRPIN " == 1", "ct_snat_to_vip; next;", - &od->nbs->header_); + &od->nbs->header_, + &od->lflow_dep_mgr, OBJDEP_TYPE_LB, + od->nbs->name, od); /* If packet needs to be hairpinned, for established sessions there * should already be an SNAT conntrack entry. @@ -9031,23 +9180,27 @@ build_lb_hairpin(struct ovn_datapath *od, struct lflow_data *lflows) "ip && ct.est && ct.trk" " && "REGBIT_HAIRPIN " == 1", "ct_snat;", - &od->nbs->header_); + &od->nbs->header_, + &od->lflow_dep_mgr, OBJDEP_TYPE_LB, + od->nbs->name, od); /* For the reply of hairpinned traffic, snat the src ip to the VIP. */ ovn_lflow_add_with_hint(lflows, od, S_SWITCH_IN_NAT_HAIRPIN, 90, "ip && "REGBIT_HAIRPIN_REPLY " == 1", "ct_snat;", - &od->nbs->header_); + &od->nbs->header_, + &od->lflow_dep_mgr, OBJDEP_TYPE_LB, + od->nbs->name, od); /* Ingress Hairpin table. * - Priority 1: Packets that were SNAT-ed for hairpinning should be * looped back (i.e., swap ETH addresses and send back on inport). */ - ovn_lflow_add( + ovn_lflow_add_with_lb_lflow_ref( lflows, od, S_SWITCH_IN_HAIRPIN, 1, "("REGBIT_HAIRPIN " == 1 || " REGBIT_HAIRPIN_REPLY " == 1)", "eth.dst <-> eth.src; outport = inport; flags.loopback = 1; " - "output;"); + "output;", od->nbs->name); } } @@ -9064,8 +9217,9 @@ build_vtep_hairpin(struct ovn_datapath *od, struct lflow_data *lflows) */ char *action = xasprintf("next(pipeline=ingress, table=%d);", ovn_stage_get_table(S_SWITCH_IN_L2_LKUP)); - ovn_lflow_add(lflows, od, S_SWITCH_IN_HAIRPIN, 1000, - REGBIT_FROM_RAMP" == 1", action); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_HAIRPIN, 1000, + REGBIT_FROM_RAMP" == 1", action, + od->nbs->name); free(action); /* Ingress pre-arp flow for traffic from VTEP (ramp) switch. @@ -9081,8 +9235,9 @@ build_vtep_hairpin(struct ovn_datapath *od, struct lflow_data *lflows) ds_put_format(&match, REGBIT_FROM_RAMP" == 1 && is_chassis_resident(%s)", op->cr_port->json_key); - ovn_lflow_add(lflows, od, S_SWITCH_IN_HAIRPIN, 2000, - ds_cstr(&match), "next;"); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_HAIRPIN, + 2000, ds_cstr(&match), "next;", + od->nbs->name); } } @@ -9091,9 +9246,10 @@ build_vtep_hairpin(struct ovn_datapath *od, struct lflow_data *lflows) * Neighbor resolution for router ports is done in logical router ingress * pipeline. ARP resolution for vif lports is done directly by vif ports. 
*/ - ovn_lflow_add(lflows, od, S_SWITCH_IN_ARP_ND_RSP, 65535, - REGBIT_FROM_RAMP" == 1 && (arp || nd_ns)", - "flags.loopback = 1; next;"); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_ARP_ND_RSP, 65535, + REGBIT_FROM_RAMP" == 1 && (arp || nd_ns)", + "flags.loopback = 1; next;", + od->nbs->name); ds_destroy(&match); } @@ -9135,7 +9291,9 @@ build_fwd_group_lflows(struct ovn_datapath *od, struct lflow_data *lflows) ovn_lflow_add_with_hint(lflows, od, S_SWITCH_IN_ARP_ND_RSP, 50, ds_cstr(&match), ds_cstr(&actions), - &fwd_group->header_); + &fwd_group->header_, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbs->name, NULL); /* L2 lookup for the forwarding group's virtual MAC */ ds_clear(&match); @@ -9158,7 +9316,9 @@ build_fwd_group_lflows(struct ovn_datapath *od, struct lflow_data *lflows) ds_put_format(&actions, "fwd_group(%s);", ds_cstr(&group_ports)); ovn_lflow_add_with_hint(lflows, od, S_SWITCH_IN_L2_LKUP, 50, ds_cstr(&match), ds_cstr(&actions), - &fwd_group->header_); + &fwd_group->header_, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbs->name, NULL); } ds_destroy(&match); @@ -9831,15 +9991,17 @@ build_lswitch_lflows_l2_unknown(struct ovn_datapath *od, { /* Ingress table 25/26: Destination lookup for unknown MACs. */ if (od->has_unknown) { - ovn_lflow_add(lflows, od, S_SWITCH_IN_L2_UNKNOWN, 50, - "outport == \"none\"", - "outport = \""MC_UNKNOWN "\"; output;"); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_L2_UNKNOWN, 50, + "outport == \"none\"", + "outport = \""MC_UNKNOWN "\"; output;", + od->nbs->name); } else { - ovn_lflow_add(lflows, od, S_SWITCH_IN_L2_UNKNOWN, 50, - "outport == \"none\"", debug_drop_action()); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_L2_UNKNOWN, 50, + "outport == \"none\"", + debug_drop_action(), od->nbs->name); } - ovn_lflow_add(lflows, od, S_SWITCH_IN_L2_UNKNOWN, 0, "1", - "output;"); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_L2_UNKNOWN, 0, "1", + "output;", od->nbs->name); } /* Build pre-ACL and ACL tables for both ingress and egress. @@ -9856,6 +10018,7 @@ build_lswitch_lflows_pre_acl_and_acl(struct ovn_datapath *od, build_pre_acls(od, port_groups, lflows); build_pre_lb(od, meter_groups, lflows); + build_pre_lb_lb_related(od, lflows); build_pre_stateful(od, features, lflows); build_acl_hints(od, features, lflows); build_acls(od, features, lflows, port_groups, meter_groups); @@ -9875,21 +10038,26 @@ build_lswitch_lflows_admission_control(struct ovn_datapath *od, /* Logical VLANs not supported. */ if (!is_vlan_transparent(od)) { /* Block logical VLANs. */ - ovn_lflow_add(lflows, od, S_SWITCH_IN_CHECK_PORT_SEC, 100, - "vlan.present", debug_drop_action()); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_CHECK_PORT_SEC, + 100, "vlan.present", + debug_drop_action(), od->nbs->name); } /* Broadcast/multicast source address is invalid. 
*/ - ovn_lflow_add(lflows, od, S_SWITCH_IN_CHECK_PORT_SEC, 100, - "eth.src[40]", debug_drop_action()); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_CHECK_PORT_SEC, + 100, "eth.src[40]", debug_drop_action(), + od->nbs->name); - ovn_lflow_add(lflows, od, S_SWITCH_IN_CHECK_PORT_SEC, 50, "1", - REGBIT_PORT_SEC_DROP" = check_in_port_sec(); next;"); + ovn_lflow_add_with_od_lflow_ref( + lflows, od, S_SWITCH_IN_CHECK_PORT_SEC, 50, "1", + REGBIT_PORT_SEC_DROP" = check_in_port_sec(); next;", od->nbs->name); - ovn_lflow_add(lflows, od, S_SWITCH_IN_APPLY_PORT_SEC, 50, - REGBIT_PORT_SEC_DROP" == 1", debug_drop_action()); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_APPLY_PORT_SEC, 50, + REGBIT_PORT_SEC_DROP" == 1", + debug_drop_action(), od->nbs->name); - ovn_lflow_add(lflows, od, S_SWITCH_IN_APPLY_PORT_SEC, 0, "1", "next;"); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_APPLY_PORT_SEC, 0, + "1", "next;", od->nbs->name); } /* Ingress table 19: ARP/ND responder, skip requests coming from localnet @@ -10242,18 +10410,20 @@ build_lswitch_arp_nd_responder_default(struct ovn_datapath *od, struct lflow_data *lflows) { ovs_assert(od->nbs); - ovn_lflow_add(lflows, od, S_SWITCH_IN_ARP_ND_RSP, 0, "1", "next;"); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_ARP_ND_RSP, 0, + "1", "next;", od->nbs->name); } /* Ingress table 19: ARP/ND responder for service monitor source ip. * (priority 110)*/ static void -build_lswitch_arp_nd_service_monitor(const struct ovn_northd_lb *lb, +build_lswitch_arp_nd_service_monitor(struct ovn_lb_datapaths *lb_dps, const struct hmap *ls_ports, struct lflow_data *lflows, struct ds *actions, struct ds *match) { + const struct ovn_northd_lb *lb = lb_dps->lb; for (size_t i = 0; i < lb->n_vips; i++) { struct ovn_northd_lb_vip *lb_vip_nb = &lb->vips_nb[i]; if (!lb_vip_nb->lb_health_check) { @@ -10316,7 +10486,9 @@ build_lswitch_arp_nd_service_monitor(const struct ovn_northd_lb *lb, op->od, S_SWITCH_IN_ARP_ND_RSP, 110, ds_cstr(match), ds_cstr(actions), - &lb->nlb->header_); + &lb->nlb->header_, + &lb_dps->lflow_dep_mgr, OBJDEP_TYPE_LB, + lb->nlb->name, NULL); } } } @@ -10385,11 +10557,16 @@ build_lswitch_dhcp_and_dns_defaults(struct ovn_datapath *od, struct lflow_data *lflows) { ovs_assert(od->nbs); - ovn_lflow_add(lflows, od, S_SWITCH_IN_DHCP_OPTIONS, 0, "1", "next;"); - ovn_lflow_add(lflows, od, S_SWITCH_IN_DHCP_RESPONSE, 0, "1", "next;"); - ovn_lflow_add(lflows, od, S_SWITCH_IN_DNS_LOOKUP, 0, "1", "next;"); - ovn_lflow_add(lflows, od, S_SWITCH_IN_DNS_RESPONSE, 0, "1", "next;"); - ovn_lflow_add(lflows, od, S_SWITCH_IN_EXTERNAL_PORT, 0, "1", "next;"); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_DHCP_OPTIONS, 0, + "1", "next;", od->nbs->name); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_DHCP_RESPONSE, 0, + "1", "next;", od->nbs->name); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_DNS_LOOKUP, 0, + "1", "next;", od->nbs->name); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_DNS_RESPONSE, 0, + "1", "next;", od->nbs->name); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_EXTERNAL_PORT, 0, + "1", "next;", od->nbs->name); } /* Logical switch ingress table 22 and 23: DNS lookup and response @@ -10408,18 +10585,19 @@ build_lswitch_dns_lookup_and_response(struct ovn_datapath *od, "udp.dst == 53", REGBIT_DNS_LOOKUP_RESULT" = dns_lookup(); next;", copp_meter_get(COPP_DNS, od->nbs->copp, - meter_groups)); + meter_groups), + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, od->nbs->name, od); const char 
*dns_action = "eth.dst <-> eth.src; ip4.src <-> ip4.dst; " "udp.dst = udp.src; udp.src = 53; outport = inport; " "flags.loopback = 1; output;"; const char *dns_match = "udp.dst == 53 && "REGBIT_DNS_LOOKUP_RESULT; - ovn_lflow_add(lflows, od, S_SWITCH_IN_DNS_RESPONSE, 100, - dns_match, dns_action); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_DNS_RESPONSE, 100, + dns_match, dns_action, od->nbs->name); dns_action = "eth.dst <-> eth.src; ip6.src <-> ip6.dst; " "udp.dst = udp.src; udp.src = 53; outport = inport; " "flags.loopback = 1; output;"; - ovn_lflow_add(lflows, od, S_SWITCH_IN_DNS_RESPONSE, 100, - dns_match, dns_action); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_DNS_RESPONSE, 100, + dns_match, dns_action, od->nbs->name); } /* Table 24: External port. Drop ARP request for router ips from @@ -10451,9 +10629,9 @@ build_lswitch_destination_lookup_bmcast(struct ovn_datapath *od, { ovs_assert(od->nbs); - ovn_lflow_add(lflows, od, S_SWITCH_IN_L2_LKUP, 110, + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_L2_LKUP, 110, "eth.dst == $svc_monitor_mac && (tcp || icmp || icmp6)", - "handle_svc_check(inport);"); + "handle_svc_check(inport);", od->nbs->name); struct mcast_switch_info *mcast_sw_info = &od->mcast_info.sw; @@ -10464,27 +10642,33 @@ build_lswitch_destination_lookup_bmcast(struct ovn_datapath *od, ovn_lflow_metered(lflows, od, S_SWITCH_IN_L2_LKUP, 100, "igmp", ds_cstr(actions), copp_meter_get(COPP_IGMP, od->nbs->copp, - meter_groups)); + meter_groups), + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbs->name, od); /* Punt MLD traffic to controller. */ ovn_lflow_metered(lflows, od, S_SWITCH_IN_L2_LKUP, 100, "mldv1 || mldv2", ds_cstr(actions), copp_meter_get(COPP_IGMP, od->nbs->copp, - meter_groups)); + meter_groups), + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbs->name, od); /* Flood all IP multicast traffic destined to 224.0.0.X to all * ports - RFC 4541, section 2.1.2, item 2. */ - ovn_lflow_add(lflows, od, S_SWITCH_IN_L2_LKUP, 85, - "ip4.mcast && ip4.dst == 224.0.0.0/24", - "outport = \""MC_FLOOD_L2"\"; output;"); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_L2_LKUP, 85, + "ip4.mcast && ip4.dst == 224.0.0.0/24", + "outport = \""MC_FLOOD_L2"\"; output;", + od->nbs->name); /* Flood all IPv6 multicast traffic destined to reserved * multicast IPs (RFC 4291, 2.7.1). */ - ovn_lflow_add(lflows, od, S_SWITCH_IN_L2_LKUP, 85, - "ip6.mcast_flood", - "outport = \""MC_FLOOD"\"; output;"); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_L2_LKUP, 85, + "ip6.mcast_flood", + "outport = \""MC_FLOOD"\"; output;", + od->nbs->name); /* Forward uregistered IP multicast to routers with relay enabled * and to any ports configured to flood IP multicast traffic. 
@@ -10514,21 +10698,24 @@ build_lswitch_destination_lookup_bmcast(struct ovn_datapath *od, ds_put_cstr(actions, debug_drop_action()); } - ovn_lflow_add(lflows, od, S_SWITCH_IN_L2_LKUP, 80, - "ip4.mcast || ip6.mcast", - ds_cstr(actions)); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_L2_LKUP, + 80, "ip4.mcast || ip6.mcast", + ds_cstr(actions), od->nbs->name); } } if (!smap_get_bool(&od->nbs->other_config, "broadcast-arps-to-all-routers", true)) { - ovn_lflow_add(lflows, od, S_SWITCH_IN_L2_LKUP, 72, - "eth.mcast && (arp.op == 1 || nd_ns)", - "outport = \""MC_FLOOD_L2"\"; output;"); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_L2_LKUP, 72, + "eth.mcast && (arp.op == 1 || nd_ns)", + "outport = \""MC_FLOOD_L2"\"; output;", + od->nbs->name); } - ovn_lflow_add(lflows, od, S_SWITCH_IN_L2_LKUP, 70, "eth.mcast", - "outport = \""MC_FLOOD"\"; output;"); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_SWITCH_IN_L2_LKUP, 70, + "eth.mcast", + "outport = \""MC_FLOOD"\"; output;", + od->nbs->name); } @@ -10609,8 +10796,10 @@ build_lswitch_ip_mcast_igmp_mld(struct ovn_igmp_group *igmp_group, ds_put_format(actions, "outport = \"%s\"; output; ", igmp_group->mcgroup.name); - ovn_lflow_add(lflows, igmp_group->datapath, S_SWITCH_IN_L2_LKUP, - 90, ds_cstr(match), ds_cstr(actions)); + ovn_lflow_add(lflows, igmp_group->datapath, S_SWITCH_IN_L2_LKUP, 90, + ds_cstr(match), ds_cstr(actions), + &igmp_group->lflow_dep_mgr, OBJDEP_TYPE_IGMP, + igmp_group->mcgroup.name, igmp_group->datapath); } } @@ -11068,7 +11257,9 @@ build_routing_policy_flow(struct lflow_data *lflows, struct ovn_datapath *od, ds_put_format(&match, "%s", rule->match); ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_POLICY, rule->priority, - ds_cstr(&match), ds_cstr(&actions), stage_hint); + ds_cstr(&match), ds_cstr(&actions), stage_hint, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); ds_destroy(&match); ds_destroy(&actions); } @@ -11150,7 +11341,9 @@ build_ecmp_routing_policy_flows(struct lflow_data *lflows, ecmp_group_id, i + 1); ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_POLICY_ECMP, 100, ds_cstr(&match), - ds_cstr(&actions), &rule->header_); + ds_cstr(&actions), &rule->header_, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); } ds_clear(&actions); @@ -11168,7 +11361,9 @@ build_ecmp_routing_policy_flows(struct lflow_data *lflows, ds_put_cstr(&actions, ");"); ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_POLICY, rule->priority, rule->match, - ds_cstr(&actions), &rule->header_); + ds_cstr(&actions), &rule->header_, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); cleanup: ds_destroy(&match); @@ -11228,8 +11423,9 @@ build_route_table_lflow(struct ovn_datapath *od, struct lflow_data *lflows, ds_put_format(&actions, "%s = %d; next;", REG_ROUTE_TABLE_ID, rtb_id); - ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_ROUTING_PRE, 100, - ds_cstr(&match), ds_cstr(&actions)); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_IP_ROUTING_PRE, + 100, ds_cstr(&match), ds_cstr(&actions), + od->nbr->name); ds_destroy(&match); ds_destroy(&actions); @@ -11652,13 +11848,15 @@ add_ecmp_symmetric_reply_flows(struct lflow_data *lflows, ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_DEFRAG, 100, ds_cstr(&base_match), REGBIT_KNOWN_ECMP_NH" = chk_ecmp_nh_mac(); ct_next;", - &st_route->header_); + &st_route->header_, &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); /* And packets that go out over an ECMP route need conntrack */ ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_DEFRAG, 100, 
ds_cstr(route_match), REGBIT_KNOWN_ECMP_NH" = chk_ecmp_nh(); ct_next;", - &st_route->header_); + &st_route->header_, &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); /* Save src eth and inport in ct_label for packets that arrive over * an ECMP route. @@ -11676,7 +11874,9 @@ add_ecmp_symmetric_reply_flows(struct lflow_data *lflows, IN6_IS_ADDR_V4MAPPED(&route->prefix) ? "false" : "true"); ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_ECMP_STATEFUL, 100, ds_cstr(&match), ds_cstr(&actions), - &st_route->header_); + &st_route->header_, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); ds_clear(&match); ds_put_format(&match, "%s && (ct.new && !ct.est) && udp", ds_cstr(&base_match)); @@ -11689,7 +11889,9 @@ add_ecmp_symmetric_reply_flows(struct lflow_data *lflows, IN6_IS_ADDR_V4MAPPED(&route->prefix) ? "false" : "true"); ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_ECMP_STATEFUL, 100, ds_cstr(&match), ds_cstr(&actions), - &st_route->header_); + &st_route->header_, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); ds_clear(&match); ds_put_format(&match, "%s && (ct.new && !ct.est) && sctp", ds_cstr(&base_match)); @@ -11702,7 +11904,9 @@ add_ecmp_symmetric_reply_flows(struct lflow_data *lflows, IN6_IS_ADDR_V4MAPPED(&route->prefix) ? "false" : "true"); ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_ECMP_STATEFUL, 100, ds_cstr(&match), ds_cstr(&actions), - &st_route->header_); + &st_route->header_, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); ds_clear(&match); ds_put_format(&match, @@ -11717,7 +11921,9 @@ add_ecmp_symmetric_reply_flows(struct lflow_data *lflows, IN6_IS_ADDR_V4MAPPED(&route->prefix) ? "false" : "true"); ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_ECMP_STATEFUL, 100, ds_cstr(&match), ds_cstr(&actions), - &st_route->header_); + &st_route->header_, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); ds_clear(&match); ds_put_format(&match, @@ -11732,7 +11938,9 @@ add_ecmp_symmetric_reply_flows(struct lflow_data *lflows, IN6_IS_ADDR_V4MAPPED(&route->prefix) ? "false" : "true"); ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_ECMP_STATEFUL, 100, ds_cstr(&match), ds_cstr(&actions), - &st_route->header_); + &st_route->header_, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); ds_clear(&match); ds_put_format(&match, "%s && (!ct.rpl && ct.est) && sctp && "REGBIT_KNOWN_ECMP_NH" == 0", @@ -11746,7 +11954,9 @@ add_ecmp_symmetric_reply_flows(struct lflow_data *lflows, IN6_IS_ADDR_V4MAPPED(&route->prefix) ? "false" : "true"); ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_ECMP_STATEFUL, 100, ds_cstr(&match), ds_cstr(&actions), - &st_route->header_); + &st_route->header_, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); /* Bypass ECMP selection if we already have ct_label information * for where to route the packet. @@ -11764,13 +11974,16 @@ add_ecmp_symmetric_reply_flows(struct lflow_data *lflows, IN6_IS_ADDR_V4MAPPED(&route->prefix) ? "" : "xx", port_ip, out_port->json_key); ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_IP_ROUTING, 10300, - ds_cstr(&match), ds_cstr(&actions), - &st_route->header_); + ds_cstr(&match), ds_cstr(&actions), + &st_route->header_, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); /* Egress reply traffic for symmetric ECMP routes skips router policies. 
*/ ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_POLICY, 65535, ds_cstr(&ecmp_reply), "next;", - &st_route->header_); + &st_route->header_, &od->lflow_dep_mgr, + OBJDEP_TYPE_OD, od->nbr->name, od); /* Use REG_ECMP_ETH_FULL to pass the eth field from ct_label to eth.dst to * avoid masked access to ct_label. Otherwise it may prevent OVS flow @@ -11786,7 +11999,9 @@ add_ecmp_symmetric_reply_flows(struct lflow_data *lflows, " pop(" REG_ECMP_ETH_FULL "); next;"; ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_ARP_RESOLVE, 200, ds_cstr(&ecmp_reply), - action, &st_route->header_); + action, &st_route->header_, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); ds_destroy(&base_match); ds_destroy(&match); @@ -11829,8 +12044,9 @@ build_ecmp_route_flow(struct lflow_data *lflows, struct ovn_datapath *od, ds_put_cstr(&actions, ");"); - ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_ROUTING, priority, - ds_cstr(&route_match), ds_cstr(&actions)); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_IP_ROUTING, + priority, ds_cstr(&route_match), + ds_cstr(&actions), od->nbr->name); /* Add per member flow */ struct ds match = DS_EMPTY_INITIALIZER; @@ -11873,7 +12089,9 @@ build_ecmp_route_flow(struct lflow_data *lflows, struct ovn_datapath *od, out_port->json_key); ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_IP_ROUTING_ECMP, 100, ds_cstr(&match), ds_cstr(&actions), - &route->header_); + &route->header_, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); } sset_destroy(&visited_ports); ds_destroy(&match); @@ -11887,7 +12105,9 @@ add_route(struct lflow_data *lflows, struct ovn_datapath *od, const char *network_s, int plen, const char *gateway, bool is_src_route, const uint32_t rtb_id, const struct ovsdb_idl_row *stage_hint, bool is_discard_route, - int ofs) + int ofs, struct objdep_mgr *lflow_dep_mgr, + enum objdep_type res_type, const char *res_name, + struct ovn_datapath *res_od) { bool is_ipv4 = strchr(network_s, '.') ? true : false; struct ds match = DS_EMPTY_INITIALIZER; @@ -11932,12 +12152,16 @@ add_route(struct lflow_data *lflows, struct ovn_datapath *od, ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_IP_ROUTING, priority, ds_cstr(&match), ds_cstr(&actions), - stage_hint); + stage_hint, + lflow_dep_mgr, res_type, + res_name, res_od); if (op && op->has_bfd) { ds_put_format(&match, " && udp.dst == 3784"); ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_ROUTING, priority + 1, ds_cstr(&match), - ds_cstr(&common_actions), stage_hint); + ds_cstr(&common_actions), stage_hint, + lflow_dep_mgr, res_type, + res_name, res_od); } ds_destroy(&match); ds_destroy(&common_actions); @@ -11971,7 +12195,8 @@ build_static_route_flow(struct lflow_data *lflows, struct ovn_datapath *od, add_route(lflows, route_->is_discard_route ? 
od : out_port->od, out_port, lrp_addr_s, prefix_s, route_->plen, route->nexthop, route_->is_src_route, route_->route_table_id, &route->header_, - route_->is_discard_route, ofs); + route_->is_discard_route, ofs, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, od->nbr->name, od); free(prefix_s); } @@ -12068,7 +12293,10 @@ lrouter_use_common_zone(const struct ovn_datapath *od) static void build_distr_lrouter_nat_flows_for_lb(struct lrouter_nat_lb_flows_ctx *ctx, enum lrouter_nat_lb_flow_type type, - struct ovn_datapath *od) + struct ovn_datapath *od, + struct objdep_mgr *lflow_dep_mgr, + enum objdep_type res_type, + const char *res_name) { struct ovn_port *dgp = od->l3dgw_ports[0]; @@ -12107,7 +12335,8 @@ build_distr_lrouter_nat_flows_for_lb(struct lrouter_nat_lb_flows_ctx *ctx, ovn_lflow_add_with_hint__(ctx->lflows, od, S_ROUTER_IN_DNAT, ctx->prio, ds_cstr(ctx->new_match), ctx->new_action[type], - NULL, meter, &ctx->lb->nlb->header_); + NULL, meter, &ctx->lb->nlb->header_, + lflow_dep_mgr, res_type, res_name, NULL); ds_truncate(ctx->new_match, new_match_len); @@ -12126,7 +12355,8 @@ build_distr_lrouter_nat_flows_for_lb(struct lrouter_nat_lb_flows_ctx *ctx, ovn_lflow_add_with_hint(ctx->lflows, od, S_ROUTER_IN_GW_REDIRECT, 200, ds_cstr(ctx->undnat_match), ds_cstr(ctx->gw_redir_action), - &ctx->lb->nlb->header_); + &ctx->lb->nlb->header_, + lflow_dep_mgr, res_type, res_name, NULL); ds_truncate(ctx->undnat_match, undnat_match_len); ds_put_format(ctx->undnat_match, ") && (inport == %s || outport == %s)" @@ -12134,7 +12364,8 @@ build_distr_lrouter_nat_flows_for_lb(struct lrouter_nat_lb_flows_ctx *ctx, dgp->cr_port->json_key); ovn_lflow_add_with_hint(ctx->lflows, od, S_ROUTER_OUT_UNDNAT, 120, ds_cstr(ctx->undnat_match), undnat_action, - &ctx->lb->nlb->header_); + &ctx->lb->nlb->header_, + lflow_dep_mgr, res_type, res_name, NULL); ds_truncate(ctx->undnat_match, undnat_match_len); } @@ -12142,7 +12373,9 @@ static void build_gw_lrouter_nat_flows_for_lb(struct lrouter_nat_lb_flows_ctx *ctx, enum lrouter_nat_lb_flow_type type, const struct ovn_datapaths *lr_datapaths, - const unsigned long *dp_bitmap) + const unsigned long *dp_bitmap, + struct objdep_mgr *lflow_dep_mgr, + const char *lb_name) { unsigned long *dp_non_meter = NULL; bool build_non_meter = false; @@ -12168,14 +12401,16 @@ build_gw_lrouter_nat_flows_for_lb(struct lrouter_nat_lb_flows_ctx *ctx, bitmap_set0(dp_non_meter, index); ovn_lflow_add_with_hint__(ctx->lflows, od, S_ROUTER_IN_DNAT, ctx->prio, ds_cstr(ctx->new_match), ctx->new_action[type], - NULL, meter, &ctx->lb->nlb->header_); + NULL, meter, &ctx->lb->nlb->header_, + lflow_dep_mgr, OBJDEP_TYPE_LB, lb_name, NULL); } } if (!ctx->reject || build_non_meter) { ovn_lflow_add_with_dp_group(ctx->lflows, dp_non_meter ? dp_non_meter : dp_bitmap, ods_size(lr_datapaths), S_ROUTER_IN_DNAT, ctx->prio, ds_cstr(ctx->new_match), - ctx->new_action[type], &ctx->lb->nlb->header_); + ctx->new_action[type], &ctx->lb->nlb->header_, + lflow_dep_mgr, OBJDEP_TYPE_LB, lb_name, NULL); } bitmap_free(dp_non_meter); } @@ -12300,7 +12535,10 @@ build_lrouter_nat_flows_for_lb(struct ovn_lb_vip *lb_vip, if (!od->n_l3dgw_ports) { bitmap_set1(dp_bitmap[type], index); } else { - build_distr_lrouter_nat_flows_for_lb(&ctx, type, od); + build_distr_lrouter_nat_flows_for_lb(&ctx, type, od, + &lb_dps->lflow_dep_mgr, + OBJDEP_TYPE_LB, + lb_dps->lb->nlb->name); } if (lb->affinity_timeout) { @@ -12326,13 +12564,17 @@ build_lrouter_nat_flows_for_lb(struct ovn_lb_vip *lb_vip, * S_ROUTER_IN_DNAT stage. 
*/ ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_UNSNAT, 120, ds_cstr(&unsnat_match), "next;", - &lb->nlb->header_); + &lb->nlb->header_, + &lb_dps->lflow_dep_mgr, OBJDEP_TYPE_LB, + lb_dps->lb->nlb->name, NULL); } } for (size_t type = 0; type < LROUTER_NAT_LB_FLOW_MAX; type++) { build_gw_lrouter_nat_flows_for_lb(&ctx, type, lr_datapaths, - dp_bitmap[type]); + dp_bitmap[type], + &lb_dps->lflow_dep_mgr, + lb_dps->lb->nlb->name); } /* LB affinity flows for datapaths where CMS has specified @@ -12341,7 +12583,7 @@ build_lrouter_nat_flows_for_lb(struct ovn_lb_vip *lb_vip, build_lb_affinity_lr_flows(lflows, lb, lb_vip, ds_cstr(match), "flags.force_snat_for_lb = 1; ", dp_bitmap[LROUTER_NAT_LB_AFF_FORCE_SNAT], - lr_datapaths); + lr_datapaths, &lb_dps->lflow_dep_mgr); /* LB affinity flows for datapaths where CMS has specified * skip_snat_for_lb floag option or regular datapaths. @@ -12350,7 +12592,7 @@ build_lrouter_nat_flows_for_lb(struct ovn_lb_vip *lb_vip, lb->skip_snat ? "flags.skip_snat_for_lb = 1; " : NULL; build_lb_affinity_lr_flows(lflows, lb, lb_vip, ds_cstr(match), lb_aff_action, dp_bitmap[LROUTER_NAT_LB_AFF], - lr_datapaths); + lr_datapaths, &lb_dps->lflow_dep_mgr); ds_destroy(&unsnat_match); ds_destroy(&undnat_match); @@ -12396,7 +12638,9 @@ build_lswitch_flows_for_lb(struct ovn_lb_datapaths *lb_dps, copp_meter_get(COPP_EVENT_ELB, od->nbs->copp, meter_groups), - &lb->nlb->header_); + &lb->nlb->header_, + &lb_dps->lflow_dep_mgr, OBJDEP_TYPE_LB, + lb->nlb->name, NULL); } /* Ignore L4 port information in the key because fragmented packets * may not have L4 information. The pre-stateful table will send @@ -12446,7 +12690,9 @@ build_lrouter_defrag_flows_for_lb(struct ovn_lb_datapaths *lb_dps, ovn_lflow_add_with_dp_group( lflows, lb_dps->nb_lr_map, ods_size(lr_datapaths), S_ROUTER_IN_DEFRAG, prio, ds_cstr(match), "ct_dnat;", - &lb_dps->lb->nlb->header_); + &lb_dps->lb->nlb->header_, + &lb_dps->lflow_dep_mgr, OBJDEP_TYPE_LB, + lb_dps->lb->nlb->name, NULL); } } @@ -12487,7 +12733,9 @@ build_lrouter_flows_for_lb(struct ovn_lb_datapaths *lb_dps, copp_meter_get(COPP_EVENT_ELB, od->nbr->copp, meter_groups), - &lb->nlb->header_); + &lb->nlb->header_, + &lb_dps->lflow_dep_mgr, OBJDEP_TYPE_LB, + lb->nlb->name, NULL); } } @@ -12496,7 +12744,9 @@ build_lrouter_flows_for_lb(struct ovn_lb_datapaths *lb_dps, struct ovn_datapath *od = lr_datapaths->array[index]; ovn_lflow_add(lflows, od, S_ROUTER_OUT_SNAT, 120, - "flags.skip_snat_for_lb == 1 && ip", "next;"); + "flags.skip_snat_for_lb == 1 && ip", "next;", + &lb_dps->lflow_dep_mgr, OBJDEP_TYPE_LB, + lb->nlb->name, NULL); } } } @@ -12662,7 +12912,8 @@ lrouter_nat_add_ext_ip_match(struct ovn_datapath *od, ovn_lflow_add_with_hint(lflows, od, stage, priority, ds_cstr(&match_exempt), "next;", - &nat->header_); + &nat->header_, &od->lflow_dep_mgr, + OBJDEP_TYPE_OD, od->nbr->name, od); ds_destroy(&match_exempt); } } @@ -12676,7 +12927,10 @@ build_lrouter_arp_flow(struct ovn_datapath *od, struct ovn_port *op, const char *ip_address, const char *eth_addr, struct ds *extra_match, bool drop, uint16_t priority, const struct ovsdb_idl_row *hint, - struct lflow_data *lflows) + struct lflow_data *lflows, + struct objdep_mgr *lflow_dep_mgr, + enum objdep_type res_type, const char *res_name, + struct ovn_datapath *res_od) { struct ds match = DS_EMPTY_INITIALIZER; struct ds actions = DS_EMPTY_INITIALIZER; @@ -12708,7 +12962,9 @@ build_lrouter_arp_flow(struct ovn_datapath *od, struct ovn_port *op, } ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_IP_INPUT, priority, - 
ds_cstr(&match), ds_cstr(&actions), hint); + ds_cstr(&match), ds_cstr(&actions), hint, + lflow_dep_mgr, res_type, + res_name, res_od); ds_destroy(&match); ds_destroy(&actions); @@ -12727,7 +12983,10 @@ build_lrouter_nd_flow(struct ovn_datapath *od, struct ovn_port *op, struct ds *extra_match, bool drop, uint16_t priority, const struct ovsdb_idl_row *hint, struct lflow_data *lflows, - const struct shash *meter_groups) + const struct shash *meter_groups, + struct objdep_mgr *lflow_dep_mgr, + enum objdep_type res_type, const char *res_name, + struct ovn_datapath *res_od) { struct ds match = DS_EMPTY_INITIALIZER; struct ds actions = DS_EMPTY_INITIALIZER; @@ -12750,7 +13009,9 @@ build_lrouter_nd_flow(struct ovn_datapath *od, struct ovn_port *op, if (drop) { ds_put_cstr(&actions, debug_drop_action()); ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_IP_INPUT, priority, - ds_cstr(&match), ds_cstr(&actions), hint); + ds_cstr(&match), ds_cstr(&actions), hint, + lflow_dep_mgr, res_type, + res_name, res_od); } else { ds_put_format(&actions, "%s { " @@ -12768,7 +13029,8 @@ build_lrouter_nd_flow(struct ovn_datapath *od, struct ovn_port *op, ds_cstr(&match), ds_cstr(&actions), NULL, copp_meter_get(COPP_ND_NA, od->nbr->copp, meter_groups), - hint); + hint, lflow_dep_mgr, res_type, + res_name, res_od); } ds_destroy(&match); @@ -12789,12 +13051,16 @@ build_lrouter_nat_arp_nd_flow(struct ovn_datapath *od, ext_addrs->ipv6_addrs[0].addr_s, ext_addrs->ipv6_addrs[0].sn_addr_s, REG_INPORT_ETH_ADDR, NULL, false, 90, - &nat->header_, lflows, meter_groups); + &nat->header_, lflows, meter_groups, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); } else { build_lrouter_arp_flow(od, NULL, ext_addrs->ipv4_addrs[0].addr_s, REG_INPORT_ETH_ADDR, NULL, false, 90, - &nat->header_, lflows); + &nat->header_, lflows, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); } } @@ -12850,21 +13116,29 @@ build_lrouter_port_nat_arp_nd_flow(struct ovn_port *op, ext_addrs->ipv6_addrs[0].addr_s, ext_addrs->ipv6_addrs[0].sn_addr_s, mac_s, &match, false, 92, - &nat->header_, lflows, meter_groups); + &nat->header_, lflows, meter_groups, + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); build_lrouter_nd_flow(op->od, op, "nd_na", ext_addrs->ipv6_addrs[0].addr_s, ext_addrs->ipv6_addrs[0].sn_addr_s, mac_s, NULL, true, 91, - &nat->header_, lflows, meter_groups); + &nat->header_, lflows, meter_groups, + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); } else { build_lrouter_arp_flow(op->od, op, ext_addrs->ipv4_addrs[0].addr_s, mac_s, &match, false, 92, - &nat->header_, lflows); + &nat->header_, lflows, + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); build_lrouter_arp_flow(op->od, op, ext_addrs->ipv4_addrs[0].addr_s, mac_s, NULL, true, 91, - &nat->header_, lflows); + &nat->header_, lflows, + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); } ds_destroy(&match); @@ -12899,7 +13173,11 @@ build_lrouter_drop_own_dest(struct ovn_port *op, enum ovn_stage stage, char *match = xasprintf("ip4.dst == {%s}", ds_cstr(&match_ips)); ovn_lflow_add_with_hint(lflows, op->od, stage, priority, match, debug_drop_action(), - &op->nbrp->header_); + &op->nbrp->header_, + &op->lflow_dep_mgr, + OBJDEP_TYPE_LPORT, + op->nbrp->name, + op->od); free(match); } } @@ -12928,7 +13206,11 @@ build_lrouter_drop_own_dest(struct ovn_port *op, enum ovn_stage stage, char *match = xasprintf("ip6.dst == {%s}", ds_cstr(&match_ips)); ovn_lflow_add_with_hint(lflows, op->od, stage, priority, match, 
debug_drop_action(), - &op->nbrp->header_); + &op->nbrp->header_, + &op->lflow_dep_mgr, + OBJDEP_TYPE_LPORT, + op->nbrp->name, + op->od); free(match); } } @@ -12939,14 +13221,16 @@ static void build_lrouter_force_snat_flows(struct lflow_data *lflows, struct ovn_datapath *od, const char *ip_version, const char *ip_addr, - const char *context) + const char *context, + enum objdep_type objdep_type) { struct ds match = DS_EMPTY_INITIALIZER; struct ds actions = DS_EMPTY_INITIALIZER; ds_put_format(&match, "ip%s && ip%s.dst == %s", ip_version, ip_version, ip_addr); - ovn_lflow_add(lflows, od, S_ROUTER_IN_UNSNAT, 110, - ds_cstr(&match), "ct_snat;"); + ovn_lflow_add(lflows, od, S_ROUTER_IN_UNSNAT, 110, ds_cstr(&match), + "ct_snat;", &od->lflow_dep_mgr, objdep_type, + od->nbr->name, od); /* Higher priority rules to force SNAT with the IP addresses * configured in the Gateway router. This only takes effect @@ -12955,8 +13239,9 @@ build_lrouter_force_snat_flows(struct lflow_data *lflows, ds_put_format(&match, "flags.force_snat_for_%s == 1 && ip%s", context, ip_version); ds_put_format(&actions, "ct_snat(%s);", ip_addr); - ovn_lflow_add(lflows, od, S_ROUTER_OUT_SNAT, 100, - ds_cstr(&match), ds_cstr(&actions)); + ovn_lflow_add(lflows, od, S_ROUTER_OUT_SNAT, 100, ds_cstr(&match), + ds_cstr(&actions), &od->lflow_dep_mgr, objdep_type, + od->nbr->name, od); ds_destroy(&match); ds_destroy(&actions); @@ -12978,8 +13263,10 @@ build_lrouter_force_snat_flows_op(struct ovn_port *op, ds_put_format(match, "inport == %s && ip4.dst == %s", op->json_key, op->lrp_networks.ipv4_addrs[0].addr_s); - ovn_lflow_add(lflows, op->od, S_ROUTER_IN_UNSNAT, 110, - ds_cstr(match), "ct_snat;"); + ovn_lflow_add(lflows, op->od, S_ROUTER_IN_UNSNAT, + 110, ds_cstr(match), "ct_snat;", + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); ds_clear(match); @@ -12991,7 +13278,9 @@ build_lrouter_force_snat_flows_op(struct ovn_port *op, ds_put_format(actions, "ct_snat(%s);", op->lrp_networks.ipv4_addrs[0].addr_s); ovn_lflow_add(lflows, op->od, S_ROUTER_OUT_SNAT, 110, - ds_cstr(match), ds_cstr(actions)); + ds_cstr(match), ds_cstr(actions), + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); if (op->lrp_networks.n_ipv4_addrs > 1) { static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); VLOG_WARN_RL(&rl, "Logical router port %s is configured with " @@ -13010,8 +13299,10 @@ build_lrouter_force_snat_flows_op(struct ovn_port *op, ds_put_format(match, "inport == %s && ip6.dst == %s", op->json_key, op->lrp_networks.ipv6_addrs[0].addr_s); - ovn_lflow_add(lflows, op->od, S_ROUTER_IN_UNSNAT, 110, - ds_cstr(match), "ct_snat;"); + ovn_lflow_add(lflows, op->od, S_ROUTER_IN_UNSNAT, + 110, ds_cstr(match), "ct_snat;", + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); ds_clear(match); @@ -13023,7 +13314,9 @@ build_lrouter_force_snat_flows_op(struct ovn_port *op, ds_put_format(actions, "ct_snat(%s);", op->lrp_networks.ipv6_addrs[0].addr_s); ovn_lflow_add(lflows, op->od, S_ROUTER_OUT_SNAT, 110, - ds_cstr(match), ds_cstr(actions)); + ds_cstr(match), ds_cstr(actions), + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); if (op->lrp_networks.n_ipv6_addrs > 2) { static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); VLOG_WARN_RL(&rl, "Logical router port %s is configured with " @@ -13052,7 +13345,9 @@ build_lrouter_bfd_flows(struct lflow_data *lflows, struct ovn_port *op, ds_cstr(&ip_list)); ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_INPUT, 110, ds_cstr(&match), "next; ", - 
&op->nbrp->header_); + &op->nbrp->header_, + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); ds_clear(&match); ds_put_format(&match, "ip4.dst == %s && udp.dst == 3784", ds_cstr(&ip_list)); @@ -13060,7 +13355,9 @@ build_lrouter_bfd_flows(struct lflow_data *lflows, struct ovn_port *op, ds_cstr(&match), "handle_bfd_msg(); ", NULL, copp_meter_get(COPP_BFD, op->od->nbr->copp, meter_groups), - &op->nbrp->header_); + &op->nbrp->header_, + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); } if (op->lrp_networks.n_ipv6_addrs) { ds_clear(&ip_list); @@ -13071,7 +13368,9 @@ build_lrouter_bfd_flows(struct lflow_data *lflows, struct ovn_port *op, ds_cstr(&ip_list)); ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_INPUT, 110, ds_cstr(&match), "next; ", - &op->nbrp->header_); + &op->nbrp->header_, + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); ds_clear(&match); ds_put_format(&match, "ip6.dst == %s && udp.dst == 3784", ds_cstr(&ip_list)); @@ -13079,7 +13378,9 @@ build_lrouter_bfd_flows(struct lflow_data *lflows, struct ovn_port *op, ds_cstr(&match), "handle_bfd_msg(); ", NULL, copp_meter_get(COPP_BFD, op->od->nbr->copp, meter_groups), - &op->nbrp->header_); + &op->nbrp->header_, + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); } ds_destroy(&ip_list); @@ -13096,11 +13397,14 @@ build_adm_ctrl_flows_for_lrouter( ovs_assert(od->nbr); /* Logical VLANs not supported. * Broadcast/multicast source address is invalid. */ - ovn_lflow_add(lflows, od, S_ROUTER_IN_ADMISSION, 100, - "vlan.present || eth.src[40]", debug_drop_action()); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_ADMISSION, 100, + "vlan.present || eth.src[40]", + debug_drop_action(), od->nbr->name); /* Default action for L2 security is to drop. */ - ovn_lflow_add_default_drop(lflows, od, S_ROUTER_IN_ADMISSION); + ovn_lflow_add_default_drop(lflows, od, S_ROUTER_IN_ADMISSION, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); } static int @@ -13128,11 +13432,14 @@ build_gateway_get_l2_hdr_size(struct ovn_port *op) /* All 'gateway_mtu' and 'gateway_mtu_bypass' flows should be built with this * function. */ -static void OVS_PRINTF_FORMAT(9, 10) +static void OVS_PRINTF_FORMAT(13, 14) build_gateway_mtu_flow(struct lflow_data *lflows, struct ovn_port *op, enum ovn_stage stage, uint16_t prio_low, uint16_t prio_high, struct ds *match, struct ds *actions, const struct ovsdb_idl_row *hint, + struct objdep_mgr *lflow_dep_mgr, + enum objdep_type res_type, const char *res_name, + struct ovn_datapath *res_od, const char *extra_actions_fmt, ...) 
{ int gw_mtu = smap_get_int(&op->nbrp->options, "gateway_mtu", 0); @@ -13150,7 +13457,8 @@ build_gateway_mtu_flow(struct lflow_data *lflows, struct ovn_port *op, ds_put_format_valist(actions, extra_actions_fmt, extra_actions_args); ovn_lflow_add_with_hint(lflows, op->od, stage, prio_low, ds_cstr(match), ds_cstr(actions), - hint); + hint, lflow_dep_mgr, res_type, + res_name, res_od); if (gw_mtu > 0) { const char *gw_mtu_bypass = smap_get(&op->nbrp->options, @@ -13162,7 +13470,8 @@ build_gateway_mtu_flow(struct lflow_data *lflows, struct ovn_port *op, ds_put_format(match, " && (%s)", gw_mtu_bypass); ovn_lflow_add_with_hint(lflows, op->od, stage, prio_high, ds_cstr(match), ds_cstr(actions), - hint); + hint, lflow_dep_mgr, res_type, + res_name, res_od); } } va_end(extra_actions_args); @@ -13215,6 +13524,8 @@ build_adm_ctrl_flows_for_lrouter_port( ds_put_format(match, "eth.mcast && inport == %s", op->json_key); build_gateway_mtu_flow(lflows, op, S_ROUTER_IN_ADMISSION, 50, 55, match, actions, &op->nbrp->header_, + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od, REG_INPORT_ETH_ADDR " = %s; next;", op->lrp_networks.ea_s); @@ -13235,6 +13546,8 @@ build_adm_ctrl_flows_for_lrouter_port( } build_gateway_mtu_flow(lflows, op, S_ROUTER_IN_ADMISSION, 50, 55, match, actions, &op->nbrp->header_, + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od, REG_INPORT_ETH_ADDR " = %s; next;", op->lrp_networks.ea_s); } @@ -13292,16 +13605,18 @@ build_neigh_learning_flows_for_lrouter( " = lookup_arp(inport, arp.spa, arp.sha); %snext;", learn_from_arp_request ? "" : REGBIT_LOOKUP_NEIGHBOR_IP_RESULT" = 1; "); - ovn_lflow_add(lflows, od, S_ROUTER_IN_LOOKUP_NEIGHBOR, 100, - "arp.op == 2", ds_cstr(actions)); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_LOOKUP_NEIGHBOR, + 100, "arp.op == 2", ds_cstr(actions), + od->nbr->name); ds_clear(actions); ds_put_format(actions, REGBIT_LOOKUP_NEIGHBOR_RESULT " = lookup_nd(inport, nd.target, nd.tll); %snext;", learn_from_arp_request ? "" : REGBIT_LOOKUP_NEIGHBOR_IP_RESULT" = 1; "); - ovn_lflow_add(lflows, od, S_ROUTER_IN_LOOKUP_NEIGHBOR, 100, "nd_na", - ds_cstr(actions)); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_LOOKUP_NEIGHBOR, + 100, "nd_na", ds_cstr(actions), + od->nbr->name); if (!learn_from_arp_request) { /* Add flow to skip GARP LLA if we don't know it already. @@ -13316,9 +13631,10 @@ build_neigh_learning_flows_for_lrouter( " = lookup_nd(inport, ip6.src, nd.tll); " REGBIT_LOOKUP_NEIGHBOR_IP_RESULT " = lookup_nd_ip(inport, ip6.src); next;"); - ovn_lflow_add(lflows, od, S_ROUTER_IN_LOOKUP_NEIGHBOR, 110, - "nd_na && ip6.src == fe80::/10 && ip6.dst == ff00::/8", - ds_cstr(actions)); + ovn_lflow_add_with_od_lflow_ref( + lflows, od, S_ROUTER_IN_LOOKUP_NEIGHBOR, 110, + "nd_na && ip6.src == fe80::/10 && ip6.dst == ff00::/8", + ds_cstr(actions), od->nbr->name); } ds_clear(actions); @@ -13327,13 +13643,16 @@ build_neigh_learning_flows_for_lrouter( learn_from_arp_request ? "" : REGBIT_LOOKUP_NEIGHBOR_IP_RESULT " = lookup_nd_ip(inport, ip6.src); "); - ovn_lflow_add(lflows, od, S_ROUTER_IN_LOOKUP_NEIGHBOR, 100, "nd_ns", - ds_cstr(actions)); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_LOOKUP_NEIGHBOR, + 100, "nd_ns", ds_cstr(actions), + od->nbr->name); /* For other packet types, we can skip neighbor learning. * So set REGBIT_LOOKUP_NEIGHBOR_RESULT to 1. 
*/ - ovn_lflow_add(lflows, od, S_ROUTER_IN_LOOKUP_NEIGHBOR, 0, "1", - REGBIT_LOOKUP_NEIGHBOR_RESULT" = 1; next;"); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_LOOKUP_NEIGHBOR, + 0, "1", + REGBIT_LOOKUP_NEIGHBOR_RESULT" = 1; next;", + od->nbr->name); /* Flows for LEARN_NEIGHBOR. */ /* Skip Neighbor learning if not required. */ @@ -13341,34 +13660,46 @@ build_neigh_learning_flows_for_lrouter( ds_put_format(match, REGBIT_LOOKUP_NEIGHBOR_RESULT" == 1%s", learn_from_arp_request ? "" : " || "REGBIT_LOOKUP_NEIGHBOR_IP_RESULT" == 0"); - ovn_lflow_add(lflows, od, S_ROUTER_IN_LEARN_NEIGHBOR, 100, - ds_cstr(match), "next;"); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_LEARN_NEIGHBOR, + 100, ds_cstr(match), "next;", + od->nbr->name); ovn_lflow_metered(lflows, od, S_ROUTER_IN_LEARN_NEIGHBOR, 90, "arp", "put_arp(inport, arp.spa, arp.sha); next;", copp_meter_get(COPP_ARP, od->nbr->copp, - meter_groups)); + meter_groups), + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); - ovn_lflow_add(lflows, od, S_ROUTER_IN_LEARN_NEIGHBOR, 95, - "nd_ns && (ip6.src == 0 || nd.sll == 0)", "next;"); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_LEARN_NEIGHBOR, 95, + "nd_ns && (ip6.src == 0 || nd.sll == 0)", + "next;", od->nbr->name); ovn_lflow_metered(lflows, od, S_ROUTER_IN_LEARN_NEIGHBOR, 95, "nd_na && nd.tll == 0", "put_nd(inport, nd.target, eth.src); next;", copp_meter_get(COPP_ND_NA, od->nbr->copp, - meter_groups)); + meter_groups), + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); ovn_lflow_metered(lflows, od, S_ROUTER_IN_LEARN_NEIGHBOR, 90, "nd_na", "put_nd(inport, nd.target, nd.tll); next;", copp_meter_get(COPP_ND_NA, od->nbr->copp, - meter_groups)); + meter_groups), + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); ovn_lflow_metered(lflows, od, S_ROUTER_IN_LEARN_NEIGHBOR, 90, "nd_ns", "put_nd(inport, ip6.src, nd.sll); next;", copp_meter_get(COPP_ND_NS, od->nbr->copp, - meter_groups)); + meter_groups), + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); - ovn_lflow_add_default_drop(lflows, od, S_ROUTER_IN_LEARN_NEIGHBOR); + ovn_lflow_add_default_drop(lflows, od, S_ROUTER_IN_LEARN_NEIGHBOR, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); } /* Logical router ingress Table 1: Neighbor lookup lflows @@ -13408,7 +13739,9 @@ build_neigh_learning_flows_for_lrouter_port( ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_LOOKUP_NEIGHBOR, 110, ds_cstr(match), actions_s, - &op->nbrp->header_); + &op->nbrp->header_, + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); } ds_clear(match); ds_put_format(match, @@ -13429,7 +13762,9 @@ build_neigh_learning_flows_for_lrouter_port( ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_LOOKUP_NEIGHBOR, 100, ds_cstr(match), ds_cstr(actions), - &op->nbrp->header_); + &op->nbrp->header_, + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); } } @@ -13526,7 +13861,9 @@ build_ND_RA_flows_for_lrouter_port( copp_meter_get(COPP_ND_RA_OPTS, op->od->nbr->copp, meter_groups), - &op->nbrp->header_); + &op->nbrp->header_, + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); ds_clear(actions); ds_clear(match); ds_put_format(match, "inport == %s && ip6.dst == ff02::2 && " @@ -13545,7 +13882,9 @@ build_ND_RA_flows_for_lrouter_port( ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_ND_RA_RESPONSE, 50, ds_cstr(match), ds_cstr(actions), - &op->nbrp->header_); + &op->nbrp->header_, + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); } } @@ -13556,8 
+13895,10 @@ build_ND_RA_flows_for_lrouter(struct ovn_datapath *od, struct lflow_data *lflows) { ovs_assert(od->nbr); - ovn_lflow_add(lflows, od, S_ROUTER_IN_ND_RA_OPTIONS, 0, "1", "next;"); - ovn_lflow_add(lflows, od, S_ROUTER_IN_ND_RA_RESPONSE, 0, "1", "next;"); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_ND_RA_OPTIONS, 0, + "1", "next;", od->nbr->name); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_ND_RA_RESPONSE, 0, + "1", "next;", od->nbr->name); } /* Logical router ingress table IP_ROUTING_PRE: @@ -13567,8 +13908,9 @@ build_ip_routing_pre_flows_for_lrouter(struct ovn_datapath *od, struct lflow_data *lflows) { ovs_assert(od->nbr); - ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_ROUTING_PRE, 0, "1", - REG_ROUTE_TABLE_ID" = 0; next;"); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_IP_ROUTING_PRE, 0, + "1", REG_ROUTE_TABLE_ID" = 0; next;", + od->nbr->name); } /* Logical router ingress table IP_ROUTING : IP Routing. @@ -13599,14 +13941,18 @@ build_ip_routing_flows_for_lrp( add_route(lflows, op->od, op, op->lrp_networks.ipv4_addrs[i].addr_s, op->lrp_networks.ipv4_addrs[i].network_s, op->lrp_networks.ipv4_addrs[i].plen, NULL, false, 0, - &op->nbrp->header_, false, ROUTE_PRIO_OFFSET_CONNECTED); + &op->nbrp->header_, false, ROUTE_PRIO_OFFSET_CONNECTED, + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); } for (int i = 0; i < op->lrp_networks.n_ipv6_addrs; i++) { add_route(lflows, op->od, op, op->lrp_networks.ipv6_addrs[i].addr_s, op->lrp_networks.ipv6_addrs[i].network_s, op->lrp_networks.ipv6_addrs[i].plen, NULL, false, 0, - &op->nbrp->header_, false, ROUTE_PRIO_OFFSET_CONNECTED); + &op->nbrp->header_, false, ROUTE_PRIO_OFFSET_CONNECTED, + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); } } @@ -13648,7 +13994,9 @@ build_ip_routing_flows_for_router_type_lsp(struct ovn_port *op, laddrs->ipv4_addrs[k].network_s, laddrs->ipv4_addrs[k].plen, NULL, false, 0, &peer->nbrp->header_, false, - ROUTE_PRIO_OFFSET_CONNECTED); + ROUTE_PRIO_OFFSET_CONNECTED, + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbsp->name, op->od); } } } @@ -13661,10 +14009,15 @@ build_static_route_flows_for_lrouter( const struct hmap *bfd_connections) { ovs_assert(od->nbr); - ovn_lflow_add_default_drop(lflows, od, S_ROUTER_IN_IP_ROUTING_ECMP); - ovn_lflow_add_default_drop(lflows, od, S_ROUTER_IN_IP_ROUTING); - ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_ROUTING_ECMP, 150, - REG_ECMP_GROUP_ID" == 0", "next;"); + ovn_lflow_add_default_drop(lflows, od, S_ROUTER_IN_IP_ROUTING_ECMP, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); + ovn_lflow_add_default_drop(lflows, od, S_ROUTER_IN_IP_ROUTING, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_IP_ROUTING_ECMP, + 150, REG_ECMP_GROUP_ID" == 0", "next;", + od->nbr->name); struct hmap ecmp_groups = HMAP_INITIALIZER(&ecmp_groups); struct hmap unique_routes = HMAP_INITIALIZER(&unique_routes); @@ -13729,8 +14082,9 @@ build_mcast_lookup_flows_for_lrouter( /* Drop IPv6 multicast traffic that shouldn't be forwarded, * i.e., router solicitation and router advertisement. 
*/ - ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_ROUTING, 10550, - "nd_rs || nd_ra", debug_drop_action()); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_IP_ROUTING, 10550, + "nd_rs || nd_ra", debug_drop_action(), + od->nbr->name); if (!od->mcast_info.rtr.relay) { return; } @@ -13757,8 +14111,9 @@ build_mcast_lookup_flows_for_lrouter( } ds_put_format(actions, "outport = \"%s\"; ip.ttl--; next;", igmp_group->mcgroup.name); - ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_ROUTING, 10500, - ds_cstr(match), ds_cstr(actions)); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_IP_ROUTING, + 10500, ds_cstr(match), + ds_cstr(actions), od->nbr->name); } /* If needed, flood unregistered multicast on statically configured @@ -13776,38 +14131,46 @@ build_mcast_lookup_flows_for_lrouter( ds_clear(match); ds_put_format(match, "eth.src == %s && igmp", op->lrp_networks.ea_s); - ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_ROUTING, 10550, - ds_cstr(match), debug_drop_action()); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_IP_ROUTING, + 10550, ds_cstr(match), + debug_drop_action(), + od->nbr->name); ds_clear(match); ds_put_format(match, "eth.src == %s && (mldv1 || mldv2)", op->lrp_networks.ea_s); - ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_ROUTING, 10550, - ds_cstr(match), debug_drop_action()); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_IP_ROUTING, + 10550, ds_cstr(match), + debug_drop_action(), + od->nbr->name); } - ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_ROUTING, 10460, - "igmp", + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_IP_ROUTING, + 10460, "igmp", "clone { " "outport = \""MC_STATIC"\"; " "next; " - "};"); - ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_ROUTING, 10460, - "mldv1 || mldv2", + "};", + od->nbr->name); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_IP_ROUTING, + 10460, "mldv1 || mldv2", "clone { " "outport = \""MC_STATIC"\"; " "next; " - "};"); - ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_ROUTING, 10450, - "ip4.mcast || ip6.mcast", + "};", + od->nbr->name); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_IP_ROUTING, + 10450, "ip4.mcast || ip6.mcast", "clone { " "outport = \""MC_STATIC"\"; " "ip.ttl--; " "next; " - "};"); + "};", + od->nbr->name); } else { - ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_ROUTING, 10450, - "ip4.mcast || ip6.mcast", debug_drop_action()); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_IP_ROUTING, + 10450, "ip4.mcast || ip6.mcast", + debug_drop_action(), od->nbr->name); } } @@ -13828,11 +14191,15 @@ build_ingress_policy_flows_for_lrouter( ovs_assert(od->nbr); /* This is a catch-all rule. It has the lowest priority (0) * does a match-all("1") and pass-through (next) */ - ovn_lflow_add(lflows, od, S_ROUTER_IN_POLICY, 0, "1", - REG_ECMP_GROUP_ID" = 0; next;"); - ovn_lflow_add(lflows, od, S_ROUTER_IN_POLICY_ECMP, 150, - REG_ECMP_GROUP_ID" == 0", "next;"); - ovn_lflow_add_default_drop(lflows, od, S_ROUTER_IN_POLICY_ECMP); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_POLICY, 0, "1", + REG_ECMP_GROUP_ID" = 0; next;", + od->nbr->name); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_POLICY_ECMP, 150, + REG_ECMP_GROUP_ID" == 0", "next;", + od->nbr->name); + ovn_lflow_add_default_drop(lflows, od, S_ROUTER_IN_POLICY_ECMP, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); /* Convert routing policies to flows. 
*/ uint16_t ecmp_group_id = 1; @@ -13861,23 +14228,31 @@ build_arp_resolve_flows_for_lrouter( ovs_assert(od->nbr); /* Multicast packets already have the outport set so just advance to * next table (priority 500). */ - ovn_lflow_add(lflows, od, S_ROUTER_IN_ARP_RESOLVE, 500, - "ip4.mcast || ip6.mcast", "next;"); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_ARP_RESOLVE, 500, + "ip4.mcast || ip6.mcast", "next;", + od->nbr->name); - ovn_lflow_add(lflows, od, S_ROUTER_IN_ARP_RESOLVE, 1, "ip4", - "get_arp(outport, " REG_NEXT_HOP_IPV4 "); next;"); + ovn_lflow_add_with_od_lflow_ref( + lflows, od, S_ROUTER_IN_ARP_RESOLVE, 1, "ip4", + "get_arp(outport, " REG_NEXT_HOP_IPV4 "); next;", od->nbr->name); - ovn_lflow_add(lflows, od, S_ROUTER_IN_ARP_RESOLVE, 1, "ip6", - "get_nd(outport, " REG_NEXT_HOP_IPV6 "); next;"); + ovn_lflow_add_with_od_lflow_ref( + lflows, od, S_ROUTER_IN_ARP_RESOLVE, 1, "ip6", + "get_nd(outport, " REG_NEXT_HOP_IPV6 "); next;", od->nbr->name); - ovn_lflow_add_default_drop(lflows, od, S_ROUTER_IN_ARP_RESOLVE); + ovn_lflow_add_default_drop(lflows, od, S_ROUTER_IN_ARP_RESOLVE, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); } static void routable_addresses_to_lflows(struct lflow_data *lflows, struct ovn_port *router_port, struct ovn_port *peer, struct ds *match, - struct ds *actions) + struct ds *actions, + struct objdep_mgr *lflow_dep_mgr, + const char *res_name, + struct ovn_datapath *res_od) { struct ovn_port_routable_addresses *ra = &router_port->routables; if (!ra->n_addrs) { @@ -13901,7 +14276,8 @@ routable_addresses_to_lflows(struct lflow_data *lflows, ds_clear(actions); ds_put_format(actions, "eth.dst = %s; next;", ra->laddrs[i].ea_s); ovn_lflow_add(lflows, peer->od, S_ROUTER_IN_ARP_RESOLVE, 100, - ds_cstr(match), ds_cstr(actions)); + ds_cstr(match), ds_cstr(actions), lflow_dep_mgr, + OBJDEP_TYPE_LPORT, res_name, res_od); } } @@ -13941,10 +14317,15 @@ build_arp_resolve_flows_for_lrp( ds_clear(actions); ds_put_format(actions, "eth.dst = %s; next;", op->lrp_networks.ea_s); + /* Use the peer od's lflow dep mgr. */ ovn_lflow_add_with_hint(lflows, op->peer->od, S_ROUTER_IN_ARP_RESOLVE, 100, ds_cstr(match), ds_cstr(actions), - &op->nbrp->header_); + &op->nbrp->header_, + &op->lflow_dep_mgr, + OBJDEP_TYPE_LPORT, + op->nbrp->name, + op->od); } if (op->lrp_networks.n_ipv6_addrs) { @@ -13957,10 +14338,15 @@ build_arp_resolve_flows_for_lrp( ds_clear(actions); ds_put_format(actions, "eth.dst = %s; next;", op->lrp_networks.ea_s); + /* Use the peer od's lflow dep mgr. 
*/ ovn_lflow_add_with_hint(lflows, op->peer->od, S_ROUTER_IN_ARP_RESOLVE, 100, ds_cstr(match), ds_cstr(actions), - &op->nbrp->header_); + &op->nbrp->header_, + &op->lflow_dep_mgr, + OBJDEP_TYPE_LPORT, + op->nbrp->name, + op->od); } } @@ -13985,7 +14371,11 @@ build_arp_resolve_flows_for_lrp( ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_ARP_RESOLVE, 50, ds_cstr(match), ds_cstr(actions), - &op->nbrp->header_); + &op->nbrp->header_, + &op->lflow_dep_mgr, + OBJDEP_TYPE_LPORT, + op->nbrp->name, + op->od); } } @@ -14131,7 +14521,9 @@ build_arp_resolve_flows_for_lsp( ovn_lflow_add_with_hint(lflows, peer->od, S_ROUTER_IN_ARP_RESOLVE, 100, ds_cstr(match), ds_cstr(actions), - &op->nbsp->header_); + &op->nbsp->header_, + lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->key, op->od); } if (router_port->lrp_networks.n_ipv6_addrs) { @@ -14147,13 +14539,16 @@ build_arp_resolve_flows_for_lsp( ovn_lflow_add_with_hint(lflows, peer->od, S_ROUTER_IN_ARP_RESOLVE, 100, ds_cstr(match), ds_cstr(actions), - &op->nbsp->header_); + &op->nbsp->header_, + lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->key, op->od); } if (smap_get(&peer->od->nbr->options, "chassis") || peer->cr_port) { routable_addresses_to_lflows(lflows, router_port, peer, - match, actions); + match, actions, + lflow_dep_mgr, op->key, op->od); } } } @@ -14164,7 +14559,11 @@ build_icmperr_pkt_big_flows(struct ovn_port *op, int mtu, struct lflow_data *lflows, const struct shash *meter_groups, struct ds *match, struct ds *actions, enum ovn_stage stage, - struct ovn_port *outport) + struct ovn_port *outport, + struct objdep_mgr *lflow_dep_mgr, + enum objdep_type res_type, + const char *res_name, + struct ovn_datapath *res_od) { char *outport_match = outport ? xasprintf("outport == %s && ", outport->json_key) @@ -14200,7 +14599,10 @@ build_icmperr_pkt_big_flows(struct ovn_port *op, int mtu, COPP_ICMP4_ERR, op->od->nbr->copp, meter_groups), - &op->nbrp->header_); + &op->nbrp->header_, + lflow_dep_mgr, + res_type, + res_name, res_od); } if (op->lrp_networks.ipv6_addrs) { @@ -14233,7 +14635,10 @@ build_icmperr_pkt_big_flows(struct ovn_port *op, int mtu, COPP_ICMP6_ERR, op->od->nbr->copp, meter_groups), - &op->nbrp->header_); + &op->nbrp->header_, + lflow_dep_mgr, + res_type, + res_name, res_od); } free(outport_match); } @@ -14244,7 +14649,11 @@ build_check_pkt_len_flows_for_lrp(struct ovn_port *op, const struct hmap *lr_ports, const struct shash *meter_groups, struct ds *match, - struct ds *actions) + struct ds *actions, + struct objdep_mgr *lflow_dep_mgr, + enum objdep_type res_type, + const char *res_name, + struct ovn_datapath *res_od) { int gw_mtu = smap_get_int(&op->nbrp->options, "gateway_mtu", 0); if (gw_mtu <= 0) { @@ -14254,12 +14663,16 @@ build_check_pkt_len_flows_for_lrp(struct ovn_port *op, ds_clear(match); ds_put_format(match, "outport == %s", op->json_key); build_gateway_mtu_flow(lflows, op, S_ROUTER_IN_CHK_PKT_LEN, 50, 55, - match, actions, &op->nbrp->header_, "next;"); + match, actions, &op->nbrp->header_, + lflow_dep_mgr, res_type, + res_name, res_od, + "next;"); /* ingress traffic */ build_icmperr_pkt_big_flows(op, gw_mtu, lflows, meter_groups, match, actions, S_ROUTER_IN_IP_INPUT, - NULL); + NULL, lflow_dep_mgr, res_type, + res_name, res_od); for (size_t i = 0; i < op->od->nbr->n_ports; i++) { struct ovn_port *rp = ovn_port_find(lr_ports, @@ -14271,7 +14684,8 @@ build_check_pkt_len_flows_for_lrp(struct ovn_port *op, /* egress traffic */ build_icmperr_pkt_big_flows(rp, gw_mtu, lflows, meter_groups, match, actions, S_ROUTER_IN_LARGER_PKTS, - op); + op, 
lflow_dep_mgr, res_type, + res_name, res_od); } } @@ -14298,10 +14712,10 @@ build_check_pkt_len_flows_for_lrouter( ovs_assert(od->nbr); /* Packets are allowed by default. */ - ovn_lflow_add(lflows, od, S_ROUTER_IN_CHK_PKT_LEN, 0, "1", - "next;"); - ovn_lflow_add(lflows, od, S_ROUTER_IN_LARGER_PKTS, 0, "1", - "next;"); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_CHK_PKT_LEN, 0, + "1", "next;", od->nbr->name); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_LARGER_PKTS, 0, + "1", "next;", od->nbr->name); for (size_t i = 0; i < od->nbr->n_ports; i++) { struct ovn_port *rp = ovn_port_find(lr_ports, @@ -14310,7 +14724,9 @@ build_check_pkt_len_flows_for_lrouter( continue; } build_check_pkt_len_flows_for_lrp(rp, lflows, lr_ports, meter_groups, - match, actions); + match, actions, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); } } @@ -14352,7 +14768,8 @@ build_gateway_redirect_flows_for_lrouter( od->l3dgw_ports[i]->cr_port->json_key); ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_GW_REDIRECT, 50, ds_cstr(match), ds_cstr(actions), - stage_hint); + stage_hint, &od->lflow_dep_mgr, + OBJDEP_TYPE_OD, od->nbr->name, od); for (int j = 0; j < od->n_nat_entries; j++) { const struct ovn_nat *nat = &od->nat_entries[j]; @@ -14371,28 +14788,34 @@ build_gateway_redirect_flows_for_lrouter( if (nat->nb->allowed_ext_ips) { ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_GW_REDIRECT, 75, ds_cstr(&match_ext), - ds_cstr(actions), stage_hint); + ds_cstr(actions), stage_hint, + &od->lflow_dep_mgr, + OBJDEP_TYPE_OD, od->nbr->name, od); if (add_def_flow) { ds_clear(&match_ext); ds_put_format(&match_ext, "ip && ip%s.dst == %s", nat_entry_is_v6(nat) ? "6" : "4", nat->nb->external_ip); - ovn_lflow_add(lflows, od, S_ROUTER_IN_GW_REDIRECT, 70, - ds_cstr(&match_ext), debug_drop_action()); + ovn_lflow_add_with_od_lflow_ref( + lflows, od, S_ROUTER_IN_GW_REDIRECT, 70, + ds_cstr(&match_ext), debug_drop_action(), + od->nbr->name); add_def_flow = false; } } else if (nat->nb->exempted_ext_ips) { ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_GW_REDIRECT, 75, ds_cstr(&match_ext), debug_drop_action(), - stage_hint); + stage_hint, &od->lflow_dep_mgr, + OBJDEP_TYPE_OD, od->nbr->name, od); } ds_destroy(&match_ext); } } /* Packets are allowed by default. */ - ovn_lflow_add(lflows, od, S_ROUTER_IN_GW_REDIRECT, 0, "1", "next;"); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_GW_REDIRECT, 0, + "1", "next;", od->nbr->name); } /* Local router ingress table ARP_REQUEST: ARP request. 
@@ -14446,7 +14869,8 @@ build_arp_request_flows_for_lrouter( copp_meter_get(COPP_ND_NS_RESOLVE, od->nbr->copp, meter_groups), - &route->header_); + &route->header_, &od->lflow_dep_mgr, + OBJDEP_TYPE_OD, od->nbr->name, od); } ovn_lflow_metered(lflows, od, S_ROUTER_IN_ARP_REQUEST, 100, @@ -14459,7 +14883,8 @@ build_arp_request_flows_for_lrouter( "output; " "};", copp_meter_get(COPP_ARP_RESOLVE, od->nbr->copp, - meter_groups)); + meter_groups), + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, od->nbr->name, od); ovn_lflow_metered(lflows, od, S_ROUTER_IN_ARP_REQUEST, 100, "eth.dst == 00:00:00:00:00:00 && ip6", "nd_ns { " @@ -14467,8 +14892,10 @@ build_arp_request_flows_for_lrouter( "output; " "};", copp_meter_get(COPP_ND_NS_RESOLVE, od->nbr->copp, - meter_groups)); - ovn_lflow_add(lflows, od, S_ROUTER_IN_ARP_REQUEST, 0, "1", "output;"); + meter_groups), + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, od->nbr->name, od); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_ARP_REQUEST, 0, + "1", "output;", od->nbr->name); } /* Logical router egress table DELIVERY: Delivery (priority 100-110). @@ -14508,16 +14935,22 @@ build_egress_delivery_flows_for_lrouter_port( op->json_key); ds_put_format(actions, "eth.src = %s; output;", op->lrp_networks.ea_s); - ovn_lflow_add(lflows, op->od, S_ROUTER_OUT_DELIVERY, 110, - ds_cstr(match), ds_cstr(actions)); + ovn_lflow_add(lflows, op->od, S_ROUTER_OUT_DELIVERY, + 110, ds_cstr(match), ds_cstr(actions), + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); } ds_clear(match); ds_put_format(match, "outport == %s", op->json_key); ovn_lflow_add(lflows, op->od, S_ROUTER_OUT_DELIVERY, 100, - ds_cstr(match), "output;"); + ds_cstr(match), "output;", + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); - ovn_lflow_add_default_drop(lflows, op->od, S_ROUTER_OUT_DELIVERY); + ovn_lflow_add_default_drop(lflows, op->od, S_ROUTER_OUT_DELIVERY, + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); } static void @@ -14529,52 +14962,58 @@ build_misc_local_traffic_drop_flows_for_lrouter( * configured to flood them statically on some ports. */ if (od->mcast_info.rtr.flood_static) { - ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 120, - "igmp && ip.ttl == 1", "next;"); - ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 120, - "(mldv1 || mldv2) && ip.ttl == 1", "next;"); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_IP_INPUT, 120, + "igmp && ip.ttl == 1", "next;", + od->nbr->name); + ovn_lflow_add_with_od_lflow_ref( + lflows, od, S_ROUTER_IN_IP_INPUT, 120, + "(mldv1 || mldv2) && ip.ttl == 1", "next;", od->nbr->name); } /* L3 admission control: drop multicast and broadcast source, localhost * source or destination, and zero network source or destination * (priority 100). */ - ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 100, + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_IP_INPUT, 100, "ip4.src_mcast ||" "ip4.src == 255.255.255.255 || " "ip4.src == 127.0.0.0/8 || " "ip4.dst == 127.0.0.0/8 || " "ip4.src == 0.0.0.0/8 || " "ip4.dst == 0.0.0.0/8", - debug_drop_action()); + debug_drop_action(), + od->nbr->name); /* Drop ARP packets (priority 85). ARP request packets for router's own * IPs are handled with priority-90 flows. * Drop IPv6 ND packets (priority 85). ND NA packets for router's own * IPs are handled with priority-90 flows. 
*/ - ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 85, - "arp || nd", debug_drop_action()); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_IP_INPUT, 85, + "arp || nd", debug_drop_action(), + od->nbr->name); /* Allow IPv6 multicast traffic that's supposed to reach the * router pipeline (e.g., router solicitations). */ - ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 84, "nd_rs || nd_ra", - "next;"); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_IP_INPUT, 84, + "nd_rs || nd_ra", "next;", od->nbr->name); /* Drop other reserved multicast. */ - ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 83, - "ip6.mcast_rsvd", debug_drop_action()); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_IP_INPUT, 83, + "ip6.mcast_rsvd", debug_drop_action(), + od->nbr->name); /* Allow other multicast if relay enabled (priority 82). */ - ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 82, + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_IP_INPUT, 82, "ip4.mcast || ip6.mcast", - (od->mcast_info.rtr.relay ? "next;" : - debug_drop_action())); + (od->mcast_info.rtr.relay ? "next;" : debug_drop_action()), + od->nbr->name); /* Drop Ethernet local broadcast. By definition this traffic should * not be forwarded.*/ - ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 50, - "eth.bcast", debug_drop_action()); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_IP_INPUT, 50, + "eth.bcast", debug_drop_action(), + od->nbr->name); /* Avoid ICMP time exceeded for multicast, silent drop instead. * See RFC1812 section 5.3.1: @@ -14589,17 +15028,20 @@ build_misc_local_traffic_drop_flows_for_lrouter( * performance and functionality point of view. * * (priority-31 flows will send ICMP time exceeded) */ - ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 32, + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_IP_INPUT, 32, "ip.ttl == {0, 1} && !ip.later_frag && " - "(ip4.mcast || ip6.mcast)", debug_drop_action()); + "(ip4.mcast || ip6.mcast)", debug_drop_action(), + od->nbr->name); /* TTL discard */ - ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 30, - "ip.ttl == {0, 1}", debug_drop_action()); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_IP_INPUT, 30, + "ip.ttl == {0, 1}", debug_drop_action(), + od->nbr->name); /* Pass other traffic not already handled to the next table for * routing. */ - ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 0, "1", "next;"); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_IP_INPUT, 0, + "1", "next;", od->nbr->name); } static void @@ -14617,8 +15059,9 @@ build_dhcpv6_reply_flows_for_lrouter_port( " udp.dst == 546", op->lrp_networks.ipv6_addrs[i].addr_s); ovn_lflow_add(lflows, op->od, S_ROUTER_IN_IP_INPUT, 100, - ds_cstr(match), - "reg0 = 0; handle_dhcpv6_reply;"); + ds_cstr(match), "reg0 = 0; handle_dhcpv6_reply;", + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); } } @@ -14650,7 +15093,9 @@ build_ipv6_input_flows_for_lrouter_port( "next; "; ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_INPUT, 90, ds_cstr(match), lrp_actions, - &op->nbrp->header_); + &op->nbrp->header_, + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); } /* ND reply. 
These flows reply to ND solicitations for the @@ -14671,7 +15116,9 @@ build_ipv6_input_flows_for_lrouter_port( op->lrp_networks.ipv6_addrs[i].addr_s, op->lrp_networks.ipv6_addrs[i].sn_addr_s, REG_INPORT_ETH_ADDR, match, false, 90, - &op->nbrp->header_, lflows, meter_groups); + &op->nbrp->header_, lflows, meter_groups, + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); } /* UDP/TCP/SCTP port unreachable */ @@ -14691,7 +15138,9 @@ build_ipv6_input_flows_for_lrouter_port( COPP_TCP_RESET, op->od->nbr->copp, meter_groups), - &op->nbrp->header_); + &op->nbrp->header_, + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); ds_clear(match); ds_put_format(match, @@ -14707,7 +15156,9 @@ build_ipv6_input_flows_for_lrouter_port( COPP_TCP_RESET, op->od->nbr->copp, meter_groups), - &op->nbrp->header_); + &op->nbrp->header_, + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); ds_clear(match); ds_put_format(match, @@ -14726,7 +15177,9 @@ build_ipv6_input_flows_for_lrouter_port( COPP_ICMP6_ERR, op->od->nbr->copp, meter_groups), - &op->nbrp->header_); + &op->nbrp->header_, + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); ds_clear(match); ds_put_format(match, @@ -14745,7 +15198,9 @@ build_ipv6_input_flows_for_lrouter_port( COPP_ICMP6_ERR, op->od->nbr->copp, meter_groups), - &op->nbrp->header_); + &op->nbrp->header_, + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); } } @@ -14785,7 +15240,9 @@ build_ipv6_input_flows_for_lrouter_port( 31, ds_cstr(match), ds_cstr(actions), NULL, copp_meter_get(COPP_ICMP6_ERR, op->od->nbr->copp, meter_groups), - &op->nbrp->header_); + &op->nbrp->header_, + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); } ds_destroy(&ip_ds); } @@ -14860,7 +15317,9 @@ build_lrouter_ipv4_ip_input(struct ovn_port *op, ds_put_cstr(match, " && "REGBIT_EGRESS_LOOPBACK" == 0"); ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_INPUT, 100, ds_cstr(match), debug_drop_action(), - &op->nbrp->header_); + &op->nbrp->header_, + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); /* ICMP echo reply. These flows reply to ICMP echo requests * received for the router's IP address. 
Since packets only @@ -14879,7 +15338,9 @@ build_lrouter_ipv4_ip_input(struct ovn_port *op, "next; "; ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_INPUT, 90, ds_cstr(match), icmp_actions, - &op->nbrp->header_); + &op->nbrp->header_, + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); } /* BFD msg handling */ @@ -14912,7 +15373,9 @@ build_lrouter_ipv4_ip_input(struct ovn_port *op, 31, ds_cstr(match), ds_cstr(actions), NULL, copp_meter_get(COPP_ICMP4_ERR, op->od->nbr->copp, meter_groups), - &op->nbrp->header_); + &op->nbrp->header_, + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); } ds_destroy(&ip_ds); @@ -14962,7 +15425,9 @@ build_lrouter_ipv4_ip_input(struct ovn_port *op, build_lrouter_arp_flow(op->od, op, op->lrp_networks.ipv4_addrs[i].addr_s, REG_INPORT_ETH_ADDR, match, false, 90, - &op->nbrp->header_, lflows); + &op->nbrp->header_, lflows, + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); } if (sset_count(&op->od->lb_ips->ips_v4_reachable)) { @@ -14977,7 +15442,9 @@ build_lrouter_ipv4_ip_input(struct ovn_port *op, AF_INET); build_lrouter_arp_flow(op->od, op, lb_ips_v4_as, REG_INPORT_ETH_ADDR, - match, false, 90, NULL, lflows); + match, false, 90, NULL, lflows, + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); free(lb_ips_v4_as); } @@ -14994,7 +15461,9 @@ build_lrouter_ipv4_ip_input(struct ovn_port *op, AF_INET6); build_lrouter_nd_flow(op->od, op, "nd_na", lb_ips_v6_as, NULL, REG_INPORT_ETH_ADDR, match, false, 90, - NULL, lflows, meter_groups); + NULL, lflows, meter_groups, + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); free(lb_ips_v6_as); } @@ -15018,7 +15487,9 @@ build_lrouter_ipv4_ip_input(struct ovn_port *op, COPP_ICMP4_ERR, op->od->nbr->copp, meter_groups), - &op->nbrp->header_); + &op->nbrp->header_, + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); ds_clear(match); ds_put_format(match, @@ -15034,7 +15505,9 @@ build_lrouter_ipv4_ip_input(struct ovn_port *op, COPP_TCP_RESET, op->od->nbr->copp, meter_groups), - &op->nbrp->header_); + &op->nbrp->header_, + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); ds_clear(match); ds_put_format(match, @@ -15050,7 +15523,9 @@ build_lrouter_ipv4_ip_input(struct ovn_port *op, COPP_TCP_RESET, op->od->nbr->copp, meter_groups), - &op->nbrp->header_); + &op->nbrp->header_, + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); ds_clear(match); ds_put_format(match, @@ -15069,7 +15544,9 @@ build_lrouter_ipv4_ip_input(struct ovn_port *op, COPP_ICMP4_ERR, op->od->nbr->copp, meter_groups), - &op->nbrp->header_); + &op->nbrp->header_, + &op->lflow_dep_mgr, OBJDEP_TYPE_LPORT, + op->nbrp->name, op->od); } } @@ -15179,7 +15656,9 @@ build_lrouter_in_unsnat_stateless_flow(struct lflow_data *lflows, ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_UNSNAT, priority, ds_cstr(match), "next;", - &nat->header_); + &nat->header_, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); } static void @@ -15205,7 +15684,8 @@ build_lrouter_in_unsnat_in_czone_flow(struct lflow_data *lflows, ds_put_cstr(match, " && flags.loopback == 0"); ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_UNSNAT, 100, ds_cstr(match), "ct_snat_in_czone;", - &nat->header_); + &nat->header_, &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); ds_truncate(match, common_match_len); /* Update common zone match for the hairpin traffic. 
*/ @@ -15213,7 +15693,8 @@ build_lrouter_in_unsnat_in_czone_flow(struct lflow_data *lflows, ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_UNSNAT, 100, ds_cstr(match), "ct_snat;", - &nat->header_); + &nat->header_, &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); } static void @@ -15235,7 +15716,8 @@ build_lrouter_in_unsnat_flow(struct lflow_data *lflows, ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_UNSNAT, priority, ds_cstr(match), "ct_snat;", - &nat->header_); + &nat->header_, &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); } static void @@ -15305,7 +15787,9 @@ build_lrouter_in_dnat_flow(struct lflow_data *lflows, struct ovn_datapath *od, ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_DNAT, 100, ds_cstr(match), ds_cstr(actions), - &nat->header_); + &nat->header_, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); } static void @@ -15356,7 +15840,9 @@ build_lrouter_out_undnat_flow(struct lflow_data *lflows, ovn_lflow_add_with_hint(lflows, od, S_ROUTER_OUT_UNDNAT, 100, ds_cstr(match), ds_cstr(actions), - &nat->header_); + &nat->header_, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); } static void @@ -15387,7 +15873,9 @@ build_lrouter_out_is_dnat_local(struct lflow_data *lflows, ovn_lflow_add_with_hint(lflows, od, S_ROUTER_OUT_CHECK_DNAT_LOCAL, 50, ds_cstr(match), ds_cstr(actions), - &nat->header_); + &nat->header_, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); } static void @@ -15459,7 +15947,9 @@ build_lrouter_out_snat_stateless_flow(struct lflow_data *lflows, ovn_lflow_add_with_hint(lflows, od, S_ROUTER_OUT_SNAT, priority, ds_cstr(match), - ds_cstr(actions), &nat->header_); + ds_cstr(actions), &nat->header_, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); } static void @@ -15512,13 +16002,17 @@ build_lrouter_out_snat_in_czone_flow(struct lflow_data *lflows, ovn_lflow_add_with_hint(lflows, od, S_ROUTER_OUT_SNAT, priority, ds_cstr(match), - ds_cstr(actions), &nat->header_); + ds_cstr(actions), &nat->header_, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); ds_put_cstr(match, " && "REGBIT_DST_NAT_IP_LOCAL" == 1"); ovn_lflow_add_with_hint(lflows, od, S_ROUTER_OUT_SNAT, priority + 1, ds_cstr(match), - ds_cstr(&zone_actions), &nat->header_); + ds_cstr(&zone_actions), &nat->header_, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); ds_destroy(&zone_actions); } @@ -15567,7 +16061,9 @@ build_lrouter_out_snat_flow(struct lflow_data *lflows, struct ovn_datapath *od, ovn_lflow_add_with_hint(lflows, od, S_ROUTER_OUT_SNAT, priority, ds_cstr(match), - ds_cstr(actions), &nat->header_); + ds_cstr(actions), &nat->header_, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); } static void @@ -15610,7 +16106,8 @@ build_lrouter_ingress_nat_check_pkt_len(struct lflow_data *lflows, COPP_ICMP4_ERR, od->nbr->copp, meter_groups), - &nat->header_); + &nat->header_, &od->lflow_dep_mgr, + OBJDEP_TYPE_OD, od->nbr->name, od); } else { ds_put_format(match, " && ip6 && ip6.dst == %s", nat->external_ip); /* Set icmp6.frag_mtu to gw_mtu */ @@ -15637,7 +16134,8 @@ build_lrouter_ingress_nat_check_pkt_len(struct lflow_data *lflows, COPP_ICMP6_ERR, od->nbr->copp, meter_groups), - &nat->header_); + &nat->header_, &od->lflow_dep_mgr, + OBJDEP_TYPE_OD, od->nbr->name, od); } } @@ -15657,7 +16155,9 @@ build_lrouter_ingress_flow(struct lflow_data *lflows, struct ovn_datapath *od, is_v6 ? 
"ip6.src" : "ip4.src", nat->external_ip); ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_IP_INPUT, 120, ds_cstr(match), "next;", - &nat->header_); + &nat->header_, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); } /* Logical router ingress table 0: * For NAT on a distributed router, add rules allowing @@ -15681,6 +16181,8 @@ build_lrouter_ingress_flow(struct lflow_data *lflows, struct ovn_datapath *od, build_gateway_mtu_flow(lflows, l3dgw_port, S_ROUTER_IN_ADMISSION, 50, 55, match, actions, &nat->header_, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od, REG_INPORT_ETH_ADDR " = %s; next;", l3dgw_port->lrp_networks.ea_s); if (gw_mtu) { @@ -15820,29 +16322,14 @@ lrouter_check_nat_entry(struct ovn_datapath *od, const struct nbrec_nat *nat, /* NAT, Defrag and load balancing. */ static void -build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od, - struct lflow_data *lflows, - const struct hmap *ls_ports, - const struct hmap *lr_ports, - struct ds *match, - struct ds *actions, - const struct shash *meter_groups, - const struct chassis_features *features) +build_lrouter_nat_flows_lb_related(struct ovn_datapath *od, + struct lflow_data *lflows, + struct ds *match, + const struct chassis_features *features) { - ovs_assert(od->nbr); - - /* Packets are allowed by default. */ - ovn_lflow_add(lflows, od, S_ROUTER_IN_DEFRAG, 0, "1", "next;"); - ovn_lflow_add(lflows, od, S_ROUTER_IN_UNSNAT, 0, "1", "next;"); - ovn_lflow_add(lflows, od, S_ROUTER_OUT_CHECK_DNAT_LOCAL, 0, "1", - REGBIT_DST_NAT_IP_LOCAL" = 0; next;"); - ovn_lflow_add(lflows, od, S_ROUTER_OUT_SNAT, 0, "1", "next;"); - ovn_lflow_add(lflows, od, S_ROUTER_IN_DNAT, 0, "1", "next;"); - ovn_lflow_add(lflows, od, S_ROUTER_OUT_UNDNAT, 0, "1", "next;"); - ovn_lflow_add(lflows, od, S_ROUTER_OUT_POST_UNDNAT, 0, "1", "next;"); - ovn_lflow_add(lflows, od, S_ROUTER_OUT_POST_SNAT, 0, "1", "next;"); - ovn_lflow_add(lflows, od, S_ROUTER_OUT_EGR_LOOP, 0, "1", "next;"); - ovn_lflow_add(lflows, od, S_ROUTER_IN_ECMP_STATEFUL, 0, "1", "next;"); + if (!od->has_lb_vip) { + return; + } const char *ct_flag_reg = features->ct_no_masked_label ? "ct_mark" @@ -15856,24 +16343,27 @@ build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od, * a dynamically negotiated FTP data channel), but will allow * related traffic such as an ICMP Port Unreachable through * that's generated from a non-listening UDP port. */ - if (od->has_lb_vip && features->ct_lb_related) { + if (features->ct_lb_related) { ds_clear(match); ds_put_cstr(match, "ct.rel && !ct.est && !ct.new"); size_t match_len = match->length; ds_put_format(match, " && %s.skip_snat == 1", ct_flag_reg); - ovn_lflow_add(lflows, od, S_ROUTER_IN_DNAT, 70, ds_cstr(match), - "flags.skip_snat_for_lb = 1; ct_commit_nat;"); + ovn_lflow_add_with_lb_lflow_ref( + lflows, od, S_ROUTER_IN_DNAT, 70, ds_cstr(match), + "flags.skip_snat_for_lb = 1; ct_commit_nat;", od->nbr->name); ds_truncate(match, match_len); ds_put_format(match, " && %s.force_snat == 1", ct_flag_reg); - ovn_lflow_add(lflows, od, S_ROUTER_IN_DNAT, 70, ds_cstr(match), - "flags.force_snat_for_lb = 1; ct_commit_nat;"); + ovn_lflow_add_with_lb_lflow_ref( + lflows, od, S_ROUTER_IN_DNAT, 70, ds_cstr(match), + "flags.force_snat_for_lb = 1; ct_commit_nat;", od->nbr->name); ds_truncate(match, match_len); - ovn_lflow_add(lflows, od, S_ROUTER_IN_DNAT, 50, ds_cstr(match), - "ct_commit_nat;"); + ovn_lflow_add_with_lb_lflow_ref(lflows, od, S_ROUTER_IN_DNAT, 50, + ds_cstr(match), "ct_commit_nat;", + od->nbr->name); } /* Ingress DNAT (Priority 50/70). 
@@ -15881,25 +16371,53 @@ build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od, * Pass the traffic that is already established to the next table with * proper flags set. */ - if (od->has_lb_vip) { - ds_clear(match); + ds_clear(match); - ds_put_format(match, "ct.est && !ct.rel && !ct.new && %s.natted", - ct_flag_reg); - size_t match_len = match->length; + ds_put_format(match, "ct.est && !ct.rel && !ct.new && %s.natted", + ct_flag_reg); + size_t match_len = match->length; - ds_put_format(match, " && %s.skip_snat == 1", ct_flag_reg); - ovn_lflow_add(lflows, od, S_ROUTER_IN_DNAT, 70, ds_cstr(match), - "flags.skip_snat_for_lb = 1; next;"); + ds_put_format(match, " && %s.skip_snat == 1", ct_flag_reg); + ovn_lflow_add_with_lb_lflow_ref( + lflows, od, S_ROUTER_IN_DNAT, 70, ds_cstr(match), + "flags.skip_snat_for_lb = 1; next;", od->nbr->name); - ds_truncate(match, match_len); - ds_put_format(match, " && %s.force_snat == 1", ct_flag_reg); - ovn_lflow_add(lflows, od, S_ROUTER_IN_DNAT, 70, ds_cstr(match), - "flags.force_snat_for_lb = 1; next;"); + ds_truncate(match, match_len); + ds_put_format(match, " && %s.force_snat == 1", ct_flag_reg); + ovn_lflow_add_with_lb_lflow_ref( + lflows, od, S_ROUTER_IN_DNAT, 70, ds_cstr(match), + "flags.force_snat_for_lb = 1; next;", od->nbr->name); - ds_truncate(match, match_len); - ovn_lflow_add(lflows, od, S_ROUTER_IN_DNAT, 50, ds_cstr(match), - "next;"); + ds_truncate(match, match_len); + ovn_lflow_add_with_lb_lflow_ref(lflows, od, S_ROUTER_IN_DNAT, 50, + ds_cstr(match), "next;", + od->nbr->name); + + /* Handle force SNAT options set in the gateway router. */ + if (od->is_gw_router) { + bool lb_force_snat_ip = + !lport_addresses_is_empty(&od->lb_force_snat_addrs); + if (lb_force_snat_ip) { + if (od->lb_force_snat_addrs.n_ipv4_addrs) { + build_lrouter_force_snat_flows(lflows, od, "4", + od->lb_force_snat_addrs.ipv4_addrs[0].addr_s, "lb", + OBJDEP_TYPE_LB); + } + if (od->lb_force_snat_addrs.n_ipv6_addrs) { + build_lrouter_force_snat_flows(lflows, od, "6", + od->lb_force_snat_addrs.ipv6_addrs[0].addr_s, "lb", + OBJDEP_TYPE_LB); + } + } + } +} + +static void +build_lrouter_nat_flows_ct_lb_related(struct ovn_datapath *od, + struct lflow_data *lflows) +{ + if (!(od->nbr->n_nat || od->has_lb_vip)) { + return; } /* If the router has load balancer or DNAT rules, re-circulate every packet @@ -15911,20 +16429,63 @@ build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od, * not committed, it would produce ongoing datapath flows with the ct.new * flag set. Some NICs are unable to offload these flows. */ - if (od->is_gw_router && (od->nbr->n_nat || od->has_lb_vip)) { + if (od->is_gw_router) { /* Do not send ND or ICMP packets to connection tracking. 
*/ - ovn_lflow_add(lflows, od, S_ROUTER_OUT_UNDNAT, 100, - "nd || nd_rs || nd_ra", "next;"); - ovn_lflow_add(lflows, od, S_ROUTER_OUT_UNDNAT, 50, - "ip", "flags.loopback = 1; ct_dnat;"); - ovn_lflow_add(lflows, od, S_ROUTER_OUT_POST_UNDNAT, 50, - "ip && ct.new", "ct_commit { } ; next; "); + ovn_lflow_add_with_ct_lb_lflow_ref(lflows, od, S_ROUTER_OUT_UNDNAT, + 100, "nd || nd_rs || nd_ra", + "next;", od->nbr->name); + ovn_lflow_add_with_ct_lb_lflow_ref(lflows, od, S_ROUTER_OUT_UNDNAT, 50, + "ip", "flags.loopback = 1; ct_dnat;", + od->nbr->name); + ovn_lflow_add_with_ct_lb_lflow_ref(lflows, od, + S_ROUTER_OUT_POST_UNDNAT, + 50, "ip && ct.new", + "ct_commit { } ; next; ", + od->nbr->name); } +} + +static void +build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od, + struct lflow_data *lflows, + const struct hmap *ls_ports, + const struct hmap *lr_ports, + struct ds *match, + struct ds *actions, + const struct shash *meter_groups, + const struct chassis_features *features) +{ + ovs_assert(od->nbr); + + /* Packets are allowed by default. */ + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_DEFRAG, 0, + "1", "next;", od->nbr->name); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_UNSNAT, 0, + "1", "next;", od->nbr->name); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_OUT_CHECK_DNAT_LOCAL, + 0, "1", + REGBIT_DST_NAT_IP_LOCAL" = 0; next;", + od->nbr->name); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_OUT_SNAT, 0, + "1", "next;", od->nbr->name); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_DNAT, 0, + "1", "next;", od->nbr->name); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_OUT_UNDNAT, 0, + "1", "next;", od->nbr->name); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_OUT_POST_UNDNAT, + 0, "1", "next;", od->nbr->name); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_OUT_POST_SNAT, 0, + "1", "next;", od->nbr->name); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_OUT_EGR_LOOP, 0, + "1", "next;", od->nbr->name); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_IN_ECMP_STATEFUL, 0, + "1", "next;", od->nbr->name); /* Send the IPv6 NS packets to next table. When ovn-controller * generates IPv6 NS (for the action - nd_ns{}), the injected * packet would go through conntrack - which is not required. */ - ovn_lflow_add(lflows, od, S_ROUTER_OUT_SNAT, 120, "nd_ns", "next;"); + ovn_lflow_add_with_od_lflow_ref(lflows, od, S_ROUTER_OUT_SNAT, 120, + "nd_ns", "next;", od->nbr->name); /* NAT rules are only valid on Gateway routers and routers with * l3dgw_ports (router has port(s) with gateway chassis @@ -15937,8 +16498,6 @@ build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od, bool dnat_force_snat_ip = !lport_addresses_is_empty(&od->dnat_force_snat_addrs); - bool lb_force_snat_ip = - !lport_addresses_is_empty(&od->lb_force_snat_addrs); for (int i = 0; i < od->nbr->n_nat; i++) { const struct nbrec_nat *nat = nat = od->nbr->nat[i]; @@ -16004,7 +16563,9 @@ build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od, S_ROUTER_IN_ARP_RESOLVE, 150, ds_cstr(match), debug_drop_action(), - &nat->header_); + &nat->header_, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); /* Now for packets coming from other (downlink) LRPs, allow ARP * resolve for the NAT IP, so that such packets can be * forwarded for E/W NAT. 
*/ @@ -16023,7 +16584,9 @@ build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od, S_ROUTER_IN_ARP_RESOLVE, 100, ds_cstr(match), ds_cstr(actions), - &nat->header_); + &nat->header_, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); if (od->redirect_bridged && distributed_nat) { ds_clear(match); ds_put_format( @@ -16044,7 +16607,9 @@ build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od, ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_ARP_RESOLVE, 90, ds_cstr(match), ds_cstr(actions), - &nat->header_); + &nat->header_, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); } sset_add(&nat_entries, nat->external_ip); } @@ -16110,7 +16675,9 @@ build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od, if (op && op->nbsp && !strcmp(op->nbsp->type, "virtual")) { ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_GW_REDIRECT, 80, ds_cstr(match), - debug_drop_action(), &nat->header_); + debug_drop_action(), &nat->header_, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); } ds_put_format(match, " && is_chassis_resident(\"%s\")", nat->logical_port); @@ -16120,7 +16687,9 @@ build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od, nat->external_ip); ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_GW_REDIRECT, 100, ds_cstr(match), - ds_cstr(actions), &nat->header_); + ds_cstr(actions), &nat->header_, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); } /* Egress Loopback table: For NAT on a distributed router. @@ -16161,7 +16730,9 @@ build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od, ovn_stage_get_table(S_ROUTER_IN_ADMISSION)); ovn_lflow_add_with_hint(lflows, od, S_ROUTER_OUT_EGR_LOOP, 100, ds_cstr(match), ds_cstr(actions), - &nat->header_); + &nat->header_, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); } } @@ -16181,7 +16752,9 @@ build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od, ds_put_cstr(actions, REGBIT_DST_NAT_IP_LOCAL" = 1; next;"); ovn_lflow_add_with_hint(lflows, od, S_ROUTER_OUT_CHECK_DNAT_LOCAL, 50, ds_cstr(match), ds_cstr(actions), - &od->nbr->header_); + &od->nbr->header_, + &od->lflow_dep_mgr, OBJDEP_TYPE_OD, + od->nbr->name, od); } @@ -16191,22 +16764,12 @@ build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od, if (od->dnat_force_snat_addrs.n_ipv4_addrs) { build_lrouter_force_snat_flows(lflows, od, "4", od->dnat_force_snat_addrs.ipv4_addrs[0].addr_s, - "dnat"); + "dnat", OBJDEP_TYPE_OD); } if (od->dnat_force_snat_addrs.n_ipv6_addrs) { build_lrouter_force_snat_flows(lflows, od, "6", od->dnat_force_snat_addrs.ipv6_addrs[0].addr_s, - "dnat"); - } - } - if (lb_force_snat_ip) { - if (od->lb_force_snat_addrs.n_ipv4_addrs) { - build_lrouter_force_snat_flows(lflows, od, "4", - od->lb_force_snat_addrs.ipv4_addrs[0].addr_s, "lb"); - } - if (od->lb_force_snat_addrs.n_ipv6_addrs) { - build_lrouter_force_snat_flows(lflows, od, "6", - od->lb_force_snat_addrs.ipv6_addrs[0].addr_s, "lb"); + "dnat", OBJDEP_TYPE_OD); } } } @@ -16214,8 +16777,6 @@ build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od, sset_destroy(&nat_entries); } - - struct lswitch_flow_build_info { const struct ovn_datapaths *ls_datapaths; const struct ovn_datapaths *lr_datapaths; @@ -16297,6 +16858,9 @@ build_lswitch_and_lrouter_iterate_by_lr(struct ovn_datapath *od, lsi->lr_ports, &lsi->match, &lsi->actions, lsi->meter_groups, lsi->features); + build_lrouter_nat_flows_lb_related(od, lsi->lflows, &lsi->match, + lsi->features); + build_lrouter_nat_flows_ct_lb_related(od, lsi->lflows); build_lrouter_lb_affinity_default_flows(od, lsi->lflows); } @@ -16445,7 +17009,7 @@ 
build_lflows_thread(void *arg) if (stop_parallel_processing()) { return NULL; } - build_lswitch_arp_nd_service_monitor(lb_dps->lb, + build_lswitch_arp_nd_service_monitor(lb_dps, lsi->ls_ports, lsi->lflows, &lsi->match, @@ -16629,7 +17193,7 @@ build_lswitch_and_lrouter_flows(const struct ovn_datapaths *ls_datapaths, stopwatch_stop(LFLOWS_PORTS_STOPWATCH_NAME, time_msec()); stopwatch_start(LFLOWS_LBS_STOPWATCH_NAME, time_msec()); HMAP_FOR_EACH (lb_dps, hmap_node, lb_dps_map) { - build_lswitch_arp_nd_service_monitor(lb_dps->lb, lsi.ls_ports, + build_lswitch_arp_nd_service_monitor(lb_dps, lsi.ls_ports, lsi.lflows, &lsi.actions, &lsi.match); build_lrouter_defrag_flows_for_lb(lb_dps, lsi.lflows, diff --git a/northd/northd.h b/northd/northd.h index 4af6c4e9b0..d0992f142e 100644 --- a/northd/northd.h +++ b/northd/northd.h @@ -325,6 +325,8 @@ struct ovn_datapath { /* Map of ovn_port objects belonging to this datapath. * This map doesn't include derived ports. */ struct hmap ports; + + struct objdep_mgr lflow_dep_mgr; }; void ovnnb_db_run(struct northd_input *input_data,
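
For reviewers following the converted call sites above: the ovn_lflow_add_with_od_lflow_ref(), ovn_lflow_add_with_lb_lflow_ref() and ovn_lflow_add_with_ct_lb_lflow_ref() helpers are presumably defined in an earlier part of this patch that is not quoted here. The sketch below is illustrative only -- the macro names' expansion and the choice of objdep_mgr are assumptions, not the series' actual definitions -- and shows how such wrappers are expected to map onto the extended ovn_lflow_add() signature visible at the converted call sites (lflows, od, stage, priority, match, actions, mgr, objdep_type, resource name, resource datapath), recording each generated lflow against the datapath's objdep_mgr under the corresponding objdep_type so it can later be tracked per resource for incremental processing:

    /* Hypothetical sketch, not the actual definitions from this series.
     * OBJDEP_TYPE_OD / OBJDEP_TYPE_LB and od->lflow_dep_mgr come from the
     * hunks above; which mgr the LB variant records into is an assumption. */
    #define ovn_lflow_add_with_od_lflow_ref(LFLOWS, OD, STAGE, PRIO,       \
                                            MATCH, ACTIONS, REF_NAME)      \
        ovn_lflow_add(LFLOWS, OD, STAGE, PRIO, MATCH, ACTIONS,             \
                      &(OD)->lflow_dep_mgr, OBJDEP_TYPE_OD, REF_NAME, OD)

    #define ovn_lflow_add_with_lb_lflow_ref(LFLOWS, OD, STAGE, PRIO,       \
                                            MATCH, ACTIONS, REF_NAME)      \
        ovn_lflow_add(LFLOWS, OD, STAGE, PRIO, MATCH, ACTIONS,             \
                      &(OD)->lflow_dep_mgr, OBJDEP_TYPE_LB, REF_NAME, OD)

The OBJDEP_TYPE_CT_LB variant would follow the same pattern; keeping the LB and CT_LB references under distinct objdep_types is what lets a later change reprocess only the load-balancer related lflows of a datapath.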