@@ -874,6 +874,7 @@ add_matches_to_flow_table(const struct sbrec_logical_flow *lflow,
.collector_ids = l_ctx_in->collector_ids,
.lflow_uuid = lflow->header_.uuid,
.dp_key = ldp->datapath->tunnel_key,
+ .explicit_arp_ns_output = l_ctx_in->explicit_arp_ns_output,
.pipeline = ingress ? OVNACT_P_INGRESS : OVNACT_P_EGRESS,
.ingress_ptable = OFTABLE_LOG_INGRESS_PIPELINE,
@@ -130,6 +130,7 @@ struct lflow_ctx_in {
bool lb_hairpin_use_ct_mark;
bool localnet_learn_fdb;
bool localnet_learn_fdb_changed;
+ bool explicit_arp_ns_output;
};
struct lflow_ctx_out {
@@ -199,15 +199,24 @@ ovn_fdb_add(struct hmap *fdbs, uint32_t dp_key, struct eth_addr mac,
/* packet buffering functions */
struct packet_data *
-ovn_packet_data_create(struct ofpbuf ofpacts,
- const struct dp_packet *original_packet)
+ovn_packet_data_create(const struct ofputil_packet_in *pin,
+ const struct ofpbuf *continuation)
{
struct packet_data *pd = xmalloc(sizeof *pd);
- pd->ofpacts = ofpacts;
- /* clone the packet to send it later with correct L2 address */
- pd->p = dp_packet_clone_data(dp_packet_data(original_packet),
- dp_packet_size(original_packet));
+ pd->pin = (struct ofputil_packet_in) {
+ .packet = xmemdup(pin->packet, pin->packet_len),
+ .packet_len = pin->packet_len,
+ .flow_metadata = pin->flow_metadata,
+ .reason = pin->reason,
+ .table_id = pin->table_id,
+ .cookie = pin->cookie,
+ /* Userdata is intentionally left empty;
+ * it is not needed for the continuation. */
+ .userdata = NULL,
+ .userdata_len = 0,
+ };
+ pd->continuation = ofpbuf_clone(continuation);
return pd;
}
@@ -216,8 +225,8 @@ ovn_packet_data_create(struct ofpbuf ofpacts,
void
ovn_packet_data_destroy(struct packet_data *pd)
{
- dp_packet_delete(pd->p);
- ofpbuf_uninit(&pd->ofpacts);
+ free(pd->pin.packet);
+ ofpbuf_delete(pd->continuation);
free(pd);
}
@@ -307,7 +316,10 @@ ovn_buffered_packets_ctx_run(struct buffered_packets_ctx *ctx,
struct packet_data *pd;
LIST_FOR_EACH_POP (pd, node, &bp->queue) {
- struct eth_header *eth = dp_packet_data(pd->p);
+ struct dp_packet packet;
+ dp_packet_use_const(&packet, pd->pin.packet, pd->pin.packet_len);
+
+ struct eth_header *eth = dp_packet_data(&packet);
eth->eth_dst = mac;
ovs_list_push_back(&ctx->ready_packets_data, &pd->node);
@@ -24,6 +24,7 @@
#include "openvswitch/hmap.h"
#include "openvswitch/list.h"
#include "openvswitch/ofpbuf.h"
+#include "openvswitch/ofp-packet.h"
struct ovsdb_idl_index;
@@ -91,8 +92,8 @@ struct fdb_entry *ovn_fdb_add(struct hmap *fdbs,
struct packet_data {
struct ovs_list node;
- struct ofpbuf ofpacts;
- struct dp_packet *p;
+ struct ofpbuf *continuation;
+ struct ofputil_packet_in pin;
};
struct buffered_packets {
@@ -120,8 +121,8 @@ struct buffered_packets_ctx {
};
struct packet_data *
-ovn_packet_data_create(struct ofpbuf ofpacts,
- const struct dp_packet *original_packet);
+ovn_packet_data_create(const struct ofputil_packet_in *pin,
+ const struct ofpbuf *continuation);
void ovn_packet_data_destroy(struct packet_data *pd);
struct buffered_packets *
ovn_buffered_packets_add(struct buffered_packets_ctx *ctx, uint64_t dp_key,
@@ -3667,6 +3667,7 @@ non_vif_data_ovs_iface_handler(struct engine_node *node, void *data OVS_UNUSED)
struct ed_type_northd_options {
bool lb_hairpin_use_ct_mark;
+ bool explicit_arp_ns_output;
};
@@ -3697,6 +3698,13 @@ en_northd_options_run(struct engine_node *node, void *data)
? smap_get_bool(&sb_global->options, "lb_hairpin_use_ct_mark",
DEFAULT_SB_GLOBAL_LB_HAIRPIN_USE_CT_MARK)
: DEFAULT_SB_GLOBAL_LB_HAIRPIN_USE_CT_MARK;
+
+ n_opts->explicit_arp_ns_output =
+ sb_global
+ ? smap_get_bool(&sb_global->options, "arp_ns_explicit_output",
+ false)
+ : false;
+
engine_set_node_state(node, EN_UPDATED);
}
@@ -3719,6 +3727,18 @@ en_northd_options_sb_sb_global_handler(struct engine_node *node, void *data)
n_opts->lb_hairpin_use_ct_mark = lb_hairpin_use_ct_mark;
engine_set_node_state(node, EN_UPDATED);
}
+
+ bool explicit_arp_ns_output =
+ sb_global
+ ? smap_get_bool(&sb_global->options, "arp_ns_explicit_output",
+ false)
+ : false;
+
+ if (explicit_arp_ns_output != n_opts->explicit_arp_ns_output) {
+ n_opts->explicit_arp_ns_output = explicit_arp_ns_output;
+ engine_set_node_state(node, EN_UPDATED);
+ }
+
return true;
}
@@ -3948,6 +3968,7 @@ init_lflow_ctx(struct engine_node *node,
l_ctx_in->localnet_learn_fdb_changed = rt_data->localnet_learn_fdb_changed;
l_ctx_in->chassis_tunnels = &non_vif_data->chassis_tunnels;
l_ctx_in->lb_hairpin_use_ct_mark = n_opts->lb_hairpin_use_ct_mark;
+ l_ctx_in->explicit_arp_ns_output = n_opts->explicit_arp_ns_output;
l_ctx_in->nd_ra_opts = &fo->nd_ra_opts;
l_ctx_in->dhcp_opts = &dhcp_opts->v4_opts;
l_ctx_in->dhcpv6_opts = &dhcp_opts->v6_opts;
@@ -257,9 +257,9 @@ static void pinctrl_handle_put_nd_ra_opts(
struct ofpbuf *continuation);
static void pinctrl_handle_nd_ns(struct rconn *swconn,
const struct flow *ip_flow,
- struct dp_packet *pkt_in,
- const struct match *md,
- struct ofpbuf *userdata);
+ const struct ofputil_packet_in *pin,
+ struct ofpbuf *userdata,
+ const struct ofpbuf *continuation);
static void pinctrl_handle_put_icmp_frag_mtu(struct rconn *swconn,
const struct flow *in_flow,
struct dp_packet *pkt_in,
@@ -1476,11 +1476,13 @@ destroy_buffered_packets_ctx(void)
/* Called with in the pinctrl_handler thread context. */
static void
-pinctrl_handle_buffered_packets(struct dp_packet *pkt_in,
- const struct match *md, bool is_arp)
+pinctrl_handle_buffered_packets(const struct ofputil_packet_in *pin,
+ const struct ofpbuf *continuation,
+ bool is_arp)
OVS_REQUIRES(pinctrl_mutex)
{
struct in6_addr ip;
+ const struct match *md = &pin->flow_metadata;
uint64_t dp_key = ntohll(md->flow.metadata);
uint64_t oport_key = md->flow.regs[MFF_LOG_OUTPORT - MFF_REG0];
@@ -1498,20 +1500,7 @@ OVS_REQUIRES(pinctrl_mutex)
return;
}
- struct ofpbuf ofpacts;
- ofpbuf_init(&ofpacts, 4096);
- reload_metadata(&ofpacts, md);
- /* reload pkt_mark field */
- const struct mf_field *pkt_mark_field = mf_from_id(MFF_PKT_MARK);
- union mf_value pkt_mark_value;
- mf_get_value(pkt_mark_field, &md->flow, &pkt_mark_value);
- ofpact_put_set_field(&ofpacts, pkt_mark_field, &pkt_mark_value, NULL);
-
- struct ofpact_resubmit *resubmit = ofpact_put_RESUBMIT(&ofpacts);
- resubmit->in_port = OFPP_CONTROLLER;
- resubmit->table_id = OFTABLE_OUTPUT_INIT;
-
- struct packet_data *pd = ovn_packet_data_create(ofpacts, pkt_in);
+ struct packet_data *pd = ovn_packet_data_create(pin, continuation);
ovn_buffered_packets_packet_data_enqueue(bp, pd);
/* There is a chance that the MAC binding was already created. */
@@ -1521,8 +1510,8 @@ OVS_REQUIRES(pinctrl_mutex)
/* Called with in the pinctrl_handler thread context. */
static void
pinctrl_handle_arp(struct rconn *swconn, const struct flow *ip_flow,
- struct dp_packet *pkt_in,
- const struct match *md, struct ofpbuf *userdata)
+ const struct ofputil_packet_in *pin,
+ struct ofpbuf *userdata, const struct ofpbuf *continuation)
{
/* This action only works for IP packets, and the switch should only send
* us IP packets this way, but check here just to be sure. */
@@ -1534,7 +1523,7 @@ pinctrl_handle_arp(struct rconn *swconn, const struct flow *ip_flow,
}
ovs_mutex_lock(&pinctrl_mutex);
- pinctrl_handle_buffered_packets(pkt_in, md, true);
+ pinctrl_handle_buffered_packets(pin, continuation, true);
ovs_mutex_unlock(&pinctrl_mutex);
/* Compose an ARP packet. */
@@ -1559,7 +1548,8 @@ pinctrl_handle_arp(struct rconn *swconn, const struct flow *ip_flow,
ip_flow->vlans[0].tci);
}
- set_actions_and_enqueue_msg(swconn, &packet, md, userdata);
+ set_actions_and_enqueue_msg(swconn, &packet,
+ &pin->flow_metadata, userdata);
dp_packet_uninit(&packet);
}
@@ -3212,8 +3202,7 @@ process_packet_in(struct rconn *swconn, const struct ofp_header *msg)
switch (ntohl(ah->opcode)) {
case ACTION_OPCODE_ARP:
- pinctrl_handle_arp(swconn, &headers, &packet, &pin.flow_metadata,
- &userdata);
+ pinctrl_handle_arp(swconn, &headers, &pin, &userdata, &continuation);
break;
case ACTION_OPCODE_IGMP:
pinctrl_ip_mcast_handle(swconn, &headers, &packet, &pin.flow_metadata,
@@ -3279,8 +3268,7 @@ process_packet_in(struct rconn *swconn, const struct ofp_header *msg)
break;
case ACTION_OPCODE_ND_NS:
- pinctrl_handle_nd_ns(swconn, &headers, &packet, &pin.flow_metadata,
- &userdata);
+ pinctrl_handle_nd_ns(swconn, &headers, &pin, &userdata, &continuation);
break;
case ACTION_OPCODE_ICMP:
@@ -4282,16 +4270,8 @@ send_mac_binding_buffered_pkts(struct rconn *swconn)
struct packet_data *pd;
LIST_FOR_EACH_POP (pd, node, &buffered_packets_ctx.ready_packets_data) {
- struct ofputil_packet_out po = {
- .packet = dp_packet_data(pd->p),
- .packet_len = dp_packet_size(pd->p),
- .buffer_id = UINT32_MAX,
- .ofpacts = pd->ofpacts.data,
- .ofpacts_len = pd->ofpacts.size,
- };
- match_set_in_port(&po.flow_metadata, OFPP_CONTROLLER);
- queue_msg(swconn, ofputil_encode_packet_out(&po, proto));
-
+ queue_msg(swconn, ofputil_encode_resume(&pd->pin, pd->continuation,
+ proto));
ovn_packet_data_destroy(pd);
}
@@ -6225,8 +6205,9 @@ pinctrl_handle_nd_na(struct rconn *swconn, const struct flow *ip_flow,
/* Called with in the pinctrl_handler thread context. */
static void
pinctrl_handle_nd_ns(struct rconn *swconn, const struct flow *ip_flow,
- struct dp_packet *pkt_in,
- const struct match *md, struct ofpbuf *userdata)
+ const struct ofputil_packet_in *pin,
+ struct ofpbuf *userdata,
+ const struct ofpbuf *continuation)
{
/* This action only works for IPv6 packets. */
if (get_dl_type(ip_flow) != htons(ETH_TYPE_IPV6)) {
@@ -6236,7 +6217,7 @@ pinctrl_handle_nd_ns(struct rconn *swconn, const struct flow *ip_flow,
}
ovs_mutex_lock(&pinctrl_mutex);
- pinctrl_handle_buffered_packets(pkt_in, md, false);
+ pinctrl_handle_buffered_packets(pin, continuation, false);
ovs_mutex_unlock(&pinctrl_mutex);
uint64_t packet_stub[128 / 8];
@@ -6249,7 +6230,8 @@ pinctrl_handle_nd_ns(struct rconn *swconn, const struct flow *ip_flow,
&ip_flow->ipv6_dst);
/* Reload previous packet metadata and set actions from userdata. */
- set_actions_and_enqueue_msg(swconn, &packet, md, userdata);
+ set_actions_and_enqueue_msg(swconn, &packet,
+ &pin->flow_metadata, userdata);
dp_packet_uninit(&packet);
}
@@ -847,6 +847,9 @@ struct ovnact_encode_params {
/* The datapath key. */
uint32_t dp_key;
+ /* True when northd emits an explicit output after the arp/nd_ns action, so the encoder must not append its own resubmit. */
+ bool explicit_arp_ns_output;
+
/* OVN maps each logical flow table (ltable), one-to-one, onto a physical
* OpenFlow flow table (ptable). A number of parameters describe this
* mapping and data related to flow tables:
@@ -1951,6 +1951,44 @@ format_REJECT(const struct ovnact_nest *nest, struct ds *s)
format_nested_action(nest, "reject", s);
}
+static bool
+is_paused_nested_action(enum action_opcode opcode)
+{
+ switch (opcode) {
+ case ACTION_OPCODE_ARP:
+ case ACTION_OPCODE_ND_NS:
+ return true;
+ case ACTION_OPCODE_IGMP:
+ case ACTION_OPCODE_PUT_ARP:
+ case ACTION_OPCODE_PUT_DHCP_OPTS:
+ case ACTION_OPCODE_ND_NA:
+ case ACTION_OPCODE_ND_NA_ROUTER:
+ case ACTION_OPCODE_PUT_ND:
+ case ACTION_OPCODE_PUT_FDB:
+ case ACTION_OPCODE_PUT_DHCPV6_OPTS:
+ case ACTION_OPCODE_DNS_LOOKUP:
+ case ACTION_OPCODE_LOG:
+ case ACTION_OPCODE_PUT_ND_RA_OPTS:
+ case ACTION_OPCODE_ICMP:
+ case ACTION_OPCODE_ICMP4_ERROR:
+ case ACTION_OPCODE_ICMP6_ERROR:
+ case ACTION_OPCODE_TCP_RESET:
+ case ACTION_OPCODE_SCTP_ABORT:
+ case ACTION_OPCODE_REJECT:
+ case ACTION_OPCODE_PUT_ICMP4_FRAG_MTU:
+ case ACTION_OPCODE_PUT_ICMP6_FRAG_MTU:
+ case ACTION_OPCODE_EVENT:
+ case ACTION_OPCODE_BIND_VPORT:
+ case ACTION_OPCODE_DHCP6_SERVER:
+ case ACTION_OPCODE_HANDLE_SVC_CHECK:
+ case ACTION_OPCODE_BFD_MSG:
+ case ACTION_OPCODE_ACTIVATION_STRATEGY_RARP:
+ case ACTION_OPCODE_MG_SPLIT_BUF:
+ default:
+ return false;
+ }
+}
+
static void
encode_nested_actions(const struct ovnact_nest *on,
const struct ovnact_encode_params *ep,
@@ -1966,7 +2004,8 @@ encode_nested_actions(const struct ovnact_nest *on,
* converted to OpenFlow, as its userdata. ovn-controller will convert the
* packet to ARP or NA and then send the packet and actions back to the
* switch inside an OFPT_PACKET_OUT message. */
- size_t oc_offset = encode_start_controller_op(opcode, false,
+ bool pause = is_paused_nested_action(opcode);
+ size_t oc_offset = encode_start_controller_op(opcode, pause,
ep->ctrl_meter_id, ofpacts);
ofpacts_put_openflow_actions(inner_ofpacts.data, inner_ofpacts.size,
ofpacts, OFP15_VERSION);
@@ -1982,6 +2021,9 @@ encode_ARP(const struct ovnact_nest *on,
struct ofpbuf *ofpacts)
{
encode_nested_actions(on, ep, ACTION_OPCODE_ARP, ofpacts);
+ if (!ep->explicit_arp_ns_output) {
+ emit_resubmit(ofpacts, ep->output_ptable);
+ }
}
static void
@@ -2070,6 +2112,9 @@ encode_ND_NS(const struct ovnact_nest *on,
struct ofpbuf *ofpacts)
{
encode_nested_actions(on, ep, ACTION_OPCODE_ND_NS, ofpacts);
+ if (!ep->explicit_arp_ns_output) {
+ emit_resubmit(ofpacts, ep->output_ptable);
+ }
}
static void
@@ -553,6 +553,10 @@ update_sb_config_options_to_sbrec(struct ed_type_global_config *config_data,
smap_replace(options, "sbctl_probe_interval", sip);
}
+ /* Add an indication that this northd emits the explicit output
+ * after the arp/nd_ns action. */
+ smap_add(options, "arp_ns_explicit_output", "true");
+
if (!smap_equal(&sb->options, options)) {
sbrec_sb_global_set_options(sb, options);
}
@@ -13409,7 +13409,7 @@ build_arp_request_flows_for_lrouter(
"ip6.dst = %s; "
"nd.target = %s; "
"output; "
- "};", ETH_ADDR_ARGS(eth_dst), sn_addr_s,
+ "}; output;", ETH_ADDR_ARGS(eth_dst), sn_addr_s,
route->nexthop);
ovn_lflow_add_with_hint__(lflows, od, S_ROUTER_IN_ARP_REQUEST, 200,
@@ -13429,7 +13429,7 @@ build_arp_request_flows_for_lrouter(
"arp.tpa = " REG_NEXT_HOP_IPV4 "; "
"arp.op = 1; " /* ARP request */
"output; "
- "};",
+ "}; output;",
copp_meter_get(COPP_ARP_RESOLVE, od->nbr->copp,
meter_groups),
lflow_ref);
@@ -13438,7 +13438,7 @@ build_arp_request_flows_for_lrouter(
"nd_ns { "
"nd.target = " REG_NEXT_HOP_IPV6 "; "
"output; "
- "};",
+ "}; output;",
copp_meter_get(COPP_ND_NS_RESOLVE, od->nbr->copp,
meter_groups),
lflow_ref);
@@ -70,6 +70,14 @@ M_NS_CHECK_EXEC([ovn-chassis-1], [sw0p1], [ping -q -c 3 -i 0.3 -w 2 20.0.0.3 | F
3 packets transmitted, 3 received, 0% packet loss, time 0ms
])
+check multinode_nbctl lsp-set-addresses sw1-port1 unknown
+m_wait_for_ports_up sw1-port1
+
+M_NS_CHECK_EXEC([ovn-chassis-1], [sw0p1], [ping -q -c 3 -i 0.3 -w 2 20.0.0.3 | FORMAT_PING], \
+[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+
AT_CLEANUP
AT_SETUP([ovn multinode pmtu - distributed router - geneve])
@@ -6931,6 +6931,9 @@ ct_dnat /* assuming no un-dnat entry, so no change */ {
arp.op = 1;
output("rtr-ls");
};
+ ct_dnat /* assuming no un-dnat entry, so no change */ {
+ output("rtr-ls");
+ };
};
};
])
@@ -1529,11 +1529,11 @@ clone { ip4.dst = 255.255.255.255; output; }; next;
# arp
arp { eth.dst = ff:ff:ff:ff:ff:ff; output; }; output;
- encodes as controller(userdata=00.00.00.00.00.00.00.00.00.19.00.10.80.00.06.06.ff.ff.ff.ff.ff.ff.00.00.ff.ff.00.10.00.00.23.20.00.0e.ff.f8.40.00.00.00),resubmit(,64)
+ encodes as controller(userdata=00.00.00.00.00.00.00.00.00.19.00.10.80.00.06.06.ff.ff.ff.ff.ff.ff.00.00.ff.ff.00.10.00.00.23.20.00.0e.ff.f8.40.00.00.00,pause),resubmit(,64)
has prereqs ip4
arp { };
formats as arp { drop; };
- encodes as controller(userdata=00.00.00.00.00.00.00.00)
+ encodes as controller(userdata=00.00.00.00.00.00.00.00,pause)
has prereqs ip4
# get_arp
@@ -1663,12 +1663,12 @@ reg1[0] = put_dhcp_opts(offerip=1.2.3.4, domain_search_list=1.2.3.4);
# nd_ns
nd_ns { nd.target = xxreg0; output; };
- encodes as controller(userdata=00.00.00.09.00.00.00.00.00.1c.00.18.00.80.00.00.00.00.00.00.00.01.de.10.80.00.3e.10.00.00.00.00.ff.ff.00.10.00.00.23.20.00.0e.ff.f8.40.00.00.00)
+ encodes as controller(userdata=00.00.00.09.00.00.00.00.00.1c.00.18.00.80.00.00.00.00.00.00.00.01.de.10.80.00.3e.10.00.00.00.00.ff.ff.00.10.00.00.23.20.00.0e.ff.f8.40.00.00.00,pause)
has prereqs ip6
nd_ns { };
formats as nd_ns { drop; };
- encodes as controller(userdata=00.00.00.09.00.00.00.00)
+ encodes as controller(userdata=00.00.00.09.00.00.00.00,pause)
has prereqs ip6
# nd_na
@@ -12413,3 +12413,98 @@ OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
/connection dropped.*/d"])
AT_CLEANUP
])
+
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([LB with first packet buffered])
+AT_KEYWORDS([ovnlb])
+
+CHECK_CONNTRACK()
+CHECK_CONNTRACK_NAT()
+
+ovn_start
+OVS_TRAFFIC_VSWITCHD_START()
+ADD_BR([br-int])
+ADD_BR([br-ext])
+
+check ovs-ofctl add-flow br-ext action=normal
+# Set external-ids in br-int needed for ovn-controller
+check ovs-vsctl \
+ -- set Open_vSwitch . external-ids:system-id=hv1 \
+ -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \
+ -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \
+ -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \
+ -- set bridge br-int fail-mode=secure other-config:disable-in-band=true \
+ -- set Open_vSwitch . external-ids:ovn-bridge-mappings=phynet:br-ext
+
+# Start ovn-controller
+start_daemon ovn-controller
+
+check ovn-nbctl lr-add lr
+check ovn-nbctl ls-add internal
+check ovn-nbctl ls-add public
+
+check ovn-nbctl lrp-add lr lr-pub 00:00:01:01:02:03 192.168.100.1/24 1000::1/64
+check ovn-nbctl lsp-add public pub-lr -- set Logical_Switch_Port pub-lr \
+ type=router options:router-port=lr-pub addresses=\"00:00:01:01:02:03\"
+
+check ovn-nbctl lrp-add lr lr-internal 00:00:01:01:02:04 192.168.200.1/24 2000::1/64
+check ovn-nbctl lsp-add internal internal-lr -- set Logical_Switch_Port internal-lr \
+ type=router options:router-port=lr-internal addresses=\"00:00:01:01:02:04\"
+
+check ovn-nbctl lsp-add internal server -- lsp-set-addresses server "unknown"
+
+ovn-nbctl lsp-add public ln_port \
+ -- lsp-set-addresses ln_port unknown \
+ -- lsp-set-type ln_port localnet \
+ -- lsp-set-options ln_port network_name=phynet
+
+check ovn-nbctl set logical_router lr options:chassis=hv1
+
+check ovn-nbctl lb-add lb1 192.168.100.20 192.168.200.10
+check ovn-nbctl lb-add lb2 1000::20 2000::10
+check ovn-nbctl lr-lb-add lr lb1
+check ovn-nbctl lr-lb-add lr lb2
+check ovn-nbctl --wait=hv sync
+
+ADD_NAMESPACES(client)
+ADD_VETH(client, client, br-ext, "1000::10/64", "f0:00:00:01:02:03", \
+ "1000::1", "nodad", "192.168.100.10/24", "192.168.100.1")
+
+ADD_NAMESPACES(server)
+ADD_VETH(server, server, br-int, "2000::10/64", "f0:00:0f:01:02:03", \
+ "2000::1", "nodad", "192.168.200.10/24", "192.168.200.1")
+
+NETNS_DAEMONIZE([server], [nc -l -u 192.168.200.10 4242 > /dev/null], [serverv4.pid])
+NETNS_DAEMONIZE([server], [nc -l -u 2000::10 4243 > /dev/null], [serverv6.pid])
+
+NETNS_START_TCPDUMP([client], [-l -U -i client -vnne udp], [client])
+NETNS_START_TCPDUMP([server], [-l -U -i server -vnne udp], [server])
+
+check ovs-appctl dpctl/flush-conntrack
+
+NS_CHECK_EXEC([client], [nc -z -u 192.168.100.20 4242], [ignore], [ignore], [ignore])
+OVS_WAIT_UNTIL([grep -q "192.168.200.10" server.tcpdump])
+
+NS_CHECK_EXEC([client], [nc -z -u 1000::20 4243])
+OVS_WAIT_UNTIL([grep -q "2000::10" server.tcpdump])
+
+zone_id=$(ovn-appctl -t ovn-controller ct-zone-list | grep lr_dnat | cut -d ' ' -f2)
+AT_CHECK([ovs-appctl dpctl/dump-conntrack | grep -c "zone=$zone_id"], [0], [2
+])
+
+OVS_APP_EXIT_AND_WAIT([ovn-controller])
+
+as ovn-sb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as ovn-nb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as northd
+OVS_APP_EXIT_AND_WAIT([ovn-northd])
+
+as
+OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
+/connection dropped.*/d"])
+AT_CLEANUP
+])
@@ -1359,6 +1359,7 @@ test_parse_actions(struct ovs_cmdl_context *ctx OVS_UNUSED)
.group_table = &group_table,
.meter_table = &meter_table,
.collector_ids = &collector_ids,
+ .explicit_arp_ns_output = true,
.pipeline = OVNACT_P_INGRESS,
.ingress_ptable = OFTABLE_LOG_INGRESS_PIPELINE,