@@ -501,6 +501,84 @@ ctx_cancel_freeze(struct xlate_ctx *ctx)
static void finish_freezing(struct xlate_ctx *ctx);
+/* This structure and its helper functions save stack space in actions that
+ * need to retain a large amount of xlate_ctx state. */
+struct xretained_state {
+ union mf_subvalue new_stack[1024 / sizeof(union mf_subvalue)];
+ uint64_t actset_stub[1024 / 8];
+ struct ofpbuf old_stack;
+ struct ofpbuf old_action_set;
+ struct flow old_flow;
+ struct flow old_base;
+ struct flow_tnl flow_tnl_mask;
+};
+
+/* The returned structure is heap-allocated and must be released with
+ * xretain_state_restore_and_free(). */
+static struct xretained_state *
+xretain_state_save(struct xlate_ctx *ctx)
+{
+ struct xretained_state *retained = xmalloc(sizeof *retained);
+
+ retained->old_flow = ctx->xin->flow;
+ retained->old_stack = ctx->stack;
+ retained->old_action_set = ctx->action_set;
+ ofpbuf_use_stub(&ctx->stack, retained->new_stack,
+ sizeof retained->new_stack);
+ ofpbuf_use_stub(&ctx->action_set, retained->actset_stub,
+ sizeof retained->actset_stub);
+
+ return retained;
+}
+
+static void
+xretain_tunnel_mask_save(const struct xlate_ctx *ctx,
+ struct xretained_state *retained)
+{
+ retained->flow_tnl_mask = ctx->wc->masks.tunnel;
+}
+
+static void
+xretain_base_flow_save(const struct xlate_ctx *ctx,
+ struct xretained_state *retained)
+{
+ retained->old_base = ctx->base_flow;
+}
+
+static void
+xretain_base_flow_restore(struct xlate_ctx *ctx,
+ const struct xretained_state *retained)
+{
+ ctx->base_flow = retained->old_base;
+}
+
+static void
+xretain_flow_restore(struct xlate_ctx *ctx,
+ const struct xretained_state *retained)
+{
+ ctx->xin->flow = retained->old_flow;
+}
+
+static void
+xretain_tunnel_mask_restore(struct xlate_ctx *ctx,
+ const struct xretained_state *retained)
+{
+ ctx->wc->masks.tunnel = retained->flow_tnl_mask;
+}
+
+static void
+xretain_state_restore_and_free(struct xlate_ctx *ctx,
+ struct xretained_state *retained)
+{
+ ctx->xin->flow = retained->old_flow;
+ ofpbuf_uninit(&ctx->action_set);
+ ctx->action_set = retained->old_action_set;
+ ofpbuf_uninit(&ctx->stack);
+ ctx->stack = retained->old_stack;
+
+ free(retained);
+}
+
/* A controller may use OFPP_NONE as the ingress port to indicate that
* it did not arrive on a "real" port. 'ofpp_none_bundle' exists for
* when an input bundle is needed for validation (e.g., mirroring or
@@ -3915,20 +3993,17 @@ static void
patch_port_output(struct xlate_ctx *ctx, const struct xport *in_dev,
struct xport *out_dev, bool is_last_action)
{
+ bool old_was_mpls = ctx->was_mpls;
struct flow *flow = &ctx->xin->flow;
- struct flow old_flow = ctx->xin->flow;
- struct flow_tnl old_flow_tnl_wc = ctx->wc->masks.tunnel;
bool old_conntrack = ctx->conntracked;
- bool old_was_mpls = ctx->was_mpls;
- ovs_version_t old_version = ctx->xin->tables_version;
- struct ofpbuf old_stack = ctx->stack;
- uint8_t new_stack[1024];
- struct ofpbuf old_action_set = ctx->action_set;
+ struct xretained_state *retained_state;
struct ovs_list *old_trace = ctx->xin->trace;
- uint64_t actset_stub[1024 / 8];
+ ovs_version_t old_version = ctx->xin->tables_version;
+
+ retained_state = xretain_state_save(ctx);
+
+ xretain_tunnel_mask_save(ctx, retained_state);
- ofpbuf_use_stub(&ctx->stack, new_stack, sizeof new_stack);
- ofpbuf_use_stub(&ctx->action_set, actset_stub, sizeof actset_stub);
flow->in_port.ofp_port = out_dev->ofp_port;
flow->metadata = htonll(0);
memset(&flow->tunnel, 0, sizeof flow->tunnel);
@@ -3967,14 +4042,15 @@ patch_port_output(struct xlate_ctx *ctx, const struct xport *in_dev,
} else {
/* Forwarding is disabled by STP and RSTP. Let OFPP_NORMAL and
* the learning action look at the packet, then drop it. */
- struct flow old_base_flow = ctx->base_flow;
size_t old_size = ctx->odp_actions->size;
+
+ xretain_base_flow_save(ctx, retained_state);
mirror_mask_t old_mirrors2 = ctx->mirrors;
xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true,
false, is_last_action, clone_xlate_actions);
ctx->mirrors = old_mirrors2;
- ctx->base_flow = old_base_flow;
+ xretain_base_flow_restore(ctx, retained_state);
ctx->odp_actions->size = old_size;
/* Undo changes that may have been done for freezing. */
@@ -3986,18 +4062,15 @@ patch_port_output(struct xlate_ctx *ctx, const struct xport *in_dev,
if (independent_mirrors) {
ctx->mirrors = old_mirrors;
}
- ctx->xin->flow = old_flow;
ctx->xbridge = in_dev->xbridge;
- ofpbuf_uninit(&ctx->action_set);
- ctx->action_set = old_action_set;
- ofpbuf_uninit(&ctx->stack);
- ctx->stack = old_stack;
/* Restore calling bridge's lookup version. */
ctx->xin->tables_version = old_version;
- /* Restore to calling bridge tunneling information */
- ctx->wc->masks.tunnel = old_flow_tnl_wc;
+    /* Restore the calling bridge's tunnel information, the ctx flow, action
+     * set and stack, then free the retained state. */
+ xretain_tunnel_mask_restore(ctx, retained_state);
+ xretain_state_restore_and_free(ctx, retained_state);
/* The out bridge popping MPLS should have no effect on the original
* bridge. */
@@ -4247,7 +4320,7 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
const struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port);
struct flow_wildcards *wc = ctx->wc;
struct flow *flow = &ctx->xin->flow;
- struct flow_tnl flow_tnl;
+ struct flow_tnl *flow_tnl = NULL;
union flow_vlan_hdr flow_vlans[FLOW_MAX_VLAN_HEADERS];
uint8_t flow_nw_tos;
odp_port_t out_port, odp_port, odp_tnl_port;
@@ -4261,7 +4334,6 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
/* If 'struct flow' gets additional metadata, we'll need to zero it out
* before traversing a patch port. */
BUILD_ASSERT_DECL(FLOW_WC_SEQ == 42);
- memset(&flow_tnl, 0, sizeof flow_tnl);
if (!check_output_prerequisites(ctx, xport, flow, check_stp)) {
return;
@@ -4305,7 +4377,7 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
* the Logical (tunnel) Port are not visible for any further
* matches, while explicit set actions on tunnel metadata are.
*/
- flow_tnl = flow->tunnel;
+ flow_tnl = xmemdup(&flow->tunnel, sizeof *flow_tnl);
odp_port = tnl_port_send(xport->ofport, flow, ctx->wc);
if (odp_port == ODPP_NONE) {
xlate_report(ctx, OFT_WARN, "Tunneling decided against output");
@@ -4336,7 +4408,7 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
tnl_type = tnl_port_get_type(xport->ofport);
commit_odp_tunnel_action(flow, &ctx->base_flow,
ctx->odp_actions, tnl_type);
- flow->tunnel = flow_tnl; /* Restore tunnel metadata */
+ flow->tunnel = *flow_tnl; /* Restore tunnel metadata. */
}
} else {
odp_port = xport->odp_port;
@@ -4380,7 +4452,8 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
/* Output to native tunnel port. */
native_tunnel_output(ctx, xport, flow, odp_port, truncate,
is_last_action);
- flow->tunnel = flow_tnl; /* Restore tunnel metadata */
+ ovs_assert(flow_tnl);
+ flow->tunnel = *flow_tnl; /* Restore tunnel metadata. */
} else if (terminate_native_tunnel(ctx, xport, flow, wc,
&odp_tnl_port)) {
@@ -4423,7 +4496,7 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
xport->xbundle));
}
- out:
+out:
/* Restore flow */
memcpy(flow->vlans, flow_vlans, sizeof flow->vlans);
flow->nw_tos = flow_nw_tos;
@@ -4431,6 +4504,7 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
flow->dl_src = flow_dl_src;
flow->packet_type = flow_packet_type;
flow->dl_type = flow_dl_type;
+ free(flow_tnl);
}
static void
@@ -5409,15 +5483,15 @@ xlate_output_reg_action(struct xlate_ctx *ctx,
{
uint64_t port = mf_get_subfield(&or->src, &ctx->xin->flow);
if (port <= UINT16_MAX) {
- xlate_report(ctx, OFT_DETAIL, "output port is %"PRIu64, port);
-
- union mf_subvalue value;
+ union mf_subvalue *value = xmalloc(sizeof *value);
- memset(&value, 0xff, sizeof value);
- mf_write_subfield_flow(&or->src, &value, &ctx->wc->masks);
+ xlate_report(ctx, OFT_DETAIL, "output port is %"PRIu64, port);
+ memset(value, 0xff, sizeof *value);
+ mf_write_subfield_flow(&or->src, value, &ctx->wc->masks);
xlate_output_action(ctx, u16_to_ofp(port), or->max_len,
false, is_last_action, false,
group_bucket_action);
+ free(value);
} else {
xlate_report(ctx, OFT_WARN, "output port %"PRIu64" is out of range",
port);
@@ -5758,13 +5832,15 @@ xlate_sample_action(struct xlate_ctx *ctx,
struct flow *flow = &ctx->xin->flow;
tnl_port_send(xport->ofport, flow, ctx->wc);
if (!ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
- struct flow_tnl flow_tnl = flow->tunnel;
+ struct flow_tnl *flow_tnl;
const char *tnl_type;
+ flow_tnl = xmemdup(&flow->tunnel, sizeof *flow_tnl);
tnl_type = tnl_port_get_type(xport->ofport);
commit_odp_tunnel_action(flow, &ctx->base_flow,
ctx->odp_actions, tnl_type);
- flow->tunnel = flow_tnl;
+ flow->tunnel = *flow_tnl;
+ free(flow_tnl);
}
} else {
xlate_report_error(ctx,
@@ -5874,21 +5950,12 @@ clone_xlate_actions(const struct ofpact *actions, size_t actions_len,
struct xlate_ctx *ctx, bool is_last_action,
bool group_bucket_action OVS_UNUSED)
{
- struct ofpbuf old_stack = ctx->stack;
- union mf_subvalue new_stack[1024 / sizeof(union mf_subvalue)];
- ofpbuf_use_stub(&ctx->stack, new_stack, sizeof new_stack);
- ofpbuf_put(&ctx->stack, old_stack.data, old_stack.size);
-
- struct ofpbuf old_action_set = ctx->action_set;
- uint64_t actset_stub[1024 / 8];
- ofpbuf_use_stub(&ctx->action_set, actset_stub, sizeof actset_stub);
- ofpbuf_put(&ctx->action_set, old_action_set.data, old_action_set.size);
-
+ struct xretained_state *retained_state;
size_t offset, ac_offset;
- struct flow old_flow = ctx->xin->flow;
+
+ retained_state = xretain_state_save(ctx);
if (reversible_actions(actions, actions_len) || is_last_action) {
- old_flow = ctx->xin->flow;
do_xlate_actions(actions, actions_len, ctx, is_last_action, false);
if (!ctx->freezing) {
xlate_action_set(ctx);
@@ -5903,7 +5970,8 @@ clone_xlate_actions(const struct ofpact *actions, size_t actions_len,
* avoid emitting those actions twice. Once inside
* the clone, another time for the action after clone. */
xlate_commit_actions(ctx);
- struct flow old_base = ctx->base_flow;
+ xretain_base_flow_save(ctx, retained_state);
+
bool old_was_mpls = ctx->was_mpls;
bool old_conntracked = ctx->conntracked;
@@ -5960,14 +6028,10 @@ dp_clone_done:
ctx->was_mpls = old_was_mpls;
/* Restore the 'base_flow' for the next action. */
- ctx->base_flow = old_base;
+ xretain_base_flow_restore(ctx, retained_state);
xlate_done:
- ofpbuf_uninit(&ctx->action_set);
- ctx->action_set = old_action_set;
- ofpbuf_uninit(&ctx->stack);
- ctx->stack = old_stack;
- ctx->xin->flow = old_flow;
+ xretain_state_restore_and_free(ctx, retained_state);
}
static void
@@ -6343,8 +6407,8 @@ compose_conntrack_action(struct xlate_ctx *ctx, struct ofpact_conntrack *ofc,
{
uint16_t zone;
if (ofc->zone_src.field) {
- union mf_subvalue value;
- memset(&value, 0xff, sizeof(value));
+ union mf_subvalue *value = xmalloc(sizeof *value);
+ memset(value, 0xff, sizeof(*value));
zone = mf_get_subfield(&ofc->zone_src, &ctx->xin->flow);
if (ctx->xin->frozen_state) {
@@ -6354,12 +6418,13 @@ compose_conntrack_action(struct xlate_ctx *ctx, struct ofpact_conntrack *ofc,
* which will invalidate the megaflow with old the recirc_id.
*/
if (!mf_is_frozen_metadata(ofc->zone_src.field)) {
- mf_write_subfield_flow(&ofc->zone_src, &value,
+ mf_write_subfield_flow(&ofc->zone_src, value,
&ctx->wc->masks);
}
} else {
- mf_write_subfield_flow(&ofc->zone_src, &value, &ctx->wc->masks);
+ mf_write_subfield_flow(&ofc->zone_src, value, &ctx->wc->masks);
}
+ free(value);
} else {
zone = ofc->zone_imm;
}
@@ -6449,16 +6514,16 @@ xlate_check_pkt_larger(struct xlate_ctx *ctx,
const struct ofpact *remaining_acts,
size_t remaining_acts_len)
{
- union mf_subvalue value;
- memset(&value, 0, sizeof value);
+ union mf_subvalue *value = xmalloc(sizeof *value);
+ memset(value, 0, sizeof *value);
if (!ctx->xbridge->support.check_pkt_len) {
uint8_t is_pkt_larger = 0;
if (ctx->xin->packet) {
is_pkt_larger =
dp_packet_size(ctx->xin->packet) > check_pkt_larger->pkt_len;
}
- value.u8_val = is_pkt_larger;
- mf_write_subfield_flow(&check_pkt_larger->dst, &value,
+ value->u8_val = is_pkt_larger;
+ mf_write_subfield_flow(&check_pkt_larger->dst, value,
&ctx->xin->flow);
/* If datapath doesn't support check_pkt_len action, then set the
* SLOW_ACTION flag. If we don't set SLOW_ACTION, we
@@ -6468,22 +6533,17 @@ xlate_check_pkt_larger(struct xlate_ctx *ctx,
* the packet length. This results in wrong actions being applied.
*/
ctx->xout->slow |= SLOW_ACTION;
+ free(value);
return;
}
- struct ofpbuf old_stack = ctx->stack;
- union mf_subvalue new_stack[1024 / sizeof(union mf_subvalue)];
- ofpbuf_use_stub(&ctx->stack, new_stack, sizeof new_stack);
- ofpbuf_put(&ctx->stack, old_stack.data, old_stack.size);
+ struct xretained_state *retained_state;
- struct ofpbuf old_action_set = ctx->action_set;
- uint64_t actset_stub[1024 / 8];
- ofpbuf_use_stub(&ctx->action_set, actset_stub, sizeof actset_stub);
- ofpbuf_put(&ctx->action_set, old_action_set.data, old_action_set.size);
+ retained_state = xretain_state_save(ctx);
- struct flow old_flow = ctx->xin->flow;
xlate_commit_actions(ctx);
- struct flow old_base = ctx->base_flow;
+ xretain_base_flow_save(ctx, retained_state);
+
bool old_was_mpls = ctx->was_mpls;
bool old_conntracked = ctx->conntracked;
@@ -6493,8 +6553,8 @@ xlate_check_pkt_larger(struct xlate_ctx *ctx,
check_pkt_larger->pkt_len);
size_t offset_attr = nl_msg_start_nested(
ctx->odp_actions, OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER);
- value.u8_val = 1;
- mf_write_subfield_flow(&check_pkt_larger->dst, &value, &ctx->xin->flow);
+ value->u8_val = 1;
+ mf_write_subfield_flow(&check_pkt_larger->dst, value, &ctx->xin->flow);
do_xlate_actions(remaining_acts, remaining_acts_len, ctx, true, false);
if (!ctx->freezing) {
xlate_action_set(ctx);
@@ -6504,10 +6564,10 @@ xlate_check_pkt_larger(struct xlate_ctx *ctx,
}
nl_msg_end_nested(ctx->odp_actions, offset_attr);
- ctx->base_flow = old_base;
+ xretain_base_flow_restore(ctx, retained_state);
+ xretain_flow_restore(ctx, retained_state);
ctx->was_mpls = old_was_mpls;
ctx->conntracked = old_conntracked;
- ctx->xin->flow = old_flow;
/* If the flow translation for the IF_GREATER case requires freezing,
* then ctx->exit would be true. Reset to false so that we can
@@ -6518,8 +6578,8 @@ xlate_check_pkt_larger(struct xlate_ctx *ctx,
offset_attr = nl_msg_start_nested(
ctx->odp_actions, OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL);
- value.u8_val = 0;
- mf_write_subfield_flow(&check_pkt_larger->dst, &value, &ctx->xin->flow);
+ value->u8_val = 0;
+ mf_write_subfield_flow(&check_pkt_larger->dst, value, &ctx->xin->flow);
do_xlate_actions(remaining_acts, remaining_acts_len, ctx, true, false);
if (!ctx->freezing) {
xlate_action_set(ctx);
@@ -6530,15 +6590,12 @@ xlate_check_pkt_larger(struct xlate_ctx *ctx,
nl_msg_end_nested(ctx->odp_actions, offset_attr);
nl_msg_end_nested(ctx->odp_actions, offset);
- ofpbuf_uninit(&ctx->action_set);
- ctx->action_set = old_action_set;
- ofpbuf_uninit(&ctx->stack);
- ctx->stack = old_stack;
- ctx->base_flow = old_base;
ctx->was_mpls = old_was_mpls;
ctx->conntracked = old_conntracked;
- ctx->xin->flow = old_flow;
ctx->exit = old_exit;
+ xretain_base_flow_restore(ctx, retained_state);
+ xretain_state_restore_and_free(ctx, retained_state);
+ free(value);
}
static void
@@ -6989,6 +7046,31 @@ xlate_ofpact_unroll_xlate(struct xlate_ctx *ctx,
"cookie=%#"PRIx64, a->rule_table_id, a->rule_cookie);
}
+static void
+xlate_trace(struct xlate_ctx *ctx, const struct ofpact *a)
+{
+ struct ofputil_port_map *map;
+
+ map = xmalloc(sizeof *map);
+ ofputil_port_map_init(map);
+
+ if (ctx->xin->names) {
+ struct ofproto_dpif *ofprotop;
+
+ ofprotop = ofproto_dpif_lookup_by_name(ctx->xbridge->name);
+ ofproto_append_ports_to_map(map, ofprotop->up.ports);
+ }
+
+ struct ds s = DS_EMPTY_INITIALIZER;
+ struct ofpact_format_params fp = { .s = &s, .port_map = map };
+
+ ofpacts_format(a, OFPACT_ALIGN(a->len), &fp);
+ xlate_report(ctx, OFT_ACTION, "%s", ds_cstr(&s));
+ ds_destroy(&s);
+ ofputil_port_map_destroy(map);
+ free(map);
+}
+
static void
do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
struct xlate_ctx *ctx, bool is_last_action,
@@ -7031,20 +7113,7 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
}
if (OVS_UNLIKELY(ctx->xin->trace)) {
- struct ofputil_port_map map = OFPUTIL_PORT_MAP_INITIALIZER(&map);
-
- if (ctx->xin->names) {
- struct ofproto_dpif *ofprotop;
- ofprotop = ofproto_dpif_lookup_by_name(ctx->xbridge->name);
- ofproto_append_ports_to_map(&map, ofprotop->up.ports);
- }
-
- struct ds s = DS_EMPTY_INITIALIZER;
- struct ofpact_format_params fp = { .s = &s, .port_map = &map };
- ofpacts_format(a, OFPACT_ALIGN(a->len), &fp);
- xlate_report(ctx, OFT_ACTION, "%s", ds_cstr(&s));
- ds_destroy(&s);
- ofputil_port_map_destroy(&map);
+ xlate_trace(ctx, a);
}
switch (a->type) {