@@ -105,6 +105,102 @@ netdev_flow_key_gen_masks(const struct netdev_flow_key *tbl,
bool dpcls_rule_matches_key(const struct dpcls_rule *rule,
const struct netdev_flow_key *target);
+
+
+/* Contained by struct dp_netdev_flow's 'stats' member. */
+struct dp_netdev_flow_stats {
+ atomic_llong used; /* Last used time, in monotonic msecs. */
+ atomic_ullong packet_count; /* Number of packets matched. */
+ atomic_ullong byte_count; /* Number of bytes matched. */
+ atomic_uint16_t tcp_flags; /* Bitwise-OR of seen tcp_flags values. */
+};
+
+/* Contained by struct dp_netdev_flow's 'last_attrs' member. */
+struct dp_netdev_flow_attrs {
+ atomic_bool offloaded; /* True if flow is offloaded to HW. */
+ ATOMIC(const char *) dp_layer; /* DP layer the flow is handled in. */
+};
+
+/* A flow in 'dp_netdev_pmd_thread's 'flow_table'.
+ *
+ *
+ * Thread-safety
+ * =============
+ *
+ * Except near the beginning or ending of its lifespan, rule 'rule' belongs to
+ * its pmd thread's classifier. The text below calls this classifier 'cls'.
+ *
+ * Motivation
+ * ----------
+ *
+ * The thread safety rules described here for "struct dp_netdev_flow" are
+ * motivated by two goals:
+ *
+ * - Prevent threads that read members of "struct dp_netdev_flow" from
+ * reading bad data due to changes by some thread concurrently modifying
+ * those members.
+ *
+ * - Prevent two threads making changes to members of a given "struct
+ * dp_netdev_flow" from interfering with each other.
+ *
+ *
+ * Rules
+ * -----
+ *
+ * A flow 'flow' may be accessed without a risk of being freed during an RCU
+ * grace period. Code that needs to hold onto a flow for a while
+ * should try incrementing 'flow->ref_cnt' with dp_netdev_flow_ref().
+ *
+ * 'flow->ref_cnt' protects 'flow' from being freed. It doesn't protect the
+ * flow from being deleted from 'cls' and it doesn't protect members of 'flow'
+ * from modification.
+ *
+ * Some members, marked 'const', are immutable. Accessing other members
+ * requires synchronization, as noted in more detail below.
+ */
+struct dp_netdev_flow {
+ const struct flow flow; /* Unmasked flow that created this entry. */
+ /* Hash table index by unmasked flow. */
+ const struct cmap_node node; /* In owning dp_netdev_pmd_thread's */
+ /* 'flow_table'. */
+ const struct cmap_node mark_node; /* In owning flow_mark's mark_to_flow */
+ const ovs_u128 ufid; /* Unique flow identifier. */
+ const ovs_u128 mega_ufid; /* Unique mega flow identifier. */
+ const unsigned pmd_id; /* The 'core_id' of pmd thread owning this */
+ /* flow. */
+
+ /* Number of references.
+ * The classifier owns one reference.
+ * Any thread trying to keep a rule from being freed should hold its own
+ * reference. */
+ struct ovs_refcount ref_cnt;
+
+ bool dead;
+ uint32_t mark; /* Unique flow mark assigned to a flow */
+
+ /* Statistics. */
+ struct dp_netdev_flow_stats stats;
+
+ /* Statistics and attributes received from the netdev offload provider. */
+ atomic_int netdev_flow_get_result;
+ struct dp_netdev_flow_stats last_stats;
+ struct dp_netdev_flow_attrs last_attrs;
+
+ /* Actions. */
+ OVSRCU_TYPE(struct dp_netdev_actions *) actions;
+
+ /* While processing a group of input packets, the datapath uses the next
+ * member to store a pointer to the output batch for the flow. It is
+ * reset after the batch has been sent out (See dp_netdev_queue_batches(),
+ * packet_batch_per_flow_init() and packet_batch_per_flow_execute()). */
+ struct packet_batch_per_flow *batch;
+
+ /* Packet classification. */
+ char *dp_extra_info; /* String to return in a flow dump/get. */
+ struct dpcls_rule cr; /* In owning dp_netdev's 'cls'. */
+ /* 'cr' must be the last member. */
+};
+
#ifdef __cplusplus
}
#endif
@@ -401,100 +401,6 @@ struct dp_netdev_port {
char *rxq_affinity_list; /* Requested affinity of rx queues. */
};
-/* Contained by struct dp_netdev_flow's 'stats' member. */
-struct dp_netdev_flow_stats {
- atomic_llong used; /* Last used time, in monotonic msecs. */
- atomic_ullong packet_count; /* Number of packets matched. */
- atomic_ullong byte_count; /* Number of bytes matched. */
- atomic_uint16_t tcp_flags; /* Bitwise-OR of seen tcp_flags values. */
-};
-
-/* Contained by struct dp_netdev_flow's 'last_attrs' member. */
-struct dp_netdev_flow_attrs {
- atomic_bool offloaded; /* True if flow is offloaded to HW. */
- ATOMIC(const char *) dp_layer; /* DP layer the flow is handled in. */
-};
-
-/* A flow in 'dp_netdev_pmd_thread's 'flow_table'.
- *
- *
- * Thread-safety
- * =============
- *
- * Except near the beginning or ending of its lifespan, rule 'rule' belongs to
- * its pmd thread's classifier. The text below calls this classifier 'cls'.
- *
- * Motivation
- * ----------
- *
- * The thread safety rules described here for "struct dp_netdev_flow" are
- * motivated by two goals:
- *
- * - Prevent threads that read members of "struct dp_netdev_flow" from
- * reading bad data due to changes by some thread concurrently modifying
- * those members.
- *
- * - Prevent two threads making changes to members of a given "struct
- * dp_netdev_flow" from interfering with each other.
- *
- *
- * Rules
- * -----
- *
- * A flow 'flow' may be accessed without a risk of being freed during an RCU
- * grace period. Code that needs to hold onto a flow for a while
- * should try incrementing 'flow->ref_cnt' with dp_netdev_flow_ref().
- *
- * 'flow->ref_cnt' protects 'flow' from being freed. It doesn't protect the
- * flow from being deleted from 'cls' and it doesn't protect members of 'flow'
- * from modification.
- *
- * Some members, marked 'const', are immutable. Accessing other members
- * requires synchronization, as noted in more detail below.
- */
-struct dp_netdev_flow {
- const struct flow flow; /* Unmasked flow that created this entry. */
- /* Hash table index by unmasked flow. */
- const struct cmap_node node; /* In owning dp_netdev_pmd_thread's */
- /* 'flow_table'. */
- const struct cmap_node mark_node; /* In owning flow_mark's mark_to_flow */
- const ovs_u128 ufid; /* Unique flow identifier. */
- const ovs_u128 mega_ufid; /* Unique mega flow identifier. */
- const unsigned pmd_id; /* The 'core_id' of pmd thread owning this */
- /* flow. */
-
- /* Number of references.
- * The classifier owns one reference.
- * Any thread trying to keep a rule from being freed should hold its own
- * reference. */
- struct ovs_refcount ref_cnt;
-
- bool dead;
- uint32_t mark; /* Unique flow mark assigned to a flow */
-
- /* Statistics. */
- struct dp_netdev_flow_stats stats;
-
- /* Statistics and attributes received from the netdev offload provider. */
- atomic_int netdev_flow_get_result;
- struct dp_netdev_flow_stats last_stats;
- struct dp_netdev_flow_attrs last_attrs;
-
- /* Actions. */
- OVSRCU_TYPE(struct dp_netdev_actions *) actions;
-
- /* While processing a group of input packets, the datapath uses the next
- * member to store a pointer to the output batch for the flow. It is
- * reset after the batch has been sent out (See dp_netdev_queue_batches(),
- * packet_batch_per_flow_init() and packet_batch_per_flow_execute()). */
- struct packet_batch_per_flow *batch;
-
- /* Packet classification. */
- char *dp_extra_info; /* String to return in a flow dump/get. */
- struct dpcls_rule cr; /* In owning dp_netdev's 'cls'. */
- /* 'cr' must be the last member. */
-};
-
static void dp_netdev_flow_unref(struct dp_netdev_flow *);
static bool dp_netdev_flow_ref(struct dp_netdev_flow *);
static int dpif_netdev_flow_from_nlattrs(const struct nlattr *, uint32_t,
This commit moves the dp_netdev_flow struct from dpif-netdev.c to the
dpif-netdev-private.h file. No code changes are made.

Signed-off-by: Harry van Haaren <harry.van.haaren@intel.com>
---
 lib/dpif-netdev-private.h | 96 +++++++++++++++++++++++++++++++++++++++
 lib/dpif-netdev.c         | 94 --------------------------------------
 2 files changed, 96 insertions(+), 94 deletions(-)