@@ -83,7 +83,10 @@ dp_netdev_input_outer_avx512(struct dp_netdev_pmd_thread *pmd,
/* Check if EMC or SMC are enabled */
struct dfc_cache *cache = &pmd->flow_cache;
const uint32_t emc_enabled = pmd->ctx.emc_insert_min != 0;
+ bool smc_enable_db = pmd->ctx.smc_enable_db;
+
uint32_t emc_hits = 0;
+ uint32_t smc_hits = 0;
/* a 1 bit in this mask indidcates a hit, so no DPCLS lookup on the pkt. */
uint32_t hwol_emc_smc_hitmask = 0;
@@ -113,8 +116,11 @@ dp_netdev_input_outer_avx512(struct dp_netdev_pmd_thread *pmd,
*/
key->hash = dpif_netdev_packet_get_rss_hash_orig_pkt(packet, &key->mf);
+ struct dp_netdev_flow *f = NULL;
+
if (emc_enabled) {
- struct dp_netdev_flow *f = emc_lookup(&cache->emc_cache, key);
+ f = emc_lookup(&cache->emc_cache, key);
+
if (f) {
rules[i] = &f->cr;
emc_hits++;
@@ -123,6 +129,16 @@ dp_netdev_input_outer_avx512(struct dp_netdev_pmd_thread *pmd,
}
};
+ if (smc_enable_db && !f) {
+ f = smc_lookup_single(pmd, packet, key);
+ if (f) {
+ rules[i] = &f->cr;
+ smc_hits++;
+ hwol_emc_smc_hitmask |= (1 << i);
+ continue;
+ }
+ }
+
/* The flow pointer was not found in HWOL/EMC/SMC, so add it to the
* dpcls input keys array for batch lookup later.
*/
@@ -175,6 +191,7 @@ dp_netdev_input_outer_avx512(struct dp_netdev_pmd_thread *pmd,
/* At this point we don't return error anymore, so commit stats here. */
pmd_perf_update_counter(&pmd->perf_stats, PMD_STAT_RECV, batch_size);
pmd_perf_update_counter(&pmd->perf_stats, PMD_STAT_EXACT_HIT, emc_hits);
+ pmd_perf_update_counter(&pmd->perf_stats, PMD_STAT_SMC_HIT, smc_hits);
pmd_perf_update_counter(&pmd->perf_stats, PMD_STAT_MASKED_HIT,
dpcls_key_idx);
pmd_perf_update_counter(&pmd->perf_stats, PMD_STAT_MASKED_LOOKUP,
@@ -81,6 +81,9 @@ extern "C" {
#define DEFAULT_EM_FLOW_INSERT_MIN (UINT32_MAX / \
DEFAULT_EM_FLOW_INSERT_INV_PROB)
+/* Forward declaration for SMC function prototype. */
+struct dp_netdev_pmd_thread;
+
struct emc_entry {
struct dp_netdev_flow *flow;
struct netdev_flow_key key; /* key.hash used for emc hash value. */
@@ -237,6 +240,11 @@ emc_lookup(struct emc_cache *cache, const struct netdev_flow_key *key)
return NULL;
}
+struct dp_netdev_flow *
+smc_lookup_single(struct dp_netdev_pmd_thread *pmd,
+ struct dp_packet *packet,
+ struct netdev_flow_key *key);
+
#ifdef __cplusplus
}
#endif
@@ -45,6 +45,8 @@ struct dp_netdev_pmd_thread_ctx {
struct dp_netdev_rxq *last_rxq;
/* EMC insertion probability context for the current processing cycle. */
uint32_t emc_insert_min;
+    /* Enable the SMC cache from ovsdb config. */
+ bool smc_enable_db;
};
/* Forward declaration for typedef */
@@ -5183,6 +5183,8 @@ dpif_netdev_run(struct dpif *dpif)
non_pmd->ctx.emc_insert_min = 0;
}
+ non_pmd->ctx.smc_enable_db = dp->smc_enable_db;
+
for (i = 0; i < port->n_rxq; i++) {
if (!netdev_rxq_enabled(port->rxqs[i].rx)) {
@@ -5454,6 +5456,8 @@ reload:
pmd->ctx.emc_insert_min = 0;
}
+ pmd->ctx.smc_enable_db = pmd->dp->smc_enable_db;
+
process_packets =
dp_netdev_process_rxq_port(pmd, poll_list[i].rxq,
poll_list[i].port_no);
@@ -6542,6 +6546,30 @@ smc_lookup_batch(struct dp_netdev_pmd_thread *pmd,
pmd_perf_update_counter(&pmd->perf_stats, PMD_STAT_SMC_HIT, n_smc_hit);
}
+struct dp_netdev_flow *
+smc_lookup_single(struct dp_netdev_pmd_thread *pmd,
+                  struct dp_packet *packet,
+                  struct netdev_flow_key *key)
+{
+    const struct cmap_node *flow_node = smc_entry_get(pmd, key->hash);
+
+    if (OVS_LIKELY(flow_node != NULL)) {
+        struct dp_netdev_flow *flow = NULL;
+
+        CMAP_NODE_FOR_EACH (flow, node, flow_node) {
+            /* Since we don't have per-port megaflow to check the port
+             * number, we need to verify that the input ports match. */
+            if (OVS_LIKELY(dpcls_rule_matches_key(&flow->cr, key) &&
+                flow->flow.in_port.odp_port == packet->md.in_port.odp_port)) {
+
+                /* Already the correct pointer type; no cast needed. */
+                return flow;
+            }
+        }
+    }
+
+    return NULL;
+}
+
/* Try to process all ('cnt') the 'packets' using only the datapath flow cache
* 'pmd->flow_cache'. If a flow is not found for a packet 'packets[i]', the
* miniflow is copied into 'keys' and the packet pointer is moved at the