@@ -31,7 +31,6 @@ enum cache_level_flags {
NFT_CACHE_SET_BIT |
NFT_CACHE_SETELEM_BIT,
NFT_CACHE_RULE = NFT_CACHE_TABLE_BIT |
- NFT_CACHE_CHAIN_BIT |
NFT_CACHE_RULE_BIT,
NFT_CACHE_FULL = __NFT_CACHE_MAX_BIT - 1,
NFT_CACHE_TERSE = (1 << 27),
@@ -54,21 +54,19 @@ static unsigned int evaluate_cache_add(struct cmd *cmd, unsigned int flags)
break;
case CMD_OBJ_ELEMENTS:
flags |= NFT_CACHE_TABLE |
- NFT_CACHE_CHAIN |
NFT_CACHE_SET |
NFT_CACHE_OBJECT |
NFT_CACHE_SETELEM_MAYBE;
break;
case CMD_OBJ_RULE:
flags |= NFT_CACHE_TABLE |
- NFT_CACHE_CHAIN |
NFT_CACHE_SET |
NFT_CACHE_OBJECT |
NFT_CACHE_FLOWTABLE;
if (cmd->handle.index.id ||
cmd->handle.position.id)
- flags |= NFT_CACHE_RULE | NFT_CACHE_UPDATE;
+ flags |= NFT_CACHE_CHAIN | NFT_CACHE_RULE | NFT_CACHE_UPDATE;
break;
default:
break;
@@ -435,7 +433,6 @@ int nft_cache_evaluate(struct nft_ctx *nft, struct list_head *cmds,
case CMD_DELETE:
case CMD_DESTROY:
flags |= NFT_CACHE_TABLE |
- NFT_CACHE_CHAIN |
NFT_CACHE_SET |
NFT_CACHE_FLOWTABLE |
NFT_CACHE_OBJECT;
@@ -69,12 +69,17 @@ static int table_fuzzy_check(struct netlink_ctx *ctx, const struct cmd *cmd,
static int nft_cmd_enoent_chain(struct netlink_ctx *ctx, const struct cmd *cmd,
const struct location *loc)
{
+ unsigned int flags = NFT_CACHE_TABLE |
+ NFT_CACHE_CHAIN;
const struct table *table = NULL;
struct chain *chain;
if (!cmd->handle.chain.name)
return 0;
+ if (nft_cache_update(ctx->nft, flags, ctx->msgs, NULL) < 0)
+ return 0;
+
chain = chain_lookup_fuzzy(&cmd->handle, &ctx->nft->cache, &table);
/* check table first. */
if (!table)
Updates on verdict maps that require many non-base chains are slowed down due to fetching existing non-base chains into the cache. Chains are only required for error reporting hints if the kernel reports ENOENT. Populate the cache from this error path only. A similar approach already exists for the rule ENOENT error path since: 3f1d3912c3a6 ("cache: filter out tables that are not requested") However, NFT_CACHE_CHAIN was toggled unconditionally for rule commands, rendering this on-demand cache population useless. Before this patch, running Neels' nft_slew benchmark (peak values): created idx 4992 in 52587950 ns (128 in 7122 ms) ... deleted idx 128 in 43542500 ns (127 in 6187 ms) After this patch: created idx 4992 in 11361299 ns (128 in 1612 ms) ... deleted idx 1664 in 5239633 ns (128 in 733 ms) The cache is still populated when the index is used. Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> --- A new attempt to address this issue, now that "cache: recycle existing cache with incremental updates" has been reverted. include/cache.h | 1 - src/cache.c | 5 +---- src/cmd.c | 5 +++++ 3 files changed, 6 insertions(+), 5 deletions(-)