diff mbox series

[SRU,F:linux-bluefield] netfilter: flowtable: Use rw sem as flow block lock

Message ID 1621374205-162869-1-git-send-email-danielj@nvidia.com
State New
Headers show
Series [SRU,F:linux-bluefield] netfilter: flowtable: Use rw sem as flow block lock | expand

Commit Message

Daniel Jurgens May 18, 2021, 9:43 p.m. UTC
From: Paul Blakey <paulb@mellanox.com>

BugLink: https://bugs.launchpad.net/bugs/1927251

Currently flow offload threads are synchronized by the flow block mutex.
Use rw lock instead to increase flow insertion (read) concurrency.

Signed-off-by: Paul Blakey <paulb@mellanox.com>
Reviewed-by: Oz Shlomo <ozsh@mellanox.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
(backported from commit 422c032afcf57d5e8109a54912e22ffc53d99068)
Signed-off-by: Daniel Jurgens <danielj@nvidia.com>

---
 include/net/netfilter/nf_flow_table.h |  2 +-
 net/netfilter/nf_flow_table_core.c    | 11 +++++------
 net/netfilter/nf_flow_table_offload.c |  4 ++--
 3 files changed, 8 insertions(+), 9 deletions(-)

Comments

Kleber Sacilotto de Souza May 19, 2021, 1:15 p.m. UTC | #1
On 18.05.21 23:43, Daniel Jurgens wrote:
> From: Paul Blakey <paulb@mellanox.com>
> 
> BugLink: https://bugs.launchpad.net/bugs/1927251
> 
> Currently flow offload threads are synchronized by the flow block mutex.
> Use rw lock instead to increase flow insertion (read) concurrency.
> 
> Signed-off-by: Paul Blakey <paulb@mellanox.com>
> Reviewed-by: Oz Shlomo <ozsh@mellanox.com>
> Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
> (backported from commit 422c032afcf57d5e8109a54912e22ffc53d99068)
> Signed-off-by: Daniel Jurgens <danielj@nvidia.com>

Acked-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>

Thanks

> 
> ---
>   include/net/netfilter/nf_flow_table.h |  2 +-
>   net/netfilter/nf_flow_table_core.c    | 11 +++++------
>   net/netfilter/nf_flow_table_offload.c |  4 ++--
>   3 files changed, 8 insertions(+), 9 deletions(-)
> 
> diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
> index 3a46b3c..e27b73f 100644
> --- a/include/net/netfilter/nf_flow_table.h
> +++ b/include/net/netfilter/nf_flow_table.h
> @@ -73,7 +73,7 @@ struct nf_flowtable {
>   	struct delayed_work		gc_work;
>   	unsigned int			flags;
>   	struct flow_block		flow_block;
> -	struct mutex			flow_block_lock; /* Guards flow_block */
> +	struct rw_semaphore		flow_block_lock; /* Guards flow_block */
>   	u32				flow_timeout;
>   	possible_net_t			net;
>   };
> diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
> index 5144e31..0c409aa 100644
> --- a/net/netfilter/nf_flow_table_core.c
> +++ b/net/netfilter/nf_flow_table_core.c
> @@ -398,7 +398,7 @@ int nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
>   	struct flow_block_cb *block_cb;
>   	int err = 0;
>   
> -	mutex_lock(&flow_table->flow_block_lock);
> +	down_write(&flow_table->flow_block_lock);
>   	block_cb = flow_block_cb_lookup(block, cb, cb_priv);
>   	if (block_cb) {
>   		err = -EEXIST;
> @@ -414,7 +414,7 @@ int nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
>   	list_add_tail(&block_cb->list, &block->cb_list);
>   
>   unlock:
> -	mutex_unlock(&flow_table->flow_block_lock);
> +	up_write(&flow_table->flow_block_lock);
>   	return err;
>   }
>   EXPORT_SYMBOL_GPL(nf_flow_table_offload_add_cb);
> @@ -425,13 +425,13 @@ void nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
>   	struct flow_block *block = &flow_table->flow_block;
>   	struct flow_block_cb *block_cb;
>   
> -	mutex_lock(&flow_table->flow_block_lock);
> +	down_write(&flow_table->flow_block_lock);
>   	block_cb = flow_block_cb_lookup(block, cb, cb_priv);
>   	if (block_cb)
>   		list_del(&block_cb->list);
>   	else
>   		WARN_ON(true);
> -	mutex_unlock(&flow_table->flow_block_lock);
> +	up_write(&flow_table->flow_block_lock);
>   }
>   EXPORT_SYMBOL_GPL(nf_flow_table_offload_del_cb);
>   
> @@ -561,7 +561,7 @@ int nf_flow_table_init(struct nf_flowtable *flowtable)
>   
>   	INIT_DEFERRABLE_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
>   	flow_block_init(&flowtable->flow_block);
> -	mutex_init(&flowtable->flow_block_lock);
> +	init_rwsem(&flowtable->flow_block_lock);
>   
>   	err = rhashtable_init(&flowtable->rhashtable,
>   			      &nf_flow_offload_rhash_params);
> @@ -627,7 +627,6 @@ void nf_flow_table_free(struct nf_flowtable *flow_table)
>   		nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step,
>   				      flow_table);
>   	rhashtable_destroy(&flow_table->rhashtable);
> -	mutex_destroy(&flow_table->flow_block_lock);
>   }
>   EXPORT_SYMBOL_GPL(nf_flow_table_free);
>   
> diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
> index 26a950d..46bffaa 100644
> --- a/net/netfilter/nf_flow_table_offload.c
> +++ b/net/netfilter/nf_flow_table_offload.c
> @@ -698,7 +698,7 @@ static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
>   	if (cmd == FLOW_CLS_REPLACE)
>   		cls_flow.rule = flow_rule->rule;
>   
> -	mutex_lock(&flowtable->flow_block_lock);
> +	down_read(&flowtable->flow_block_lock);
>   	list_for_each_entry(block_cb, block_cb_list, list) {
>   		err = block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow,
>   				   block_cb->cb_priv);
> @@ -707,7 +707,7 @@ static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
>   
>   		i++;
>   	}
> -	mutex_unlock(&flowtable->flow_block_lock);
> +	up_read(&flowtable->flow_block_lock);
>   
>   	if (cmd == FLOW_CLS_STATS)
>   		memcpy(stats, &cls_flow.stats, sizeof(*stats));
>
Kleber Sacilotto de Souza May 19, 2021, 1:25 p.m. UTC | #2
On 19.05.21 15:15, Kleber Souza wrote:
> On 18.05.21 23:43, Daniel Jurgens wrote:
>> From: Paul Blakey <paulb@mellanox.com>
>>
>> BugLink: https://bugs.launchpad.net/bugs/1927251
>>
>> Currently flow offload threads are synchronized by the flow block mutex.
>> Use rw lock instead to increase flow insertion (read) concurrency.
>>
>> Signed-off-by: Paul Blakey <paulb@mellanox.com>
>> Reviewed-by: Oz Shlomo <ozsh@mellanox.com>
>> Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
>> (backported from commit 422c032afcf57d5e8109a54912e22ffc53d99068)
>> Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
> 
> Acked-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
> 
> Thanks
> 
>>
>> ---
>>    include/net/netfilter/nf_flow_table.h |  2 +-
>>    net/netfilter/nf_flow_table_core.c    | 11 +++++------
>>    net/netfilter/nf_flow_table_offload.c |  4 ++--
>>    3 files changed, 8 insertions(+), 9 deletions(-)
>>
>> diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
>> index 3a46b3c..e27b73f 100644
>> --- a/include/net/netfilter/nf_flow_table.h
>> +++ b/include/net/netfilter/nf_flow_table.h
>> @@ -73,7 +73,7 @@ struct nf_flowtable {
>>    	struct delayed_work		gc_work;
>>    	unsigned int			flags;
>>    	struct flow_block		flow_block;
>> -	struct mutex			flow_block_lock; /* Guards flow_block */
>> +	struct rw_semaphore		flow_block_lock; /* Guards flow_block */
>>    	u32				flow_timeout;
>>    	possible_net_t			net;
>>    };
>> diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
>> index 5144e31..0c409aa 100644
>> --- a/net/netfilter/nf_flow_table_core.c
>> +++ b/net/netfilter/nf_flow_table_core.c
>> @@ -398,7 +398,7 @@ int nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
>>    	struct flow_block_cb *block_cb;
>>    	int err = 0;
>>    
>> -	mutex_lock(&flow_table->flow_block_lock);
>> +	down_write(&flow_table->flow_block_lock);
>>    	block_cb = flow_block_cb_lookup(block, cb, cb_priv);
>>    	if (block_cb) {
>>    		err = -EEXIST;
>> @@ -414,7 +414,7 @@ int nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
>>    	list_add_tail(&block_cb->list, &block->cb_list);
>>    
>>    unlock:
>> -	mutex_unlock(&flow_table->flow_block_lock);
>> +	up_write(&flow_table->flow_block_lock);
>>    	return err;
>>    }
>>    EXPORT_SYMBOL_GPL(nf_flow_table_offload_add_cb);
>> @@ -425,13 +425,13 @@ void nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
>>    	struct flow_block *block = &flow_table->flow_block;
>>    	struct flow_block_cb *block_cb;
>>    
>> -	mutex_lock(&flow_table->flow_block_lock);
>> +	down_write(&flow_table->flow_block_lock);
>>    	block_cb = flow_block_cb_lookup(block, cb, cb_priv);
>>    	if (block_cb)
>>    		list_del(&block_cb->list);
>>    	else
>>    		WARN_ON(true);
>> -	mutex_unlock(&flow_table->flow_block_lock);
>> +	up_write(&flow_table->flow_block_lock);

Please note that there is another patch submitted ("netfilter: flowtable: Free block_cb
when being deleted") touching the same block above but still using mutex_lock. So some
context adjustments will be needed when applying them.

This patch doesn't need to be resubmitted, this is just a note for whoever will be
applying these patches.

Kleber
Tim Gardner May 19, 2021, 6:24 p.m. UTC | #3
Acked-by: Tim Gardner <tim.gardner@canonical.com>

On 5/18/21 3:43 PM, Daniel Jurgens wrote:
> From: Paul Blakey <paulb@mellanox.com>
> 
> BugLink: https://bugs.launchpad.net/bugs/1927251
> 
> Currently flow offload threads are synchronized by the flow block mutex.
> Use rw lock instead to increase flow insertion (read) concurrency.
> 
> Signed-off-by: Paul Blakey <paulb@mellanox.com>
> Reviewed-by: Oz Shlomo <ozsh@mellanox.com>
> Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
> (backported from commit 422c032afcf57d5e8109a54912e22ffc53d99068)
> Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
> 
> ---
>   include/net/netfilter/nf_flow_table.h |  2 +-
>   net/netfilter/nf_flow_table_core.c    | 11 +++++------
>   net/netfilter/nf_flow_table_offload.c |  4 ++--
>   3 files changed, 8 insertions(+), 9 deletions(-)
> 
> diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
> index 3a46b3c..e27b73f 100644
> --- a/include/net/netfilter/nf_flow_table.h
> +++ b/include/net/netfilter/nf_flow_table.h
> @@ -73,7 +73,7 @@ struct nf_flowtable {
>   	struct delayed_work		gc_work;
>   	unsigned int			flags;
>   	struct flow_block		flow_block;
> -	struct mutex			flow_block_lock; /* Guards flow_block */
> +	struct rw_semaphore		flow_block_lock; /* Guards flow_block */
>   	u32				flow_timeout;
>   	possible_net_t			net;
>   };
> diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
> index 5144e31..0c409aa 100644
> --- a/net/netfilter/nf_flow_table_core.c
> +++ b/net/netfilter/nf_flow_table_core.c
> @@ -398,7 +398,7 @@ int nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
>   	struct flow_block_cb *block_cb;
>   	int err = 0;
>   
> -	mutex_lock(&flow_table->flow_block_lock);
> +	down_write(&flow_table->flow_block_lock);
>   	block_cb = flow_block_cb_lookup(block, cb, cb_priv);
>   	if (block_cb) {
>   		err = -EEXIST;
> @@ -414,7 +414,7 @@ int nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
>   	list_add_tail(&block_cb->list, &block->cb_list);
>   
>   unlock:
> -	mutex_unlock(&flow_table->flow_block_lock);
> +	up_write(&flow_table->flow_block_lock);
>   	return err;
>   }
>   EXPORT_SYMBOL_GPL(nf_flow_table_offload_add_cb);
> @@ -425,13 +425,13 @@ void nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
>   	struct flow_block *block = &flow_table->flow_block;
>   	struct flow_block_cb *block_cb;
>   
> -	mutex_lock(&flow_table->flow_block_lock);
> +	down_write(&flow_table->flow_block_lock);
>   	block_cb = flow_block_cb_lookup(block, cb, cb_priv);
>   	if (block_cb)
>   		list_del(&block_cb->list);
>   	else
>   		WARN_ON(true);
> -	mutex_unlock(&flow_table->flow_block_lock);
> +	up_write(&flow_table->flow_block_lock);
>   }
>   EXPORT_SYMBOL_GPL(nf_flow_table_offload_del_cb);
>   
> @@ -561,7 +561,7 @@ int nf_flow_table_init(struct nf_flowtable *flowtable)
>   
>   	INIT_DEFERRABLE_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
>   	flow_block_init(&flowtable->flow_block);
> -	mutex_init(&flowtable->flow_block_lock);
> +	init_rwsem(&flowtable->flow_block_lock);
>   
>   	err = rhashtable_init(&flowtable->rhashtable,
>   			      &nf_flow_offload_rhash_params);
> @@ -627,7 +627,6 @@ void nf_flow_table_free(struct nf_flowtable *flow_table)
>   		nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step,
>   				      flow_table);
>   	rhashtable_destroy(&flow_table->rhashtable);
> -	mutex_destroy(&flow_table->flow_block_lock);
>   }
>   EXPORT_SYMBOL_GPL(nf_flow_table_free);
>   
> diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
> index 26a950d..46bffaa 100644
> --- a/net/netfilter/nf_flow_table_offload.c
> +++ b/net/netfilter/nf_flow_table_offload.c
> @@ -698,7 +698,7 @@ static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
>   	if (cmd == FLOW_CLS_REPLACE)
>   		cls_flow.rule = flow_rule->rule;
>   
> -	mutex_lock(&flowtable->flow_block_lock);
> +	down_read(&flowtable->flow_block_lock);
>   	list_for_each_entry(block_cb, block_cb_list, list) {
>   		err = block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow,
>   				   block_cb->cb_priv);
> @@ -707,7 +707,7 @@ static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
>   
>   		i++;
>   	}
> -	mutex_unlock(&flowtable->flow_block_lock);
> +	up_read(&flowtable->flow_block_lock);
>   
>   	if (cmd == FLOW_CLS_STATS)
>   		memcpy(stats, &cls_flow.stats, sizeof(*stats));
>
Kelsey Skunberg May 29, 2021, 12:41 a.m. UTC | #4
On 2021-05-19 00:43:25 , Daniel Jurgens wrote:
> From: Paul Blakey <paulb@mellanox.com>
> 
> BugLink: https://bugs.launchpad.net/bugs/1927251
> 
> Currently flow offload threads are synchronized by the flow block mutex.
> Use rw lock instead to increase flow insertion (read) concurrency.
> 
> Signed-off-by: Paul Blakey <paulb@mellanox.com>
> Reviewed-by: Oz Shlomo <ozsh@mellanox.com>
> Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
> (backported from commit 422c032afcf57d5e8109a54912e22ffc53d99068)
[danielj: minor context adjustments in header (for example)]
> Signed-off-by: Daniel Jurgens <danielj@nvidia.com>

There was a prior version of this patch on the mailing list which Stefan
reviewed. He requested that a line like the above please be added when
applying.

Thank you! 

-Kelsey

> 
> ---
>  include/net/netfilter/nf_flow_table.h |  2 +-
>  net/netfilter/nf_flow_table_core.c    | 11 +++++------
>  net/netfilter/nf_flow_table_offload.c |  4 ++--
>  3 files changed, 8 insertions(+), 9 deletions(-)
> 
> diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
> index 3a46b3c..e27b73f 100644
> --- a/include/net/netfilter/nf_flow_table.h
> +++ b/include/net/netfilter/nf_flow_table.h
> @@ -73,7 +73,7 @@ struct nf_flowtable {
>  	struct delayed_work		gc_work;
>  	unsigned int			flags;
>  	struct flow_block		flow_block;
> -	struct mutex			flow_block_lock; /* Guards flow_block */
> +	struct rw_semaphore		flow_block_lock; /* Guards flow_block */
>  	u32				flow_timeout;
>  	possible_net_t			net;
>  };
> diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
> index 5144e31..0c409aa 100644
> --- a/net/netfilter/nf_flow_table_core.c
> +++ b/net/netfilter/nf_flow_table_core.c
> @@ -398,7 +398,7 @@ int nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
>  	struct flow_block_cb *block_cb;
>  	int err = 0;
>  
> -	mutex_lock(&flow_table->flow_block_lock);
> +	down_write(&flow_table->flow_block_lock);
>  	block_cb = flow_block_cb_lookup(block, cb, cb_priv);
>  	if (block_cb) {
>  		err = -EEXIST;
> @@ -414,7 +414,7 @@ int nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
>  	list_add_tail(&block_cb->list, &block->cb_list);
>  
>  unlock:
> -	mutex_unlock(&flow_table->flow_block_lock);
> +	up_write(&flow_table->flow_block_lock);
>  	return err;
>  }
>  EXPORT_SYMBOL_GPL(nf_flow_table_offload_add_cb);
> @@ -425,13 +425,13 @@ void nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
>  	struct flow_block *block = &flow_table->flow_block;
>  	struct flow_block_cb *block_cb;
>  
> -	mutex_lock(&flow_table->flow_block_lock);
> +	down_write(&flow_table->flow_block_lock);
>  	block_cb = flow_block_cb_lookup(block, cb, cb_priv);
>  	if (block_cb)
>  		list_del(&block_cb->list);
>  	else
>  		WARN_ON(true);
> -	mutex_unlock(&flow_table->flow_block_lock);
> +	up_write(&flow_table->flow_block_lock);
>  }
>  EXPORT_SYMBOL_GPL(nf_flow_table_offload_del_cb);
>  
> @@ -561,7 +561,7 @@ int nf_flow_table_init(struct nf_flowtable *flowtable)
>  
>  	INIT_DEFERRABLE_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
>  	flow_block_init(&flowtable->flow_block);
> -	mutex_init(&flowtable->flow_block_lock);
> +	init_rwsem(&flowtable->flow_block_lock);
>  
>  	err = rhashtable_init(&flowtable->rhashtable,
>  			      &nf_flow_offload_rhash_params);
> @@ -627,7 +627,6 @@ void nf_flow_table_free(struct nf_flowtable *flow_table)
>  		nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step,
>  				      flow_table);
>  	rhashtable_destroy(&flow_table->rhashtable);
> -	mutex_destroy(&flow_table->flow_block_lock);
>  }
>  EXPORT_SYMBOL_GPL(nf_flow_table_free);
>  
> diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
> index 26a950d..46bffaa 100644
> --- a/net/netfilter/nf_flow_table_offload.c
> +++ b/net/netfilter/nf_flow_table_offload.c
> @@ -698,7 +698,7 @@ static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
>  	if (cmd == FLOW_CLS_REPLACE)
>  		cls_flow.rule = flow_rule->rule;
>  
> -	mutex_lock(&flowtable->flow_block_lock);
> +	down_read(&flowtable->flow_block_lock);
>  	list_for_each_entry(block_cb, block_cb_list, list) {
>  		err = block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow,
>  				   block_cb->cb_priv);
> @@ -707,7 +707,7 @@ static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
>  
>  		i++;
>  	}
> -	mutex_unlock(&flowtable->flow_block_lock);
> +	up_read(&flowtable->flow_block_lock);
>  
>  	if (cmd == FLOW_CLS_STATS)
>  		memcpy(stats, &cls_flow.stats, sizeof(*stats));
> -- 
> 1.8.3.1
> 
> 
> -- 
> kernel-team mailing list
> kernel-team@lists.ubuntu.com
> https://lists.ubuntu.com/mailman/listinfo/kernel-team
Daniel Jurgens June 1, 2021, 8:15 p.m. UTC | #5
> -----Original Message-----
> From: Kelsey Skunberg <kelsey.skunberg@canonical.com>
> Sent: Friday, May 28, 2021 7:41 PM
> Subject: CMT: [SRU][F:linux-bluefield][PATCH] netfilter: flowtable: Use rw
> sem as flow block lock
> 
> On 2021-05-19 00:43:25 , Daniel Jurgens wrote:
> > From: Paul Blakey <paulb@mellanox.com>
> >
> > BugLink: https://bugs.launchpad.net/bugs/1927251
> >
> > Currently flow offload threads are synchronized by the flow block mutex.
> > Use rw lock instead to increase flow insertion (read) concurrency.
> >
> > Signed-off-by: Paul Blakey <paulb@mellanox.com>
> > Reviewed-by: Oz Shlomo <ozsh@mellanox.com>
> > Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> (backported
> > from commit 422c032afcf57d5e8109a54912e22ffc53d99068)
> [danielj: minor context adjustments in header (for example)]
> > Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
> 
> There was a prior version of this patch on the mailing list which Stefan
> reviewed. He requested a line like the above to please be added when
> applying.

It's not clear whether you want me to resubmit — Stefan said there was no need to.

> 
> Thank you!
> 
> -Kelsey
> 
> >
> > ---
> >  include/net/netfilter/nf_flow_table.h |  2 +-
> >  net/netfilter/nf_flow_table_core.c    | 11 +++++------
> >  net/netfilter/nf_flow_table_offload.c |  4 ++--
> >  3 files changed, 8 insertions(+), 9 deletions(-)
> >
> > diff --git a/include/net/netfilter/nf_flow_table.h
> > b/include/net/netfilter/nf_flow_table.h
> > index 3a46b3c..e27b73f 100644
> > --- a/include/net/netfilter/nf_flow_table.h
> > +++ b/include/net/netfilter/nf_flow_table.h
> > @@ -73,7 +73,7 @@ struct nf_flowtable {
> >  	struct delayed_work		gc_work;
> >  	unsigned int			flags;
> >  	struct flow_block		flow_block;
> > -	struct mutex			flow_block_lock; /* Guards
> flow_block */
> > +	struct rw_semaphore		flow_block_lock; /* Guards
> flow_block */
> >  	u32				flow_timeout;
> >  	possible_net_t			net;
> >  };
> > diff --git a/net/netfilter/nf_flow_table_core.c
> > b/net/netfilter/nf_flow_table_core.c
> > index 5144e31..0c409aa 100644
> > --- a/net/netfilter/nf_flow_table_core.c
> > +++ b/net/netfilter/nf_flow_table_core.c
> > @@ -398,7 +398,7 @@ int nf_flow_table_offload_add_cb(struct
> nf_flowtable *flow_table,
> >  	struct flow_block_cb *block_cb;
> >  	int err = 0;
> >
> > -	mutex_lock(&flow_table->flow_block_lock);
> > +	down_write(&flow_table->flow_block_lock);
> >  	block_cb = flow_block_cb_lookup(block, cb, cb_priv);
> >  	if (block_cb) {
> >  		err = -EEXIST;
> > @@ -414,7 +414,7 @@ int nf_flow_table_offload_add_cb(struct
> nf_flowtable *flow_table,
> >  	list_add_tail(&block_cb->list, &block->cb_list);
> >
> >  unlock:
> > -	mutex_unlock(&flow_table->flow_block_lock);
> > +	up_write(&flow_table->flow_block_lock);
> >  	return err;
> >  }
> >  EXPORT_SYMBOL_GPL(nf_flow_table_offload_add_cb);
> > @@ -425,13 +425,13 @@ void nf_flow_table_offload_del_cb(struct
> nf_flowtable *flow_table,
> >  	struct flow_block *block = &flow_table->flow_block;
> >  	struct flow_block_cb *block_cb;
> >
> > -	mutex_lock(&flow_table->flow_block_lock);
> > +	down_write(&flow_table->flow_block_lock);
> >  	block_cb = flow_block_cb_lookup(block, cb, cb_priv);
> >  	if (block_cb)
> >  		list_del(&block_cb->list);
> >  	else
> >  		WARN_ON(true);
> > -	mutex_unlock(&flow_table->flow_block_lock);
> > +	up_write(&flow_table->flow_block_lock);
> >  }
> >  EXPORT_SYMBOL_GPL(nf_flow_table_offload_del_cb);
> >
> > @@ -561,7 +561,7 @@ int nf_flow_table_init(struct nf_flowtable
> > *flowtable)
> >
> >  	INIT_DEFERRABLE_WORK(&flowtable->gc_work,
> nf_flow_offload_work_gc);
> >  	flow_block_init(&flowtable->flow_block);
> > -	mutex_init(&flowtable->flow_block_lock);
> > +	init_rwsem(&flowtable->flow_block_lock);
> >
> >  	err = rhashtable_init(&flowtable->rhashtable,
> >  			      &nf_flow_offload_rhash_params); @@ -627,7
> +627,6 @@ void
> > nf_flow_table_free(struct nf_flowtable *flow_table)
> >  		nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step,
> >  				      flow_table);
> >  	rhashtable_destroy(&flow_table->rhashtable);
> > -	mutex_destroy(&flow_table->flow_block_lock);
> >  }
> >  EXPORT_SYMBOL_GPL(nf_flow_table_free);
> >
> > diff --git a/net/netfilter/nf_flow_table_offload.c
> > b/net/netfilter/nf_flow_table_offload.c
> > index 26a950d..46bffaa 100644
> > --- a/net/netfilter/nf_flow_table_offload.c
> > +++ b/net/netfilter/nf_flow_table_offload.c
> > @@ -698,7 +698,7 @@ static int nf_flow_offload_tuple(struct nf_flowtable
> *flowtable,
> >  	if (cmd == FLOW_CLS_REPLACE)
> >  		cls_flow.rule = flow_rule->rule;
> >
> > -	mutex_lock(&flowtable->flow_block_lock);
> > +	down_read(&flowtable->flow_block_lock);
> >  	list_for_each_entry(block_cb, block_cb_list, list) {
> >  		err = block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow,
> >  				   block_cb->cb_priv);
> > @@ -707,7 +707,7 @@ static int nf_flow_offload_tuple(struct
> > nf_flowtable *flowtable,
> >
> >  		i++;
> >  	}
> > -	mutex_unlock(&flowtable->flow_block_lock);
> > +	up_read(&flowtable->flow_block_lock);
> >
> >  	if (cmd == FLOW_CLS_STATS)
> >  		memcpy(stats, &cls_flow.stats, sizeof(*stats));
> > --
> > 1.8.3.1
> >
> >
> > --
> > kernel-team mailing list
> > kernel-team@lists.ubuntu.com
> > https://lists.ubuntu.com/mailman/listinfo/kernel-team
Stefan Bader June 2, 2021, 7:57 a.m. UTC | #6
On 18.05.21 23:43, Daniel Jurgens wrote:
> From: Paul Blakey <paulb@mellanox.com>
> 
> BugLink: https://bugs.launchpad.net/bugs/1927251
> 
> Currently flow offload threads are synchronized by the flow block mutex.
> Use rw lock instead to increase flow insertion (read) concurrency.
> 
> Signed-off-by: Paul Blakey <paulb@mellanox.com>
> Reviewed-by: Oz Shlomo <ozsh@mellanox.com>
> Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
> (backported from commit 422c032afcf57d5e8109a54912e22ffc53d99068)
> Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
> 
> ---
>   include/net/netfilter/nf_flow_table.h |  2 +-
>   net/netfilter/nf_flow_table_core.c    | 11 +++++------
>   net/netfilter/nf_flow_table_offload.c |  4 ++--
>   3 files changed, 8 insertions(+), 9 deletions(-)
> 
> diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
> index 3a46b3c..e27b73f 100644
> --- a/include/net/netfilter/nf_flow_table.h
> +++ b/include/net/netfilter/nf_flow_table.h
> @@ -73,7 +73,7 @@ struct nf_flowtable {
>   	struct delayed_work		gc_work;
>   	unsigned int			flags;
>   	struct flow_block		flow_block;
> -	struct mutex			flow_block_lock; /* Guards flow_block */
> +	struct rw_semaphore		flow_block_lock; /* Guards flow_block */
>   	u32				flow_timeout;
>   	possible_net_t			net;
>   };
> diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
> index 5144e31..0c409aa 100644
> --- a/net/netfilter/nf_flow_table_core.c
> +++ b/net/netfilter/nf_flow_table_core.c
> @@ -398,7 +398,7 @@ int nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
>   	struct flow_block_cb *block_cb;
>   	int err = 0;
>   
> -	mutex_lock(&flow_table->flow_block_lock);
> +	down_write(&flow_table->flow_block_lock);
>   	block_cb = flow_block_cb_lookup(block, cb, cb_priv);
>   	if (block_cb) {
>   		err = -EEXIST;
> @@ -414,7 +414,7 @@ int nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
>   	list_add_tail(&block_cb->list, &block->cb_list);
>   
>   unlock:
> -	mutex_unlock(&flow_table->flow_block_lock);
> +	up_write(&flow_table->flow_block_lock);
>   	return err;
>   }
>   EXPORT_SYMBOL_GPL(nf_flow_table_offload_add_cb);
> @@ -425,13 +425,13 @@ void nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
>   	struct flow_block *block = &flow_table->flow_block;
>   	struct flow_block_cb *block_cb;
>   
> -	mutex_lock(&flow_table->flow_block_lock);
> +	down_write(&flow_table->flow_block_lock);
>   	block_cb = flow_block_cb_lookup(block, cb, cb_priv);
>   	if (block_cb)
>   		list_del(&block_cb->list);
>   	else
>   		WARN_ON(true);
> -	mutex_unlock(&flow_table->flow_block_lock);
> +	up_write(&flow_table->flow_block_lock);
>   }
>   EXPORT_SYMBOL_GPL(nf_flow_table_offload_del_cb);
>   
> @@ -561,7 +561,7 @@ int nf_flow_table_init(struct nf_flowtable *flowtable)
>   
>   	INIT_DEFERRABLE_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
>   	flow_block_init(&flowtable->flow_block);
> -	mutex_init(&flowtable->flow_block_lock);
> +	init_rwsem(&flowtable->flow_block_lock);
>   
>   	err = rhashtable_init(&flowtable->rhashtable,
>   			      &nf_flow_offload_rhash_params);
> @@ -627,7 +627,6 @@ void nf_flow_table_free(struct nf_flowtable *flow_table)
>   		nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step,
>   				      flow_table);
>   	rhashtable_destroy(&flow_table->rhashtable);
> -	mutex_destroy(&flow_table->flow_block_lock);
>   }
>   EXPORT_SYMBOL_GPL(nf_flow_table_free);
>   
> diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
> index 26a950d..46bffaa 100644
> --- a/net/netfilter/nf_flow_table_offload.c
> +++ b/net/netfilter/nf_flow_table_offload.c
> @@ -698,7 +698,7 @@ static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
>   	if (cmd == FLOW_CLS_REPLACE)
>   		cls_flow.rule = flow_rule->rule;
>   
> -	mutex_lock(&flowtable->flow_block_lock);
> +	down_read(&flowtable->flow_block_lock);
>   	list_for_each_entry(block_cb, block_cb_list, list) {
>   		err = block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow,
>   				   block_cb->cb_priv);
> @@ -707,7 +707,7 @@ static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
>   
>   		i++;
>   	}
> -	mutex_unlock(&flowtable->flow_block_lock);
> +	up_read(&flowtable->flow_block_lock);
>   
>   	if (cmd == FLOW_CLS_STATS)
>   		memcpy(stats, &cls_flow.stats, sizeof(*stats));
> 
There was a NACK which appears to have been meant for this patch, but it was 
not sent as a reply to this thread.

-Stefan
diff mbox series

Patch

diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index 3a46b3c..e27b73f 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -73,7 +73,7 @@  struct nf_flowtable {
 	struct delayed_work		gc_work;
 	unsigned int			flags;
 	struct flow_block		flow_block;
-	struct mutex			flow_block_lock; /* Guards flow_block */
+	struct rw_semaphore		flow_block_lock; /* Guards flow_block */
 	u32				flow_timeout;
 	possible_net_t			net;
 };
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 5144e31..0c409aa 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -398,7 +398,7 @@  int nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
 	struct flow_block_cb *block_cb;
 	int err = 0;
 
-	mutex_lock(&flow_table->flow_block_lock);
+	down_write(&flow_table->flow_block_lock);
 	block_cb = flow_block_cb_lookup(block, cb, cb_priv);
 	if (block_cb) {
 		err = -EEXIST;
@@ -414,7 +414,7 @@  int nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
 	list_add_tail(&block_cb->list, &block->cb_list);
 
 unlock:
-	mutex_unlock(&flow_table->flow_block_lock);
+	up_write(&flow_table->flow_block_lock);
 	return err;
 }
 EXPORT_SYMBOL_GPL(nf_flow_table_offload_add_cb);
@@ -425,13 +425,13 @@  void nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
 	struct flow_block *block = &flow_table->flow_block;
 	struct flow_block_cb *block_cb;
 
-	mutex_lock(&flow_table->flow_block_lock);
+	down_write(&flow_table->flow_block_lock);
 	block_cb = flow_block_cb_lookup(block, cb, cb_priv);
 	if (block_cb)
 		list_del(&block_cb->list);
 	else
 		WARN_ON(true);
-	mutex_unlock(&flow_table->flow_block_lock);
+	up_write(&flow_table->flow_block_lock);
 }
 EXPORT_SYMBOL_GPL(nf_flow_table_offload_del_cb);
 
@@ -561,7 +561,7 @@  int nf_flow_table_init(struct nf_flowtable *flowtable)
 
 	INIT_DEFERRABLE_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
 	flow_block_init(&flowtable->flow_block);
-	mutex_init(&flowtable->flow_block_lock);
+	init_rwsem(&flowtable->flow_block_lock);
 
 	err = rhashtable_init(&flowtable->rhashtable,
 			      &nf_flow_offload_rhash_params);
@@ -627,7 +627,6 @@  void nf_flow_table_free(struct nf_flowtable *flow_table)
 		nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step,
 				      flow_table);
 	rhashtable_destroy(&flow_table->rhashtable);
-	mutex_destroy(&flow_table->flow_block_lock);
 }
 EXPORT_SYMBOL_GPL(nf_flow_table_free);
 
diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
index 26a950d..46bffaa 100644
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -698,7 +698,7 @@  static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
 	if (cmd == FLOW_CLS_REPLACE)
 		cls_flow.rule = flow_rule->rule;
 
-	mutex_lock(&flowtable->flow_block_lock);
+	down_read(&flowtable->flow_block_lock);
 	list_for_each_entry(block_cb, block_cb_list, list) {
 		err = block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow,
 				   block_cb->cb_priv);
@@ -707,7 +707,7 @@  static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
 
 		i++;
 	}
-	mutex_unlock(&flowtable->flow_block_lock);
+	up_read(&flowtable->flow_block_lock);
 
 	if (cmd == FLOW_CLS_STATS)
 		memcpy(stats, &cls_flow.stats, sizeof(*stats));