[2/2] ext4: Convert EXT4_B2C(sbi->s_stripe) users to EXT4_NUM_B2C

Message ID 02e5b5139294897985f2b99729c5f3da3dd4f6f9.1723794770.git.ojaswin@linux.ibm.com
State Superseded
Series [1/2] ext4: Check stripe size compatibility on remount as well

Commit Message

Ojaswin Mujoo Aug. 16, 2024, 7:57 a.m. UTC
Although we have checks to make sure s_stripe is a multiple of the
cluster size, in case we accidentally end up in a scenario where it is
not, use EXT4_NUM_B2C() so that we don't hit unexpected cases where
EXT4_B2C(stripe) becomes 0.

Also make the is_stripe_aligned check in regular_allocator a bit more
robust while we are at it. This should ideally have no functional
change unless we have a bug somewhere causing (stripe % cluster_size != 0).

Signed-off-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
---
 fs/ext4/mballoc.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)
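
For context, the two macros differ only in rounding: EXT4_B2C() truncates
while EXT4_NUM_B2C() rounds up (definitions as in fs/ext4/ext4.h). Below is
a minimal standalone sketch, with hypothetical values, of why truncation is
dangerous when s_stripe is not a multiple of the cluster size:

#include <assert.h>

/* Stand-in for the relevant struct ext4_sb_info members. */
struct sbi { unsigned int s_cluster_bits, s_cluster_ratio; };

/* Definitions as in fs/ext4/ext4.h. */
#define EXT4_B2C(sbi, blk)      ((blk) >> (sbi)->s_cluster_bits)
#define EXT4_NUM_B2C(sbi, blks) (((blks) + (sbi)->s_cluster_ratio - 1) >> \
                                 (sbi)->s_cluster_bits)

int main(void)
{
	/* Hypothetical misconfiguration: 16 blocks per cluster, stripe of 8. */
	struct sbi s = { .s_cluster_bits = 4, .s_cluster_ratio = 16 };

	assert(EXT4_B2C(&s, 8) == 0);     /* truncates to 0 */
	assert(EXT4_NUM_B2C(&s, 8) == 1); /* rounds up, stays non-zero */
	return 0;
}

A zero result then feeds both "fe_len % stripe" and roundup() in the hunks
below, each of which divides by it.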

Comments

Kemeng Shi Aug. 19, 2024, 12:51 p.m. UTC | #1
On 8/16/2024 3:57 PM, Ojaswin Mujoo wrote:
> Although we have checks to make sure s_stripe is a multiple of the
> cluster size, in case we accidentally end up in a scenario where it is
> not, use EXT4_NUM_B2C() so that we don't hit unexpected cases where
> EXT4_B2C(stripe) becomes 0.
> 
> Also make the is_stripe_aligned check in regular_allocator a bit more
> robust while we are at it. This should ideally have no functional
> change unless we have a bug somewhere causing (stripe % cluster_size != 0).
> 
> Signed-off-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
> ---
>  fs/ext4/mballoc.c | 7 ++++---
>  1 file changed, 4 insertions(+), 3 deletions(-)
> 
> diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
> index 9dda9cd68ab2..b2e836a4bd18 100644
> --- a/fs/ext4/mballoc.c
> +++ b/fs/ext4/mballoc.c
> @@ -2553,7 +2553,7 @@ void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
>  	do_div(a, sbi->s_stripe);
>  	i = (a * sbi->s_stripe) - first_group_block;
>  
> -	stripe = EXT4_B2C(sbi, sbi->s_stripe);
> +	stripe = EXT4_NUM_B2C(sbi, sbi->s_stripe);
>  	i = EXT4_B2C(sbi, i);
>  	while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
>  		if (!mb_test_bit(i, bitmap)) {
> @@ -2929,8 +2929,9 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
>  				ext4_mb_simple_scan_group(ac, &e4b);
>  			else {
>  				bool is_stripe_aligned = sbi->s_stripe &&
> +					sbi->s_stripe >= sbi->s_cluster_ratio &&
sbi->s_cluster_ratio is >= 1, so sbi->s_stripe >= sbi->s_cluster_ratio already
covers the case where sbi->s_stripe is non-zero. The separate non-zero check
of sbi->s_stripe seems unnecessary.
>  					!(ac->ac_g_ex.fe_len %
> -					  EXT4_B2C(sbi, sbi->s_stripe));
> +					  EXT4_NUM_B2C(sbi, sbi->s_stripe));
>  
>  				if ((cr == CR_GOAL_LEN_FAST ||
>  				     cr == CR_BEST_AVAIL_LEN) &&
> @@ -3707,7 +3708,7 @@ int ext4_mb_init(struct super_block *sb)
>  	 */
>  	if (sbi->s_stripe > 1) {
>  		sbi->s_mb_group_prealloc = roundup(
> -			sbi->s_mb_group_prealloc, EXT4_B2C(sbi, sbi->s_stripe));
> +			sbi->s_mb_group_prealloc, EXT4_NUM_B2C(sbi, sbi->s_stripe));
>  	}
>  
>  	sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
>
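
For reference, dropping the redundant non-zero check as suggested would leave
the condition looking roughly like this (a sketch of the suggestion, not
necessarily the exact v2 hunk):

	bool is_stripe_aligned = sbi->s_stripe >= sbi->s_cluster_ratio &&
				 !(ac->ac_g_ex.fe_len %
				   EXT4_NUM_B2C(sbi, sbi->s_stripe));

Since sbi->s_cluster_ratio is at least 1, the first comparison already fails
whenever sbi->s_stripe is 0, so the standalone non-zero test adds nothing.
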
Ojaswin Mujoo Aug. 20, 2024, 6:29 a.m. UTC | #2
On Mon, Aug 19, 2024 at 08:51:58PM +0800, Kemeng Shi wrote:
> 
> 
> On 8/16/2024 3:57 PM, Ojaswin Mujoo wrote:
> > Although we have checks to make sure s_stripe is a multiple of the
> > cluster size, in case we accidentally end up in a scenario where it is
> > not, use EXT4_NUM_B2C() so that we don't hit unexpected cases where
> > EXT4_B2C(stripe) becomes 0.
> > 
> > Also make the is_stripe_aligned check in regular_allocator a bit more
> > robust while we are at it. This should ideally have no functional
> > change unless we have a bug somewhere causing (stripe % cluster_size != 0).
> > 
> > Signed-off-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
> > ---
> >  fs/ext4/mballoc.c | 7 ++++---
> >  1 file changed, 4 insertions(+), 3 deletions(-)
> > 
> > diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
> > index 9dda9cd68ab2..b2e836a4bd18 100644
> > --- a/fs/ext4/mballoc.c
> > +++ b/fs/ext4/mballoc.c
> > @@ -2553,7 +2553,7 @@ void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
> >  	do_div(a, sbi->s_stripe);
> >  	i = (a * sbi->s_stripe) - first_group_block;
> >  
> > -	stripe = EXT4_B2C(sbi, sbi->s_stripe);
> > +	stripe = EXT4_NUM_B2C(sbi, sbi->s_stripe);
> >  	i = EXT4_B2C(sbi, i);
> >  	while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
> >  		if (!mb_test_bit(i, bitmap)) {
> > @@ -2929,8 +2929,9 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
> >  				ext4_mb_simple_scan_group(ac, &e4b);
> >  			else {
> >  				bool is_stripe_aligned = sbi->s_stripe &&
> > +					sbi->s_stripe >= sbi->s_cluster_ratio &&
> sbi->s_cluster_ratio is >= 1, so sbi->s_stripe >= sbi->s_cluster_ratio already
> covers the case where sbi->s_stripe is non-zero. The separate non-zero check
> of sbi->s_stripe seems unnecessary.

That makes sense, Kemeng. I'll send a v2 with this change.

Thanks,
ojaswin

> >  					!(ac->ac_g_ex.fe_len %
> > -					  EXT4_B2C(sbi, sbi->s_stripe));
> > +					  EXT4_NUM_B2C(sbi, sbi->s_stripe));
> >  
> >  				if ((cr == CR_GOAL_LEN_FAST ||
> >  				     cr == CR_BEST_AVAIL_LEN) &&
> > @@ -3707,7 +3708,7 @@ int ext4_mb_init(struct super_block *sb)
> >  	 */
> >  	if (sbi->s_stripe > 1) {
> >  		sbi->s_mb_group_prealloc = roundup(
> > -			sbi->s_mb_group_prealloc, EXT4_B2C(sbi, sbi->s_stripe));
> > +			sbi->s_mb_group_prealloc, EXT4_NUM_B2C(sbi, sbi->s_stripe));
> >  	}
> >  
> >  	sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
> > 
>

Patch

diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 9dda9cd68ab2..b2e836a4bd18 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2553,7 +2553,7 @@ void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
 	do_div(a, sbi->s_stripe);
 	i = (a * sbi->s_stripe) - first_group_block;
 
-	stripe = EXT4_B2C(sbi, sbi->s_stripe);
+	stripe = EXT4_NUM_B2C(sbi, sbi->s_stripe);
 	i = EXT4_B2C(sbi, i);
 	while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
 		if (!mb_test_bit(i, bitmap)) {
@@ -2929,8 +2929,9 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
 				ext4_mb_simple_scan_group(ac, &e4b);
 			else {
 				bool is_stripe_aligned = sbi->s_stripe &&
+					sbi->s_stripe >= sbi->s_cluster_ratio &&
 					!(ac->ac_g_ex.fe_len %
-					  EXT4_B2C(sbi, sbi->s_stripe));
+					  EXT4_NUM_B2C(sbi, sbi->s_stripe));
 
 				if ((cr == CR_GOAL_LEN_FAST ||
 				     cr == CR_BEST_AVAIL_LEN) &&
@@ -3707,7 +3708,7 @@ int ext4_mb_init(struct super_block *sb)
 	 */
 	if (sbi->s_stripe > 1) {
 		sbi->s_mb_group_prealloc = roundup(
-			sbi->s_mb_group_prealloc, EXT4_B2C(sbi, sbi->s_stripe));
+			sbi->s_mb_group_prealloc, EXT4_NUM_B2C(sbi, sbi->s_stripe));
 	}
 
 	sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
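
The ext4_mb_init() hunk above shows the sharpest failure mode: the kernel's
roundup() macro divides by its second argument, so a truncated EXT4_B2C()
result of 0 would be an immediate divide-by-zero. A sketch of the expansion
(roundup() as defined in include/linux/math.h):

#define roundup(x, y) (					\
{							\
	typeof(y) __y = y;				\
	(((x) + (__y - 1)) / __y) * __y;		\
}							\
)

/*
 * With a hypothetical s_stripe of 8 on a 16-blocks-per-cluster fs:
 *   EXT4_B2C(sbi, 8)     == 0  ->  division by zero in roundup()
 *   EXT4_NUM_B2C(sbi, 8) == 1  ->  roundup() is a no-op
 */

A zero stripe would likewise trip the modulo in ext4_mb_regular_allocator()
and leave the "i += stripe" scan loop in ext4_mb_scan_aligned() unable to
advance, which is why the round-up conversion is the safer default.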