diff mbox series

[v1,04/10] powerpc/kernel/iommu: Add new iommu_table_in_use() helper

Message ID 20200817234033.442511-5-leobras.c@gmail.com (mailing list archive)
State Superseded, archived
Headers show
Series DDW indirect mapping | expand

Checks

Context Check Description
snowpatch_ozlabs/apply_patch success Successfully applied on branch powerpc/merge (97a94d178e5876ad49482c42b13b7296cd6803de)
snowpatch_ozlabs/checkpatch success total: 0 errors, 0 warnings, 0 checks, 98 lines checked
snowpatch_ozlabs/needsstable success Patch has no Fixes tags

Commit Message

Leonardo Brás Aug. 17, 2020, 11:40 p.m. UTC
Having a function to check if the iommu table has any allocation helps
in deciding whether a tbl can be reset for using a new DMA window.

It should be enough to replace all instances of !bitmap_empty(tbl...).

iommu_table_in_use() skips reserved memory, so we don't need to worry about
releasing it before testing. This causes iommu_table_release_pages() to
become unnecessary, given it is only used to remove reserved memory for
testing.

Signed-off-by: Leonardo Bras <leobras.c@gmail.com>
---
 arch/powerpc/include/asm/iommu.h |  1 +
 arch/powerpc/kernel/iommu.c      | 62 ++++++++++++++++++--------------
 2 files changed, 37 insertions(+), 26 deletions(-)

Comments

Alexey Kardashevskiy Aug. 22, 2020, 10:34 a.m. UTC | #1
On 18/08/2020 09:40, Leonardo Bras wrote:
> Having a function to check if the iommu table has any allocation helps
> in deciding whether a tbl can be reset for using a new DMA window.
> 
> It should be enough to replace all instances of !bitmap_empty(tbl...).
> 
> iommu_table_in_use() skips reserved memory, so we don't need to worry about
> releasing it before testing. This causes iommu_table_release_pages() to
> become unnecessary, given it is only used to remove reserved memory for
> testing.
> 
> Signed-off-by: Leonardo Bras <leobras.c@gmail.com>
> ---
>  arch/powerpc/include/asm/iommu.h |  1 +
>  arch/powerpc/kernel/iommu.c      | 62 ++++++++++++++++++--------------
>  2 files changed, 37 insertions(+), 26 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
> index 5032f1593299..2913e5c8b1f8 100644
> --- a/arch/powerpc/include/asm/iommu.h
> +++ b/arch/powerpc/include/asm/iommu.h
> @@ -154,6 +154,7 @@ extern int iommu_tce_table_put(struct iommu_table *tbl);
>   */
>  extern struct iommu_table *iommu_init_table(struct iommu_table *tbl,
>  		int nid, unsigned long res_start, unsigned long res_end);
> +bool iommu_table_in_use(struct iommu_table *tbl);
>  
>  #define IOMMU_TABLE_GROUP_MAX_TABLES	2
>  
> diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
> index 7f603d4e62d4..c5d5d36ab65e 100644
> --- a/arch/powerpc/kernel/iommu.c
> +++ b/arch/powerpc/kernel/iommu.c
> @@ -668,21 +668,6 @@ static void iommu_table_reserve_pages(struct iommu_table *tbl,
>  		set_bit(i - tbl->it_offset, tbl->it_map);
>  }
>  
> -static void iommu_table_release_pages(struct iommu_table *tbl)
> -{
> -	int i;
> -
> -	/*
> -	 * In case we have reserved the first bit, we should not emit
> -	 * the warning below.
> -	 */
> -	if (tbl->it_offset == 0)
> -		clear_bit(0, tbl->it_map);
> -
> -	for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
> -		clear_bit(i - tbl->it_offset, tbl->it_map);
> -}
> -
>  /*
>   * Build a iommu_table structure.  This contains a bit map which
>   * is used to manage allocation of the tce space.
> @@ -743,6 +728,38 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
>  	return tbl;
>  }
>  
> +bool iommu_table_in_use(struct iommu_table *tbl)
> +{
> +	bool in_use;
> +	unsigned long p1_start = 0, p1_end, p2_start, p2_end;
> +
> +	/*ignore reserved bit0*/

s/ignore reserved bit0/ ignore reserved bit0 /  (add spaces)

> +	if (tbl->it_offset == 0)
> +		p1_start = 1;
> +
> +	/* Check if reserved memory is valid*/

A missing space here.

> +	if (tbl->it_reserved_start >= tbl->it_offset &&
> +	    tbl->it_reserved_start <= (tbl->it_offset + tbl->it_size) &&
> +	    tbl->it_reserved_end   >= tbl->it_offset &&
> +	    tbl->it_reserved_end   <= (tbl->it_offset + tbl->it_size)) {


Uff. What if tbl->it_reserved_end is bigger than tbl->it_offset +
tbl->it_size?

The reserved area is to preserve MMIO32 so it is for it_offset==0 only
and the boundaries are checked in the only callsite, and it is unlikely
to change soon or ever.

Rather than bothering with fixing that, maybe just add (did not test):

if (WARN_ON((
(tbl->it_reserved_start || tbl->it_reserved_end) && (it_offset != 0))
||
(tbl->it_reserved_start > it_offset && tbl->it_reserved_end < it_offset
+ it_size) && (it_offset == 0)) )
 return true;

Or simply always look for it_offset..it_reserved_start and
it_reserved_end..it_offset+it_size and if there is no reserved area,
initialize it_reserved_start=it_reserved_end=it_offset so the first
it_offset..it_reserved_start becomes a no-op.


> +		p1_end = tbl->it_reserved_start - tbl->it_offset;
> +		p2_start = tbl->it_reserved_end - tbl->it_offset + 1;
> +		p2_end = tbl->it_size;
> +	} else {
> +		p1_end = tbl->it_size;
> +		p2_start = 0;
> +		p2_end = 0;
> +	}
> +
> +	in_use = (find_next_bit(tbl->it_map, p1_end, p1_start) != p1_end);
> +	if (in_use || p2_start == 0)
> +		return in_use;
> +
> +	in_use = (find_next_bit(tbl->it_map, p2_end, p2_start) != p2_end);
> +
> +	return in_use;
> +}
> +
>  static void iommu_table_free(struct kref *kref)
>  {
>  	unsigned long bitmap_sz;
> @@ -759,10 +776,8 @@ static void iommu_table_free(struct kref *kref)
>  		return;
>  	}
>  
> -	iommu_table_release_pages(tbl);
> -
>  	/* verify that table contains no entries */
> -	if (!bitmap_empty(tbl->it_map, tbl->it_size))
> +	if (iommu_table_in_use(tbl))
>  		pr_warn("%s: Unexpected TCEs\n", __func__);
>  
>  	/* calculate bitmap size in bytes */
> @@ -1069,18 +1084,13 @@ int iommu_take_ownership(struct iommu_table *tbl)
>  	for (i = 0; i < tbl->nr_pools; i++)
>  		spin_lock(&tbl->pools[i].lock);
>  
> -	iommu_table_release_pages(tbl);
> -
> -	if (!bitmap_empty(tbl->it_map, tbl->it_size)) {
> +	if (iommu_table_in_use(tbl)) {
>  		pr_err("iommu_tce: it_map is not empty");
>  		ret = -EBUSY;
> -		/* Undo iommu_table_release_pages, i.e. restore bit#0, etc */
> -		iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
> -				tbl->it_reserved_end);
> -	} else {
> -		memset(tbl->it_map, 0xff, sz);
>  	}
>  
> +	memset(tbl->it_map, 0xff, sz);
> +
>  	for (i = 0; i < tbl->nr_pools; i++)
>  		spin_unlock(&tbl->pools[i].lock);
>  	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
>
Leonardo Brás Aug. 27, 2020, 6:34 p.m. UTC | #2
On Sat, 2020-08-22 at 20:34 +1000, Alexey Kardashevskiy wrote:
> > +
> > +	/*ignore reserved bit0*/
> 
> s/ignore reserved bit0/ ignore reserved bit0 /  (add spaces)

Fixed

> > +	if (tbl->it_offset == 0)
> > +		p1_start = 1;
> > +
> > +	/* Check if reserved memory is valid*/
> 
> A missing space here.

Fixed

> 
> > +	if (tbl->it_reserved_start >= tbl->it_offset &&
> > +	    tbl->it_reserved_start <= (tbl->it_offset + tbl->it_size) &&
> > +	    tbl->it_reserved_end   >= tbl->it_offset &&
> > +	    tbl->it_reserved_end   <= (tbl->it_offset + tbl->it_size)) {
> 
> Uff. What if tbl->it_reserved_end is bigger than tbl->it_offset +
> tbl->it_size?
> 
> The reserved area is to preserve MMIO32 so it is for it_offset==0 only
> and the boundaries are checked in the only callsite, and it is unlikely
> to change soon or ever.
> 
> Rather than bothering with fixing that, maybe just add (did not test):
> 
> if (WARN_ON((
> (tbl->it_reserved_start || tbl->it_reserved_end) && (it_offset != 0))
> ||
> (tbl->it_reserved_start > it_offset && tbl->it_reserved_end < it_offset
> + it_size) && (it_offset == 0)) )
>  return true;
> 
> Or simply always look for it_offset..it_reserved_start and
> it_reserved_end..it_offset+it_size and if there is no reserved area,
> initialize it_reserved_start=it_reserved_end=it_offset so the first
> it_offset..it_reserved_start becomes a no-op.

The problem here is that the values of it_reserved_{start,end} are not
necessarily valid. I mean, on iommu_table_reserve_pages() the values
are stored however they are given (bit reserving is done only if they
are valid). 

Having a it_reserved_{start,end} value outside the valid ranges would
cause find_next_bit() to run over memory outside the bitmap.
Even if those values are < tbl->it_offset, the resulting
subtraction on unsigned would cause it to become a big value and run
over memory outside the bitmap.

But I think you are right. That is not the place to check if the
reserved values are valid. It should just trust them here.
I intend to change iommu_table_reserve_pages() to only store the
parameters in it_reserved_{start,end} if they are in the range, and store
it_offset in both of them if they are not.

What do you think?

Thanks for the feedback!
Leonardo Bras
Alexey Kardashevskiy Aug. 28, 2020, 1:51 a.m. UTC | #3
On 28/08/2020 04:34, Leonardo Bras wrote:
> On Sat, 2020-08-22 at 20:34 +1000, Alexey Kardashevskiy wrote:
>>> +
>>> +	/*ignore reserved bit0*/
>>
>> s/ignore reserved bit0/ ignore reserved bit0 /  (add spaces)
> 
> Fixed
> 
>>> +	if (tbl->it_offset == 0)
>>> +		p1_start = 1;
>>> +
>>> +	/* Check if reserved memory is valid*/
>>
>> A missing space here.
> 
> Fixed
> 
>>
>>> +	if (tbl->it_reserved_start >= tbl->it_offset &&
>>> +	    tbl->it_reserved_start <= (tbl->it_offset + tbl->it_size) &&
>>> +	    tbl->it_reserved_end   >= tbl->it_offset &&
>>> +	    tbl->it_reserved_end   <= (tbl->it_offset + tbl->it_size)) {
>>
>> Uff. What if tbl->it_reserved_end is bigger than tbl->it_offset +
>> tbl->it_size?
>>
>> The reserved area is to preserve MMIO32 so it is for it_offset==0 only
>> and the boundaries are checked in the only callsite, and it is unlikely
>> to change soon or ever.
>>
>> Rather than bothering with fixing that, maybe just add (did not test):
>>
>> if (WARN_ON((
>> (tbl->it_reserved_start || tbl->it_reserved_end) && (it_offset != 0))
>> ||
>> (tbl->it_reserved_start > it_offset && tbl->it_reserved_end < it_offset
>> + it_size) && (it_offset == 0)) )
>>  return true;
>>
>> Or simply always look for it_offset..it_reserved_start and
>> it_reserved_end..it_offset+it_size and if there is no reserved area,
>> initialize it_reserved_start=it_reserved_end=it_offset so the first
>> it_offset..it_reserved_start becomes a no-op.
> 
> The problem here is that the values of it_reserved_{start,end} are not
> necessarily valid. I mean, on iommu_table_reserve_pages() the values
> are stored however they are given (bit reserving is done only if they
> are valid). 
> 
> Having a it_reserved_{start,end} value outside the valid ranges would
> cause find_next_bit() to run over memory outside the bitmap.
> Even if those values are < tbl->it_offset, the resulting
> subtraction on unsigned would cause it to become a big value and run
> over memory outside the bitmap.
> 
> But I think you are right. That is not the place to check if the
> reserved values are valid. It should just trust them here.
> I intend to change iommu_table_reserve_pages() to only store the
> parameters in it_reserved_{start,end} if they are in the range, and store
> it_offset in both of them if they are not.
> 
> What do you think?

This should work, yes.


> 
> Thanks for the feedback!
> Leonardo Bras
> 
> 
>
diff mbox series

Patch

diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index 5032f1593299..2913e5c8b1f8 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -154,6 +154,7 @@  extern int iommu_tce_table_put(struct iommu_table *tbl);
  */
 extern struct iommu_table *iommu_init_table(struct iommu_table *tbl,
 		int nid, unsigned long res_start, unsigned long res_end);
+bool iommu_table_in_use(struct iommu_table *tbl);
 
 #define IOMMU_TABLE_GROUP_MAX_TABLES	2
 
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 7f603d4e62d4..c5d5d36ab65e 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -668,21 +668,6 @@  static void iommu_table_reserve_pages(struct iommu_table *tbl,
 		set_bit(i - tbl->it_offset, tbl->it_map);
 }
 
-static void iommu_table_release_pages(struct iommu_table *tbl)
-{
-	int i;
-
-	/*
-	 * In case we have reserved the first bit, we should not emit
-	 * the warning below.
-	 */
-	if (tbl->it_offset == 0)
-		clear_bit(0, tbl->it_map);
-
-	for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
-		clear_bit(i - tbl->it_offset, tbl->it_map);
-}
-
 /*
  * Build a iommu_table structure.  This contains a bit map which
  * is used to manage allocation of the tce space.
@@ -743,6 +728,38 @@  struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
 	return tbl;
 }
 
+bool iommu_table_in_use(struct iommu_table *tbl)
+{
+	bool in_use;
+	unsigned long p1_start = 0, p1_end, p2_start, p2_end;
+
+	/*ignore reserved bit0*/
+	if (tbl->it_offset == 0)
+		p1_start = 1;
+
+	/* Check if reserved memory is valid*/
+	if (tbl->it_reserved_start >= tbl->it_offset &&
+	    tbl->it_reserved_start <= (tbl->it_offset + tbl->it_size) &&
+	    tbl->it_reserved_end   >= tbl->it_offset &&
+	    tbl->it_reserved_end   <= (tbl->it_offset + tbl->it_size)) {
+		p1_end = tbl->it_reserved_start - tbl->it_offset;
+		p2_start = tbl->it_reserved_end - tbl->it_offset + 1;
+		p2_end = tbl->it_size;
+	} else {
+		p1_end = tbl->it_size;
+		p2_start = 0;
+		p2_end = 0;
+	}
+
+	in_use = (find_next_bit(tbl->it_map, p1_end, p1_start) != p1_end);
+	if (in_use || p2_start == 0)
+		return in_use;
+
+	in_use = (find_next_bit(tbl->it_map, p2_end, p2_start) != p2_end);
+
+	return in_use;
+}
+
 static void iommu_table_free(struct kref *kref)
 {
 	unsigned long bitmap_sz;
@@ -759,10 +776,8 @@  static void iommu_table_free(struct kref *kref)
 		return;
 	}
 
-	iommu_table_release_pages(tbl);
-
 	/* verify that table contains no entries */
-	if (!bitmap_empty(tbl->it_map, tbl->it_size))
+	if (iommu_table_in_use(tbl))
 		pr_warn("%s: Unexpected TCEs\n", __func__);
 
 	/* calculate bitmap size in bytes */
@@ -1069,18 +1084,13 @@  int iommu_take_ownership(struct iommu_table *tbl)
 	for (i = 0; i < tbl->nr_pools; i++)
 		spin_lock(&tbl->pools[i].lock);
 
-	iommu_table_release_pages(tbl);
-
-	if (!bitmap_empty(tbl->it_map, tbl->it_size)) {
+	if (iommu_table_in_use(tbl)) {
 		pr_err("iommu_tce: it_map is not empty");
 		ret = -EBUSY;
-		/* Undo iommu_table_release_pages, i.e. restore bit#0, etc */
-		iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
-				tbl->it_reserved_end);
-	} else {
-		memset(tbl->it_map, 0xff, sz);
 	}
 
+	memset(tbl->it_map, 0xff, sz);
+
 	for (i = 0; i < tbl->nr_pools; i++)
 		spin_unlock(&tbl->pools[i].lock);
 	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);