
[v5,4/4] powerpc/mm: unstub radix__vmemmap_remove_mapping()

Message ID 1484593666-8001-5-git-send-email-arbab@linux.vnet.ibm.com (mailing list archive)
State Accepted

Commit Message

Reza Arbab Jan. 16, 2017, 7:07 p.m. UTC
Use remove_pagetable() and friends for radix vmemmap removal.

We do not require the special-case handling of vmemmap done in the x86
versions of these functions. This is because vmemmap_free() has already
freed the mapped pages, and calls us with an aligned address range.

So, add a few failsafe WARNs, but otherwise the code to remove physical
mappings is already sufficient for vmemmap.

Signed-off-by: Reza Arbab <arbab@linux.vnet.ibm.com>
---
 arch/powerpc/mm/pgtable-radix.c | 29 ++++++++++++++++++++++++++++-
 1 file changed, 28 insertions(+), 1 deletion(-)
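
For context, the call path this patch completes is roughly the
following. This is a simplified sketch of the 4.10-era tree, not part
of the posted diff; the vmemmap_free() body is paraphrased and details
may differ between kernel versions.

/* arch/powerpc/mm/init_64.c (simplified) */
void __ref vmemmap_free(unsigned long start, unsigned long end)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	for (; start < end; start += page_size) {
		/* ... free the page backing this chunk of vmemmap ... */
		vmemmap_remove_mapping(start, page_size);	/* radix: see below */
	}
}

/* arch/powerpc/mm/pgtable-radix.c, after this patch */
void radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
{
	/*
	 * The caller hands us a page_size-aligned range, so the walk in
	 * remove_pagetable() never sees a partially covered entry.
	 */
	remove_pagetable(start, start + page_size);
}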

Comments

Balbir Singh Jan. 17, 2017, 7:25 a.m. UTC | #1
On Mon, Jan 16, 2017 at 01:07:46PM -0600, Reza Arbab wrote:
> Use remove_pagetable() and friends for radix vmemmap removal.
> 
> We do not require the special-case handling of vmemmap done in the x86
> versions of these functions. This is because vmemmap_free() has already
> freed the mapped pages, and calls us with an aligned address range.
> 
> So, add a few failsafe WARNs, but otherwise the code to remove physical
> mappings is already sufficient for vmemmap.

I wonder if we really need them?

Balbir Singh.
Reza Arbab Jan. 17, 2017, 6:36 p.m. UTC | #2
On Tue, Jan 17, 2017 at 12:55:13PM +0530, Balbir Singh wrote:
>On Mon, Jan 16, 2017 at 01:07:46PM -0600, Reza Arbab wrote:
>> Use remove_pagetable() and friends for radix vmemmap removal.
>>
>> We do not require the special-case handling of vmemmap done in the x86
>> versions of these functions. This is because vmemmap_free() has already
>> freed the mapped pages, and calls us with an aligned address range.
>>
>> So, add a few failsafe WARNs, but otherwise the code to remove physical
>> mappings is already sufficient for vmemmap.
>
>I wonder if we really need them?

Not sure what the guideline is for a "this shouldn't happen" WARN. It
could save us some grief, should our vmemmap code ever start calling us
with an unaligned range, like it does on x86.
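
For illustration, this is the shape of the check at each level of the
walk (PMD level shown; the full hunks are in the patch below):

	if (pmd_huge(*pmd)) {
		/*
		 * A huge PMD maps a full PMD_SIZE region. If the range
		 * covered it only partially, pte_clear() would unmap
		 * memory outside the range, so warn once and skip the
		 * entry instead. Our callers always pass aligned
		 * ranges, so this should never fire.
		 */
		if (!IS_ALIGNED(addr, PMD_SIZE) ||
		    !IS_ALIGNED(next, PMD_SIZE)) {
			WARN_ONCE(1, "%s: unaligned range\n", __func__);
			continue;
		}

		pte_clear(&init_mm, addr, (pte_t *)pmd);
		continue;
	}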
Balbir Singh Jan. 18, 2017, 1:53 a.m. UTC | #3
On Tue, Jan 17, 2017 at 12:36:53PM -0600, Reza Arbab wrote:
> On Tue, Jan 17, 2017 at 12:55:13PM +0530, Balbir Singh wrote:
> > On Mon, Jan 16, 2017 at 01:07:46PM -0600, Reza Arbab wrote:
> > > Use remove_pagetable() and friends for radix vmemmap removal.
> > > 
> > > We do not require the special-case handling of vmemmap done in the x86
> > > versions of these functions. This is because vmemmap_free() has already
> > > freed the mapped pages, and calls us with an aligned address range.
> > > 
> > > So, add a few failsafe WARNs, but otherwise the code to remove physical
> > > mappings is already sufficient for vmemmap.
> > 
> > I wonder if we really need them?
> 
> Not sure what the guideline is for a "this shouldn't happen" WARN. It could
> save us some grief, should our vmemmap code ever start calling us with an
> unaligned range, like it does on x86.
>

Fair enough

Acked-by: Balbir Singh <bsingharora@gmail.com>

Patch

diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index cfba666..6d06171 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -521,6 +521,15 @@  static void remove_pte_table(pte_t *pte_start, unsigned long addr,
 		if (!pte_present(*pte))
 			continue;
 
+		if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(next)) {
+			/*
+			 * The vmemmap_free() and remove_section_mapping()
+			 * codepaths call us with aligned addresses.
+			 */
+			WARN_ONCE(1, "%s: unaligned range\n", __func__);
+			continue;
+		}
+
 		pte_clear(&init_mm, addr, pte);
 	}
 }
@@ -540,6 +549,12 @@  static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
 			continue;
 
 		if (pmd_huge(*pmd)) {
+			if (!IS_ALIGNED(addr, PMD_SIZE) ||
+			    !IS_ALIGNED(next, PMD_SIZE)) {
+				WARN_ONCE(1, "%s: unaligned range\n", __func__);
+				continue;
+			}
+
 			pte_clear(&init_mm, addr, (pte_t *)pmd);
 			continue;
 		}
@@ -565,6 +580,12 @@  static void remove_pud_table(pud_t *pud_start, unsigned long addr,
 			continue;
 
 		if (pud_huge(*pud)) {
+			if (!IS_ALIGNED(addr, PUD_SIZE) ||
+			    !IS_ALIGNED(next, PUD_SIZE)) {
+				WARN_ONCE(1, "%s: unaligned range\n", __func__);
+				continue;
+			}
+
 			pte_clear(&init_mm, addr, (pte_t *)pud);
 			continue;
 		}
@@ -591,6 +612,12 @@  static void remove_pagetable(unsigned long start, unsigned long end)
 			continue;
 
 		if (pgd_huge(*pgd)) {
+			if (!IS_ALIGNED(addr, PGDIR_SIZE) ||
+			    !IS_ALIGNED(next, PGDIR_SIZE)) {
+				WARN_ONCE(1, "%s: unaligned range\n", __func__);
+				continue;
+			}
+
 			pte_clear(&init_mm, addr, (pte_t *)pgd);
 			continue;
 		}
@@ -630,7 +657,7 @@  int __meminit radix__vmemmap_create_mapping(unsigned long start,
 #ifdef CONFIG_MEMORY_HOTPLUG
 void radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
 {
-	/* FIXME!! intel does more. We should free page tables mapping vmemmap ? */
+	remove_pagetable(start, start + page_size);
 }
 #endif
 #endif
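
For orientation, the existing remove_pagetable() walk that these hunks
modify looks roughly like the sketch below. It is a simplified
reconstruction based on the surrounding tree, not part of the posted
diff, so details (locking, TLB flush) may vary.

static void remove_pagetable(unsigned long start, unsigned long end)
{
	unsigned long addr, next;
	pud_t *pud_base;
	pgd_t *pgd;

	spin_lock(&init_mm.page_table_lock);

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		if (pgd_huge(*pgd)) {
			/* guarded by the new PGDIR_SIZE check above */
			pte_clear(&init_mm, addr, (pte_t *)pgd);
			continue;
		}

		/*
		 * Descend: remove_pud_table() -> remove_pmd_table() ->
		 * remove_pte_table(), each with the same alignment guard.
		 */
		pud_base = (pud_t *)pgd_page_vaddr(*pgd);
		remove_pud_table(pud_base, addr, next);
	}

	spin_unlock(&init_mm.page_table_lock);
	radix__flush_tlb_kernel_range(start, end);
}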