@@ -14,9 +14,9 @@
*/
static inline void
-pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte)
+pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct ptdesc *pte)
{
- pmd_set(pmd, (pte_t *)(page_to_pa(pte) + PAGE_OFFSET));
+ pmd_set(pmd, (pte_t *)(page_to_pa(ptdesc_page(pte)) + PAGE_OFFSET));
}
static inline void
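
For reference, the three ptdesc accessors these hunks rely on are thin typed conversions over struct page. A simplified sketch of the include/linux/mm.h helpers (the real ones are _Generic macros with const variants and go via folios, but this is the effective behaviour):

	/* struct ptdesc is laid out over struct page, so conversion is a cast */
	static inline struct page *ptdesc_page(struct ptdesc *pt)
	{
		return (struct page *)pt;
	}

	static inline unsigned long ptdesc_pfn(struct ptdesc *pt)
	{
		return page_to_pfn(ptdesc_page(pt));
	}

	/* kernel virtual address of the page table itself */
	static inline void *ptdesc_address(struct ptdesc *pt)
	{
		return page_address(ptdesc_page(pt));
	}
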
@@ -46,9 +46,9 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
set_pmd(pmd, __pmd((unsigned long)pte));
}
-static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte_page)
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct ptdesc *pte)
{
- set_pmd(pmd, __pmd((unsigned long)page_address(pte_page)));
+ set_pmd(pmd, __pmd((unsigned long)ptdesc_address(pte)));
}
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
@@ -130,7 +130,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
}
static inline void
-pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
+pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct ptdesc *ptep)
{
extern pmdval_t user_pmd_table;
pmdval_t prot;
@@ -140,7 +140,7 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
else
prot = _PAGE_USER_TABLE;
- __pmd_populate(pmdp, page_to_phys(ptep), prot);
+ __pmd_populate(pmdp, page_to_phys(ptdesc_page(ptep)), prot);
}
#endif /* CONFIG_MMU */
@@ -131,10 +131,10 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
}
static inline void
-pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
+pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct ptdesc *ptep)
{
VM_BUG_ON(mm == &init_mm);
- __pmd_populate(pmdp, page_to_phys(ptep), PMD_TYPE_TABLE | PMD_TABLE_PXN);
+ __pmd_populate(pmdp, page_to_phys(ptdesc_page(ptep)), PMD_TYPE_TABLE | PMD_TABLE_PXN);
}
#endif
@@ -42,13 +42,13 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
}
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
- pgtable_t pte)
+ struct ptdesc *pte)
{
/*
* Conveniently, zero in 3 LSB means indirect 4K page table.
* Not so convenient when you're trying to vary the page size.
*/
- set_pmd(pmd, __pmd(((unsigned long)page_to_pfn(pte) << PAGE_SHIFT) |
+ set_pmd(pmd, __pmd(((unsigned long)ptdesc_pfn(pte) << PAGE_SHIFT) |
HEXAGON_L1_PTE_SIZE));
}
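
Most of these per-arch bodies reduce to the same computation: the table's page frame number lands in the high bits of the entry, and a type tag sits in the low bits that 4 KiB alignment leaves zero. A self-contained illustration, with invented EX_* constants standing in for the arch-specific ones (Hexagon's real tag is HEXAGON_L1_PTE_SIZE):

	#include <assert.h>
	#include <stdint.h>

	#define EX_PAGE_SHIFT	12	/* 4 KiB pages */
	#define EX_TABLE_TAG	0x1UL	/* stand-in for the table-type bits */

	static uint64_t ex_make_pmd_entry(uint64_t pfn)
	{
		/* pfn << 12 is the table's physical address; the tag fits in
		   the low bits alignment guarantees are zero, so the two
		   never collide */
		return (pfn << EX_PAGE_SHIFT) | EX_TABLE_TAG;
	}

	int main(void)
	{
		assert(ex_make_pmd_entry(0x12345) == 0x12345001UL);
		return 0;
	}
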
@@ -18,9 +18,9 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
set_pmd(pmd, __pmd((unsigned long)pte));
}
-static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte)
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct ptdesc *pte)
{
- set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
+ set_pmd(pmd, __pmd((unsigned long)ptdesc_address(pte)));
}
#ifndef __PAGETABLE_PMD_FOLDED
@@ -84,9 +84,9 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *
pmd_set(pmd, pte);
}
-static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page)
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct ptdesc *page)
{
- pmd_set(pmd, page);
+ pmd_set(pmd, ptdesc_page(page));
}
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
@@ -28,9 +28,9 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *
pmd_val(*pmd) = __pa((unsigned long)pte);
}
-static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page)
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct ptdesc *ptdesc)
{
- pmd_val(*pmd) = __pa((unsigned long)page_address(page));
+ pmd_val(*pmd) = __pa((unsigned long)ptdesc_address(ptdesc));
}
/*
@@ -33,7 +33,7 @@ extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, (pte))
#define pmd_populate(mm, pmd, pte) \
- (pmd_val(*(pmd)) = (unsigned long)page_address(pte))
+ (pmd_val(*(pmd)) = (unsigned long)ptdesc_address(pte))
#define pmd_populate_kernel(mm, pmd, pte) \
(pmd_val(*(pmd)) = (unsigned long) (pte))
@@ -25,9 +25,9 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
}
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
- pgtable_t pte)
+ struct ptdesc *pte)
{
- set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
+ set_pmd(pmd, __pmd((unsigned long)ptdesc_address(pte)));
}
/*
@@ -21,9 +21,9 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
}
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
- pgtable_t pte)
+ struct ptdesc *pte)
{
- set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
+ set_pmd(pmd, __pmd((unsigned long)ptdesc_address(pte)));
}
extern pgd_t *pgd_alloc(struct mm_struct *mm);
@@ -29,10 +29,10 @@ extern int mem_init_done;
set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)))
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
- struct page *pte)
+ struct ptdesc *pte)
{
set_pmd(pmd, __pmd(_KERNPG_TABLE +
- ((unsigned long)page_to_pfn(pte) <<
+ ((unsigned long)ptdesc_pfn(pte) <<
(unsigned long) PAGE_SHIFT)));
}
@@ -68,6 +68,6 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
}
#define pmd_populate(mm, pmd, pte_page) \
- pmd_populate_kernel(mm, pmd, page_address(pte_page))
+ pmd_populate_kernel(mm, pmd, ptdesc_address(pte_page))
#endif
@@ -32,7 +32,7 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
}
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
- pgtable_t pte_page)
+ struct ptdesc *pte_page)
{
*pmdp = __pmd(__pa(pte_page) | _PMD_PRESENT);
}
@@ -27,9 +27,9 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
}
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
- pgtable_t pte)
+ struct ptdesc *pte)
{
- set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
+ set_pmd(pmd, __pmd((unsigned long)ptdesc_address(pte)));
}
#define __pte_free_tlb(tlb, pte, addr) \
@@ -50,7 +50,7 @@ static inline void free_pmd_fast(pmd_t * pmd)
#define pmd_free(mm, pmd) free_pmd_fast(pmd)
#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
-#define pmd_populate(mm, pmd, pte) pmd_set(pmd, pte)
+#define pmd_populate(mm, pmd, pte) pmd_set(pmd, (pte_t *)pte)
void pmd_set(pmd_t *pmdp, pte_t *ptep);
#define pmd_populate_kernel pmd_populate
@@ -76,9 +76,9 @@ static inline void pmd_populate_kernel_safe(struct mm_struct *mm,
}
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
- struct page *pte)
+ struct ptdesc *pte)
{
- unsigned long pfn = page_to_pfn(pte);
+ unsigned long pfn = ptdesc_pfn(pte);
paravirt_alloc_pte(mm, pfn);
set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
@@ -645,7 +645,7 @@ static void __init pmd_populate_tests(struct pgtable_debug_args *args)
* This entry points to next level page table page.
* Hence this must not qualify as pmd_bad().
*/
- pmd_populate(args->mm, args->pmdp, args->start_ptep);
+ pmd_populate(args->mm, args->pmdp, (struct ptdesc *)args->start_ptep);
pmd = READ_ONCE(*args->pmdp);
WARN_ON(pmd_bad(pmd));
}
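
Note the generic call sites in the mm/ hunks (above and below) convert pgtable_t to ptdesc with a plain cast rather than page_ptdesc(). That looks deliberate: pgtable_t is struct page * on most architectures but pte_t * on some (powerpc uses page-table fragments, for instance), and page_ptdesc() — approximately the following _Generic macro — only accepts struct page *:

	#define page_ptdesc(p)	(_Generic((p),				\
		const struct page *:	(const struct ptdesc *)(p),	\
		struct page *:		(struct ptdesc *)(p)))

The raw cast instead round-trips whatever pointer pgtable_t holds, for the architecture's pmd_populate() to reinterpret.
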
@@ -2365,7 +2365,7 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
ptdesc = pgtable_trans_huge_withdraw(mm, pmd);
- pmd_populate(mm, &_pmd, ptdesc_page(ptdesc));
+ pmd_populate(mm, &_pmd, ptdesc);
pte = pte_offset_map(&_pmd, haddr);
VM_BUG_ON(!pte);
@@ -2382,7 +2382,7 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
}
pte_unmap(pte - 1);
smp_wmb(); /* make pte visible before pmd */
- pmd_populate(mm, pmd, ptdesc_page(ptdesc));
+ pmd_populate(mm, pmd, ptdesc);
}
static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
@@ -2537,7 +2537,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
* This's critical for some architectures (Power).
*/
ptdesc = pgtable_trans_huge_withdraw(mm, pmd);
- pmd_populate(mm, &_pmd, ptdesc_page(ptdesc));
+ pmd_populate(mm, &_pmd, ptdesc);
pte = pte_offset_map(&_pmd, haddr);
VM_BUG_ON(!pte);
@@ -2602,7 +2602,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
put_page(page);
smp_wmb(); /* make pte visible before pmd */
- pmd_populate(mm, pmd, ptdesc_page(ptdesc));
+ pmd_populate(mm, pmd, ptdesc);
}
void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
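
The huge_memory.c hunks above assume the THP deposit/withdraw pair already traffics in ptdescs — presumably converted earlier in the series — which is why the withdrawn table can be handed straight to pmd_populate(). The implied signatures, inferred from the context lines rather than shown in this diff:

	void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
					struct ptdesc *ptdesc);
	struct ptdesc *pgtable_trans_huge_withdraw(struct mm_struct *mm,
						   pmd_t *pmdp);
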
@@ -769,7 +769,7 @@ static void __collapse_huge_page_copy_failed(pte_t *pte,
* acquiring anon_vma_lock_write is unnecessary.
*/
pmd_ptl = pmd_lock(vma->vm_mm, pmd);
- pmd_populate(vma->vm_mm, pmd, pmd_pgtable(orig_pmd));
+ pmd_populate(vma->vm_mm, pmd, (struct ptdesc *)pmd_pgtable(orig_pmd));
spin_unlock(pmd_ptl);
/*
* Release both raw and compound pages isolated
@@ -1198,7 +1198,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
* hugepmds and never for establishing regular pmds that
* points to regular pagetables. Use pmd_populate for that
*/
- pmd_populate(mm, pmd, pmd_pgtable(_pmd));
+ pmd_populate(mm, pmd, (struct ptdesc *)pmd_pgtable(_pmd));
spin_unlock(pmd_ptl);
anon_vma_unlock_write(vma->anon_vma);
goto out_up_write;
@@ -438,7 +438,7 @@ void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
* smp_rmb() barriers in page table walking code.
*/
smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
- pmd_populate(mm, pmd, *pte);
+ pmd_populate(mm, pmd, (struct ptdesc *)(*pte));
*pte = NULL;
}
spin_unlock(ptl);
@@ -283,7 +283,7 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
VM_BUG_ON(!pmd_none(*new_pmd));
- pmd_populate(mm, new_pmd, pmd_pgtable(pmd));
+ pmd_populate(mm, new_pmd, (struct ptdesc *)pmd_pgtable(pmd));
flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
if (new_ptl != old_ptl)
spin_unlock(new_ptl);
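
End to end, a caller that allocates and installs a PTE table would look roughly like this after the conversion. A sketch only: alloc_pte_ptdesc() is a hypothetical helper standing in for whatever the allocation side of the series provides.

	/* alloc_pte_ptdesc() is invented for this illustration */
	static int example_install_pte_table(struct mm_struct *mm, pmd_t *pmd)
	{
		struct ptdesc *ptdesc = alloc_pte_ptdesc(mm);

		if (!ptdesc)
			return -ENOMEM;

		smp_wmb();	/* make the table visible before the pmd entry */
		pmd_populate(mm, pmd, ptdesc);
		return 0;
	}
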