--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -292,7 +292,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 			return NULL;
 
 		WARN_ON(addr & (sz - 1));
-		ptep = pte_alloc_huge(mm, pmdp, addr);
+		ptep = pte_alloc_huge(mm, pmdp, addr, sz);
 	} else if (sz == PMD_SIZE) {
 		if (want_pmd_share(vma, addr) && pud_none(READ_ONCE(*pudp)))
 			ptep = huge_pmd_share(mm, vma, addr, pudp);
--- a/arch/parisc/mm/hugetlbpage.c
+++ b/arch/parisc/mm/hugetlbpage.c
@@ -66,7 +66,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (pud) {
 		pmd = pmd_alloc(mm, pud, addr);
 		if (pmd)
-			pte = pte_alloc_huge(mm, pmd, addr);
+			pte = pte_alloc_huge(mm, pmd, addr, sz);
 	}
 	return pte;
 }
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -183,7 +183,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 		return NULL;
 
 	if (IS_ENABLED(CONFIG_PPC_8xx) && pshift < PMD_SHIFT)
-		return pte_alloc_huge(mm, (pmd_t *)hpdp, addr);
+		return pte_alloc_huge(mm, (pmd_t *)hpdp, addr, sz);
 
 	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));
 
--- a/arch/riscv/mm/hugetlbpage.c
+++ b/arch/riscv/mm/hugetlbpage.c
@@ -67,7 +67,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 
 	for_each_napot_order(order) {
 		if (napot_cont_size(order) == sz) {
-			pte = pte_alloc_huge(mm, pmd, addr & napot_cont_mask(order));
+			pte = pte_alloc_huge(mm, pmd, addr & napot_cont_mask(order), sz);
 			break;
 		}
 	}
--- a/arch/sh/mm/hugetlbpage.c
+++ b/arch/sh/mm/hugetlbpage.c
@@ -38,7 +38,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 			if (pud) {
 				pmd = pmd_alloc(mm, pud, addr);
 				if (pmd)
-					pte = pte_alloc_huge(mm, pmd, addr);
+					pte = pte_alloc_huge(mm, pmd, addr, sz);
 			}
 		}
 	}
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -298,7 +298,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 		return NULL;
 	if (sz >= PMD_SIZE)
 		return (pte_t *)pmd;
-	return pte_alloc_huge(mm, pmd, addr);
+	return pte_alloc_huge(mm, pmd, addr, sz);
 }
 
 pte_t *huge_pte_offset(struct mm_struct *mm,
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -193,9 +193,9 @@ static inline pte_t *pte_offset_huge(pmd_t *pmd, unsigned long address)
 	return pte_offset_kernel(pmd, address);
 }
 static inline pte_t *pte_alloc_huge(struct mm_struct *mm, pmd_t *pmd,
-				    unsigned long address)
+				    unsigned long address, unsigned long sz)
 {
-	return pte_alloc(mm, pmd) ? NULL : pte_offset_huge(pmd, address);
+	return pte_alloc_size(mm, pmd, sz) ? NULL : pte_offset_huge(pmd, address);
 }
 #endif
 
In order to be able to flag the PMD entry with _PMD_HUGE_8M on powerpc 8xx,
provide page size to pte_alloc_huge() and use it through the newly
introduced pte_alloc_size().

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
---
 arch/arm64/mm/hugetlbpage.c   | 2 +-
 arch/parisc/mm/hugetlbpage.c  | 2 +-
 arch/powerpc/mm/hugetlbpage.c | 2 +-
 arch/riscv/mm/hugetlbpage.c   | 2 +-
 arch/sh/mm/hugetlbpage.c      | 2 +-
 arch/sparc/mm/hugetlbpage.c   | 2 +-
 include/linux/hugetlb.h       | 4 ++--
 7 files changed, 8 insertions(+), 8 deletions(-)
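Note for readers outside the series: pte_alloc_size() itself is introduced by an
earlier patch and is not shown in this diff, so the sketch below is only a guess
at how a size-aware helper of that shape could let the 8xx flag the PMD once it
knows the mapping is 8M. The helper name, the direct PMD update and the exact
condition are assumptions for illustration, not the series' actual implementation;
only pte_alloc(), pte_offset_huge(), SZ_8M and the _PMD_HUGE_8M flag named in the
commit message come from existing code or this patch description.

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/sizes.h>

/*
 * Illustration only, not part of the patch: a size-aware PTE-table
 * allocation helper of the shape the commit message describes.
 */
static pte_t *example_pte_alloc_size(struct mm_struct *mm, pmd_t *pmdp,
				     unsigned long addr, unsigned long sz)
{
	/* Allocate the PTE page table, as pte_alloc_huge() did before. */
	if (pte_alloc(mm, pmdp))
		return NULL;

	/*
	 * Assumed 8xx-specific step: now that the page size is visible at
	 * this level, an 8M mapping can flag the PMD entry itself as huge.
	 */
	if (IS_ENABLED(CONFIG_PPC_8xx) && sz == SZ_8M)
		*pmdp = __pmd(pmd_val(*pmdp) | _PMD_HUGE_8M);

	return pte_offset_huge(pmdp, addr);
}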