@@ -47,8 +47,6 @@ static inline void pgtable_free(void *table, unsigned index_size)
	}
}

-#define get_hugepd_cache_index(x) (x)
-
static inline void pgtable_free_tlb(struct mmu_gather *tlb,
				    void *table, int shift)
{
@@ -30,10 +30,12 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
}
#define is_hugepage_only_range is_hugepage_only_range

+#ifdef CONFIG_ARCH_HAS_HUGEPD
#define __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE
void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);
+#endif

#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
@@ -67,14 +69,6 @@ static inline void flush_hugetlb_page(struct vm_area_struct *vma,
{
}
-#define hugepd_shift(x) 0
-static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
-				    unsigned pdshift)
-{
-	return NULL;
-}
-
-
static inline void __init gigantic_hugetlb_cma_reserve(void)
{
}
@@ -340,7 +340,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
#define pgprot_writecombine pgprot_noncached_wc
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_ARCH_HAS_HUGEPD
static inline int hugepd_ok(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
@@ -42,6 +42,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long s
	return __find_linux_pte(mm->pgd, addr, NULL, NULL);
}

+#ifdef CONFIG_ARCH_HAS_HUGEPD
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address, unsigned int pdshift,
			   unsigned int pshift, spinlock_t *ptl)
@@ -193,6 +194,36 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
	return hugepte_offset(*hpdp, addr, pdshift);
}
+#else
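+/*
+ * Fallback for platforms without hugepd: a huge page lives at the regular
+ * page table level whose span matches its size, so walk/allocate down to
+ * that level and return the entry there as a pte_t *.
+ */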
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
+		      unsigned long addr, unsigned long sz)
+{
+	p4d_t *p4d;
+	pud_t *pud;
+	pmd_t *pmd;
+
+	addr &= ~(sz - 1);
+
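+	/* With a non-folded PUD level, P4D_SIZE pages map a single P4D entry */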
+	p4d = p4d_offset(pgd_offset(mm, addr), addr);
+	if (!mm_pud_folded(mm) && sz >= P4D_SIZE)
+		return (pte_t *)p4d;
+
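+	/* With a non-folded PMD level, PUD_SIZE pages map a single PUD entry */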
+	pud = pud_alloc(mm, p4d, addr);
+	if (!pud)
+		return NULL;
+	if (!mm_pmd_folded(mm) && sz >= PUD_SIZE)
+		return (pte_t *)pud;
+
+	pmd = pmd_alloc(mm, pud, addr);
+	if (!pmd)
+		return NULL;
+
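+	/* PMD_SIZE pages map a single PMD entry */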
+	if (sz >= PMD_SIZE)
+		return (pte_t *)pmd;
+
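+	/* Smaller huge pages become huge PTEs in a regular PTE table */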
+	return pte_alloc_huge(mm, pmd, addr);
+}
+#endif /* CONFIG_ARCH_HAS_HUGEPD */

#ifdef CONFIG_PPC_BOOK3S_64
/*
@@ -248,6 +279,7 @@ int __init alloc_bootmem_huge_page(struct hstate *h, int nid)
	return __alloc_bootmem_huge_page(h, nid);
}

+#ifdef CONFIG_ARCH_HAS_HUGEPD
#ifndef CONFIG_PPC_BOOK3S_64
#define HUGEPD_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))
@@ -505,6 +537,7 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
		}
	} while (addr = next, addr != end);
}
+#endif /* CONFIG_ARCH_HAS_HUGEPD */

bool __init arch_hugetlb_valid_size(unsigned long size)
{
@@ -496,8 +496,10 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
	if (!hpdp)
		return NULL;

+#ifdef CONFIG_ARCH_HAS_HUGEPD
	ret_pte = hugepte_offset(*hpdp, ea, pdshift);
	pdshift = hugepd_shift(*hpdp);
+#endif
out:
	if (hpage_shift)
		*hpage_shift = pdshift;