@@ -2287,6 +2287,7 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
}
EXPORT_SYMBOL_GPL(apply_to_page_range);

+#ifndef __HAVE_ARCH_CALL_SPF
/*
* handle_pte_fault chooses page fault handler according to an entry which was
* read non-atomically. Before making any commitment, on those architectures
@@ -2296,7 +2297,7 @@ EXPORT_SYMBOL_GPL(apply_to_page_range);
* and do_anonymous_page can safely check later on).
*/
static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
- pte_t *page_table, pte_t orig_pte)
+ pte_t *page_table, pte_t orig_pte)
{
int same = 1;
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
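/*
 * For reference (not part of the patch): the helper's middle, elided
 * between the hunks above and below, is the stock recheck under the
 * page-table lock. A sketch of the complete function, assuming the
 * v4.14 base this series applies to:
 */
static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
				 pte_t *page_table, pte_t orig_pte)
{
	int same = 1;
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
	/* A pte wider than unsigned long cannot be read atomically, so
	 * re-read and compare it under the page-table lock before
	 * trusting the non-atomic snapshot in orig_pte.
	 */
	if (sizeof(pte_t) > sizeof(unsigned long)) {
		spinlock_t *ptl = pte_lockptr(mm, pmd);

		spin_lock(ptl);
		same = pte_same(*page_table, orig_pte);
		spin_unlock(ptl);
	}
#endif
	pte_unmap(page_table);
	return same;
}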
@@ -2310,6 +2311,7 @@ static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
pte_unmap(page_table);
return same;
}
+#endif

static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
{
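/*
 * Hypothetical illustration (not from this patch): an architecture
 * opting into the speculative page fault path is assumed to advertise
 * the capability from its page-table headers, e.g. in
 * arch/<arch>/include/asm/pgtable.h:
 *
 *	#define __HAVE_ARCH_CALL_SPF
 *
 * With the macro defined, pte_unmap_same() above is compiled out: the
 * speculative handler is expected to have validated vmf->orig_pte
 * against the live page table under its own locking, so the generic
 * recheck (and the matching call in do_swap_page() below) would be
 * redundant.
 */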
@@ -2871,11 +2873,14 @@ int do_swap_page(struct vm_fault *vmf)

if (vma_readahead)
page = swap_readahead_detect(vmf, &swap_ra);
+
+#ifndef __HAVE_ARCH_CALL_SPF
if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte)) {
if (page)
put_page(page);
goto out;
}
+#endif

entry = pte_to_swp_entry(vmf->orig_pte);
if (unlikely(non_swap_entry(entry))) {