diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -719,14 +719,9 @@ EXPORT_SYMBOL(phys_mem_access_prot);
#define vectors_base() (vectors_high() ? 0xffff0000 : 0)
-static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
-{
- return memblock_alloc(sz, align);
-}
-
static void __init *early_alloc(unsigned long sz)
{
- return early_alloc_aligned(sz, sz);
+ return memblock_alloc(sz, sz);
}
static void *__init late_alloc(unsigned long sz)
@@ -998,7 +993,7 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
if (!nr)
return;
- svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
+ svm = memblock_alloc(sizeof(*svm) * nr, __alignof__(*svm));
for (md = io_desc; nr; md++, nr--) {
create_mapping(md);
@@ -1020,7 +1015,7 @@ void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
struct vm_struct *vm;
struct static_vm *svm;
- svm = early_alloc_aligned(sizeof(*svm), __alignof__(*svm));
+ svm = memblock_alloc(sizeof(*svm), __alignof__(*svm));
vm = &svm->vm;
vm->addr = (void *)addr;
diff --git a/arch/unicore32/mm/mmu.c b/arch/unicore32/mm/mmu.c
--- a/arch/unicore32/mm/mmu.c
+++ b/arch/unicore32/mm/mmu.c
@@ -141,16 +141,12 @@ static void __init build_mem_type_table(void)
#define vectors_base() (vectors_high() ? 0xffff0000 : 0)
-static void __init *early_alloc(unsigned long sz)
-{
- return memblock_alloc(sz, sz);
-}
-
static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr,
unsigned long prot)
{
if (pmd_none(*pmd)) {
- pte_t *pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t));
+ pte_t *pte = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t),
+ PTRS_PER_PTE * sizeof(pte_t));
__pmd_populate(pmd, __pa(pte) | prot);
}
BUG_ON(pmd_bad(*pmd));
@@ -352,7 +348,7 @@ static void __init devicemaps_init(void)
/*
* Allocate the vector page early.
*/
- vectors = early_alloc(PAGE_SIZE);
+ vectors = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
pmd_clear(pmd_off_k(addr));
@@ -429,7 +425,7 @@ void __init paging_init(void)
top_pmd = pmd_off_k(0xffff0000);
/* allocate the zero page. */
- zero_page = early_alloc(PAGE_SIZE);
+ zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
bootmem_init();
On arm and unicore32 the early_alloc_aligned() and early_alloc() are
one-liner wrappers for memblock_alloc(). Replace their usage with
direct calls to memblock_alloc().

Suggested-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
---
 arch/arm/mm/mmu.c       | 11 +++--------
 arch/unicore32/mm/mmu.c | 12 ++++--------
 2 files changed, 7 insertions(+), 16 deletions(-)
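
A note for reviewers (not part of the patch): the replacement is purely
mechanical, because the removed wrappers only folded the alignment
argument into the size. A minimal sketch of the equivalence, taken from
the early_pte_alloc() hunk in the arm file above:

	/* The removed wrapper passed the allocation size as the
	 * alignment, so early_alloc(sz) was exactly
	 * memblock_alloc(sz, sz) from <linux/memblock.h>.
	 */
	pte_t *pte;

	/* before */
	pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t));

	/* after: the identical size-aligned allocation, spelled
	 * out at the call site
	 */
	pte = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t),
			     PTRS_PER_PTE * sizeof(pte_t));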