Message ID:    1543852035-26634-7-git-send-email-rppt@linux.ibm.com
State:         Not Applicable
Delegated to:  David Miller
Series:        memblock: simplify several early memory allocation
On Mon, Dec 3, 2018 at 9:48 AM Mike Rapoport <rppt@linux.ibm.com> wrote:
>
> On arm and unicore32 the early_alloc_aligned() and early_alloc() are
> one-line wrappers for memblock_alloc.
>
> Replace their usage with direct calls to memblock_alloc.
>
> Suggested-by: Christoph Hellwig <hch@infradead.org>
> Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
> ---
>  arch/arm/mm/mmu.c       | 11 +++--------
>  arch/unicore32/mm/mmu.c | 12 ++++--------
>  2 files changed, 7 insertions(+), 16 deletions(-)
>
> diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
> index 0a04c9a5..57de0dd 100644
> --- a/arch/arm/mm/mmu.c
> +++ b/arch/arm/mm/mmu.c
> @@ -719,14 +719,9 @@ EXPORT_SYMBOL(phys_mem_access_prot);
>
>  #define vectors_base() (vectors_high() ? 0xffff0000 : 0)
>
> -static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
> -{
> -        return memblock_alloc(sz, align);
> -}
> -
>  static void __init *early_alloc(unsigned long sz)

Why not get rid of this wrapper like you do on unicore?

>  {
> -        return early_alloc_aligned(sz, sz);
> +        return memblock_alloc(sz, sz);
>  }
On Mon, Dec 03, 2018 at 10:27:02AM -0600, Rob Herring wrote:
> On Mon, Dec 3, 2018 at 9:48 AM Mike Rapoport <rppt@linux.ibm.com> wrote:
> >
> > On arm and unicore32 the early_alloc_aligned() and early_alloc() are
> > one-line wrappers for memblock_alloc.
> >
> > Replace their usage with direct calls to memblock_alloc.
> >
> > Suggested-by: Christoph Hellwig <hch@infradead.org>
> > Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
> > ---
> >  arch/arm/mm/mmu.c       | 11 +++--------
> >  arch/unicore32/mm/mmu.c | 12 ++++--------
> >  2 files changed, 7 insertions(+), 16 deletions(-)
> >
> > diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
> > index 0a04c9a5..57de0dd 100644
> > --- a/arch/arm/mm/mmu.c
> > +++ b/arch/arm/mm/mmu.c
> > @@ -719,14 +719,9 @@ EXPORT_SYMBOL(phys_mem_access_prot);
> >
> >  #define vectors_base() (vectors_high() ? 0xffff0000 : 0)
> >
> > -static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
> > -{
> > -        return memblock_alloc(sz, align);
> > -}
> > -
> >  static void __init *early_alloc(unsigned long sz)
>
> Why not get rid of this wrapper like you do on unicore?

ARM has early_alloc() and late_alloc() callbacks which in the end are passed
as a parameter to alloc_init_pXd() functions. Removing early_alloc() would
require refactoring all the page table allocation code.

> >  {
> > -        return early_alloc_aligned(sz, sz);
> > +        return memblock_alloc(sz, sz);
> >  }
>
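For context, the callback plumbing Mike describes can be illustrated with a
small standalone sketch. This is not the actual arch/arm/mm/mmu.c code:
alloc_init_table() is an illustrative stand-in for the alloc_init_pXd()
chain, and the two allocators only mimic the real early_alloc()/late_alloc()
pair. The point is just that the allocator is handed down as a function
pointer, so early_alloc() has to keep its single-argument signature.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *early_alloc(unsigned long sz)
{
        /* Stand-in for memblock_alloc(sz, sz): size-aligned, zeroed. */
        void *p = aligned_alloc(sz, sz);
        if (p)
                memset(p, 0, sz);
        return p;
}

static void *late_alloc(unsigned long sz)
{
        /* Stand-in for the post-init allocation path. */
        return calloc(1, sz);
}

/*
 * Illustrative stand-in for the alloc_init_pXd() functions: the allocator
 * arrives as a function pointer, so both allocators above must share the
 * same signature.
 */
static void alloc_init_table(unsigned long sz, void *(*alloc)(unsigned long))
{
        void *table = alloc(sz);

        printf("allocated %lu-byte table at %p\n", sz, table);
        free(table);
}

int main(void)
{
        alloc_init_table(4096, early_alloc);   /* early boot path */
        alloc_init_table(4096, late_alloc);    /* late (post-init) path */
        return 0;
}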
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 0a04c9a5..57de0dd 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -719,14 +719,9 @@ EXPORT_SYMBOL(phys_mem_access_prot);
 
 #define vectors_base() (vectors_high() ? 0xffff0000 : 0)
 
-static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
-{
-        return memblock_alloc(sz, align);
-}
-
 static void __init *early_alloc(unsigned long sz)
 {
-        return early_alloc_aligned(sz, sz);
+        return memblock_alloc(sz, sz);
 }
 
 static void *__init late_alloc(unsigned long sz)
@@ -998,7 +993,7 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
         if (!nr)
                 return;
 
-        svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
+        svm = memblock_alloc(sizeof(*svm) * nr, __alignof__(*svm));
 
         for (md = io_desc; nr; md++, nr--) {
                 create_mapping(md);
@@ -1020,7 +1015,7 @@ void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
         struct vm_struct *vm;
         struct static_vm *svm;
 
-        svm = early_alloc_aligned(sizeof(*svm), __alignof__(*svm));
+        svm = memblock_alloc(sizeof(*svm), __alignof__(*svm));
 
         vm = &svm->vm;
         vm->addr = (void *)addr;
diff --git a/arch/unicore32/mm/mmu.c b/arch/unicore32/mm/mmu.c
index 50d8c1a..a402192 100644
--- a/arch/unicore32/mm/mmu.c
+++ b/arch/unicore32/mm/mmu.c
@@ -141,16 +141,12 @@ static void __init build_mem_type_table(void)
 
 #define vectors_base() (vectors_high() ? 0xffff0000 : 0)
 
-static void __init *early_alloc(unsigned long sz)
-{
-        return memblock_alloc(sz, sz);
-}
-
 static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr,
                                       unsigned long prot)
 {
         if (pmd_none(*pmd)) {
-                pte_t *pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t));
+                pte_t *pte = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t),
+                                            PTRS_PER_PTE * sizeof(pte_t));
                 __pmd_populate(pmd, __pa(pte) | prot);
         }
         BUG_ON(pmd_bad(*pmd));
@@ -352,7 +348,7 @@ static void __init devicemaps_init(void)
         /*
          * Allocate the vector page early.
          */
-        vectors = early_alloc(PAGE_SIZE);
+        vectors = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 
         for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
                 pmd_clear(pmd_off_k(addr));
@@ -429,7 +425,7 @@ void __init paging_init(void)
         top_pmd = pmd_off_k(0xffff0000);
 
         /* allocate the zero page. */
-        zero_page = early_alloc(PAGE_SIZE);
+        zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 
         bootmem_init();
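One detail the conversion preserves: the two removed wrappers chose their
alignment differently. early_alloc(sz) used the allocation size itself as
the alignment, which the page-table and page allocations want, while the
iotable_init()/vm_reserve_area_early() call sites only need the natural
alignment of struct static_vm, so they pass __alignof__ instead. The tiny
standalone sketch below (struct static_vm_like is a hypothetical stand-in,
not the kernel struct) just prints the two alignment choices to make the
distinction concrete.

#include <stdio.h>
#include <stdalign.h>

/* Hypothetical stand-in for struct static_vm; only its alignment matters. */
struct static_vm_like {
        void *addr;
        unsigned long size;
};

int main(void)
{
        unsigned long page_size = 4096;

        /* Pages and page tables: early_alloc(sz) aligned to the size,
         * which memblock_alloc(sz, sz) reproduces. */
        printf("page:      size %lu, align %lu\n", page_size, page_size);

        /* Bookkeeping structs: the iotable_init()/vm_reserve_area_early()
         * sites only ask for the struct's natural alignment. */
        printf("static_vm: size %zu, align %zu\n",
               sizeof(struct static_vm_like),
               (size_t)alignof(struct static_vm_like));
        return 0;
}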
On arm and unicore32 the early_alloc_aligned() and early_alloc() are
one-line wrappers for memblock_alloc.

Replace their usage with direct calls to memblock_alloc.

Suggested-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
---
 arch/arm/mm/mmu.c       | 11 +++--------
 arch/unicore32/mm/mmu.c | 12 ++++--------
 2 files changed, 7 insertions(+), 16 deletions(-)