Message ID | 20240505160628.2323363-6-rppt@kernel.org
---|---
State | New
Series | mm: jit/text allocator
On Sun, 5 May 2024 19:06:17 +0300 Mike Rapoport <rppt@kernel.org> wrote:

> From: "Mike Rapoport (IBM)" <rppt@kernel.org>
> 
> Move the logic related to the memory allocation and freeing into
> module_memory_alloc() and module_memory_free().
> 

Looks good to me.

Reviewed-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>

Thanks,

> Signed-off-by: Mike Rapoport (IBM) <rppt@kernel.org>
> Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
> ---
>  kernel/module/main.c | 64 +++++++++++++++++++++++++++-----------------
>  1 file changed, 39 insertions(+), 25 deletions(-)
> 
> diff --git a/kernel/module/main.c b/kernel/module/main.c
> index e1e8a7a9d6c1..5b82b069e0d3 100644
> --- a/kernel/module/main.c
> +++ b/kernel/module/main.c
> @@ -1203,15 +1203,44 @@ static bool mod_mem_use_vmalloc(enum mod_mem_type type)
>  		mod_mem_type_is_core_data(type);
>  }
>  
> -static void *module_memory_alloc(unsigned int size, enum mod_mem_type type)
> +static int module_memory_alloc(struct module *mod, enum mod_mem_type type)
>  {
> +	unsigned int size = PAGE_ALIGN(mod->mem[type].size);
> +	void *ptr;
> +
> +	mod->mem[type].size = size;
> +
>  	if (mod_mem_use_vmalloc(type))
> -		return vzalloc(size);
> -	return module_alloc(size);
> +		ptr = vmalloc(size);
> +	else
> +		ptr = module_alloc(size);
> +
> +	if (!ptr)
> +		return -ENOMEM;
> +
> +	/*
> +	 * The pointer to these blocks of memory are stored on the module
> +	 * structure and we keep that around so long as the module is
> +	 * around. We only free that memory when we unload the module.
> +	 * Just mark them as not being a leak then. The .init* ELF
> +	 * sections *do* get freed after boot so we *could* treat them
> +	 * slightly differently with kmemleak_ignore() and only grey
> +	 * them out as they work as typical memory allocations which
> +	 * *do* eventually get freed, but let's just keep things simple
> +	 * and avoid *any* false positives.
> +	 */
> +	kmemleak_not_leak(ptr);
> +
> +	memset(ptr, 0, size);
> +	mod->mem[type].base = ptr;
> +
> +	return 0;
>  }
>  
> -static void module_memory_free(void *ptr, enum mod_mem_type type)
> +static void module_memory_free(struct module *mod, enum mod_mem_type type)
>  {
> +	void *ptr = mod->mem[type].base;
> +
>  	if (mod_mem_use_vmalloc(type))
>  		vfree(ptr);
>  	else
> @@ -1229,12 +1258,12 @@ static void free_mod_mem(struct module *mod)
>  		/* Free lock-classes; relies on the preceding sync_rcu(). */
>  		lockdep_free_key_range(mod_mem->base, mod_mem->size);
>  		if (mod_mem->size)
> -			module_memory_free(mod_mem->base, type);
> +			module_memory_free(mod, type);
>  	}
>  
>  	/* MOD_DATA hosts mod, so free it at last */
>  	lockdep_free_key_range(mod->mem[MOD_DATA].base, mod->mem[MOD_DATA].size);
> -	module_memory_free(mod->mem[MOD_DATA].base, MOD_DATA);
> +	module_memory_free(mod, MOD_DATA);
>  }
>  
>  /* Free a module, remove from lists, etc. */
> @@ -2225,7 +2254,6 @@ static int find_module_sections(struct module *mod, struct load_info *info)
>  static int move_module(struct module *mod, struct load_info *info)
>  {
>  	int i;
> -	void *ptr;
>  	enum mod_mem_type t = 0;
>  	int ret = -ENOMEM;
>  
> @@ -2234,26 +2262,12 @@ static int move_module(struct module *mod, struct load_info *info)
>  			mod->mem[type].base = NULL;
>  			continue;
>  		}
> -		mod->mem[type].size = PAGE_ALIGN(mod->mem[type].size);
> -		ptr = module_memory_alloc(mod->mem[type].size, type);
> -		/*
> -		 * The pointer to these blocks of memory are stored on the module
> -		 * structure and we keep that around so long as the module is
> -		 * around. We only free that memory when we unload the module.
> -		 * Just mark them as not being a leak then. The .init* ELF
> -		 * sections *do* get freed after boot so we *could* treat them
> -		 * slightly differently with kmemleak_ignore() and only grey
> -		 * them out as they work as typical memory allocations which
> -		 * *do* eventually get freed, but let's just keep things simple
> -		 * and avoid *any* false positives.
> -		 */
> -		kmemleak_not_leak(ptr);
> -		if (!ptr) {
> +
> +		ret = module_memory_alloc(mod, type);
> +		if (ret) {
>  			t = type;
>  			goto out_enomem;
>  		}
> -		memset(ptr, 0, mod->mem[type].size);
> -		mod->mem[type].base = ptr;
>  	}
>  
>  	/* Transfer each section which specifies SHF_ALLOC */
> @@ -2296,7 +2310,7 @@ static int move_module(struct module *mod, struct load_info *info)
>  	return 0;
>  out_enomem:
>  	for (t--; t >= 0; t--)
> -		module_memory_free(mod->mem[t].base, t);
> +		module_memory_free(mod, t);
>  	return ret;
>  }
>  
> -- 
> 2.43.0
> 
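For readers skimming the thread, this is roughly how the two helpers read once the patch is applied. It is a sketch reconstructed from the hunks quoted above, not copied from a tree: the surrounding kernel definitions (struct module, enum mod_mem_type, PAGE_ALIGN(), module_alloc(), mod_mem_use_vmalloc(), kmemleak_not_leak()) are assumed, and the else branch of module_memory_free() is not visible in the quoted context, so it is flagged below.

```c
/*
 * Sketch of the helpers after this patch, reconstructed from the diff above.
 * Surrounding kernel context is assumed, not reproduced here.
 */
static int module_memory_alloc(struct module *mod, enum mod_mem_type type)
{
	unsigned int size = PAGE_ALIGN(mod->mem[type].size);
	void *ptr;

	/* Size rounding and bookkeeping now live here, not in move_module(). */
	mod->mem[type].size = size;

	if (mod_mem_use_vmalloc(type))
		ptr = vmalloc(size);
	else
		ptr = module_alloc(size);

	if (!ptr)
		return -ENOMEM;

	/* Module memory is freed only at unload; don't report it as a leak. */
	kmemleak_not_leak(ptr);

	memset(ptr, 0, size);
	mod->mem[type].base = ptr;

	return 0;
}

static void module_memory_free(struct module *mod, enum mod_mem_type type)
{
	/* The base pointer is now looked up from the module itself. */
	void *ptr = mod->mem[type].base;

	if (mod_mem_use_vmalloc(type))
		vfree(ptr);
	else
		module_memfree(ptr);	/* unchanged path, not shown in the quoted hunk */
}
```

One small side effect visible in the diff: the old move_module() code called kmemleak_not_leak(ptr) before checking for a failed allocation, whereas the consolidated helper zeroes and annotates the memory only after the NULL check, and the caller is left with a plain error code to unwind on.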