| Message ID | 20240404154402.3581254-5-aleksander.lobakin@intel.com |
|---|---|
| State | Handled Elsewhere |
| Series | net: intel: start The Great Code Dedup + Page Pool for iavf |
On 4/4/24 17:43, Alexander Lobakin wrote:
> Add NUMA-aware counterparts for kvmalloc_array() and kvcalloc() to be
> able to flexibly allocate arrays for a particular node.
> Rewrite kvmalloc_array() to a kvmalloc_array_node(NUMA_NO_NODE) call.
>
> Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
> ---
>  include/linux/slab.h | 17 +++++++++++++++--
>  1 file changed, 15 insertions(+), 2 deletions(-)
>
> diff --git a/include/linux/slab.h b/include/linux/slab.h
> index e53cbfa18325..d1d1fa5e7983 100644
> --- a/include/linux/slab.h
> +++ b/include/linux/slab.h
> @@ -774,14 +774,27 @@ static inline __alloc_size(1) void *kvzalloc(size_t size, gfp_t flags)
>  	return kvmalloc(size, flags | __GFP_ZERO);
>  }
>
> -static inline __alloc_size(1, 2) void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
> +static inline __alloc_size(1, 2) void *
> +kvmalloc_array_node(size_t n, size_t size, gfp_t flags, int node)
>  {
>  	size_t bytes;
>
>  	if (unlikely(check_mul_overflow(n, size, &bytes)))
>  		return NULL;
>
> -	return kvmalloc(bytes, flags);
> +	return kvmalloc_node(bytes, flags, node);
> +}
> +
> +static inline __alloc_size(1, 2) void *
> +kvmalloc_array(size_t n, size_t size, gfp_t flags)
> +{
> +	return kvmalloc_array_node(n, size, flags, NUMA_NO_NODE);
> +}
> +
> +static inline __alloc_size(1, 2) void *
> +kvcalloc_node(size_t n, size_t size, gfp_t flags, int node)
> +{
> +	return kvmalloc_array_node(n, size, flags | __GFP_ZERO, node);
>  }
>
>  static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t flags)

Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
On 4/4/24 5:43 PM, Alexander Lobakin wrote:
> Add NUMA-aware counterparts for kvmalloc_array() and kvcalloc() to be
> able to flexibly allocate arrays for a particular node.
> Rewrite kvmalloc_array() to a kvmalloc_array_node(NUMA_NO_NODE) call.
>
> Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>

Acked-by: Vlastimil Babka <vbabka@suse.cz>

This will, however, cause some conflicts with the alloc tagging series in the mm tree in linux-next, and the new wrappers will have to be adjusted.

> ---
>  include/linux/slab.h | 17 +++++++++++++++--
>  1 file changed, 15 insertions(+), 2 deletions(-)
>
> diff --git a/include/linux/slab.h b/include/linux/slab.h
> index e53cbfa18325..d1d1fa5e7983 100644
> --- a/include/linux/slab.h
> +++ b/include/linux/slab.h
> @@ -774,14 +774,27 @@ static inline __alloc_size(1) void *kvzalloc(size_t size, gfp_t flags)
>  	return kvmalloc(size, flags | __GFP_ZERO);
>  }
>
> -static inline __alloc_size(1, 2) void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
> +static inline __alloc_size(1, 2) void *
> +kvmalloc_array_node(size_t n, size_t size, gfp_t flags, int node)
>  {
>  	size_t bytes;
>
>  	if (unlikely(check_mul_overflow(n, size, &bytes)))
>  		return NULL;
>
> -	return kvmalloc(bytes, flags);
> +	return kvmalloc_node(bytes, flags, node);
> +}
> +
> +static inline __alloc_size(1, 2) void *
> +kvmalloc_array(size_t n, size_t size, gfp_t flags)
> +{
> +	return kvmalloc_array_node(n, size, flags, NUMA_NO_NODE);
> +}
> +
> +static inline __alloc_size(1, 2) void *
> +kvcalloc_node(size_t n, size_t size, gfp_t flags, int node)
> +{
> +	return kvmalloc_array_node(n, size, flags | __GFP_ZERO, node);
>  }
>
>  static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t flags)
```diff
diff --git a/include/linux/slab.h b/include/linux/slab.h
index e53cbfa18325..d1d1fa5e7983 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -774,14 +774,27 @@ static inline __alloc_size(1) void *kvzalloc(size_t size, gfp_t flags)
 	return kvmalloc(size, flags | __GFP_ZERO);
 }
 
-static inline __alloc_size(1, 2) void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
+static inline __alloc_size(1, 2) void *
+kvmalloc_array_node(size_t n, size_t size, gfp_t flags, int node)
 {
 	size_t bytes;
 
 	if (unlikely(check_mul_overflow(n, size, &bytes)))
 		return NULL;
 
-	return kvmalloc(bytes, flags);
+	return kvmalloc_node(bytes, flags, node);
+}
+
+static inline __alloc_size(1, 2) void *
+kvmalloc_array(size_t n, size_t size, gfp_t flags)
+{
+	return kvmalloc_array_node(n, size, flags, NUMA_NO_NODE);
+}
+
+static inline __alloc_size(1, 2) void *
+kvcalloc_node(size_t n, size_t size, gfp_t flags, int node)
+{
+	return kvmalloc_array_node(n, size, flags | __GFP_ZERO, node);
 }
 
 static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t flags)
```
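A side note on the hunk above: because all the wrappers share the check_mul_overflow() guard, an oversized `n * size` request fails cleanly instead of allocating a truncated buffer. A minimal sketch, assuming a caller like the one below (`overflow_guard_demo()` is an invented name, not part of the patch):

```c
/* Hedged sketch of the shared overflow guard; overflow_guard_demo()
 * is invented for illustration and is not part of the patch.
 */
#include <linux/bug.h>
#include <linux/limits.h>
#include <linux/slab.h>

static void overflow_guard_demo(void)
{
	/* (SIZE_MAX / 2) elements of 4 bytes overflows size_t, ... */
	void *p = kvmalloc_array_node(SIZE_MAX / 2, 4, GFP_KERNEL,
				      NUMA_NO_NODE);

	/* ... so check_mul_overflow() trips inside the helper and NULL
	 * comes back instead of a pointer to an undersized buffer.
	 */
	WARN_ON(p);
	kvfree(p);	/* kvfree(NULL) is a no-op, safe either way */
}
```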
Add NUMA-aware counterparts for kvmalloc_array() and kvcalloc() to be
able to flexibly allocate arrays for a particular node.
Rewrite kvmalloc_array() to a kvmalloc_array_node(NUMA_NO_NODE) call.

Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
---
 include/linux/slab.h | 17 +++++++++++++++--
 1 file changed, 15 insertions(+), 2 deletions(-)
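To illustrate the intended use, here is a minimal, hedged sketch of a driver-style caller; `struct ring_desc` and `alloc_ring()` are invented for this example and are not part of the patch:

```c
/* Hypothetical caller of the new NUMA-aware helpers; struct ring_desc
 * and alloc_ring() are invented for illustration only.
 */
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/types.h>

struct ring_desc {
	u64 addr;
	u32 len;
};

static struct ring_desc *alloc_ring(struct device *dev, size_t n)
{
	/* dev_to_node() yields NUMA_NO_NODE when the node is unknown,
	 * which degrades gracefully to the old kvcalloc() behaviour.
	 */
	int node = dev_to_node(dev);

	/* Zeroed, overflow-checked array placed on the device's node;
	 * kvmalloc_node() falls back to vmalloc for large sizes.
	 */
	return kvcalloc_node(n, sizeof(struct ring_desc), GFP_KERNEL, node);
}
```

Freeing is unchanged: kvfree() handles both the kmalloc and vmalloc cases, so callers switching to the node-aware variants need no other adjustments.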