Skip to content

support large align and nid in Rust allocators #5582

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 4 commits into
base: bpf-next_base
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion fs/bcachefs/darray.c
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ int __bch2_darray_resize_noprof(darray_char *d, size_t element_size, size_t new_
return -ENOMEM;

void *data = likely(bytes < INT_MAX)
? kvmalloc_noprof(bytes, gfp)
? kvmalloc_node_align_noprof(bytes, 1, gfp, NUMA_NO_NODE)
: vmalloc_noprof(bytes);
if (!data)
return -ENOMEM;
Expand Down
2 changes: 1 addition & 1 deletion fs/bcachefs/util.h
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ static inline void *bch2_kvmalloc_noprof(size_t n, gfp_t flags)
{
void *p = unlikely(n >= INT_MAX)
? vmalloc_noprof(n)
: kvmalloc_noprof(n, flags & ~__GFP_ZERO);
: kvmalloc_node_align_noprof(n, 1, flags & ~__GFP_ZERO, NUMA_NO_NODE);
if (p && (flags & __GFP_ZERO))
memset(p, 0, n);
return p;
Expand Down
2 changes: 1 addition & 1 deletion include/linux/bpfptr.h
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,7 @@ static inline int copy_to_bpfptr_offset(bpfptr_t dst, size_t offset,

static inline void *kvmemdup_bpfptr_noprof(bpfptr_t src, size_t len)
{
void *p = kvmalloc_noprof(len, GFP_USER | __GFP_NOWARN);
void *p = kvmalloc_node_align_noprof(len, 1, GFP_USER | __GFP_NOWARN, NUMA_NO_NODE);

if (!p)
return ERR_PTR(-ENOMEM);
Expand Down
38 changes: 23 additions & 15 deletions include/linux/slab.h
Original file line number Diff line number Diff line change
Expand Up @@ -465,9 +465,13 @@ int kmem_cache_shrink(struct kmem_cache *s);
/*
* Common kmalloc functions provided by all allocators
*/
/*
 * krealloc() family: reallocate slab memory with an optional NUMA node and
 * alignment request.  The requested alignment must not exceed what kmalloc()
 * naturally provides for the size; plain krealloc() callers get align == 1.
 * (The pre-patch krealloc_noprof() prototype is superseded by the shim below.)
 */
void * __must_check krealloc_node_align_noprof(const void *objp, size_t new_size,
					       unsigned long align,
					       gfp_t flags, int nid) __realloc_size(2);
#define krealloc_noprof(_o, _s, _f)	krealloc_node_align_noprof(_o, _s, 1, _f, NUMA_NO_NODE)
#define krealloc_node_align(...)	alloc_hooks(krealloc_node_align_noprof(__VA_ARGS__))
#define krealloc_node(_o, _s, _f, _n)	krealloc_node_align(_o, _s, 1, _f, _n)
#define krealloc(...)			krealloc_node(__VA_ARGS__, NUMA_NO_NODE)

void kfree(const void *objp);
void kfree_sensitive(const void *objp);
Expand Down Expand Up @@ -1041,18 +1045,20 @@ static inline __alloc_size(1) void *kzalloc_noprof(size_t size, gfp_t flags)
#define kzalloc(...) alloc_hooks(kzalloc_noprof(__VA_ARGS__))
#define kzalloc_node(_size, _flags, _node) kmalloc_node(_size, (_flags)|__GFP_ZERO, _node)

/*
 * kvmalloc() family: try kmalloc() first and fall back to vmalloc() for
 * larger requests.  @align and @node are threaded through to the backend.
 */
void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), unsigned long align,
			     gfp_t flags, int node) __alloc_size(1);
#define kvmalloc_node_align_noprof(_size, _align, _flags, _node)	\
	__kvmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, NULL), _align, _flags, _node)
#define kvmalloc_node_align(...)					\
	alloc_hooks(kvmalloc_node_align_noprof(__VA_ARGS__))
#define kvmalloc_node(_s, _f, _n)	kvmalloc_node_align(_s, 1, _f, _n)
#define kvmalloc(...)			kvmalloc_node(__VA_ARGS__, NUMA_NO_NODE)
/*
 * Backward-compat shims: keep the pre-existing _noprof entry points working
 * (e.g. alloc_hooks_tag() users) instead of forcing every caller to spell
 * out the align/node arguments.
 */
#define kvmalloc_node_noprof(_s, _f, _n)	kvmalloc_node_align_noprof(_s, 1, _f, _n)
#define kvmalloc_noprof(_s, _f)			kvmalloc_node_noprof(_s, _f, NUMA_NO_NODE)
#define kvzalloc(_size, _flags)		kvmalloc(_size, (_flags)|__GFP_ZERO)

#define kvzalloc_node(_size, _flags, _node) kvmalloc_node(_size, (_flags)|__GFP_ZERO, _node)

#define kmem_buckets_valloc(_b, _size, _flags)	\
	alloc_hooks(__kvmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), 1, _flags, NUMA_NO_NODE))

static inline __alloc_size(1, 2) void *
kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node)
Expand All @@ -1062,7 +1068,7 @@ kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node)
if (unlikely(check_mul_overflow(n, size, &bytes)))
return NULL;

return kvmalloc_node_noprof(bytes, flags, node);
return kvmalloc_node_align_noprof(bytes, 1, flags, node);
}

#define kvmalloc_array_noprof(...) kvmalloc_array_node_noprof(__VA_ARGS__, NUMA_NO_NODE)
Expand All @@ -1073,9 +1079,11 @@ kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node)
#define kvcalloc_node(...) alloc_hooks(kvcalloc_node_noprof(__VA_ARGS__))
#define kvcalloc(...) alloc_hooks(kvcalloc_noprof(__VA_ARGS__))

/*
 * kvrealloc() family: reallocate kvmalloc()'ed memory (slab- or
 * vmalloc-backed) with optional NUMA node and alignment.
 */
void *kvrealloc_node_align_noprof(const void *p, size_t size, unsigned long align,
				  gfp_t flags, int nid) __realloc_size(2);
/* Backward-compat shim for existing kvrealloc_noprof() callers. */
#define kvrealloc_noprof(_p, _s, _f)	kvrealloc_node_align_noprof(_p, _s, 1, _f, NUMA_NO_NODE)
#define kvrealloc_node_align(...)	alloc_hooks(kvrealloc_node_align_noprof(__VA_ARGS__))
#define kvrealloc_node(_p, _s, _f, _n)	kvrealloc_node_align(_p, _s, 1, _f, _n)
#define kvrealloc(...)			kvrealloc_node(__VA_ARGS__, NUMA_NO_NODE)

extern void kvfree(const void *addr);
DEFINE_FREE(kvfree, void *, if (!IS_ERR_OR_NULL(_T)) kvfree(_T))
Expand Down
12 changes: 9 additions & 3 deletions include/linux/vmalloc.h
Original file line number Diff line number Diff line change
Expand Up @@ -197,9 +197,15 @@ extern void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1
extern void *vcalloc_noprof(size_t n, size_t size) __alloc_size(1, 2);
#define vcalloc(...) alloc_hooks(vcalloc_noprof(__VA_ARGS__))

/*
 * vrealloc() family: reallocate virtually contiguous memory with optional
 * NUMA node and alignment.  The _noprof variants bypass allocation profiling;
 * the plain names wrap them in alloc_hooks().
 */
void *__must_check vrealloc_node_align_noprof(const void *p, size_t size,
		unsigned long align, gfp_t flags, int nid) __realloc_size(2);
#define vrealloc_node_noprof(_p, _s, _f, _nid)	\
	vrealloc_node_align_noprof(_p, _s, 1, _f, _nid)
#define vrealloc_noprof(_p, _s, _f)		\
	vrealloc_node_noprof(_p, _s, _f, NUMA_NO_NODE)
#define vrealloc_node_align(...)	alloc_hooks(vrealloc_node_align_noprof(__VA_ARGS__))
#define vrealloc_node(...)		alloc_hooks(vrealloc_node_noprof(__VA_ARGS__))
#define vrealloc(...)			alloc_hooks(vrealloc_noprof(__VA_ARGS__))

extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);
Expand Down
4 changes: 2 additions & 2 deletions lib/rhashtable.c
Original file line number Diff line number Diff line change
Expand Up @@ -184,8 +184,8 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
static struct lock_class_key __key;

tbl = alloc_hooks_tag(ht->alloc_tag,
kvmalloc_node_noprof(struct_size(tbl, buckets, nbuckets),
gfp|__GFP_ZERO, NUMA_NO_NODE));
kvmalloc_node_align_noprof(struct_size(tbl, buckets, nbuckets),
1, gfp|__GFP_ZERO, NUMA_NO_NODE));

size = nbuckets;

Expand Down
3 changes: 2 additions & 1 deletion mm/nommu.c
Original file line number Diff line number Diff line change
Expand Up @@ -119,7 +119,8 @@ void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask)
}
EXPORT_SYMBOL(__vmalloc_noprof);

/*
 * nommu: vmalloc memory is kmalloc-backed, so vrealloc() simply forwards to
 * krealloc().  Pass the requested @align and @node through to
 * krealloc_node_align_noprof() instead of silently dropping them, so callers
 * asking for a large alignment or a specific node get the same semantics as
 * on MMU kernels.
 */
void *vrealloc_node_align_noprof(const void *p, size_t size, unsigned long align,
		gfp_t flags, int node)
{
	return krealloc_node_align_noprof(p, size, align,
					  (flags | __GFP_COMP) & ~__GFP_HIGHMEM, node);
}
Expand Down
64 changes: 44 additions & 20 deletions mm/slub.c
Original file line number Diff line number Diff line change
Expand Up @@ -4844,7 +4844,7 @@ void kfree(const void *object)
EXPORT_SYMBOL(kfree);

static __always_inline __realloc_size(2) void *
__do_krealloc(const void *p, size_t new_size, gfp_t flags)
__do_krealloc(const void *p, size_t new_size, unsigned long align, gfp_t flags, int nid)
{
void *ret;
size_t ks = 0;
Expand All @@ -4858,6 +4858,20 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
if (!kasan_check_byte(p))
return NULL;

/* refuse to proceed if alignment is bigger than what kmalloc() provides */
if (!IS_ALIGNED((unsigned long)p, align) || new_size < align)
return NULL;

/*
* If reallocation is not necessary (e.g. the new size is less
* than the current allocated size), the current allocation will be
* preserved unless __GFP_THISNODE is set. In the latter case a new
* allocation on the requested node will be attempted.
*/
if (unlikely(flags & __GFP_THISNODE) && nid != NUMA_NO_NODE &&
nid != page_to_nid(virt_to_page(p)))
goto alloc_new;

if (is_kfence_address(p)) {
ks = orig_size = kfence_ksize(p);
} else {
Expand Down Expand Up @@ -4902,7 +4916,7 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
return (void *)p;

alloc_new:
ret = kmalloc_node_track_caller_noprof(new_size, flags, NUMA_NO_NODE, _RET_IP_);
ret = kmalloc_node_track_caller_noprof(new_size, flags, nid, _RET_IP_);
if (ret && p) {
/* Disable KASAN checks as the object's redzone is accessed. */
kasan_disable_current();
Expand All @@ -4914,10 +4928,12 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
}

/**
* krealloc - reallocate memory. The contents will remain unchanged.
* krealloc_node_align - reallocate memory. The contents will remain unchanged.
* @p: object to reallocate memory for.
* @new_size: how many bytes of memory are required.
* @align: desired alignment.
* @flags: the type of memory to allocate.
* @nid: NUMA node or NUMA_NO_NODE
*
* If @p is %NULL, krealloc() behaves exactly like kmalloc(). If @new_size
* is 0 and @p is not a %NULL pointer, the object pointed to is freed.
Expand Down Expand Up @@ -4946,7 +4962,8 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
*
* Return: pointer to the allocated memory or %NULL in case of error
*/
void *krealloc_noprof(const void *p, size_t new_size, gfp_t flags)
void *krealloc_node_align_noprof(const void *p, size_t new_size, unsigned long align,
gfp_t flags, int nid)
{
void *ret;

Expand All @@ -4955,13 +4972,13 @@ void *krealloc_noprof(const void *p, size_t new_size, gfp_t flags)
return ZERO_SIZE_PTR;
}

ret = __do_krealloc(p, new_size, flags);
ret = __do_krealloc(p, new_size, align, flags, nid);
if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
kfree(p);

return ret;
}
EXPORT_SYMBOL(krealloc_noprof);
EXPORT_SYMBOL(krealloc_node_align_noprof);

static gfp_t kmalloc_gfp_adjust(gfp_t flags, size_t size)
{
Expand Down Expand Up @@ -4992,6 +5009,7 @@ static gfp_t kmalloc_gfp_adjust(gfp_t flags, size_t size)
* failure, fall back to non-contiguous (vmalloc) allocation.
* @size: size of the request.
* @b: which set of kmalloc buckets to allocate from.
* @align: desired alignment.
* @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
* @node: numa node to allocate from
*
Expand All @@ -5004,19 +5022,22 @@ static gfp_t kmalloc_gfp_adjust(gfp_t flags, size_t size)
*
* Return: pointer to the allocated memory or %NULL in case of failure
*/
void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), unsigned long align,
gfp_t flags, int node)
{
void *ret;

/*
* It doesn't really make sense to fallback to vmalloc for sub page
* requests
* requests and small alignments
*/
ret = __do_kmalloc_node(size, PASS_BUCKET_PARAM(b),
kmalloc_gfp_adjust(flags, size),
node, _RET_IP_);
if (ret || size <= PAGE_SIZE)
return ret;
if (size >= align) {
ret = __do_kmalloc_node(size, PASS_BUCKET_PARAM(b),
kmalloc_gfp_adjust(flags, size),
node, _RET_IP_);
if (ret || size <= PAGE_SIZE)
return ret;
}

/* non-sleeping allocations are not supported by vmalloc */
if (!gfpflags_allow_blocking(flags))
Expand All @@ -5034,7 +5055,7 @@ void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
* about the resulting pointer, and cannot play
* protection games.
*/
return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END,
return __vmalloc_node_range_noprof(size, align, VMALLOC_START, VMALLOC_END,
flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
node, __builtin_return_address(0));
}
Expand Down Expand Up @@ -5078,10 +5099,12 @@ void kvfree_sensitive(const void *addr, size_t len)
EXPORT_SYMBOL(kvfree_sensitive);

/**
* kvrealloc - reallocate memory; contents remain unchanged
* kvrealloc_node_align - reallocate memory; contents remain unchanged
* @p: object to reallocate memory for
* @size: the size to reallocate
* @align: desired alignment
* @flags: the flags for the page level allocator
* @nid: NUMA node id
*
* If @p is %NULL, kvrealloc() behaves exactly like kvmalloc(). If @size is 0
* and @p is not a %NULL pointer, the object pointed to is freed.
Expand All @@ -5099,17 +5122,18 @@ EXPORT_SYMBOL(kvfree_sensitive);
*
* Return: pointer to the allocated memory or %NULL in case of error
*/
void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags)
void *kvrealloc_node_align_noprof(const void *p, size_t size, unsigned long align,
gfp_t flags, int nid)
{
void *n;

if (is_vmalloc_addr(p))
return vrealloc_noprof(p, size, flags);
return vrealloc_node_align_noprof(p, size, align, flags, nid);

n = krealloc_noprof(p, size, kmalloc_gfp_adjust(flags, size));
n = krealloc_node_align_noprof(p, size, align, kmalloc_gfp_adjust(flags, size), nid);
if (!n) {
/* We failed to krealloc(), fall back to kvmalloc(). */
n = kvmalloc_noprof(size, flags);
n = kvmalloc_node_align_noprof(size, align, flags, nid);
if (!n)
return NULL;

Expand All @@ -5125,7 +5149,7 @@ void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags)

return n;
}
EXPORT_SYMBOL(kvrealloc_noprof);
EXPORT_SYMBOL(kvrealloc_node_align_noprof);

struct detached_freelist {
struct slab *slab;
Expand Down
31 changes: 26 additions & 5 deletions mm/vmalloc.c
Original file line number Diff line number Diff line change
Expand Up @@ -4081,19 +4081,31 @@ void *vzalloc_node_noprof(unsigned long size, int node)
EXPORT_SYMBOL(vzalloc_node_noprof);

/**
* vrealloc - reallocate virtually contiguous memory; contents remain unchanged
* vrealloc_node_align_noprof - reallocate virtually contiguous memory; contents
* remain unchanged
* @p: object to reallocate memory for
* @size: the size to reallocate
* @align: requested alignment
* @flags: the flags for the page level allocator
* @nid: node number of the target node
*
* If @p is %NULL, vrealloc_XXX() behaves exactly like vmalloc(). If @size is
* 0 and @p is not a %NULL pointer, the object pointed to is freed.
*
* If @p is %NULL, vrealloc() behaves exactly like vmalloc(). If @size is 0 and
* @p is not a %NULL pointer, the object pointed to is freed.
* If @nid is not NUMA_NO_NODE, this function will try to allocate memory on
* the given node. If reallocation is not necessary (e.g. the new size is less
* than the current allocated size), the current allocation will be preserved
* unless __GFP_THISNODE is set. In the latter case a new allocation on the
* requested node will be attempted.
*
* If __GFP_ZERO logic is requested, callers must ensure that, starting with the
* initial memory allocation, every subsequent call to this API for the same
* memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
* __GFP_ZERO is not fully honored by this API.
*
* If the requested alignment is bigger than the one the *existing* allocation
* has, this function will fail.
*
* In any case, the contents of the object pointed to are preserved up to the
* lesser of the new and old sizes.
*
Expand All @@ -4103,7 +4115,8 @@ EXPORT_SYMBOL(vzalloc_node_noprof);
* Return: pointer to the allocated memory; %NULL if @size is zero or in case of
* failure
*/
void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
void *vrealloc_node_align_noprof(const void *p, size_t size, unsigned long align,
gfp_t flags, int nid)
{
struct vm_struct *vm = NULL;
size_t alloced_size = 0;
Expand All @@ -4127,6 +4140,12 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
if (WARN(alloced_size < old_size,
"vrealloc() has mismatched area vs requested sizes (%p)\n", p))
return NULL;
if (WARN(!IS_ALIGNED((unsigned long)p, align),
"will not reallocate with a bigger alignment (0x%lx)\n", align))
return NULL;
if (unlikely(flags & __GFP_THISNODE) && nid != NUMA_NO_NODE &&
nid != page_to_nid(vmalloc_to_page(p)))
goto need_realloc;
}

/*
Expand Down Expand Up @@ -4157,8 +4176,10 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
return (void *)p;
}

need_realloc:
/* TODO: Grow the vm_area, i.e. allocate and map additional pages. */
n = __vmalloc_noprof(size, flags);
n = __vmalloc_node_noprof(size, align, flags, nid, __builtin_return_address(0));

if (!n)
return NULL;

Expand Down
10 changes: 6 additions & 4 deletions rust/helpers/slab.c
Original file line number Diff line number Diff line change
Expand Up @@ -3,13 +3,15 @@
#include <linux/slab.h>

/*
 * Rust binding helper: krealloc_node_align() is a macro (alloc_hooks
 * wrapper) that bindgen cannot see, so expose it as a real function.
 */
void * __must_check __realloc_size(2)
rust_helper_krealloc_node_align(const void *objp, size_t new_size, unsigned long align,
				gfp_t flags, int node)
{
	return krealloc_node_align(objp, new_size, align, flags, node);
}

/*
 * Rust binding helper: kvrealloc_node_align() is a macro (alloc_hooks
 * wrapper) that bindgen cannot see, so expose it as a real function.
 */
void * __must_check __realloc_size(2)
rust_helper_kvrealloc_node_align(const void *p, size_t size, unsigned long align,
				 gfp_t flags, int node)
{
	return kvrealloc_node_align(p, size, align, flags, node);
}
Loading
Loading