When both KASAN and SLAB_STORE_USER are enabled, accesses to the fields of struct kasan_alloc_meta can be misaligned on 64-bit architectures. The in-object debug metadata is laid out as two struct track entries followed by orig_size, and orig_size is currently defined as unsigned int: it is 4 bytes wide and only guarantees 4-byte alignment. Any 8-byte-aligned metadata placed after it, such as struct kasan_alloc_meta on 64-bit systems, can therefore land on a 4-byte boundary instead of the required 8-byte boundary.
Note that 64-bit architectures without HAVE_EFFICIENT_UNALIGNED_ACCESS are assumed to require 64-bit accesses to be 64-bit aligned. See HAVE_64BIT_ALIGNED_ACCESS and commit adab66b71abf ("Revert "ring-buffer: Remove HAVE_64BIT_ALIGNED_ACCESS"") for more details.
Change orig_size from unsigned int to unsigned long so that any metadata placed after it stays word-aligned. This should not waste additional memory in practice: kmalloc caches are already aligned to at least ARCH_KMALLOC_MINALIGN, so the total object size is rounded up to a multiple of at least 8 bytes on 64-bit anyway, and the 4 extra bytes are typically absorbed by padding that already exists.
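As a rough illustration, here is a minimal userspace sketch (not kernel code; the struct sizes are stand-ins for the real slub/kasan layout, and struct track in particular is larger in practice) of how a 4-byte orig_size leaves 8-byte-aligned metadata behind it on a 4-byte boundary, while an 8-byte orig_size does not:

	/*
	 * Userspace sketch only -- stand-in types, assuming a 64-bit ABI
	 * where unsigned int is 4 bytes and unsigned long is 8 bytes.
	 */
	#include <stdalign.h>
	#include <stdio.h>

	struct track { unsigned long addrs[4]; };	/* stand-in for struct track */
	struct kasan_alloc_meta { unsigned long x; };	/* wants 8-byte alignment */

	int main(void)
	{
		/* Metadata after the object: track[2], then orig_size. */
		size_t off4 = 2 * sizeof(struct track) + sizeof(unsigned int);
		size_t off8 = 2 * sizeof(struct track) + sizeof(unsigned long);

		printf("4-byte orig_size: next meta at %zu (offset mod 8 = %zu)\n",
		       off4, off4 % alignof(struct kasan_alloc_meta));
		printf("8-byte orig_size: next meta at %zu (offset mod 8 = %zu)\n",
		       off8, off8 % alignof(struct kasan_alloc_meta));
		return 0;
	}

With these stand-in sizes it prints an offset of 68 (mod 8 = 4) for the 4-byte orig_size and 72 (mod 8 = 0) for the 8-byte one.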
Suggested-by: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: stable@vger.kernel.org
Fixes: 6edf2576a6cc ("mm/slub: enable debugging memory wasting of kmalloc")
Signed-off-by: Harry Yoo <harry.yoo@oracle.com>
---
 mm/slub.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index ad71f01571f0..1c747435a6ab 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -857,7 +857,7 @@ static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab,
  * request size in the meta data area, for better debug and sanity check.
  */
 static inline void set_orig_size(struct kmem_cache *s,
-				void *object, unsigned int orig_size)
+				void *object, unsigned long orig_size)
 {
 	void *p = kasan_reset_tag(object);
 
@@ -867,10 +867,10 @@ static inline void set_orig_size(struct kmem_cache *s,
 	p += get_info_end(s);
 	p += sizeof(struct track) * 2;
 
-	*(unsigned int *)p = orig_size;
+	*(unsigned long *)p = orig_size;
 }
 
-static inline unsigned int get_orig_size(struct kmem_cache *s, void *object)
+static inline unsigned long get_orig_size(struct kmem_cache *s, void *object)
 {
 	void *p = kasan_reset_tag(object);
 
@@ -883,7 +883,7 @@ static inline unsigned int get_orig_size(struct kmem_cache *s, void *object)
 	p += get_info_end(s);
 	p += sizeof(struct track) * 2;
 
-	return *(unsigned int *)p;
+	return *(unsigned long *)p;
 }
 
 #ifdef CONFIG_SLUB_DEBUG
@@ -1198,7 +1198,7 @@ static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p)
 		off += 2 * sizeof(struct track);
 
 	if (slub_debug_orig_size(s))
-		off += sizeof(unsigned int);
+		off += sizeof(unsigned long);
 
 	off += kasan_metadata_size(s, false);
 
@@ -1394,7 +1394,7 @@ static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p)
 		off += 2 * sizeof(struct track);
 
 		if (s->flags & SLAB_KMALLOC)
-			off += sizeof(unsigned int);
+			off += sizeof(unsigned long);
 	}
 
 	off += kasan_metadata_size(s, false);
@@ -7949,7 +7949,7 @@ static int calculate_sizes(struct kmem_cache_args *args, struct kmem_cache *s)
 
 		/* Save the original kmalloc request size */
 		if (flags & SLAB_KMALLOC)
-			size += sizeof(unsigned int);
+			size += sizeof(unsigned long);
 	}
 #endif
 