| author | Stephen Rothwell <sfr@canb.auug.org.au> | 2022-06-28 15:41:13 +1000 |
|---|---|---|
| committer | Stephen Rothwell <sfr@canb.auug.org.au> | 2022-06-28 15:41:13 +1000 |
| commit | adc3aa587669e26f1e3b88226f58da89c3a0fe4e (patch) | |
| tree | 08108a16c0f736c70270fcade6de5b46a38d1f11 | |
| parent | a0597f80ceb630126fb11cf776acdfff404e7f69 (diff) | |
| parent | 0835f14f8218f676cb893e892ebcc08f502085ca (diff) | |
| download | linux-next-adc3aa587669e26f1e3b88226f58da89c3a0fe4e.tar.gz | |
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab.git
| -rw-r--r-- | include/trace/events/kmem.h | 40 |
| -rw-r--r-- | mm/slab.c | 28 |
| -rw-r--r-- | mm/slab.h | 30 |
| -rw-r--r-- | mm/slab_common.c | 9 |
| -rw-r--r-- | mm/slob.c | 8 |
| -rw-r--r-- | mm/slub.c | 94 |
| -rw-r--r-- | tools/vm/slabinfo.c | 26 |
7 files changed, 110 insertions, 125 deletions
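The headline change in this merge is threading a `struct kmem_cache *` argument into the allocation tracepoints so they can report whether an allocation is charged to a memory cgroup, via a new `accounted=` field. Below is a minimal sketch of a module that would exercise the new field; the module and cache names are hypothetical, and `accounted=true` can only be reported when `CONFIG_MEMCG_KMEM` is enabled:

```c
#include <linux/module.h>
#include <linux/slab.h>

/* Hypothetical demo: allocations from a SLAB_ACCOUNT cache (or any
 * allocation passing __GFP_ACCOUNT) should now show up in the
 * kmem_cache_alloc/kmalloc trace events with accounted=true, e.g.:
 *   kmem_cache_alloc: call_site=... ptr=... bytes_req=64 bytes_alloc=64
 *                     gfp_flags=GFP_KERNEL accounted=true
 */
static struct kmem_cache *demo_cache;
static void *demo_obj;

static int __init demo_init(void)
{
	demo_cache = kmem_cache_create("accounted_demo", 64, 0,
				       SLAB_ACCOUNT, NULL);
	if (!demo_cache)
		return -ENOMEM;

	demo_obj = kmem_cache_alloc(demo_cache, GFP_KERNEL);
	if (!demo_obj) {
		kmem_cache_destroy(demo_cache);
		return -ENOMEM;
	}
	return 0;
}

static void __exit demo_exit(void)
{
	kmem_cache_free(demo_cache, demo_obj);
	kmem_cache_destroy(demo_cache);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```

With the event enabled (`echo 1 > /sys/kernel/debug/tracing/events/kmem/kmem_cache_alloc/enable`), the resulting trace line should end in `accounted=true`.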
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index f76668305ac50..4cb51ace600d4 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -13,11 +13,12 @@ DECLARE_EVENT_CLASS(kmem_alloc,
 
 	TP_PROTO(unsigned long call_site,
 		 const void *ptr,
+		 struct kmem_cache *s,
 		 size_t bytes_req,
 		 size_t bytes_alloc,
 		 gfp_t gfp_flags),
 
-	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),
+	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags),
 
 	TP_STRUCT__entry(
 		__field(	unsigned long,	call_site	)
@@ -25,6 +26,7 @@ DECLARE_EVENT_CLASS(kmem_alloc,
 		__field(	size_t,		bytes_req	)
 		__field(	size_t,		bytes_alloc	)
 		__field(	unsigned long,	gfp_flags	)
+		__field(	bool,		accounted	)
 	),
 
 	TP_fast_assign(
@@ -33,42 +35,47 @@ DECLARE_EVENT_CLASS(kmem_alloc,
 		__entry->bytes_req	= bytes_req;
 		__entry->bytes_alloc	= bytes_alloc;
 		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
+		__entry->accounted	= IS_ENABLED(CONFIG_MEMCG_KMEM) ?
+					  ((gfp_flags & __GFP_ACCOUNT) ||
+					  (s && s->flags & SLAB_ACCOUNT)) : false;
 	),
 
-	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
+	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s accounted=%s",
 		(void *)__entry->call_site,
 		__entry->ptr,
 		__entry->bytes_req,
 		__entry->bytes_alloc,
-		show_gfp_flags(__entry->gfp_flags))
+		show_gfp_flags(__entry->gfp_flags),
+		__entry->accounted ? "true" : "false")
 );
 
 DEFINE_EVENT(kmem_alloc, kmalloc,
 
-	TP_PROTO(unsigned long call_site, const void *ptr,
+	TP_PROTO(unsigned long call_site, const void *ptr, struct kmem_cache *s,
 		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
 
-	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
+	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags)
 );
 
 DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,
 
-	TP_PROTO(unsigned long call_site, const void *ptr,
+	TP_PROTO(unsigned long call_site, const void *ptr, struct kmem_cache *s,
 		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
 
-	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
+	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags)
 );
 
 DECLARE_EVENT_CLASS(kmem_alloc_node,
 
 	TP_PROTO(unsigned long call_site,
 		 const void *ptr,
+		 struct kmem_cache *s,
 		 size_t bytes_req,
 		 size_t bytes_alloc,
 		 gfp_t gfp_flags,
 		 int node),
 
-	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),
+	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node),
 
 	TP_STRUCT__entry(
 		__field(	unsigned long,	call_site	)
@@ -77,6 +84,7 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
 		__field(	size_t,		bytes_alloc	)
 		__field(	unsigned long,	gfp_flags	)
 		__field(	int,		node		)
+		__field(	bool,		accounted	)
 	),
 
 	TP_fast_assign(
@@ -86,33 +94,37 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
 		__entry->bytes_alloc	= bytes_alloc;
 		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
 		__entry->node		= node;
+		__entry->accounted	= IS_ENABLED(CONFIG_MEMCG_KMEM) ?
+					  ((gfp_flags & __GFP_ACCOUNT) ||
+					  (s && s->flags & SLAB_ACCOUNT)) : false;
 	),
 
-	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
+	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d accounted=%s",
 		(void *)__entry->call_site,
 		__entry->ptr,
 		__entry->bytes_req,
 		__entry->bytes_alloc,
 		show_gfp_flags(__entry->gfp_flags),
-		__entry->node)
+		__entry->node,
+		__entry->accounted ? "true" : "false")
 );
 
 DEFINE_EVENT(kmem_alloc_node, kmalloc_node,
 
 	TP_PROTO(unsigned long call_site, const void *ptr,
-		 size_t bytes_req, size_t bytes_alloc,
+		 struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc,
 		 gfp_t gfp_flags, int node),
 
-	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
+	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node)
 );
 
 DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,
 
	TP_PROTO(unsigned long call_site, const void *ptr,
-		 size_t bytes_req, size_t bytes_alloc,
+		 struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc,
 		 gfp_t gfp_flags, int node),
 
-	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
+	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node)
 );
 
 TRACE_EVENT(kfree,
diff --git a/mm/slab.c b/mm/slab.c
index f8cd00f4ba13c..6474c515a664c 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2958,12 +2958,6 @@ direct_grow:
 	return ac->entry[--ac->avail];
 }
 
-static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
-						gfp_t flags)
-{
-	might_sleep_if(gfpflags_allow_blocking(flags));
-}
-
 #if DEBUG
 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 				gfp_t flags, void *objp, unsigned long caller)
@@ -3205,7 +3199,6 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_
 	if (unlikely(ptr))
 		goto out_hooks;
 
-	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
 
 	if (nodeid == NUMA_NO_NODE)
@@ -3230,7 +3223,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_
 	}
 	/* ___cache_alloc_node can fall back to other nodes */
 	ptr = ____cache_alloc_node(cachep, flags, nodeid);
-  out:
+out:
 	local_irq_restore(save_flags);
 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
 	init = slab_want_init_on_alloc(flags, cachep);
@@ -3259,7 +3252,7 @@ __do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
 	if (!objp)
 		objp = ____cache_alloc_node(cache, flags, numa_mem_id());
 
-  out:
+out:
 	return objp;
 }
 #else
@@ -3290,7 +3283,6 @@ slab_alloc(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags,
 	if (unlikely(objp))
 		goto out;
 
-	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
 	objp = __do_cache_alloc(cachep, flags);
 	local_irq_restore(save_flags);
@@ -3406,9 +3398,10 @@ static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
 {
 	bool init;
 
+	memcg_slab_free_hook(cachep, virt_to_slab(objp), &objp, 1);
+
 	if (is_kfence_address(objp)) {
 		kmemleak_free_recursive(objp, cachep->flags);
-		memcg_slab_free_hook(cachep, &objp, 1);
 		__kfence_free(objp);
 		return;
 	}
@@ -3441,7 +3434,6 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
 	check_irq_off();
 	kmemleak_free_recursive(objp, cachep->flags);
 	objp = cache_free_debugcheck(cachep, objp, caller);
-	memcg_slab_free_hook(cachep, &objp, 1);
 
 	/*
 	 * Skip calling cache_free_alien() when the platform is not numa.
@@ -3478,7 +3470,7 @@ void *__kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
 {
 	void *ret = slab_alloc(cachep, lru, flags, cachep->object_size, _RET_IP_);
 
-	trace_kmem_cache_alloc(_RET_IP_, ret,
+	trace_kmem_cache_alloc(_RET_IP_, ret, cachep,
 			       cachep->object_size, cachep->size, flags);
 
 	return ret;
@@ -3527,8 +3519,6 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	if (!s)
 		return 0;
 
-	cache_alloc_debugcheck_before(s, flags);
-
 	local_irq_disable();
 	for (i = 0; i < size; i++) {
 		void *objp = kfence_alloc(s, s->object_size, flags) ?: __do_cache_alloc(s, flags);
@@ -3567,7 +3557,7 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
 	ret = slab_alloc(cachep, NULL, flags, size, _RET_IP_);
 
 	ret = kasan_kmalloc(cachep, ret, size, flags);
-	trace_kmalloc(_RET_IP_, ret,
+	trace_kmalloc(_RET_IP_, ret, cachep,
 		      size, cachep->size, flags);
 	return ret;
 }
@@ -3592,7 +3582,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 	void *ret = slab_alloc_node(cachep, flags, nodeid, cachep->object_size, _RET_IP_);
 
-	trace_kmem_cache_alloc_node(_RET_IP_, ret,
+	trace_kmem_cache_alloc_node(_RET_IP_, ret, cachep,
 				    cachep->object_size, cachep->size,
 				    flags, nodeid);
 
@@ -3611,7 +3601,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
 	ret = slab_alloc_node(cachep, flags, nodeid, size, _RET_IP_);
 
 	ret = kasan_kmalloc(cachep, ret, size, flags);
-	trace_kmalloc_node(_RET_IP_, ret,
+	trace_kmalloc_node(_RET_IP_, ret, cachep,
 			   size, cachep->size,
 			   flags, nodeid);
 	return ret;
@@ -3694,7 +3684,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 	ret = slab_alloc(cachep, NULL, flags, size, caller);
 
 	ret = kasan_kmalloc(cachep, ret, size, flags);
-	trace_kmalloc(caller, ret,
+	trace_kmalloc(caller, ret, cachep,
 		      size, cachep->size, flags);
 
 	return ret;
diff --git a/mm/slab.h b/mm/slab.h
index db9fb5c8dae73..a8d5eb1c323f5 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -547,36 +547,22 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
 	obj_cgroup_put(objcg);
 }
 
-static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
+static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
 					void **p, int objects)
 {
-	struct kmem_cache *s;
 	struct obj_cgroup **objcgs;
-	struct obj_cgroup *objcg;
-	struct slab *slab;
-	unsigned int off;
 	int i;
 
 	if (!memcg_kmem_enabled())
 		return;
 
-	for (i = 0; i < objects; i++) {
-		if (unlikely(!p[i]))
-			continue;
-
-		slab = virt_to_slab(p[i]);
-		/* we could be given a kmalloc_large() object, skip those */
-		if (!slab)
-			continue;
-
-		objcgs = slab_objcgs(slab);
-		if (!objcgs)
-			continue;
+	objcgs = slab_objcgs(slab);
+	if (!objcgs)
+		return;
 
-		if (!s_orig)
-			s = slab->slab_cache;
-		else
-			s = s_orig;
+	for (i = 0; i < objects; i++) {
+		struct obj_cgroup *objcg;
+		unsigned int off;
 
 		off = obj_to_index(s, slab, p[i]);
 		objcg = objcgs[off];
@@ -628,7 +614,7 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
 {
 }
 
-static inline void memcg_slab_free_hook(struct kmem_cache *s,
+static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
 					void **p, int objects)
 {
 }
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 77c3adf40e504..6c9aac5d8f4a1 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -26,13 +26,12 @@
 #include <linux/memcontrol.h>
 #include <linux/stackdepot.h>
 
-#define CREATE_TRACE_POINTS
-#include <trace/events/kmem.h>
-
 #include "internal.h"
-
 #include "slab.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/kmem.h>
+
 enum slab_state slab_state;
 LIST_HEAD(slab_caches);
 DEFINE_MUTEX(slab_mutex);
@@ -959,7 +958,7 @@ EXPORT_SYMBOL(kmalloc_order);
 void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
 {
 	void *ret = kmalloc_order(size, flags, order);
-	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
+	trace_kmalloc(_RET_IP_, ret, NULL, size, PAGE_SIZE << order, flags);
 	return ret;
 }
 EXPORT_SYMBOL(kmalloc_order_trace);
diff --git a/mm/slob.c b/mm/slob.c
index f47811f09aca0..56421fe461c4f 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -507,7 +507,7 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 		*m = size;
 		ret = (void *)m + minalign;
 
-		trace_kmalloc_node(caller, ret,
+		trace_kmalloc_node(caller, ret, NULL,
 				   size, size + minalign, gfp, node);
 	} else {
 		unsigned int order = get_order(size);
@@ -516,7 +516,7 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 			gfp |= __GFP_COMP;
 		ret = slob_new_pages(gfp, order, node);
 
-		trace_kmalloc_node(caller, ret,
+		trace_kmalloc_node(caller, ret, NULL,
 				   size, PAGE_SIZE << order, gfp, node);
 	}
 
@@ -616,12 +616,12 @@ static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 
 	if (c->size < PAGE_SIZE) {
 		b = slob_alloc(c->size, flags, c->align, node, 0);
-		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
+		trace_kmem_cache_alloc_node(_RET_IP_, b, NULL, c->object_size,
 					    SLOB_UNITS(c->size) * SLOB_UNIT,
 					    flags, node);
 	} else {
 		b = slob_new_pages(flags, get_order(c->size), node);
-		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
+		trace_kmem_cache_alloc_node(_RET_IP_, b, NULL, c->object_size,
 					    PAGE_SIZE << get_order(c->size),
 					    flags, node);
 	}
diff --git a/mm/slub.c b/mm/slub.c
index b1281b8654bd3..26b00951aad15 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3257,7 +3257,7 @@ void *__kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
 {
 	void *ret = slab_alloc(s, lru, gfpflags, _RET_IP_, s->object_size);
 
-	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
+	trace_kmem_cache_alloc(_RET_IP_, ret, s, s->object_size,
 				s->size, gfpflags);
 
 	return ret;
@@ -3280,7 +3280,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_lru);
 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
 	void *ret = slab_alloc(s, NULL, gfpflags, _RET_IP_, size);
-	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
+	trace_kmalloc(_RET_IP_, ret, s, size, s->size, gfpflags);
 	ret = kasan_kmalloc(s, ret, size, gfpflags);
 	return ret;
 }
@@ -3292,7 +3292,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
 	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
 
-	trace_kmem_cache_alloc_node(_RET_IP_, ret,
+	trace_kmem_cache_alloc_node(_RET_IP_, ret, s,
 				    s->object_size, s->size, gfpflags, node);
 
 	return ret;
@@ -3306,7 +3306,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 {
 	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size);
 
-	trace_kmalloc_node(_RET_IP_, ret,
+	trace_kmalloc_node(_RET_IP_, ret, s,
 			   size, s->size, gfpflags, node);
 
 	ret = kasan_kmalloc(s, ret, size, gfpflags);
@@ -3464,9 +3464,6 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
 	struct kmem_cache_cpu *c;
 	unsigned long tid;
 
-	/* memcg_slab_free_hook() is already called for bulk free. */
-	if (!tail)
-		memcg_slab_free_hook(s, &head, 1);
 redo:
 	/*
 	 * Determine the currently cpus per cpu slab.
@@ -3526,9 +3523,10 @@ redo:
 }
 
 static __always_inline void slab_free(struct kmem_cache *s, struct slab *slab,
-				      void *head, void *tail, int cnt,
+				      void *head, void *tail, void **p, int cnt,
 				      unsigned long addr)
 {
+	memcg_slab_free_hook(s, slab, p, cnt);
 	/*
 	 * With KASAN enabled slab_free_freelist_hook modifies the freelist
 	 * to remove objects, whose reuse must be delayed.
@@ -3550,7 +3548,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 	if (!s)
 		return;
 	trace_kmem_cache_free(_RET_IP_, x, s->name);
-	slab_free(s, virt_to_slab(x), x, NULL, 1, _RET_IP_);
+	slab_free(s, virt_to_slab(x), x, NULL, &x, 1, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -3591,79 +3589,59 @@ static inline
 int build_detached_freelist(struct kmem_cache *s, size_t size,
 			    void **p, struct detached_freelist *df)
 {
-	size_t first_skipped_index = 0;
 	int lookahead = 3;
 	void *object;
 	struct folio *folio;
-	struct slab *slab;
-
-	/* Always re-init detached_freelist */
-	df->slab = NULL;
-
-	do {
-		object = p[--size];
-		/* Do we need !ZERO_OR_NULL_PTR(object) here? (for kfree) */
-	} while (!object && size);
-
-	if (!object)
-		return 0;
+	size_t same;
 
+	object = p[--size];
 	folio = virt_to_folio(object);
 	if (!s) {
 		/* Handle kalloc'ed objects */
 		if (unlikely(!folio_test_slab(folio))) {
 			free_large_kmalloc(folio, object);
-			p[size] = NULL; /* mark object processed */
+			df->slab = NULL;
 			return size;
 		}
 		/* Derive kmem_cache from object */
-		slab = folio_slab(folio);
-		df->s = slab->slab_cache;
+		df->slab = folio_slab(folio);
+		df->s = df->slab->slab_cache;
 	} else {
-		slab = folio_slab(folio);
+		df->slab = folio_slab(folio);
 		df->s = cache_from_obj(s, object); /* Support for memcg */
 	}
 
-	if (is_kfence_address(object)) {
-		slab_free_hook(df->s, object, false);
-		__kfence_free(object);
-		p[size] = NULL; /* mark object processed */
-		return size;
-	}
-
 	/* Start new detached freelist */
-	df->slab = slab;
-	set_freepointer(df->s, object, NULL);
 	df->tail = object;
 	df->freelist = object;
-	p[size] = NULL; /* mark object processed */
 	df->cnt = 1;
 
+	if (is_kfence_address(object))
+		return size;
+
+	set_freepointer(df->s, object, NULL);
+
+	same = size;
 	while (size) {
 		object = p[--size];
-		if (!object)
-			continue; /* Skip processed objects */
-
 		/* df->slab is always set at this point */
 		if (df->slab == virt_to_slab(object)) {
 			/* Opportunity build freelist */
 			set_freepointer(df->s, object, df->freelist);
 			df->freelist = object;
 			df->cnt++;
-			p[size] = NULL; /* mark object processed */
-
+			same--;
+			if (size != same)
+				swap(p[size], p[same]);
 			continue;
 		}
 
 		/* Limit look ahead search */
 		if (!--lookahead)
 			break;
-
-		if (!first_skipped_index)
-			first_skipped_index = size + 1;
 	}
 
-	return first_skipped_index;
+	return same;
 }
 
 /* Note that interrupts must be enabled when calling this function. */
@@ -3672,7 +3650,6 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
 	if (WARN_ON(!size))
 		return;
 
-	memcg_slab_free_hook(s, p, size);
 	do {
 		struct detached_freelist df;
 
@@ -3680,7 +3657,8 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
 		if (!df.slab)
 			continue;
 
-		slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt, _RET_IP_);
+		slab_free(df.s, df.slab, df.freelist, df.tail, &p[size], df.cnt,
+			  _RET_IP_);
 	} while (likely(size));
 }
 EXPORT_SYMBOL(kmem_cache_free_bulk);
@@ -4441,7 +4419,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 
 	ret = slab_alloc(s, NULL, flags, _RET_IP_, size);
 
-	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
+	trace_kmalloc(_RET_IP_, ret, s, size, s->size, flags);
 
 	ret = kasan_kmalloc(s, ret, size, flags);
 
@@ -4475,7 +4453,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
 		ret = kmalloc_large_node(size, flags, node);
 
-		trace_kmalloc_node(_RET_IP_, ret,
+		trace_kmalloc_node(_RET_IP_, ret, NULL,
 				   size, PAGE_SIZE << get_order(size),
 				   flags, node);
 
@@ -4489,7 +4467,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 
 	ret = slab_alloc_node(s, NULL, flags, node, _RET_IP_, size);
 
-	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
+	trace_kmalloc_node(_RET_IP_, ret, s, size, s->size, flags, node);
 
 	ret = kasan_kmalloc(s, ret, size, flags);
 
@@ -4581,7 +4559,7 @@ void kfree(const void *x)
 		return;
 	}
 	slab = folio_slab(folio);
-	slab_free(slab->slab_cache, slab, object, NULL, 1, _RET_IP_);
+	slab_free(slab->slab_cache, slab, object, NULL, &object, 1, _RET_IP_);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -4890,6 +4868,9 @@ __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
 
 	s = find_mergeable(size, align, flags, name, ctor);
 	if (s) {
+		if (sysfs_slab_alias(s, name))
+			return NULL;
+
 		s->refcount++;
 
 		/*
@@ -4898,11 +4879,6 @@ __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
 		 */
 		s->object_size = max(s->object_size, size);
 		s->inuse = max(s->inuse, ALIGN(size, sizeof(void *)));
-
-		if (sysfs_slab_alias(s, name)) {
-			s->refcount--;
-			s = NULL;
-		}
 	}
 
 	return s;
@@ -4948,7 +4924,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 
 	ret = slab_alloc(s, NULL, gfpflags, caller, size);
 
 	/* Honor the call site pointer we received. */
-	trace_kmalloc(caller, ret, size, s->size, gfpflags);
+	trace_kmalloc(caller, ret, s, size, s->size, gfpflags);
 
 	return ret;
 }
@@ -4964,7 +4940,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
 		ret = kmalloc_large_node(size, gfpflags, node);
 
-		trace_kmalloc_node(caller, ret,
+		trace_kmalloc_node(caller, ret, NULL,
 				   size, PAGE_SIZE << get_order(size),
 				   gfpflags, node);
 
@@ -4979,7 +4955,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 
 	ret = slab_alloc_node(s, NULL, gfpflags, node, caller, size);
 
 	/* Honor the call site pointer we received. */
-	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
+	trace_kmalloc_node(caller, ret, s, size, s->size, gfpflags, node);
 
 	return ret;
 }
diff --git a/tools/vm/slabinfo.c b/tools/vm/slabinfo.c
index 9b68658b6bb85..5b98f3ee58a58 100644
--- a/tools/vm/slabinfo.c
+++ b/tools/vm/slabinfo.c
@@ -233,6 +233,24 @@ static unsigned long read_slab_obj(struct slabinfo *s, const char *name)
 	return l;
 }
 
+static unsigned long read_debug_slab_obj(struct slabinfo *s, const char *name)
+{
+	char x[128];
+	FILE *f;
+	size_t l;
+
+	snprintf(x, 128, "/sys/kernel/debug/slab/%s/%s", s->name, name);
+	f = fopen(x, "r");
+	if (!f) {
+		buffer[0] = 0;
+		l = 0;
+	} else {
+		l = fread(buffer, 1, sizeof(buffer), f);
+		buffer[l] = 0;
+		fclose(f);
+	}
+	return l;
+}
 
 /*
  * Put a size string together
@@ -409,14 +427,18 @@ static void show_tracking(struct slabinfo *s)
 {
 	printf("\n%s: Kernel object allocation\n", s->name);
 	printf("-----------------------------------------------------------------------\n");
-	if (read_slab_obj(s, "alloc_calls"))
+	if (read_debug_slab_obj(s, "alloc_traces"))
+		printf("%s", buffer);
+	else if (read_slab_obj(s, "alloc_calls"))
 		printf("%s", buffer);
 	else
 		printf("No Data\n");
 
 	printf("\n%s: Kernel object freeing\n", s->name);
 	printf("------------------------------------------------------------------------\n");
-	if (read_slab_obj(s, "free_calls"))
+	if (read_debug_slab_obj(s, "free_traces"))
+		printf("%s", buffer);
+	else if (read_slab_obj(s, "free_calls"))
 		printf("%s", buffer);
 	else
 		printf("No Data\n");
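The most intricate piece of the merge is the rewrite of SLUB's build_detached_freelist(): instead of NULLing out processed slots of the pointer array and re-scanning past them, it now swaps objects belonging to the current slab to the tail of the array and returns the number of entries still left to process. Below is a standalone userspace sketch of that partitioning step only; virt_to_slab() is replaced by a hypothetical group-id lookup and the freelist linking is omitted:

```c
#include <stddef.h>
#include <stdio.h>

/* Sketch of the new partitioning in build_detached_freelist():
 * objects in the same "slab" as p[size-1] are compacted to the tail
 * of the array; the return value is how many entries remain. */
static void swap_ptr(void **a, void **b)
{
	void *tmp = *a; *a = *b; *b = tmp;
}

/* stand-in for virt_to_slab(): here an object's "slab" is an int id */
static int slab_of(void *obj) { return *(int *)obj; }

static size_t detach_group(size_t size, void **p)
{
	int lookahead = 3;               /* same look-ahead limit as SLUB */
	void *object = p[--size];        /* seed group from the last entry */
	int group = slab_of(object);
	size_t same = size;

	while (size) {
		object = p[--size];
		if (slab_of(object) == group) {
			same--;
			if (size != same)
				swap_ptr(&p[size], &p[same]);
			continue;
		}
		if (!--lookahead)        /* give up after a few misses */
			break;
	}
	return same;
}

int main(void)
{
	int a = 1, b = 2, c = 1, d = 1;
	void *p[] = { &a, &b, &c, &d };
	size_t rest = detach_group(4, p);

	/* &a, &c, &d share group 1 and end up at the tail; &b (group 2)
	 * stays within the first "rest" entries for the next pass. */
	printf("remaining: %zu\n", rest);   /* prints: remaining: 1 */
	return 0;
}
```

This swap-based compaction is what lets the new kmem_cache_free_bulk() hand the matched objects to slab_free() as the contiguous range &p[size], so memcg_slab_free_hook() can run once per detached group instead of once over the whole array up front.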
