Failures inside codetag_load_module() are currently ignored. As a result an error there would not cause a module load failure and freeing of the associated resources. Correct this behavior by propagating the error code to the caller and handling possible errors. With this change, an error allocating percpu counters, which happens at this stage, will no longer be ignored and will cause a module load failure and freeing of resources. With this change we also no longer need to disable memory allocation profiling when this error happens; instead we fail to load the module.
Fixes: 10075262888b ("alloc_tag: allocate percpu counters for module tags dynamically") Reported-by: Casey Chen cachen@purestorage.com Closes: https://lore.kernel.org/all/20250520231620.15259-1-cachen@purestorage.com/ Signed-off-by: Suren Baghdasaryan surenb@google.com Cc: stable@vger.kernel.org --- include/linux/codetag.h | 8 ++++---- kernel/module/main.c | 5 +++-- lib/alloc_tag.c | 12 +++++++----- lib/codetag.c | 34 +++++++++++++++++++++++++--------- 4 files changed, 39 insertions(+), 20 deletions(-)
diff --git a/include/linux/codetag.h b/include/linux/codetag.h index 0ee4c21c6dbc..5f2b9a1f722c 100644 --- a/include/linux/codetag.h +++ b/include/linux/codetag.h @@ -36,8 +36,8 @@ union codetag_ref { struct codetag_type_desc { const char *section; size_t tag_size; - void (*module_load)(struct module *mod, - struct codetag *start, struct codetag *end); + int (*module_load)(struct module *mod, + struct codetag *start, struct codetag *end); void (*module_unload)(struct module *mod, struct codetag *start, struct codetag *end); #ifdef CONFIG_MODULES @@ -89,7 +89,7 @@ void *codetag_alloc_module_section(struct module *mod, const char *name, unsigned long align); void codetag_free_module_sections(struct module *mod); void codetag_module_replaced(struct module *mod, struct module *new_mod); -void codetag_load_module(struct module *mod); +int codetag_load_module(struct module *mod); void codetag_unload_module(struct module *mod);
#else /* defined(CONFIG_CODE_TAGGING) && defined(CONFIG_MODULES) */ @@ -103,7 +103,7 @@ codetag_alloc_module_section(struct module *mod, const char *name, unsigned long align) { return NULL; } static inline void codetag_free_module_sections(struct module *mod) {} static inline void codetag_module_replaced(struct module *mod, struct module *new_mod) {} -static inline void codetag_load_module(struct module *mod) {} +static inline int codetag_load_module(struct module *mod) { return 0; } static inline void codetag_unload_module(struct module *mod) {}
#endif /* defined(CONFIG_CODE_TAGGING) && defined(CONFIG_MODULES) */ diff --git a/kernel/module/main.c b/kernel/module/main.c index 5c6ab20240a6..9861c2ac5fd5 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -3399,11 +3399,12 @@ static int load_module(struct load_info *info, const char __user *uargs, goto sysfs_cleanup; }
+ if (codetag_load_module(mod)) + goto sysfs_cleanup; + /* Get rid of temporary copy. */ free_copy(info, flags);
- codetag_load_module(mod); - /* Done! */ trace_module_load(mod);
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c index 45dae7da70e1..d48b80f3f007 100644 --- a/lib/alloc_tag.c +++ b/lib/alloc_tag.c @@ -607,15 +607,16 @@ static void release_module_tags(struct module *mod, bool used) mas_unlock(&mas); }
-static void load_module(struct module *mod, struct codetag *start, struct codetag *stop) +static int load_module(struct module *mod, struct codetag *start, struct codetag *stop) { /* Allocate module alloc_tag percpu counters */ struct alloc_tag *start_tag; struct alloc_tag *stop_tag; struct alloc_tag *tag;
+ /* percpu counters for core allocations are already statically allocated */ if (!mod) - return; + return 0;
start_tag = ct_to_alloc_tag(start); stop_tag = ct_to_alloc_tag(stop); @@ -627,12 +628,13 @@ static void load_module(struct module *mod, struct codetag *start, struct codeta free_percpu(tag->counters); tag->counters = NULL; } - shutdown_mem_profiling(true); - pr_err("Failed to allocate memory for allocation tag percpu counters in the module %s. Memory allocation profiling is disabled!\n", + pr_err("Failed to allocate memory for allocation tag percpu counters in the module %s\n", mod->name); - break; + return -ENOMEM; } } + + return 0; }
static void replace_module(struct module *mod, struct module *new_mod) diff --git a/lib/codetag.c b/lib/codetag.c index de332e98d6f5..650d54d7e14d 100644 --- a/lib/codetag.c +++ b/lib/codetag.c @@ -167,6 +167,7 @@ static int codetag_module_init(struct codetag_type *cttype, struct module *mod) { struct codetag_range range; struct codetag_module *cmod; + int mod_id; int err;
range = get_section_range(mod, cttype->desc.section); @@ -190,11 +191,20 @@ static int codetag_module_init(struct codetag_type *cttype, struct module *mod) cmod->range = range;
down_write(&cttype->mod_lock); - err = idr_alloc(&cttype->mod_idr, cmod, 0, 0, GFP_KERNEL); - if (err >= 0) { - cttype->count += range_size(cttype, &range); - if (cttype->desc.module_load) - cttype->desc.module_load(mod, range.start, range.stop); + mod_id = idr_alloc(&cttype->mod_idr, cmod, 0, 0, GFP_KERNEL); + if (mod_id >= 0) { + if (cttype->desc.module_load) { + err = cttype->desc.module_load(mod, range.start, range.stop); + if (!err) + cttype->count += range_size(cttype, &range); + else + idr_remove(&cttype->mod_idr, mod_id); + } else { + cttype->count += range_size(cttype, &range); + err = 0; + } + } else { + err = mod_id; } up_write(&cttype->mod_lock);
@@ -295,17 +305,23 @@ void codetag_module_replaced(struct module *mod, struct module *new_mod) mutex_unlock(&codetag_lock); }
-void codetag_load_module(struct module *mod) +int codetag_load_module(struct module *mod) { struct codetag_type *cttype; + int ret = 0;
if (!mod) - return; + return 0;
mutex_lock(&codetag_lock); - list_for_each_entry(cttype, &codetag_types, link) - codetag_module_init(cttype, mod); + list_for_each_entry(cttype, &codetag_types, link) { + ret = codetag_module_init(cttype, mod); + if (ret) + break; + } mutex_unlock(&codetag_lock); + + return ret; }
void codetag_unload_module(struct module *mod)
base-commit: 9f3e87f6c8d4b28b96eb8bddb22d3ba4b846e10b
On Wed, May 21, 2025 at 9:06 AM Suren Baghdasaryan surenb@google.com wrote:
Failures inside codetag_load_module() are currently ignored. As a result an error there would not cause a module load failure and freeing of the associated resources. Correct this behavior by propagating the error code to the caller and handling possible errors. With this change, error to allocate percpu counters, which happens at this stage, will not be ignored and will cause a module load failure and freeing of resources. With this change we also do not need to disable memory allocation profiling when this error happens, instead we fail to load the module.
Fixes: 10075262888b ("alloc_tag: allocate percpu counters for module tags dynamically") Reported-by: Casey Chen cachen@purestorage.com Closes: https://lore.kernel.org/all/20250520231620.15259-1-cachen@purestorage.com/ Signed-off-by: Suren Baghdasaryan surenb@google.com Cc: stable@vger.kernel.org
include/linux/codetag.h | 8 ++++---- kernel/module/main.c | 5 +++-- lib/alloc_tag.c | 12 +++++++----- lib/codetag.c | 34 +++++++++++++++++++++++++--------- 4 files changed, 39 insertions(+), 20 deletions(-)
diff --git a/include/linux/codetag.h b/include/linux/codetag.h index 0ee4c21c6dbc..5f2b9a1f722c 100644 --- a/include/linux/codetag.h +++ b/include/linux/codetag.h @@ -36,8 +36,8 @@ union codetag_ref { struct codetag_type_desc { const char *section; size_t tag_size;
void (*module_load)(struct module *mod,
struct codetag *start, struct codetag *end);
int (*module_load)(struct module *mod,
struct codetag *start, struct codetag *end); void (*module_unload)(struct module *mod, struct codetag *start, struct codetag *end);
#ifdef CONFIG_MODULES @@ -89,7 +89,7 @@ void *codetag_alloc_module_section(struct module *mod, const char *name, unsigned long align); void codetag_free_module_sections(struct module *mod); void codetag_module_replaced(struct module *mod, struct module *new_mod); -void codetag_load_module(struct module *mod); +int codetag_load_module(struct module *mod); void codetag_unload_module(struct module *mod);
#else /* defined(CONFIG_CODE_TAGGING) && defined(CONFIG_MODULES) */ @@ -103,7 +103,7 @@ codetag_alloc_module_section(struct module *mod, const char *name, unsigned long align) { return NULL; } static inline void codetag_free_module_sections(struct module *mod) {} static inline void codetag_module_replaced(struct module *mod, struct module *new_mod) {} -static inline void codetag_load_module(struct module *mod) {} +static inline int codetag_load_module(struct module *mod) { return 0; } static inline void codetag_unload_module(struct module *mod) {}
#endif /* defined(CONFIG_CODE_TAGGING) && defined(CONFIG_MODULES) */ diff --git a/kernel/module/main.c b/kernel/module/main.c index 5c6ab20240a6..9861c2ac5fd5 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -3399,11 +3399,12 @@ static int load_module(struct load_info *info, const char __user *uargs, goto sysfs_cleanup; }
if (codetag_load_module(mod))
goto sysfs_cleanup;
/* Get rid of temporary copy. */ free_copy(info, flags);
codetag_load_module(mod);
/* Done! */ trace_module_load(mod);
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c index 45dae7da70e1..d48b80f3f007 100644 --- a/lib/alloc_tag.c +++ b/lib/alloc_tag.c @@ -607,15 +607,16 @@ static void release_module_tags(struct module *mod, bool used) mas_unlock(&mas); }
-static void load_module(struct module *mod, struct codetag *start, struct codetag *stop) +static int load_module(struct module *mod, struct codetag *start, struct codetag *stop) { /* Allocate module alloc_tag percpu counters */ struct alloc_tag *start_tag; struct alloc_tag *stop_tag; struct alloc_tag *tag;
/* percpu counters for core allocations are already statically allocated */ if (!mod)
return;
return 0; start_tag = ct_to_alloc_tag(start); stop_tag = ct_to_alloc_tag(stop);
@@ -627,12 +628,13 @@ static void load_module(struct module *mod, struct codetag *start, struct codeta free_percpu(tag->counters); tag->counters = NULL; }
shutdown_mem_profiling(true);
pr_err("Failed to allocate memory for allocation tag percpu counters in the module %s. Memory allocation profiling is disabled!\n",
pr_err("Failed to allocate memory for allocation tag percpu counters in the module %s\n", mod->name);
break;
return -ENOMEM; } }
return 0;
}
static void replace_module(struct module *mod, struct module *new_mod) diff --git a/lib/codetag.c b/lib/codetag.c index de332e98d6f5..650d54d7e14d 100644 --- a/lib/codetag.c +++ b/lib/codetag.c @@ -167,6 +167,7 @@ static int codetag_module_init(struct codetag_type *cttype, struct module *mod) { struct codetag_range range; struct codetag_module *cmod;
int mod_id; int err; range = get_section_range(mod, cttype->desc.section);
@@ -190,11 +191,20 @@ static int codetag_module_init(struct codetag_type *cttype, struct module *mod) cmod->range = range;
down_write(&cttype->mod_lock);
err = idr_alloc(&cttype->mod_idr, cmod, 0, 0, GFP_KERNEL);
if (err >= 0) {
cttype->count += range_size(cttype, &range);
if (cttype->desc.module_load)
cttype->desc.module_load(mod, range.start, range.stop);
mod_id = idr_alloc(&cttype->mod_idr, cmod, 0, 0, GFP_KERNEL);
if (mod_id >= 0) {
if (cttype->desc.module_load) {
err = cttype->desc.module_load(mod, range.start, range.stop);
if (!err)
cttype->count += range_size(cttype, &range);
else
idr_remove(&cttype->mod_idr, mod_id);
} else {
cttype->count += range_size(cttype, &range);
err = 0;
}
} else {
err = mod_id; } up_write(&cttype->mod_lock);
Overall looks good, just one small nit: should we not increase cttype->count if there is no module_load callback? Personally I prefer having tag allocation and counter allocation at the same place in move_module() by calling something like codetag_alloc_module_tag_counter(). But your approach looks more modular. I don't have a strong preference, you can choose what you want. Thanks!
int codetag_alloc_module_tag_counter(struct module *mod, void *start_addr, unsigned long size) { struct codetag_type *cttype; int ret = -ENODEV;
mutex_lock(&codetag_lock); list_for_each_entry(cttype, &codetag_types, link) { if (WARN_ON(!cttype->desc.alloc_counter_mem)) break;
down_write(&cttype->mod_lock); ret = cttype->desc.alloc_counter_mem(mod, start_addr, size); up_write(&cttype->mod_lock); break; } mutex_unlock(&codetag_lock);
return ret; }
Casey
@@ -295,17 +305,23 @@ void codetag_module_replaced(struct module *mod, struct module *new_mod) mutex_unlock(&codetag_lock); }
-void codetag_load_module(struct module *mod) +int codetag_load_module(struct module *mod) { struct codetag_type *cttype;
int ret = 0; if (!mod)
return;
return 0; mutex_lock(&codetag_lock);
list_for_each_entry(cttype, &codetag_types, link)
codetag_module_init(cttype, mod);
list_for_each_entry(cttype, &codetag_types, link) {
ret = codetag_module_init(cttype, mod);
if (ret)
break;
} mutex_unlock(&codetag_lock);
return ret;
}
void codetag_unload_module(struct module *mod)
base-commit: 9f3e87f6c8d4b28b96eb8bddb22d3ba4b846e10b
2.49.0.1112.g889b7c5bd8-goog
On Wed, May 21, 2025 at 2:04 PM Casey Chen cachen@purestorage.com wrote:
On Wed, May 21, 2025 at 9:06 AM Suren Baghdasaryan surenb@google.com wrote:
Failures inside codetag_load_module() are currently ignored. As a result an error there would not cause a module load failure and freeing of the associated resources. Correct this behavior by propagating the error code to the caller and handling possible errors. With this change, error to allocate percpu counters, which happens at this stage, will not be ignored and will cause a module load failure and freeing of resources. With this change we also do not need to disable memory allocation profiling when this error happens, instead we fail to load the module.
Fixes: 10075262888b ("alloc_tag: allocate percpu counters for module tags dynamically") Reported-by: Casey Chen cachen@purestorage.com Closes: https://lore.kernel.org/all/20250520231620.15259-1-cachen@purestorage.com/ Signed-off-by: Suren Baghdasaryan surenb@google.com Cc: stable@vger.kernel.org
include/linux/codetag.h | 8 ++++---- kernel/module/main.c | 5 +++-- lib/alloc_tag.c | 12 +++++++----- lib/codetag.c | 34 +++++++++++++++++++++++++--------- 4 files changed, 39 insertions(+), 20 deletions(-)
diff --git a/include/linux/codetag.h b/include/linux/codetag.h index 0ee4c21c6dbc..5f2b9a1f722c 100644 --- a/include/linux/codetag.h +++ b/include/linux/codetag.h @@ -36,8 +36,8 @@ union codetag_ref { struct codetag_type_desc { const char *section; size_t tag_size;
void (*module_load)(struct module *mod,
struct codetag *start, struct codetag *end);
int (*module_load)(struct module *mod,
struct codetag *start, struct codetag *end); void (*module_unload)(struct module *mod, struct codetag *start, struct codetag *end);
#ifdef CONFIG_MODULES @@ -89,7 +89,7 @@ void *codetag_alloc_module_section(struct module *mod, const char *name, unsigned long align); void codetag_free_module_sections(struct module *mod); void codetag_module_replaced(struct module *mod, struct module *new_mod); -void codetag_load_module(struct module *mod); +int codetag_load_module(struct module *mod); void codetag_unload_module(struct module *mod);
#else /* defined(CONFIG_CODE_TAGGING) && defined(CONFIG_MODULES) */ @@ -103,7 +103,7 @@ codetag_alloc_module_section(struct module *mod, const char *name, unsigned long align) { return NULL; } static inline void codetag_free_module_sections(struct module *mod) {} static inline void codetag_module_replaced(struct module *mod, struct module *new_mod) {} -static inline void codetag_load_module(struct module *mod) {} +static inline int codetag_load_module(struct module *mod) { return 0; } static inline void codetag_unload_module(struct module *mod) {}
#endif /* defined(CONFIG_CODE_TAGGING) && defined(CONFIG_MODULES) */ diff --git a/kernel/module/main.c b/kernel/module/main.c index 5c6ab20240a6..9861c2ac5fd5 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -3399,11 +3399,12 @@ static int load_module(struct load_info *info, const char __user *uargs, goto sysfs_cleanup; }
if (codetag_load_module(mod))
goto sysfs_cleanup;
/* Get rid of temporary copy. */ free_copy(info, flags);
codetag_load_module(mod);
/* Done! */ trace_module_load(mod);
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c index 45dae7da70e1..d48b80f3f007 100644 --- a/lib/alloc_tag.c +++ b/lib/alloc_tag.c @@ -607,15 +607,16 @@ static void release_module_tags(struct module *mod, bool used) mas_unlock(&mas); }
-static void load_module(struct module *mod, struct codetag *start, struct codetag *stop) +static int load_module(struct module *mod, struct codetag *start, struct codetag *stop) { /* Allocate module alloc_tag percpu counters */ struct alloc_tag *start_tag; struct alloc_tag *stop_tag; struct alloc_tag *tag;
/* percpu counters for core allocations are already statically allocated */ if (!mod)
return;
return 0; start_tag = ct_to_alloc_tag(start); stop_tag = ct_to_alloc_tag(stop);
@@ -627,12 +628,13 @@ static void load_module(struct module *mod, struct codetag *start, struct codeta free_percpu(tag->counters); tag->counters = NULL; }
shutdown_mem_profiling(true);
pr_err("Failed to allocate memory for allocation tag percpu counters in the module %s. Memory allocation profiling is disabled!\n",
pr_err("Failed to allocate memory for allocation tag percpu counters in the module %s\n", mod->name);
break;
return -ENOMEM; } }
return 0;
}
static void replace_module(struct module *mod, struct module *new_mod) diff --git a/lib/codetag.c b/lib/codetag.c index de332e98d6f5..650d54d7e14d 100644 --- a/lib/codetag.c +++ b/lib/codetag.c @@ -167,6 +167,7 @@ static int codetag_module_init(struct codetag_type *cttype, struct module *mod) { struct codetag_range range; struct codetag_module *cmod;
int mod_id; int err; range = get_section_range(mod, cttype->desc.section);
@@ -190,11 +191,20 @@ static int codetag_module_init(struct codetag_type *cttype, struct module *mod) cmod->range = range;
down_write(&cttype->mod_lock);
err = idr_alloc(&cttype->mod_idr, cmod, 0, 0, GFP_KERNEL);
if (err >= 0) {
cttype->count += range_size(cttype, &range);
if (cttype->desc.module_load)
cttype->desc.module_load(mod, range.start, range.stop);
mod_id = idr_alloc(&cttype->mod_idr, cmod, 0, 0, GFP_KERNEL);
if (mod_id >= 0) {
if (cttype->desc.module_load) {
err = cttype->desc.module_load(mod, range.start, range.stop);
if (!err)
cttype->count += range_size(cttype, &range);
else
idr_remove(&cttype->mod_idr, mod_id);
} else {
cttype->count += range_size(cttype, &range);
err = 0;
}
} else {
err = mod_id; } up_write(&cttype->mod_lock);
Overall looks good, just one small nit: should we not increase cttype->count if there is no module_load callback?
No, a codetag type might not require a module_load callback but can still have a non-empty tags section.
Personally I prefer having tag allocation and counter allocation at the same place in move_module() by calling something like codetag_alloc_module_tag_counter(). But your approach looks more modular. I don't have a strong preference, you can choose what you want. Thanks!
Yeah, I try to keep the codetagging footprint in the module loading code as small as possible, so let's avoid adding more hooks there. Thanks, Suren.
int codetag_alloc_module_tag_counter(struct module *mod, void *start_addr, unsigned long size) { struct codetag_type *cttype; int ret = -ENODEV;
mutex_lock(&codetag_lock); list_for_each_entry(cttype, &codetag_types, link) { if (WARN_ON(!cttype->desc.alloc_counter_mem)) break; down_write(&cttype->mod_lock); ret = cttype->desc.alloc_counter_mem(mod, start_addr, size); up_write(&cttype->mod_lock); break; } mutex_unlock(&codetag_lock); return ret;
}
Casey
@@ -295,17 +305,23 @@ void codetag_module_replaced(struct module *mod, struct module *new_mod) mutex_unlock(&codetag_lock); }
-void codetag_load_module(struct module *mod) +int codetag_load_module(struct module *mod) { struct codetag_type *cttype;
int ret = 0; if (!mod)
return;
return 0; mutex_lock(&codetag_lock);
list_for_each_entry(cttype, &codetag_types, link)
codetag_module_init(cttype, mod);
list_for_each_entry(cttype, &codetag_types, link) {
ret = codetag_module_init(cttype, mod);
if (ret)
break;
} mutex_unlock(&codetag_lock);
return ret;
}
void codetag_unload_module(struct module *mod)
base-commit: 9f3e87f6c8d4b28b96eb8bddb22d3ba4b846e10b
2.49.0.1112.g889b7c5bd8-goog
linux-stable-mirror@lists.linaro.org