Unlike idr_for_each_entry(), which terminates on the first NULL entry, idr_for_each() passes them through. This fixes potential issues with the idr walk terminating prematurely due to transient NULL entries that exist when creating and destroying a handle.
Note that transient NULL pointers in drm_file.object_idr have been a thing since f6cd7daecff5 ("drm: Release driver references to handle before making it available again"), this is a really old issue.
Aside from temporarily inconsistent fdinfo statistics, there is no other impact from this issue.
Fixes: 686b21b5f6ca ("drm: Add fdinfo memory stats") Cc: Rob Clark robdclark@chromium.org Cc: Emil Velikov emil.l.velikov@gmail.com Cc: Tvrtko Ursulin tvrtko.ursulin@intel.com Cc: stable@vger.kernel.org # v6.5+ Signed-off-by: Simona Vetter simona.vetter@intel.com Signed-off-by: Simona Vetter simona.vetter@ffwll.ch --- drivers/gpu/drm/drm_file.c | 95 ++++++++++++++++++++++---------------- 1 file changed, 55 insertions(+), 40 deletions(-)
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index 246cf845e2c9..428a4eb85e94 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -892,6 +892,58 @@ void drm_print_memory_stats(struct drm_printer *p, } EXPORT_SYMBOL(drm_print_memory_stats);
+struct drm_bo_print_data { + struct drm_memory_stats status; + enum drm_gem_object_status supported_status; +}; + +static int +drm_bo_memory_stats(int id, void *ptr, void *data) +{ + struct drm_bo_print_data *drm_data; + struct drm_gem_object *obj = ptr; + enum drm_gem_object_status s = 0; + size_t add_size; + + if (!obj) + return 0; + + add_size = (obj->funcs && obj->funcs->rss) ? + obj->funcs->rss(obj) : obj->size; + + if (obj->funcs && obj->funcs->status) { + s = obj->funcs->status(obj); + drm_data->supported_status |= s; + } + + if (drm_gem_object_is_shared_for_memory_stats(obj)) + drm_data->status.shared += obj->size; + else + drm_data->status.private += obj->size; + + if (s & DRM_GEM_OBJECT_RESIDENT) { + drm_data->status.resident += add_size; + } else { + /* If already purged or not yet backed by pages, don't + * count it as purgeable: + */ + s &= ~DRM_GEM_OBJECT_PURGEABLE; + } + + if (!dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true))) { + drm_data->status.active += add_size; + drm_data->supported_status |= DRM_GEM_OBJECT_ACTIVE; + + /* If still active, don't count as purgeable: */ + s &= ~DRM_GEM_OBJECT_PURGEABLE; + } + + if (s & DRM_GEM_OBJECT_PURGEABLE) + drm_data->status.purgeable += add_size; + + return 0; +} + /** * drm_show_memory_stats - Helper to collect and show standard fdinfo memory stats * @p: the printer to print output to @@ -902,50 +954,13 @@ EXPORT_SYMBOL(drm_print_memory_stats); */ void drm_show_memory_stats(struct drm_printer *p, struct drm_file *file) { - struct drm_gem_object *obj; - struct drm_memory_stats status = {}; - enum drm_gem_object_status supported_status = 0; - int id; + struct drm_bo_print_data data = {};
spin_lock(&file->table_lock); - idr_for_each_entry (&file->object_idr, obj, id) { - enum drm_gem_object_status s = 0; - size_t add_size = (obj->funcs && obj->funcs->rss) ? - obj->funcs->rss(obj) : obj->size; - - if (obj->funcs && obj->funcs->status) { - s = obj->funcs->status(obj); - supported_status |= s; - } - - if (drm_gem_object_is_shared_for_memory_stats(obj)) - status.shared += obj->size; - else - status.private += obj->size; - - if (s & DRM_GEM_OBJECT_RESIDENT) { - status.resident += add_size; - } else { - /* If already purged or not yet backed by pages, don't - * count it as purgeable: - */ - s &= ~DRM_GEM_OBJECT_PURGEABLE; - } - - if (!dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true))) { - status.active += add_size; - supported_status |= DRM_GEM_OBJECT_ACTIVE; - - /* If still active, don't count as purgeable: */ - s &= ~DRM_GEM_OBJECT_PURGEABLE; - } - - if (s & DRM_GEM_OBJECT_PURGEABLE) - status.purgeable += add_size; - } + idr_for_each(&file->object_idr, &drm_bo_memory_stats, &data); spin_unlock(&file->table_lock);
- drm_print_memory_stats(p, &status, supported_status, "memory"); + drm_print_memory_stats(p, &data.status, data.supported_status, "memory"); } EXPORT_SYMBOL(drm_show_memory_stats);
On Wed, May 28, 2025 at 11:13:00AM +0200, Simona Vetter wrote:
Unlike idr_for_each_entry(), which terminates on the first NULL entry, idr_for_each() passes them through. This fixes potential issues with the idr walk terminating prematurely due to transient NULL entries that exist when creating and destroying a handle.
Note that transient NULL pointers in drm_file.object_idr have been a thing since f6cd7daecff5 ("drm: Release driver references to handle before making it available again"), this is a really old issue.
Aside from temporarily inconsistent fdinfo statistics, there is no other impact from this issue.
Fixes: 686b21b5f6ca ("drm: Add fdinfo memory stats") Cc: Rob Clark robdclark@chromium.org Cc: Emil Velikov emil.l.velikov@gmail.com Cc: Tvrtko Ursulin tvrtko.ursulin@intel.com Cc: stable@vger.kernel.org # v6.5+ Signed-off-by: Simona Vetter simona.vetter@intel.com Signed-off-by: Simona Vetter simona.vetter@ffwll.ch
Ok, I screwed up reading idr_for_each_entry() and idr_get_next_ul() big time; they already cope with NULL entries entirely fine.
Mea culpa. -Sima
drivers/gpu/drm/drm_file.c | 95 ++++++++++++++++++++++---------------- 1 file changed, 55 insertions(+), 40 deletions(-)
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index 246cf845e2c9..428a4eb85e94 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -892,6 +892,58 @@ void drm_print_memory_stats(struct drm_printer *p, } EXPORT_SYMBOL(drm_print_memory_stats); +struct drm_bo_print_data {
- struct drm_memory_stats status;
- enum drm_gem_object_status supported_status;
+};
+static int +drm_bo_memory_stats(int id, void *ptr, void *data) +{
- struct drm_bo_print_data *drm_data;
- struct drm_gem_object *obj = ptr;
- enum drm_gem_object_status s = 0;
- size_t add_size;
- if (!obj)
return 0;
- add_size = (obj->funcs && obj->funcs->rss) ?
obj->funcs->rss(obj) : obj->size;
- if (obj->funcs && obj->funcs->status) {
s = obj->funcs->status(obj);
drm_data->supported_status |= s;
- }
- if (drm_gem_object_is_shared_for_memory_stats(obj))
drm_data->status.shared += obj->size;
- else
drm_data->status.private += obj->size;
- if (s & DRM_GEM_OBJECT_RESIDENT) {
drm_data->status.resident += add_size;
- } else {
/* If already purged or not yet backed by pages, don't
* count it as purgeable:
*/
s &= ~DRM_GEM_OBJECT_PURGEABLE;
- }
- if (!dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true))) {
drm_data->status.active += add_size;
drm_data->supported_status |= DRM_GEM_OBJECT_ACTIVE;
/* If still active, don't count as purgeable: */
s &= ~DRM_GEM_OBJECT_PURGEABLE;
- }
- if (s & DRM_GEM_OBJECT_PURGEABLE)
drm_data->status.purgeable += add_size;
- return 0;
+}
/**
- drm_show_memory_stats - Helper to collect and show standard fdinfo memory stats
- @p: the printer to print output to
@@ -902,50 +954,13 @@ EXPORT_SYMBOL(drm_print_memory_stats); */ void drm_show_memory_stats(struct drm_printer *p, struct drm_file *file) {
- struct drm_gem_object *obj;
- struct drm_memory_stats status = {};
- enum drm_gem_object_status supported_status = 0;
- int id;
- struct drm_bo_print_data data = {};
spin_lock(&file->table_lock);
- idr_for_each_entry (&file->object_idr, obj, id) {
enum drm_gem_object_status s = 0;
size_t add_size = (obj->funcs && obj->funcs->rss) ?
obj->funcs->rss(obj) : obj->size;
if (obj->funcs && obj->funcs->status) {
s = obj->funcs->status(obj);
supported_status |= s;
}
if (drm_gem_object_is_shared_for_memory_stats(obj))
status.shared += obj->size;
else
status.private += obj->size;
if (s & DRM_GEM_OBJECT_RESIDENT) {
status.resident += add_size;
} else {
/* If already purged or not yet backed by pages, don't
* count it as purgeable:
*/
s &= ~DRM_GEM_OBJECT_PURGEABLE;
}
if (!dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true))) {
status.active += add_size;
supported_status |= DRM_GEM_OBJECT_ACTIVE;
/* If still active, don't count as purgeable: */
s &= ~DRM_GEM_OBJECT_PURGEABLE;
}
if (s & DRM_GEM_OBJECT_PURGEABLE)
status.purgeable += add_size;
- }
- idr_for_each(&file->object_idr, &drm_bo_memory_stats, &data); spin_unlock(&file->table_lock);
- drm_print_memory_stats(p, &status, supported_status, "memory");
- drm_print_memory_stats(p, &data.status, data.supported_status, "memory");
} EXPORT_SYMBOL(drm_show_memory_stats); -- 2.49.0
Hi Simona,
kernel test robot noticed the following build warnings:
[auto build test WARNING on next-20250527] [also build test WARNING on linus/master v6.15] [cannot apply to v6.15 v6.15-rc7 v6.15-rc6] [If your patch is applied to the wrong git tree, kindly drop us a note. And when submitting patch, we suggest to use '--base' as documented in https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Simona-Vetter/drm-gem-Fix-rac... base: next-20250527 patch link: https://lore.kernel.org/r/20250528091307.1894940-3-simona.vetter%40ffwll.ch patch subject: [PATCH 2/8] drm/fdinfo: Switch to idr_for_each() in drm_show_memory_stats() config: riscv-randconfig-001-20250529 (https://download.01.org/0day-ci/archive/20250529/202505290334.GjoY9qsk-lkp@i...) compiler: clang version 21.0.0git (https://github.com/llvm/llvm-project f819f46284f2a79790038e1f6649172789734ae8) reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250529/202505290334.GjoY9qsk-lkp@i...)
If you fix the issue in a separate patch/commit (i.e. not just a new version of the same patch/commit), kindly add following tags | Reported-by: kernel test robot lkp@intel.com | Closes: https://lore.kernel.org/oe-kbuild-all/202505290334.GjoY9qsk-lkp@intel.com/
All warnings (new ones prefixed by >>):
drivers/gpu/drm/drm_file.c:916:3: warning: variable 'drm_data' is uninitialized when used here [-Wuninitialized]
916 | drm_data->supported_status |= s; | ^~~~~~~~ drivers/gpu/drm/drm_file.c:903:36: note: initialize the variable 'drm_data' to silence this warning 903 | struct drm_bo_print_data *drm_data; | ^ | = NULL 1 warning generated.
vim +/drm_data +916 drivers/gpu/drm/drm_file.c
899 900 static int 901 drm_bo_memory_stats(int id, void *ptr, void *data) 902 { 903 struct drm_bo_print_data *drm_data; 904 struct drm_gem_object *obj = ptr; 905 enum drm_gem_object_status s = 0; 906 size_t add_size; 907 908 if (!obj) 909 return 0; 910 911 add_size = (obj->funcs && obj->funcs->rss) ? 912 obj->funcs->rss(obj) : obj->size; 913 914 if (obj->funcs && obj->funcs->status) { 915 s = obj->funcs->status(obj);
916 drm_data->supported_status |= s;
917 } 918 919 if (drm_gem_object_is_shared_for_memory_stats(obj)) 920 drm_data->status.shared += obj->size; 921 else 922 drm_data->status.private += obj->size; 923 924 if (s & DRM_GEM_OBJECT_RESIDENT) { 925 drm_data->status.resident += add_size; 926 } else { 927 /* If already purged or not yet backed by pages, don't 928 * count it as purgeable: 929 */ 930 s &= ~DRM_GEM_OBJECT_PURGEABLE; 931 } 932 933 if (!dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true))) { 934 drm_data->status.active += add_size; 935 drm_data->supported_status |= DRM_GEM_OBJECT_ACTIVE; 936 937 /* If still active, don't count as purgeable: */ 938 s &= ~DRM_GEM_OBJECT_PURGEABLE; 939 } 940 941 if (s & DRM_GEM_OBJECT_PURGEABLE) 942 drm_data->status.purgeable += add_size; 943 944 return 0; 945 } 946
linux-stable-mirror@lists.linaro.org