On Tue, Oct 21, 2025 at 04:49:59AM +0000, Tzung-Bi Shih wrote:
> I didn't get the idea. With a mutex, how do we handle the open files?
> Is it something like this:
> - Maintain a list of open files in both .open() and .release().
> - In misc_deregister_sync(), traverse the list, do something (what?), and wait for the userspace programs to close the files.

You don't need any list; we don't want to close files.
Something like this; it is very simple. You can replace the rwsem with an SRCU: SRCU gives faster read-side locking but much slower synchronization.
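(Not part of the diff below, just to illustrate that trade-off: with SRCU the open path sits in a cheap srcu_read_lock()/srcu_read_unlock() section, and revoke sets the flag and then pays for a synchronize_srcu() grace period. The names here are made up for the example.)

/* Illustration only: SRCU flavour of the same idea.
 * init_srcu_struct(&sync->srcu) at setup, cleanup_srcu_struct() at teardown. */
#include <linux/srcu.h>

struct cdev_srcu_data {
	struct srcu_struct srcu;
	bool revoked;
};

/* open path: cheap read-side section, never blocks behind other openers */
static int cdev_srcu_open_section(struct cdev_srcu_data *sync)
{
	int idx, ret = 0;

	idx = srcu_read_lock(&sync->srcu);
	if (READ_ONCE(sync->revoked))
		ret = -ENODEV;
	/* ... otherwise call the driver's real ->open() here ... */
	srcu_read_unlock(&sync->srcu, idx);
	return ret;
}

/* revoke path: flips the flag, then waits out every open still in flight */
static void cdev_srcu_revoke(struct cdev_srcu_data *sync)
{
	WRITE_ONCE(sync->revoked, true);
	synchronize_srcu(&sync->srcu);	/* the slow part */
}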
diff --git a/fs/char_dev.c b/fs/char_dev.c
index c2ddb998f3c943..69bbfe9de4f3bb 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -5,6 +5,7 @@
  *  Copyright (C) 1991, 1992  Linus Torvalds
  */
 
+#include <linux/cleanup.h>
 #include <linux/init.h>
 #include <linux/fs.h>
 #include <linux/kdev_t.h>
@@ -343,6 +344,74 @@ void __unregister_chrdev(unsigned int major, unsigned int baseminor,
 	kfree(cd);
 }
 
+struct cdev_sync_data {
+	/* read-held across ->open(), write-held by revoke */
+	struct rw_semaphore sem;
+	const struct file_operations *orig_fops;
+	struct file_operations sync_fops;
+	bool revoked;
+};
+
+static int cdev_sync_open(struct inode *inode, struct file *filp)
+{
+	struct cdev *p = inode->i_cdev;
+	struct cdev_sync_data *sync = p->sync;
+	const struct file_operations *fops;
+	int ret;
+
+	scoped_cond_guard(rwsem_read_kill, return -ERESTARTSYS, &sync->sem) {
+		if (sync->revoked)
+			return -ENODEV;
+
+		fops = fops_get(sync->orig_fops);
+		if (!fops)
+			return -ENODEV;
+		if (fops->open) {
+			ret = fops->open(inode, filp);
+			if (ret) {
+				fops_put(fops);
+				return ret;
+			}
+		}
+	}
+	return 0;
+}
+
+static int cdev_sync_release(struct inode *inode, struct file *filp)
+{
+	struct cdev *p = inode->i_cdev;
+	struct cdev_sync_data *sync = p->sync;
+	int ret = 0;
+
+	/*
+	 * Release can continue to be called after unregister. The driver must
+	 * only clean up memory.
+	 */
+	if (sync->orig_fops->release)
+		ret = sync->orig_fops->release(inode, filp);
+	fops_put(sync->orig_fops);
+	return ret;
+}
+
+/* Must be called before chrdev_open() can happen */
+static int cdev_sync_init(struct cdev *p)
+{
+	struct cdev_sync_data *sync;
+
+	sync = kzalloc(sizeof(*sync), GFP_KERNEL);
+	if (!sync)
+		return -ENOMEM;
+	init_rwsem(&sync->sem);
+	sync->sync_fops.owner = THIS_MODULE;
+	sync->sync_fops.open = cdev_sync_open;
+	sync->sync_fops.release = cdev_sync_release;
+	// ..
+	p->is_sync = true;
+	p->sync = sync;
+	return 0;
+}
+
+/* After this returns no new opens can reach the driver's fops */
+static void cdev_sync_revoke(struct cdev *p)
+{
+	struct cdev_sync_data *sync = p->sync;
+
+	guard(rwsem_write)(&sync->sem);
+	sync->revoked = true;
+}
+
 static DEFINE_SPINLOCK(cdev_lock);
 
 static struct kobject *cdev_get(struct cdev *p)
@@ -405,7 +474,11 @@ static int chrdev_open(struct inode *inode, struct file *filp)
 		return ret;
 
 	ret = -ENXIO;
-	fops = fops_get(p->ops);
+	if (p->is_sync)
+		fops = fops_get(&p->sync->sync_fops);
+	else
+		fops = fops_get(p->ops);
+
 	if (!fops)
 		goto out_cdev_put;
 
diff --git a/include/linux/cdev.h b/include/linux/cdev.h
index 0e8cd6293debba..28f0445011df20 100644
--- a/include/linux/cdev.h
+++ b/include/linux/cdev.h
@@ -11,13 +11,19 @@
 struct file_operations;
 struct inode;
 struct module;
 
+struct cdev_sync_data;
+
 struct cdev {
 	struct kobject kobj;
 	struct module *owner;
-	const struct file_operations *ops;
+	union {
+		const struct file_operations *ops;
+		struct cdev_sync_data *sync;
+	};
 	struct list_head list;
 	dev_t dev;
 	unsigned int count;
+	bool is_sync;
 } __randomize_layout;
 
 void cdev_init(struct cdev *, const struct file_operations *);
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index f1aaf676a874a1..298c7d4d8abb5e 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -253,6 +253,7 @@ extern void up_write(struct rw_semaphore *sem);
 DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T))
 DEFINE_GUARD_COND(rwsem_read, _try, down_read_trylock(_T))
 DEFINE_GUARD_COND(rwsem_read, _intr, down_read_interruptible(_T), _RET == 0)
+DEFINE_GUARD_COND(rwsem_read, _kill, down_read_killable(_T), _RET == 0)
 
 DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
 DEFINE_GUARD_COND(rwsem_write, _try, down_write_trylock(_T))
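For completeness, the caller side would be something like the rough sketch below. It assumes cdev_sync_init()/cdev_sync_revoke() get exported and that the part elided with "// .." above snapshots p->ops into orig_fops; the my_* names are made up.

#include <linux/cdev.h>
#include <linux/fs.h>

static struct cdev my_cdev;

static const struct file_operations my_fops = {
	.owner = THIS_MODULE,
	/* the driver's real .open/.read/.release/... */
};

static int my_register(dev_t devno)
{
	int ret;

	cdev_init(&my_cdev, &my_fops);
	ret = cdev_sync_init(&my_cdev);	/* wraps my_fops behind sync_fops */
	if (ret)
		return ret;
	return cdev_add(&my_cdev, devno, 1);
}

static void my_unregister(void)
{
	/*
	 * Returns only after every chrdev_open() that raced with us has
	 * finished; from here on no driver fop other than ->release can be
	 * entered, and ->release may only free memory.
	 */
	cdev_sync_revoke(&my_cdev);
	cdev_del(&my_cdev);
}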