On 2/8/24 2:55 PM, Bart Van Assche wrote:
> diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
> index 6bff6cb93789..4837e3071263 100644
> --- a/drivers/usb/gadget/function/f_fs.c
> +++ b/drivers/usb/gadget/function/f_fs.c
> @@ -31,7 +31,6 @@
>  #include <linux/usb/composite.h>
>  #include <linux/usb/functionfs.h>
> -#include <linux/aio.h>
>  #include <linux/kthread.h>
>  #include <linux/poll.h>
>  #include <linux/eventfd.h>
> @@ -1157,23 +1156,16 @@ ffs_epfile_open(struct inode *inode, struct file *file)
>  	return stream_open(inode, file);
>  }
> 
> -static int ffs_aio_cancel(struct kiocb *kiocb)
> +static void ffs_epfile_cancel_kiocb(struct kiocb *kiocb)
>  {
>  	struct ffs_io_data *io_data = kiocb->private;
>  	struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
>  	unsigned long flags;
> -	int value;
> 
>  	spin_lock_irqsave(&epfile->ffs->eps_lock, flags);
> -	if (io_data && io_data->ep && io_data->req)
> -		value = usb_ep_dequeue(io_data->ep, io_data->req);
> -	else
> -		value = -EINVAL;
> +	usb_ep_dequeue(io_data->ep, io_data->req);
>  	spin_unlock_irqrestore(&epfile->ffs->eps_lock, flags);
> -
> -	return value;
>  }

I'm assuming the NULL checks can go because it's just the async parts now?
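
If that's the case, it might be worth a comment in the new function so the checks don't grow back later. Something like this (untested, just your function with a comment added; the "async submit path always fills these in" part is my assumption, not something I verified):

static void ffs_epfile_cancel_kiocb(struct kiocb *kiocb)
{
	/*
	 * Assumption: only async requests end up here, and the async
	 * submit path always sets io_data->ep and io_data->req before
	 * the request becomes cancellable, hence no NULL checks.
	 */
	struct ffs_io_data *io_data = kiocb->private;
	struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
	unsigned long flags;

	spin_lock_irqsave(&epfile->ffs->eps_lock, flags);
	usb_ep_dequeue(io_data->ep, io_data->req);
	spin_unlock_irqrestore(&epfile->ffs->eps_lock, flags);
}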

> @@ -634,6 +619,8 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
>  	queue_rcu_work(system_wq, &ctx->free_rwork);
>  }
> 
> +static void aio_cancel_and_del(struct aio_kiocb *req);
> +

Just move the function higher up? It doesn't have any dependencies.

> @@ -1552,6 +1538,24 @@ static ssize_t aio_setup_rw(int rw, const struct iocb *iocb,
>  	return __import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter, compat);
>  }
> 
> +static void aio_add_rw_to_active_reqs(struct kiocb *req)
> +{
> +	struct aio_kiocb *aio = container_of(req, struct aio_kiocb, rw);
> +	struct kioctx *ctx = aio->ki_ctx;
> +	unsigned long flags;
> +
> +	/*
> +	 * If the .cancel_kiocb() callback has been set, add the request
> +	 * to the list of active requests.
> +	 */
> +	if (!req->ki_filp->f_op->cancel_kiocb)
> +		return;
> +
> +	spin_lock_irqsave(&ctx->ctx_lock, flags);
> +	list_add_tail(&aio->ki_list, &ctx->active_reqs);
> +	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
> +}

This can use spin_lock_irq(), as it's always called from process context.
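
IOW, something like this (untested, same body as your hunk, just with the flags save/restore dropped on the assumption that this is only ever reached from the submission path):

static void aio_add_rw_to_active_reqs(struct kiocb *req)
{
	struct aio_kiocb *aio = container_of(req, struct aio_kiocb, rw);
	struct kioctx *ctx = aio->ki_ctx;

	/* Only requests whose file supports cancellation are tracked. */
	if (!req->ki_filp->f_op->cancel_kiocb)
		return;

	/* Submission runs in process context, so plain _irq is enough. */
	spin_lock_irq(&ctx->ctx_lock);
	list_add_tail(&aio->ki_list, &ctx->active_reqs);
	spin_unlock_irq(&ctx->ctx_lock);
}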

> +/* Must be called only for IOCB_CMD_POLL requests. */
> +static void aio_poll_cancel(struct aio_kiocb *aiocb)
> +{
> +	struct poll_iocb *req = &aiocb->poll;
> +	struct kioctx *ctx = aiocb->ki_ctx;
> +
> +	lockdep_assert_held(&ctx->ctx_lock);
> +
> +	if (!poll_iocb_lock_wq(req))
> +		return;
> +
> +	WRITE_ONCE(req->cancelled, true);
> +	if (!req->work_scheduled) {
> +		schedule_work(&aiocb->poll.work);
> +		req->work_scheduled = true;
> +	}
> +	poll_iocb_unlock_wq(req);
> +}

Not your code, it's just moved, but this looks racy. Might not matter, and obviously beyond the scope of this change.

> +static void aio_cancel_and_del(struct aio_kiocb *req)
> +{
> +	void (*cancel_kiocb)(struct kiocb *) =
> +		req->rw.ki_filp->f_op->cancel_kiocb;
> +	struct kioctx *ctx = req->ki_ctx;
> +
> +	lockdep_assert_held(&ctx->ctx_lock);
> +
> +	switch (req->ki_opcode) {
> +	case IOCB_CMD_PREAD:
> +	case IOCB_CMD_PWRITE:
> +	case IOCB_CMD_PREADV:
> +	case IOCB_CMD_PWRITEV:
> +		if (cancel_kiocb)
> +			cancel_kiocb(&req->rw);
> +		break;
> +	case IOCB_CMD_FSYNC:
> +	case IOCB_CMD_FDSYNC:
> +		break;
> +	case IOCB_CMD_POLL:
> +		aio_poll_cancel(req);
> +		break;
> +	default:
> +		WARN_ONCE(true, "invalid aio operation %d\n", req->ki_opcode);
> +	}
> +
> +	list_del_init(&req->ki_list);
> +}

Why don't you just keep ki_cancel() and change it to a void return that takes an aio_kiocb? Then you don't need this odd switch, or to add an opcode field just for this. That seems cleaner.
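
Rough sketch of what I mean below. Hand-wavy and not compile tested, the aio_rw_cancel() name and exact field placement are made up, but it shows the shape: each submission path installs its own cancel callback, and aio_cancel_and_del() just calls whatever is there.

/* in struct aio_kiocb, instead of adding ki_opcode: */
	void		(*ki_cancel)(struct aio_kiocb *);

/* hypothetical wrapper for the read/write cases */
static void aio_rw_cancel(struct aio_kiocb *req)
{
	void (*cancel_kiocb)(struct kiocb *) =
		req->rw.ki_filp->f_op->cancel_kiocb;

	if (cancel_kiocb)
		cancel_kiocb(&req->rw);
}

static void aio_cancel_and_del(struct aio_kiocb *req)
{
	lockdep_assert_held(&req->ki_ctx->ctx_lock);

	if (req->ki_cancel)
		req->ki_cancel(req);
	list_del_init(&req->ki_list);
}

/* and at submission time, e.g.: */
	req->ki_cancel = aio_rw_cancel;		/* PREAD/PWRITE/PREADV/PWRITEV */
	req->ki_cancel = aio_poll_cancel;	/* IOCB_CMD_POLL */

FSYNC/FDSYNC would simply leave it NULL.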
Outside of these little nits, looks alright. I'd still love to kill the silly cancel code just for the gadget bits, but that's for another day. And since the gadget and aio code basically never changes, a cleaned-up variant of this patch should be trivial enough to backport to stable, so I don't think we need to worry about doing a fixup first.