io_uring: get rid of double locking
We don't need to take both uring_locks at once: msg_ring can be split into two parts, first getting a file from the filetable of the first ring and then installing it into the second one.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/a80ecc2bc99c3b3f2cf20015d618b7c51419a797.1670384893.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 11373026f2 (parent 77e443ab29)
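For background (not part of the commit): the path being reworked is MSG_RING's fixed-file passing, where one ring copies a registered file into another ring's file table. A minimal userspace sketch of driving it is below; it assumes a liburing new enough to provide io_uring_prep_msg_ring_fd() and io_uring_register_files_sparse() (helper names and availability depend on the liburing version), and error handling is omitted.

#include <fcntl.h>
#include <stdio.h>
#include <liburing.h>

int main(void)
{
	struct io_uring src, dst;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int fd;

	io_uring_queue_init(8, &src, 0);
	io_uring_queue_init(8, &dst, 0);

	/* Source ring: register one fixed file at slot 0. */
	fd = open("/dev/null", O_RDONLY);
	io_uring_register_files(&src, &fd, 1);

	/* Target ring: a sparse table so the passed file has a slot to land in. */
	io_uring_register_files_sparse(&dst, 8);

	/* Ask the kernel to install src's fixed file 0 into dst's slot 0. */
	sqe = io_uring_get_sqe(&src);
	io_uring_prep_msg_ring_fd(sqe, dst.ring_fd, /* source slot */ 0,
				  /* target slot */ 0, /* user_data for dst */ 0x42, 0);
	io_uring_submit(&src);

	io_uring_wait_cqe(&src, &cqe);
	printf("fd pass result: %d\n", cqe->res);
	io_uring_cqe_seen(&src, cqe);

	io_uring_queue_exit(&src);
	io_uring_queue_exit(&dst);
	return 0;
}

On success the source ring's CQE reports the install result, and the target ring additionally receives a CQE carrying the 0x42 value unless IORING_MSG_RING_CQE_SKIP is set in the flags argument.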
io_uring/msg_ring.c

@@ -15,6 +15,7 @@
 struct io_msg {
 	struct file *file;
+	struct file *src_file;
 	u64 user_data;
 	u32 len;
 	u32 cmd;
 	u32 src_fd;
@@ -23,6 +24,17 @@ struct io_msg {
 	u32 flags;
 };
 
+void io_msg_ring_cleanup(struct io_kiocb *req)
+{
+	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
+
+	if (WARN_ON_ONCE(!msg->src_file))
+		return;
+
+	fput(msg->src_file);
+	msg->src_file = NULL;
+}
+
 static int io_msg_ring_data(struct io_kiocb *req)
 {
 	struct io_ring_ctx *target_ctx = req->file->private_data;
@@ -37,17 +49,13 @@ static int io_msg_ring_data(struct io_kiocb *req)
 	return -EOVERFLOW;
 }
 
-static void io_double_unlock_ctx(struct io_ring_ctx *ctx,
-				 struct io_ring_ctx *octx,
+static void io_double_unlock_ctx(struct io_ring_ctx *octx,
 				 unsigned int issue_flags)
 {
-	if (issue_flags & IO_URING_F_UNLOCKED)
-		mutex_unlock(&ctx->uring_lock);
 	mutex_unlock(&octx->uring_lock);
 }
 
-static int io_double_lock_ctx(struct io_ring_ctx *ctx,
-			      struct io_ring_ctx *octx,
+static int io_double_lock_ctx(struct io_ring_ctx *octx,
 			      unsigned int issue_flags)
 {
 	/*
@@ -60,17 +68,28 @@ static int io_double_lock_ctx(struct io_ring_ctx *ctx,
 			return -EAGAIN;
 		return 0;
 	}
-
-	/* Always grab smallest value ctx first. We know ctx != octx. */
-	if (ctx < octx) {
-		mutex_lock(&ctx->uring_lock);
-		mutex_lock(&octx->uring_lock);
-	} else {
-		mutex_lock(&octx->uring_lock);
-		mutex_lock(&ctx->uring_lock);
-	}
-
-	return 0;
+	mutex_lock(&octx->uring_lock);
+	return 0;
 }
 
+static struct file *io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags)
+{
+	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
+	struct io_ring_ctx *ctx = req->ctx;
+	struct file *file = NULL;
+	unsigned long file_ptr;
+	int idx = msg->src_fd;
+
+	io_ring_submit_lock(ctx, issue_flags);
+	if (likely(idx < ctx->nr_user_files)) {
+		idx = array_index_nospec(idx, ctx->nr_user_files);
+		file_ptr = io_fixed_file_slot(&ctx->file_table, idx)->file_ptr;
+		file = (struct file *) (file_ptr & FFS_MASK);
+		if (file)
+			get_file(file);
+	}
+	io_ring_submit_unlock(ctx, issue_flags);
+	return file;
+}
+
 static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
@@ -78,34 +97,27 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_ring_ctx *target_ctx = req->file->private_data;
 	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
 	struct io_ring_ctx *ctx = req->ctx;
-	unsigned long file_ptr;
-	struct file *src_file;
+	struct file *src_file = msg->src_file;
 	int ret;
 
 	if (target_ctx == ctx)
 		return -EINVAL;
+	if (!src_file) {
+		src_file = io_msg_grab_file(req, issue_flags);
+		if (!src_file)
+			return -EBADF;
+		msg->src_file = src_file;
+		req->flags |= REQ_F_NEED_CLEANUP;
+	}
 
-	ret = io_double_lock_ctx(ctx, target_ctx, issue_flags);
-	if (unlikely(ret))
-		return ret;
-
-	ret = -EBADF;
-	if (unlikely(msg->src_fd >= ctx->nr_user_files))
-		goto out_unlock;
-
-	msg->src_fd = array_index_nospec(msg->src_fd, ctx->nr_user_files);
-	file_ptr = io_fixed_file_slot(&ctx->file_table, msg->src_fd)->file_ptr;
-	if (!file_ptr)
-		goto out_unlock;
-
-	src_file = (struct file *) (file_ptr & FFS_MASK);
-	get_file(src_file);
+	if (unlikely(io_double_lock_ctx(target_ctx, issue_flags)))
+		return -EAGAIN;
 
 	ret = __io_fixed_fd_install(target_ctx, src_file, msg->dst_fd);
-	if (ret < 0) {
-		fput(src_file);
+	if (ret < 0)
 		goto out_unlock;
-	}
+	msg->src_file = NULL;
+	req->flags &= ~REQ_F_NEED_CLEANUP;
 
 	if (msg->flags & IORING_MSG_RING_CQE_SKIP)
 		goto out_unlock;
@@ -119,7 +131,7 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
 	if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
 		ret = -EOVERFLOW;
 out_unlock:
-	io_double_unlock_ctx(ctx, target_ctx, issue_flags);
+	io_double_unlock_ctx(target_ctx, issue_flags);
 	return ret;
 }
 
@@ -130,6 +142,7 @@ int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (unlikely(sqe->buf_index || sqe->personality))
 		return -EINVAL;
 
+	msg->src_file = NULL;
 	msg->user_data = READ_ONCE(sqe->off);
 	msg->len = READ_ONCE(sqe->len);
 	msg->cmd = READ_ONCE(sqe->addr);
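The locking change above, restated as a standalone sketch outside the kernel (plain pthreads, illustrative names only, not the commit's code): previously the send-fd path had to hold both uring_locks at once, either trylocking the target when the source lock was already held or taking both mutexes in address order; now the source lock is only taken briefly inside io_msg_grab_file(), and the install itself needs nothing beyond the target lock.

#include <errno.h>
#include <pthread.h>

struct ring { pthread_mutex_t uring_lock; };

/* Old scheme: take both locks, smallest address first, to avoid ABBA deadlock. */
void double_lock(struct ring *ctx, struct ring *octx)
{
	if (ctx < octx) {
		pthread_mutex_lock(&ctx->uring_lock);
		pthread_mutex_lock(&octx->uring_lock);
	} else {
		pthread_mutex_lock(&octx->uring_lock);
		pthread_mutex_lock(&ctx->uring_lock);
	}
}

/*
 * New scheme: only the target lock is taken for the install. If the caller
 * already holds its own ring's lock it merely trylocks the target and backs
 * off on contention (the kernel punts the request to io-wq), so no global
 * lock-ordering rule is needed.
 */
int lock_target_only(struct ring *octx, int own_lock_held)
{
	if (own_lock_held) {
		if (pthread_mutex_trylock(&octx->uring_lock) != 0)
			return -EAGAIN;
		return 0;
	}
	pthread_mutex_lock(&octx->uring_lock);
	return 0;
}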
io_uring/msg_ring.h

@@ -2,3 +2,4 @@
 
 int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags);
+void io_msg_ring_cleanup(struct io_kiocb *req);
io_uring/opdef.c

@@ -445,6 +445,7 @@ const struct io_op_def io_op_defs[] = {
 		.name = "MSG_RING",
 		.prep = io_msg_ring_prep,
 		.issue = io_msg_ring,
+		.cleanup = io_msg_ring_cleanup,
 	},
 	[IORING_OP_FSETXATTR] = {
 		.needs_file = 1,