io_uring/poll: fix double poll req->flags races
CPU1                                 |  CPU2
io_poll_double_prepare()             |  io_poll_wake()
                                     |  poll->head = NULL
smp_load(&poll->head); /* NULL */    |
flags = req->flags;                  |
                                     |  req->flags &= ~SINGLE_POLL;
req->flags = flags | DOUBLE_POLL     |
The idea behind io_poll_double_prepare() is to serialise with the
first poll entry by taking the wq lock. However, it's not safe to assume
that io_poll_wake() is not running when we can't grab the lock (i.e.
when poll->head has already been cleared), and so we may race with it
modifying req->flags.
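
To make the lost-update window in the diagram concrete, here is a
minimal standalone userspace analogue (plain C with pthreads; the names,
flag values and barrier are illustrative, none of this is kernel code).
Thread A plays io_poll_double_prepare() doing a read-modify-write of
flags from a stale copy, thread B plays io_poll_wake() clearing
SINGLE_POLL; depending on scheduling, B's clear can be overwritten:

/*
 * Userspace sketch of the race above. The data race on 'flags' is
 * deliberate; compile with: gcc race.c -pthread
 */
#include <pthread.h>
#include <stdio.h>

#define SINGLE_POLL 0x1
#define DOUBLE_POLL 0x2

static unsigned flags = SINGLE_POLL;
static pthread_barrier_t sync_point;

/* plays io_poll_double_prepare(): RMW of flags via a stale copy */
static void *prepare(void *arg)
{
	unsigned stale = flags;             /* flags = req->flags; */
	pthread_barrier_wait(&sync_point);  /* open a window for the waker */
	flags = stale | DOUBLE_POLL;        /* req->flags = flags | DOUBLE_POLL */
	return NULL;
}

/* plays io_poll_wake(): clears SINGLE_POLL with no serialisation */
static void *wake(void *arg)
{
	pthread_barrier_wait(&sync_point);
	flags &= ~SINGLE_POLL;              /* req->flags &= ~SINGLE_POLL; */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_barrier_init(&sync_point, NULL, 2);
	pthread_create(&a, NULL, prepare, NULL);
	pthread_create(&b, NULL, wake, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	/* 0x3 means the waker's clear of SINGLE_POLL was lost */
	printf("flags = %#x\n", flags);
	return 0;
}

If both writers took the same head lock neither update could be lost,
which is what the patch enforces whenever the first entry is still
queued; when it is not, the patch backs off instead, as below.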
Skip double poll setup if that happens. It's ok because the first poll
entry will only be removed when it's definitely completing, e.g.
pollfree or oneshot with a valid mask.
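
Condensed into a compilable userspace sketch, the shape of the fix looks
roughly like this (the struct layout, names, mutex and malloc stand-ins
are all invented for illustration; only the control flow mirrors the
hunks below):

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct poll_head {
	pthread_mutex_t lock;
};

struct req {
	unsigned flags;
	struct poll_head *head;  /* NULLed once the first entry completes */
};

#define DOUBLE_POLL 0x2

/* fails only when polling is already completing by the first entry */
static bool poll_double_prepare(struct req *req)
{
	struct poll_head *head;

	head = __atomic_load_n(&req->head, __ATOMIC_ACQUIRE);
	if (!head)
		return false;
	pthread_mutex_lock(&head->lock);
	/* the waker modifies flags under the same lock: no lost update */
	req->flags |= DOUBLE_POLL;
	pthread_mutex_unlock(&head->lock);
	return true;
}

/* caller pattern from the __io_queue_proc() hunk */
static bool queue_double(struct req *req)
{
	void *second = malloc(64);  /* stands in for the second poll entry */

	if (!second || !poll_double_prepare(req)) {
		free(second);       /* completing (or OOM): just back off */
		return false;
	}
	/* the real code stashes the entry via *poll_ptr; omitted here */
	return true;
}

int main(void)
{
	struct poll_head head = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct req req = { .flags = 0, .head = &head };

	return queue_double(&req) ? 0 : 1;
}

The design point is that failure is reported only when the first entry
is already completing, so backing off never loses a wakeup.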
Fixes: 49f1c68e04 ("io_uring: optimise submission side poll_refs")
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/b7fab2d502f6121a7d7b199fe4d914a43ca9cdfd.1668184658.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 30a33669fa
parent 3851d25c75
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -394,7 +394,8 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 	return 1;
 }
 
-static void io_poll_double_prepare(struct io_kiocb *req)
+/* fails only when polling is already completing by the first entry */
+static bool io_poll_double_prepare(struct io_kiocb *req)
 {
 	struct wait_queue_head *head;
 	struct io_poll *poll = io_poll_get_single(req);
@@ -403,20 +404,20 @@ static void io_poll_double_prepare(struct io_kiocb *req)
 	rcu_read_lock();
 	head = smp_load_acquire(&poll->head);
 	/*
-	 * poll arm may not hold ownership and so race with
-	 * io_poll_wake() by modifying req->flags. There is only one
-	 * poll entry queued, serialise with it by taking its head lock.
+	 * poll arm might not hold ownership and so race for req->flags with
+	 * io_poll_wake(). There is only one poll entry queued, serialise with
+	 * it by taking its head lock. As we're still arming, the tw handler
+	 * is not going to be run, so there are no races with it.
 	 */
-	if (head)
+	if (head) {
 		spin_lock_irq(&head->lock);
-
-	req->flags |= REQ_F_DOUBLE_POLL;
-	if (req->opcode == IORING_OP_POLL_ADD)
-		req->flags |= REQ_F_ASYNC_DATA;
-
-	if (head)
+		req->flags |= REQ_F_DOUBLE_POLL;
+		if (req->opcode == IORING_OP_POLL_ADD)
+			req->flags |= REQ_F_ASYNC_DATA;
 		spin_unlock_irq(&head->lock);
+	}
 	rcu_read_unlock();
+	return !!head;
 }
 
 static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
@@ -454,7 +455,11 @@ static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
 		/* mark as double wq entry */
 		wqe_private |= IO_WQE_F_DOUBLE;
 		io_init_poll_iocb(poll, first->events, first->wait.func);
-		io_poll_double_prepare(req);
+		if (!io_poll_double_prepare(req)) {
+			/* the request is completing, just back off */
+			kfree(poll);
+			return;
+		}
 		*poll_ptr = poll;
 	} else {
 		/* fine to modify, there is no poll queued to race with us */