forked from luck/tmp_suning_uos_patched
io_uring: flip if handling after io_setup_async_rw
As recently done with send/recv, flip the if after rw_verify_area() in io_{read,write}() and de-indent the remaining code one level. This removes a compiler-mispredicted jump on the success/fast path. Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
1752f0adea
commit
fa15bafb71
146
fs/io_uring.c
146
fs/io_uring.c
|
@ -3034,57 +3034,56 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
|
|||
struct kiocb *kiocb = &req->rw.kiocb;
|
||||
struct iov_iter iter;
|
||||
size_t iov_count;
|
||||
ssize_t io_size, ret;
|
||||
ssize_t io_size, ret, ret2;
|
||||
unsigned long nr_segs;
|
||||
|
||||
ret = io_import_iovec(READ, req, &iovec, &iter, !force_nonblock);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
io_size = ret;
|
||||
req->result = io_size;
|
||||
|
||||
/* Ensure we clear previously set non-block flag */
|
||||
if (!force_nonblock)
|
||||
kiocb->ki_flags &= ~IOCB_NOWAIT;
|
||||
|
||||
io_size = ret;
|
||||
req->result = io_size;
|
||||
|
||||
/* If the file doesn't support async, just async punt */
|
||||
if (force_nonblock && !io_file_supports_async(req->file, READ))
|
||||
goto copy_iov;
|
||||
|
||||
iov_count = iov_iter_count(&iter);
|
||||
nr_segs = iter.nr_segs;
|
||||
ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count);
|
||||
if (!ret) {
|
||||
unsigned long nr_segs = iter.nr_segs;
|
||||
ssize_t ret2 = 0;
|
||||
if (unlikely(ret))
|
||||
goto out_free;
|
||||
|
||||
ret2 = io_iter_do_read(req, &iter);
|
||||
ret2 = io_iter_do_read(req, &iter);
|
||||
|
||||
/* Catch -EAGAIN return for forced non-blocking submission */
|
||||
if (!force_nonblock || (ret2 != -EAGAIN && ret2 != -EIO)) {
|
||||
kiocb_done(kiocb, ret2, cs);
|
||||
} else {
|
||||
iter.count = iov_count;
|
||||
iter.nr_segs = nr_segs;
|
||||
/* Catch -EAGAIN return for forced non-blocking submission */
|
||||
if (!force_nonblock || (ret2 != -EAGAIN && ret2 != -EIO)) {
|
||||
kiocb_done(kiocb, ret2, cs);
|
||||
} else {
|
||||
iter.count = iov_count;
|
||||
iter.nr_segs = nr_segs;
|
||||
copy_iov:
|
||||
ret = io_setup_async_rw(req, io_size, iovec,
|
||||
inline_vecs, &iter);
|
||||
if (ret)
|
||||
ret = io_setup_async_rw(req, io_size, iovec, inline_vecs,
|
||||
&iter);
|
||||
if (ret)
|
||||
goto out_free;
|
||||
/* it's copied and will be cleaned with ->io */
|
||||
iovec = NULL;
|
||||
/* if we can retry, do so with the callbacks armed */
|
||||
if (io_rw_should_retry(req)) {
|
||||
ret2 = io_iter_do_read(req, &iter);
|
||||
if (ret2 == -EIOCBQUEUED) {
|
||||
goto out_free;
|
||||
} else if (ret2 != -EAGAIN) {
|
||||
kiocb_done(kiocb, ret2, cs);
|
||||
goto out_free;
|
||||
/* it's copied and will be cleaned with ->io */
|
||||
iovec = NULL;
|
||||
/* if we can retry, do so with the callbacks armed */
|
||||
if (io_rw_should_retry(req)) {
|
||||
ret2 = io_iter_do_read(req, &iter);
|
||||
if (ret2 == -EIOCBQUEUED) {
|
||||
goto out_free;
|
||||
} else if (ret2 != -EAGAIN) {
|
||||
kiocb_done(kiocb, ret2, cs);
|
||||
goto out_free;
|
||||
}
|
||||
}
|
||||
kiocb->ki_flags &= ~IOCB_WAITQ;
|
||||
return -EAGAIN;
|
||||
}
|
||||
kiocb->ki_flags &= ~IOCB_WAITQ;
|
||||
return -EAGAIN;
|
||||
}
|
||||
out_free:
|
||||
if (iovec)
|
||||
|
@ -3117,19 +3116,19 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
|
|||
struct kiocb *kiocb = &req->rw.kiocb;
|
||||
struct iov_iter iter;
|
||||
size_t iov_count;
|
||||
ssize_t ret, io_size;
|
||||
ssize_t ret, ret2, io_size;
|
||||
unsigned long nr_segs;
|
||||
|
||||
ret = io_import_iovec(WRITE, req, &iovec, &iter, !force_nonblock);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
io_size = ret;
|
||||
req->result = io_size;
|
||||
|
||||
/* Ensure we clear previously set non-block flag */
|
||||
if (!force_nonblock)
|
||||
req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT;
|
||||
|
||||
io_size = ret;
|
||||
req->result = io_size;
|
||||
|
||||
/* If the file doesn't support async, just async punt */
|
||||
if (force_nonblock && !io_file_supports_async(req->file, WRITE))
|
||||
goto copy_iov;
|
||||
|
@ -3140,51 +3139,50 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
|
|||
goto copy_iov;
|
||||
|
||||
iov_count = iov_iter_count(&iter);
|
||||
nr_segs = iter.nr_segs;
|
||||
ret = rw_verify_area(WRITE, req->file, &kiocb->ki_pos, iov_count);
|
||||
if (!ret) {
|
||||
unsigned long nr_segs = iter.nr_segs;
|
||||
ssize_t ret2;
|
||||
if (unlikely(ret))
|
||||
goto out_free;
|
||||
|
||||
/*
|
||||
* Open-code file_start_write here to grab freeze protection,
|
||||
* which will be released by another thread in
|
||||
* io_complete_rw(). Fool lockdep by telling it the lock got
|
||||
* released so that it doesn't complain about the held lock when
|
||||
* we return to userspace.
|
||||
*/
|
||||
if (req->flags & REQ_F_ISREG) {
|
||||
__sb_start_write(file_inode(req->file)->i_sb,
|
||||
SB_FREEZE_WRITE, true);
|
||||
__sb_writers_release(file_inode(req->file)->i_sb,
|
||||
SB_FREEZE_WRITE);
|
||||
}
|
||||
kiocb->ki_flags |= IOCB_WRITE;
|
||||
/*
|
||||
* Open-code file_start_write here to grab freeze protection,
|
||||
* which will be released by another thread in
|
||||
* io_complete_rw(). Fool lockdep by telling it the lock got
|
||||
* released so that it doesn't complain about the held lock when
|
||||
* we return to userspace.
|
||||
*/
|
||||
if (req->flags & REQ_F_ISREG) {
|
||||
__sb_start_write(file_inode(req->file)->i_sb,
|
||||
SB_FREEZE_WRITE, true);
|
||||
__sb_writers_release(file_inode(req->file)->i_sb,
|
||||
SB_FREEZE_WRITE);
|
||||
}
|
||||
kiocb->ki_flags |= IOCB_WRITE;
|
||||
|
||||
if (req->file->f_op->write_iter)
|
||||
ret2 = call_write_iter(req->file, kiocb, &iter);
|
||||
else
|
||||
ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter);
|
||||
if (req->file->f_op->write_iter)
|
||||
ret2 = call_write_iter(req->file, kiocb, &iter);
|
||||
else
|
||||
ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter);
|
||||
|
||||
/*
|
||||
* Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
|
||||
* retry them without IOCB_NOWAIT.
|
||||
*/
|
||||
if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
|
||||
ret2 = -EAGAIN;
|
||||
if (!force_nonblock || ret2 != -EAGAIN) {
|
||||
kiocb_done(kiocb, ret2, cs);
|
||||
} else {
|
||||
iter.count = iov_count;
|
||||
iter.nr_segs = nr_segs;
|
||||
/*
|
||||
* Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
|
||||
* retry them without IOCB_NOWAIT.
|
||||
*/
|
||||
if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
|
||||
ret2 = -EAGAIN;
|
||||
if (!force_nonblock || ret2 != -EAGAIN) {
|
||||
kiocb_done(kiocb, ret2, cs);
|
||||
} else {
|
||||
iter.count = iov_count;
|
||||
iter.nr_segs = nr_segs;
|
||||
copy_iov:
|
||||
ret = io_setup_async_rw(req, io_size, iovec,
|
||||
inline_vecs, &iter);
|
||||
if (ret)
|
||||
goto out_free;
|
||||
/* it's copied and will be cleaned with ->io */
|
||||
iovec = NULL;
|
||||
return -EAGAIN;
|
||||
}
|
||||
ret = io_setup_async_rw(req, io_size, iovec, inline_vecs,
|
||||
&iter);
|
||||
if (ret)
|
||||
goto out_free;
|
||||
/* it's copied and will be cleaned with ->io */
|
||||
iovec = NULL;
|
||||
return -EAGAIN;
|
||||
}
|
||||
out_free:
|
||||
if (iovec)
|
||||
|
|
Loading…
Reference in New Issue
Block a user