io_uring-5.5-20191226

-----BEGIN PGP SIGNATURE-----
 
 iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAl4E+aYQHGF4Ym9lQGtl
 cm5lbC5kawAKCRD301j7KXHgprKmEAC6tcPlb2BB+7fOuj44uAdE+RInqMxbfD3w
 Tj9KpF47e02DUvBTtwDJDHJ9QT4PlJhd66M1xrp3IMUV13PKQt9OFfc6TH38Jz/9
 mhHDGNj1s+GVLRH22PQtFjyMgzHA6+UF4NxHLDJ62c2CtrCVswFRUiWrSR8LgvDp
 EkVELGEpi080ffton9nhyy3ylOCcpCu1xX1mOCg5EhcqzFQnZMlFaj9PDFrNhQzT
 e8fdl/nGoKtxZ/x6V8Oso02r/K1XievV4dfrAtOZg4jiqp/3G2eiqoGGcYnShSDU
 qulKLGsuHK51Lay8AGEaw3haeMn1PKCNe+xv0uCubHdf2iMyBdpjCLsLpTlhmtF/
 DkfP13H8k3/nUP9Y8FHt9+Ld56qpdqi/77ngCF84Ed4MFXKYkwyFFyHLMaBCw5zk
 Z07qISAbj3UeRPug+8iBKpzNBUXvXqqOGHp2h0faXz+C0yG0l7HOkhZ3m+dDD6vN
 6ABrMrS/ZuWdiW4PiJUejW81rlRKJaCgmTXMjjQCpgFeUqj6flB4sALp3amY9v7r
 CZVL67wBZ4u4YeKW0q8j4Lh3DrT5M7IGPP2uT9tw0FiamgYByC2rV/SAecxbh8f5
 NQJbL4uyJwcusOorRvaKOWU9KQaz5Q6dx9auHnmLC3A0WsEFxjgtVwG3iqw8zOeF
 8E7Lk3kPcA==
 =L2kR
 -----END PGP SIGNATURE-----

Merge tag 'io_uring-5.5-20191226' of git://git.kernel.dk/linux-block

Pull io_uring fixes from Jens Axboe:

 - Removal of now unused busy wqe list (Hillf)

 - Add cond_resched() to io-wq work processing (Hillf)

 - And then the series that I hinted at last week, which removes the
   sqe from the io_kiocb and keeps all sqe handling on the prep side.
   This guarantees that an opcode can't do the wrong thing and read
   the sqe more than once; a short sketch of the pattern follows this
   list. This is unchanged from last week, and no issues have been
   observed with it in testing. Hence I really think we should fold
   this into 5.5.
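
   As a rough sketch of that prep-side pattern (illustrative only: the
   struct layout and handler names below are simplified stand-ins
   loosely modeled on the connect opcode, not the actual fs/io_uring.c
   code):

	/* Per-request state filled in at prep time; the sqe itself is
	 * never stored in the io_kiocb. */
	struct io_connect {
		struct sockaddr __user	*addr;
		int			addr_len;
	};

	/* Prep side: read each sqe field exactly once, up front. */
	static int io_connect_prep(struct io_kiocb *req,
				   const struct io_uring_sqe *sqe)
	{
		struct io_connect *conn = &req->connect;

		conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
		conn->addr_len = READ_ONCE(sqe->addr2);
		return 0;
	}

	/* Issue side: the handler works purely from req->connect and
	 * has no sqe pointer left to re-read. */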

* tag 'io_uring-5.5-20191226' of git://git.kernel.dk/linux-block:
  io-wq: add cond_resched() to worker thread
  io-wq: remove unused busy list from io_wqe
  io_uring: pass in 'sqe' to the prep handlers
  io_uring: standardize the prep methods
  io_uring: read 'count' for IORING_OP_TIMEOUT in prep handler
  io_uring: move all prep state for IORING_OP_{SEND,RECV}MSG to prep handler
  io_uring: move all prep state for IORING_OP_CONNECT to prep handler
  io_uring: add and use struct io_rw for read/writes
  io_uring: use u64_to_user_ptr() consistently
commit 534121d289
Author: Linus Torvalds
Date:   2019-12-27 11:17:08 -08:00

 2 files changed, 357 insertions(+), 343 deletions(-)

fs/io-wq.c

@@ -92,7 +92,6 @@ struct io_wqe {
 	struct io_wqe_acct acct[2];
 
 	struct hlist_nulls_head free_list;
-	struct hlist_nulls_head busy_list;
 	struct list_head all_list;
 
 	struct io_wq *wq;
@@ -327,7 +326,6 @@ static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
 	if (worker->flags & IO_WORKER_F_FREE) {
 		worker->flags &= ~IO_WORKER_F_FREE;
 		hlist_nulls_del_init_rcu(&worker->nulls_node);
-		hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->busy_list);
 	}
 
 	/*
@@ -365,7 +363,6 @@ static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
 {
 	if (!(worker->flags & IO_WORKER_F_FREE)) {
 		worker->flags |= IO_WORKER_F_FREE;
-		hlist_nulls_del_init_rcu(&worker->nulls_node);
 		hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
 	}
 
@@ -432,6 +429,8 @@ static void io_worker_handle_work(struct io_worker *worker)
 		if (signal_pending(current))
 			flush_signals(current);
 
+		cond_resched();
+
 		spin_lock_irq(&worker->lock);
 		worker->cur_work = work;
 		spin_unlock_irq(&worker->lock);
@@ -798,10 +797,6 @@ void io_wq_cancel_all(struct io_wq *wq)
 	set_bit(IO_WQ_BIT_CANCEL, &wq->state);
 
-	/*
-	 * Browse both lists, as there's a gap between handing work off
-	 * to a worker and the worker putting itself on the busy_list
-	 */
 	rcu_read_lock();
 	for_each_node(node) {
 		struct io_wqe *wqe = wq->wqes[node];
 
@@ -1049,7 +1044,6 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
 		spin_lock_init(&wqe->lock);
 		INIT_WQ_LIST(&wqe->work_list);
 		INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
-		INIT_HLIST_NULLS_HEAD(&wqe->busy_list, 1);
 		INIT_LIST_HEAD(&wqe->all_list);
 	}
 

fs/io_uring.c: file diff suppressed because it is too large.