xprtrdma: Remove atomic send completion counting
The sendctx circular queue now guarantees that xprtrdma cannot overflow the Send Queue, so remove the remaining bits of the original Send WQE counting mechanism.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent 01bb35c89d
commit 6f0afc2825
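For context on what replaces the removed counter: the endpoint now keeps a plain countdown, rep_send_count, re-armed from rep_send_batch, and roughly every rep_send_batch-th Send WR is posted with IB_SEND_SIGNALED; the sendctx circular queue bounds how many unsignaled WRs can be outstanding, which is why no atomic accounting is needed. Below is a minimal userspace sketch of that batching idea, not the kernel code: struct ep_model and send_is_signaled are hypothetical names invented here, only the field names rep_send_count and rep_send_batch come from the patch, and the real transport has additional signaling triggers this sketch omits.

#include <stdbool.h>
#include <stdio.h>

struct ep_model {
        unsigned int rep_send_count;
        unsigned int rep_send_batch;
};

/* Returns true when this Send WR should carry IB_SEND_SIGNALED:
 * the counter is simply decremented per send and re-armed from
 * rep_send_batch each time it runs out.  No atomics required.
 */
static bool send_is_signaled(struct ep_model *ep)
{
        if (ep->rep_send_count == 0) {
                ep->rep_send_count = ep->rep_send_batch;
                return true;
        }
        --ep->rep_send_count;
        return false;
}

int main(void)
{
        struct ep_model ep = { .rep_send_count = 3, .rep_send_batch = 3 };

        /* Every fourth send comes out signaled with these values. */
        for (int i = 0; i < 10; i++)
                printf("send %d: %s\n", i,
                       send_is_signaled(&ep) ? "signaled" : "unsignaled");
        return 0;
}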
@@ -419,7 +419,6 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
 			 IB_ACCESS_REMOTE_READ;
 
-	rpcrdma_set_signaled(&r_xprt->rx_ep, &reg_wr->wr);
 	rc = ib_post_send(ia->ri_id->qp, &reg_wr->wr, &bad_wr);
 	if (rc)
 		goto out_senderr;
@@ -507,12 +506,6 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
 	f->fr_cqe.done = frwr_wc_localinv_wake;
 	reinit_completion(&f->fr_linv_done);
 
-	/* Initialize CQ count, since there is always a signaled
-	 * WR being posted here.  The new cqcount depends on how
-	 * many SQEs are about to be consumed.
-	 */
-	rpcrdma_init_cqcount(&r_xprt->rx_ep, count);
-
 	/* Transport disconnect drains the receive CQ before it
 	 * replaces the QP. The RPC reply handler won't call us
 	 * unless ri_id->qp is a valid pointer.
@@ -545,7 +538,6 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
 	/* Find and reset the MRs in the LOCAL_INV WRs that did not
 	 * get posted.
 	 */
-	rpcrdma_init_cqcount(&r_xprt->rx_ep, -count);
 	while (bad_wr) {
 		f = container_of(bad_wr, struct rpcrdma_frmr,
 				 fr_invwr);
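A note on the count / -count pairing removed in the two hunks above: rpcrdma_init_cqcount() overwrote the countdown with rep_cqinit - count, so passing the chain length reserved that many SQEs up front (forcing a signaled completion sooner), and passing its negation after a failed post re-armed the counter with those SQEs credited back. A minimal sketch of the arithmetic, assuming illustrative values (cqinit = 63, a 16-WR LOCAL_INV chain):

#include <stdio.h>

int main(void)
{
        int cqinit = 63, count = 16;

        /* Before posting: bias the countdown by the SQEs the chain consumes. */
        int reserved = cqinit - count;          /* rpcrdma_init_cqcount(ep, count) */

        /* Posting failed: re-arm with those SQEs credited back instead. */
        int credited = cqinit - (-count);       /* rpcrdma_init_cqcount(ep, -count) */

        printf("reserved: %d, credited: %d\n", reserved, credited);    /* 47, 79 */
        return 0;
}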
@@ -553,10 +553,6 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
 	ep->rep_send_batch = min_t(unsigned int, RPCRDMA_MAX_SEND_BATCH,
 				   cdata->max_requests >> 2);
 	ep->rep_send_count = ep->rep_send_batch;
-	ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 - 1;
-	if (ep->rep_cqinit <= 2)
-		ep->rep_cqinit = 0;	/* always signal? */
-	rpcrdma_init_cqcount(ep, 0);
 	init_waitqueue_head(&ep->rep_connect_wait);
 	INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);
 
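The hunk above shows the two sizing policies side by side: the new scheme derives its signaling period from the request credit limit, while the old scheme primed a countdown to half the Send Queue depth. A rough sketch of the arithmetic with assumed values; RPCRDMA_MAX_SEND_BATCH is defined locally here purely for illustration, and both sizes below are made up:

#include <stdio.h>

#define RPCRDMA_MAX_SEND_BATCH 32       /* assumed value, illustration only */

static unsigned int min_uint(unsigned int a, unsigned int b)
{
        return a < b ? a : b;           /* stands in for the kernel's min_t() */
}

int main(void)
{
        unsigned int max_requests = 128;        /* made-up credit limit */
        int max_send_wr = 136;                  /* made-up Send Queue depth */

        /* New scheme: every rep_send_batch-th Send WR is signaled. */
        unsigned int send_batch = min_uint(RPCRDMA_MAX_SEND_BATCH,
                                           max_requests >> 2);

        /* Old scheme: countdown primed to half the Send Queue. */
        int cqinit = max_send_wr / 2 - 1;
        if (cqinit <= 2)
                cqinit = 0;     /* queue too small: signal every send */

        printf("new: every %uth send, old: every %dth send\n",
               send_batch, cqinit);
        return 0;
}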
@@ -95,8 +95,6 @@ enum {
 struct rpcrdma_ep {
 	unsigned int		rep_send_count;
 	unsigned int		rep_send_batch;
-	atomic_t		rep_cqcount;
-	int			rep_cqinit;
 	int			rep_connected;
 	struct ib_qp_init_attr	rep_attr;
 	wait_queue_head_t	rep_connect_wait;
@@ -106,25 +104,6 @@ struct rpcrdma_ep {
 	struct delayed_work	rep_connect_worker;
 };
 
-static inline void
-rpcrdma_init_cqcount(struct rpcrdma_ep *ep, int count)
-{
-	atomic_set(&ep->rep_cqcount, ep->rep_cqinit - count);
-}
-
-/* To update send queue accounting, provider must take a
- * send completion every now and then.
- */
-static inline void
-rpcrdma_set_signaled(struct rpcrdma_ep *ep, struct ib_send_wr *send_wr)
-{
-	send_wr->send_flags = 0;
-	if (unlikely(atomic_sub_return(1, &ep->rep_cqcount) <= 0)) {
-		rpcrdma_init_cqcount(ep, 0);
-		send_wr->send_flags = IB_SEND_SIGNALED;
-	}
-}
-
 /* Pre-allocate extra Work Requests for handling backward receives
  * and sends. This is a fixed value because the Work Queues are
  * allocated when the forward channel is set up.
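To see how the removed helpers behaved end to end, here is a self-contained userspace demo of the same countdown, using C11 atomics in place of the kernel's atomic_t; ep_model and set_signaled are hypothetical stand-ins, and rep_cqinit is set artificially low so the periodic signal is visible:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct ep_model {
        atomic_int rep_cqcount;
        int rep_cqinit;
};

static void init_cqcount(struct ep_model *ep, int count)
{
        atomic_store(&ep->rep_cqcount, ep->rep_cqinit - count);
}

/* Mirrors rpcrdma_set_signaled(): atomic_fetch_sub returns the old
 * value, so (old - 1) reproduces the kernel's atomic_sub_return().
 */
static bool set_signaled(struct ep_model *ep)
{
        if (atomic_fetch_sub(&ep->rep_cqcount, 1) - 1 <= 0) {
                init_cqcount(ep, 0);
                return true;    /* this WR gets IB_SEND_SIGNALED */
        }
        return false;
}

int main(void)
{
        struct ep_model ep = { .rep_cqinit = 4 };

        init_cqcount(&ep, 0);
        for (int i = 1; i <= 12; i++)
                if (set_signaled(&ep))
                        printf("send %d signaled\n", i);
        return 0;       /* prints sends 4, 8, 12 */
}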