RDMA/core: Delete function indirection for alloc/free kernel CQ

The ib_alloc_cq*() and ib_free_cq*() calls are kernel-only verbs for
managing CQs and don't need an extra indirection layer just to call the
same functions with a constant NULL udata parameter.

Link: https://lore.kernel.org/r/20200907120921.476363-6-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
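
The entry points kernel ULPs already use are unchanged; only the plumbing
underneath them is. As a minimal sketch of the surviving kernel-only API
(the device pointer, CQE count, completion vector, and error handling are
illustrative, not part of this patch):

#include <rdma/ib_verbs.h>

static struct ib_cq *my_cq;	/* illustrative ULP state */

static int my_ulp_create_cq(struct ib_device *dev)
{
	/* ib_alloc_cq() is now a thin inline over __ib_alloc_cq();
	 * no ib_alloc_cq_user(..., NULL) hop remains. */
	my_cq = ib_alloc_cq(dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
	if (IS_ERR(my_cq))
		return PTR_ERR(my_cq);
	return 0;
}

static void my_ulp_destroy_cq(void)
{
	/* ib_free_cq() is now a plain exported function, not an inline
	 * wrapper around ib_free_cq_user(cq, NULL). */
	ib_free_cq(my_cq);
}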

diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -197,24 +197,22 @@ static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
 }
 
 /**
- * __ib_alloc_cq_user - allocate a completion queue
+ * __ib_alloc_cq - allocate a completion queue
  * @dev:		device to allocate the CQ for
  * @private:		driver private data, accessible from cq->cq_context
  * @nr_cqe:		number of CQEs to allocate
  * @comp_vector:	HCA completion vectors for this CQ
  * @poll_ctx:		context to poll the CQ from.
  * @caller:		module owner name.
- * @udata:		Valid user data or NULL for kernel object
  *
  * This is the proper interface to allocate a CQ for in-kernel users. A
  * CQ allocated with this interface will automatically be polled from the
  * specified context. The ULP must use wr->wr_cqe instead of wr->wr_id
  * to use this CQ abstraction.
  */
-struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
-				 int nr_cqe, int comp_vector,
-				 enum ib_poll_context poll_ctx,
-				 const char *caller, struct ib_udata *udata)
+struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
+			    int comp_vector, enum ib_poll_context poll_ctx,
+			    const char *caller)
 {
 	struct ib_cq_init_attr cq_attr = {
 		.cqe		= nr_cqe,
@@ -277,7 +275,7 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
 out_destroy_cq:
 	rdma_dim_destroy(cq);
 	rdma_restrack_del(&cq->res);
-	cq->device->ops.destroy_cq(cq, udata);
+	cq->device->ops.destroy_cq(cq, NULL);
 out_free_wc:
 	kfree(cq->wc);
 out_free_cq:
@@ -285,7 +283,7 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
 	trace_cq_alloc_error(nr_cqe, comp_vector, poll_ctx, ret);
 	return ERR_PTR(ret);
 }
-EXPORT_SYMBOL(__ib_alloc_cq_user);
+EXPORT_SYMBOL(__ib_alloc_cq);
 
 /**
  * __ib_alloc_cq_any - allocate a completion queue
@@ -310,17 +308,16 @@ struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
 			atomic_inc_return(&counter) %
 			min_t(int, dev->num_comp_vectors, num_online_cpus());
 
-	return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
-				  caller, NULL);
+	return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
+			     caller);
 }
 EXPORT_SYMBOL(__ib_alloc_cq_any);
 
 /**
- * ib_free_cq_user - free a completion queue
+ * ib_free_cq - free a completion queue
  * @cq:		completion queue to free.
- * @udata:	User data or NULL for kernel object
  */
-void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
+void ib_free_cq(struct ib_cq *cq)
 {
 	if (WARN_ON_ONCE(atomic_read(&cq->usecnt)))
 		return;
@@ -344,11 +341,11 @@ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
 	rdma_dim_destroy(cq);
 	trace_cq_free(cq);
 	rdma_restrack_del(&cq->res);
-	cq->device->ops.destroy_cq(cq, udata);
+	cq->device->ops.destroy_cq(cq, NULL);
 	kfree(cq->wc);
 	kfree(cq);
 }
-EXPORT_SYMBOL(ib_free_cq_user);
+EXPORT_SYMBOL(ib_free_cq);
 
 void ib_cq_pool_init(struct ib_device *dev)
 {
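
Note that driver callbacks still take a udata argument; the core's
kernel-CQ paths above simply pass NULL. A hedged sketch of that
convention on the driver side (the function name and body are
hypothetical, and the exact .destroy_cq signature varies across kernel
versions):

static void my_drv_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	if (udata) {
		/* user CQ: created via uverbs, release user mappings */
	} else {
		/* kernel CQ: udata == NULL, free kernel-side buffers */
	}
}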


diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -3802,46 +3802,15 @@ static inline int ib_post_recv(struct ib_qp *qp,
 	return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
 }
 
-struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
-				 int nr_cqe, int comp_vector,
-				 enum ib_poll_context poll_ctx,
-				 const char *caller, struct ib_udata *udata);
-
-/**
- * ib_alloc_cq_user: Allocate kernel/user CQ
- * @dev: The IB device
- * @private: Private data attached to the CQE
- * @nr_cqe: Number of CQEs in the CQ
- * @comp_vector: Completion vector used for the IRQs
- * @poll_ctx: Context used for polling the CQ
- * @udata: Valid user data or NULL for kernel objects
- */
-static inline struct ib_cq *ib_alloc_cq_user(struct ib_device *dev,
-					     void *private, int nr_cqe,
-					     int comp_vector,
-					     enum ib_poll_context poll_ctx,
-					     struct ib_udata *udata)
-{
-	return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
-				  KBUILD_MODNAME, udata);
-}
-
-/**
- * ib_alloc_cq: Allocate kernel CQ
- * @dev: The IB device
- * @private: Private data attached to the CQE
- * @nr_cqe: Number of CQEs in the CQ
- * @comp_vector: Completion vector used for the IRQs
- * @poll_ctx: Context used for polling the CQ
- *
- * NOTE: for user cq use ib_alloc_cq_user with valid udata!
- */
+struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
+			    int comp_vector, enum ib_poll_context poll_ctx,
+			    const char *caller);
+
 static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
 					int nr_cqe, int comp_vector,
 					enum ib_poll_context poll_ctx)
 {
-	return ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
-				NULL);
+	return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
+			     KBUILD_MODNAME);
 }
 
 struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
@@ -3863,26 +3832,7 @@ static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
 					 KBUILD_MODNAME);
 }
 
-/**
- * ib_free_cq_user - Free kernel/user CQ
- * @cq: The CQ to free
- * @udata: Valid user data or NULL for kernel objects
- *
- * NOTE: This function shouldn't be called on shared CQs.
- */
-void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata);
-
-/**
- * ib_free_cq - Free kernel CQ
- * @cq: The CQ to free
- *
- * NOTE: for user cq use ib_free_cq_user with valid udata!
- */
-static inline void ib_free_cq(struct ib_cq *cq)
-{
-	ib_free_cq_user(cq, NULL);
-}
+void ib_free_cq(struct ib_cq *cq);
 
 int ib_process_cq_direct(struct ib_cq *cq, int budget);
 
 /**
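
For callers with no IRQ-affinity preference, ib_alloc_cq_any() remains
available and now also funnels into __ib_alloc_cq() via
__ib_alloc_cq_any() above. A short usage sketch (the device pointer and
sizes are illustrative):

static int my_ulp_setup_any(struct ib_device *ibdev)
{
	/* __ib_alloc_cq_any() round-robins the completion vector and
	 * records KBUILD_MODNAME as the restrack caller (see above). */
	struct ib_cq *cq = ib_alloc_cq_any(ibdev, NULL, 64, IB_POLL_WORKQUEUE);

	if (IS_ERR(cq))
		return PTR_ERR(cq);
	ib_free_cq(cq);
	return 0;
}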