RDMA: Move XRCD to be under ib_core responsibility

Update the code to allocate and free the ib_xrcd structure in ib_core
instead of inside the drivers.

Link: https://lore.kernel.org/r/20200630101855.368895-4-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
commit 28ad5f65c3
parent 3b023e1b68
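The gist of the conversion: a driver no longer kmalloc()s and kfree()s its XRCD object. It embeds struct ib_xrcd at offset zero of its private structure and declares the total object size with INIT_RDMA_OBJ_SIZE(); ib_core then allocates and frees the memory, and the driver hooks only set up and tear down hardware state. A minimal sketch of the resulting driver pattern (the "foo" names are hypothetical, for illustration only — this is the shape the mlx4 and mlx5 conversions below reduce to):

/* Sketch only: "foo" identifiers are hypothetical, not part of this patch. */
struct foo_xrcd {
	struct ib_xrcd ibxrcd;	/* must sit at offset zero */
	u32 xrcdn;		/* hardware XRC domain number */
};

static int foo_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
{
	struct foo_xrcd *xrcd = container_of(ibxrcd, struct foo_xrcd, ibxrcd);

	/* Memory is already zero-allocated by ib_core; init HW state only. */
	return foo_hw_alloc_xrcdn(&xrcd->xrcdn);	/* hypothetical helper */
}

static void foo_dealloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
{
	struct foo_xrcd *xrcd = container_of(ibxrcd, struct foo_xrcd, ibxrcd);

	foo_hw_free_xrcdn(xrcd->xrcdn);	/* ib_core kfree()s the object after this */
}

static const struct ib_device_ops foo_xrc_ops = {
	.alloc_xrcd = foo_alloc_xrcd,
	.dealloc_xrcd = foo_dealloc_xrcd,
	INIT_RDMA_OBJ_SIZE(ib_xrcd, foo_xrcd, ibxrcd),
};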
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -2692,6 +2692,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
 	SET_OBJ_SIZE(dev_ops, ib_pd);
 	SET_OBJ_SIZE(dev_ops, ib_srq);
 	SET_OBJ_SIZE(dev_ops, ib_ucontext);
+	SET_OBJ_SIZE(dev_ops, ib_xrcd);
 }
 EXPORT_SYMBOL(ib_set_device_ops);
 
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -2293,20 +2293,28 @@ struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
 				   struct inode *inode, struct ib_udata *udata)
 {
 	struct ib_xrcd *xrcd;
+	int ret;
 
 	if (!device->ops.alloc_xrcd)
 		return ERR_PTR(-EOPNOTSUPP);
 
-	xrcd = device->ops.alloc_xrcd(device, udata);
-	if (!IS_ERR(xrcd)) {
-		xrcd->device = device;
-		xrcd->inode = inode;
-		atomic_set(&xrcd->usecnt, 0);
-		init_rwsem(&xrcd->tgt_qps_rwsem);
-		xa_init(&xrcd->tgt_qps);
-	}
+	xrcd = rdma_zalloc_drv_obj(device, ib_xrcd);
+	if (!xrcd)
+		return ERR_PTR(-ENOMEM);
 
+	xrcd->device = device;
+	xrcd->inode = inode;
+	atomic_set(&xrcd->usecnt, 0);
+	init_rwsem(&xrcd->tgt_qps_rwsem);
+	xa_init(&xrcd->tgt_qps);
+
+	ret = device->ops.alloc_xrcd(xrcd, udata);
+	if (ret)
+		goto err;
 	return xrcd;
+err:
+	kfree(xrcd);
+	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL(ib_alloc_xrcd_user);
 
@@ -2321,7 +2329,9 @@ int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata)
 		return -EBUSY;
 
 	WARN_ON(!xa_empty(&xrcd->tgt_qps));
-	return xrcd->device->ops.dealloc_xrcd(xrcd, udata);
+	xrcd->device->ops.dealloc_xrcd(xrcd, udata);
+	kfree(xrcd);
+	return 0;
 }
 EXPORT_SYMBOL(ib_dealloc_xrcd_user);
 
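For reference, rdma_zalloc_drv_obj() is the same size-driven allocator the core already uses for PDs, SRQs and ucontexts: it reads the object size the driver declared via INIT_RDMA_OBJ_SIZE() and zero-allocates that many bytes. Roughly (a paraphrase from memory of the ib_verbs.h of this era, not the verbatim header text):

/* Rough paraphrase of the allocator macro; see rdma/ib_verbs.h for the real text. */
#define rdma_zalloc_drv_obj(ib_dev, ib_type)                                  \
	((struct ib_type *)kzalloc((ib_dev)->ops.size_##ib_type, GFP_KERNEL))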
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1219,56 +1219,47 @@ static void mlx4_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
 	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
 }
 
-static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
-					  struct ib_udata *udata)
+static int mlx4_ib_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
 {
-	struct mlx4_ib_xrcd *xrcd;
+	struct mlx4_ib_dev *dev = to_mdev(ibxrcd->device);
+	struct mlx4_ib_xrcd *xrcd = to_mxrcd(ibxrcd);
 	struct ib_cq_init_attr cq_attr = {};
 	int err;
 
-	if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
-		return ERR_PTR(-ENOSYS);
+	if (!(dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
+		return -EOPNOTSUPP;
 
-	xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
-	if (!xrcd)
-		return ERR_PTR(-ENOMEM);
-
-	err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
+	err = mlx4_xrcd_alloc(dev->dev, &xrcd->xrcdn);
 	if (err)
-		goto err1;
+		return err;
 
-	xrcd->pd = ib_alloc_pd(ibdev, 0);
+	xrcd->pd = ib_alloc_pd(ibxrcd->device, 0);
 	if (IS_ERR(xrcd->pd)) {
 		err = PTR_ERR(xrcd->pd);
 		goto err2;
 	}
 
 	cq_attr.cqe = 1;
-	xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr);
+	xrcd->cq = ib_create_cq(ibxrcd->device, NULL, NULL, xrcd, &cq_attr);
 	if (IS_ERR(xrcd->cq)) {
 		err = PTR_ERR(xrcd->cq);
 		goto err3;
 	}
 
-	return &xrcd->ibxrcd;
+	return 0;
 
 err3:
 	ib_dealloc_pd(xrcd->pd);
 err2:
-	mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
-err1:
-	kfree(xrcd);
-	return ERR_PTR(err);
+	mlx4_xrcd_free(dev->dev, xrcd->xrcdn);
+	return err;
 }
 
-static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
+static void mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
 {
 	ib_destroy_cq(to_mxrcd(xrcd)->cq);
 	ib_dealloc_pd(to_mxrcd(xrcd)->pd);
 	mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
-	kfree(xrcd);
-
-	return 0;
 }
 
 static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
@@ -2607,6 +2598,8 @@ static const struct ib_device_ops mlx4_ib_dev_mw_ops = {
 static const struct ib_device_ops mlx4_ib_dev_xrc_ops = {
 	.alloc_xrcd = mlx4_ib_alloc_xrcd,
 	.dealloc_xrcd = mlx4_ib_dealloc_xrcd,
+
+	INIT_RDMA_OBJ_SIZE(ib_xrcd, mlx4_ib_xrcd, ibxrcd),
 };
 
 static const struct ib_device_ops mlx4_ib_dev_fs_ops = {
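INIT_RDMA_OBJ_SIZE(ib_xrcd, mlx4_ib_xrcd, ibxrcd) fills the size_ib_xrcd field that rdma_zalloc_drv_obj() consumes, and build-breaks if the embedded ib_xrcd is not the first member — which is what makes the core's ib_xrcd pointer and the driver's private pointer interchangeable. Approximately (a simplified sketch; the real macro in rdma/ib_verbs.h also type-checks the member):

/* Simplified sketch of INIT_RDMA_OBJ_SIZE; not the verbatim header text. */
#define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member)                     \
	.size_##ib_struct =                                                   \
		(sizeof(struct drv_struct) +                                  \
		 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)))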
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -6671,6 +6671,8 @@ static const struct ib_device_ops mlx5_ib_dev_mw_ops = {
 static const struct ib_device_ops mlx5_ib_dev_xrc_ops = {
 	.alloc_xrcd = mlx5_ib_alloc_xrcd,
 	.dealloc_xrcd = mlx5_ib_dealloc_xrcd,
+
+	INIT_RDMA_OBJ_SIZE(ib_xrcd, mlx5_ib_xrcd, ibxrcd),
 };
 
 static const struct ib_device_ops mlx5_ib_dev_dm_ops = {
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1224,9 +1224,8 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
 			const struct ib_mad *in, struct ib_mad *out,
 			size_t *out_mad_size, u16 *out_mad_pkey_index);
-struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
-				   struct ib_udata *udata);
-int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
+int mlx5_ib_alloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
+void mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
 int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
 int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
 int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -4700,41 +4700,23 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
 	return err;
 }
 
-struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
-				   struct ib_udata *udata)
+int mlx5_ib_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
 {
-	struct mlx5_ib_dev *dev = to_mdev(ibdev);
-	struct mlx5_ib_xrcd *xrcd;
-	int err;
+	struct mlx5_ib_dev *dev = to_mdev(ibxrcd->device);
+	struct mlx5_ib_xrcd *xrcd = to_mxrcd(ibxrcd);
 
 	if (!MLX5_CAP_GEN(dev->mdev, xrc))
-		return ERR_PTR(-ENOSYS);
+		return -EOPNOTSUPP;
 
-	xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
-	if (!xrcd)
-		return ERR_PTR(-ENOMEM);
-
-	err = mlx5_cmd_xrcd_alloc(dev->mdev, &xrcd->xrcdn, 0);
-	if (err) {
-		kfree(xrcd);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	return &xrcd->ibxrcd;
+	return mlx5_cmd_xrcd_alloc(dev->mdev, &xrcd->xrcdn, 0);
 }
 
-int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
+void mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
 	u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
-	int err;
 
-	err = mlx5_cmd_xrcd_dealloc(dev->mdev, xrcdn, 0);
-	if (err)
-		mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
-
-	kfree(xrcd);
-	return 0;
+	mlx5_cmd_xrcd_dealloc(dev->mdev, xrcdn, 0);
 }
 
 static void mlx5_ib_wq_event(struct mlx5_core_qp *core_qp, int type)
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2494,9 +2494,8 @@ struct ib_device_ops {
 	int (*dealloc_mw)(struct ib_mw *mw);
 	int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
 	int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
-	struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device,
-				      struct ib_udata *udata);
-	int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
+	int (*alloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
+	void (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
 	struct ib_flow *(*create_flow)(struct ib_qp *qp,
 				       struct ib_flow_attr *flow_attr,
 				       int domain, struct ib_udata *udata);
@@ -2655,6 +2654,7 @@ struct ib_device_ops {
 	DECLARE_RDMA_OBJ_SIZE(ib_pd);
 	DECLARE_RDMA_OBJ_SIZE(ib_srq);
 	DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
+	DECLARE_RDMA_OBJ_SIZE(ib_xrcd);
 };
 
 struct ib_core_device {
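Note that callers of ib_alloc_xrcd_user() and ib_dealloc_xrcd_user() are unaffected: the ERR_PTR and int-return conventions stay as they were, only the driver-facing contract changes. A sketched caller for reference (hypothetical function, shown only to make the unchanged calling convention concrete):

/* Sketched caller; nothing here changes with this patch. */
static int use_xrcd(struct ib_device *device, struct inode *inode,
		    struct ib_udata *udata)
{
	struct ib_xrcd *xrcd = ib_alloc_xrcd_user(device, inode, udata);

	if (IS_ERR(xrcd))
		return PTR_ERR(xrcd);

	/* ... use the XRC domain ... */

	return ib_dealloc_xrcd_user(xrcd, udata);	/* frees the object */
}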