IB/mlx5: Respect new UMR capabilities
In some firmware configurations, UMR usage from Virtual Functions is
restricted. This information is published to the driver using new
capability bits. Avoid using UMRs in these cases, and use the firmware
slow-path flow to create mkeys and populate them with Virtual to
Physical address translation.

Older drivers that do not have this patch will end up using memory keys
that aren't populated with Virtual to Physical address translation,
which is normally done as part of the UMR work.

Reviewed-by: Mark Bloch <markb@mellanox.com>
Signed-off-by: Majd Dibbiny <majd@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Tested-by: Laurence Oberman <loberman@redhat.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent ea8af0d2f2
commit c8d75a980f
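The whole patch hinges on one decision: post a UMR work request only when the firmware capabilities allow it, and otherwise let the firmware slow path populate the mkey. Below is a minimal standalone C sketch of that gating logic (not kernel code): the hca_caps struct and the cache_max_order parameter are hypothetical stand-ins for MLX5_CAP_GEN() and mr_cache_max_order(), while the helper names mirror the diff that follows.

#include <stdbool.h>
#include <stdio.h>

struct hca_caps {			/* stand-in for the real HCA caps */
	bool umr_modify_entity_size_disabled;
	bool umr_indirect_mkey_disabled;
};

static bool umr_can_modify_entity_size(const struct hca_caps *caps)
{
	return !caps->umr_modify_entity_size_disabled;
}

/* The UMR fast path is usable only when the MR order fits the MR cache
 * and the firmware lets UMRs modify the entity size. */
static bool use_umr(const struct hca_caps *caps, int order, int cache_max_order)
{
	return order <= cache_max_order && umr_can_modify_entity_size(caps);
}

int main(void)
{
	struct hca_caps vf_caps = { .umr_modify_entity_size_disabled = true };

	/* On such a VF the driver must populate the mkey via the firmware
	 * slow path instead of posting a UMR WQE: prints "use UMR: no". */
	printf("use UMR: %s\n", use_umr(&vf_caps, 10, 20) ? "yes" : "no");
	return 0;
}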
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -51,6 +51,21 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 static int mr_cache_max_order(struct mlx5_ib_dev *dev);
 static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
+
+static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
+{
+	return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
+}
+
+static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
+{
+	return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
+}
+
+static bool use_umr(struct mlx5_ib_dev *dev, int order)
+{
+	return order <= mr_cache_max_order(dev) &&
+		umr_can_modify_entity_size(dev);
+}
 
 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
@@ -956,7 +971,10 @@ static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages,
 {
 	struct mlx5_ib_dev *dev = mr->dev;
 	struct ib_umem *umem = mr->umem;
 
 	if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
+		if (!umr_can_use_indirect_mkey(dev))
+			return -EPERM;
 		mlx5_odp_populate_klm(xlt, idx, npages, mr, flags);
 		return npages;
 	}
@@ -1003,6 +1021,10 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
 	gfp_t gfp;
 	bool use_emergency_page = false;
 
+	if ((flags & MLX5_IB_UPD_XLT_INDIRECT) &&
+	    !umr_can_use_indirect_mkey(dev))
+		return -EPERM;
+
 	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
 	 * so we need to align the offset and length accordingly
 	 */
@@ -1211,13 +1233,13 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
 	struct mlx5_ib_mr *mr = NULL;
+	bool populate_mtts = false;
 	struct ib_umem *umem;
 	int page_shift;
 	int npages;
 	int ncont;
 	int order;
 	int err;
-	bool use_umr = true;
 
 	if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
 		return ERR_PTR(-EOPNOTSUPP);
@@ -1244,26 +1266,29 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	if (err < 0)
 		return ERR_PTR(err);
 
-	if (order <= mr_cache_max_order(dev)) {
+	if (use_umr(dev, order)) {
 		mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
 					 page_shift, order, access_flags);
 		if (PTR_ERR(mr) == -EAGAIN) {
 			mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
 			mr = NULL;
 		}
+		populate_mtts = false;
 	} else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
 		if (access_flags & IB_ACCESS_ON_DEMAND) {
 			err = -EINVAL;
 			pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
 			goto error;
 		}
-		use_umr = false;
+		populate_mtts = true;
 	}
 
 	if (!mr) {
+		if (!umr_can_modify_entity_size(dev))
+			populate_mtts = true;
 		mutex_lock(&dev->slow_path_mutex);
 		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
-				page_shift, access_flags, !use_umr);
+				page_shift, access_flags, populate_mtts);
 		mutex_unlock(&dev->slow_path_mutex);
 	}
 
@@ -1281,7 +1306,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	update_odp_mr(mr);
 #endif
 
-	if (use_umr) {
+	if (!populate_mtts) {
 		int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
 
 		if (access_flags & IB_ACCESS_ON_DEMAND)
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -3697,8 +3697,19 @@ static __be64 get_umr_update_pd_mask(void)
 	return cpu_to_be64(result);
 }
 
-static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
-				struct ib_send_wr *wr, int atomic)
+static int umr_check_mkey_mask(struct mlx5_ib_dev *dev, u64 mask)
+{
+	if ((mask & MLX5_MKEY_MASK_PAGE_SIZE &&
+	     MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled)) ||
+	    (mask & MLX5_MKEY_MASK_A &&
+	     MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled)))
+		return -EPERM;
+	return 0;
+}
+
+static int set_reg_umr_segment(struct mlx5_ib_dev *dev,
+			       struct mlx5_wqe_umr_ctrl_seg *umr,
+			       struct ib_send_wr *wr, int atomic)
 {
 	struct mlx5_umr_wr *umrwr = umr_wr(wr);
 
@@ -3730,6 +3741,8 @@ static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
 
 	if (!wr->num_sge)
 		umr->flags |= MLX5_UMR_INLINE;
+
+	return umr_check_mkey_mask(dev, be64_to_cpu(umr->mkey_mask));
 }
 
 static u8 get_umr_flags(int acc)
@@ -4552,7 +4565,9 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			}
 			qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
 			ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey);
-			set_reg_umr_segment(seg, wr, !!(MLX5_CAP_GEN(mdev, atomic)));
+			err = set_reg_umr_segment(dev, seg, wr, !!(MLX5_CAP_GEN(mdev, atomic)));
+			if (unlikely(err))
+				goto out;
 			seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
 			size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
 			if (unlikely((seg == qend)))
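The qp.c hunks above enforce the same policy at WQE build time: a UMR whose mkey mask touches a property the firmware forbids modifying is rejected with -EPERM before anything is posted. Here is a minimal user-space model of umr_check_mkey_mask(); the two mask bit positions are illustrative placeholders, not the real MLX5_MKEY_MASK_* values from the mlx5 headers.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MKEY_MASK_PAGE_SIZE (1ull << 1)		/* placeholder bit position */
#define MKEY_MASK_A         (1ull << 21)	/* placeholder bit position */

struct hca_caps {				/* stand-in for MLX5_CAP_GEN() */
	int umr_modify_entity_size_disabled;
	int umr_modify_atomic_disabled;
};

/* Reject the UMR if its mask asks to modify a forbidden property. */
static int umr_check_mkey_mask(const struct hca_caps *caps, uint64_t mask)
{
	if ((mask & MKEY_MASK_PAGE_SIZE &&
	     caps->umr_modify_entity_size_disabled) ||
	    (mask & MKEY_MASK_A &&
	     caps->umr_modify_atomic_disabled))
		return -EPERM;
	return 0;
}

int main(void)
{
	struct hca_caps caps = { .umr_modify_atomic_disabled = 1 };

	/* Modifying atomic access is disallowed here, so the post_send
	 * path would bail out before building the WQE: prints rc = -1. */
	printf("rc = %d\n", umr_check_mkey_mask(&caps, MKEY_MASK_A));
	return 0;
}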
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -916,7 +916,11 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         reserved_at_202[0x1];
 	u8         ipoib_enhanced_offloads[0x1];
 	u8         ipoib_basic_offloads[0x1];
-	u8         reserved_at_205[0x5];
+	u8         reserved_at_205[0x1];
+	u8         repeated_block_disabled[0x1];
+	u8         umr_modify_entity_size_disabled[0x1];
+	u8         umr_modify_atomic_disabled[0x1];
+	u8         umr_indirect_mkey_disabled[0x1];
 	u8         umr_fence[0x2];
 	u8         reserved_at_20c[0x3];
 	u8         drain_sigerr[0x1];
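As a sanity check on the mlx5_ifc.h hunk: the old five-bit reserved_at_205 hole is carved into one reserved bit plus the four new capability bits, so umr_fence and every later field keep their offsets (0x20a onward). The compile-time check below expresses that invariant; the enum names are ours, only the bit widths come from the diff.

#include <assert.h>

/* Bit widths taken from the hunk above; the names are illustrative. */
enum {
	RESERVED_AT_205_BITS            = 0x1,
	REPEATED_BLOCK_DISABLED_BITS    = 0x1,
	UMR_MODIFY_ENTITY_SIZE_DIS_BITS = 0x1,
	UMR_MODIFY_ATOMIC_DIS_BITS      = 0x1,
	UMR_INDIRECT_MKEY_DIS_BITS      = 0x1,
};

/* The five new fields must fill the old reserved_at_205[0x5] exactly,
 * otherwise every later capability bit would shift. */
static_assert(RESERVED_AT_205_BITS + REPEATED_BLOCK_DISABLED_BITS +
	      UMR_MODIFY_ENTITY_SIZE_DIS_BITS + UMR_MODIFY_ATOMIC_DIS_BITS +
	      UMR_INDIRECT_MKEY_DIS_BITS == 0x5,
	      "new capability bits must not shift later fields");

int main(void)
{
	return 0;	/* nothing to run; the check happens at compile time */
}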