RDMA/mlx5: Fix corruption of reg_pages in mlx5_ib_rereg_user_mr()

[ Upstream commit fc3325701a6353594083f08e297d4c1965c601aa ]

reg_pages should always contain mr->npages, since when the MR is finally
de-reg'd that count is always subtracted back out.

If mlx5_ib_rereg_user_mr() takes any of its error exits it leaves reg_pages
adjusted, and the count will eventually be double subtracted when the MR is
de-reg'd.

The manipulation of reg_pages is inherently connected to the umem, so lift
it out of set_mr_fields() and only adjust it around creating/destroying a
umem.

reg_pages is only used for diagnostics in sysfs.
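
To make the hazard concrete, here is a minimal userspace sketch of the
accounting bug (illustrative only, not driver code; reg_pages, fake_mr,
rereg() and dereg() are stand-in names):

  #include <stdio.h>

  static long reg_pages;            /* stands in for dev->mdev->priv.reg_pages */

  struct fake_mr { long npages; };

  static int rereg(struct fake_mr *mr, int fail)
  {
          reg_pages -= mr->npages;  /* buggy: adjusted before the error exits */
          if (fail)
                  return -1;        /* error exit leaves reg_pages short */
          reg_pages += mr->npages;  /* success path restores the count */
          return 0;
  }

  static void dereg(struct fake_mr *mr)
  {
          reg_pages -= mr->npages;  /* final de-reg always subtracts */
  }

  int main(void)
  {
          struct fake_mr mr = { .npages = 16 };

          reg_pages += mr.npages;   /* initial registration */
          rereg(&mr, 1);            /* re-registration fails partway */
          dereg(&mr);               /* npages subtracted a second time */
          printf("reg_pages = %ld\n", reg_pages); /* prints -16, not 0 */
          return 0;
  }

With the fix below, the subtract happens only next to ib_umem_release() and
the matching add only after mr_umem_get() succeeds, so an error exit never
leaves the counter half-adjusted.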

Fixes: 7d0cc6edcc ("IB/mlx5: Add MR cache for large UMR regions")
Link: https://lore.kernel.org/r/20201026131936.1335664-3-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---

diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1247,10 +1247,8 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
 }
 
 static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
-			  int npages, u64 length, int access_flags)
+			  u64 length, int access_flags)
 {
-	mr->npages = npages;
-	atomic_add(npages, &dev->mdev->priv.reg_pages);
 	mr->ibmr.lkey = mr->mmkey.key;
 	mr->ibmr.rkey = mr->mmkey.key;
 	mr->ibmr.length = length;
@@ -1290,8 +1288,7 @@ static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
 
 	kfree(in);
 
-	mr->umem = NULL;
-	set_mr_fields(dev, mr, 0, length, acc);
+	set_mr_fields(dev, mr, length, acc);
 
 	return &mr->ibmr;
 
@@ -1419,7 +1416,9 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);
 
 	mr->umem = umem;
-	set_mr_fields(dev, mr, npages, length, access_flags);
+	mr->npages = npages;
+	atomic_add(mr->npages, &dev->mdev->priv.reg_pages);
+	set_mr_fields(dev, mr, length, access_flags);
 
 	if (xlt_with_umr && !(access_flags & IB_ACCESS_ON_DEMAND)) {
 		/*
@@ -1531,8 +1530,6 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
 		    start, virt_addr, length, access_flags);
 
-	atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
-
 	if (!mr->umem)
 		return -EINVAL;
 
@@ -1553,12 +1550,17 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 		 * used.
 		 */
 		flags |= IB_MR_REREG_TRANS;
+		atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
+		mr->npages = 0;
 		ib_umem_release(mr->umem);
 		mr->umem = NULL;
 		err = mr_umem_get(dev, addr, len, access_flags, &mr->umem,
 				  &npages, &page_shift, &ncont, &order);
 		if (err)
 			goto err;
+
+		mr->npages = ncont;
+		atomic_add(mr->npages, &dev->mdev->priv.reg_pages);
 	}
 
 	if (!mlx5_ib_can_reconfig_with_umr(dev, mr->access_flags,
@@ -1609,7 +1611,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 		goto err;
 	}
 
-	set_mr_fields(dev, mr, npages, len, access_flags);
+	set_mr_fields(dev, mr, len, access_flags);
 
 	return 0;