RDMA: Connect between the mmap entry and the umap_priv structure
The rdma_user_mmap_io interface created a common way for drivers to map hw resources into user space and to zap those mappings once the ucontext is destroyed, enabling the drivers to safely free the hw resources. However, this meant the drivers had to delay freeing a resource until the ucontext destroy phase to ensure it was no longer mapped.

The new mechanism for a common way of handling user/driver address mappings makes it possible to notify the driver once all umap_priv mappings of an entry have been removed, so the hw resource can be freed as soon as it is done with instead of waiting for ucontext destroy.

Since not all drivers use the mechanism, NULL can be passed to the rdma_user_mmap_io interface to keep the previous behaviour. Drivers that use the mmap_xa interface can pass the entry being mapped to rdma_user_mmap_io so the two are linked together.

Link: https://lore.kernel.org/r/20191030094417.16866-4-michal.kalderon@marvell.com
Signed-off-by: Ariel Elior <ariel.elior@marvell.com>
Signed-off-by: Michal Kalderon <michal.kalderon@marvell.com>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 3411f9f01b
commit c043ff2cfb
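For a driver converted to the mmap_xa helpers introduced earlier in this series, the calling pattern ends up roughly as in the sketch below. The handler name my_drv_mmap() and the helper my_entry_to_pfn() are invented for illustration, and the exact signatures of rdma_user_mmap_entry_get()/rdma_user_mmap_entry_put() should be taken from the companion patch rather than from this sketch:

/*
 * Sketch only: a hypothetical driver mmap handler using the new calling
 * convention.  my_drv_mmap() and my_entry_to_pfn() are invented names;
 * the entry lookup helpers come from the companion mmap_xa patch.
 */
static int my_drv_mmap(struct ib_ucontext *ibucontext,
                       struct vm_area_struct *vma)
{
        struct rdma_user_mmap_entry *entry;
        int err;

        /*
         * Takes a reference on the entry the driver previously inserted
         * with rdma_user_mmap_entry_insert().
         */
        entry = rdma_user_mmap_entry_get(ibucontext, vma);
        if (!entry)
                return -EINVAL;

        /*
         * Passing 'entry' links it to the rdma_umap_priv created for this
         * vma: rdma_umap_priv_init() takes an extra kref that is dropped
         * when the vma is closed or zapped, so the driver learns when the
         * last mapping of the hw resource is gone.  A driver that does not
         * use the mmap_xa mechanism simply passes NULL here.
         */
        err = rdma_user_mmap_io(ibucontext, vma,
                                my_entry_to_pfn(entry), /* driver specific */
                                vma->vm_end - vma->vm_start,
                                pgprot_noncached(vma->vm_page_prot),
                                entry);

        /* Drop the lookup reference; the mapping now holds its own. */
        rdma_user_mmap_entry_put(entry);
        return err;
}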
@@ -391,9 +391,11 @@ void rdma_nl_net_exit(struct rdma_dev_net *rnet);
 struct rdma_umap_priv {
 	struct vm_area_struct *vma;
 	struct list_head list;
+	struct rdma_user_mmap_entry *entry;
 };
 
 void rdma_umap_priv_init(struct rdma_umap_priv *priv,
-			 struct vm_area_struct *vma);
+			 struct vm_area_struct *vma,
+			 struct rdma_user_mmap_entry *entry);
 
 #endif /* _CORE_PRIV_H */
@@ -8,23 +8,36 @@
 #include "uverbs.h"
 #include "core_priv.h"
 
-/*
- * Each time we map IO memory into user space this keeps track of the mapping.
- * When the device is hot-unplugged we 'zap' the mmaps in user space to point
- * to the zero page and allow the hot unplug to proceed.
+/**
+ * rdma_umap_priv_init() - Initialize the private data of a vma
+ *
+ * @priv: The already allocated private data
+ * @vma: The vm area struct that needs private data
+ * @entry: entry into the mmap_xa that needs to be linked with
+ *         this vma
+ *
+ * Each time we map IO memory into user space this keeps track of the
+ * mapping. When the device is hot-unplugged we 'zap' the mmaps in user space
+ * to point to the zero page and allow the hot unplug to proceed.
  *
  * This is necessary for cases like PCI physical hot unplug as the actual BAR
  * memory may vanish after this and access to it from userspace could MCE.
  *
  * RDMA drivers supporting disassociation must have their user space designed
  * to cope in some way with their IO pages going to the zero page.
+ *
  */
 void rdma_umap_priv_init(struct rdma_umap_priv *priv,
-			 struct vm_area_struct *vma)
+			 struct vm_area_struct *vma,
+			 struct rdma_user_mmap_entry *entry)
 {
 	struct ib_uverbs_file *ufile = vma->vm_file->private_data;
 
 	priv->vma = vma;
+	if (entry) {
+		kref_get(&entry->ref);
+		priv->entry = entry;
+	}
 	vma->vm_private_data = priv;
 	/* vm_ops is setup in ib_uverbs_mmap() to avoid module dependencies */
 
@@ -34,13 +47,26 @@ void rdma_umap_priv_init(struct rdma_umap_priv *priv,
 }
 EXPORT_SYMBOL(rdma_umap_priv_init);
 
-/*
- * Map IO memory into a process. This is to be called by drivers as part of
- * their mmap() functions if they wish to send something like PCI-E BAR memory
- * to userspace.
+/**
+ * rdma_user_mmap_io() - Map IO memory into a process
+ *
+ * @ucontext: associated user context
+ * @vma: the vma related to the current mmap call
+ * @pfn: pfn to map
+ * @size: size to map
+ * @prot: pgprot to use in remap call
+ * @entry: mmap_entry retrieved from rdma_user_mmap_entry_get(), or NULL
+ *         if mmap_entry is not used by the driver
+ *
+ * This is to be called by drivers as part of their mmap() functions if they
+ * wish to send something like PCI-E BAR memory to userspace.
+ *
+ * Return -EINVAL on wrong flags or size, -EAGAIN on failure to map. 0 on
+ * success.
  */
 int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
-		      unsigned long pfn, unsigned long size, pgprot_t prot)
+		      unsigned long pfn, unsigned long size, pgprot_t prot,
+		      struct rdma_user_mmap_entry *entry)
 {
 	struct ib_uverbs_file *ufile = ucontext->ufile;
 	struct rdma_umap_priv *priv;
@@ -67,7 +93,7 @@ int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
 		return -EAGAIN;
 	}
 
-	rdma_umap_priv_init(priv, vma);
+	rdma_umap_priv_init(priv, vma, entry);
 	return 0;
 }
 EXPORT_SYMBOL(rdma_user_mmap_io);
@@ -819,7 +819,7 @@ static void rdma_umap_open(struct vm_area_struct *vma)
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		goto out_unlock;
-	rdma_umap_priv_init(priv, vma);
+	rdma_umap_priv_init(priv, vma, opriv->entry);
 
 	up_read(&ufile->hw_destroy_rwsem);
 	return;
@@ -850,6 +850,9 @@ static void rdma_umap_close(struct vm_area_struct *vma)
 	 * this point.
 	 */
 	mutex_lock(&ufile->umap_lock);
+	if (priv->entry)
+		rdma_user_mmap_entry_put(priv->entry);
+
 	list_del(&priv->list);
 	mutex_unlock(&ufile->umap_lock);
 	kfree(priv);
@@ -950,6 +953,11 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
 
 			zap_vma_ptes(vma, vma->vm_start,
 				     vma->vm_end - vma->vm_start);
+
+			if (priv->entry) {
+				rdma_user_mmap_entry_put(priv->entry);
+				priv->entry = NULL;
+			}
 		}
 		mutex_unlock(&ufile->umap_lock);
 	skip_mm:
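Putting the core hunks above together, the entry's reference count over the life of a mapping works out roughly as in the schematic trace below (not literal kernel code; the insert/remove helpers and the final free notification belong to the companion mmap_xa patch):

  driver mmap()                        +1  rdma_user_mmap_entry_get()
  rdma_user_mmap_io(..., entry)        +1  rdma_umap_priv_init() -> kref_get()
  driver mmap() returns                -1  driver's rdma_user_mmap_entry_put()
  fork() -> rdma_umap_open()           +1  new priv reuses opriv->entry
  munmap()/exit -> rdma_umap_close()   -1  per vma
  hot unplug -> disassociate           -1  and priv->entry = NULL

Once the driver has also removed the entry and the last reference is dropped, the hw resource can be freed immediately instead of waiting for ucontext destroy.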
@@ -1612,11 +1612,13 @@ static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
 	switch (entry->mmap_flag) {
 	case EFA_MMAP_IO_NC:
 		err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, length,
-					pgprot_noncached(vma->vm_page_prot));
+					pgprot_noncached(vma->vm_page_prot),
+					NULL);
 		break;
 	case EFA_MMAP_IO_WC:
 		err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, length,
-					pgprot_writecombine(vma->vm_page_prot));
+					pgprot_writecombine(vma->vm_page_prot),
+					NULL);
 		break;
 	case EFA_MMAP_DMA_PAGE:
 		for (va = vma->vm_start; va < vma->vm_end;
@@ -359,7 +359,8 @@ static int hns_roce_mmap(struct ib_ucontext *context,
 		return rdma_user_mmap_io(context, vma,
 					 to_hr_ucontext(context)->uar.pfn,
 					 PAGE_SIZE,
-					 pgprot_noncached(vma->vm_page_prot));
+					 pgprot_noncached(vma->vm_page_prot),
+					 NULL);
 
 	/* vm_pgoff: 1 -- TPTR */
 	case 1:
@@ -372,7 +373,8 @@ static int hns_roce_mmap(struct ib_ucontext *context,
 		return rdma_user_mmap_io(context, vma,
 					 hr_dev->tptr_dma_addr >> PAGE_SHIFT,
 					 hr_dev->tptr_size,
-					 vma->vm_page_prot);
+					 vma->vm_page_prot,
+					 NULL);
 
 	default:
 		return -EINVAL;
@@ -1146,7 +1146,8 @@ static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 		return rdma_user_mmap_io(context, vma,
 					 to_mucontext(context)->uar.pfn,
 					 PAGE_SIZE,
-					 pgprot_noncached(vma->vm_page_prot));
+					 pgprot_noncached(vma->vm_page_prot),
+					 NULL);
 
 	case 1:
 		if (dev->dev->caps.bf_reg_size == 0)
@@ -1155,7 +1156,8 @@ static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 			context, vma,
 			to_mucontext(context)->uar.pfn +
 				dev->dev->caps.num_uars,
-			PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot));
+			PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot),
+			NULL);
 
 	case 3: {
 		struct mlx4_clock_params params;
@@ -1171,7 +1173,8 @@ static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 					    params.bar) +
 			 params.offset) >>
 				PAGE_SHIFT,
-			PAGE_SIZE, pgprot_noncached(vma->vm_page_prot));
+			PAGE_SIZE, pgprot_noncached(vma->vm_page_prot),
+			NULL);
 	}
 
 	default:
@@ -2156,7 +2156,7 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
 	mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
 
 	err = rdma_user_mmap_io(&context->ibucontext, vma, pfn, PAGE_SIZE,
-				prot);
+				prot, NULL);
 	if (err) {
 		mlx5_ib_err(dev,
 			    "rdma_user_mmap_io failed with error=%d, mmap_cmd=%s\n",
@@ -2198,7 +2198,8 @@ static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 		      PAGE_SHIFT) +
 		      page_idx;
 	return rdma_user_mmap_io(context, vma, pfn, map_size,
-				 pgprot_writecombine(vma->vm_page_prot));
+				 pgprot_writecombine(vma->vm_page_prot),
+				 NULL);
 }
 
 static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
@@ -2236,7 +2237,8 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
 			PAGE_SHIFT;
 		return rdma_user_mmap_io(&context->ibucontext, vma, pfn,
 					 PAGE_SIZE,
-					 pgprot_noncached(vma->vm_page_prot));
+					 pgprot_noncached(vma->vm_page_prot),
+					 NULL);
 
 	case MLX5_IB_MMAP_CLOCK_INFO:
 		return mlx5_ib_mmap_clock_info_page(dev, vma, context);
@@ -2826,18 +2826,9 @@ void ib_set_client_data(struct ib_device *device, struct ib_client *client,
 void ib_set_device_ops(struct ib_device *device,
 		       const struct ib_device_ops *ops);
 
-#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
 int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
-		      unsigned long pfn, unsigned long size, pgprot_t prot);
-#else
-static inline int rdma_user_mmap_io(struct ib_ucontext *ucontext,
-				    struct vm_area_struct *vma,
-				    unsigned long pfn, unsigned long size,
-				    pgprot_t prot)
-{
-	return -EINVAL;
-}
-#endif
+		      unsigned long pfn, unsigned long size, pgprot_t prot,
+		      struct rdma_user_mmap_entry *entry);
 int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
 				struct rdma_user_mmap_entry *entry,
 				size_t length);