forked from luck/tmp_suning_uos_patched
IB/core: Make ib_dealloc_pd return void
The majority of callers never check the return value, and even if they did, they can't do anything about a failure. All possible failure cases represent a bug in the caller, so just WARN_ON inside the function instead. This fixes a few random errors: net/rds/iw.c infinite loops while it fails. (racing with EBUSY?) This also lays the groundwork to get rid of error return from the drivers. Most drivers do not error, the few that do are broken since it cannot be handled. Since uverbs can legitimately make use of EBUSY, open code the check. Signed-off-by: Jason Gunthorpe <jgunthorpe@obsidianresearch.com> Reviewed-by: Chuck Lever <chuck.lever@oracle.com> Signed-off-by: Doug Ledford <dledford@redhat.com>
This commit is contained in:
parent
03f6fb93fd
commit
7dd78647a2
|
@@ -606,6 +606,7 @@ ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
|
|||
{
|
||||
struct ib_uverbs_dealloc_pd cmd;
|
||||
struct ib_uobject *uobj;
|
||||
struct ib_pd *pd;
|
||||
int ret;
|
||||
|
||||
if (copy_from_user(&cmd, buf, sizeof cmd))
|
||||
|
@@ -614,15 +615,20 @@ ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
|
|||
uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
|
||||
if (!uobj)
|
||||
return -EINVAL;
|
||||
pd = uobj->object;
|
||||
|
||||
ret = ib_dealloc_pd(uobj->object);
|
||||
if (!ret)
|
||||
uobj->live = 0;
|
||||
|
||||
put_uobj_write(uobj);
|
||||
if (atomic_read(&pd->usecnt)) {
|
||||
ret = -EBUSY;
|
||||
goto err_put;
|
||||
}
|
||||
|
||||
ret = pd->device->dealloc_pd(uobj->object);
|
||||
WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
|
||||
if (ret)
|
||||
return ret;
|
||||
goto err_put;
|
||||
|
||||
uobj->live = 0;
|
||||
put_uobj_write(uobj);
|
||||
|
||||
idr_remove_uobj(&ib_uverbs_pd_idr, uobj);
|
||||
|
||||
|
@@ -633,6 +639,10 @@ ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
|
|||
put_uobj(uobj);
|
||||
|
||||
return in_len;
|
||||
|
||||
err_put:
|
||||
put_uobj_write(uobj);
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct xrcd_table_entry {
|
||||
|
|
|
@@ -260,18 +260,32 @@ struct ib_pd *ib_alloc_pd(struct ib_device *device)
|
|||
}
|
||||
EXPORT_SYMBOL(ib_alloc_pd);
|
||||
|
||||
int ib_dealloc_pd(struct ib_pd *pd)
|
||||
/**
|
||||
* ib_dealloc_pd - Deallocates a protection domain.
|
||||
* @pd: The protection domain to deallocate.
|
||||
*
|
||||
* It is an error to call this function while any resources in the pd still
|
||||
* exist. The caller is responsible to synchronously destroy them and
|
||||
* guarantee no new allocations will happen.
|
||||
*/
|
||||
void ib_dealloc_pd(struct ib_pd *pd)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (pd->local_mr) {
|
||||
if (ib_dereg_mr(pd->local_mr))
|
||||
return -EBUSY;
|
||||
ret = ib_dereg_mr(pd->local_mr);
|
||||
WARN_ON(ret);
|
||||
pd->local_mr = NULL;
|
||||
}
|
||||
|
||||
if (atomic_read(&pd->usecnt))
|
||||
return -EBUSY;
|
||||
/* uverbs manipulates usecnt with proper locking, while the kabi
|
||||
requires the caller to guarantee we can't race here. */
|
||||
WARN_ON(atomic_read(&pd->usecnt));
|
||||
|
||||
return pd->device->dealloc_pd(pd);
|
||||
/* Making delalloc_pd a void return is a WIP, no driver should return
|
||||
an error here. */
|
||||
ret = pd->device->dealloc_pd(pd);
|
||||
WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
|
||||
}
|
||||
EXPORT_SYMBOL(ib_dealloc_pd);
|
||||
|
||||
|
|
|
@@ -280,9 +280,7 @@ void ipoib_transport_dev_cleanup(struct net_device *dev)
|
|||
priv->wq = NULL;
|
||||
}
|
||||
|
||||
if (ib_dealloc_pd(priv->pd))
|
||||
ipoib_warn(priv, "ib_dealloc_pd failed\n");
|
||||
|
||||
ib_dealloc_pd(priv->pd);
|
||||
}
|
||||
|
||||
void ipoib_event(struct ib_event_handler *handler,
|
||||
|
|
|
@@ -185,7 +185,7 @@ static void iser_free_device_ib_res(struct iser_device *device)
|
|||
|
||||
(void)ib_unregister_event_handler(&device->event_handler);
|
||||
(void)ib_dereg_mr(device->mr);
|
||||
(void)ib_dealloc_pd(device->pd);
|
||||
ib_dealloc_pd(device->pd);
|
||||
|
||||
kfree(device->comps);
|
||||
device->comps = NULL;
|
||||
|
|
|
@@ -2196,11 +2196,7 @@ int ib_find_pkey(struct ib_device *device,
|
|||
|
||||
struct ib_pd *ib_alloc_pd(struct ib_device *device);
|
||||
|
||||
/**
|
||||
* ib_dealloc_pd - Deallocates a protection domain.
|
||||
* @pd: The protection domain to deallocate.
|
||||
*/
|
||||
int ib_dealloc_pd(struct ib_pd *pd);
|
||||
void ib_dealloc_pd(struct ib_pd *pd);
|
||||
|
||||
/**
|
||||
* ib_create_ah - Creates an address handle for the given address vector.
|
||||
|
|
|
@@ -148,10 +148,7 @@ static void rds_iw_remove_one(struct ib_device *device, void *client_data)
|
|||
if (rds_iwdev->mr)
|
||||
ib_dereg_mr(rds_iwdev->mr);
|
||||
|
||||
while (ib_dealloc_pd(rds_iwdev->pd)) {
|
||||
rdsdebug("Failed to dealloc pd %p\n", rds_iwdev->pd);
|
||||
msleep(1);
|
||||
}
|
||||
ib_dealloc_pd(rds_iwdev->pd);
|
||||
|
||||
list_del(&rds_iwdev->list);
|
||||
kfree(rds_iwdev);
|
||||
|
|
|
@@ -624,7 +624,7 @@ rpcrdma_ia_close(struct rpcrdma_ia *ia)
|
|||
|
||||
/* If the pd is still busy, xprtrdma missed freeing a resource */
|
||||
if (ia->ri_pd && !IS_ERR(ia->ri_pd))
|
||||
WARN_ON(ib_dealloc_pd(ia->ri_pd));
|
||||
ib_dealloc_pd(ia->ri_pd);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
Loading…
Reference in New Issue
Block a user