RDMA/core: Add XRC domain support

XRC ("eXtended reliable connected") is an IB transport that provides
better scalability by allowing senders to specify which shared receive
queue (SRQ) should be used to receive a message, which essentially
allows one transport context (QP connection) to serve multiple
destinations (as long as they share an adapter, of course).

A few new concepts are introduced to support this.  This patch adds:

 - A new device capability flag, IB_DEVICE_XRC, which low-level
   drivers set to indicate that a device supports XRC.
 - A new object type, XRC domains (struct ib_xrcd), and new verbs
   ib_alloc_xrcd()/ib_dealloc_xrcd().  XRCDs are used to limit which
   XRC SRQs an incoming message can target; a brief usage sketch follows
   below.
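
For illustration only (not part of this patch): a minimal sketch of how an
in-kernel consumer might use the new verbs once a device reports the
capability.  The helper names my_setup_xrcd()/my_teardown_xrcd() and the
attr argument are hypothetical.

	#include <rdma/ib_verbs.h>

	/* Illustration only: allocate an XRC domain on a capable device. */
	static struct ib_xrcd *my_setup_xrcd(struct ib_device *device,
					     struct ib_device_attr *attr)
	{
		if (!(attr->device_cap_flags & IB_DEVICE_XRC))
			return ERR_PTR(-ENOSYS);

		return ib_alloc_xrcd(device);
	}

	/* Teardown fails with -EBUSY while any resource still uses the domain. */
	static void my_teardown_xrcd(struct ib_xrcd *xrcd)
	{
		WARN_ON(ib_dealloc_xrcd(xrcd));
	}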

This patch is derived from work by Jack Morgenstein <jackm@dev.mellanox.co.il>.

Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
commit 59991f94eb (parent 976d167615)
Author:    Sean Hefty, 2011-05-23 17:52:46 -07:00
Committer: Roland Dreier
2 changed files, 48 insertions(+), 0 deletions(-)

drivers/infiniband/core/verbs.c

@@ -920,3 +920,29 @@ int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
 	return qp->device->detach_mcast(qp, gid, lid);
 }
 EXPORT_SYMBOL(ib_detach_mcast);
+
+struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
+{
+	struct ib_xrcd *xrcd;
+
+	if (!device->alloc_xrcd)
+		return ERR_PTR(-ENOSYS);
+
+	xrcd = device->alloc_xrcd(device, NULL, NULL);
+	if (!IS_ERR(xrcd)) {
+		xrcd->device = device;
+		atomic_set(&xrcd->usecnt, 0);
+	}
+
+	return xrcd;
+}
+EXPORT_SYMBOL(ib_alloc_xrcd);
+
+int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
+{
+	if (atomic_read(&xrcd->usecnt))
+		return -EBUSY;
+
+	return xrcd->device->dealloc_xrcd(xrcd);
+}
+EXPORT_SYMBOL(ib_dealloc_xrcd);
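
Not part of the diff: the usecnt counter initialized above is meant to count
resources created on the domain, and ib_dealloc_xrcd() refuses to destroy the
domain while any remain.  A sketch of the pattern a later consumer could
follow; the my_xrc_srq_*() names are hypothetical:

	#include <rdma/ib_verbs.h>

	/* Illustration only: pin the domain while an object uses it. */
	static int my_xrc_srq_create(struct ib_xrcd *xrcd)
	{
		atomic_inc(&xrcd->usecnt);	/* ib_dealloc_xrcd() now returns -EBUSY */
		/* ... create the SRQ and associate it with xrcd ... */
		return 0;
	}

	static void my_xrc_srq_destroy(struct ib_xrcd *xrcd)
	{
		/* ... destroy the SRQ ... */
		atomic_dec(&xrcd->usecnt);	/* domain may be deallocated again */
	}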

include/rdma/ib_verbs.h

@@ -112,6 +112,7 @@ enum ib_device_cap_flags {
 	 */
 	IB_DEVICE_UD_IP_CSUM		= (1<<18),
 	IB_DEVICE_UD_TSO		= (1<<19),
+	IB_DEVICE_XRC			= (1<<20),
 	IB_DEVICE_MEM_MGT_EXTENSIONS	= (1<<21),
 	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
 };
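
Not part of the diff: a sketch of how a low-level driver might advertise the
new capability, assuming the usual pattern of filling struct ib_device_attr
from its query_device handler; mydrv_query_device() and hw_supports_xrc()
are hypothetical:

	/* Illustration only: report XRC support to the core and ULPs. */
	static int mydrv_query_device(struct ib_device *ibdev,
				      struct ib_device_attr *props)
	{
		/* ... fill *props from the hardware ... */
		if (hw_supports_xrc(ibdev))	/* hypothetical HW check */
			props->device_cap_flags |= IB_DEVICE_XRC;
		return 0;
	}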
@@ -858,6 +859,11 @@ struct ib_pd {
 	atomic_t		usecnt; /* count all resources */
 };
 
+struct ib_xrcd {
+	struct ib_device       *device;
+	atomic_t		usecnt; /* count all resources */
+};
+
 struct ib_ah {
 	struct ib_device	*device;
 	struct ib_pd		*pd;
@@ -1149,6 +1155,10 @@ struct ib_device {
 						  struct ib_grh *in_grh,
 						  struct ib_mad *in_mad,
 						  struct ib_mad *out_mad);
+	struct ib_xrcd *	   (*alloc_xrcd)(struct ib_device *device,
+						 struct ib_ucontext *ucontext,
+						 struct ib_udata *udata);
+	int			   (*dealloc_xrcd)(struct ib_xrcd *xrcd);
 
 	struct ib_dma_mapping_ops   *dma_ops;
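
Not part of the diff: given the method signatures above, a driver that sets
IB_DEVICE_XRC would also wire up the two new methods.  A minimal sketch under
assumed names (struct mydrv_xrcd, mydrv_alloc_xrcd(), mydrv_dealloc_xrcd());
the core verbs fill in device and usecnt after alloc_xrcd() returns, so the
driver only sets up its own state:

	#include <linux/slab.h>
	#include <rdma/ib_verbs.h>

	/* Illustration only: driver-private XRCD wrapping the core object. */
	struct mydrv_xrcd {
		struct ib_xrcd	ibxrcd;
		u32		xrcdn;	/* hypothetical hardware domain number */
	};

	static struct ib_xrcd *mydrv_alloc_xrcd(struct ib_device *ibdev,
						struct ib_ucontext *context,
						struct ib_udata *udata)
	{
		struct mydrv_xrcd *xrcd;

		xrcd = kzalloc(sizeof(*xrcd), GFP_KERNEL);
		if (!xrcd)
			return ERR_PTR(-ENOMEM);
		/* ... allocate xrcd->xrcdn from the hardware ... */
		return &xrcd->ibxrcd;
	}

	static int mydrv_dealloc_xrcd(struct ib_xrcd *ibxrcd)
	{
		struct mydrv_xrcd *xrcd = container_of(ibxrcd, struct mydrv_xrcd, ibxrcd);

		/* ... release the hardware domain number ... */
		kfree(xrcd);
		return 0;
	}

The driver would then point its struct ib_device at these handlers, e.g.
ibdev->alloc_xrcd = mydrv_alloc_xrcd and ibdev->dealloc_xrcd = mydrv_dealloc_xrcd.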
@@ -2060,4 +2070,16 @@ int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
  */
 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
 
+/**
+ * ib_alloc_xrcd - Allocates an XRC domain.
+ * @device: The device on which to allocate the XRC domain.
+ */
+struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);
+
+/**
+ * ib_dealloc_xrcd - Deallocates an XRC domain.
+ * @xrcd: The XRC domain to deallocate.
+ */
+int ib_dealloc_xrcd(struct ib_xrcd *xrcd);
+
 #endif /* IB_VERBS_H */