RDMA second 5.8 rc pull request

Small update, a few more merge window bugs and normal driver bug fixes:

- Two merge window regressions in mlx5: an error path bug found by
  syzkaller and some lost code during a rework preventing ipoib from
  working in some configurations

- Silence clang compilation warning in OPA related code

- Fix a long-standing race condition in ib_nl for ACM

- Resolve issues when the HFI1 device is shut down

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEfB7FMLh+8QxL+6i3OG33FX4gmxoFAl8In+wACgkQOG33FX4g
mxqDrg/+LMh1gW992DrtFP7ijGClUsYVjRUQyJelcXVkkgvKj9ThB5iFG8P7UL4v
YP2rTNTE4hOOIfiYs5aUS6o4xVM2Q9RSLkwqjvJ5sLLf7/FUU3z4+2JTozfGg1s5
dyAoJpWGTUKCoaWSPr9r4SfLsirTgbfqc93KO5SlqLOg7e+UHYBsw/OEaVNSqEDy
KJX6F/l6xDGCXvtPpmLw4jxoU6ii1SSnTd8woNwyOqrOg3NrCgFrT2BP3oexJsRD
NJrIOqQa4kINzBiP2HgTk6UCU5zf7BdkFpiQN/hE3a/r5ms19koVUUXvKJQ3lkC3
GHYfTJIfHi74ZXj/S/xg/aHPEIG/xIS58W/acpLD5yef3rcNX+Ij/jWF1DLJdwnp
RynNLFZJXjW3SNoyR858AZ8I90k9VNwqvwzLqUQziXeObPrxUrL+VLcl7wnK8voT
pyhnBtF4qp23FpzVx5lAeaaZaWxAus2eT4hEhJRAoB9RMtLJJv0X+q4g1QF2wegw
NMSIdjeeCfkMFwyXIfUDp4j+tl6vHdGGjXrMCkF3Yg/Qnv6PyGK2yjoDtglaJtgG
+Rc8UXwZgT9owkWedl8NW3jQ603lmQYoU38f+YxMSyjPk+XAaBTrNkB16SJCVEyJ
ngJIzLgFYV/LDTR3wV3Ti8Q/7nHz7i5lauMODFsx6a0hoV7Ha1w=
=dFOT
-----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "Small update, a few more merge window bugs and normal driver bug
  fixes:

  - Two merge window regressions in mlx5: an error path bug found by
    syzkaller and some lost code during a rework preventing ipoib from
    working in some configurations

  - Silence clang compilation warning in OPA related code

  - Fix a long-standing race condition in ib_nl for ACM

  - Resolve issues when the HFI1 device is shut down"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/mlx5: Set PD pointers for the error flow unwind
  IB/mlx5: Fix 50G per lane indication
  RDMA/siw: Fix reporting vendor_part_id
  IB/sa: Resolv use-after-free in ib_nl_make_request()
  IB/hfi1: Do not destroy link_wq when the device is shut down
  IB/hfi1: Do not destroy hfi1_wq when the device is shut down
  RDMA/mlx5: Fix legacy IPoIB QP initialization
  IB/hfi1: Add explicit cast OPA_MTU_8192 to 'enum ib_mtu'
commit aa0c9086b4
@@ -829,13 +829,20 @@ static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
 	return len;
 }
 
-static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
+static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
 {
 	struct sk_buff *skb = NULL;
 	struct nlmsghdr *nlh;
 	void *data;
 	struct ib_sa_mad *mad;
 	int len;
+	unsigned long flags;
+	unsigned long delay;
+	gfp_t gfp_flag;
+	int ret;
+
+	INIT_LIST_HEAD(&query->list);
+	query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
 
 	mad = query->mad_buf->mad;
 	len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
@@ -860,36 +867,25 @@ static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
 	/* Repair the nlmsg header length */
 	nlmsg_end(skb, nlh);
 
-	return rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_mask);
-}
-
-static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
-{
-	unsigned long flags;
-	unsigned long delay;
-	int ret;
-
-	INIT_LIST_HEAD(&query->list);
-	query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
-
-	/* Put the request on the list first.*/
-	spin_lock_irqsave(&ib_nl_request_lock, flags);
+	gfp_flag = ((gfp_mask & GFP_ATOMIC) == GFP_ATOMIC) ? GFP_ATOMIC :
+		GFP_NOWAIT;
+
+	spin_lock_irqsave(&ib_nl_request_lock, flags);
+	ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_flag);
+
+	if (ret)
+		goto out;
+
+	/* Put the request on the list.*/
 	delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
 	query->timeout = delay + jiffies;
 	list_add_tail(&query->list, &ib_nl_request_list);
 	/* Start the timeout if this is the only request */
 	if (ib_nl_request_list.next == &query->list)
 		queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
-	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
 
-	ret = ib_nl_send_msg(query, gfp_mask);
-	if (ret) {
-		ret = -EIO;
-		/* Remove the request */
-		spin_lock_irqsave(&ib_nl_request_lock, flags);
-		list_del(&query->list);
-		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
-	}
+out:
+	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
 
 	return ret;
 }
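The ib_nl hunks above reorder registration and send: previously the query was put on the shared request list, the lock was dropped, the netlink message was sent, and the entry was removed again on send failure, leaving a window in which a response or timeout could free the query before the error path touched it. After the rework the send and the list insertion happen in one critical section, and the query is only published when the send succeeded. Below is a minimal userspace model of that ordering, a sketch only, with hypothetical names (struct sa_request, send_to_resolver(), request_lock) standing in for the kernel objects:

/* Sketch of the "send first, publish only on success, all under one lock"
 * ordering.  Hypothetical names; not the kernel code. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct sa_request {
	int seq;
	struct sa_request *next;
};

static pthread_mutex_t request_lock = PTHREAD_MUTEX_INITIALIZER;
static struct sa_request *request_list;	/* shared with a (not shown) response handler */

/* Stand-in for rdma_nl_multicast(); pretend the send always succeeds. */
static int send_to_resolver(struct sa_request *req)
{
	printf("sent request %d\n", req->seq);
	return 0;
}

static int make_request(struct sa_request *req)
{
	int ret;

	/*
	 * Send and publish inside the same critical section.  The request
	 * becomes visible on request_list only after the send succeeded, so
	 * a failed send never leaves a stale entry for a responder or a
	 * timeout path to find and free behind our back.
	 */
	pthread_mutex_lock(&request_lock);
	ret = send_to_resolver(req);
	if (ret == 0) {
		req->next = request_list;
		request_list = req;
	}
	pthread_mutex_unlock(&request_lock);

	return ret;
}

int main(void)
{
	struct sa_request *req = calloc(1, sizeof(*req));

	if (!req)
		return 1;
	req->seq = 1;
	return make_request(req) ? 1 : 0;
}

The only point of the sketch is the ordering: nothing outside the lock can ever observe a request whose send has not already succeeded.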
@@ -830,6 +830,29 @@ static int create_workqueues(struct hfi1_devdata *dd)
 	return -ENOMEM;
 }
 
+/**
+ * destroy_workqueues - destroy per port workqueues
+ * @dd: the hfi1_ib device
+ */
+static void destroy_workqueues(struct hfi1_devdata *dd)
+{
+	int pidx;
+	struct hfi1_pportdata *ppd;
+
+	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+		ppd = dd->pport + pidx;
+
+		if (ppd->hfi1_wq) {
+			destroy_workqueue(ppd->hfi1_wq);
+			ppd->hfi1_wq = NULL;
+		}
+		if (ppd->link_wq) {
+			destroy_workqueue(ppd->link_wq);
+			ppd->link_wq = NULL;
+		}
+	}
+}
+
 /**
  * enable_general_intr() - Enable the IRQs that will be handled by the
  * general interrupt handler.
@@ -1103,15 +1126,10 @@ static void shutdown_device(struct hfi1_devdata *dd)
 		 * We can't count on interrupts since we are stopping.
 		 */
 		hfi1_quiet_serdes(ppd);
-
-		if (ppd->hfi1_wq) {
-			destroy_workqueue(ppd->hfi1_wq);
-			ppd->hfi1_wq = NULL;
-		}
-		if (ppd->link_wq) {
-			destroy_workqueue(ppd->link_wq);
-			ppd->link_wq = NULL;
-		}
+		if (ppd->hfi1_wq)
+			flush_workqueue(ppd->hfi1_wq);
+		if (ppd->link_wq)
+			flush_workqueue(ppd->link_wq);
 	}
 	sdma_exit(dd);
 }
@@ -1756,6 +1774,7 @@ static void remove_one(struct pci_dev *pdev)
 	 * clear dma engines, etc.
 	 */
 	shutdown_device(dd);
+	destroy_workqueues(dd);
 
 	stop_timers(dd);
 
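The init.c hunks above split the workqueue teardown: shutdown_device() now only flushes ppd->hfi1_wq and ppd->link_wq, and the new destroy_workqueues() helper, called from remove_one() after shutdown_device(), is the single place that destroys them, so code that still schedules work around shutdown keeps seeing a valid workqueue pointer. Below is a small kernel-module sketch of the same "flush on shutdown, destroy only on final remove" split, under assumed names (demo_wq, demo_work, demo_shutdown, demo_remove); it is an illustration of the pattern, not the hfi1 code:

/* Sketch: flush on shutdown, destroy only on remove (assumed names). */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;
static struct work_struct demo_work;

static void demo_work_fn(struct work_struct *work)
{
	pr_info("demo work ran\n");
}

/* Mirrors shutdown_device(): quiesce, but keep the workqueue allocated so a
 * late caller that races with shutdown still queues onto a valid, idle queue. */
static void demo_shutdown(void)
{
	if (demo_wq)
		flush_workqueue(demo_wq);
}

/* Mirrors destroy_workqueues(): only the final remove path frees the queue. */
static void demo_remove(void)
{
	if (demo_wq) {
		destroy_workqueue(demo_wq);
		demo_wq = NULL;
	}
}

static int __init demo_init(void)
{
	demo_wq = alloc_workqueue("demo_wq", WQ_MEM_RECLAIM, 0);
	if (!demo_wq)
		return -ENOMEM;
	INIT_WORK(&demo_work, demo_work_fn);
	queue_work(demo_wq, &demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	demo_shutdown();
	demo_remove();
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The _hfi1_schedule_send()/_hfi1_schedule_tid_send() hunks further below complement this by checking the HFI1_SHUTDOWN flag before queuing new work.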
@@ -195,7 +195,7 @@ static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
 {
 	/* Constraining 10KB packets to 8KB packets */
 	if (mtu == (enum ib_mtu)OPA_MTU_10240)
-		mtu = OPA_MTU_8192;
+		mtu = (enum ib_mtu)OPA_MTU_8192;
 	return opa_mtu_enum_to_int((enum opa_mtu)mtu);
 }
 
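The one-line cast above is the clang-warning fix mentioned in the pull request message: OPA_MTU_8192 is an enumerator of a different enumeration than the enum ib_mtu variable it is assigned to, and clang flags such implicit enum-to-enum conversions (-Wenum-conversion) even when the numeric encodings are compatible, so the explicit cast states the intent and silences the warning. A standalone illustration with made-up enums and values (not the OPA/IB definitions):

/* Build: clang -Wenum-conversion demo.c -- the casts below avoid the warning. */
#include <stdio.h>

enum small_mtu { SMALL_MTU_2048 = 4, SMALL_MTU_4096 = 5 };
enum big_mtu   { BIG_MTU_8192 = 6, BIG_MTU_10240 = 7 };

static int mtu_to_bytes(enum small_mtu mtu)
{
	/* Without the casts clang reports: implicit conversion from
	 * enumeration type 'enum big_mtu' to different enumeration type
	 * 'enum small_mtu' [-Wenum-conversion].  The casts keep the same
	 * numeric encoding and silence the warning. */
	if (mtu == (enum small_mtu)BIG_MTU_10240)
		mtu = (enum small_mtu)BIG_MTU_8192;
	return 256 << mtu;	/* made-up size mapping for the sketch */
}

int main(void)
{
	printf("%d\n", mtu_to_bytes((enum small_mtu)BIG_MTU_10240));
	return 0;
}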
@@ -367,7 +367,10 @@ bool _hfi1_schedule_send(struct rvt_qp *qp)
 	struct hfi1_ibport *ibp =
 		to_iport(qp->ibqp.device, qp->port_num);
 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
-	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+	struct hfi1_devdata *dd = ppd->dd;
+
+	if (dd->flags & HFI1_SHUTDOWN)
+		return true;
 
 	return iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
 			       priv->s_sde ?
@@ -5406,7 +5406,10 @@ static bool _hfi1_schedule_tid_send(struct rvt_qp *qp)
 	struct hfi1_ibport *ibp =
 		to_iport(qp->ibqp.device, qp->port_num);
 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
-	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+	struct hfi1_devdata *dd = ppd->dd;
+
+	if ((dd->flags & HFI1_SHUTDOWN))
+		return true;
 
 	return iowait_tid_schedule(&priv->s_iowait, ppd->hfi1_wq,
 				   priv->s_sde ?
@@ -511,7 +511,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
 			   mdev_port_num);
 	if (err)
 		goto out;
-	ext = MLX5_CAP_PCAM_FEATURE(dev->mdev, ptys_extended_ethernet);
+	ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability);
 	eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);
 
 	props->active_width = IB_WIDTH_4X;
@@ -2668,6 +2668,10 @@ static int process_create_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	if (qp_type == IB_QPT_RAW_PACKET && attr->rwq_ind_tbl)
 		return (create_flags) ? -EINVAL : 0;
 
+	process_create_flag(dev, &create_flags, IB_QP_CREATE_NETIF_QP,
+			    mlx5_get_flow_namespace(dev->mdev,
+						    MLX5_FLOW_NAMESPACE_BYPASS),
+			    qp);
 	process_create_flag(dev, &create_flags,
 			    IB_QP_CREATE_INTEGRITY_EN,
 			    MLX5_CAP_GEN(mdev, sho), qp);
@@ -3001,11 +3005,12 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
 		mlx5_ib_destroy_dct(qp);
 	} else {
 		/*
-		 * The two lines below are temp solution till QP allocation
+		 * These lines below are temp solution till QP allocation
 		 * will be moved to be under IB/core responsiblity.
 		 */
 		qp->ibqp.send_cq = attr->send_cq;
 		qp->ibqp.recv_cq = attr->recv_cq;
+		qp->ibqp.pd = pd;
 		destroy_qp_common(dev, qp, udata);
 	}
 
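The mlx5_ib_create_qp() hunk above is the "set PD pointers for the error flow unwind" fix: the failure branch calls destroy_qp_common(), so qp->ibqp.pd is now filled in alongside send_cq/recv_cq before that call and the unwind sees the same pointers a fully created QP would. A standalone sketch of that "initialize whatever the error path will dereference before it can run" pattern, with invented types (struct pd, struct qp, create_qp(), destroy_qp()) rather than the mlx5 structures:

/* Sketch: cleanup must only ever see initialized pointers, even on early failure. */
#include <stdio.h>
#include <stdlib.h>

struct pd { int id; };

struct qp {
	struct pd *pd;		/* cleanup dereferences this */
	int hw_handle;
};

static void destroy_qp(struct qp *qp)
{
	/* Safe on both the normal and the error path because qp->pd is
	 * always set before this is called. */
	printf("destroying qp on pd %d\n", qp->pd->id);
	free(qp);
}

static struct qp *create_qp(struct pd *pd, int fail_hw)
{
	struct qp *qp = calloc(1, sizeof(*qp));

	if (!qp)
		return NULL;

	/* Set the pointers the unwind needs *before* anything can fail. */
	qp->pd = pd;

	if (fail_hw) {		/* pretend the hardware command failed */
		destroy_qp(qp);
		return NULL;
	}
	qp->hw_handle = 42;
	return qp;
}

int main(void)
{
	struct pd pd = { .id = 1 };
	struct qp *qp = create_qp(&pd, 1);	/* force the failure path */

	if (!qp)
		fprintf(stderr, "create_qp failed, cleanup already ran\n");
	else
		destroy_qp(qp);
	return 0;
}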
@@ -67,12 +67,13 @@ static int siw_device_register(struct siw_device *sdev, const char *name)
 	static int dev_id = 1;
 	int rv;
 
+	sdev->vendor_part_id = dev_id++;
+
 	rv = ib_register_device(base_dev, name);
 	if (rv) {
 		pr_warn("siw: device registration error %d\n", rv);
 		return rv;
 	}
-	sdev->vendor_part_id = dev_id++;
 
 	siw_dbg(base_dev, "HWaddr=%pM\n", sdev->netdev->dev_addr);
 