RDMA/nldev: Allow counter manual mode configuration through RDMA netlink

Provide an option to allow users to manually bind a QP to a counter
through RDMA netlink. Limit this operation to users with ADMIN capability only.
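For illustration only, a minimal userspace sketch of the manual bind is
shown below. It is not part of this patch; the device index (0), port (1)
and LQPN (0x6a) are made-up example values, and the client must run with
CAP_NET_ADMIN. Omitting RDMA_NLDEV_ATTR_STAT_COUNTER_ID asks the kernel
to allocate a new counter and bind the QP to it.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <rdma/rdma_netlink.h>

/* Append a u32 netlink attribute right after the current end of the msg. */
static void put_u32(struct nlmsghdr *nlh, unsigned short type, __u32 value)
{
	struct nlattr *nla =
		(struct nlattr *)((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len));

	nla->nla_type = type;
	nla->nla_len = NLA_HDRLEN + sizeof(value);
	memcpy((char *)nla + NLA_HDRLEN, &value, sizeof(value));
	nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + NLA_ALIGN(nla->nla_len);
}

int main(void)
{
	struct sockaddr_nl dst = { .nl_family = AF_NETLINK };
	struct {
		struct nlmsghdr nlh;
		char attrs[128];
	} req;
	int fd;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_HDRLEN;
	req.nlh.nlmsg_type = RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
					      RDMA_NLDEV_CMD_STAT_SET);
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;

	/* Example values: device index 0, port 1, QP number 0x6a. */
	put_u32(&req.nlh, RDMA_NLDEV_ATTR_DEV_INDEX, 0);
	put_u32(&req.nlh, RDMA_NLDEV_ATTR_PORT_INDEX, 1);
	put_u32(&req.nlh, RDMA_NLDEV_ATTR_STAT_RES, RDMA_NLDEV_ATTR_RES_QP);
	put_u32(&req.nlh, RDMA_NLDEV_ATTR_STAT_MODE, RDMA_COUNTER_MODE_MANUAL);
	put_u32(&req.nlh, RDMA_NLDEV_ATTR_RES_LQPN, 0x6a);
	/* No RDMA_NLDEV_ATTR_STAT_COUNTER_ID: kernel allocates a counter. */

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_RDMA);
	if (fd < 0) {
		perror("socket");
		return 1;
	}
	if (sendto(fd, &req, req.nlh.nlmsg_len, 0,
		   (struct sockaddr *)&dst, sizeof(dst)) < 0) {
		perror("sendto");
		close(fd);
		return 1;
	}
	/* A real client would read back the reply, which carries the
	 * allocated RDMA_NLDEV_ATTR_STAT_COUNTER_ID. */
	close(fd);
	return 0;
}

The iproute2 "rdma" tool wraps the same netlink command, e.g.
"rdma statistic qp bind link mlx5_2/1 lqpn 178" (device/port/QPN again
only examples).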

Signed-off-by: Mark Zhang <markz@mellanox.com>
Reviewed-by: Majd Dibbiny <majd@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Author:    Mark Zhang <markz@mellanox.com>
Date:      2019-07-02 13:02:44 +03:00
Committer: Jason Gunthorpe <jgg@mellanox.com>
parent 1bd8e0a9d0
commit b389327df9
3 changed files with 106 additions and 16 deletions

diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c

@@ -1649,8 +1649,8 @@ static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 			       struct netlink_ext_ack *extack)
 {
+	u32 index, port, mode, mask = 0, qpn, cntn = 0;
 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
-	u32 index, port, mode, mask = 0;
 	struct ib_device *device;
 	struct sk_buff *msg;
 	int ret;
@@ -1688,29 +1688,42 @@ static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 			0, 0);
 
 	mode = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_MODE]);
-	if (mode != RDMA_COUNTER_MODE_AUTO) {
-		ret = -EMSGSIZE;
-		goto err_msg;
-	}
+	if (mode == RDMA_COUNTER_MODE_AUTO) {
+		if (tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK])
+			mask = nla_get_u32(
+				tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]);
 
-	if (tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK])
-		mask = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]);
-
-	ret = rdma_counter_set_auto_mode(device, port,
-					 mask ? true : false, mask);
-	if (ret)
-		goto err_msg;
-
-	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode) ||
-	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) {
-		ret = -EMSGSIZE;
-		goto err_msg;
+		ret = rdma_counter_set_auto_mode(device, port,
+						 mask ? true : false, mask);
+		if (ret)
+			goto err_msg;
+	} else {
+		qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
+		if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) {
+			cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
+			ret = rdma_counter_bind_qpn(device, port, qpn, cntn);
+		} else {
+			ret = rdma_counter_bind_qpn_alloc(device, port,
+							  qpn, &cntn);
+		}
+		if (ret)
+			goto err_msg;
+
+		if (fill_nldev_handle(msg, device) ||
+		    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
+		    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) ||
+		    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) {
+			ret = -EMSGSIZE;
+			goto err_fill;
+		}
 	}
 
 	nlmsg_end(msg, nlh);
 	ib_device_put(device);
 	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
 
+err_fill:
+	rdma_counter_unbind_qpn(device, port, qpn, cntn);
 err_msg:
 	nlmsg_free(msg);
 err:
@@ -1718,6 +1731,74 @@ static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 	return ret;
 }
 
+static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+			       struct netlink_ext_ack *extack)
+{
+	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+	struct ib_device *device;
+	struct sk_buff *msg;
+	u32 index, port, qpn, cntn;
+	int ret;
+
+	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+			  nldev_policy, extack);
+	if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES] ||
+	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX] ||
+	    !tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID] ||
+	    !tb[RDMA_NLDEV_ATTR_RES_LQPN])
+		return -EINVAL;
+
+	if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
+		return -EINVAL;
+
+	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+	device = ib_device_get_by_index(sock_net(skb->sk), index);
+	if (!device)
+		return -EINVAL;
+
+	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
+	if (!rdma_is_port_valid(device, port)) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!msg) {
+		ret = -ENOMEM;
+		goto err;
+	}
+	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
+			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
+					 RDMA_NLDEV_CMD_STAT_SET),
+			0, 0);
+
+	cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
+	qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
+	ret = rdma_counter_unbind_qpn(device, port, qpn, cntn);
+	if (ret)
+		goto err_unbind;
+
+	if (fill_nldev_handle(msg, device) ||
+	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
+	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) ||
+	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) {
+		ret = -EMSGSIZE;
+		goto err_fill;
+	}
+
+	nlmsg_end(msg, nlh);
+	ib_device_put(device);
+	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+
+err_fill:
+	rdma_counter_bind_qpn(device, port, qpn, cntn);
+err_unbind:
+	nlmsg_free(msg);
+err:
+	ib_device_put(device);
+	return ret;
+}
+
 static int nldev_stat_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 			       struct netlink_ext_ack *extack)
 {
@@ -1828,6 +1909,10 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
 		.doit = nldev_stat_get_doit,
 		.dump = nldev_stat_get_dumpit,
 	},
+	[RDMA_NLDEV_CMD_STAT_DEL] = {
+		.doit = nldev_stat_del_doit,
+		.flags = RDMA_NL_ADMIN_PERM,
+	},
 };
 
 void __init nldev_init(void)

diff --git a/include/rdma/rdma_counter.h b/include/rdma/rdma_counter.h

@@ -58,5 +58,8 @@ int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port,
 				u32 qp_num, u32 *counter_id);
 int rdma_counter_unbind_qpn(struct ib_device *dev, u8 port,
 			    u32 qp_num, u32 counter_id);
+int rdma_counter_get_mode(struct ib_device *dev, u8 port,
+			  enum rdma_nl_counter_mode *mode,
+			  enum rdma_nl_counter_mask *mask);
 
 #endif /* _RDMA_COUNTER_H_ */

diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h

@@ -285,6 +285,8 @@ enum rdma_nldev_command {
 	RDMA_NLDEV_CMD_STAT_GET, /* can dump */
 
+	RDMA_NLDEV_CMD_STAT_DEL,
+
 	RDMA_NLDEV_NUM_OPS
 };