forked from luck/tmp_suning_uos_patched
RDMA/mlx5: Replace open-coded offsetofend() macro
Clean mlx5_ib from open-coded implementations of offsetofend().

Link: https://lore.kernel.org/r/20200730081235.1581127-3-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
This commit is contained in:
parent
156f378985
commit
70c1430fba
|
@ -106,8 +106,8 @@ int mlx5_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
|
|||
if (ah_type == RDMA_AH_ATTR_TYPE_ROCE && udata) {
|
||||
int err;
|
||||
struct mlx5_ib_create_ah_resp resp = {};
|
||||
u32 min_resp_len = offsetof(typeof(resp), dmac) +
|
||||
sizeof(resp.dmac);
|
||||
u32 min_resp_len =
|
||||
offsetofend(struct mlx5_ib_create_ah_resp, dmac);
|
||||
|
||||
if (udata->outlen < min_resp_len)
|
||||
return -EINVAL;
|
||||
|
|
|
@ -136,12 +136,9 @@ static int check_mpls_supp_fields(u32 field_support, const __be32 *set_mask)
|
|||
#define LAST_COUNTERS_FIELD counters
|
||||
|
||||
/* Field is the last supported field */
|
||||
#define FIELDS_NOT_SUPPORTED(filter, field)\
|
||||
memchr_inv((void *)&filter.field +\
|
||||
sizeof(filter.field), 0,\
|
||||
sizeof(filter) -\
|
||||
offsetof(typeof(filter), field) -\
|
||||
sizeof(filter.field))
|
||||
#define FIELDS_NOT_SUPPORTED(filter, field) \
|
||||
memchr_inv((void *)&filter.field + sizeof(filter.field), 0, \
|
||||
sizeof(filter) - offsetofend(typeof(filter), field))
|
||||
|
||||
int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
|
||||
bool is_egress,
|
||||
|
@ -1164,8 +1161,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
|
|||
int underlay_qpn;
|
||||
|
||||
if (udata && udata->inlen) {
|
||||
min_ucmd_sz = offsetof(typeof(ucmd_hdr), reserved) +
|
||||
sizeof(ucmd_hdr.reserved);
|
||||
min_ucmd_sz = offsetofend(struct mlx5_ib_create_flow, reserved);
|
||||
if (udata->inlen < min_ucmd_sz)
|
||||
return ERR_PTR(-EOPNOTSUPP);
|
||||
|
||||
|
|
|
@ -2029,8 +2029,8 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
|
|||
mw->ibmw.rkey = mw->mmkey.key;
|
||||
mw->ndescs = ndescs;
|
||||
|
||||
resp.response_length = min(offsetof(typeof(resp), response_length) +
|
||||
sizeof(resp.response_length), udata->outlen);
|
||||
resp.response_length =
|
||||
min(offsetofend(typeof(resp), response_length), udata->outlen);
|
||||
if (resp.response_length) {
|
||||
err = ib_copy_to_udata(udata, &resp, resp.response_length);
|
||||
if (err) {
|
||||
|
|
|
@ -4921,8 +4921,8 @@ static int prepare_user_rq(struct ib_pd *pd,
|
|||
int err;
|
||||
size_t required_cmd_sz;
|
||||
|
||||
required_cmd_sz = offsetof(typeof(ucmd), single_stride_log_num_of_bytes)
|
||||
+ sizeof(ucmd.single_stride_log_num_of_bytes);
|
||||
required_cmd_sz = offsetofend(struct mlx5_ib_create_wq,
|
||||
single_stride_log_num_of_bytes);
|
||||
if (udata->inlen < required_cmd_sz) {
|
||||
mlx5_ib_dbg(dev, "invalid inlen\n");
|
||||
return -EINVAL;
|
||||
|
@ -5006,7 +5006,7 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
|
|||
if (!udata)
|
||||
return ERR_PTR(-ENOSYS);
|
||||
|
||||
min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
|
||||
min_resp_len = offsetofend(struct mlx5_ib_create_wq_resp, reserved);
|
||||
if (udata->outlen && udata->outlen < min_resp_len)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
|
@ -5036,8 +5036,8 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
|
|||
rwq->ibwq.wq_num = rwq->core_qp.qpn;
|
||||
rwq->ibwq.state = IB_WQS_RESET;
|
||||
if (udata->outlen) {
|
||||
resp.response_length = offsetof(typeof(resp), response_length) +
|
||||
sizeof(resp.response_length);
|
||||
resp.response_length = offsetofend(
|
||||
struct mlx5_ib_create_wq_resp, response_length);
|
||||
err = ib_copy_to_udata(udata, &resp, resp.response_length);
|
||||
if (err)
|
||||
goto err_copy;
|
||||
|
@ -5094,7 +5094,8 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
|
|||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
|
||||
min_resp_len =
|
||||
offsetofend(struct mlx5_ib_create_rwq_ind_tbl_resp, reserved);
|
||||
if (udata->outlen && udata->outlen < min_resp_len)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
|
@ -5128,8 +5129,9 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
|
|||
|
||||
rwq_ind_tbl->ib_rwq_ind_tbl.ind_tbl_num = rwq_ind_tbl->rqtn;
|
||||
if (udata->outlen) {
|
||||
resp.response_length = offsetof(typeof(resp), response_length) +
|
||||
sizeof(resp.response_length);
|
||||
resp.response_length =
|
||||
offsetofend(struct mlx5_ib_create_rwq_ind_tbl_resp,
|
||||
response_length);
|
||||
err = ib_copy_to_udata(udata, &resp, resp.response_length);
|
||||
if (err)
|
||||
goto err_copy;
|
||||
|
@ -5169,7 +5171,7 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
|
|||
void *rqc;
|
||||
void *in;
|
||||
|
||||
required_cmd_sz = offsetof(typeof(ucmd), reserved) + sizeof(ucmd.reserved);
|
||||
required_cmd_sz = offsetofend(struct mlx5_ib_modify_wq, reserved);
|
||||
if (udata->inlen < required_cmd_sz)
|
||||
return -EINVAL;
|
||||
|
||||
|
|
Loading…
Reference in New Issue
Block a user