Merge tag 'mlx5-updates-2017-06-11' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2017-06-11

This series provides updates to the mlx5 header rewrite feature, from Or
Gerlitz, and three more small updates from Maor and Eran.

-------
Or says:

Packets belonging to flows that differ in their matching may still need
to go through the same header re-writes (e.g. set the current routing hop
MACs and decrement the TTL).  To minimize the number of modify header
IDs, we add a cache for header re-write IDs which is keyed by the binary
chain of modify header actions.

The caching is supported for both the eswitch and NIC use cases; the
actual conversion of the code to use caching comes in separate patches,
one per use case.

Using a per-field mask, the TC pedit action supports modifying partial
fields. The last patch enables offloading that.
-------

From Maor: update the flow table commands layout to the latest HW spec.
From Eran: ethtool connector type reporting updates.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2017-06-11 18:10:42 -04:00
commit 66e037ca57
8 changed files with 322 additions and 95 deletions
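
Before the per-file hunks, here is a minimal userspace sketch of the
modify-header ID cache described in the message above: the cache key is
simply the raw byte blob of modify-header actions, so flows whose matches
differ but whose rewrite chains are identical end up sharing one firmware
ID.  Everything in the sketch (the names, the FNV hash, the fake ID
allocator) is illustrative only; the driver itself uses jhash, the kernel
hashtable API and mlx5_modify_header_alloc(), as the
mlx5e_attach_mod_hdr()/mlx5e_detach_mod_hdr() hunks later in this diff show.

/* Toy model of the cache: key = the binary chain of modify-header
 * actions; entries that hash and compare equal share one ID. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CACHE_BUCKETS 256

struct mh_entry {
	struct mh_entry *next;  /* bucket chain */
	uint32_t id;            /* stand-in for the firmware modify header ID */
	int refcnt;             /* number of flows sharing this entry */
	size_t len;             /* size of the action blob, in bytes */
	uint8_t actions[];      /* the binary chain of modify actions */
};

static struct mh_entry *buckets[CACHE_BUCKETS];
static uint32_t next_id = 1;

static uint32_t hash_blob(const void *data, size_t len)
{
	const uint8_t *p = data;
	uint32_t h = 2166136261u;       /* FNV-1a here; the driver uses jhash */

	while (len--)
		h = (h ^ *p++) * 16777619u;
	return h;
}

/* Return the shared ID for this action blob, allocating a new cache
 * entry only when no identical blob is already present. */
static uint32_t mh_get(const void *actions, size_t len)
{
	uint32_t b = hash_blob(actions, len) % CACHE_BUCKETS;
	struct mh_entry *e;

	for (e = buckets[b]; e; e = e->next) {
		if (e->len == len && !memcmp(e->actions, actions, len)) {
			e->refcnt++;
			return e->id;
		}
	}

	e = calloc(1, sizeof(*e) + len);
	if (!e)
		return 0;               /* 0 = no ID */
	e->len = len;
	memcpy(e->actions, actions, len);
	e->id = next_id++;              /* stands in for mlx5_modify_header_alloc() */
	e->refcnt = 1;
	e->next = buckets[b];
	buckets[b] = e;
	return e->id;
}

int main(void)
{
	uint8_t chain_a[8] = { 1, 2, 3, 4 };    /* e.g. "set MACs + dec TTL" */
	uint8_t chain_b[8] = { 9, 8, 7, 6 };    /* a different rewrite chain */
	uint32_t id1 = mh_get(chain_a, sizeof(chain_a));
	uint32_t id2 = mh_get(chain_a, sizeof(chain_a));
	uint32_t id3 = mh_get(chain_b, sizeof(chain_b));

	printf("%u %u %u\n", id1, id2, id3);    /* prints "1 1 2" */
	return 0;
}

The real entries additionally keep a list of the flows that use them, so
the modify header ID can be released once the last flow detaches (see
mlx5e_detach_mod_hdr() below).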

@ -623,6 +623,8 @@ struct mlx5e_tc_table {
struct rhashtable_params ht_params;
struct rhashtable ht;
DECLARE_HASHTABLE(mod_hdr_tbl, 8);
};
struct mlx5e_vlan_table {

@ -723,24 +723,81 @@ static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
__ETHTOOL_LINK_MODE_MASK_NBITS);
}
static void ptys2ethtool_supported_port(struct ethtool_link_ksettings *link_ksettings,
u32 eth_proto_cap)
static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings *link_ksettings,
u32 eth_proto_cap,
u8 connector_type)
{
if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
| MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
| MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
| MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
| MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
| MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
ethtool_link_ksettings_add_link_mode(link_ksettings, supported, FIBRE);
if (!connector_type || connector_type >= MLX5E_CONNECTOR_TYPE_NUMBER) {
if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
| MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
| MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
| MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
| MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
| MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
ethtool_link_ksettings_add_link_mode(link_ksettings,
supported,
FIBRE);
ethtool_link_ksettings_add_link_mode(link_ksettings,
advertising,
FIBRE);
}
if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_100GBASE_KR4)
| MLX5E_PROT_MASK(MLX5E_40GBASE_KR4)
| MLX5E_PROT_MASK(MLX5E_10GBASE_KR)
| MLX5E_PROT_MASK(MLX5E_10GBASE_KX4)
| MLX5E_PROT_MASK(MLX5E_1000BASE_KX))) {
ethtool_link_ksettings_add_link_mode(link_ksettings,
supported,
Backplane);
ethtool_link_ksettings_add_link_mode(link_ksettings,
advertising,
Backplane);
}
return;
}
if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_100GBASE_KR4)
| MLX5E_PROT_MASK(MLX5E_40GBASE_KR4)
| MLX5E_PROT_MASK(MLX5E_10GBASE_KR)
| MLX5E_PROT_MASK(MLX5E_10GBASE_KX4)
| MLX5E_PROT_MASK(MLX5E_1000BASE_KX))) {
ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Backplane);
switch (connector_type) {
case MLX5E_PORT_TP:
ethtool_link_ksettings_add_link_mode(link_ksettings,
supported, TP);
ethtool_link_ksettings_add_link_mode(link_ksettings,
advertising, TP);
break;
case MLX5E_PORT_AUI:
ethtool_link_ksettings_add_link_mode(link_ksettings,
supported, AUI);
ethtool_link_ksettings_add_link_mode(link_ksettings,
advertising, AUI);
break;
case MLX5E_PORT_BNC:
ethtool_link_ksettings_add_link_mode(link_ksettings,
supported, BNC);
ethtool_link_ksettings_add_link_mode(link_ksettings,
advertising, BNC);
break;
case MLX5E_PORT_MII:
ethtool_link_ksettings_add_link_mode(link_ksettings,
supported, MII);
ethtool_link_ksettings_add_link_mode(link_ksettings,
advertising, MII);
break;
case MLX5E_PORT_FIBRE:
ethtool_link_ksettings_add_link_mode(link_ksettings,
supported, FIBRE);
ethtool_link_ksettings_add_link_mode(link_ksettings,
advertising, FIBRE);
break;
case MLX5E_PORT_DA:
ethtool_link_ksettings_add_link_mode(link_ksettings,
supported, Backplane);
ethtool_link_ksettings_add_link_mode(link_ksettings,
advertising, Backplane);
break;
case MLX5E_PORT_NONE:
case MLX5E_PORT_OTHER:
default:
break;
}
}
@ -791,7 +848,6 @@ static void get_supported(u32 eth_proto_cap,
{
unsigned long *supported = link_ksettings->link_modes.supported;
ptys2ethtool_supported_port(link_ksettings, eth_proto_cap);
ptys2ethtool_supported_link(supported, eth_proto_cap);
ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
}
@ -809,8 +865,23 @@ static void get_advertising(u32 eth_proto_cap, u8 tx_pause,
ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Asym_Pause);
}
static u8 get_connector_port(u32 eth_proto)
static int ptys2connector_type[MLX5E_CONNECTOR_TYPE_NUMBER] = {
[MLX5E_PORT_UNKNOWN] = PORT_OTHER,
[MLX5E_PORT_NONE] = PORT_NONE,
[MLX5E_PORT_TP] = PORT_TP,
[MLX5E_PORT_AUI] = PORT_AUI,
[MLX5E_PORT_BNC] = PORT_BNC,
[MLX5E_PORT_MII] = PORT_MII,
[MLX5E_PORT_FIBRE] = PORT_FIBRE,
[MLX5E_PORT_DA] = PORT_DA,
[MLX5E_PORT_OTHER] = PORT_OTHER,
};
static u8 get_connector_port(u32 eth_proto, u8 connector_type)
{
if (connector_type && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER)
return ptys2connector_type[connector_type];
if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
| MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
| MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
@ -856,6 +927,7 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
u32 eth_proto_oper;
u8 an_disable_admin;
u8 an_status;
u8 connector_type;
int err;
err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
@ -871,6 +943,7 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
an_status = MLX5_GET(ptys_reg, out, an_status);
connector_type = MLX5_GET(ptys_reg, out, connector_type);
mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
@ -883,7 +956,10 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
link_ksettings->base.port = get_connector_port(eth_proto_oper);
link_ksettings->base.port = get_connector_port(eth_proto_oper,
connector_type);
ptys2ethtool_supported_advertised_port(link_ksettings, eth_proto_admin,
connector_type);
get_lp_advertising(eth_proto_lp, link_ksettings);
if (an_status == MLX5_AN_COMPLETE)
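
To summarize the hunk above: the driver now trusts the connector_type
reported by the firmware in the PTYS register whenever it is a known,
non-zero value, and only falls back to guessing the port from the
ethernet protocol mask otherwise.  The sketch below is a condensed model
of that decision; the enum mirrors mlx5e_connector_type (added at the end
of this diff), PORT_* come from <linux/ethtool.h>, and the fallback mask
test is reduced to a single made-up bit.

#include <linux/ethtool.h>
#include <stdint.h>
#include <stdio.h>

enum { CT_UNKNOWN, CT_NONE, CT_TP, CT_AUI, CT_BNC, CT_MII,
       CT_FIBRE, CT_DA, CT_OTHER, CT_NUMBER };

static const uint8_t ct2port[CT_NUMBER] = {
	[CT_UNKNOWN] = PORT_OTHER, [CT_NONE] = PORT_NONE,
	[CT_TP]      = PORT_TP,    [CT_AUI]  = PORT_AUI,
	[CT_BNC]     = PORT_BNC,   [CT_MII]  = PORT_MII,
	[CT_FIBRE]   = PORT_FIBRE, [CT_DA]   = PORT_DA,
	[CT_OTHER]   = PORT_OTHER,
};

#define PROTO_10GBASE_SR_BIT	(1u << 4)	/* illustrative bit only */

static uint8_t report_port(uint32_t eth_proto_oper, uint8_t connector_type)
{
	/* 0 (unknown) or an out-of-range value means the firmware did not
	 * report a usable connector type, e.g. older firmware. */
	if (connector_type && connector_type < CT_NUMBER)
		return ct2port[connector_type];

	/* legacy fallback: derive the port type from the link modes */
	if (eth_proto_oper & PROTO_10GBASE_SR_BIT)
		return PORT_FIBRE;
	return PORT_OTHER;
}

int main(void)
{
	/* prints "5 3", i.e. PORT_DA then PORT_FIBRE */
	printf("%u %u\n", report_port(0, CT_DA),
	       report_port(PROTO_10GBASE_SR_BIT, CT_UNKNOWN));
	return 0;
}

The supported/advertised reporting in ptys2ethtool_supported_advertised_port()
follows the same rule, just mapping to ethtool link modes instead of
PORT_* values.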

@ -69,7 +69,8 @@ struct mlx5e_tc_flow {
u64 cookie;
u8 flags;
struct mlx5_flow_handle *rule;
struct list_head encap; /* flows sharing the same encap */
struct list_head encap; /* flows sharing the same encap ID */
struct list_head mod_hdr; /* flows sharing the same mod hdr ID */
union {
struct mlx5_esw_flow_attr esw_attr[0];
struct mlx5_nic_flow_attr nic_attr[0];
@ -90,6 +91,135 @@ enum {
#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4
struct mod_hdr_key {
int num_actions;
void *actions;
};
struct mlx5e_mod_hdr_entry {
/* a node of a hash table which keeps all the mod_hdr entries */
struct hlist_node mod_hdr_hlist;
/* flows sharing the same mod_hdr entry */
struct list_head flows;
struct mod_hdr_key key;
u32 mod_hdr_id;
};
#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)
static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
{
return jhash(key->actions,
key->num_actions * MLX5_MH_ACT_SZ, 0);
}
static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
struct mod_hdr_key *b)
{
if (a->num_actions != b->num_actions)
return 1;
return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
}
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5e_tc_flow_parse_attr *parse_attr)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
int num_actions, actions_size, namespace, err;
struct mlx5e_mod_hdr_entry *mh;
struct mod_hdr_key key;
bool found = false;
u32 hash_key;
num_actions = parse_attr->num_mod_hdr_actions;
actions_size = MLX5_MH_ACT_SZ * num_actions;
key.actions = parse_attr->mod_hdr_actions;
key.num_actions = num_actions;
hash_key = hash_mod_hdr_info(&key);
if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
namespace = MLX5_FLOW_NAMESPACE_FDB;
hash_for_each_possible(esw->offloads.mod_hdr_tbl, mh,
mod_hdr_hlist, hash_key) {
if (!cmp_mod_hdr_info(&mh->key, &key)) {
found = true;
break;
}
}
} else {
namespace = MLX5_FLOW_NAMESPACE_KERNEL;
hash_for_each_possible(priv->fs.tc.mod_hdr_tbl, mh,
mod_hdr_hlist, hash_key) {
if (!cmp_mod_hdr_info(&mh->key, &key)) {
found = true;
break;
}
}
}
if (found)
goto attach_flow;
mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
if (!mh)
return -ENOMEM;
mh->key.actions = (void *)mh + sizeof(*mh);
memcpy(mh->key.actions, key.actions, actions_size);
mh->key.num_actions = num_actions;
INIT_LIST_HEAD(&mh->flows);
err = mlx5_modify_header_alloc(priv->mdev, namespace,
mh->key.num_actions,
mh->key.actions,
&mh->mod_hdr_id);
if (err)
goto out_err;
if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
hash_add(esw->offloads.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
else
hash_add(priv->fs.tc.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
attach_flow:
list_add(&flow->mod_hdr, &mh->flows);
if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
flow->esw_attr->mod_hdr_id = mh->mod_hdr_id;
else
flow->nic_attr->mod_hdr_id = mh->mod_hdr_id;
return 0;
out_err:
kfree(mh);
return err;
}
static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
struct list_head *next = flow->mod_hdr.next;
list_del(&flow->mod_hdr);
if (list_empty(next)) {
struct mlx5e_mod_hdr_entry *mh;
mh = list_entry(next, struct mlx5e_mod_hdr_entry, flows);
mlx5_modify_header_dealloc(priv->mdev, mh->mod_hdr_id);
hash_del(&mh->mod_hdr_hlist);
kfree(mh);
}
}
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
@ -121,10 +251,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
err = mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_KERNEL,
parse_attr->num_mod_hdr_actions,
parse_attr->mod_hdr_actions,
&attr->mod_hdr_id);
err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
flow_act.modify_id = attr->mod_hdr_id;
kfree(parse_attr->mod_hdr_actions);
if (err) {
@ -166,8 +293,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
}
err_create_ft:
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5_modify_header_dealloc(priv->mdev,
attr->mod_hdr_id);
mlx5e_detach_mod_hdr(priv, flow);
err_create_mod_hdr_id:
mlx5_fc_destroy(dev, counter);
@ -177,6 +303,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
struct mlx5_nic_flow_attr *attr = flow->nic_attr;
struct mlx5_fc *counter = NULL;
counter = mlx5_flow_rule_counter(flow->rule);
@ -188,9 +315,8 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
priv->fs.tc.t = NULL;
}
if (flow->nic_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5_modify_header_dealloc(priv->mdev,
flow->nic_attr->mod_hdr_id);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
@ -213,10 +339,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
err = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_FDB,
parse_attr->num_mod_hdr_actions,
parse_attr->mod_hdr_actions,
&attr->mod_hdr_id);
err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
kfree(parse_attr->mod_hdr_actions);
if (err) {
rule = ERR_PTR(err);
@ -231,9 +354,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
return rule;
err_add_rule:
if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5_modify_header_dealloc(priv->mdev,
attr->mod_hdr_id);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
err_mod_hdr:
mlx5_eswitch_del_vlan_action(esw, attr);
err_add_vlan:
@ -250,19 +372,18 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);
mlx5_eswitch_del_offloaded_rule(esw, flow->rule, attr);
}
mlx5_eswitch_del_vlan_action(esw, flow->esw_attr);
mlx5_eswitch_del_vlan_action(esw, attr);
if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
mlx5e_detach_encap(priv, flow);
kvfree(flow->esw_attr->parse_attr);
kvfree(attr->parse_attr);
}
if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5_modify_header_dealloc(priv->mdev,
attr->mod_hdr_id);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
}
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
@ -970,12 +1091,14 @@ static int offload_pedit_fields(struct pedit_headers *masks,
struct mlx5e_tc_flow_parse_attr *parse_attr)
{
struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
int i, action_size, nactions, max_actions, first, last, first_z;
int i, action_size, nactions, max_actions, first, last, next_z;
void *s_masks_p, *a_masks_p, *vals_p;
struct mlx5_fields *f;
u8 cmd, field_bsize;
u32 s_mask, a_mask;
unsigned long mask;
__be32 mask_be32;
__be16 mask_be16;
void *action;
set_masks = &masks[TCA_PEDIT_KEY_EX_CMD_SET];
@ -1029,11 +1152,19 @@ static int offload_pedit_fields(struct pedit_headers *masks,
field_bsize = f->size * BITS_PER_BYTE;
first_z = find_first_zero_bit(&mask, field_bsize);
if (field_bsize == 32) {
mask_be32 = *(__be32 *)&mask;
mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
} else if (field_bsize == 16) {
mask_be16 = *(__be16 *)&mask;
mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
}
first = find_first_bit(&mask, field_bsize);
next_z = find_next_zero_bit(&mask, field_bsize, first);
last = find_last_bit(&mask, field_bsize);
if (first > 0 || last != (field_bsize - 1) || first_z < last) {
printk(KERN_WARNING "mlx5: partial rewrite (mask %lx) is currently not offloaded\n",
if (first < next_z && next_z < last) {
printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
mask);
return -EOPNOTSUPP;
}
@ -1042,17 +1173,17 @@ static int offload_pedit_fields(struct pedit_headers *masks,
MLX5_SET(set_action_in, action, field, f->field);
if (cmd == MLX5_ACTION_TYPE_SET) {
MLX5_SET(set_action_in, action, offset, 0);
MLX5_SET(set_action_in, action, offset, first);
/* length is num of bits to be written, zero means length of 32 */
MLX5_SET(set_action_in, action, length, field_bsize);
MLX5_SET(set_action_in, action, length, (last - first + 1));
}
if (field_bsize == 32)
MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p));
MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
else if (field_bsize == 16)
MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p));
MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
else if (field_bsize == 8)
MLX5_SET(set_action_in, action, data, *(u8 *)vals_p);
MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
action += action_size;
nactions++;
@ -1194,10 +1325,6 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
tcf_exts_to_list(exts, &actions);
list_for_each_entry(a, &actions, list) {
/* Only support a single action per rule */
if (attr->action)
return -EINVAL;
if (is_tcf_gact_shot(a)) {
attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
if (MLX5_CAP_FLOWTABLE(priv->mdev,
@ -1942,6 +2069,8 @@ int mlx5e_tc_init(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
hash_init(tc->mod_hdr_tbl);
tc->ht_params = mlx5e_tc_flow_ht_params;
return rhashtable_init(&tc->ht, &tc->ht_params);
}
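
The offload_pedit_fields() hunks above are what turn a partial-field
pedit mask into a hardware SET action: find the first and last set bit of
the mask, reject masks whose set bits are not contiguous, then program
offset = first bit, length = number of bits, and shift the value down by
the offset.  Below is a small host-order model of that arithmetic (the
driver additionally byte-swaps 16- and 32-bit masks before the bit scan
and uses find_first_bit()/find_next_zero_bit()/find_last_bit(); those
details are left out here, and all names are made up):

#include <stdint.h>
#include <stdio.h>

struct set_action { unsigned int offset, length; uint32_t data; };

/* Assumes the mask is non-zero and fits within field_bits. */
static int build_set_action(uint32_t mask, uint32_t val,
			    unsigned int field_bits, struct set_action *act)
{
	unsigned int first = __builtin_ctz(mask);	/* lowest set bit */
	unsigned int last = 31 - __builtin_clz(mask);	/* highest set bit */
	unsigned int next_z = first;

	while (next_z < field_bits && (mask & (1u << next_z)))
		next_z++;			/* first zero above 'first' */

	if (next_z < last)			/* hole in the mask */
		return -1;			/* -EOPNOTSUPP in the driver */

	act->offset = first;
	act->length = last - first + 1;		/* HW reads length 0 as 32 */
	act->data = (val & mask) >> first;
	return 0;
}

int main(void)
{
	struct set_action a;

	/* Rewriting only the high six bits of one byte:
	 * mask 0xfc -> offset 2, length 6, data 0x2a. */
	if (!build_set_action(0xfc, 0xa8, 8, &a))
		printf("offset %u length %u data %#x\n",
		       a.offset, a.length, a.data);

	/* A mask with a hole (0xf0f) is rejected, as in the driver. */
	printf("%d\n", build_set_action(0xf0f, 0x123, 12, &a));
	return 0;
}

A full-field rewrite still works as before: the mask covers the whole
field, so the offset stays 0 and the length equals the field size.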

@ -1769,6 +1769,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
}
hash_init(esw->offloads.encap_tbl);
hash_init(esw->offloads.mod_hdr_tbl);
mutex_init(&esw->state_lock);
for (vport_num = 0; vport_num < total_vports; vport_num++) {

@ -207,6 +207,7 @@ struct mlx5_esw_offload {
struct mlx5_flow_group *vport_rx_group;
struct mlx5_eswitch_rep *vport_reps;
DECLARE_HASHTABLE(encap_tbl, 8);
DECLARE_HASHTABLE(mod_hdr_tbl, 8);
u8 inline_mode;
u64 num_flows;
u8 encap;

@ -78,28 +78,33 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
MLX5_CMD_OP_CREATE_FLOW_TABLE);
MLX5_SET(create_flow_table_in, in, table_type, type);
MLX5_SET(create_flow_table_in, in, level, level);
MLX5_SET(create_flow_table_in, in, log_size, log_size);
MLX5_SET(create_flow_table_in, in, flow_table_context.level, level);
MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, log_size);
if (vport) {
MLX5_SET(create_flow_table_in, in, vport_number, vport);
MLX5_SET(create_flow_table_in, in, other_vport, 1);
}
MLX5_SET(create_flow_table_in, in, decap_en, en_encap_decap);
MLX5_SET(create_flow_table_in, in, encap_en, en_encap_decap);
MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
en_encap_decap);
MLX5_SET(create_flow_table_in, in, flow_table_context.encap_en,
en_encap_decap);
switch (op_mod) {
case FS_FT_OP_MOD_NORMAL:
if (next_ft) {
MLX5_SET(create_flow_table_in, in, table_miss_mode, 1);
MLX5_SET(create_flow_table_in, in, table_miss_id, next_ft->id);
MLX5_SET(create_flow_table_in, in,
flow_table_context.table_miss_action, 1);
MLX5_SET(create_flow_table_in, in,
flow_table_context.table_miss_id, next_ft->id);
}
break;
case FS_FT_OP_MOD_LAG_DEMUX:
MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
if (next_ft)
MLX5_SET(create_flow_table_in, in, lag_master_next_table_id,
MLX5_SET(create_flow_table_in, in,
flow_table_context.lag_master_next_table_id,
next_ft->id);
break;
}
@ -146,10 +151,10 @@ int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
if (next_ft) {
MLX5_SET(modify_flow_table_in, in,
lag_master_next_table_id, next_ft->id);
flow_table_context.lag_master_next_table_id, next_ft->id);
} else {
MLX5_SET(modify_flow_table_in, in,
lag_master_next_table_id, 0);
flow_table_context.lag_master_next_table_id, 0);
}
} else {
if (ft->vport) {
@ -160,11 +165,14 @@ int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
MLX5_SET(modify_flow_table_in, in, modify_field_select,
MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
if (next_ft) {
MLX5_SET(modify_flow_table_in, in, table_miss_mode, 1);
MLX5_SET(modify_flow_table_in, in, table_miss_id,
MLX5_SET(modify_flow_table_in, in,
flow_table_context.table_miss_action, 1);
MLX5_SET(modify_flow_table_in, in,
flow_table_context.table_miss_id,
next_ft->id);
} else {
MLX5_SET(modify_flow_table_in, in, table_miss_mode, 0);
MLX5_SET(modify_flow_table_in, in,
flow_table_context.table_miss_action, 0);
}
}
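
The flow-table command hunks above move the old flat fields into the new
flow_table_context block defined by the latest HW spec, and the same
block is now embedded in both the CREATE_FLOW_TABLE and MODIFY_FLOW_TABLE
commands (see struct mlx5_ifc_flow_table_context_bits in the next file).
As a purely hypothetical illustration of what the shared layout allows
(not something this series adds, and it builds only inside the kernel
tree), context fields could also be set through a pointer to the embedded
block instead of spelling out flow_table_context.<field> per command:

#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>

/* Hypothetical helper: program the table-miss fields of a
 * CREATE_FLOW_TABLE command via the embedded flow_table_context. */
static void set_create_miss_ft(u32 *in, u32 next_ft_id)
{
	void *ftc = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);

	MLX5_SET(flow_table_context, ftc, table_miss_action, 1);
	MLX5_SET(flow_table_context, ftc, table_miss_id, next_ft_id);
}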

@ -6627,6 +6627,24 @@ struct mlx5_ifc_create_flow_table_out_bits {
u8 reserved_at_60[0x20];
};
struct mlx5_ifc_flow_table_context_bits {
u8 encap_en[0x1];
u8 decap_en[0x1];
u8 reserved_at_2[0x2];
u8 table_miss_action[0x4];
u8 level[0x8];
u8 reserved_at_10[0x8];
u8 log_size[0x8];
u8 reserved_at_20[0x8];
u8 table_miss_id[0x18];
u8 reserved_at_40[0x8];
u8 lag_master_next_table_id[0x18];
u8 reserved_at_60[0xe0];
};
struct mlx5_ifc_create_flow_table_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
@ -6645,21 +6663,7 @@ struct mlx5_ifc_create_flow_table_in_bits {
u8 reserved_at_a0[0x20];
u8 encap_en[0x1];
u8 decap_en[0x1];
u8 reserved_at_c2[0x2];
u8 table_miss_mode[0x4];
u8 level[0x8];
u8 reserved_at_d0[0x8];
u8 log_size[0x8];
u8 reserved_at_e0[0x8];
u8 table_miss_id[0x18];
u8 reserved_at_100[0x8];
u8 lag_master_next_table_id[0x18];
u8 reserved_at_120[0x80];
struct mlx5_ifc_flow_table_context_bits flow_table_context;
};
struct mlx5_ifc_create_flow_group_out_bits {
@ -7291,7 +7295,8 @@ struct mlx5_ifc_ptys_reg_bits {
u8 ib_link_width_oper[0x10];
u8 ib_proto_oper[0x10];
u8 reserved_at_160[0x20];
u8 reserved_at_160[0x1c];
u8 connector_type[0x4];
u8 eth_proto_lp_advertise[0x20];
@ -7694,8 +7699,10 @@ struct mlx5_ifc_peir_reg_bits {
};
struct mlx5_ifc_pcam_enhanced_features_bits {
u8 reserved_at_0[0x7e];
u8 reserved_at_0[0x7c];
u8 ptys_connector_type[0x1];
u8 reserved_at_7d[0x1];
u8 ppcnt_discard_group[0x1];
u8 ppcnt_statistical_group[0x1];
};
@ -8277,17 +8284,7 @@ struct mlx5_ifc_modify_flow_table_in_bits {
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
u8 reserved_at_c0[0x4];
u8 table_miss_mode[0x4];
u8 reserved_at_c8[0x18];
u8 reserved_at_e0[0x8];
u8 table_miss_id[0x18];
u8 reserved_at_100[0x8];
u8 lag_master_next_table_id[0x18];
u8 reserved_at_120[0x80];
struct mlx5_ifc_flow_table_context_bits flow_table_context;
};
struct mlx5_ifc_ets_tcn_config_reg_bits {

@ -92,6 +92,19 @@ enum mlx5e_link_mode {
MLX5E_LINK_MODES_NUMBER,
};
enum mlx5e_connector_type {
MLX5E_PORT_UNKNOWN = 0,
MLX5E_PORT_NONE = 1,
MLX5E_PORT_TP = 2,
MLX5E_PORT_AUI = 3,
MLX5E_PORT_BNC = 4,
MLX5E_PORT_MII = 5,
MLX5E_PORT_FIBRE = 6,
MLX5E_PORT_DA = 7,
MLX5E_PORT_OTHER = 8,
MLX5E_CONNECTOR_TYPE_NUMBER,
};
#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
#define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF