mlx5-fixes-2020-07-02
Merge tag 'mlx5-fixes-2020-07-02' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2020-07-02

This series introduces some fixes to the mlx5 driver.

v1->v2:
 - Drop the "ip -s" patch and the mirred device hold reference patch.
 - Will revise them in a later submission.

Please pull and let me know if there is any problem.

For -stable v5.2
 ('net/mlx5: Fix eeprom support for SFP module')

For -stable v5.4
 ('net/mlx5e: Fix 50G per lane indication')

For -stable v5.5
 ('net/mlx5e: Fix CPU mapping after function reload to avoid aRFS RX crash')
 ('net/mlx5e: Fix VXLAN configuration restore after function reload')

For -stable v5.7
 ('net/mlx5e: CT: Fix memory leak in cleanup')
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit ca68d5637a
@@ -29,6 +29,7 @@ struct mlx5e_dcbx {
 	bool manual_buffer;
 	u32 cable_len;
 	u32 xoff;
+	u16 port_buff_cell_sz;
 };
 
 #define MLX5E_MAX_DSCP (64)

@@ -78,11 +78,26 @@ static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = {
 	[MLX5E_400GAUI_8] = 400000,
 };
 
+bool mlx5e_ptys_ext_supported(struct mlx5_core_dev *mdev)
+{
+	struct mlx5e_port_eth_proto eproto;
+	int err;
+
+	if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet))
+		return true;
+
+	err = mlx5_port_query_eth_proto(mdev, 1, true, &eproto);
+	if (err)
+		return false;
+
+	return !!eproto.cap;
+}
+
 static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev,
 				     const u32 **arr, u32 *size,
 				     bool force_legacy)
 {
-	bool ext = force_legacy ? false : MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+	bool ext = force_legacy ? false : mlx5e_ptys_ext_supported(mdev);
 
 	*size = ext ? ARRAY_SIZE(mlx5e_ext_link_speed) :
 		      ARRAY_SIZE(mlx5e_link_speed);
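The hunks above and below switch callers from reading the PCAM feature bit to the new mlx5e_ptys_ext_supported() helper, which treats the bit as a hint and otherwise probes the extended protocol capability mask directly. A standalone sketch of that decision in plain C, with the two parameters standing in for the firmware queries (the function and values here are illustrative, not part of the patch):

#include <stdbool.h>
#include <stdio.h>

/* Sketch of the fallback logic: use the extended PTYS layout when the PCAM
 * feature bit is set, or, failing that, when the extended protocol
 * capability mask read from the device is non-zero.
 */
static bool ptys_ext_supported(bool pcam_feature_bit, unsigned int ext_proto_cap)
{
	if (pcam_feature_bit)
		return true;
	return ext_proto_cap != 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       ptys_ext_supported(true, 0),    /* 1: feature bit wins              */
	       ptys_ext_supported(false, 0x8), /* 1: probe found ext capabilities  */
	       ptys_ext_supported(false, 0));  /* 0: legacy-only device            */
	return 0;
}

The middle case is the one the series cares about: the feature bit is clear but the device still reports extended capabilities, so speeds such as 50G per lane must not be decoded with the legacy tables.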
@@ -177,7 +192,7 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
 	bool ext;
 	int err;
 
-	ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+	ext = mlx5e_ptys_ext_supported(mdev);
 	err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
 	if (err)
 		goto out;
@@ -205,7 +220,7 @@ int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
 	int err;
 	int i;
 
-	ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+	ext = mlx5e_ptys_ext_supported(mdev);
 	err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
 	if (err)
 		return err;

@@ -54,7 +54,7 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
 int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
 u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
 			       bool force_legacy);
-
+bool mlx5e_ptys_ext_supported(struct mlx5_core_dev *mdev);
 int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out);
 int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in);
 int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer);

@@ -34,6 +34,7 @@
 int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
 			    struct mlx5e_port_buffer *port_buffer)
 {
+	u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
 	struct mlx5_core_dev *mdev = priv->mdev;
 	int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
 	u32 total_used = 0;
@@ -57,11 +58,11 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
 		port_buffer->buffer[i].epsb =
 			MLX5_GET(bufferx_reg, buffer, epsb);
 		port_buffer->buffer[i].size =
-			MLX5_GET(bufferx_reg, buffer, size) << MLX5E_BUFFER_CELL_SHIFT;
+			MLX5_GET(bufferx_reg, buffer, size) * port_buff_cell_sz;
 		port_buffer->buffer[i].xon =
-			MLX5_GET(bufferx_reg, buffer, xon_threshold) << MLX5E_BUFFER_CELL_SHIFT;
+			MLX5_GET(bufferx_reg, buffer, xon_threshold) * port_buff_cell_sz;
 		port_buffer->buffer[i].xoff =
-			MLX5_GET(bufferx_reg, buffer, xoff_threshold) << MLX5E_BUFFER_CELL_SHIFT;
+			MLX5_GET(bufferx_reg, buffer, xoff_threshold) * port_buff_cell_sz;
 		total_used += port_buffer->buffer[i].size;
 
 		mlx5e_dbg(HW, priv, "buffer %d: size=%d, xon=%d, xoff=%d, epsb=%d, lossy=%d\n", i,
@@ -73,7 +74,7 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
 	}
 
 	port_buffer->port_buffer_size =
-		MLX5_GET(pbmc_reg, out, port_buffer_size) << MLX5E_BUFFER_CELL_SHIFT;
+		MLX5_GET(pbmc_reg, out, port_buffer_size) * port_buff_cell_sz;
 	port_buffer->spare_buffer_size =
 		port_buffer->port_buffer_size - total_used;
 
@@ -88,9 +89,9 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
 static int port_set_buffer(struct mlx5e_priv *priv,
 			   struct mlx5e_port_buffer *port_buffer)
 {
+	u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
 	struct mlx5_core_dev *mdev = priv->mdev;
 	int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
-	void *buffer;
 	void *in;
 	int err;
 	int i;
@@ -104,16 +105,18 @@ static int port_set_buffer(struct mlx5e_priv *priv,
 		goto out;
 
 	for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
-		buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]);
+		void *buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]);
+		u64 size = port_buffer->buffer[i].size;
+		u64 xoff = port_buffer->buffer[i].xoff;
+		u64 xon = port_buffer->buffer[i].xon;
 
-		MLX5_SET(bufferx_reg, buffer, size,
-			 port_buffer->buffer[i].size >> MLX5E_BUFFER_CELL_SHIFT);
-		MLX5_SET(bufferx_reg, buffer, lossy,
-			 port_buffer->buffer[i].lossy);
-		MLX5_SET(bufferx_reg, buffer, xoff_threshold,
-			 port_buffer->buffer[i].xoff >> MLX5E_BUFFER_CELL_SHIFT);
-		MLX5_SET(bufferx_reg, buffer, xon_threshold,
-			 port_buffer->buffer[i].xon >> MLX5E_BUFFER_CELL_SHIFT);
+		do_div(size, port_buff_cell_sz);
+		do_div(xoff, port_buff_cell_sz);
+		do_div(xon, port_buff_cell_sz);
+		MLX5_SET(bufferx_reg, buffer, size, size);
+		MLX5_SET(bufferx_reg, buffer, lossy, port_buffer->buffer[i].lossy);
+		MLX5_SET(bufferx_reg, buffer, xoff_threshold, xoff);
+		MLX5_SET(bufferx_reg, buffer, xon_threshold, xon);
 	}
 
 	err = mlx5e_port_set_pbmc(mdev, in);
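port_set_buffer() now converts bytes to cells by dividing by the queried cell size (using 64-bit intermediates and the kernel's do_div(), which divides its first argument in place), instead of shifting right by MLX5E_BUFFER_CELL_SHIFT, which hard-codes a 128-byte cell. A standalone sketch of why that matters, with 144 bytes as an arbitrary example cell size rather than a value taken from any real device:

#include <stdio.h>

int main(void)
{
	unsigned long long bytes = 32768;	/* requested buffer size in bytes */
	unsigned int cell_legacy = 1 << 7;	/* old hard-coded 128-byte cell   */
	unsigned int cell_dev = 144;		/* illustrative device cell size  */

	printf("old shift-by-7 : %llu cells\n", bytes >> 7);
	printf("divide by %u  : %llu cells\n", cell_legacy, bytes / cell_legacy);
	printf("divide by %u  : %llu cells\n", cell_dev, bytes / cell_dev);
	return 0;
}

The first two results agree only because the legacy cell happens to be 128 bytes; on a device with a different cell size the shift would program the wrong number of cells.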
@@ -143,7 +146,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
 }
 
 static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
-				 u32 xoff, unsigned int max_mtu)
+				 u32 xoff, unsigned int max_mtu, u16 port_buff_cell_sz)
 {
 	int i;
 
@@ -155,7 +158,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
 		}
 
 		if (port_buffer->buffer[i].size <
-		    (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) {
+		    (xoff + max_mtu + port_buff_cell_sz)) {
 			pr_err("buffer_size[%d]=%d is not enough for lossless buffer\n",
 			       i, port_buffer->buffer[i].size);
 			return -ENOMEM;
@@ -175,6 +178,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
  * @pfc_en: <input> current pfc configuration
  * @buffer: <input> current prio to buffer mapping
  * @xoff: <input> xoff value
+ * @port_buff_cell_sz: <input> port buffer cell_size
  * @port_buffer: <output> port receive buffer configuration
  * @change: <output>
  *
@@ -189,7 +193,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
  *	sets change to true if buffer configuration was modified.
  */
 static int update_buffer_lossy(unsigned int max_mtu,
-			       u8 pfc_en, u8 *buffer, u32 xoff,
+			       u8 pfc_en, u8 *buffer, u32 xoff, u16 port_buff_cell_sz,
 			       struct mlx5e_port_buffer *port_buffer,
 			       bool *change)
 {
@@ -225,7 +229,7 @@ static int update_buffer_lossy(unsigned int max_mtu,
 	}
 
 	if (changed) {
-		err = update_xoff_threshold(port_buffer, xoff, max_mtu);
+		err = update_xoff_threshold(port_buffer, xoff, max_mtu, port_buff_cell_sz);
 		if (err)
 			return err;
 
@@ -262,6 +266,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
 				    u32 *buffer_size,
 				    u8 *prio2buffer)
 {
+	u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
 	struct mlx5e_port_buffer port_buffer;
 	u32 xoff = calculate_xoff(priv, mtu);
 	bool update_prio2buffer = false;
@@ -282,7 +287,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
 
 	if (change & MLX5E_PORT_BUFFER_CABLE_LEN) {
 		update_buffer = true;
-		err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
+		err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz);
 		if (err)
 			return err;
 	}
@@ -292,7 +297,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
 		if (err)
 			return err;
 
-		err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff,
+		err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff, port_buff_cell_sz,
 					  &port_buffer, &update_buffer);
 		if (err)
 			return err;
@@ -304,7 +309,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
 		if (err)
 			return err;
 
-		err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer,
+		err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, port_buff_cell_sz,
 					  xoff, &port_buffer, &update_buffer);
 		if (err)
 			return err;
@@ -329,7 +334,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
 			return -EINVAL;
 
 		update_buffer = true;
-		err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
+		err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz);
 		if (err)
 			return err;
 	}
@@ -337,7 +342,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
 	/* Need to update buffer configuration if xoff value is changed */
 	if (!update_buffer && xoff != priv->dcbx.xoff) {
 		update_buffer = true;
-		err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
+		err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz);
 		if (err)
 			return err;
 	}

@@ -36,7 +36,6 @@
 #include "port.h"
 
 #define MLX5E_MAX_BUFFER 8
-#define MLX5E_BUFFER_CELL_SHIFT 7
 #define MLX5E_DEFAULT_CABLE_LEN 7 /* 7 meters */
 
 #define MLX5_BUFFER_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, pcam_reg) && \

@@ -1097,6 +1097,7 @@ mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg)
 	struct mlx5_ct_entry *entry = ptr;
 
 	mlx5_tc_ct_entry_del_rules(ct_priv, entry);
+	kfree(entry);
 }
 
 static void
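The one-line fix makes the flush callback free the entry it is handed: during cleanup the callback is the entry's last owner, so deleting the hardware rules without the kfree() leaks one allocation per tracked connection. A kernel-style sketch of that ownership contract, assuming the callback is run by rhashtable_free_and_destroy() as is typical for this pattern (the table field name below is hypothetical, and the snippet is not compilable on its own):

static void example_flush_entry(void *ptr, void *arg)
{
	struct mlx5_tc_ct_priv *ct_priv = arg;
	struct mlx5_ct_entry *entry = ptr;

	/* tear down the offloaded rules, then release the entry itself;
	 * nothing else will touch this entry after the callback returns
	 */
	mlx5_tc_ct_entry_del_rules(ct_priv, entry);
	kfree(entry);
}

static void example_ct_cleanup(struct mlx5_tc_ct_priv *ct_priv)
{
	/* hands every remaining entry to the callback exactly once */
	rhashtable_free_and_destroy(&ct_priv->example_entries_ht,
				    example_flush_entry, ct_priv);
}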
@@ -1217,6 +1217,24 @@ static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
 	return 0;
 }
 
+#define MLX5E_BUFFER_CELL_SHIFT 7
+
+static u16 mlx5e_query_port_buffers_cell_size(struct mlx5e_priv *priv)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u32 out[MLX5_ST_SZ_DW(sbcam_reg)] = {};
+	u32 in[MLX5_ST_SZ_DW(sbcam_reg)] = {};
+
+	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
+		return (1 << MLX5E_BUFFER_CELL_SHIFT);
+
+	if (mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
+				 MLX5_REG_SBCAM, 0, 0))
+		return (1 << MLX5E_BUFFER_CELL_SHIFT);
+
+	return MLX5_GET(sbcam_reg, out, cap_cell_size);
+}
+
 void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
 {
 	struct mlx5e_dcbx *dcbx = &priv->dcbx;
@@ -1234,6 +1252,7 @@ void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
 	if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
 		priv->dcbx.cap |= DCB_CAP_DCBX_HOST;
 
+	priv->dcbx.port_buff_cell_sz = mlx5e_query_port_buffers_cell_size(priv);
 	priv->dcbx.manual_buffer = false;
 	priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN;
 
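The cell size is queried once from the SBCAM register at DCBX init and cached in priv->dcbx.port_buff_cell_sz, with 1 << 7 = 128 bytes as the fallback when the register is unsupported or the access fails. A standalone sketch of the resulting round trip, program in cells on the set path and read back in bytes on the query path, again with an arbitrary example cell size:

#include <stdio.h>

int main(void)
{
	unsigned int cell = 144;	/* would come from SBCAM cap_cell_size     */
	unsigned int bytes = 32768;	/* requested buffer size                   */
	unsigned int cells = bytes / cell;

	printf("programmed: %u cells\n", cells);
	printf("read back : %u bytes (quantized down from %u)\n",
	       cells * cell, bytes);
	return 0;
}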
@@ -200,7 +200,7 @@ static void mlx5e_ethtool_get_speed_arr(struct mlx5_core_dev *mdev,
 					struct ptys2ethtool_config **arr,
 					u32 *size)
 {
-	bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+	bool ext = mlx5e_ptys_ext_supported(mdev);
 
 	*arr = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
 	*size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
@@ -883,7 +883,7 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp,
 			       struct ethtool_link_ksettings *link_ksettings)
 {
 	unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
-	bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+	bool ext = mlx5e_ptys_ext_supported(mdev);
 
 	ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext);
 }
@@ -913,7 +913,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
 			   __func__, err);
 		goto err_query_regs;
 	}
-	ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+	ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability);
 	eth_proto_cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
 					   eth_proto_capability);
 	eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
@@ -1066,7 +1066,7 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
 	autoneg = link_ksettings->base.autoneg;
 	speed = link_ksettings->base.speed;
 
-	ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+	ext_supported = mlx5e_ptys_ext_supported(mdev);
 	ext = ext_requested(autoneg, adver, ext_supported);
 	if (!ext_supported && ext)
 		return -EOPNOTSUPP;

@@ -3104,9 +3104,6 @@ int mlx5e_open(struct net_device *netdev)
 		mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
 	mutex_unlock(&priv->state_lock);
 
-	if (mlx5_vxlan_allowed(priv->mdev->vxlan))
-		udp_tunnel_get_rx_info(netdev);
-
 	return err;
 }
 
@@ -5121,6 +5118,10 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
 	if (err)
 		goto err_destroy_flow_steering;
 
+#ifdef CONFIG_MLX5_EN_ARFS
+	priv->netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(priv->mdev);
+#endif
+
 	return 0;
 
 err_destroy_flow_steering:
@@ -5202,6 +5203,8 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
 	rtnl_lock();
 	if (netif_running(netdev))
 		mlx5e_open(netdev);
+	if (mlx5_vxlan_allowed(priv->mdev->vxlan))
+		udp_tunnel_get_rx_info(netdev);
 	netif_device_attach(netdev);
 	rtnl_unlock();
 }
@@ -5216,6 +5219,8 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
 	rtnl_lock();
 	if (netif_running(priv->netdev))
 		mlx5e_close(priv->netdev);
+	if (mlx5_vxlan_allowed(priv->mdev->vxlan))
+		udp_tunnel_drop_rx_info(priv->netdev);
 	netif_device_detach(priv->netdev);
 	rtnl_unlock();
 
@@ -5288,10 +5293,6 @@ int mlx5e_netdev_init(struct net_device *netdev,
 	/* netdev init */
 	netif_carrier_off(netdev);
 
-#ifdef CONFIG_MLX5_EN_ARFS
-	netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(mdev);
-#endif
-
 	return 0;
 
 err_free_cpumask:
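These four hunks apply the same rule twice: configuration that is lost across a function reload, the VXLAN UDP port offload state and the EQ table's aRFS rx_cpu_rmap, is now refreshed from the profile hooks that run again on reload (mlx5e_nic_enable()/mlx5e_nic_disable() and mlx5e_init_nic_rx()) rather than from mlx5e_open()/mlx5e_netdev_init(). A standalone sketch of the aRFS half of the problem, a cached pointer going stale when its producer is reallocated on reload; all names here are illustrative:

#include <stdio.h>
#include <stdlib.h>

struct eq_table { int *rmap; };

static void reload(struct eq_table *t)
{
	free(t->rmap);
	t->rmap = calloc(4, sizeof(int));	/* new allocation, new address    */
}

int main(void)
{
	struct eq_table table = { .rmap = calloc(4, sizeof(int)) };
	int *netdev_rx_cpu_rmap = table.rmap;	/* cached once at netdev init     */

	reload(&table);
	printf("cached copy : %p (dangling after reload)\n",
	       (void *)netdev_rx_cpu_rmap);
	printf("fresh table : %p\n", (void *)table.rmap);

	netdev_rx_cpu_rmap = table.rmap;	/* what re-running the hook fixes */
	printf("refreshed   : %p\n", (void *)netdev_rx_cpu_rmap);

	free(table.rmap);
	return 0;
}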
@@ -4670,9 +4670,10 @@ static bool is_flow_rule_duplicate_allowed(struct net_device *dev,
 					   struct mlx5e_rep_priv *rpriv)
 {
 	/* Offloaded flow rule is allowed to duplicate on non-uplink representor
-	 * sharing tc block with other slaves of a lag device.
+	 * sharing tc block with other slaves of a lag device. Rpriv can be NULL if this
+	 * function is called from NIC mode.
 	 */
-	return netif_is_lag_port(dev) && rpriv->rep->vport != MLX5_VPORT_UPLINK;
+	return netif_is_lag_port(dev) && rpriv && rpriv->rep->vport != MLX5_VPORT_UPLINK;
 }
 
 int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
@@ -4686,13 +4687,12 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
 
 	rcu_read_lock();
 	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
-	rcu_read_unlock();
 	if (flow) {
 		/* Same flow rule offloaded to non-uplink representor sharing tc block,
 		 * just return 0.
 		 */
 		if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev)
-			goto out;
+			goto rcu_unlock;
 
 		NL_SET_ERR_MSG_MOD(extack,
 				   "flow cookie already exists, ignoring");
@@ -4700,8 +4700,12 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
 			 "flow cookie %lx already exists, ignoring\n",
 			 f->cookie);
 		err = -EEXIST;
-		goto out;
+		goto rcu_unlock;
 	}
+rcu_unlock:
+	rcu_read_unlock();
+	if (flow)
+		goto out;
 
 	trace_mlx5e_configure_flower(f);
 	err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
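The second half of this change closes a window in which the flow returned by rhashtable_lookup() was dereferenced after rcu_read_unlock(), i.e. after its lifetime guarantee had already lapsed; the new rcu_unlock label keeps the read-side lock held across every access to the looked-up flow. A kernel-style sketch of the corrected pattern, simplified and not the literal driver logic:

	struct mlx5e_tc_flow *flow;
	bool duplicate = false;

	rcu_read_lock();
	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
	if (flow && flow->orig_dev == dev)	/* dereference only under RCU      */
		duplicate = true;
	rcu_read_unlock();			/* flow may be freed from here on  */

	if (duplicate)
		return -EEXIST;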
@@ -217,7 +217,6 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
 	}
 
 	/* Create ingress allow rule */
 	memset(spec, 0, sizeof(*spec));
-	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
 	vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec,

@@ -293,7 +293,40 @@ static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
 	return 0;
 }
 
-static int mlx5_eeprom_page(int offset)
+static int mlx5_query_module_id(struct mlx5_core_dev *dev, int module_num,
+				u8 *module_id)
+{
+	u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {};
+	u32 out[MLX5_ST_SZ_DW(mcia_reg)];
+	int err, status;
+	u8 *ptr;
+
+	MLX5_SET(mcia_reg, in, i2c_device_address, MLX5_I2C_ADDR_LOW);
+	MLX5_SET(mcia_reg, in, module, module_num);
+	MLX5_SET(mcia_reg, in, device_address, 0);
+	MLX5_SET(mcia_reg, in, page_number, 0);
+	MLX5_SET(mcia_reg, in, size, 1);
+	MLX5_SET(mcia_reg, in, l, 0);
+
+	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+				   sizeof(out), MLX5_REG_MCIA, 0, 0);
+	if (err)
+		return err;
+
+	status = MLX5_GET(mcia_reg, out, status);
+	if (status) {
+		mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n",
+			      status);
+		return -EIO;
+	}
+	ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
+
+	*module_id = ptr[0];
+
+	return 0;
+}
+
+static int mlx5_qsfp_eeprom_page(u16 offset)
 {
 	if (offset < MLX5_EEPROM_PAGE_LENGTH)
 		/* Addresses between 0-255 - page 00 */
@@ -307,7 +340,7 @@ static int mlx5_eeprom_page(int offset)
 		       MLX5_EEPROM_HIGH_PAGE_LENGTH);
 }
 
-static int mlx5_eeprom_high_page_offset(int page_num)
+static int mlx5_qsfp_eeprom_high_page_offset(int page_num)
 {
 	if (!page_num) /* Page 0 always start from low page */
 		return 0;
@@ -316,35 +349,62 @@ static int mlx5_eeprom_high_page_offset(int page_num)
 	return page_num * MLX5_EEPROM_HIGH_PAGE_LENGTH;
 }
 
+static void mlx5_qsfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset)
+{
+	*i2c_addr = MLX5_I2C_ADDR_LOW;
+	*page_num = mlx5_qsfp_eeprom_page(*offset);
+	*offset -= mlx5_qsfp_eeprom_high_page_offset(*page_num);
+}
+
+static void mlx5_sfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset)
+{
+	*i2c_addr = MLX5_I2C_ADDR_LOW;
+	*page_num = 0;
+
+	if (*offset < MLX5_EEPROM_PAGE_LENGTH)
+		return;
+
+	*i2c_addr = MLX5_I2C_ADDR_HIGH;
+	*offset -= MLX5_EEPROM_PAGE_LENGTH;
+}
+
 int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
 			     u16 offset, u16 size, u8 *data)
 {
-	int module_num, page_num, status, err;
+	int module_num, status, err, page_num = 0;
+	u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {};
 	u32 out[MLX5_ST_SZ_DW(mcia_reg)];
-	u32 in[MLX5_ST_SZ_DW(mcia_reg)];
-	u16 i2c_addr;
-	void *ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
+	u16 i2c_addr = 0;
+	u8 module_id;
+	void *ptr;
 
 	err = mlx5_query_module_num(dev, &module_num);
 	if (err)
 		return err;
 
-	memset(in, 0, sizeof(in));
-	size = min_t(int, size, MLX5_EEPROM_MAX_BYTES);
-
-	/* Get the page number related to the given offset */
-	page_num = mlx5_eeprom_page(offset);
+	err = mlx5_query_module_id(dev, module_num, &module_id);
+	if (err)
+		return err;
 
-	/* Set the right offset according to the page number,
-	 * For page_num > 0, relative offset is always >= 128 (high page).
-	 */
-	offset -= mlx5_eeprom_high_page_offset(page_num);
+	switch (module_id) {
+	case MLX5_MODULE_ID_SFP:
+		mlx5_sfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
+		break;
+	case MLX5_MODULE_ID_QSFP:
+	case MLX5_MODULE_ID_QSFP_PLUS:
+	case MLX5_MODULE_ID_QSFP28:
+		mlx5_qsfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
+		break;
+	default:
+		mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id);
+		return -EINVAL;
+	}
 
 	if (offset + size > MLX5_EEPROM_PAGE_LENGTH)
 		/* Cross pages read, read until offset 256 in low page */
 		size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
 
-	i2c_addr = MLX5_I2C_ADDR_LOW;
+	size = min_t(int, size, MLX5_EEPROM_MAX_BYTES);
 
 	MLX5_SET(mcia_reg, in, l, 0);
 	MLX5_SET(mcia_reg, in, module, module_num);
@@ -365,6 +425,7 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
 		return -EIO;
 	}
 
+	ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
 	memcpy(data, ptr, size);
 
 	return size;
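The rework derives the MCIA parameters from the module type: QSFP-family modules keep the single low I2C address and use upper pages above byte 255, while SFP modules have no pages and expose their second 256-byte area (A2h) at the high I2C address instead. A standalone worked example for offset 300; the constants and the QSFP page formula are assumptions based on the SFF conventions the driver follows (256-byte lower page, 128-byte upper pages, I2C addresses 0x50/0x51), not text quoted from the patch:

#include <stdio.h>

#define EEPROM_PAGE_LENGTH      256
#define EEPROM_HIGH_PAGE_LENGTH 128
#define I2C_ADDR_LOW            0x50
#define I2C_ADDR_HIGH           0x51

static void qsfp_params(unsigned int *i2c, int *page, unsigned int *off)
{
	*i2c = I2C_ADDR_LOW;
	*page = *off < EEPROM_PAGE_LENGTH ?
		0 : 1 + (*off - EEPROM_PAGE_LENGTH) / EEPROM_HIGH_PAGE_LENGTH;
	if (*page)
		*off -= *page * EEPROM_HIGH_PAGE_LENGTH;
}

static void sfp_params(unsigned int *i2c, int *page, unsigned int *off)
{
	*i2c = I2C_ADDR_LOW;
	*page = 0;
	if (*off >= EEPROM_PAGE_LENGTH) {	/* A2h area lives at 0x51, not a page */
		*i2c = I2C_ADDR_HIGH;
		*off -= EEPROM_PAGE_LENGTH;
	}
}

int main(void)
{
	unsigned int i2c, off = 300;
	int page;

	qsfp_params(&i2c, &page, &off);
	printf("QSFP offset 300 -> i2c 0x%x, page %d, offset %u\n", i2c, page, off);

	off = 300;
	sfp_params(&i2c, &page, &off);
	printf("SFP  offset 300 -> i2c 0x%x, page %d, offset %u\n", i2c, page, off);
	return 0;
}

For the SFP case this prints i2c 0x51, page 0, offset 44, which is exactly what the old always-page-based mapping got wrong for SFP modules.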
@@ -147,6 +147,7 @@ enum {
 	MLX5_REG_MCDA = 0x9063,
 	MLX5_REG_MCAM = 0x907f,
 	MLX5_REG_MIRC = 0x9162,
+	MLX5_REG_SBCAM = 0xB01F,
 	MLX5_REG_RESOURCE_DUMP = 0xC000,
 };
 
@@ -9960,6 +9960,34 @@ struct mlx5_ifc_pptb_reg_bits {
 	u8 untagged_buff[0x4];
 };
 
+struct mlx5_ifc_sbcam_reg_bits {
+	u8 reserved_at_0[0x8];
+	u8 feature_group[0x8];
+	u8 reserved_at_10[0x8];
+	u8 access_reg_group[0x8];
+
+	u8 reserved_at_20[0x20];
+
+	u8 sb_access_reg_cap_mask[4][0x20];
+
+	u8 reserved_at_c0[0x80];
+
+	u8 sb_feature_cap_mask[4][0x20];
+
+	u8 reserved_at_1c0[0x40];
+
+	u8 cap_total_buffer_size[0x20];
+
+	u8 cap_cell_size[0x10];
+	u8 cap_max_pg_buffers[0x8];
+	u8 cap_num_pool_supported[0x8];
+
+	u8 reserved_at_240[0x8];
+	u8 cap_sbsr_stat_size[0x8];
+	u8 cap_max_tclass_data[0x8];
+	u8 cap_max_cpu_ingress_tclass_sb[0x8];
+};
+
 struct mlx5_ifc_pbmc_reg_bits {
 	u8 reserved_at_0[0x8];
 	u8 local_port[0x8];