Merge branch 'mlx4-net'
Or Gerlitz says:

====================
mlx4 driver fixes for 4.0-rc

Just a few small fixes for the 4.0 rc cycle. The fix from Moni addresses
an issue introduced in 4.0-rc1, so we only need it in net. Eran's
off-by-one fix should go to 3.19.y as well.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 2fc800f2c5
@@ -64,6 +64,14 @@ enum {
 #define GUID_TBL_BLK_NUM_ENTRIES 8
 #define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)
 
+/* Counters should be saturate once they reach their maximum value */
+#define ASSIGN_32BIT_COUNTER(counter, value) do {\
+	if ((value) > U32_MAX) \
+		counter = cpu_to_be32(U32_MAX); \
+	else \
+		counter = cpu_to_be32(value); \
+} while (0)
+
 struct mlx4_mad_rcv_buf {
 	struct ib_grh grh;
 	u8 payload[256];
@@ -806,10 +814,14 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 static void edit_counter(struct mlx4_counter *cnt,
 					struct ib_pma_portcounters *pma_cnt)
 {
-	pma_cnt->port_xmit_data = cpu_to_be32((be64_to_cpu(cnt->tx_bytes)>>2));
-	pma_cnt->port_rcv_data = cpu_to_be32((be64_to_cpu(cnt->rx_bytes)>>2));
-	pma_cnt->port_xmit_packets = cpu_to_be32(be64_to_cpu(cnt->tx_frames));
-	pma_cnt->port_rcv_packets = cpu_to_be32(be64_to_cpu(cnt->rx_frames));
+	ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
+			     (be64_to_cpu(cnt->tx_bytes) >> 2));
+	ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
+			     (be64_to_cpu(cnt->rx_bytes) >> 2));
+	ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
+			     be64_to_cpu(cnt->tx_frames));
+	ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
+			     be64_to_cpu(cnt->rx_frames));
 }
 
 static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
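Aside (not part of the commit): the ASSIGN_32BIT_COUNTER change above clamps a 64-bit hardware counter at U32_MAX before it is stored into a 32-bit big-endian PMA field, instead of letting the value wrap. A minimal userspace sketch of the same saturation idea, using hypothetical names (saturate_to_be32, with htonl standing in for the kernel's cpu_to_be32):

	#include <stdint.h>
	#include <stdio.h>
	#include <arpa/inet.h>	/* htonl() stands in for cpu_to_be32() */

	/* Clamp a 64-bit counter to UINT32_MAX before storing it in a
	 * 32-bit big-endian field, rather than silently truncating it. */
	static uint32_t saturate_to_be32(uint64_t value)
	{
		uint32_t clamped = (value > UINT32_MAX) ? UINT32_MAX : (uint32_t)value;

		return htonl(clamped);
	}

	int main(void)
	{
		uint64_t tx_bytes = 0x1234567890ULL;	/* well over 4 GiB of traffic */

		/* PMA port_xmit_data counts 32-bit words, hence the >> 2. */
		uint32_t port_xmit_data = saturate_to_be32(tx_bytes >> 2);

		printf("port_xmit_data (big endian): 0x%08x\n", (unsigned)port_xmit_data);
		return 0;
	}

Without the clamp, the cast to 32 bits would report a small, misleading value once the counter exceeds U32_MAX, which is exactly what the macro in the hunk above avoids.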
@@ -2697,8 +2697,12 @@ static void handle_bonded_port_state_event(struct work_struct *work)
 	spin_lock_bh(&ibdev->iboe.lock);
 	for (i = 0; i < MLX4_MAX_PORTS; ++i) {
 		struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
+		enum ib_port_state curr_port_state;
 
-		enum ib_port_state curr_port_state =
+		if (!curr_netdev)
+			continue;
+
+		curr_port_state =
 			(netif_running(curr_netdev) &&
 			 netif_carrier_ok(curr_netdev)) ?
 				IB_PORT_ACTIVE : IB_PORT_DOWN;
@@ -1698,8 +1698,6 @@ int mlx4_en_start_port(struct net_device *dev)
 	/* Schedule multicast task to populate multicast list */
 	queue_work(mdev->workqueue, &priv->rx_mode_task);
 
-	mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
-
 #ifdef CONFIG_MLX4_EN_VXLAN
 	if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
 		vxlan_get_rx_port(dev);
@@ -2853,6 +2851,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	queue_delayed_work(mdev->workqueue, &priv->service_task,
 			   SERVICE_TASK_DELAY);
 
+	mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
+
 	return 0;
 
 out:
@@ -453,7 +453,7 @@ struct mlx4_en_port_stats {
 	unsigned long rx_chksum_none;
 	unsigned long rx_chksum_complete;
 	unsigned long tx_chksum_offload;
-#define NUM_PORT_STATS		9
+#define NUM_PORT_STATS		10
 };
 
 struct mlx4_en_perf_stats {
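Aside (not part of the commit): the NUM_PORT_STATS bump fixes Eran's off-by-one, since ethtool statistics are exported assuming exactly that many unsigned long fields; when rx_chksum_complete was added the count stayed at 9, so the last counter was silently dropped from the display. A standalone sketch of the same pitfall, with hypothetical struct and field names:

	#include <stdio.h>

	/* Hypothetical stats struct mirroring the mlx4_en_port_stats layout.
	 * NUM_STATS must match the number of unsigned long fields; a loop
	 * that copies "NUM_STATS" counters silently drops any beyond it. */
	struct port_stats {
		unsigned long tx_packets;
		unsigned long rx_packets;
		unsigned long rx_chksum_none;
		unsigned long rx_chksum_complete;	/* newly added field */
		unsigned long tx_chksum_offload;
	};
	#define NUM_STATS (sizeof(struct port_stats) / sizeof(unsigned long))

	int main(void)
	{
		struct port_stats stats = { 1, 2, 3, 4, 5 };
		const unsigned long *data = (const unsigned long *)&stats;

		/* With a hard-coded count one too small (the old off-by-one),
		 * tx_chksum_offload would never be printed. */
		for (size_t i = 0; i < NUM_STATS; i++)
			printf("stat[%zu] = %lu\n", i, data[i]);

		return 0;
	}

The kernel fix simply updates the hard-coded define; deriving the count from the struct size, as sketched here, is one way to keep the two from drifting apart.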