drop_monitor: Expose tail drop counter

Previous patch made the length of the per-CPU skb drop list
configurable. Expose a counter that shows how many packets could not be
enqueued to this list.

This allows users to determine the desired queue length.

Signed-off-by: Ido Schimmel <idosch@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author:    Ido Schimmel <idosch@mellanox.com>  2019-08-11 10:35:55 +03:00
Committer: David S. Miller
Commit:    e9feb58020 (parent 30328d46af)
2 changed files with 111 additions and 0 deletions

include/uapi/linux/net_dropmon.h

@@ -56,6 +56,8 @@ enum {
 	NET_DM_CMD_PACKET_ALERT,
 	NET_DM_CMD_CONFIG_GET,
 	NET_DM_CMD_CONFIG_NEW,
+	NET_DM_CMD_STATS_GET,
+	NET_DM_CMD_STATS_NEW,
 	_NET_DM_CMD_MAX,
 };
@ -80,6 +82,7 @@ enum net_dm_attr {
NET_DM_ATTR_TRUNC_LEN, /* u32 */ NET_DM_ATTR_TRUNC_LEN, /* u32 */
NET_DM_ATTR_ORIG_LEN, /* u32 */ NET_DM_ATTR_ORIG_LEN, /* u32 */
NET_DM_ATTR_QUEUE_LEN, /* u32 */ NET_DM_ATTR_QUEUE_LEN, /* u32 */
NET_DM_ATTR_STATS, /* nested */
__NET_DM_ATTR_MAX, __NET_DM_ATTR_MAX,
NET_DM_ATTR_MAX = __NET_DM_ATTR_MAX - 1 NET_DM_ATTR_MAX = __NET_DM_ATTR_MAX - 1
@@ -103,4 +106,11 @@ enum {
 	NET_DM_ATTR_PORT_MAX = __NET_DM_ATTR_PORT_MAX - 1
 };
 
+enum {
+	NET_DM_ATTR_STATS_DROPPED,	/* u64 */
+
+	__NET_DM_ATTR_STATS_MAX,
+	NET_DM_ATTR_STATS_MAX = __NET_DM_ATTR_STATS_MAX - 1
+};
+
 #endif
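The uAPI additions above define the interface for reading the counter: user space sends NET_DM_CMD_STATS_GET, and the kernel replies with a NET_DM_CMD_STATS_NEW message carrying a nested NET_DM_ATTR_STATS attribute that holds the NET_DM_ATTR_STATS_DROPPED u64. Below is a minimal, hedged user-space sketch of such a query using libnl-3 (libnl-genl). It is not part of this patch; the build command, the genl version number and the error handling are assumptions, while the "NET_DM" family name and the attribute layout come from the kernel sources.

/* Hypothetical reader for the new tail drop counter (not part of this patch).
 * Build (assumption): gcc dm_stats.c $(pkg-config --cflags --libs libnl-genl-3.0)
 */
#include <stdio.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/net_dropmon.h>

/* Parse the NET_DM_CMD_STATS_NEW reply and print the tail drop counter. */
static int stats_cb(struct nl_msg *msg, void *arg)
{
	struct nlattr *attrs[NET_DM_ATTR_MAX + 1];
	struct nlattr *stats[NET_DM_ATTR_STATS_MAX + 1];
	struct genlmsghdr *gnlh = nlmsg_data(nlmsg_hdr(msg));

	if (nla_parse(attrs, NET_DM_ATTR_MAX, genlmsg_attrdata(gnlh, 0),
		      genlmsg_attrlen(gnlh, 0), NULL) < 0 ||
	    !attrs[NET_DM_ATTR_STATS])
		return NL_SKIP;

	if (nla_parse_nested(stats, NET_DM_ATTR_STATS_MAX,
			     attrs[NET_DM_ATTR_STATS], NULL) < 0)
		return NL_SKIP;

	if (stats[NET_DM_ATTR_STATS_DROPPED])
		printf("tail drops: %llu\n", (unsigned long long)
		       nla_get_u64(stats[NET_DM_ATTR_STATS_DROPPED]));
	return NL_OK;
}

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int family, ret;

	if (!sk || genl_connect(sk) < 0)
		return 1;

	/* "NET_DM" is the drop_monitor genetlink family name. */
	family = genl_ctrl_resolve(sk, "NET_DM");
	if (family < 0)
		return 1;

	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, stats_cb, NULL);

	msg = nlmsg_alloc();
	if (!msg)
		return 1;
	/* Version 2 is assumed to match the family definition. */
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NET_DM_CMD_STATS_GET, 2);

	ret = nl_send_auto(sk, msg);
	nlmsg_free(msg);
	if (ret < 0)
		return 1;

	nl_recvmsgs_default(sk);	/* stats_cb() prints the counter */
	nl_socket_free(sk);
	return 0;
}

The reply is a single unicast message, so a plain nl_recvmsgs_default() on the default callback set is enough to drive the valid-message callback once.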

net/core/drop_monitor.c

@@ -51,12 +51,18 @@ static int trace_state = TRACE_OFF;
  */
 static DEFINE_MUTEX(net_dm_mutex);
 
+struct net_dm_stats {
+	u64 dropped;
+	struct u64_stats_sync syncp;
+};
+
 struct per_cpu_dm_data {
 	spinlock_t		lock;	/* Protects 'skb' and 'send_timer' */
 	struct sk_buff		*skb;
 	struct sk_buff_head	drop_queue;
 	struct work_struct	dm_alert_work;
 	struct timer_list	send_timer;
+	struct net_dm_stats	stats;
 };
 
 struct dm_hw_stat_delta {
struct dm_hw_stat_delta { struct dm_hw_stat_delta {
@@ -300,6 +306,9 @@ static void net_dm_packet_trace_kfree_skb_hit(void *ignore,
 unlock_free:
 	spin_unlock_irqrestore(&data->drop_queue.lock, flags);
 
+	u64_stats_update_begin(&data->stats.syncp);
+	data->stats.dropped++;
+	u64_stats_update_end(&data->stats.syncp);
+
 	consume_skb(nskb);
 }
@@ -732,6 +741,93 @@ static int net_dm_cmd_config_get(struct sk_buff *skb, struct genl_info *info)
 	return rc;
 }
 
+static void net_dm_stats_read(struct net_dm_stats *stats)
+{
+	int cpu;
+
+	memset(stats, 0, sizeof(*stats));
+	for_each_possible_cpu(cpu) {
+		struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
+		struct net_dm_stats *cpu_stats = &data->stats;
+		unsigned int start;
+		u64 dropped;
+
+		do {
+			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+			dropped = cpu_stats->dropped;
+		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
+
+		stats->dropped += dropped;
+	}
+}
+
+static int net_dm_stats_put(struct sk_buff *msg)
+{
+	struct net_dm_stats stats;
+	struct nlattr *attr;
+
+	net_dm_stats_read(&stats);
+
+	attr = nla_nest_start(msg, NET_DM_ATTR_STATS);
+	if (!attr)
+		return -EMSGSIZE;
+
+	if (nla_put_u64_64bit(msg, NET_DM_ATTR_STATS_DROPPED,
+			      stats.dropped, NET_DM_ATTR_PAD))
+		goto nla_put_failure;
+
+	nla_nest_end(msg, attr);
+
+	return 0;
+
+nla_put_failure:
+	nla_nest_cancel(msg, attr);
+	return -EMSGSIZE;
+}
+
+static int net_dm_stats_fill(struct sk_buff *msg, struct genl_info *info)
+{
+	void *hdr;
+	int rc;
+
+	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
+			  &net_drop_monitor_family, 0, NET_DM_CMD_STATS_NEW);
+	if (!hdr)
+		return -EMSGSIZE;
+
+	rc = net_dm_stats_put(msg);
+	if (rc)
+		goto nla_put_failure;
+
+	genlmsg_end(msg, hdr);
+
+	return 0;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
+}
+
+static int net_dm_cmd_stats_get(struct sk_buff *skb, struct genl_info *info)
+{
+	struct sk_buff *msg;
+	int rc;
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	rc = net_dm_stats_fill(msg, info);
+	if (rc)
+		goto free_msg;
+
+	return genlmsg_reply(msg, info);
+
+free_msg:
+	nlmsg_free(msg);
+	return rc;
+}
+
 static int dropmon_net_event(struct notifier_block *ev_block,
 			     unsigned long event, void *ptr)
 {
static int dropmon_net_event(struct notifier_block *ev_block, static int dropmon_net_event(struct notifier_block *ev_block,
unsigned long event, void *ptr) unsigned long event, void *ptr)
{ {
@@ -799,6 +895,10 @@ static const struct genl_ops dropmon_ops[] = {
 		.cmd = NET_DM_CMD_CONFIG_GET,
 		.doit = net_dm_cmd_config_get,
 	},
+	{
+		.cmd = NET_DM_CMD_STATS_GET,
+		.doit = net_dm_cmd_stats_get,
+	},
 };
 
 static int net_dm_nl_pre_doit(const struct genl_ops *ops,
static int net_dm_nl_pre_doit(const struct genl_ops *ops, static int net_dm_nl_pre_doit(const struct genl_ops *ops,
@@ -865,6 +965,7 @@ static int __init init_net_drop_monitor(void)
 		data = &per_cpu(dm_cpu_data, cpu);
 		spin_lock_init(&data->lock);
 		skb_queue_head_init(&data->drop_queue);
+		u64_stats_init(&data->stats.syncp);
 	}
 
 	goto out;
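As the changelog notes, the counter exists so users can size the queue: if NET_DM_ATTR_STATS_DROPPED keeps rising while monitoring, the per-CPU queue length made configurable by the previous patch is too small. Below is a hedged sketch of that follow-up step, reusing the sk/family setup and includes from the query example above. NET_DM_CMD_CONFIG accepting NET_DM_ATTR_QUEUE_LEN comes from the earlier patch in this series; the helper name, the NLM_F_ACK handling and the example value 4096 are illustrative assumptions, and the kernel is expected to reject configuration while monitoring is active.

/* Sketch only: raise the per-CPU drop queue length when the tail drop
 * counter keeps growing. Assumes "sk" and "family" were set up as in the
 * stats example; 4096 is an arbitrary illustration, not a recommendation.
 */
static int net_dm_set_queue_len(struct nl_sock *sk, int family,
				unsigned int qlen)
{
	struct nl_msg *msg;
	int err;

	msg = nlmsg_alloc();
	if (!msg)
		return -1;

	if (!genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0,
			 NLM_F_ACK, NET_DM_CMD_CONFIG, 2) ||
	    nla_put_u32(msg, NET_DM_ATTR_QUEUE_LEN, qlen) < 0) {
		nlmsg_free(msg);
		return -1;
	}

	err = nl_send_auto(sk, msg);
	nlmsg_free(msg);
	if (err < 0)
		return err;

	/* Wait for the kernel's ACK; an error such as -EBUSY is expected
	 * if monitoring is still active when the configuration is changed.
	 */
	return nl_wait_for_ack(sk);
}

/* Example call: net_dm_set_queue_len(sk, family, 4096); */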