dpaa2-eth: Add congestion group taildrop

The increase in the number of ingress frame queues means we now risk
depleting the buffer pool before the FQ taildrop kicks in.

Congestion group taildrop allows us to control the number of frames that
can accumulate on a group of Rx frame queues belonging to the same
traffic class.  This setting coexists with the frame queue based
taildrop: whichever limit gets hit first triggers the frame drop.
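
For intuition, here is a minimal sketch (illustrative C only, not part of the
patch; the names and the 2048-frame figure are assumptions) of how the two
limits coexist: a frame is dropped as soon as either the per-frame-queue byte
threshold or the per-traffic-class frame threshold is exceeded.

/* Illustrative sketch, not driver code: models the "whichever limit is hit
 * first" decision. All identifiers and numbers are assumed for the example.
 */
#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_FQ_TD_BYTES	(64 * 1024)	/* per frame queue, in bytes */
#define EXAMPLE_CG_TD_FRAMES	2048		/* per traffic-class group, in frames */

static bool example_should_drop(uint32_t fq_backlog_bytes,
				uint32_t cg_backlog_frames)
{
	/* Whichever limit is reached first triggers the frame drop */
	return fq_backlog_bytes >= EXAMPLE_FQ_TD_BYTES ||
	       cg_backlog_frames >= EXAMPLE_CG_TD_FRAMES;
}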

Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author: Ioana Radulescu
Date: 2020-05-31 00:08:11 +03:00, committed by David S. Miller
commit 2c8d1c8d7d (parent ad054f2654)
2 changed files with 38 additions and 10 deletions


@@ -1287,17 +1287,20 @@ static void disable_ch_napi(struct dpaa2_eth_priv *priv)
 	}
 }
 
-static void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, bool enable)
+static void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
+				      bool tx_pause)
 {
 	struct dpni_taildrop td = {0};
 	struct dpaa2_eth_fq *fq;
 	int i, err;
 
-	if (priv->rx_td_enabled == enable)
+	td.enable = !tx_pause;
+	if (priv->rx_td_enabled == td.enable)
 		return;
 
-	td.enable = enable;
-	td.threshold = DPAA2_ETH_TAILDROP_THRESH;
+	/* FQ taildrop: threshold is in bytes, per frame queue */
+	td.threshold = DPAA2_ETH_FQ_TAILDROP_THRESH;
 	td.units = DPNI_CONGESTION_UNIT_BYTES;
 	for (i = 0; i < priv->num_fqs; i++) {
 		fq = &priv->fq[i];
@@ -1308,12 +1311,28 @@ static void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, bool enable)
 					fq->tc, fq->flowid, &td);
 		if (err) {
 			netdev_err(priv->net_dev,
-				   "dpni_set_taildrop() failed\n");
-			break;
+				   "dpni_set_taildrop(FQ) failed\n");
+			return;
 		}
 	}
 
-	priv->rx_td_enabled = enable;
+	/* Congestion group taildrop: threshold is in frames, per group
+	 * of FQs belonging to the same traffic class
+	 */
+	td.threshold = DPAA2_ETH_CG_TAILDROP_THRESH(priv);
+	td.units = DPNI_CONGESTION_UNIT_FRAMES;
+	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+		err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
+					DPNI_CP_GROUP, DPNI_QUEUE_RX,
+					i, 0, &td);
+		if (err) {
+			netdev_err(priv->net_dev,
+				   "dpni_set_taildrop(CG) failed\n");
+			return;
+		}
+	}
+
+	priv->rx_td_enabled = td.enable;
 }
 
 static int link_state_update(struct dpaa2_eth_priv *priv)
@@ -1334,7 +1353,7 @@ static int link_state_update(struct dpaa2_eth_priv *priv)
 	 * only when pause frame generation is disabled.
 	 */
 	tx_pause = dpaa2_eth_tx_pause_enabled(state.options);
-	dpaa2_eth_set_rx_taildrop(priv, !tx_pause);
+	dpaa2_eth_set_rx_taildrop(priv, tx_pause);
 
 	/* When we manage the MAC/PHY using phylink there is no need
 	 * to manually update the netif_carrier.
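
As the hunk above shows, the helper now takes tx_pause directly (the caller
used to pass !tx_pause) and computes td.enable = !tx_pause internally, so both
the FQ and the CG taildrop are active only while Tx pause frame generation is
off. A hedged sketch of that mapping, using only names visible in this diff:

/* Sketch, not driver code: the enable decision made inside
 * dpaa2_eth_set_rx_taildrop() after this patch.
 */
static bool example_rx_taildrop_enabled(bool tx_pause)
{
	/* Pause frames and Rx taildrop are alternative congestion responses:
	 * when pause generation is on, taildrop stays off, and vice versa.
	 */
	return !tx_pause;
}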


@@ -40,7 +40,7 @@
  * frames in the Rx queues (length of the current frame is not
  * taken into account when making the taildrop decision)
  */
-#define DPAA2_ETH_TAILDROP_THRESH	(64 * 1024)
+#define DPAA2_ETH_FQ_TAILDROP_THRESH	(64 * 1024)
 
 /* Maximum number of Tx confirmation frames to be processed
  * in a single NAPI call
@@ -52,11 +52,20 @@
  * how many 64B frames fit inside the taildrop threshold and add a margin
  * to accommodate the buffer refill delay.
  */
-#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE	(DPAA2_ETH_TAILDROP_THRESH / 64)
+#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE	(DPAA2_ETH_FQ_TAILDROP_THRESH / 64)
 #define DPAA2_ETH_NUM_BUFS		(DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
 #define DPAA2_ETH_REFILL_THRESH \
 	(DPAA2_ETH_NUM_BUFS - DPAA2_ETH_BUFS_PER_CMD)
 
+/* Congestion group taildrop threshold: number of frames allowed to accumulate
+ * at any moment in a group of Rx queues belonging to the same traffic class.
+ * Choose value such that we don't risk depleting the buffer pool before the
+ * taildrop kicks in
+ */
+#define DPAA2_ETH_CG_TAILDROP_THRESH(priv) \
+	(DPAA2_ETH_MAX_FRAMES_PER_QUEUE * dpaa2_eth_queue_count(priv) / \
+	 dpaa2_eth_tc_count(priv))
+
 /* Maximum number of buffers that can be acquired/released through a single
  * QBMan command
  */
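
To make the sizing concrete, here is a worked example (standalone C, not part
of the patch; the 16-queue / 8-traffic-class counts are assumed values for
illustration). With the 64 KiB per-FQ threshold, DPAA2_ETH_MAX_FRAMES_PER_QUEUE
evaluates to 1024, and the congestion group threshold for the assumed counts
comes out to 2048 frames per traffic class.

/* Worked example only; the dpaa2_eth_queue_count()/dpaa2_eth_tc_count()
 * results are assumed (16 queues, 8 traffic classes) for illustration.
 */
#include <stdio.h>

#define FQ_TAILDROP_THRESH	(64 * 1024)			/* bytes per FQ */
#define MAX_FRAMES_PER_QUEUE	(FQ_TAILDROP_THRESH / 64)	/* = 1024 frames */
#define NUM_BUFS		(MAX_FRAMES_PER_QUEUE + 256)	/* = 1280 buffers */

int main(void)
{
	int queue_count = 16;	/* assumed dpaa2_eth_queue_count(priv) */
	int tc_count = 8;	/* assumed dpaa2_eth_tc_count(priv) */
	int cg_thresh = MAX_FRAMES_PER_QUEUE * queue_count / tc_count;

	/* 1024 * 16 / 8 = 2048 frames allowed per traffic-class group */
	printf("CG taildrop threshold: %d frames (buffer pool: %d buffers)\n",
	       cg_thresh, NUM_BUFS);
	return 0;
}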