Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from David Miller:

 1) Fix reference count leaks in various parts of batman-adv, from Xiyu Yang.
 2) Update NAT checksum even when it is zero, from Guillaume Nault.
 3) sk_psock reference count leak in tls code, also from Xiyu Yang.
 4) Sanity check TCA_FQ_CODEL_DROP_BATCH_SIZE netlink attribute in fq_codel, from Eric Dumazet.
 5) Fix panic in choke_reset(), also from Eric Dumazet.
 6) Fix VLAN accel handling in bnxt_fix_features(), from Michael Chan.
 7) Disallow out-of-range quantum values in sch_sfq, from Eric Dumazet.
 8) Fix crash in x25_disconnect(), from Yue Haibing.
 9) Don't pass a pointer to a local variable back to the caller in nf_osf_hdr_ctx_init(), from Arnd Bergmann.
10) Wireguard should use the ECN decap helper functions, from Toke Høiland-Jørgensen.
11) Fix command entry leak in mlx5 driver, from Moshe Shemesh.
12) Fix uninitialized variable access in mptcp's subflow_syn_recv_sock(), from Paolo Abeni.
13) Fix unnecessary out-of-order ingress frame ordering in macsec, from Scott Dial.
14) IPv6 needs to use a global serial number for dst validation, just like IPv4, from David Ahern.
15) Fix up PTP_1588_CLOCK deps, from Clay McClure.
16) Missing NLM_F_MULTI flag in gtp driver netlink messages, from Yoshiyuki Kurauchi.
17) Fix a regression so that DSA user port setup errors are no longer fatal, from Florian Fainelli.
18) Fix iomap leak in enetc driver, from Dejin Zheng.
19) Fix use after free in lec_arp_clear_vccs(), from Cong Wang.
20) Initialize protocol value earlier in neigh code paths when generating events, from Roman Mashak.
21) netdev_update_features() must be called with the RTNL mutex held in the macsec driver, from Antoine Tenart.
22) Validate untrusted GSO packets even more strictly, from Willem de Bruijn.
23) Wireguard decrypt worker needs a cond_resched(), from Jason Donenfeld.

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (111 commits)
  net: flow_offload: skip hw stats check for FLOW_ACTION_HW_STATS_DONT_CARE
  MAINTAINERS: put DYNAMIC INTERRUPT MODERATION in proper order
  wireguard: send/receive: use explicit unlikely branch instead of implicit coalescing
  wireguard: selftests: initalize ipv6 members to NULL to squelch clang warning
  wireguard: send/receive: cond_resched() when processing worker ringbuffers
  wireguard: socket: remove errant restriction on looping to self
  wireguard: selftests: use normal kernel stack size on ppc64
  net: ethernet: ti: am65-cpsw-nuss: fix irqs type
  ionic: Use debugfs_create_bool() to export bool
  net: dsa: Do not leave DSA master with NULL netdev_ops
  net: dsa: remove duplicate assignment in dsa_slave_add_cls_matchall_mirred
  net: stricter validation of untrusted gso packets
  seg6: fix SRH processing to comply with RFC8754
  net: mscc: ocelot: ANA_AUTOAGE_AGE_PERIOD holds a value in seconds, not ms
  net: dsa: ocelot: the MAC table on Felix is twice as large
  net: dsa: sja1105: the PTP_CLK extts input reacts on both edges
  selftests: net: tcp_mmap: fix SO_RCVLOWAT setting
  net: hsr: fix incorrect type usage for protocol variable
  net: macsec: fix rtnl locking issue
  net: mvpp2: cls: Prevent buffer overflow in mvpp2_ethtool_cls_rule_del()
  ...
commit a811c1fa0a
@@ -61,8 +61,8 @@ The ``ice`` driver reports the following versions
       - running
       - ICE OS Default Package
       - The name of the DDP package that is active in the device. The DDP
-        package is loaded by the driver during initialization. Each varation
-        of DDP package shall have a unique name.
+        package is loaded by the driver during initialization. Each
+        variation of the DDP package has a unique name.
     * - ``fw.app``
       - running
       - 1.3.1.0

@@ -5935,9 +5935,9 @@ F: lib/dynamic_debug.c

 DYNAMIC INTERRUPT MODERATION
 M:  Tal Gilboa <talgi@mellanox.com>
 S:  Maintained
+F:  Documentation/networking/net_dim.rst
 F:  include/linux/dim.h
 F:  lib/dim/
-F:  Documentation/networking/net_dim.rst

 DZ DECSTATION DZ11 SERIAL DRIVER
 M:  "Maciej W. Rozycki" <macro@linux-mips.org>

@@ -673,41 +673,14 @@ int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input)
     return 0;
 }

-/*
- * chcr_write_cpl_set_tcb_ulp: update tcb values.
- * TCB is responsible to create tcp headers, so all the related values
- * should be correctly updated.
- * @tx_info - driver specific tls info.
- * @q - tx queue on which packet is going out.
- * @tid - TCB identifier.
- * @pos - current index where should we start writing.
- * @word - TCB word.
- * @mask - TCB word related mask.
- * @val - TCB word related value.
- * @reply - set 1 if looking for TP response.
- * return - next position to write.
- */
-static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
-                    struct sge_eth_txq *q, u32 tid,
-                    void *pos, u16 word, u64 mask,
+static void *__chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
+                    u32 tid, void *pos, u16 word, u64 mask,
                     u64 val, u32 reply)
 {
     struct cpl_set_tcb_field_core *cpl;
     struct ulptx_idata *idata;
     struct ulp_txpkt *txpkt;
-    void *save_pos = NULL;
-    u8 buf[48] = {0};
-    int left;
-
-    left = (void *)q->q.stat - pos;
-    if (unlikely(left < CHCR_SET_TCB_FIELD_LEN)) {
-        if (!left) {
-            pos = q->q.desc;
-        } else {
-            save_pos = pos;
-            pos = buf;
-        }
-    }
     /* ULP_TXPKT */
     txpkt = pos;
     txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
@@ -732,18 +705,54 @@ static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
     idata = (struct ulptx_idata *)(cpl + 1);
     idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP));
     idata->len = htonl(0);
     pos = idata + 1;

-    if (save_pos) {
-        pos = chcr_copy_to_txd(buf, &q->q, save_pos,
-                       CHCR_SET_TCB_FIELD_LEN);
-    } else {
-        /* check again if we are at the end of the queue */
-        if (left == CHCR_SET_TCB_FIELD_LEN)
-            pos = q->q.desc;
-    }
-
     return pos;
 }

+/*
+ * chcr_write_cpl_set_tcb_ulp: update tcb values.
+ * TCB is responsible to create tcp headers, so all the related values
+ * should be correctly updated.
+ * @tx_info - driver specific tls info.
+ * @q - tx queue on which packet is going out.
+ * @tid - TCB identifier.
+ * @pos - current index where should we start writing.
+ * @word - TCB word.
+ * @mask - TCB word related mask.
+ * @val - TCB word related value.
+ * @reply - set 1 if looking for TP response.
+ * return - next position to write.
+ */
+static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
+                    struct sge_eth_txq *q, u32 tid,
+                    void *pos, u16 word, u64 mask,
+                    u64 val, u32 reply)
+{
+    int left = (void *)q->q.stat - pos;
+
+    if (unlikely(left < CHCR_SET_TCB_FIELD_LEN)) {
+        if (!left) {
+            pos = q->q.desc;
+        } else {
+            u8 buf[48] = {0};
+
+            __chcr_write_cpl_set_tcb_ulp(tx_info, tid, buf, word,
+                             mask, val, reply);
+
+            return chcr_copy_to_txd(buf, &q->q, pos,
+                        CHCR_SET_TCB_FIELD_LEN);
+        }
+    }
+
+    pos = __chcr_write_cpl_set_tcb_ulp(tx_info, tid, pos, word,
+                       mask, val, reply);
+
+    /* check again if we are at the end of the queue */
+    if (left == CHCR_SET_TCB_FIELD_LEN)
+        pos = q->q.desc;
+
+    return pos;
+}

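Reviewer note: the refactor above splits the TCB write into a raw __helper plus a wrapper that handles hitting the end of the descriptor ring by staging the record in a stack buffer and copying it in afterwards. A minimal userspace sketch of that bounce-buffer idea, with all names hypothetical and sizes invented for illustration:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define RING_SIZE 64
#define REC_LEN   16

/* Write a fixed-size record at 'off', bouncing through a stack buffer
 * when the record would straddle the end of the ring. Returns the next
 * write offset. */
static size_t ring_write(uint8_t *ring, size_t off, const uint8_t *rec)
{
    size_t left = RING_SIZE - off;

    if (left < REC_LEN) {
        uint8_t buf[REC_LEN];

        memcpy(buf, rec, REC_LEN);
        memcpy(ring + off, buf, left);        /* tail of the ring ... */
        memcpy(ring, buf + left, REC_LEN - left); /* ... then wrap */
        return REC_LEN - left;
    }
    memcpy(ring + off, rec, REC_LEN);
    off += REC_LEN;
    return off == RING_SIZE ? 0 : off;
}

int main(void)
{
    uint8_t ring[RING_SIZE] = {0}, rec[REC_LEN] = {0xab};

    printf("next offset: %zu\n", ring_write(ring, 56, rec)); /* wraps: 8 */
    return 0;
}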
@@ -24,8 +24,8 @@ config NET_DSA_MV88E6XXX_PTP
     bool "PTP support for Marvell 88E6xxx"
     default n
     depends on NET_DSA_MV88E6XXX_GLOBAL2
+    depends on PTP_1588_CLOCK
     imply NETWORK_PHY_TIMESTAMPING
-    imply PTP_1588_CLOCK
     help
       Say Y to enable PTP hardware timestamping on Marvell 88E6xxx switch
       chips that support it.

@@ -3962,7 +3962,6 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
     .serdes_get_stats = mv88e6390_serdes_get_stats,
     .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
     .serdes_get_regs = mv88e6390_serdes_get_regs,
-    .phylink_validate = mv88e6390_phylink_validate,
     .gpio_ops = &mv88e6352_gpio_ops,
     .phylink_validate = mv88e6390_phylink_validate,
 };
@@ -4021,7 +4020,6 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
     .serdes_get_stats = mv88e6390_serdes_get_stats,
     .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
     .serdes_get_regs = mv88e6390_serdes_get_regs,
-    .phylink_validate = mv88e6390_phylink_validate,
     .gpio_ops = &mv88e6352_gpio_ops,
     .phylink_validate = mv88e6390x_phylink_validate,
 };
@@ -4079,7 +4077,6 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
     .serdes_get_stats = mv88e6390_serdes_get_stats,
     .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
     .serdes_get_regs = mv88e6390_serdes_get_regs,
-    .phylink_validate = mv88e6390_phylink_validate,
     .avb_ops = &mv88e6390_avb_ops,
     .ptp_ops = &mv88e6352_ptp_ops,
     .phylink_validate = mv88e6390_phylink_validate,
@@ -4235,7 +4232,6 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
     .serdes_get_stats = mv88e6390_serdes_get_stats,
     .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
     .serdes_get_regs = mv88e6390_serdes_get_regs,
-    .phylink_validate = mv88e6390_phylink_validate,
     .gpio_ops = &mv88e6352_gpio_ops,
     .avb_ops = &mv88e6390_avb_ops,
     .ptp_ops = &mv88e6352_ptp_ops,

@@ -400,6 +400,7 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
     ocelot->stats_layout = felix->info->stats_layout;
     ocelot->num_stats = felix->info->num_stats;
     ocelot->shared_queue_sz = felix->info->shared_queue_sz;
+    ocelot->num_mact_rows = felix->info->num_mact_rows;
     ocelot->vcap_is2_keys = felix->info->vcap_is2_keys;
     ocelot->vcap_is2_actions= felix->info->vcap_is2_actions;
     ocelot->vcap = felix->info->vcap;

@@ -15,6 +15,7 @@ struct felix_info {
     const u32 *const *map;
     const struct ocelot_ops *ops;
     int shared_queue_sz;
+    int num_mact_rows;
     const struct ocelot_stat_layout *stats_layout;
     unsigned int num_stats;
     int num_ports;

@@ -1220,6 +1220,7 @@ struct felix_info felix_info_vsc9959 = {
     .vcap_is2_actions = vsc9959_vcap_is2_actions,
     .vcap = vsc9959_vcap_props,
     .shared_queue_sz = 128 * 1024,
+    .num_mact_rows = 2048,
     .num_ports = 6,
     .switch_pci_bar = 4,
     .imdio_pci_bar = 0,

@@ -20,6 +20,7 @@ tristate "NXP SJA1105 Ethernet switch family support"
 config NET_DSA_SJA1105_PTP
     bool "Support for the PTP clock on the NXP SJA1105 Ethernet switch"
     depends on NET_DSA_SJA1105
+    depends on PTP_1588_CLOCK
     help
       This enables support for timestamping and PTP clock manipulations in
       the SJA1105 DSA driver.

@@ -16,14 +16,15 @@

 /* PTPSYNCTS has no interrupt or update mechanism, because the intended
  * hardware use case is for the timestamp to be collected synchronously,
- * immediately after the CAS_MASTER SJA1105 switch has triggered a CASSYNC
- * pulse on the PTP_CLK pin. When used as a generic extts source, it needs
- * polling and a comparison with the old value. The polling interval is just
- * the Nyquist rate of a canonical PPS input (e.g. from a GPS module).
- * Anything of higher frequency than 1 Hz will be lost, since there is no
- * timestamp FIFO.
+ * immediately after the CAS_MASTER SJA1105 switch has performed a CASSYNC
+ * one-shot toggle (no return to level) on the PTP_CLK pin. When used as a
+ * generic extts source, the PTPSYNCTS register needs polling and a comparison
+ * with the old value. The polling interval is configured as the Nyquist rate
+ * of a signal with 50% duty cycle and 1Hz frequency, which is sadly all that
+ * this hardware can do (but may be enough for some setups). Anything of higher
+ * frequency than 1 Hz will be lost, since there is no timestamp FIFO.
  */
-#define SJA1105_EXTTS_INTERVAL (HZ / 2)
+#define SJA1105_EXTTS_INTERVAL (HZ / 4)

 /* This range is actually +/- SJA1105_MAX_ADJ_PPB
  * divided by 1000 (ppb -> ppm) and with a 16-bit
@@ -754,7 +755,16 @@ static int sja1105_extts_enable(struct sja1105_private *priv,
         return -EOPNOTSUPP;

     /* Reject requests with unsupported flags */
-    if (extts->flags)
+    if (extts->flags & ~(PTP_ENABLE_FEATURE |
+                 PTP_RISING_EDGE |
+                 PTP_FALLING_EDGE |
+                 PTP_STRICT_FLAGS))
         return -EOPNOTSUPP;

+    /* We can only enable time stamping on both edges, sadly. */
+    if ((extts->flags & PTP_STRICT_FLAGS) &&
+        (extts->flags & PTP_ENABLE_FEATURE) &&
+        (extts->flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES)
+        return -EOPNOTSUPP;
+
     rc = sja1105_change_ptp_clk_pin_func(priv, PTP_PF_EXTTS);

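Reviewer note: the validation pattern above — mask off every known flag, reject the rest, then reject strict requests the hardware cannot honour — is worth a standalone sketch. Flag values below mirror the PTP_* request flags from the UAPI header as an assumption; the check logic is the point:

#include <stdio.h>

/* Assumed to match linux/ptp_clock.h; hypothetical if they differ. */
#define PTP_ENABLE_FEATURE (1 << 0)
#define PTP_RISING_EDGE    (1 << 1)
#define PTP_FALLING_EDGE   (1 << 2)
#define PTP_STRICT_FLAGS   (1 << 3)
#define PTP_EXTTS_EDGES    (PTP_RISING_EDGE | PTP_FALLING_EDGE)

static int extts_validate(unsigned int flags)
{
    /* Reject any flag we do not understand */
    if (flags & ~(PTP_ENABLE_FEATURE | PTP_RISING_EDGE |
              PTP_FALLING_EDGE | PTP_STRICT_FLAGS))
        return -1;

    /* Hardware stamps both edges: a strict enable must ask for both */
    if ((flags & PTP_STRICT_FLAGS) && (flags & PTP_ENABLE_FEATURE) &&
        (flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES)
        return -1;

    return 0;
}

int main(void)
{
    /* -1: strict request for a single edge is refused */
    printf("%d\n", extts_validate(PTP_ENABLE_FEATURE | PTP_STRICT_FLAGS |
                      PTP_RISING_EDGE));
    return 0;
}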
@@ -69,7 +69,7 @@
  * 16kB.
  */
 #if PAGE_SIZE > SZ_16K
-#define ENA_PAGE_SIZE SZ_16K
+#define ENA_PAGE_SIZE (_AC(SZ_16K, UL))
 #else
 #define ENA_PAGE_SIZE PAGE_SIZE
 #endif

@@ -57,7 +57,7 @@ static const struct aq_board_revision_s hw_atl_boards[] = {
     { AQ_DEVICE_ID_D108, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc108, },
     { AQ_DEVICE_ID_D109, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc109, },

-    { AQ_DEVICE_ID_AQC100, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
+    { AQ_DEVICE_ID_AQC100, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc100, },
     { AQ_DEVICE_ID_AQC107, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
     { AQ_DEVICE_ID_AQC108, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc108, },
     { AQ_DEVICE_ID_AQC109, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109, },

@@ -172,6 +172,7 @@ static int bgmac_probe(struct platform_device *pdev)
 {
     struct device_node *np = pdev->dev.of_node;
     struct bgmac *bgmac;
+    struct resource *regs;
     const u8 *mac_addr;

     bgmac = bgmac_alloc(&pdev->dev);
@@ -206,16 +207,21 @@ static int bgmac_probe(struct platform_device *pdev)
     if (IS_ERR(bgmac->plat.base))
         return PTR_ERR(bgmac->plat.base);

-    bgmac->plat.idm_base =
-        devm_platform_ioremap_resource_byname(pdev, "idm_base");
-    if (IS_ERR(bgmac->plat.idm_base))
-        return PTR_ERR(bgmac->plat.idm_base);
-    bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK;
+    regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "idm_base");
+    if (regs) {
+        bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs);
+        if (IS_ERR(bgmac->plat.idm_base))
+            return PTR_ERR(bgmac->plat.idm_base);
+        bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK;
+    }

-    bgmac->plat.nicpm_base =
-        devm_platform_ioremap_resource_byname(pdev, "nicpm_base");
-    if (IS_ERR(bgmac->plat.nicpm_base))
-        return PTR_ERR(bgmac->plat.nicpm_base);
+    regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nicpm_base");
+    if (regs) {
+        bgmac->plat.nicpm_base = devm_ioremap_resource(&pdev->dev,
+                                   regs);
+        if (IS_ERR(bgmac->plat.nicpm_base))
+            return PTR_ERR(bgmac->plat.nicpm_base);
+    }

     bgmac->read = platform_bgmac_read;
     bgmac->write = platform_bgmac_write;

@@ -6642,7 +6642,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
     int rc;

     if (!mem_size)
-        return 0;
+        return -EINVAL;

     ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
     if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
@@ -9780,6 +9780,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
                        netdev_features_t features)
 {
     struct bnxt *bp = netdev_priv(dev);
+    netdev_features_t vlan_features;

     if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
         features &= ~NETIF_F_NTUPLE;
@@ -9796,12 +9797,14 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
     /* Both CTAG and STAG VLAN accelaration on the RX side have to be
      * turned on or off together.
      */
-    if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
-        (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
+    vlan_features = features & (NETIF_F_HW_VLAN_CTAG_RX |
+                    NETIF_F_HW_VLAN_STAG_RX);
+    if (vlan_features != (NETIF_F_HW_VLAN_CTAG_RX |
+                  NETIF_F_HW_VLAN_STAG_RX)) {
         if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
             features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
                       NETIF_F_HW_VLAN_STAG_RX);
-        else
+        else if (vlan_features)
             features |= NETIF_F_HW_VLAN_CTAG_RX |
                     NETIF_F_HW_VLAN_STAG_RX;
     }
@@ -12212,12 +12215,15 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
         bnxt_ulp_start(bp, err);
     }

-    if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
-        dev_close(netdev);
+    if (result != PCI_ERS_RESULT_RECOVERED) {
+        if (netif_running(netdev))
+            dev_close(netdev);
+        pci_disable_device(pdev);
+    }

     rtnl_unlock();

-    return PCI_ERS_RESULT_RECOVERED;
+    return result;
 }

 /**

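Reviewer note: the bnxt_fix_features() hunk keeps two feature bits that the hardware can only toggle together in sync, following the direction of the requested change. A small standalone sketch of that invariant (flag values invented for illustration):

#include <stdio.h>

#define F_VLAN_CTAG_RX (1u << 0)
#define F_VLAN_STAG_RX (1u << 1)
#define F_VLAN_BOTH    (F_VLAN_CTAG_RX | F_VLAN_STAG_RX)

/* If only one of the paired flags is requested: clear both when the
 * pair was previously on, otherwise set both (but only if at least one
 * was actually requested). */
static unsigned int fix_vlan_features(unsigned int cur, unsigned int req)
{
    unsigned int vlan = req & F_VLAN_BOTH;

    if (vlan != F_VLAN_BOTH) {
        if (cur & F_VLAN_CTAG_RX)
            req &= ~F_VLAN_BOTH;
        else if (vlan)
            req |= F_VLAN_BOTH;
    }
    return req;
}

int main(void)
{
    /* was on, user turned one off -> both end up off: prints 0 */
    printf("%#x\n", fix_vlan_features(F_VLAN_BOTH, F_VLAN_CTAG_RX) & F_VLAN_BOTH);
    return 0;
}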
@@ -1066,7 +1066,6 @@ struct bnxt_vf_info {
 #define BNXT_VF_LINK_FORCED 0x4
 #define BNXT_VF_LINK_UP     0x8
 #define BNXT_VF_TRUST       0x10
-    u32 func_flags; /* func cfg flags */
     u32 min_tx_rate;
     u32 max_tx_rate;
     void    *hwrm_cmd_req_addr;

@@ -43,7 +43,7 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl)
 #define BNXT_NVM_CFG_VER_BITS   24
 #define BNXT_NVM_CFG_VER_BYTES  4

-#define BNXT_MSIX_VEC_MAX   1280
+#define BNXT_MSIX_VEC_MAX   512
 #define BNXT_MSIX_VEC_MIN_MAX   128

 enum bnxt_nvm_dir_type {

@@ -85,11 +85,10 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
     if (old_setting == setting)
         return 0;

-    func_flags = vf->func_flags;
     if (setting)
-        func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
+        func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
     else
-        func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
+        func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
     /*TODO: if the driver supports VLAN filter on guest VLAN,
      * the spoof check should also include vlan anti-spoofing
      */
@@ -98,7 +97,6 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
     req.flags = cpu_to_le32(func_flags);
     rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
     if (!rc) {
-        vf->func_flags = func_flags;
         if (setting)
             vf->flags |= BNXT_VF_SPOOFCHK;
         else
@@ -228,7 +226,6 @@ int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
     memcpy(vf->mac_addr, mac, ETH_ALEN);
     bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
     req.fid = cpu_to_le16(vf->fw_fid);
-    req.flags = cpu_to_le32(vf->func_flags);
     req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
     memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
     return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
@@ -266,7 +263,6 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,

     bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
     req.fid = cpu_to_le16(vf->fw_fid);
-    req.flags = cpu_to_le32(vf->func_flags);
     req.dflt_vlan = cpu_to_le16(vlan_tag);
     req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
     rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
@@ -305,7 +301,6 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
         return 0;
     bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
     req.fid = cpu_to_le16(vf->fw_fid);
-    req.flags = cpu_to_le32(vf->func_flags);
     req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
     req.max_bw = cpu_to_le32(max_tx_rate);
     req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
@@ -477,7 +472,6 @@ static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
     vf = &bp->pf.vf[vf_id];
     bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
     req.fid = cpu_to_le16(vf->fw_fid);
-    req.flags = cpu_to_le32(vf->func_flags);

     if (is_valid_ether_addr(vf->mac_addr)) {
         req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);

@@ -35,8 +35,8 @@ config MACB
 config MACB_USE_HWSTAMP
     bool "Use IEEE 1588 hwstamp"
     depends on MACB
+    depends on PTP_1588_CLOCK
     default y
-    imply PTP_1588_CLOCK
     ---help---
       Enable IEEE 1588 Precision Time Protocol (PTP) support for MACB.

@@ -334,8 +334,10 @@ static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
     int status;

     status = pm_runtime_get_sync(&bp->pdev->dev);
-    if (status < 0)
+    if (status < 0) {
+        pm_runtime_put_noidle(&bp->pdev->dev);
         goto mdio_pm_exit;
+    }

     status = macb_mdio_wait_for_idle(bp);
     if (status < 0)
@@ -386,8 +388,10 @@ static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
     int status;

     status = pm_runtime_get_sync(&bp->pdev->dev);
-    if (status < 0)
+    if (status < 0) {
+        pm_runtime_put_noidle(&bp->pdev->dev);
         goto mdio_pm_exit;
+    }

     status = macb_mdio_wait_for_idle(bp);
     if (status < 0)
@@ -3816,8 +3820,10 @@ static int at91ether_open(struct net_device *dev)
     int ret;

     ret = pm_runtime_get_sync(&lp->pdev->dev);
-    if (ret < 0)
+    if (ret < 0) {
+        pm_runtime_put_noidle(&lp->pdev->dev);
         return ret;
+    }

     /* Clear internal statistics */
     ctl = macb_readl(lp, NCR);
@@ -4172,15 +4178,9 @@ static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,

 static int fu540_c000_init(struct platform_device *pdev)
 {
-    struct resource *res;
-
-    res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-    if (!res)
-        return -ENODEV;
-
-    mgmt->reg = ioremap(res->start, resource_size(res));
-    if (!mgmt->reg)
-        return -ENOMEM;
+    mgmt->reg = devm_platform_ioremap_resource(pdev, 1);
+    if (IS_ERR(mgmt->reg))
+        return PTR_ERR(mgmt->reg);

     return macb_init(pdev);
 }

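Reviewer note: the repeated macb hunks all apply the same rule: pm_runtime_get_sync() increments the device usage count even when it fails, so every error path must drop that reference with pm_runtime_put_noidle(). A hedged kernel-style sketch of the pattern (not a complete, buildable driver; the function name is hypothetical):

#include <linux/pm_runtime.h>

static int example_runtime_op(struct device *dev)
{
    int ret;

    ret = pm_runtime_get_sync(dev);
    if (ret < 0) {
        /* get_sync() bumped the usage count even though it failed,
         * so drop the reference before bailing out. */
        pm_runtime_put_noidle(dev);
        return ret;
    }

    /* ... access the hardware here ... */

    pm_runtime_put(dev);
    return 0;
}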
@@ -54,7 +54,7 @@ config THUNDER_NIC_RGX
 config CAVIUM_PTP
     tristate "Cavium PTP coprocessor as PTP clock"
     depends on 64BIT && PCI
-    imply PTP_1588_CLOCK
+    depends on PTP_1588_CLOCK
     ---help---
       This driver adds support for the Precision Time Protocol Clocks and
       Timestamping coprocessor (PTP) found on Cavium processors.

@@ -2207,6 +2207,9 @@ static void ethofld_hard_xmit(struct net_device *dev,
     if (unlikely(skip_eotx_wr)) {
         start = (u64 *)wr;
         eosw_txq->state = next_state;
+        eosw_txq->cred -= wrlen16;
+        eosw_txq->ncompl++;
+        eosw_txq->last_compl = 0;
         goto write_wr_headers;
     }

@@ -2365,6 +2368,34 @@ netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
     return cxgb4_eth_xmit(skb, dev);
 }

+static void eosw_txq_flush_pending_skbs(struct sge_eosw_txq *eosw_txq)
+{
+    int pktcount = eosw_txq->pidx - eosw_txq->last_pidx;
+    int pidx = eosw_txq->pidx;
+    struct sk_buff *skb;
+
+    if (!pktcount)
+        return;
+
+    if (pktcount < 0)
+        pktcount += eosw_txq->ndesc;
+
+    while (pktcount--) {
+        pidx--;
+        if (pidx < 0)
+            pidx += eosw_txq->ndesc;
+
+        skb = eosw_txq->desc[pidx].skb;
+        if (skb) {
+            dev_consume_skb_any(skb);
+            eosw_txq->desc[pidx].skb = NULL;
+            eosw_txq->inuse--;
+        }
+    }
+
+    eosw_txq->pidx = eosw_txq->last_pidx + 1;
+}
+
 /**
  * cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc.
  * @dev - netdevice
@@ -2440,9 +2471,11 @@ int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
                 FW_FLOWC_MNEM_EOSTATE_CLOSING :
                 FW_FLOWC_MNEM_EOSTATE_ESTABLISHED);

-    eosw_txq->cred -= len16;
-    eosw_txq->ncompl++;
-    eosw_txq->last_compl = 0;
+    /* Free up any pending skbs to ensure there's room for
+     * termination FLOWC.
+     */
+    if (tc == FW_SCHED_CLS_NONE)
+        eosw_txq_flush_pending_skbs(eosw_txq);

     ret = eosw_txq_enqueue(eosw_txq, skb);
     if (ret) {
@@ -2695,6 +2728,7 @@ static void ofldtxq_stop(struct sge_uld_txq *q, struct fw_wr_hdr *wr)
  * is ever running at a time ...
  */
 static void service_ofldq(struct sge_uld_txq *q)
+    __must_hold(&q->sendq.lock)
 {
     u64 *pos, *before, *end;
     int credits;

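Reviewer note: the new eosw_txq_flush_pending_skbs() walks a circular descriptor array backwards, wrapping the index below zero. The wraparound arithmetic is easy to get wrong, so here is a minimal standalone sketch of just that walk (sizes invented):

#include <stdio.h>

#define NDESC 8

/* Walk backwards from 'pidx' over 'count' slots of a circular array,
 * wrapping below zero, as the flush helper above does. */
static void walk_back(int pidx, int count)
{
    while (count--) {
        pidx--;
        if (pidx < 0)
            pidx += NDESC;
        printf("slot %d\n", pidx);
    }
}

int main(void)
{
    walk_back(2, 4);    /* visits 1, 0, 7, 6 */
    return 0;
}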
@@ -74,8 +74,8 @@ static int enetc_pci_mdio_probe(struct pci_dev *pdev,
     pci_disable_device(pdev);
 err_pci_enable:
 err_mdiobus_alloc:
+    iounmap(port_regs);
 err_hw_alloc:
-    iounmap(port_regs);
 err_ioremap:
     return err;
 }

@@ -2189,7 +2189,8 @@ static void __ibmvnic_reset(struct work_struct *work)
             rc = do_hard_reset(adapter, rwi, reset_state);
             rtnl_unlock();
         }
-    } else {
+    } else if (!(rwi->reset_reason == VNIC_RESET_FATAL &&
+            adapter->from_passive_init)) {
         rc = do_reset(adapter, rwi, reset_state);
     }
     kfree(rwi);

@@ -1428,6 +1428,9 @@ int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port,
     struct mvpp2_ethtool_fs *efs;
     int ret;

+    if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
+        return -EINVAL;
+
     efs = port->rfs_rules[info->fs.location];
     if (!efs)
         return -EINVAL;

@@ -4329,6 +4329,8 @@ static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,

     if (!mvpp22_rss_is_supported())
         return -EOPNOTSUPP;
+    if (rss_context >= MVPP22_N_RSS_TABLES)
+        return -EINVAL;

     if (hfunc)
         *hfunc = ETH_RSS_HASH_CRC32;

@@ -2550,6 +2550,7 @@ static int mlx4_allocate_default_counters(struct mlx4_dev *dev)

         if (!err || err == -ENOSPC) {
             priv->def_counter[port] = idx;
+            err = 0;
         } else if (err == -ENOENT) {
             err = 0;
             continue;
@@ -2600,7 +2601,8 @@ int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx, u8 usage)
                    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
         if (!err)
             *idx = get_param_l(&out_param);
-
+        if (WARN_ON(err == -ENOSPC))
+            err = -EINVAL;
         return err;
     }
     return __mlx4_counter_alloc(dev, idx);

@@ -888,7 +888,6 @@ static void cmd_work_handler(struct work_struct *work)
     }

     cmd->ent_arr[ent->idx] = ent;
-    set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
     lay = get_inst(cmd, ent->idx);
     ent->lay = lay;
     memset(lay, 0, sizeof(*lay));
@@ -910,6 +909,7 @@ static void cmd_work_handler(struct work_struct *work)

     if (ent->callback)
         schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
+    set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);

     /* Skip sending command to fw if internal error */
     if (pci_channel_offline(dev->pdev) ||
@@ -922,6 +922,10 @@ static void cmd_work_handler(struct work_struct *work)
         MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);

         mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
+        /* no doorbell, no need to keep the entry */
+        free_ent(cmd, ent->idx);
+        if (ent->callback)
+            free_cmd(ent);
         return;
     }

@@ -1773,19 +1773,14 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)

 static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
 {
-    int err = mlx5e_init_rep_rx(priv);
-
-    if (err)
-        return err;
-
     mlx5e_create_q_counters(priv);
-    return 0;
+    return mlx5e_init_rep_rx(priv);
 }

 static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv)
 {
-    mlx5e_destroy_q_counters(priv);
     mlx5e_cleanup_rep_rx(priv);
+    mlx5e_destroy_q_counters(priv);
 }

 static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)

@@ -1550,9 +1550,9 @@ static int esw_create_restore_table(struct mlx5_eswitch *esw)
                            MLX5_FLOW_NAMESPACE_KERNEL, 1,
                            modact);
     if (IS_ERR(mod_hdr)) {
+        err = PTR_ERR(mod_hdr);
         esw_warn(dev, "Failed to create restore mod header, err: %d\n",
              err);
-        err = PTR_ERR(mod_hdr);
         goto err_mod_hdr;
     }

@@ -2219,10 +2219,12 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
     total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);

     memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
+    mutex_init(&esw->fdb_table.offloads.vports.lock);
+    hash_init(esw->fdb_table.offloads.vports.table);

     err = esw_create_uplink_offloads_acl_tables(esw);
     if (err)
-        return err;
+        goto create_acl_err;

     err = esw_create_offloads_table(esw, total_vports);
     if (err)
@@ -2240,9 +2242,6 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
     if (err)
         goto create_fg_err;

-    mutex_init(&esw->fdb_table.offloads.vports.lock);
-    hash_init(esw->fdb_table.offloads.vports.table);
-
     return 0;

 create_fg_err:
@@ -2253,18 +2252,19 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
     esw_destroy_offloads_table(esw);
 create_offloads_err:
     esw_destroy_uplink_offloads_acl_tables(esw);
-
+create_acl_err:
+    mutex_destroy(&esw->fdb_table.offloads.vports.lock);
     return err;
 }

 static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
 {
-    mutex_destroy(&esw->fdb_table.offloads.vports.lock);
     esw_destroy_vport_rx_group(esw);
     esw_destroy_offloads_fdb_tables(esw);
     esw_destroy_restore_table(esw);
     esw_destroy_offloads_table(esw);
     esw_destroy_uplink_offloads_acl_tables(esw);
+    mutex_destroy(&esw->fdb_table.offloads.vports.lock);
 }

 static void
@@ -2377,9 +2377,9 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
 err_vports:
     esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
 err_uplink:
-    esw_set_passing_vport_metadata(esw, false);
-err_steering_init:
     esw_offloads_steering_cleanup(esw);
+err_steering_init:
+    esw_set_passing_vport_metadata(esw, false);
 err_vport_metadata:
     mlx5_rdma_disable_roce(esw->dev);
     mutex_destroy(&esw->offloads.termtbl_mutex);

@@ -695,6 +695,12 @@ static void dr_cq_event(struct mlx5_core_cq *mcq,
     pr_info("CQ event %u on CQ #%u\n", event, mcq->cqn);
 }

+static void dr_cq_complete(struct mlx5_core_cq *mcq,
+               struct mlx5_eqe *eqe)
+{
+    pr_err("CQ completion CQ: #%u\n", mcq->cqn);
+}
+
 static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
                       struct mlx5_uars_page *uar,
                       size_t ncqe)
@@ -756,6 +762,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
     mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, pas);

     cq->mcq.event = dr_cq_event;
+    cq->mcq.comp = dr_cq_complete;

     err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out));
     kvfree(in);
@@ -767,7 +774,12 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
     cq->mcq.set_ci_db = cq->wq_ctrl.db.db;
     cq->mcq.arm_db = cq->wq_ctrl.db.db + 1;
     *cq->mcq.set_ci_db = 0;
-    *cq->mcq.arm_db = 0;
+
+    /* set no-zero value, in order to avoid the HW to run db-recovery on
+     * CQ that used in polling mode.
+     */
+    *cq->mcq.arm_db = cpu_to_be32(2 << 28);
+
     cq->mcq.vector = 0;
     cq->mcq.irqn = irqn;
     cq->mcq.uar = uar;

@@ -986,8 +986,9 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
                 unsigned int priority,
                 struct mlxsw_afk_element_usage *elusage)
 {
+    struct mlxsw_sp_acl_tcam_vchunk *vchunk, *vchunk2;
     struct mlxsw_sp_acl_tcam_vregion *vregion;
-    struct mlxsw_sp_acl_tcam_vchunk *vchunk;
+    struct list_head *pos;
     int err;

     if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
@@ -1025,7 +1026,14 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
     }

     mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);
-    list_add_tail(&vchunk->list, &vregion->vchunk_list);
+
+    /* Position the vchunk inside the list according to priority */
+    list_for_each(pos, &vregion->vchunk_list) {
+        vchunk2 = list_entry(pos, typeof(*vchunk2), list);
+        if (vchunk2->priority > priority)
+            break;
+    }
+    list_add_tail(&vchunk->list, pos);
     mutex_unlock(&vregion->lock);

     return vchunk;

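Reviewer note: the list_for_each()/list_add_tail() pair above inserts the new chunk before the first entry with a higher priority, keeping the list sorted. The same idea in a standalone C sketch using a plain singly linked list:

#include <stdio.h>

struct node {
    int priority;
    struct node *next;
};

/* Insert 'n' before the first node with a higher priority. */
static void insert_sorted(struct node **head, struct node *n)
{
    struct node **pos = head;

    while (*pos && (*pos)->priority <= n->priority)
        pos = &(*pos)->next;
    n->next = *pos;
    *pos = n;
}

int main(void)
{
    struct node a = {1, NULL}, b = {5, NULL}, c = {3, NULL}, *head = NULL, *p;

    insert_sorted(&head, &a);
    insert_sorted(&head, &b);
    insert_sorted(&head, &c);
    for (p = head; p; p = p->next)
        printf("%d\n", p->priority);    /* 1 3 5 */
    return 0;
}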
@@ -36,7 +36,8 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
             err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
             if (err)
                 return err;
-        } else if (act->hw_stats != FLOW_ACTION_HW_STATS_DISABLED) {
+        } else if (act->hw_stats != FLOW_ACTION_HW_STATS_DISABLED &&
+               act->hw_stats != FLOW_ACTION_HW_STATS_DONT_CARE) {
             NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type");
             return -EOPNOTSUPP;
         }

@@ -564,7 +564,7 @@ static int moxart_remove(struct platform_device *pdev)
     struct net_device *ndev = platform_get_drvdata(pdev);

     unregister_netdev(ndev);
-    free_irq(ndev->irq, ndev);
+    devm_free_irq(&pdev->dev, ndev->irq, ndev);
     moxart_mac_free_memory(ndev);
     free_netdev(ndev);

@@ -1031,10 +1031,8 @@ int ocelot_fdb_dump(struct ocelot *ocelot, int port,
 {
     int i, j;

-    /* Loop through all the mac tables entries. There are 1024 rows of 4
-     * entries.
-     */
-    for (i = 0; i < 1024; i++) {
+    /* Loop through all the mac tables entries. */
+    for (i = 0; i < ocelot->num_mact_rows; i++) {
         for (j = 0; j < 4; j++) {
             struct ocelot_mact_entry entry;
             bool is_static;
@@ -1453,8 +1451,15 @@ static void ocelot_port_attr_stp_state_set(struct ocelot *ocelot, int port,

 void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs)
 {
-    ocelot_write(ocelot, ANA_AUTOAGE_AGE_PERIOD(msecs / 2),
-             ANA_AUTOAGE);
+    unsigned int age_period = ANA_AUTOAGE_AGE_PERIOD(msecs / 2000);
+
+    /* Setting AGE_PERIOD to zero effectively disables automatic aging,
+     * which is clearly not what our intention is. So avoid that.
+     */
+    if (!age_period)
+        age_period = 1;
+
+    ocelot_rmw(ocelot, age_period, ANA_AUTOAGE_AGE_PERIOD_M, ANA_AUTOAGE);
 }
 EXPORT_SYMBOL(ocelot_set_ageing_time);

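Reviewer note: the ageing fix is a units bug (the register wants half the ageing time in seconds, not milliseconds) plus a clamp so a small input does not silently disable ageing. The conversion in isolation:

#include <stdio.h>

/* Half the ageing time, converted ms -> s; clamp to 1 because a zero
 * period would disable ageing entirely, as the hunk above notes. */
static unsigned int age_period_from_msecs(unsigned int msecs)
{
    unsigned int period = msecs / 2000;

    return period ? period : 1;
}

int main(void)
{
    printf("%u\n", age_period_from_msecs(300000)); /* 150 */
    printf("%u\n", age_period_from_msecs(1000));   /* clamped to 1 */
    return 0;
}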
@@ -431,6 +431,7 @@ int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops)
     ocelot->stats_layout = ocelot_stats_layout;
     ocelot->num_stats = ARRAY_SIZE(ocelot_stats_layout);
     ocelot->shared_queue_sz = 224 * 1024;
+    ocelot->num_mact_rows = 1024;
     ocelot->ops = ops;

     ret = ocelot_regfields_init(ocelot, ocelot_regfields);

@@ -208,11 +208,13 @@ static int jazz_sonic_probe(struct platform_device *pdev)

     err = register_netdev(dev);
     if (err)
-        goto out1;
+        goto undo_probe1;

     return 0;

-out1:
+undo_probe1:
+    dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+              lp->descriptors, lp->descriptors_laddr);
     release_mem_region(dev->base_addr, SONIC_MEM_SIZE);
 out:
     free_netdev(dev);

@@ -283,6 +283,7 @@ nfp_abm_vnic_set_mac(struct nfp_pf *pf, struct nfp_abm *abm, struct nfp_net *nn,
     if (!nfp_nsp_has_hwinfo_lookup(nsp)) {
         nfp_warn(pf->cpp, "NSP doesn't support PF MAC generation\n");
         eth_hw_addr_random(nn->dp.netdev);
+        nfp_nsp_close(nsp);
         return;
     }

@@ -170,8 +170,7 @@ void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq)
     debugfs_create_x64("base_pa", 0400, cq_dentry, &cq->base_pa);
     debugfs_create_u32("num_descs", 0400, cq_dentry, &cq->num_descs);
     debugfs_create_u32("desc_size", 0400, cq_dentry, &cq->desc_size);
-    debugfs_create_u8("done_color", 0400, cq_dentry,
-              (u8 *)&cq->done_color);
+    debugfs_create_bool("done_color", 0400, cq_dentry, &cq->done_color);

     debugfs_create_file("tail", 0400, cq_dentry, cq, &cq_tail_fops);

@@ -2101,6 +2101,7 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
         ionic_txrx_free(lif);
     }
     ionic_lifs_deinit(ionic);
+    ionic_reset(ionic);
     ionic_qcqs_free(lif);

     dev_info(ionic->dev, "FW Down: LIFs stopped\n");
@@ -2116,6 +2117,7 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)

     dev_info(ionic->dev, "FW Up: restarting LIFs\n");

+    ionic_init_devinfo(ionic);
     err = ionic_qcqs_alloc(lif);
     if (err)
         goto err_out;
@@ -2549,8 +2551,6 @@ int ionic_lifs_register(struct ionic *ionic)
         dev_err(ionic->dev, "Cannot register net device, aborting\n");
         return err;
     }
-
-    ionic_link_status_check_request(ionic->master_lif);
     ionic->master_lif->registered = true;

     return 0;

@@ -624,7 +624,7 @@ int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
         total_offset += offset;
     }

-    total_ctr = cfg->ctr[0] + cfg->ctr[1] * 1000000000;
+    total_ctr = cfg->ctr[0] + cfg->ctr[1] * 1000000000ULL;
     total_ctr += total_offset;

     ctr_low = do_div(total_ctr, 1000000000);

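Reviewer note: the one-character dwmac5 fix is the classic "multiply overflows before the widening assignment" bug: both operands of `ctr[1] * 1000000000` are 32-bit, so the product wraps before it is stored into the 64-bit variable. The ULL suffix forces 64-bit arithmetic throughout. A standalone demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t hi = 5;

    /* 32-bit multiply wraps before the widening assignment... */
    uint64_t wrong = hi * 1000000000;
    /* ...while the ULL constant promotes the whole expression */
    uint64_t right = hi * 1000000000ULL;

    printf("wrong: %llu\n", (unsigned long long)wrong); /* 705032704 */
    printf("right: %llu\n", (unsigned long long)right); /* 5000000000 */
    return 0;
}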
@@ -4060,7 +4060,7 @@ static int stmmac_set_features(struct net_device *netdev,
 /**
  * stmmac_interrupt - main ISR
  * @irq: interrupt number.
- * @dev_id: to pass the net device pointer.
+ * @dev_id: to pass the net device pointer (must be valid).
  * Description: this is the main driver interrupt service routine.
  * It can call:
  * o DMA service routine (to manage incoming frame reception and transmission
@@ -4084,11 +4084,6 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
     if (priv->irq_wake)
         pm_wakeup_event(priv->device, 0);

-    if (unlikely(!dev)) {
-        netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
-        return IRQ_NONE;
-    }
-
     /* Check if adapter is up */
     if (test_bit(STMMAC_DOWN, &priv->state))
         return IRQ_HANDLED;
@@ -4991,7 +4986,7 @@ int stmmac_dvr_probe(struct device *device,
                         priv->plat->bsp_priv);

         if (ret < 0)
-            return ret;
+            goto error_serdes_powerup;
     }

 #ifdef CONFIG_DEBUG_FS
@@ -5000,6 +4995,8 @@ int stmmac_dvr_probe(struct device *device,

     return ret;

+error_serdes_powerup:
+    unregister_netdev(ndev);
 error_netdev_register:
     phylink_destroy(priv->phylink);
 error_phy_setup:

@@ -90,9 +90,8 @@ config TI_CPTS
 config TI_CPTS_MOD
     tristate
     depends on TI_CPTS
+    depends on PTP_1588_CLOCK
     default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y || TI_CPSW_SWITCHDEV=y
     select NET_PTP_CLASSIFY
-    imply PTP_1588_CLOCK
     default m

 config TI_K3_AM65_CPSW_NUSS

@@ -1719,7 +1719,8 @@ static int am65_cpsw_nuss_ndev_add_napi_2g(struct am65_cpsw_common *common)

         ret = devm_request_irq(dev, tx_chn->irq,
                        am65_cpsw_nuss_tx_irq,
-                       0, tx_chn->tx_chn_name, tx_chn);
+                       IRQF_TRIGGER_HIGH,
+                       tx_chn->tx_chn_name, tx_chn);
         if (ret) {
             dev_err(dev, "failure requesting tx%u irq %u, %d\n",
                 tx_chn->id, tx_chn->irq, ret);
@@ -1744,7 +1745,7 @@ static int am65_cpsw_nuss_ndev_reg_2g(struct am65_cpsw_common *common)

     ret = devm_request_irq(dev, common->rx_chns.irq,
                    am65_cpsw_nuss_rx_irq,
-                   0, dev_name(dev), common);
+                   IRQF_TRIGGER_HIGH, dev_name(dev), common);
     if (ret) {
         dev_err(dev, "failure requesting rx irq %u, %d\n",
             common->rx_chns.irq, ret);

@@ -643,7 +643,7 @@ static int tc_mii_probe(struct net_device *dev)
         linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, mask);
         linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
     }
-    linkmode_and(phydev->supported, phydev->supported, mask);
+    linkmode_andnot(phydev->supported, phydev->supported, mask);
     linkmode_copy(phydev->advertising, phydev->supported);

     lp->link = 0;

@@ -1169,11 +1169,11 @@ static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info)
 static struct genl_family gtp_genl_family;

 static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
-                  u32 type, struct pdp_ctx *pctx)
+                  int flags, u32 type, struct pdp_ctx *pctx)
 {
     void *genlh;

-    genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, 0,
+    genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags,
                 type);
     if (genlh == NULL)
         goto nlmsg_failure;
@@ -1227,8 +1227,8 @@ static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info)
         goto err_unlock;
     }

-    err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid,
-                 info->snd_seq, info->nlhdr->nlmsg_type, pctx);
+    err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid, info->snd_seq,
+                 0, info->nlhdr->nlmsg_type, pctx);
     if (err < 0)
         goto err_unlock_free;

@@ -1271,6 +1271,7 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb,
                 gtp_genl_fill_info(skb,
                            NETLINK_CB(cb->skb).portid,
                            cb->nlh->nlmsg_seq,
+                           NLM_F_MULTI,
                            cb->nlh->nlmsg_type, pctx)) {
                 cb->args[0] = i;
                 cb->args[1] = j;

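Reviewer note: the gtp fix threads a flags argument through so that dump replies carry NLM_F_MULTI, which tells userspace that more messages follow until NLMSG_DONE. A hedged kernel-style sketch of the pattern (not buildable standalone; 'example_family' and 'EXAMPLE_CMD_GET' are hypothetical names):

static int example_fill(struct sk_buff *skb, u32 portid, u32 seq,
            int flags, u8 cmd)
{
    void *hdr = genlmsg_put(skb, portid, seq, &example_family, flags, cmd);

    if (!hdr)
        return -EMSGSIZE;
    /* ... nla_put() attributes here ... */
    genlmsg_end(skb, hdr);
    return 0;
}

static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
    /* Dump replies are multi-part: pass NLM_F_MULTI.
     * A single get reply would pass 0 instead. */
    return example_fill(skb, NETLINK_CB(cb->skb).portid,
                cb->nlh->nlmsg_seq, NLM_F_MULTI,
                EXAMPLE_CMD_GET);
}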
@@ -707,7 +707,8 @@ static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx)
     goto drop;
 }

-static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t netvsc_start_xmit(struct sk_buff *skb,
+                     struct net_device *ndev)
 {
     return netvsc_xmit(skb, ndev, false);
 }

@@ -1041,6 +1041,7 @@ static void gsi_isr_gp_int1(struct gsi *gsi)

     complete(&gsi->completion);
 }
+
 /* Inter-EE interrupt handler */
 static void gsi_isr_glob_ee(struct gsi *gsi)
 {
@@ -1493,6 +1494,12 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
     struct completion *completion = &gsi->completion;
     u32 val;

+    /* First zero the result code field */
+    val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
+    val &= ~GENERIC_EE_RESULT_FMASK;
+    iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
+
+    /* Now issue the command */
     val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
     val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
     val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
@@ -1798,9 +1805,9 @@ static int gsi_channel_init_one(struct gsi *gsi,

     /* Worst case we need an event for every outstanding TRE */
     if (data->channel.tre_count > data->channel.event_count) {
-        dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
-             data->channel_id, data->channel.tre_count);
         tre_count = data->channel.event_count;
+        dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
+             data->channel_id, tre_count);
     } else {
         tre_count = data->channel.tre_count;
     }

@@ -410,6 +410,8 @@
 #define INTER_EE_RESULT_FMASK           GENMASK(2, 0)
 #define GENERIC_EE_RESULT_FMASK         GENMASK(7, 5)
 #define GENERIC_EE_SUCCESS_FVAL         1
+#define GENERIC_EE_INCORRECT_DIRECTION_FVAL 3
+#define GENERIC_EE_INCORRECT_CHANNEL_FVAL   5
 #define GENERIC_EE_NO_RESOURCES_FVAL        7
 #define USB_MAX_PACKET_FMASK            GENMASK(15, 15) /* 0: HS; 1: SS */
 #define MHI_BASE_CHANNEL_FMASK          GENMASK(31, 24)

@@ -1283,7 +1283,7 @@ static int ipa_endpoint_stop_rx_dma(struct ipa *ipa)
  */
 int ipa_endpoint_stop(struct ipa_endpoint *endpoint)
 {
-    u32 retries = endpoint->toward_ipa ? 0 : IPA_ENDPOINT_STOP_RX_RETRIES;
+    u32 retries = IPA_ENDPOINT_STOP_RX_RETRIES;
     int ret;

     do {
@@ -1291,12 +1291,9 @@ int ipa_endpoint_stop(struct ipa_endpoint *endpoint)
         struct gsi *gsi = &ipa->gsi;

         ret = gsi_channel_stop(gsi, endpoint->channel_id);
-        if (ret != -EAGAIN)
+        if (ret != -EAGAIN || endpoint->toward_ipa)
             break;

-        if (endpoint->toward_ipa)
-            continue;
-
         /* For IPA v3.5.1, send a DMA read task and check again */
         if (ipa->version == IPA_VERSION_3_5_1) {
             ret = ipa_endpoint_stop_rx_dma(ipa);

@@ -1305,7 +1305,8 @@ static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
     struct crypto_aead *tfm;
     int ret;

-    tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
+    /* Pick a sync gcm(aes) cipher to ensure order is preserved. */
+    tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);

     if (IS_ERR(tfm))
         return tfm;
@@ -2640,11 +2641,12 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
     if (ret)
         goto rollback;

-    rtnl_unlock();
     /* Force features update, since they are different for SW MACSec and
      * HW offloading cases.
      */
     netdev_update_features(dev);
+
+    rtnl_unlock();
     return 0;

 rollback:

@@ -1120,7 +1120,7 @@ static struct dp83640_clock *dp83640_clock_get_bus(struct mii_bus *bus)
         goto out;
     }
     dp83640_clock_init(clock, bus);
-    list_add_tail(&phyter_clocks, &clock->list);
+    list_add_tail(&clock->list, &phyter_clocks);
 out:
     mutex_unlock(&phyter_clocks_lock);

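Reviewer note: the dp83640 one-liner is a swapped-argument bug: list_add_tail(new, head) takes the entry being added first and the list head second; reversing them links the head into the entry's list and corrupts both. A hedged kernel-style sketch of correct usage (names hypothetical, not buildable standalone):

#include <linux/list.h>

struct example_clock {
    struct list_head list;
};

static LIST_HEAD(example_clocks);

static void example_register_clock(struct example_clock *c)
{
    /* entry first, list head second */
    list_add_tail(&c->list, &example_clocks);
}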
@@ -137,19 +137,18 @@ static int dp83822_set_wol(struct phy_device *phydev,
             value &= ~DP83822_WOL_SECURE_ON;
         }

-        value |= (DP83822_WOL_EN | DP83822_WOL_INDICATION_SEL |
-              DP83822_WOL_CLR_INDICATION);
-        phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG,
-              value);
-    } else {
-        value = phy_read_mmd(phydev, DP83822_DEVADDR,
-                     MII_DP83822_WOL_CFG);
-        value &= ~DP83822_WOL_EN;
-        phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG,
-              value);
-    }
         /* Clear any pending WoL interrupt */
         phy_read(phydev, MII_DP83822_MISR2);

-    return 0;
+        value |= DP83822_WOL_EN | DP83822_WOL_INDICATION_SEL |
+             DP83822_WOL_CLR_INDICATION;
+
+        return phy_write_mmd(phydev, DP83822_DEVADDR,
+                     MII_DP83822_WOL_CFG, value);
+    } else {
+        return phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
+                      MII_DP83822_WOL_CFG, DP83822_WOL_EN);
+    }
 }

 static void dp83822_get_wol(struct phy_device *phydev,
@@ -258,12 +257,11 @@ static int dp83822_config_intr(struct phy_device *phydev)

 static int dp83822_config_init(struct phy_device *phydev)
 {
-    int value;
+    int value = DP83822_WOL_EN | DP83822_WOL_MAGIC_EN |
+            DP83822_WOL_SECURE_ON;

-    value = DP83822_WOL_MAGIC_EN | DP83822_WOL_SECURE_ON | DP83822_WOL_EN;
-
-    return phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG,
-                 value);
+    return phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
+                  MII_DP83822_WOL_CFG, value);
 }

 static int dp83822_phy_reset(struct phy_device *phydev)

@@ -139,16 +139,19 @@ static int dp83811_set_wol(struct phy_device *phydev,
             value &= ~DP83811_WOL_SECURE_ON;
         }

-        value |= (DP83811_WOL_EN | DP83811_WOL_INDICATION_SEL |
-              DP83811_WOL_CLR_INDICATION);
-        phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG,
-              value);
+        /* Clear any pending WoL interrupt */
+        phy_read(phydev, MII_DP83811_INT_STAT1);
+
+        value |= DP83811_WOL_EN | DP83811_WOL_INDICATION_SEL |
+             DP83811_WOL_CLR_INDICATION;
+
+        return phy_write_mmd(phydev, DP83811_DEVADDR,
+                     MII_DP83811_WOL_CFG, value);
     } else {
-        phy_clear_bits_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG,
-                   DP83811_WOL_EN);
+        return phy_clear_bits_mmd(phydev, DP83811_DEVADDR,
+                      MII_DP83811_WOL_CFG, DP83811_WOL_EN);
     }
-
-    return 0;
 }

 static void dp83811_get_wol(struct phy_device *phydev,
@@ -292,8 +295,8 @@ static int dp83811_config_init(struct phy_device *phydev)

     value = DP83811_WOL_MAGIC_EN | DP83811_WOL_SECURE_ON | DP83811_WOL_EN;

-    return phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG,
-                 value);
+    return phy_clear_bits_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG,
+                  value);
 }

 static int dp83811_phy_reset(struct phy_device *phydev)

@@ -66,6 +66,9 @@ enum {
     MV_PCS_CSSR1_SPD2_2500  = 0x0004,
     MV_PCS_CSSR1_SPD2_10000 = 0x0000,

+    /* Temperature read register (88E2110 only) */
+    MV_PCS_TEMP     = 0x8042,
+
     /* These registers appear at 0x800X and 0xa00X - the 0xa00X control
      * registers appear to set themselves to the 0x800X when AN is
      * restarted, but status registers appear readable from either.
@@ -77,6 +80,7 @@ enum {
     MV_V2_PORT_CTRL     = 0xf001,
     MV_V2_PORT_CTRL_SWRST   = BIT(15),
     MV_V2_PORT_CTRL_PWRDOWN = BIT(11),
+    /* Temperature control/read registers (88X3310 only) */
     MV_V2_TEMP_CTRL     = 0xf08a,
     MV_V2_TEMP_CTRL_MASK    = 0xc000,
     MV_V2_TEMP_CTRL_SAMPLE  = 0x0000,
@@ -104,6 +108,24 @@ static umode_t mv3310_hwmon_is_visible(const void *data,
     return 0;
 }

+static int mv3310_hwmon_read_temp_reg(struct phy_device *phydev)
+{
+    return phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_V2_TEMP);
+}
+
+static int mv2110_hwmon_read_temp_reg(struct phy_device *phydev)
+{
+    return phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_TEMP);
+}
+
+static int mv10g_hwmon_read_temp_reg(struct phy_device *phydev)
+{
+    if (phydev->drv->phy_id == MARVELL_PHY_ID_88X3310)
+        return mv3310_hwmon_read_temp_reg(phydev);
+    else /* MARVELL_PHY_ID_88E2110 */
+        return mv2110_hwmon_read_temp_reg(phydev);
+}
+
 static int mv3310_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
                  u32 attr, int channel, long *value)
 {
@@ -116,7 +138,7 @@ static int mv3310_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
     }

     if (type == hwmon_temp && attr == hwmon_temp_input) {
-        temp = phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_V2_TEMP);
+        temp = mv10g_hwmon_read_temp_reg(phydev);
         if (temp < 0)
             return temp;

@@ -169,6 +191,9 @@ static int mv3310_hwmon_config(struct phy_device *phydev, bool enable)
     u16 val;
     int ret;

+    if (phydev->drv->phy_id != MARVELL_PHY_ID_88X3310)
+        return 0;
+
     ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_V2_TEMP,
                 MV_V2_TEMP_UNKNOWN);
     if (ret < 0)

@@ -1359,6 +1359,7 @@ static const struct usb_device_id products[] = {
     {QMI_FIXED_INTF(0x413c, 0x81b3, 8)},    /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
     {QMI_FIXED_INTF(0x413c, 0x81b6, 8)},    /* Dell Wireless 5811e */
     {QMI_FIXED_INTF(0x413c, 0x81b6, 10)},   /* Dell Wireless 5811e */
+    {QMI_FIXED_INTF(0x413c, 0x81cc, 8)},    /* Dell Wireless 5816e */
     {QMI_FIXED_INTF(0x413c, 0x81d7, 0)},    /* Dell Wireless 5821e */
     {QMI_FIXED_INTF(0x413c, 0x81d7, 1)},    /* Dell Wireless 5821e preproduction config */
     {QMI_FIXED_INTF(0x413c, 0x81e0, 0)},    /* Dell Wireless 5821e with eSIM support*/

@@ -35,8 +35,10 @@ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
     if (multicore) {
         queue->worker = wg_packet_percpu_multicore_worker_alloc(
                 function, queue);
-        if (!queue->worker)
+        if (!queue->worker) {
+            ptr_ring_cleanup(&queue->ring, NULL);
             return -ENOMEM;
+        }
     } else {
         INIT_WORK(&queue->work, function);
     }

@@ -226,21 +226,20 @@ void wg_packet_handshake_receive_worker(struct work_struct *work)
 static void keep_key_fresh(struct wg_peer *peer)
 {
     struct noise_keypair *keypair;
-    bool send = false;
+    bool send;

     if (peer->sent_lastminute_handshake)
         return;

     rcu_read_lock_bh();
     keypair = rcu_dereference_bh(peer->keypairs.current_keypair);
-    if (likely(keypair && READ_ONCE(keypair->sending.is_valid)) &&
-        keypair->i_am_the_initiator &&
-        unlikely(wg_birthdate_has_expired(keypair->sending.birthdate,
-            REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT)))
-        send = true;
+    send = keypair && READ_ONCE(keypair->sending.is_valid) &&
+           keypair->i_am_the_initiator &&
+           wg_birthdate_has_expired(keypair->sending.birthdate,
+            REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT);
     rcu_read_unlock_bh();

-    if (send) {
+    if (unlikely(send)) {
         peer->sent_lastminute_handshake = true;
         wg_packet_send_queued_handshake_initiation(peer, false);
     }
@@ -393,13 +392,11 @@ static void wg_packet_consume_data_done(struct wg_peer *peer,
         len = ntohs(ip_hdr(skb)->tot_len);
         if (unlikely(len < sizeof(struct iphdr)))
             goto dishonest_packet_size;
-        if (INET_ECN_is_ce(PACKET_CB(skb)->ds))
-            IP_ECN_set_ce(ip_hdr(skb));
+        INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ip_hdr(skb)->tos);
     } else if (skb->protocol == htons(ETH_P_IPV6)) {
         len = ntohs(ipv6_hdr(skb)->payload_len) +
               sizeof(struct ipv6hdr);
-        if (INET_ECN_is_ce(PACKET_CB(skb)->ds))
-            IP6_ECN_set_ce(skb, ipv6_hdr(skb));
+        INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ipv6_get_dsfield(ipv6_hdr(skb)));
     } else {
         goto dishonest_packet_type;
     }
@@ -518,6 +515,8 @@ void wg_packet_decrypt_worker(struct work_struct *work)
                 &PACKET_CB(skb)->keypair->receiving)) ?
                 PACKET_STATE_CRYPTED : PACKET_STATE_DEAD;
         wg_queue_enqueue_per_peer_napi(skb, state);
+        if (need_resched())
+            cond_resched();
     }
 }

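Reviewer note: the decrypt-worker hunk (and its twins in send.c below) adds a scheduling point to a loop that can drain a very full ring, so one worker cannot monopolize a CPU. A hedged kernel-style sketch of the pattern (names are illustrative, not the driver's; not buildable standalone):

static void example_worker(struct work_struct *work)
{
    struct sk_buff *skb;

    /* Drain the ring, but offer the scheduler a chance to run
     * between items so a long backlog cannot hog the CPU. */
    while ((skb = ptr_ring_consume_bh(&example_ring)) != NULL) {
        process_one(skb);
        if (need_resched())
            cond_resched();
    }
}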
@@ -120,9 +120,9 @@ bool __init wg_ratelimiter_selftest(void)
     enum { TRIALS_BEFORE_GIVING_UP = 5000 };
     bool success = false;
     int test = 0, trials;
-    struct sk_buff *skb4, *skb6;
+    struct sk_buff *skb4, *skb6 = NULL;
     struct iphdr *hdr4;
-    struct ipv6hdr *hdr6;
+    struct ipv6hdr *hdr6 = NULL;

     if (IS_ENABLED(CONFIG_KASAN) || IS_ENABLED(CONFIG_UBSAN))
         return true;

@@ -124,20 +124,17 @@ void wg_packet_send_handshake_cookie(struct wg_device *wg,
 static void keep_key_fresh(struct wg_peer *peer)
 {
     struct noise_keypair *keypair;
-    bool send = false;
+    bool send;

     rcu_read_lock_bh();
     keypair = rcu_dereference_bh(peer->keypairs.current_keypair);
-    if (likely(keypair && READ_ONCE(keypair->sending.is_valid)) &&
-        (unlikely(atomic64_read(&keypair->sending.counter.counter) >
-              REKEY_AFTER_MESSAGES) ||
-         (keypair->i_am_the_initiator &&
-          unlikely(wg_birthdate_has_expired(keypair->sending.birthdate,
-                        REKEY_AFTER_TIME)))))
-        send = true;
+    send = keypair && READ_ONCE(keypair->sending.is_valid) &&
+           (atomic64_read(&keypair->sending.counter.counter) > REKEY_AFTER_MESSAGES ||
+        (keypair->i_am_the_initiator &&
+         wg_birthdate_has_expired(keypair->sending.birthdate, REKEY_AFTER_TIME)));
     rcu_read_unlock_bh();

-    if (send)
+    if (unlikely(send))
         wg_packet_send_queued_handshake_initiation(peer, false);
 }
@@ -281,6 +278,8 @@ void wg_packet_tx_worker(struct work_struct *work)

         wg_noise_keypair_put(keypair, false);
         wg_peer_put(peer);
+        if (need_resched())
+            cond_resched();
     }
 }
@@ -304,7 +303,8 @@ void wg_packet_encrypt_worker(struct work_struct *work)
         }
         wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first,
                       state);
-
+        if (need_resched())
+            cond_resched();
     }
 }

@@ -76,12 +76,6 @@ static int send4(struct wg_device *wg, struct sk_buff *skb,
		net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n",
				    wg->dev->name, &endpoint->addr, ret);
		goto err;
	} else if (unlikely(rt->dst.dev == skb->dev)) {
		ip_rt_put(rt);
		ret = -ELOOP;
		net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n",
				    wg->dev->name, &endpoint->addr);
		goto err;
	}
	if (cache)
		dst_cache_set_ip4(cache, &rt->dst, fl.saddr);

@@ -149,12 +143,6 @@ static int send6(struct wg_device *wg, struct sk_buff *skb,
		net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n",
				    wg->dev->name, &endpoint->addr, ret);
		goto err;
	} else if (unlikely(dst->dev == skb->dev)) {
		dst_release(dst);
		ret = -ELOOP;
		net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n",
				    wg->dev->name, &endpoint->addr);
		goto err;
	}
	if (cache)
		dst_cache_set_ip6(cache, dst, &fl.saddr);

@@ -6717,17 +6717,17 @@ int qeth_stop(struct net_device *dev)
		unsigned int i;

		/* Quiesce the NAPI instances: */
		qeth_for_each_output_queue(card, queue, i) {
		qeth_for_each_output_queue(card, queue, i)
			napi_disable(&queue->napi);
			del_timer_sync(&queue->timer);
		}

		/* Stop .ndo_start_xmit, might still access queue->napi. */
		netif_tx_disable(dev);

		/* Queues may get re-allocated, so remove the NAPIs here. */
		qeth_for_each_output_queue(card, queue, i)
		qeth_for_each_output_queue(card, queue, i) {
			del_timer_sync(&queue->timer);
			/* Queues may get re-allocated, so remove the NAPIs. */
			netif_napi_del(&queue->napi);
		}
	} else {
		netif_tx_disable(dev);
	}

@@ -181,14 +181,14 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
			break;
		}

		vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
		added = true;

		/* Deliver to monitoring devices all correctly transmitted
		 * packets.
		/* Deliver to monitoring devices all packets that we
		 * will transmit.
		 */
		virtio_transport_deliver_tap_pkt(pkt);

		vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
		added = true;

		pkt->off += payload_len;
		total_len += payload_len;

@@ -196,6 +196,12 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
		 * to send it with the next available buffer.
		 */
		if (pkt->off < pkt->len) {
			/* We are queueing the same virtio_vsock_pkt to handle
			 * the remaining bytes, and we want to deliver it
			 * to monitoring devices in the next iteration.
			 */
			pkt->tap_delivered = false;

			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);

@@ -78,47 +78,6 @@ struct tcp_sack_block {
#define TCP_SACK_SEEN (1 << 0) /*1 = peer is SACK capable, */
#define TCP_DSACK_SEEN (1 << 2) /*1 = DSACK was received from peer*/

#if IS_ENABLED(CONFIG_MPTCP)
struct mptcp_options_received {
	u64 sndr_key;
	u64 rcvr_key;
	u64 data_ack;
	u64 data_seq;
	u32 subflow_seq;
	u16 data_len;
	u16 mp_capable : 1,
		mp_join : 1,
		dss : 1,
		add_addr : 1,
		rm_addr : 1,
		family : 4,
		echo : 1,
		backup : 1;
	u32 token;
	u32 nonce;
	u64 thmac;
	u8 hmac[20];
	u8 join_id;
	u8 use_map:1,
		dsn64:1,
		data_fin:1,
		use_ack:1,
		ack64:1,
		mpc_map:1,
		__unused:2;
	u8 addr_id;
	u8 rm_id;
	union {
		struct in_addr addr;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
		struct in6_addr addr6;
#endif
	};
	u64 ahmac;
	u16 port;
};
#endif

struct tcp_options_received {
	/* PAWS/RTTM data */
	int ts_recent_stamp;/* Time we stored ts_recent (for aging) */

@@ -136,9 +95,6 @@ struct tcp_options_received {
	u8 num_sacks;	/* Number of SACK blocks */
	u16 user_mss;	/* mss requested by user in ioctl */
	u16 mss_clamp;	/* Maximal mss, negotiated at connection setup */
#if IS_ENABLED(CONFIG_MPTCP)
	struct mptcp_options_received mptcp;
#endif
};

static inline void tcp_clear_options(struct tcp_options_received *rx_opt)

@@ -148,13 +104,6 @@ static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
#if IS_ENABLED(CONFIG_SMC)
	rx_opt->smc_ok = 0;
#endif
#if IS_ENABLED(CONFIG_MPTCP)
	rx_opt->mptcp.mp_capable = 0;
	rx_opt->mptcp.mp_join = 0;
	rx_opt->mptcp.add_addr = 0;
	rx_opt->mptcp.rm_addr = 0;
	rx_opt->mptcp.dss = 0;
#endif
}

/* This is the max number of SACKS that we'll generate and process. It's safe

@@ -3,6 +3,8 @@
#define _LINUX_VIRTIO_NET_H

#include <linux/if_vlan.h>
#include <uapi/linux/tcp.h>
#include <uapi/linux/udp.h>
#include <uapi/linux/virtio_net.h>

static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,

@@ -28,17 +30,25 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
					bool little_endian)
{
	unsigned int gso_type = 0;
	unsigned int thlen = 0;
	unsigned int ip_proto;

	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			gso_type = SKB_GSO_TCPV4;
			ip_proto = IPPROTO_TCP;
			thlen = sizeof(struct tcphdr);
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			gso_type = SKB_GSO_TCPV6;
			ip_proto = IPPROTO_TCP;
			thlen = sizeof(struct tcphdr);
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			gso_type = SKB_GSO_UDP;
			ip_proto = IPPROTO_UDP;
			thlen = sizeof(struct udphdr);
			break;
		default:
			return -EINVAL;

@@ -57,16 +67,22 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,

		if (!skb_partial_csum_set(skb, start, off))
			return -EINVAL;

		if (skb_transport_offset(skb) + thlen > skb_headlen(skb))
			return -EINVAL;
	} else {
		/* gso packets without NEEDS_CSUM do not set transport_offset.
		 * probe and drop if it does not match one of the above types.
		 */
		if (gso_type && skb->network_header) {
			struct flow_keys_basic keys;

			if (!skb->protocol)
				virtio_net_hdr_set_proto(skb, hdr);
retry:
			skb_probe_transport_header(skb);
			if (!skb_transport_header_was_set(skb)) {
			if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
							      NULL, 0, 0, 0,
							      0)) {
				/* UFO does not specify ipv4 or 6: try both */
				if (gso_type & SKB_GSO_UDP &&
				    skb->protocol == htons(ETH_P_IP)) {

@@ -75,6 +91,12 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
			}
			return -EINVAL;
		}

		if (keys.control.thoff + thlen > skb_headlen(skb) ||
		    keys.basic.ip_proto != ip_proto)
			return -EINVAL;

		skb_set_transport_header(skb, keys.control.thoff);
	}
}
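
The hunk above re-derives the transport protocol from the packet bytes instead of trusting the guest-supplied virtio header. A user-space analogue of that cross-check (offsets assume a bare IPv4 header with no options; an illustration, not the kernel's flow dissector):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool gso_claim_matches_packet(const uint8_t *pkt, size_t len,
				     uint8_t claimed_proto,
				     size_t min_transport_hdr)
{
	if (len < 20)
		return false;			/* no full IPv4 header */

	size_t ihl = (pkt[0] & 0x0f) * 4;	/* actual header length */

	if (ihl < 20 || len < ihl + min_transport_hdr)
		return false;	/* transport header not in the buffer */

	return pkt[9] == claimed_proto;		/* IPv4 protocol byte */
}
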
@@ -48,6 +48,7 @@ struct virtio_vsock_pkt {
	u32 len;
	u32 off;
	bool reply;
	bool tap_delivered;
};

struct virtio_vsock_pkt_info {

@@ -166,15 +166,18 @@ enum flow_action_mangle_base {
enum flow_action_hw_stats_bit {
	FLOW_ACTION_HW_STATS_IMMEDIATE_BIT,
	FLOW_ACTION_HW_STATS_DELAYED_BIT,
	FLOW_ACTION_HW_STATS_DISABLED_BIT,
};

enum flow_action_hw_stats {
	FLOW_ACTION_HW_STATS_DISABLED = 0,
	FLOW_ACTION_HW_STATS_DONT_CARE = 0,
	FLOW_ACTION_HW_STATS_IMMEDIATE =
		BIT(FLOW_ACTION_HW_STATS_IMMEDIATE_BIT),
	FLOW_ACTION_HW_STATS_DELAYED = BIT(FLOW_ACTION_HW_STATS_DELAYED_BIT),
	FLOW_ACTION_HW_STATS_ANY = FLOW_ACTION_HW_STATS_IMMEDIATE |
				   FLOW_ACTION_HW_STATS_DELAYED,
	FLOW_ACTION_HW_STATS_DISABLED =
		BIT(FLOW_ACTION_HW_STATS_DISABLED_BIT),
};

typedef void (*action_destr)(void *priv);

@@ -325,7 +328,11 @@ __flow_action_hw_stats_check(const struct flow_action *action,
		return true;
	if (!flow_action_mixed_hw_stats_check(action, extack))
		return false;

	action_entry = flow_action_first_entry_get(action);
	if (action_entry->hw_stats == FLOW_ACTION_HW_STATS_DONT_CARE)
		return true;

	if (!check_allow_bit &&
	    action_entry->hw_stats != FLOW_ACTION_HW_STATS_ANY) {
		NL_SET_ERR_MSG_MOD(extack, "Driver supports only default HW stats type \"any\"");

@@ -99,6 +99,20 @@ static inline int IP_ECN_set_ce(struct iphdr *iph)
	return 1;
}

static inline int IP_ECN_set_ect1(struct iphdr *iph)
{
	u32 check = (__force u32)iph->check;

	if ((iph->tos & INET_ECN_MASK) != INET_ECN_ECT_0)
		return 0;

	check += (__force u16)htons(0x100);

	iph->check = (__force __sum16)(check + (check>=0xFFFF));
	iph->tos ^= INET_ECN_MASK;
	return 1;
}

static inline void IP_ECN_clear(struct iphdr *iph)
{
	iph->tos &= ~INET_ECN_MASK;

@@ -134,6 +148,22 @@ static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph)
	return 1;
}

static inline int IP6_ECN_set_ect1(struct sk_buff *skb, struct ipv6hdr *iph)
{
	__be32 from, to;

	if ((ipv6_get_dsfield(iph) & INET_ECN_MASK) != INET_ECN_ECT_0)
		return 0;

	from = *(__be32 *)iph;
	to = from ^ htonl(INET_ECN_MASK << 20);
	*(__be32 *)iph = to;
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)from),
				     (__force __wsum)to);
	return 1;
}

static inline void ipv6_copy_dscp(unsigned int dscp, struct ipv6hdr *inner)
{
	dscp &= ~INET_ECN_MASK;

@@ -159,6 +189,25 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
	return 0;
}

static inline int INET_ECN_set_ect1(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case cpu_to_be16(ETH_P_IP):
		if (skb_network_header(skb) + sizeof(struct iphdr) <=
		    skb_tail_pointer(skb))
			return IP_ECN_set_ect1(ip_hdr(skb));
		break;

	case cpu_to_be16(ETH_P_IPV6):
		if (skb_network_header(skb) + sizeof(struct ipv6hdr) <=
		    skb_tail_pointer(skb))
			return IP6_ECN_set_ect1(skb, ipv6_hdr(skb));
		break;
	}

	return 0;
}

/*
 * RFC 6040 4.2
 *  To decapsulate the inner header at the tunnel egress, a compliant

@@ -208,8 +257,12 @@ static inline int INET_ECN_decapsulate(struct sk_buff *skb,
	int rc;

	rc = __INET_ECN_decapsulate(outer, inner, &set_ce);
	if (!rc && set_ce)
		INET_ECN_set_ce(skb);
	if (!rc) {
		if (set_ce)
			INET_ECN_set_ce(skb);
		else if ((outer & INET_ECN_MASK) == INET_ECN_ECT_1)
			INET_ECN_set_ect1(skb);
	}

	return rc;
}
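
The decapsulation change above adds one case to the RFC 6040 egress rule: when the outer header carried ECT(1) and the inner is ECT(0), the inner header is rewritten to ECT(1) instead of being left untouched. A standalone sketch of the resulting ECN codepoint mapping (the names are local stand-ins, not the kernel's):

#include <stdint.h>

#define ECN_NOT_ECT 0x0
#define ECN_ECT_1   0x1
#define ECN_ECT_0   0x2
#define ECN_CE      0x3
#define ECN_MASK    0x3

/* Returns the inner ECN field after decapsulation, given that the
 * RFC 6040 checks already succeeded and decided whether CE must be
 * propagated (set_ce). */
static uint8_t inner_ecn_after_decap(uint8_t outer, uint8_t inner,
				     int set_ce)
{
	if (set_ce)
		return (inner & ~ECN_MASK) | ECN_CE;
	/* New behaviour: ECT(1) on the outer header is copied down,
	 * but only onto an ECT(0) inner, mirroring IP_ECN_set_ect1()
	 * which refuses to touch Not-ECT and CE packets. */
	if ((outer & ECN_MASK) == ECN_ECT_1 &&
	    (inner & ECN_MASK) == ECN_ECT_0)
		return (inner & ~ECN_MASK) | ECN_ECT_1;
	return inner;
}
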
@@ -203,6 +203,7 @@ struct fib6_info {
struct rt6_info {
	struct dst_entry dst;
	struct fib6_info __rcu *from;
	int sernum;

	struct rt6key rt6i_dst;
	struct rt6key rt6i_src;

@@ -291,6 +292,9 @@ static inline u32 rt6_get_cookie(const struct rt6_info *rt)
	struct fib6_info *from;
	u32 cookie = 0;

	if (rt->sernum)
		return rt->sernum;

	rcu_read_lock();

	from = rcu_dereference(rt->from);

@@ -68,11 +68,8 @@ static inline bool rsk_is_mptcp(const struct request_sock *req)
	return tcp_rsk(req)->is_mptcp;
}

void mptcp_parse_option(const struct sk_buff *skb, const unsigned char *ptr,
			int opsize, struct tcp_options_received *opt_rx);
bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
		       unsigned int *size, struct mptcp_out_options *opts);
void mptcp_rcv_synsent(struct sock *sk);
bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
			  struct mptcp_out_options *opts);
bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,

@@ -437,6 +437,13 @@ static inline int rt_genid_ipv4(const struct net *net)
	return atomic_read(&net->ipv4.rt_genid);
}

#if IS_ENABLED(CONFIG_IPV6)
static inline int rt_genid_ipv6(const struct net *net)
{
	return atomic_read(&net->ipv6.fib6_sernum);
}
#endif

static inline void rt_genid_bump_ipv4(struct net *net)
{
	atomic_inc(&net->ipv4.rt_genid);

@@ -407,6 +407,7 @@ struct tcf_block {
	struct mutex lock;
	struct list_head chain_list;
	u32 index; /* block index for shared blocks */
	u32 classid; /* which class this block belongs to */
	refcount_t refcnt;
	struct net *net;
	struct Qdisc *q;

@@ -502,6 +502,7 @@ struct ocelot {
	unsigned int num_stats;

	int shared_queue_sz;
	int num_mact_rows;

	struct net_device *hw_bridge_dev;
	u16 bridge_mask;

@@ -177,18 +177,18 @@ static void vcc_destroy_socket(struct sock *sk)

	set_bit(ATM_VF_CLOSE, &vcc->flags);
	clear_bit(ATM_VF_READY, &vcc->flags);
	if (vcc->dev) {
		if (vcc->dev->ops->close)
			vcc->dev->ops->close(vcc);
		if (vcc->push)
			vcc->push(vcc, NULL); /* atmarpd has no push */
		module_put(vcc->owner);
	if (vcc->dev && vcc->dev->ops->close)
		vcc->dev->ops->close(vcc);
	if (vcc->push)
		vcc->push(vcc, NULL); /* atmarpd has no push */
	module_put(vcc->owner);

		while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
			atm_return(vcc, skb->truesize);
			kfree_skb(skb);
		}
	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		atm_return(vcc, skb->truesize);
		kfree_skb(skb);
	}

	if (vcc->dev && vcc->dev->ops->owner) {
		module_put(vcc->dev->ops->owner);
		atm_dev_put(vcc->dev);
	}

@@ -1264,6 +1264,12 @@ static void lec_arp_clear_vccs(struct lec_arp_table *entry)
		entry->vcc = NULL;
	}
	if (entry->recv_vcc) {
		struct atm_vcc *vcc = entry->recv_vcc;
		struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);

		kfree(vpriv);
		vcc->user_back = NULL;

		entry->recv_vcc->push = entry->old_recv_push;
		vcc_release_async(entry->recv_vcc, -EPIPE);
		entry->recv_vcc = NULL;

@@ -893,7 +893,7 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset,

	orig_node = batadv_v_ogm_orig_get(bat_priv, ogm_packet->orig);
	if (!orig_node)
		return;
		goto out;

	neigh_node = batadv_neigh_node_get_or_create(orig_node, if_incoming,
						     ethhdr->h_source);

@@ -1009,15 +1009,8 @@ static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv,
 */
static u8 batadv_nc_random_weight_tq(u8 tq)
{
	u8 rand_val, rand_tq;

	get_random_bytes(&rand_val, sizeof(rand_val));

	/* randomize the estimated packet loss (max TQ - estimated TQ) */
	rand_tq = rand_val * (BATADV_TQ_MAX_VALUE - tq);

	/* normalize the randomized packet loss */
	rand_tq /= BATADV_TQ_MAX_VALUE;
	u8 rand_tq = prandom_u32_max(BATADV_TQ_MAX_VALUE + 1 - tq);

	/* convert to (randomized) estimated tq again */
	return BATADV_TQ_MAX_VALUE - rand_tq;
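
The deleted code computed rand_val * (BATADV_TQ_MAX_VALUE - tq) into a u8, so the product was truncated modulo 256 before the normalising divide, which skews the distribution badly; the replacement draws uniformly from [0, BATADV_TQ_MAX_VALUE - tq] in one step. A user-space demonstration of the truncation (the modulo below is only a stand-in for prandom_u32_max()):

#include <stdint.h>
#include <stdio.h>

#define TQ_MAX 255

static uint8_t old_weight(uint8_t rand_val, uint8_t tq)
{
	uint8_t rand_tq = rand_val * (TQ_MAX - tq);	/* truncated to 8 bits */

	rand_tq /= TQ_MAX;	/* almost always 0 after truncation */
	return TQ_MAX - rand_tq;
}

static uint8_t new_weight(uint32_t uniform_draw, uint8_t tq)
{
	/* stand-in for prandom_u32_max(TQ_MAX + 1 - tq) */
	uint8_t rand_tq = uniform_draw % (TQ_MAX + 1 - tq);

	return TQ_MAX - rand_tq;
}

int main(void)
{
	/* With tq = 0, the old path computes (rand_val * 255) & 0xff,
	 * i.e. (256 - rand_val) & 0xff, then divides by 255: the result
	 * is 0 for every rand_val except 1, so the "randomised" weight
	 * is stuck at 255. */
	for (int r = 0; r < 256; r += 51)
		printf("rand_val=%3d old=%3u new=%3u\n", r,
		       (unsigned)old_weight((uint8_t)r, 0),
		       (unsigned)new_weight((uint32_t)r, 0));
	return 0;
}
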
@@ -1150,7 +1150,7 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
	ret = batadv_parse_throughput(net_dev, buff, "throughput_override",
				      &tp_override);
	if (!ret)
		return count;
		goto out;

	old_tp_override = atomic_read(&hard_iface->bat_v.throughput_override);
	if (old_tp_override == tp_override)

@@ -1190,6 +1190,7 @@ static ssize_t batadv_show_throughput_override(struct kobject *kobj,

	tp_override = atomic_read(&hard_iface->bat_v.throughput_override);

	batadv_hardif_put(hard_iface);
	return sprintf(buff, "%u.%u MBit\n", tp_override / 10,
		       tp_override % 10);
}

@@ -612,6 +612,7 @@ int br_process_vlan_info(struct net_bridge *br,
					       v - 1, rtm_cmd);
			v_change_start = 0;
		}
		cond_resched();
	}
	/* v_change_start is set only if the last/whole range changed */
	if (v_change_start)

@@ -4283,6 +4283,11 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
		end_offset = nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR]);
		end_offset += nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_LEN]);
		dump = false;

		if (start_offset == end_offset) {
			err = 0;
			goto nla_put_failure;
		}
	}

	err = devlink_nl_region_read_snapshot_fill(skb, devlink,

@@ -5363,6 +5368,7 @@ int devlink_health_report(struct devlink_health_reporter *reporter,
{
	enum devlink_health_reporter_state prev_health_state;
	struct devlink *devlink = reporter->devlink;
	unsigned long recover_ts_threshold;

	/* write a log message of the current error */
	WARN_ON(!msg);

@@ -5373,10 +5379,12 @@ int devlink_health_report(struct devlink_health_reporter *reporter,
	devlink_recover_notify(reporter, DEVLINK_CMD_HEALTH_REPORTER_RECOVER);

	/* abort if the previous error wasn't recovered */
	recover_ts_threshold = reporter->last_recovery_ts +
			       msecs_to_jiffies(reporter->graceful_period);
	if (reporter->auto_recover &&
	    (prev_health_state != DEVLINK_HEALTH_REPORTER_STATE_HEALTHY ||
	     jiffies - reporter->last_recovery_ts <
	     msecs_to_jiffies(reporter->graceful_period))) {
	     (reporter->last_recovery_ts && reporter->recovery_count &&
	      time_is_after_jiffies(recover_ts_threshold)))) {
		trace_devlink_health_recover_aborted(devlink,
						     reporter->ops->name,
						     reporter->health_state,

@@ -213,6 +213,7 @@ static void sched_send_work(struct timer_list *t)
static void trace_drop_common(struct sk_buff *skb, void *location)
{
	struct net_dm_alert_msg *msg;
	struct net_dm_drop_point *point;
	struct nlmsghdr *nlh;
	struct nlattr *nla;
	int i;

@@ -231,11 +232,13 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
	nlh = (struct nlmsghdr *)dskb->data;
	nla = genlmsg_data(nlmsg_data(nlh));
	msg = nla_data(nla);
	point = msg->points;
	for (i = 0; i < msg->entries; i++) {
		if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) {
			msg->points[i].count++;
		if (!memcmp(&location, &point->pc, sizeof(void *))) {
			point->count++;
			goto out;
		}
		point++;
	}
	if (msg->entries == dm_hit_limit)
		goto out;

@@ -244,8 +247,8 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
	 */
	__nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point));
	nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
	memcpy(msg->points[msg->entries].pc, &location, sizeof(void *));
	msg->points[msg->entries].count = 1;
	memcpy(point->pc, &location, sizeof(void *));
	point->count = 1;
	msg->entries++;

	if (!timer_pending(&data->send_timer)) {

@@ -1956,6 +1956,9 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
					     NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
	}

	if (protocol)
		neigh->protocol = protocol;

	if (ndm->ndm_flags & NTF_EXT_LEARNED)
		flags |= NEIGH_UPDATE_F_EXT_LEARNED;

@@ -1969,9 +1972,6 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
				     NETLINK_CB(skb).portid, extack);

	if (protocol)
		neigh->protocol = protocol;

	neigh_release(neigh);

out:

@@ -2364,7 +2364,6 @@ static void sk_leave_memory_pressure(struct sock *sk)
	}
}

/* On 32bit arches, an skb frag is limited to 2^15 */
#define SKB_FRAG_PAGE_ORDER get_order(32768)
DEFINE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key);

@@ -459,7 +459,7 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
	list_for_each_entry(dp, &dst->ports, list) {
		err = dsa_port_setup(dp);
		if (err)
			goto teardown;
			continue;
	}

	return 0;

@@ -289,7 +289,8 @@ static void dsa_master_ndo_teardown(struct net_device *dev)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;

	dev->netdev_ops = cpu_dp->orig_ndo_ops;
	if (cpu_dp->orig_ndo_ops)
		dev->netdev_ops = cpu_dp->orig_ndo_ops;
	cpu_dp->orig_ndo_ops = NULL;
}

@@ -856,20 +856,18 @@ dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
	struct dsa_port *to_dp;
	int err;

	act = &cls->rule->action.entries[0];

	if (!ds->ops->port_mirror_add)
		return -EOPNOTSUPP;

	if (!act->dev)
		return -EINVAL;

	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
					      cls->common.extack))
		return -EOPNOTSUPP;

	act = &cls->rule->action.entries[0];

	if (!act->dev)
		return -EINVAL;

	if (!dsa_slave_dev_check(act->dev))
		return -EOPNOTSUPP;

@@ -18,7 +18,7 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct hsr_port *port;
	u16 protocol;
	__be16 protocol;

	if (!skb_mac_header_was_set(skb)) {
		WARN_ONCE(1, "%s: skb invalid", __func__);

@@ -3926,10 +3926,6 @@ void tcp_parse_options(const struct net *net,
				 */
				break;
#endif
			case TCPOPT_MPTCP:
				mptcp_parse_option(skb, ptr, opsize, opt_rx);
				break;

			case TCPOPT_FASTOPEN:
				tcp_parse_fastopen_option(
					opsize - TCPOLEN_FASTOPEN_BASE,

@@ -5990,9 +5986,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
		tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		tcp_initialize_rcv_mss(sk);

		if (sk_is_mptcp(sk))
			mptcp_rcv_synsent(sk);

		/* Remember, tcp_poll() does not lock socket!
		 * Change state from SYN-SENT only after copied_seq
		 * is initialized. */

@@ -1385,9 +1385,18 @@ static struct rt6_info *ip6_rt_pcpu_alloc(const struct fib6_result *res)
	}
	ip6_rt_copy_init(pcpu_rt, res);
	pcpu_rt->rt6i_flags |= RTF_PCPU;

	if (f6i->nh)
		pcpu_rt->sernum = rt_genid_ipv6(dev_net(dev));

	return pcpu_rt;
}

static bool rt6_is_valid(const struct rt6_info *rt6)
{
	return rt6->sernum == rt_genid_ipv6(dev_net(rt6->dst.dev));
}

/* It should be called with rcu_read_lock() acquired */
static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
{

@@ -1395,6 +1404,19 @@ static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)

	pcpu_rt = this_cpu_read(*res->nh->rt6i_pcpu);

	if (pcpu_rt && pcpu_rt->sernum && !rt6_is_valid(pcpu_rt)) {
		struct rt6_info *prev, **p;

		p = this_cpu_ptr(res->nh->rt6i_pcpu);
		prev = xchg(p, NULL);
		if (prev) {
			dst_dev_put(&prev->dst);
			dst_release(&prev->dst);
		}

		pcpu_rt = NULL;
	}

	return pcpu_rt;
}

@@ -2593,6 +2615,9 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)

	rt = container_of(dst, struct rt6_info, dst);

	if (rt->sernum)
		return rt6_is_valid(rt) ? dst : NULL;

	rcu_read_lock();

	/* All IPV6 dsts are created with ->obsolete set to the value

@@ -27,8 +27,9 @@

bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len)
{
	int trailing;
	unsigned int tlv_offset;
	int max_last_entry;
	int trailing;

	if (srh->type != IPV6_SRCRT_TYPE_4)
		return false;

@@ -36,7 +37,12 @@ bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len)
	if (((srh->hdrlen + 1) << 3) != len)
		return false;

	if (srh->segments_left > srh->first_segment)
	max_last_entry = (srh->hdrlen / 2) - 1;

	if (srh->first_segment > max_last_entry)
		return false;

	if (srh->segments_left > srh->first_segment + 1)
		return false;

	tlv_offset = sizeof(*srh) + ((srh->first_segment + 1) << 4);
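
The arithmetic here follows RFC 8754's layout: hdrlen counts the SRH in 8-octet units beyond the first 8 octets, and each segment is a 16-byte IPv6 address, so the header can hold hdrlen / 2 addresses and the highest valid Last Entry index is hdrlen / 2 - 1. Segments Left is allowed to exceed Last Entry by exactly one (with the reduced encoding the active segment sits in the IPv6 destination address rather than in the list -- our reading of the RFC, not stated in the hunk). A self-contained sketch of the same bounds check:

#include <stdbool.h>
#include <stdint.h>

/* Field names mirror the SRH fields used above; this is an
 * illustration of the bounds, not the kernel structure. */
struct srh_fields {
	uint8_t hdrlen;		/* 8-octet units past the first 8 */
	uint8_t first_segment;	/* "Last Entry" in RFC 8754 */
	uint8_t segments_left;
};

static bool srh_bounds_ok(const struct srh_fields *srh)
{
	int max_last_entry = (srh->hdrlen / 2) - 1;

	if (srh->first_segment > max_last_entry)
		return false;	/* Last Entry points past the SID list */
	return srh->segments_left <= srh->first_segment + 1;
}

/* Example: an SRH carrying three SIDs has hdrlen = 6, so
 * max_last_entry = 2 and segments_left may be at most 3. */
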
@@ -16,10 +16,10 @@ static bool mptcp_cap_flag_sha256(u8 flags)
	return (flags & MPTCP_CAP_FLAG_MASK) == MPTCP_CAP_HMAC_SHA256;
}

void mptcp_parse_option(const struct sk_buff *skb, const unsigned char *ptr,
			int opsize, struct tcp_options_received *opt_rx)
static void mptcp_parse_option(const struct sk_buff *skb,
			       const unsigned char *ptr, int opsize,
			       struct mptcp_options_received *mp_opt)
{
	struct mptcp_options_received *mp_opt = &opt_rx->mptcp;
	u8 subtype = *ptr >> 4;
	int expected_opsize;
	u8 version;

@@ -283,12 +283,20 @@ void mptcp_parse_option(const struct sk_buff *skb, const unsigned char *ptr,
}

void mptcp_get_options(const struct sk_buff *skb,
		       struct tcp_options_received *opt_rx)
		       struct mptcp_options_received *mp_opt)
{
	const unsigned char *ptr;
	const struct tcphdr *th = tcp_hdr(skb);
	int length = (th->doff * 4) - sizeof(struct tcphdr);
	const unsigned char *ptr;
	int length;

	/* initialize option status */
	mp_opt->mp_capable = 0;
	mp_opt->mp_join = 0;
	mp_opt->add_addr = 0;
	mp_opt->rm_addr = 0;
	mp_opt->dss = 0;

	length = (th->doff * 4) - sizeof(struct tcphdr);
	ptr = (const unsigned char *)(th + 1);

	while (length > 0) {

@@ -308,7 +316,7 @@ void mptcp_get_options(const struct sk_buff *skb,
		if (opsize > length)
			return;	/* don't parse partial options */
		if (opcode == TCPOPT_MPTCP)
			mptcp_parse_option(skb, ptr, opsize, opt_rx);
			mptcp_parse_option(skb, ptr, opsize, mp_opt);
		ptr += opsize - 2;
		length -= opsize;
	}

@@ -344,28 +352,6 @@ bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
	return false;
}

void mptcp_rcv_synsent(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	if (subflow->request_mptcp && tp->rx_opt.mptcp.mp_capable) {
		subflow->mp_capable = 1;
		subflow->can_ack = 1;
		subflow->remote_key = tp->rx_opt.mptcp.sndr_key;
		pr_debug("subflow=%p, remote_key=%llu", subflow,
			 subflow->remote_key);
	} else if (subflow->request_join && tp->rx_opt.mptcp.mp_join) {
		subflow->mp_join = 1;
		subflow->thmac = tp->rx_opt.mptcp.thmac;
		subflow->remote_nonce = tp->rx_opt.mptcp.nonce;
		pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u", subflow,
			 subflow->thmac, subflow->remote_nonce);
	} else if (subflow->request_mptcp) {
		tcp_sk(sk)->is_mptcp = 0;
	}
}

/* MP_JOIN client subflow must wait for 4th ack before sending any data:
 * TCP can't schedule delack timer before the subflow is fully established.
 * MPTCP uses the delack timer to do 3rd ack retransmissions

@@ -709,7 +695,7 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *sk,
	if (TCP_SKB_CB(skb)->seq != subflow->ssn_offset + 1)
		return subflow->mp_capable;

	if (mp_opt->use_ack) {
	if (mp_opt->dss && mp_opt->use_ack) {
		/* subflows are fully established as soon as we get any
		 * additional ack.
		 */

@@ -717,8 +703,6 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *sk,
		goto fully_established;
	}

	WARN_ON_ONCE(subflow->can_ack);

	/* If the first established packet does not contain MP_CAPABLE + data
	 * then fallback to TCP
	 */

@@ -728,6 +712,8 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *sk,
		return false;
	}

	if (unlikely(!READ_ONCE(msk->pm.server_side)))
		pr_warn_once("bogus mpc option on established client sk");
	subflow->fully_established = 1;
	subflow->remote_key = mp_opt->sndr_key;
	subflow->can_ack = 1;

@@ -819,41 +805,41 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb,
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	struct mptcp_options_received *mp_opt;
	struct mptcp_options_received mp_opt;
	struct mptcp_ext *mpext;

	mp_opt = &opt_rx->mptcp;
	if (!check_fully_established(msk, sk, subflow, skb, mp_opt))
	mptcp_get_options(skb, &mp_opt);
	if (!check_fully_established(msk, sk, subflow, skb, &mp_opt))
		return;

	if (mp_opt->add_addr && add_addr_hmac_valid(msk, mp_opt)) {
	if (mp_opt.add_addr && add_addr_hmac_valid(msk, &mp_opt)) {
		struct mptcp_addr_info addr;

		addr.port = htons(mp_opt->port);
		addr.id = mp_opt->addr_id;
		if (mp_opt->family == MPTCP_ADDR_IPVERSION_4) {
		addr.port = htons(mp_opt.port);
		addr.id = mp_opt.addr_id;
		if (mp_opt.family == MPTCP_ADDR_IPVERSION_4) {
			addr.family = AF_INET;
			addr.addr = mp_opt->addr;
			addr.addr = mp_opt.addr;
		}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
		else if (mp_opt->family == MPTCP_ADDR_IPVERSION_6) {
		else if (mp_opt.family == MPTCP_ADDR_IPVERSION_6) {
			addr.family = AF_INET6;
			addr.addr6 = mp_opt->addr6;
			addr.addr6 = mp_opt.addr6;
		}
#endif
		if (!mp_opt->echo)
		if (!mp_opt.echo)
			mptcp_pm_add_addr_received(msk, &addr);
		mp_opt->add_addr = 0;
		mp_opt.add_addr = 0;
	}

	if (!mp_opt->dss)
	if (!mp_opt.dss)
		return;

	/* we can't wait for recvmsg() to update the ack_seq, otherwise
	 * monodirectional flows will get stuck
	 */
	if (mp_opt->use_ack)
		update_una(msk, mp_opt);
	if (mp_opt.use_ack)
		update_una(msk, &mp_opt);

	mpext = skb_ext_add(skb, SKB_EXT_MPTCP);
	if (!mpext)

@@ -861,8 +847,8 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb,

	memset(mpext, 0, sizeof(*mpext));

	if (mp_opt->use_map) {
		if (mp_opt->mpc_map) {
	if (mp_opt.use_map) {
		if (mp_opt.mpc_map) {
			/* this is an MP_CAPABLE carrying MPTCP data
			 * we know this map the first chunk of data
			 */

@@ -872,13 +858,14 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb,
			mpext->subflow_seq = 1;
			mpext->dsn64 = 1;
			mpext->mpc_map = 1;
			mpext->data_fin = 0;
		} else {
			mpext->data_seq = mp_opt->data_seq;
			mpext->subflow_seq = mp_opt->subflow_seq;
			mpext->dsn64 = mp_opt->dsn64;
			mpext->data_fin = mp_opt->data_fin;
			mpext->data_seq = mp_opt.data_seq;
			mpext->subflow_seq = mp_opt.subflow_seq;
			mpext->dsn64 = mp_opt.dsn64;
			mpext->data_fin = mp_opt.data_fin;
		}
		mpext->data_len = mp_opt->data_len;
		mpext->data_len = mp_opt.data_len;
		mpext->use_map = 1;
	}
}

@@ -1316,11 +1316,12 @@ static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)

static int mptcp_disconnect(struct sock *sk, int flags)
{
	lock_sock(sk);
	__mptcp_clear_xmit(sk);
	release_sock(sk);
	mptcp_cancel_work(sk);
	return tcp_disconnect(sk, flags);
	/* Should never be called.
	 * inet_stream_connect() calls ->disconnect, but that
	 * refers to the subflow socket, not the mptcp one.
	 */
	WARN_ON_ONCE(1);
	return 0;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)

@@ -1333,7 +1334,7 @@ static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
#endif

struct sock *mptcp_sk_clone(const struct sock *sk,
			    const struct tcp_options_received *opt_rx,
			    const struct mptcp_options_received *mp_opt,
			    struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

@@ -1372,9 +1373,9 @@ struct sock *mptcp_sk_clone(const struct sock *sk,

	msk->write_seq = subflow_req->idsn + 1;
	atomic64_set(&msk->snd_una, msk->write_seq);
	if (opt_rx->mptcp.mp_capable) {
	if (mp_opt->mp_capable) {
		msk->can_ack = true;
		msk->remote_key = opt_rx->mptcp.sndr_key;
		msk->remote_key = mp_opt->sndr_key;
		mptcp_crypto_key_sha(msk->remote_key, NULL, &ack_seq);
		ack_seq++;
		msk->ack_seq = ack_seq;

@@ -91,6 +91,45 @@
#define MPTCP_WORK_RTX 2
#define MPTCP_WORK_EOF 3

struct mptcp_options_received {
	u64 sndr_key;
	u64 rcvr_key;
	u64 data_ack;
	u64 data_seq;
	u32 subflow_seq;
	u16 data_len;
	u16 mp_capable : 1,
		mp_join : 1,
		dss : 1,
		add_addr : 1,
		rm_addr : 1,
		family : 4,
		echo : 1,
		backup : 1;
	u32 token;
	u32 nonce;
	u64 thmac;
	u8 hmac[20];
	u8 join_id;
	u8 use_map:1,
		dsn64:1,
		data_fin:1,
		use_ack:1,
		ack64:1,
		mpc_map:1,
		__unused:2;
	u8 addr_id;
	u8 rm_id;
	union {
		struct in_addr addr;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
		struct in6_addr addr6;
#endif
	};
	u64 ahmac;
	u16 port;
};

static inline __be32 mptcp_option(u8 subopt, u8 len, u8 nib, u8 field)
{
	return htonl((TCPOPT_MPTCP << 24) | (len << 16) | (subopt << 12) |

@@ -331,10 +370,10 @@ int mptcp_proto_v6_init(void);
#endif

struct sock *mptcp_sk_clone(const struct sock *sk,
			    const struct tcp_options_received *opt_rx,
			    const struct mptcp_options_received *mp_opt,
			    struct request_sock *req);
void mptcp_get_options(const struct sk_buff *skb,
		       struct tcp_options_received *opt_rx);
		       struct mptcp_options_received *mp_opt);

void mptcp_finish_connect(struct sock *sk);
void mptcp_data_ready(struct sock *sk, struct sock *ssk);

@@ -124,12 +124,11 @@ static void subflow_init_req(struct request_sock *req,
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct tcp_options_received rx_opt;
	struct mptcp_options_received mp_opt;

	pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);

	memset(&rx_opt.mptcp, 0, sizeof(rx_opt.mptcp));
	mptcp_get_options(skb, &rx_opt);
	mptcp_get_options(skb, &mp_opt);

	subflow_req->mp_capable = 0;
	subflow_req->mp_join = 0;

@@ -142,16 +141,16 @@ static void subflow_init_req(struct request_sock *req,
		return;
#endif

	if (rx_opt.mptcp.mp_capable) {
	if (mp_opt.mp_capable) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);

		if (rx_opt.mptcp.mp_join)
		if (mp_opt.mp_join)
			return;
	} else if (rx_opt.mptcp.mp_join) {
	} else if (mp_opt.mp_join) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
	}

	if (rx_opt.mptcp.mp_capable && listener->request_mptcp) {
	if (mp_opt.mp_capable && listener->request_mptcp) {
		int err;

		err = mptcp_token_new_request(req);

@@ -159,13 +158,13 @@ static void subflow_init_req(struct request_sock *req,
			subflow_req->mp_capable = 1;

		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
	} else if (rx_opt.mptcp.mp_join && listener->request_mptcp) {
	} else if (mp_opt.mp_join && listener->request_mptcp) {
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
		subflow_req->mp_join = 1;
		subflow_req->backup = rx_opt.mptcp.backup;
		subflow_req->remote_id = rx_opt.mptcp.join_id;
		subflow_req->token = rx_opt.mptcp.token;
		subflow_req->remote_nonce = rx_opt.mptcp.nonce;
		subflow_req->backup = mp_opt.backup;
		subflow_req->remote_id = mp_opt.join_id;
		subflow_req->token = mp_opt.token;
		subflow_req->remote_nonce = mp_opt.nonce;
		pr_debug("token=%u, remote_nonce=%u", subflow_req->token,
			 subflow_req->remote_nonce);
		if (!subflow_token_join_request(req, skb)) {

@@ -221,23 +220,47 @@ static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_options_received mp_opt;
	struct sock *parent = subflow->conn;
	struct tcp_sock *tp = tcp_sk(sk);

	subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);

	if (inet_sk_state_load(parent) != TCP_ESTABLISHED) {
	if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
		inet_sk_state_store(parent, TCP_ESTABLISHED);
		parent->sk_state_change(parent);
	}

	if (subflow->conn_finished || !tcp_sk(sk)->is_mptcp)
	/* be sure no special action on any packet other than syn-ack */
	if (subflow->conn_finished)
		return;

	subflow->conn_finished = 1;

	mptcp_get_options(skb, &mp_opt);
	if (subflow->request_mptcp && mp_opt.mp_capable) {
		subflow->mp_capable = 1;
		subflow->can_ack = 1;
		subflow->remote_key = mp_opt.sndr_key;
		pr_debug("subflow=%p, remote_key=%llu", subflow,
			 subflow->remote_key);
	} else if (subflow->request_join && mp_opt.mp_join) {
		subflow->mp_join = 1;
		subflow->thmac = mp_opt.thmac;
		subflow->remote_nonce = mp_opt.nonce;
		pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u", subflow,
			 subflow->thmac, subflow->remote_nonce);
	} else if (subflow->request_mptcp) {
		tp->is_mptcp = 0;
	}

	if (!tp->is_mptcp)
		return;

	if (subflow->mp_capable) {
		pr_debug("subflow=%p, remote_key=%llu", mptcp_subflow_ctx(sk),
			 subflow->remote_key);
		mptcp_finish_connect(sk);
		subflow->conn_finished = 1;

		if (skb) {
			pr_debug("synack seq=%u", TCP_SKB_CB(skb)->seq);

@@ -264,7 +287,6 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
		if (!mptcp_finish_join(sk))
			goto do_reset;

		subflow->conn_finished = 1;
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
	} else {
do_reset:

@@ -322,7 +344,7 @@ static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)

/* validate hmac received in third ACK */
static bool subflow_hmac_valid(const struct request_sock *req,
			       const struct tcp_options_received *rx_opt)
			       const struct mptcp_options_received *mp_opt)
{
	const struct mptcp_subflow_request_sock *subflow_req;
	u8 hmac[MPTCPOPT_HMAC_LEN];

@@ -339,7 +361,7 @@ static bool subflow_hmac_valid(const struct request_sock *req,
			      subflow_req->local_nonce, hmac);

	ret = true;
	if (crypto_memneq(hmac, rx_opt->mptcp.hmac, sizeof(hmac)))
	if (crypto_memneq(hmac, mp_opt->hmac, sizeof(hmac)))
		ret = false;

	sock_put((struct sock *)msk);

@@ -395,7 +417,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
	struct mptcp_subflow_request_sock *subflow_req;
	struct tcp_options_received opt_rx;
	struct mptcp_options_received mp_opt;
	bool fallback_is_fatal = false;
	struct sock *new_msk = NULL;
	bool fallback = false;

@@ -403,7 +425,10 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,

	pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);

	opt_rx.mptcp.mp_capable = 0;
	/* we need later a valid 'mp_capable' value even when options are not
	 * parsed
	 */
	mp_opt.mp_capable = 0;
	if (tcp_rsk(req)->is_mptcp == 0)
		goto create_child;

@@ -418,22 +443,21 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
			goto create_msk;
		}

		mptcp_get_options(skb, &opt_rx);
		if (!opt_rx.mptcp.mp_capable) {
		mptcp_get_options(skb, &mp_opt);
		if (!mp_opt.mp_capable) {
			fallback = true;
			goto create_child;
		}

create_msk:
		new_msk = mptcp_sk_clone(listener->conn, &opt_rx, req);
		new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req);
		if (!new_msk)
			fallback = true;
	} else if (subflow_req->mp_join) {
		fallback_is_fatal = true;
		opt_rx.mptcp.mp_join = 0;
		mptcp_get_options(skb, &opt_rx);
		if (!opt_rx.mptcp.mp_join ||
		    !subflow_hmac_valid(req, &opt_rx)) {
		mptcp_get_options(skb, &mp_opt);
		if (!mp_opt.mp_join ||
		    !subflow_hmac_valid(req, &mp_opt)) {
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
			return NULL;
		}

@@ -473,9 +497,9 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
			/* with OoO packets we can reach here without ingress
			 * mpc option
			 */
			ctx->remote_key = opt_rx.mptcp.sndr_key;
			ctx->fully_established = opt_rx.mptcp.mp_capable;
			ctx->can_ack = opt_rx.mptcp.mp_capable;
			ctx->remote_key = mp_opt.sndr_key;
			ctx->fully_established = mp_opt.mp_capable;
			ctx->can_ack = mp_opt.mp_capable;
		} else if (ctx->mp_join) {
			struct mptcp_sock *owner;

@@ -499,7 +523,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
	/* check for expected invariant - should never trigger, just help
	 * catching earlier subtle bugs
	 */
	WARN_ON_ONCE(*own_req && child && tcp_sk(child)->is_mptcp &&
	WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&
		     (!mptcp_subflow_ctx(child) ||
		      !mptcp_subflow_ctx(child)->conn));
	return child;

@@ -68,15 +68,13 @@ static bool udp_manip_pkt(struct sk_buff *skb,
			  enum nf_nat_manip_type maniptype)
{
	struct udphdr *hdr;
	bool do_csum;

	if (skb_ensure_writable(skb, hdroff + sizeof(*hdr)))
		return false;

	hdr = (struct udphdr *)(skb->data + hdroff);
	do_csum = hdr->check || skb->ip_summed == CHECKSUM_PARTIAL;
	__udp_manip_pkt(skb, iphdroff, hdr, tuple, maniptype, !!hdr->check);

	__udp_manip_pkt(skb, iphdroff, hdr, tuple, maniptype, do_csum);
	return true;
}

@@ -165,12 +165,12 @@ static bool nf_osf_match_one(const struct sk_buff *skb,
static const struct tcphdr *nf_osf_hdr_ctx_init(struct nf_osf_hdr_ctx *ctx,
						const struct sk_buff *skb,
						const struct iphdr *ip,
						unsigned char *opts)
						unsigned char *opts,
						struct tcphdr *_tcph)
{
	const struct tcphdr *tcp;
	struct tcphdr _tcph;

	tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), &_tcph);
	tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), _tcph);
	if (!tcp)
		return NULL;
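
The point of this change: skb_header_pointer() may copy the header into the buffer it is given and return a pointer to that buffer, so when the buffer is a local variable the returned pointer dangles as soon as the function returns. Moving the storage to the caller fixes that. A minimal sketch of the bug pattern and the fixed shape:

#include <stddef.h>
#include <string.h>

/* Buggy shape: returns a pointer into a dead stack frame. */
const char *bad(const char *pkt, size_t off)
{
	char local[20];

	memcpy(local, pkt + off, sizeof(local));
	return local;		/* dangling once bad() returns */
}

/* Fixed shape: the caller owns the storage, as with _tcph above. */
const char *good(const char *pkt, size_t off, char *storage, size_t len)
{
	memcpy(storage, pkt + off, len);
	return storage;
}
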
@@ -205,10 +205,11 @@ nf_osf_match(const struct sk_buff *skb, u_int8_t family,
	int fmatch = FMATCH_WRONG;
	struct nf_osf_hdr_ctx ctx;
	const struct tcphdr *tcp;
	struct tcphdr _tcph;

	memset(&ctx, 0, sizeof(ctx));

	tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts);
	tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts, &_tcph);
	if (!tcp)
		return false;

@@ -265,10 +266,11 @@ bool nf_osf_find(const struct sk_buff *skb,
	const struct nf_osf_finger *kf;
	struct nf_osf_hdr_ctx ctx;
	const struct tcphdr *tcp;
	struct tcphdr _tcph;

	memset(&ctx, 0, sizeof(ctx));

	tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts);
	tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts, &_tcph);
	if (!tcp)
		return false;

@@ -2070,6 +2070,7 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
		err = PTR_ERR(block);
		goto errout;
	}
	block->classid = parent;

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {

@@ -2612,12 +2613,10 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
		return skb->len;

	parent = tcm->tcm_parent;
	if (!parent) {
	if (!parent)
		q = dev->qdisc;
		parent = q->handle;
	} else {
	else
		q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
	}
	if (!q)
		goto out;
	cops = q->ops->cl_ops;

@@ -2633,6 +2632,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
		block = cops->tcf_block(q, cl, NULL);
		if (!block)
			goto out;
		parent = block->classid;
		if (tcf_block_shared(block))
			q = NULL;
	}

@@ -3523,6 +3523,16 @@ static void tcf_sample_get_group(struct flow_action_entry *entry,
#endif
}

static enum flow_action_hw_stats tc_act_hw_stats(u8 hw_stats)
{
	if (WARN_ON_ONCE(hw_stats > TCA_ACT_HW_STATS_ANY))
		return FLOW_ACTION_HW_STATS_DONT_CARE;
	else if (!hw_stats)
		return FLOW_ACTION_HW_STATS_DISABLED;

	return hw_stats;
}

int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts)
{

@@ -3546,7 +3556,7 @@ int tc_setup_flow_action(struct flow_action *flow_action,
		if (err)
			goto err_out_locked;

		entry->hw_stats = act->hw_stats;
		entry->hw_stats = tc_act_hw_stats(act->hw_stats);

		if (is_tcf_gact_ok(act)) {
			entry->id = FLOW_ACTION_ACCEPT;

@@ -3614,7 +3624,7 @@ int tc_setup_flow_action(struct flow_action *flow_action,
				entry->mangle.mask = tcf_pedit_mask(act, k);
				entry->mangle.val = tcf_pedit_val(act, k);
				entry->mangle.offset = tcf_pedit_offset(act, k);
				entry->hw_stats = act->hw_stats;
				entry->hw_stats = tc_act_hw_stats(act->hw_stats);
				entry = &flow_action->entries[++j];
			}
		} else if (is_tcf_csum(act)) {

@@ -323,7 +323,8 @@ static void choke_reset(struct Qdisc *sch)

	sch->q.qlen = 0;
	sch->qstats.backlog = 0;
	memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
	if (q->tab)
		memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
	q->head = q->tail = 0;
	red_restart(&q->vars);
}

@@ -416,7 +416,7 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
		q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));

	if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
		q->drop_batch_size = min(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));
		q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));

	if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
		q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));

@@ -637,6 +637,15 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
	if (ctl->divisor &&
	    (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
		return -EINVAL;

	/* slot->allot is a short, make sure quantum is not too big. */
	if (ctl->quantum) {
		unsigned int scaled = SFQ_ALLOT_SIZE(ctl->quantum);

		if (scaled <= 0 || scaled > SHRT_MAX)
			return -EINVAL;
	}

	if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
					ctl_v1->Wlog))
		return -EINVAL;

@@ -169,6 +169,9 @@ static int skbprio_change(struct Qdisc *sch, struct nlattr *opt,
{
	struct tc_skbprio_qopt *ctl = nla_data(opt);

	if (opt->nla_len != nla_attr_size(sizeof(*ctl)))
		return -EINVAL;

	sch->limit = ctl->limit;
	return 0;
}
Some files were not shown because too many files have changed in this diff.