Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Netfilter list handling fix, from Linus.

 2) RXRPC/AFS bug fixes from David Howells (oops on call to serviceless
    endpoints, build warnings, missing notifications, etc.) From David
    Howells.

 3) Kernel log message missing newlines, from Colin Ian King.

 4) Don't enter direct reclaim in netlink dumps, the idea is to use a
    high order allocation first and fallback quickly to a 0-order
    allocation if such a high-order one cannot be done cheaply and
    without reclaim. From Eric Dumazet.

 5) Fix firmware download errors in btusb bluetooth driver, from Ethan
    Hsieh.

 6) Missing Kconfig deps for QCOM_EMAC, from Geert Uytterhoeven.

 7) Fix MDIO_XGENE dup Kconfig entry. From Laura Abbott.

 8) Constrain ipv6 rtr_solicits sysctl values properly, from Maciej
    Żenczykowski.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (65 commits)
  netfilter: Fix slab corruption.
  be2net: Enable VF link state setting for BE3
  be2net: Fix TX stats for TSO packets
  be2net: Update Copyright string in be_hw.h
  be2net: NCSI FW section should be properly updated with ethtool for BE3
  be2net: Provide an alternate way to read pf_num for BEx chips
  wan/fsl_ucc_hdlc: Fix size used in dma_free_coherent()
  net: macb: NULL out phydev after removing mdio bus
  xen-netback: make sure that hashes are not send to unaware frontends
  Fixing a bug in team driver due to incorrect 'unsigned int' to 'int' conversion
  MAINTAINERS: add myself as a maintainer of xen-netback
  ipv6 addrconf: disallow rtr_solicits < -1
  Bluetooth: btusb: Fix atheros firmware download error
  drivers: net: phy: Correct duplicate MDIO_XGENE entry
  ethernet: qualcomm: QCOM_EMAC should depend on HAS_DMA and HAS_IOMEM
  net: ethernet: mediatek: remove hwlro property in the device tree
  net: ethernet: mediatek: get hw lro capability by the chip id instead of by the dtsi
  net: ethernet: mediatek: get the chip id by ETHDMASYS registers
  net: bgmac: Fix errant feature flag check
  netlink: do not enter direct reclaim from netlink_dump()
  ...
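For item 4, the allocation strategy amounts to the following minimal sketch (illustrative only, not the actual upstream patch; the helper name and sizes are made up): try the large buffer with direct reclaim masked off so the attempt fails fast, then fall back to a small order-0 allocation.

	static struct sk_buff *dump_alloc_skb_sketch(unsigned int want, unsigned int min)
	{
		struct sk_buff *skb = NULL;

		/* Opportunistic large allocation: no direct reclaim, no warning */
		if (want > min)
			skb = alloc_skb(want, (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) |
					      __GFP_NOWARN);
		/* Fall back to a small allocation that may reclaim normally */
		if (!skb)
			skb = alloc_skb(min, GFP_KERNEL);
		return skb;
	}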
commit 6b5e09a748
@@ -24,7 +24,6 @@ Required properties:

Optional properties:
- interrupt-parent: Should be the phandle for the interrupt controller
  that services interrupts for this device
- mediatek,hwlro: the capability if the hardware supports LRO functions

* Ethernet MAC node

@@ -54,7 +53,6 @@ eth: ethernet@1b100000 {
	reset-names = "eth";
	mediatek,ethsys = <&ethsys>;
	mediatek,pctl = <&syscfg_pctl_a>;
	mediatek,hwlro;
	#address-cells = <1>;
	#size-cells = <0>;
@@ -34,16 +34,17 @@ KSZ9031:

All skew control options are specified in picoseconds. The minimum
value is 0, and the maximum is property-dependent. The increment
step is 60ps.
step is 60ps. The default value is the neutral setting, so setting
rxc-skew-ps=<0> actually results in -900 picoseconds adjustment.

Optional properties:

Maximum value of 1860:
Maximum value of 1860, default value 900:

- rxc-skew-ps : Skew control of RX clock pad
- txc-skew-ps : Skew control of TX clock pad

Maximum value of 900:
Maximum value of 900, default value 420:

- rxdv-skew-ps : Skew control of RX CTL pad
- txen-skew-ps : Skew control of TX CTL pad
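For illustration only (not part of this patch; node name and values are examples), a PHY node using these skew properties might look like:

	ethernet-phy@0 {
		reg = <0>;
		rxc-skew-ps = <900>;	/* neutral (default) value for clock pads */
		txc-skew-ps = <1860>;	/* maximum positive adjustment */
		rxdv-skew-ps = <420>;	/* neutral (default) value for CTL pads */
		txen-skew-ps = <0>;	/* maximum negative adjustment */
	};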
@@ -10,6 +10,7 @@ Required properties:

      "renesas,etheravb-r8a7793" if the device is a part of R8A7793 SoC.
      "renesas,etheravb-r8a7794" if the device is a part of R8A7794 SoC.
      "renesas,etheravb-r8a7795" if the device is a part of R8A7795 SoC.
      "renesas,etheravb-r8a7796" if the device is a part of R8A7796 SoC.
      "renesas,etheravb-rcar-gen2" for generic R-Car Gen 2 compatible interface.
      "renesas,etheravb-rcar-gen3" for generic R-Car Gen 3 compatible interface.

@@ -33,7 +34,7 @@ Optional properties:

- interrupt-parent: the phandle for the interrupt controller that services
  interrupts for this device.
- interrupt-names: A list of interrupt names.
  For the R8A7795 SoC this property is mandatory;
  For the R8A779[56] SoCs this property is mandatory;
  it should include one entry per channel, named "ch%u",
  where %u is the channel number ranging from 0 to 24.
  For other SoCs this property is optional; if present
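As an illustrative fragment (not part of this patch), an R8A7795/R8A7796 node would then name one interrupt per channel:

	interrupt-names = "ch0", "ch1", "ch2", "ch3",
			  /* ... one entry per channel ... */
			  "ch24";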
@@ -5033,6 +5033,13 @@ F: drivers/net/ethernet/freescale/fec_ptp.c
F: drivers/net/ethernet/freescale/fec.h
F: Documentation/devicetree/bindings/net/fsl-fec.txt

FREESCALE QORIQ DPAA FMAN DRIVER
M: Madalin Bucur <madalin.bucur@nxp.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/freescale/fman
F: Documentation/devicetree/bindings/powerpc/fsl/fman.txt

FREESCALE QUICC ENGINE LIBRARY
L: linuxppc-dev@lists.ozlabs.org
S: Orphan

@@ -13128,6 +13135,7 @@ F: arch/arm64/include/asm/xen/

XEN NETWORK BACKEND DRIVER
M: Wei Liu <wei.liu2@citrix.com>
M: Paul Durrant <paul.durrant@citrix.com>
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
L: netdev@vger.kernel.org
S: Supported
@@ -314,6 +314,7 @@ static const struct usb_device_id blacklist_table[] = {

	/* Marvell Bluetooth devices */
	{ USB_DEVICE(0x1286, 0x2044), .driver_info = BTUSB_MARVELL },
	{ USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL },
	{ USB_DEVICE(0x1286, 0x204e), .driver_info = BTUSB_MARVELL },

	/* Intel Bluetooth devices */
	{ USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },

@@ -1042,6 +1043,10 @@ static int btusb_open(struct hci_dev *hdev)

	BT_DBG("%s", hdev->name);

	err = usb_autopm_get_interface(data->intf);
	if (err < 0)
		return err;

	/* Patching USB firmware files prior to starting any URBs of HCI path
	 * It is more safe to use USB bulk channel for downloading USB patch
	 */

@@ -1051,10 +1056,6 @@ static int btusb_open(struct hci_dev *hdev)
		return err;
	}

	err = usb_autopm_get_interface(data->intf);
	if (err < 0)
		return err;

	data->intf->needs_remote_wakeup = 1;

	if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags))
@@ -1046,7 +1046,7 @@ static void bgmac_enable(struct bgmac *bgmac)

	mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
		BGMAC_DS_MM_SHIFT;
	if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST || mode != 0)
	if (!(bgmac->feature_flags & BGMAC_FEAT_CLKCTLST) || mode != 0)
		bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
	if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST && mode == 2)
		bgmac_cco_ctl_maskset(bgmac, 1, ~0,
@@ -3117,6 +3117,7 @@ static int macb_remove(struct platform_device *pdev)
		if (dev->phydev)
			phy_disconnect(dev->phydev);
		mdiobus_unregister(bp->mii_bus);
		dev->phydev = NULL;
		mdiobus_free(bp->mii_bus);

		/* Shutdown the PHY if there is a GPIO reset */
@@ -2728,6 +2728,26 @@ static int be_flash(struct be_adapter *adapter, const u8 *img,
	return 0;
}

#define NCSI_UPDATE_LOG "NCSI section update is not supported in FW ver %s\n"
static bool be_fw_ncsi_supported(char *ver)
{
	int v1[4] = {3, 102, 148, 0}; /* Min ver that supports NCSI FW */
	int v2[4];
	int i;

	if (sscanf(ver, "%d.%d.%d.%d", &v2[0], &v2[1], &v2[2], &v2[3]) != 4)
		return false;

	for (i = 0; i < 4; i++) {
		if (v1[i] < v2[i])
			return true;
		else if (v1[i] > v2[i])
			return false;
	}

	return true;
}

/* For BE2, BE3 and BE3-R */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,

@@ -2805,8 +2825,10 @@ static int be_flash_BEx(struct be_adapter *adapter,
			continue;

		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
		    !be_fw_ncsi_supported(adapter->fw_ver)) {
			dev_info(dev, NCSI_UPDATE_LOG, adapter->fw_ver);
			continue;
		}

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
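For reference (results inferred from the new helper above, not text from the patch), the field-by-field numeric comparison fixes cases where the old string memcmp() gave the wrong answer:

	be_fw_ncsi_supported("3.102.148.0")  -> true  (equal to the minimum version)
	be_fw_ncsi_supported("3.9.0.0")      -> false (9 < 102 numerically, although a string compare treats "3.9" as greater than "3.102")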
@@ -3527,6 +3549,11 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
		for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++)
			adapter->serial_num[i] = le32_to_cpu(serial_num[i]) &
				(BIT_MASK(16) - 1);
		/* For BEx, since GET_FUNC_CONFIG command is not
		 * supported, we read funcnum here as a workaround.
		 */
		if (BEx_chip(adapter))
			adapter->pf_num = attribs->hba_attribs.pci_funcnum;
	}

err:

@@ -4950,7 +4977,7 @@ int be_cmd_set_logical_link_config(struct be_adapter *adapter,
{
	int status;

	if (BEx_chip(adapter))
	if (BE2_chip(adapter))
		return -EOPNOTSUPP;

	status = __be_cmd_set_logical_link_config(adapter, link_state,
@@ -1720,7 +1720,11 @@ struct mgmt_hba_attribs {
	u32 rsvd2[55];
	u8 rsvd3[3];
	u8 phy_port;
	u32 rsvd4[13];
	u32 rsvd4[15];
	u8 rsvd5[2];
	u8 pci_funcnum;
	u8 rsvd6;
	u32 rsvd7[6];
} __packed;

struct mgmt_controller_attrib {
@@ -1,5 +1,5 @@
/*
 * Copyright (C) 2005 - 2015 Emulex
 * Copyright (C) 2005-2016 Broadcom.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
@@ -724,14 +724,24 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
	netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
}

static int be_gso_hdr_len(struct sk_buff *skb)
{
	if (skb->encapsulation)
		return skb_inner_transport_offset(skb) +
		       inner_tcp_hdrlen(skb);
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}

static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);
	u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
	u32 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
	/* Account for headers which get duplicated in TSO pkt */
	u32 dup_hdr_len = tx_pkts > 1 ? be_gso_hdr_len(skb) * (tx_pkts - 1) : 0;

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len;
	stats->tx_bytes += skb->len + dup_hdr_len;
	stats->tx_pkts += tx_pkts;
	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
		stats->tx_vxlan_offload_pkts += tx_pkts;
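A quick worked example of the accounting change above (numbers are illustrative, not from the patch): for a TSO skb with gso_segs = 10 and an Ethernet + IP + TCP header length of 66 bytes, be_gso_hdr_len() returns 66, so dup_hdr_len = 66 * (10 - 1) = 594, and tx_bytes now grows by skb->len + 594 to account for the headers replicated into each of the 10 wire packets.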
@@ -1,7 +1,9 @@
subdir-ccflags-y += -I$(srctree)/drivers/net/ethernet/freescale/fman

obj-y += fsl_fman.o fsl_fman_mac.o fsl_mac.o
obj-$(CONFIG_FSL_FMAN) += fsl_fman.o
obj-$(CONFIG_FSL_FMAN) += fsl_fman_port.o
obj-$(CONFIG_FSL_FMAN) += fsl_mac.o

fsl_fman-objs := fman_muram.o fman.o fman_sp.o fman_port.o
fsl_fman_mac-objs := fman_dtsec.o fman_memac.o fman_tgec.o
fsl_mac-objs += mac.o
fsl_fman-objs := fman_muram.o fman.o fman_sp.o
fsl_fman_port-objs := fman_port.o
fsl_mac-objs:= mac.o fman_dtsec.o fman_memac.o fman_tgec.o
@@ -618,7 +618,7 @@ struct fman {
	unsigned long cam_offset;
	size_t cam_size;
	/* Fifo in MURAM */
	int fifo_offset;
	unsigned long fifo_offset;
	size_t fifo_size;

	u32 liodn_base[64];

@@ -2036,7 +2036,7 @@ static int fman_init(struct fman *fman)
	/* allocate MURAM for FIFO according to total size */
	fman->fifo_offset = fman_muram_alloc(fman->muram,
					     fman->state->total_fifo_size);
	if (IS_ERR_VALUE(fman->cam_offset)) {
	if (IS_ERR_VALUE(fman->fifo_offset)) {
		free_init_resources(fman);
		dev_err(fman->dev, "%s: MURAM alloc for BMI FIFO failed\n",
			__func__);
@ -2115,6 +2115,7 @@ void fman_register_intr(struct fman *fman, enum fman_event_modules module,
|
|||
fman->intr_mng[event].isr_cb = isr_cb;
|
||||
fman->intr_mng[event].src_handle = src_arg;
|
||||
}
|
||||
EXPORT_SYMBOL(fman_register_intr);
|
||||
|
||||
/**
|
||||
* fman_unregister_intr
|
||||
|
@ -2138,6 +2139,7 @@ void fman_unregister_intr(struct fman *fman, enum fman_event_modules module,
|
|||
fman->intr_mng[event].isr_cb = NULL;
|
||||
fman->intr_mng[event].src_handle = NULL;
|
||||
}
|
||||
EXPORT_SYMBOL(fman_unregister_intr);
|
||||
|
||||
/**
|
||||
* fman_set_port_params
|
||||
|
@ -2241,6 +2243,7 @@ int fman_set_port_params(struct fman *fman,
|
|||
spin_unlock_irqrestore(&fman->spinlock, flags);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL(fman_set_port_params);
|
||||
|
||||
/**
|
||||
* fman_reset_mac
|
||||
|
@ -2310,6 +2313,7 @@ int fman_reset_mac(struct fman *fman, u8 mac_id)
|
|||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(fman_reset_mac);
|
||||
|
||||
/**
|
||||
* fman_set_mac_max_frame
|
||||
|
@ -2327,8 +2331,7 @@ int fman_set_mac_max_frame(struct fman *fman, u8 mac_id, u16 mfl)
|
|||
* or equal to the port's max
|
||||
*/
|
||||
if ((!fman->state->port_mfl[mac_id]) ||
|
||||
(fman->state->port_mfl[mac_id] &&
|
||||
(mfl <= fman->state->port_mfl[mac_id]))) {
|
||||
(mfl <= fman->state->port_mfl[mac_id])) {
|
||||
fman->state->mac_mfl[mac_id] = mfl;
|
||||
} else {
|
||||
dev_warn(fman->dev, "%s: MAC max_frame_length is larger than Port max_frame_length\n",
|
||||
|
@ -2337,6 +2340,7 @@ int fman_set_mac_max_frame(struct fman *fman, u8 mac_id, u16 mfl)
|
|||
}
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(fman_set_mac_max_frame);
|
||||
|
||||
/**
|
||||
* fman_get_clock_freq
|
||||
|
@ -2363,6 +2367,7 @@ u32 fman_get_bmi_max_fifo_size(struct fman *fman)
|
|||
{
|
||||
return fman->state->bmi_max_fifo_size;
|
||||
}
|
||||
EXPORT_SYMBOL(fman_get_bmi_max_fifo_size);
|
||||
|
||||
/**
|
||||
* fman_get_revision
|
||||
|
@ -2384,6 +2389,7 @@ void fman_get_revision(struct fman *fman, struct fman_rev_info *rev_info)
|
|||
FPM_REV1_MAJOR_SHIFT);
|
||||
rev_info->minor = tmp & FPM_REV1_MINOR_MASK;
|
||||
}
|
||||
EXPORT_SYMBOL(fman_get_revision);
|
||||
|
||||
/**
|
||||
* fman_get_qman_channel_id
|
||||
|
@ -2419,6 +2425,7 @@ u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id)
|
|||
|
||||
return fman->state->qman_channel_base + i;
|
||||
}
|
||||
EXPORT_SYMBOL(fman_get_qman_channel_id);
|
||||
|
||||
/**
|
||||
* fman_get_mem_region
|
||||
|
@ -2432,6 +2439,7 @@ struct resource *fman_get_mem_region(struct fman *fman)
|
|||
{
|
||||
return fman->state->res;
|
||||
}
|
||||
EXPORT_SYMBOL(fman_get_mem_region);
|
||||
|
||||
/* Bootargs defines */
|
||||
/* Extra headroom for RX buffers - Default, min and max */
|
||||
|
@ -2453,7 +2461,7 @@ struct resource *fman_get_mem_region(struct fman *fman)
|
|||
* particular forwarding scenarios that add extra headers to the
|
||||
* forwarded frame.
|
||||
*/
|
||||
int fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
|
||||
static int fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
|
||||
module_param(fsl_fm_rx_extra_headroom, int, 0);
|
||||
MODULE_PARM_DESC(fsl_fm_rx_extra_headroom, "Extra headroom for Rx buffers");
|
||||
|
||||
|
@ -2466,7 +2474,7 @@ MODULE_PARM_DESC(fsl_fm_rx_extra_headroom, "Extra headroom for Rx buffers");
|
|||
* Could be overridden once, at boot-time, via the
|
||||
* fm_set_max_frm() callback.
|
||||
*/
|
||||
int fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
|
||||
static int fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
|
||||
module_param(fsl_fm_max_frm, int, 0);
|
||||
MODULE_PARM_DESC(fsl_fm_max_frm, "Maximum frame size, across all interfaces");
|
||||
|
||||
|
@ -2538,6 +2546,7 @@ struct fman *fman_bind(struct device *fm_dev)
|
|||
{
|
||||
return (struct fman *)(dev_get_drvdata(get_device(fm_dev)));
|
||||
}
|
||||
EXPORT_SYMBOL(fman_bind);
|
||||
|
||||
static irqreturn_t fman_err_irq(int irq, void *handle)
|
||||
{
|
||||
|
@ -2727,8 +2736,8 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
|
|||
struct fman *fman;
|
||||
struct device_node *fm_node, *muram_node;
|
||||
struct resource *res;
|
||||
const u32 *u32_prop;
|
||||
int lenp, err, irq;
|
||||
u32 val, range[2];
|
||||
int err, irq;
|
||||
struct clk *clk;
|
||||
u32 clk_rate;
|
||||
phys_addr_t phys_base_addr;
|
||||
|
@ -2740,16 +2749,13 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
|
|||
|
||||
fm_node = of_node_get(of_dev->dev.of_node);
|
||||
|
||||
u32_prop = (const u32 *)of_get_property(fm_node, "cell-index", &lenp);
|
||||
if (!u32_prop) {
|
||||
dev_err(&of_dev->dev, "%s: of_get_property(%s, cell-index) failed\n",
|
||||
err = of_property_read_u32(fm_node, "cell-index", &val);
|
||||
if (err) {
|
||||
dev_err(&of_dev->dev, "%s: failed to read cell-index for %s\n",
|
||||
__func__, fm_node->full_name);
|
||||
goto fman_node_put;
|
||||
}
|
||||
if (WARN_ON(lenp != sizeof(u32)))
|
||||
goto fman_node_put;
|
||||
|
||||
fman->dts_params.id = (u8)fdt32_to_cpu(u32_prop[0]);
|
||||
fman->dts_params.id = (u8)val;
|
||||
|
||||
/* Get the FM interrupt */
|
||||
res = platform_get_resource(of_dev, IORESOURCE_IRQ, 0);
|
||||
|
@ -2796,18 +2802,15 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
|
|||
/* Rounding to MHz */
|
||||
fman->dts_params.clk_freq = DIV_ROUND_UP(clk_rate, 1000000);
|
||||
|
||||
u32_prop = (const u32 *)of_get_property(fm_node,
|
||||
"fsl,qman-channel-range",
|
||||
&lenp);
|
||||
if (!u32_prop) {
|
||||
dev_err(&of_dev->dev, "%s: of_get_property(%s, fsl,qman-channel-range) failed\n",
|
||||
err = of_property_read_u32_array(fm_node, "fsl,qman-channel-range",
|
||||
&range[0], 2);
|
||||
if (err) {
|
||||
dev_err(&of_dev->dev, "%s: failed to read fsl,qman-channel-range for %s\n",
|
||||
__func__, fm_node->full_name);
|
||||
goto fman_node_put;
|
||||
}
|
||||
if (WARN_ON(lenp != sizeof(u32) * 2))
|
||||
goto fman_node_put;
|
||||
fman->dts_params.qman_channel_base = fdt32_to_cpu(u32_prop[0]);
|
||||
fman->dts_params.num_of_qman_channels = fdt32_to_cpu(u32_prop[1]);
|
||||
fman->dts_params.qman_channel_base = range[0];
|
||||
fman->dts_params.num_of_qman_channels = range[1];
|
||||
|
||||
/* Get the MURAM base address and size */
|
||||
muram_node = of_find_matching_node(fm_node, fman_muram_match);
|
||||
|
@ -2858,7 +2861,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
|
|||
|
||||
fman->dts_params.base_addr =
|
||||
devm_ioremap(&of_dev->dev, phys_base_addr, mem_size);
|
||||
if (fman->dts_params.base_addr == 0) {
|
||||
if (!fman->dts_params.base_addr) {
|
||||
dev_err(&of_dev->dev, "%s: devm_ioremap() failed\n", __func__);
|
||||
goto fman_free;
|
||||
}
|
||||
|
@ -2930,7 +2933,7 @@ static const struct of_device_id fman_match[] = {
|
|||
{}
|
||||
};
|
||||
|
||||
MODULE_DEVICE_TABLE(of, fm_match);
|
||||
MODULE_DEVICE_TABLE(of, fman_match);
|
||||
|
||||
static struct platform_driver fman_driver = {
|
||||
.driver = {
|
||||
|
@ -2940,4 +2943,25 @@ static struct platform_driver fman_driver = {
|
|||
.probe = fman_probe,
|
||||
};
|
||||
|
||||
builtin_platform_driver(fman_driver);
|
||||
static int __init fman_load(void)
|
||||
{
|
||||
int err;
|
||||
|
||||
pr_debug("FSL DPAA FMan driver\n");
|
||||
|
||||
err = platform_driver_register(&fman_driver);
|
||||
if (err < 0)
|
||||
pr_err("Error, platform_driver_register() = %d\n", err);
|
||||
|
||||
return err;
|
||||
}
|
||||
module_init(fman_load);
|
||||
|
||||
static void __exit fman_unload(void)
|
||||
{
|
||||
platform_driver_unregister(&fman_driver);
|
||||
}
|
||||
module_exit(fman_unload);
|
||||
|
||||
MODULE_LICENSE("Dual BSD/GPL");
|
||||
MODULE_DESCRIPTION("Freescale DPAA Frame Manager driver");
|
||||
|
|
|
@ -191,10 +191,6 @@ struct fman_mac_params {
|
|||
u16 max_speed;
|
||||
/* A handle to the FM object this port related to */
|
||||
void *fm;
|
||||
/* MDIO exceptions interrupt source - not valid for all
|
||||
* MACs; MUST be set to 0 for MACs that don't have
|
||||
* mdio-irq, or for polling
|
||||
*/
|
||||
void *dev_id; /* device cookie used by the exception cbs */
|
||||
fman_mac_exception_cb *event_cb; /* MDIO Events Callback Routine */
|
||||
fman_mac_exception_cb *exception_cb;/* Exception Callback Routine */
|
||||
|
|
|
@ -507,6 +507,9 @@ static void setup_sgmii_internal_phy(struct fman_mac *memac,
|
|||
{
|
||||
u16 tmp_reg16;
|
||||
|
||||
if (WARN_ON(!memac->pcsphy))
|
||||
return;
|
||||
|
||||
/* SGMII mode */
|
||||
tmp_reg16 = IF_MODE_SGMII_EN;
|
||||
if (!fixed_link)
|
||||
|
@ -1151,7 +1154,8 @@ struct fman_mac *memac_config(struct fman_mac_params *params)
|
|||
/* Save FMan revision */
|
||||
fman_get_revision(memac->fm, &memac->fm_rev_info);
|
||||
|
||||
if (memac->phy_if == PHY_INTERFACE_MODE_SGMII) {
|
||||
if (memac->phy_if == PHY_INTERFACE_MODE_SGMII ||
|
||||
memac->phy_if == PHY_INTERFACE_MODE_QSGMII) {
|
||||
if (!params->internal_phy_node) {
|
||||
pr_err("PCS PHY node is not available\n");
|
||||
memac_free(memac);
|
||||
|
|
|
@ -150,7 +150,8 @@ unsigned long fman_muram_alloc(struct muram_info *muram, size_t size)
|
|||
*
|
||||
* Free an allocated memory from FM-MURAM partition.
|
||||
*/
|
||||
void fman_muram_free_mem(struct muram_info *muram, unsigned long offset, size_t size)
|
||||
void fman_muram_free_mem(struct muram_info *muram, unsigned long offset,
|
||||
size_t size)
|
||||
{
|
||||
unsigned long addr = fman_muram_offset_to_vbase(muram, offset);
|
||||
|
||||
|
|
|
@ -46,6 +46,7 @@ unsigned long fman_muram_offset_to_vbase(struct muram_info *muram,
|
|||
|
||||
unsigned long fman_muram_alloc(struct muram_info *muram, size_t size);
|
||||
|
||||
void fman_muram_free_mem(struct muram_info *muram, unsigned long offset, size_t size);
|
||||
void fman_muram_free_mem(struct muram_info *muram, unsigned long offset,
|
||||
size_t size);
|
||||
|
||||
#endif /* __FM_MURAM_EXT */
|
||||
|
|
|
@ -1477,7 +1477,8 @@ EXPORT_SYMBOL(fman_port_cfg_buf_prefix_content);
|
|||
*/
|
||||
int fman_port_disable(struct fman_port *port)
|
||||
{
|
||||
u32 __iomem *bmi_cfg_reg, *bmi_status_reg, tmp;
|
||||
u32 __iomem *bmi_cfg_reg, *bmi_status_reg;
|
||||
u32 tmp;
|
||||
bool rx_port, failure = false;
|
||||
int count;
|
||||
|
||||
|
@ -1553,7 +1554,8 @@ EXPORT_SYMBOL(fman_port_disable);
|
|||
*/
|
||||
int fman_port_enable(struct fman_port *port)
|
||||
{
|
||||
u32 __iomem *bmi_cfg_reg, tmp;
|
||||
u32 __iomem *bmi_cfg_reg;
|
||||
u32 tmp;
|
||||
bool rx_port;
|
||||
|
||||
if (!is_init_done(port->cfg))
|
||||
|
@ -1623,7 +1625,7 @@ static int fman_port_probe(struct platform_device *of_dev)
|
|||
struct device_node *fm_node, *port_node;
|
||||
struct resource res;
|
||||
struct resource *dev_res;
|
||||
const u32 *u32_prop;
|
||||
u32 val;
|
||||
int err = 0, lenp;
|
||||
enum fman_port_type port_type;
|
||||
u16 port_speed;
|
||||
|
@ -1652,28 +1654,20 @@ static int fman_port_probe(struct platform_device *of_dev)
|
|||
goto return_err;
|
||||
}
|
||||
|
||||
u32_prop = (const u32 *)of_get_property(port_node, "cell-index", &lenp);
|
||||
if (!u32_prop) {
|
||||
dev_err(port->dev, "%s: of_get_property(%s, cell-index) failed\n",
|
||||
err = of_property_read_u32(port_node, "cell-index", &val);
|
||||
if (err) {
|
||||
dev_err(port->dev, "%s: reading cell-index for %s failed\n",
|
||||
__func__, port_node->full_name);
|
||||
err = -EINVAL;
|
||||
goto return_err;
|
||||
}
|
||||
if (WARN_ON(lenp != sizeof(u32))) {
|
||||
err = -EINVAL;
|
||||
goto return_err;
|
||||
}
|
||||
port_id = (u8)fdt32_to_cpu(u32_prop[0]);
|
||||
|
||||
port_id = (u8)val;
|
||||
port->dts_params.id = port_id;
|
||||
|
||||
if (of_device_is_compatible(port_node, "fsl,fman-v3-port-tx")) {
|
||||
port_type = FMAN_PORT_TYPE_TX;
|
||||
port_speed = 1000;
|
||||
u32_prop = (const u32 *)of_get_property(port_node,
|
||||
"fsl,fman-10g-port",
|
||||
&lenp);
|
||||
if (u32_prop)
|
||||
if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
|
||||
port_speed = 10000;
|
||||
|
||||
} else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-tx")) {
|
||||
|
@ -1686,9 +1680,7 @@ static int fman_port_probe(struct platform_device *of_dev)
|
|||
} else if (of_device_is_compatible(port_node, "fsl,fman-v3-port-rx")) {
|
||||
port_type = FMAN_PORT_TYPE_RX;
|
||||
port_speed = 1000;
|
||||
u32_prop = (const u32 *)of_get_property(port_node,
|
||||
"fsl,fman-10g-port", &lenp);
|
||||
if (u32_prop)
|
||||
if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
|
||||
port_speed = 10000;
|
||||
|
||||
} else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-rx")) {
|
||||
|
@ -1743,7 +1735,7 @@ static int fman_port_probe(struct platform_device *of_dev)
|
|||
|
||||
port->dts_params.base_addr = devm_ioremap(port->dev, res.start,
|
||||
resource_size(&res));
|
||||
if (port->dts_params.base_addr == 0)
|
||||
if (!port->dts_params.base_addr)
|
||||
dev_err(port->dev, "%s: devm_ioremap() failed\n", __func__);
|
||||
|
||||
dev_set_drvdata(&of_dev->dev, port);
|
||||
|
@ -1775,4 +1767,25 @@ static struct platform_driver fman_port_driver = {
|
|||
.probe = fman_port_probe,
|
||||
};
|
||||
|
||||
builtin_platform_driver(fman_port_driver);
|
||||
static int __init fman_port_load(void)
|
||||
{
|
||||
int err;
|
||||
|
||||
pr_debug("FSL DPAA FMan driver\n");
|
||||
|
||||
err = platform_driver_register(&fman_port_driver);
|
||||
if (err < 0)
|
||||
pr_err("Error, platform_driver_register() = %d\n", err);
|
||||
|
||||
return err;
|
||||
}
|
||||
module_init(fman_port_load);
|
||||
|
||||
static void __exit fman_port_unload(void)
|
||||
{
|
||||
platform_driver_unregister(&fman_port_driver);
|
||||
}
|
||||
module_exit(fman_port_unload);
|
||||
|
||||
MODULE_LICENSE("Dual BSD/GPL");
|
||||
MODULE_DESCRIPTION("Freescale DPAA Frame Manager Port driver");
|
||||
|
|
|
@ -80,6 +80,7 @@ void fman_sp_set_buf_pools_in_asc_order_of_buf_sizes(struct fman_ext_pools
|
|||
}
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(fman_sp_set_buf_pools_in_asc_order_of_buf_sizes);
|
||||
|
||||
int fman_sp_build_buffer_struct(struct fman_sp_int_context_data_copy *
|
||||
int_context_data_copy,
|
||||
|
@ -164,3 +165,5 @@ int fman_sp_build_buffer_struct(struct fman_sp_int_context_data_copy *
|
|||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(fman_sp_build_buffer_struct);
|
||||
|
||||
|
|
|
@ -469,9 +469,9 @@ static void adjust_link_memac(struct net_device *net_dev)
|
|||
/* Initializes driver's PHY state, and attaches to the PHY.
|
||||
* Returns 0 on success.
|
||||
*/
|
||||
static int init_phy(struct net_device *net_dev,
|
||||
struct mac_device *mac_dev,
|
||||
void (*adj_lnk)(struct net_device *))
|
||||
static struct phy_device *init_phy(struct net_device *net_dev,
|
||||
struct mac_device *mac_dev,
|
||||
void (*adj_lnk)(struct net_device *))
|
||||
{
|
||||
struct phy_device *phy_dev;
|
||||
struct mac_priv_s *priv = mac_dev->priv;
|
||||
|
@ -480,7 +480,7 @@ static int init_phy(struct net_device *net_dev,
|
|||
priv->phy_if);
|
||||
if (!phy_dev) {
|
||||
netdev_err(net_dev, "Could not connect to PHY\n");
|
||||
return -ENODEV;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Remove any features not supported by the controller */
|
||||
|
@ -493,23 +493,23 @@ static int init_phy(struct net_device *net_dev,
|
|||
|
||||
mac_dev->phy_dev = phy_dev;
|
||||
|
||||
return 0;
|
||||
return phy_dev;
|
||||
}
|
||||
|
||||
static int dtsec_init_phy(struct net_device *net_dev,
|
||||
struct mac_device *mac_dev)
|
||||
static struct phy_device *dtsec_init_phy(struct net_device *net_dev,
|
||||
struct mac_device *mac_dev)
|
||||
{
|
||||
return init_phy(net_dev, mac_dev, &adjust_link_dtsec);
|
||||
}
|
||||
|
||||
static int tgec_init_phy(struct net_device *net_dev,
|
||||
struct mac_device *mac_dev)
|
||||
static struct phy_device *tgec_init_phy(struct net_device *net_dev,
|
||||
struct mac_device *mac_dev)
|
||||
{
|
||||
return init_phy(net_dev, mac_dev, adjust_link_void);
|
||||
}
|
||||
|
||||
static int memac_init_phy(struct net_device *net_dev,
|
||||
struct mac_device *mac_dev)
|
||||
static struct phy_device *memac_init_phy(struct net_device *net_dev,
|
||||
struct mac_device *mac_dev)
|
||||
{
|
||||
return init_phy(net_dev, mac_dev, &adjust_link_memac);
|
||||
}
|
||||
|
@ -583,31 +583,6 @@ static void setup_memac(struct mac_device *mac_dev)
|
|||
|
||||
static DEFINE_MUTEX(eth_lock);
|
||||
|
||||
static const char phy_str[][11] = {
|
||||
[PHY_INTERFACE_MODE_MII] = "mii",
|
||||
[PHY_INTERFACE_MODE_GMII] = "gmii",
|
||||
[PHY_INTERFACE_MODE_SGMII] = "sgmii",
|
||||
[PHY_INTERFACE_MODE_TBI] = "tbi",
|
||||
[PHY_INTERFACE_MODE_RMII] = "rmii",
|
||||
[PHY_INTERFACE_MODE_RGMII] = "rgmii",
|
||||
[PHY_INTERFACE_MODE_RGMII_ID] = "rgmii-id",
|
||||
[PHY_INTERFACE_MODE_RGMII_RXID] = "rgmii-rxid",
|
||||
[PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid",
|
||||
[PHY_INTERFACE_MODE_RTBI] = "rtbi",
|
||||
[PHY_INTERFACE_MODE_XGMII] = "xgmii"
|
||||
};
|
||||
|
||||
static phy_interface_t __pure __attribute__((nonnull)) str2phy(const char *str)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(phy_str); i++)
|
||||
if (strcmp(str, phy_str[i]) == 0)
|
||||
return (phy_interface_t)i;
|
||||
|
||||
return PHY_INTERFACE_MODE_MII;
|
||||
}
|
||||
|
||||
static const u16 phy2speed[] = {
|
||||
[PHY_INTERFACE_MODE_MII] = SPEED_100,
|
||||
[PHY_INTERFACE_MODE_GMII] = SPEED_1000,
|
||||
|
@ -678,7 +653,7 @@ MODULE_DEVICE_TABLE(of, mac_match);
|
|||
|
||||
static int mac_probe(struct platform_device *_of_dev)
|
||||
{
|
||||
int err, i, lenp, nph;
|
||||
int err, i, nph;
|
||||
struct device *dev;
|
||||
struct device_node *mac_node, *dev_node;
|
||||
struct mac_device *mac_dev;
|
||||
|
@ -686,9 +661,9 @@ static int mac_probe(struct platform_device *_of_dev)
|
|||
struct resource res;
|
||||
struct mac_priv_s *priv;
|
||||
const u8 *mac_addr;
|
||||
const char *char_prop;
|
||||
const u32 *u32_prop;
|
||||
u32 val;
|
||||
u8 fman_id;
|
||||
int phy_if;
|
||||
|
||||
dev = &_of_dev->dev;
|
||||
mac_node = dev->of_node;
|
||||
|
@ -749,16 +724,15 @@ static int mac_probe(struct platform_device *_of_dev)
|
|||
}
|
||||
|
||||
/* Get the FMan cell-index */
|
||||
u32_prop = of_get_property(dev_node, "cell-index", &lenp);
|
||||
if (!u32_prop) {
|
||||
dev_err(dev, "of_get_property(%s, cell-index) failed\n",
|
||||
err = of_property_read_u32(dev_node, "cell-index", &val);
|
||||
if (err) {
|
||||
dev_err(dev, "failed to read cell-index for %s\n",
|
||||
dev_node->full_name);
|
||||
err = -EINVAL;
|
||||
goto _return_of_node_put;
|
||||
}
|
||||
WARN_ON(lenp != sizeof(u32));
|
||||
/* cell-index 0 => FMan id 1 */
|
||||
fman_id = (u8)(fdt32_to_cpu(u32_prop[0]) + 1);
|
||||
fman_id = (u8)(val + 1);
|
||||
|
||||
priv->fman = fman_bind(&of_dev->dev);
|
||||
if (!priv->fman) {
|
||||
|
@ -805,15 +779,14 @@ static int mac_probe(struct platform_device *_of_dev)
|
|||
}
|
||||
|
||||
/* Get the cell-index */
|
||||
u32_prop = of_get_property(mac_node, "cell-index", &lenp);
|
||||
if (!u32_prop) {
|
||||
dev_err(dev, "of_get_property(%s, cell-index) failed\n",
|
||||
err = of_property_read_u32(mac_node, "cell-index", &val);
|
||||
if (err) {
|
||||
dev_err(dev, "failed to read cell-index for %s\n",
|
||||
mac_node->full_name);
|
||||
err = -EINVAL;
|
||||
goto _return_dev_set_drvdata;
|
||||
}
|
||||
WARN_ON(lenp != sizeof(u32));
|
||||
priv->cell_index = (u8)fdt32_to_cpu(u32_prop[0]);
|
||||
priv->cell_index = (u8)val;
|
||||
|
||||
/* Get the MAC address */
|
||||
mac_addr = of_get_mac_address(mac_node);
|
||||
|
@ -870,16 +843,14 @@ static int mac_probe(struct platform_device *_of_dev)
|
|||
}
|
||||
|
||||
/* Get the PHY connection type */
|
||||
char_prop = (const char *)of_get_property(mac_node,
|
||||
"phy-connection-type", NULL);
|
||||
if (!char_prop) {
|
||||
phy_if = of_get_phy_mode(mac_node);
|
||||
if (phy_if < 0) {
|
||||
dev_warn(dev,
|
||||
"of_get_property(%s, phy-connection-type) failed. Defaulting to MII\n",
|
||||
"of_get_phy_mode() for %s failed. Defaulting to SGMII\n",
|
||||
mac_node->full_name);
|
||||
priv->phy_if = PHY_INTERFACE_MODE_MII;
|
||||
} else {
|
||||
priv->phy_if = str2phy(char_prop);
|
||||
phy_if = PHY_INTERFACE_MODE_SGMII;
|
||||
}
|
||||
priv->phy_if = phy_if;
|
||||
|
||||
priv->speed = phy2speed[priv->phy_if];
|
||||
priv->max_speed = priv->speed;
|
||||
|
|
|
@ -58,7 +58,8 @@ struct mac_device {
|
|||
bool tx_pause_active;
|
||||
bool promisc;
|
||||
|
||||
int (*init_phy)(struct net_device *net_dev, struct mac_device *mac_dev);
|
||||
struct phy_device *(*init_phy)(struct net_device *net_dev,
|
||||
struct mac_device *mac_dev);
|
||||
int (*init)(struct mac_device *mac_dev);
|
||||
int (*start)(struct mac_device *mac_dev);
|
||||
int (*stop)(struct mac_device *mac_dev);
|
||||
|
|
|
@ -126,7 +126,7 @@ void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
|
|||
(enum mac_speed)speed, duplex);
|
||||
if (ret) {
|
||||
dev_err(mac_cb->dev,
|
||||
"adjust_link failed,%s mac%d ret = %#x!\n",
|
||||
"adjust_link failed, %s mac%d ret = %#x!\n",
|
||||
mac_cb->dsaf_dev->ae_dev.name,
|
||||
mac_cb->mac_id, ret);
|
||||
return;
|
||||
|
@ -149,7 +149,7 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
|
|||
if (mac_cb->dsaf_dev->dsaf_mode <= DSAF_MODE_ENABLE) {
|
||||
if (mac_cb->mac_id != DSAF_MAX_PORT_NUM) {
|
||||
dev_err(mac_cb->dev,
|
||||
"input invalid,%s mac%d vmid%d !\n",
|
||||
"input invalid, %s mac%d vmid%d !\n",
|
||||
mac_cb->dsaf_dev->ae_dev.name,
|
||||
mac_cb->mac_id, vmid);
|
||||
return -EINVAL;
|
||||
|
@ -157,19 +157,19 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
|
|||
} else if (mac_cb->dsaf_dev->dsaf_mode < DSAF_MODE_MAX) {
|
||||
if (mac_cb->mac_id >= DSAF_MAX_PORT_NUM) {
|
||||
dev_err(mac_cb->dev,
|
||||
"input invalid,%s mac%d vmid%d!\n",
|
||||
"input invalid, %s mac%d vmid%d!\n",
|
||||
mac_cb->dsaf_dev->ae_dev.name,
|
||||
mac_cb->mac_id, vmid);
|
||||
return -EINVAL;
|
||||
}
|
||||
} else {
|
||||
dev_err(mac_cb->dev, "dsaf mode invalid,%s mac%d!\n",
|
||||
dev_err(mac_cb->dev, "dsaf mode invalid, %s mac%d!\n",
|
||||
mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (vmid >= mac_cb->dsaf_dev->rcb_common[0]->max_vfn) {
|
||||
dev_err(mac_cb->dev, "input invalid,%s mac%d vmid%d !\n",
|
||||
dev_err(mac_cb->dev, "input invalid, %s mac%d vmid%d !\n",
|
||||
mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id, vmid);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -196,7 +196,7 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
|
|||
tmp_port = vmid;
|
||||
break;
|
||||
default:
|
||||
dev_err(mac_cb->dev, "dsaf mode invalid,%s mac%d!\n",
|
||||
dev_err(mac_cb->dev, "dsaf mode invalid, %s mac%d!\n",
|
||||
mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -275,7 +275,7 @@ int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
|
|||
ret = hns_dsaf_add_mac_mc_port(dsaf_dev, &mac_entry);
|
||||
if (ret) {
|
||||
dev_err(dsaf_dev->dev,
|
||||
"set mac mc port failed,%s mac%d ret = %#x!\n",
|
||||
"set mac mc port failed, %s mac%d ret = %#x!\n",
|
||||
mac_cb->dsaf_dev->ae_dev.name,
|
||||
mac_cb->mac_id, ret);
|
||||
return ret;
|
||||
|
@ -305,7 +305,7 @@ int hns_mac_del_mac(struct hns_mac_cb *mac_cb, u32 vfn, char *mac)
|
|||
old_mac = &mac_cb->addr_entry_idx[vfn];
|
||||
} else {
|
||||
dev_err(mac_cb->dev,
|
||||
"vf queue is too large,%s mac%d queue = %#x!\n",
|
||||
"vf queue is too large, %s mac%d queue = %#x!\n",
|
||||
mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id, vfn);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -547,7 +547,7 @@ int hns_mac_set_autoneg(struct hns_mac_cb *mac_cb, u8 enable)
|
|||
struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
|
||||
|
||||
if (mac_cb->phy_if == PHY_INTERFACE_MODE_XGMII && enable) {
|
||||
dev_err(mac_cb->dev, "enable autoneg is not allowed!");
|
||||
dev_err(mac_cb->dev, "enabling autoneg is not allowed!\n");
|
||||
return -ENOTSUPP;
|
||||
}
|
||||
|
||||
|
@ -571,7 +571,7 @@ int hns_mac_set_pauseparam(struct hns_mac_cb *mac_cb, u32 rx_en, u32 tx_en)
|
|||
|
||||
if (mac_cb->mac_type == HNAE_PORT_DEBUG) {
|
||||
if (is_ver1 && (tx_en || rx_en)) {
|
||||
dev_err(mac_cb->dev, "macv1 cann't enable tx/rx_pause!");
|
||||
dev_err(mac_cb->dev, "macv1 can't enable tx/rx_pause!\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
@ -926,7 +926,7 @@ int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, struct hns_mac_cb *mac_cb)
|
|||
ret = hns_mac_get_mode(mac_cb->phy_if);
|
||||
if (ret < 0) {
|
||||
dev_err(dsaf_dev->dev,
|
||||
"hns_mac_get_mode failed,mac%d ret = %#x!\n",
|
||||
"hns_mac_get_mode failed, mac%d ret = %#x!\n",
|
||||
mac_cb->mac_id, ret);
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -2323,6 +2323,41 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
|
|||
return err;
|
||||
}
|
||||
|
||||
static int mtk_get_chip_id(struct mtk_eth *eth, u32 *chip_id)
|
||||
{
|
||||
u32 val[2], id[4];
|
||||
|
||||
regmap_read(eth->ethsys, ETHSYS_CHIPID0_3, &val[0]);
|
||||
regmap_read(eth->ethsys, ETHSYS_CHIPID4_7, &val[1]);
|
||||
|
||||
id[3] = ((val[0] >> 16) & 0xff) - '0';
|
||||
id[2] = ((val[0] >> 24) & 0xff) - '0';
|
||||
id[1] = (val[1] & 0xff) - '0';
|
||||
id[0] = ((val[1] >> 8) & 0xff) - '0';
|
||||
|
||||
*chip_id = (id[3] * 1000) + (id[2] * 100) +
|
||||
(id[1] * 10) + id[0];
|
||||
|
||||
if (!(*chip_id)) {
|
||||
dev_err(eth->dev, "failed to get chip id\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
dev_info(eth->dev, "chip id = %d\n", *chip_id);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool mtk_is_hwlro_supported(struct mtk_eth *eth)
|
||||
{
|
||||
switch (eth->chip_id) {
|
||||
case MT7623_ETH:
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static int mtk_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
|
@@ -2362,8 +2397,6 @@ static int mtk_probe(struct platform_device *pdev)
		return PTR_ERR(eth->pctl);
	}

	eth->hwlro = of_property_read_bool(pdev->dev.of_node, "mediatek,hwlro");

	for (i = 0; i < 3; i++) {
		eth->irq[i] = platform_get_irq(pdev, i);
		if (eth->irq[i] < 0) {

@@ -2388,6 +2421,12 @@ static int mtk_probe(struct platform_device *pdev)
	if (err)
		return err;

	err = mtk_get_chip_id(eth, &eth->chip_id);
	if (err)
		return err;

	eth->hwlro = mtk_is_hwlro_supported(eth);

	for_each_child_of_node(pdev->dev.of_node, mac_np) {
		if (!of_device_is_compatible(mac_np,
					     "mediatek,eth-mac"))
@ -342,6 +342,11 @@
|
|||
#define GPIO_BIAS_CTRL 0xed0
|
||||
#define GPIO_DRV_SEL10 0xf00
|
||||
|
||||
/* ethernet subsystem chip id register */
|
||||
#define ETHSYS_CHIPID0_3 0x0
|
||||
#define ETHSYS_CHIPID4_7 0x4
|
||||
#define MT7623_ETH 7623
|
||||
|
||||
/* ethernet subsystem config register */
|
||||
#define ETHSYS_SYSCFG0 0x14
|
||||
#define SYSCFG0_GE_MASK 0x3
|
||||
|
@ -534,6 +539,7 @@ struct mtk_eth {
|
|||
unsigned long sysclk;
|
||||
struct regmap *ethsys;
|
||||
struct regmap *pctl;
|
||||
u32 chip_id;
|
||||
bool hwlro;
|
||||
atomic_t dma_refcnt;
|
||||
struct mtk_tx_ring tx_ring;
|
||||
|
|
|
@@ -26,6 +26,7 @@ config QCA7000

config QCOM_EMAC
	tristate "Qualcomm Technologies, Inc. EMAC Gigabit Ethernet support"
	depends on HAS_DMA && HAS_IOMEM
	select CRC32
	select PHYLIB
	---help---
@ -30,6 +30,8 @@
|
|||
|
||||
#define AM33XX_GMII_SEL_RMII2_IO_CLK_EN BIT(7)
|
||||
#define AM33XX_GMII_SEL_RMII1_IO_CLK_EN BIT(6)
|
||||
#define AM33XX_GMII_SEL_RGMII2_IDMODE BIT(5)
|
||||
#define AM33XX_GMII_SEL_RGMII1_IDMODE BIT(4)
|
||||
|
||||
#define GMII_SEL_MODE_MASK 0x3
|
||||
|
||||
|
@ -48,6 +50,7 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
|
|||
u32 reg;
|
||||
u32 mask;
|
||||
u32 mode = 0;
|
||||
bool rgmii_id = false;
|
||||
|
||||
reg = readl(priv->gmii_sel);
|
||||
|
||||
|
@ -57,10 +60,14 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
|
|||
break;
|
||||
|
||||
case PHY_INTERFACE_MODE_RGMII:
|
||||
mode = AM33XX_GMII_SEL_MODE_RGMII;
|
||||
break;
|
||||
|
||||
case PHY_INTERFACE_MODE_RGMII_ID:
|
||||
case PHY_INTERFACE_MODE_RGMII_RXID:
|
||||
case PHY_INTERFACE_MODE_RGMII_TXID:
|
||||
mode = AM33XX_GMII_SEL_MODE_RGMII;
|
||||
rgmii_id = true;
|
||||
break;
|
||||
|
||||
default:
|
||||
|
@ -83,6 +90,13 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
|
|||
mode |= AM33XX_GMII_SEL_RMII2_IO_CLK_EN;
|
||||
}
|
||||
|
||||
if (rgmii_id) {
|
||||
if (slave == 0)
|
||||
mode |= AM33XX_GMII_SEL_RGMII1_IDMODE;
|
||||
else
|
||||
mode |= AM33XX_GMII_SEL_RGMII2_IDMODE;
|
||||
}
|
||||
|
||||
reg &= ~mask;
|
||||
reg |= mode;
|
||||
|
||||
|
|
|
@ -1769,7 +1769,7 @@ static int ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
|
|||
gelic_ether_setup_netdev_ops(netdev, &card->napi);
|
||||
result = gelic_net_setup_netdev(netdev, card);
|
||||
if (result) {
|
||||
dev_dbg(&dev->core, "%s: setup_netdev failed %d",
|
||||
dev_dbg(&dev->core, "%s: setup_netdev failed %d\n",
|
||||
__func__, result);
|
||||
goto fail_setup_netdev;
|
||||
}
|
||||
|
|
|
@ -818,7 +818,7 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
|
|||
goto out;
|
||||
}
|
||||
if (!(status & XAXIDMA_IRQ_ALL_MASK))
|
||||
dev_err(&ndev->dev, "No interrupts asserted in Tx path");
|
||||
dev_err(&ndev->dev, "No interrupts asserted in Tx path\n");
|
||||
if (status & XAXIDMA_IRQ_ERROR_MASK) {
|
||||
dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
|
||||
dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
|
||||
|
@ -867,7 +867,7 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
|
|||
goto out;
|
||||
}
|
||||
if (!(status & XAXIDMA_IRQ_ALL_MASK))
|
||||
dev_err(&ndev->dev, "No interrupts asserted in Rx path");
|
||||
dev_err(&ndev->dev, "No interrupts asserted in Rx path\n");
|
||||
if (status & XAXIDMA_IRQ_ERROR_MASK) {
|
||||
dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
|
||||
dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
|
||||
|
|
|
@@ -142,6 +142,7 @@ config MDIO_THUNDER

config MDIO_XGENE
	tristate "APM X-Gene SoC MDIO bus controller"
	depends on ARCH_XGENE || COMPILE_TEST
	help
	  This module provides a driver for the MDIO busses found in the
	  APM X-Gene SoC's.

@@ -320,13 +321,6 @@ config XILINX_GMII2RGMII
	  the Reduced Gigabit Media Independent Interface(RGMII) between
	  Ethernet physical media devices and the Gigabit Ethernet controller.

config MDIO_XGENE
	tristate "APM X-Gene SoC MDIO bus controller"
	depends on ARCH_XGENE || COMPILE_TEST
	help
	  This module provides a driver for the MDIO busses found in the
	  APM X-Gene SoC's.

endif # PHYLIB

config MICREL_KS8995MA
@ -13,6 +13,7 @@
|
|||
#include <linux/phy.h>
|
||||
#include <linux/of.h>
|
||||
#include <dt-bindings/net/mscc-phy-vsc8531.h>
|
||||
#include <linux/netdevice.h>
|
||||
|
||||
enum rgmii_rx_clock_delay {
|
||||
RGMII_RX_CLK_DELAY_0_2_NS = 0,
|
||||
|
@ -37,6 +38,7 @@ enum rgmii_rx_clock_delay {
|
|||
|
||||
#define MII_VSC85XX_INT_MASK 25
|
||||
#define MII_VSC85XX_INT_MASK_MASK 0xa000
|
||||
#define MII_VSC85XX_INT_MASK_WOL 0x0040
|
||||
#define MII_VSC85XX_INT_STATUS 26
|
||||
|
||||
#define MSCC_PHY_WOL_MAC_CONTROL 27
|
||||
|
@ -52,6 +54,17 @@ enum rgmii_rx_clock_delay {
|
|||
#define RGMII_RX_CLK_DELAY_MASK 0x0070
|
||||
#define RGMII_RX_CLK_DELAY_POS 4
|
||||
|
||||
#define MSCC_PHY_WOL_LOWER_MAC_ADDR 21
|
||||
#define MSCC_PHY_WOL_MID_MAC_ADDR 22
|
||||
#define MSCC_PHY_WOL_UPPER_MAC_ADDR 23
|
||||
#define MSCC_PHY_WOL_LOWER_PASSWD 24
|
||||
#define MSCC_PHY_WOL_MID_PASSWD 25
|
||||
#define MSCC_PHY_WOL_UPPER_PASSWD 26
|
||||
|
||||
#define MSCC_PHY_WOL_MAC_CONTROL 27
|
||||
#define SECURE_ON_ENABLE 0x8000
|
||||
#define SECURE_ON_PASSWD_LEN_4 0x4000
|
||||
|
||||
/* Microsemi PHY ID's */
|
||||
#define PHY_ID_VSC8531 0x00070570
|
||||
#define PHY_ID_VSC8541 0x00070770
|
||||
|
@ -81,6 +94,117 @@ static int vsc85xx_phy_page_set(struct phy_device *phydev, u8 page)
|
|||
return rc;
|
||||
}
|
||||
|
||||
static int vsc85xx_wol_set(struct phy_device *phydev,
|
||||
struct ethtool_wolinfo *wol)
|
||||
{
|
||||
int rc;
|
||||
u16 reg_val;
|
||||
u8 i;
|
||||
u16 pwd[3] = {0, 0, 0};
|
||||
struct ethtool_wolinfo *wol_conf = wol;
|
||||
u8 *mac_addr = phydev->attached_dev->dev_addr;
|
||||
|
||||
mutex_lock(&phydev->lock);
|
||||
rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2);
|
||||
if (rc != 0)
|
||||
goto out_unlock;
|
||||
|
||||
if (wol->wolopts & WAKE_MAGIC) {
|
||||
/* Store the device address for the magic packet */
|
||||
for (i = 0; i < ARRAY_SIZE(pwd); i++)
|
||||
pwd[i] = mac_addr[5 - (i * 2 + 1)] << 8 |
|
||||
mac_addr[5 - i * 2];
|
||||
phy_write(phydev, MSCC_PHY_WOL_LOWER_MAC_ADDR, pwd[0]);
|
||||
phy_write(phydev, MSCC_PHY_WOL_MID_MAC_ADDR, pwd[1]);
|
||||
phy_write(phydev, MSCC_PHY_WOL_UPPER_MAC_ADDR, pwd[2]);
|
||||
} else {
|
||||
phy_write(phydev, MSCC_PHY_WOL_LOWER_MAC_ADDR, 0);
|
||||
phy_write(phydev, MSCC_PHY_WOL_MID_MAC_ADDR, 0);
|
||||
phy_write(phydev, MSCC_PHY_WOL_UPPER_MAC_ADDR, 0);
|
||||
}
|
||||
|
||||
if (wol_conf->wolopts & WAKE_MAGICSECURE) {
|
||||
for (i = 0; i < ARRAY_SIZE(pwd); i++)
|
||||
pwd[i] = wol_conf->sopass[5 - (i * 2 + 1)] << 8 |
|
||||
wol_conf->sopass[5 - i * 2];
|
||||
phy_write(phydev, MSCC_PHY_WOL_LOWER_PASSWD, pwd[0]);
|
||||
phy_write(phydev, MSCC_PHY_WOL_MID_PASSWD, pwd[1]);
|
||||
phy_write(phydev, MSCC_PHY_WOL_UPPER_PASSWD, pwd[2]);
|
||||
} else {
|
||||
phy_write(phydev, MSCC_PHY_WOL_LOWER_PASSWD, 0);
|
||||
phy_write(phydev, MSCC_PHY_WOL_MID_PASSWD, 0);
|
||||
phy_write(phydev, MSCC_PHY_WOL_UPPER_PASSWD, 0);
|
||||
}
|
||||
|
||||
reg_val = phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL);
|
||||
if (wol_conf->wolopts & WAKE_MAGICSECURE)
|
||||
reg_val |= SECURE_ON_ENABLE;
|
||||
else
|
||||
reg_val &= ~SECURE_ON_ENABLE;
|
||||
phy_write(phydev, MSCC_PHY_WOL_MAC_CONTROL, reg_val);
|
||||
|
||||
rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD);
|
||||
if (rc != 0)
|
||||
goto out_unlock;
|
||||
|
||||
if (wol->wolopts & WAKE_MAGIC) {
|
||||
/* Enable the WOL interrupt */
|
||||
reg_val = phy_read(phydev, MII_VSC85XX_INT_MASK);
|
||||
reg_val |= MII_VSC85XX_INT_MASK_WOL;
|
||||
rc = phy_write(phydev, MII_VSC85XX_INT_MASK, reg_val);
|
||||
if (rc != 0)
|
||||
goto out_unlock;
|
||||
} else {
|
||||
/* Disable the WOL interrupt */
|
||||
reg_val = phy_read(phydev, MII_VSC85XX_INT_MASK);
|
||||
reg_val &= (~MII_VSC85XX_INT_MASK_WOL);
|
||||
rc = phy_write(phydev, MII_VSC85XX_INT_MASK, reg_val);
|
||||
if (rc != 0)
|
||||
goto out_unlock;
|
||||
}
|
||||
/* Clear WOL iterrupt status */
|
||||
reg_val = phy_read(phydev, MII_VSC85XX_INT_STATUS);
|
||||
|
||||
out_unlock:
|
||||
mutex_unlock(&phydev->lock);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static void vsc85xx_wol_get(struct phy_device *phydev,
|
||||
struct ethtool_wolinfo *wol)
|
||||
{
|
||||
int rc;
|
||||
u16 reg_val;
|
||||
u8 i;
|
||||
u16 pwd[3] = {0, 0, 0};
|
||||
struct ethtool_wolinfo *wol_conf = wol;
|
||||
|
||||
mutex_lock(&phydev->lock);
|
||||
rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2);
|
||||
if (rc != 0)
|
||||
goto out_unlock;
|
||||
|
||||
reg_val = phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL);
|
||||
if (reg_val & SECURE_ON_ENABLE)
|
||||
wol_conf->wolopts |= WAKE_MAGICSECURE;
|
||||
if (wol_conf->wolopts & WAKE_MAGICSECURE) {
|
||||
pwd[0] = phy_read(phydev, MSCC_PHY_WOL_LOWER_PASSWD);
|
||||
pwd[1] = phy_read(phydev, MSCC_PHY_WOL_MID_PASSWD);
|
||||
pwd[2] = phy_read(phydev, MSCC_PHY_WOL_UPPER_PASSWD);
|
||||
for (i = 0; i < ARRAY_SIZE(pwd); i++) {
|
||||
wol_conf->sopass[5 - i * 2] = pwd[i] & 0x00ff;
|
||||
wol_conf->sopass[5 - (i * 2 + 1)] = (pwd[i] & 0xff00)
|
||||
>> 8;
|
||||
}
|
||||
}
|
||||
|
||||
rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD);
|
||||
|
||||
out_unlock:
|
||||
mutex_unlock(&phydev->lock);
|
||||
}
|
||||
|
||||
static u8 edge_rate_magic_get(u16 vddmac,
|
||||
int slowdown)
|
||||
{
|
||||
|
@ -301,6 +425,8 @@ static struct phy_driver vsc85xx_driver[] = {
|
|||
.suspend = &genphy_suspend,
|
||||
.resume = &genphy_resume,
|
||||
.probe = &vsc85xx_probe,
|
||||
.set_wol = &vsc85xx_wol_set,
|
||||
.get_wol = &vsc85xx_wol_get,
|
||||
},
|
||||
{
|
||||
.phy_id = PHY_ID_VSC8541,
|
||||
|
@ -318,6 +444,8 @@ static struct phy_driver vsc85xx_driver[] = {
|
|||
.suspend = &genphy_suspend,
|
||||
.resume = &genphy_resume,
|
||||
.probe = &vsc85xx_probe,
|
||||
.set_wol = &vsc85xx_wol_set,
|
||||
.get_wol = &vsc85xx_wol_get,
|
||||
}
|
||||
|
||||
};
|
||||
|
|
|
@@ -295,11 +295,11 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
	qe_muram_free(priv->ucc_pram_offset);
free_tx_bd:
	dma_free_coherent(priv->dev,
			  TX_BD_RING_LEN * sizeof(struct qe_bd),
			  TX_BD_RING_LEN * sizeof(struct qe_bd *),
			  priv->tx_bd_base, priv->dma_tx_bd);
free_rx_bd:
	dma_free_coherent(priv->dev,
			  RX_BD_RING_LEN * sizeof(struct qe_bd),
			  RX_BD_RING_LEN * sizeof(struct qe_bd *),
			  priv->rx_bd_base, priv->dma_rx_bd);
free_uccf:
	ucc_fast_free(priv->uccf);

@@ -688,7 +688,7 @@ static void uhdlc_memclean(struct ucc_hdlc_private *priv)

	if (priv->rx_bd_base) {
		dma_free_coherent(priv->dev,
				  RX_BD_RING_LEN * sizeof(struct qe_bd),
				  RX_BD_RING_LEN * sizeof(struct qe_bd *),
				  priv->rx_bd_base, priv->dma_rx_bd);

		priv->rx_bd_base = NULL;

@@ -697,7 +697,7 @@ static void uhdlc_memclean(struct ucc_hdlc_private *priv)

	if (priv->tx_bd_base) {
		dma_free_coherent(priv->dev,
				  TX_BD_RING_LEN * sizeof(struct qe_bd),
				  TX_BD_RING_LEN * sizeof(struct qe_bd *),
				  priv->tx_bd_base, priv->dma_tx_bd);

		priv->tx_bd_base = NULL;
@@ -1,3 +1,3 @@
obj-$(CONFIG_XEN_NETDEV_BACKEND) := xen-netback.o

xen-netback-y := netback.o xenbus.o interface.o hash.o
xen-netback-y := netback.o xenbus.o interface.o hash.o rx.o
@ -91,13 +91,6 @@ struct xenvif_rx_meta {
|
|||
*/
|
||||
#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
|
||||
|
||||
/* It's possible for an skb to have a maximal number of frags
|
||||
* but still be less than MAX_BUFFER_OFFSET in size. Thus the
|
||||
* worst-case number of copy operations is MAX_XEN_SKB_FRAGS per
|
||||
* ring slot.
|
||||
*/
|
||||
#define MAX_GRANT_COPY_OPS (MAX_XEN_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
|
||||
|
||||
#define NETBACK_INVALID_HANDLE -1
|
||||
|
||||
/* To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
|
||||
|
@ -133,6 +126,15 @@ struct xenvif_stats {
|
|||
unsigned long tx_frag_overflow;
|
||||
};
|
||||
|
||||
#define COPY_BATCH_SIZE 64
|
||||
|
||||
struct xenvif_copy_state {
|
||||
struct gnttab_copy op[COPY_BATCH_SIZE];
|
||||
RING_IDX idx[COPY_BATCH_SIZE];
|
||||
unsigned int num;
|
||||
struct sk_buff_head *completed;
|
||||
};
|
||||
|
||||
struct xenvif_queue { /* Per-queue data for xenvif */
|
||||
unsigned int id; /* Queue ID, 0-based */
|
||||
char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
|
||||
|
@ -189,12 +191,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
|
|||
unsigned long last_rx_time;
|
||||
bool stalled;
|
||||
|
||||
struct gnttab_copy grant_copy_op[MAX_GRANT_COPY_OPS];
|
||||
|
||||
/* We create one meta structure per ring request we consume, so
|
||||
* the maximum number is the same as the ring size.
|
||||
*/
|
||||
struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
|
||||
struct xenvif_copy_state rx_copy;
|
||||
|
||||
/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
|
||||
unsigned long credit_bytes;
|
||||
|
@ -260,7 +257,6 @@ struct xenvif {
|
|||
|
||||
/* Frontend feature information. */
|
||||
int gso_mask;
|
||||
int gso_prefix_mask;
|
||||
|
||||
u8 can_sg:1;
|
||||
u8 ip_csum:1;
|
||||
|
@ -359,6 +355,7 @@ int xenvif_dealloc_kthread(void *data);
|
|||
|
||||
irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);
|
||||
|
||||
void xenvif_rx_action(struct xenvif_queue *queue);
|
||||
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
|
||||
|
||||
void xenvif_carrier_on(struct xenvif *vif);
|
||||
|
|
|
@ -149,17 +149,8 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
|
|||
struct xenvif *vif = netdev_priv(dev);
|
||||
unsigned int size = vif->hash.size;
|
||||
|
||||
if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE) {
|
||||
u16 index = fallback(dev, skb) % dev->real_num_tx_queues;
|
||||
|
||||
/* Make sure there is no hash information in the socket
|
||||
* buffer otherwise it would be incorrectly forwarded
|
||||
* to the frontend.
|
||||
*/
|
||||
skb_clear_hash(skb);
|
||||
|
||||
return index;
|
||||
}
|
||||
if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
|
||||
return fallback(dev, skb) % dev->real_num_tx_queues;
|
||||
|
||||
xenvif_set_skb_hash(vif, skb);
|
||||
|
||||
|
@ -208,6 +199,13 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
cb = XENVIF_RX_CB(skb);
|
||||
cb->expires = jiffies + vif->drain_timeout;
|
||||
|
||||
/* If there is no hash algorithm configured then make sure there
|
||||
* is no hash information in the socket buffer otherwise it
|
||||
* would be incorrectly forwarded to the frontend.
|
||||
*/
|
||||
if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
|
||||
skb_clear_hash(skb);
|
||||
|
||||
xenvif_rx_queue_tail(queue, skb);
|
||||
xenvif_kick_thread(queue);
|
||||
|
||||
|
@ -319,9 +317,9 @@ static netdev_features_t xenvif_fix_features(struct net_device *dev,
|
|||
|
||||
if (!vif->can_sg)
|
||||
features &= ~NETIF_F_SG;
|
||||
if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4))
|
||||
if (~(vif->gso_mask) & GSO_BIT(TCPV4))
|
||||
features &= ~NETIF_F_TSO;
|
||||
if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6))
|
||||
if (~(vif->gso_mask) & GSO_BIT(TCPV6))
|
||||
features &= ~NETIF_F_TSO6;
|
||||
if (!vif->ip_csum)
|
||||
features &= ~NETIF_F_IP_CSUM;
|
||||
|
@ -467,7 +465,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
|
|||
dev->netdev_ops = &xenvif_netdev_ops;
|
||||
dev->hw_features = NETIF_F_SG |
|
||||
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
|
||||
NETIF_F_TSO | NETIF_F_TSO6;
|
||||
NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_FRAGLIST;
|
||||
dev->features = dev->hw_features | NETIF_F_RXCSUM;
|
||||
dev->ethtool_ops = &xenvif_ethtool_ops;
|
||||
|
||||
|
|
|
@ -106,13 +106,6 @@ static void push_tx_responses(struct xenvif_queue *queue);
|
|||
|
||||
static inline int tx_work_todo(struct xenvif_queue *queue);
|
||||
|
||||
static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
|
||||
u16 id,
|
||||
s8 st,
|
||||
u16 offset,
|
||||
u16 size,
|
||||
u16 flags);
|
||||
|
||||
static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
|
||||
u16 idx)
|
||||
{
|
||||
|
@ -155,571 +148,11 @@ static inline pending_ring_idx_t pending_index(unsigned i)
|
|||
return i & (MAX_PENDING_REQS-1);
|
||||
}
|
||||
|
||||
static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
|
||||
{
|
||||
RING_IDX prod, cons;
|
||||
struct sk_buff *skb;
|
||||
int needed;
|
||||
|
||||
skb = skb_peek(&queue->rx_queue);
|
||||
if (!skb)
|
||||
return false;
|
||||
|
||||
needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
|
||||
if (skb_is_gso(skb))
|
||||
needed++;
|
||||
if (skb->sw_hash)
|
||||
needed++;
|
||||
|
||||
do {
|
||||
prod = queue->rx.sring->req_prod;
|
||||
cons = queue->rx.req_cons;
|
||||
|
||||
if (prod - cons >= needed)
|
||||
return true;
|
||||
|
||||
queue->rx.sring->req_event = prod + 1;
|
||||
|
||||
/* Make sure event is visible before we check prod
|
||||
* again.
|
||||
*/
|
||||
mb();
|
||||
} while (queue->rx.sring->req_prod != prod);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&queue->rx_queue.lock, flags);
|
||||
|
||||
__skb_queue_tail(&queue->rx_queue, skb);
|
||||
|
||||
queue->rx_queue_len += skb->len;
|
||||
if (queue->rx_queue_len > queue->rx_queue_max)
|
||||
netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
|
||||
|
||||
spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
|
||||
}
|
||||
|
||||
static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
|
||||
spin_lock_irq(&queue->rx_queue.lock);
|
||||
|
||||
skb = __skb_dequeue(&queue->rx_queue);
|
||||
if (skb)
|
||||
queue->rx_queue_len -= skb->len;
|
||||
|
||||
spin_unlock_irq(&queue->rx_queue.lock);
|
||||
|
||||
return skb;
|
||||
}
|
||||
|
||||
static void xenvif_rx_queue_maybe_wake(struct xenvif_queue *queue)
|
||||
{
|
||||
spin_lock_irq(&queue->rx_queue.lock);
|
||||
|
||||
if (queue->rx_queue_len < queue->rx_queue_max)
|
||||
netif_tx_wake_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
|
||||
|
||||
spin_unlock_irq(&queue->rx_queue.lock);
|
||||
}
|
||||
|
||||
|
||||
static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
while ((skb = xenvif_rx_dequeue(queue)) != NULL)
|
||||
kfree_skb(skb);
|
||||
}
|
||||
|
||||
static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
|
||||
for(;;) {
|
||||
skb = skb_peek(&queue->rx_queue);
|
||||
if (!skb)
|
||||
break;
|
||||
if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
|
||||
break;
|
||||
xenvif_rx_dequeue(queue);
|
||||
kfree_skb(skb);
|
||||
}
|
||||
}
|
||||
|
||||
struct netrx_pending_operations {
|
||||
unsigned copy_prod, copy_cons;
|
||||
unsigned meta_prod, meta_cons;
|
||||
struct gnttab_copy *copy;
|
||||
struct xenvif_rx_meta *meta;
|
||||
int copy_off;
|
||||
grant_ref_t copy_gref;
|
||||
};
|
||||
|
||||
static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
|
||||
struct netrx_pending_operations *npo)
|
||||
{
|
||||
struct xenvif_rx_meta *meta;
|
||||
struct xen_netif_rx_request req;
|
||||
|
||||
RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
|
||||
|
||||
meta = npo->meta + npo->meta_prod++;
|
||||
meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
|
||||
meta->gso_size = 0;
|
||||
meta->size = 0;
|
||||
meta->id = req.id;
|
||||
|
||||
npo->copy_off = 0;
|
||||
npo->copy_gref = req.gref;
|
||||
|
||||
return meta;
|
||||
}
|
||||
|
||||
struct gop_frag_copy {
|
||||
struct xenvif_queue *queue;
|
||||
struct netrx_pending_operations *npo;
|
||||
struct xenvif_rx_meta *meta;
|
||||
int head;
|
||||
int gso_type;
|
||||
int protocol;
|
||||
int hash_present;
|
||||
|
||||
struct page *page;
|
||||
};
|
||||
|
||||
static void xenvif_setup_copy_gop(unsigned long gfn,
|
||||
unsigned int offset,
|
||||
unsigned int *len,
|
||||
struct gop_frag_copy *info)
|
||||
{
|
||||
struct gnttab_copy *copy_gop;
|
||||
struct xen_page_foreign *foreign;
|
||||
/* Convenient aliases */
|
||||
struct xenvif_queue *queue = info->queue;
|
||||
struct netrx_pending_operations *npo = info->npo;
|
||||
struct page *page = info->page;
|
||||
|
||||
BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
|
||||
|
||||
if (npo->copy_off == MAX_BUFFER_OFFSET)
|
||||
info->meta = get_next_rx_buffer(queue, npo);
|
||||
|
||||
if (npo->copy_off + *len > MAX_BUFFER_OFFSET)
|
||||
*len = MAX_BUFFER_OFFSET - npo->copy_off;
|
||||
|
||||
copy_gop = npo->copy + npo->copy_prod++;
|
||||
copy_gop->flags = GNTCOPY_dest_gref;
|
||||
copy_gop->len = *len;
|
||||
|
||||
foreign = xen_page_foreign(page);
|
||||
if (foreign) {
|
||||
copy_gop->source.domid = foreign->domid;
|
||||
copy_gop->source.u.ref = foreign->gref;
|
||||
copy_gop->flags |= GNTCOPY_source_gref;
|
||||
} else {
|
||||
copy_gop->source.domid = DOMID_SELF;
|
||||
copy_gop->source.u.gmfn = gfn;
|
||||
}
|
||||
copy_gop->source.offset = offset;
|
||||
|
||||
copy_gop->dest.domid = queue->vif->domid;
|
||||
copy_gop->dest.offset = npo->copy_off;
|
||||
copy_gop->dest.u.ref = npo->copy_gref;
|
||||
|
||||
npo->copy_off += *len;
|
||||
info->meta->size += *len;
|
||||
|
||||
if (!info->head)
|
||||
return;
|
||||
|
||||
/* Leave a gap for the GSO descriptor. */
|
||||
if ((1 << info->gso_type) & queue->vif->gso_mask)
|
||||
queue->rx.req_cons++;
|
||||
|
||||
/* Leave a gap for the hash extra segment. */
|
||||
if (info->hash_present)
|
||||
queue->rx.req_cons++;
|
||||
|
||||
info->head = 0; /* There must be something in this buffer now */
|
||||
}
|
||||
|
||||
static void xenvif_gop_frag_copy_grant(unsigned long gfn,
|
||||
unsigned offset,
|
||||
unsigned int len,
|
||||
void *data)
|
||||
{
|
||||
unsigned int bytes;
|
||||
|
||||
while (len) {
|
||||
bytes = len;
|
||||
xenvif_setup_copy_gop(gfn, offset, &bytes, data);
|
||||
offset += bytes;
|
||||
len -= bytes;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Set up the grant operations for this fragment. If it's a flipping
|
||||
* interface, we also set up the unmap request from here.
|
||||
*/
|
||||
static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
|
||||
struct netrx_pending_operations *npo,
|
||||
struct page *page, unsigned long size,
|
||||
unsigned long offset, int *head)
|
||||
{
|
||||
struct gop_frag_copy info = {
|
||||
.queue = queue,
|
||||
.npo = npo,
|
||||
.head = *head,
|
||||
.gso_type = XEN_NETIF_GSO_TYPE_NONE,
|
||||
/* xenvif_set_skb_hash() will have either set a s/w
|
||||
* hash or cleared the hash depending on
|
||||
* whether the frontend wants a hash for this skb.
|
||||
*/
|
||||
.hash_present = skb->sw_hash,
|
||||
};
|
||||
unsigned long bytes;
|
||||
|
||||
if (skb_is_gso(skb)) {
|
||||
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
|
||||
info.gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
|
||||
else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
|
||||
info.gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
|
||||
}
|
||||
|
||||
/* Data must not cross a page boundary. */
|
||||
BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
|
||||
|
||||
info.meta = npo->meta + npo->meta_prod - 1;
|
||||
|
||||
/* Skip unused frames from start of page */
|
||||
page += offset >> PAGE_SHIFT;
|
||||
offset &= ~PAGE_MASK;
|
||||
|
||||
while (size > 0) {
|
||||
BUG_ON(offset >= PAGE_SIZE);
|
||||
|
||||
bytes = PAGE_SIZE - offset;
|
||||
if (bytes > size)
|
||||
bytes = size;
|
||||
|
||||
info.page = page;
|
||||
gnttab_foreach_grant_in_range(page, offset, bytes,
|
||||
xenvif_gop_frag_copy_grant,
|
||||
&info);
|
||||
size -= bytes;
|
||||
offset = 0;
|
||||
|
||||
/* Next page */
|
||||
if (size) {
|
||||
BUG_ON(!PageCompound(page));
|
||||
page++;
|
||||
}
|
||||
}
|
||||
|
||||
*head = info.head;
|
||||
}
|
||||
|
||||
/*
|
||||
* Prepare an SKB to be transmitted to the frontend.
|
||||
*
|
||||
* This function is responsible for allocating grant operations, meta
|
||||
* structures, etc.
|
||||
*
|
||||
* It returns the number of meta structures consumed. The number of
|
||||
* ring slots used is always equal to the number of meta slots used
|
||||
* plus the number of GSO descriptors used. Currently, we use either
|
||||
* zero GSO descriptors (for non-GSO packets) or one descriptor (for
|
||||
* frontend-side LRO).
|
||||
*/
|
||||
static int xenvif_gop_skb(struct sk_buff *skb,
|
||||
struct netrx_pending_operations *npo,
|
||||
struct xenvif_queue *queue)
|
||||
{
|
||||
struct xenvif *vif = netdev_priv(skb->dev);
|
||||
int nr_frags = skb_shinfo(skb)->nr_frags;
|
||||
int i;
|
||||
struct xen_netif_rx_request req;
|
||||
struct xenvif_rx_meta *meta;
|
||||
unsigned char *data;
|
||||
int head = 1;
|
||||
int old_meta_prod;
|
||||
int gso_type;
|
||||
|
||||
old_meta_prod = npo->meta_prod;
|
||||
|
||||
gso_type = XEN_NETIF_GSO_TYPE_NONE;
|
||||
if (skb_is_gso(skb)) {
|
||||
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
|
||||
gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
|
||||
else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
|
||||
gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
|
||||
}
|
||||
|
||||
/* Set up a GSO prefix descriptor, if necessary */
|
||||
if ((1 << gso_type) & vif->gso_prefix_mask) {
|
||||
RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
|
||||
meta = npo->meta + npo->meta_prod++;
|
||||
meta->gso_type = gso_type;
|
||||
meta->gso_size = skb_shinfo(skb)->gso_size;
|
||||
meta->size = 0;
|
||||
meta->id = req.id;
|
||||
}
|
||||
|
||||
RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
|
||||
meta = npo->meta + npo->meta_prod++;
|
||||
|
||||
if ((1 << gso_type) & vif->gso_mask) {
|
||||
meta->gso_type = gso_type;
|
||||
meta->gso_size = skb_shinfo(skb)->gso_size;
|
||||
} else {
|
||||
meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
|
||||
meta->gso_size = 0;
|
||||
}
|
||||
|
||||
meta->size = 0;
|
||||
meta->id = req.id;
|
||||
npo->copy_off = 0;
|
||||
npo->copy_gref = req.gref;
|
||||
|
||||
data = skb->data;
|
||||
while (data < skb_tail_pointer(skb)) {
|
||||
unsigned int offset = offset_in_page(data);
|
||||
unsigned int len = PAGE_SIZE - offset;
|
||||
|
||||
if (data + len > skb_tail_pointer(skb))
|
||||
len = skb_tail_pointer(skb) - data;
|
||||
|
||||
xenvif_gop_frag_copy(queue, skb, npo,
|
||||
virt_to_page(data), len, offset, &head);
|
||||
data += len;
|
||||
}
|
||||
|
||||
for (i = 0; i < nr_frags; i++) {
|
||||
xenvif_gop_frag_copy(queue, skb, npo,
|
||||
skb_frag_page(&skb_shinfo(skb)->frags[i]),
|
||||
skb_frag_size(&skb_shinfo(skb)->frags[i]),
|
||||
skb_shinfo(skb)->frags[i].page_offset,
|
||||
&head);
|
||||
}
|
||||
|
||||
return npo->meta_prod - old_meta_prod;
|
||||
}
|
||||
|
||||
/*
|
||||
* This is a twin to xenvif_gop_skb. Assume that xenvif_gop_skb was
|
||||
* used to set up the operations on the top of
|
||||
* netrx_pending_operations, which have since been done. Check that
|
||||
* they didn't give any errors and advance over them.
|
||||
*/
|
||||
static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
|
||||
struct netrx_pending_operations *npo)
|
||||
{
|
||||
struct gnttab_copy *copy_op;
|
||||
int status = XEN_NETIF_RSP_OKAY;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < nr_meta_slots; i++) {
|
||||
copy_op = npo->copy + npo->copy_cons++;
|
||||
if (copy_op->status != GNTST_okay) {
|
||||
netdev_dbg(vif->dev,
|
||||
"Bad status %d from copy to DOM%d.\n",
|
||||
copy_op->status, vif->domid);
|
||||
status = XEN_NETIF_RSP_ERROR;
|
||||
}
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status,
|
||||
struct xenvif_rx_meta *meta,
|
||||
int nr_meta_slots)
|
||||
{
|
||||
int i;
|
||||
unsigned long offset;
|
||||
|
||||
/* No fragments used */
|
||||
if (nr_meta_slots <= 1)
|
||||
return;
|
||||
|
||||
nr_meta_slots--;
|
||||
|
||||
for (i = 0; i < nr_meta_slots; i++) {
|
||||
int flags;
|
||||
if (i == nr_meta_slots - 1)
|
||||
flags = 0;
|
||||
else
|
||||
flags = XEN_NETRXF_more_data;
|
||||
|
||||
offset = 0;
|
||||
make_rx_response(queue, meta[i].id, status, offset,
|
||||
meta[i].size, flags);
|
||||
}
|
||||
}
|
||||
|
||||
void xenvif_kick_thread(struct xenvif_queue *queue)
|
||||
{
|
||||
wake_up(&queue->wq);
|
||||
}
|
||||
|
||||
static void xenvif_rx_action(struct xenvif_queue *queue)
|
||||
{
|
||||
struct xenvif *vif = queue->vif;
|
||||
s8 status;
|
||||
u16 flags;
|
||||
struct xen_netif_rx_response *resp;
|
||||
struct sk_buff_head rxq;
|
||||
struct sk_buff *skb;
|
||||
LIST_HEAD(notify);
|
||||
int ret;
|
||||
unsigned long offset;
|
||||
bool need_to_notify = false;
|
||||
|
||||
struct netrx_pending_operations npo = {
|
||||
.copy = queue->grant_copy_op,
|
||||
.meta = queue->meta,
|
||||
};
|
||||
|
||||
skb_queue_head_init(&rxq);
|
||||
|
||||
while (xenvif_rx_ring_slots_available(queue)
|
||||
&& (skb = xenvif_rx_dequeue(queue)) != NULL) {
|
||||
queue->last_rx_time = jiffies;
|
||||
|
||||
XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
|
||||
|
||||
__skb_queue_tail(&rxq, skb);
|
||||
}
|
||||
|
||||
BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta));
|
||||
|
||||
if (!npo.copy_prod)
|
||||
goto done;
|
||||
|
||||
BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
|
||||
gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);
|
||||
|
||||
while ((skb = __skb_dequeue(&rxq)) != NULL) {
|
||||
struct xen_netif_extra_info *extra = NULL;
|
||||
|
||||
if ((1 << queue->meta[npo.meta_cons].gso_type) &
|
||||
vif->gso_prefix_mask) {
|
||||
resp = RING_GET_RESPONSE(&queue->rx,
|
||||
queue->rx.rsp_prod_pvt++);
|
||||
|
||||
resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
|
||||
|
||||
resp->offset = queue->meta[npo.meta_cons].gso_size;
|
||||
resp->id = queue->meta[npo.meta_cons].id;
|
||||
resp->status = XENVIF_RX_CB(skb)->meta_slots_used;
|
||||
|
||||
npo.meta_cons++;
|
||||
XENVIF_RX_CB(skb)->meta_slots_used--;
|
||||
}
|
||||
|
||||
|
||||
queue->stats.tx_bytes += skb->len;
|
||||
queue->stats.tx_packets++;
|
||||
|
||||
status = xenvif_check_gop(vif,
|
||||
XENVIF_RX_CB(skb)->meta_slots_used,
|
||||
&npo);
|
||||
|
||||
if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
|
||||
flags = 0;
|
||||
else
|
||||
flags = XEN_NETRXF_more_data;
|
||||
|
||||
if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
|
||||
flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
|
||||
else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
|
||||
/* remote but checksummed. */
|
||||
flags |= XEN_NETRXF_data_validated;
|
||||
|
||||
offset = 0;
|
||||
resp = make_rx_response(queue, queue->meta[npo.meta_cons].id,
|
||||
status, offset,
|
||||
queue->meta[npo.meta_cons].size,
|
||||
flags);
|
||||
|
||||
if ((1 << queue->meta[npo.meta_cons].gso_type) &
|
||||
vif->gso_mask) {
|
||||
extra = (struct xen_netif_extra_info *)
|
||||
RING_GET_RESPONSE(&queue->rx,
|
||||
queue->rx.rsp_prod_pvt++);
|
||||
|
||||
resp->flags |= XEN_NETRXF_extra_info;
|
||||
|
||||
extra->u.gso.type = queue->meta[npo.meta_cons].gso_type;
|
||||
extra->u.gso.size = queue->meta[npo.meta_cons].gso_size;
|
||||
extra->u.gso.pad = 0;
|
||||
extra->u.gso.features = 0;
|
||||
|
||||
extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
|
||||
extra->flags = 0;
|
||||
}
|
||||
|
||||
if (skb->sw_hash) {
|
||||
/* Since the skb got here via xenvif_select_queue()
|
||||
* we know that the hash has been re-calculated
|
||||
* according to a configuration set by the frontend
|
||||
* and therefore we know that it is legitimate to
|
||||
* pass it to the frontend.
|
||||
*/
|
||||
if (resp->flags & XEN_NETRXF_extra_info)
|
||||
extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
|
||||
else
|
||||
resp->flags |= XEN_NETRXF_extra_info;
|
||||
|
||||
extra = (struct xen_netif_extra_info *)
|
||||
RING_GET_RESPONSE(&queue->rx,
|
||||
queue->rx.rsp_prod_pvt++);
|
||||
|
||||
extra->u.hash.algorithm =
|
||||
XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;
|
||||
|
||||
if (skb->l4_hash)
|
||||
extra->u.hash.type =
|
||||
skb->protocol == htons(ETH_P_IP) ?
|
||||
_XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP :
|
||||
_XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
|
||||
else
|
||||
extra->u.hash.type =
|
||||
skb->protocol == htons(ETH_P_IP) ?
|
||||
_XEN_NETIF_CTRL_HASH_TYPE_IPV4 :
|
||||
_XEN_NETIF_CTRL_HASH_TYPE_IPV6;
|
||||
|
||||
*(uint32_t *)extra->u.hash.value =
|
||||
skb_get_hash_raw(skb);
|
||||
|
||||
extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
|
||||
extra->flags = 0;
|
||||
}
|
||||
|
||||
xenvif_add_frag_responses(queue, status,
|
||||
queue->meta + npo.meta_cons + 1,
|
||||
XENVIF_RX_CB(skb)->meta_slots_used);
|
||||
|
||||
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret);
|
||||
|
||||
need_to_notify |= !!ret;
|
||||
|
||||
npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
|
||||
dev_kfree_skb(skb);
|
||||
}
|
||||
|
||||
done:
|
||||
if (need_to_notify)
|
||||
notify_remote_via_irq(queue->rx_irq);
|
||||
}
|
||||
|
||||
void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
|
||||
{
|
||||
int more_to_do;
|
||||
|
@ -1951,29 +1384,6 @@ static void push_tx_responses(struct xenvif_queue *queue)
|
|||
notify_remote_via_irq(queue->tx_irq);
|
||||
}
|
||||
|
||||
static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
|
||||
u16 id,
|
||||
s8 st,
|
||||
u16 offset,
|
||||
u16 size,
|
||||
u16 flags)
|
||||
{
|
||||
RING_IDX i = queue->rx.rsp_prod_pvt;
|
||||
struct xen_netif_rx_response *resp;
|
||||
|
||||
resp = RING_GET_RESPONSE(&queue->rx, i);
|
||||
resp->offset = offset;
|
||||
resp->flags = flags;
|
||||
resp->id = id;
|
||||
resp->status = (s16)size;
|
||||
if (st < 0)
|
||||
resp->status = (s16)st;
|
||||
|
||||
queue->rx.rsp_prod_pvt = ++i;
|
||||
|
||||
return resp;
|
||||
}
|
||||
|
||||
void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
|
||||
{
|
||||
int ret;
|
||||
|
@ -2055,170 +1465,6 @@ int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
|
|||
return err;
|
||||
}
|
||||
|
||||
static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
|
||||
{
|
||||
struct xenvif *vif = queue->vif;
|
||||
|
||||
queue->stalled = true;
|
||||
|
||||
/* At least one queue has stalled? Disable the carrier. */
|
||||
spin_lock(&vif->lock);
|
||||
if (vif->stalled_queues++ == 0) {
|
||||
netdev_info(vif->dev, "Guest Rx stalled");
|
||||
netif_carrier_off(vif->dev);
|
||||
}
|
||||
spin_unlock(&vif->lock);
|
||||
}
|
||||
|
||||
static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
|
||||
{
|
||||
struct xenvif *vif = queue->vif;
|
||||
|
||||
queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
|
||||
queue->stalled = false;
|
||||
|
||||
/* All queues are ready? Enable the carrier. */
|
||||
spin_lock(&vif->lock);
|
||||
if (--vif->stalled_queues == 0) {
|
||||
netdev_info(vif->dev, "Guest Rx ready");
|
||||
netif_carrier_on(vif->dev);
|
||||
}
|
||||
spin_unlock(&vif->lock);
|
||||
}
|
||||
|
||||
static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
|
||||
{
|
||||
RING_IDX prod, cons;
|
||||
|
||||
prod = queue->rx.sring->req_prod;
|
||||
cons = queue->rx.req_cons;
|
||||
|
||||
return !queue->stalled && prod - cons < 1
|
||||
&& time_after(jiffies,
|
||||
queue->last_rx_time + queue->vif->stall_timeout);
|
||||
}
|
||||
|
||||
static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
|
||||
{
|
||||
RING_IDX prod, cons;
|
||||
|
||||
prod = queue->rx.sring->req_prod;
|
||||
cons = queue->rx.req_cons;
|
||||
|
||||
return queue->stalled && prod - cons >= 1;
|
||||
}
|
||||
|
||||
static bool xenvif_have_rx_work(struct xenvif_queue *queue)
|
||||
{
|
||||
return xenvif_rx_ring_slots_available(queue)
|
||||
|| (queue->vif->stall_timeout &&
|
||||
(xenvif_rx_queue_stalled(queue)
|
||||
|| xenvif_rx_queue_ready(queue)))
|
||||
|| kthread_should_stop()
|
||||
|| queue->vif->disabled;
|
||||
}
|
||||
|
||||
static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
long timeout;
|
||||
|
||||
skb = skb_peek(&queue->rx_queue);
|
||||
if (!skb)
|
||||
return MAX_SCHEDULE_TIMEOUT;
|
||||
|
||||
timeout = XENVIF_RX_CB(skb)->expires - jiffies;
|
||||
return timeout < 0 ? 0 : timeout;
|
||||
}
|
||||
|
||||
/* Wait until the guest Rx thread has work.
|
||||
*
|
||||
* The timeout needs to be adjusted based on the current head of the
|
||||
* queue (and not just the head at the beginning). In particular, if
|
||||
* the queue is initially empty an infinite timeout is used and this
|
||||
* needs to be reduced when a skb is queued.
|
||||
*
|
||||
* This cannot be done with wait_event_timeout() because it only
|
||||
* calculates the timeout once.
|
||||
*/
|
||||
static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
|
||||
{
|
||||
DEFINE_WAIT(wait);
|
||||
|
||||
if (xenvif_have_rx_work(queue))
|
||||
return;
|
||||
|
||||
for (;;) {
|
||||
long ret;
|
||||
|
||||
prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
|
||||
if (xenvif_have_rx_work(queue))
|
||||
break;
|
||||
ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
|
||||
if (!ret)
|
||||
break;
|
||||
}
|
||||
finish_wait(&queue->wq, &wait);
|
||||
}
|
||||
|
||||
int xenvif_kthread_guest_rx(void *data)
|
||||
{
|
||||
struct xenvif_queue *queue = data;
|
||||
struct xenvif *vif = queue->vif;
|
||||
|
||||
if (!vif->stall_timeout)
|
||||
xenvif_queue_carrier_on(queue);
|
||||
|
||||
for (;;) {
|
||||
xenvif_wait_for_rx_work(queue);
|
||||
|
||||
if (kthread_should_stop())
|
||||
break;
|
||||
|
||||
/* This frontend is found to be rogue, disable it in
|
||||
* kthread context. Currently this is only set when
|
||||
* netback finds out frontend sends malformed packet,
|
||||
* but we cannot disable the interface in softirq
|
||||
* context so we defer it here, if this thread is
|
||||
* associated with queue 0.
|
||||
*/
|
||||
if (unlikely(vif->disabled && queue->id == 0)) {
|
||||
xenvif_carrier_off(vif);
|
||||
break;
|
||||
}
|
||||
|
||||
if (!skb_queue_empty(&queue->rx_queue))
|
||||
xenvif_rx_action(queue);
|
||||
|
||||
/* If the guest hasn't provided any Rx slots for a
|
||||
* while it's probably not responsive, drop the
|
||||
* carrier so packets are dropped earlier.
|
||||
*/
|
||||
if (vif->stall_timeout) {
|
||||
if (xenvif_rx_queue_stalled(queue))
|
||||
xenvif_queue_carrier_off(queue);
|
||||
else if (xenvif_rx_queue_ready(queue))
|
||||
xenvif_queue_carrier_on(queue);
|
||||
}
|
||||
|
||||
/* Queued packets may have foreign pages from other
|
||||
* domains. These cannot be queued indefinitely as
|
||||
* this would starve guests of grant refs and transmit
|
||||
* slots.
|
||||
*/
|
||||
xenvif_rx_queue_drop_expired(queue);
|
||||
|
||||
xenvif_rx_queue_maybe_wake(queue);
|
||||
|
||||
cond_resched();
|
||||
}
|
||||
|
||||
/* Bin any remaining skbs */
|
||||
xenvif_rx_queue_purge(queue);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
|
||||
{
|
||||
/* Dealloc thread must remain running until all inflight
|
||||
|
|
629	drivers/net/xen-netback/rx.c	(new file)
|
@ -0,0 +1,629 @@
|
|||
/*
|
||||
* Copyright (c) 2016 Citrix Systems Inc.
|
||||
* Copyright (c) 2002-2005, K A Fraser
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License version 2
|
||||
* as published by the Free Software Foundation; or, when distributed
|
||||
* separately from the Linux kernel or incorporated into other
|
||||
* software packages, subject to the following license:
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this source file (the "Software"), to deal in the Software without
|
||||
* restriction, including without limitation the rights to use, copy, modify,
|
||||
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
|
||||
* and to permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
* IN THE SOFTWARE.
|
||||
*/
|
||||
#include "common.h"
|
||||
|
||||
#include <linux/kthread.h>
|
||||
|
||||
#include <xen/xen.h>
|
||||
#include <xen/events.h>
|
||||
|
||||
static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
|
||||
{
|
||||
RING_IDX prod, cons;
|
||||
struct sk_buff *skb;
|
||||
int needed;
|
||||
|
||||
skb = skb_peek(&queue->rx_queue);
|
||||
if (!skb)
|
||||
return false;
|
||||
|
||||
needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
|
||||
if (skb_is_gso(skb))
|
||||
needed++;
|
||||
if (skb->sw_hash)
|
||||
needed++;
|
||||
|
||||
do {
|
||||
prod = queue->rx.sring->req_prod;
|
||||
cons = queue->rx.req_cons;
|
||||
|
||||
if (prod - cons >= needed)
|
||||
return true;
|
||||
|
||||
queue->rx.sring->req_event = prod + 1;
|
||||
|
||||
/* Make sure event is visible before we check prod
|
||||
* again.
|
||||
*/
|
||||
mb();
|
||||
} while (queue->rx.sring->req_prod != prod);
|
||||
|
||||
return false;
|
||||
}
|
||||
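xenvif_rx_ring_slots_available() above avoids a lost wakeup: if there are not enough request slots it publishes req_event = prod + 1, issues a barrier, and re-checks the producer index before giving up. Below is a hedged userspace sketch of that publish-then-recheck idiom using C11 atomics; the ring layout and names are simplified stand-ins, not the Xen shared-ring ABI.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for a shared producer/consumer ring. */
struct ring {
	_Atomic unsigned int req_prod;   /* written by the producer */
	_Atomic unsigned int req_event;  /* wake me when prod reaches this */
	unsigned int req_cons;           /* private to the consumer */
};

/* Return true if at least "needed" requests are available; otherwise arm
 * req_event so the producer knows to notify us, then re-check so a request
 * posted in the meantime is not missed. */
static bool slots_available(struct ring *r, unsigned int needed)
{
	unsigned int prod, cons = r->req_cons;

	do {
		prod = atomic_load(&r->req_prod);
		if (prod - cons >= needed)
			return true;

		/* Ask to be notified once more requests arrive... */
		atomic_store(&r->req_event, prod + 1);

		/* ...and make that visible before checking prod again
		 * (plays the role of the mb() in the kernel code). */
		atomic_thread_fence(memory_order_seq_cst);
	} while (atomic_load(&r->req_prod) != prod);

	return false;
}

int main(void)
{
	struct ring r = { .req_cons = 0 };

	atomic_store(&r.req_prod, 4);
	printf("need 2: %s\n", slots_available(&r, 2) ? "available" : "not yet");
	printf("need 8: %s\n", slots_available(&r, 8) ? "available" : "not yet");
	return 0;
}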
|
||||
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&queue->rx_queue.lock, flags);
|
||||
|
||||
__skb_queue_tail(&queue->rx_queue, skb);
|
||||
|
||||
queue->rx_queue_len += skb->len;
|
||||
if (queue->rx_queue_len > queue->rx_queue_max) {
|
||||
struct net_device *dev = queue->vif->dev;
|
||||
|
||||
netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
|
||||
}
|
||||
|
||||
static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
|
||||
spin_lock_irq(&queue->rx_queue.lock);
|
||||
|
||||
skb = __skb_dequeue(&queue->rx_queue);
|
||||
if (skb) {
|
||||
queue->rx_queue_len -= skb->len;
|
||||
if (queue->rx_queue_len < queue->rx_queue_max) {
|
||||
struct netdev_queue *txq;
|
||||
|
||||
txq = netdev_get_tx_queue(queue->vif->dev, queue->id);
|
||||
netif_tx_wake_queue(txq);
|
||||
}
|
||||
}
|
||||
|
||||
spin_unlock_irq(&queue->rx_queue.lock);
|
||||
|
||||
return skb;
|
||||
}
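xenvif_rx_queue_tail() and xenvif_rx_dequeue() above throttle the transmit queue on the total number of queued bytes (rx_queue_len vs rx_queue_max) rather than on packet count. Here is a small standalone sketch of that byte-budget backpressure; the struct and function names are made up for illustration.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Toy FIFO that tracks queued bytes and signals backpressure,
 * in the spirit of rx_queue_len/rx_queue_max in xen-netback. */
struct byte_queue {
	size_t len;      /* bytes currently queued */
	size_t max;      /* stop the producer above this */
	bool stopped;    /* producer currently stopped? */
};

static void bq_enqueue(struct byte_queue *q, size_t pkt_len)
{
	q->len += pkt_len;
	if (q->len > q->max && !q->stopped) {
		q->stopped = true;         /* netif_tx_stop_queue() analogue */
		printf("producer stopped at %zu bytes\n", q->len);
	}
}

static void bq_dequeue(struct byte_queue *q, size_t pkt_len)
{
	q->len -= pkt_len;
	if (q->len < q->max && q->stopped) {
		q->stopped = false;        /* netif_tx_wake_queue() analogue */
		printf("producer woken at %zu bytes\n", q->len);
	}
}

int main(void)
{
	struct byte_queue q = { .max = 4096 };

	bq_enqueue(&q, 3000);
	bq_enqueue(&q, 3000);   /* exceeds the budget, producer stops */
	bq_dequeue(&q, 3000);   /* back under budget, producer wakes  */
	return 0;
}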
|
||||
|
||||
static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
|
||||
while ((skb = xenvif_rx_dequeue(queue)) != NULL)
|
||||
kfree_skb(skb);
|
||||
}
|
||||
|
||||
static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
|
||||
for (;;) {
|
||||
skb = skb_peek(&queue->rx_queue);
|
||||
if (!skb)
|
||||
break;
|
||||
if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
|
||||
break;
|
||||
xenvif_rx_dequeue(queue);
|
||||
kfree_skb(skb);
|
||||
}
|
||||
}
|
||||
|
||||
static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
|
||||
{
|
||||
unsigned int i;
|
||||
int notify;
|
||||
|
||||
gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num);
|
||||
|
||||
for (i = 0; i < queue->rx_copy.num; i++) {
|
||||
struct gnttab_copy *op;
|
||||
|
||||
op = &queue->rx_copy.op[i];
|
||||
|
||||
/* If the copy failed, overwrite the status field in
|
||||
* the corresponding response.
|
||||
*/
|
||||
if (unlikely(op->status != GNTST_okay)) {
|
||||
struct xen_netif_rx_response *rsp;
|
||||
|
||||
rsp = RING_GET_RESPONSE(&queue->rx,
|
||||
queue->rx_copy.idx[i]);
|
||||
rsp->status = op->status;
|
||||
}
|
||||
}
|
||||
|
||||
queue->rx_copy.num = 0;
|
||||
|
||||
/* Push responses for all completed packets. */
|
||||
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify);
|
||||
if (notify)
|
||||
notify_remote_via_irq(queue->rx_irq);
|
||||
|
||||
__skb_queue_purge(queue->rx_copy.completed);
|
||||
}
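xenvif_rx_copy_add() queues grant-copy operations into a fixed-size array and xenvif_rx_copy_flush() submits them in one batch when the array fills or the packet set is complete. A generic accumulate-then-flush sketch of that pattern is below; the batch size, op type, and function names are placeholders rather than the driver's.

#include <stdio.h>

#define BATCH_SIZE 8

struct copy_op { int src, dst, len; };

static struct copy_op batch[BATCH_SIZE];
static int batch_num;

/* Submit everything queued so far in one call (stands in for
 * gnttab_batch_copy() followed by pushing the responses). */
static void copy_flush(void)
{
	if (batch_num)
		printf("submitting %d copy ops\n", batch_num);
	batch_num = 0;
}

/* Queue one op, flushing first if the batch is already full,
 * mirroring the check at the top of xenvif_rx_copy_add(). */
static void copy_add(int src, int dst, int len)
{
	if (batch_num == BATCH_SIZE)
		copy_flush();

	batch[batch_num++] = (struct copy_op){ src, dst, len };
}

int main(void)
{
	for (int i = 0; i < 20; i++)
		copy_add(i, i, 64);
	copy_flush();            /* flush the tail of the batch */
	return 0;
}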
|
||||
|
||||
static void xenvif_rx_copy_add(struct xenvif_queue *queue,
|
||||
struct xen_netif_rx_request *req,
|
||||
unsigned int offset, void *data, size_t len)
|
||||
{
|
||||
struct gnttab_copy *op;
|
||||
struct page *page;
|
||||
struct xen_page_foreign *foreign;
|
||||
|
||||
if (queue->rx_copy.num == COPY_BATCH_SIZE)
|
||||
xenvif_rx_copy_flush(queue);
|
||||
|
||||
op = &queue->rx_copy.op[queue->rx_copy.num];
|
||||
|
||||
page = virt_to_page(data);
|
||||
|
||||
op->flags = GNTCOPY_dest_gref;
|
||||
|
||||
foreign = xen_page_foreign(page);
|
||||
if (foreign) {
|
||||
op->source.domid = foreign->domid;
|
||||
op->source.u.ref = foreign->gref;
|
||||
op->flags |= GNTCOPY_source_gref;
|
||||
} else {
|
||||
op->source.u.gmfn = virt_to_gfn(data);
|
||||
op->source.domid = DOMID_SELF;
|
||||
}
|
||||
|
||||
op->source.offset = xen_offset_in_page(data);
|
||||
op->dest.u.ref = req->gref;
|
||||
op->dest.domid = queue->vif->domid;
|
||||
op->dest.offset = offset;
|
||||
op->len = len;
|
||||
|
||||
queue->rx_copy.idx[queue->rx_copy.num] = queue->rx.req_cons;
|
||||
queue->rx_copy.num++;
|
||||
}
|
||||
|
||||
static unsigned int xenvif_gso_type(struct sk_buff *skb)
|
||||
{
|
||||
if (skb_is_gso(skb)) {
|
||||
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
|
||||
return XEN_NETIF_GSO_TYPE_TCPV4;
|
||||
else
|
||||
return XEN_NETIF_GSO_TYPE_TCPV6;
|
||||
}
|
||||
return XEN_NETIF_GSO_TYPE_NONE;
|
||||
}
|
||||
|
||||
struct xenvif_pkt_state {
|
||||
struct sk_buff *skb;
|
||||
size_t remaining_len;
|
||||
struct sk_buff *frag_iter;
|
||||
int frag; /* frag == -1 => frag_iter->head */
|
||||
unsigned int frag_offset;
|
||||
struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
|
||||
unsigned int extra_count;
|
||||
unsigned int slot;
|
||||
};
|
||||
|
||||
static void xenvif_rx_next_skb(struct xenvif_queue *queue,
|
||||
struct xenvif_pkt_state *pkt)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
unsigned int gso_type;
|
||||
|
||||
skb = xenvif_rx_dequeue(queue);
|
||||
|
||||
queue->stats.tx_bytes += skb->len;
|
||||
queue->stats.tx_packets++;
|
||||
|
||||
/* Reset packet state. */
|
||||
memset(pkt, 0, sizeof(struct xenvif_pkt_state));
|
||||
|
||||
pkt->skb = skb;
|
||||
pkt->frag_iter = skb;
|
||||
pkt->remaining_len = skb->len;
|
||||
pkt->frag = -1;
|
||||
|
||||
gso_type = xenvif_gso_type(skb);
|
||||
if ((1 << gso_type) & queue->vif->gso_mask) {
|
||||
struct xen_netif_extra_info *extra;
|
||||
|
||||
extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
|
||||
|
||||
extra->u.gso.type = gso_type;
|
||||
extra->u.gso.size = skb_shinfo(skb)->gso_size;
|
||||
extra->u.gso.pad = 0;
|
||||
extra->u.gso.features = 0;
|
||||
extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
|
||||
extra->flags = 0;
|
||||
|
||||
pkt->extra_count++;
|
||||
}
|
||||
|
||||
if (skb->sw_hash) {
|
||||
struct xen_netif_extra_info *extra;
|
||||
|
||||
extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];
|
||||
|
||||
extra->u.hash.algorithm =
|
||||
XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;
|
||||
|
||||
if (skb->l4_hash)
|
||||
extra->u.hash.type =
|
||||
skb->protocol == htons(ETH_P_IP) ?
|
||||
_XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP :
|
||||
_XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
|
||||
else
|
||||
extra->u.hash.type =
|
||||
skb->protocol == htons(ETH_P_IP) ?
|
||||
_XEN_NETIF_CTRL_HASH_TYPE_IPV4 :
|
||||
_XEN_NETIF_CTRL_HASH_TYPE_IPV6;
|
||||
|
||||
*(uint32_t *)extra->u.hash.value = skb_get_hash_raw(skb);
|
||||
|
||||
extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
|
||||
extra->flags = 0;
|
||||
|
||||
pkt->extra_count++;
|
||||
}
|
||||
}
|
||||
|
||||
static void xenvif_rx_complete(struct xenvif_queue *queue,
|
||||
struct xenvif_pkt_state *pkt)
|
||||
{
|
||||
/* All responses are ready to be pushed. */
|
||||
queue->rx.rsp_prod_pvt = queue->rx.req_cons;
|
||||
|
||||
__skb_queue_tail(queue->rx_copy.completed, pkt->skb);
|
||||
}
|
||||
|
||||
static void xenvif_rx_next_frag(struct xenvif_pkt_state *pkt)
|
||||
{
|
||||
struct sk_buff *frag_iter = pkt->frag_iter;
|
||||
unsigned int nr_frags = skb_shinfo(frag_iter)->nr_frags;
|
||||
|
||||
pkt->frag++;
|
||||
pkt->frag_offset = 0;
|
||||
|
||||
if (pkt->frag >= nr_frags) {
|
||||
if (frag_iter == pkt->skb)
|
||||
pkt->frag_iter = skb_shinfo(frag_iter)->frag_list;
|
||||
else
|
||||
pkt->frag_iter = frag_iter->next;
|
||||
|
||||
pkt->frag = -1;
|
||||
}
|
||||
}
|
||||
|
||||
static void xenvif_rx_next_chunk(struct xenvif_queue *queue,
|
||||
struct xenvif_pkt_state *pkt,
|
||||
unsigned int offset, void **data,
|
||||
size_t *len)
|
||||
{
|
||||
struct sk_buff *frag_iter = pkt->frag_iter;
|
||||
void *frag_data;
|
||||
size_t frag_len, chunk_len;
|
||||
|
||||
BUG_ON(!frag_iter);
|
||||
|
||||
if (pkt->frag == -1) {
|
||||
frag_data = frag_iter->data;
|
||||
frag_len = skb_headlen(frag_iter);
|
||||
} else {
|
||||
skb_frag_t *frag = &skb_shinfo(frag_iter)->frags[pkt->frag];
|
||||
|
||||
frag_data = skb_frag_address(frag);
|
||||
frag_len = skb_frag_size(frag);
|
||||
}
|
||||
|
||||
frag_data += pkt->frag_offset;
|
||||
frag_len -= pkt->frag_offset;
|
||||
|
||||
chunk_len = min(frag_len, XEN_PAGE_SIZE - offset);
|
||||
chunk_len = min(chunk_len,
|
||||
XEN_PAGE_SIZE - xen_offset_in_page(frag_data));
|
||||
|
||||
pkt->frag_offset += chunk_len;
|
||||
|
||||
/* Advance to next frag? */
|
||||
if (frag_len == chunk_len)
|
||||
xenvif_rx_next_frag(pkt);
|
||||
|
||||
*data = frag_data;
|
||||
*len = chunk_len;
|
||||
}
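xenvif_rx_next_chunk() above carves the current fragment into pieces that fit both the space left in the destination slot and the remainder of the source page. A standalone sketch of that double-min chunking follows; the 4 KiB page size and names are illustrative only.

#include <stdio.h>
#include <stddef.h>

#define PAGE_SZ 4096u

static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

/* Split "len" bytes starting at source offset "src_off" into chunks that
 * never cross a source page boundary and never overflow the space left
 * in the current destination slot, like xenvif_rx_next_chunk(). */
static void emit_chunks(size_t src_off, size_t len)
{
	size_t dst_off = 0;

	while (len) {
		size_t chunk = min_sz(len, PAGE_SZ - dst_off);

		chunk = min_sz(chunk, PAGE_SZ - (src_off % PAGE_SZ));

		printf("copy %zu bytes: src_off=%zu -> dst_off=%zu\n",
		       chunk, src_off, dst_off);

		src_off += chunk;
		dst_off = (dst_off + chunk) % PAGE_SZ; /* next slot on wrap */
		len -= chunk;
	}
}

int main(void)
{
	emit_chunks(1000, 10000); /* an unaligned ~10 KB payload */
	return 0;
}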
|
||||
|
||||
static void xenvif_rx_data_slot(struct xenvif_queue *queue,
|
||||
struct xenvif_pkt_state *pkt,
|
||||
struct xen_netif_rx_request *req,
|
||||
struct xen_netif_rx_response *rsp)
|
||||
{
|
||||
unsigned int offset = 0;
|
||||
unsigned int flags;
|
||||
|
||||
do {
|
||||
size_t len;
|
||||
void *data;
|
||||
|
||||
xenvif_rx_next_chunk(queue, pkt, offset, &data, &len);
|
||||
xenvif_rx_copy_add(queue, req, offset, data, len);
|
||||
|
||||
offset += len;
|
||||
pkt->remaining_len -= len;
|
||||
|
||||
} while (offset < XEN_PAGE_SIZE && pkt->remaining_len > 0);
|
||||
|
||||
if (pkt->remaining_len > 0)
|
||||
flags = XEN_NETRXF_more_data;
|
||||
else
|
||||
flags = 0;
|
||||
|
||||
if (pkt->slot == 0) {
|
||||
struct sk_buff *skb = pkt->skb;
|
||||
|
||||
if (skb->ip_summed == CHECKSUM_PARTIAL)
|
||||
flags |= XEN_NETRXF_csum_blank |
|
||||
XEN_NETRXF_data_validated;
|
||||
else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
|
||||
flags |= XEN_NETRXF_data_validated;
|
||||
|
||||
if (pkt->extra_count != 0)
|
||||
flags |= XEN_NETRXF_extra_info;
|
||||
}
|
||||
|
||||
rsp->offset = 0;
|
||||
rsp->flags = flags;
|
||||
rsp->id = req->id;
|
||||
rsp->status = (s16)offset;
|
||||
}
|
||||
|
||||
static void xenvif_rx_extra_slot(struct xenvif_queue *queue,
|
||||
struct xenvif_pkt_state *pkt,
|
||||
struct xen_netif_rx_request *req,
|
||||
struct xen_netif_rx_response *rsp)
|
||||
{
|
||||
struct xen_netif_extra_info *extra = (void *)rsp;
|
||||
unsigned int i;
|
||||
|
||||
pkt->extra_count--;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(pkt->extras); i++) {
|
||||
if (pkt->extras[i].type) {
|
||||
*extra = pkt->extras[i];
|
||||
|
||||
if (pkt->extra_count != 0)
|
||||
extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
|
||||
|
||||
pkt->extras[i].type = 0;
|
||||
return;
|
||||
}
|
||||
}
|
||||
BUG();
|
||||
}
|
||||
|
||||
void xenvif_rx_skb(struct xenvif_queue *queue)
|
||||
{
|
||||
struct xenvif_pkt_state pkt;
|
||||
|
||||
xenvif_rx_next_skb(queue, &pkt);
|
||||
|
||||
do {
|
||||
struct xen_netif_rx_request *req;
|
||||
struct xen_netif_rx_response *rsp;
|
||||
|
||||
req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons);
|
||||
rsp = RING_GET_RESPONSE(&queue->rx, queue->rx.req_cons);
|
||||
|
||||
/* Extras must go after the first data slot */
|
||||
if (pkt.slot != 0 && pkt.extra_count != 0)
|
||||
xenvif_rx_extra_slot(queue, &pkt, req, rsp);
|
||||
else
|
||||
xenvif_rx_data_slot(queue, &pkt, req, rsp);
|
||||
|
||||
queue->rx.req_cons++;
|
||||
pkt.slot++;
|
||||
} while (pkt.remaining_len > 0 || pkt.extra_count != 0);
|
||||
|
||||
xenvif_rx_complete(queue, &pkt);
|
||||
}
|
||||
|
||||
#define RX_BATCH_SIZE 64
|
||||
|
||||
void xenvif_rx_action(struct xenvif_queue *queue)
|
||||
{
|
||||
struct sk_buff_head completed_skbs;
|
||||
unsigned int work_done = 0;
|
||||
|
||||
__skb_queue_head_init(&completed_skbs);
|
||||
queue->rx_copy.completed = &completed_skbs;
|
||||
|
||||
while (xenvif_rx_ring_slots_available(queue) &&
|
||||
work_done < RX_BATCH_SIZE) {
|
||||
xenvif_rx_skb(queue);
|
||||
work_done++;
|
||||
}
|
||||
|
||||
/* Flush any pending copies and complete all skbs. */
|
||||
xenvif_rx_copy_flush(queue);
|
||||
}
|
||||
|
||||
static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
|
||||
{
|
||||
RING_IDX prod, cons;
|
||||
|
||||
prod = queue->rx.sring->req_prod;
|
||||
cons = queue->rx.req_cons;
|
||||
|
||||
return !queue->stalled &&
|
||||
prod - cons < 1 &&
|
||||
time_after(jiffies,
|
||||
queue->last_rx_time + queue->vif->stall_timeout);
|
||||
}
|
||||
|
||||
static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
|
||||
{
|
||||
RING_IDX prod, cons;
|
||||
|
||||
prod = queue->rx.sring->req_prod;
|
||||
cons = queue->rx.req_cons;
|
||||
|
||||
return queue->stalled && prod - cons >= 1;
|
||||
}
|
||||
|
||||
static bool xenvif_have_rx_work(struct xenvif_queue *queue)
|
||||
{
|
||||
return xenvif_rx_ring_slots_available(queue) ||
|
||||
(queue->vif->stall_timeout &&
|
||||
(xenvif_rx_queue_stalled(queue) ||
|
||||
xenvif_rx_queue_ready(queue))) ||
|
||||
kthread_should_stop() ||
|
||||
queue->vif->disabled;
|
||||
}
|
||||
|
||||
static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
long timeout;
|
||||
|
||||
skb = skb_peek(&queue->rx_queue);
|
||||
if (!skb)
|
||||
return MAX_SCHEDULE_TIMEOUT;
|
||||
|
||||
timeout = XENVIF_RX_CB(skb)->expires - jiffies;
|
||||
return timeout < 0 ? 0 : timeout;
|
||||
}
|
||||
|
||||
/* Wait until the guest Rx thread has work.
|
||||
*
|
||||
* The timeout needs to be adjusted based on the current head of the
|
||||
* queue (and not just the head at the beginning). In particular, if
|
||||
* the queue is initially empty an infinite timeout is used and this
|
||||
* needs to be reduced when a skb is queued.
|
||||
*
|
||||
* This cannot be done with wait_event_timeout() because it only
|
||||
* calculates the timeout once.
|
||||
*/
|
||||
static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
|
||||
{
|
||||
DEFINE_WAIT(wait);
|
||||
|
||||
if (xenvif_have_rx_work(queue))
|
||||
return;
|
||||
|
||||
for (;;) {
|
||||
long ret;
|
||||
|
||||
prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
|
||||
if (xenvif_have_rx_work(queue))
|
||||
break;
|
||||
ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
|
||||
if (!ret)
|
||||
break;
|
||||
}
|
||||
finish_wait(&queue->wq, &wait);
|
||||
}
|
||||
|
||||
static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
|
||||
{
|
||||
struct xenvif *vif = queue->vif;
|
||||
|
||||
queue->stalled = true;
|
||||
|
||||
/* At least one queue has stalled? Disable the carrier. */
|
||||
spin_lock(&vif->lock);
|
||||
if (vif->stalled_queues++ == 0) {
|
||||
netdev_info(vif->dev, "Guest Rx stalled");
|
||||
netif_carrier_off(vif->dev);
|
||||
}
|
||||
spin_unlock(&vif->lock);
|
||||
}
|
||||
|
||||
static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
|
||||
{
|
||||
struct xenvif *vif = queue->vif;
|
||||
|
||||
queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
|
||||
queue->stalled = false;
|
||||
|
||||
/* All queues are ready? Enable the carrier. */
|
||||
spin_lock(&vif->lock);
|
||||
if (--vif->stalled_queues == 0) {
|
||||
netdev_info(vif->dev, "Guest Rx ready");
|
||||
netif_carrier_on(vif->dev);
|
||||
}
|
||||
spin_unlock(&vif->lock);
|
||||
}
|
||||
|
||||
int xenvif_kthread_guest_rx(void *data)
|
||||
{
|
||||
struct xenvif_queue *queue = data;
|
||||
struct xenvif *vif = queue->vif;
|
||||
|
||||
if (!vif->stall_timeout)
|
||||
xenvif_queue_carrier_on(queue);
|
||||
|
||||
for (;;) {
|
||||
xenvif_wait_for_rx_work(queue);
|
||||
|
||||
if (kthread_should_stop())
|
||||
break;
|
||||
|
||||
/* This frontend is found to be rogue, disable it in
|
||||
* kthread context. Currently this is only set when
|
||||
* netback finds out frontend sends malformed packet,
|
||||
* but we cannot disable the interface in softirq
|
||||
* context so we defer it here, if this thread is
|
||||
* associated with queue 0.
|
||||
*/
|
||||
if (unlikely(vif->disabled && queue->id == 0)) {
|
||||
xenvif_carrier_off(vif);
|
||||
break;
|
||||
}
|
||||
|
||||
if (!skb_queue_empty(&queue->rx_queue))
|
||||
xenvif_rx_action(queue);
|
||||
|
||||
/* If the guest hasn't provided any Rx slots for a
|
||||
* while it's probably not responsive, drop the
|
||||
* carrier so packets are dropped earlier.
|
||||
*/
|
||||
if (vif->stall_timeout) {
|
||||
if (xenvif_rx_queue_stalled(queue))
|
||||
xenvif_queue_carrier_off(queue);
|
||||
else if (xenvif_rx_queue_ready(queue))
|
||||
xenvif_queue_carrier_on(queue);
|
||||
}
|
||||
|
||||
/* Queued packets may have foreign pages from other
|
||||
* domains. These cannot be queued indefinitely as
|
||||
* this would starve guests of grant refs and transmit
|
||||
* slots.
|
||||
*/
|
||||
xenvif_rx_queue_drop_expired(queue);
|
||||
|
||||
cond_resched();
|
||||
}
|
||||
|
||||
/* Bin any remaining skbs */
|
||||
xenvif_rx_queue_purge(queue);
|
||||
|
||||
return 0;
|
||||
}
|
|
@@ -1135,7 +1135,6 @@ static int read_xenbus_vif_flags(struct backend_info *be)
 	vif->can_sg = !!val;
 
 	vif->gso_mask = 0;
-	vif->gso_prefix_mask = 0;
 
 	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4",
 			 "%d", &val) < 0)
@@ -1143,32 +1142,12 @@ static int read_xenbus_vif_flags(struct backend_info *be)
 	if (val)
 		vif->gso_mask |= GSO_BIT(TCPV4);
 
-	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4-prefix",
-			 "%d", &val) < 0)
-		val = 0;
-	if (val)
-		vif->gso_prefix_mask |= GSO_BIT(TCPV4);
-
 	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6",
 			 "%d", &val) < 0)
 		val = 0;
 	if (val)
 		vif->gso_mask |= GSO_BIT(TCPV6);
 
-	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6-prefix",
-			 "%d", &val) < 0)
-		val = 0;
-	if (val)
-		vif->gso_prefix_mask |= GSO_BIT(TCPV6);
-
-	if (vif->gso_mask & vif->gso_prefix_mask) {
-		xenbus_dev_fatal(dev, err,
-				 "%s: gso and gso prefix flags are not "
-				 "mutually exclusive",
-				 dev->otherend);
-		return -EOPNOTSUPP;
-	}
-
 	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
 			 "%d", &val) < 0)
 		val = 0;
|
|
@@ -418,7 +418,7 @@ static void afs_deliver_to_call(struct afs_call *call)
 						     &call->abort_code);
 		if (ret == -EINPROGRESS || ret == -EAGAIN)
 			return;
-		if (ret == 1) {
+		if (ret == 1 || ret < 0) {
 			call->state = AFS_CALL_COMPLETE;
 			goto done;
 		}
@@ -245,7 +245,7 @@ static inline struct team_port *team_get_port_by_index(struct team *team,
 	return NULL;
 }
 
-static inline int team_num_to_port_index(struct team *team, int num)
+static inline int team_num_to_port_index(struct team *team, unsigned int num)
 {
 	int en_port_count = ACCESS_ONCE(team->en_port_count);
 
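The team fix above takes the port index as an unsigned int: with a signed int, a negative value survives the modulo by the port count and produces an out-of-bounds array index. A small demonstration follows; only the modulo behaviour is real, the function names are illustrative.

#include <stdio.h>

/* With a signed index, a negative value survives the modulo and would
 * index before the start of the port array. */
static int index_signed(int num, int en_port_count)
{
	return num % en_port_count;
}

/* The fix: take the value as unsigned, so the modulo result is always
 * within [0, en_port_count). */
static unsigned int index_unsigned(unsigned int num, unsigned int en_port_count)
{
	return num % en_port_count;
}

int main(void)
{
	printf("signed:   %d\n", index_signed(-3, 4));    /* -3: out of range  */
	printf("unsigned: %u\n", index_unsigned(-3, 4));  /* wraps, stays in range */
	return 0;
}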
@ -21,8 +21,6 @@
|
|||
SOFTWARE IS DISCLAIMED.
|
||||
*/
|
||||
|
||||
#include <asm/unaligned.h>
|
||||
|
||||
#include <net/bluetooth/bluetooth.h>
|
||||
#include <net/bluetooth/hci_core.h>
|
||||
#include <net/bluetooth/mgmt.h>
|
||||
|
@ -973,33 +971,58 @@ void __hci_req_enable_advertising(struct hci_request *req)
|
|||
|
||||
static u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
|
||||
{
|
||||
size_t name_len;
|
||||
size_t complete_len;
|
||||
size_t short_len;
|
||||
int max_len;
|
||||
|
||||
max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
|
||||
name_len = strlen(hdev->dev_name);
|
||||
if (name_len > 0 && max_len > 0) {
|
||||
complete_len = strlen(hdev->dev_name);
|
||||
short_len = strlen(hdev->short_name);
|
||||
|
||||
if (name_len > max_len) {
|
||||
name_len = max_len;
|
||||
ptr[1] = EIR_NAME_SHORT;
|
||||
} else
|
||||
ptr[1] = EIR_NAME_COMPLETE;
|
||||
/* no space left for name */
|
||||
if (max_len < 1)
|
||||
return ad_len;
|
||||
|
||||
ptr[0] = name_len + 1;
|
||||
/* no name set */
|
||||
if (!complete_len)
|
||||
return ad_len;
|
||||
|
||||
memcpy(ptr + 2, hdev->dev_name, name_len);
|
||||
/* complete name fits and is eq to max short name len or smaller */
|
||||
if (complete_len <= max_len &&
|
||||
complete_len <= HCI_MAX_SHORT_NAME_LENGTH) {
|
||||
return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
|
||||
hdev->dev_name, complete_len);
|
||||
}
|
||||
|
||||
ad_len += (name_len + 2);
|
||||
ptr += (name_len + 2);
|
||||
/* short name set and fits */
|
||||
if (short_len && short_len <= max_len) {
|
||||
return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
|
||||
hdev->short_name, short_len);
|
||||
}
|
||||
|
||||
/* no short name set so shorten complete name */
|
||||
if (!short_len) {
|
||||
return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
|
||||
hdev->dev_name, max_len);
|
||||
}
|
||||
|
||||
return ad_len;
|
||||
}
|
||||
|
||||
static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
|
||||
{
|
||||
return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
|
||||
}
|
||||
|
||||
static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
|
||||
{
|
||||
return append_local_name(hdev, ptr, 0);
|
||||
u8 scan_rsp_len = 0;
|
||||
|
||||
if (hdev->appearance) {
|
||||
scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
|
||||
}
|
||||
|
||||
return append_local_name(hdev, ptr, scan_rsp_len);
|
||||
}
|
||||
|
||||
static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
|
||||
|
@ -1016,18 +1039,13 @@ static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
|
|||
instance_flags = adv_instance->flags;
|
||||
|
||||
if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
|
||||
ptr[0] = 3;
|
||||
ptr[1] = EIR_APPEARANCE;
|
||||
put_unaligned_le16(hdev->appearance, ptr + 2);
|
||||
scan_rsp_len += 4;
|
||||
ptr += 4;
|
||||
scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
|
||||
}
|
||||
|
||||
memcpy(ptr, adv_instance->scan_rsp_data,
|
||||
memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
|
||||
adv_instance->scan_rsp_len);
|
||||
|
||||
scan_rsp_len += adv_instance->scan_rsp_len;
|
||||
ptr += adv_instance->scan_rsp_len;
|
||||
|
||||
if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
|
||||
scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
|
||||
|
|
|
@@ -20,6 +20,8 @@
    SOFTWARE IS DISCLAIMED.
 */
 
+#include <asm/unaligned.h>
+
 #define hci_req_sync_lock(hdev)   mutex_lock(&hdev->req_lock)
 #define hci_req_sync_unlock(hdev)   mutex_unlock(&hdev->req_lock)
 
@@ -103,3 +105,24 @@ static inline void hci_update_background_scan(struct hci_dev *hdev)
 
 void hci_request_setup(struct hci_dev *hdev);
 void hci_request_cancel_all(struct hci_dev *hdev);
+
+static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type,
+				  u8 *data, u8 data_len)
+{
+	eir[eir_len++] = sizeof(type) + data_len;
+	eir[eir_len++] = type;
+	memcpy(&eir[eir_len], data, data_len);
+	eir_len += data_len;
+
+	return eir_len;
+}
+
+static inline u16 eir_append_le16(u8 *eir, u16 eir_len, u8 type, u16 data)
+{
+	eir[eir_len++] = sizeof(type) + sizeof(data);
+	eir[eir_len++] = type;
+	put_unaligned_le16(data, &eir[eir_len]);
+	eir_len += sizeof(data);
+
+	return eir_len;
+}
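The helpers moved into hci_request.h above build length/type/value records into an advertising or scan-response buffer. Here is a standalone userspace sketch that uses the same append pattern and dumps the result; the EIR type values are the standard assigned numbers, everything else (buffer size, the "demo" name) is illustrative.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EIR_NAME_COMPLETE 0x09
#define EIR_APPEARANCE    0x19

/* Append one length/type/value record, as eir_append_data() does. */
static uint16_t eir_append_data(uint8_t *eir, uint16_t eir_len, uint8_t type,
				const uint8_t *data, uint8_t data_len)
{
	eir[eir_len++] = sizeof(type) + data_len;
	eir[eir_len++] = type;
	memcpy(&eir[eir_len], data, data_len);
	return eir_len + data_len;
}

/* 16-bit little-endian variant, as eir_append_le16() does. */
static uint16_t eir_append_le16(uint8_t *eir, uint16_t eir_len, uint8_t type,
				uint16_t data)
{
	eir[eir_len++] = sizeof(type) + sizeof(data);
	eir[eir_len++] = type;
	eir[eir_len++] = data & 0xff;
	eir[eir_len++] = data >> 8;
	return eir_len;
}

int main(void)
{
	uint8_t buf[31] = { 0 };
	uint16_t len = 0;

	len = eir_append_le16(buf, len, EIR_APPEARANCE, 0x0341);
	len = eir_append_data(buf, len, EIR_NAME_COMPLETE,
			      (const uint8_t *)"demo", 4);

	for (uint16_t i = 0; i < len; i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}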
|
|
@ -867,27 +867,6 @@ static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
|
|||
sizeof(rp));
|
||||
}
|
||||
|
||||
static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
|
||||
u8 data_len)
|
||||
{
|
||||
eir[eir_len++] = sizeof(type) + data_len;
|
||||
eir[eir_len++] = type;
|
||||
memcpy(&eir[eir_len], data, data_len);
|
||||
eir_len += data_len;
|
||||
|
||||
return eir_len;
|
||||
}
|
||||
|
||||
static inline u16 eir_append_le16(u8 *eir, u16 eir_len, u8 type, u16 data)
|
||||
{
|
||||
eir[eir_len++] = sizeof(type) + sizeof(data);
|
||||
eir[eir_len++] = type;
|
||||
put_unaligned_le16(data, &eir[eir_len]);
|
||||
eir_len += sizeof(data);
|
||||
|
||||
return eir_len;
|
||||
}
|
||||
|
||||
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
|
||||
{
|
||||
u16 eir_len = 0;
|
||||
|
|
|
@@ -5729,6 +5729,7 @@ int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl,
 	return ret;
 }
 
+static int minus_one = -1;
 static const int one = 1;
 static const int two_five_five = 255;
 
@@ -5789,7 +5790,8 @@ static const struct ctl_table addrconf_sysctl[] = {
 		.data		= &ipv6_devconf.rtr_solicits,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &minus_one,
 	},
 	{
 		.procname	= "router_solicitation_interval",
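The addrconf change above switches rtr_solicits to proc_dointvec_minmax with a lower bound of -1, so -1 (meaning "solicit forever") stays legal while anything below it is rejected. A small sketch of that bounded-write check follows; the function and variable names are illustrative, not the kernel's sysctl API.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Accept a new value only if it is within [min, max], the way
 * proc_dointvec_minmax() guards a sysctl write. */
static bool sysctl_write_int(int *slot, int val, int min, int max)
{
	if (val < min || val > max)
		return false;   /* reject, like -EINVAL */
	*slot = val;
	return true;
}

int main(void)
{
	int rtr_solicits = 3;

	/* -1 means "retransmit router solicitations forever" and is allowed... */
	printf("-1: %s\n", sysctl_write_int(&rtr_solicits, -1, -1, INT_MAX)
	       ? "accepted" : "rejected");
	/* ...but anything below -1 is now refused. */
	printf("-5: %s\n", sysctl_write_int(&rtr_solicits, -5, -1, INT_MAX)
	       ? "accepted" : "rejected");
	return 0;
}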
|
|
@ -65,49 +65,24 @@ static DEFINE_MUTEX(nf_hook_mutex);
|
|||
#define nf_entry_dereference(e) \
|
||||
rcu_dereference_protected(e, lockdep_is_held(&nf_hook_mutex))
|
||||
|
||||
static struct nf_hook_entry *nf_hook_entry_head(struct net *net,
|
||||
const struct nf_hook_ops *reg)
|
||||
static struct nf_hook_entry __rcu **nf_hook_entry_head(struct net *net, const struct nf_hook_ops *reg)
|
||||
{
|
||||
struct nf_hook_entry *hook_head = NULL;
|
||||
|
||||
if (reg->pf != NFPROTO_NETDEV)
|
||||
hook_head = nf_entry_dereference(net->nf.hooks[reg->pf]
|
||||
[reg->hooknum]);
|
||||
else if (reg->hooknum == NF_NETDEV_INGRESS) {
|
||||
#ifdef CONFIG_NETFILTER_INGRESS
|
||||
if (reg->dev && dev_net(reg->dev) == net)
|
||||
hook_head =
|
||||
nf_entry_dereference(
|
||||
reg->dev->nf_hooks_ingress);
|
||||
#endif
|
||||
}
|
||||
return hook_head;
|
||||
}
|
||||
return net->nf.hooks[reg->pf]+reg->hooknum;
|
||||
|
||||
/* must hold nf_hook_mutex */
|
||||
static void nf_set_hooks_head(struct net *net, const struct nf_hook_ops *reg,
|
||||
struct nf_hook_entry *entry)
|
||||
{
|
||||
switch (reg->pf) {
|
||||
case NFPROTO_NETDEV:
|
||||
#ifdef CONFIG_NETFILTER_INGRESS
|
||||
/* We already checked in nf_register_net_hook() that this is
|
||||
* used from ingress.
|
||||
*/
|
||||
rcu_assign_pointer(reg->dev->nf_hooks_ingress, entry);
|
||||
#endif
|
||||
break;
|
||||
default:
|
||||
rcu_assign_pointer(net->nf.hooks[reg->pf][reg->hooknum],
|
||||
entry);
|
||||
break;
|
||||
if (reg->hooknum == NF_NETDEV_INGRESS) {
|
||||
if (reg->dev && dev_net(reg->dev) == net)
|
||||
return ®->dev->nf_hooks_ingress;
|
||||
}
|
||||
#endif
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
|
||||
{
|
||||
struct nf_hook_entry *hooks_entry;
|
||||
struct nf_hook_entry *entry;
|
||||
struct nf_hook_entry __rcu **pp;
|
||||
struct nf_hook_entry *entry, *p;
|
||||
|
||||
if (reg->pf == NFPROTO_NETDEV) {
|
||||
#ifndef CONFIG_NETFILTER_INGRESS
|
||||
|
@ -119,6 +94,10 @@ int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
pp = nf_hook_entry_head(net, reg);
|
||||
if (!pp)
|
||||
return -EINVAL;
|
||||
|
||||
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
|
||||
if (!entry)
|
||||
return -ENOMEM;
|
||||
|
@ -128,26 +107,15 @@ int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
|
|||
entry->next = NULL;
|
||||
|
||||
mutex_lock(&nf_hook_mutex);
|
||||
hooks_entry = nf_hook_entry_head(net, reg);
|
||||
|
||||
if (hooks_entry && hooks_entry->orig_ops->priority > reg->priority) {
|
||||
/* This is the case where we need to insert at the head */
|
||||
entry->next = hooks_entry;
|
||||
hooks_entry = NULL;
|
||||
}
|
||||
|
||||
while (hooks_entry &&
|
||||
reg->priority >= hooks_entry->orig_ops->priority &&
|
||||
nf_entry_dereference(hooks_entry->next)) {
|
||||
hooks_entry = nf_entry_dereference(hooks_entry->next);
|
||||
}
|
||||
|
||||
if (hooks_entry) {
|
||||
entry->next = nf_entry_dereference(hooks_entry->next);
|
||||
rcu_assign_pointer(hooks_entry->next, entry);
|
||||
} else {
|
||||
nf_set_hooks_head(net, reg, entry);
|
||||
/* Find the spot in the list */
|
||||
while ((p = nf_entry_dereference(*pp)) != NULL) {
|
||||
if (reg->priority < p->orig_ops->priority)
|
||||
break;
|
||||
pp = &p->next;
|
||||
}
|
||||
rcu_assign_pointer(entry->next, p);
|
||||
rcu_assign_pointer(*pp, entry);
|
||||
|
||||
mutex_unlock(&nf_hook_mutex);
|
||||
#ifdef CONFIG_NETFILTER_INGRESS
|
||||
|
@ -163,33 +131,23 @@ EXPORT_SYMBOL(nf_register_net_hook);
|
|||
|
||||
void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
|
||||
{
|
||||
struct nf_hook_entry *hooks_entry;
|
||||
struct nf_hook_entry __rcu **pp;
|
||||
struct nf_hook_entry *p;
|
||||
|
||||
pp = nf_hook_entry_head(net, reg);
|
||||
if (WARN_ON_ONCE(!pp))
|
||||
return;
|
||||
|
||||
mutex_lock(&nf_hook_mutex);
|
||||
hooks_entry = nf_hook_entry_head(net, reg);
|
||||
if (hooks_entry && hooks_entry->orig_ops == reg) {
|
||||
nf_set_hooks_head(net, reg,
|
||||
nf_entry_dereference(hooks_entry->next));
|
||||
goto unlock;
|
||||
}
|
||||
while (hooks_entry && nf_entry_dereference(hooks_entry->next)) {
|
||||
struct nf_hook_entry *next =
|
||||
nf_entry_dereference(hooks_entry->next);
|
||||
struct nf_hook_entry *nnext;
|
||||
|
||||
if (next->orig_ops != reg) {
|
||||
hooks_entry = next;
|
||||
continue;
|
||||
while ((p = nf_entry_dereference(*pp)) != NULL) {
|
||||
if (p->orig_ops == reg) {
|
||||
rcu_assign_pointer(*pp, p->next);
|
||||
break;
|
||||
}
|
||||
nnext = nf_entry_dereference(next->next);
|
||||
rcu_assign_pointer(hooks_entry->next, nnext);
|
||||
hooks_entry = next;
|
||||
break;
|
||||
pp = &p->next;
|
||||
}
|
||||
|
||||
unlock:
|
||||
mutex_unlock(&nf_hook_mutex);
|
||||
if (!hooks_entry) {
|
||||
if (!p) {
|
||||
WARN(1, "nf_unregister_net_hook: hook not found!\n");
|
||||
return;
|
||||
}
|
||||
|
@@ -201,10 +159,10 @@ void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
 	static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
 #endif
 	synchronize_net();
-	nf_queue_nf_hook_drop(net, hooks_entry);
+	nf_queue_nf_hook_drop(net, p);
 	/* other cpu might still process nfqueue verdict that used reg */
 	synchronize_net();
-	kfree(hooks_entry);
+	kfree(p);
 }
 EXPORT_SYMBOL(nf_unregister_net_hook);
 
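The netfilter rework above walks the hook list through a pointer-to-pointer (struct nf_hook_entry __rcu **pp), which makes priority-ordered insertion and unlinking a single uniform walk with no special case for the list head. Below is a plain, non-RCU sketch of that idiom; the struct and field names are invented for the example.

#include <stdio.h>
#include <stdlib.h>

struct hook {
	int priority;
	struct hook *next;
};

/* Insert keeping the list sorted by ascending priority, using a
 * pointer-to-pointer walk so the head needs no special case. */
static void hook_add(struct hook **pp, struct hook *entry)
{
	struct hook *p;

	while ((p = *pp) != NULL) {
		if (entry->priority < p->priority)
			break;
		pp = &p->next;
	}
	entry->next = p;
	*pp = entry;
}

/* Unlink the first entry with a matching priority, using the same walk. */
static struct hook *hook_del(struct hook **pp, int priority)
{
	struct hook *p;

	while ((p = *pp) != NULL) {
		if (p->priority == priority) {
			*pp = p->next;
			return p;
		}
		pp = &p->next;
	}
	return NULL;
}

int main(void)
{
	struct hook *head = NULL;
	int prios[] = { 30, 10, 20 };

	for (int i = 0; i < 3; i++) {
		struct hook *h = calloc(1, sizeof(*h));

		h->priority = prios[i];
		hook_add(&head, h);
	}
	free(hook_del(&head, 20));
	for (struct hook *p = head; p; p = p->next)
		printf("%d ", p->priority);    /* prints: 10 30 */
	printf("\n");
	return 0;
}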
||||
|
|
|
@@ -1832,7 +1832,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 	/* Record the max length of recvmsg() calls for future allocations */
 	nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
 	nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
-				     16384);
+				     SKB_WITH_OVERHEAD(32768));
 
 	copied = data_skb->len;
 	if (len < copied) {
@@ -2083,8 +2083,9 @@ static int netlink_dump(struct sock *sk)
 
 	if (alloc_min_size < nlk->max_recvmsg_len) {
 		alloc_size = nlk->max_recvmsg_len;
-		skb = alloc_skb(alloc_size, GFP_KERNEL |
-				__GFP_NOWARN | __GFP_NORETRY);
+		skb = alloc_skb(alloc_size,
+				(GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) |
+				__GFP_NOWARN | __GFP_NORETRY);
 	}
 	if (!skb) {
 		alloc_size = alloc_min_size;
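The netlink_dump() change above attempts the large, history-sized buffer only as a cheap allocation (no direct reclaim, no retries) and falls back to the minimum size when that fails. A rough userspace analogue of "try big cheaply, fall back small" is sketched below; malloc has no reclaim flags, so the cheap-failure path is only simulated, and all names are invented.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the cheap, may-fail large allocation: in the kernel this is
 * alloc_skb() with __GFP_NORETRY and without __GFP_DIRECT_RECLAIM. */
static void *try_alloc_cheap(size_t size)
{
	return size > (1u << 20) ? NULL : malloc(size); /* pretend big fails */
}

static void *alloc_dump_buffer(size_t preferred, size_t minimum, size_t *got)
{
	void *buf = NULL;

	if (preferred > minimum)
		buf = try_alloc_cheap(preferred);   /* opportunistic attempt */
	if (buf) {
		*got = preferred;
		return buf;
	}
	*got = minimum;
	return malloc(minimum);                     /* guaranteed-effort path */
}

int main(void)
{
	size_t got;
	void *buf = alloc_dump_buffer(4u << 20, 16384, &got);

	printf("allocated %zu bytes\n", got);
	free(buf);
	return 0;
}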
|
|
@ -3952,6 +3952,7 @@ static int packet_notifier(struct notifier_block *this,
|
|||
}
|
||||
if (msg == NETDEV_UNREGISTER) {
|
||||
packet_cached_dev_reset(po);
|
||||
fanout_release(sk);
|
||||
po->ifindex = -1;
|
||||
if (po->prot_hook.dev)
|
||||
dev_put(po->prot_hook.dev);
|
||||
|
|
|
@@ -678,9 +678,9 @@ static int rxrpc_release_sock(struct sock *sk)
 	sk->sk_state   = RXRPC_CLOSE;
 	spin_unlock_bh(&sk->sk_receive_queue.lock);
 
-	if (rx->local && rx->local->service == rx) {
+	if (rx->local && rcu_access_pointer(rx->local->service) == rx) {
 		write_lock(&rx->local->services_lock);
-		rx->local->service = NULL;
+		rcu_assign_pointer(rx->local->service, NULL);
 		write_unlock(&rx->local->services_lock);
 	}
 
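The rxrpc fix above publishes and clears local->service with rcu_assign_pointer()/rcu_access_pointer() so an incoming call can safely observe a NULL service socket instead of oopsing. Below is a simplified sketch of the publish-then-NULL-check pattern, using C11 release/acquire atomics in place of RCU; the names and the printf-based "delivery" are illustrative only.

#include <stdatomic.h>
#include <stdio.h>

struct service_sock { const char *name; };

/* Shared slot: readers may observe it concurrently with unbinding. */
static _Atomic(struct service_sock *) service;

/* Writer side: publish or clear the service (rcu_assign_pointer analogue). */
static void service_bind(struct service_sock *sk)
{
	atomic_store_explicit(&service, sk, memory_order_release);
}

/* Reader side: always check for NULL before using the pointer, which is
 * exactly what the rxrpc_new_incoming_call() hunk below adds. */
static void handle_incoming_call(void)
{
	struct service_sock *sk =
		atomic_load_explicit(&service, memory_order_acquire);

	if (!sk) {
		printf("no service bound, rejecting call\n");
		return;
	}
	printf("delivering call to %s\n", sk->name);
}

int main(void)
{
	struct service_sock sk = { .name = "afs" };

	service_bind(&sk);
	handle_incoming_call();
	service_bind(NULL);      /* socket released */
	handle_incoming_call();  /* must not dereference NULL */
	return 0;
}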
|
|
@ -398,6 +398,7 @@ enum rxrpc_call_flag {
|
|||
RXRPC_CALL_EXPOSED, /* The call was exposed to the world */
|
||||
RXRPC_CALL_RX_LAST, /* Received the last packet (at rxtx_top) */
|
||||
RXRPC_CALL_TX_LAST, /* Last packet in Tx buffer (at rxtx_top) */
|
||||
RXRPC_CALL_SEND_PING, /* A ping will need to be sent */
|
||||
RXRPC_CALL_PINGING, /* Ping in process */
|
||||
RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */
|
||||
};
|
||||
|
@ -410,6 +411,7 @@ enum rxrpc_call_event {
|
|||
RXRPC_CALL_EV_ABORT, /* need to generate abort */
|
||||
RXRPC_CALL_EV_TIMER, /* Timer expired */
|
||||
RXRPC_CALL_EV_RESEND, /* Tx resend required */
|
||||
RXRPC_CALL_EV_PING, /* Ping send required */
|
||||
};
|
||||
|
||||
/*
|
||||
|
@@ -466,6 +468,7 @@ struct rxrpc_call {
struct rxrpc_sock __rcu *socket; /* socket responsible */
ktime_t ack_at; /* When deferred ACK needs to happen */
ktime_t resend_at; /* When next resend needs to happen */
ktime_t ping_at; /* When next to send a ping */
ktime_t expire_at; /* When the call times out */
struct timer_list timer; /* Combined event timer */
struct work_struct processor; /* Event processor */
@@ -558,8 +561,10 @@ struct rxrpc_call {
rxrpc_seq_t ackr_prev_seq; /* previous sequence number received */
rxrpc_seq_t ackr_consumed; /* Highest packet shown consumed */
rxrpc_seq_t ackr_seen; /* Highest packet shown seen */
rxrpc_serial_t ackr_ping; /* Last ping sent */
ktime_t ackr_ping_time; /* Time last ping sent */

/* ping management */
rxrpc_serial_t ping_serial; /* Last ping sent */
ktime_t ping_time; /* Time last ping sent */

/* transmission-phase ACK management */
ktime_t acks_latest_ts; /* Timestamp of latest ACK received */
@@ -728,8 +733,10 @@ extern const char rxrpc_rtt_rx_traces[rxrpc_rtt_rx__nr_trace][5];
enum rxrpc_timer_trace {
rxrpc_timer_begin,
rxrpc_timer_init_for_reply,
rxrpc_timer_init_for_send_reply,
rxrpc_timer_expired,
rxrpc_timer_set_for_ack,
rxrpc_timer_set_for_ping,
rxrpc_timer_set_for_resend,
rxrpc_timer_set_for_send,
rxrpc_timer__nr_trace
@@ -743,6 +750,7 @@ enum rxrpc_propose_ack_trace {
rxrpc_propose_ack_ping_for_lost_ack,
rxrpc_propose_ack_ping_for_lost_reply,
rxrpc_propose_ack_ping_for_params,
rxrpc_propose_ack_processing_op,
rxrpc_propose_ack_respond_to_ack,
rxrpc_propose_ack_respond_to_ping,
rxrpc_propose_ack_retry_tx,
@@ -777,7 +785,7 @@ extern const char rxrpc_congest_modes[NR__RXRPC_CONGEST_MODES][10];
extern const char rxrpc_congest_changes[rxrpc_congest__nr_change][9];

extern const char *const rxrpc_pkts[];
extern const char const rxrpc_ack_names[RXRPC_ACK__INVALID + 1][4];
extern const char rxrpc_ack_names[RXRPC_ACK__INVALID + 1][4];

#include <trace/events/rxrpc.h>

@@ -805,6 +813,7 @@ int rxrpc_reject_call(struct rxrpc_sock *);
/*
* call_event.c
*/
void __rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
void rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool,
enum rxrpc_propose_ack_trace);
@@ -1068,7 +1077,8 @@ extern const s8 rxrpc_ack_priority[];
/*
* output.c
*/
int rxrpc_send_call_packet(struct rxrpc_call *, u8);
int rxrpc_send_ack_packet(struct rxrpc_call *, bool);
int rxrpc_send_abort_packet(struct rxrpc_call *);
int rxrpc_send_data_packet(struct rxrpc_call *, struct sk_buff *, bool);
void rxrpc_reject_packets(struct rxrpc_local *);

@@ -337,7 +337,7 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,

/* Get the socket providing the service */
rx = rcu_dereference(local->service);
if (service_id == rx->srx.srx_service)
if (rx && service_id == rx->srx.srx_service)
goto found_service;

trace_rxrpc_abort("INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
@@ -565,7 +565,7 @@ int rxrpc_reject_call(struct rxrpc_sock *rx)
write_unlock_bh(&call->state_lock);
write_unlock(&rx->call_lock);
if (abort) {
rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT);
rxrpc_send_abort_packet(call);
rxrpc_release_call(rx, call);
rxrpc_put_call(call, rxrpc_call_put);
}
@@ -24,19 +24,20 @@
/*
* Set the timer
*/
void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
ktime_t now)
void __rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
ktime_t now)
{
unsigned long t_j, now_j = jiffies;
ktime_t t;
bool queue = false;

read_lock_bh(&call->state_lock);

if (call->state < RXRPC_CALL_COMPLETE) {
t = call->expire_at;
if (!ktime_after(t, now))
if (!ktime_after(t, now)) {
trace_rxrpc_timer(call, why, now, now_j);
queue = true;
goto out;
}

if (!ktime_after(call->resend_at, now)) {
call->resend_at = call->expire_at;
@@ -54,6 +55,14 @@ void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
t = call->ack_at;
}

if (!ktime_after(call->ping_at, now)) {
call->ping_at = call->expire_at;
if (!test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
queue = true;
} else if (ktime_before(call->ping_at, t)) {
t = call->ping_at;
}

t_j = nsecs_to_jiffies(ktime_to_ns(ktime_sub(t, now)));
t_j += jiffies;

@@ -68,15 +77,45 @@ void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
mod_timer(&call->timer, t_j);
trace_rxrpc_timer(call, why, now, now_j);
}

if (queue)
rxrpc_queue_call(call);
}

out:
if (queue)
rxrpc_queue_call(call);
}

/*
* Set the timer
*/
void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
ktime_t now)
{
read_lock_bh(&call->state_lock);
__rxrpc_set_timer(call, why, now);
read_unlock_bh(&call->state_lock);
}

/*
* Propose a PING ACK be sent.
*/
static void rxrpc_propose_ping(struct rxrpc_call *call,
bool immediate, bool background)
{
if (immediate) {
if (background &&
!test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
rxrpc_queue_call(call);
} else {
ktime_t now = ktime_get_real();
ktime_t ping_at = ktime_add_ms(now, rxrpc_idle_ack_delay);

if (ktime_before(ping_at, call->ping_at)) {
call->ping_at = ping_at;
rxrpc_set_timer(call, rxrpc_timer_set_for_ping, now);
}
}
}

/*
* propose an ACK be sent
*/
@@ -90,6 +129,14 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
ktime_t now, ack_at;
s8 prior = rxrpc_ack_priority[ack_reason];

/* Pings are handled specially because we don't want to accidentally
* lose a ping response by subsuming it into a ping.
*/
if (ack_reason == RXRPC_ACK_PING) {
rxrpc_propose_ping(call, immediate, background);
goto trace;
}

/* Update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial
* numbers, but we don't alter the timeout.
*/
@@ -125,7 +172,6 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
expiry = rxrpc_soft_ack_delay;
break;

case RXRPC_ACK_PING:
case RXRPC_ACK_IDLE:
if (rxrpc_idle_ack_delay < expiry)
expiry = rxrpc_idle_ack_delay;
@@ -253,7 +299,7 @@ static void rxrpc_resend(struct rxrpc_call *call, ktime_t now)
goto out;
rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
rxrpc_propose_ack_ping_for_lost_ack);
rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
rxrpc_send_ack_packet(call, true);
goto out;
}

@@ -328,12 +374,13 @@ void rxrpc_process_call(struct work_struct *work)

recheck_state:
if (test_and_clear_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT);
rxrpc_send_abort_packet(call);
goto recheck_state;
}

if (call->state == RXRPC_CALL_COMPLETE) {
del_timer_sync(&call->timer);
rxrpc_notify_socket(call);
goto out_put;
}

@@ -345,13 +392,17 @@ void rxrpc_process_call(struct work_struct *work)
}

if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events)) {
call->ack_at = call->expire_at;
if (call->ackr_reason) {
rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
rxrpc_send_ack_packet(call, false);
goto recheck_state;
}
}

if (test_and_clear_bit(RXRPC_CALL_EV_PING, &call->events)) {
rxrpc_send_ack_packet(call, true);
goto recheck_state;
}

if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events)) {
rxrpc_resend(call, now);
goto recheck_state;
@@ -205,6 +205,7 @@ static void rxrpc_start_call_timer(struct rxrpc_call *call)
expire_at = ktime_add_ms(now, rxrpc_max_call_lifetime);
call->expire_at = expire_at;
call->ack_at = expire_at;
call->ping_at = expire_at;
call->resend_at = expire_at;
call->timer.expires = jiffies + LONG_MAX / 2;
rxrpc_set_timer(call, rxrpc_timer_begin, now);
@@ -498,7 +499,7 @@ void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
struct rxrpc_call, sock_link);
rxrpc_get_call(call, rxrpc_call_got);
rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, ECONNRESET);
rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT);
rxrpc_send_abort_packet(call);
rxrpc_release_call(rx, call);
rxrpc_put_call(call, rxrpc_call_put);
}
@@ -625,9 +625,9 @@ static void rxrpc_input_ping_response(struct rxrpc_call *call,
rxrpc_serial_t ping_serial;
ktime_t ping_time;

ping_time = call->ackr_ping_time;
ping_time = call->ping_time;
smp_rmb();
ping_serial = call->ackr_ping;
ping_serial = call->ping_serial;

if (!test_bit(RXRPC_CALL_PINGING, &call->flags) ||
before(orig_serial, ping_serial))
@@ -847,7 +847,8 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,

if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
RXRPC_TX_ANNO_LAST &&
summary.nr_acks == call->tx_top - hard_ack)
summary.nr_acks == call->tx_top - hard_ack &&
rxrpc_is_client_call(call))
rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial,
false, true,
rxrpc_propose_ack_ping_for_lost_reply);
@@ -937,6 +938,33 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
_leave("");
}

/*
* Handle a new call on a channel implicitly completing the preceding call on
* that channel.
*
* TODO: If callNumber > call_id + 1, renegotiate security.
*/
static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn,
struct rxrpc_call *call)
{
switch (call->state) {
case RXRPC_CALL_SERVER_AWAIT_ACK:
rxrpc_call_completed(call);
break;
case RXRPC_CALL_COMPLETE:
break;
default:
if (rxrpc_abort_call("IMP", call, 0, RX_CALL_DEAD, ESHUTDOWN)) {
set_bit(RXRPC_CALL_EV_ABORT, &call->events);
rxrpc_queue_call(call);
}
break;
}

__rxrpc_disconnect_call(conn, call);
rxrpc_notify_socket(call);
}

/*
* post connection-level events to the connection
* - this includes challenges, responses, some aborts and call terminal packet
@@ -1145,6 +1173,16 @@ void rxrpc_data_ready(struct sock *udp_sk)
}

call = rcu_dereference(chan->call);

if (sp->hdr.callNumber > chan->call_id) {
if (!(sp->hdr.flags & RXRPC_CLIENT_INITIATED)) {
rcu_read_unlock();
goto reject_packet;
}
if (call)
rxrpc_input_implicit_end_call(conn, call);
call = NULL;
}
} else {
skew = 0;
call = NULL;
@@ -93,10 +93,9 @@ const s8 rxrpc_ack_priority[] = {
[RXRPC_ACK_EXCEEDS_WINDOW] = 6,
[RXRPC_ACK_NOSPACE] = 7,
[RXRPC_ACK_PING_RESPONSE] = 8,
[RXRPC_ACK_PING] = 9,
};

const char const rxrpc_ack_names[RXRPC_ACK__INVALID + 1][4] = {
const char rxrpc_ack_names[RXRPC_ACK__INVALID + 1][4] = {
"---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY",
"IDL", "-?-"
};
@@ -196,7 +195,9 @@ const char rxrpc_timer_traces[rxrpc_timer__nr_trace][8] = {
[rxrpc_timer_begin] = "Begin ",
[rxrpc_timer_expired] = "*EXPR*",
[rxrpc_timer_init_for_reply] = "IniRpl",
[rxrpc_timer_init_for_send_reply] = "SndRpl",
[rxrpc_timer_set_for_ack] = "SetAck",
[rxrpc_timer_set_for_ping] = "SetPng",
[rxrpc_timer_set_for_send] = "SetTx ",
[rxrpc_timer_set_for_resend] = "SetRTx",
};
@@ -207,6 +208,7 @@ const char rxrpc_propose_ack_traces[rxrpc_propose_ack__nr_trace][8] = {
[rxrpc_propose_ack_ping_for_lost_ack] = "LostAck",
[rxrpc_propose_ack_ping_for_lost_reply] = "LostRpl",
[rxrpc_propose_ack_ping_for_params] = "Params ",
[rxrpc_propose_ack_processing_op] = "ProcOp ",
[rxrpc_propose_ack_respond_to_ack] = "Rsp2Ack",
[rxrpc_propose_ack_respond_to_ping] = "Rsp2Png",
[rxrpc_propose_ack_retry_tx] = "RetryTx",
@@ -19,26 +19,27 @@
#include <net/af_rxrpc.h>
#include "ar-internal.h"

struct rxrpc_pkt_buffer {
struct rxrpc_ack_buffer {
struct rxrpc_wire_header whdr;
union {
struct {
struct rxrpc_ackpacket ack;
u8 acks[255];
u8 pad[3];
};
__be32 abort_code;
};
struct rxrpc_ackpacket ack;
u8 acks[255];
u8 pad[3];
struct rxrpc_ackinfo ackinfo;
};

struct rxrpc_abort_buffer {
struct rxrpc_wire_header whdr;
__be32 abort_code;
};

/*
* Fill out an ACK packet.
*/
static size_t rxrpc_fill_out_ack(struct rxrpc_call *call,
struct rxrpc_pkt_buffer *pkt,
struct rxrpc_ack_buffer *pkt,
rxrpc_seq_t *_hard_ack,
rxrpc_seq_t *_top)
rxrpc_seq_t *_top,
u8 reason)
{
rxrpc_serial_t serial;
rxrpc_seq_t hard_ack, top, seq;
@@ -58,10 +59,10 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_call *call,
pkt->ack.firstPacket = htonl(hard_ack + 1);
pkt->ack.previousPacket = htonl(call->ackr_prev_seq);
pkt->ack.serial = htonl(serial);
pkt->ack.reason = call->ackr_reason;
pkt->ack.reason = reason;
pkt->ack.nAcks = top - hard_ack;

if (pkt->ack.reason == RXRPC_ACK_PING)
if (reason == RXRPC_ACK_PING)
pkt->whdr.flags |= RXRPC_REQUEST_ACK;

if (after(top, hard_ack)) {
@@ -91,22 +92,19 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_call *call,
}

/*
* Send an ACK or ABORT call packet.
* Send an ACK call packet.
*/
int rxrpc_send_call_packet(struct rxrpc_call *call, u8 type)
int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping)
{
struct rxrpc_connection *conn = NULL;
struct rxrpc_pkt_buffer *pkt;
struct rxrpc_ack_buffer *pkt;
struct msghdr msg;
struct kvec iov[2];
rxrpc_serial_t serial;
rxrpc_seq_t hard_ack, top;
size_t len, n;
bool ping = false;
int ioc, ret;
u32 abort_code;

_enter("%u,%s", call->debug_id, rxrpc_pkts[type]);
int ret;
u8 reason;

spin_lock_bh(&call->lock);
if (call->conn)
@@ -131,68 +129,44 @@ int rxrpc_send_call_packet(struct rxrpc_call *call, u8 type)
pkt->whdr.cid = htonl(call->cid);
pkt->whdr.callNumber = htonl(call->call_id);
pkt->whdr.seq = 0;
pkt->whdr.type = type;
pkt->whdr.flags = conn->out_clientflag;
pkt->whdr.type = RXRPC_PACKET_TYPE_ACK;
pkt->whdr.flags = RXRPC_SLOW_START_OK | conn->out_clientflag;
pkt->whdr.userStatus = 0;
pkt->whdr.securityIndex = call->security_ix;
pkt->whdr._rsvd = 0;
pkt->whdr.serviceId = htons(call->service_id);

iov[0].iov_base = pkt;
iov[0].iov_len = sizeof(pkt->whdr);
len = sizeof(pkt->whdr);

switch (type) {
case RXRPC_PACKET_TYPE_ACK:
spin_lock_bh(&call->lock);
spin_lock_bh(&call->lock);
if (ping) {
reason = RXRPC_ACK_PING;
} else {
reason = call->ackr_reason;
if (!call->ackr_reason) {
spin_unlock_bh(&call->lock);
ret = 0;
goto out;
}
ping = (call->ackr_reason == RXRPC_ACK_PING);
n = rxrpc_fill_out_ack(call, pkt, &hard_ack, &top);
call->ackr_reason = 0;

spin_unlock_bh(&call->lock);


pkt->whdr.flags |= RXRPC_SLOW_START_OK;

iov[0].iov_len += sizeof(pkt->ack) + n;
iov[1].iov_base = &pkt->ackinfo;
iov[1].iov_len = sizeof(pkt->ackinfo);
len += sizeof(pkt->ack) + n + sizeof(pkt->ackinfo);
ioc = 2;
break;

case RXRPC_PACKET_TYPE_ABORT:
abort_code = call->abort_code;
pkt->abort_code = htonl(abort_code);
iov[0].iov_len += sizeof(pkt->abort_code);
len += sizeof(pkt->abort_code);
ioc = 1;
break;

default:
BUG();
ret = -ENOANO;
goto out;
}
n = rxrpc_fill_out_ack(call, pkt, &hard_ack, &top, reason);

spin_unlock_bh(&call->lock);

iov[0].iov_base = pkt;
iov[0].iov_len = sizeof(pkt->whdr) + sizeof(pkt->ack) + n;
iov[1].iov_base = &pkt->ackinfo;
iov[1].iov_len = sizeof(pkt->ackinfo);
len = iov[0].iov_len + iov[1].iov_len;

serial = atomic_inc_return(&conn->serial);
pkt->whdr.serial = htonl(serial);
switch (type) {
case RXRPC_PACKET_TYPE_ACK:
trace_rxrpc_tx_ack(call, serial,
ntohl(pkt->ack.firstPacket),
ntohl(pkt->ack.serial),
pkt->ack.reason, pkt->ack.nAcks);
break;
}
trace_rxrpc_tx_ack(call, serial,
ntohl(pkt->ack.firstPacket),
ntohl(pkt->ack.serial),
pkt->ack.reason, pkt->ack.nAcks);

if (ping) {
call->ackr_ping = serial;
call->ping_serial = serial;
smp_wmb();
/* We need to stick a time in before we send the packet in case
* the reply gets back before kernel_sendmsg() completes - but
@@ -201,19 +175,19 @@ int rxrpc_send_call_packet(struct rxrpc_call *call, u8 type)
* the packet transmission is more likely to happen towards the
* end of the kernel_sendmsg() call.
*/
call->ackr_ping_time = ktime_get_real();
call->ping_time = ktime_get_real();
set_bit(RXRPC_CALL_PINGING, &call->flags);
trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_ping, serial);
}
ret = kernel_sendmsg(conn->params.local->socket,
&msg, iov, ioc, len);
if (ping)
call->ackr_ping_time = ktime_get_real();

if (type == RXRPC_PACKET_TYPE_ACK &&
call->state < RXRPC_CALL_COMPLETE) {
ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
if (ping)
call->ping_time = ktime_get_real();

if (call->state < RXRPC_CALL_COMPLETE) {
if (ret < 0) {
clear_bit(RXRPC_CALL_PINGING, &call->flags);
if (ping)
clear_bit(RXRPC_CALL_PINGING, &call->flags);
rxrpc_propose_ACK(call, pkt->ack.reason,
ntohs(pkt->ack.maxSkew),
ntohl(pkt->ack.serial),
@@ -235,6 +209,56 @@ int rxrpc_send_call_packet(struct rxrpc_call *call, u8 type)
return ret;
}

/*
* Send an ABORT call packet.
*/
int rxrpc_send_abort_packet(struct rxrpc_call *call)
{
struct rxrpc_connection *conn = NULL;
struct rxrpc_abort_buffer pkt;
struct msghdr msg;
struct kvec iov[1];
rxrpc_serial_t serial;
int ret;

spin_lock_bh(&call->lock);
if (call->conn)
conn = rxrpc_get_connection_maybe(call->conn);
spin_unlock_bh(&call->lock);
if (!conn)
return -ECONNRESET;

msg.msg_name = &call->peer->srx.transport;
msg.msg_namelen = call->peer->srx.transport_len;
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_flags = 0;

pkt.whdr.epoch = htonl(conn->proto.epoch);
pkt.whdr.cid = htonl(call->cid);
pkt.whdr.callNumber = htonl(call->call_id);
pkt.whdr.seq = 0;
pkt.whdr.type = RXRPC_PACKET_TYPE_ABORT;
pkt.whdr.flags = conn->out_clientflag;
pkt.whdr.userStatus = 0;
pkt.whdr.securityIndex = call->security_ix;
pkt.whdr._rsvd = 0;
pkt.whdr.serviceId = htons(call->service_id);
pkt.abort_code = htonl(call->abort_code);

iov[0].iov_base = &pkt;
iov[0].iov_len = sizeof(pkt);

serial = atomic_inc_return(&conn->serial);
pkt.whdr.serial = htonl(serial);

ret = kernel_sendmsg(conn->params.local->socket,
&msg, iov, 1, sizeof(pkt));

rxrpc_put_connection(conn);
return ret;
}

/*
* send a packet through the transport endpoint
*/
@@ -283,11 +307,12 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
/* If our RTT cache needs working on, request an ACK. Also request
* ACKs if a DATA packet appears to have been lost.
*/
if (retrans ||
call->cong_mode == RXRPC_CALL_SLOW_START ||
(call->peer->rtt_usage < 3 && sp->hdr.seq & 1) ||
ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000),
ktime_get_real()))
if (!(sp->hdr.flags & RXRPC_LAST_PACKET) &&
(retrans ||
call->cong_mode == RXRPC_CALL_SLOW_START ||
(call->peer->rtt_usage < 3 && sp->hdr.seq & 1) ||
ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000),
ktime_get_real())))
whdr.flags |= RXRPC_REQUEST_ACK;

if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
@@ -143,7 +143,7 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) {
rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, serial, true, false,
rxrpc_propose_ack_terminal_ack);
rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
rxrpc_send_ack_packet(call, false);
}

write_lock_bh(&call->state_lock);
@@ -151,17 +151,21 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
switch (call->state) {
case RXRPC_CALL_CLIENT_RECV_REPLY:
__rxrpc_call_completed(call);
write_unlock_bh(&call->state_lock);
break;

case RXRPC_CALL_SERVER_RECV_REQUEST:
call->tx_phase = true;
call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
call->ack_at = call->expire_at;
write_unlock_bh(&call->state_lock);
rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, false, true,
rxrpc_propose_ack_processing_op);
break;
default:
write_unlock_bh(&call->state_lock);
break;
}

write_unlock_bh(&call->state_lock);
}

/*
@@ -212,7 +216,7 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
true, false,
rxrpc_propose_ack_rotate_rx);
if (call->ackr_reason)
rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
rxrpc_send_ack_packet(call, false);
}
}

@@ -652,7 +656,7 @@ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
goto out;
call_complete:
*_abort = call->abort_code;
ret = call->error;
ret = -call->error;
if (call->completion == RXRPC_CALL_SUCCEEDED) {
ret = 1;
if (size > 0)
@@ -381,7 +381,7 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
return 0;

protocol_error:
rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT);
rxrpc_send_abort_packet(call);
_leave(" = -EPROTO");
return -EPROTO;

@@ -471,7 +471,7 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
return 0;

protocol_error:
rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT);
rxrpc_send_abort_packet(call);
_leave(" = -EPROTO");
return -EPROTO;

@@ -523,7 +523,7 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,

if (cksum != expected_cksum) {
rxrpc_abort_call("VCK", call, seq, RXKADSEALEDINCON, EPROTO);
rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT);
rxrpc_send_abort_packet(call);
_leave(" = -EPROTO [csum failed]");
return -EPROTO;
}
@@ -130,6 +130,11 @@ static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb,
break;
case RXRPC_CALL_SERVER_ACK_REQUEST:
call->state = RXRPC_CALL_SERVER_SEND_REPLY;
call->ack_at = call->expire_at;
if (call->ackr_reason == RXRPC_ACK_DELAY)
call->ackr_reason = 0;
__rxrpc_set_timer(call, rxrpc_timer_init_for_send_reply,
ktime_get_real());
if (!last)
break;
case RXRPC_CALL_SERVER_SEND_REPLY:
@@ -197,7 +202,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
do {
/* Check to see if there's a ping ACK to reply to. */
if (call->ackr_reason == RXRPC_ACK_PING_RESPONSE)
rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
rxrpc_send_ack_packet(call, false);

if (!skb) {
size_t size, chunk, max, space;
@@ -514,8 +519,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
} else if (cmd == RXRPC_CMD_SEND_ABORT) {
ret = 0;
if (rxrpc_abort_call("CMD", call, 0, abort_code, ECONNABORTED))
ret = rxrpc_send_call_packet(call,
RXRPC_PACKET_TYPE_ABORT);
ret = rxrpc_send_abort_packet(call);
} else if (cmd != RXRPC_CMD_SEND_DATA) {
ret = -EINVAL;
} else if (rxrpc_is_client_call(call) &&
@@ -597,7 +601,7 @@ void rxrpc_kernel_abort_call(struct socket *sock, struct rxrpc_call *call,
lock_sock(sock->sk);

if (rxrpc_abort_call(why, call, 0, abort_code, error))
rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT);
rxrpc_send_abort_packet(call);

release_sock(sock->sk);
_leave("");