ASoC: Fixes for v3.12
A few small fixes, nothing with any broad impact but all useful for the
affected systems. The Kirkwood compatible string change is fixing up a
string just added in the merge window so that we don't get any changes
in released kernels.

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.14 (GNU/Linux)

iQIcBAABAgAGBQJSMEN7AAoJELSic+t+oim9fUcP/2/JSlEzCnf9mnIVu5JUb4o0
N9sqvZ3FuBmB2s+X7XdzHHatn/SRcj8bdPIUDyghsM+Ax6inzHCkLLxF0gMfgo1k
YmbVamD9x+Odj7ovs/M7/pRji9vBUFRmTdh6DnxIcQUGyuZRIMEIYxgnu//tXQ6+
vL2dJ3d3S37k4NtqHebKf1iL4ExGMAkp6f7Rig3YXc/N+jc8O/uUWAitfvBnzuBH
ZlFTCxkHhZ9mYi/xRBUSOFbb96bQnHN2KQ3soss1xDKhJVpjuo1rOWfTZE9c7bMY
OvqsMJDwaPNiCF0UzdCPQ1SMdE8r8U2lMMjyeQN76R9tqSVBtgxPeLvjzN0l3Xkf
mnHd1LZXGOF5cSfrA/Diy4leFk0A0pJQSxdDt54vlRcCG3aV7IRJOkPi4C0tMHQE
hkXOfgF6XERDsNV4RgzangAAvjKeXPqItWITZx+qtK7AxtCC6xpMc5EHV4wbFmTP
bYz6oKBehQy5gaI/z5Ad0mU1JK1vJH6n2ENtu/zNxBzfHIiyhF5fBPSuE+IGxvJf
/ANQf44DkkXLHZv5766lYUbldGmFYbiva2us+uDxaEfjSW5DDSSn/aYn73IsYJ16
L5MsX0s/J2o/i10kuXPlWMWIkoat1Y2vDUrdjg1xQ1IrkZRAZDnIYJz4yS8TwNyT
8rToadF2zYPKxwDVb37E
=oTEO
-----END PGP SIGNATURE-----

Merge tag 'asoc-v3.12-4' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus
commit 3d0049e852
@@ -2,13 +2,17 @@
 Required properties:
 
-- compatible: "marvell,mvebu-audio"
+- compatible:
+  "marvell,kirkwood-audio" for Kirkwood platforms
+  "marvell,dove-audio" for Dove platforms
 
 - reg: physical base address of the controller and length of memory mapped
   region.
 
-- interrupts: list of two irq numbers.
-  The first irq is used for data flow and the second one is used for errors.
+- interrupts:
+  with "marvell,kirkwood-audio", the audio interrupt
+  with "marvell,dove-audio", a list of two interrupts, the first for
+  the data flow, and the second for errors.
 
 - clocks: one or two phandles.
   The first one is mandatory and defines the internal clock.
@@ -21,7 +25,7 @@ Required properties:
 Example:
 
 i2s1: audio-controller@b4000 {
-	compatible = "marvell,mvebu-audio";
+	compatible = "marvell,dove-audio";
 	reg = <0xb4000 0x2210>;
 	interrupts = <21>, <22>;
 	clocks = <&gate_clk 13>;
@@ -6067,7 +6067,7 @@ M:	Rob Herring <rob.herring@calxeda.com>
 M:	Pawel Moll <pawel.moll@arm.com>
 M:	Mark Rutland <mark.rutland@arm.com>
 M:	Stephen Warren <swarren@wwwdotorg.org>
-M:	Ian Campbell <ian.campbell@citrix.com>
+M:	Ian Campbell <ijc+devicetree@hellion.org.uk>
 L:	devicetree@vger.kernel.org
 S:	Maintained
 F:	Documentation/devicetree/
Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
 NAME = Linux for Workgroups
 
 # *DOCUMENTATION*
@@ -42,7 +42,6 @@ static const char *atlas6_dt_match[] __initdata = {
 
 DT_MACHINE_START(ATLAS6_DT, "Generic ATLAS6 (Flattened Device Tree)")
 	/* Maintainer: Barry Song <baohua.song@csr.com> */
-	.nr_irqs	= 128,
 	.map_io		= sirfsoc_map_io,
 	.init_time	= sirfsoc_init_time,
 	.init_late	= sirfsoc_init_late,
@@ -59,7 +58,6 @@ static const char *prima2_dt_match[] __initdata = {
 
 DT_MACHINE_START(PRIMA2_DT, "Generic PRIMA2 (Flattened Device Tree)")
 	/* Maintainer: Barry Song <baohua.song@csr.com> */
-	.nr_irqs	= 128,
 	.map_io		= sirfsoc_map_io,
 	.init_time	= sirfsoc_init_time,
 	.dma_zone_size	= SZ_256M,
@@ -979,6 +979,7 @@ config RELOCATABLE
 	  must live at a different physical address than the primary
 	  kernel.
 
+# This value must have zeroes in the bottom 60 bits otherwise lots will break
 config PAGE_OFFSET
 	hex
 	default "0xc000000000000000"
@@ -211,9 +211,19 @@ extern long long virt_phys_offset;
 #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
 #define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET)
 #else
+#ifdef CONFIG_PPC64
+/*
+ * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
+ * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
+ */
+#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET))
+#define __pa(x) ((unsigned long)(x) & 0x0fffffffffffffffUL)
+
+#else /* 32-bit, non book E */
 #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
 #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
 #endif
+#endif
 
 /*
  * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
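The __va()/__pa() change above trades the usual add/subtract of PAGE_OFFSET for bitwise OR/AND on 64-bit, purely to dodge the gcc -mcmodel=medium miscompilation the new comment describes. The two forms agree because every linear-map address carries 0xc in its top nibble. A standalone userspace sketch checking the identity (not part of the patch):

#include <assert.h>
#include <stdint.h>

#define PAGE_OFFSET 0xc000000000000000UL

int main(void)
{
	/* any kernel linear-map address: top nibble is 0xc */
	uint64_t va = PAGE_OFFSET | 0x12345000UL;

	/* masking off the top bits == subtracting PAGE_OFFSET */
	assert((va & 0x0fffffffffffffffUL) == va - PAGE_OFFSET);
	/* OR-ing PAGE_OFFSET back in == adding it */
	assert(((va - PAGE_OFFSET) | PAGE_OFFSET) == va);
	return 0;
}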
@@ -35,7 +35,13 @@
 #include <asm/vdso_datapage.h>
 #include <asm/vio.h>
 #include <asm/mmu.h>
+#include <asm/machdep.h>
 
+
+/*
+ * This isn't a module but we expose that to userspace
+ * via /proc so leave the definitions here
+ */
 #define MODULE_VERS "1.9"
 #define MODULE_NAME "lparcfg"
 
@@ -418,7 +424,8 @@ static void parse_em_data(struct seq_file *m)
 {
 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
 
-	if (plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
+	if (firmware_has_feature(FW_FEATURE_LPAR) &&
+	    plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
 		seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]);
 }
 
@@ -677,7 +684,6 @@ static int lparcfg_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations lparcfg_fops = {
-	.owner		= THIS_MODULE,
 	.read		= seq_read,
 	.write		= lparcfg_write,
 	.open		= lparcfg_open,
@@ -699,14 +705,4 @@ static int __init lparcfg_init(void)
 	}
 	return 0;
 }
-
-static void __exit lparcfg_cleanup(void)
-{
-	remove_proc_subtree("powerpc/lparcfg", NULL);
-}
-
-module_init(lparcfg_init);
-module_exit(lparcfg_cleanup);
-MODULE_DESCRIPTION("Interface for LPAR configuration data");
-MODULE_AUTHOR("Dave Engebretsen");
-MODULE_LICENSE("GPL");
+machine_device_initcall(pseries, lparcfg_init);
@@ -78,8 +78,8 @@ __ref void *alloc_low_pages(unsigned int num)
 	return __va(pfn << PAGE_SHIFT);
 }
 
-/* need 4 4k for initial PMD_SIZE, 4k for 0-ISA_END_ADDRESS */
-#define INIT_PGT_BUF_SIZE	(5 * PAGE_SIZE)
+/* need 3 4k for initial PMD_SIZE, 3 4k for 0-ISA_END_ADDRESS */
+#define INIT_PGT_BUF_SIZE	(6 * PAGE_SIZE)
 RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE);
 void __init early_alloc_pgt_buf(void)
 {
@@ -141,6 +141,8 @@ static ssize_t show_mem_removable(struct device *dev,
 		container_of(dev, struct memory_block, dev);
 
 	for (i = 0; i < sections_per_block; i++) {
+		if (!present_section_nr(mem->start_section_nr + i))
+			continue;
 		pfn = section_nr_to_pfn(mem->start_section_nr + i);
 		ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
 	}
@@ -332,7 +332,7 @@ regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
 	}
 
 	if (!rbnode->blklen) {
-		rbnode->blklen = sizeof(*rbnode);
+		rbnode->blklen = 1;
 		rbnode->base_reg = reg;
 	}
 
@@ -194,7 +194,7 @@ config SIRF_DMA
 	  Enable support for the CSR SiRFprimaII DMA engine.
 
 config TI_EDMA
-	tristate "TI EDMA support"
+	bool "TI EDMA support"
 	depends on ARCH_DAVINCI || ARCH_OMAP
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
@@ -4440,7 +4440,7 @@
 #define EDP_LINK_TRAIN_600MV_0DB_IVB		(0x30 <<22)
 #define EDP_LINK_TRAIN_600MV_3_5DB_IVB		(0x36 <<22)
 #define EDP_LINK_TRAIN_800MV_0DB_IVB		(0x38 <<22)
-#define EDP_LINK_TRAIN_800MV_3_5DB_IVB		(0x33 <<22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB		(0x3e <<22)
 
 /* legacy values */
 #define EDP_LINK_TRAIN_500MV_0DB_IVB		(0x00 <<22)
@@ -29,7 +29,9 @@
 #include <drm/drmP.h>
 #include <drm/ttm/ttm_bo_driver.h>
 
-#define VMW_PPN_SIZE sizeof(unsigned long)
+#define VMW_PPN_SIZE (sizeof(unsigned long))
+/* A future safe maximum remap size. */
+#define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE)
 
 static int vmw_gmr2_bind(struct vmw_private *dev_priv,
 			 struct page *pages[],
@@ -38,43 +40,61 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
 {
 	SVGAFifoCmdDefineGMR2 define_cmd;
 	SVGAFifoCmdRemapGMR2 remap_cmd;
-	uint32_t define_size = sizeof(define_cmd) + 4;
-	uint32_t remap_size = VMW_PPN_SIZE * num_pages + sizeof(remap_cmd) + 4;
 	uint32_t *cmd;
 	uint32_t *cmd_orig;
+	uint32_t define_size = sizeof(define_cmd) + sizeof(*cmd);
+	uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP + ((num_pages % VMW_PPN_PER_REMAP) > 0);
+	uint32_t remap_size = VMW_PPN_SIZE * num_pages + (sizeof(remap_cmd) + sizeof(*cmd)) * remap_num;
+	uint32_t remap_pos = 0;
+	uint32_t cmd_size = define_size + remap_size;
 	uint32_t i;
 
-	cmd_orig = cmd = vmw_fifo_reserve(dev_priv, define_size + remap_size);
+	cmd_orig = cmd = vmw_fifo_reserve(dev_priv, cmd_size);
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
 	define_cmd.gmrId = gmr_id;
 	define_cmd.numPages = num_pages;
 
+	*cmd++ = SVGA_CMD_DEFINE_GMR2;
+	memcpy(cmd, &define_cmd, sizeof(define_cmd));
+	cmd += sizeof(define_cmd) / sizeof(*cmd);
+
+	/*
+	 * Need to split the command if there are too many
+	 * pages that goes into the gmr.
+	 */
+
 	remap_cmd.gmrId = gmr_id;
 	remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ?
 		SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32;
-	remap_cmd.offsetPages = 0;
-	remap_cmd.numPages = num_pages;
 
-	*cmd++ = SVGA_CMD_DEFINE_GMR2;
-	memcpy(cmd, &define_cmd, sizeof(define_cmd));
-	cmd += sizeof(define_cmd) / sizeof(uint32);
+	while (num_pages > 0) {
+		unsigned long nr = min(num_pages, (unsigned long)VMW_PPN_PER_REMAP);
+
+		remap_cmd.offsetPages = remap_pos;
+		remap_cmd.numPages = nr;
 
-	*cmd++ = SVGA_CMD_REMAP_GMR2;
-	memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
-	cmd += sizeof(remap_cmd) / sizeof(uint32);
+		*cmd++ = SVGA_CMD_REMAP_GMR2;
+		memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
+		cmd += sizeof(remap_cmd) / sizeof(*cmd);
 
-	for (i = 0; i < num_pages; ++i) {
-		if (VMW_PPN_SIZE <= 4)
-			*cmd = page_to_pfn(*pages++);
-		else
-			*((uint64_t *)cmd) = page_to_pfn(*pages++);
+		for (i = 0; i < nr; ++i) {
+			if (VMW_PPN_SIZE <= 4)
+				*cmd = page_to_pfn(*pages++);
+			else
+				*((uint64_t *)cmd) = page_to_pfn(*pages++);
 
-		cmd += VMW_PPN_SIZE / sizeof(*cmd);
+			cmd += VMW_PPN_SIZE / sizeof(*cmd);
+		}
+
+		num_pages -= nr;
+		remap_pos += nr;
 	}
 
-	vmw_fifo_commit(dev_priv, define_size + remap_size);
+	BUG_ON(cmd != cmd_orig + cmd_size / sizeof(*cmd));
+
+	vmw_fifo_commit(dev_priv, cmd_size);
 
 	return 0;
 }
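The remap_num expression in the rewritten vmw_gmr2_bind() is a ceiling division: one SVGA_CMD_REMAP_GMR2 header is emitted per batch of at most VMW_PPN_PER_REMAP page numbers. A standalone sketch of the batch-count math (not from the patch; remap_count is a hypothetical helper and the 4-byte PPN size is an assumption):

#include <assert.h>

#define VMW_PPN_PER_REMAP ((31 * 1024) / 4)	/* assuming 4-byte PPNs */

/* number of REMAP commands needed for num_pages page numbers */
static unsigned int remap_count(unsigned long num_pages)
{
	return num_pages / VMW_PPN_PER_REMAP +
	       ((num_pages % VMW_PPN_PER_REMAP) > 0);
}

int main(void)
{
	assert(remap_count(1) == 1);
	assert(remap_count(VMW_PPN_PER_REMAP) == 1);
	assert(remap_count(VMW_PPN_PER_REMAP + 1) == 2);	/* spills into a second batch */
	return 0;
}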
@@ -167,6 +167,7 @@ static const struct xpad_device {
 	{ 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
 	{ 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
 	{ 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+	{ 0x1689, 0xfd01, "Razer Onza Classic Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
 	{ 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
 	{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
 	{ 0x1bad, 0xf016, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 },
@@ -672,6 +672,7 @@ static int elantech_packet_check_v2(struct psmouse *psmouse)
  */
 static int elantech_packet_check_v3(struct psmouse *psmouse)
 {
+	struct elantech_data *etd = psmouse->private;
 	const u8 debounce_packet[] = { 0xc4, 0xff, 0xff, 0x02, 0xff, 0xff };
 	unsigned char *packet = psmouse->packet;
 
@@ -682,19 +683,48 @@ static int elantech_packet_check_v3(struct psmouse *psmouse)
 	if (!memcmp(packet, debounce_packet, sizeof(debounce_packet)))
 		return PACKET_DEBOUNCE;
 
-	if ((packet[0] & 0x0c) == 0x04 && (packet[3] & 0xcf) == 0x02)
-		return PACKET_V3_HEAD;
+	/*
+	 * If the hardware flag 'crc_enabled' is set the packets have
+	 * different signatures.
+	 */
+	if (etd->crc_enabled) {
+		if ((packet[3] & 0x09) == 0x08)
+			return PACKET_V3_HEAD;
 
-	if ((packet[0] & 0x0c) == 0x0c && (packet[3] & 0xce) == 0x0c)
-		return PACKET_V3_TAIL;
+		if ((packet[3] & 0x09) == 0x09)
+			return PACKET_V3_TAIL;
+	} else {
+		if ((packet[0] & 0x0c) == 0x04 && (packet[3] & 0xcf) == 0x02)
+			return PACKET_V3_HEAD;
+
+		if ((packet[0] & 0x0c) == 0x0c && (packet[3] & 0xce) == 0x0c)
+			return PACKET_V3_TAIL;
+	}
 
 	return PACKET_UNKNOWN;
 }
 
 static int elantech_packet_check_v4(struct psmouse *psmouse)
 {
+	struct elantech_data *etd = psmouse->private;
 	unsigned char *packet = psmouse->packet;
 	unsigned char packet_type = packet[3] & 0x03;
+	bool sanity_check;
+
+	/*
+	 * Sanity check based on the constant bits of a packet.
+	 * The constant bits change depending on the value of
+	 * the hardware flag 'crc_enabled' but are the same for
+	 * every packet, regardless of the type.
+	 */
+	if (etd->crc_enabled)
+		sanity_check = ((packet[3] & 0x08) == 0x00);
+	else
+		sanity_check = ((packet[0] & 0x0c) == 0x04 &&
+				(packet[3] & 0x1c) == 0x10);
+
+	if (!sanity_check)
+		return PACKET_UNKNOWN;
 
 	switch (packet_type) {
 	case 0:
@@ -1313,6 +1343,12 @@ static int elantech_set_properties(struct elantech_data *etd)
 		etd->reports_pressure = true;
 	}
 
+	/*
+	 * The signatures of v3 and v4 packets change depending on the
+	 * value of this hardware flag.
+	 */
+	etd->crc_enabled = ((etd->fw_version & 0x4000) == 0x4000);
+
 	return 0;
 }
 
@@ -129,6 +129,7 @@ struct elantech_data {
 	bool paritycheck;
 	bool jumpy_cursor;
 	bool reports_pressure;
+	bool crc_enabled;
 	unsigned char hw_version;
 	unsigned int fw_version;
 	unsigned int single_finger_reports;
@@ -22,7 +22,8 @@ config SERIO_I8042
 	tristate "i8042 PC Keyboard controller" if EXPERT || !X86
 	default y
 	depends on !PARISC && (!ARM || ARCH_SHARK || FOOTBRIDGE_HOST) && \
-		   (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN && !S390
+		   (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN && !S390 && \
+		   !ARC
 	help
 	  i8042 is the chip over which the standard AT keyboard and PS/2
 	  mouse are connected to the computer. If you use these devices,
@@ -2112,7 +2112,7 @@ static const struct wacom_features wacom_features_0xDA =
 	{ "Wacom Bamboo 2FG 4x5 SE", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
 	  31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
 	  .touch_max = 2 };
-static struct wacom_features wacom_features_0xDB =
+static const struct wacom_features wacom_features_0xDB =
 	{ "Wacom Bamboo 2FG 6x8 SE", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023,
 	  31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
 	  .touch_max = 2 };
@@ -2127,6 +2127,12 @@ static const struct wacom_features wacom_features_0xDF =
 	{ "Wacom Bamboo 16FG 6x8", WACOM_PKGLEN_BBPEN, 21648, 13700, 1023,
 	  31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
 	  .touch_max = 16 };
+static const struct wacom_features wacom_features_0x300 =
+	{ "Wacom Bamboo One S", WACOM_PKGLEN_BBPEN, 14720, 9225, 1023,
+	  31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x301 =
+	{ "Wacom Bamboo One M", WACOM_PKGLEN_BBPEN, 21648, 13530, 1023,
+	  31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
 static const struct wacom_features wacom_features_0x6004 =
 	{ "ISD-V4", WACOM_PKGLEN_GRAPHIRE, 12800, 8000, 255,
 	  0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -2253,6 +2259,8 @@ const struct usb_device_id wacom_ids[] = {
 	{ USB_DEVICE_WACOM(0x100) },
 	{ USB_DEVICE_WACOM(0x101) },
 	{ USB_DEVICE_WACOM(0x10D) },
+	{ USB_DEVICE_WACOM(0x300) },
+	{ USB_DEVICE_WACOM(0x301) },
 	{ USB_DEVICE_WACOM(0x304) },
 	{ USB_DEVICE_WACOM(0x4001) },
 	{ USB_DEVICE_WACOM(0x47) },
@@ -23,7 +23,7 @@
 #define SIRFSOC_INT_RISC_LEVEL1		0x0024
 #define SIRFSOC_INIT_IRQ_ID		0x0038
 
-#define SIRFSOC_NUM_IRQS		128
+#define SIRFSOC_NUM_IRQS		64
 
 static struct irq_domain *sirfsoc_irqdomain;
 
@@ -32,15 +32,18 @@ sirfsoc_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
 {
 	struct irq_chip_generic *gc;
 	struct irq_chip_type *ct;
+	int ret;
+	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
 
-	gc = irq_alloc_generic_chip("SIRFINTC", 1, irq_start, base, handle_level_irq);
-	ct = gc->chip_types;
+	ret = irq_alloc_domain_generic_chips(sirfsoc_irqdomain, num, 1, "irq_sirfsoc",
+		handle_level_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE);
 
+	gc = irq_get_domain_generic_chip(sirfsoc_irqdomain, irq_start);
+	gc->reg_base = base;
+	ct = gc->chip_types;
 	ct->chip.irq_mask = irq_gc_mask_clr_bit;
 	ct->chip.irq_unmask = irq_gc_mask_set_bit;
 	ct->regs.mask = SIRFSOC_INT_RISC_MASK0;
-
-	irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE, IRQ_NOREQUEST, 0);
 }
 
 static asmlinkage void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs)
@@ -60,9 +63,8 @@ static int __init sirfsoc_irq_init(struct device_node *np, struct device_node *p
 	if (!base)
 		panic("unable to map intc cpu registers\n");
 
-	/* using legacy because irqchip_generic does not work with linear */
-	sirfsoc_irqdomain = irq_domain_add_legacy(np, SIRFSOC_NUM_IRQS, 0, 0,
-						  &irq_domain_simple_ops, base);
+	sirfsoc_irqdomain = irq_domain_add_linear(np, SIRFSOC_NUM_IRQS,
						  &irq_generic_chip_ops, base);
 
 	sirfsoc_alloc_gc(base, 0, 32);
 	sirfsoc_alloc_gc(base + 4, 32, SIRFSOC_NUM_IRQS - 32);
@@ -288,8 +288,10 @@ dsp_control_req(struct dsp *dsp, struct mISDNhead *hh, struct sk_buff *skb)
 	u8 *data;
 	int len;
 
-	if (skb->len < sizeof(int))
+	if (skb->len < sizeof(int)) {
+		printk(KERN_ERR "%s: PH_CONTROL message too short\n", __func__);
 		return -EINVAL;
+	}
 	cont = *((int *)skb->data);
 	len = skb->len - sizeof(int);
 	data = skb->data + sizeof(int);
@@ -53,6 +53,7 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
 	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
 	int old_max_eth_txqs, new_max_eth_txqs;
 	int old_txdata_index = 0, new_txdata_index = 0;
+	struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
 
 	/* Copy the NAPI object as it has been already initialized */
 	from_fp->napi = to_fp->napi;
@@ -61,6 +62,11 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
 	memcpy(to_fp, from_fp, sizeof(*to_fp));
 	to_fp->index = to;
 
+	/* Retain the tpa_info of the original `to' version as we don't want
+	 * 2 FPs to contain the same tpa_info pointer.
+	 */
+	to_fp->tpa_info = old_tpa_info;
+
 	/* move sp_objs contents as well, as their indices match fp ones */
 	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
 
@@ -2956,8 +2962,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
 	if (IS_PF(bp)) {
 		if (CNIC_LOADED(bp))
 			bnx2x_free_mem_cnic(bp);
-		bnx2x_free_mem(bp);
 	}
+	bnx2x_free_mem(bp);
 
 	bp->state = BNX2X_STATE_CLOSED;
 	bp->cnic_loaded = false;
@@ -7855,12 +7855,15 @@ void bnx2x_free_mem(struct bnx2x *bp)
 {
 	int i;
 
-	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
-		       sizeof(struct host_sp_status_block));
-
 	BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
 		       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
 
+	if (IS_VF(bp))
+		return;
+
+	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
+		       sizeof(struct host_sp_status_block));
+
 	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
 		       sizeof(struct bnx2x_slowpath));
 
@@ -522,23 +522,6 @@ static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
 	return 0;
 }
 
-static int
-bnx2x_vfop_config_vlan0(struct bnx2x *bp,
-			struct bnx2x_vlan_mac_ramrod_params *vlan_mac,
-			bool add)
-{
-	int rc;
-
-	vlan_mac->user_req.cmd = add ? BNX2X_VLAN_MAC_ADD :
-				       BNX2X_VLAN_MAC_DEL;
-	vlan_mac->user_req.u.vlan.vlan = 0;
-
-	rc = bnx2x_config_vlan_mac(bp, vlan_mac);
-	if (rc == -EEXIST)
-		rc = 0;
-	return rc;
-}
-
 static int bnx2x_vfop_config_list(struct bnx2x *bp,
 				  struct bnx2x_vfop_filters *filters,
 				  struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
@@ -643,30 +626,14 @@ static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
 
 	case BNX2X_VFOP_VLAN_CONFIG_LIST:
 		/* next state */
-		vfop->state = BNX2X_VFOP_VLAN_CONFIG_LIST_0;
-
-		/* remove vlan0 - could be no-op */
-		vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, false);
-		if (vfop->rc)
-			goto op_err;
+		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
 
-		/* Do vlan list config. if this operation fails we try to
-		 * restore vlan0 to keep the queue is working order
-		 */
+		/* do list config */
 		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
 		if (!vfop->rc) {
 			set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
 			vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
 		}
-		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); /* fall-through */
-
-	case BNX2X_VFOP_VLAN_CONFIG_LIST_0:
-		/* next state */
-		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
-
-		if (list_empty(&obj->head))
-			/* add vlan0 */
-			vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, true);
-		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
 
 	default:
@@ -2819,6 +2786,18 @@ int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
 	return 0;
 }
 
+struct set_vf_state_cookie {
+	struct bnx2x_virtf *vf;
+	u8 state;
+};
+
+void bnx2x_set_vf_state(void *cookie)
+{
+	struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;
+
+	p->vf->state = p->state;
+}
+
 /* VFOP close (teardown the queues, delete mcasts and close HW) */
 static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
 {
@@ -2869,7 +2848,19 @@ static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
 op_err:
 	BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc);
 op_done:
-	vf->state = VF_ACQUIRED;
+
+	/* need to make sure there are no outstanding stats ramrods which may
+	 * cause the device to access the VF's stats buffer which it will free
+	 * as soon as we return from the close flow.
+	 */
+	{
+		struct set_vf_state_cookie cookie;
+
+		cookie.vf = vf;
+		cookie.state = VF_ACQUIRED;
+		bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
+	}
+
 	DP(BNX2X_MSG_IOV, "set state to acquired\n");
 	bnx2x_vfop_end(bp, vf, vfop);
 }
@@ -522,20 +522,16 @@ static void bnx2x_func_stats_init(struct bnx2x *bp)
 /* should be called under stats_sema */
 static void __bnx2x_stats_start(struct bnx2x *bp)
 {
-	/* vfs travel through here as part of the statistics FSM, but no action
-	 * is required
-	 */
-	if (IS_VF(bp))
-		return;
-
-	if (bp->port.pmf)
-		bnx2x_port_stats_init(bp);
-
-	else if (bp->func_stx)
-		bnx2x_func_stats_init(bp);
-
-	bnx2x_hw_stats_post(bp);
-	bnx2x_storm_stats_post(bp);
+	if (IS_PF(bp)) {
+		if (bp->port.pmf)
+			bnx2x_port_stats_init(bp);
+
+		else if (bp->func_stx)
+			bnx2x_func_stats_init(bp);
+
+		bnx2x_hw_stats_post(bp);
+		bnx2x_storm_stats_post(bp);
+	}
 
 	bp->stats_started = true;
 }
@@ -1997,3 +1993,14 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
 				estats->mac_discard);
 	}
 }
+
+void bnx2x_stats_safe_exec(struct bnx2x *bp,
+			   void (func_to_exec)(void *cookie),
+			   void *cookie){
+	if (down_timeout(&bp->stats_sema, HZ/10))
+		BNX2X_ERR("Unable to acquire stats lock\n");
+	bnx2x_stats_comp(bp);
+	func_to_exec(cookie);
+	__bnx2x_stats_start(bp);
+	up(&bp->stats_sema);
+}
@@ -539,6 +539,9 @@ struct bnx2x;
 void bnx2x_memset_stats(struct bnx2x *bp);
 void bnx2x_stats_init(struct bnx2x *bp);
 void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
+void bnx2x_stats_safe_exec(struct bnx2x *bp,
+			   void (func_to_exec)(void *cookie),
+			   void *cookie);
 
 /**
  * bnx2x_save_statistics - save statistics when unloading.
@@ -4373,6 +4373,10 @@ static int be_resume(struct pci_dev *pdev)
 	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
 
+	status = be_fw_wait_ready(adapter);
+	if (status)
+		return status;
+
 	/* tell fw we're ready to fire cmds */
 	status = be_cmd_fw_init(adapter);
 	if (status)
@@ -971,8 +971,7 @@ fec_enet_rx(struct net_device *ndev, int budget)
 						 htons(ETH_P_8021Q),
 						 vlan_tag);
 
-		if (!skb_defer_rx_timestamp(skb))
-			napi_gro_receive(&fep->napi, skb);
+		napi_gro_receive(&fep->napi, skb);
 	}
 
 	bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
@@ -3069,7 +3069,7 @@ jme_init_one(struct pci_dev *pdev,
 		jwrite32(jme, JME_APMC, apmc);
 	}
 
-	NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2)
+	NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, NAPI_POLL_WEIGHT)
 
 	spin_lock_init(&jme->phy_lock);
 	spin_lock_init(&jme->macaddr_lock);
@@ -1171,7 +1171,6 @@ typedef struct {
 
 #define NETXEN_DB_MAPSIZE_BYTES		0x1000
 
-#define NETXEN_NETDEV_WEIGHT	128
 #define NETXEN_ADAPTER_UP_MAGIC 777
 #define NETXEN_NIC_PEG_TUNE 0
 
@@ -197,7 +197,7 @@ netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev)
 	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
 		sds_ring = &recv_ctx->sds_rings[ring];
 		netif_napi_add(netdev, &sds_ring->napi,
-				netxen_nic_poll, NETXEN_NETDEV_WEIGHT);
+				netxen_nic_poll, NAPI_POLL_WEIGHT);
 	}
 
 	return 0;
@@ -71,14 +71,18 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
 		plat->force_sf_dma_mode = 1;
 	}
 
-	dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL);
-	if (!dma_cfg)
-		return -ENOMEM;
-
-	plat->dma_cfg = dma_cfg;
-	of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
-	dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst");
-	dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst");
+	if (of_find_property(np, "snps,pbl", NULL)) {
+		dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
+				       GFP_KERNEL);
+		if (!dma_cfg)
+			return -ENOMEM;
+		plat->dma_cfg = dma_cfg;
+		of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
+		dma_cfg->fixed_burst =
+			of_property_read_bool(np, "snps,fixed-burst");
+		dma_cfg->mixed_burst =
+			of_property_read_bool(np, "snps,mixed-burst");
+	}
 
 	return 0;
 }
@@ -1466,8 +1466,7 @@ static void gelic_ether_setup_netdev_ops(struct net_device *netdev,
 {
 	netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT;
 	/* NAPI */
-	netif_napi_add(netdev, napi,
-		       gelic_net_poll, GELIC_NET_NAPI_WEIGHT);
+	netif_napi_add(netdev, napi, gelic_net_poll, NAPI_POLL_WEIGHT);
 	netdev->ethtool_ops = &gelic_ether_ethtool_ops;
 	netdev->netdev_ops = &gelic_netdevice_ops;
 }
@@ -37,7 +37,6 @@
 #define GELIC_NET_RXBUF_ALIGN		128
 #define GELIC_CARD_RX_CSUM_DEFAULT	1 /* hw chksum */
 #define GELIC_NET_WATCHDOG_TIMEOUT	5*HZ
-#define GELIC_NET_NAPI_WEIGHT		(GELIC_NET_RX_DESCRIPTORS)
 #define GELIC_NET_BROADCAST_ADDR	0xffffffffffffL
 
 #define GELIC_NET_MC_COUNT_MAX		32 /* multicast address list */
@@ -175,6 +175,7 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
 		printk(KERN_WARNING "Setting MDIO clock divisor to "
 		       "default %d\n", DEFAULT_CLOCK_DIVISOR);
 		clk_div = DEFAULT_CLOCK_DIVISOR;
+		of_node_put(np1);
 		goto issue;
 	}
 
@@ -400,6 +400,10 @@ static const struct usb_device_id mbim_devs[] = {
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68a2, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
 	  .driver_info = (unsigned long)&cdc_mbim_info_zlp,
 	},
+	/* HP hs2434 Mobile Broadband Module needs ZLPs */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x3f0, 0x4b1d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+	  .driver_info = (unsigned long)&cdc_mbim_info_zlp,
+	},
 	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
 	  .driver_info = (unsigned long)&cdc_mbim_info,
 	},
@@ -448,6 +448,7 @@ static void ath9k_htc_tx_process(struct ath9k_htc_priv *priv,
 	struct ieee80211_conf *cur_conf = &priv->hw->conf;
 	bool txok;
 	int slot;
+	int hdrlen, padsize;
 
 	slot = strip_drv_header(priv, skb);
 	if (slot < 0) {
@@ -504,6 +505,15 @@ static void ath9k_htc_tx_process(struct ath9k_htc_priv *priv,
 
 	ath9k_htc_tx_clear_slot(priv, slot);
 
+	/* Remove padding before handing frame back to mac80211 */
+	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+
+	padsize = hdrlen & 3;
+	if (padsize && skb->len > hdrlen + padsize) {
+		memmove(skb->data + padsize, skb->data, hdrlen);
+		skb_pull(skb, padsize);
+	}
+
 	/* Send status to mac80211 */
 	ieee80211_tx_status(priv->hw, skb);
 }
@@ -802,7 +802,8 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
 		IEEE80211_HW_PS_NULLFUNC_STACK |
 		IEEE80211_HW_SPECTRUM_MGMT |
 		IEEE80211_HW_REPORTS_TX_ACK_STATUS |
-		IEEE80211_HW_SUPPORTS_RC_TABLE;
+		IEEE80211_HW_SUPPORTS_RC_TABLE |
+		IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
 
 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
 		hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
@@ -173,8 +173,7 @@ static void ath_restart_work(struct ath_softc *sc)
 {
 	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
 
-	if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9485(sc->sc_ah) ||
-	    AR_SREV_9550(sc->sc_ah))
+	if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9330(sc->sc_ah))
 		ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work,
 				     msecs_to_jiffies(ATH_PLL_WORK_INTERVAL));
 
@@ -1860,7 +1860,8 @@ void *carl9170_alloc(size_t priv_size)
 		     IEEE80211_HW_PS_NULLFUNC_STACK |
 		     IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC |
 		     IEEE80211_HW_SUPPORTS_RC_TABLE |
-		     IEEE80211_HW_SIGNAL_DBM;
+		     IEEE80211_HW_SIGNAL_DBM |
+		     IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
 
 	if (!modparam_noht) {
 		/*
@@ -4464,9 +4464,9 @@ il4965_irq_tasklet(struct il_priv *il)
 			set_bit(S_RFKILL, &il->status);
 		} else {
 			clear_bit(S_RFKILL, &il->status);
-			wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
+			il_force_reset(il, true);
 		}
+		wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
 
 		handled |= CSR_INT_BIT_RF_KILL;
 	}
@@ -6133,7 +6133,8 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 		    IEEE80211_HW_SUPPORTS_PS |
 		    IEEE80211_HW_PS_NULLFUNC_STACK |
 		    IEEE80211_HW_AMPDU_AGGREGATION |
-		    IEEE80211_HW_REPORTS_TX_ACK_STATUS;
+		    IEEE80211_HW_REPORTS_TX_ACK_STATUS |
+		    IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
 
 	/*
 	 * Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING for USB devices
@@ -221,7 +221,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
 	pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt	= 0x01;
 	for (i = 0; i < PM8001_MAX_INB_NUM; i++) {
 		pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt	=
-			PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30);
+			PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30);
 		pm8001_ha->inbnd_q_tbl[i].upper_base_addr	=
 			pm8001_ha->memoryMap.region[IB + i].phys_addr_hi;
 		pm8001_ha->inbnd_q_tbl[i].lower_base_addr	=
@@ -247,7 +247,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
 	}
 	for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) {
 		pm8001_ha->outbnd_q_tbl[i].element_size_cnt	=
-			PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30);
+			PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30);
 		pm8001_ha->outbnd_q_tbl[i].upper_base_addr	=
 			pm8001_ha->memoryMap.region[OB + i].phys_addr_hi;
 		pm8001_ha->outbnd_q_tbl[i].lower_base_addr	=
@@ -275,7 +275,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
 
 	for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) {
 		pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt	=
-			PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30);
+			PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30);
 		pm8001_ha->inbnd_q_tbl[i].upper_base_addr	=
 			pm8001_ha->memoryMap.region[IB + i].phys_addr_hi;
 		pm8001_ha->inbnd_q_tbl[i].lower_base_addr	=
@@ -301,7 +301,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
 	}
 	for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
 		pm8001_ha->outbnd_q_tbl[i].element_size_cnt	=
-			PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30);
+			PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30);
 		pm8001_ha->outbnd_q_tbl[i].upper_base_addr	=
 			pm8001_ha->memoryMap.region[OB + i].phys_addr_hi;
 		pm8001_ha->outbnd_q_tbl[i].lower_base_addr	=
@@ -341,8 +341,8 @@ void hvsilib_establish(struct hvsi_priv *pv)
 
 	pr_devel("HVSI@%x:   ... waiting handshake\n", pv->termno);
 
-	/* Try for up to 200s */
-	for (timeout = 0; timeout < 20; timeout++) {
+	/* Try for up to 400ms */
+	for (timeout = 0; timeout < 40; timeout++) {
 		if (pv->established)
 			goto established;
 		if (!hvsi_get_packet(pv))
@@ -305,9 +305,11 @@ static int __init ohci_pci_init(void)
 
 	ohci_init_driver(&ohci_pci_hc_driver, &pci_overrides);
 
+#ifdef CONFIG_PM
 	/* Entries for the PCI suspend/resume callbacks are special */
 	ohci_pci_hc_driver.pci_suspend = ohci_suspend;
 	ohci_pci_hc_driver.pci_resume = ohci_resume;
+#endif
 
 	return pci_register_driver(&ohci_pci_driver);
 }
fs/dcache.c
@@ -229,7 +229,7 @@ static void __d_free(struct rcu_head *head)
  */
 static void d_free(struct dentry *dentry)
 {
-	BUG_ON(dentry->d_count);
+	BUG_ON(dentry->d_lockref.count);
 	this_cpu_dec(nr_dentry);
 	if (dentry->d_op && dentry->d_op->d_release)
 		dentry->d_op->d_release(dentry);
@@ -467,7 +467,7 @@ static inline struct dentry *dentry_kill(struct dentry *dentry, int ref)
 	}
 
 	if (ref)
-		dentry->d_count--;
+		dentry->d_lockref.count--;
 	/*
 	 * inform the fs via d_prune that this dentry is about to be
 	 * unhashed and destroyed.
@@ -513,15 +513,10 @@ void dput(struct dentry *dentry)
 		return;
 
 repeat:
-	if (dentry->d_count == 1)
+	if (dentry->d_lockref.count == 1)
 		might_sleep();
-	spin_lock(&dentry->d_lock);
-	BUG_ON(!dentry->d_count);
-	if (dentry->d_count > 1) {
-		dentry->d_count--;
-		spin_unlock(&dentry->d_lock);
+	if (lockref_put_or_lock(&dentry->d_lockref))
 		return;
-	}
 
 	if (dentry->d_flags & DCACHE_OP_DELETE) {
 		if (dentry->d_op->d_delete(dentry))
@@ -535,7 +530,7 @@ void dput(struct dentry *dentry)
 	dentry->d_flags |= DCACHE_REFERENCED;
 	dentry_lru_add(dentry);
 
-	dentry->d_count--;
+	dentry->d_lockref.count--;
 	spin_unlock(&dentry->d_lock);
 	return;
 
@@ -590,7 +585,7 @@ int d_invalidate(struct dentry * dentry)
 	 * We also need to leave mountpoints alone,
 	 * directory or not.
 	 */
-	if (dentry->d_count > 1 && dentry->d_inode) {
+	if (dentry->d_lockref.count > 1 && dentry->d_inode) {
 		if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) {
 			spin_unlock(&dentry->d_lock);
 			return -EBUSY;
@@ -606,14 +601,12 @@ EXPORT_SYMBOL(d_invalidate);
 /* This must be called with d_lock held */
 static inline void __dget_dlock(struct dentry *dentry)
 {
-	dentry->d_count++;
+	dentry->d_lockref.count++;
 }
 
 static inline void __dget(struct dentry *dentry)
 {
-	spin_lock(&dentry->d_lock);
-	__dget_dlock(dentry);
-	spin_unlock(&dentry->d_lock);
+	lockref_get(&dentry->d_lockref);
 }
 
 struct dentry *dget_parent(struct dentry *dentry)
@@ -634,8 +627,8 @@ struct dentry *dget_parent(struct dentry *dentry)
 		goto repeat;
 	}
 	rcu_read_unlock();
-	BUG_ON(!ret->d_count);
-	ret->d_count++;
+	BUG_ON(!ret->d_lockref.count);
+	ret->d_lockref.count++;
 	spin_unlock(&ret->d_lock);
 	return ret;
 }
@@ -718,7 +711,7 @@ void d_prune_aliases(struct inode *inode)
 	spin_lock(&inode->i_lock);
 	hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
 		spin_lock(&dentry->d_lock);
-		if (!dentry->d_count) {
+		if (!dentry->d_lockref.count) {
 			__dget_dlock(dentry);
 			__d_drop(dentry);
 			spin_unlock(&dentry->d_lock);
@@ -763,12 +756,8 @@ static void try_prune_one_dentry(struct dentry *dentry)
 	/* Prune ancestors. */
 	dentry = parent;
 	while (dentry) {
-		spin_lock(&dentry->d_lock);
-		if (dentry->d_count > 1) {
-			dentry->d_count--;
-			spin_unlock(&dentry->d_lock);
+		if (lockref_put_or_lock(&dentry->d_lockref))
 			return;
-		}
 		dentry = dentry_kill(dentry, 1);
 	}
 }
@@ -793,7 +782,7 @@ static void shrink_dentry_list(struct list_head *list)
 		 * the LRU because of laziness during lookup. Do not free
 		 * it - just keep it off the LRU list.
 		 */
-		if (dentry->d_count) {
+		if (dentry->d_lockref.count) {
 			dentry_lru_del(dentry);
 			spin_unlock(&dentry->d_lock);
 			continue;
@@ -913,7 +902,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
 		dentry_lru_del(dentry);
 		__d_shrink(dentry);
 
-		if (dentry->d_count != 0) {
+		if (dentry->d_lockref.count != 0) {
 			printk(KERN_ERR
 			       "BUG: Dentry %p{i=%lx,n=%s}"
 			       " still in use (%d)"
@@ -922,7 +911,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
 			       dentry->d_inode ?
 			       dentry->d_inode->i_ino : 0UL,
 			       dentry->d_name.name,
-			       dentry->d_count,
+			       dentry->d_lockref.count,
 			       dentry->d_sb->s_type->name,
 			       dentry->d_sb->s_id);
 			BUG();
@@ -933,7 +922,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
 			list_del(&dentry->d_u.d_child);
 		} else {
 			parent = dentry->d_parent;
-			parent->d_count--;
+			parent->d_lockref.count--;
 			list_del(&dentry->d_u.d_child);
 		}
 
@@ -981,7 +970,7 @@ void shrink_dcache_for_umount(struct super_block *sb)
 
 	dentry = sb->s_root;
 	sb->s_root = NULL;
-	dentry->d_count--;
+	dentry->d_lockref.count--;
 	shrink_dcache_for_umount_subtree(dentry);
 
 	while (!hlist_bl_empty(&sb->s_anon)) {
@@ -1147,7 +1136,7 @@ static int select_parent(struct dentry *parent, struct list_head *dispose)
 		 * loop in shrink_dcache_parent() might not make any progress
 		 * and loop forever.
 		 */
-		if (dentry->d_count) {
+		if (dentry->d_lockref.count) {
 			dentry_lru_del(dentry);
 		} else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
 			dentry_lru_move_list(dentry, dispose);
@@ -1269,7 +1258,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
 	smp_wmb();
 	dentry->d_name.name = dname;
 
-	dentry->d_count = 1;
+	dentry->d_lockref.count = 1;
 	dentry->d_flags = 0;
 	spin_lock_init(&dentry->d_lock);
 	seqcount_init(&dentry->d_seq);
@@ -1970,7 +1959,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
 			goto next;
 		}
 
-		dentry->d_count++;
+		dentry->d_lockref.count++;
 		found = dentry;
 		spin_unlock(&dentry->d_lock);
 		break;
@@ -2069,7 +2058,7 @@ void d_delete(struct dentry * dentry)
 	spin_lock(&dentry->d_lock);
 	inode = dentry->d_inode;
 	isdir = S_ISDIR(inode->i_mode);
-	if (dentry->d_count == 1) {
+	if (dentry->d_lockref.count == 1) {
 		if (!spin_trylock(&inode->i_lock)) {
 			spin_unlock(&dentry->d_lock);
 			cpu_relax();
@@ -2948,7 +2937,7 @@ void d_genocide(struct dentry *root)
 		}
 		if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
 			dentry->d_flags |= DCACHE_GENOCIDE;
-			dentry->d_count--;
+			dentry->d_lockref.count--;
 		}
 		spin_unlock(&dentry->d_lock);
 	}
@@ -2956,7 +2945,7 @@ void d_genocide(struct dentry *root)
 		struct dentry *child = this_parent;
 		if (!(this_parent->d_flags & DCACHE_GENOCIDE)) {
 			this_parent->d_flags |= DCACHE_GENOCIDE;
-			this_parent->d_count--;
+			this_parent->d_lockref.count--;
 		}
 		this_parent = try_to_ascend(this_parent, locked, seq);
 		if (!this_parent)
@@ -3047,6 +3047,14 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
 
 		dir_index = (u32) ctx->pos;
 
+		/*
+		 * NFSv4 reserves cookies 1 and 2 for . and .. so the value
+		 * we return to the vfs is one greater than the one we use
+		 * internally.
+		 */
+		if (dir_index)
+			dir_index--;
+
 		if (dir_index > 1) {
 			struct dir_table_slot dirtab_slot;
 
@@ -3086,7 +3094,7 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
 			if (p->header.flag & BT_INTERNAL) {
 				jfs_err("jfs_readdir: bad index table");
 				DT_PUTPAGE(mp);
-				ctx->pos = -1;
+				ctx->pos = DIREND;
 				return 0;
 			}
 		} else {
@@ -3094,14 +3102,14 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
 				/*
 				 * self "."
 				 */
-				ctx->pos = 0;
+				ctx->pos = 1;
 				if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR))
 					return 0;
 			}
 			/*
 			 * parent ".."
 			 */
-			ctx->pos = 1;
+			ctx->pos = 2;
 			if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR))
 				return 0;
 
@@ -3122,22 +3130,23 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
 		/*
 		 * Legacy filesystem - OS/2 & Linux JFS < 0.3.6
 		 *
-		 * pn = index = 0:	First entry "."
-		 * pn = 0; index = 1:	Second entry ".."
+		 * pn = 0; index = 1:	First entry "."
+		 * pn = 0; index = 2:	Second entry ".."
 		 * pn > 0:		Real entries, pn=1 -> leftmost page
 		 * pn = index = -1:	No more entries
 		 */
 		dtpos = ctx->pos;
-		if (dtpos == 0) {
+		if (dtpos < 2) {
 			/* build "." entry */
+			ctx->pos = 1;
 			if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR))
 				return 0;
-			dtoffset->index = 1;
+			dtoffset->index = 2;
 			ctx->pos = dtpos;
 		}
 
 		if (dtoffset->pn == 0) {
-			if (dtoffset->index == 1) {
+			if (dtoffset->index == 2) {
 				/* build ".." entry */
 				if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR))
 					return 0;
@@ -3228,6 +3237,12 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
 				}
 				jfs_dirent->position = unique_pos++;
 			}
+			/*
+			 * We add 1 to the index because we may
+			 * use a value of 2 internally, and NFSv4
+			 * doesn't like that.
+			 */
+			jfs_dirent->position++;
 		} else {
 			jfs_dirent->position = dtpos;
 			len = min(d_namleft, DTLHDRDATALEN_LEGACY);
fs/namei.c
@@ -536,8 +536,8 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
 		 * a reference at this point.
 		 */
 		BUG_ON(!IS_ROOT(dentry) && dentry->d_parent != parent);
-		BUG_ON(!parent->d_count);
-		parent->d_count++;
+		BUG_ON(!parent->d_lockref.count);
+		parent->d_lockref.count++;
 		spin_unlock(&dentry->d_lock);
 	}
 	spin_unlock(&parent->d_lock);
@@ -3327,7 +3327,7 @@ void dentry_unhash(struct dentry *dentry)
 {
 	shrink_dcache_parent(dentry);
 	spin_lock(&dentry->d_lock);
-	if (dentry->d_count == 1)
+	if (dentry->d_lockref.count == 1)
 		__d_drop(dentry);
 	spin_unlock(&dentry->d_lock);
 }
@@ -3671,11 +3671,15 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
 	if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0)
 		return -EINVAL;
 	/*
-	 * Using empty names is equivalent to using AT_SYMLINK_FOLLOW
-	 * on /proc/self/fd/<fd>.
+	 * To use null names we require CAP_DAC_READ_SEARCH
+	 * This ensures that not everyone will be able to create
+	 * handlink using the passed filedescriptor.
 	 */
-	if (flags & AT_EMPTY_PATH)
+	if (flags & AT_EMPTY_PATH) {
+		if (!capable(CAP_DAC_READ_SEARCH))
+			return -ENOENT;
 		how = LOOKUP_EMPTY;
+	}
 
 	if (flags & AT_SYMLINK_FOLLOW)
 		how |= LOOKUP_FOLLOW;
@@ -1022,7 +1022,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
 	struct inode *inode = NULL;
 	struct ocfs2_super *osb = NULL;
 	struct buffer_head *bh = NULL;
-	char nodestr[8];
+	char nodestr[12];
 	struct ocfs2_blockcheck_stats stats;
 
 	trace_ocfs2_fill_super(sb, data, silent);
@@ -9,6 +9,7 @@
 #include <linux/seqlock.h>
 #include <linux/cache.h>
 #include <linux/rcupdate.h>
+#include <linux/lockref.h>
 
 struct nameidata;
 struct path;
@@ -100,6 +101,8 @@ extern unsigned int full_name_hash(const unsigned char *, unsigned int);
 # endif
 #endif
 
+#define d_lock	d_lockref.lock
+
 struct dentry {
 	/* RCU lookup touched fields */
 	unsigned int d_flags;		/* protected by d_lock */
@@ -112,8 +115,7 @@ struct dentry {
 	unsigned char d_iname[DNAME_INLINE_LEN];	/* small names */
 
 	/* Ref lookup also touches following */
-	unsigned int d_count;		/* protected by d_lock */
-	spinlock_t d_lock;		/* per dentry lock */
+	struct lockref d_lockref;	/* per-dentry lock and refcount */
 	const struct dentry_operations *d_op;
 	struct super_block *d_sb;	/* The root of the dentry tree */
 	unsigned long d_time;		/* used by d_revalidate */
@@ -318,7 +320,7 @@ static inline int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq)
 	assert_spin_locked(&dentry->d_lock);
 	if (!read_seqcount_retry(&dentry->d_seq, seq)) {
 		ret = 1;
-		dentry->d_count++;
+		dentry->d_lockref.count++;
 	}
 
 	return ret;
@@ -326,7 +328,7 @@ static inline int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq)
 
 static inline unsigned d_count(const struct dentry *dentry)
 {
-	return dentry->d_count;
+	return dentry->d_lockref.count;
 }
 
 /* validate "insecure" dentry pointer */
@@ -357,17 +359,14 @@ extern char *dentry_path(struct dentry *, char *, int);
 static inline struct dentry *dget_dlock(struct dentry *dentry)
 {
 	if (dentry)
-		dentry->d_count++;
+		dentry->d_lockref.count++;
 	return dentry;
 }
 
 static inline struct dentry *dget(struct dentry *dentry)
 {
-	if (dentry) {
-		spin_lock(&dentry->d_lock);
-		dget_dlock(dentry);
-		spin_unlock(&dentry->d_lock);
-	}
+	if (dentry)
+		lockref_get(&dentry->d_lockref);
 	return dentry;
 }
 
include/linux/lockref.h (new file)

@@ -0,0 +1,71 @@
+#ifndef __LINUX_LOCKREF_H
+#define __LINUX_LOCKREF_H
+
+/*
+ * Locked reference counts.
+ *
+ * These are different from just plain atomic refcounts in that they
+ * are atomic with respect to the spinlock that goes with them.  In
+ * particular, there can be implementations that don't actually get
+ * the spinlock for the common decrement/increment operations, but they
+ * still have to check that the operation is done semantically as if
+ * the spinlock had been taken (using a cmpxchg operation that covers
+ * both the lock and the count word, or using memory transactions, for
+ * example).
+ */
+
+#include <linux/spinlock.h>
+
+struct lockref {
+	spinlock_t lock;
+	unsigned int count;
+};
+
+/**
+ * lockref_get - Increments reference count unconditionally
+ * @lockcnt: pointer to lockref structure
+ *
+ * This operation is only valid if you already hold a reference
+ * to the object, so you know the count cannot be zero.
+ */
+static inline void lockref_get(struct lockref *lockref)
+{
+	spin_lock(&lockref->lock);
+	lockref->count++;
+	spin_unlock(&lockref->lock);
+}
+
+/**
+ * lockref_get_not_zero - Increments count unless the count is 0
+ * @lockcnt: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if count is 0
+ */
+static inline int lockref_get_not_zero(struct lockref *lockref)
+{
+	int retval = 0;
+
+	spin_lock(&lockref->lock);
+	if (lockref->count) {
+		lockref->count++;
+		retval = 1;
+	}
+	spin_unlock(&lockref->lock);
+	return retval;
+}
+
+/**
+ * lockref_put_or_lock - decrements count unless count <= 1 before decrement
+ * @lockcnt: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
+ */
+static inline int lockref_put_or_lock(struct lockref *lockref)
+{
+	spin_lock(&lockref->lock);
+	if (lockref->count <= 1)
+		return 0;
+	lockref->count--;
+	spin_unlock(&lockref->lock);
+	return 1;
+}
+
+#endif /* __LINUX_LOCKREF_H */
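The three primitives above are enough to express the dget()/dput() fast paths that fs/dcache.c switches to in this merge. A usage sketch (not from the patch; my_obj and my_obj_free are hypothetical names):

struct my_obj {
	struct lockref ref;
	/* ... payload ... */
};

static void my_obj_free(struct my_obj *obj);	/* hypothetical teardown */

static void my_obj_get(struct my_obj *obj)
{
	/* valid only while the caller already holds a reference */
	lockref_get(&obj->ref);
}

static void my_obj_put(struct my_obj *obj)
{
	/* fast path: other references remain, count just dropped by one */
	if (lockref_put_or_lock(&obj->ref))
		return;
	/* slow path: count was <= 1 and we now hold obj->ref.lock */
	spin_unlock(&obj->ref.lock);
	my_obj_free(obj);
}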
@@ -14,6 +14,10 @@ struct fs_struct;
 * A structure to contain pointers to all per-process
 * namespaces - fs (mount), uts, network, sysvipc, etc.
 *
+ * The pid namespace is an exception -- it's accessed using
+ * task_active_pid_ns.  The pid namespace here is the
+ * namespace that children will use.
+ *
 * 'count' is the number of tasks holding a reference.
 * The count for each namespace, then, will be the number
 * of nsproxies pointing to it, not the number of tasks.
@@ -27,7 +31,7 @@ struct nsproxy {
 	struct uts_namespace *uts_ns;
 	struct ipc_namespace *ipc_ns;
 	struct mnt_namespace *mnt_ns;
-	struct pid_namespace *pid_ns;
+	struct pid_namespace *pid_ns_for_children;
 	struct net *net_ns;
 };
 extern struct nsproxy init_nsproxy;
@@ -16,6 +16,7 @@
 #include <linux/list.h>
 #include <linux/rbtree.h>
 #include <linux/err.h>
+#include <linux/bug.h>
 
 struct module;
 struct device;
@@ -123,6 +123,7 @@ static inline bool sk_busy_loop(struct sock *sk, int nonblock)
 		/* local bh are disabled so it is ok to use _BH */
 		NET_ADD_STATS_BH(sock_net(sk),
 				 LINUX_MIB_BUSYPOLLRXPACKETS, rc);
+		cpu_relax();
 
 	} while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
 		 !need_resched() && !busy_loop_timeout(end_time));
@@ -61,6 +61,7 @@ struct genl_family {
 	struct list_head	ops_list;	/* private */
 	struct list_head	family_list;	/* private */
 	struct list_head	mcast_groups;	/* private */
+	struct module		*module;
 };
 
 /**
@@ -121,9 +122,24 @@ struct genl_ops {
 	struct list_head	ops_list;
 };
 
-extern int genl_register_family(struct genl_family *family);
-extern int genl_register_family_with_ops(struct genl_family *family,
+extern int __genl_register_family(struct genl_family *family);
+
+static inline int genl_register_family(struct genl_family *family)
+{
+	family->module = THIS_MODULE;
+	return __genl_register_family(family);
+}
+
+extern int __genl_register_family_with_ops(struct genl_family *family,
 	struct genl_ops *ops, size_t n_ops);
+
+static inline int genl_register_family_with_ops(struct genl_family *family,
+	struct genl_ops *ops, size_t n_ops)
+{
+	family->module = THIS_MODULE;
+	return __genl_register_family_with_ops(family, ops, n_ops);
+}
+
 extern int genl_unregister_family(struct genl_family *family);
 extern int genl_register_ops(struct genl_family *, struct genl_ops *ops);
 extern int genl_unregister_ops(struct genl_family *, struct genl_ops *ops);
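The point of the genetlink change is that registration now records which module owns the family; callers are unchanged because the old entry points become inline wrappers that stamp THIS_MODULE before handing off to the new __genl_* functions. A usage sketch (not from the patch; my_genl_family and my_genl_init are hypothetical):

static struct genl_family my_genl_family = {
	.id		= GENL_ID_GENERATE,
	.name		= "my_family",
	.version	= 1,
	.maxattr	= 0,
};

static int __init my_genl_init(void)
{
	/* the inline wrapper sets my_genl_family.module = THIS_MODULE
	 * and then calls __genl_register_family() */
	return genl_register_family(&my_genl_family);
}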
@@ -1499,6 +1499,7 @@ enum ieee80211_hw_flags {
 	IEEE80211_HW_SUPPORTS_RC_TABLE			= 1<<24,
 	IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF		= 1<<25,
 	IEEE80211_HW_TIMING_BEACON_ONLY			= 1<<26,
+	IEEE80211_HW_SUPPORTS_HT_CCK_RATES		= 1<<27,
 };
 
 /**
@@ -317,4 +317,12 @@ static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
 	return hoplimit;
 }
 
+static inline int ip_skb_dst_mtu(struct sk_buff *skb)
+{
+	struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;
+
+	return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
+	       skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
+}
+
 #endif /* _ROUTE_H */
@@ -341,10 +341,13 @@ struct xfrm_state_afinfo {
                            struct sk_buff *skb);
     int         (*transport_finish)(struct sk_buff *skb,
                         int async);
+    void            (*local_error)(struct sk_buff *skb, u32 mtu);
 };
 
 extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
 extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
+extern struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
+extern void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
 
 extern void xfrm_state_delete_tunnel(struct xfrm_state *x);

@@ -1477,6 +1480,7 @@ extern int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
 extern int xfrm_output_resume(struct sk_buff *skb, int err);
 extern int xfrm_output(struct sk_buff *skb);
 extern int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
+extern void xfrm_local_error(struct sk_buff *skb, int mtu);
 extern int xfrm4_extract_header(struct sk_buff *skb);
 extern int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
 extern int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
@@ -1497,6 +1501,7 @@ extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short fam
 extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
 extern int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel *handler);
 extern int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel *handler);
+extern void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
 extern int xfrm6_extract_header(struct sk_buff *skb);
 extern int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
 extern int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
@@ -1514,6 +1519,7 @@ extern int xfrm6_output(struct sk_buff *skb);
 extern int xfrm6_output_finish(struct sk_buff *skb);
 extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
                  u8 **prevhdr);
+extern void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
 
 #ifdef CONFIG_XFRM
 extern int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);

@@ -55,7 +55,7 @@ struct rsnd_ssi_platform_info {
 /*
  * flags
  */
-#define RSND_SCU_USB_HPBIF      (1 << 31) /* it needs RSND_SSI_DEPENDENT */
+#define RSND_SCU_USE_HPBIF      (1 << 31) /* it needs RSND_SSI_DEPENDENT */
 
 struct rsnd_scu_platform_info {
     u32 flags;

@@ -2,6 +2,7 @@
 #define _UAPI_CM4000_H_
 
 #include <linux/types.h>
+#include <linux/ioctl.h>
 
 #define MAX_ATR 33
 

@@ -839,7 +839,7 @@ static inline void free_copy(struct msg_msg *copy)
 
 static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode)
 {
-    struct msg_msg *msg;
+    struct msg_msg *msg, *found = NULL;
     long count = 0;
 
     list_for_each_entry(msg, &msq->q_messages, m_list) {

@@ -848,6 +848,7 @@ static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode)
                        *msgtyp, mode)) {
             if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) {
                 *msgtyp = msg->m_type - 1;
+                found = msg;
             } else if (mode == SEARCH_NUMBER) {
                 if (*msgtyp == count)
                     return msg;

@@ -857,7 +858,7 @@ static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode)
         }
     }
 
-    return ERR_PTR(-EAGAIN);
+    return found ?: ERR_PTR(-EAGAIN);
 }
 
 long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgflg,

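Editor's note: the fix makes find_msg() remember the best SEARCH_LESSEQUAL candidate while scanning the whole queue instead of bailing out after narrowing *msgtyp once. The return statement uses the GNU "x ?: y" (Elvis) extension, which yields x when x is nonzero and y otherwise, without evaluating x twice. A minimal standalone demo of that operator (gcc/clang only):

    #include <stdio.h>

    int main(void)
    {
        const char *found = NULL;
        /* equivalent to: found ? found : "fallback", but found is
         * evaluated only once */
        const char *msg = found ?: "no message of the requested type";
        printf("%s\n", msg);
        return 0;
    }
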
@@ -4480,6 +4480,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
     struct dentry *d = cgrp->dentry;
     struct cgroup_event *event, *tmp;
     struct cgroup_subsys *ss;
+    struct cgroup *child;
     bool empty;
 
     lockdep_assert_held(&d->d_inode->i_mutex);

@@ -4490,11 +4491,27 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
      * @cgrp from being removed while __put_css_set() is in progress.
      */
     read_lock(&css_set_lock);
-    empty = list_empty(&cgrp->cset_links) && list_empty(&cgrp->children);
+    empty = list_empty(&cgrp->cset_links);
     read_unlock(&css_set_lock);
     if (!empty)
         return -EBUSY;
 
+    /*
+     * Make sure there's no live children.  We can't test ->children
+     * emptiness as dead children linger on it while being destroyed;
+     * otherwise, "rmdir parent/child parent" may fail with -EBUSY.
+     */
+    empty = true;
+    rcu_read_lock();
+    list_for_each_entry_rcu(child, &cgrp->children, sibling) {
+        empty = cgroup_is_dead(child);
+        if (!empty)
+            break;
+    }
+    rcu_read_unlock();
+    if (!empty)
+        return -EBUSY;
+
     /*
      * Block new css_tryget() by killing css refcnts.  cgroup core
      * guarantees that, by the time ->css_offline() is invoked, no new

@@ -1177,7 +1177,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
      * don't allow the creation of threads.
      */
     if ((clone_flags & (CLONE_VM|CLONE_NEWPID)) &&
-        (task_active_pid_ns(current) != current->nsproxy->pid_ns))
+        (task_active_pid_ns(current) !=
+         current->nsproxy->pid_ns_for_children))
         return ERR_PTR(-EINVAL);
 
     retval = security_task_create(clone_flags);

@@ -1351,7 +1352,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
     if (pid != &init_struct_pid) {
         retval = -ENOMEM;
-        pid = alloc_pid(p->nsproxy->pid_ns);
+        pid = alloc_pid(p->nsproxy->pid_ns_for_children);
         if (!pid)
             goto bad_fork_cleanup_io;
     }

@@ -29,15 +29,15 @@
 static struct kmem_cache *nsproxy_cachep;
 
 struct nsproxy init_nsproxy = {
-    .count  = ATOMIC_INIT(1),
-    .uts_ns = &init_uts_ns,
+    .count          = ATOMIC_INIT(1),
+    .uts_ns         = &init_uts_ns,
 #if defined(CONFIG_POSIX_MQUEUE) || defined(CONFIG_SYSVIPC)
-    .ipc_ns = &init_ipc_ns,
+    .ipc_ns         = &init_ipc_ns,
 #endif
-    .mnt_ns = NULL,
-    .pid_ns = &init_pid_ns,
+    .mnt_ns         = NULL,
+    .pid_ns_for_children    = &init_pid_ns,
 #ifdef CONFIG_NET
-    .net_ns = &init_net,
+    .net_ns         = &init_net,
 #endif
 };
 

@@ -85,9 +85,10 @@ static struct nsproxy *create_new_namespaces(unsigned long flags,
         goto out_ipc;
     }
 
-    new_nsp->pid_ns = copy_pid_ns(flags, user_ns, tsk->nsproxy->pid_ns);
-    if (IS_ERR(new_nsp->pid_ns)) {
-        err = PTR_ERR(new_nsp->pid_ns);
+    new_nsp->pid_ns_for_children =
+        copy_pid_ns(flags, user_ns, tsk->nsproxy->pid_ns_for_children);
+    if (IS_ERR(new_nsp->pid_ns_for_children)) {
+        err = PTR_ERR(new_nsp->pid_ns_for_children);
         goto out_pid;
     }
 
@@ -100,8 +101,8 @@ static struct nsproxy *create_new_namespaces(unsigned long flags,
     return new_nsp;
 
 out_net:
-    if (new_nsp->pid_ns)
-        put_pid_ns(new_nsp->pid_ns);
+    if (new_nsp->pid_ns_for_children)
+        put_pid_ns(new_nsp->pid_ns_for_children);
 out_pid:
     if (new_nsp->ipc_ns)
         put_ipc_ns(new_nsp->ipc_ns);

@@ -174,8 +175,8 @@ void free_nsproxy(struct nsproxy *ns)
         put_uts_ns(ns->uts_ns);
     if (ns->ipc_ns)
         put_ipc_ns(ns->ipc_ns);
-    if (ns->pid_ns)
-        put_pid_ns(ns->pid_ns);
+    if (ns->pid_ns_for_children)
+        put_pid_ns(ns->pid_ns_for_children);
     put_net(ns->net_ns);
     kmem_cache_free(nsproxy_cachep, ns);
 }

@@ -349,8 +349,8 @@ static int pidns_install(struct nsproxy *nsproxy, void *ns)
     if (ancestor != active)
         return -EINVAL;
 
-    put_pid_ns(nsproxy->pid_ns);
-    nsproxy->pid_ns = get_pid_ns(new);
+    put_pid_ns(nsproxy->pid_ns_for_children);
+    nsproxy->pid_ns_for_children = get_pid_ns(new);
     return 0;
 }
 

@@ -265,10 +265,9 @@ static inline void timer_list_header(struct seq_file *m, u64 now)
 static int timer_list_show(struct seq_file *m, void *v)
 {
     struct timer_list_iter *iter = v;
-    u64 now = ktime_to_ns(ktime_get());
 
     if (iter->cpu == -1 && !iter->second_pass)
-        timer_list_header(m, now);
+        timer_list_header(m, iter->now);
     else if (!iter->second_pass)
         print_cpu(m, iter->cpu, iter->now);
 #ifdef CONFIG_GENERIC_CLOCKEVENTS

@@ -298,33 +297,41 @@ void sysrq_timer_list_show(void)
     return;
 }
 
+static void *move_iter(struct timer_list_iter *iter, loff_t offset)
+{
+    for (; offset; offset--) {
+        iter->cpu = cpumask_next(iter->cpu, cpu_online_mask);
+        if (iter->cpu >= nr_cpu_ids) {
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+            if (!iter->second_pass) {
+                iter->cpu = -1;
+                iter->second_pass = true;
+            } else
+                return NULL;
+#else
+            return NULL;
+#endif
+        }
+    }
+    return iter;
+}
+
 static void *timer_list_start(struct seq_file *file, loff_t *offset)
 {
     struct timer_list_iter *iter = file->private;
 
-    if (!*offset) {
-        iter->cpu = -1;
+    if (!*offset)
         iter->now = ktime_to_ns(ktime_get());
-    } else if (iter->cpu >= nr_cpu_ids) {
-#ifdef CONFIG_GENERIC_CLOCKEVENTS
-        if (!iter->second_pass) {
-            iter->cpu = -1;
-            iter->second_pass = true;
-        } else
-            return NULL;
-#else
-        return NULL;
-#endif
-    }
-    return iter;
+    iter->cpu = -1;
+    iter->second_pass = false;
+    return move_iter(iter, *offset);
 }
 
 static void *timer_list_next(struct seq_file *file, void *v, loff_t *offset)
 {
     struct timer_list_iter *iter = file->private;
-    iter->cpu = cpumask_next(iter->cpu, cpu_online_mask);
     ++*offset;
-    return timer_list_start(file, offset);
+    return move_iter(iter, 1);
 }
 
 static void timer_list_stop(struct seq_file *seq, void *v)

@@ -2201,6 +2201,15 @@ __acquires(&pool->lock)
         dump_stack();
     }
 
+    /*
+     * The following prevents a kworker from hogging CPU on !PREEMPT
+     * kernels, where a requeueing work item waiting for something to
+     * happen could deadlock with stop_machine as such work item could
+     * indefinitely requeue itself while all other CPUs are trapped in
+     * stop_machine.
+     */
+    cond_resched();
+
     spin_lock_irq(&pool->lock);
 
     /* clear cpu intensive status */

mm/mremap.c (21 lines changed):
@@ -15,6 +15,7 @@
 #include <linux/swap.h>
 #include <linux/capability.h>
 #include <linux/fs.h>
+#include <linux/swapops.h>
 #include <linux/highmem.h>
 #include <linux/security.h>
 #include <linux/syscalls.h>

@@ -69,6 +70,23 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
     return pmd;
 }
 
+static pte_t move_soft_dirty_pte(pte_t pte)
+{
+    /*
+     * Set soft dirty bit so we can notice
+     * in userspace the ptes were moved.
+     */
+#ifdef CONFIG_MEM_SOFT_DIRTY
+    if (pte_present(pte))
+        pte = pte_mksoft_dirty(pte);
+    else if (is_swap_pte(pte))
+        pte = pte_swp_mksoft_dirty(pte);
+    else if (pte_file(pte))
+        pte = pte_file_mksoft_dirty(pte);
+#endif
+    return pte;
+}
+
 static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
               unsigned long old_addr, unsigned long old_end,
               struct vm_area_struct *new_vma, pmd_t *new_pmd,

@@ -126,7 +144,8 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
             continue;
         pte = ptep_get_and_clear(mm, old_addr, old_pte);
         pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
-        set_pte_at(mm, new_addr, new_pte, pte_mksoft_dirty(pte));
+        pte = move_soft_dirty_pte(pte);
+        set_pte_at(mm, new_addr, new_pte, pte);
     }
 
     arch_leave_lazy_mmu_mode();

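Editor's note: move_soft_dirty_pte() keeps the soft-dirty bookkeeping intact across mremap(); present, swap and file PTEs each need their own marking helper, which is why the old unconditional pte_mksoft_dirty() was wrong. Soft-dirty is observable from userspace as bit 55 of /proc/pid/pagemap; a hypothetical checker under that assumption (paths and bit layout per Documentation/vm/soft-dirty.txt, kernel built with CONFIG_MEM_SOFT_DIRTY):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Read the pagemap entry for addr and return its soft-dirty bit. */
    static int page_soft_dirty(void *addr)
    {
        uint64_t entry = 0;
        long pagesize = sysconf(_SC_PAGESIZE);
        int fd = open("/proc/self/pagemap", O_RDONLY);

        if (fd < 0)
            return -1;
        pread(fd, &entry, sizeof(entry),
              ((uintptr_t)addr / pagesize) * sizeof(entry));
        close(fd);
        return (int)((entry >> 55) & 1);    /* bit 55 = soft-dirty */
    }

    int main(void)
    {
        static char page[4096];

        page[0] = 1;    /* a write marks the page soft-dirty */
        printf("soft-dirty: %d\n", page_soft_dirty(page));
        return 0;
    }
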
@@ -162,6 +162,8 @@ static inline const char *cache_name(struct kmem_cache *s)
 
 static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx)
 {
+    if (!s->memcg_params)
+        return NULL;
     return s->memcg_params->memcg_caches[idx];
 }
 

|
|||
|
||||
mdst = br_mdb_get(br, skb, vid);
|
||||
if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
|
||||
br_multicast_querier_exists(br))
|
||||
br_multicast_querier_exists(br, eth_hdr(skb)))
|
||||
br_multicast_deliver(mdst, skb);
|
||||
else
|
||||
br_flood_deliver(br, skb, false);
|
||||
|
|
|
@@ -102,7 +102,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
     } else if (is_multicast_ether_addr(dest)) {
         mdst = br_mdb_get(br, skb, vid);
         if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
-            br_multicast_querier_exists(br)) {
+            br_multicast_querier_exists(br, eth_hdr(skb))) {
             if ((mdst && mdst->mglist) ||
                 br_multicast_is_router(br))
                 skb2 = skb;

@@ -414,16 +414,20 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
     if (!netif_running(br->dev) || br->multicast_disabled)
         return -EINVAL;
 
-    if (timer_pending(&br->multicast_querier_timer))
-        return -EBUSY;
-
     ip.proto = entry->addr.proto;
-    if (ip.proto == htons(ETH_P_IP))
+    if (ip.proto == htons(ETH_P_IP)) {
+        if (timer_pending(&br->ip4_querier.timer))
+            return -EBUSY;
+
         ip.u.ip4 = entry->addr.u.ip4;
 #if IS_ENABLED(CONFIG_IPV6)
-    else
+    } else {
+        if (timer_pending(&br->ip6_querier.timer))
+            return -EBUSY;
+
         ip.u.ip6 = entry->addr.u.ip6;
 #endif
+    }
 
     spin_lock_bh(&br->multicast_lock);
     mdb = mlock_dereference(br->mdb, br);

@@ -33,7 +33,8 @@
 
 #include "br_private.h"
 
-static void br_multicast_start_querier(struct net_bridge *br);
+static void br_multicast_start_querier(struct net_bridge *br,
+                       struct bridge_mcast_query *query);
 unsigned int br_mdb_rehash_seq;
 
 static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)

@@ -755,20 +756,35 @@ static void br_multicast_local_router_expired(unsigned long data)
 {
 }
 
-static void br_multicast_querier_expired(unsigned long data)
+static void br_multicast_querier_expired(struct net_bridge *br,
+                     struct bridge_mcast_query *query)
 {
-    struct net_bridge *br = (void *)data;
-
     spin_lock(&br->multicast_lock);
     if (!netif_running(br->dev) || br->multicast_disabled)
         goto out;
 
-    br_multicast_start_querier(br);
+    br_multicast_start_querier(br, query);
 
 out:
     spin_unlock(&br->multicast_lock);
 }
 
+static void br_ip4_multicast_querier_expired(unsigned long data)
+{
+    struct net_bridge *br = (void *)data;
+
+    br_multicast_querier_expired(br, &br->ip4_query);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void br_ip6_multicast_querier_expired(unsigned long data)
+{
+    struct net_bridge *br = (void *)data;
+
+    br_multicast_querier_expired(br, &br->ip6_query);
+}
+#endif
+
 static void __br_multicast_send_query(struct net_bridge *br,
                       struct net_bridge_port *port,
                       struct br_ip *ip)

@@ -789,37 +805,45 @@ static void __br_multicast_send_query(struct net_bridge *br,
 }
 
 static void br_multicast_send_query(struct net_bridge *br,
-                    struct net_bridge_port *port, u32 sent)
+                    struct net_bridge_port *port,
+                    struct bridge_mcast_query *query)
 {
     unsigned long time;
     struct br_ip br_group;
+    struct bridge_mcast_querier *querier = NULL;
 
     if (!netif_running(br->dev) || br->multicast_disabled ||
-        !br->multicast_querier ||
-        timer_pending(&br->multicast_querier_timer))
+        !br->multicast_querier)
         return;
 
     memset(&br_group.u, 0, sizeof(br_group.u));
 
-    br_group.proto = htons(ETH_P_IP);
-    __br_multicast_send_query(br, port, &br_group);
-
+    if (port ? (query == &port->ip4_query) :
+           (query == &br->ip4_query)) {
+        querier = &br->ip4_querier;
+        br_group.proto = htons(ETH_P_IP);
 #if IS_ENABLED(CONFIG_IPV6)
-    br_group.proto = htons(ETH_P_IPV6);
-    __br_multicast_send_query(br, port, &br_group);
+    } else {
+        querier = &br->ip6_querier;
+        br_group.proto = htons(ETH_P_IPV6);
 #endif
+    }
+
+    if (!querier || timer_pending(&querier->timer))
+        return;
+
+    __br_multicast_send_query(br, port, &br_group);
 
     time = jiffies;
-    time += sent < br->multicast_startup_query_count ?
+    time += query->startup_sent < br->multicast_startup_query_count ?
         br->multicast_startup_query_interval :
         br->multicast_query_interval;
-    mod_timer(port ? &port->multicast_query_timer :
-             &br->multicast_query_timer, time);
+    mod_timer(&query->timer, time);
 }
 
-static void br_multicast_port_query_expired(unsigned long data)
+static void br_multicast_port_query_expired(struct net_bridge_port *port,
+                        struct bridge_mcast_query *query)
 {
-    struct net_bridge_port *port = (void *)data;
     struct net_bridge *br = port->br;
 
     spin_lock(&br->multicast_lock);

@@ -827,25 +851,43 @@ static void br_multicast_port_query_expired(unsigned long data)
         port->state == BR_STATE_BLOCKING)
         goto out;
 
-    if (port->multicast_startup_queries_sent <
-        br->multicast_startup_query_count)
-        port->multicast_startup_queries_sent++;
+    if (query->startup_sent < br->multicast_startup_query_count)
+        query->startup_sent++;
 
-    br_multicast_send_query(port->br, port,
-                port->multicast_startup_queries_sent);
+    br_multicast_send_query(port->br, port, query);
 
 out:
     spin_unlock(&br->multicast_lock);
 }
 
+static void br_ip4_multicast_port_query_expired(unsigned long data)
+{
+    struct net_bridge_port *port = (void *)data;
+
+    br_multicast_port_query_expired(port, &port->ip4_query);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void br_ip6_multicast_port_query_expired(unsigned long data)
+{
+    struct net_bridge_port *port = (void *)data;
+
+    br_multicast_port_query_expired(port, &port->ip6_query);
+}
+#endif
+
 void br_multicast_add_port(struct net_bridge_port *port)
 {
     port->multicast_router = 1;
 
     setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
             (unsigned long)port);
-    setup_timer(&port->multicast_query_timer,
-            br_multicast_port_query_expired, (unsigned long)port);
+    setup_timer(&port->ip4_query.timer, br_ip4_multicast_port_query_expired,
+            (unsigned long)port);
+#if IS_ENABLED(CONFIG_IPV6)
+    setup_timer(&port->ip6_query.timer, br_ip6_multicast_port_query_expired,
+            (unsigned long)port);
+#endif
 }
 
 void br_multicast_del_port(struct net_bridge_port *port)

@@ -853,13 +895,13 @@ void br_multicast_del_port(struct net_bridge_port *port)
     del_timer_sync(&port->multicast_router_timer);
 }
 
-static void __br_multicast_enable_port(struct net_bridge_port *port)
+static void br_multicast_enable(struct bridge_mcast_query *query)
 {
-    port->multicast_startup_queries_sent = 0;
+    query->startup_sent = 0;
 
-    if (try_to_del_timer_sync(&port->multicast_query_timer) >= 0 ||
-        del_timer(&port->multicast_query_timer))
-        mod_timer(&port->multicast_query_timer, jiffies);
+    if (try_to_del_timer_sync(&query->timer) >= 0 ||
+        del_timer(&query->timer))
+        mod_timer(&query->timer, jiffies);
 }
 
 void br_multicast_enable_port(struct net_bridge_port *port)

@@ -870,7 +912,10 @@ void br_multicast_enable_port(struct net_bridge_port *port)
     if (br->multicast_disabled || !netif_running(br->dev))
         goto out;
 
-    __br_multicast_enable_port(port);
+    br_multicast_enable(&port->ip4_query);
+#if IS_ENABLED(CONFIG_IPV6)
+    br_multicast_enable(&port->ip6_query);
+#endif
 
 out:
     spin_unlock(&br->multicast_lock);

@@ -889,7 +934,10 @@ void br_multicast_disable_port(struct net_bridge_port *port)
     if (!hlist_unhashed(&port->rlist))
         hlist_del_init_rcu(&port->rlist);
     del_timer(&port->multicast_router_timer);
-    del_timer(&port->multicast_query_timer);
+    del_timer(&port->ip4_query.timer);
+#if IS_ENABLED(CONFIG_IPV6)
+    del_timer(&port->ip6_query.timer);
+#endif
     spin_unlock(&br->multicast_lock);
 }
 

@@ -1014,14 +1062,15 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
 }
 #endif
 
-static void br_multicast_update_querier_timer(struct net_bridge *br,
-                          unsigned long max_delay)
+static void
+br_multicast_update_querier_timer(struct net_bridge *br,
+                  struct bridge_mcast_querier *querier,
+                  unsigned long max_delay)
 {
-    if (!timer_pending(&br->multicast_querier_timer))
-        br->multicast_querier_delay_time = jiffies + max_delay;
+    if (!timer_pending(&querier->timer))
+        querier->delay_time = jiffies + max_delay;
 
-    mod_timer(&br->multicast_querier_timer,
-          jiffies + br->multicast_querier_interval);
+    mod_timer(&querier->timer, jiffies + br->multicast_querier_interval);
 }
 
 /*

@@ -1074,12 +1123,13 @@ static void br_multicast_mark_router(struct net_bridge *br,
 
 static void br_multicast_query_received(struct net_bridge *br,
                     struct net_bridge_port *port,
+                    struct bridge_mcast_querier *querier,
                     int saddr,
                     unsigned long max_delay)
 {
     if (saddr)
-        br_multicast_update_querier_timer(br, max_delay);
-    else if (timer_pending(&br->multicast_querier_timer))
+        br_multicast_update_querier_timer(br, querier, max_delay);
+    else if (timer_pending(&querier->timer))
         return;
 
     br_multicast_mark_router(br, port);

@@ -1129,7 +1179,8 @@ static int br_ip4_multicast_query(struct net_bridge *br,
               IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
     }
 
-    br_multicast_query_received(br, port, !!iph->saddr, max_delay);
+    br_multicast_query_received(br, port, &br->ip4_querier, !!iph->saddr,
+                    max_delay);
 
     if (!group)
         goto out;

@@ -1203,11 +1254,12 @@ static int br_ip6_multicast_query(struct net_bridge *br,
         mld2q = (struct mld2_query *)icmp6_hdr(skb);
         if (!mld2q->mld2q_nsrcs)
             group = &mld2q->mld2q_mca;
-        max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(ntohs(mld2q->mld2q_mrc)) : 1;
+
+        max_delay = max(msecs_to_jiffies(MLDV2_MRC(ntohs(mld2q->mld2q_mrc))), 1UL);
     }
 
-    br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr),
-                    max_delay);
+    br_multicast_query_received(br, port, &br->ip6_querier,
+                    !ipv6_addr_any(&ip6h->saddr), max_delay);
 
     if (!group)
         goto out;
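Editor's note: the MLDv2 change just above also fixes a unit bug -- the Maximum Response Code is carried on the wire in milliseconds, but the old code used it directly as jiffies. A simplified userspace sketch of the conversion (HZ hardcoded purely for illustration; the real msecs_to_jiffies() handles every HZ configuration):

    #include <stdio.h>

    #define HZ 100

    /* Round up, as the kernel helper does, so short delays never become 0. */
    static unsigned long msecs_to_jiffies_demo(unsigned int ms)
    {
        return ((unsigned long)ms * HZ + 999) / 1000;
    }

    int main(void)
    {
        unsigned int mrc = 10000;   /* 10 s maximum response delay */
        printf("%u ms -> %lu jiffies at HZ=%d\n",
               mrc, msecs_to_jiffies_demo(mrc), HZ);
        return 0;
    }
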
@@ -1244,7 +1296,9 @@ static int br_ip6_multicast_query(struct net_bridge *br,
 
 static void br_multicast_leave_group(struct net_bridge *br,
                      struct net_bridge_port *port,
-                     struct br_ip *group)
+                     struct br_ip *group,
+                     struct bridge_mcast_querier *querier,
+                     struct bridge_mcast_query *query)
 {
     struct net_bridge_mdb_htable *mdb;
     struct net_bridge_mdb_entry *mp;

@@ -1255,7 +1309,7 @@ static void br_multicast_leave_group(struct net_bridge *br,
     spin_lock(&br->multicast_lock);
     if (!netif_running(br->dev) ||
         (port && port->state == BR_STATE_DISABLED) ||
-        timer_pending(&br->multicast_querier_timer))
+        timer_pending(&querier->timer))
         goto out;
 
     mdb = mlock_dereference(br->mdb, br);

@@ -1263,14 +1317,13 @@ static void br_multicast_leave_group(struct net_bridge *br,
     if (!mp)
         goto out;
 
-    if (br->multicast_querier &&
-        !timer_pending(&br->multicast_querier_timer)) {
+    if (br->multicast_querier) {
         __br_multicast_send_query(br, port, &mp->addr);
 
         time = jiffies + br->multicast_last_member_count *
                  br->multicast_last_member_interval;
-        mod_timer(port ? &port->multicast_query_timer :
-                 &br->multicast_query_timer, time);
+
+        mod_timer(&query->timer, time);
 
         for (p = mlock_dereference(mp->ports, br);
              p != NULL;

@@ -1323,7 +1376,6 @@ static void br_multicast_leave_group(struct net_bridge *br,
             mod_timer(&mp->timer, time);
         }
     }
-
 out:
     spin_unlock(&br->multicast_lock);
 }

@@ -1334,6 +1386,8 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
                      __u16 vid)
 {
     struct br_ip br_group;
+    struct bridge_mcast_query *query = port ? &port->ip4_query :
+                          &br->ip4_query;
 
     if (ipv4_is_local_multicast(group))
         return;

@@ -1342,7 +1396,7 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
     br_group.proto = htons(ETH_P_IP);
     br_group.vid = vid;
 
-    br_multicast_leave_group(br, port, &br_group);
+    br_multicast_leave_group(br, port, &br_group, &br->ip4_querier, query);
 }
 
 #if IS_ENABLED(CONFIG_IPV6)

@@ -1352,6 +1406,9 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
                      __u16 vid)
 {
     struct br_ip br_group;
+    struct bridge_mcast_query *query = port ? &port->ip6_query :
+                          &br->ip6_query;
+
 
     if (!ipv6_is_transient_multicast(group))
         return;

@@ -1360,7 +1417,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
     br_group.proto = htons(ETH_P_IPV6);
     br_group.vid = vid;
 
-    br_multicast_leave_group(br, port, &br_group);
+    br_multicast_leave_group(br, port, &br_group, &br->ip6_querier, query);
 }
 #endif
 

@@ -1622,20 +1679,33 @@ int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
     return 0;
 }
 
-static void br_multicast_query_expired(unsigned long data)
+static void br_multicast_query_expired(struct net_bridge *br,
+                       struct bridge_mcast_query *query)
+{
+    spin_lock(&br->multicast_lock);
+    if (query->startup_sent < br->multicast_startup_query_count)
+        query->startup_sent++;
+
+    br_multicast_send_query(br, NULL, query);
+    spin_unlock(&br->multicast_lock);
+}
+
+static void br_ip4_multicast_query_expired(unsigned long data)
 {
     struct net_bridge *br = (void *)data;
 
-    spin_lock(&br->multicast_lock);
-    if (br->multicast_startup_queries_sent <
-        br->multicast_startup_query_count)
-        br->multicast_startup_queries_sent++;
+    br_multicast_query_expired(br, &br->ip4_query);
+}
 
-    br_multicast_send_query(br, NULL, br->multicast_startup_queries_sent);
-
-    spin_unlock(&br->multicast_lock);
+#if IS_ENABLED(CONFIG_IPV6)
+static void br_ip6_multicast_query_expired(unsigned long data)
+{
+    struct net_bridge *br = (void *)data;
+
+    br_multicast_query_expired(br, &br->ip6_query);
 }
+#endif
 
 void br_multicast_init(struct net_bridge *br)
 {
     br->hash_elasticity = 4;

@@ -1654,25 +1724,43 @@ void br_multicast_init(struct net_bridge *br)
     br->multicast_querier_interval = 255 * HZ;
     br->multicast_membership_interval = 260 * HZ;
 
-    br->multicast_querier_delay_time = 0;
+    br->ip4_querier.delay_time = 0;
+#if IS_ENABLED(CONFIG_IPV6)
+    br->ip6_querier.delay_time = 0;
+#endif
 
     spin_lock_init(&br->multicast_lock);
     setup_timer(&br->multicast_router_timer,
             br_multicast_local_router_expired, 0);
-    setup_timer(&br->multicast_querier_timer,
-            br_multicast_querier_expired, (unsigned long)br);
-    setup_timer(&br->multicast_query_timer, br_multicast_query_expired,
+    setup_timer(&br->ip4_querier.timer, br_ip4_multicast_querier_expired,
+            (unsigned long)br);
+    setup_timer(&br->ip4_query.timer, br_ip4_multicast_query_expired,
             (unsigned long)br);
+#if IS_ENABLED(CONFIG_IPV6)
+    setup_timer(&br->ip6_querier.timer, br_ip6_multicast_querier_expired,
+            (unsigned long)br);
+    setup_timer(&br->ip6_query.timer, br_ip6_multicast_query_expired,
+            (unsigned long)br);
+#endif
 }
 
-void br_multicast_open(struct net_bridge *br)
+static void __br_multicast_open(struct net_bridge *br,
+                struct bridge_mcast_query *query)
 {
-    br->multicast_startup_queries_sent = 0;
+    query->startup_sent = 0;
 
     if (br->multicast_disabled)
         return;
 
-    mod_timer(&br->multicast_query_timer, jiffies);
+    mod_timer(&query->timer, jiffies);
+}
+
+void br_multicast_open(struct net_bridge *br)
+{
+    __br_multicast_open(br, &br->ip4_query);
+#if IS_ENABLED(CONFIG_IPV6)
+    __br_multicast_open(br, &br->ip6_query);
+#endif
 }
 
 void br_multicast_stop(struct net_bridge *br)

@@ -1684,8 +1772,12 @@ void br_multicast_stop(struct net_bridge *br)
     int i;
 
     del_timer_sync(&br->multicast_router_timer);
-    del_timer_sync(&br->multicast_querier_timer);
-    del_timer_sync(&br->multicast_query_timer);
+    del_timer_sync(&br->ip4_querier.timer);
+    del_timer_sync(&br->ip4_query.timer);
+#if IS_ENABLED(CONFIG_IPV6)
+    del_timer_sync(&br->ip6_querier.timer);
+    del_timer_sync(&br->ip6_query.timer);
+#endif
 
     spin_lock_bh(&br->multicast_lock);
     mdb = mlock_dereference(br->mdb, br);

@@ -1788,18 +1880,24 @@ int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
     return err;
 }
 
-static void br_multicast_start_querier(struct net_bridge *br)
+static void br_multicast_start_querier(struct net_bridge *br,
+                       struct bridge_mcast_query *query)
 {
     struct net_bridge_port *port;
 
-    br_multicast_open(br);
+    __br_multicast_open(br, query);
 
     list_for_each_entry(port, &br->port_list, list) {
         if (port->state == BR_STATE_DISABLED ||
             port->state == BR_STATE_BLOCKING)
             continue;
 
-        __br_multicast_enable_port(port);
+        if (query == &br->ip4_query)
+            br_multicast_enable(&port->ip4_query);
+#if IS_ENABLED(CONFIG_IPV6)
+        else
+            br_multicast_enable(&port->ip6_query);
+#endif
     }
 }
 

@@ -1834,7 +1932,10 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
             goto rollback;
     }
 
-    br_multicast_start_querier(br);
+    br_multicast_start_querier(br, &br->ip4_query);
+#if IS_ENABLED(CONFIG_IPV6)
+    br_multicast_start_querier(br, &br->ip6_query);
+#endif
 
 unlock:
     spin_unlock_bh(&br->multicast_lock);

@@ -1857,10 +1958,18 @@ int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
         goto unlock;
 
     max_delay = br->multicast_query_response_interval;
-    if (!timer_pending(&br->multicast_querier_timer))
-        br->multicast_querier_delay_time = jiffies + max_delay;
 
-    br_multicast_start_querier(br);
+    if (!timer_pending(&br->ip4_querier.timer))
+        br->ip4_querier.delay_time = jiffies + max_delay;
+
+    br_multicast_start_querier(br, &br->ip4_query);
+
+#if IS_ENABLED(CONFIG_IPV6)
+    if (!timer_pending(&br->ip6_querier.timer))
+        br->ip6_querier.delay_time = jiffies + max_delay;
+
+    br_multicast_start_querier(br, &br->ip6_query);
+#endif
 
 unlock:
     spin_unlock_bh(&br->multicast_lock);

@@ -66,6 +66,20 @@ struct br_ip
     __u16       vid;
 };
 
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+/* our own querier */
+struct bridge_mcast_query {
+    struct timer_list   timer;
+    u32         startup_sent;
+};
+
+/* other querier */
+struct bridge_mcast_querier {
+    struct timer_list       timer;
+    unsigned long           delay_time;
+};
+#endif
+
 struct net_port_vlans {
     u16             port_idx;
     u16             pvid;

@@ -162,10 +176,12 @@ struct net_bridge_port
 #define BR_FLOOD        0x00000040
 
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
-    u32             multicast_startup_queries_sent;
+    struct bridge_mcast_query   ip4_query;
+#if IS_ENABLED(CONFIG_IPV6)
+    struct bridge_mcast_query   ip6_query;
+#endif /* IS_ENABLED(CONFIG_IPV6) */
     unsigned char           multicast_router;
     struct timer_list       multicast_router_timer;
-    struct timer_list       multicast_query_timer;
     struct hlist_head       mglist;
     struct hlist_node       rlist;
 #endif

@@ -258,7 +274,6 @@ struct net_bridge
     u32             hash_max;
 
     u32             multicast_last_member_count;
-    u32             multicast_startup_queries_sent;
     u32             multicast_startup_query_count;
 
     unsigned long           multicast_last_member_interval;

@@ -267,15 +282,18 @@ struct net_bridge
     unsigned long           multicast_query_interval;
     unsigned long           multicast_query_response_interval;
     unsigned long           multicast_startup_query_interval;
-    unsigned long           multicast_querier_delay_time;
 
     spinlock_t          multicast_lock;
     struct net_bridge_mdb_htable __rcu *mdb;
     struct hlist_head       router_list;
 
     struct timer_list       multicast_router_timer;
-    struct timer_list       multicast_querier_timer;
-    struct timer_list       multicast_query_timer;
+    struct bridge_mcast_querier ip4_querier;
+    struct bridge_mcast_query   ip4_query;
+#if IS_ENABLED(CONFIG_IPV6)
+    struct bridge_mcast_querier ip6_querier;
+    struct bridge_mcast_query   ip6_query;
+#endif /* IS_ENABLED(CONFIG_IPV6) */
 #endif
 
     struct timer_list       hello_timer;

@@ -503,11 +521,27 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
         timer_pending(&br->multicast_router_timer));
 }
 
-static inline bool br_multicast_querier_exists(struct net_bridge *br)
+static inline bool
+__br_multicast_querier_exists(struct net_bridge *br,
+                  struct bridge_mcast_querier *querier)
 {
-    return time_is_before_jiffies(br->multicast_querier_delay_time) &&
-           (br->multicast_querier ||
-        timer_pending(&br->multicast_querier_timer));
+    return time_is_before_jiffies(querier->delay_time) &&
+           (br->multicast_querier || timer_pending(&querier->timer));
+}
+
+static inline bool br_multicast_querier_exists(struct net_bridge *br,
+                           struct ethhdr *eth)
+{
+    switch (eth->h_proto) {
+    case (htons(ETH_P_IP)):
+        return __br_multicast_querier_exists(br, &br->ip4_querier);
+#if IS_ENABLED(CONFIG_IPV6)
+    case (htons(ETH_P_IPV6)):
+        return __br_multicast_querier_exists(br, &br->ip6_querier);
+#endif
+    default:
+        return false;
+    }
+}
 #else
 static inline int br_multicast_rcv(struct net_bridge *br,

@@ -565,7 +599,8 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
 {
     return 0;
 }
-static inline bool br_multicast_querier_exists(struct net_bridge *br)
+static inline bool br_multicast_querier_exists(struct net_bridge *br,
+                           struct ethhdr *eth)
 {
     return false;
 }

@@ -346,14 +346,9 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
         if (new_index < 0)
             new_index = skb_tx_hash(dev, skb);
 
-        if (queue_index != new_index && sk) {
-            struct dst_entry *dst =
-                    rcu_dereference_check(sk->sk_dst_cache, 1);
-
-            if (dst && skb_dst(skb) == dst)
-                sk_tx_queue_set(sk, queue_index);
-
-        }
+        if (queue_index != new_index && sk &&
+            rcu_access_pointer(sk->sk_dst_cache))
+            sk_tx_queue_set(sk, queue_index);
 
         queue_index = new_index;
     }

@@ -54,7 +54,7 @@ static __inline__ int scm_check_creds(struct ucred *creds)
         return -EINVAL;
 
     if ((creds->pid == task_tgid_vnr(current) ||
-         ns_capable(current->nsproxy->pid_ns->user_ns, CAP_SYS_ADMIN)) &&
+         ns_capable(task_active_pid_ns(current)->user_ns, CAP_SYS_ADMIN)) &&
         ((uid_eq(uid, cred->uid)   || uid_eq(uid, cred->euid) ||
           uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) &&
         ((gid_eq(gid, cred->gid)   || gid_eq(gid, cred->egid) ||

@@ -211,14 +211,6 @@ static inline int ip_finish_output2(struct sk_buff *skb)
     return -EINVAL;
 }
 
-static inline int ip_skb_dst_mtu(struct sk_buff *skb)
-{
-    struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;
-
-    return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
-           skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
-}
-
 static int ip_finish_output(struct sk_buff *skb)
 {
 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)

@@ -190,15 +190,14 @@ static int ipip_rcv(struct sk_buff *skb)
     struct ip_tunnel *tunnel;
     const struct iphdr *iph;
 
-    if (iptunnel_pull_header(skb, 0, tpi.proto))
-        goto drop;
-
     iph = ip_hdr(skb);
     tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
             iph->saddr, iph->daddr, 0);
     if (tunnel) {
         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
             goto drop;
+        if (iptunnel_pull_header(skb, 0, tpi.proto))
+            goto drop;
         return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error);
     }
 

@@ -571,7 +571,8 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
     flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
                RT_SCOPE_UNIVERSE,
                inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
-               inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP,
+               inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP |
+                (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
                daddr, saddr, 0, 0);
 
     if (!inet->hdrincl) {

@@ -3535,7 +3535,10 @@ static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr
         ++ptr;
         tp->rx_opt.rcv_tsval = ntohl(*ptr);
         ++ptr;
-        tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset;
+        if (*ptr)
+            tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset;
+        else
+            tp->rx_opt.rcv_tsecr = 0;
         return true;
     }
     return false;

@@ -3560,7 +3563,7 @@ static bool tcp_fast_parse_options(const struct sk_buff *skb,
     }
 
     tcp_parse_options(skb, &tp->rx_opt, 1, NULL);
-    if (tp->rx_opt.saw_tstamp)
+    if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
         tp->rx_opt.rcv_tsecr -= tp->tsoffset;
 
     return true;

@@ -5316,7 +5319,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
     int saved_clamp = tp->rx_opt.mss_clamp;
 
     tcp_parse_options(skb, &tp->rx_opt, 0, &foc);
-    if (tp->rx_opt.saw_tstamp)
+    if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
         tp->rx_opt.rcv_tsecr -= tp->tsoffset;
 
     if (th->ack) {

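Editor's note: all three tcp_input.c guards exist because rcv_tsecr == 0 conventionally means "the peer has echoed no timestamp yet"; subtracting the per-connection tsoffset from that zero would manufacture a bogus nonzero echo and corrupt RTT sampling. A tiny standalone sketch of the guard:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t tsoffset = 1234567;    /* per-connection randomization */
        uint32_t raw_tsecr = 0;         /* nothing echoed yet */
        uint32_t tsecr = raw_tsecr ? raw_tsecr - tsoffset : 0;

        printf("rcv_tsecr = %u\n", tsecr);  /* stays 0, not a huge wraparound */
        return 0;
    }
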
@@ -2670,7 +2670,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
     int tcp_header_size;
     int mss;
 
-    skb = alloc_skb(MAX_TCP_HEADER + 15, sk_gfp_atomic(sk, GFP_ATOMIC));
+    skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
     if (unlikely(!skb)) {
         dst_release(dst);
         return NULL;

@@ -2814,6 +2814,8 @@ void tcp_connect_init(struct sock *sk)
 
     if (likely(!tp->repair))
         tp->rcv_nxt = 0;
+    else
+        tp->rcv_tstamp = tcp_time_stamp;
     tp->rcv_wup = tp->rcv_nxt;
     tp->copied_seq = tp->rcv_nxt;
 

@@ -21,7 +21,6 @@
 static int xfrm4_tunnel_check_size(struct sk_buff *skb)
 {
     int mtu, ret = 0;
-    struct dst_entry *dst;
 
     if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
         goto out;

@@ -29,12 +28,10 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb)
     if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df)
         goto out;
 
-    dst = skb_dst(skb);
-    mtu = dst_mtu(dst);
+    mtu = dst_mtu(skb_dst(skb));
     if (skb->len > mtu) {
         if (skb->sk)
-            ip_local_error(skb->sk, EMSGSIZE, ip_hdr(skb)->daddr,
-                       inet_sk(skb->sk)->inet_dport, mtu);
+            xfrm_local_error(skb, mtu);
         else
             icmp_send(skb, ICMP_DEST_UNREACH,
                   ICMP_FRAG_NEEDED, htonl(mtu));

@@ -99,3 +96,12 @@ int xfrm4_output(struct sk_buff *skb)
                 x->outer_mode->afinfo->output_finish,
                 !(IPCB(skb)->flags & IPSKB_REROUTED));
 }
+
+void xfrm4_local_error(struct sk_buff *skb, u32 mtu)
+{
+    struct iphdr *hdr;
+
+    hdr = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+    ip_local_error(skb->sk, EMSGSIZE, hdr->daddr,
+               inet_sk(skb->sk)->inet_dport, mtu);
+}

@@ -83,6 +83,7 @@ static struct xfrm_state_afinfo xfrm4_state_afinfo = {
     .extract_input      = xfrm4_extract_input,
     .extract_output     = xfrm4_extract_output,
     .transport_finish   = xfrm4_transport_finish,
+    .local_error        = xfrm4_local_error,
 };
 
 void __init xfrm4_state_init(void)

@@ -724,6 +724,11 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
         ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
     }
 
+    if (likely(!skb->encapsulation)) {
+        skb_reset_inner_headers(skb);
+        skb->encapsulation = 1;
+    }
+
     skb_push(skb, gre_hlen);
     skb_reset_network_header(skb);
     skb_set_transport_header(skb, sizeof(*ipv6h));

@@ -238,6 +238,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
     hdr->saddr = fl6->saddr;
     hdr->daddr = *first_hop;
 
+    skb->protocol = htons(ETH_P_IPV6);
     skb->priority = sk->sk_priority;
     skb->mark = sk->sk_mark;
 

@@ -1057,6 +1058,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
         /* initialize protocol header pointer */
         skb->transport_header = skb->network_header + fragheaderlen;
 
+        skb->protocol = htons(ETH_P_IPV6);
         skb->ip_summed = CHECKSUM_PARTIAL;
         skb->csum = 0;
     }

@@ -1359,6 +1361,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
             /*
              *  Fill in the control structures
              */
+            skb->protocol = htons(ETH_P_IPV6);
             skb->ip_summed = CHECKSUM_NONE;
             skb->csum = 0;
             /* reserve for fragmentation and ipsec header */

@@ -1027,6 +1027,12 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
         init_tel_txopt(&opt, encap_limit);
         ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
     }
+
+    if (likely(!skb->encapsulation)) {
+        skb_reset_inner_headers(skb);
+        skb->encapsulation = 1;
+    }
+
     skb_push(skb, sizeof(struct ipv6hdr));
     skb_reset_network_header(skb);
     ipv6h = ipv6_hdr(skb);

@@ -628,6 +628,7 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
         goto error;
     skb_reserve(skb, hlen);
 
+    skb->protocol = htons(ETH_P_IPV6);
     skb->priority = sk->sk_priority;
     skb->mark = sk->sk_mark;
     skb_dst_set(skb, &rt->dst);

@@ -645,11 +645,7 @@ static int ipip_rcv(struct sk_buff *skb)
     const struct iphdr *iph;
     struct ip_tunnel *tunnel;
 
-    if (iptunnel_pull_header(skb, 0, tpi.proto))
-        goto drop;
-
     iph = ip_hdr(skb);
-
     tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
                      iph->saddr, iph->daddr);
     if (tunnel != NULL) {

@@ -659,6 +655,8 @@ static int ipip_rcv(struct sk_buff *skb)
 
         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
             goto drop;
+        if (iptunnel_pull_header(skb, 0, tpi.proto))
+            goto drop;
         return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error);
     }
 

@@ -888,6 +886,11 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
         ttl = iph6->hop_limit;
     tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
 
+    if (likely(!skb->encapsulation)) {
+        skb_reset_inner_headers(skb);
+        skb->encapsulation = 1;
+    }
+
     err = iptunnel_xmit(dev_net(dev), rt, skb, fl4.saddr, fl4.daddr,
                 IPPROTO_IPV6, tos, ttl, df);
     iptunnel_xmit_stats(err, &dev->stats, dev->tstats);

@@ -34,8 +34,10 @@ static int xfrm6_local_dontfrag(struct sk_buff *skb)
     struct sock *sk = skb->sk;
 
     if (sk) {
-        proto = sk->sk_protocol;
+        if (sk->sk_family != AF_INET6)
+            return 0;
 
+        proto = sk->sk_protocol;
         if (proto == IPPROTO_UDP || proto == IPPROTO_RAW)
             return inet6_sk(sk)->dontfrag;
     }

@@ -54,13 +56,15 @@ static void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu)
     ipv6_local_rxpmtu(sk, &fl6, mtu);
 }
 
-static void xfrm6_local_error(struct sk_buff *skb, u32 mtu)
+void xfrm6_local_error(struct sk_buff *skb, u32 mtu)
 {
     struct flowi6 fl6;
+    const struct ipv6hdr *hdr;
     struct sock *sk = skb->sk;
 
+    hdr = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
     fl6.fl6_dport = inet_sk(sk)->inet_dport;
-    fl6.daddr = ipv6_hdr(skb)->daddr;
+    fl6.daddr = hdr->daddr;
 
     ipv6_local_error(sk, EMSGSIZE, &fl6, mtu);
 }

@@ -80,7 +84,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
         if (xfrm6_local_dontfrag(skb))
             xfrm6_local_rxpmtu(skb, mtu);
         else if (skb->sk)
-            xfrm6_local_error(skb, mtu);
+            xfrm_local_error(skb, mtu);
         else
             icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
         ret = -EMSGSIZE;

@@ -136,13 +140,18 @@ static int __xfrm6_output(struct sk_buff *skb)
 {
     struct dst_entry *dst = skb_dst(skb);
     struct xfrm_state *x = dst->xfrm;
-    int mtu = ip6_skb_dst_mtu(skb);
+    int mtu;
+
+    if (skb->protocol == htons(ETH_P_IPV6))
+        mtu = ip6_skb_dst_mtu(skb);
+    else
+        mtu = dst_mtu(skb_dst(skb));
 
     if (skb->len > mtu && xfrm6_local_dontfrag(skb)) {
         xfrm6_local_rxpmtu(skb, mtu);
         return -EMSGSIZE;
     } else if (!skb->local_df && skb->len > mtu && skb->sk) {
-        xfrm6_local_error(skb, mtu);
+        xfrm_local_error(skb, mtu);
         return -EMSGSIZE;
     }
 

@@ -183,6 +183,7 @@ static struct xfrm_state_afinfo xfrm6_state_afinfo = {
     .extract_input      = xfrm6_extract_input,
     .extract_output     = xfrm6_extract_output,
     .transport_finish   = xfrm6_transport_finish,
+    .local_error        = xfrm6_local_error,
 };
 
 int __init xfrm6_state_init(void)

@@ -36,7 +36,7 @@
 
 static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
                       const u8 *bssid, const int beacon_int,
-                      struct ieee80211_channel *chan,
+                      struct cfg80211_chan_def *req_chandef,
                       const u32 basic_rates,
                       const u16 capability, u64 tsf,
                       bool creator)

@@ -51,6 +51,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
     u32 bss_change;
     u8 supp_rates[IEEE80211_MAX_SUPP_RATES];
     struct cfg80211_chan_def chandef;
+    struct ieee80211_channel *chan;
     struct beacon_data *presp;
     int frame_len;
 

@@ -81,7 +82,9 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 
     sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0;
 
-    chandef = ifibss->chandef;
+    /* make a copy of the chandef, it could be modified below. */
+    chandef = *req_chandef;
+    chan = chandef.chan;
     if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) {
         chandef.width = NL80211_CHAN_WIDTH_20;
         chandef.center_freq1 = chan->center_freq;

@@ -259,10 +262,12 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
     struct cfg80211_bss *cbss =
         container_of((void *)bss, struct cfg80211_bss, priv);
     struct ieee80211_supported_band *sband;
+    struct cfg80211_chan_def chandef;
     u32 basic_rates;
     int i, j;
     u16 beacon_int = cbss->beacon_interval;
     const struct cfg80211_bss_ies *ies;
+    enum nl80211_channel_type chan_type;
     u64 tsf;
 
     sdata_assert_lock(sdata);

@@ -270,6 +275,26 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
     if (beacon_int < 10)
         beacon_int = 10;
 
+    switch (sdata->u.ibss.chandef.width) {
+    case NL80211_CHAN_WIDTH_20_NOHT:
+    case NL80211_CHAN_WIDTH_20:
+    case NL80211_CHAN_WIDTH_40:
+        chan_type = cfg80211_get_chandef_type(&sdata->u.ibss.chandef);
+        cfg80211_chandef_create(&chandef, cbss->channel, chan_type);
+        break;
+    case NL80211_CHAN_WIDTH_5:
+    case NL80211_CHAN_WIDTH_10:
+        cfg80211_chandef_create(&chandef, cbss->channel,
+                    NL80211_CHAN_WIDTH_20_NOHT);
+        chandef.width = sdata->u.ibss.chandef.width;
+        break;
+    default:
+        /* fall back to 20 MHz for unsupported modes */
+        cfg80211_chandef_create(&chandef, cbss->channel,
+                    NL80211_CHAN_WIDTH_20_NOHT);
+        break;
+    }
+
     sband = sdata->local->hw.wiphy->bands[cbss->channel->band];
 
     basic_rates = 0;

@@ -294,7 +319,7 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 
     __ieee80211_sta_join_ibss(sdata, cbss->bssid,
                   beacon_int,
-                  cbss->channel,
+                  &chandef,
                   basic_rates,
                   cbss->capability,
                   tsf, false);

@@ -736,7 +761,7 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
     sdata->drop_unencrypted = 0;
 
     __ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int,
-                  ifibss->chandef.chan, ifibss->basic_rates,
+                  &ifibss->chandef, ifibss->basic_rates,
                   capability, 0, true);
 }
 

@@ -1138,6 +1163,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
     clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
     ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
                         BSS_CHANGED_IBSS);
+    ieee80211_vif_release_channel(sdata);
     synchronize_rcu();
     kfree(presp);
 

@@ -828,6 +828,9 @@ minstrel_ht_update_cck(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
     if (sband->band != IEEE80211_BAND_2GHZ)
         return;
 
+    if (!(mp->hw->flags & IEEE80211_HW_SUPPORTS_HT_CCK_RATES))
+        return;
+
     mi->cck_supported = 0;
     mi->cck_supported_short = 0;
     for (i = 0; i < 4; i++) {

@@ -364,7 +364,7 @@ int genl_unregister_ops(struct genl_family *family, struct genl_ops *ops)
 EXPORT_SYMBOL(genl_unregister_ops);
 
 /**
- * genl_register_family - register a generic netlink family
+ * __genl_register_family - register a generic netlink family
  * @family: generic netlink family
  *
  * Registers the specified family after validating it first. Only one

@@ -374,7 +374,7 @@ EXPORT_SYMBOL(genl_unregister_ops);
  *
  * Return 0 on success or a negative error code.
  */
-int genl_register_family(struct genl_family *family)
+int __genl_register_family(struct genl_family *family)
 {
     int err = -EINVAL;
 

@@ -430,10 +430,10 @@ int genl_register_family(struct genl_family *family)
 errout:
     return err;
 }
-EXPORT_SYMBOL(genl_register_family);
+EXPORT_SYMBOL(__genl_register_family);
 
 /**
- * genl_register_family_with_ops - register a generic netlink family
+ * __genl_register_family_with_ops - register a generic netlink family
  * @family: generic netlink family
  * @ops: operations to be registered
  * @n_ops: number of elements to register

@@ -457,12 +457,12 @@ EXPORT_SYMBOL(genl_register_family);
  *
  * Return 0 on success or a negative error code.
  */
-int genl_register_family_with_ops(struct genl_family *family,
+int __genl_register_family_with_ops(struct genl_family *family,
     struct genl_ops *ops, size_t n_ops)
 {
     int err, i;
 
-    err = genl_register_family(family);
+    err = __genl_register_family(family);
     if (err)
         return err;
 

@@ -476,7 +476,7 @@ int genl_register_family_with_ops(struct genl_family *family,
         genl_unregister_family(family);
     return err;
 }
-EXPORT_SYMBOL(genl_register_family_with_ops);
+EXPORT_SYMBOL(__genl_register_family_with_ops);
 
 /**
  * genl_unregister_family - unregister generic netlink family

@@ -544,6 +544,30 @@ void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
 }
 EXPORT_SYMBOL(genlmsg_put);
 
+static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+    struct genl_ops *ops = cb->data;
+    int rc;
+
+    genl_lock();
+    rc = ops->dumpit(skb, cb);
+    genl_unlock();
+    return rc;
+}
+
+static int genl_lock_done(struct netlink_callback *cb)
+{
+    struct genl_ops *ops = cb->data;
+    int rc = 0;
+
+    if (ops->done) {
+        genl_lock();
+        rc = ops->done(cb);
+        genl_unlock();
+    }
+    return rc;
+}
+
 static int genl_family_rcv_msg(struct genl_family *family,
                    struct sk_buff *skb,
                    struct nlmsghdr *nlh)

@@ -572,15 +596,34 @@ static int genl_family_rcv_msg(struct genl_family *family,
         return -EPERM;
 
     if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
-        struct netlink_dump_control c = {
-            .dump = ops->dumpit,
-            .done = ops->done,
-        };
+        int rc;
 
         if (ops->dumpit == NULL)
             return -EOPNOTSUPP;
 
-        return netlink_dump_start(net->genl_sock, skb, nlh, &c);
+        if (!family->parallel_ops) {
+            struct netlink_dump_control c = {
+                .module = family->module,
+                .data = ops,
+                .dump = genl_lock_dumpit,
+                .done = genl_lock_done,
+            };
+
+            genl_unlock();
+            rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
+            genl_lock();
+
+        } else {
+            struct netlink_dump_control c = {
+                .module = family->module,
+                .dump = ops->dumpit,
+                .done = ops->done,
+            };
+
+            rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
+        }
+
+        return rc;
     }
 
     if (ops->doit == NULL)

@@ -207,10 +207,13 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base,
         pgfrom_base -= copy;
 
         vto = kmap_atomic(*pgto);
-        vfrom = kmap_atomic(*pgfrom);
-        memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
+        if (*pgto != *pgfrom) {
+            vfrom = kmap_atomic(*pgfrom);
+            memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
+            kunmap_atomic(vfrom);
+        } else
+            memmove(vto + pgto_base, vto + pgfrom_base, copy);
         flush_dcache_page(*pgto);
-        kunmap_atomic(vfrom);
         kunmap_atomic(vto);
 
     } while ((len -= copy) != 0);

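Editor's note: the xdr fix addresses two problems at once. Mapping the same page twice with kmap_atomic() when source and destination coincide is unnecessary, and once both ranges live in one mapping the copy may overlap -- which memcpy() does not allow but memmove() does. A runnable userspace illustration of the overlap rule:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char a[] = "abcdef";

        /* Overlapping ranges: well-defined only with memmove(). */
        memmove(a + 2, a, 4);       /* a becomes "ababcd" */
        /* memcpy(a + 2, a, 4) here would be undefined behavior. */
        printf("%s\n", a);
        return 0;
    }
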
@@ -1257,7 +1257,7 @@ static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
        /* Accept only ACK or NACK message */
        if (unlikely(msg_errcode(msg))) {
            sock->state = SS_DISCONNECTING;
-           sk->sk_err = -ECONNREFUSED;
+           sk->sk_err = ECONNREFUSED;
            retval = TIPC_OK;
            break;
        }

@@ -1268,7 +1268,7 @@ static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
        res = auto_connect(sock, msg);
        if (res) {
            sock->state = SS_DISCONNECTING;
-           sk->sk_err = res;
+           sk->sk_err = -res;
            retval = TIPC_OK;
            break;
        }
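Both hunks fix the same sign error: sk->sk_err stores a positive errno value, while kernel helpers such as auto_connect() return zero on success or a negative errno on failure, so the constant must be stored unnegated and the returned code negated. A small sketch of the convention, with hypothetical names:

/* sk_err-style convention: helpers return negative errno, the error
 * field stores the positive value. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

/* A helper in the usual kernel style: 0 on success, -errno on failure. */
static int try_connect(int should_fail)
{
    return should_fail ? -ECONNREFUSED : 0;
}

int main(void)
{
    int sk_err = 0;         /* stores a POSITIVE errno, like sk->sk_err */
    int res = try_connect(1);

    if (res)
        sk_err = -res;      /* -(-ECONNREFUSED) == ECONNREFUSED */

    printf("sk_err=%d (%s)\n", sk_err, strerror(sk_err));
    return 0;
}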
@@ -214,5 +214,26 @@ int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb)
    return inner_mode->afinfo->extract_output(x, skb);
}

+void xfrm_local_error(struct sk_buff *skb, int mtu)
+{
+   unsigned int proto;
+   struct xfrm_state_afinfo *afinfo;
+
+   if (skb->protocol == htons(ETH_P_IP))
+       proto = AF_INET;
+   else if (skb->protocol == htons(ETH_P_IPV6))
+       proto = AF_INET6;
+   else
+       return;
+
+   afinfo = xfrm_state_get_afinfo(proto);
+   if (!afinfo)
+       return;
+
+   afinfo->local_error(skb, mtu);
+   xfrm_state_put_afinfo(afinfo);
+}
+
EXPORT_SYMBOL_GPL(xfrm_output);
EXPORT_SYMBOL_GPL(xfrm_inner_extract_output);
+EXPORT_SYMBOL_GPL(xfrm_local_error);
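The new xfrm_local_error() maps the skb's ethertype to an address family and forwards the MTU error to that family's local_error handler from the per-family afinfo table (which is why the get/put helpers lose their static qualifier in the xfrm_state.c hunks below). A minimal sketch of the same dispatch-table idea — hypothetical names, a plain array standing in for the RCU-protected afinfo table:

/* Per-address-family dispatch as used by xfrm_local_error(): map a
 * protocol tag to a family, then call that family's handler from a
 * lookup table. */
#include <stdio.h>

enum family { FAM_INET, FAM_INET6, FAM_MAX };

struct af_ops {
    void (*local_error)(int mtu);
};

static void v4_local_error(int mtu) { printf("IPv4: mtu %d too big\n", mtu); }
static void v6_local_error(int mtu) { printf("IPv6: mtu %d too big\n", mtu); }

static const struct af_ops v4_ops = { .local_error = v4_local_error };
static const struct af_ops v6_ops = { .local_error = v6_local_error };

static const struct af_ops *af_table[FAM_MAX] = {
    [FAM_INET]  = &v4_ops,
    [FAM_INET6] = &v6_ops,
};

static void local_error(enum family fam, int mtu)
{
    const struct af_ops *ops;

    if (fam >= FAM_MAX)
        return;
    ops = af_table[fam];    /* like xfrm_state_get_afinfo() */
    if (!ops)
        return;
    ops->local_error(mtu);  /* family-specific report */
}

int main(void)
{
    local_error(FAM_INET, 1400);
    local_error(FAM_INET6, 1280);
    return 0;
}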
@@ -320,10 +320,8 @@ static void xfrm_queue_purge(struct sk_buff_head *list)
{
    struct sk_buff *skb;

-   while ((skb = skb_dequeue(list)) != NULL) {
-       dev_put(skb->dev);
+   while ((skb = skb_dequeue(list)) != NULL)
        kfree_skb(skb);
-   }
}

/* Rule must be locked. Release descentant resources, announce

@@ -1758,7 +1756,6 @@ static void xfrm_policy_queue_process(unsigned long arg)
    struct sk_buff *skb;
    struct sock *sk;
    struct dst_entry *dst;
-   struct net_device *dev;
    struct xfrm_policy *pol = (struct xfrm_policy *)arg;
    struct xfrm_policy_queue *pq = &pol->polq;
    struct flowi fl;

@@ -1805,7 +1802,6 @@ static void xfrm_policy_queue_process(unsigned long arg)
        dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path,
                  &fl, skb->sk, 0);
        if (IS_ERR(dst)) {
-           dev_put(skb->dev);
            kfree_skb(skb);
            continue;
        }

@@ -1814,9 +1810,7 @@ static void xfrm_policy_queue_process(unsigned long arg)
        skb_dst_drop(skb);
        skb_dst_set(skb, dst);

-       dev = skb->dev;
        err = dst_output(skb);
-       dev_put(dev);
    }

    return;

@@ -1839,7 +1833,6 @@ static int xdst_queue_output(struct sk_buff *skb)
    }

    skb_dst_force(skb);
-   dev_hold(skb->dev);

    spin_lock_bh(&pq->hold_queue.lock);
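These hunks drop the per-skb device refcount: xdst_queue_output() took dev_hold() on every queued packet and each dequeue path then had to remember the matching dev_put(), yet skb_dst_force() already pins the dst, which itself holds the device. A toy refcount model of that reasoning, with hypothetical names and plain counters:

/* If each queued item already pins a parent object (the dst) that in
 * turn holds the device, an extra per-item hold on the device is
 * redundant, and every exit path must remember the matching put. */
#include <assert.h>
#include <stdio.h>

struct dev { int refs; };
struct dst { struct dev *dev; int refs; };

static void dev_hold(struct dev *d) { d->refs++; }
static void dev_put(struct dev *d)  { d->refs--; }

static void dst_hold(struct dst *d) { d->refs++; dev_hold(d->dev); }
static void dst_put(struct dst *d)  { dev_put(d->dev); d->refs--; }

int main(void)
{
    struct dev eth0  = { .refs = 0 };
    struct dst route = { .dev = &eth0, .refs = 0 };

    /* Queueing: pinning the dst is enough -- it keeps eth0 alive. */
    dst_hold(&route);
    printf("queued: dev refs=%d\n", eth0.refs);  /* 1, via the dst */

    /* Dequeue/free: one put, no separate dev_put to forget. */
    dst_put(&route);
    assert(eth0.refs == 0);
    printf("purged: dev refs=%d\n", eth0.refs);
    return 0;
}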
@@ -39,9 +39,6 @@ static DEFINE_SPINLOCK(xfrm_state_lock);

static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;

-static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
-static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
-
static inline unsigned int xfrm_dst_hash(struct net *net,
                     const xfrm_address_t *daddr,
                     const xfrm_address_t *saddr,

@@ -1860,7 +1857,7 @@ int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
}
EXPORT_SYMBOL(xfrm_state_unregister_afinfo);

-static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
+struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
{
    struct xfrm_state_afinfo *afinfo;
    if (unlikely(family >= NPROTO))

@@ -1872,7 +1869,7 @@ static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
    return afinfo;
}

-static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
+void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
{
    rcu_read_unlock();
}
@@ -173,11 +173,7 @@ MODULE_DEVICE_TABLE(pnp_card, snd_opti9xx_pnpids);

#endif  /* CONFIG_PNP */

-#ifdef OPTi93X
-#define DEV_NAME "opti93x"
-#else
-#define DEV_NAME "opti92x"
-#endif
+#define DEV_NAME KBUILD_MODNAME

static char * snd_opti9xx_names[] = {
    "unknown",

@@ -1167,7 +1163,7 @@ static int snd_opti9xx_pnp_resume(struct pnp_card_link *pcard)

static struct pnp_card_driver opti9xx_pnpc_driver = {
    .flags = PNP_DRIVER_RES_DISABLE,
-   .name = "opti9xx",
+   .name = DEV_NAME,
    .id_table = snd_opti9xx_pnpids,
    .probe = snd_opti9xx_pnp_probe,
    .remove = snd_opti9xx_pnp_remove,
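KBUILD_MODNAME is injected per object by Kbuild, so a single definition replaces the OPTi93X #ifdef ladder and the PnP driver name always matches the module name. The same let-the-build-system-inject-the-name technique outside the kernel, with a hypothetical BUILD_NAME macro as stand-in:

/* Kbuild passes -DKBUILD_MODNAME=... per object; here a plain -D serves
 * as stand-in. Build: cc -DBUILD_NAME='"opti92x"' demo.c */
#include <stdio.h>

#ifndef BUILD_NAME
#define BUILD_NAME "default"    /* fallback when the flag is absent */
#endif

#define DEV_NAME BUILD_NAME     /* one definition, no per-variant #ifdefs */

int main(void)
{
    printf("driver name: %s\n", DEV_NAME);
    return 0;
}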
@@ -1798,6 +1798,9 @@ static int generic_hdmi_build_controls(struct hda_codec *codec)
        struct snd_pcm_chmap *chmap;
        struct snd_kcontrol *kctl;
        int i;
+
+       if (!codec->pcm_info[pin_idx].pcm)
+           break;
        err = snd_pcm_add_chmap_ctls(codec->pcm_info[pin_idx].pcm,
                         SNDRV_PCM_STREAM_PLAYBACK,
                         NULL, 0, pin_idx, &chmap);
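The added guard skips pins for which no PCM device was ever created — codec->pcm_info[pin_idx].pcm is NULL then, and snd_pcm_add_chmap_ctls() would dereference it. The shape of the fix in a self-contained sketch, hypothetical names throughout:

/* When iterating slots where some entries may never have been
 * instantiated, test for NULL before handing the entry to a helper
 * that assumes it exists. */
#include <stddef.h>
#include <stdio.h>

struct pcm { const char *name; };

static int add_chmap_ctls(struct pcm *pcm)
{
    /* would crash on NULL, like the helper in the driver */
    printf("adding chmap controls for %s\n", pcm->name);
    return 0;
}

int main(void)
{
    struct pcm hdmi0 = { "HDMI 0" };
    /* slot 1 never got a PCM device, e.g. no free device numbers */
    struct pcm *pcm_info[] = { &hdmi0, NULL };

    for (size_t i = 0; i < 2; i++) {
        if (!pcm_info[i])   /* the added guard */
            break;
        add_chmap_ctls(pcm_info[i]);
    }
    return 0;
}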
Some files were not shown because too many files have changed in this diff.