Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2014-07-30 13:25:49 -07:00
commit f139c74a8d
136 changed files with 1150 additions and 472 deletions


@ -281,6 +281,19 @@ gestures can normally be extracted from it.
If INPUT_PROP_SEMI_MT is not set, the device is assumed to be a true MT
device.
INPUT_PROP_TOPBUTTONPAD:
-----------------------
Some laptops, most notably the Lenovo *40 series, provide a trackstick
device but do not have physical buttons associated with the trackstick
device. Instead, the top area of the touchpad is marked to show
visual/haptic areas for left, middle, right buttons intended to be used
with the trackstick.
If INPUT_PROP_TOPBUTTONPAD is set, userspace should emulate buttons
accordingly. This property does not affect kernel behavior.
The kernel does not provide button emulation for such devices but treats
them as any other INPUT_PROP_BUTTONPAD device.
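For illustration only (not part of this patch), a minimal userspace sketch
that reads the property bitmap with the EVIOCGPROP ioctl and tests the
INPUT_PROP_TOPBUTTONPAD bit; the device path is a placeholder, real code
would scan /dev/input for the touchpad:

  #include <fcntl.h>
  #include <stdio.h>
  #include <sys/ioctl.h>
  #include <linux/input.h>

  int main(void)
  {
          unsigned char props[(INPUT_PROP_MAX + 7) / 8] = { 0 };
          int fd = open("/dev/input/event0", O_RDONLY); /* placeholder path */

          if (fd < 0 || ioctl(fd, EVIOCGPROP(sizeof(props)), props) < 0)
                  return 1;
          if (props[INPUT_PROP_TOPBUTTONPAD / 8] &
              (1 << (INPUT_PROP_TOPBUTTONPAD % 8)))
                  printf("top button area present; emulate buttons in userspace\n");
          return 0;
  }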
Guidelines:
==========
The guidelines below ensure proper single-touch and multi-finger functionality.


@ -6961,6 +6961,12 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: drivers/pinctrl/pinctrl-at91.c
PIN CONTROLLER - RENESAS
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
L: linux-sh@vger.kernel.org
S: Maintained
F: drivers/pinctrl/sh-pfc/
PIN CONTROLLER - SAMSUNG
M: Tomasz Figa <t.figa@samsung.com>
M: Thomas Abraham <thomas.abraham@linaro.org>


@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 16
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION = -rc7
NAME = Shuffling Zombie Juror
# *DOCUMENTATION*
@ -688,6 +688,8 @@ KBUILD_CFLAGS += -fomit-frame-pointer
endif
endif
KBUILD_CFLAGS += $(call cc-option, -fno-var-tracking-assignments)
ifdef CONFIG_DEBUG_INFO
KBUILD_CFLAGS += -g
KBUILD_AFLAGS += -Wa,-gdwarf-2


@ -73,7 +73,7 @@ amba {
L2: l2-cache {
compatible = "arm,pl310-cache";
reg = <0xfc10000 0x100000>;
reg = <0x100000 0x100000>;
interrupts = <0 15 4>;
cache-unified;
cache-level = <2>;


@ -353,7 +353,7 @@ twl_audio: audio {
};
twl_power: power {
compatible = "ti,twl4030-power-n900", "ti,twl4030-power-idle-osc-off";
compatible = "ti,twl4030-power-n900";
ti,use_poweroff;
};
};


@ -540,9 +540,9 @@ sd1_clk: sd2_clk@e6150078 {
#clock-cells = <0>;
clock-output-names = "sd1";
};
sd2_clk: sd3_clk@e615007c {
sd2_clk: sd3_clk@e615026c {
compatible = "renesas,r8a7791-div6-clock", "renesas,cpg-div6-clock";
reg = <0 0xe615007c 0 4>;
reg = <0 0xe615026c 0 4>;
clocks = <&pll1_div2_clk>;
#clock-cells = <0>;
clock-output-names = "sd2";


@ -137,7 +137,7 @@ static int aesbs_cbc_encrypt(struct blkcipher_desc *desc,
dst += AES_BLOCK_SIZE;
} while (--blocks);
}
err = blkcipher_walk_done(desc, &walk, 0);
err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
return err;
}
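/*
* Rationale (inferred): the third argument to blkcipher_walk_done() is the
* number of bytes left unprocessed. Passing 0 claimed the whole walk was
* consumed, so a trailing partial block was silently dropped; passing
* walk.nbytes % AES_BLOCK_SIZE hands any tail bytes back to the walk code.
*/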
@ -158,7 +158,7 @@ static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
bsaes_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
walk.nbytes, &ctx->dec, walk.iv);
kernel_neon_end();
err = blkcipher_walk_done(desc, &walk, 0);
err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
while (walk.nbytes) {
u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
@ -182,7 +182,7 @@ static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
dst += AES_BLOCK_SIZE;
src += AES_BLOCK_SIZE;
} while (--blocks);
err = blkcipher_walk_done(desc, &walk, 0);
err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
return err;
}
@ -268,7 +268,7 @@ static int aesbs_xts_encrypt(struct blkcipher_desc *desc,
bsaes_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
walk.nbytes, &ctx->enc, walk.iv);
kernel_neon_end();
err = blkcipher_walk_done(desc, &walk, 0);
err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
return err;
}
@ -292,7 +292,7 @@ static int aesbs_xts_decrypt(struct blkcipher_desc *desc,
bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
walk.nbytes, &ctx->dec, walk.iv);
kernel_neon_end();
err = blkcipher_walk_done(desc, &walk, 0);
err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
return err;
}


@ -50,6 +50,7 @@ struct machine_desc {
struct smp_operations *smp; /* SMP operations */
bool (*smp_init)(void);
void (*fixup)(struct tag *, char **);
void (*dt_fixup)(void);
void (*init_meminfo)(void);
void (*reserve)(void);/* reserve mem blocks */
void (*map_io)(void);/* IO mapping function */


@ -212,7 +212,7 @@ const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
mdesc_best = &__mach_desc_GENERIC_DT;
#endif
if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys)))
if (!dt_phys || !early_init_dt_verify(phys_to_virt(dt_phys)))
return NULL;
mdesc = of_flat_dt_match_machine(mdesc_best, arch_get_next_mach);
@ -237,6 +237,12 @@ const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
dump_machine_table(); /* does not return */
}
/* We really don't want to do this, but sometimes firmware provides buggy data */
if (mdesc->dt_fixup)
mdesc->dt_fixup();
early_init_dt_scan_nodes();
/* Change machine number to match the mdesc we're using */
__machine_arch_type = mdesc->nr;


@ -335,6 +335,15 @@ static void __init exynos_reserve(void)
#endif
}
static void __init exynos_dt_fixup(void)
{
/*
* Some versions of uboot pass garbage entries in the memory node;
* cap the bank count at the old CONFIG_ARM_NR_BANKS value (8)
*/
of_fdt_limit_memory(8);
}
DT_MACHINE_START(EXYNOS_DT, "SAMSUNG EXYNOS (Flattened Device Tree)")
/* Maintainer: Thomas Abraham <thomas.abraham@linaro.org> */
/* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */
@ -348,4 +357,5 @@ DT_MACHINE_START(EXYNOS_DT, "SAMSUNG EXYNOS (Flattened Device Tree)")
.dt_compat = exynos_dt_compat,
.restart = exynos_restart,
.reserve = exynos_reserve,
.dt_fixup = exynos_dt_fixup,
MACHINE_END


@ -50,6 +50,16 @@ static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt)
soc_is_omap54xx() || soc_is_dra7xx())
return 1;
if (ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW ||
ecc_opt == OMAP_ECC_BCH8_CODE_HW_DETECTION_SW) {
if (cpu_is_omap24xx())
return 0;
else if (cpu_is_omap3630() && (GET_OMAP_REVISION() == 0))
return 0;
else
return 1;
}
/* OMAP3xxx do not have ELM engine, so cannot support ECC schemes
* which require H/W based ECC error detection */
if ((cpu_is_omap34xx() || cpu_is_omap3630()) &&
@ -57,14 +67,6 @@ static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt)
(ecc_opt == OMAP_ECC_BCH8_CODE_HW)))
return 0;
/*
* For now, assume 4-bit mode is only supported on OMAP3630 ES1.x, x>=1
* and AM33xx derivates. Other chips may be added if confirmed to work.
*/
if ((ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW) &&
(!cpu_is_omap3630() || (GET_OMAP_REVISION() == 0)))
return 0;
/* legacy platforms support only HAM1 (1-bit Hamming) ECC scheme */
if (ecc_opt == OMAP_ECC_HAM1_CODE_HW)
return 1;


@ -168,6 +168,10 @@ static void omap4_l2c310_write_sec(unsigned long val, unsigned reg)
smc_op = OMAP4_MON_L2X0_PREFETCH_INDEX;
break;
case L310_POWER_CTRL:
pr_info_once("OMAP L2C310: ROM does not support power control setting\n");
return;
default:
WARN_ONCE(1, "OMAP L2C310: ignoring write to reg 0x%x\n", reg);
return;


@ -51,3 +51,8 @@ int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
{
return -ENOSYS;
}
int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status)
{
return 0;
}


@ -106,7 +106,7 @@ static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
(u8 *)ctx->key_enc, rounds, blocks, first);
err = blkcipher_walk_done(desc, &walk, 0);
err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
kernel_neon_end();
return err;
@ -128,7 +128,7 @@ static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
(u8 *)ctx->key_dec, rounds, blocks, first);
err = blkcipher_walk_done(desc, &walk, 0);
err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
kernel_neon_end();
return err;
@ -151,7 +151,7 @@ static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
(u8 *)ctx->key_enc, rounds, blocks, walk.iv,
first);
err = blkcipher_walk_done(desc, &walk, 0);
err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
kernel_neon_end();
return err;
@ -174,7 +174,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
(u8 *)ctx->key_dec, rounds, blocks, walk.iv,
first);
err = blkcipher_walk_done(desc, &walk, 0);
err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
kernel_neon_end();
return err;
@ -243,7 +243,7 @@ static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
(u8 *)ctx->key1.key_enc, rounds, blocks,
(u8 *)ctx->key2.key_enc, walk.iv, first);
err = blkcipher_walk_done(desc, &walk, 0);
err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
kernel_neon_end();
@ -267,7 +267,7 @@ static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
(u8 *)ctx->key1.key_dec, rounds, blocks,
(u8 *)ctx->key2.key_enc, walk.iv, first);
err = blkcipher_walk_done(desc, &walk, 0);
err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
kernel_neon_end();


@ -60,6 +60,17 @@ static int __init early_initrd(char *p)
early_param("initrd", early_initrd);
#endif
/*
* Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
* currently assumes that for memory starting above 4G, 32-bit devices will
* use a DMA offset.
*/
static phys_addr_t max_zone_dma_phys(void)
{
phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
return min(offset + (1ULL << 32), memblock_end_of_DRAM());
}
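/*
* Worked example (hypothetical values): if DRAM starts at 0x8000000000,
* offset = 0x8000000000 and the limit becomes min(0x8100000000,
* memblock_end_of_DRAM()), i.e. the first 4GB of RAM as seen by 32-bit
* devices through their DMA offset.
*/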
static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
struct memblock_region *reg;
@ -70,9 +81,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
/* 4GB maximum for 32-bit only capable devices */
if (IS_ENABLED(CONFIG_ZONE_DMA)) {
unsigned long max_dma_phys =
(unsigned long)(dma_to_phys(NULL, DMA_BIT_MASK(32)) + 1);
max_dma = max(min, min(max, max_dma_phys >> PAGE_SHIFT));
max_dma = PFN_DOWN(max_zone_dma_phys());
zone_size[ZONE_DMA] = max_dma - min;
}
zone_size[ZONE_NORMAL] = max - max_dma;
@ -146,7 +155,7 @@ void __init arm64_memblock_init(void)
/* 4GB maximum for 32-bit only capable devices */
if (IS_ENABLED(CONFIG_ZONE_DMA))
dma_phys_limit = dma_to_phys(NULL, DMA_BIT_MASK(32)) + 1;
dma_phys_limit = max_zone_dma_phys();
dma_contiguous_reserve(dma_phys_limit);
memblock_allow_resize();


@ -102,7 +102,7 @@ CONFIG_I2C_CHARDEV=y
CONFIG_I2C_BLACKFIN_TWI=y
CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
CONFIG_SPI=y
CONFIG_SPI_BFIN_V3=y
CONFIG_SPI_ADI_V3=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
# CONFIG_HWMON is not set


@ -145,7 +145,7 @@ SECTIONS
.text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
#else
.init.data : AT(__data_lma + __data_len)
.init.data : AT(__data_lma + __data_len + 32)
{
__sinitdata = .;
INIT_DATA


@ -20,6 +20,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>


@ -21,6 +21,7 @@
#endif
#include <linux/ata_platform.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/portmux.h>


@ -21,6 +21,7 @@
#endif
#include <linux/ata_platform.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/portmux.h>


@ -21,6 +21,7 @@
#endif
#include <linux/ata_platform.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/portmux.h>


@ -2118,7 +2118,7 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary", "pinctrl-adi2.0", NULL, "rotary"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0", "pinctrl-adi2.0", NULL, "can0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.1", "pinctrl-adi2.0", NULL, "can1"),
PIN_MAP_MUX_GROUP_DEFAULT("bf54x-lq043", "pinctrl-adi2.0", NULL, "ppi0_24b"),
PIN_MAP_MUX_GROUP_DEFAULT("bf54x-lq043", "pinctrl-adi2.0", "ppi0_24bgrp", "ppi0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0", "pinctrl-adi2.0", NULL, "sport0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0", "pinctrl-adi2.0", NULL, "sport0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin-ac97.0", "pinctrl-adi2.0", NULL, "sport0"),
@ -2140,7 +2140,9 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
PIN_MAP_MUX_GROUP_DEFAULT("pata-bf54x", "pinctrl-adi2.0", NULL, "atapi_alter"),
#endif
PIN_MAP_MUX_GROUP_DEFAULT("bf5xx-nand.0", "pinctrl-adi2.0", NULL, "nfc0"),
PIN_MAP_MUX_GROUP_DEFAULT("bf54x-keys", "pinctrl-adi2.0", NULL, "keys_4x4"),
PIN_MAP_MUX_GROUP_DEFAULT("bf54x-keys", "pinctrl-adi2.0", "keys_4x4grp", "keys"),
PIN_MAP_MUX_GROUP("bf54x-keys", "4bit", "pinctrl-adi2.0", "keys_4x4grp", "keys"),
PIN_MAP_MUX_GROUP("bf54x-keys", "8bit", "pinctrl-adi2.0", "keys_8x8grp", "keys"),
};
static int __init ezkit_init(void)


@ -44,6 +44,7 @@
#include <linux/spi/flash.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <linux/jiffies.h>
#include <linux/i2c-pca-platform.h>
#include <linux/delay.h>


@ -18,6 +18,7 @@
#endif
#include <linux/ata_platform.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/portmux.h>


@ -14,6 +14,7 @@
#include <linux/spi/spi.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>


@ -698,8 +698,6 @@ int bf609_nor_flash_init(struct platform_device *pdev)
{
#define CONFIG_SMC_GCTL_VAL 0x00000010
if (!devm_pinctrl_get_select_default(&pdev->dev))
return -EBUSY;
bfin_write32(SMC_GCTL, CONFIG_SMC_GCTL_VAL);
bfin_write32(SMC_B0CTL, 0x01002011);
bfin_write32(SMC_B0TIM, 0x08170977);
@ -709,7 +707,6 @@ int bf609_nor_flash_init(struct platform_device *pdev)
void bf609_nor_flash_exit(struct platform_device *pdev)
{
devm_pinctrl_put(pdev->dev.pins->p);
bfin_write32(SMC_GCTL, 0);
}
@ -2058,15 +2055,14 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary", "pinctrl-adi2.0", NULL, "rotary"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0", "pinctrl-adi2.0", NULL, "can0"),
PIN_MAP_MUX_GROUP_DEFAULT("physmap-flash.0", "pinctrl-adi2.0", NULL, "smc0"),
PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2", "pinctrl-adi2.0", NULL, "ppi2_16b"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0", "pinctrl-adi2.0", NULL, "ppi0_16b"),
#if IS_ENABLED(CONFIG_VIDEO_MT9M114)
PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_8b"),
#elif IS_ENABLED(CONFIG_VIDEO_VS6624)
PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_16b"),
#else
PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_24b"),
#endif
PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2", "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
PIN_MAP_MUX_GROUP("bfin_display.0", "8bit", "pinctrl-adi2.0", "ppi2_8bgrp", "ppi2"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0", "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
PIN_MAP_MUX_GROUP("bfin_display.0", "16bit", "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
PIN_MAP_MUX_GROUP("bfin_capture.0", "8bit", "pinctrl-adi2.0", "ppi0_8bgrp", "ppi0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", "ppi0_16bgrp", "ppi0"),
PIN_MAP_MUX_GROUP("bfin_capture.0", "16bit", "pinctrl-adi2.0", "ppi0_16bgrp", "ppi0"),
PIN_MAP_MUX_GROUP("bfin_capture.0", "24bit", "pinctrl-adi2.0", "ppi0_24bgrp", "ppi0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0", "pinctrl-adi2.0", NULL, "sport0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0", "pinctrl-adi2.0", NULL, "sport0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.1", "pinctrl-adi2.0", NULL, "sport1"),


@ -10,6 +10,7 @@
#define __MACH_BF609_PM_H__
#include <linux/suspend.h>
#include <linux/platform_device.h>
extern int bfin609_pm_enter(suspend_state_t state);
extern int bf609_pm_prepare(void);
@ -19,6 +20,6 @@ void bf609_hibernate(void);
void bfin_sec_raise_irq(unsigned int sid);
void coreb_enable(void);
int bf609_nor_flash_init(void);
void bf609_nor_flash_exit(void);
int bf609_nor_flash_init(struct platform_device *pdev);
void bf609_nor_flash_exit(struct platform_device *pdev);
#endif


@ -291,13 +291,13 @@ static struct bfin_cpu_pm_fns bf609_cpu_pm = {
#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
static int smc_pm_syscore_suspend(void)
{
bf609_nor_flash_exit();
bf609_nor_flash_exit(NULL);
return 0;
}
static void smc_pm_syscore_resume(void)
{
bf609_nor_flash_init();
bf609_nor_flash_init(NULL);
}
static struct syscore_ops smc_pm_syscore_ops = {


@ -1208,8 +1208,6 @@ int __init init_arch_irq(void)
bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
/* Enable interrupts IVG7-15 */
bfin_irq_flags |= IMASK_IVG15 |
IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |


@ -69,8 +69,6 @@
#define SA_NOMASK SA_NODEFER
#define SA_ONESHOT SA_RESETHAND
#define SA_RESTORER 0x04000000 /* obsolete -- ignored */
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192


@ -728,7 +728,6 @@ static void __init pagetable_init(void)
#endif
empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
memset(empty_zero_page, 0, PAGE_SIZE);
}
static void __init gateway_init(void)


@ -447,6 +447,7 @@ extern const char *powerpc_base_platform;
CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP)
#define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
#define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \


@ -22,6 +22,7 @@
*/
#include <asm/pgtable-ppc64.h>
#include <asm/bug.h>
#include <asm/processor.h>
/*
* Segment table
@ -496,7 +497,7 @@ extern void slb_set_size(u16 size);
*/
struct subpage_prot_table {
unsigned long maxaddr; /* only addresses < this are protected */
unsigned int **protptrs[2];
unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
unsigned int *low_prot[4];
};
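/*
* Sizing check (illustrative): with TASK_SIZE_USER64 = 2^46 (64TB),
* (TASK_SIZE_USER64 >> 43) gives 8 protptrs entries, each covering 8TB;
* the old hard-coded [2] covered only the first 16TB of user addresses.
*/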


@ -527,6 +527,26 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check_early = __machine_check_early_realmode_p8,
.platform = "power8",
},
{ /* Power8 DD1: Does not support doorbell IPIs */
.pvr_mask = 0xffffff00,
.pvr_value = 0x004d0100,
.cpu_name = "POWER8 (raw)",
.cpu_features = CPU_FTRS_POWER8_DD1,
.cpu_user_features = COMMON_USER_POWER8,
.cpu_user_features2 = COMMON_USER2_POWER8,
.mmu_features = MMU_FTRS_POWER8,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power8",
.oprofile_type = PPC_OPROFILE_INVALID,
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
.flush_tlb = __flush_tlb_power8,
.machine_check_early = __machine_check_early_realmode_p8,
.platform = "power8",
},
{ /* Power8 */
.pvr_mask = 0xffff0000,
.pvr_value = 0x004d0000,


@ -611,17 +611,19 @@ static void rtas_flash_firmware(int reboot_type)
for (f = flist; f; f = next) {
/* Translate data addrs to absolute */
for (i = 0; i < f->num_blocks; i++) {
f->blocks[i].data = (char *)__pa(f->blocks[i].data);
f->blocks[i].data = (char *)cpu_to_be64(__pa(f->blocks[i].data));
image_size += f->blocks[i].length;
f->blocks[i].length = cpu_to_be64(f->blocks[i].length);
}
next = f->next;
/* Don't translate NULL pointer for last entry */
if (f->next)
f->next = (struct flash_block_list *)__pa(f->next);
f->next = (struct flash_block_list *)cpu_to_be64(__pa(f->next));
else
f->next = NULL;
/* make num_blocks into the version/length field */
f->num_blocks = (FLASH_BLOCK_LIST_VERSION << 56) | ((f->num_blocks+1)*16);
f->num_blocks = cpu_to_be64(f->num_blocks);
}
printk(KERN_ALERT "FLASH: flash image is %ld bytes\n", image_size);


@ -77,7 +77,7 @@ _GLOBAL(memset)
stb r4,0(r6)
blr
_GLOBAL(memmove)
_GLOBAL_TOC(memmove)
cmplw 0,r3,r4
bgt backwards_memcpy
b memcpy


@ -1198,7 +1198,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
sh = regs->gpr[rb] & 0x3f;
ival = (signed int) regs->gpr[rd];
regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
if (ival < 0 && (sh >= 32 || (ival & ((1 << sh) - 1)) != 0))
if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
regs->xer |= XER_CA;
else
regs->xer &= ~XER_CA;
@ -1208,7 +1208,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
sh = rb;
ival = (signed int) regs->gpr[rd];
regs->gpr[ra] = ival >> sh;
if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
regs->xer |= XER_CA;
else
regs->xer &= ~XER_CA;
@ -1216,7 +1216,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
#ifdef __powerpc64__
case 27: /* sld */
sh = regs->gpr[rd] & 0x7f;
sh = regs->gpr[rb] & 0x7f;
if (sh < 64)
regs->gpr[ra] = regs->gpr[rd] << sh;
else
@ -1235,7 +1235,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
sh = regs->gpr[rb] & 0x7f;
ival = (signed long int) regs->gpr[rd];
regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
if (ival < 0 && (sh >= 64 || (ival & ((1 << sh) - 1)) != 0))
if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
regs->xer |= XER_CA;
else
regs->xer &= ~XER_CA;
@ -1246,7 +1246,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
sh = rb | ((instr & 2) << 4);
ival = (signed long int) regs->gpr[rd];
regs->gpr[ra] = ival >> sh;
if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
regs->xer |= XER_CA;
else
regs->xer &= ~XER_CA;
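/*
* Why 1ul matters (sketch): sh can be as large as 63 here, and (1 << sh)
* on a 32-bit int is undefined for sh >= 32, so the carry mask was garbage
* for large shifts; ((1ul << sh) - 1) builds the mask in 64-bit arithmetic
* so the XER_CA test sees the bits actually shifted out.
*/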


@ -1307,6 +1307,9 @@ static void power_pmu_enable(struct pmu *pmu)
out_enable:
pmao_restore_workaround(ebb);
if (ppmu->flags & PPMU_ARCH_207S)
mtspr(SPRN_MMCR2, 0);
mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]);
mb();
@ -1315,9 +1318,6 @@ static void power_pmu_enable(struct pmu *pmu)
write_mmcr0(cpuhw, mmcr0);
if (ppmu->flags & PPMU_ARCH_207S)
mtspr(SPRN_MMCR2, 0);
/*
* Enable instruction sampling if necessary
*/


@ -249,7 +249,7 @@ static void elog_work_fn(struct work_struct *work)
rc = opal_get_elog_size(&id, &size, &type);
if (rc != OPAL_SUCCESS) {
pr_err("ELOG: Opal log read failed\n");
pr_err("ELOG: OPAL log info read failed\n");
return;
}
@ -257,7 +257,7 @@ static void elog_work_fn(struct work_struct *work)
log_id = be64_to_cpu(id);
elog_type = be64_to_cpu(type);
BUG_ON(elog_size > OPAL_MAX_ERRLOG_SIZE);
WARN_ON(elog_size > OPAL_MAX_ERRLOG_SIZE);
if (elog_size >= OPAL_MAX_ERRLOG_SIZE)
elog_size = OPAL_MAX_ERRLOG_SIZE;


@ -86,6 +86,7 @@ static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa,
}
of_node_set_flag(dn, OF_DYNAMIC);
of_node_init(dn);
return dn;
}


@ -69,6 +69,7 @@ static int pSeries_reconfig_add_node(const char *path, struct property *proplist
np->properties = proplist;
of_node_set_flag(np, OF_DYNAMIC);
of_node_init(np);
np->parent = derive_parent(path);
if (IS_ERR(np->parent)) {


@ -32,7 +32,8 @@ endif
cflags-$(CONFIG_CPU_SH2) := $(call cc-option,-m2,)
cflags-$(CONFIG_CPU_SH2A) += $(call cc-option,-m2a,) \
$(call cc-option,-m2a-nofpu,)
$(call cc-option,-m2a-nofpu,) \
$(call cc-option,-m4-nofpu,)
cflags-$(CONFIG_CPU_SH3) := $(call cc-option,-m3,)
cflags-$(CONFIG_CPU_SH4) := $(call cc-option,-m4,) \
$(call cc-option,-mno-implicit-fp,-m4-nofpu)


@ -370,6 +370,17 @@ static void init_intel(struct cpuinfo_x86 *c)
*/
detect_extended_topology(c);
if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
/*
* let's use the legacy cpuid vector 0x1 and 0x4 for topology
* detection.
*/
c->x86_max_cores = intel_num_cpu_cores(c);
#ifdef CONFIG_X86_32
detect_ht(c);
#endif
}
l2 = init_intel_cacheinfo(c);
if (c->cpuid_level > 9) {
unsigned eax = cpuid_eax(10);
@ -438,17 +449,6 @@ static void init_intel(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_P3);
#endif
if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
/*
* let's use the legacy cpuid vector 0x1 and 0x4 for topology
* detection.
*/
c->x86_max_cores = intel_num_cpu_cores(c);
#ifdef CONFIG_X86_32
detect_ht(c);
#endif
}
/* Work around errata */
srat_detect_node(c);


@ -730,6 +730,18 @@ unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
#endif
}
#ifdef CONFIG_X86_HT
/*
* If cpu_llc_id is not yet set, this means cpuid_level < 4, which in
* turn means that the only possibility is SMT (as indicated in
* cpuid1). Since cpuid2 doesn't specify shared caches, and we know
* that SMT shares all caches, we can unconditionally set cpu_llc_id to
* c->phys_proc_id.
*/
if (per_cpu(cpu_llc_id, cpu) == BAD_APICID)
per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
#endif
c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
return l2;


@ -2451,6 +2451,12 @@ static __init int mcheck_init_device(void)
for_each_online_cpu(i) {
err = mce_device_create(i);
if (err) {
/*
* Register notifier anyway (and do not unreg it) so
* that we don't leave undeleted timers, see notifier
* callback above.
*/
__register_hotcpu_notifier(&mce_cpu_notifier);
cpu_notifier_register_done();
goto err_device_create;
}
@ -2471,10 +2477,6 @@ static __init int mcheck_init_device(void)
err_register:
unregister_syscore_ops(&mce_syscore_ops);
cpu_notifier_register_begin();
__unregister_hotcpu_notifier(&mce_cpu_notifier);
cpu_notifier_register_done();
err_device_create:
/*
* We didn't keep track of which devices were created above, but


@ -118,6 +118,9 @@ static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
continue;
if (event->attr.config1 & ~er->valid_mask)
return -EINVAL;
/* Check if the extra MSRs can be safely accessed */
if (!er->extra_msr_access)
return -ENXIO;
reg->idx = er->idx;
reg->config = event->attr.config1;


@ -295,14 +295,16 @@ struct extra_reg {
u64 config_mask;
u64 valid_mask;
int idx; /* per_xxx->regs[] reg index */
bool extra_msr_access;
};
#define EVENT_EXTRA_REG(e, ms, m, vm, i) { \
.event = (e), \
.msr = (ms), \
.config_mask = (m), \
.valid_mask = (vm), \
.idx = EXTRA_REG_##i, \
.event = (e), \
.msr = (ms), \
.config_mask = (m), \
.valid_mask = (vm), \
.idx = EXTRA_REG_##i, \
.extra_msr_access = true, \
}
#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \


@ -2182,6 +2182,41 @@ static void intel_snb_check_microcode(void)
}
}
/*
* Under certain circumstances, accessing certain MSRs may cause #GP.
* This function tests whether the input MSR can be safely accessed.
*/
static bool check_msr(unsigned long msr, u64 mask)
{
u64 val_old, val_new, val_tmp;
/*
* Read the current value, change it and read it back to see if it
* matches; this is needed to detect certain hardware emulators
* (qemu/kvm) that don't trap on the MSR access and always return 0s.
*/
if (rdmsrl_safe(msr, &val_old))
return false;
/*
* Only change the bits which can be updated by wrmsrl.
*/
val_tmp = val_old ^ mask;
if (wrmsrl_safe(msr, val_tmp) ||
rdmsrl_safe(msr, &val_new))
return false;
if (val_new != val_tmp)
return false;
/* At this point it is certain that the MSR can be safely accessed.
* Restore the old value and return.
*/
wrmsrl(msr, val_old);
return true;
}
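/*
* Probe logic (inferred): val_tmp flips only the bits in @mask, so real
* hardware reads back val_old ^ mask, while an emulator that swallows the
* write reads back val_old (or 0) and fails the val_new != val_tmp check,
* marking the MSR unsafe.
*/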
static __init void intel_sandybridge_quirk(void)
{
x86_pmu.check_microcode = intel_snb_check_microcode;
@ -2271,7 +2306,8 @@ __init int intel_pmu_init(void)
union cpuid10_ebx ebx;
struct event_constraint *c;
unsigned int unused;
int version;
struct extra_reg *er;
int version, i;
if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
switch (boot_cpu_data.x86) {
@ -2474,6 +2510,9 @@ __init int intel_pmu_init(void)
case 62: /* IvyBridge EP */
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
/* dTLB-load-misses on IVB is different from SNB */
hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
sizeof(hw_cache_extra_regs));
@ -2574,6 +2613,34 @@ __init int intel_pmu_init(void)
}
}
/*
* Accessing the LBR MSRs may cause #GP under certain circumstances,
* e.g. KVM doesn't support the LBR MSRs.
* Check all LBR MSRs here.
* Disable LBR access if any LBR MSR cannot be accessed.
*/
if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
x86_pmu.lbr_nr = 0;
for (i = 0; i < x86_pmu.lbr_nr; i++) {
if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
x86_pmu.lbr_nr = 0;
}
/*
* Accessing the extra MSRs may cause #GP under certain circumstances,
* e.g. KVM doesn't support offcore events.
* Check all extra_regs here.
*/
if (x86_pmu.extra_regs) {
for (er = x86_pmu.extra_regs; er->msr; er++) {
er->extra_msr_access = check_msr(er->msr, 0x1ffUL);
/* Disable LBR select mapping */
if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
x86_pmu.lbr_sel_map = NULL;
}
}
/* Support full width counters using alternative MSR range */
if (x86_pmu.intel_cap.full_width_write) {
x86_pmu.max_period = x86_pmu.cntval_mask;


@ -311,9 +311,11 @@ static int alloc_bts_buffer(int cpu)
if (!x86_pmu.bts)
return 0;
buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL, node);
if (unlikely(!buffer))
buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, node);
if (unlikely(!buffer)) {
WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
return -ENOMEM;
}
max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
thresh = max / 16;


@ -550,16 +550,16 @@ static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
@ -1222,6 +1222,7 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
@ -1245,7 +1246,7 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),


@ -425,8 +425,8 @@ sysenter_do_call:
cmpl $(NR_syscalls), %eax
jae sysenter_badsys
call *sys_call_table(,%eax,4)
movl %eax,PT_EAX(%esp)
sysenter_after_call:
movl %eax,PT_EAX(%esp)
LOCKDEP_SYS_EXIT
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
@ -502,6 +502,7 @@ ENTRY(system_call)
jae syscall_badsys
syscall_call:
call *sys_call_table(,%eax,4)
syscall_after_call:
movl %eax,PT_EAX(%esp) # store the return value
syscall_exit:
LOCKDEP_SYS_EXIT
@ -675,12 +676,12 @@ syscall_fault:
END(syscall_fault)
syscall_badsys:
movl $-ENOSYS,PT_EAX(%esp)
jmp syscall_exit
movl $-ENOSYS,%eax
jmp syscall_after_call
END(syscall_badsys)
sysenter_badsys:
movl $-ENOSYS,PT_EAX(%esp)
movl $-ENOSYS,%eax
jmp sysenter_after_call
END(syscall_badsys)
CFI_ENDPROC


@ -574,6 +574,9 @@ int kprobe_int3_handler(struct pt_regs *regs)
struct kprobe *p;
struct kprobe_ctlblk *kcb;
if (user_mode_vm(regs))
return 0;
addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
/*
* We don't want to be preempted for the entire


@ -36,99 +36,133 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <xen/interface/xen.h>
#include <xen/page.h>
#include <xen/grant_table.h>
#include <xen/xen.h>
#include <asm/pgtable.h>
static int map_pte_fn(pte_t *pte, struct page *pmd_page,
unsigned long addr, void *data)
{
unsigned long **frames = (unsigned long **)data;
set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL));
(*frames)++;
return 0;
}
/*
* This function is used to map shared frames to store grant status. It is
* different from map_pte_fn above, the frames type here is uint64_t.
*/
static int map_pte_fn_status(pte_t *pte, struct page *pmd_page,
unsigned long addr, void *data)
{
uint64_t **frames = (uint64_t **)data;
set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL));
(*frames)++;
return 0;
}
static int unmap_pte_fn(pte_t *pte, struct page *pmd_page,
unsigned long addr, void *data)
{
set_pte_at(&init_mm, addr, pte, __pte(0));
return 0;
}
static struct gnttab_vm_area {
struct vm_struct *area;
pte_t **ptes;
} gnttab_shared_vm_area, gnttab_status_vm_area;
int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
unsigned long max_nr_gframes,
void **__shared)
{
int rc;
void *shared = *__shared;
unsigned long addr;
unsigned long i;
if (shared == NULL) {
struct vm_struct *area =
alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL);
BUG_ON(area == NULL);
shared = area->addr;
*__shared = shared;
if (shared == NULL)
*__shared = shared = gnttab_shared_vm_area.area->addr;
addr = (unsigned long)shared;
for (i = 0; i < nr_gframes; i++) {
set_pte_at(&init_mm, addr, gnttab_shared_vm_area.ptes[i],
mfn_pte(frames[i], PAGE_KERNEL));
addr += PAGE_SIZE;
}
rc = apply_to_page_range(&init_mm, (unsigned long)shared,
PAGE_SIZE * nr_gframes,
map_pte_fn, &frames);
return rc;
return 0;
}
int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
unsigned long max_nr_gframes,
grant_status_t **__shared)
{
int rc;
grant_status_t *shared = *__shared;
unsigned long addr;
unsigned long i;
if (shared == NULL) {
/* No need to pass in PTE as we are going to do it
* in apply_to_page_range anyhow. */
struct vm_struct *area =
alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL);
BUG_ON(area == NULL);
shared = area->addr;
*__shared = shared;
if (shared == NULL)
*__shared = shared = gnttab_status_vm_area.area->addr;
addr = (unsigned long)shared;
for (i = 0; i < nr_gframes; i++) {
set_pte_at(&init_mm, addr, gnttab_status_vm_area.ptes[i],
mfn_pte(frames[i], PAGE_KERNEL));
addr += PAGE_SIZE;
}
rc = apply_to_page_range(&init_mm, (unsigned long)shared,
PAGE_SIZE * nr_gframes,
map_pte_fn_status, &frames);
return rc;
return 0;
}
void arch_gnttab_unmap(void *shared, unsigned long nr_gframes)
{
apply_to_page_range(&init_mm, (unsigned long)shared,
PAGE_SIZE * nr_gframes, unmap_pte_fn, NULL);
pte_t **ptes;
unsigned long addr;
unsigned long i;
if (shared == gnttab_status_vm_area.area->addr)
ptes = gnttab_status_vm_area.ptes;
else
ptes = gnttab_shared_vm_area.ptes;
addr = (unsigned long)shared;
for (i = 0; i < nr_gframes; i++) {
set_pte_at(&init_mm, addr, ptes[i], __pte(0));
addr += PAGE_SIZE;
}
}
static int arch_gnttab_valloc(struct gnttab_vm_area *area, unsigned nr_frames)
{
area->ptes = kmalloc(sizeof(pte_t *) * nr_frames, GFP_KERNEL);
if (area->ptes == NULL)
return -ENOMEM;
area->area = alloc_vm_area(PAGE_SIZE * nr_frames, area->ptes);
if (area->area == NULL) {
kfree(area->ptes);
return -ENOMEM;
}
return 0;
}
static void arch_gnttab_vfree(struct gnttab_vm_area *area)
{
free_vm_area(area->area);
kfree(area->ptes);
}
int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status)
{
int ret;
if (!xen_pv_domain())
return 0;
ret = arch_gnttab_valloc(&gnttab_shared_vm_area, nr_shared);
if (ret < 0)
return ret;
/*
* Always allocate the space for the status frames in case
* we're migrated to a host with V2 support.
*/
ret = arch_gnttab_valloc(&gnttab_status_vm_area, nr_status);
if (ret < 0)
goto err;
return 0;
err:
arch_gnttab_vfree(&gnttab_shared_vm_area);
return -ENOMEM;
}
#ifdef CONFIG_XEN_PVH
#include <xen/balloon.h>
#include <xen/events.h>
#include <xen/xen.h>
#include <linux/slab.h>
static int __init xlated_setup_gnttab_pages(void)
{


@ -376,38 +376,42 @@ _DoubleExceptionVector_WindowOverflow:
beqz a2, 1f # if at start of vector, don't restore
addi a0, a0, -128
bbsi a0, 8, 1f # don't restore except for overflow 8 and 12
bbsi a0, 7, 2f
bbsi.l a0, 8, 1f # don't restore except for overflow 8 and 12
/*
* This fixup handler is for the extremely unlikely case where the
* overflow handler's reference thru a0 gets a hardware TLB refill
* that bumps out the (distinct, aliasing) TLB entry that mapped its
* prior references thru a9/a13, and where our reference now thru
* a9/a13 gets a 2nd-level miss exception (not hardware TLB refill).
*/
movi a2, window_overflow_restore_a0_fixup
s32i a2, a3, EXC_TABLE_FIXUP
l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
xsr a3, excsave1
bbsi.l a0, 7, 2f
/*
* Restore a0 as saved by _WindowOverflow8().
*
* FIXME: we really need a fixup handler for this L32E,
* for the extremely unlikely case where the overflow handler's
* reference thru a0 gets a hardware TLB refill that bumps out
* the (distinct, aliasing) TLB entry that mapped its prior
* references thru a9, and where our reference now thru a9
* gets a 2nd-level miss exception (not hardware TLB refill).
*/
l32e a2, a9, -16
wsr a2, depc # replace the saved a0
j 1f
l32e a0, a9, -16
wsr a0, depc # replace the saved a0
j 3f
2:
/*
* Restore a0 as saved by _WindowOverflow12().
*
* FIXME: we really need a fixup handler for this L32E,
* for the extremely unlikely case where the overflow handler's
* reference thru a0 gets a hardware TLB refill that bumps out
* the (distinct, aliasing) TLB entry that mapped its prior
* references thru a13, and where our reference now thru a13
* gets a 2nd-level miss exception (not hardware TLB refill).
*/
l32e a2, a13, -16
wsr a2, depc # replace the saved a0
l32e a0, a13, -16
wsr a0, depc # replace the saved a0
3:
xsr a3, excsave1
movi a0, 0
s32i a0, a3, EXC_TABLE_FIXUP
s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
1:
/*
* Restore WindowBase while leaving all address registers restored.
@ -449,6 +453,7 @@ _DoubleExceptionVector_WindowOverflow:
s32i a0, a2, PT_DEPC
_DoubleExceptionVector_handle_exception:
addx4 a0, a0, a3
l32i a0, a0, EXC_TABLE_FAST_USER
xsr a3, excsave1
@ -464,10 +469,119 @@ _DoubleExceptionVector_WindowOverflow:
rotw -3
j 1b
.end literal_prefix
ENDPROC(_DoubleExceptionVector)
/*
* Fixup handler for TLB miss in double exception handler for window overflow.
* We get here with windowbase set to the window that was being spilled and
* a0 trashed. a0 bit 7 determines if this is a call8 (bit clear) or call12
* (bit set) window.
*
* We do the following here:
* - go to the original window retaining a0 value;
* - set up exception stack to return back to appropriate a0 restore code
* (we'll need to rotate window back and there's no place to save this
* information, use different return address for that);
* - handle the exception;
* - go to the window that was being spilled;
* - set up window_overflow_restore_a0_fixup as a fixup routine;
* - reload a0;
* - restore the original window;
* - reset the default fixup routine;
* - return to user. By the time we get to this fixup handler all information
* about the conditions of the original double exception that happened in
* the window overflow handler is lost, so we just return to userspace to
* retry overflow from start.
*
* a0: value of depc, original value in depc
* a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
* a3: exctable, original value in excsave1
*/
ENTRY(window_overflow_restore_a0_fixup)
rsr a0, ps
extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH
rsr a2, windowbase
sub a0, a2, a0
extui a0, a0, 0, 3
l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
xsr a3, excsave1
_beqi a0, 1, .Lhandle_1
_beqi a0, 3, .Lhandle_3
.macro overflow_fixup_handle_exception_pane n
rsr a0, depc
rotw -\n
xsr a3, excsave1
wsr a2, depc
l32i a2, a3, EXC_TABLE_KSTK
s32i a0, a2, PT_AREG0
movi a0, .Lrestore_\n
s32i a0, a2, PT_DEPC
rsr a0, exccause
j _DoubleExceptionVector_handle_exception
.endm
overflow_fixup_handle_exception_pane 2
.Lhandle_1:
overflow_fixup_handle_exception_pane 1
.Lhandle_3:
overflow_fixup_handle_exception_pane 3
.macro overflow_fixup_restore_a0_pane n
rotw \n
/* Need to preserve a0 value here to be able to handle exception
* that may occur on a0 reload from stack. It may occur because
* TLB miss handler may not be atomic and pointer to page table
* may be lost before we get here. There are no free registers,
* so we need to use EXC_TABLE_DOUBLE_SAVE area.
*/
xsr a3, excsave1
s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
movi a2, window_overflow_restore_a0_fixup
s32i a2, a3, EXC_TABLE_FIXUP
l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
xsr a3, excsave1
bbsi.l a0, 7, 1f
l32e a0, a9, -16
j 2f
1:
l32e a0, a13, -16
2:
rotw -\n
.endm
.Lrestore_2:
overflow_fixup_restore_a0_pane 2
.Lset_default_fixup:
xsr a3, excsave1
s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
movi a2, 0
s32i a2, a3, EXC_TABLE_FIXUP
l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
xsr a3, excsave1
rfe
.Lrestore_1:
overflow_fixup_restore_a0_pane 1
j .Lset_default_fixup
.Lrestore_3:
overflow_fixup_restore_a0_pane 3
j .Lset_default_fixup
ENDPROC(window_overflow_restore_a0_fixup)
.end literal_prefix
/*
* Debug interrupt vector
*


@ -269,13 +269,13 @@ SECTIONS
.UserExceptionVector.literal)
SECTION_VECTOR (_DoubleExceptionVector_literal,
.DoubleExceptionVector.literal,
DOUBLEEXC_VECTOR_VADDR - 16,
DOUBLEEXC_VECTOR_VADDR - 40,
SIZEOF(.UserExceptionVector.text),
.UserExceptionVector.text)
SECTION_VECTOR (_DoubleExceptionVector_text,
.DoubleExceptionVector.text,
DOUBLEEXC_VECTOR_VADDR,
32,
40,
.DoubleExceptionVector.literal)
. = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;


@ -191,7 +191,7 @@ int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
return -EINVAL;
}
if (it && start - it->start < bank_sz) {
if (it && start - it->start <= bank_sz) {
if (start == it->start) {
if (end - it->start < bank_sz) {
it->start = end;


@ -4798,9 +4798,8 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
{
struct ata_queued_cmd *qc = NULL;
unsigned int i, tag, max_queue;
max_queue = ap->scsi_host->can_queue;
unsigned int max_queue = ap->host->n_tags;
unsigned int i, tag;
/* no command while frozen */
if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
@ -6094,6 +6093,7 @@ void ata_host_init(struct ata_host *host, struct device *dev,
{
spin_lock_init(&host->lock);
mutex_init(&host->eh_mutex);
host->n_tags = ATA_MAX_QUEUE - 1;
host->dev = dev;
host->ops = ops;
}
@ -6175,15 +6175,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
int i, rc;
/*
* The max queue supported by hardware must not be greater than
* ATA_MAX_QUEUE.
*/
if (sht->can_queue > ATA_MAX_QUEUE) {
dev_err(host->dev, "BUG: the hardware max queue is too large\n");
WARN_ON(1);
return -EINVAL;
}
host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
/* host must have been started */
if (!(host->flags & ATA_HOST_STARTED)) {


@ -622,11 +622,18 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
memset(&zram->stats, 0, sizeof(zram->stats));
zram->disksize = 0;
if (reset_capacity) {
if (reset_capacity)
set_capacity(zram->disk, 0);
revalidate_disk(zram->disk);
}
up_write(&zram->init_lock);
/*
* Revalidate disk out of the init_lock to avoid lockdep splat.
* It's okay because disk's capacity is protected by init_lock
* so that revalidate_disk always sees up-to-date capacity.
*/
if (reset_capacity)
revalidate_disk(zram->disk);
}
static ssize_t disksize_store(struct device *dev,
@ -666,8 +673,15 @@ static ssize_t disksize_store(struct device *dev,
zram->comp = comp;
zram->disksize = disksize;
set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
revalidate_disk(zram->disk);
up_write(&zram->init_lock);
/*
* Revalidate disk out of the init_lock to avoid lockdep splat.
* It's okay because disk's capacity is protected by init_lock
* so that revalidate_disk always sees up-to-date capacity.
*/
revalidate_disk(zram->disk);
return len;
out_destroy_comp:


@ -336,10 +336,10 @@ static const struct {
QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE},
{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, 0,
QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
QUIRK_CYCLE_TIMER /* FIXME: necessary? */ | QUIRK_NO_MSI},
{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, PCI_ANY_ID,
0},
QUIRK_NO_MSI},
{PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},


@ -284,6 +284,7 @@ static int gpio_rcar_irq_domain_map(struct irq_domain *h, unsigned int irq,
static struct irq_domain_ops gpio_rcar_irq_domain_ops = {
.map = gpio_rcar_irq_domain_map,
.xlate = irq_domain_xlate_twocell,
};
struct gpio_rcar_info {


@ -1616,22 +1616,6 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return ret;
}
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
{
struct i915_vma *vma;
/*
* Only the global gtt is relevant for gtt memory mappings, so restrict
* list traversal to objects bound into the global address space. Note
* that the active list should be empty, but better safe than sorry.
*/
WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
i915_gem_release_mmap(vma->obj);
list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
i915_gem_release_mmap(vma->obj);
}
/**
* i915_gem_release_mmap - remove physical page mappings
* @obj: obj in question
@ -1657,6 +1641,15 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
obj->fault_mappable = false;
}
void
i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
{
struct drm_i915_gem_object *obj;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
i915_gem_release_mmap(obj);
}
uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{


@ -31,7 +31,7 @@
struct i915_render_state {
struct drm_i915_gem_object *obj;
unsigned long ggtt_offset;
void *batch;
u32 *batch;
u32 size;
u32 len;
};
@ -80,7 +80,7 @@ static struct i915_render_state *render_state_alloc(struct drm_device *dev)
static void render_state_free(struct i915_render_state *so)
{
kunmap(so->batch);
kunmap(kmap_to_page(so->batch));
i915_gem_object_ggtt_unpin(so->obj);
drm_gem_object_unreference(&so->obj->base);
kfree(so);


@ -2845,7 +2845,7 @@ static int semaphore_passed(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_engine_cs *signaller;
u32 seqno, ctl;
u32 seqno;
ring->hangcheck.deadlock++;
@ -2857,15 +2857,12 @@ static int semaphore_passed(struct intel_engine_cs *ring)
if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
return -1;
/* cursory check for an unkickable deadlock */
ctl = I915_READ_CTL(signaller);
if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
return -1;
if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
return 1;
if (signaller->hangcheck.deadlock)
/* cursory check for an unkickable deadlock */
if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
semaphore_passed(signaller) < 0)
return -1;
return 0;


@ -2291,6 +2291,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
gb_tile_moden = 0;
break;
}
rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
}
} else if (num_pipe_configs == 8) {
@ -7376,6 +7377,7 @@ static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}


@ -4756,6 +4756,7 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}


@ -3795,6 +3795,7 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev)
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}


@ -449,6 +449,7 @@ struct radeon_bo_va {
/* protected by vm mutex */
struct list_head vm_list;
struct list_head vm_status;
/* constant after initialization */
struct radeon_vm *vm;
@ -867,6 +868,9 @@ struct radeon_vm {
struct list_head va;
unsigned id;
/* BOs freed, but not yet updated in the PT */
struct list_head freed;
/* contains the page directory */
struct radeon_bo *page_directory;
uint64_t pd_gpu_addr;
@ -875,6 +879,8 @@ struct radeon_vm {
/* array of page tables, one for each page directory entry */
struct radeon_vm_pt *page_tables;
struct radeon_bo_va *ib_bo_va;
struct mutex mutex;
/* last fence for cs using this vm */
struct radeon_fence *fence;
@ -2832,9 +2838,10 @@ void radeon_vm_fence(struct radeon_device *rdev,
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
int radeon_vm_update_page_directory(struct radeon_device *rdev,
struct radeon_vm *vm);
int radeon_vm_clear_freed(struct radeon_device *rdev,
struct radeon_vm *vm);
int radeon_vm_bo_update(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_bo *bo,
struct radeon_bo_va *bo_va,
struct ttm_mem_reg *mem);
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
struct radeon_bo *bo);
@ -2847,8 +2854,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
struct radeon_bo_va *bo_va,
uint64_t offset,
uint32_t flags);
int radeon_vm_bo_rmv(struct radeon_device *rdev,
struct radeon_bo_va *bo_va);
void radeon_vm_bo_rmv(struct radeon_device *rdev,
struct radeon_bo_va *bo_va);
/* audio */
void r600_audio_update_hdmi(struct work_struct *work);


@ -461,13 +461,23 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
struct radeon_vm *vm)
{
struct radeon_device *rdev = p->rdev;
struct radeon_bo_va *bo_va;
int i, r;
r = radeon_vm_update_page_directory(rdev, vm);
if (r)
return r;
r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo,
r = radeon_vm_clear_freed(rdev, vm);
if (r)
return r;
if (vm->ib_bo_va == NULL) {
DRM_ERROR("Tmp BO not in VM!\n");
return -EINVAL;
}
r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
&rdev->ring_tmp_bo.bo->tbo.mem);
if (r)
return r;
@ -480,7 +490,13 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
continue;
bo = p->relocs[i].robj;
r = radeon_vm_bo_update(rdev, vm, bo, &bo->tbo.mem);
bo_va = radeon_vm_bo_find(vm, bo);
if (bo_va == NULL) {
dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
return -EINVAL;
}
r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
if (r)
return r;
}


@ -1056,36 +1056,36 @@ static void radeon_check_arguments(struct radeon_device *rdev)
if (!radeon_check_pot_argument(radeon_vm_size)) {
dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
radeon_vm_size);
radeon_vm_size = 4096;
radeon_vm_size = 4;
}
if (radeon_vm_size < 4) {
dev_warn(rdev->dev, "VM size (%d) to small, min is 4MB\n",
if (radeon_vm_size < 1) {
dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n",
radeon_vm_size);
radeon_vm_size = 4096;
radeon_vm_size = 4;
}
/*
* Max GPUVM size for Cayman, SI and CI are 40 bits.
*/
if (radeon_vm_size > 1024*1024) {
dev_warn(rdev->dev, "VM size (%d) to large, max is 1TB\n",
if (radeon_vm_size > 1024) {
dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
radeon_vm_size);
radeon_vm_size = 4096;
radeon_vm_size = 4;
}
/* defines number of bits in page table versus page directory,
* a page is 4KB so we have 12 bits offset, minimum 9 bits in the
* page table and the remaining bits are in the page directory */
if (radeon_vm_block_size < 9) {
dev_warn(rdev->dev, "VM page table size (%d) to small\n",
dev_warn(rdev->dev, "VM page table size (%d) too small\n",
radeon_vm_block_size);
radeon_vm_block_size = 9;
}
if (radeon_vm_block_size > 24 ||
radeon_vm_size < (1ull << radeon_vm_block_size)) {
dev_warn(rdev->dev, "VM page table size (%d) to large\n",
(radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
dev_warn(rdev->dev, "VM page table size (%d) too large\n",
radeon_vm_block_size);
radeon_vm_block_size = 9;
}
@ -1238,7 +1238,7 @@ int radeon_device_init(struct radeon_device *rdev,
/* Adjust VM size here.
* Max GPUVM size for cayman+ is 40 bits.
*/
rdev->vm_manager.max_pfn = radeon_vm_size << 8;
rdev->vm_manager.max_pfn = radeon_vm_size << 18;
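/*
* Unit check (illustrative): radeon_vm_size is now in GB, so the address
* space spans radeon_vm_size << 30 bytes, i.e. radeon_vm_size << 18 pages
* of 4KB; the old megabyte-based value correspondingly used << 8.
*/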
/* Set asic functions */
r = radeon_asic_init(rdev);


@ -173,7 +173,7 @@ int radeon_dpm = -1;
int radeon_aspm = -1;
int radeon_runtime_pm = -1;
int radeon_hard_reset = 0;
int radeon_vm_size = 4096;
int radeon_vm_size = 4;
int radeon_vm_block_size = 9;
int radeon_deep_color = 0;
@ -243,7 +243,7 @@ module_param_named(runpm, radeon_runtime_pm, int, 0444);
MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))");
module_param_named(hard_reset, radeon_hard_reset, int, 0444);
MODULE_PARM_DESC(vm_size, "VM address space size in megabytes (default 4GB)");
MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 4GB)");
module_param_named(vm_size, radeon_vm_size, int, 0444);
MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)");


@ -579,7 +579,7 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
/* new gpu have virtual address space support */
if (rdev->family >= CHIP_CAYMAN) {
struct radeon_fpriv *fpriv;
struct radeon_bo_va *bo_va;
struct radeon_vm *vm;
int r;
fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
@ -587,7 +587,8 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
return -ENOMEM;
}
r = radeon_vm_init(rdev, &fpriv->vm);
vm = &fpriv->vm;
r = radeon_vm_init(rdev, vm);
if (r) {
kfree(fpriv);
return r;
@ -596,22 +597,23 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
if (rdev->accel_working) {
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (r) {
radeon_vm_fini(rdev, &fpriv->vm);
radeon_vm_fini(rdev, vm);
kfree(fpriv);
return r;
}
/* map the ib pool buffer read only into
* virtual address space */
bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
rdev->ring_tmp_bo.bo);
r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
vm->ib_bo_va = radeon_vm_bo_add(rdev, vm,
rdev->ring_tmp_bo.bo);
r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
RADEON_VA_IB_OFFSET,
RADEON_VM_PAGE_READABLE |
RADEON_VM_PAGE_SNOOPED);
radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
if (r) {
radeon_vm_fini(rdev, &fpriv->vm);
radeon_vm_fini(rdev, vm);
kfree(fpriv);
return r;
}
@ -640,21 +642,19 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
/* new gpu have virtual address space support */
if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
struct radeon_fpriv *fpriv = file_priv->driver_priv;
struct radeon_bo_va *bo_va;
struct radeon_vm *vm = &fpriv->vm;
int r;
if (rdev->accel_working) {
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (!r) {
bo_va = radeon_vm_bo_find(&fpriv->vm,
rdev->ring_tmp_bo.bo);
if (bo_va)
radeon_vm_bo_rmv(rdev, bo_va);
if (vm->ib_bo_va)
radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
}
}
radeon_vm_fini(rdev, &fpriv->vm);
radeon_vm_fini(rdev, vm);
kfree(fpriv);
file_priv->driver_priv = NULL;
}


@ -332,6 +332,7 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
bo_va->ref_count = 1;
INIT_LIST_HEAD(&bo_va->bo_list);
INIT_LIST_HEAD(&bo_va->vm_list);
INIT_LIST_HEAD(&bo_va->vm_status);
mutex_lock(&vm->mutex);
list_add(&bo_va->vm_list, &vm->va);
@ -468,6 +469,19 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
head = &tmp->vm_list;
}
if (bo_va->soffset) {
/* add a clone of the bo_va to clear the old address */
tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
if (!tmp) {
mutex_unlock(&vm->mutex);
return -ENOMEM;
}
tmp->soffset = bo_va->soffset;
tmp->eoffset = bo_va->eoffset;
tmp->vm = vm;
list_add(&tmp->vm_status, &vm->freed);
}
bo_va->soffset = soffset;
bo_va->eoffset = eoffset;
bo_va->flags = flags;
@ -823,25 +837,19 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
* Object have to be reserved and mutex must be locked!
*/
int radeon_vm_bo_update(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_bo *bo,
struct radeon_bo_va *bo_va,
struct ttm_mem_reg *mem)
{
struct radeon_vm *vm = bo_va->vm;
struct radeon_ib ib;
struct radeon_bo_va *bo_va;
unsigned nptes, ndw;
uint64_t addr;
int r;
bo_va = radeon_vm_bo_find(vm, bo);
if (bo_va == NULL) {
dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
return -EINVAL;
}
if (!bo_va->soffset) {
dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n",
bo, vm);
bo_va->bo, vm);
return -EINVAL;
}
@ -868,7 +876,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
trace_radeon_vm_bo_update(bo_va);
nptes = radeon_bo_ngpu_pages(bo);
nptes = (bo_va->eoffset - bo_va->soffset) / RADEON_GPU_PAGE_SIZE;
/* padding, etc. */
ndw = 64;
@ -910,6 +918,34 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
return 0;
}
/**
* radeon_vm_clear_freed - clear freed BOs in the PT
*
* @rdev: radeon_device pointer
* @vm: requested vm
*
* Make sure all freed BOs are cleared in the PT.
* Returns 0 for success.
*
* PTs have to be reserved and mutex must be locked!
*/
int radeon_vm_clear_freed(struct radeon_device *rdev,
struct radeon_vm *vm)
{
struct radeon_bo_va *bo_va, *tmp;
int r;
list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
list_del(&bo_va->vm_status);
r = radeon_vm_bo_update(rdev, bo_va, NULL);
kfree(bo_va);
if (r)
return r;
}
return 0;
}
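Taken together, the set_addr and clear_freed hunks above implement deferred unmapping: when a mapping is replaced or removed, a placeholder bo_va carrying the old range is queued on vm->freed, and its page-table entries are only cleared later, once the page tables are reserved. A generic sketch of the pattern (hypothetical names, not the radeon code itself):

#include <linux/list.h>
#include <linux/slab.h>

struct mapping {
        struct list_head vm_status;     /* link on the VM's freed list */
        /* ... address range, flags ... */
};

/* queue the old range; it still occupies the page tables for now */
static void defer_unmap(struct list_head *freed, struct mapping *m)
{
        list_add(&m->vm_status, freed);
}

/* later, with the page tables reserved, drain and clear each range */
static int drain_freed(struct list_head *freed)
{
        struct mapping *m, *tmp;

        list_for_each_entry_safe(m, tmp, freed, vm_status) {
                list_del(&m->vm_status);
                /* clear_ptes(m) would go here */
                kfree(m);
        }
        return 0;
}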
/**
* radeon_vm_bo_rmv - remove a bo from a specific vm
*
@ -917,27 +953,27 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
* @bo_va: requested bo_va
*
* Remove @bo_va->bo from the requested vm (cayman+).
* Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
* remove the ptes for @bo_va in the page table.
* Returns 0 for success.
*
* Object have to be reserved!
*/
int radeon_vm_bo_rmv(struct radeon_device *rdev,
struct radeon_bo_va *bo_va)
void radeon_vm_bo_rmv(struct radeon_device *rdev,
struct radeon_bo_va *bo_va)
{
int r = 0;
struct radeon_vm *vm = bo_va->vm;
mutex_lock(&bo_va->vm->mutex);
if (bo_va->soffset)
r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);
list_del(&bo_va->vm_list);
mutex_unlock(&bo_va->vm->mutex);
list_del(&bo_va->bo_list);
kfree(bo_va);
return r;
mutex_lock(&vm->mutex);
list_del(&bo_va->vm_list);
if (bo_va->soffset) {
bo_va->bo = NULL;
list_add(&bo_va->vm_status, &vm->freed);
} else {
kfree(bo_va);
}
mutex_unlock(&vm->mutex);
}
/**
@ -975,11 +1011,13 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
int r;
vm->id = 0;
vm->ib_bo_va = NULL;
vm->fence = NULL;
vm->last_flush = NULL;
vm->last_id_use = NULL;
mutex_init(&vm->mutex);
INIT_LIST_HEAD(&vm->va);
INIT_LIST_HEAD(&vm->freed);
pd_size = radeon_vm_directory_size(rdev);
pd_entries = radeon_vm_num_pdes(rdev);
@ -1034,7 +1072,8 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
kfree(bo_va);
}
}
list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status)
kfree(bo_va);
for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
radeon_bo_unref(&vm->page_tables[i].bo);


@ -6103,6 +6103,7 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}


@ -1874,15 +1874,16 @@ int trinity_dpm_init(struct radeon_device *rdev)
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
pi->at[i] = TRINITY_AT_DFLT;
/* There are stability issues reported on laptops with
* bapm installed when switching between AC and battery
* power. At the same time, some desktop boards hang
* if it's not enabled and dpm is enabled.
/* There are stability issues reported with
* bapm enabled when switching between AC and battery
* power. At the same time, some MSI boards hang
* if it's not enabled and dpm is enabled. Just enable
* it for MSI boards right now.
*/
if (rdev->flags & RADEON_IS_MOBILITY)
pi->enable_bapm = false;
else
if (rdev->pdev->subsystem_vendor == 0x1462)
pi->enable_bapm = true;
else
pi->enable_bapm = false;
pi->enable_nbps_policy = true;
pi->enable_sclk_ds = true;
pi->enable_gfx_power_gating = true;


@ -86,7 +86,7 @@ static inline u8 IN_TO_REG(unsigned long val, int n)
*/
static inline s8 TEMP_TO_REG(int val)
{
return clamp_val(SCALE(val, 1, 1000), -128000, 127000);
return SCALE(clamp_val(val, -128000, 127000), 1, 1000);
}
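The fix above is an order-of-operations bug: scaling first turns 200000 m°C into 200, the clamp against ±128000 is then a no-op, and 200 overflows the s8 return value; clamping the raw millidegree input first guarantees the scaled result fits in -128..127. A standalone worked example, assuming SCALE(v, 1, 1000) behaves essentially as a divide by 1000 (the driver's real macro also rounds):

#include <stdio.h>

#define CLAMP(v, lo, hi) ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))
/* simplified stand-in for the driver's SCALE(); no rounding */
#define SCALE(v, mul, div) ((v) * (mul) / (div))

int main(void)
{
        int val = 200000;       /* 200 degrees C in millidegrees, out of range */
        signed char old_reg = CLAMP(SCALE(val, 1, 1000), -128000, 127000);
        signed char new_reg = SCALE(CLAMP(val, -128000, 127000), 1, 1000);

        /* old: 200000/1000 = 200; clamp vs +/-128000 is a no-op; 200 wraps in s8 */
        /* new: clamp to 127000 first, then 127000/1000 = 127, a valid s8 */
        printf("old=%d new=%d\n", old_reg, new_reg);
        return 0;
}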
static inline int TEMP_FROM_REG(s8 val)
@ -384,6 +384,8 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
err = kstrtoul(buf, 10, &val);
if (err)
return err;
if (val > 255)
return -EINVAL;
data->vrm = val;
return count;


@ -257,9 +257,10 @@ static int input_handle_abs_event(struct input_dev *dev,
}
static int input_get_disposition(struct input_dev *dev,
unsigned int type, unsigned int code, int value)
unsigned int type, unsigned int code, int *pval)
{
int disposition = INPUT_IGNORE_EVENT;
int value = *pval;
switch (type) {
@ -357,6 +358,7 @@ static int input_get_disposition(struct input_dev *dev,
break;
}
*pval = value;
return disposition;
}
@ -365,7 +367,7 @@ static void input_handle_event(struct input_dev *dev,
{
int disposition;
disposition = input_get_disposition(dev, type, code, value);
disposition = input_get_disposition(dev, type, code, &value);
if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
dev->event(dev, type, code, value);
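The signature change above turns value into an in/out parameter: input_get_disposition() may normalize the value while classifying the event, and writing it back through the pointer ensures dev->event() and the handlers both see the adjusted copy rather than the caller's original. A generic sketch of the pattern, with hypothetical names:

/* hypothetical classifier: may rewrite the value while classifying */
static int classify_event(unsigned int type, unsigned int code, int *pval)
{
        (void)code;
        if (type == 1)                  /* e.g. a key-like event */
                *pval = !!*pval;        /* collapse to 0/1 */
        return 1;                       /* non-zero: pass the event on */
}

static void handle_event(unsigned int type, unsigned int code, int value)
{
        int disposition = classify_event(type, code, &value);

        if (disposition) {
                /* `value` here is the normalized copy */
        }
}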


@ -215,6 +215,7 @@ static int keyscan_probe(struct platform_device *pdev)
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int keyscan_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@ -249,6 +250,7 @@ static int keyscan_resume(struct device *dev)
mutex_unlock(&input->mutex);
return retval;
}
#endif
static SIMPLE_DEV_PM_OPS(keyscan_dev_pm_ops, keyscan_suspend, keyscan_resume);
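The new guards fix a build warning rather than behavior: SIMPLE_DEV_PM_OPS() only references its callbacks when CONFIG_PM_SLEEP is enabled, so unguarded definitions become "defined but not used" on non-PM configurations. A minimal sketch of the idiom, with hypothetical names:

#include <linux/device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
        return 0;       /* quiesce the hardware here */
}

static int foo_resume(struct device *dev)
{
        return 0;       /* restore hardware state here */
}
#endif

/* expands to reference foo_suspend/foo_resume only when CONFIG_PM_SLEEP is set */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);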


@ -213,7 +213,7 @@ static struct platform_driver sirfsoc_pwrc_driver = {
module_platform_driver(sirfsoc_pwrc_driver);
MODULE_LICENSE("GPLv2");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Binghua Duan <Binghua.Duan@csr.com>, Xianglong Du <Xianglong.Du@csr.com>");
MODULE_DESCRIPTION("CSR Prima2 PWRC Driver");
MODULE_ALIAS("platform:sirfsoc-pwrc");


@ -132,7 +132,8 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
1232, 5710, 1156, 4696
},
{
(const char * const []){"LEN0034", "LEN0036", "LEN2004", NULL},
(const char * const []){"LEN0034", "LEN0036", "LEN2002",
"LEN2004", NULL},
1024, 5112, 2024, 4832
},
{
@ -168,7 +169,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
"LEN0049",
"LEN2000",
"LEN2001", /* Edge E431 */
"LEN2002",
"LEN2002", /* Edge E531 */
"LEN2003",
"LEN2004", /* L440 */
"LEN2005",


@ -401,6 +401,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"),
},
},
{
/* Acer Aspire 5710 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"),
},
},
{
/* Gericom Bellagio */
.matches = {


@ -1217,9 +1217,9 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
* a=(pi*r^2)/C.
*/
int a = data[5];
int x_res = input_abs_get_res(input, ABS_X);
int y_res = input_abs_get_res(input, ABS_Y);
width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE);
int x_res = input_abs_get_res(input, ABS_MT_POSITION_X);
int y_res = input_abs_get_res(input, ABS_MT_POSITION_Y);
width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE);
height = width * y_res / x_res;
}
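A hedged reconstruction of the geometry in the comment fragment above: if the reported value a approximates (pi * r^2) / C for a circular contact of radius r and a device constant C, then

    r = sqrt(a * C / pi)   and   width = 2 * r = 2 * sqrt(a * C / pi)

which is the code's 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE) if WACOM_CONTACT_AREA_SCALE plays the role of C / pi; height then rescales width by the Y/X resolution ratio. The switch to ABS_MT_POSITION_X/Y is consistent with the MT axes being the ones that actually carry the resolution on this multitouch interface, where a zero ABS_X resolution would poison the height calculation.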
@ -1587,7 +1587,7 @@ static void wacom_abs_set_axis(struct input_dev *input_dev,
input_abs_set_res(input_dev, ABS_X, features->x_resolution);
input_abs_set_res(input_dev, ABS_Y, features->y_resolution);
} else {
if (features->touch_max <= 2) {
if (features->touch_max == 1) {
input_set_abs_params(input_dev, ABS_X, 0,
features->x_max, features->x_fuzz, 0);
input_set_abs_params(input_dev, ABS_Y, 0,
@ -1815,14 +1815,8 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
case MTTPC:
case MTTPC_B:
case TABLETPC2FG:
if (features->device_type == BTN_TOOL_FINGER) {
unsigned int flags = INPUT_MT_DIRECT;
if (wacom_wac->features.type == TABLETPC2FG)
flags = 0;
input_mt_init_slots(input_dev, features->touch_max, flags);
}
if (features->device_type == BTN_TOOL_FINGER && features->touch_max > 1)
input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_DIRECT);
/* fall through */
case TABLETPC:
@ -1883,10 +1877,6 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
__set_bit(BTN_RIGHT, input_dev->keybit);
if (features->touch_max) {
/* touch interface */
unsigned int flags = INPUT_MT_POINTER;
__set_bit(INPUT_PROP_POINTER, input_dev->propbit);
if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
input_set_abs_params(input_dev,
ABS_MT_TOUCH_MAJOR,
@ -1894,12 +1884,8 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
input_set_abs_params(input_dev,
ABS_MT_TOUCH_MINOR,
0, features->y_max, 0, 0);
} else {
__set_bit(BTN_TOOL_FINGER, input_dev->keybit);
__set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
flags = 0;
}
input_mt_init_slots(input_dev, features->touch_max, flags);
input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_POINTER);
} else {
/* buttons/keys only interface */
__clear_bit(ABS_X, input_dev->absbit);


@ -359,9 +359,12 @@ static int titsc_parse_dt(struct platform_device *pdev,
*/
err = of_property_read_u32(node, "ti,coordinate-readouts",
&ts_dev->coordinate_readouts);
if (err < 0)
if (err < 0) {
dev_warn(&pdev->dev, "please use 'ti,coordinate-readouts' instead\n");
err = of_property_read_u32(node, "ti,coordiante-readouts",
&ts_dev->coordinate_readouts);
}
if (err < 0)
return err;


@ -2400,6 +2400,7 @@ static int gigaset_probe(struct usb_interface *interface,
error:
freeurbs(cs);
usb_set_intfdata(interface, NULL);
usb_put_dev(udev);
gigaset_freecs(cs);
return rc;
}


@ -278,7 +278,8 @@ static int c_can_plat_probe(struct platform_device *pdev)
break;
}
priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res);
priv->raminit_ctrlreg = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0)
dev_info(&pdev->dev, "control memory is not used for raminit\n");
else


@ -349,7 +349,8 @@ static int xgbe_probe(struct platform_device *pdev)
/* Calculate the number of Tx and Rx rings to be created */
pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
pdata->hw_feat.tx_ch_cnt);
if (netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count)) {
ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
if (ret) {
dev_err(dev, "error setting real tx queue count\n");
goto err_io;
}


@ -346,6 +346,7 @@ struct sw_tx_bd {
u8 flags;
/* Set on the first BD descriptor when there is a split BD */
#define BNX2X_TSO_SPLIT_BD (1<<0)
#define BNX2X_HAS_SECOND_PBD (1<<1)
};
struct sw_rx_page {


@ -227,6 +227,12 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
--nbd;
bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
/* Skip second parse bd... */
--nbd;
bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
}
/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
@ -3902,6 +3908,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* set encapsulation flag in start BD */
SET_FLAG(tx_start_bd->general_data,
ETH_TX_START_BD_TUNNEL_EXIST, 1);
tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
nbd++;
} else if (xmit_type & XMIT_CSUM) {
/* Set PBD in checksum offload case w/o encapsulation */


@ -416,6 +416,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
break;
case PORT_FIBRE:
case PORT_DA:
case PORT_NONE:
if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
bp->port.supported[1] & SUPPORTED_FIBRE)) {
DP(BNX2X_MSG_ETHTOOL,


@ -1147,6 +1147,11 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
goto out;
}
if (skb_padto(skb, ETH_ZLEN)) {
ret = NETDEV_TX_OK;
goto out;
}
/* set the SKB transmit checksum */
if (priv->desc_64b_en) {
ret = bcmgenet_put_tx_csum(dev, skb);
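Returning NETDEV_TX_OK on the padding failure above is deliberate under skb_padto()'s contract: the helper frees the skb when it cannot pad it to the minimum Ethernet frame length, so the driver must treat the packet as consumed rather than signal an error that could make the stack retry a freed buffer. A sketch of the idiom, assuming that standard contract:

#include <linux/if_ether.h>     /* ETH_ZLEN */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* fragment of a hypothetical ndo_start_xmit() implementation */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
        if (skb_padto(skb, ETH_ZLEN)) {
                /* skb_padto() already freed the skb; the packet is
                 * consumed either way, so report success */
                return NETDEV_TX_OK;
        }

        /* ... map and queue the now >= 60-byte frame on dev's ring ... */
        return NETDEV_TX_OK;
}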


@ -610,6 +610,13 @@ static int __vnet_tx_trigger(struct vnet_port *port)
return err;
}
static inline bool port_is_up(struct vnet_port *vnet)
{
struct vio_driver_state *vio = &vnet->vio;
return !!(vio->hs_state & VIO_HS_COMPLETE);
}
struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
{
unsigned int hash = vnet_hashfn(skb->data);
@ -617,14 +624,19 @@ struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
struct vnet_port *port;
hlist_for_each_entry(port, hp, hash) {
if (!port_is_up(port))
continue;
if (ether_addr_equal(port->raddr, skb->data))
return port;
}
port = NULL;
if (!list_empty(&vp->port_list))
port = list_entry(vp->port_list.next, struct vnet_port, list);
return port;
list_for_each_entry(port, &vp->port_list, list) {
if (!port->switch_port)
continue;
if (!port_is_up(port))
continue;
return port;
}
return NULL;
}
struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb)


@ -378,8 +378,10 @@ static int netvsc_init_buf(struct hv_device *device)
net_device->send_section_map =
kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL);
if (net_device->send_section_map == NULL)
if (net_device->send_section_map == NULL) {
ret = -ENOMEM;
goto cleanup;
}
goto exit;


@ -255,6 +255,7 @@ int mdiobus_register(struct mii_bus *bus)
bus->dev.parent = bus->parent;
bus->dev.class = &mdio_bus_class;
bus->dev.driver = bus->parent->driver;
bus->dev.groups = NULL;
dev_set_name(&bus->dev, "%s", bus->id);


@ -355,7 +355,7 @@ int phy_device_register(struct phy_device *phydev)
phydev->bus->phy_map[phydev->addr] = phydev;
/* Run all of the fixups for this PHY */
err = phy_init_hw(phydev);
err = phy_scan_fixups(phydev);
if (err) {
pr_err("PHY %d failed to initialize\n", phydev->addr);
goto out;
@ -575,6 +575,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
u32 flags, phy_interface_t interface)
{
struct device *d = &phydev->dev;
struct module *bus_module;
int err;
/* Assume that if there is no driver, that it doesn't
@ -599,6 +600,14 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
return -EBUSY;
}
/* Increment the bus module reference count */
bus_module = phydev->bus->dev.driver ?
phydev->bus->dev.driver->owner : NULL;
if (!try_module_get(bus_module)) {
dev_err(&dev->dev, "failed to get the bus module\n");
return -EIO;
}
phydev->attached_dev = dev;
dev->phydev = phydev;
@ -664,6 +673,10 @@ EXPORT_SYMBOL(phy_attach);
void phy_detach(struct phy_device *phydev)
{
int i;
if (phydev->bus->dev.driver)
module_put(phydev->bus->dev.driver->owner);
phydev->attached_dev->phydev = NULL;
phydev->attached_dev = NULL;
phy_suspend(phydev);


@ -341,6 +341,22 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
usb_driver_release_interface(driver, info->data);
return -ENODEV;
}
/* Some devices don't initialise properly. In particular
* the packet filter is not reset. There are devices that
* don't reset all the way. So the packet filter should
* be set to a sane initial value.
*/
usb_control_msg(dev->udev,
usb_sndctrlpipe(dev->udev, 0),
USB_CDC_SET_ETHERNET_PACKET_FILTER,
USB_TYPE_CLASS | USB_RECIP_INTERFACE,
USB_CDC_PACKET_TYPE_ALL_MULTICAST | USB_CDC_PACKET_TYPE_DIRECTED | USB_CDC_PACKET_TYPE_BROADCAST,
intf->cur_altsetting->desc.bInterfaceNumber,
NULL,
0,
USB_CTRL_SET_TIMEOUT
);
return 0;
bad_desc:


@ -287,7 +287,7 @@
/* USB_DEV_STAT */
#define STAT_SPEED_MASK 0x0006
#define STAT_SPEED_HIGH 0x0000
#define STAT_SPEED_FULL 0x0001
#define STAT_SPEED_FULL 0x0002
/* USB_TX_AGG */
#define TX_AGG_MAX_THRESHOLD 0x03
@ -2300,9 +2300,8 @@ static void r8152b_exit_oob(struct r8152 *tp)
/* rx share fifo credit full threshold */
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_NORMAL);
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_DEV_STAT);
ocp_data &= STAT_SPEED_MASK;
if (ocp_data == STAT_SPEED_FULL) {
if (tp->udev->speed == USB_SPEED_FULL ||
tp->udev->speed == USB_SPEED_LOW) {
/* rx share fifo credit near full threshold */
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1,
RXFIFO_THR2_FULL);
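The define correction above is a plain bitmask consistency point: STAT_SPEED_MASK is 0x0006 (bits 1-2), and any value ANDed with it yields 0x0000, 0x0002, 0x0004 or 0x0006 — never the old 0x0001 — so the full-speed branch was unreachable. The second hunk then sidesteps the register entirely by asking USB core for the negotiated speed. A small sketch of the corrected test:

#define STAT_SPEED_MASK 0x0006
#define STAT_SPEED_FULL 0x0002  /* corrected: must lie inside the mask */

/* (dev_stat & 0x0006) can never equal 0x0001, hence the old dead branch */
static int is_full_speed(unsigned int dev_stat)
{
        return (dev_stat & STAT_SPEED_MASK) == STAT_SPEED_FULL;
}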


@ -340,7 +340,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
ndm->ndm_state = fdb->state;
ndm->ndm_ifindex = vxlan->dev->ifindex;
ndm->ndm_flags = fdb->flags;
ndm->ndm_type = NDA_DST;
ndm->ndm_type = RTN_UNICAST;
if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
goto nla_put_failure;


@ -26,6 +26,54 @@
#include <asm/setup.h> /* for COMMAND_LINE_SIZE */
#include <asm/page.h>
/*
* of_fdt_limit_memory - limit the number of regions in the /memory node
* @limit: maximum entries
*
* Adjust the flattened device tree to have at most 'limit' number of
* memory entries in the /memory node. This function may be called
* any time after initial_boot_params is set.
*/
void of_fdt_limit_memory(int limit)
{
int memory;
int len;
const void *val;
int nr_address_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
int nr_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
const uint32_t *addr_prop;
const uint32_t *size_prop;
int root_offset;
int cell_size;
root_offset = fdt_path_offset(initial_boot_params, "/");
if (root_offset < 0)
return;
addr_prop = fdt_getprop(initial_boot_params, root_offset,
"#address-cells", NULL);
if (addr_prop)
nr_address_cells = fdt32_to_cpu(*addr_prop);
size_prop = fdt_getprop(initial_boot_params, root_offset,
"#size-cells", NULL);
if (size_prop)
nr_size_cells = fdt32_to_cpu(*size_prop);
cell_size = sizeof(uint32_t)*(nr_address_cells + nr_size_cells);
memory = fdt_path_offset(initial_boot_params, "/memory");
if (memory > 0) {
val = fdt_getprop(initial_boot_params, memory, "reg", &len);
if (len > limit*cell_size) {
len = limit*cell_size;
pr_debug("Limiting number of entries to %d\n", limit);
fdt_setprop(initial_boot_params, memory, "reg", val,
len);
}
}
}
/**
* of_fdt_is_compatible - Return true if given node from the given blob has
* compat in its compatible list
@ -937,7 +985,7 @@ int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
}
#endif
bool __init early_init_dt_scan(void *params)
bool __init early_init_dt_verify(void *params)
{
if (!params)
return false;
@ -951,6 +999,12 @@ bool __init early_init_dt_scan(void *params)
return false;
}
return true;
}
void __init early_init_dt_scan_nodes(void)
{
/* Retrieve various information from the /chosen node */
of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
@ -959,7 +1013,17 @@ bool __init early_init_dt_scan(void *params)
/* Setup memory, calling early_init_dt_add_memory_arch */
of_scan_flat_dt(early_init_dt_scan_memory, NULL);
}
bool __init early_init_dt_scan(void *params)
{
bool status;
status = early_init_dt_verify(params);
if (!status)
return false;
early_init_dt_scan_nodes();
return true;
}
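The split gives architectures a hook between validating and consuming the blob: early_init_dt_verify() checks the header and records the pointer, after which the FDT can still be edited in place — for example with of_fdt_limit_memory() from the first hunk in this file — before early_init_dt_scan_nodes() reads /chosen, the root cells, and /memory. A hypothetical caller sketch, assuming that intent:

#include <linux/of_fdt.h>

/* hypothetical arch boot path, not part of the patch itself */
static void __init example_setup_fdt(void *fdt)
{
        if (!early_init_dt_verify(fdt))
                return;                 /* missing or malformed blob */

        of_fdt_limit_memory(2);         /* e.g. work around a bootloader bug */

        early_init_dt_scan_nodes();     /* /chosen, root cells, /memory */
}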


@ -5,6 +5,12 @@
# Parport configuration.
#
config ARCH_MIGHT_HAVE_PC_PARPORT
bool
help
Select this config option from the architecture Kconfig if
the architecture might have PC parallel port hardware.
menuconfig PARPORT
tristate "Parallel port support"
depends on HAS_IOMEM
@ -31,12 +37,6 @@ menuconfig PARPORT
If unsure, say Y.
config ARCH_MIGHT_HAVE_PC_PARPORT
bool
help
Select this config option from the architecture Kconfig if
the architecture might have PC parallel port hardware.
if PARPORT
config PARPORT_PC


@ -1431,7 +1431,7 @@ static void st_gpio_irqmux_handler(unsigned irq, struct irq_desc *desc)
status = readl(info->irqmux_base);
for_each_set_bit(n, &status, ST_GPIO_PINS_PER_BANK)
for_each_set_bit(n, &status, info->nbanks)
__gpio_irq_handler(&info->banks[n]);
chained_irq_exit(chip, desc);


@ -1195,18 +1195,20 @@ static int gnttab_expand(unsigned int req_entries)
int gnttab_init(void)
{
int i;
unsigned long max_nr_grant_frames;
unsigned int max_nr_glist_frames, nr_glist_frames;
unsigned int nr_init_grefs;
int ret;
gnttab_request_version();
max_nr_grant_frames = gnttab_max_grant_frames();
nr_grant_frames = 1;
/* Determine the maximum number of frames required for the
* grant reference free list on the current hypervisor.
*/
BUG_ON(grefs_per_grant_frame == 0);
max_nr_glist_frames = (gnttab_max_grant_frames() *
max_nr_glist_frames = (max_nr_grant_frames *
grefs_per_grant_frame / RPP);
gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
@ -1223,6 +1225,11 @@ int gnttab_init(void)
}
}
ret = arch_gnttab_init(max_nr_grant_frames,
nr_status_frames(max_nr_grant_frames));
if (ret < 0)
goto ini_nomem;
if (gnttab_setup() < 0) {
ret = -ENODEV;
goto ini_nomem;

Some files were not shown because too many files have changed in this diff.