Merge airlied/drm-next into drm-misc-next

Backmerging in order to pull in vmwgfx [1] and the new synopsys media
format [2] reqs.

[1]- http://patchwork.freedesktop.org/patch/msgid/20170331233255.GA38850@syeh-m02
[2]- http://patchwork.freedesktop.org/patch/msgid/20170403163544.kcw5kk52tgku5xua@art_vandelay

Signed-off-by: Sean Paul <seanpaul@chromium.org>
commit c829a33253
@@ -45,7 +45,7 @@ The following clocks are available:
    - 1 15	SATA
    - 1 16	SATA USB
    - 1 17	Main
-   - 1 18	SD/MMC
+   - 1 18	SD/MMC/GOP
    - 1 21	Slow IO (SPI, NOR, BootROM, I2C, UART)
    - 1 22	USB3H0
    - 1 23	USB3H1
@@ -65,7 +65,7 @@ Required properties:
 	"cpm-audio", "cpm-communit", "cpm-nand", "cpm-ppv2", "cpm-sdio",
 	"cpm-mg-domain", "cpm-mg-core", "cpm-xor1", "cpm-xor0", "cpm-gop-dp", "none",
 	"cpm-pcie_x10", "cpm-pcie_x11", "cpm-pcie_x4", "cpm-pcie-xor", "cpm-sata",
-	"cpm-sata-usb", "cpm-main", "cpm-sd-mmc", "none", "none", "cpm-slow-io",
+	"cpm-sata-usb", "cpm-main", "cpm-sd-mmc-gop", "none", "none", "cpm-slow-io",
 	"cpm-usb3h0", "cpm-usb3h1", "cpm-usb3dev", "cpm-eip150", "cpm-eip197";
 
 Example:
@@ -78,6 +78,6 @@ Example:
 	gate-clock-output-names = "cpm-audio", "cpm-communit", "cpm-nand", "cpm-ppv2", "cpm-sdio",
 		"cpm-mg-domain", "cpm-mg-core", "cpm-xor1", "cpm-xor0", "cpm-gop-dp", "none",
 		"cpm-pcie_x10", "cpm-pcie_x11", "cpm-pcie_x4", "cpm-pcie-xor", "cpm-sata",
-		"cpm-sata-usb", "cpm-main", "cpm-sd-mmc", "none", "none", "cpm-slow-io",
+		"cpm-sata-usb", "cpm-main", "cpm-sd-mmc-gop", "none", "none", "cpm-slow-io",
 		"cpm-usb3h0", "cpm-usb3h1", "cpm-usb3dev", "cpm-eip150", "cpm-eip197";
 	};
@@ -4,7 +4,6 @@ Required properties:
 - compatible: value should be one of the following
 		"samsung,exynos3250-mipi-dsi" /* for Exynos3250/3472 SoCs */
 		"samsung,exynos4210-mipi-dsi" /* for Exynos4 SoCs */
-		"samsung,exynos4415-mipi-dsi" /* for Exynos4415 SoC */
 		"samsung,exynos5410-mipi-dsi" /* for Exynos5410/5420/5440 SoCs */
 		"samsung,exynos5422-mipi-dsi" /* for Exynos5422/5800 SoCs */
 		"samsung,exynos5433-mipi-dsi" /* for Exynos5433 SoCs */
@@ -11,7 +11,6 @@ Required properties:
 		"samsung,s5pv210-fimd"; /* for S5PV210 SoC */
 		"samsung,exynos3250-fimd"; /* for Exynos3250/3472 SoCs */
 		"samsung,exynos4210-fimd"; /* for Exynos4 SoCs */
-		"samsung,exynos4415-fimd"; /* for Exynos4415 SoC */
 		"samsung,exynos5250-fimd"; /* for Exynos5250 SoCs */
 		"samsung,exynos5420-fimd"; /* for Exynos5420/5422/5800 SoCs */
 
@@ -9,6 +9,7 @@ Optional properties:
 - enable-gpios: panel enable gpio
+- reset-gpios: GPIO to control the RESET pin
 - vcc-supply: phandle of regulator that will be used to enable power to the display
 - backlight: phandle of the backlight device
 
 Required nodes:
 - "panel-timing" containing video timings
@@ -22,6 +23,8 @@ lcd0: display@0 {
 	compatible = "samsung,lte430wq-f0c", "panel-dpi";
 	label = "lcd";
 
+	backlight = <&backlight>;
+
 	port {
 		lcd_in: endpoint {
 			remote-endpoint = <&dpi_out>;
@@ -13,7 +13,7 @@ Required Properties:
 	- "rockchip,rk2928-dw-mshc": for Rockchip RK2928 and following,
 							before RK3288
 	- "rockchip,rk3288-dw-mshc": for Rockchip RK3288
-	- "rockchip,rk1108-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK1108
+	- "rockchip,rv1108-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RV1108
 	- "rockchip,rk3036-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3036
 	- "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3368
 	- "rockchip,rk3399-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3399
@@ -1,39 +0,0 @@
-Broadcom USB3 phy binding for northstar plus SoC
-The USB3 phy is internal to the SoC and is accessed using mdio interface.
-
-Required mdio bus properties:
-- reg: Should be 0x0 for SoC internal USB3 phy
-- #address-cells: must be 1
-- #size-cells: must be 0
-
-Required USB3 PHY properties:
-- compatible: should be "brcm,nsp-usb3-phy"
-- reg: USB3 Phy address on SoC internal MDIO bus and it should be 0x10.
-- usb3-ctrl-syscon: handler of syscon node defining physical address
-  of usb3 control register.
-- #phy-cells: must be 0
-
-Required usb3 control properties:
-- compatible: should be "brcm,nsp-usb3-ctrl"
-- reg: offset and length of the control registers
-
-Example:
-
-	mdio@0 {
-		reg = <0x0>;
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		usb3_phy: usb-phy@10 {
-			compatible = "brcm,nsp-usb3-phy";
-			reg = <0x10>;
-			usb3-ctrl-syscon = <&usb3_ctrl>;
-			#phy-cells = <0>;
-			status = "disabled";
-		};
-	};
-
-	usb3_ctrl: syscon@104408 {
-		compatible = "brcm,nsp-usb3-ctrl", "syscon";
-		reg = <0x104408 0x3fc>;
-	};
@@ -20,3 +20,8 @@ Index 1: The output gpio for enabling Vbus output from the device to the otg
 Index 2: The output gpio for muxing of the data pins between the USB host and
 	 the USB peripheral controller, write 1 to mux to the peripheral
 	 controller
+
+There is a mapping between indices and GPIO connection IDs as follows
+	id	index 0
+	vbus	index 1
+	mux	index 2
@@ -18,8 +18,8 @@ because gcc versions 4.5 and 4.6 are compiled by a C compiler,
 gcc-4.7 can be compiled by a C or a C++ compiler,
 and versions 4.8+ can only be compiled by a C++ compiler.
 
-Currently the GCC plugin infrastructure supports only the x86, arm and arm64
-architectures.
+Currently the GCC plugin infrastructure supports only the x86, arm, arm64 and
+powerpc architectures.
 
 This infrastructure was ported from grsecurity [6] and PaX [7].
 
MAINTAINERS
@@ -3216,7 +3216,6 @@ F:	drivers/platform/chrome/
 
 CISCO VIC ETHERNET NIC DRIVER
 M:	Christian Benvenuti <benve@cisco.com>
-M:	Sujith Sankar <ssujith@cisco.com>
 M:	Govindarajulu Varadarajan <_govind@gmx.com>
 M:	Neel Patel <neepatel@cisco.com>
 S:	Supported
@@ -7781,13 +7780,6 @@ F:	include/net/mac80211.h
 F:	net/mac80211/
 F:	drivers/net/wireless/mac80211_hwsim.[ch]
 
-MACVLAN DRIVER
-M:	Patrick McHardy <kaber@trash.net>
-L:	netdev@vger.kernel.org
-S:	Maintained
-F:	drivers/net/macvlan.c
-F:	include/linux/if_macvlan.h
-
 MAILBOX API
 M:	Jassi Brar <jassisinghbrar@gmail.com>
 L:	linux-kernel@vger.kernel.org
@@ -7860,6 +7852,8 @@ F:	drivers/net/ethernet/marvell/mvneta.*
 MARVELL MWIFIEX WIRELESS DRIVER
 M:	Amitkumar Karwar <akarwar@marvell.com>
 M:	Nishant Sarmukadam <nishants@marvell.com>
+M:	Ganapathi Bhat <gbhat@marvell.com>
+M:	Xinming Hu <huxm@marvell.com>
 L:	linux-wireless@vger.kernel.org
 S:	Maintained
 F:	drivers/net/wireless/marvell/mwifiex/
@@ -13406,14 +13400,6 @@ W:	https://linuxtv.org
 S:	Maintained
 F:	drivers/media/platform/vivid/*
 
-VLAN (802.1Q)
-M:	Patrick McHardy <kaber@trash.net>
-L:	netdev@vger.kernel.org
-S:	Maintained
-F:	drivers/net/macvlan.c
-F:	include/linux/if_*vlan.h
-F:	net/8021q/
-
 VLYNQ BUS
 M:	Florian Fainelli <f.fainelli@gmail.com>
 L:	openwrt-devel@lists.openwrt.org (subscribers-only)
Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
@@ -63,14 +63,14 @@ button@0 {
 		label = "home";
 		linux,code = <KEY_HOME>;
 		gpios = <&gpio3 7 GPIO_ACTIVE_HIGH>;
-		gpio-key,wakeup;
+		wakeup-source;
 	};
 
 	button@1 {
 		label = "menu";
 		linux,code = <KEY_MENU>;
 		gpios = <&gpio3 8 GPIO_ACTIVE_HIGH>;
-		gpio-key,wakeup;
+		wakeup-source;
 	};
 
 };
@@ -315,6 +315,13 @@ extcon_usb2: tps659038_usb {
 			/* ID & VBUS GPIOs provided in board dts */
 		};
 	};
+
+	tpic2810: tpic2810@60 {
+		compatible = "ti,tpic2810";
+		reg = <0x60>;
+		gpio-controller;
+		#gpio-cells = <2>;
+	};
 };
 
 &mcspi3 {
@@ -330,13 +337,6 @@ sn65hvs882: sn65hvs882@0 {
 		spi-max-frequency = <1000000>;
 		spi-cpol;
 	};
-
-	tpic2810: tpic2810@60 {
-		compatible = "ti,tpic2810";
-		reg = <0x60>;
-		gpio-controller;
-		#gpio-cells = <2>;
-	};
 };
 
 &uart3 {
@@ -66,14 +66,14 @@ scu@20000 {
 		timer@20200 {
 			compatible = "arm,cortex-a9-global-timer";
 			reg = <0x20200 0x100>;
-			interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
 			clocks = <&periph_clk>;
 		};
 
 		local-timer@20600 {
 			compatible = "arm,cortex-a9-twd-timer";
 			reg = <0x20600 0x100>;
-			interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_PPI 13 IRQ_TYPE_EDGE_RISING>;
 			clocks = <&periph_clk>;
 		};
 
@@ -48,15 +48,14 @@ chosen {
 	};
 
 	memory {
-		reg = <0x00000000 0x10000000>;
+		reg = <0x80000000 0x10000000>;
 	};
 };
 
 &uart0 {
 	clock-frequency = <62499840>;
 	status = "okay";
 };
 
 &uart1 {
 	clock-frequency = <62499840>;
 	status = "okay";
 };
@@ -55,6 +55,7 @@ memory {
 	gpio-restart {
 		compatible = "gpio-restart";
 		gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+		open-source;
 		priority = <200>;
 	};
 };

@@ -55,6 +55,7 @@ memory {
 	gpio-restart {
 		compatible = "gpio-restart";
 		gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+		open-source;
 		priority = <200>;
 	};
 };

@@ -55,6 +55,7 @@ memory {
 	gpio-restart {
 		compatible = "gpio-restart";
 		gpios = <&gpioa 31 GPIO_ACTIVE_LOW>;
+		open-source;
 		priority = <200>;
 	};
 };

@@ -55,6 +55,7 @@ memory {
 	gpio-restart {
 		compatible = "gpio-restart";
 		gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+		open-source;
 		priority = <200>;
 	};
 };

@@ -55,6 +55,7 @@ memory {
 	gpio-restart {
 		compatible = "gpio-restart";
 		gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+		open-source;
 		priority = <200>;
 	};
 };

@@ -55,6 +55,7 @@ memory {
 	gpio-restart {
 		compatible = "gpio-restart";
 		gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+		open-source;
 		priority = <200>;
 	};
 };

@@ -55,6 +55,7 @@ memory {
 	gpio-restart {
 		compatible = "gpio-restart";
 		gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+		open-source;
 		priority = <200>;
 	};
 };
@@ -121,11 +121,6 @@ reg_bt: regulator-bt {
 	};
 };
 
-&cpu0 {
-	arm-supply = <&sw1a_reg>;
-	soc-supply = <&sw1c_reg>;
-};
-
 &fec1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_enet1>;
@@ -266,7 +266,7 @@ ep@15 {
 			};
 
 			usb1: ohci@00400000 {
-				compatible = "atmel,sama5d2-ohci", "usb-ohci";
+				compatible = "atmel,at91rm9200-ohci", "usb-ohci";
 				reg = <0x00400000 0x100000>;
 				interrupts = <41 IRQ_TYPE_LEVEL_HIGH 2>;
 				clocks = <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
@@ -14,6 +14,7 @@
 #include <dt-bindings/mfd/dbx500-prcmu.h>
 #include <dt-bindings/arm/ux500_pm_domains.h>
 #include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/clock/ste-ab8500.h>
 #include "skeleton.dtsi"
 
 / {
@@ -603,6 +604,11 @@ ab8500 {
 			interrupt-controller;
 			#interrupt-cells = <2>;
 
+			ab8500_clock: clock-controller {
+				compatible = "stericsson,ab8500-clk";
+				#clock-cells = <1>;
+			};
+
 			ab8500_gpio: ab8500-gpio {
 				compatible = "stericsson,ab8500-gpio";
 				gpio-controller;
@@ -686,6 +692,8 @@ ab8500-sysctrl {
 
 			ab8500-pwm {
 				compatible = "stericsson,ab8500-pwm";
+				clocks = <&ab8500_clock AB8500_SYSCLK_INT>;
+				clock-names = "intclk";
 			};
 
 			ab8500-debugfs {
@@ -700,6 +708,9 @@ codec: ab8500-codec {
 				V-AMIC2-supply = <&ab8500_ldo_anamic2_reg>;
 				V-DMIC-supply  = <&ab8500_ldo_dmic_reg>;
 
+				clocks = <&ab8500_clock AB8500_SYSCLK_AUDIO>;
+				clock-names = "audioclk";
+
 				stericsson,earpeice-cmv = <950>; /* Units in mV. */
 			};
 
@@ -1095,6 +1106,14 @@ sdi5_per3@80008000 {
 			status = "disabled";
 		};
 
+		sound {
+			compatible = "stericsson,snd-soc-mop500";
+			stericsson,cpu-dai = <&msp1 &msp3>;
+			stericsson,audio-codec = <&codec>;
+			clocks = <&prcmu_clk PRCMU_SYSCLK>, <&ab8500_clock AB8500_SYSCLK_ULP>, <&ab8500_clock AB8500_SYSCLK_INT>;
+			clock-names = "sysclk", "ulpclk", "intclk";
+		};
+
 		msp0: msp@80123000 {
 			compatible = "stericsson,ux500-msp-i2s";
 			reg = <0x80123000 0x1000>;
@@ -186,15 +186,6 @@ sdi4_per2@80114000 {
 		status = "okay";
 	};
 
-	sound {
-		compatible = "stericsson,snd-soc-mop500";
-
-		stericsson,cpu-dai = <&msp1 &msp3>;
-		stericsson,audio-codec = <&codec>;
-		clocks = <&prcmu_clk PRCMU_SYSCLK>;
-		clock-names = "sysclk";
-	};
-
 	msp0: msp@80123000 {
 		pinctrl-names = "default";
 		pinctrl-0 = <&msp0_default_mode>;
@@ -159,15 +159,6 @@ gpio@8011e080 {
 			   "", "", "", "", "", "", "", "";
 	};
 
-	sound {
-		compatible = "stericsson,snd-soc-mop500";
-
-		stericsson,cpu-dai = <&msp1 &msp3>;
-		stericsson,audio-codec = <&codec>;
-		clocks = <&prcmu_clk PRCMU_SYSCLK>;
-		clock-names = "sysclk";
-	};
-
 	msp0: msp@80123000 {
 		pinctrl-names = "default";
 		pinctrl-0 = <&msp0_default_mode>;
@@ -167,7 +167,7 @@ port8: port@8 {
 				reg = <8>;
 				label = "cpu";
 				ethernet = <&gmac>;
-				phy-mode = "rgmii";
+				phy-mode = "rgmii-txid";
 				fixed-link {
 					speed = <1000>;
 					full-duplex;
@@ -495,7 +495,7 @@ mali: gpu@1c40000 {
 			resets = <&ccu RST_BUS_GPU>;
 
 			assigned-clocks = <&ccu CLK_GPU>;
-			assigned-clock-rates = <408000000>;
+			assigned-clock-rates = <384000000>;
 		};
 
 		gic: interrupt-controller@01c81000 {
@@ -50,8 +50,6 @@ aliases {
 
 	backlight: backlight {
 		compatible = "pwm-backlight";
-		pinctrl-names = "default";
-		pinctrl-0 = <&bl_en_pin>;
 		pwms = <&pwm 0 50000 PWM_POLARITY_INVERTED>;
 		brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
 		default-brightness-level = <8>;
@@ -93,11 +91,6 @@ &mmc0 {
 };
 
 &pio {
-	bl_en_pin: bl_en_pin@0 {
-		pins = "PH6";
-		function = "gpio_in";
-	};
-
 	mmc0_cd_pin: mmc0_cd_pin@0 {
 		pins = "PB4";
 		function = "gpio_in";
@@ -188,6 +188,7 @@ CONFIG_WL12XX=m
 CONFIG_WL18XX=m
 CONFIG_WLCORE_SPI=m
 CONFIG_WLCORE_SDIO=m
+CONFIG_INPUT_MOUSEDEV=m
 CONFIG_INPUT_JOYDEV=m
 CONFIG_INPUT_EVDEV=m
 CONFIG_KEYBOARD_ATKBD=m
@@ -289,6 +289,22 @@ static void at91_ddr_standby(void)
 		at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
 }
 
+static void sama5d3_ddr_standby(void)
+{
+	u32 lpr0;
+	u32 saved_lpr0;
+
+	saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
+	lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
+	lpr0 |= AT91_DDRSDRC_LPCB_POWER_DOWN;
+
+	at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
+
+	cpu_do_idle();
+
+	at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
+}
+
 /* We manage both DDRAM/SDRAM controllers, we need more than one value to
  * remember.
  */
@@ -323,7 +339,7 @@ static const struct of_device_id const ramc_ids[] __initconst = {
 	{ .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby },
 	{ .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby },
 	{ .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby },
-	{ .compatible = "atmel,sama5d3-ddramc", .data = at91_ddr_standby },
+	{ .compatible = "atmel,sama5d3-ddramc", .data = sama5d3_ddr_standby },
 	{ /*sentinel*/ }
 };
 
@@ -241,6 +241,3 @@ obj-$(CONFIG_MACH_OMAP2_TUSB6010)	+= usb-tusb6010.o
 
 onenand-$(CONFIG_MTD_ONENAND_OMAP2)	:= gpmc-onenand.o
 obj-y					+= $(onenand-m) $(onenand-y)
-
-nand-$(CONFIG_MTD_NAND_OMAP2)		:= gpmc-nand.o
-obj-y					+= $(nand-m) $(nand-y)
@@ -1,154 +0,0 @@
-/*
- * gpmc-nand.c
- *
- * Copyright (C) 2009 Texas Instruments
- * Vimal Singh <vimalsingh@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/omap-gpmc.h>
-#include <linux/mtd/nand.h>
-#include <linux/platform_data/mtd-nand-omap2.h>
-
-#include <asm/mach/flash.h>
-
-#include "soc.h"
-
-/* minimum size for IO mapping */
-#define	NAND_IO_SIZE	4
-
-static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt)
-{
-	/* platforms which support all ECC schemes */
-	if (soc_is_am33xx() || soc_is_am43xx() || cpu_is_omap44xx() ||
-		 soc_is_omap54xx() || soc_is_dra7xx())
-		return 1;
-
-	if (ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW ||
-		ecc_opt == OMAP_ECC_BCH8_CODE_HW_DETECTION_SW) {
-		if (cpu_is_omap24xx())
-			return 0;
-		else if (cpu_is_omap3630() && (GET_OMAP_REVISION() == 0))
-			return 0;
-		else
-			return 1;
-	}
-
-	/* OMAP3xxx do not have ELM engine, so cannot support ECC schemes
-	 * which require H/W based ECC error detection */
-	if ((cpu_is_omap34xx() || cpu_is_omap3630()) &&
-	    ((ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
-		 (ecc_opt == OMAP_ECC_BCH8_CODE_HW)))
-		return 0;
-
-	/* legacy platforms support only HAM1 (1-bit Hamming) ECC scheme */
-	if (ecc_opt == OMAP_ECC_HAM1_CODE_HW ||
-	    ecc_opt == OMAP_ECC_HAM1_CODE_SW)
-		return 1;
-	else
-		return 0;
-}
-
-/* This function will go away once the device-tree convertion is complete */
-static void gpmc_set_legacy(struct omap_nand_platform_data *gpmc_nand_data,
-			    struct gpmc_settings *s)
-{
-	/* Enable RD PIN Monitoring Reg */
-	if (gpmc_nand_data->dev_ready) {
-		s->wait_on_read = true;
-		s->wait_on_write = true;
-	}
-
-	if (gpmc_nand_data->devsize == NAND_BUSWIDTH_16)
-		s->device_width = GPMC_DEVWIDTH_16BIT;
-	else
-		s->device_width = GPMC_DEVWIDTH_8BIT;
-}
-
-int gpmc_nand_init(struct omap_nand_platform_data *gpmc_nand_data,
-		   struct gpmc_timings *gpmc_t)
-{
-	int err	= 0;
-	struct gpmc_settings s;
-	struct platform_device *pdev;
-	struct resource gpmc_nand_res[] = {
-		{ .flags = IORESOURCE_MEM, },
-		{ .flags = IORESOURCE_IRQ, },
-		{ .flags = IORESOURCE_IRQ, },
-	};
-
-	BUG_ON(gpmc_nand_data->cs >= GPMC_CS_NUM);
-
-	err = gpmc_cs_request(gpmc_nand_data->cs, NAND_IO_SIZE,
-			      (unsigned long *)&gpmc_nand_res[0].start);
-	if (err < 0) {
-		pr_err("omap2-gpmc: Cannot request GPMC CS %d, error %d\n",
-		       gpmc_nand_data->cs, err);
-		return err;
-	}
-	gpmc_nand_res[0].end = gpmc_nand_res[0].start + NAND_IO_SIZE - 1;
-	gpmc_nand_res[1].start = gpmc_get_client_irq(GPMC_IRQ_FIFOEVENTENABLE);
-	gpmc_nand_res[2].start = gpmc_get_client_irq(GPMC_IRQ_COUNT_EVENT);
-
-	memset(&s, 0, sizeof(struct gpmc_settings));
-	gpmc_set_legacy(gpmc_nand_data, &s);
-
-	s.device_nand = true;
-
-	if (gpmc_t) {
-		err = gpmc_cs_set_timings(gpmc_nand_data->cs, gpmc_t, &s);
-		if (err < 0) {
-			pr_err("omap2-gpmc: Unable to set gpmc timings: %d\n",
-			       err);
-			return err;
-		}
-	}
-
-	err = gpmc_cs_program_settings(gpmc_nand_data->cs, &s);
-	if (err < 0)
-		goto out_free_cs;
-
-	err = gpmc_configure(GPMC_CONFIG_WP, 0);
-	if (err < 0)
-		goto out_free_cs;
-
-	if (!gpmc_hwecc_bch_capable(gpmc_nand_data->ecc_opt)) {
-		pr_err("omap2-nand: Unsupported NAND ECC scheme selected\n");
-		err = -EINVAL;
-		goto out_free_cs;
-	}
-
-
-	pdev = platform_device_alloc("omap2-nand", gpmc_nand_data->cs);
-	if (pdev) {
-		err = platform_device_add_resources(pdev, gpmc_nand_res,
-						    ARRAY_SIZE(gpmc_nand_res));
-		if (!err)
-			pdev->dev.platform_data = gpmc_nand_data;
-	} else {
-		err = -ENOMEM;
-	}
-	if (err)
-		goto out_free_pdev;
-
-	err = platform_device_add(pdev);
-	if (err) {
-		dev_err(&pdev->dev, "Unable to register NAND device\n");
-		goto out_free_pdev;
-	}
-
-	return 0;
-
-out_free_pdev:
-	platform_device_put(pdev);
-out_free_cs:
-	gpmc_cs_free(gpmc_nand_data->cs);
-
-	return err;
-}
@@ -367,7 +367,7 @@ static int gpmc_onenand_setup(void __iomem *onenand_base, int *freq_ptr)
 	return ret;
 }
 
-void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
+int gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
 {
 	int err;
 	struct device *dev = &gpmc_onenand_device.dev;
@@ -393,15 +393,17 @@ void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
 	if (err < 0) {
 		dev_err(dev, "Cannot request GPMC CS %d, error %d\n",
 			gpmc_onenand_data->cs, err);
-		return;
+		return err;
 	}
 
 	gpmc_onenand_resource.end = gpmc_onenand_resource.start +
 							ONENAND_IO_SIZE - 1;
 
-	if (platform_device_register(&gpmc_onenand_device) < 0) {
+	err = platform_device_register(&gpmc_onenand_device);
+	if (err) {
 		dev_err(dev, "Unable to register OneNAND device\n");
 		gpmc_cs_free(gpmc_onenand_data->cs);
-		return;
 	}
+
+	return err;
 }
@@ -17,6 +17,7 @@
 
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <asm/assembler.h>
 
 #include "omap44xx.h"
 
@@ -66,7 +67,7 @@ wait_2:	ldr	r2, =AUX_CORE_BOOT0_PA	@ read from AuxCoreBoot0
 	cmp	r0, r4
 	bne	wait_2
 	ldr	r12, =API_HYP_ENTRY
-	adr	r0, hyp_boot
+	badr	r0, hyp_boot
 	smc	#0
 hyp_boot:
 	b	omap_secondary_startup
@@ -2112,11 +2112,20 @@ static struct omap_hwmod_ocp_if omap3_l4_core__i2c3 = {
 };
 
 /* L4 CORE -> SR1 interface */
+static struct omap_hwmod_addr_space omap3_sr1_addr_space[] = {
+	{
+		.pa_start	= OMAP34XX_SR1_BASE,
+		.pa_end		= OMAP34XX_SR1_BASE + SZ_1K - 1,
+		.flags		= ADDR_TYPE_RT,
+	},
+	{ },
+};
+
 static struct omap_hwmod_ocp_if omap34xx_l4_core__sr1 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap34xx_sr1_hwmod,
 	.clk		= "sr_l4_ick",
+	.addr		= omap3_sr1_addr_space,
 	.user		= OCP_USER_MPU,
 };
 
@@ -2124,15 +2133,25 @@ static struct omap_hwmod_ocp_if omap36xx_l4_core__sr1 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap36xx_sr1_hwmod,
 	.clk		= "sr_l4_ick",
+	.addr		= omap3_sr1_addr_space,
 	.user		= OCP_USER_MPU,
 };
 
 /* L4 CORE -> SR1 interface */
+static struct omap_hwmod_addr_space omap3_sr2_addr_space[] = {
+	{
+		.pa_start	= OMAP34XX_SR2_BASE,
+		.pa_end		= OMAP34XX_SR2_BASE + SZ_1K - 1,
+		.flags		= ADDR_TYPE_RT,
+	},
+	{ },
+};
+
 static struct omap_hwmod_ocp_if omap34xx_l4_core__sr2 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap34xx_sr2_hwmod,
 	.clk		= "sr_l4_ick",
+	.addr		= omap3_sr2_addr_space,
 	.user		= OCP_USER_MPU,
 };
 
@@ -2140,6 +2159,7 @@ static struct omap_hwmod_ocp_if omap36xx_l4_core__sr2 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap36xx_sr2_hwmod,
 	.clk		= "sr_l4_ick",
+	.addr		= omap3_sr2_addr_space,
 	.user		= OCP_USER_MPU,
 };
 
@@ -3111,16 +3131,20 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_hwmod_ocp_ifs[] __initdata = {
  * Return: 0 if device named @dev_name is not likely to be accessible,
  * or 1 if it is likely to be accessible.
  */
-static int __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus,
-						       const char *dev_name)
+static bool __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus,
+							const char *dev_name)
 {
+	struct device_node *node;
+	bool available;
+
 	if (!bus)
-		return (omap_type() == OMAP2_DEVICE_TYPE_GP) ? 1 : 0;
+		return omap_type() == OMAP2_DEVICE_TYPE_GP;
 
-	if (of_device_is_available(of_find_node_by_name(bus, dev_name)))
-		return 1;
+	node = of_get_child_by_name(bus, dev_name);
+	available = of_device_is_available(node);
+	of_node_put(node);
 
-	return 0;
+	return available;
 }
 
 int __init omap3xxx_hwmod_init(void)
@@ -3189,15 +3213,20 @@ int __init omap3xxx_hwmod_init(void)
 
 	if (h_sham && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "sham")) {
 		r = omap_hwmod_register_links(h_sham);
-		if (r < 0)
+		if (r < 0) {
+			of_node_put(bus);
 			return r;
+		}
 	}
 
 	if (h_aes && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "aes")) {
 		r = omap_hwmod_register_links(h_aes);
-		if (r < 0)
+		if (r < 0) {
+			of_node_put(bus);
 			return r;
+		}
 	}
+	of_node_put(bus);
 
 	/*
 	 * Register hwmod links specific to certain ES levels of a
@@ -114,6 +114,7 @@ pmu {
 		pcie0: pcie@20020000 {
 			compatible = "brcm,iproc-pcie";
 			reg = <0 0x20020000 0 0x1000>;
+			dma-coherent;
 
 			#interrupt-cells = <1>;
 			interrupt-map-mask = <0 0 0 0>;
@@ -144,6 +145,7 @@ pcie0: pcie@20020000 {
 		pcie4: pcie@50020000 {
 			compatible = "brcm,iproc-pcie";
 			reg = <0 0x50020000 0 0x1000>;
+			dma-coherent;
 
 			#interrupt-cells = <1>;
 			interrupt-map-mask = <0 0 0 0>;
@@ -174,6 +176,7 @@ pcie4: pcie@50020000 {
 		pcie8: pcie@60c00000 {
 			compatible = "brcm,iproc-pcie-paxc";
 			reg = <0 0x60c00000 0 0x1000>;
+			dma-coherent;
 			linux,pci-domain = <8>;
 
 			bus-range = <0x0 0x1>;
@@ -203,6 +206,7 @@ enet: ethernet@61000000 {
 			       <0x61030000 0x100>;
 			reg-names = "amac_base", "idm_base", "nicpm_base";
 			interrupts = <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>;
+			dma-coherent;
 			phy-handle = <&gphy0>;
 			phy-mode = "rgmii";
 			status = "disabled";
@@ -213,6 +217,7 @@ pdc0: iproc-pdc0@612c0000 {
 			reg = <0x612c0000 0x445>;	/* PDC FS0 regs */
 			interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
 			#mbox-cells = <1>;
+			dma-coherent;
 			brcm,rx-status-len = <32>;
 			brcm,use-bcm-hdr;
 		};
@@ -222,6 +227,7 @@ pdc1: iproc-pdc1@612e0000 {
 			reg = <0x612e0000 0x445>;	/* PDC FS1 regs */
 			interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
 			#mbox-cells = <1>;
+			dma-coherent;
 			brcm,rx-status-len = <32>;
 			brcm,use-bcm-hdr;
 		};
@@ -231,6 +237,7 @@ pdc2: iproc-pdc2@61300000 {
 			reg = <0x61300000 0x445>;	/* PDC FS2 regs */
 			interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
 			#mbox-cells = <1>;
+			dma-coherent;
 			brcm,rx-status-len = <32>;
 			brcm,use-bcm-hdr;
 		};
@@ -240,6 +247,7 @@ pdc3: iproc-pdc3@61320000 {
 			reg = <0x61320000 0x445>;	/* PDC FS3 regs */
 			interrupts = <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
 			#mbox-cells = <1>;
+			dma-coherent;
 			brcm,rx-status-len = <32>;
 			brcm,use-bcm-hdr;
 		};
@@ -644,6 +652,7 @@ sata_phy1: sata-phy@1 {
 		sata: ahci@663f2000 {
 			compatible = "brcm,iproc-ahci", "generic-ahci";
 			reg = <0x663f2000 0x1000>;
+			dma-coherent;
 			reg-names = "ahci";
 			interrupts = <GIC_SPI 438 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
@@ -667,6 +676,7 @@ sdio0: sdhci@66420000 {
 			compatible = "brcm,sdhci-iproc-cygnus";
 			reg = <0x66420000 0x100>;
 			interrupts = <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>;
+			dma-coherent;
 			bus-width = <8>;
 			clocks = <&genpll_sw BCM_NS2_GENPLL_SW_SDIO_CLK>;
 			status = "disabled";
@@ -676,6 +686,7 @@ sdio1: sdhci@66430000 {
 			compatible = "brcm,sdhci-iproc-cygnus";
 			reg = <0x66430000 0x100>;
 			interrupts = <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>;
+			dma-coherent;
 			bus-width = <8>;
 			clocks = <&genpll_sw BCM_NS2_GENPLL_SW_SDIO_CLK>;
 			status = "disabled";
@@ -44,7 +44,7 @@
 #define __ARM_NR_compat_cacheflush	(__ARM_NR_COMPAT_BASE+2)
 #define __ARM_NR_compat_set_tls	(__ARM_NR_COMPAT_BASE+5)
 
-#define __NR_compat_syscalls		394
+#define __NR_compat_syscalls		398
 #endif
 
 #define __ARCH_WANT_SYS_CLONE
@@ -809,6 +809,14 @@ __SYSCALL(__NR_copy_file_range, sys_copy_file_range)
 __SYSCALL(__NR_preadv2, compat_sys_preadv2)
 #define __NR_pwritev2 393
 __SYSCALL(__NR_pwritev2, compat_sys_pwritev2)
+#define __NR_pkey_mprotect 394
+__SYSCALL(__NR_pkey_mprotect, sys_pkey_mprotect)
+#define __NR_pkey_alloc 395
+__SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
+#define __NR_pkey_free 396
+__SYSCALL(__NR_pkey_free, sys_pkey_free)
+#define __NR_statx 397
+__SYSCALL(__NR_statx, sys_statx)
 
 /*
  * Please add new compat syscalls above this comment and update
@@ -131,11 +131,15 @@ u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
 	/*
 	 * The kernel Image should not extend across a 1GB/32MB/512MB alignment
 	 * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
-	 * happens, increase the KASLR offset by the size of the kernel image.
+	 * happens, increase the KASLR offset by the size of the kernel image
+	 * rounded up by SWAPPER_BLOCK_SIZE.
 	 */
 	if ((((u64)_text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) !=
-	    (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT))
-		offset = (offset + (u64)(_end - _text)) & mask;
+	    (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT)) {
+		u64 kimg_sz = _end - _text;
+		offset = (offset + round_up(kimg_sz, SWAPPER_BLOCK_SIZE))
+				& mask;
+	}
 
 	if (IS_ENABLED(CONFIG_KASAN))
 		/*
@@ -449,9 +449,23 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 _GLOBAL(pnv_wakeup_tb_loss)
 	ld	r1,PACAR1(r13)
 	/*
-	 * Before entering any idle state, the NVGPRs are saved in the stack
-	 * and they are restored before switching to the process context. Hence
-	 * until they are restored, they are free to be used.
+	 * Before entering any idle state, the NVGPRs are saved in the stack.
+	 * If there was a state loss, or PACA_NAPSTATELOST was set, then the
+	 * NVGPRs are restored. If we are here, it is likely that state is lost,
+	 * but not guaranteed -- neither ISA207 nor ISA300 tests to reach
+	 * here are the same as the test to restore NVGPRS:
+	 * PACA_THREAD_IDLE_STATE test for ISA207, PSSCR test for ISA300,
+	 * and SRR1 test for restoring NVGPRs.
+	 *
+	 * We are about to clobber NVGPRs now, so set NAPSTATELOST to
+	 * guarantee they will always be restored. This might be tightened
+	 * with careful reading of specs (particularly for ISA300) but this
+	 * is already a slow wakeup path and it's simpler to be safe.
+	 */
+	li	r0,1
+	stb	r0,PACA_NAPSTATELOST(r13)
+
+	/*
 	 *
 	 * Save SRR1 and LR in NVGPRs as they might be clobbered in
 	 * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required
@@ -397,8 +397,7 @@ static void early_check_vec5(void)
 void __init mmu_early_init_devtree(void)
 {
 	/* Disable radix mode based on kernel command line. */
-	/* We don't yet have the machinery to do radix as a guest. */
-	if (disable_radix || !(mfmsr() & MSR_HV))
+	if (disable_radix)
 		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
 
 	/*
@@ -697,17 +697,8 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
 {
 	struct blk_mq_timeout_data *data = priv;
 
-	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
-		/*
-		 * If a request wasn't started before the queue was
-		 * marked dying, kill it here or it'll go unnoticed.
-		 */
-		if (unlikely(blk_queue_dying(rq->q))) {
-			rq->errors = -EIO;
-			blk_mq_end_request(rq, rq->errors);
-		}
+	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
 		return;
-	}
 
 	if (time_after_eq(jiffies, rq->deadline)) {
 		if (!blk_mark_rq_complete(rq))
@@ -30,11 +30,11 @@ static void blk_stat_flush_batch(struct blk_rq_stat *stat)
 
 static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
 {
-	blk_stat_flush_batch(src);
-
 	if (!src->nr_samples)
 		return;
 
+	blk_stat_flush_batch(src);
+
 	dst->min = min(dst->min, src->min);
 	dst->max = max(dst->max, src->max);
 
@@ -30,7 +30,7 @@ static bool qdf2400_erratum_44_present(struct acpi_table_header *h)
 		return true;
 
 	if (!memcmp(h->oem_table_id, "QDF2400 ", ACPI_OEM_TABLE_ID_SIZE) &&
-			h->oem_revision == 0)
+			h->oem_revision == 1)
 		return true;
 
 	return false;
@@ -218,6 +218,7 @@ static const struct of_device_id img_ascii_lcd_matches[] = {
 	{ .compatible = "img,boston-lcd", .data = &boston_config },
 	{ .compatible = "mti,malta-lcd", .data = &malta_config },
 	{ .compatible = "mti,sead3-lcd", .data = &sead3_config },
+	{ /* sentinel */ }
 };
 
 /**
@@ -344,7 +344,8 @@ config BT_WILINK
 
 config BT_QCOMSMD
 	tristate "Qualcomm SMD based HCI support"
-	depends on (QCOM_SMD && QCOM_WCNSS_CTRL) || COMPILE_TEST
+	depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n)
+	depends on QCOM_WCNSS_CTRL || (COMPILE_TEST && QCOM_WCNSS_CTRL=n)
 	select BT_QCA
 	help
 	  Qualcomm SMD based HCI driver.
@@ -55,6 +55,7 @@ MODULE_DEVICE_TABLE(pci, pci_tbl);
 struct amd768_priv {
 	void __iomem *iobase;
 	struct pci_dev *pcidev;
+	u32 pmbase;
 };
 
 static int amd_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
@@ -148,33 +149,58 @@ static int __init mod_init(void)
 	if (pmbase == 0)
 		return -EIO;
 
-	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
 
-	if (!devm_request_region(&pdev->dev, pmbase + PMBASE_OFFSET,
-				PMBASE_SIZE, DRV_NAME)) {
+	if (!request_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE, DRV_NAME)) {
 		dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n",
 			pmbase + 0xF0);
-		return -EBUSY;
+		err = -EBUSY;
+		goto out;
 	}
 
-	priv->iobase = devm_ioport_map(&pdev->dev, pmbase + PMBASE_OFFSET,
-			PMBASE_SIZE);
+	priv->iobase = ioport_map(pmbase + PMBASE_OFFSET, PMBASE_SIZE);
 	if (!priv->iobase) {
 		pr_err(DRV_NAME "Cannot map ioport\n");
-		return -ENOMEM;
+		err = -EINVAL;
+		goto err_iomap;
 	}
 
 	amd_rng.priv = (unsigned long)priv;
+	priv->pmbase = pmbase;
 	priv->pcidev = pdev;
 
 	pr_info(DRV_NAME " detected\n");
-	return devm_hwrng_register(&pdev->dev, &amd_rng);
+	err = hwrng_register(&amd_rng);
+	if (err) {
+		pr_err(DRV_NAME " registering failed (%d)\n", err);
+		goto err_hwrng;
+	}
+	return 0;
+
+err_hwrng:
+	ioport_unmap(priv->iobase);
+err_iomap:
+	release_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE);
+out:
+	kfree(priv);
+	return err;
 }
 
 static void __exit mod_exit(void)
 {
+	struct amd768_priv *priv;
+
+	priv = (struct amd768_priv *)amd_rng.priv;
+
 	hwrng_unregister(&amd_rng);
+
+	ioport_unmap(priv->iobase);
+
+	release_region(priv->pmbase + PMBASE_OFFSET, PMBASE_SIZE);
+
+	kfree(priv);
 }
 
 module_init(mod_init);
@@ -31,6 +31,9 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 
+
+#define PFX	KBUILD_MODNAME ": "
+
 #define GEODE_RNG_DATA_REG   0x50
 #define GEODE_RNG_STATUS_REG 0x54
 
@@ -82,6 +85,7 @@ static struct hwrng geode_rng = {
 
 static int __init mod_init(void)
 {
+	int err = -ENODEV;
 	struct pci_dev *pdev = NULL;
 	const struct pci_device_id *ent;
 	void __iomem *mem;
@@ -89,27 +93,43 @@ static int __init mod_init(void)
 
 	for_each_pci_dev(pdev) {
 		ent = pci_match_id(pci_tbl, pdev);
-		if (ent) {
-			rng_base = pci_resource_start(pdev, 0);
-			if (rng_base == 0)
-				return -ENODEV;
-
-			mem = devm_ioremap(&pdev->dev, rng_base, 0x58);
-			if (!mem)
-				return -ENOMEM;
-			geode_rng.priv = (unsigned long)mem;
-
-			pr_info("AMD Geode RNG detected\n");
-			return devm_hwrng_register(&pdev->dev, &geode_rng);
-		}
+		if (ent)
+			goto found;
 	}
 
 	/* Device not found. */
-	return -ENODEV;
+	goto out;
+
+found:
+	rng_base = pci_resource_start(pdev, 0);
+	if (rng_base == 0)
+		goto out;
+	err = -ENOMEM;
+	mem = ioremap(rng_base, 0x58);
+	if (!mem)
+		goto out;
+	geode_rng.priv = (unsigned long)mem;
+
+	pr_info("AMD Geode RNG detected\n");
+	err = hwrng_register(&geode_rng);
+	if (err) {
+		pr_err(PFX "RNG registering failed (%d)\n",
+		       err);
+		goto err_unmap;
+	}
+out:
+	return err;
+
+err_unmap:
+	iounmap(mem);
+	goto out;
 }
 
 static void __exit mod_exit(void)
 {
 	void __iomem *mem = (void __iomem *)geode_rng.priv;
 
 	hwrng_unregister(&geode_rng);
+	iounmap(mem);
 }
 
 module_init(mod_init);
@@ -84,11 +84,14 @@ struct pp_struct {
 	struct ieee1284_info state;
 	struct ieee1284_info saved_state;
 	long default_inactivity;
+	int index;
 };
 
 /* should we use PARDEVICE_MAX here? */
 static struct device *devices[PARPORT_MAX];
 
+static DEFINE_IDA(ida_index);
+
 /* pp_struct.flags bitfields */
 #define PP_CLAIMED    (1<<0)
 #define PP_EXCL       (1<<1)
@@ -290,7 +293,7 @@ static int register_device(int minor, struct pp_struct *pp)
 	struct pardevice *pdev = NULL;
 	char *name;
 	struct pardev_cb ppdev_cb;
-	int rc = 0;
+	int rc = 0, index;
 
 	name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor);
 	if (name == NULL)
@@ -303,20 +306,23 @@ static int register_device(int minor, struct pp_struct *pp)
 		goto err;
 	}
 
+	index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
 	memset(&ppdev_cb, 0, sizeof(ppdev_cb));
 	ppdev_cb.irq_func = pp_irq;
 	ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0;
 	ppdev_cb.private = pp;
-	pdev = parport_register_dev_model(port, name, &ppdev_cb, minor);
+	pdev = parport_register_dev_model(port, name, &ppdev_cb, index);
 	parport_put_port(port);
 
 	if (!pdev) {
 		pr_warn("%s: failed to register device!\n", name);
 		rc = -ENXIO;
+		ida_simple_remove(&ida_index, index);
 		goto err;
 	}
 
 	pp->pdev = pdev;
+	pp->index = index;
 	dev_dbg(&pdev->dev, "registered pardevice\n");
 err:
 	kfree(name);
@@ -755,6 +761,7 @@ static int pp_release(struct inode *inode, struct file *file)
 
 	if (pp->pdev) {
 		parport_unregister_device(pp->pdev);
+		ida_simple_remove(&ida_index, pp->index);
 		pp->pdev = NULL;
 		pr_debug(CHRDEV "%x: unregistered pardevice\n", minor);
 	}
@@ -2502,7 +2502,7 @@ struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
 
 	clk->core = hw->core;
 	clk->dev_id = dev_id;
-	clk->con_id = con_id;
+	clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
 	clk->max_rate = ULONG_MAX;
 
 	clk_prepare_lock();
@@ -2518,6 +2518,7 @@ void __clk_free_clk(struct clk *clk)
 	hlist_del(&clk->clks_node);
 	clk_prepare_unlock();
 
+	kfree_const(clk->con_id);
 	kfree(clk);
 }
 
@@ -127,7 +127,7 @@ PNAME(mux_ddrphy_p)		= { "dpll_ddr", "gpll_ddr" };
 PNAME(mux_pll_src_3plls_p)	= { "apll", "dpll", "gpll" };
 PNAME(mux_timer_p)		= { "xin24m", "pclk_peri_src" };
 
-PNAME(mux_pll_src_apll_dpll_gpll_usb480m_p)	= { "apll", "dpll", "gpll" "usb480m" };
+PNAME(mux_pll_src_apll_dpll_gpll_usb480m_p)	= { "apll", "dpll", "gpll", "usb480m" };
 
 PNAME(mux_mmc_src_p)	= { "apll", "dpll", "gpll", "xin24m" };
 PNAME(mux_i2s_pre_p)	= { "i2s_src", "i2s_frac", "ext_i2s", "xin12m" };
@@ -450,6 +450,13 @@ static void __init rk3036_clk_init(struct device_node *np)
 		return;
 	}
 
+	/*
+	 * Make uart_pll_clk a child of the gpll, as all other sources are
+	 * not that usable / stable.
+	 */
+	writel_relaxed(HIWORD_UPDATE(0x2, 0x3, 10),
+		       reg_base + RK2928_CLKSEL_CON(13));
+
 	ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
 	if (IS_ERR(ctx)) {
 		pr_err("%s: rockchip clk init failed\n", __func__);
@@ -80,6 +80,7 @@ config SUN6I_A31_CCU
 	select SUNXI_CCU_DIV
 	select SUNXI_CCU_NK
 	select SUNXI_CCU_NKM
+	select SUNXI_CCU_NKMP
 	select SUNXI_CCU_NM
 	select SUNXI_CCU_MP
 	select SUNXI_CCU_PHASE
@@ -566,7 +566,7 @@ static SUNXI_CCU_M_WITH_GATE(gpu_clk, "gpu", "pll-gpu",
 			     0x1a0, 0, 3, BIT(31), CLK_SET_RATE_PARENT);
 
 /* Fixed Factor clocks */
-static CLK_FIXED_FACTOR(osc12M_clk, "osc12M", "osc24M", 1, 2, 0);
+static CLK_FIXED_FACTOR(osc12M_clk, "osc12M", "osc24M", 2, 1, 0);
 
 /* We hardcode the divider to 4 for now */
 static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio",
@@ -608,7 +608,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", lcd_ch1_parents,
 				 0x150, 0, 4, 24, 2, BIT(31),
 				 CLK_SET_RATE_PARENT);
 
-static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", 0x150, BIT(31), 0);
+static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", 0x150, BIT(30), 0);
 
 static SUNXI_CCU_GATE(ps_clk, "ps", "lcd1-ch1", 0x140, BIT(31), 0);
 
@@ -85,6 +85,10 @@ static unsigned long ccu_mp_recalc_rate(struct clk_hw *hw,
 	unsigned int m, p;
 	u32 reg;
 
+	/* Adjust parent_rate according to pre-dividers */
+	ccu_mux_helper_adjust_parent_for_prediv(&cmp->common, &cmp->mux,
+						-1, &parent_rate);
+
 	reg = readl(cmp->common.base + cmp->common.reg);
 
 	m = reg >> cmp->m.shift;
@@ -117,6 +121,10 @@ static int ccu_mp_set_rate(struct clk_hw *hw, unsigned long rate,
 	unsigned int m, p;
 	u32 reg;
 
+	/* Adjust parent_rate according to pre-dividers */
+	ccu_mux_helper_adjust_parent_for_prediv(&cmp->common, &cmp->mux,
+						-1, &parent_rate);
+
 	max_m = cmp->m.max ?: 1 << cmp->m.width;
 	max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);
 
@@ -107,7 +107,7 @@ static unsigned long ccu_nkmp_recalc_rate(struct clk_hw *hw,
 	p = reg >> nkmp->p.shift;
 	p &= (1 << nkmp->p.width) - 1;
 
-	return parent_rate * n * k >> p / m;
+	return (parent_rate * n * k >> p) / m;
 }
 
 static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate,
@@ -1184,6 +1184,9 @@ static int cpufreq_online(unsigned int cpu)
 		for_each_cpu(j, policy->related_cpus)
 			per_cpu(cpufreq_cpu_data, j) = policy;
 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+	} else {
+		policy->min = policy->user_policy.min;
+		policy->max = policy->user_policy.max;
 	}
 
 	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
@@ -364,9 +364,7 @@ static bool driver_registered __read_mostly;
 static bool acpi_ppc;
 #endif
 
-static struct perf_limits performance_limits;
-static struct perf_limits powersave_limits;
-static struct perf_limits *limits;
+static struct perf_limits global;
 
 static void intel_pstate_init_limits(struct perf_limits *limits)
 {
@@ -377,14 +375,6 @@ static void intel_pstate_init_limits(struct perf_limits *limits)
 	limits->max_sysfs_pct = 100;
 }
 
-static void intel_pstate_set_performance_limits(struct perf_limits *limits)
-{
-	intel_pstate_init_limits(limits);
-	limits->min_perf_pct = 100;
-	limits->min_perf = int_ext_tofp(1);
-	limits->min_sysfs_pct = 100;
-}
-
 static DEFINE_MUTEX(intel_pstate_driver_lock);
 static DEFINE_MUTEX(intel_pstate_limits_lock);
 
@@ -507,7 +497,7 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
 	 * correct max turbo frequency based on the turbo state.
	 * Also need to convert to MHz as _PSS freq is in MHz.
 	 */
-	if (!limits->turbo_disabled)
+	if (!global.turbo_disabled)
 		cpu->acpi_perf_data.states[0].core_frequency =
 					policy->cpuinfo.max_freq / 1000;
 	cpu->valid_pss_table = true;
@@ -626,7 +616,7 @@ static inline void update_turbo_state(void)
 
 	cpu = all_cpu_data[0];
 	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
-	limits->turbo_disabled =
+	global.turbo_disabled =
 		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
 		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
 }
@@ -851,7 +841,7 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
 static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
 {
 	int min, hw_min, max, hw_max, cpu;
-	struct perf_limits *perf_limits = limits;
+	struct perf_limits *perf_limits = &global;
 	u64 value, cap;
 
 	for_each_cpu(cpu, policy->cpus) {
@@ -863,19 +853,22 @@ static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
 
 		rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
 		hw_min = HWP_LOWEST_PERF(cap);
-		if (limits->no_turbo)
+		if (global.no_turbo)
 			hw_max = HWP_GUARANTEED_PERF(cap);
 		else
 			hw_max = HWP_HIGHEST_PERF(cap);
 
-		min = fp_ext_toint(hw_max * perf_limits->min_perf);
+		max = fp_ext_toint(hw_max * perf_limits->max_perf);
+		if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
+			min = max;
+		else
+			min = fp_ext_toint(hw_max * perf_limits->min_perf);
 
 		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
 
 		value &= ~HWP_MIN_PERF(~0L);
 		value |= HWP_MIN_PERF(min);
 
-		max = fp_ext_toint(hw_max * perf_limits->max_perf);
 		value &= ~HWP_MAX_PERF(~0L);
 		value |= HWP_MAX_PERF(max);
 
@@ -968,20 +961,11 @@ static int intel_pstate_resume(struct cpufreq_policy *policy)
 }
 
 static void intel_pstate_update_policies(void)
-	__releases(&intel_pstate_limits_lock)
-	__acquires(&intel_pstate_limits_lock)
 {
-	struct perf_limits *saved_limits = limits;
 	int cpu;
 
-	mutex_unlock(&intel_pstate_limits_lock);
-
 	for_each_possible_cpu(cpu)
 		cpufreq_update_policy(cpu);
-
-	mutex_lock(&intel_pstate_limits_lock);
-
-	limits = saved_limits;
 }
 
 /************************** debugfs begin ************************/
@@ -1060,7 +1044,7 @@ static void intel_pstate_debug_hide_params(void)
 	static ssize_t show_##file_name					\
 	(struct kobject *kobj, struct attribute *attr, char *buf)	\
 	{								\
-		return sprintf(buf, "%u\n", limits->object);		\
+		return sprintf(buf, "%u\n", global.object);		\
 	}
 
 static ssize_t intel_pstate_show_status(char *buf);
@@ -1151,10 +1135,10 @@ static ssize_t show_no_turbo(struct kobject *kobj,
 	}
 
 	update_turbo_state();
-	if (limits->turbo_disabled)
-		ret = sprintf(buf, "%u\n", limits->turbo_disabled);
+	if (global.turbo_disabled)
+		ret = sprintf(buf, "%u\n", global.turbo_disabled);
 	else
-		ret = sprintf(buf, "%u\n", limits->no_turbo);
+		ret = sprintf(buf, "%u\n", global.no_turbo);
 
 	mutex_unlock(&intel_pstate_driver_lock);
 
@@ -1181,19 +1165,19 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
 	mutex_lock(&intel_pstate_limits_lock);
 
 	update_turbo_state();
-	if (limits->turbo_disabled) {
+	if (global.turbo_disabled) {
 		pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
 		mutex_unlock(&intel_pstate_limits_lock);
 		mutex_unlock(&intel_pstate_driver_lock);
 		return -EPERM;
 	}
 
-	limits->no_turbo = clamp_t(int, input, 0, 1);
-
-	intel_pstate_update_policies();
+	global.no_turbo = clamp_t(int, input, 0, 1);
 
 	mutex_unlock(&intel_pstate_limits_lock);
 
+	intel_pstate_update_policies();
+
 	mutex_unlock(&intel_pstate_driver_lock);
 
 	return count;
@@ -1218,19 +1202,16 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
 
 	mutex_lock(&intel_pstate_limits_lock);
 
-	limits->max_sysfs_pct = clamp_t(int, input, 0 , 100);
-	limits->max_perf_pct = min(limits->max_policy_pct,
-				   limits->max_sysfs_pct);
-	limits->max_perf_pct = max(limits->min_policy_pct,
-				   limits->max_perf_pct);
-	limits->max_perf_pct = max(limits->min_perf_pct,
-				   limits->max_perf_pct);
-	limits->max_perf = percent_ext_fp(limits->max_perf_pct);
-
-	intel_pstate_update_policies();
+	global.max_sysfs_pct = clamp_t(int, input, 0 , 100);
+	global.max_perf_pct = min(global.max_policy_pct, global.max_sysfs_pct);
+	global.max_perf_pct = max(global.min_policy_pct, global.max_perf_pct);
+	global.max_perf_pct = max(global.min_perf_pct, global.max_perf_pct);
+	global.max_perf = percent_ext_fp(global.max_perf_pct);
 
 	mutex_unlock(&intel_pstate_limits_lock);
 
+	intel_pstate_update_policies();
+
 	mutex_unlock(&intel_pstate_driver_lock);
 
 	return count;
@@ -1255,19 +1236,16 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
 
 	mutex_lock(&intel_pstate_limits_lock);
 
-	limits->min_sysfs_pct = clamp_t(int, input, 0 , 100);
-	limits->min_perf_pct = max(limits->min_policy_pct,
-				   limits->min_sysfs_pct);
-	limits->min_perf_pct = min(limits->max_policy_pct,
-				   limits->min_perf_pct);
-	limits->min_perf_pct = min(limits->max_perf_pct,
-				   limits->min_perf_pct);
-	limits->min_perf = percent_ext_fp(limits->min_perf_pct);
-
-	intel_pstate_update_policies();
+	global.min_sysfs_pct = clamp_t(int, input, 0 , 100);
+	global.min_perf_pct = max(global.min_policy_pct, global.min_sysfs_pct);
+	global.min_perf_pct = min(global.max_policy_pct, global.min_perf_pct);
+	global.min_perf_pct = min(global.max_perf_pct, global.min_perf_pct);
+	global.min_perf = percent_ext_fp(global.min_perf_pct);
 
 	mutex_unlock(&intel_pstate_limits_lock);
 
+	intel_pstate_update_policies();
+
 	mutex_unlock(&intel_pstate_driver_lock);
 
 	return count;
@@ -1387,7 +1365,7 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate)
 	u32 vid;
 
 	val = (u64)pstate << 8;
-	if (limits->no_turbo && !limits->turbo_disabled)
+	if (global.no_turbo && !global.turbo_disabled)
 		val |= (u64)1 << 32;
 
 	vid_fp = cpudata->vid.min + mul_fp(
@@ -1557,7 +1535,7 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate)
 	u64 val;
 
 	val = (u64)pstate << 8;
-	if (limits->no_turbo && !limits->turbo_disabled)
+	if (global.no_turbo && !global.turbo_disabled)
 		val |= (u64)1 << 32;
 
 	return val;
@@ -1683,9 +1661,9 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 	int max_perf = cpu->pstate.turbo_pstate;
 	int max_perf_adj;
 	int min_perf;
-	struct perf_limits *perf_limits = limits;
+	struct perf_limits *perf_limits = &global;
 
-	if (limits->no_turbo || limits->turbo_disabled)
+	if (global.no_turbo || global.turbo_disabled)
 		max_perf = cpu->pstate.max_pstate;
 
 	if (per_cpu_limits)
@@ -1820,7 +1798,7 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
 
 	sample->busy_scaled = busy_frac * 100;
 
-	target = limits->no_turbo || limits->turbo_disabled ?
+	target = global.no_turbo || global.turbo_disabled ?
 			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
 	target += target >> 2;
 	target = mul_fp(target, busy_frac);
@@ -2116,7 +2094,7 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
 static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 {
 	struct cpudata *cpu;
-	struct perf_limits *perf_limits = NULL;
+	struct perf_limits *perf_limits = &global;
 
 	if (!policy->cpuinfo.max_freq)
 		return -ENODEV;
@@ -2139,21 +2117,6 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 
 	mutex_lock(&intel_pstate_limits_lock);
 
-	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
-		pr_debug("set performance\n");
-		if (!perf_limits) {
-			limits = &performance_limits;
-			perf_limits = limits;
-		}
-	} else {
-		pr_debug("set powersave\n");
-		if (!perf_limits) {
-			limits = &powersave_limits;
-			perf_limits = limits;
-		}
-
-	}
-
 	intel_pstate_update_perf_limits(policy, perf_limits);
 
 	if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
@@ -2177,16 +2140,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
 {
 	struct cpudata *cpu = all_cpu_data[policy->cpu];
-	struct perf_limits *perf_limits;
-
-	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
-		perf_limits = &performance_limits;
-	else
-		perf_limits = &powersave_limits;
 
 	update_turbo_state();
-	policy->cpuinfo.max_freq = perf_limits->turbo_disabled ||
-					perf_limits->no_turbo ?
+	policy->cpuinfo.max_freq = global.turbo_disabled || global.no_turbo ?
 				cpu->pstate.max_freq :
 				cpu->pstate.turbo_freq;
 
@@ -2201,9 +2157,9 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
 		unsigned int max_freq, min_freq;
 
 		max_freq = policy->cpuinfo.max_freq *
-					perf_limits->max_sysfs_pct / 100;
+					global.max_sysfs_pct / 100;
 		min_freq = policy->cpuinfo.max_freq *
-					perf_limits->min_sysfs_pct / 100;
+					global.min_sysfs_pct / 100;
 		cpufreq_verify_within_limits(policy, min_freq, max_freq);
 	}
 
@@ -2255,7 +2211,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
 	/* cpuinfo and default policy values */
 	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
 	update_turbo_state();
-	policy->cpuinfo.max_freq = limits->turbo_disabled ?
+	policy->cpuinfo.max_freq = global.turbo_disabled ?
 			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
 	policy->cpuinfo.max_freq *= cpu->pstate.scaling;
 
@@ -2275,7 +2231,7 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
 		return ret;
 
 	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-	if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
+	if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE))
 		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
 	else
 		policy->policy = CPUFREQ_POLICY_POWERSAVE;
@@ -2301,7 +2257,7 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
 	struct cpudata *cpu = all_cpu_data[policy->cpu];
 
 	update_turbo_state();
-	policy->cpuinfo.max_freq = limits->turbo_disabled ?
+	policy->cpuinfo.max_freq = global.no_turbo || global.turbo_disabled ?
 			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
 
 	cpufreq_verify_within_cpu_limits(policy);
@@ -2309,26 +2265,6 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
 	return 0;
 }
 
-static unsigned int intel_cpufreq_turbo_update(struct cpudata *cpu,
-					       struct cpufreq_policy *policy,
-					       unsigned int target_freq)
-{
-	unsigned int max_freq;
-
-	update_turbo_state();
-
-	max_freq = limits->no_turbo || limits->turbo_disabled ?
-			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
-	policy->cpuinfo.max_freq = max_freq;
-	if (policy->max > max_freq)
-		policy->max = max_freq;
-
-	if (target_freq > max_freq)
-		target_freq = max_freq;
-
-	return target_freq;
-}
-
 static int intel_cpufreq_target(struct cpufreq_policy *policy,
 				unsigned int target_freq,
 				unsigned int relation)
@@ -2337,8 +2273,10 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
 	struct cpufreq_freqs freqs;
 	int target_pstate;
 
+	update_turbo_state();
+
 	freqs.old = policy->cur;
-	freqs.new = intel_cpufreq_turbo_update(cpu, policy, target_freq);
+	freqs.new = target_freq;
 
 	cpufreq_freq_transition_begin(policy, &freqs);
 	switch (relation) {
@@ -2370,7 +2308,8 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
 	struct cpudata *cpu = all_cpu_data[policy->cpu];
 	int target_pstate;
 
-	target_freq = intel_cpufreq_turbo_update(cpu, policy, target_freq);
+	update_turbo_state();
+
 	target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
 	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
|
||||
intel_pstate_update_pstate(cpu, target_pstate);
|
||||
|
@ -2425,13 +2364,7 @@ static int intel_pstate_register_driver(void)
|
|||
{
|
||||
int ret;
|
||||
|
||||
intel_pstate_init_limits(&powersave_limits);
|
||||
intel_pstate_set_performance_limits(&performance_limits);
|
||||
if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE) &&
|
||||
intel_pstate_driver == &intel_pstate)
|
||||
limits = &performance_limits;
|
||||
else
|
||||
limits = &powersave_limits;
|
||||
intel_pstate_init_limits(&global);
|
||||
|
||||
ret = cpufreq_register_driver(intel_pstate_driver);
|
||||
if (ret) {
|
||||
|
|
|
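The intel_pstate hunks above fold the separate performance/powersave limit objects into the single `global` struct; the sysfs percentages are still turned into policy frequency bounds the same way. A minimal sketch of that arithmetic with made-up numbers, not driver code:

    #include <stdio.h>

    int main(void)
    {
        /* illustrative values; the driver reads these from cpuinfo/sysfs */
        unsigned long long cpuinfo_max_khz = 3400000; /* 3.4 GHz */
        unsigned long long max_sysfs_pct = 80, min_sysfs_pct = 20;

        /* same percent-to-frequency scaling as the verify hunk */
        unsigned long long max_khz = cpuinfo_max_khz * max_sysfs_pct / 100;
        unsigned long long min_khz = cpuinfo_max_khz * min_sysfs_pct / 100;

        /* the policy is then clamped to [min_khz, max_khz] */
        printf("policy clamp: %llu..%llu kHz\n", min_khz, max_khz);
        return 0;
    }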
@ -615,6 +615,18 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev)
struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
int error;

+/*
+ * Return if cpu_device is not setup for this CPU.
+ *
+ * This could happen if the arch did not set up cpu_device
+ * since this CPU is not in cpu_present mask and the
+ * driver did not send a correct CPU mask during registration.
+ * Without this check we would end up passing bogus
+ * value for &cpu_dev->kobj in kobject_init_and_add()
+ */
+if (!cpu_dev)
+return -ENODEV;
+
kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
if (!kdev)
return -ENOMEM;
@ -283,11 +283,14 @@ EXPORT_SYMBOL_GPL(ccp_version);
*/
int ccp_enqueue_cmd(struct ccp_cmd *cmd)
{
-struct ccp_device *ccp = ccp_get_device();
+struct ccp_device *ccp;
unsigned long flags;
unsigned int i;
int ret;

+/* Some commands might need to be sent to a specific device */
+ccp = cmd->ccp ? cmd->ccp : ccp_get_device();
+
if (!ccp)
return -ENODEV;
@ -390,6 +390,7 @@ static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
goto err;

ccp_cmd = &cmd->ccp_cmd;
+ccp_cmd->ccp = chan->ccp;
ccp_pt = &ccp_cmd->u.passthru_nomap;
ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;
@ -44,7 +44,7 @@ config EXTCON_GPIO

config EXTCON_INTEL_INT3496
tristate "Intel INT3496 ACPI device extcon driver"
-depends on GPIOLIB && ACPI
+depends on GPIOLIB && ACPI && (X86 || COMPILE_TEST)
help
Say Y here to enable extcon support for USB OTG ports controlled by
an Intel INT3496 ACPI device.
@ -45,6 +45,17 @@ static const unsigned int int3496_cable[] = {
EXTCON_NONE,
};

+static const struct acpi_gpio_params id_gpios = { INT3496_GPIO_USB_ID, 0, false };
+static const struct acpi_gpio_params vbus_gpios = { INT3496_GPIO_VBUS_EN, 0, false };
+static const struct acpi_gpio_params mux_gpios = { INT3496_GPIO_USB_MUX, 0, false };
+
+static const struct acpi_gpio_mapping acpi_int3496_default_gpios[] = {
+{ "id-gpios", &id_gpios, 1 },
+{ "vbus-gpios", &vbus_gpios, 1 },
+{ "mux-gpios", &mux_gpios, 1 },
+{ },
+};
+
static void int3496_do_usb_id(struct work_struct *work)
{
struct int3496_data *data =

@ -83,6 +94,13 @@ static int int3496_probe(struct platform_device *pdev)
struct int3496_data *data;
int ret;

+ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(dev),
+acpi_int3496_default_gpios);
+if (ret) {
+dev_err(dev, "can't add GPIO ACPI mapping\n");
+return ret;
+}
+
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;

@ -90,30 +108,27 @@ static int int3496_probe(struct platform_device *pdev)
data->dev = dev;
INIT_DELAYED_WORK(&data->work, int3496_do_usb_id);

-data->gpio_usb_id = devm_gpiod_get_index(dev, "id",
-INT3496_GPIO_USB_ID,
-GPIOD_IN);
+data->gpio_usb_id = devm_gpiod_get(dev, "id", GPIOD_IN);
if (IS_ERR(data->gpio_usb_id)) {
ret = PTR_ERR(data->gpio_usb_id);
dev_err(dev, "can't request USB ID GPIO: %d\n", ret);
return ret;
+} else if (gpiod_get_direction(data->gpio_usb_id) != GPIOF_DIR_IN) {
+dev_warn(dev, FW_BUG "USB ID GPIO not in input mode, fixing\n");
+gpiod_direction_input(data->gpio_usb_id);
}

data->usb_id_irq = gpiod_to_irq(data->gpio_usb_id);
-if (data->usb_id_irq <= 0) {
+if (data->usb_id_irq < 0) {
dev_err(dev, "can't get USB ID IRQ: %d\n", data->usb_id_irq);
-return -EINVAL;
+return data->usb_id_irq;
}

-data->gpio_vbus_en = devm_gpiod_get_index(dev, "vbus en",
-INT3496_GPIO_VBUS_EN,
-GPIOD_ASIS);
+data->gpio_vbus_en = devm_gpiod_get(dev, "vbus", GPIOD_ASIS);
if (IS_ERR(data->gpio_vbus_en))
dev_info(dev, "can't request VBUS EN GPIO\n");

-data->gpio_usb_mux = devm_gpiod_get_index(dev, "usb mux",
-INT3496_GPIO_USB_MUX,
-GPIOD_ASIS);
+data->gpio_usb_mux = devm_gpiod_get(dev, "mux", GPIOD_ASIS);
if (IS_ERR(data->gpio_usb_mux))
dev_info(dev, "can't request USB MUX GPIO\n");

@ -154,6 +169,8 @@ static int int3496_remove(struct platform_device *pdev)
devm_free_irq(&pdev->dev, data->usb_id_irq, data);
cancel_delayed_work_sync(&data->work);

+acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pdev->dev));
+
return 0;
}
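The int3496 hunks above replace index-based gpiod lookups with named lookups backed by an ACPI GPIO mapping table. A hedged sketch of the same pattern; `demo_gpios`, `demo_probe` and the "demo" con_id are hypothetical, while the ACPI helpers are the ones the hunks themselves call:

    #include <linux/acpi.h>
    #include <linux/gpio/consumer.h>
    #include <linux/platform_device.h>

    static const struct acpi_gpio_params demo_gpio = { 0, 0, false };

    static const struct acpi_gpio_mapping demo_gpios[] = {
        { "demo-gpios", &demo_gpio, 1 },
        { }
    };

    static int demo_probe(struct platform_device *pdev)
    {
        struct gpio_desc *desc;
        int ret;

        /* Tie the "demo" con_id to ACPI GPIO resource index 0 ... */
        ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(&pdev->dev), demo_gpios);
        if (ret)
            return ret;

        /* ... so the descriptor can be fetched by name, not index. */
        desc = devm_gpiod_get(&pdev->dev, "demo", GPIOD_IN);
        return PTR_ERR_OR_ZERO(desc);
    }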
@ -96,7 +96,7 @@ static int altr_a10sr_gpio_probe(struct platform_device *pdev)
gpio->regmap = a10sr->regmap;

gpio->gp = altr_a10sr_gc;

gpio->gp.parent = pdev->dev.parent;
gpio->gp.of_node = pdev->dev.of_node;

ret = devm_gpiochip_add_data(&pdev->dev, &gpio->gp, gpio);
@ -90,21 +90,18 @@ static int altera_gpio_irq_set_type(struct irq_data *d,

altera_gc = gpiochip_get_data(irq_data_get_irq_chip_data(d));

-if (type == IRQ_TYPE_NONE)
-return 0;
-if (type == IRQ_TYPE_LEVEL_HIGH &&
-altera_gc->interrupt_trigger == IRQ_TYPE_LEVEL_HIGH)
-return 0;
-if (type == IRQ_TYPE_EDGE_RISING &&
-altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_RISING)
-return 0;
-if (type == IRQ_TYPE_EDGE_FALLING &&
-altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_FALLING)
-return 0;
-if (type == IRQ_TYPE_EDGE_BOTH &&
-altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_BOTH)
-return 0;
+if (type == IRQ_TYPE_NONE) {
+irq_set_handler_locked(d, handle_bad_irq);
+return 0;
+}
+if (type == altera_gc->interrupt_trigger) {
+if (type == IRQ_TYPE_LEVEL_HIGH)
+irq_set_handler_locked(d, handle_level_irq);
+else
+irq_set_handler_locked(d, handle_simple_irq);
+return 0;
+}
+irq_set_handler_locked(d, handle_bad_irq);
return -EINVAL;
}

@ -230,7 +227,6 @@ static void altera_gpio_irq_edge_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}

-
static void altera_gpio_irq_leveL_high_handler(struct irq_desc *desc)
{
struct altera_gpio_chip *altera_gc;

@ -310,7 +306,7 @@ static int altera_gpio_probe(struct platform_device *pdev)
altera_gc->interrupt_trigger = reg;

ret = gpiochip_irqchip_add(&altera_gc->mmchip.gc, &altera_irq_chip, 0,
-handle_simple_irq, IRQ_TYPE_NONE);
+handle_bad_irq, IRQ_TYPE_NONE);

if (ret) {
dev_err(&pdev->dev, "could not add irqchip\n");
@ -270,8 +270,10 @@ mcp23s08_direction_output(struct gpio_chip *chip, unsigned offset, int value)
static irqreturn_t mcp23s08_irq(int irq, void *data)
{
struct mcp23s08 *mcp = data;
-int intcap, intf, i;
+int intcap, intf, i, gpio, gpio_orig, intcap_mask;
unsigned int child_irq;
+bool intf_set, intcap_changed, gpio_bit_changed,
+defval_changed, gpio_set;

mutex_lock(&mcp->lock);
if (mcp_read(mcp, MCP_INTF, &intf) < 0) {

@ -287,14 +289,67 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
}

mcp->cache[MCP_INTCAP] = intcap;

+/* This clears the interrupt(configurable on S18) */
+if (mcp_read(mcp, MCP_GPIO, &gpio) < 0) {
+mutex_unlock(&mcp->lock);
+return IRQ_HANDLED;
+}
+gpio_orig = mcp->cache[MCP_GPIO];
+mcp->cache[MCP_GPIO] = gpio;
mutex_unlock(&mcp->lock);

+if (mcp->cache[MCP_INTF] == 0) {
+/* There is no interrupt pending */
+return IRQ_HANDLED;
+}
+
+dev_dbg(mcp->chip.parent,
+"intcap 0x%04X intf 0x%04X gpio_orig 0x%04X gpio 0x%04X\n",
+intcap, intf, gpio_orig, gpio);
+
for (i = 0; i < mcp->chip.ngpio; i++) {
-if ((BIT(i) & mcp->cache[MCP_INTF]) &&
-((BIT(i) & intcap & mcp->irq_rise) ||
-(mcp->irq_fall & ~intcap & BIT(i)) ||
-(BIT(i) & mcp->cache[MCP_INTCON]))) {
+/* We must check all of the inputs on the chip,
+ * otherwise we may not notice a change on >=2 pins.
+ *
+ * On at least the mcp23s17, INTCAP is only updated
+ * one byte at a time(INTCAPA and INTCAPB are
+ * not written to at the same time - only on a per-bank
+ * basis).
+ *
+ * INTF only contains the single bit that caused the
+ * interrupt per-bank. On the mcp23s17, there is
+ * INTFA and INTFB. If two pins are changed on the A
+ * side at the same time, INTF will only have one bit
+ * set. If one pin on the A side and one pin on the B
+ * side are changed at the same time, INTF will have
+ * two bits set. Thus, INTF can't be the only check
+ * to see if the input has changed.
+ */
+
+intf_set = BIT(i) & mcp->cache[MCP_INTF];
+if (i < 8 && intf_set)
+intcap_mask = 0x00FF;
+else if (i >= 8 && intf_set)
+intcap_mask = 0xFF00;
+else
+intcap_mask = 0x00;
+
+intcap_changed = (intcap_mask &
+(BIT(i) & mcp->cache[MCP_INTCAP])) !=
+(intcap_mask & (BIT(i) & gpio_orig));
+gpio_set = BIT(i) & mcp->cache[MCP_GPIO];
+gpio_bit_changed = (BIT(i) & gpio_orig) !=
+(BIT(i) & mcp->cache[MCP_GPIO]);
+defval_changed = (BIT(i) & mcp->cache[MCP_INTCON]) &&
+((BIT(i) & mcp->cache[MCP_GPIO]) !=
+(BIT(i) & mcp->cache[MCP_DEFVAL]));
+
+if (((gpio_bit_changed || intcap_changed) &&
+(BIT(i) & mcp->irq_rise) && gpio_set) ||
+((gpio_bit_changed || intcap_changed) &&
+(BIT(i) & mcp->irq_fall) && !gpio_set) ||
+defval_changed) {
child_irq = irq_find_mapping(mcp->chip.irqdomain, i);
handle_nested_irq(child_irq);
}
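The per-pin loop above derives a per-bank INTCAP mask because, as its comment notes, INTCAPA and INTCAPB update independently. A standalone sketch of just that mask selection, assuming 16-bit register caches as on the mcp23s17; `intcap_mask_for_pin` is a hypothetical helper, not driver code:

    #include <stdint.h>

    /* Pins 0-7 live in bank A (INTCAPA, low byte), pins 8-15 in bank B
     * (INTCAPB, high byte); the mask is zero when INTF did not flag the
     * pin, mirroring the if/else ladder in the hunk. */
    static uint16_t intcap_mask_for_pin(unsigned int pin, uint16_t intf_cache)
    {
        int intf_set = intf_cache & (1u << pin);

        if (pin < 8 && intf_set)
            return 0x00FF;
        else if (pin >= 8 && intf_set)
            return 0xFF00;
        return 0x0000;
    }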
@ -197,7 +197,7 @@ static ssize_t gpio_mockup_event_write(struct file *file,
struct seq_file *sfile;
struct gpio_desc *desc;
struct gpio_chip *gc;
-int status, val;
+int val;
char buf;

sfile = file->private_data;

@ -206,9 +206,8 @@ static ssize_t gpio_mockup_event_write(struct file *file,
chip = priv->chip;
gc = &chip->gc;

-status = copy_from_user(&buf, usr_buf, 1);
-if (status)
-return status;
+if (copy_from_user(&buf, usr_buf, 1))
+return -EFAULT;

if (buf == '0')
val = 0;
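The fix above matters because copy_from_user() returns the number of bytes it could not copy, not an errno, so returning that count made a failed write look like a bogus positive result. A hedged sketch of the corrected pattern; `demo_write` is a hypothetical handler:

    #include <linux/fs.h>
    #include <linux/uaccess.h>

    static ssize_t demo_write(struct file *file, const char __user *usr_buf,
                              size_t size, loff_t *ppos)
    {
        char buf;

        /* map "bytes left uncopied" to the proper errno */
        if (copy_from_user(&buf, usr_buf, 1))
            return -EFAULT;

        return size;
    }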
@ -42,9 +42,7 @@ struct xgene_gpio {
struct gpio_chip chip;
void __iomem *base;
spinlock_t lock;
-#ifdef CONFIG_PM
u32 set_dr_val[XGENE_MAX_GPIO_BANKS];
-#endif
};

static int xgene_gpio_get(struct gpio_chip *gc, unsigned int offset)

@ -138,8 +136,7 @@ static int xgene_gpio_dir_out(struct gpio_chip *gc,
return 0;
}

-#ifdef CONFIG_PM
-static int xgene_gpio_suspend(struct device *dev)
+static __maybe_unused int xgene_gpio_suspend(struct device *dev)
{
struct xgene_gpio *gpio = dev_get_drvdata(dev);
unsigned long bank_offset;

@ -152,7 +149,7 @@ static int xgene_gpio_suspend(struct device *dev)
return 0;
}

-static int xgene_gpio_resume(struct device *dev)
+static __maybe_unused int xgene_gpio_resume(struct device *dev)
{
struct xgene_gpio *gpio = dev_get_drvdata(dev);
unsigned long bank_offset;

@ -166,10 +163,6 @@ static int xgene_gpio_resume(struct device *dev)
}

static SIMPLE_DEV_PM_OPS(xgene_gpio_pm, xgene_gpio_suspend, xgene_gpio_resume);
-#define XGENE_GPIO_PM_OPS (&xgene_gpio_pm)
-#else
-#define XGENE_GPIO_PM_OPS NULL
-#endif

static int xgene_gpio_probe(struct platform_device *pdev)
{

@ -241,7 +234,7 @@ static struct platform_driver xgene_gpio_driver = {
.name = "xgene-gpio",
.of_match_table = xgene_gpio_of_match,
.acpi_match_table = ACPI_PTR(xgene_gpio_acpi_match),
-.pm = XGENE_GPIO_PM_OPS,
+.pm = &xgene_gpio_pm,
},
.probe = xgene_gpio_probe,
};
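The xgene hunks drop the #ifdef CONFIG_PM scaffolding in favor of `__maybe_unused` plus SIMPLE_DEV_PM_OPS. A hedged sketch of that pattern; `demo_suspend`, `demo_resume` and `demo_pm` are hypothetical names:

    #include <linux/device.h>
    #include <linux/pm.h>

    static __maybe_unused int demo_suspend(struct device *dev)
    {
        return 0; /* save device state here */
    }

    static __maybe_unused int demo_resume(struct device *dev)
    {
        return 0; /* restore device state here */
    }

    /* SIMPLE_DEV_PM_OPS expands to empty ops when CONFIG_PM_SLEEP is off,
     * and __maybe_unused silences the unused-function warning, so no
     * preprocessor conditionals are needed around the callbacks. */
    static SIMPLE_DEV_PM_OPS(demo_pm, demo_suspend, demo_resume);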
@ -24,7 +24,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
-amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o
+amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o

# add asic specific block
amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \

@ -34,12 +34,13 @@ amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o

amdgpu-y += \
-vi.o mxgpu_vi.o
+vi.o mxgpu_vi.o nbio_v6_1.o soc15.o mxgpu_ai.o

# add GMC block
amdgpu-y += \
gmc_v7_0.o \
-gmc_v8_0.o
+gmc_v8_0.o \
+gfxhub_v1_0.o mmhub_v1_0.o gmc_v9_0.o

# add IH block
amdgpu-y += \

@ -47,7 +48,13 @@ amdgpu-y += \
amdgpu_ih.o \
iceland_ih.o \
tonga_ih.o \
-cz_ih.o
+cz_ih.o \
+vega10_ih.o
+
+# add PSP block
+amdgpu-y += \
+amdgpu_psp.o \
+psp_v3_1.o

# add SMC block
amdgpu-y += \

@ -63,23 +70,27 @@ amdgpu-y += \
# add GFX block
amdgpu-y += \
amdgpu_gfx.o \
-gfx_v8_0.o
+gfx_v8_0.o \
+gfx_v9_0.o

# add async DMA block
amdgpu-y += \
sdma_v2_4.o \
-sdma_v3_0.o
+sdma_v3_0.o \
+sdma_v4_0.o

# add UVD block
amdgpu-y += \
amdgpu_uvd.o \
uvd_v5_0.o \
-uvd_v6_0.o
+uvd_v6_0.o \
+uvd_v7_0.o

# add VCE block
amdgpu-y += \
amdgpu_vce.o \
-vce_v3_0.o
+vce_v3_0.o \
+vce_v4_0.o

# add amdkfd interfaces
amdgpu-y += \
@ -52,6 +52,7 @@
#include "amdgpu_irq.h"
#include "amdgpu_ucode.h"
#include "amdgpu_ttm.h"
+#include "amdgpu_psp.h"
#include "amdgpu_gds.h"
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"

@ -59,6 +60,8 @@
#include "amd_powerplay.h"
#include "amdgpu_dpm.h"
#include "amdgpu_acp.h"
+#include "amdgpu_uvd.h"
+#include "amdgpu_vce.h"

#include "gpu_scheduler.h"
#include "amdgpu_virt.h"

@ -79,7 +82,7 @@ extern int amdgpu_pcie_gen2;
extern int amdgpu_msi;
extern int amdgpu_lockup_timeout;
extern int amdgpu_dpm;
-extern int amdgpu_smc_load_fw;
+extern int amdgpu_fw_load_type;
extern int amdgpu_aspm;
extern int amdgpu_runtime_pm;
extern unsigned amdgpu_ip_block_mask;

@ -101,6 +104,11 @@ extern char *amdgpu_disable_cu;
extern char *amdgpu_virtual_display;
extern unsigned amdgpu_pp_feature_mask;
extern int amdgpu_vram_page_split;
+extern int amdgpu_ngg;
+extern int amdgpu_prim_buf_per_se;
+extern int amdgpu_pos_buf_per_se;
+extern int amdgpu_cntl_sb_buf_per_se;
+extern int amdgpu_param_buf_per_se;

#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */

@ -109,11 +117,16 @@ extern int amdgpu_vram_page_split;
#define AMDGPU_IB_POOL_SIZE 16
#define AMDGPU_DEBUGFS_MAX_COMPONENTS 32
#define AMDGPUFB_CONN_LIMIT 4
-#define AMDGPU_BIOS_NUM_SCRATCH 8
+#define AMDGPU_BIOS_NUM_SCRATCH 16

/* max number of IP instances */
#define AMDGPU_MAX_SDMA_INSTANCES 2

+/* max number of VMHUB */
+#define AMDGPU_MAX_VMHUBS 2
+#define AMDGPU_MMHUB 0
+#define AMDGPU_GFXHUB 1
+
/* hardcode that limit for now */
#define AMDGPU_VA_RESERVED_SIZE (8 << 20)

@ -280,7 +293,7 @@ struct amdgpu_vm_pte_funcs {
void (*set_pte_pde)(struct amdgpu_ib *ib,
uint64_t pe,
uint64_t addr, unsigned count,
-uint32_t incr, uint32_t flags);
+uint32_t incr, uint64_t flags);
};

/* provided by the gmc block */

@ -293,7 +306,18 @@ struct amdgpu_gart_funcs {
void *cpu_pt_addr, /* cpu addr of page table */
uint32_t gpu_page_idx, /* pte/pde to update */
uint64_t addr, /* addr to write into pte/pde */
-uint32_t flags); /* access flags */
+uint64_t flags); /* access flags */
+/* enable/disable PRT support */
+void (*set_prt)(struct amdgpu_device *adev, bool enable);
+/* set pte flags based per asic */
+uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
+uint32_t flags);
+};
+
+/* provided by the mc block */
+struct amdgpu_mc_funcs {
+/* adjust mc addr in fb for APU case */
+u64 (*adjust_mc_addr)(struct amdgpu_device *adev, u64 addr);
};

/* provided by the ih block */

@ -522,6 +546,10 @@ struct amdgpu_gart {
struct page **pages;
#endif
bool ready;
+
+/* Asic default pte flags */
+uint64_t gart_pte_flags;
+
const struct amdgpu_gart_funcs *gart_funcs;
};

@ -537,9 +565,24 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
int pages);
int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
int pages, struct page **pagelist,
-dma_addr_t *dma_addr, uint32_t flags);
+dma_addr_t *dma_addr, uint64_t flags);
int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);

+/*
+ * VMHUB structures, functions & helpers
+ */
+struct amdgpu_vmhub {
+uint32_t ctx0_ptb_addr_lo32;
+uint32_t ctx0_ptb_addr_hi32;
+uint32_t vm_inv_eng0_req;
+uint32_t vm_inv_eng0_ack;
+uint32_t vm_context0_cntl;
+uint32_t vm_l2_pro_fault_status;
+uint32_t vm_l2_pro_fault_cntl;
+uint32_t (*get_invalidate_req)(unsigned int vm_id);
+uint32_t (*get_vm_protection_bits)(void);
+};
+
/*
 * GPU MC structures, functions & helpers
 */

@ -567,6 +610,15 @@ struct amdgpu_mc {
uint32_t vram_type;
uint32_t srbm_soft_reset;
struct amdgpu_mode_mc_save save;
+bool prt_warning;
+/* apertures */
+u64 shared_aperture_start;
+u64 shared_aperture_end;
+u64 private_aperture_start;
+u64 private_aperture_end;
+/* protects concurrent invalidation */
+spinlock_t invalidate_lock;
+const struct amdgpu_mc_funcs *mc_funcs;
};

/*
@ -601,6 +653,83 @@ struct amdgpu_doorbell {
u32 num_doorbells; /* Number of doorbells actually reserved for amdgpu. */
};

+/*
+ * 64bit doorbell, offset are in QWORD, occupy 2KB doorbell space
+ */
+typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
+{
+/*
+ * All compute related doorbells: kiq, hiq, diq, traditional compute queue, user queue, should locate in
+ * a continues range so that programming CP_MEC_DOORBELL_RANGE_LOWER/UPPER can cover this range.
+ * Compute related doorbells are allocated from 0x00 to 0x8a
+ */
+
+/* kernel scheduling */
+AMDGPU_DOORBELL64_KIQ = 0x00,
+
+/* HSA interface queue and debug queue */
+AMDGPU_DOORBELL64_HIQ = 0x01,
+AMDGPU_DOORBELL64_DIQ = 0x02,
+
+/* Compute engines */
+AMDGPU_DOORBELL64_MEC_RING0 = 0x03,
+AMDGPU_DOORBELL64_MEC_RING1 = 0x04,
+AMDGPU_DOORBELL64_MEC_RING2 = 0x05,
+AMDGPU_DOORBELL64_MEC_RING3 = 0x06,
+AMDGPU_DOORBELL64_MEC_RING4 = 0x07,
+AMDGPU_DOORBELL64_MEC_RING5 = 0x08,
+AMDGPU_DOORBELL64_MEC_RING6 = 0x09,
+AMDGPU_DOORBELL64_MEC_RING7 = 0x0a,
+
+/* User queue doorbell range (128 doorbells) */
+AMDGPU_DOORBELL64_USERQUEUE_START = 0x0b,
+AMDGPU_DOORBELL64_USERQUEUE_END = 0x8a,
+
+/* Graphics engine */
+AMDGPU_DOORBELL64_GFX_RING0 = 0x8b,
+
+/*
+ * Other graphics doorbells can be allocated here: from 0x8c to 0xef
+ * Graphics voltage island aperture 1
+ * default non-graphics QWORD index is 0xF0 - 0xFF inclusive
+ */
+
+/* sDMA engines */
+AMDGPU_DOORBELL64_sDMA_ENGINE0 = 0xF0,
+AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xF1,
+AMDGPU_DOORBELL64_sDMA_ENGINE1 = 0xF2,
+AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xF3,
+
+/* Interrupt handler */
+AMDGPU_DOORBELL64_IH = 0xF4, /* For legacy interrupt ring buffer */
+AMDGPU_DOORBELL64_IH_RING1 = 0xF5, /* For page migration request log */
+AMDGPU_DOORBELL64_IH_RING2 = 0xF6, /* For page migration translation/invalidation log */
+
+/* VCN engine use 32 bits doorbell */
+AMDGPU_DOORBELL64_VCN0_1 = 0xF8, /* lower 32 bits for VNC0 and upper 32 bits for VNC1 */
+AMDGPU_DOORBELL64_VCN2_3 = 0xF9,
+AMDGPU_DOORBELL64_VCN4_5 = 0xFA,
+AMDGPU_DOORBELL64_VCN6_7 = 0xFB,
+
+/* overlap the doorbell assignment with VCN as they are mutually exclusive
+ * VCE engine's doorbell is 32 bit and two VCE ring share one QWORD
+ */
+AMDGPU_DOORBELL64_RING0_1 = 0xF8,
+AMDGPU_DOORBELL64_RING2_3 = 0xF9,
+AMDGPU_DOORBELL64_RING4_5 = 0xFA,
+AMDGPU_DOORBELL64_RING6_7 = 0xFB,
+
+AMDGPU_DOORBELL64_UVD_RING0_1 = 0xFC,
+AMDGPU_DOORBELL64_UVD_RING2_3 = 0xFD,
+AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFE,
+AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFF,
+
+AMDGPU_DOORBELL64_MAX_ASSIGNMENT = 0xFF,
+AMDGPU_DOORBELL64_INVALID = 0xFFFF
+} AMDGPU_DOORBELL64_ASSIGNMENT;
+
+
void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
phys_addr_t *aperture_base,
size_t *aperture_size,
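The enum above indexes 64-bit doorbells in QWORD units, which is how the 0x00-0xFF range fits the "2KB doorbell space" its comment mentions. A hedged sketch of that offset computation; `doorbell64_byte_offset` is a hypothetical helper, not driver code:

    #include <stdint.h>

    /* A QWORD index maps to a byte offset: index 0xFF ends the window at
     * 0xFF * 8 + 8 = 2048 bytes, i.e. exactly the 2KB doorbell space. */
    static inline uint32_t doorbell64_byte_offset(uint32_t qword_index)
    {
        return qword_index * 8;
    }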
@ -699,6 +828,7 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);

struct amdgpu_fpriv {
struct amdgpu_vm vm;
+struct amdgpu_bo_va *prt_va;
struct mutex bo_list_lock;
struct idr bo_list_handles;
struct amdgpu_ctx_mgr ctx_mgr;

@ -776,9 +906,12 @@ struct amdgpu_rlc {
struct amdgpu_mec {
struct amdgpu_bo *hpd_eop_obj;
u64 hpd_eop_gpu_addr;
+struct amdgpu_bo *mec_fw_obj;
+u64 mec_fw_gpu_addr;
u32 num_pipe;
u32 num_mec;
u32 num_queue;
+void *mqd_backup[AMDGPU_MAX_COMPUTE_RINGS + 1];
};

struct amdgpu_kiq {

@ -810,7 +943,16 @@ struct amdgpu_rb_config {
uint32_t raster_config_1;
};

-struct amdgpu_gca_config {
+struct gb_addr_config {
+uint16_t pipe_interleave_size;
+uint8_t num_pipes;
+uint8_t max_compress_frags;
+uint8_t num_banks;
+uint8_t num_se;
+uint8_t num_rb_per_se;
+};
+
+struct amdgpu_gfx_config {
unsigned max_shader_engines;
unsigned max_tile_pipes;
unsigned max_cu_per_sh;

@ -839,7 +981,11 @@ struct amdgpu_gca_config {
uint32_t tile_mode_array[32];
uint32_t macrotile_mode_array[16];

+struct gb_addr_config gb_addr_config_fields;
struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE];
+
+/* gfx configure feature */
+uint32_t double_offchip_lds_buf;
};

struct amdgpu_cu_info {

@ -857,9 +1003,31 @@ struct amdgpu_gfx_funcs {
void (*read_wave_sgprs)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t start, uint32_t size, uint32_t *dst);
};

+struct amdgpu_ngg_buf {
+struct amdgpu_bo *bo;
+uint64_t gpu_addr;
+uint32_t size;
+uint32_t bo_size;
+};
+
+enum {
+PRIM = 0,
+POS,
+CNTL,
+PARAM,
+NGG_BUF_MAX
+};
+
+struct amdgpu_ngg {
+struct amdgpu_ngg_buf buf[NGG_BUF_MAX];
+uint32_t gds_reserve_addr;
+uint32_t gds_reserve_size;
+bool init;
+};
+
struct amdgpu_gfx {
struct mutex gpu_clock_mutex;
-struct amdgpu_gca_config config;
+struct amdgpu_gfx_config config;
struct amdgpu_rlc rlc;
struct amdgpu_mec mec;
struct amdgpu_kiq kiq;

@ -899,6 +1067,9 @@ struct amdgpu_gfx {
/* reset mask */
uint32_t grbm_soft_reset;
uint32_t srbm_soft_reset;
+bool in_reset;
+/* NGG */
+struct amdgpu_ngg ngg;
};

int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,

@ -1007,66 +1178,11 @@ struct amdgpu_wb {

int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);
+int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb);
+void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb);

void amdgpu_get_pcie_info(struct amdgpu_device *adev);

-/*
- * UVD
- */
-#define AMDGPU_DEFAULT_UVD_HANDLES 10
-#define AMDGPU_MAX_UVD_HANDLES 40
-#define AMDGPU_UVD_STACK_SIZE (200*1024)
-#define AMDGPU_UVD_HEAP_SIZE (256*1024)
-#define AMDGPU_UVD_SESSION_SIZE (50*1024)
-#define AMDGPU_UVD_FIRMWARE_OFFSET 256
-
-struct amdgpu_uvd {
-struct amdgpu_bo *vcpu_bo;
-void *cpu_addr;
-uint64_t gpu_addr;
-unsigned fw_version;
-void *saved_bo;
-unsigned max_handles;
-atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
-struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
-struct delayed_work idle_work;
-const struct firmware *fw; /* UVD firmware */
-struct amdgpu_ring ring;
-struct amdgpu_irq_src irq;
-bool address_64_bit;
-bool use_ctx_buf;
-struct amd_sched_entity entity;
-uint32_t srbm_soft_reset;
-};
-
-/*
- * VCE
- */
-#define AMDGPU_MAX_VCE_HANDLES 16
-#define AMDGPU_VCE_FIRMWARE_OFFSET 256
-
-#define AMDGPU_VCE_HARVEST_VCE0 (1 << 0)
-#define AMDGPU_VCE_HARVEST_VCE1 (1 << 1)
-
-struct amdgpu_vce {
-struct amdgpu_bo *vcpu_bo;
-uint64_t gpu_addr;
-unsigned fw_version;
-unsigned fb_version;
-atomic_t handles[AMDGPU_MAX_VCE_HANDLES];
-struct drm_file *filp[AMDGPU_MAX_VCE_HANDLES];
-uint32_t img_size[AMDGPU_MAX_VCE_HANDLES];
-struct delayed_work idle_work;
-struct mutex idle_mutex;
-const struct firmware *fw; /* VCE firmware */
-struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS];
-struct amdgpu_irq_src irq;
-unsigned harvest_config;
-struct amd_sched_entity entity;
-uint32_t srbm_soft_reset;
-unsigned num_rings;
-};
-
/*
 * SDMA
 */

@ -1095,11 +1211,22 @@ struct amdgpu_sdma {
/*
 * Firmware
 */
+enum amdgpu_firmware_load_type {
+AMDGPU_FW_LOAD_DIRECT = 0,
+AMDGPU_FW_LOAD_SMU,
+AMDGPU_FW_LOAD_PSP,
+};
+
struct amdgpu_firmware {
struct amdgpu_firmware_info ucode[AMDGPU_UCODE_ID_MAXIMUM];
-bool smu_load;
+enum amdgpu_firmware_load_type load_type;
struct amdgpu_bo *fw_buf;
unsigned int fw_size;
+unsigned int max_ucodes;
+/* firmwares are loaded by psp instead of smu from vega10 */
+const struct amdgpu_psp_funcs *funcs;
+struct amdgpu_bo *rbuf;
+struct mutex mutex;
};

/*

@ -1112,10 +1239,6 @@ void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);
 * Testing
 */
void amdgpu_test_moves(struct amdgpu_device *adev);
-void amdgpu_test_ring_sync(struct amdgpu_device *adev,
-struct amdgpu_ring *cpA,
-struct amdgpu_ring *cpB);
-void amdgpu_test_syncing(struct amdgpu_device *adev);

/*
 * MMU Notifier

@ -1202,6 +1325,8 @@ struct amdgpu_asic_funcs {
/* static power management */
int (*get_pcie_lanes)(struct amdgpu_device *adev);
void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
+/* get config memsize register */
+u32 (*get_config_memsize)(struct amdgpu_device *adev);
};

/*

@ -1342,9 +1467,11 @@ struct amdgpu_device {
bool have_disp_power_ref;

/* BIOS */
+bool is_atom_fw;
uint8_t *bios;
uint32_t bios_size;
struct amdgpu_bo *stollen_vga_memory;
+uint32_t bios_scratch_reg_offset;
uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];

/* Register/doorbell mmio */

@ -1391,6 +1518,7 @@ struct amdgpu_device {
struct amdgpu_gart gart;
struct amdgpu_dummy_page dummy_page;
struct amdgpu_vm_manager vm_manager;
+struct amdgpu_vmhub vmhub[AMDGPU_MAX_VMHUBS];

/* memory management */
struct amdgpu_mman mman;

@ -1457,6 +1585,9 @@ struct amdgpu_device {
/* firmwares */
struct amdgpu_firmware firmware;

+/* PSP */
+struct psp_context psp;
+
/* GDS */
struct amdgpu_gds gds;

@ -1501,23 +1632,32 @@ void amdgpu_device_fini(struct amdgpu_device *adev);
int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);

uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
-bool always_indirect);
+uint32_t acc_flags);
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
-bool always_indirect);
+uint32_t acc_flags);
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);

u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
+u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index);
+void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);

/*
 * Registers read & write functions.
 */
-#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), false)
-#define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), true)
-#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), false))
-#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), false)
-#define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), true)
+
+#define AMDGPU_REGS_IDX (1<<0)
+#define AMDGPU_REGS_NO_KIQ (1<<1)
+
+#define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
+#define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
+
+#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), 0)
+#define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_IDX)
+#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), 0))
+#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), 0)
+#define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_IDX)
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))

@ -1556,6 +1696,8 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);

#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))
+#define RDOORBELL64(index) amdgpu_mm_rdoorbell64(adev, (index))
+#define WDOORBELL64(index, v) amdgpu_mm_wdoorbell64(adev, (index), (v))

#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK
@ -1584,7 +1726,7 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
{
if (ring->count_dw <= 0)
DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
-ring->ring[ring->wptr++] = v;
+ring->ring[ring->wptr++ & ring->buf_mask] = v;
ring->wptr &= ring->ptr_mask;
ring->count_dw--;
}

@ -1597,9 +1739,9 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring, void *sr
if (ring->count_dw < count_dw) {
DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
} else {
-occupied = ring->wptr & ring->ptr_mask;
+occupied = ring->wptr & ring->buf_mask;
dst = (void *)&ring->ring[occupied];
-chunk1 = ring->ptr_mask + 1 - occupied;
+chunk1 = ring->buf_mask + 1 - occupied;
chunk1 = (chunk1 >= count_dw) ? count_dw: chunk1;
chunk2 = count_dw - chunk1;
chunk1 <<= 2;
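The two ring hunks above switch the buffer indexing from ptr_mask to buf_mask: the hardware write pointer can wrap over a wider range than the backing store, so array indexing must use the buffer-size mask. A hedged sketch of that wraparound, with a hypothetical demo_ring type standing in for struct amdgpu_ring:

    #include <stdint.h>

    struct demo_ring {
        uint32_t *ring;     /* backing store, (buf_mask + 1) entries */
        uint64_t wptr;      /* monotonically advancing write pointer */
        uint64_t buf_mask;  /* entries - 1, a power-of-two mask */
        uint64_t ptr_mask;  /* hardware pointer wrap mask, >= buf_mask */
    };

    static void demo_ring_write(struct demo_ring *r, uint32_t v)
    {
        r->ring[r->wptr++ & r->buf_mask] = v; /* index wraps with buf_mask */
        r->wptr &= r->ptr_mask;               /* pointer wraps with ptr_mask */
    }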
@ -1650,11 +1792,13 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
+#define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
+#define amdgpu_vm_get_pte_flags(adev, flags) (adev)->gart.gart_funcs->get_vm_pte_flags((adev),(flags))
#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))

@ -1698,6 +1842,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
#define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))
+#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))

/* Common functions */
int amdgpu_gpu_reset(struct amdgpu_device *adev);

@ -1723,7 +1868,7 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
int *last_invalidated);
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
-uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
+uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
struct ttm_mem_reg *mem);
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);

@ -1762,8 +1907,6 @@ void amdgpu_driver_lastclose_kms(struct drm_device *dev);
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void amdgpu_driver_postclose_kms(struct drm_device *dev,
struct drm_file *file_priv);
-void amdgpu_driver_preclose_kms(struct drm_device *dev,
-struct drm_file *file_priv);
int amdgpu_suspend(struct amdgpu_device *adev);
int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon);
int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon);
@ -74,9 +74,9 @@ static void amdgpu_afmt_calc_cts(uint32_t clock, int *CTS, int *N, int freq)

/* Check that we are in spec (not always possible) */
if (n < (128*freq/1500))
-printk(KERN_WARNING "Calculated ACR N value is too small. You may experience audio problems.\n");
+pr_warn("Calculated ACR N value is too small. You may experience audio problems.\n");
if (n > (128*freq/300))
-printk(KERN_WARNING "Calculated ACR N value is too large. You may experience audio problems.\n");
+pr_warn("Calculated ACR N value is too large. You may experience audio problems.\n");

*N = n;
*CTS = cts;
@ -1748,3 +1748,31 @@ void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
memcpy(dst, src, num_bytes);
#endif
}
+
+int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
+{
+struct atom_context *ctx = adev->mode_info.atom_context;
+int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
+uint16_t data_offset;
+int usage_bytes = 0;
+struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
+
+if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
+firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
+
+DRM_DEBUG("atom firmware requested %08x %dkb\n",
+le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
+le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
+
+usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
+}
+ctx->scratch_size_bytes = 0;
+if (usage_bytes == 0)
+usage_bytes = 20 * 1024;
+/* allocate some scratch memory */
+ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
+if (!ctx->scratch)
+return -ENOMEM;
+ctx->scratch_size_bytes = usage_bytes;
+return 0;
+}
@ -215,4 +215,7 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
int amdgpu_atombios_get_svi2_info(struct amdgpu_device *adev,
u8 voltage_type,
u8 *svd_gpio_id, u8 *svc_gpio_id);

+int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev);
+
#endif
112	drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c	Normal file
@ -0,0 +1,112 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <drm/drmP.h>
+#include <drm/amdgpu_drm.h>
+#include "amdgpu.h"
+#include "atomfirmware.h"
+#include "amdgpu_atomfirmware.h"
+#include "atom.h"
+
+#define get_index_into_master_table(master_table, table_name) (offsetof(struct master_table, table_name) / sizeof(uint16_t))
+
+bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev)
+{
+int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+firmwareinfo);
+uint16_t data_offset;
+
+if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
+NULL, NULL, &data_offset)) {
+struct atom_firmware_info_v3_1 *firmware_info =
+(struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
+data_offset);
+
+if (le32_to_cpu(firmware_info->firmware_capability) &
+ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION)
+return true;
+}
+return false;
+}
+
+void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev)
+{
+int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+firmwareinfo);
+uint16_t data_offset;
+
+if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
+NULL, NULL, &data_offset)) {
+struct atom_firmware_info_v3_1 *firmware_info =
+(struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
+data_offset);
+
+adev->bios_scratch_reg_offset =
+le32_to_cpu(firmware_info->bios_scratch_reg_startaddr);
+}
+}
+
+void amdgpu_atomfirmware_scratch_regs_save(struct amdgpu_device *adev)
+{
+int i;
+
+for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
+adev->bios_scratch[i] = RREG32(adev->bios_scratch_reg_offset + i);
+}
+
+void amdgpu_atomfirmware_scratch_regs_restore(struct amdgpu_device *adev)
+{
+int i;
+
+for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
+WREG32(adev->bios_scratch_reg_offset + i, adev->bios_scratch[i]);
+}
+
+int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
+{
+struct atom_context *ctx = adev->mode_info.atom_context;
+int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+vram_usagebyfirmware);
+uint16_t data_offset;
+int usage_bytes = 0;
+
+if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
+struct vram_usagebyfirmware_v2_1 *firmware_usage =
+(struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
+
+DRM_DEBUG("atom firmware requested %08x %dkb fw %dkb drv\n",
+le32_to_cpu(firmware_usage->start_address_in_kb),
+le16_to_cpu(firmware_usage->used_by_firmware_in_kb),
+le16_to_cpu(firmware_usage->used_by_driver_in_kb));
+
+usage_bytes = le16_to_cpu(firmware_usage->used_by_driver_in_kb) * 1024;
+}
+ctx->scratch_size_bytes = 0;
+if (usage_bytes == 0)
+usage_bytes = 20 * 1024;
+/* allocate some scratch memory */
+ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
+if (!ctx->scratch)
+return -ENOMEM;
+ctx->scratch_size_bytes = usage_bytes;
+return 0;
+}
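The get_index_into_master_table macro in the new file works because the atomfirmware master data table is laid out as consecutive uint16_t offsets, so dividing a field's byte offset by sizeof(uint16_t) yields its slot index. A hedged sketch of that computation; `demo_master` is a made-up stand-in for atom_master_list_of_data_tables_v2_1, not the real layout:

    #include <stddef.h>
    #include <stdint.h>

    struct demo_master {
        uint16_t table_a;      /* index 0 */
        uint16_t table_b;      /* index 1 */
        uint16_t firmwareinfo; /* index 2 */
    };

    /* same offsetof/sizeof trick as the driver macro */
    #define demo_index(table, name) (offsetof(struct table, name) / sizeof(uint16_t))

    /* demo_index(demo_master, firmwareinfo) evaluates to 2 */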
33	drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h	Normal file
@ -0,0 +1,33 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_ATOMFIRMWARE_H__
+#define __AMDGPU_ATOMFIRMWARE_H__
+
+bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev);
+void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev);
+void amdgpu_atomfirmware_scratch_regs_save(struct amdgpu_device *adev);
+void amdgpu_atomfirmware_scratch_regs_restore(struct amdgpu_device *adev);
+int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
+
+#endif
@ -583,8 +583,8 @@ static bool amdgpu_atpx_detect(void)

if (has_atpx && vga_count == 2) {
acpi_get_name(amdgpu_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer);
-printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n",
-acpi_method_name);
+pr_info("vga_switcheroo: detected switching method %s handle\n",
+acpi_method_name);
amdgpu_atpx_priv.atpx_detected = true;
amdgpu_atpx_priv.bridge_pm_usable = d3_supported;
amdgpu_atpx_init();
@ -86,6 +86,18 @@ static bool check_atom_bios(uint8_t *bios, size_t size)
return false;
}

+static bool is_atom_fw(uint8_t *bios)
+{
+uint16_t bios_header_start = bios[0x48] | (bios[0x49] << 8);
+uint8_t frev = bios[bios_header_start + 2];
+uint8_t crev = bios[bios_header_start + 3];
+
+if ((frev < 3) ||
+((frev == 3) && (crev < 3)))
+return false;
+
+return true;
+}
+
/* If you boot an IGP board with a discrete card as the primary,
 * the IGP rom is not accessible via the rom bar as the IGP rom is

@ -419,26 +431,30 @@ static inline bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
bool amdgpu_get_bios(struct amdgpu_device *adev)
{
if (amdgpu_atrm_get_bios(adev))
-return true;
+goto success;

if (amdgpu_acpi_vfct_bios(adev))
-return true;
+goto success;

if (igp_read_bios_from_vram(adev))
-return true;
+goto success;

if (amdgpu_read_bios(adev))
-return true;
+goto success;

if (amdgpu_read_bios_from_rom(adev))
-return true;
+goto success;

if (amdgpu_read_disabled_bios(adev))
-return true;
+goto success;

if (amdgpu_read_platform_bios(adev))
-return true;
+goto success;

DRM_ERROR("Unable to locate a BIOS ROM\n");
return false;
+
+success:
+adev->is_atom_fw = is_atom_fw(adev->bios);
+return true;
}
@ -571,7 +571,9 @@ static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
.process = cgs_process_irq,
};

-static int amdgpu_cgs_add_irq_source(struct cgs_device *cgs_device, unsigned src_id,
+static int amdgpu_cgs_add_irq_source(void *cgs_device,
+unsigned client_id,
+unsigned src_id,
unsigned num_types,
cgs_irq_source_set_func_t set,
cgs_irq_handler_func_t handler,

@ -597,7 +599,7 @@ static int amdgpu_cgs_add_irq_source(struct cgs_device *cgs_device, unsigned src
irq_params->handler = handler;
irq_params->private_data = private_data;
source->data = (void *)irq_params;
-ret = amdgpu_irq_add_id(adev, src_id, source);
+ret = amdgpu_irq_add_id(adev, client_id, src_id, source);
if (ret) {
kfree(irq_params);
kfree(source);

@ -606,16 +608,26 @@ static int amdgpu_cgs_add_irq_source(struct cgs_device *cgs_device, unsigned src
return ret;
}

-static int amdgpu_cgs_irq_get(struct cgs_device *cgs_device, unsigned src_id, unsigned type)
+static int amdgpu_cgs_irq_get(void *cgs_device, unsigned client_id,
+unsigned src_id, unsigned type)
{
CGS_FUNC_ADEV;
-return amdgpu_irq_get(adev, adev->irq.sources[src_id], type);
+
+if (!adev->irq.client[client_id].sources)
+return -EINVAL;
+
+return amdgpu_irq_get(adev, adev->irq.client[client_id].sources[src_id], type);
}

-static int amdgpu_cgs_irq_put(struct cgs_device *cgs_device, unsigned src_id, unsigned type)
+static int amdgpu_cgs_irq_put(void *cgs_device, unsigned client_id,
+unsigned src_id, unsigned type)
{
CGS_FUNC_ADEV;
-return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
+
+if (!adev->irq.client[client_id].sources)
+return -EINVAL;
+
+return amdgpu_irq_put(adev, adev->irq.client[client_id].sources[src_id], type);
}

static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,

@ -825,9 +837,8 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
uint32_t ucode_start_address;
const uint8_t *src;
const struct smc_firmware_header_v1_0 *hdr;
-
-if (CGS_UCODE_ID_SMU_SK == type)
-amdgpu_cgs_rel_firmware(cgs_device, CGS_UCODE_ID_SMU);
+const struct common_firmware_header *header;
+struct amdgpu_firmware_info *ucode = NULL;

if (!adev->pm.fw) {
switch (adev->asic_type) {

@ -889,6 +900,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
case CHIP_POLARIS12:
strcpy(fw_name, "amdgpu/polaris12_smc.bin");
break;
+case CHIP_VEGA10:
+strcpy(fw_name, "amdgpu/vega10_smc.bin");
+break;
default:
DRM_ERROR("SMC firmware not supported\n");
return -EINVAL;

@ -907,6 +921,15 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
adev->pm.fw = NULL;
return err;
}
+
+if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
+ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
+ucode->fw = adev->pm.fw;
+header = (const struct common_firmware_header *)ucode->fw->data;
+adev->firmware.fw_size +=
+ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+}
}

hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
@ -82,6 +82,15 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
return -EINVAL;
}
break;
+case AMDGPU_HW_IP_UVD_ENC:
+if (ring < adev->uvd.num_enc_rings){
+*out_ring = &adev->uvd.ring_enc[ring];
+} else {
+DRM_ERROR("only %d UVD ENC rings are supported\n",
+adev->uvd.num_enc_rings);
+return -EINVAL;
+}
+break;
}

if (!(*out_ring && (*out_ring)->adev)) {

@ -759,23 +768,33 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
amdgpu_bo_unref(&parser->uf_entry.robj);
}

-static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
-struct amdgpu_vm *vm)
+static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
{
struct amdgpu_device *adev = p->adev;
+struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va *bo_va;
struct amdgpu_bo *bo;
int i, r;

-r = amdgpu_vm_update_page_directory(adev, vm);
+r = amdgpu_vm_update_directories(adev, vm);
if (r)
return r;

-r = amdgpu_sync_fence(adev, &p->job->sync, vm->page_directory_fence);
+r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_dir_update);
if (r)
return r;

-r = amdgpu_vm_clear_freed(adev, vm);
+r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (r)
return r;

+r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
+if (r)
+return r;
+
+r = amdgpu_sync_fence(adev, &p->job->sync,
+fpriv->prt_va->last_pt_update);
+if (r)
+return r;
+

@ -853,9 +872,9 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
}

if (p->job->vm) {
-p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
+p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.bo);

-r = amdgpu_bo_vm_update_pte(p, vm);
+r = amdgpu_bo_vm_update_pte(p);
if (r)
return r;
}

@ -869,7 +888,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
int i, j;
-int r;
+int r, ce_preempt = 0, de_preempt = 0;

for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
struct amdgpu_cs_chunk *chunk;

@ -884,13 +903,26 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
continue;

+if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX && amdgpu_sriov_vf(adev)) {
+if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
+if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
+ce_preempt++;
+else
+de_preempt++;
+}
+
+/* each GFX command submit allows 0 or 1 IB preemptible for CE & DE */
+if (ce_preempt > 1 || de_preempt > 1)
+return -EINVAL;
+}
+
r = amdgpu_cs_get_ring(adev, chunk_ib->ip_type,
chunk_ib->ip_instance, chunk_ib->ring,
&ring);
if (r)
return r;

-if (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) {
+if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) {
parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
if (!parser->ctx->preamble_presented) {
parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
|
@ -40,6 +40,7 @@
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"

@ -48,9 +49,11 @@
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_pm.h"

static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);

@ -74,6 +77,7 @@ static const char *amdgpu_asic_name[] = {
    "POLARIS10",
    "POLARIS11",
    "POLARIS12",
    "VEGA10",
    "LAST",
};

@ -90,16 +94,16 @@ bool amdgpu_device_is_px(struct drm_device *dev)
 * MMIO register access helper functions.
 */
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
                        bool always_indirect)
                        uint32_t acc_flags)
{
    uint32_t ret;

    if (amdgpu_sriov_runtime(adev)) {
    if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
        BUG_ON(in_interrupt());
        return amdgpu_virt_kiq_rreg(adev, reg);
    }

    if ((reg * 4) < adev->rmmio_size && !always_indirect)
    if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
        ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
    else {
        unsigned long flags;

@ -114,16 +118,16 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
}

void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
                    bool always_indirect)
                    uint32_t acc_flags)
{
    trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

    if (amdgpu_sriov_runtime(adev)) {
    if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
        BUG_ON(in_interrupt());
        return amdgpu_virt_kiq_wreg(adev, reg, v);
    }

    if ((reg * 4) < adev->rmmio_size && !always_indirect)
    if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
        writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
    else {
        unsigned long flags;
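
The bool always_indirect parameter becomes a bit mask, so callers can request indexed access and opt out of the KIQ path independently. A sketch of how the two bits are meant to be combined; the macro values here are assumptions, the real definitions live in the driver headers:

#define AMDGPU_REGS_IDX    (1 << 0) /* assumed: use the indexed MM_INDEX/MM_DATA path */
#define AMDGPU_REGS_NO_KIQ (1 << 1) /* assumed: bypass KIQ even under SR-IOV runtime */

/* direct MMIO read that must not bounce through the KIQ */
uint32_t val = amdgpu_mm_rreg(adev, reg, AMDGPU_REGS_NO_KIQ);
/* indexed write for a register beyond the mapped MMIO window */
amdgpu_mm_wreg(adev, reg, val, AMDGPU_REGS_IDX);
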
@ -194,6 +198,44 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
    }
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
    if (index < adev->doorbell.num_doorbells) {
        return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
    } else {
        DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
        return 0;
    }
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
    if (index < adev->doorbell.num_doorbells) {
        atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
    } else {
        DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
    }
}

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
@ -475,7 +517,7 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
    int r;

    if (adev->wb.wb_obj == NULL) {
        r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * 4,
        r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t),
                                    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
                                    &adev->wb.wb_obj, &adev->wb.gpu_addr,
                                    (void **)&adev->wb.wb);

@ -488,7 +530,7 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
        memset(&adev->wb.used, 0, sizeof(adev->wb.used));

        /* clear wb memory */
        memset((char *)adev->wb.wb, 0, AMDGPU_GPU_PAGE_SIZE);
        memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
    }

    return 0;

@ -515,6 +557,29 @@ int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
    }
}

/**
 * amdgpu_wb_get_64bit - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb)
{
    unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used,
                               adev->wb.num_wb, 0, 2, 7, 0);
    if ((offset + 1) < adev->wb.num_wb) {
        __set_bit(offset, adev->wb.used);
        __set_bit(offset + 1, adev->wb.used);
        *wb = offset;
        return 0;
    } else {
        return -EINVAL;
    }
}

/**
 * amdgpu_wb_free - Free a wb entry
 *
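The new 64-bit variant reserves two consecutive 32-bit writeback slots (bitmap_find_next_zero_area_off() is asked for 2 bits with align mask 7, keeping the pair naturally aligned), so the GPU can land one 64-bit value in a single write. A hypothetical caller, assuming the usual 4-byte slot stride into the writeback buffer:

u32 wb;

if (!amdgpu_wb_get_64bit(adev, &wb)) {
    u64 gpu_addr = adev->wb.gpu_addr + (wb * 4); /* GPU address of the pair */
    u64 *cpu_ptr = (u64 *)&adev->wb.wb[wb];      /* CPU view of the same two slots */
    /* ... point a 64-bit fence or rptr location at gpu_addr ... */
    amdgpu_wb_free_64bit(adev, wb);              /* releases both bits */
}
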
@ -529,6 +594,22 @@ void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
    __clear_bit(wb, adev->wb.used);
}

/**
 * amdgpu_wb_free_64bit - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb)
{
    if ((wb + 1) < adev->wb.num_wb) {
        __clear_bit(wb, adev->wb.used);
        __clear_bit(wb + 1, adev->wb.used);
    }
}

/**
 * amdgpu_vram_location - try to find VRAM location
 * @adev: amdgpu device structure holding all necessary informations

@ -602,7 +683,7 @@ void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
            dev_warn(adev->dev, "limiting GTT\n");
            mc->gtt_size = size_bf;
        }
        mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
        mc->gtt_start = 0;
    } else {
        if (mc->gtt_size > size_af) {
            dev_warn(adev->dev, "limiting GTT\n");

@ -636,9 +717,9 @@ bool amdgpu_need_post(struct amdgpu_device *adev)
        return true;
    }
    /* then check MEM_SIZE, in case the crtcs are off */
    reg = RREG32(mmCONFIG_MEMSIZE);
    reg = amdgpu_asic_get_config_memsize(adev);

    if (reg)
    if ((reg != 0) && (reg != 0xffffffff))
        return false;

    return true;

@ -915,8 +996,13 @@ static int amdgpu_atombios_init(struct amdgpu_device *adev)
    }

    mutex_init(&adev->mode_info.atom_context->mutex);
    amdgpu_atombios_scratch_regs_init(adev);
    amdgpu_atom_allocate_fb_scratch(adev->mode_info.atom_context);
    if (adev->is_atom_fw) {
        amdgpu_atomfirmware_scratch_regs_init(adev);
        amdgpu_atomfirmware_allocate_fb_scratch(adev);
    } else {
        amdgpu_atombios_scratch_regs_init(adev);
        amdgpu_atombios_allocate_fb_scratch(adev);
    }
    return 0;
}

@ -954,6 +1040,45 @@ static bool amdgpu_check_pot_argument(int arg)
    return (arg & (arg - 1)) == 0;
}

static void amdgpu_get_block_size(struct amdgpu_device *adev)
{
    /* from AI, asic starts to support multiple level VMPT */
    if (adev->asic_type >= CHIP_VEGA10) {
        if (amdgpu_vm_block_size != 9)
            dev_warn(adev->dev,
                     "Multi-VMPT limits block size to one page!\n");
        amdgpu_vm_block_size = 9;
        return;
    }
    /* defines number of bits in page table versus page directory,
     * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
     * page table and the remaining bits are in the page directory */
    if (amdgpu_vm_block_size == -1) {

        /* Total bits covered by PD + PTs */
        unsigned bits = ilog2(amdgpu_vm_size) + 18;

        /* Make sure the PD is 4K in size up to 8GB address space.
           Above that split equal between PD and PTs */
        if (amdgpu_vm_size <= 8)
            amdgpu_vm_block_size = bits - 9;
        else
            amdgpu_vm_block_size = (bits + 3) / 2;

    } else if (amdgpu_vm_block_size < 9) {
        dev_warn(adev->dev, "VM page table size (%d) too small\n",
                 amdgpu_vm_block_size);
        amdgpu_vm_block_size = 9;
    }

    if (amdgpu_vm_block_size > 24 ||
        (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
        dev_warn(adev->dev, "VM page table size (%d) too large\n",
                 amdgpu_vm_block_size);
        amdgpu_vm_block_size = 9;
    }
}

/**
 * amdgpu_check_arguments - validate module params
 *
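
Worked through for typical values: with amdgpu_vm_size of 64 (GB), the space needs ilog2(64) + 18 = 24 bits above the 12-bit page offset; 64 is above the 8 GB threshold, so the split is (24 + 3) / 2 = 13, meaning each page table covers 2^13 pages (32 MB) and the directory keeps the remaining 11 bits (2048 entries). For an 8 GB space, bits = ilog2(8) + 18 = 21 and the block size is 21 - 9 = 12, leaving 9 directory bits: 512 entries of 8 bytes, exactly the 4K page directory the comment promises.
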
@ -1004,33 +1129,7 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
        amdgpu_vm_size = 8;
    }

    /* defines number of bits in page table versus page directory,
     * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
     * page table and the remaining bits are in the page directory */
    if (amdgpu_vm_block_size == -1) {

        /* Total bits covered by PD + PTs */
        unsigned bits = ilog2(amdgpu_vm_size) + 18;

        /* Make sure the PD is 4K in size up to 8GB address space.
           Above that split equal between PD and PTs */
        if (amdgpu_vm_size <= 8)
            amdgpu_vm_block_size = bits - 9;
        else
            amdgpu_vm_block_size = (bits + 3) / 2;

    } else if (amdgpu_vm_block_size < 9) {
        dev_warn(adev->dev, "VM page table size (%d) too small\n",
                 amdgpu_vm_block_size);
        amdgpu_vm_block_size = 9;
    }

    if (amdgpu_vm_block_size > 24 ||
        (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
        dev_warn(adev->dev, "VM page table size (%d) too large\n",
                 amdgpu_vm_block_size);
        amdgpu_vm_block_size = 9;
    }
    amdgpu_get_block_size(adev);

    if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
        !amdgpu_check_pot_argument(amdgpu_vram_page_split))) {

@ -1059,7 +1158,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
    if (state == VGA_SWITCHEROO_ON) {
        unsigned d3_delay = dev->pdev->d3_delay;

        printk(KERN_INFO "amdgpu: switched on\n");
        pr_info("amdgpu: switched on\n");
        /* don't suspend or resume card normally */
        dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

@ -1070,7 +1169,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
        dev->switch_power_state = DRM_SWITCH_POWER_ON;
        drm_kms_helper_poll_enable(dev);
    } else {
        printk(KERN_INFO "amdgpu: switched off\n");
        pr_info("amdgpu: switched off\n");
        drm_kms_helper_poll_disable(dev);
        dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
        amdgpu_device_suspend(dev, true, true);

@ -1114,13 +1213,15 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
    for (i = 0; i < adev->num_ip_blocks; i++) {
        if (!adev->ip_blocks[i].status.valid)
            continue;
        if (adev->ip_blocks[i].version->type == block_type) {
            r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
                                                                         state);
            if (r)
                return r;
            break;
        }
        if (adev->ip_blocks[i].version->type != block_type)
            continue;
        if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
            continue;
        r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
            (void *)adev, state);
        if (r)
            DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
                      adev->ip_blocks[i].version->funcs->name, r);
    }
    return r;
}

@ -1134,13 +1235,15 @@ int amdgpu_set_powergating_state(struct amdgpu_device *adev,
    for (i = 0; i < adev->num_ip_blocks; i++) {
        if (!adev->ip_blocks[i].status.valid)
            continue;
        if (adev->ip_blocks[i].version->type == block_type) {
            r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
                                                                         state);
            if (r)
                return r;
            break;
        }
        if (adev->ip_blocks[i].version->type != block_type)
            continue;
        if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
            continue;
        r = adev->ip_blocks[i].version->funcs->set_powergating_state(
            (void *)adev, state);
        if (r)
            DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
                      adev->ip_blocks[i].version->funcs->name, r);
    }
    return r;
}

@ -1345,6 +1448,13 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
            return r;
        break;
#endif
    case CHIP_VEGA10:
        adev->family = AMDGPU_FAMILY_AI;

        r = soc15_set_ip_blocks(adev);
        if (r)
            return r;
        break;
    default:
        /* FIXME: not supported yet */
        return -EINVAL;

@ -1476,6 +1586,9 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
        }
    }

    amdgpu_dpm_enable_uvd(adev, false);
    amdgpu_dpm_enable_vce(adev, false);

    return 0;
}

@ -1607,6 +1720,53 @@ int amdgpu_suspend(struct amdgpu_device *adev)
    return 0;
}

static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
{
    int i, r;

    for (i = 0; i < adev->num_ip_blocks; i++) {
        if (!adev->ip_blocks[i].status.valid)
            continue;

        if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
            adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
            adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
            r = adev->ip_blocks[i].version->funcs->hw_init(adev);

        if (r) {
            DRM_ERROR("resume of IP block <%s> failed %d\n",
                      adev->ip_blocks[i].version->funcs->name, r);
            return r;
        }
    }

    return 0;
}

static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
{
    int i, r;

    for (i = 0; i < adev->num_ip_blocks; i++) {
        if (!adev->ip_blocks[i].status.valid)
            continue;

        if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
            adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
            adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH )
            continue;

        r = adev->ip_blocks[i].version->funcs->hw_init(adev);
        if (r) {
            DRM_ERROR("resume of IP block <%s> failed %d\n",
                      adev->ip_blocks[i].version->funcs->name, r);
            return r;
        }
    }

    return 0;
}

static int amdgpu_resume(struct amdgpu_device *adev)
{
    int i, r;

@ -1627,8 +1787,13 @@ static int amdgpu_resume(struct amdgpu_device *adev)

static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
{
    if (amdgpu_atombios_has_gpu_virtualization_table(adev))
        adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
    if (adev->is_atom_fw) {
        if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
            adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
    } else {
        if (amdgpu_atombios_has_gpu_virtualization_table(adev))
            adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
    }
}

/**

@ -1693,6 +1858,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
     * can recall function without having locking issues */
    mutex_init(&adev->vm_manager.lock);
    atomic_set(&adev->irq.ih.lock, 0);
    mutex_init(&adev->firmware.mutex);
    mutex_init(&adev->pm.mutex);
    mutex_init(&adev->gfx.gpu_clock_mutex);
    mutex_init(&adev->srbm_mutex);

@ -1801,14 +1967,16 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        DRM_INFO("GPU post is not needed\n");
    }

    /* Initialize clocks */
    r = amdgpu_atombios_get_clock_info(adev);
    if (r) {
        dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
        goto failed;
    if (!adev->is_atom_fw) {
        /* Initialize clocks */
        r = amdgpu_atombios_get_clock_info(adev);
        if (r) {
            dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
            return r;
        }
        /* init i2c buses */
        amdgpu_atombios_i2c_init(adev);
    }
    /* init i2c buses */
    amdgpu_atombios_i2c_init(adev);

    /* Fence driver */
    r = amdgpu_fence_driver_init(adev);

@ -1837,8 +2005,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
    /* Get a log2 for easy divisions. */
    adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));

    amdgpu_fbdev_init(adev);

    r = amdgpu_ib_pool_init(adev);
    if (r) {
        dev_err(adev->dev, "IB initialization failed (%d).\n", r);

@ -1849,21 +2015,19 @@ int amdgpu_device_init(struct amdgpu_device *adev,
    if (r)
        DRM_ERROR("ib ring test failed (%d).\n", r);

    amdgpu_fbdev_init(adev);

    r = amdgpu_gem_debugfs_init(adev);
    if (r) {
    if (r)
        DRM_ERROR("registering gem debugfs failed (%d).\n", r);
    }

    r = amdgpu_debugfs_regs_init(adev);
    if (r) {
    if (r)
        DRM_ERROR("registering register debugfs failed (%d).\n", r);
    }

    r = amdgpu_debugfs_firmware_init(adev);
    if (r) {
    if (r)
        DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
        return r;
    }

    if ((amdgpu_testing & 1)) {
        if (adev->accel_working)

@ -1871,12 +2035,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        else
            DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
    }
    if ((amdgpu_testing & 2)) {
        if (adev->accel_working)
            amdgpu_test_syncing(adev);
        else
            DRM_INFO("amdgpu: acceleration disabled, skipping sync tests\n");
    }
    if (amdgpu_benchmarking) {
        if (adev->accel_working)
            amdgpu_benchmark(adev, amdgpu_benchmarking);

@ -2023,7 +2181,10 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
     */
    amdgpu_bo_evict_vram(adev);

    amdgpu_atombios_scratch_regs_save(adev);
    if (adev->is_atom_fw)
        amdgpu_atomfirmware_scratch_regs_save(adev);
    else
        amdgpu_atombios_scratch_regs_save(adev);
    pci_save_state(dev->pdev);
    if (suspend) {
        /* Shut down the device */

@ -2075,7 +2236,10 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
            return r;
        }
    }
    amdgpu_atombios_scratch_regs_restore(adev);
    if (adev->is_atom_fw)
        amdgpu_atomfirmware_scratch_regs_restore(adev);
    else
        amdgpu_atombios_scratch_regs_restore(adev);

    /* post card */
    if (amdgpu_need_post(adev)) {

@ -2288,6 +2452,117 @@ static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
    return r;
}

/**
 * amdgpu_sriov_gpu_reset - reset the asic
 *
 * @adev: amdgpu device pointer
 * @voluntary: true if the reset is requested by the guest,
 * false if it is requested by the hypervisor
 *
 * Attempt to reset the GPU if it has hung (all asics),
 * for the SR-IOV case.
 * Returns 0 for success or an error on failure.
 */
int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, bool voluntary)
{
    int i, r = 0;
    int resched;
    struct amdgpu_bo *bo, *tmp;
    struct amdgpu_ring *ring;
    struct dma_fence *fence = NULL, *next = NULL;

    mutex_lock(&adev->virt.lock_reset);
    atomic_inc(&adev->gpu_reset_counter);
    adev->gfx.in_reset = true;

    /* block TTM */
    resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);

    /* block scheduler */
    for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
        ring = adev->rings[i];

        if (!ring || !ring->sched.thread)
            continue;

        kthread_park(ring->sched.thread);
        amd_sched_hw_job_reset(&ring->sched);
    }

    /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
    amdgpu_fence_driver_force_completion(adev);

    /* request to take full control of GPU before re-initialization */
    if (voluntary)
        amdgpu_virt_reset_gpu(adev);
    else
        amdgpu_virt_request_full_gpu(adev, true);

    /* Resume IP prior to SMC */
    amdgpu_sriov_reinit_early(adev);

    /* we need recover gart prior to run SMC/CP/SDMA resume */
    amdgpu_ttm_recover_gart(adev);

    /* now we are okay to resume SMC/CP/SDMA */
    amdgpu_sriov_reinit_late(adev);

    amdgpu_irq_gpu_reset_resume_helper(adev);

    if (amdgpu_ib_ring_tests(adev))
        dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);

    /* release full control of GPU after ib test */
    amdgpu_virt_release_full_gpu(adev, true);

    DRM_INFO("recover vram bo from shadow\n");

    ring = adev->mman.buffer_funcs_ring;
    mutex_lock(&adev->shadow_list_lock);
    list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
        amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
        if (fence) {
            r = dma_fence_wait(fence, false);
            if (r) {
                WARN(r, "recovery from shadow isn't completed\n");
                break;
            }
        }

        dma_fence_put(fence);
        fence = next;
    }
    mutex_unlock(&adev->shadow_list_lock);

    if (fence) {
        r = dma_fence_wait(fence, false);
        if (r)
            WARN(r, "recovery from shadow isn't completed\n");
    }
    dma_fence_put(fence);

    for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
        struct amdgpu_ring *ring = adev->rings[i];
        if (!ring || !ring->sched.thread)
            continue;

        amd_sched_job_recovery(&ring->sched);
        kthread_unpark(ring->sched.thread);
    }

    drm_helper_resume_force_mode(adev->ddev);
    ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
    if (r) {
        /* bad news, how to tell it to userspace ? */
        dev_info(adev->dev, "GPU reset failed\n");
    }

    adev->gfx.in_reset = false;
    mutex_unlock(&adev->virt.lock_reset);
    return r;
}

/**
 * amdgpu_gpu_reset - reset the asic
 *
@ -2303,7 +2578,7 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
    bool need_full_reset;

    if (amdgpu_sriov_vf(adev))
        return 0;
        return amdgpu_sriov_gpu_reset(adev, true);

    if (!amdgpu_check_soft_reset(adev)) {
        DRM_INFO("No hardware hang detected. Did some blocks stall?\n");

@ -2349,9 +2624,15 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
            amdgpu_display_stop_mc_access(adev, &save);
            amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
        }
        amdgpu_atombios_scratch_regs_save(adev);
        if (adev->is_atom_fw)
            amdgpu_atomfirmware_scratch_regs_save(adev);
        else
            amdgpu_atombios_scratch_regs_save(adev);
        r = amdgpu_asic_reset(adev);
        amdgpu_atombios_scratch_regs_restore(adev);
        if (adev->is_atom_fw)
            amdgpu_atomfirmware_scratch_regs_restore(adev);
        else
            amdgpu_atombios_scratch_regs_restore(adev);
        /* post card */
        amdgpu_atom_asic_init(adev->mode_info.atom_context);

@ -2390,7 +2671,7 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
                if (fence) {
                    r = dma_fence_wait(fence, false);
                    if (r) {
                        WARN(r, "recovery from shadow isn't comleted\n");
                        WARN(r, "recovery from shadow isn't completed\n");
                        break;
                    }
                }

@ -2402,7 +2683,7 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
            if (fence) {
                r = dma_fence_wait(fence, false);
                if (r)
                    WARN(r, "recovery from shadow isn't comleted\n");
                    WARN(r, "recovery from shadow isn't completed\n");
            }
            dma_fence_put(fence);
        }

@ -2957,24 +3238,42 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
                                          size_t size, loff_t *pos)
{
    struct amdgpu_device *adev = file_inode(f)->i_private;
    int idx, r;
    int32_t value;
    int idx, x, outsize, r, valuesize;
    uint32_t values[16];

    if (size != 4 || *pos & 0x3)
    if (size & 3 || *pos & 0x3)
        return -EINVAL;

    if (amdgpu_dpm == 0)
        return -EINVAL;

    /* convert offset to sensor number */
    idx = *pos >> 2;

    valuesize = sizeof(values);
    if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
        r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &value);
        r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &values[0], &valuesize);
    else if (adev->pm.funcs && adev->pm.funcs->read_sensor)
        r = adev->pm.funcs->read_sensor(adev, idx, &values[0],
                                        &valuesize);
    else
        return -EINVAL;

    if (!r)
        r = put_user(value, (int32_t *)buf);
    if (size > valuesize)
        return -EINVAL;

    return !r ? 4 : r;
    outsize = 0;
    x = 0;
    if (!r) {
        while (size) {
            r = put_user(values[x++], (int32_t *)buf);
            buf += 4;
            size -= 4;
            outsize += 4;
        }
    }

    return !r ? outsize : r;
}

static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
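
With the size check relaxed from exactly 4 bytes to any 4-byte multiple, a single read can now return a multi-dword sensor value from the powerplay backends. A hedged userspace sketch against the debugfs file; the path and the sensor offset are assumptions for illustration:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    /* assumed debugfs location for card 0 */
    int fd = open("/sys/kernel/debug/dri/0/amdgpu_sensors", O_RDONLY);
    uint32_t vals[4];
    ssize_t n;

    if (fd < 0)
        return 1;
    /* offset 0 selects sensor 0; the kernel copies at most valuesize bytes */
    n = pread(fd, vals, sizeof(vals), 0);
    if (n > 0)
        printf("sensor 0: %u (%zd bytes returned)\n", vals[0], n);
    close(fd);
    return 0;
}
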
@ -31,86 +31,88 @@

void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
    printk("\tui class: ");
    const char *s;

    switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
    case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
    default:
        printk("none\n");
        s = "none";
        break;
    case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
        printk("battery\n");
        s = "battery";
        break;
    case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
        printk("balanced\n");
        s = "balanced";
        break;
    case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
        printk("performance\n");
        s = "performance";
        break;
    }
    printk("\tinternal class: ");
    printk("\tui class: %s\n", s);
    printk("\tinternal class:");
    if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
        (class2 == 0))
        printk("none");
        pr_cont(" none");
    else {
        if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
            printk("boot ");
            pr_cont(" boot");
        if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
            printk("thermal ");
            pr_cont(" thermal");
        if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
            printk("limited_pwr ");
            pr_cont(" limited_pwr");
        if (class & ATOM_PPLIB_CLASSIFICATION_REST)
            printk("rest ");
            pr_cont(" rest");
        if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
            printk("forced ");
            pr_cont(" forced");
        if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
            printk("3d_perf ");
            pr_cont(" 3d_perf");
        if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
            printk("ovrdrv ");
            pr_cont(" ovrdrv");
        if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
            printk("uvd ");
            pr_cont(" uvd");
        if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
            printk("3d_low ");
            pr_cont(" 3d_low");
        if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
            printk("acpi ");
            pr_cont(" acpi");
        if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
            printk("uvd_hd2 ");
            pr_cont(" uvd_hd2");
        if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
            printk("uvd_hd ");
            pr_cont(" uvd_hd");
        if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
            printk("uvd_sd ");
            pr_cont(" uvd_sd");
        if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
            printk("limited_pwr2 ");
            pr_cont(" limited_pwr2");
        if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
            printk("ulv ");
            pr_cont(" ulv");
        if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
            printk("uvd_mvc ");
            pr_cont(" uvd_mvc");
    }
    printk("\n");
    pr_cont("\n");
}

void amdgpu_dpm_print_cap_info(u32 caps)
{
    printk("\tcaps: ");
    printk("\tcaps:");
    if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
        printk("single_disp ");
        pr_cont(" single_disp");
    if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
        printk("video ");
        pr_cont(" video");
    if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
        printk("no_dc ");
        printk("\n");
        pr_cont(" no_dc");
    pr_cont("\n");
}

void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
                                struct amdgpu_ps *rps)
{
    printk("\tstatus: ");
    printk("\tstatus:");
    if (rps == adev->pm.dpm.current_ps)
        printk("c ");
        pr_cont(" c");
    if (rps == adev->pm.dpm.requested_ps)
        printk("r ");
        pr_cont(" r");
    if (rps == adev->pm.dpm.boot_ps)
        printk("b ");
        printk("\n");
        pr_cont(" b");
    pr_cont("\n");
}

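The printk() to pr_cont() conversion matters on recent kernels: a bare printk() without a KERN_* level starts a new record at the default log level, so the old flag-by-flag fragments could each land on their own line. pr_cont() explicitly marks a continuation of the current record, which is what this conversion relies on:

printk("\tcaps:");       /* opens the record */
pr_cont(" single_disp"); /* appends instead of starting a new line */
pr_cont("\n");           /* terminates the record */
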
@ -270,8 +270,18 @@ struct amdgpu_dpm_funcs {
                          struct amdgpu_ps *cps,
                          struct amdgpu_ps *rps,
                          bool *equal);
    int (*read_sensor)(struct amdgpu_device *adev, int idx, void *value,
                       int *size);

    struct amd_vce_state* (*get_vce_clock_state)(struct amdgpu_device *adev, unsigned idx);
    int (*reset_power_profile_state)(struct amdgpu_device *adev,
                                     struct amd_pp_profile *request);
    int (*get_power_profile_state)(struct amdgpu_device *adev,
                                   struct amd_pp_profile *query);
    int (*set_power_profile_state)(struct amdgpu_device *adev,
                                   struct amd_pp_profile *request);
    int (*switch_power_profile)(struct amdgpu_device *adev,
                                enum amd_pp_profile_type type);
};

#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))

@ -282,10 +292,10 @@ struct amdgpu_dpm_funcs {
#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))

#define amdgpu_dpm_read_sensor(adev, idx, value) \
#define amdgpu_dpm_read_sensor(adev, idx, value, size) \
    ((adev)->pp_enabled ? \
        (adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value)) : \
        -EINVAL)
        (adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value), (size)) : \
        (adev)->pm.funcs->read_sensor((adev), (idx), (value), (size)))

#define amdgpu_dpm_get_temperature(adev) \
    ((adev)->pp_enabled ? \

@ -388,6 +398,22 @@ struct amdgpu_dpm_funcs {
        (adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle) : \
        (adev)->pm.dpm.forced_level)

#define amdgpu_dpm_reset_power_profile_state(adev, request) \
    ((adev)->powerplay.pp_funcs->reset_power_profile_state(\
        (adev)->powerplay.pp_handle, request))

#define amdgpu_dpm_get_power_profile_state(adev, query) \
    ((adev)->powerplay.pp_funcs->get_power_profile_state(\
        (adev)->powerplay.pp_handle, query))

#define amdgpu_dpm_set_power_profile_state(adev, request) \
    ((adev)->powerplay.pp_funcs->set_power_profile_state(\
        (adev)->powerplay.pp_handle, request))

#define amdgpu_dpm_switch_power_profile(adev, type) \
    ((adev)->powerplay.pp_funcs->switch_power_profile(\
        (adev)->powerplay.pp_handle, type))

struct amdgpu_dpm {
    struct amdgpu_ps *ps;
    /* number of valid power states */

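A sketch of how a client might drive the new power-profile hooks through the wrappers above. Unlike amdgpu_dpm_read_sensor, these macros do not fall back when powerplay is disabled, so the caller guards; the profile-type value is an assumption based on the enum amd_pp_profile_type referenced here:

int r = -EINVAL;

/* assumed enum value; the real names live in the shared powerplay headers */
if (adev->pp_enabled)
    r = amdgpu_dpm_switch_power_profile(adev, AMD_PP_COMPUTE_PROFILE);
if (r)
    DRM_DEBUG("power profile switch not taken (%d)\n", r);
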
@ -60,9 +60,12 @@
 * - 3.8.0 - Add support raster config init in the kernel
 * - 3.9.0 - Add support for memory query info about VRAM and GTT.
 * - 3.10.0 - Add support for new fences ioctl, new gem ioctl flags
 * - 3.11.0 - Add support for sensor query info (clocks, temp, etc).
 * - 3.12.0 - Add query for double offchip LDS buffers
 * - 3.13.0 - Add PRT support
 */
#define KMS_DRIVER_MAJOR 3
#define KMS_DRIVER_MINOR 10
#define KMS_DRIVER_MINOR 13
#define KMS_DRIVER_PATCHLEVEL 0

int amdgpu_vram_limit = 0;

@ -77,7 +80,7 @@ int amdgpu_pcie_gen2 = -1;
int amdgpu_msi = -1;
int amdgpu_lockup_timeout = 0;
int amdgpu_dpm = -1;
int amdgpu_smc_load_fw = 1;
int amdgpu_fw_load_type = -1;
int amdgpu_aspm = -1;
int amdgpu_runtime_pm = -1;
unsigned amdgpu_ip_block_mask = 0xffffffff;

@ -100,6 +103,11 @@ unsigned amdgpu_pg_mask = 0xffffffff;
char *amdgpu_disable_cu = NULL;
char *amdgpu_virtual_display = NULL;
unsigned amdgpu_pp_feature_mask = 0xffffffff;
int amdgpu_ngg = 0;
int amdgpu_prim_buf_per_se = 0;
int amdgpu_pos_buf_per_se = 0;
int amdgpu_cntl_sb_buf_per_se = 0;
int amdgpu_param_buf_per_se = 0;

MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);

@ -137,8 +145,8 @@ module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 0444);
MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(dpm, amdgpu_dpm, int, 0444);

MODULE_PARM_DESC(smc_load_fw, "SMC firmware loading(1 = enable, 0 = disable)");
module_param_named(smc_load_fw, amdgpu_smc_load_fw, int, 0444);
MODULE_PARM_DESC(fw_load_type, "firmware loading type (0 = direct, 1 = SMU, 2 = PSP, -1 = auto)");
module_param_named(fw_load_type, amdgpu_fw_load_type, int, 0444);

MODULE_PARM_DESC(aspm, "ASPM support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(aspm, amdgpu_aspm, int, 0444);

@ -207,6 +215,22 @@ MODULE_PARM_DESC(virtual_display,
                 "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x,x;xxxx:xx:xx.x,x)");
module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);

MODULE_PARM_DESC(ngg, "Next Generation Graphics (1 = enable, 0 = disable(default depending on gfx))");
module_param_named(ngg, amdgpu_ngg, int, 0444);

MODULE_PARM_DESC(prim_buf_per_se, "the size of Primitive Buffer per Shader Engine (default depending on gfx)");
module_param_named(prim_buf_per_se, amdgpu_prim_buf_per_se, int, 0444);

MODULE_PARM_DESC(pos_buf_per_se, "the size of Position Buffer per Shader Engine (default depending on gfx)");
module_param_named(pos_buf_per_se, amdgpu_pos_buf_per_se, int, 0444);

MODULE_PARM_DESC(cntl_sb_buf_per_se, "the size of Control Sideband per Shader Engine (default depending on gfx)");
module_param_named(cntl_sb_buf_per_se, amdgpu_cntl_sb_buf_per_se, int, 0444);

MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Parameter Cache per Shader Engine (default depending on gfx)");
module_param_named(param_buf_per_se, amdgpu_param_buf_per_se, int, 0444);


static const struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_SI
    {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},

@ -409,6 +433,7 @@ static const struct pci_device_id pciidlist[] = {
    {0x1002, 0x67C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
    {0x1002, 0x67C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
    {0x1002, 0x67C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
    {0x1002, 0x67D0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
    {0x1002, 0x67DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
    {0x1002, 0x67C8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
    {0x1002, 0x67C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},

@ -421,8 +446,16 @@ static const struct pci_device_id pciidlist[] = {
    {0x1002, 0x6985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
    {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
    {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
    {0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
    {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},

    /* Vega 10 */
    {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
    {0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
    {0x1002, 0x6862, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
    {0x1002, 0x6863, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
    {0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
    {0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
    {0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
    {0, 0, 0}
};

@ -685,7 +718,6 @@ static struct drm_driver kms_driver = {
        DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET,
    .load = amdgpu_driver_load_kms,
    .open = amdgpu_driver_open_kms,
    .preclose = amdgpu_driver_preclose_kms,
    .postclose = amdgpu_driver_postclose_kms,
    .lastclose = amdgpu_driver_lastclose_kms,
    .set_busid = drm_pci_set_busid,

@ -147,11 +147,11 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
    ret = amdgpu_gem_object_create(adev, aligned_size, 0,
                                   AMDGPU_GEM_DOMAIN_VRAM,
                                   AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                                   AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
                                   AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
                                   AMDGPU_GEM_CREATE_VRAM_CLEARED,
                                   true, &gobj);
    if (ret) {
        printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
               aligned_size);
        pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
        return -ENOMEM;
    }
    abo = gem_to_amdgpu_bo(gobj);

@ -241,8 +241,6 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
    /* setup helper */
    rfbdev->helper.fb = fb;

    memset_io(abo->kptr, 0x0, amdgpu_bo_size(abo));

    strcpy(info->fix.id, "amdgpudrmfb");

    drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);

@ -229,7 +229,8 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
    unsigned p;
    int i, j;
    u64 page_base;
    uint32_t flags = AMDGPU_PTE_SYSTEM;
    /* Starting from VEGA10, system bit must be 0 to mean invalid. */
    uint64_t flags = 0;

    if (!adev->gart.ready) {
        WARN(1, "trying to unbind memory from uninitialized GART !\n");

@ -271,7 +272,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 */
int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
                     int pages, struct page **pagelist, dma_addr_t *dma_addr,
                     uint32_t flags)
                     uint64_t flags)
{
    unsigned t;
    unsigned p;

@ -152,6 +152,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
    struct ttm_validate_buffer tv;
    struct ww_acquire_ctx ticket;
    struct amdgpu_bo_va *bo_va;
    struct dma_fence *fence = NULL;
    int r;

    INIT_LIST_HEAD(&list);

@ -173,6 +174,17 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
    if (bo_va) {
        if (--bo_va->ref_count == 0) {
            amdgpu_vm_bo_rmv(adev, bo_va);

            r = amdgpu_vm_clear_freed(adev, vm, &fence);
            if (unlikely(r)) {
                dev_err(adev->dev, "failed to clear page "
                        "tables on GEM object close (%d)\n", r);
            }

            if (fence) {
                amdgpu_bo_fence(bo, fence, true);
                dma_fence_put(fence);
            }
        }
    }
    ttm_eu_backoff_reservation(&ticket, &list);

@ -507,14 +519,16 @@ static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo)
 * amdgpu_gem_va_update_vm -update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm to update
 * @bo_va: bo_va to update
 * @list: validation list
 * @operation: map or unmap
 * @operation: map, unmap or clear
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
                                    struct amdgpu_vm *vm,
                                    struct amdgpu_bo_va *bo_va,
                                    struct list_head *list,
                                    uint32_t operation)

@ -529,20 +543,21 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
        goto error;
    }

    r = amdgpu_vm_validate_pt_bos(adev, bo_va->vm, amdgpu_gem_va_check,
    r = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_gem_va_check,
                                  NULL);
    if (r)
        goto error;

    r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
    r = amdgpu_vm_update_directories(adev, vm);
    if (r)
        goto error;

    r = amdgpu_vm_clear_freed(adev, bo_va->vm);
    r = amdgpu_vm_clear_freed(adev, vm, NULL);
    if (r)
        goto error;

    if (operation == AMDGPU_VA_OP_MAP)
    if (operation == AMDGPU_VA_OP_MAP ||
        operation == AMDGPU_VA_OP_REPLACE)
        r = amdgpu_vm_bo_update(adev, bo_va, false);

error:

@ -553,6 +568,12 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
    const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
        AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
        AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
    const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
        AMDGPU_VM_PAGE_PRT;

    struct drm_amdgpu_gem_va *args = data;
    struct drm_gem_object *gobj;
    struct amdgpu_device *adev = dev->dev_private;

@ -563,7 +584,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
    struct ttm_validate_buffer tv;
    struct ww_acquire_ctx ticket;
    struct list_head list;
    uint32_t invalid_flags, va_flags = 0;
    uint64_t va_flags;
    int r = 0;

    if (!adev->vm_manager.enabled)

@ -577,17 +598,17 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
        return -EINVAL;
    }

    invalid_flags = ~(AMDGPU_VM_DELAY_UPDATE | AMDGPU_VM_PAGE_READABLE |
                      AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_PAGE_EXECUTABLE);
    if ((args->flags & invalid_flags)) {
        dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
                args->flags, invalid_flags);
    if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
        dev_err(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
                args->flags);
        return -EINVAL;
    }

    switch (args->operation) {
    case AMDGPU_VA_OP_MAP:
    case AMDGPU_VA_OP_UNMAP:
    case AMDGPU_VA_OP_CLEAR:
    case AMDGPU_VA_OP_REPLACE:
        break;
    default:
        dev_err(&dev->pdev->dev, "unsupported operation %d\n",
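
The rewritten check accepts args->flags only if it is a subset of valid_flags or a subset of prt_flags; (flags & ~valid) && (flags & ~prt) fires exactly when some bit falls outside both sets. For instance, AMDGPU_VM_PAGE_PRT | AMDGPU_VM_PAGE_READABLE leaves a stray bit against either mask, so mixing a PRT mapping with normal page-permission flags is rejected, while AMDGPU_VM_DELAY_UPDATE | AMDGPU_VM_PAGE_PRT passes the prt_flags test.
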
@ -595,38 +616,47 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
        return -EINVAL;
    }

    gobj = drm_gem_object_lookup(filp, args->handle);
    if (gobj == NULL)
        return -ENOENT;
    abo = gem_to_amdgpu_bo(gobj);
    INIT_LIST_HEAD(&list);
    tv.bo = &abo->tbo;
    tv.shared = false;
    list_add(&tv.head, &list);
    if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
        !(args->flags & AMDGPU_VM_PAGE_PRT)) {
        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL)
            return -ENOENT;
        abo = gem_to_amdgpu_bo(gobj);
        tv.bo = &abo->tbo;
        tv.shared = false;
        list_add(&tv.head, &list);
    } else {
        gobj = NULL;
        abo = NULL;
    }

    amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);

    r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
    if (r) {
        drm_gem_object_unreference_unlocked(gobj);
        return r;
    }
    if (r)
        goto error_unref;

    bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
    if (!bo_va) {
        ttm_eu_backoff_reservation(&ticket, &list);
        drm_gem_object_unreference_unlocked(gobj);
        return -ENOENT;
    if (abo) {
        bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
        if (!bo_va) {
            r = -ENOENT;
            goto error_backoff;
        }
    } else if (args->operation != AMDGPU_VA_OP_CLEAR) {
        bo_va = fpriv->prt_va;
    } else {
        bo_va = NULL;
    }

    switch (args->operation) {
    case AMDGPU_VA_OP_MAP:
        if (args->flags & AMDGPU_VM_PAGE_READABLE)
            va_flags |= AMDGPU_PTE_READABLE;
        if (args->flags & AMDGPU_VM_PAGE_WRITEABLE)
            va_flags |= AMDGPU_PTE_WRITEABLE;
        if (args->flags & AMDGPU_VM_PAGE_EXECUTABLE)
            va_flags |= AMDGPU_PTE_EXECUTABLE;
        r = amdgpu_vm_alloc_pts(adev, bo_va->vm, args->va_address,
                                args->map_size);
        if (r)
            goto error_backoff;

        va_flags = amdgpu_vm_get_pte_flags(adev, args->flags);
        r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
                             args->offset_in_bo, args->map_size,
                             va_flags);

@ -634,14 +664,34 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
    case AMDGPU_VA_OP_UNMAP:
        r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
        break;

    case AMDGPU_VA_OP_CLEAR:
        r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
                                        args->va_address,
                                        args->map_size);
        break;
    case AMDGPU_VA_OP_REPLACE:
        r = amdgpu_vm_alloc_pts(adev, bo_va->vm, args->va_address,
                                args->map_size);
        if (r)
            goto error_backoff;

        va_flags = amdgpu_vm_get_pte_flags(adev, args->flags);
        r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
                                     args->offset_in_bo, args->map_size,
                                     va_flags);
        break;
    default:
        break;
    }
    if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) &&
        !amdgpu_vm_debug)
        amdgpu_gem_va_update_vm(adev, bo_va, &list, args->operation);
    if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
        amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va, &list,
                                args->operation);

error_backoff:
    ttm_eu_backoff_reservation(&ticket, &list);

error_unref:
    drm_gem_object_unreference_unlocked(gobj);
    return r;
}

@ -161,9 +161,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        return r;
    }

    if (ring->funcs->init_cond_exec)
        patch_offset = amdgpu_ring_init_cond_exec(ring);

    if (vm) {
        r = amdgpu_vm_flush(ring, job);
        if (r) {

@ -172,7 +169,14 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        }
    }

    if (ring->funcs->emit_hdp_flush)
    if (ring->funcs->init_cond_exec)
        patch_offset = amdgpu_ring_init_cond_exec(ring);

    if (ring->funcs->emit_hdp_flush
#ifdef CONFIG_X86_64
        && !(adev->flags & AMD_IS_APU)
#endif
       )
        amdgpu_ring_emit_hdp_flush(ring);

    skip_preamble = ring->current_ctx == fence_ctx;

@ -202,7 +206,11 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        need_ctx_switch = false;
    }

    if (ring->funcs->emit_hdp_invalidate)
    if (ring->funcs->emit_hdp_invalidate
#ifdef CONFIG_X86_64
        && !(adev->flags & AMD_IS_APU)
#endif
       )
        amdgpu_ring_emit_hdp_invalidate(ring);

    r = amdgpu_fence_emit(ring, f);

@ -214,6 +222,9 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        return r;
    }

    if (ring->funcs->insert_end)
        ring->funcs->insert_end(ring);

    /* wrap the last IB with fence */
    if (job && job->uf_addr) {
        amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,

@ -25,6 +25,48 @@
#define __AMDGPU_IH_H__

struct amdgpu_device;
/*
 * vega10+ IH clients
 */
enum amdgpu_ih_clientid
{
    AMDGPU_IH_CLIENTID_IH        = 0x00,
    AMDGPU_IH_CLIENTID_ACP       = 0x01,
    AMDGPU_IH_CLIENTID_ATHUB     = 0x02,
    AMDGPU_IH_CLIENTID_BIF       = 0x03,
    AMDGPU_IH_CLIENTID_DCE       = 0x04,
    AMDGPU_IH_CLIENTID_ISP       = 0x05,
    AMDGPU_IH_CLIENTID_PCIE0     = 0x06,
    AMDGPU_IH_CLIENTID_RLC       = 0x07,
    AMDGPU_IH_CLIENTID_SDMA0     = 0x08,
    AMDGPU_IH_CLIENTID_SDMA1     = 0x09,
    AMDGPU_IH_CLIENTID_SE0SH     = 0x0a,
    AMDGPU_IH_CLIENTID_SE1SH     = 0x0b,
    AMDGPU_IH_CLIENTID_SE2SH     = 0x0c,
    AMDGPU_IH_CLIENTID_SE3SH     = 0x0d,
    AMDGPU_IH_CLIENTID_SYSHUB    = 0x0e,
    AMDGPU_IH_CLIENTID_THM       = 0x0f,
    AMDGPU_IH_CLIENTID_UVD       = 0x10,
    AMDGPU_IH_CLIENTID_VCE0      = 0x11,
    AMDGPU_IH_CLIENTID_VMC       = 0x12,
    AMDGPU_IH_CLIENTID_XDMA      = 0x13,
    AMDGPU_IH_CLIENTID_GRBM_CP   = 0x14,
    AMDGPU_IH_CLIENTID_ATS       = 0x15,
    AMDGPU_IH_CLIENTID_ROM_SMUIO = 0x16,
    AMDGPU_IH_CLIENTID_DF        = 0x17,
    AMDGPU_IH_CLIENTID_VCE1      = 0x18,
    AMDGPU_IH_CLIENTID_PWR       = 0x19,
    AMDGPU_IH_CLIENTID_UTCL2     = 0x1b,
    AMDGPU_IH_CLIENTID_EA        = 0x1c,
    AMDGPU_IH_CLIENTID_UTCL2LOG  = 0x1d,
    AMDGPU_IH_CLIENTID_MP0       = 0x1e,
    AMDGPU_IH_CLIENTID_MP1       = 0x1f,

    AMDGPU_IH_CLIENTID_MAX

};

#define AMDGPU_IH_CLIENTID_LEGACY 0

/*
 * R6xx+ IH ring

@ -46,12 +88,19 @@ struct amdgpu_ih_ring {
    dma_addr_t rb_dma_addr; /* only used when use_bus_addr = true */
};

#define AMDGPU_IH_SRC_DATA_MAX_SIZE_DW 4

struct amdgpu_iv_entry {
    unsigned client_id;
    unsigned src_id;
    unsigned src_data;
    unsigned ring_id;
    unsigned vm_id;
    unsigned vm_id_src;
    uint64_t timestamp;
    unsigned timestamp_src;
    unsigned pas_id;
    unsigned pasid_src;
    unsigned src_data[AMDGPU_IH_SRC_DATA_MAX_SIZE_DW];
    const uint32_t *iv_entry;
};

|
|||
#include "amdgpu_ih.h"
|
||||
#include "atom.h"
|
||||
#include "amdgpu_connectors.h"
|
||||
#include "amdgpu_trace.h"
|
||||
|
||||
#include <linux/pm_runtime.h>
|
||||
|
||||
|
@ -89,23 +90,28 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work)
|
|||
static void amdgpu_irq_disable_all(struct amdgpu_device *adev)
|
||||
{
|
||||
unsigned long irqflags;
|
||||
unsigned i, j;
|
||||
unsigned i, j, k;
|
||||
int r;
|
||||
|
||||
spin_lock_irqsave(&adev->irq.lock, irqflags);
|
||||
for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) {
|
||||
struct amdgpu_irq_src *src = adev->irq.sources[i];
|
||||
|
||||
if (!src || !src->funcs->set || !src->num_types)
|
||||
for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
|
||||
if (!adev->irq.client[i].sources)
|
||||
continue;
|
||||
|
||||
for (j = 0; j < src->num_types; ++j) {
|
||||
atomic_set(&src->enabled_types[j], 0);
|
||||
r = src->funcs->set(adev, src, j,
|
||||
AMDGPU_IRQ_STATE_DISABLE);
|
||||
if (r)
|
||||
DRM_ERROR("error disabling interrupt (%d)\n",
|
||||
r);
|
||||
for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
|
||||
struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
|
||||
|
||||
if (!src || !src->funcs->set || !src->num_types)
|
||||
continue;
|
||||
|
||||
for (k = 0; k < src->num_types; ++k) {
|
||||
atomic_set(&src->enabled_types[k], 0);
|
||||
r = src->funcs->set(adev, src, k,
|
||||
AMDGPU_IRQ_STATE_DISABLE);
|
||||
if (r)
|
||||
DRM_ERROR("error disabling interrupt (%d)\n",
|
||||
r);
|
||||
}
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&adev->irq.lock, irqflags);
|
||||
|
@@ -254,7 +260,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
  */
 void amdgpu_irq_fini(struct amdgpu_device *adev)
 {
-    unsigned i;
+    unsigned i, j;
 
     drm_vblank_cleanup(adev->ddev);
     if (adev->irq.installed) {
@@ -266,19 +272,25 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
         cancel_work_sync(&adev->reset_work);
     }
 
-    for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) {
-        struct amdgpu_irq_src *src = adev->irq.sources[i];
-
-        if (!src)
+    for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
+        if (!adev->irq.client[i].sources)
             continue;
 
-        kfree(src->enabled_types);
-        src->enabled_types = NULL;
-        if (src->data) {
-            kfree(src->data);
-            kfree(src);
-            adev->irq.sources[i] = NULL;
+        for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
+            struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
+
+            if (!src)
+                continue;
+
+            kfree(src->enabled_types);
+            src->enabled_types = NULL;
+            if (src->data) {
+                kfree(src->data);
+                kfree(src);
+                adev->irq.client[i].sources[j] = NULL;
+            }
         }
+        kfree(adev->irq.client[i].sources);
     }
 }
@@ -290,16 +302,28 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
  * @source: irq source
  *
  */
-int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned src_id,
+int amdgpu_irq_add_id(struct amdgpu_device *adev,
+              unsigned client_id, unsigned src_id,
               struct amdgpu_irq_src *source)
 {
+    if (client_id >= AMDGPU_IH_CLIENTID_MAX)
+        return -EINVAL;
+
     if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
         return -EINVAL;
 
-    if (adev->irq.sources[src_id] != NULL)
+    if (!source->funcs)
         return -EINVAL;
 
-    if (!source->funcs)
+    if (!adev->irq.client[client_id].sources) {
+        adev->irq.client[client_id].sources = kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
+                                  sizeof(struct amdgpu_irq_src),
+                                  GFP_KERNEL);
+        if (!adev->irq.client[client_id].sources)
+            return -ENOMEM;
+    }
+
+    if (adev->irq.client[client_id].sources[src_id] != NULL)
         return -EINVAL;
 
     if (source->num_types && !source->enabled_types) {
@@ -313,8 +337,7 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned src_id,
         source->enabled_types = types;
     }
 
-    adev->irq.sources[src_id] = source;
-
+    adev->irq.client[client_id].sources[src_id] = source;
     return 0;
 }
@@ -329,10 +352,18 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned src_id,
 void amdgpu_irq_dispatch(struct amdgpu_device *adev,
              struct amdgpu_iv_entry *entry)
 {
+    unsigned client_id = entry->client_id;
     unsigned src_id = entry->src_id;
     struct amdgpu_irq_src *src;
     int r;
 
+    trace_amdgpu_iv(entry);
+
+    if (client_id >= AMDGPU_IH_CLIENTID_MAX) {
+        DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
+        return;
+    }
+
     if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
         DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
         return;
@@ -341,7 +372,13 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
     if (adev->irq.virq[src_id]) {
         generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
     } else {
-        src = adev->irq.sources[src_id];
+        if (!adev->irq.client[client_id].sources) {
+            DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
+                  client_id, src_id);
+            return;
+        }
+
+        src = adev->irq.client[client_id].sources[src_id];
         if (!src) {
             DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
             return;
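The dispatch path above now indexes interrupt sources by client first and source id second, with the per-client tables allocated lazily in amdgpu_irq_add_id(). A minimal userspace sketch of that two-level table follows; the ids and handler names are hypothetical.

#include <stdio.h>
#include <stdlib.h>

#define MAX_CLIENT_ID 0x20
#define MAX_SRC_ID    0x100

typedef void (*irq_handler_t)(unsigned src_data);

/* second-level tables are allocated on first registration, like
 * adev->irq.client[i].sources in the hunk above */
static irq_handler_t *clients[MAX_CLIENT_ID];

static int add_handler(unsigned client, unsigned src, irq_handler_t fn)
{
    if (client >= MAX_CLIENT_ID || src >= MAX_SRC_ID)
        return -1;
    if (!clients[client]) {
        clients[client] = calloc(MAX_SRC_ID, sizeof(irq_handler_t));
        if (!clients[client])
            return -1;
    }
    clients[client][src] = fn;
    return 0;
}

static void dispatch(unsigned client, unsigned src, unsigned data)
{
    if (client >= MAX_CLIENT_ID || src >= MAX_SRC_ID)
        return; /* invalid IV entry: dropped, as in the driver */
    if (!clients[client] || !clients[client][src]) {
        printf("unhandled interrupt client %u src %u\n", client, src);
        return;
    }
    clients[client][src](data);
}

static void on_vblank(unsigned data) { printf("vblank, data %u\n", data); }

int main(void)
{
    add_handler(0x12, 42, on_vblank); /* hypothetical ids */
    dispatch(0x12, 42, 7);            /* handled */
    dispatch(0x03, 9, 0);             /* unhandled: logged */
    return 0;
}

The lazy allocation matters: a fully populated table of 0x20 clients by 0x100 sources of 8-byte pointers would pin 64 KiB even for clients that never register a source.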
@@ -385,13 +422,20 @@ int amdgpu_irq_update(struct amdgpu_device *adev,
 
 void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
 {
-    int i, j;
-    for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; i++) {
-        struct amdgpu_irq_src *src = adev->irq.sources[i];
-        if (!src)
+    int i, j, k;
+
+    for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
+        if (!adev->irq.client[i].sources)
             continue;
-        for (j = 0; j < src->num_types; j++)
-            amdgpu_irq_update(adev, src, j);
+
+        for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
+            struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
+
+            if (!src)
+                continue;
+            for (k = 0; k < src->num_types; k++)
+                amdgpu_irq_update(adev, src, k);
+        }
     }
 }
@@ -28,6 +28,7 @@
 #include "amdgpu_ih.h"
 
 #define AMDGPU_MAX_IRQ_SRC_ID 0x100
+#define AMDGPU_MAX_IRQ_CLIENT_ID 0x100
 
 struct amdgpu_device;
 struct amdgpu_iv_entry;
@@ -44,6 +45,10 @@ struct amdgpu_irq_src {
     void *data;
 };
 
+struct amdgpu_irq_client {
+    struct amdgpu_irq_src **sources;
+};
+
 /* provided by interrupt generating IP blocks */
 struct amdgpu_irq_src_funcs {
     int (*set)(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
@@ -58,7 +63,7 @@ struct amdgpu_irq {
     bool installed;
     spinlock_t lock;
     /* interrupt sources */
-    struct amdgpu_irq_src *sources[AMDGPU_MAX_IRQ_SRC_ID];
+    struct amdgpu_irq_client client[AMDGPU_IH_CLIENTID_MAX];
 
     /* status, etc. */
     bool msi_enabled; /* msi enabled */
@@ -80,7 +85,8 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg);
 
 int amdgpu_irq_init(struct amdgpu_device *adev);
 void amdgpu_irq_fini(struct amdgpu_device *adev);
-int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned src_id,
+int amdgpu_irq_add_id(struct amdgpu_device *adev,
+              unsigned client_id, unsigned src_id,
               struct amdgpu_irq_src *source);
 void amdgpu_irq_dispatch(struct amdgpu_device *adev,
              struct amdgpu_iv_entry *entry);
@@ -209,6 +209,14 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
         fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
         fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
         break;
+    case AMDGPU_INFO_FW_SOS:
+        fw_info->ver = adev->psp.sos_fw_version;
+        fw_info->feature = adev->psp.sos_feature_version;
+        break;
+    case AMDGPU_INFO_FW_ASD:
+        fw_info->ver = adev->psp.asd_fw_version;
+        fw_info->feature = adev->psp.asd_feature_version;
+        break;
     default:
         return -EINVAL;
     }
@@ -241,6 +249,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
     uint32_t ui32 = 0;
     uint64_t ui64 = 0;
     int i, found;
+    int ui32_size = sizeof(ui32);
 
     if (!info->return_size || !info->return_pointer)
         return -EINVAL;
@@ -309,6 +318,13 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
         ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
         ib_size_alignment = 1;
         break;
+    case AMDGPU_HW_IP_UVD_ENC:
+        type = AMD_IP_BLOCK_TYPE_UVD;
+        for (i = 0; i < adev->uvd.num_enc_rings; i++)
+            ring_mask |= ((adev->uvd.ring_enc[i].ready ? 1 : 0) << i);
+        ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
+        ib_size_alignment = 1;
+        break;
     default:
         return -EINVAL;
     }
@@ -348,6 +364,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
     case AMDGPU_HW_IP_VCE:
         type = AMD_IP_BLOCK_TYPE_VCE;
         break;
+    case AMDGPU_HW_IP_UVD_ENC:
+        type = AMD_IP_BLOCK_TYPE_UVD;
+        break;
     default:
         return -EINVAL;
     }
@@ -528,6 +547,15 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
         dev_info.vram_type = adev->mc.vram_type;
         dev_info.vram_bit_width = adev->mc.vram_width;
         dev_info.vce_harvest_config = adev->vce.harvest_config;
+        dev_info.gc_double_offchip_lds_buf =
+            adev->gfx.config.double_offchip_lds_buf;
+
+        if (amdgpu_ngg) {
+            dev_info.prim_buf_gpu_addr = adev->gfx.ngg.buf[PRIM].gpu_addr;
+            dev_info.pos_buf_gpu_addr = adev->gfx.ngg.buf[POS].gpu_addr;
+            dev_info.cntl_sb_buf_gpu_addr = adev->gfx.ngg.buf[CNTL].gpu_addr;
+            dev_info.param_buf_gpu_addr = adev->gfx.ngg.buf[PARAM].gpu_addr;
+        }
 
         return copy_to_user(out, &dev_info,
                     min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
@@ -597,6 +625,80 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
             return -EINVAL;
         }
     }
+    case AMDGPU_INFO_SENSOR: {
+        struct pp_gpu_power query = {0};
+        int query_size = sizeof(query);
+
+        if (amdgpu_dpm == 0)
+            return -ENOENT;
+
+        switch (info->sensor_info.type) {
+        case AMDGPU_INFO_SENSOR_GFX_SCLK:
+            /* get sclk in Mhz */
+            if (amdgpu_dpm_read_sensor(adev,
+                           AMDGPU_PP_SENSOR_GFX_SCLK,
+                           (void *)&ui32, &ui32_size)) {
+                return -EINVAL;
+            }
+            ui32 /= 100;
+            break;
+        case AMDGPU_INFO_SENSOR_GFX_MCLK:
+            /* get mclk in Mhz */
+            if (amdgpu_dpm_read_sensor(adev,
+                           AMDGPU_PP_SENSOR_GFX_MCLK,
+                           (void *)&ui32, &ui32_size)) {
+                return -EINVAL;
+            }
+            ui32 /= 100;
+            break;
+        case AMDGPU_INFO_SENSOR_GPU_TEMP:
+            /* get temperature in millidegrees C */
+            if (amdgpu_dpm_read_sensor(adev,
+                           AMDGPU_PP_SENSOR_GPU_TEMP,
+                           (void *)&ui32, &ui32_size)) {
+                return -EINVAL;
+            }
+            break;
+        case AMDGPU_INFO_SENSOR_GPU_LOAD:
+            /* get GPU load */
+            if (amdgpu_dpm_read_sensor(adev,
+                           AMDGPU_PP_SENSOR_GPU_LOAD,
+                           (void *)&ui32, &ui32_size)) {
+                return -EINVAL;
+            }
+            break;
+        case AMDGPU_INFO_SENSOR_GPU_AVG_POWER:
+            /* get average GPU power */
+            if (amdgpu_dpm_read_sensor(adev,
+                           AMDGPU_PP_SENSOR_GPU_POWER,
+                           (void *)&query, &query_size)) {
+                return -EINVAL;
+            }
+            ui32 = query.average_gpu_power >> 8;
+            break;
+        case AMDGPU_INFO_SENSOR_VDDNB:
+            /* get VDDNB in millivolts */
+            if (amdgpu_dpm_read_sensor(adev,
+                           AMDGPU_PP_SENSOR_VDDNB,
+                           (void *)&ui32, &ui32_size)) {
+                return -EINVAL;
+            }
+            break;
+        case AMDGPU_INFO_SENSOR_VDDGFX:
+            /* get VDDGFX in millivolts */
+            if (amdgpu_dpm_read_sensor(adev,
+                           AMDGPU_PP_SENSOR_VDDGFX,
+                           (void *)&ui32, &ui32_size)) {
+                return -EINVAL;
+            }
+            break;
+        default:
+            DRM_DEBUG_KMS("Invalid request %d\n",
+                      info->sensor_info.type);
+            return -EINVAL;
+        }
+        return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
+    }
     default:
         DRM_DEBUG_KMS("Invalid request %d\n", info->query);
         return -EINVAL;
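The GPU power readings above are 24.8 fixed point: the upper 24 bits are whole watts, the low byte counts 1/256 W, which is why the average-power query shifts right by 8 before returning. A tiny sketch of the same shift/mask:

#include <stdint.h>
#include <stdio.h>

/* Split a 24.8 fixed-point watt sample into the two fields the
 * driver prints. */
static void print_power(uint32_t fp)
{
    printf("%u.%u W\n", fp >> 8, fp & 0xff);
}

int main(void)
{
    print_power((42u << 8) | 128); /* prints "42.128 W" */
    return 0;
}

Note that, like the debugfs code later in this diff, this prints the raw low byte after the decimal point, so 128/256 W shows as ".128" rather than ".5".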
@@ -656,6 +758,14 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
         goto out_suspend;
     }
 
+    fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
+    if (!fpriv->prt_va) {
+        r = -ENOMEM;
+        amdgpu_vm_fini(adev, &fpriv->vm);
+        kfree(fpriv);
+        goto out_suspend;
+    }
+
     if (amdgpu_sriov_vf(adev)) {
         r = amdgpu_map_static_csa(adev, &fpriv->vm);
         if (r)
@@ -695,11 +805,15 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
     if (!fpriv)
         return;
 
+    pm_runtime_get_sync(dev->dev);
+
     amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
 
     amdgpu_uvd_free_handles(adev, file_priv);
     amdgpu_vce_free_handles(adev, file_priv);
 
+    amdgpu_vm_bo_rmv(adev, fpriv->prt_va);
+
     if (amdgpu_sriov_vf(adev)) {
         /* TODO: how to handle reserve failure */
         BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, false));
@@ -723,21 +837,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
     pm_runtime_put_autosuspend(dev->dev);
 }
 
-/**
- * amdgpu_driver_preclose_kms - drm callback for pre close
- *
- * @dev: drm dev pointer
- * @file_priv: drm file
- *
- * On device pre close, tear down hyperz and cmask filps on r1xx-r5xx
- * (all asics).
- */
-void amdgpu_driver_preclose_kms(struct drm_device *dev,
-                struct drm_file *file_priv)
-{
-    pm_runtime_get_sync(dev->dev);
-}
-
 /*
  * VBlank related functions.
  */
@@ -990,6 +1089,23 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
                fw_info.feature, fw_info.ver);
     }
 
+    /* PSP SOS */
+    query_fw.fw_type = AMDGPU_INFO_FW_SOS;
+    ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+    if (ret)
+        return ret;
+    seq_printf(m, "SOS feature version: %u, firmware version: 0x%08x\n",
+           fw_info.feature, fw_info.ver);
+
+    /* PSP ASD */
+    query_fw.fw_type = AMDGPU_INFO_FW_ASD;
+    ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+    if (ret)
+        return ret;
+    seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
+           fw_info.feature, fw_info.ver);
+
     /* SMC */
     query_fw.fw_type = AMDGPU_INFO_FW_SMC;
     ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
@@ -395,32 +395,18 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
     amdgpu_fill_placement_to_bo(bo, placement);
     /* Kernel allocation are uninterruptible */
 
-    if (!resv) {
-        bool locked;
-
-        reservation_object_init(&bo->tbo.ttm_resv);
-        locked = ww_mutex_trylock(&bo->tbo.ttm_resv.lock);
-        WARN_ON(!locked);
-    }
-
     initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
-    r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
-            &bo->placement, page_align, !kernel, NULL,
-            acc_size, sg, resv ? resv : &bo->tbo.ttm_resv,
-            &amdgpu_ttm_bo_destroy);
+    r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
+                 &bo->placement, page_align, !kernel, NULL,
+                 acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
     amdgpu_cs_report_moved_bytes(adev,
         atomic64_read(&adev->num_bytes_moved) - initial_bytes_moved);
 
-    if (unlikely(r != 0)) {
-        if (!resv)
-            ww_mutex_unlock(&bo->tbo.resv->lock);
+    if (unlikely(r != 0))
         return r;
-    }
 
-    bo->tbo.priority = ilog2(bo->tbo.num_pages);
     if (kernel)
-        bo->tbo.priority *= 2;
-    bo->tbo.priority = min(bo->tbo.priority, (unsigned)(TTM_MAX_BO_PRIORITY - 1));
+        bo->tbo.priority = 1;
 
     if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
         bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
@@ -436,7 +422,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
         dma_fence_put(fence);
     }
     if (!resv)
-        ww_mutex_unlock(&bo->tbo.resv->lock);
+        amdgpu_bo_unreserve(bo);
     *bo_ptr = bo;
 
     trace_amdgpu_bo_create(bo);
@@ -827,7 +813,10 @@ int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
 
 int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
 {
-    if (AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
+    struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+
+    if (adev->family <= AMDGPU_FAMILY_CZ &&
+        AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
         return -EINVAL;
 
     bo->tiling_flags = tiling_flags;
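The first hunk above swaps an init-then-trylock dance for ttm_bo_init_reserved(), which hands the BO back already reserved so the creator can finish setup before anyone else can touch it. A small pthread sketch of the same constructor contract (illustrative names, not the TTM API; build with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct object {
    pthread_mutex_t lock;
    int ready;
};

/* The constructor returns the object locked, by contract, so the
 * caller needs no WARN_ON(trylock) dance to claim it. */
static struct object *object_create_reserved(void)
{
    struct object *o = calloc(1, sizeof(*o));

    if (!o)
        return NULL;
    pthread_mutex_init(&o->lock, NULL);
    pthread_mutex_lock(&o->lock);
    return o;
}

int main(void)
{
    struct object *o = object_create_reserved();

    if (!o)
        return 1;
    o->ready = 1;                   /* setup happens under the lock */
    pthread_mutex_unlock(&o->lock); /* plays the amdgpu_bo_unreserve() role */
    printf("ready=%d\n", o->ready);
    pthread_mutex_destroy(&o->lock);
    free(o);
    return 0;
}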
@@ -43,16 +43,22 @@ static const struct cg_flag_name clocks[] = {
     {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
     {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
     {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
+    {AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
+    {AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
     {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
     {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
     {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
     {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
+    {AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
     {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
     {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
     {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
     {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
     {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
+    {AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"},
+    {AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"},
     {AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
+    {AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
     {0, NULL},
 };
 
@@ -610,6 +616,174 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
     return count;
 }
 
+static ssize_t amdgpu_get_pp_power_profile(struct device *dev,
+        char *buf, struct amd_pp_profile *query)
+{
+    struct drm_device *ddev = dev_get_drvdata(dev);
+    struct amdgpu_device *adev = ddev->dev_private;
+    int ret = 0;
+
+    if (adev->pp_enabled)
+        ret = amdgpu_dpm_get_power_profile_state(
+                adev, query);
+    else if (adev->pm.funcs->get_power_profile_state)
+        ret = adev->pm.funcs->get_power_profile_state(
+                adev, query);
+
+    if (ret)
+        return ret;
+
+    return snprintf(buf, PAGE_SIZE,
+            "%d %d %d %d %d\n",
+            query->min_sclk / 100,
+            query->min_mclk / 100,
+            query->activity_threshold,
+            query->up_hyst,
+            query->down_hyst);
+}
+
+static ssize_t amdgpu_get_pp_gfx_power_profile(struct device *dev,
+        struct device_attribute *attr,
+        char *buf)
+{
+    struct amd_pp_profile query = {0};
+
+    query.type = AMD_PP_GFX_PROFILE;
+
+    return amdgpu_get_pp_power_profile(dev, buf, &query);
+}
+
+static ssize_t amdgpu_get_pp_compute_power_profile(struct device *dev,
+        struct device_attribute *attr,
+        char *buf)
+{
+    struct amd_pp_profile query = {0};
+
+    query.type = AMD_PP_COMPUTE_PROFILE;
+
+    return amdgpu_get_pp_power_profile(dev, buf, &query);
+}
+
+static ssize_t amdgpu_set_pp_power_profile(struct device *dev,
+        const char *buf,
+        size_t count,
+        struct amd_pp_profile *request)
+{
+    struct drm_device *ddev = dev_get_drvdata(dev);
+    struct amdgpu_device *adev = ddev->dev_private;
+    uint32_t loop = 0;
+    char *sub_str, buf_cpy[128], *tmp_str;
+    const char delimiter[3] = {' ', '\n', '\0'};
+    long int value;
+    int ret = 0;
+
+    if (strncmp("reset", buf, strlen("reset")) == 0) {
+        if (adev->pp_enabled)
+            ret = amdgpu_dpm_reset_power_profile_state(
+                    adev, request);
+        else if (adev->pm.funcs->reset_power_profile_state)
+            ret = adev->pm.funcs->reset_power_profile_state(
+                    adev, request);
+        if (ret) {
+            count = -EINVAL;
+            goto fail;
+        }
+        return count;
+    }
+
+    if (strncmp("set", buf, strlen("set")) == 0) {
+        if (adev->pp_enabled)
+            ret = amdgpu_dpm_set_power_profile_state(
+                    adev, request);
+        else if (adev->pm.funcs->set_power_profile_state)
+            ret = adev->pm.funcs->set_power_profile_state(
+                    adev, request);
+        if (ret) {
+            count = -EINVAL;
+            goto fail;
+        }
+        return count;
+    }
+
+    if (count + 1 >= 128) {
+        count = -EINVAL;
+        goto fail;
+    }
+
+    memcpy(buf_cpy, buf, count + 1);
+    tmp_str = buf_cpy;
+
+    while (tmp_str[0]) {
+        sub_str = strsep(&tmp_str, delimiter);
+        ret = kstrtol(sub_str, 0, &value);
+        if (ret) {
+            count = -EINVAL;
+            goto fail;
+        }
+
+        switch (loop) {
+        case 0:
+            /* input unit MHz convert to dpm table unit 10KHz*/
+            request->min_sclk = (uint32_t)value * 100;
+            break;
+        case 1:
+            /* input unit MHz convert to dpm table unit 10KHz*/
+            request->min_mclk = (uint32_t)value * 100;
+            break;
+        case 2:
+            request->activity_threshold = (uint16_t)value;
+            break;
+        case 3:
+            request->up_hyst = (uint8_t)value;
+            break;
+        case 4:
+            request->down_hyst = (uint8_t)value;
+            break;
+        default:
+            break;
+        }
+
+        loop++;
+    }
+
+    if (adev->pp_enabled)
+        ret = amdgpu_dpm_set_power_profile_state(
+                adev, request);
+    else if (adev->pm.funcs->set_power_profile_state)
+        ret = adev->pm.funcs->set_power_profile_state(
+                adev, request);
+
+    if (ret)
+        count = -EINVAL;
+
+fail:
+    return count;
+}
+
+static ssize_t amdgpu_set_pp_gfx_power_profile(struct device *dev,
+        struct device_attribute *attr,
+        const char *buf,
+        size_t count)
+{
+    struct amd_pp_profile request = {0};
+
+    request.type = AMD_PP_GFX_PROFILE;
+
+    return amdgpu_set_pp_power_profile(dev, buf, count, &request);
+}
+
+static ssize_t amdgpu_set_pp_compute_power_profile(struct device *dev,
+        struct device_attribute *attr,
+        const char *buf,
+        size_t count)
+{
+    struct amd_pp_profile request = {0};
+
+    request.type = AMD_PP_COMPUTE_PROFILE;
+
+    return amdgpu_set_pp_power_profile(dev, buf, count, &request);
+}
+
 static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
 static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
            amdgpu_get_dpm_forced_performance_level,
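amdgpu_set_pp_power_profile() above accepts either the keywords "set"/"reset" or five space-separated integers in the order "min_sclk min_mclk activity_threshold up_hyst down_hyst". A userspace sketch of the same tokenizing, including the MHz-to-10 kHz conversion; strsep() is assumed available (glibc exposes it under _DEFAULT_SOURCE).

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct profile {
    unsigned min_sclk;  /* dpm table unit: 10 kHz */
    unsigned min_mclk;
    unsigned short activity_threshold;
    unsigned char up_hyst;
    unsigned char down_hyst;
};

static int parse_profile(char *buf, struct profile *p)
{
    char *tok, *save = buf;
    int loop = 0;

    while ((tok = strsep(&save, " \n")) != NULL) {
        if (!*tok)
            continue;
        long v = strtol(tok, NULL, 0);
        switch (loop++) {
        case 0: p->min_sclk = (unsigned)v * 100; break; /* MHz -> 10 kHz */
        case 1: p->min_mclk = (unsigned)v * 100; break;
        case 2: p->activity_threshold = (unsigned short)v; break;
        case 3: p->up_hyst = (unsigned char)v; break;
        case 4: p->down_hyst = (unsigned char)v; break;
        default: break; /* extra fields are ignored, as in the driver */
        }
    }
    return loop >= 5 ? 0 : -1;
}

int main(void)
{
    char line[] = "600 800 30 0 5";
    struct profile p;

    if (parse_profile(line, &p) == 0)
        printf("sclk %u mclk %u thr %u up %u down %u\n",
               p.min_sclk, p.min_mclk, p.activity_threshold,
               p.up_hyst, p.down_hyst);
    return 0;
}

In practice the attribute typically lives under the device's sysfs directory, so something like echo "600 800 30 0 5" > pp_gfx_power_profile there would stage a profile.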
@@ -637,6 +811,12 @@ static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR,
 static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR,
            amdgpu_get_pp_mclk_od,
            amdgpu_set_pp_mclk_od);
+static DEVICE_ATTR(pp_gfx_power_profile, S_IRUGO | S_IWUSR,
+           amdgpu_get_pp_gfx_power_profile,
+           amdgpu_set_pp_gfx_power_profile);
+static DEVICE_ATTR(pp_compute_power_profile, S_IRUGO | S_IWUSR,
+           amdgpu_get_pp_compute_power_profile,
+           amdgpu_set_pp_compute_power_profile);
 
 static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
                       struct device_attribute *attr,
@@ -1142,11 +1322,11 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
         /* XXX select vce level based on ring/task */
         adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
         mutex_unlock(&adev->pm.mutex);
-        amdgpu_pm_compute_clocks(adev);
-        amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
-                         AMD_PG_STATE_UNGATE);
         amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
                          AMD_CG_STATE_UNGATE);
+        amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+                         AMD_PG_STATE_UNGATE);
+        amdgpu_pm_compute_clocks(adev);
     } else {
         amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
                          AMD_PG_STATE_GATE);
@@ -1255,6 +1435,20 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
         DRM_ERROR("failed to create device file pp_mclk_od\n");
         return ret;
     }
+    ret = device_create_file(adev->dev,
+            &dev_attr_pp_gfx_power_profile);
+    if (ret) {
+        DRM_ERROR("failed to create device file "
+                "pp_gfx_power_profile\n");
+        return ret;
+    }
+    ret = device_create_file(adev->dev,
+            &dev_attr_pp_compute_power_profile);
+    if (ret) {
+        DRM_ERROR("failed to create device file "
+                "pp_compute_power_profile\n");
+        return ret;
+    }
 
     ret = amdgpu_debugfs_pm_init(adev);
     if (ret) {
@@ -1284,6 +1478,10 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
     device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
     device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
     device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
+    device_remove_file(adev->dev,
+            &dev_attr_pp_gfx_power_profile);
+    device_remove_file(adev->dev,
+            &dev_attr_pp_compute_power_profile);
 }
 
 void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
@@ -1340,7 +1538,9 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
 
 static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
 {
-    int32_t value;
+    uint32_t value;
+    struct pp_gpu_power query = {0};
+    int size;
 
     /* sanity check PP is enabled */
     if (!(adev->powerplay.pp_funcs &&
@@ -1348,47 +1548,60 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
         return -EINVAL;
 
     /* GPU Clocks */
+    size = sizeof(value);
     seq_printf(m, "GFX Clocks and Power:\n");
-    if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, &value))
+    if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
         seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
-    if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, &value))
+    if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
         seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
-    if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, &value))
+    if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
         seq_printf(m, "\t%u mV (VDDGFX)\n", value);
-    if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, &value))
+    if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
         seq_printf(m, "\t%u mV (VDDNB)\n", value);
+    size = sizeof(query);
+    if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size)) {
+        seq_printf(m, "\t%u.%u W (VDDC)\n", query.vddc_power >> 8,
+               query.vddc_power & 0xff);
+        seq_printf(m, "\t%u.%u W (VDDCI)\n", query.vddci_power >> 8,
+               query.vddci_power & 0xff);
+        seq_printf(m, "\t%u.%u W (max GPU)\n", query.max_gpu_power >> 8,
+               query.max_gpu_power & 0xff);
+        seq_printf(m, "\t%u.%u W (average GPU)\n", query.average_gpu_power >> 8,
+               query.average_gpu_power & 0xff);
+    }
+    size = sizeof(value);
     seq_printf(m, "\n");
 
     /* GPU Temp */
-    if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, &value))
+    if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
         seq_printf(m, "GPU Temperature: %u C\n", value/1000);
 
     /* GPU Load */
-    if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value))
+    if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
         seq_printf(m, "GPU Load: %u %%\n", value);
     seq_printf(m, "\n");
 
     /* UVD clocks */
-    if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, &value)) {
+    if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
         if (!value) {
             seq_printf(m, "UVD: Disabled\n");
         } else {
             seq_printf(m, "UVD: Enabled\n");
-            if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, &value))
+            if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
                 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
-            if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, &value))
+            if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
                 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
         }
     }
     seq_printf(m, "\n");
 
     /* VCE clocks */
-    if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, &value)) {
+    if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
         if (!value) {
             seq_printf(m, "VCE: Disabled\n");
         } else {
             seq_printf(m, "VCE: Enabled\n");
-            if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, &value))
+            if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
                 seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
         }
     }
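The hunks above move amdgpu_dpm_read_sensor() to a (void *value, int *size) convention so that one entry point can return both 32-bit scalars and the larger struct pp_gpu_power. A sketch of the calling convention with stub data (sensor ids and values are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct gpu_power { uint32_t vddc_power, average_gpu_power; };

/* The caller states its buffer size; the backend rejects undersized
 * buffers and reports how much it actually wrote. */
static int read_sensor(int sensor, void *value, int *size)
{
    if (sensor == 0) {              /* scalar sensor */
        uint32_t sclk = 85000;      /* 10 kHz units */
        if (*size < (int)sizeof(sclk))
            return -1;
        memcpy(value, &sclk, sizeof(sclk));
        *size = sizeof(sclk);
        return 0;
    }
    if (sensor == 1) {              /* struct-sized sensor */
        struct gpu_power q = { 42u << 8, 40u << 8 };
        if (*size < (int)sizeof(q))
            return -1;
        memcpy(value, &q, sizeof(q));
        *size = sizeof(q);
        return 0;
    }
    return -1;
}

int main(void)
{
    uint32_t value;
    struct gpu_power query;
    int size = sizeof(value);

    if (!read_sensor(0, &value, &size))
        printf("%u MHz (SCLK)\n", value / 100);

    size = sizeof(query);           /* must be reset per query */
    if (!read_sensor(1, &query, &size))
        printf("%u W (average)\n", query.average_gpu_power >> 8);
    return 0;
}

This is also why the debugfs code above reassigns size = sizeof(value) after the struct-sized power query before going back to scalar reads.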
@@ -43,7 +43,7 @@ static int amdgpu_create_pp_handle(struct amdgpu_device *adev)
     amd_pp = &(adev->powerplay);
     pp_init.chip_family = adev->family;
     pp_init.chip_id = adev->asic_type;
-    pp_init.pm_en = amdgpu_dpm != 0 ? true : false;
+    pp_init.pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false;
     pp_init.feature_mask = amdgpu_pp_feature_mask;
     pp_init.device = amdgpu_cgs_create_device(adev);
     ret = amd_powerplay_create(&pp_init, &(amd_pp->pp_handle));
@@ -71,6 +71,7 @@ static int amdgpu_pp_early_init(void *handle)
     case CHIP_TOPAZ:
     case CHIP_CARRIZO:
     case CHIP_STONEY:
+    case CHIP_VEGA10:
         adev->pp_enabled = true;
         if (amdgpu_create_pp_handle(adev))
             return -EINVAL;
@@ -163,7 +164,7 @@ static int amdgpu_pp_hw_init(void *handle)
     int ret = 0;
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-    if (adev->pp_enabled && adev->firmware.smu_load)
+    if (adev->pp_enabled && adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
         amdgpu_ucode_init_bo(adev);
 
     if (adev->powerplay.ip_funcs->hw_init)
@@ -190,7 +191,7 @@ static int amdgpu_pp_hw_fini(void *handle)
         ret = adev->powerplay.ip_funcs->hw_fini(
                     adev->powerplay.pp_handle);
 
-    if (adev->pp_enabled && adev->firmware.smu_load)
+    if (adev->pp_enabled && adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
         amdgpu_ucode_fini_bo(adev);
 
     return ret;
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c (new file, 481 lines)

@@ -0,0 +1,481 @@ (all lines added; markers omitted)
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Huang Rui
 *
 */

#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_psp.h"
#include "amdgpu_ucode.h"
#include "soc15_common.h"
#include "psp_v3_1.h"

static void psp_set_funcs(struct amdgpu_device *adev);

static int psp_early_init(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    psp_set_funcs(adev);

    return 0;
}

static int psp_sw_init(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    struct psp_context *psp = &adev->psp;
    int ret;

    switch (adev->asic_type) {
    case CHIP_VEGA10:
        psp->init_microcode = psp_v3_1_init_microcode;
        psp->bootloader_load_sysdrv = psp_v3_1_bootloader_load_sysdrv;
        psp->bootloader_load_sos = psp_v3_1_bootloader_load_sos;
        psp->prep_cmd_buf = psp_v3_1_prep_cmd_buf;
        psp->ring_init = psp_v3_1_ring_init;
        psp->cmd_submit = psp_v3_1_cmd_submit;
        psp->compare_sram_data = psp_v3_1_compare_sram_data;
        psp->smu_reload_quirk = psp_v3_1_smu_reload_quirk;
        break;
    default:
        return -EINVAL;
    }

    psp->adev = adev;

    ret = psp_init_microcode(psp);
    if (ret) {
        DRM_ERROR("Failed to load psp firmware!\n");
        return ret;
    }

    return 0;
}

static int psp_sw_fini(void *handle)
{
    return 0;
}

int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
         uint32_t reg_val, uint32_t mask, bool check_changed)
{
    uint32_t val;
    int i;
    struct amdgpu_device *adev = psp->adev;

    val = RREG32(reg_index);

    for (i = 0; i < adev->usec_timeout; i++) {
        if (check_changed) {
            if (val != reg_val)
                return 0;
        } else {
            if ((val & mask) == reg_val)
                return 0;
        }
        udelay(1);
    }

    return -ETIME;
}

static int
psp_cmd_submit_buf(struct psp_context *psp,
           struct amdgpu_firmware_info *ucode,
           struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr,
           int index)
{
    int ret;
    struct amdgpu_bo *cmd_buf_bo;
    uint64_t cmd_buf_mc_addr;
    struct psp_gfx_cmd_resp *cmd_buf_mem;
    struct amdgpu_device *adev = psp->adev;

    ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
                      AMDGPU_GEM_DOMAIN_VRAM,
                      &cmd_buf_bo, &cmd_buf_mc_addr,
                      (void **)&cmd_buf_mem);
    if (ret)
        return ret;

    memset(cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);

    memcpy(cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));

    ret = psp_cmd_submit(psp, ucode, cmd_buf_mc_addr,
                 fence_mc_addr, index);

    while (*((unsigned int *)psp->fence_buf) != index) {
        msleep(1);
    };

    amdgpu_bo_free_kernel(&cmd_buf_bo,
                  &cmd_buf_mc_addr,
                  (void **)&cmd_buf_mem);

    return ret;
}

static void psp_prep_tmr_cmd_buf(struct psp_gfx_cmd_resp *cmd,
                 uint64_t tmr_mc, uint32_t size)
{
    cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
    cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = (uint32_t)tmr_mc;
    cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = (uint32_t)(tmr_mc >> 32);
    cmd->cmd.cmd_setup_tmr.buf_size = size;
}

/* Set up Trusted Memory Region */
static int psp_tmr_init(struct psp_context *psp)
{
    int ret;
    struct psp_gfx_cmd_resp *cmd;

    cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
    if (!cmd)
        return -ENOMEM;

    /*
     * Allocate 3M memory aligned to 1M from Frame Buffer (local
     * physical).
     *
     * Note: this memory need be reserved till the driver
     * uninitializes.
     */
    ret = amdgpu_bo_create_kernel(psp->adev, 0x300000, 0x100000,
                      AMDGPU_GEM_DOMAIN_VRAM,
                      &psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
    if (ret)
        goto failed;

    psp_prep_tmr_cmd_buf(cmd, psp->tmr_mc_addr, 0x300000);

    ret = psp_cmd_submit_buf(psp, NULL, cmd,
                 psp->fence_buf_mc_addr, 1);
    if (ret)
        goto failed_mem;

    kfree(cmd);

    return 0;

failed_mem:
    amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
failed:
    kfree(cmd);
    return ret;
}

static void psp_prep_asd_cmd_buf(struct psp_gfx_cmd_resp *cmd,
                 uint64_t asd_mc, uint64_t asd_mc_shared,
                 uint32_t size, uint32_t shared_size)
{
    cmd->cmd_id = GFX_CMD_ID_LOAD_ASD;
    cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(asd_mc);
    cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(asd_mc);
    cmd->cmd.cmd_load_ta.app_len = size;

    cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(asd_mc_shared);
    cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(asd_mc_shared);
    cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
}

static int psp_asd_load(struct psp_context *psp)
{
    int ret;
    struct amdgpu_bo *asd_bo, *asd_shared_bo;
    uint64_t asd_mc_addr, asd_shared_mc_addr;
    void *asd_buf, *asd_shared_buf;
    struct psp_gfx_cmd_resp *cmd;

    cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
    if (!cmd)
        return -ENOMEM;

    /*
     * Allocate 16k memory aligned to 4k from Frame Buffer (local
     * physical) for shared ASD <-> Driver
     */
    ret = amdgpu_bo_create_kernel(psp->adev, PSP_ASD_SHARED_MEM_SIZE, PAGE_SIZE,
                      AMDGPU_GEM_DOMAIN_VRAM,
                      &asd_shared_bo, &asd_shared_mc_addr, &asd_buf);
    if (ret)
        goto failed;

    /*
     * Allocate 256k memory aligned to 4k from Frame Buffer (local
     * physical) for ASD firmware
     */
    ret = amdgpu_bo_create_kernel(psp->adev, PSP_ASD_BIN_SIZE, PAGE_SIZE,
                      AMDGPU_GEM_DOMAIN_VRAM,
                      &asd_bo, &asd_mc_addr, &asd_buf);
    if (ret)
        goto failed_mem;

    memcpy(asd_buf, psp->asd_start_addr, psp->asd_ucode_size);

    psp_prep_asd_cmd_buf(cmd, asd_mc_addr, asd_shared_mc_addr,
                 psp->asd_ucode_size, PSP_ASD_SHARED_MEM_SIZE);

    ret = psp_cmd_submit_buf(psp, NULL, cmd,
                 psp->fence_buf_mc_addr, 2);
    if (ret)
        goto failed_mem1;

    amdgpu_bo_free_kernel(&asd_bo, &asd_mc_addr, &asd_buf);
    amdgpu_bo_free_kernel(&asd_shared_bo, &asd_shared_mc_addr, &asd_shared_buf);
    kfree(cmd);

    return 0;

failed_mem1:
    amdgpu_bo_free_kernel(&asd_bo, &asd_mc_addr, &asd_buf);
failed_mem:
    amdgpu_bo_free_kernel(&asd_shared_bo, &asd_shared_mc_addr, &asd_shared_buf);
failed:
    kfree(cmd);
    return ret;
}

static int psp_load_fw(struct amdgpu_device *adev)
{
    int ret;
    struct psp_gfx_cmd_resp *cmd;
    int i;
    struct amdgpu_firmware_info *ucode;
    struct psp_context *psp = &adev->psp;

    cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
    if (!cmd)
        return -ENOMEM;

    ret = psp_bootloader_load_sysdrv(psp);
    if (ret)
        goto failed;

    ret = psp_bootloader_load_sos(psp);
    if (ret)
        goto failed;

    ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
    if (ret)
        goto failed;

    ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
                      AMDGPU_GEM_DOMAIN_VRAM,
                      &psp->fence_buf_bo,
                      &psp->fence_buf_mc_addr,
                      &psp->fence_buf);
    if (ret)
        goto failed;

    memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);

    ret = psp_tmr_init(psp);
    if (ret)
        goto failed_mem;

    ret = psp_asd_load(psp);
    if (ret)
        goto failed_mem;

    for (i = 0; i < adev->firmware.max_ucodes; i++) {
        ucode = &adev->firmware.ucode[i];
        if (!ucode->fw)
            continue;

        if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
            psp_smu_reload_quirk(psp))
            continue;

        ret = psp_prep_cmd_buf(ucode, cmd);
        if (ret)
            goto failed_mem;

        ret = psp_cmd_submit_buf(psp, ucode, cmd,
                     psp->fence_buf_mc_addr, i + 3);
        if (ret)
            goto failed_mem;

#if 0
        /* check if firmware loaded sucessfully */
        if (!amdgpu_psp_check_fw_loading_status(adev, i))
            return -EINVAL;
#endif
    }

    amdgpu_bo_free_kernel(&psp->fence_buf_bo,
                  &psp->fence_buf_mc_addr, &psp->fence_buf);
    kfree(cmd);

    return 0;

failed_mem:
    amdgpu_bo_free_kernel(&psp->fence_buf_bo,
                  &psp->fence_buf_mc_addr, &psp->fence_buf);
failed:
    kfree(cmd);
    return ret;
}

static int psp_hw_init(void *handle)
{
    int ret;
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
        return 0;

    mutex_lock(&adev->firmware.mutex);
    /*
     * This sequence is just used on hw_init only once, no need on
     * resume.
     */
    ret = amdgpu_ucode_init_bo(adev);
    if (ret)
        goto failed;

    ret = psp_load_fw(adev);
    if (ret) {
        DRM_ERROR("PSP firmware loading failed\n");
        goto failed;
    }

    mutex_unlock(&adev->firmware.mutex);
    return 0;

failed:
    adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
    mutex_unlock(&adev->firmware.mutex);
    return -EINVAL;
}

static int psp_hw_fini(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    struct psp_context *psp = &adev->psp;

    if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
        amdgpu_ucode_fini_bo(adev);

    if (psp->tmr_buf)
        amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);

    return 0;
}

static int psp_suspend(void *handle)
{
    return 0;
}

static int psp_resume(void *handle)
{
    int ret;
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
        return 0;

    mutex_lock(&adev->firmware.mutex);

    ret = psp_load_fw(adev);
    if (ret)
        DRM_ERROR("PSP resume failed\n");

    mutex_unlock(&adev->firmware.mutex);

    return ret;
}

static bool psp_check_fw_loading_status(struct amdgpu_device *adev,
                    enum AMDGPU_UCODE_ID ucode_type)
{
    struct amdgpu_firmware_info *ucode = NULL;

    if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
        DRM_INFO("firmware is not loaded by PSP\n");
        return true;
    }

    if (!adev->firmware.fw_size)
        return false;

    ucode = &adev->firmware.ucode[ucode_type];
    if (!ucode->fw || !ucode->ucode_size)
        return false;

    return psp_compare_sram_data(&adev->psp, ucode, ucode_type);
}

static int psp_set_clockgating_state(void *handle,
                     enum amd_clockgating_state state)
{
    return 0;
}

static int psp_set_powergating_state(void *handle,
                     enum amd_powergating_state state)
{
    return 0;
}

const struct amd_ip_funcs psp_ip_funcs = {
    .name = "psp",
    .early_init = psp_early_init,
    .late_init = NULL,
    .sw_init = psp_sw_init,
    .sw_fini = psp_sw_fini,
    .hw_init = psp_hw_init,
    .hw_fini = psp_hw_fini,
    .suspend = psp_suspend,
    .resume = psp_resume,
    .is_idle = NULL,
    .wait_for_idle = NULL,
    .soft_reset = NULL,
    .set_clockgating_state = psp_set_clockgating_state,
    .set_powergating_state = psp_set_powergating_state,
};

static const struct amdgpu_psp_funcs psp_funcs = {
    .check_fw_loading_status = psp_check_fw_loading_status,
};

static void psp_set_funcs(struct amdgpu_device *adev)
{
    if (NULL == adev->firmware.funcs)
        adev->firmware.funcs = &psp_funcs;
}

const struct amdgpu_ip_block_version psp_v3_1_ip_block =
{
    .type = AMD_IP_BLOCK_TYPE_PSP,
    .major = 3,
    .minor = 1,
    .rev = 0,
    .funcs = &psp_ip_funcs,
};
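psp_cmd_submit_buf() above tags every submission with a distinct fence index (1 for the TMR setup, 2 for the ASD load, i + 3 per ucode) and waits until the PSP writes that index back into the shared fence buffer. A stub sketch of the scheme, with an instantly completing fake device:

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t fence_buf;

/* a real PSP would do work asynchronously, then post the index */
static void device_execute(uint32_t index)
{
    fence_buf = index;
}

static int submit_and_wait(uint32_t index)
{
    device_execute(index);
    while (fence_buf != index)
        ;   /* the driver msleep(1)s per iteration instead */
    return 0;
}

int main(void)
{
    for (uint32_t i = 1; i <= 3; i++)
        if (submit_and_wait(i) == 0)
            printf("command %u completed\n", i);
    return 0;
}

Because the indices only ever increase, a stale fence value can never be mistaken for a newer completion; note that the driver loop, as written in this diff, has no timeout.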
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h (new file, 127 lines)

@@ -0,0 +1,127 @@ (all lines added; markers omitted)
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Huang Rui
 *
 */
#ifndef __AMDGPU_PSP_H__
#define __AMDGPU_PSP_H__

#include "amdgpu.h"
#include "psp_gfx_if.h"

#define PSP_FENCE_BUFFER_SIZE	0x1000
#define PSP_CMD_BUFFER_SIZE	0x1000
#define PSP_ASD_BIN_SIZE	0x40000
#define PSP_ASD_SHARED_MEM_SIZE	0x4000

enum psp_ring_type
{
    PSP_RING_TYPE__INVALID = 0,
    /*
     * These values map to the way the PSP kernel identifies the
     * rings.
     */
    PSP_RING_TYPE__UM = 1, /* User mode ring (formerly called RBI) */
    PSP_RING_TYPE__KM = 2  /* Kernel mode ring (formerly called GPCOM) */
};

struct psp_ring
{
    enum psp_ring_type ring_type;
    struct psp_gfx_rb_frame *ring_mem;
    uint64_t ring_mem_mc_addr;
    void *ring_mem_handle;
    uint32_t ring_size;
};

struct psp_context
{
    struct amdgpu_device *adev;
    struct psp_ring km_ring;

    int (*init_microcode)(struct psp_context *psp);
    int (*bootloader_load_sysdrv)(struct psp_context *psp);
    int (*bootloader_load_sos)(struct psp_context *psp);
    int (*prep_cmd_buf)(struct amdgpu_firmware_info *ucode,
                struct psp_gfx_cmd_resp *cmd);
    int (*ring_init)(struct psp_context *psp, enum psp_ring_type ring_type);
    int (*cmd_submit)(struct psp_context *psp, struct amdgpu_firmware_info *ucode,
              uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr, int index);
    bool (*compare_sram_data)(struct psp_context *psp,
                  struct amdgpu_firmware_info *ucode,
                  enum AMDGPU_UCODE_ID ucode_type);
    bool (*smu_reload_quirk)(struct psp_context *psp);

    /* sos firmware */
    const struct firmware *sos_fw;
    uint32_t sos_fw_version;
    uint32_t sos_feature_version;
    uint32_t sys_bin_size;
    uint32_t sos_bin_size;
    uint8_t *sys_start_addr;
    uint8_t *sos_start_addr;

    /* tmr buffer */
    struct amdgpu_bo *tmr_bo;
    uint64_t tmr_mc_addr;
    void *tmr_buf;

    /* asd firmware */
    const struct firmware *asd_fw;
    uint32_t asd_fw_version;
    uint32_t asd_feature_version;
    uint32_t asd_ucode_size;
    uint8_t *asd_start_addr;

    /* fence buffer */
    struct amdgpu_bo *fence_buf_bo;
    uint64_t fence_buf_mc_addr;
    void *fence_buf;
};

struct amdgpu_psp_funcs {
    bool (*check_fw_loading_status)(struct amdgpu_device *adev,
                    enum AMDGPU_UCODE_ID);
};

#define psp_prep_cmd_buf(ucode, type) (psp)->prep_cmd_buf((ucode), (type))
#define psp_ring_init(psp, type) (psp)->ring_init((psp), (type))
#define psp_cmd_submit(psp, ucode, cmd_mc, fence_mc, index) \
        (psp)->cmd_submit((psp), (ucode), (cmd_mc), (fence_mc), (index))
#define psp_compare_sram_data(psp, ucode, type) \
        (psp)->compare_sram_data((psp), (ucode), (type))
#define psp_init_microcode(psp) \
        ((psp)->init_microcode ? (psp)->init_microcode((psp)) : 0)
#define psp_bootloader_load_sysdrv(psp) \
        ((psp)->bootloader_load_sysdrv ? (psp)->bootloader_load_sysdrv((psp)) : 0)
#define psp_bootloader_load_sos(psp) \
        ((psp)->bootloader_load_sos ? (psp)->bootloader_load_sos((psp)) : 0)
#define psp_smu_reload_quirk(psp) \
        ((psp)->smu_reload_quirk ? (psp)->smu_reload_quirk((psp)) : false)

extern const struct amd_ip_funcs psp_ip_funcs;

extern const struct amdgpu_ip_block_version psp_v3_1_ip_block;
extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
            uint32_t field_val, uint32_t mask, bool check_changed);

#endif
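The accessor macros at the bottom of the header wrap optional callbacks so that an unimplemented hook degrades to a harmless default instead of a NULL dereference. A standalone sketch of the pattern (illustrative names):

#include <stdio.h>

struct backend {
    int (*init_microcode)(struct backend *b); /* may be NULL */
};

/* call the hook when the backend installed one, else succeed quietly */
#define backend_init_microcode(b) \
    ((b)->init_microcode ? (b)->init_microcode((b)) : 0)

static int vega_init_microcode(struct backend *b)
{
    (void)b;
    puts("loading microcode");
    return 0;
}

int main(void)
{
    struct backend with_hook = { vega_init_microcode };
    struct backend without_hook = { 0 };

    backend_init_microcode(&with_hook);    /* runs the hook */
    backend_init_microcode(&without_hook); /* quietly returns 0 */
    return 0;
}

One quirk worth noting in the header itself: psp_prep_cmd_buf(ucode, type) expands to (psp)->prep_cmd_buf(...), so it only compiles where a local variable named psp happens to be in scope.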
@@ -182,16 +182,32 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
         return r;
     }
 
-    r = amdgpu_wb_get(adev, &ring->rptr_offs);
-    if (r) {
-        dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
-        return r;
-    }
+    if (ring->funcs->support_64bit_ptrs) {
+        r = amdgpu_wb_get_64bit(adev, &ring->rptr_offs);
+        if (r) {
+            dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
+            return r;
+        }
+
+        r = amdgpu_wb_get_64bit(adev, &ring->wptr_offs);
+        if (r) {
+            dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
+            return r;
+        }
+
+    } else {
+        r = amdgpu_wb_get(adev, &ring->rptr_offs);
+        if (r) {
+            dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
+            return r;
+        }
+
+        r = amdgpu_wb_get(adev, &ring->wptr_offs);
+        if (r) {
+            dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
+            return r;
+        }
 
-    r = amdgpu_wb_get(adev, &ring->wptr_offs);
-    if (r) {
-        dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
-        return r;
     }
 
     r = amdgpu_wb_get(adev, &ring->fence_offs);
@@ -219,6 +235,9 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
     ring->ring_size = roundup_pow_of_two(max_dw * 4 *
                          amdgpu_sched_hw_submission);
 
+    ring->buf_mask = (ring->ring_size / 4) - 1;
+    ring->ptr_mask = ring->funcs->support_64bit_ptrs ?
+        0xffffffffffffffff : ring->buf_mask;
     /* Allocate ring buffer */
     if (ring->ring_obj == NULL) {
         r = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
@@ -230,9 +249,9 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
             dev_err(adev->dev, "(%d) ring create failed\n", r);
             return r;
         }
-        memset((void *)ring->ring, 0, ring->ring_size);
+        amdgpu_ring_clear_ring(ring);
     }
-    ring->ptr_mask = (ring->ring_size / 4) - 1;
+
     ring->max_dw = max_dw;
 
     if (amdgpu_debugfs_ring_init(adev, ring)) {
@@ -253,10 +272,18 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
 {
     ring->ready = false;
 
-    amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
-    amdgpu_wb_free(ring->adev, ring->fence_offs);
-    amdgpu_wb_free(ring->adev, ring->rptr_offs);
-    amdgpu_wb_free(ring->adev, ring->wptr_offs);
+    if (ring->funcs->support_64bit_ptrs) {
+        amdgpu_wb_free_64bit(ring->adev, ring->cond_exe_offs);
+        amdgpu_wb_free_64bit(ring->adev, ring->fence_offs);
+        amdgpu_wb_free_64bit(ring->adev, ring->rptr_offs);
+        amdgpu_wb_free_64bit(ring->adev, ring->wptr_offs);
+    } else {
+        amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
+        amdgpu_wb_free(ring->adev, ring->fence_offs);
+        amdgpu_wb_free(ring->adev, ring->rptr_offs);
+        amdgpu_wb_free(ring->adev, ring->wptr_offs);
+    }
 
     amdgpu_bo_free_kernel(&ring->ring_obj,
                   &ring->gpu_addr,
@@ -293,8 +320,8 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
 
     if (*pos < 12) {
         early[0] = amdgpu_ring_get_rptr(ring);
-        early[1] = amdgpu_ring_get_wptr(ring);
-        early[2] = ring->wptr;
+        early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
+        early[2] = ring->wptr & ring->buf_mask;
         for (i = *pos / 4; i < 3 && size; i++) {
             r = put_user(early[i], (uint32_t *)buf);
             if (r)
@@ -27,10 +27,11 @@
 #include "gpu_scheduler.h"
 
 /* max number of rings */
-#define AMDGPU_MAX_RINGS		16
+#define AMDGPU_MAX_RINGS		18
 #define AMDGPU_MAX_GFX_RINGS		1
 #define AMDGPU_MAX_COMPUTE_RINGS	8
 #define AMDGPU_MAX_VCE_RINGS		3
+#define AMDGPU_MAX_UVD_ENC_RINGS	2
 
 /* some special values for the owner field */
 #define AMDGPU_FENCE_OWNER_UNDEFINED	((void*)0ul)
@@ -45,7 +46,8 @@ enum amdgpu_ring_type {
     AMDGPU_RING_TYPE_SDMA,
     AMDGPU_RING_TYPE_UVD,
     AMDGPU_RING_TYPE_VCE,
-    AMDGPU_RING_TYPE_KIQ
+    AMDGPU_RING_TYPE_KIQ,
+    AMDGPU_RING_TYPE_UVD_ENC
 };
 
 struct amdgpu_device;
@@ -96,10 +98,11 @@ struct amdgpu_ring_funcs {
     enum amdgpu_ring_type type;
     uint32_t align_mask;
     u32 nop;
+    bool support_64bit_ptrs;
 
     /* ring read/write ptr handling */
-    u32 (*get_rptr)(struct amdgpu_ring *ring);
-    u32 (*get_wptr)(struct amdgpu_ring *ring);
+    u64 (*get_rptr)(struct amdgpu_ring *ring);
+    u64 (*get_wptr)(struct amdgpu_ring *ring);
     void (*set_wptr)(struct amdgpu_ring *ring);
     /* validating and patching of IBs */
     int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
@@ -126,6 +129,7 @@ struct amdgpu_ring_funcs {
     int (*test_ib)(struct amdgpu_ring *ring, long timeout);
     /* insert NOP packets */
     void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
+    void (*insert_end)(struct amdgpu_ring *ring);
     /* pad the indirect buffer to the necessary number of dw */
     void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
     unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
@@ -148,19 +152,23 @@ struct amdgpu_ring {
     struct amdgpu_bo *ring_obj;
     volatile uint32_t *ring;
     unsigned rptr_offs;
-    unsigned wptr;
-    unsigned wptr_old;
+    u64 wptr;
+    u64 wptr_old;
     unsigned ring_size;
     unsigned max_dw;
     int count_dw;
     uint64_t gpu_addr;
-    uint32_t ptr_mask;
+    uint64_t ptr_mask;
+    uint32_t buf_mask;
     bool ready;
     u32 idx;
     u32 me;
     u32 pipe;
     u32 queue;
     struct amdgpu_bo *mqd_obj;
+    uint64_t mqd_gpu_addr;
+    void *mqd_ptr;
+    uint64_t eop_gpu_addr;
     u32 doorbell_index;
     bool use_doorbell;
     unsigned wptr_offs;
@@ -184,5 +192,12 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
              unsigned ring_size, struct amdgpu_irq_src *irq_src,
              unsigned irq_type);
 void amdgpu_ring_fini(struct amdgpu_ring *ring);
+static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
+{
+    int i = 0;
+    while (i <= ring->buf_mask)
+        ring->ring[i++] = ring->funcs->nop;
+
+}
 
 #endif
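With 64-bit pointers the write pointer no longer wraps at the buffer size: buf_mask wraps an offset into the ring buffer, while ptr_mask (all ones in the 64-bit case) bounds the monotonically growing pointer itself. A sketch with a hypothetical NOP opcode:

#include <stdint.h>
#include <stdio.h>

#define RING_DWORDS 256u /* power of two, like the driver's ring_size/4 */

static const uint32_t buf_mask = RING_DWORDS - 1;
static uint32_t ring[RING_DWORDS];
static uint64_t wptr; /* grows without wrapping */

/* same loop shape as amdgpu_ring_clear_ring() above */
static void ring_clear(uint32_t nop)
{
    uint32_t i = 0;

    while (i <= buf_mask)
        ring[i++] = nop;
}

static void ring_write(uint32_t dw)
{
    ring[wptr++ & buf_mask] = dw; /* only the slot index wraps */
}

int main(void)
{
    ring_clear(0x80000000); /* hypothetical NOP opcode */
    for (uint32_t i = 0; i < 300; i++) /* deliberately wraps once */
        ring_write(i);
    printf("wptr=%llu slot=%u\n",
           (unsigned long long)wptr, (uint32_t)(wptr & buf_mask));
    return 0;
}

This split is also why the debugfs read hunk masks the write pointers with buf_mask before exposing them: a 64-bit pointer can be far larger than the buffer.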
@@ -228,7 +228,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 out_cleanup:
     kfree(gtt_obj);
     if (r) {
-        printk(KERN_WARNING "Error while testing BO move.\n");
+        pr_warn("Error while testing BO move\n");
     }
 }
 
@@ -237,82 +237,3 @@ void amdgpu_test_moves(struct amdgpu_device *adev)
     if (adev->mman.buffer_funcs)
         amdgpu_do_test_moves(adev);
 }
-
-void amdgpu_test_ring_sync(struct amdgpu_device *adev,
-               struct amdgpu_ring *ringA,
-               struct amdgpu_ring *ringB)
-{
-}
-
-static void amdgpu_test_ring_sync2(struct amdgpu_device *adev,
-                   struct amdgpu_ring *ringA,
-                   struct amdgpu_ring *ringB,
-                   struct amdgpu_ring *ringC)
-{
-}
-
-static bool amdgpu_test_sync_possible(struct amdgpu_ring *ringA,
-                      struct amdgpu_ring *ringB)
-{
-    if (ringA == &ringA->adev->vce.ring[0] &&
-        ringB == &ringB->adev->vce.ring[1])
-        return false;
-
-    return true;
-}
-
-void amdgpu_test_syncing(struct amdgpu_device *adev)
-{
-    int i, j, k;
-
-    for (i = 1; i < AMDGPU_MAX_RINGS; ++i) {
-        struct amdgpu_ring *ringA = adev->rings[i];
-        if (!ringA || !ringA->ready)
-            continue;
-
-        for (j = 0; j < i; ++j) {
-            struct amdgpu_ring *ringB = adev->rings[j];
-            if (!ringB || !ringB->ready)
-                continue;
-
-            if (!amdgpu_test_sync_possible(ringA, ringB))
-                continue;
-
-            DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
-            amdgpu_test_ring_sync(adev, ringA, ringB);
-
-            DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
-            amdgpu_test_ring_sync(adev, ringB, ringA);
-
-            for (k = 0; k < j; ++k) {
-                struct amdgpu_ring *ringC = adev->rings[k];
-                if (!ringC || !ringC->ready)
-                    continue;
-
-                if (!amdgpu_test_sync_possible(ringA, ringC))
-                    continue;
-
-                if (!amdgpu_test_sync_possible(ringB, ringC))
-                    continue;
-
-                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
-                amdgpu_test_ring_sync2(adev, ringA, ringB, ringC);
-
-                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
-                amdgpu_test_ring_sync2(adev, ringA, ringC, ringB);
-
-                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
-                amdgpu_test_ring_sync2(adev, ringB, ringA, ringC);
-
-                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
-                amdgpu_test_ring_sync2(adev, ringB, ringC, ringA);
-
-                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
-                amdgpu_test_ring_sync2(adev, ringC, ringA, ringB);
-
-                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
-                amdgpu_test_ring_sync2(adev, ringC, ringB, ringA);
-            }
-        }
-    }
-}
Some files were not shown because too many files have changed in this diff.