Merge tag 'drm-fixes-for-v4.10-rc6' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "drm fixes across the board.

  Okay, holidays and LCA kinda caught up with me. I thought I'd get some
  of this dequeued last week, but Hobart was sunny and warm and not all
  gloomy and rainy as usual.

  This is a bit large, but not too much considering it's two weeks of
  stuff from AMD and Intel.

  core:
   - one locking fix that helps with dynamic suspend/resume races

  i915:
   - mostly GVT updates; GVT was a recent introduction, so fixes for it
     shouldn't cause any notable side effects

  amdgpu:
   - a bunch of fixes for GPUs with a different memory-controller design
     that need different firmware

  exynos:
   - decon regression fixes

  msm:
   - two regression fixes

  etnaviv:
   - a workaround for an MMU bug that needs a lot more work

  virtio:
   - sparse fix, and a MAINTAINERS update"

* tag 'drm-fixes-for-v4.10-rc6' of git://people.freedesktop.org/~airlied/linux: (56 commits)
  drm/exynos/decon5433: set STANDALONE_UPDATE_F on output enablement
  drm/exynos/decon5433: fix CMU programming
  drm/exynos/decon5433: do not disable video after reset
  drm/i915: Ignore bogus plane coordinates on SKL when the plane is not visible
  drm/i915: Remove WaDisableLSQCROPERFforOCL KBL workaround.
  drm/amdgpu: add support for new hainan variants
  drm/radeon: add support for new hainan variants
  drm/amdgpu: change clock gating mode for uvd_v4.
  drm/amdgpu: fix program vce instance logic error.
  drm/amdgpu: fix bug set incorrect value to vce register
  Revert "drm/amdgpu: Only update the CUR_SIZE register when necessary"
  drm/msm: fix potential null ptr issue in non-iommu case
  drm/msm/mdp5: rip out plane->pending tracking
  drm/exynos/decon5433: set STANDALONE_UPDATE_F also if planes are disabled
  drm/exynos/decon5433: update shadow registers iff there are active windows
  drm/i915/gvt: rewrite gt reset handler using new function intel_gvt_reset_vgpu_locked
  drm/i915/gvt: fix vGPU instance reuse issues by vGPU reset function
  drm/i915/gvt: introduce intel_vgpu_reset_mmio() to reset mmio space
  drm/i915/gvt: move mmio init/clean function to mmio.c
  drm/i915/gvt: introduce intel_vgpu_reset_cfg_space to reset configuration space
  ...
commit 3258943ddb

Diffstat (partial): MAINTAINERS | 16

The per-file changes are summarized below.
MAINTAINERS (hunks @@ -4100,12 +4100,18; -4298,7 +4304,10; -13092,6 +13101,7):
The bochs, cirrus, qxl and virtio-gpu driver entries are updated. Gerd
Hoffmann <kraxel@redhat.com> takes over the cirrus and qxl entries from Dave
Airlie, all four entries gain L: virtualization@lists.linux-foundation.org
and T: git git://git.kraxel.org/linux drm-qemu, the bochs entry moves from
"Odd Fixes" to "Maintained", and the cirrus entry is marked "Obsolete" with
W: https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/.
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c (hunks @@ 2512, 2537, 2578, 2587, 2620, 2628):
Part of Revert "drm/amdgpu: Only update the CUR_SIZE register when necessary".
dce_v10_0_cursor_move_locked() again writes mmCUR_SIZE together with
mmCUR_POSITION and mmCUR_HOT_SPOT on every move; dce_v10_0_crtc_cursor_set2()
goes back to a single width/height/hot_x/hot_y comparison that updates all
four cached values in one place instead of a separate "size changed" block
that wrote CUR_SIZE; and dce_v10_0_cursor_reset() no longer re-programs
CUR_SIZE itself, since the move helper does it again.
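A minimal sketch of the restored move path, assuming the driver context above
(the x/y clamping and the cursor_x/cursor_y bookkeeping that precede the
register writes are elided):

static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;
	int xorigin = 0, yorigin = 0;

	/* ... clamp negative x/y into xorigin/yorigin, cache cursor_x/y ... */

	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset,
	       (xorigin << 16) | yorigin);
	/* the size is written on every move again, not only when it changed */
	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
	       ((amdgpu_crtc->cursor_width - 1) << 16) |
	       (amdgpu_crtc->cursor_height - 1));

	return 0;
}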
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c (hunks @@ 2532, 2557, 2598, 2607, 2640, 2648):
Same revert as dce_v10_0.c: mmCUR_SIZE is programmed in
dce_v11_0_cursor_move_locked(), the width/height and hot-spot checks in
dce_v11_0_crtc_cursor_set2() are merged back into one block, and the extra
CUR_SIZE writes in dce_v11_0_crtc_cursor_set2() and dce_v11_0_cursor_reset()
are dropped.
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c (hunks @@ 1859, 1878, 1903, 1944, 1953, 1986, 1994):
Same revert: dce_v6_0_cursor_move_locked() caches the width in a local
(int w = amdgpu_crtc->cursor_width) and writes mmCUR_SIZE as
((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1) alongside position and
hot spot; dce_v6_0_crtc_cursor_set2() and dce_v6_0_cursor_reset() lose their
separate CUR_SIZE update blocks.
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c (hunks @@ 2363, 2388, 2429, 2438, 2471, 2479):
Same revert: mmCUR_SIZE is written in dce_v8_0_cursor_move_locked(), the
checks in dce_v8_0_crtc_cursor_set2() are merged back into one
width/height/hot-spot block, and the redundant CUR_SIZE writes in
dce_v8_0_crtc_cursor_set2() and dce_v8_0_cursor_reset() are removed.
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c (hunks @@ 44, 113, 135, 463, 754):
- A new MODULE_FIRMWARE("radeon/si58_mc.bin") line is added.
- gmc_v6_0_init_microcode() checks the memory type in MC_SEQ_MISC0: when
  ((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58 the board needs the
  dedicated radeon/si58_mc.bin image, otherwise the usual
  radeon/<chip>_mc.bin is requested.
- gmc_v6_0_gart_enable() drops the long list of per-fault-type
  *_PROTECTION_FAULT_ENABLE masks from the VM_CONTEXT1_CNTL write (keeping
  ENABLE_CONTEXT, PAGE_TABLE_DEPTH and PAGE_TABLE_BLOCK_SIZE) and configures
  fault handling through gmc_v6_0_set_fault_enable_default(), honouring
  amdgpu_vm_fault_stop.
- gmc_v6_0_late_init() only requests the VM-fault interrupt when
  amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS.
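A compact sketch of the new firmware pick. gmc_v6_0_pick_mc_firmware() is a
made-up wrapper for illustration (upstream the logic sits inline in
gmc_v6_0_init_microcode()); the register and firmware names are the ones in
the hunks above:

static int gmc_v6_0_pick_mc_firmware(struct amdgpu_device *adev,
				     const char *chip_name)
{
	char fw_name[30];
	bool is_58_fw = false;

	/* this memory configuration requires special firmware */
	if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
		is_58_fw = true;

	if (is_58_fw)
		snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
	else
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin",
			 chip_name);

	return request_firmware(&adev->mc.fw, fw_name, adev->dev);
}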
drivers/gpu/drm/amd/amdgpu/si_dpm.c (hunks @@ 64, 3487, 3506, 7713):
Part of "drm/amdgpu: add support for new hainan variants". A
MODULE_FIRMWARE("radeon/banks_k_2_smc.bin") line is added, the OLAND/HAINAN
max_sclk/max_mclk quirk block in si_apply_state_adjust_rules() is reworked
(the hunks shrink from 17 to 6 and from 7 to 6 lines), and
si_dpm_init_microcode() now selects SMC firmware per variant: the
revision 0xc3 / device 0x6665 part is split out of the hainan_k group and
loads banks_k_2_smc.bin instead, with plain hainan_smc.bin as the fallback.
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c (hunks @@ 40, 140, 155, 266, 406, 619, 685, 711):
"drm/amdgpu: change clock gating mode for uvd_v4.": forward declarations for
uvd_v4_2_set_dcm() and uvd_v4_2_enable_mgcg() are added; uvd_v4_2_hw_init()
calls uvd_v4_2_enable_mgcg(adev, true) instead of uvd_v4_2_init_cg() plus
uvd_v4_2_set_clockgating_state(adev, AMD_CG_STATE_GATE); uvd_v4_2_start()
now writes mmUVD_CGC_GATE to 0 and enables dynamic clock mode with
uvd_v4_2_set_dcm(adev, true); uvd_v4_2_stop() disables it again; the old
uvd_v4_2_init_cg() helper is removed; and the explicit gating logic in
uvd_v4_2_set_clockgating_state() and uvd_v4_2_set_powergating_state() is
trimmed accordingly.
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c (hunks @@ 43, 54, 175, 249, 273, 288, 306, 586, 734, 753):
Two VCE fixes from the shortlog land here.
- "fix bug set incorrect value to vce register": in
  vce_v3_0_set_vce_sw_clock_gating() the mmVCE_UENC_REG_CLOCK_GATING mask is
  corrected from data &= ~0xffc00000 to data &= ~0x3ff.
- "fix program vce instance logic error": new definitions
  GRBM_GFX_INDEX__VCE_ALL_PIPE (0x07), mmGRBM_GFX_INDEX_DEFAULT (0xE0000000)
  and GET_VCE_INSTANCE(i) are added; vce_v3_0_start(), vce_v3_0_stop(),
  vce_v3_0_check_soft_reset() and vce_v3_0_set_clockgating_state() now select
  an instance with WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx)) and restore
  WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT) afterwards, instead of
  poking only the VCE_INSTANCE field via WREG32_FIELD().
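A sketch of how the instance selection is used after the fix.
vce_v3_0_program_one_instance() is an illustrative wrapper (the real code
does this inline in vce_v3_0_start()/stop() and friends); the macros are the
ones defined in the hunk above:

static void vce_v3_0_program_one_instance(struct amdgpu_device *adev, int idx)
{
	/* select every pipe of VCE instance idx, not just the index field */
	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));

	vce_v3_0_mc_resume(adev, idx);
	/* ... instance-specific ring/status programming ... */

	/* restore the default broadcast index when done */
	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
}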
drivers/gpu/drm/amd/powerplay, cz_dpm_powergate_vce() (hunks @@ 200, 218):
The gating states passed to cgs_set_clockgating_state() for the VCE block
are corrected: one call site changes AMD_CG_STATE_UNGATE to
AMD_CG_STATE_GATE, the other changes AMD_PG_STATE_GATE to
AMD_PG_STATE_UNGATE before cz_dpm_update_vce_dpm() and
cz_enable_disable_vce_dpm(hwmgr, true) are invoked.
drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c (hunk @@ -1402,14 +1402,22):
In cz_dpm_update_vce_dpm(), the branch that programs the hard minimum ECLK
from hwmgr->vce_arbiter.ecclk now special-cases ecclk == 0: it sends
PPSMC_MSG_SetEclkHardMin with 0 and raises the soft minimum to level 1 so
ECLK DPM 0 stays disabled (per the new comment, VCE could otherwise hang
when SCLK switches from DPM 0 to 6/7); only when ecclk is non-zero is the
hard minimum derived from cz_get_eclk_level() as before.
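A sketch of the new hard-minimum programming. cz_program_vce_eclk() is a
hypothetical helper carved out for readability; the SMU messages and
cz_get_eclk_level() are the ones visible in the hunk:

static void cz_program_vce_eclk(struct pp_hwmgr *hwmgr,
				struct cz_hwmgr *cz_hwmgr)
{
	if (hwmgr->vce_arbiter.ecclk == 0) {
		/* nothing requested yet: park the hard minimum at 0 ... */
		smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
				PPSMC_MSG_SetEclkHardMin, 0);
		/* ... and keep ECLK DPM 0 disabled, otherwise VCE could hang
		 * when SCLK switches from DPM 0 to 6/7 */
		smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
				PPSMC_MSG_SetEclkSoftMin, 1);
	} else {
		cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
		smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
				PPSMC_MSG_SetEclkHardMin,
				cz_get_eclk_level(hwmgr,
					cz_hwmgr->vce_dpm.hard_min_clk,
					PPSMC_MSG_SetEclkHardMin));
	}
}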
drivers/gpu/drm/bridge/analogix/analogix_dp_core.c (hunks @@ 1382, 1414):
analogix_dp_bind() now takes a runtime-PM reference with
pm_runtime_get_sync(dev) before powering the PHY and initializing the DP
block, and releases it on both exits: the success path gains
phy_power_off(dp->phy) and pm_runtime_put(dev) before returning 0, and the
err_disable_pm_runtime path does the same before pm_runtime_disable(dev).
drivers/gpu/drm/cirrus/Kconfig (hunk @@ -7,3 +7,12):
The DRM_CIRRUS_QEMU help text, after the existing "This is a KMS driver for
emulated cirrus device in qemu ..." paragraph, gains a deprecation note:

	  Cirrus is obsolete, the hardware was designed in the 90ies
	  and can't keep up with todays needs. More background:
	  https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/

	  Better alternatives are:
	    - stdvga (DRM_BOCHS, qemu -vga std, default in qemu 2.2+)
	    - qxl (DRM_QXL, qemu -vga qxl, works best with spice)
	    - virtio (DRM_VIRTIO_GPU, qemu -vga virtio)
drivers/gpu/drm/drm_modes.c (hunk @@ -1460,6 +1460,13):
drm_mode_create_from_cmdline_mode() gains a fixup for user-requested
1366x768 modes: GTF/CVT can only generate a 1368-pixel-wide timing due to
alignment, so when cmd->xres == 1366 and mode->hdisplay == 1368 the mode is
narrowed back to 1366, hsync_start and hsync_end are decremented, and the
mode is renamed with drm_mode_set_name().
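The fixup in standalone form; fixup_cmdline_1366() is an illustrative name,
upstream the check lives directly in drm_mode_create_from_cmdline_mode():

static void fixup_cmdline_1366(const struct drm_cmdline_mode *cmd,
			       struct drm_display_mode *mode)
{
	/* GTF/CVT can only produce a 1368-pixel-wide timing, so narrow a
	 * requested 1366x768 back down and shift the sync pulse with it */
	if (cmd->xres == 1366 && mode->hdisplay == 1368) {
		mode->hdisplay = 1366;
		mode->hsync_start--;
		mode->hsync_end--;
		drm_mode_set_name(mode);
	}
}

With this in place a video=1366x768 command line yields a mode actually
named "1366x768" rather than "1368x768".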
drivers/gpu/drm/drm_probe_helper.c (hunks @@ 115, 143, 277, 469, 480):
This is the core locking fix mentioned in the pull message.
drm_kms_helper_poll_enable_locked() and the old mutex-taking
drm_kms_helper_poll_enable() wrapper are collapsed into a single
drm_kms_helper_poll_enable(). The kerneldoc now states that it re-enables
polling after drm_kms_helper_poll_disable() (for example over
suspend/resume), that drivers call it from their resume hook, that it is an
error to call it before polling support has been set up, and that
enable/disable calls must be strictly ordered, which they are when only made
from suspend/resume callbacks; drm_kms_helper_poll_disable()'s kerneldoc is
updated to match (including fixing the "arlready" typo). The
mode_config.delayed_event case now schedules the poll work with a one-second
delay (delay = HZ) instead of immediately, with a FIXME noting that
Optimus/nouveau setups fail mysteriously when the delayed event is handled
right away. drm_helper_probe_single_connector_modes() re-enables polling
through the unified drm_kms_helper_poll_enable() when the global poll
configuration changed.
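A minimal sketch of the intended driver-side usage after the consolidation;
foo_suspend()/foo_resume() are hypothetical driver callbacks, the helpers
are the real DRM ones:

static int foo_suspend(struct drm_device *dev)
{
	/* stop connector polling before powering down */
	drm_kms_helper_poll_disable(dev);
	/* ... save state, power down hardware ... */
	return 0;
}

static int foo_resume(struct drm_device *dev)
{
	/* ... power up hardware, restore state ... */
	/* re-enable polling; this now does what the old _locked() variant
	 * did, and must stay strictly ordered with the disable call above */
	drm_kms_helper_poll_enable(dev);
	return 0;
}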
drivers/gpu/drm/etnaviv/etnaviv_mmu.c (hunk @@ -116,9 +116,14):
etnaviv_iommu_find_iova() passes
mmu->last_iova ? DRM_MM_SEARCH_DEFAULT : DRM_MM_SEARCH_BELOW as the search
flag to drm_mm_insert_node_in_range() instead of always using
DRM_MM_SEARCH_DEFAULT. A new XXX comment notes that DRM_MM_SEARCH_BELOW is
really a hack to trick drm_mm into giving out a low IOVA after address-space
rollover and needs a proper fix; this is the etnaviv MMU workaround from the
pull message.
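The retry in isolation; etnaviv_insert_iova() is an illustrative wrapper
around the call shown above, with the same argument order:

static int etnaviv_insert_iova(struct etnaviv_iommu *mmu,
			       struct drm_mm_node *node, size_t size)
{
	/* after a rollover last_iova is 0, so ask drm_mm for a low IOVA */
	return drm_mm_insert_node_in_range(&mmu->mm, node, size, 0,
			mmu->last_iova, ~0UL,
			mmu->last_iova ? DRM_MM_SEARCH_DEFAULT
				       : DRM_MM_SEARCH_BELOW);
}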
drivers/gpu/drm/exynos/exynos5433_drm_decon.c (hunks @@ 46, 141, 315, 327, 340):
The decon5433 regression fixes from the shortlog. A new BIT_REQUEST_UPDATE
flag is added to decon_flag_bits. decon_commit() no longer clears
VIDCON0_ENVID and no longer writes the CMU clock-gate enable value
(CMU_CLKGAGE_MODE_SFR_F | CMU_CLKGAGE_MODE_MEM_F) to DECON_CMU.
decon_update_plane() and decon_disable_plane() set BIT_REQUEST_UPDATE after
touching their window, and decon_atomic_flush() only sets STANDALONE_UPDATE_F
in DECON_UPDATE when test_and_clear_bit() shows such a request is pending,
so the shadow registers are latched only when there are active window
updates.
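A sketch of the request/flush handshake. The window-marking helper is a
hypothetical condensation of what decon_update_plane()/decon_disable_plane()
do inline; decon_atomic_flush() follows the hunk above:

static void decon_mark_win(struct decon_context *ctx, unsigned int win,
			   bool enable)
{
	decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F,
		       enable ? ~0 : 0);
	/* remember that a standalone update is needed at flush time */
	set_bit(BIT_REQUEST_UPDATE, &ctx->flags);
}

static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
{
	struct decon_context *ctx = crtc->ctx;
	int i;

	for (i = ctx->first_win; i < WINDOWS_NR; i++)
		decon_shadow_protect_win(ctx, i, false);

	/* latch the shadow registers only if a window actually changed */
	if (test_and_clear_bit(BIT_REQUEST_UPDATE, &ctx->flags))
		decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);

	if (ctx->out_type & IFTYPE_I80)
		set_bit(BIT_WIN_UPDATED, &ctx->flags);
}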
drivers/gpu/drm/i915/gvt/aperture_gm.c (hunks @@ 37, 165, 178, 208, 313):
Part of the vGPU reset rework. The unused MB_TO_BYTES/BYTES_TO_MB,
HOST_LOW_GM_SIZE, HOST_HIGH_GM_SIZE and HOST_FENCE defines are removed. A
_clear_vgpu_fence() helper that writes 0 to every fence register of the vGPU
is introduced and used by free_vgpu_fence() and alloc_vgpu_fence() instead of
open-coded per-register writes. A new exported intel_vgpu_reset_resource()
takes a runtime-PM reference and clears the vGPU's fences so a reused vGPU
instance starts from a clean state.
drivers/gpu/drm/i915/gvt/cfg_space.c (hunk @@ -282,3 +282,77):
Two new helpers back "introduce intel_vgpu_reset_cfg_space".
intel_vgpu_init_cfg_space(vgpu, primary) copies the host firmware config
space into the vGPU, downgrades the PCI class to
INTEL_GVT_PCI_CLASS_VGA_OTHER for non-primary vGPUs, masks the stolen-memory
size out of the GMCH control word, points PCI_BASE_ADDRESS_2 at the host
aperture, clears the IO/MEMORY/MASTER bits in PCI_COMMAND, zeroes the upper
halves of the 64-bit BARs and the OpRegion pointer, and records each BAR's
size while marking it untracked. intel_vgpu_reset_cfg_space(vgpu) untraps
GTT MMIO and unmaps the aperture if the guest had memory decoding enabled,
then simply re-runs intel_vgpu_init_cfg_space(); per the comment, such a
reset only happens while the vGPU is not owned by any VM, so restoring the
defaults is sufficient.
drivers/gpu/drm/i915/gvt/gtt.c (hunks @@ 240, 256, 1380, 1532, 1886, 1918, 1936, 2208, 2221, 2297):
- read_pte64()/write_pte64() drop the #ifdef readq / #ifdef writeq 32-bit
  fallbacks and use readq()/writeq() directly.
- gen8_mm_alloc_page_table() and intel_vgpu_create_mm() allocate with
  GFP_KERNEL instead of GFP_ATOMIC.
- alloc_scratch_pages() switches from alloc_page() plus kmap_atomic() to
  get_zeroed_page(GFP_KERNEL): the kernel virtual address is passed straight
  to intel_gvt_hypervisor_virt_to_mfn() and ops->set_entry(), the stored
  struct page is recovered with virt_to_page(), and the error path uses
  free_page(). A comment typo ("scatch_pt") is fixed to "scratch_pt".
- intel_gvt_init_gtt() allocates the scratch GGTT page the same way.
- A new intel_vgpu_reset_gtt(vgpu, dmlr) is added: it frees all shadow PPGTT
  pages, and on a device-model-level reset additionally resets the GGTT and
  wipes the per-type scratch pages for security.
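A trimmed sketch of the new scratch-page allocation pattern. The wrapper name
is illustrative (upstream this is part of alloc_scratch_pages()); the GVT
helper and field names are the ones visible in the hunks above:

static int alloc_one_scratch_page(struct intel_vgpu *vgpu, int type)
{
	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
	void *scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
	unsigned long mfn;

	if (!scratch_pt)
		return -ENOMEM;

	/* the kernel virtual address can be translated directly, so no
	 * kmap_atomic()/kunmap_atomic() pair is needed anymore */
	mfn = intel_gvt_hypervisor_virt_to_mfn(scratch_pt);
	if (mfn == INTEL_GVT_INVALID_ADDR) {
		free_page((unsigned long)scratch_pt);
		return -EFAULT;
	}

	gtt->scratch_pt[type].page_mfn = mfn;
	gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
	return 0;
}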
drivers/gpu/drm/i915/gvt/gtt.h (hunk @@ -208,6 +208,7):
Adds the declaration
extern void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr);
next to the existing GTT init/clean prototypes.
drivers/gpu/drm/i915/gvt/gvt.c (hunks @@ 201, 237, 244, 313):
intel_gvt_init_device() now calls idr_init(&gvt->vgpu_idr) and
intel_gvt_clean_device() destroys it with idr_destroy(). The init error
handling is adjusted to match: a failure in intel_gvt_setup_mmio_info()
jumps to a new out_clean_idr label, which destroys the idr before
kfree(gvt), instead of returning directly, and the existing
out_clean_mmio_info path falls through to it.
drivers/gpu/drm/i915/gvt/gvt.h (hunks @@ 323, 375, 411, 424):
New declarations are added for intel_vgpu_reset_resource(vgpu),
intel_gvt_reset_vgpu_locked(vgpu, dmlr, engine_mask),
intel_vgpu_init_cfg_space(vgpu, primary) and
intel_vgpu_reset_cfg_space(vgpu); the old setup_vgpu_mmio() prototype is
dropped (the MMIO init/clean code moved to mmio.c per the shortlog), while
populate_pvinfo_page() remains.
@ -93,7 +93,8 @@ static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
|
|||
static int new_mmio_info(struct intel_gvt *gvt,
|
||||
u32 offset, u32 flags, u32 size,
|
||||
u32 addr_mask, u32 ro_mask, u32 device,
|
||||
void *read, void *write)
|
||||
int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int),
|
||||
int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int))
|
||||
{
|
||||
struct intel_gvt_mmio_info *info, *p;
|
||||
u32 start, end, i;
|
||||
|
@ -219,7 +220,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
|
|||
default:
|
||||
/*should not hit here*/
|
||||
gvt_err("invalid forcewake offset 0x%x\n", offset);
|
||||
return 1;
|
||||
return -EINVAL;
|
||||
}
|
||||
} else {
|
||||
ack_reg_offset = FORCEWAKE_ACK_HSW_REG;
|
||||
|
@@ -230,77 +231,45 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
	return 0;
}

static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes, unsigned long bitmap)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&vgpu->gvt->scheduler;

	vgpu->resetting = true;

	intel_vgpu_stop_schedule(vgpu);
	/*
	 * The current_vgpu will set to NULL after stopping the
	 * scheduler when the reset is triggered by current vgpu.
	 */
	if (scheduler->current_vgpu == NULL) {
		mutex_unlock(&vgpu->gvt->lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&vgpu->gvt->lock);
	}

	intel_vgpu_reset_execlist(vgpu, bitmap);

	/* full GPU reset */
	if (bitmap == 0xff) {
		mutex_unlock(&vgpu->gvt->lock);
		intel_vgpu_clean_gtt(vgpu);
		mutex_lock(&vgpu->gvt->lock);
		setup_vgpu_mmio(vgpu);
		populate_pvinfo_page(vgpu);
		intel_vgpu_init_gtt(vgpu);
	}

	vgpu->resetting = false;

	return 0;
}

static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	unsigned int engine_mask = 0;
	u32 data;
	u64 bitmap = 0;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if (data & GEN6_GRDOM_FULL) {
		gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id);
		bitmap = 0xff;
		engine_mask = ALL_ENGINES;
	} else {
		if (data & GEN6_GRDOM_RENDER) {
			gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
			engine_mask |= (1 << RCS);
		}
		if (data & GEN6_GRDOM_MEDIA) {
			gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
			engine_mask |= (1 << VCS);
		}
		if (data & GEN6_GRDOM_BLT) {
			gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
			engine_mask |= (1 << BCS);
		}
		if (data & GEN6_GRDOM_VECS) {
			gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
			engine_mask |= (1 << VECS);
		}
		if (data & GEN8_GRDOM_MEDIA2) {
			gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
			if (HAS_BSD2(vgpu->gvt->dev_priv))
				engine_mask |= (1 << VCS2);
		}
	}
	if (data & GEN6_GRDOM_RENDER) {
		gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
		bitmap |= (1 << RCS);
	}
	if (data & GEN6_GRDOM_MEDIA) {
		gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
		bitmap |= (1 << VCS);
	}
	if (data & GEN6_GRDOM_BLT) {
		gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
		bitmap |= (1 << BCS);
	}
	if (data & GEN6_GRDOM_VECS) {
		gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
		bitmap |= (1 << VECS);
	}
	if (data & GEN8_GRDOM_MEDIA2) {
		gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
		if (HAS_BSD2(vgpu->gvt->dev_priv))
			bitmap |= (1 << VCS2);
	}
	return handle_device_reset(vgpu, offset, p_data, bytes, bitmap);

	intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);

	return 0;
}

static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
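The gdrst_mmio_write() rewrite above stops doing the reset work inline: the handler now only folds the GEN6_GRDOM_* bits written by the guest into an engine mask and defers everything else to intel_gvt_reset_vgpu_locked(). A minimal sketch of the new flow (a simplification of the hunk above, not the literal driver code; only the RCS and VCS bits are shown):

/* Sketch: translate guest GDRST bits into an engine mask and let the
 * locked reset helper do the rest (dmlr=false marks this as a GT reset,
 * not a device model level reset). */
static int sketch_gdrst_write(struct intel_vgpu *vgpu, u32 data)
{
	unsigned int engine_mask = 0;

	if (data & GEN6_GRDOM_FULL) {
		engine_mask = ALL_ENGINES;
	} else {
		if (data & GEN6_GRDOM_RENDER)
			engine_mask |= (1 << RCS);
		if (data & GEN6_GRDOM_MEDIA)
			engine_mask |= (1 << VCS);
		/* ...the remaining GRDOM bits map the same way... */
	}

	intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
	return 0;
}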
@@ -974,7 +943,7 @@ static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
	return 0;
}

static bool sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 data;
@@ -1366,7 +1335,6 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	int rc = 0;
	unsigned int id = 0;

	write_vreg(vgpu, offset, p_data, bytes);

@@ -1389,12 +1357,11 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
		id = VECS;
		break;
	default:
		rc = -EINVAL;
		break;
		return -EINVAL;
	}
	set_bit(id, (void *)vgpu->tlb_handle_pending);

	return rc;
	return 0;
}

static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
@@ -398,6 +398,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
	struct intel_vgpu_type *type;
	struct device *pdev;
	void *gvt;
	int ret;

	pdev = mdev_parent_dev(mdev);
	gvt = kdev_to_i915(pdev)->gvt;

@@ -406,13 +407,15 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
	if (!type) {
		gvt_err("failed to find type %s to create\n",
			kobject_name(kobj));
		return -EINVAL;
		ret = -EINVAL;
		goto out;
	}

	vgpu = intel_gvt_ops->vgpu_create(gvt, type);
	if (IS_ERR_OR_NULL(vgpu)) {
		gvt_err("create intel vgpu failed\n");
		return -EINVAL;
		ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
		gvt_err("failed to create intel vgpu: %d\n", ret);
		goto out;
	}

	INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);

@@ -422,7 +425,10 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)

	gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
		     dev_name(mdev_dev(mdev)));
	return 0;
	ret = 0;

out:
	return ret;
}

static int intel_vgpu_remove(struct mdev_device *mdev)
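The intel_vgpu_create() hunk above replaces bare return -EINVAL exits with a single out: label and derives a specific errno from the vgpu_create() result instead of flattening every failure to -EINVAL. The pattern, reduced to a sketch (the sketch_* struct and helper names are placeholders; IS_ERR_OR_NULL() and PTR_ERR() are the standard kernel helpers the hunk relies on):

/* Sketch: map an ERR_PTR-or-NULL result to an errno and funnel all exits
 * through one label, as the kvmgt change above now does. */
static int sketch_create(struct sketch_parent *parent)
{
	struct sketch_child *child;
	int ret;

	child = sketch_child_create(parent);	/* may return ERR_PTR() or NULL */
	if (IS_ERR_OR_NULL(child)) {
		ret = child == NULL ? -EFAULT : PTR_ERR(child);
		goto out;
	}

	ret = 0;
out:
	return ret;
}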
@@ -125,25 +125,12 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
	if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
		goto err;

	mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
	if (!mmio && !vgpu->mmio.disable_warn_untrack) {
		gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n",
			vgpu->id, offset, bytes, *(u32 *)p_data);

		if (offset == 0x206c) {
			gvt_err("------------------------------------------\n");
			gvt_err("vgpu%d: likely triggers a gfx reset\n",
				vgpu->id);
			gvt_err("------------------------------------------\n");
			vgpu->mmio.disable_warn_untrack = true;
		}
	}

	if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, bytes)))
			goto err;
	}

	mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
	if (mmio) {
		if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
			if (WARN_ON(offset + bytes > mmio->offset + mmio->size))

@@ -152,9 +139,23 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
				goto err;
		}
		ret = mmio->read(vgpu, offset, p_data, bytes);
	} else
	} else {
		ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);

		if (!vgpu->mmio.disable_warn_untrack) {
			gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n",
				vgpu->id, offset, bytes, *(u32 *)p_data);

			if (offset == 0x206c) {
				gvt_err("------------------------------------------\n");
				gvt_err("vgpu%d: likely triggers a gfx reset\n",
					vgpu->id);
				gvt_err("------------------------------------------\n");
				vgpu->mmio.disable_warn_untrack = true;
			}
		}
	}

	if (ret)
		goto err;

@@ -302,3 +303,56 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
	mutex_unlock(&gvt->lock);
	return ret;
}

/**
 * intel_vgpu_reset_mmio - reset virtual MMIO space
 * @vgpu: a vGPU
 *
 */
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;

	memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
	memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);

	vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;

	/* set the bit 0:2(Core C-State ) to C0 */
	vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
}

/**
 * intel_vgpu_init_mmio - init MMIO space
 * @vgpu: a vGPU
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;

	vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
	if (!vgpu->mmio.vreg)
		return -ENOMEM;

	vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;

	intel_vgpu_reset_mmio(vgpu);

	return 0;
}

/**
 * intel_vgpu_clean_mmio - clean MMIO space
 * @vgpu: a vGPU
 *
 */
void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu)
{
	vfree(vgpu->mmio.vreg);
	vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
}
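With the functions above moved into mmio.c, the vGPU MMIO space gets a small, self-contained lifecycle: intel_vgpu_init_mmio() allocates the vreg/sreg pair and immediately calls intel_vgpu_reset_mmio() to load the firmware snapshot, intel_vgpu_reset_mmio() can be re-run on any full reset, and intel_vgpu_clean_mmio() frees everything. A sketch of the expected call order (an illustrative wrapper, not code from the patch):

/* Sketch: intended use of the init/reset/clean trio added above. */
static int sketch_vgpu_mmio_lifecycle(struct intel_vgpu *vgpu)
{
	int ret;

	ret = intel_vgpu_init_mmio(vgpu);	/* vzalloc + initial reset */
	if (ret)
		return ret;

	/* ... later, on a full GT reset or device model level reset ... */
	intel_vgpu_reset_mmio(vgpu);		/* reload the firmware MMIO snapshot */

	/* ... and on vGPU destruction ... */
	intel_vgpu_clean_mmio(vgpu);		/* vfree + clear the pointers */
	return 0;
}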
@@ -86,6 +86,10 @@ struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
	*offset; \
})

int intel_vgpu_init_mmio(struct intel_vgpu *vgpu);
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu);
void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu);

int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa);

int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
@@ -36,9 +36,9 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
			vgpu->id))
		return -EINVAL;

	vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_ATOMIC |
			GFP_DMA32 | __GFP_ZERO,
			INTEL_GVT_OPREGION_PORDER);
	vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
			__GFP_ZERO,
			get_order(INTEL_GVT_OPREGION_SIZE));

	if (!vgpu_opregion(vgpu)->va)
		return -ENOMEM;

@@ -97,7 +97,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
	if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
		map_vgpu_opregion(vgpu, false);
		free_pages((unsigned long)vgpu_opregion(vgpu)->va,
				INTEL_GVT_OPREGION_PORDER);
				get_order(INTEL_GVT_OPREGION_SIZE));

		vgpu_opregion(vgpu)->va = NULL;
	}
@@ -50,8 +50,7 @@
#define INTEL_GVT_OPREGION_PARM 0x204

#define INTEL_GVT_OPREGION_PAGES 2
#define INTEL_GVT_OPREGION_PORDER 1
#define INTEL_GVT_OPREGION_SIZE (2 * 4096)
#define INTEL_GVT_OPREGION_SIZE (INTEL_GVT_OPREGION_PAGES * PAGE_SIZE)

#define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B)
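The reg.h change above replaces the hand-maintained allocation order with a size expressed in pages, and the opregion.c hunk derives the order at the call sites instead. On a kernel with 4 KiB pages the two are equivalent: INTEL_GVT_OPREGION_PAGES * PAGE_SIZE = 2 * 4096 = 8192 bytes, and get_order(8192) = 1, which is exactly the old INTEL_GVT_OPREGION_PORDER value, so the allocation size is unchanged.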
@@ -350,13 +350,15 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload;
	struct intel_vgpu *vgpu;
	int event;

	mutex_lock(&gvt->lock);

	workload = scheduler->current_workload[ring_id];
	vgpu = workload->vgpu;

	if (!workload->status && !workload->vgpu->resetting) {
	if (!workload->status && !vgpu->resetting) {
		wait_event(workload->shadow_ctx_status_wq,
			   !atomic_read(&workload->shadow_ctx_active));

@@ -364,8 +366,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)

		for_each_set_bit(event, workload->pending_events,
				 INTEL_GVT_EVENT_MAX)
			intel_vgpu_trigger_virtual_event(workload->vgpu,
					event);
			intel_vgpu_trigger_virtual_event(vgpu, event);
	}

	gvt_dbg_sched("ring id %d complete workload %p status %d\n",

@@ -373,11 +374,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)

	scheduler->current_workload[ring_id] = NULL;

	atomic_dec(&workload->vgpu->running_workload_num);

	list_del_init(&workload->list);
	workload->complete(workload);

	atomic_dec(&vgpu->running_workload_num);
	wake_up(&scheduler->workload_complete_wq);
	mutex_unlock(&gvt->lock);
}

@@ -459,11 +459,11 @@ static int workload_thread(void *priv)
		gvt_dbg_sched("will complete workload %p\n, status: %d\n",
			      workload, workload->status);

		complete_current_workload(gvt, ring_id);

		if (workload->req)
			i915_gem_request_put(fetch_and_zero(&workload->req));

		complete_current_workload(gvt, ring_id);

		if (need_force_wake)
			intel_uncore_forcewake_put(gvt->dev_priv,
					FORCEWAKE_ALL);
@@ -35,79 +35,6 @@
#include "gvt.h"
#include "i915_pvinfo.h"

static void clean_vgpu_mmio(struct intel_vgpu *vgpu)
{
	vfree(vgpu->mmio.vreg);
	vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
}

int setup_vgpu_mmio(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;

	if (vgpu->mmio.vreg)
		memset(vgpu->mmio.vreg, 0, info->mmio_size * 2);
	else {
		vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
		if (!vgpu->mmio.vreg)
			return -ENOMEM;
	}

	vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;

	memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
	memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);

	vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;

	/* set the bit 0:2(Core C-State ) to C0 */
	vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
	return 0;
}

static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu,
		struct intel_vgpu_creation_params *param)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;
	u16 *gmch_ctl;
	int i;

	memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
	       info->cfg_space_size);

	if (!param->primary) {
		vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
				INTEL_GVT_PCI_CLASS_VGA_OTHER;
		vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
				INTEL_GVT_PCI_CLASS_VGA_OTHER;
	}

	/* Show guest that there isn't any stolen memory.*/
	gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
	*gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);

	intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
				 gvt_aperture_pa_base(gvt), true);

	vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
					     | PCI_COMMAND_MEMORY
					     | PCI_COMMAND_MASTER);
	/*
	 * Clear the bar upper 32bit and let guest to assign the new value
	 */
	memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
	memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
	memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);

	for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
		vgpu->cfg_space.bar[i].size = pci_resource_len(
					      gvt->dev_priv->drm.pdev, i * 2);
		vgpu->cfg_space.bar[i].tracked = false;
	}
}

void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
	/* setup the ballooning information */

@@ -177,7 +104,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
		if (low_avail / min_low == 0)
			break;
		gvt->types[i].low_gm_size = min_low;
		gvt->types[i].high_gm_size = 3 * gvt->types[i].low_gm_size;
		gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U));
		gvt->types[i].fence = 4;
		gvt->types[i].max_instance = low_avail / min_low;
		gvt->types[i].avail_instance = gvt->types[i].max_instance;

@@ -217,7 +144,7 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
	 */
	low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE -
		gvt->gm.vgpu_allocated_low_gm_size;
	high_gm_avail = MB_TO_BYTES(256) * 3 - HOST_HIGH_GM_SIZE -
	high_gm_avail = MB_TO_BYTES(256) * 8UL - HOST_HIGH_GM_SIZE -
		gvt->gm.vgpu_allocated_high_gm_size;
	fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
		gvt->fence.vgpu_allocated_fence_num;

@@ -268,7 +195,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
	intel_vgpu_clean_gtt(vgpu);
	intel_gvt_hypervisor_detach_vgpu(vgpu);
	intel_vgpu_free_resource(vgpu);
	clean_vgpu_mmio(vgpu);
	intel_vgpu_clean_mmio(vgpu);
	vfree(vgpu);

	intel_gvt_update_vgpu_types(gvt);

@@ -300,11 +227,11 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
	vgpu->gvt = gvt;
	bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);

	setup_vgpu_cfg_space(vgpu, param);
	intel_vgpu_init_cfg_space(vgpu, param->primary);

	ret = setup_vgpu_mmio(vgpu);
	ret = intel_vgpu_init_mmio(vgpu);
	if (ret)
		goto out_free_vgpu;
		goto out_clean_idr;

	ret = intel_vgpu_alloc_resource(vgpu, param);
	if (ret)

@@ -354,7 +281,9 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
out_clean_vgpu_resource:
	intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
	clean_vgpu_mmio(vgpu);
	intel_vgpu_clean_mmio(vgpu);
out_clean_idr:
	idr_remove(&gvt->vgpu_idr, vgpu->id);
out_free_vgpu:
	vfree(vgpu);
	mutex_unlock(&gvt->lock);

@@ -398,7 +327,75 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
}

/**
 * intel_gvt_reset_vgpu - reset a virtual GPU
 * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset
 * @vgpu: virtual GPU
 * @dmlr: vGPU Device Model Level Reset or GT Reset
 * @engine_mask: engines to reset for GT reset
 *
 * This function is called when user wants to reset a virtual GPU through
 * device model reset or GT reset. The caller should hold the gvt lock.
 *
 * vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset
 * the whole vGPU to default state as when it is created. This vGPU function
 * is required both for functionary and security concerns.The ultimate goal
 * of vGPU FLR is that reuse a vGPU instance by virtual machines. When we
 * assign a vGPU to a virtual machine we must isse such reset first.
 *
 * Full GT Reset and Per-Engine GT Reset are soft reset flow for GPU engines
 * (Render, Blitter, Video, Video Enhancement). It is defined by GPU Spec.
 * Unlike the FLR, GT reset only reset particular resource of a vGPU per
 * the reset request. Guest driver can issue a GT reset by programming the
 * virtual GDRST register to reset specific virtual GPU engine or all
 * engines.
 *
 * The parameter dev_level is to identify if we will do DMLR or GT reset.
 * The parameter engine_mask is to specific the engines that need to be
 * resetted. If value ALL_ENGINES is given for engine_mask, it means
 * the caller requests a full GT reset that we will reset all virtual
 * GPU engines. For FLR, engine_mask is ignored.
 */
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
				 unsigned int engine_mask)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

	gvt_dbg_core("------------------------------------------\n");
	gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
		     vgpu->id, dmlr, engine_mask);
	vgpu->resetting = true;

	intel_vgpu_stop_schedule(vgpu);
	/*
	 * The current_vgpu will set to NULL after stopping the
	 * scheduler when the reset is triggered by current vgpu.
	 */
	if (scheduler->current_vgpu == NULL) {
		mutex_unlock(&gvt->lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&gvt->lock);
	}

	intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask);

	/* full GPU reset or device model level reset */
	if (engine_mask == ALL_ENGINES || dmlr) {
		intel_vgpu_reset_gtt(vgpu, dmlr);
		intel_vgpu_reset_resource(vgpu);
		intel_vgpu_reset_mmio(vgpu);
		populate_pvinfo_page(vgpu);

		if (dmlr)
			intel_vgpu_reset_cfg_space(vgpu);
	}

	vgpu->resetting = false;
	gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
	gvt_dbg_core("------------------------------------------\n");
}

/**
 * intel_gvt_reset_vgpu - reset a virtual GPU (Function Level)
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to reset a virtual GPU.

@@ -406,4 +403,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
 */
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->gvt->lock);
	intel_gvt_reset_vgpu_locked(vgpu, true, 0);
	mutex_unlock(&vgpu->gvt->lock);
}
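The kernel-doc above draws the line between the two reset flavours that now share intel_gvt_reset_vgpu_locked(): a Device Model Level Reset (dmlr=true) tears the vGPU back down to its just-created state, including the configuration space, while a GT reset (dmlr=false) is scoped to the engines named in engine_mask. The two entry points, reduced to a sketch that mirrors the hunks above (wrapper names are illustrative):

/* FLR requested by the device model: full reset, engine_mask ignored. */
void sketch_vgpu_flr(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->gvt->lock);
	intel_gvt_reset_vgpu_locked(vgpu, true, 0);
	mutex_unlock(&vgpu->gvt->lock);
}

/* Guest-triggered GT reset of selected engines; the GDRST MMIO handler
 * already runs with gvt->lock held, so it calls the locked helper directly. */
void sketch_vgpu_gt_reset(struct intel_vgpu *vgpu, unsigned int engine_mask)
{
	intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
}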
@@ -595,47 +595,21 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file)
{
	struct drm_device *dev = obj->base.dev;
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
	int ret;

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
	if (ret)
		return ret;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten) {
			ret = -EFAULT;
			goto out;
		}
	}
	if (copy_from_user(vaddr, user_data, args->size))
		return -EFAULT;

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(to_i915(dev));
	i915_gem_chipset_flush(to_i915(obj->base.dev));

out:
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
	return ret;
	return 0;
}

void *i915_gem_object_alloc(struct drm_device *dev)
@@ -199,6 +199,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
	}

	/* Unbinding will emit any required flushes */
	ret = 0;
	while (!list_empty(&eviction_list)) {
		vma = list_first_entry(&eviction_list,
				       struct i915_vma,
@@ -2967,6 +2967,9 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
	unsigned int rotation = plane_state->base.rotation;
	int ret;

	if (!plane_state->base.visible)
		return 0;

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->base.src,
@@ -180,7 +180,7 @@ static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)

	/* Enable polling and queue hotplug re-enabling. */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable_locked(dev);
		drm_kms_helper_poll_enable(dev);
		mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
				 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
	}

@@ -511,7 +511,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
	}

	if (enabled)
		drm_kms_helper_poll_enable_locked(dev);
		drm_kms_helper_poll_enable(dev);

	mutex_unlock(&dev->mode_config.mutex);
@@ -979,18 +979,8 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
						uint32_t *batch,
						uint32_t index)
{
	struct drm_i915_private *dev_priv = engine->i915;
	uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);

	/*
	 * WaDisableLSQCROPERFforOCL:kbl
	 * This WA is implemented in skl_init_clock_gating() but since
	 * this batch updates GEN8_L3SQCREG4 with default value we need to
	 * set this bit here to retain the WA during flush.
	 */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
		l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;

	wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
				   MI_SRM_LRM_GLOBAL_GTT));
	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
@@ -1095,14 +1095,6 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
		WA_SET_BIT_MASKED(HDC_CHICKEN0,
				  HDC_FENCE_DEST_SLM_DISABLE);

	/* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
	 * involving this register should also be added to WA batch as required.
	 */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
		/* WaDisableLSQCROPERFforOCL:kbl */
		I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
			   GEN8_LQSC_RO_PERF_DIS);

	/* WaToEnableHwFixForPushConstHWBug:kbl */
	if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
@@ -345,7 +345,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
{
	struct adreno_platform_config *config = pdev->dev.platform_data;
	struct msm_gpu *gpu = &adreno_gpu->base;
	struct msm_mmu *mmu;
	int ret;

	adreno_gpu->funcs = funcs;

@@ -385,8 +384,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		return ret;
	}

	mmu = gpu->aspace->mmu;
	if (mmu) {
	if (gpu->aspace && gpu->aspace->mmu) {
		struct msm_mmu *mmu = gpu->aspace->mmu;
		ret = mmu->funcs->attach(mmu, iommu_ports,
					 ARRAY_SIZE(iommu_ports));
		if (ret)
@@ -119,13 +119,7 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st

static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
	int i;
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;

	for_each_plane_in_state(state, plane, plane_state, i)
		mdp5_plane_complete_commit(plane, plane_state);

	if (mdp5_kms->smp)
		mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
@@ -104,8 +104,6 @@ struct mdp5_plane_state {

	/* assigned by crtc blender */
	enum mdp_mixer_stage_id stage;

	bool pending : 1;
};
#define to_mdp5_plane_state(x) \
		container_of(x, struct mdp5_plane_state, base)

@@ -232,8 +230,6 @@ int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms);
void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);

uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
void mdp5_plane_complete_commit(struct drm_plane *plane,
		struct drm_plane_state *state);
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary);
@@ -179,7 +179,6 @@ mdp5_plane_atomic_print_state(struct drm_printer *p,
	drm_printf(p, "\tzpos=%u\n", pstate->zpos);
	drm_printf(p, "\talpha=%u\n", pstate->alpha);
	drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage));
	drm_printf(p, "\tpending=%u\n", pstate->pending);
}

static void mdp5_plane_reset(struct drm_plane *plane)

@@ -220,8 +219,6 @@ mdp5_plane_duplicate_state(struct drm_plane *plane)
	if (mdp5_state && mdp5_state->base.fb)
		drm_framebuffer_reference(mdp5_state->base.fb);

	mdp5_state->pending = false;

	return &mdp5_state->base;
}

@@ -288,13 +285,6 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
	DBG("%s: check (%d -> %d)", plane->name,
			plane_enabled(old_state), plane_enabled(state));

	/* We don't allow faster-than-vblank updates.. if we did add this
	 * some day, we would need to disallow in cases where hwpipe
	 * changes
	 */
	if (WARN_ON(to_mdp5_plane_state(old_state)->pending))
		return -EBUSY;

	max_width = config->hw->lm.max_width << 16;
	max_height = config->hw->lm.max_height << 16;

@@ -370,12 +360,9 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
				     struct drm_plane_state *old_state)
{
	struct drm_plane_state *state = plane->state;
	struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);

	DBG("%s: update", plane->name);

	mdp5_state->pending = true;

	if (plane_enabled(state)) {
		int ret;

@@ -851,15 +838,6 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
	return pstate->hwpipe->flush_mask;
}

/* called after vsync in thread context */
void mdp5_plane_complete_commit(struct drm_plane *plane,
		struct drm_plane_state *state)
{
	struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);

	pstate->pending = false;
}

/* initialize plane */
struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary)
{
@@ -294,6 +294,8 @@ put_iova(struct drm_gem_object *obj)
	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
		if (!priv->aspace[id])
			continue;
		msm_gem_unmap_vma(priv->aspace[id],
				&msm_obj->domain[id], msm_obj->sgt);
	}
@@ -114,6 +114,9 @@ MODULE_FIRMWARE("radeon/hainan_mc.bin");
MODULE_FIRMWARE("radeon/hainan_rlc.bin");
MODULE_FIRMWARE("radeon/hainan_smc.bin");
MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");

MODULE_FIRMWARE("radeon/si58_mc.bin");

static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
static void si_pcie_gen3_enable(struct radeon_device *rdev);

@@ -1650,6 +1653,8 @@ static int si_init_microcode(struct radeon_device *rdev)
	int err;
	int new_fw = 0;
	bool new_smc = false;
	bool si58_fw = false;
	bool banks2_fw = false;

	DRM_DEBUG("\n");

@@ -1727,10 +1732,11 @@ static int si_init_microcode(struct radeon_device *rdev)
		     ((rdev->pdev->device == 0x6660) ||
		      (rdev->pdev->device == 0x6663) ||
		      (rdev->pdev->device == 0x6665) ||
		      (rdev->pdev->device == 0x6667))) ||
		    ((rdev->pdev->revision == 0xc3) &&
		     (rdev->pdev->device == 0x6665)))
		      (rdev->pdev->device == 0x6667))))
			new_smc = true;
		else if ((rdev->pdev->revision == 0xc3) &&
			 (rdev->pdev->device == 0x6665))
			banks2_fw = true;
		new_chip_name = "hainan";
		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
		me_req_size = SI_PM4_UCODE_SIZE * 4;

@@ -1742,6 +1748,10 @@ static int si_init_microcode(struct radeon_device *rdev)
	default: BUG();
	}

	/* this memory configuration requires special firmware */
	if (((RREG32(MC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
		si58_fw = true;

	DRM_INFO("Loading %s Microcode\n", new_chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);

@@ -1845,7 +1855,10 @@ static int si_init_microcode(struct radeon_device *rdev)
		}
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
	if (si58_fw)
		snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
	else
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
	err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
	if (err) {
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);

@@ -1876,7 +1889,9 @@ static int si_init_microcode(struct radeon_device *rdev)
		}
	}

	if (new_smc)
	if (banks2_fw)
		snprintf(fw_name, sizeof(fw_name), "radeon/banks_k_2_smc.bin");
	else if (new_smc)
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name);
	else
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
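The si.c hunks above add two independent firmware overrides for the new Hainan variants: si58_fw is keyed off the value read back from MC_SEQ_MISC0 (top byte equal to 0x58) and redirects the MC microcode, while banks2_fw keys off a specific PCI revision/device pair and redirects the SMC microcode. Reduced to a sketch covering just the MC selection (the helper and its arguments are illustrative, not part of the patch):

/* Sketch: pick the MC firmware name the way si_init_microcode() now does. */
static void sketch_pick_mc_fw(char *fw_name, size_t len,
			      u32 mc_seq_misc0, const char *chip_name)
{
	/* Top byte of MC_SEQ_MISC0 is the value the hunk above tests. */
	if (((mc_seq_misc0 & 0xff000000) >> 24) == 0x58)
		snprintf(fw_name, len, "radeon/si58_mc.bin");
	else
		snprintf(fw_name, len, "radeon/%s_mc.bin", chip_name);
}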
@@ -3008,17 +3008,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
		    (rdev->pdev->device == 0x6817) ||
		    (rdev->pdev->device == 0x6806))
			max_mclk = 120000;
	} else if (rdev->family == CHIP_OLAND) {
		if ((rdev->pdev->revision == 0xC7) ||
		    (rdev->pdev->revision == 0x80) ||
		    (rdev->pdev->revision == 0x81) ||
		    (rdev->pdev->revision == 0x83) ||
		    (rdev->pdev->revision == 0x87) ||
		    (rdev->pdev->device == 0x6604) ||
		    (rdev->pdev->device == 0x6605)) {
			max_sclk = 75000;
			max_mclk = 80000;
		}
	} else if (rdev->family == CHIP_HAINAN) {
		if ((rdev->pdev->revision == 0x81) ||
		    (rdev->pdev->revision == 0x83) ||

@@ -3027,7 +3016,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
		    (rdev->pdev->device == 0x6665) ||
		    (rdev->pdev->device == 0x6667)) {
			max_sclk = 75000;
			max_mclk = 80000;
		}
	}
	/* Apply dpm quirks */
@@ -331,7 +331,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
	info->fbops = &virtio_gpufb_ops;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;

	info->screen_base = obj->vmap;
	info->screen_buffer = obj->vmap;
	info->screen_size = obj->gem_base.size;
	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(info, &vfbdev->helper,
@@ -73,6 +73,5 @@ extern void drm_kms_helper_hotplug_event(struct drm_device *dev);

extern void drm_kms_helper_poll_disable(struct drm_device *dev);
extern void drm_kms_helper_poll_enable(struct drm_device *dev);
extern void drm_kms_helper_poll_enable_locked(struct drm_device *dev);

#endif