commit 0258d7a5e2
Merge tag 'drm-intel-next-fixes-2018-08-16-1' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

Fixes for:
- DP full color range.
- selftest for gem_object
- forcewake on suspend
- GPU reset

This also includes accumulated fixes from GVT:
- Fix an error code in gvt_dma_map_page() (Dan)
- Fix off by one error in intel_vgpu_write_fence() (Dan)
- Fix potential Spectre v1 (Gustavo)
- Fix workload free in vgpu release (Henry)
- Fix cleanup sequence in intel_gvt_clean_device (Henry)
- dmabuf mutex init place fix (Henry)
- possible memory leak in intel_vgpu_ioctl() err path (Yi)
- return error on cmd access check failure (Yan)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180816190335.GA7765@intel.com
drivers/gpu/drm/i915/gvt/aperture_gm.c

@@ -131,7 +131,7 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
 	assert_rpm_wakelock_held(dev_priv);
 
-	if (WARN_ON(fence > vgpu_fence_sz(vgpu)))
+	if (WARN_ON(fence >= vgpu_fence_sz(vgpu)))
 		return;
 
 	reg = vgpu->fence.regs[fence];
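The change above fixes an off-by-one: with vgpu_fence_sz(vgpu) fence registers, valid indices run from 0 to vgpu_fence_sz(vgpu) - 1, so the guard must also reject an index equal to the size. A minimal user-space sketch of the corrected bound check (names hypothetical):

    #include <stdio.h>

    #define NUM_FENCES 4

    static int fence_regs[NUM_FENCES];

    /* 'fence > NUM_FENCES' would wrongly accept fence == NUM_FENCES and
     * write one past the end of the array; '>=' rejects it. */
    static int write_fence(unsigned int fence, int value)
    {
        if (fence >= NUM_FENCES)
            return -1;
        fence_regs[fence] = value;
        return 0;
    }

    int main(void)
    {
        printf("fence 3 -> %d\n", write_fence(3, 42)); /* accepted */
        printf("fence 4 -> %d\n", write_fence(4, 42)); /* rejected */
        return 0;
    }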
drivers/gpu/drm/i915/gvt/cmd_parser.c

@@ -874,7 +874,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 	if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
 		gvt_vgpu_err("%s access to non-render register (%x)\n",
 				cmd, offset);
-		return 0;
+		return -EBADRQC;
 	}
 
 	if (is_shadowed_mmio(offset)) {
drivers/gpu/drm/i915/gvt/gvt.c

@@ -176,6 +176,7 @@ static const struct intel_gvt_ops intel_gvt_ops = {
 	.emulate_mmio_write = intel_vgpu_emulate_mmio_write,
 	.vgpu_create = intel_gvt_create_vgpu,
 	.vgpu_destroy = intel_gvt_destroy_vgpu,
+	.vgpu_release = intel_gvt_release_vgpu,
 	.vgpu_reset = intel_gvt_reset_vgpu,
 	.vgpu_activate = intel_gvt_activate_vgpu,
 	.vgpu_deactivate = intel_gvt_deactivate_vgpu,
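intel_gvt_ops is a table of function pointers through which the KVMGT backend drives vGPU lifecycle events; this hunk wires the new release callback in next to destroy. A reduced sketch of the ops-table idiom, with hypothetical types:

    #include <stdio.h>

    struct vgpu { int id; };

    struct gvt_ops {
        void (*vgpu_destroy)(struct vgpu *vgpu);
        void (*vgpu_release)(struct vgpu *vgpu);
    };

    static void my_destroy(struct vgpu *v) { printf("destroy %d\n", v->id); }
    static void my_release(struct vgpu *v) { printf("release %d\n", v->id); }

    /* Designated initializers keep the table order-independent, so a new
     * callback can be slotted in without disturbing its neighbours. */
    static const struct gvt_ops ops = {
        .vgpu_destroy = my_destroy,
        .vgpu_release = my_release,
    };

    int main(void)
    {
        struct vgpu v = { .id = 1 };
        ops.vgpu_release(&v); /* backends call through the table */
        ops.vgpu_destroy(&v);
        return 0;
    }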
@@ -315,6 +316,11 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
 	if (WARN_ON(!gvt))
 		return;
 
+	intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
+	intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
+	intel_gvt_cleanup_vgpu_type_groups(gvt);
+	intel_gvt_clean_vgpu_types(gvt);
+
 	intel_gvt_debugfs_clean(gvt);
 	clean_service_thread(gvt);
 	intel_gvt_clean_cmd_parser(gvt);

@@ -322,17 +328,10 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
 	intel_gvt_clean_workload_scheduler(gvt);
 	intel_gvt_clean_gtt(gvt);
 	intel_gvt_clean_irq(gvt);
-	intel_gvt_clean_mmio_info(gvt);
 	intel_gvt_free_firmware(gvt);
-
-	intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
-	intel_gvt_cleanup_vgpu_type_groups(gvt);
-	intel_gvt_clean_vgpu_types(gvt);
-
+	intel_gvt_clean_mmio_info(gvt);
 	idr_destroy(&gvt->vgpu_idr);
 
-	intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
-
 	kfree(dev_priv->gvt);
 	dev_priv->gvt = NULL;
 }
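Henry's cleanup-sequence fix reorders intel_gvt_clean_device() so the vGPUs (the consumers) are torn down before the infrastructure they depend on, and the idr is destroyed only after its last user. The underlying rule is to release in reverse order of acquisition; a hedged sketch with hypothetical setup/teardown pairs:

    #include <stdio.h>

    static void init_mmio(void)    { printf("init mmio\n"); }
    static void init_sched(void)   { printf("init sched\n"); }
    static void create_vgpus(void) { printf("create vgpus\n"); }

    static void destroy_vgpus(void) { printf("destroy vgpus\n"); }
    static void clean_sched(void)   { printf("clean sched\n"); }
    static void clean_mmio(void)    { printf("clean mmio\n"); }

    int main(void)
    {
        init_mmio();
        init_sched();
        create_vgpus();

        /* Teardown mirrors init in reverse: consumers first, shared
         * infrastructure last, so nothing is used after it is freed. */
        destroy_vgpus();
        clean_sched();
        clean_mmio();
        return 0;
    }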
drivers/gpu/drm/i915/gvt/gvt.h

@@ -486,6 +486,7 @@ void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu);
 struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
 					 struct intel_vgpu_type *type);
 void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_release_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 				 unsigned int engine_mask);
 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);

@@ -563,7 +564,8 @@ struct intel_gvt_ops {
 				 unsigned int);
 	struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
 				struct intel_vgpu_type *);
-	void (*vgpu_destroy)(struct intel_vgpu *);
+	void (*vgpu_destroy)(struct intel_vgpu *vgpu);
+	void (*vgpu_release)(struct intel_vgpu *vgpu);
 	void (*vgpu_reset)(struct intel_vgpu *);
 	void (*vgpu_activate)(struct intel_vgpu *);
 	void (*vgpu_deactivate)(struct intel_vgpu *);
drivers/gpu/drm/i915/gvt/kvmgt.c

@@ -43,6 +43,8 @@
 #include <linux/mdev.h>
 #include <linux/debugfs.h>
 
+#include <linux/nospec.h>
+
 #include "i915_drv.h"
 #include "gvt.h"
 
@@ -187,14 +189,14 @@ static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
 
 	/* Setup DMA mapping. */
 	*dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL);
-	ret = dma_mapping_error(dev, *dma_addr);
-	if (ret) {
+	if (dma_mapping_error(dev, *dma_addr)) {
 		gvt_vgpu_err("DMA mapping failed for pfn 0x%lx, ret %d\n",
 			     page_to_pfn(page), ret);
 		gvt_unpin_guest_page(vgpu, gfn, size);
+		return -ENOMEM;
 	}
 
-	return ret;
+	return 0;
 }
 
 static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
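Dan's fix gives gvt_dma_map_page() the conventional kernel contract: return 0 on success and a negative errno on failure. Before, the function fell through and returned whatever dma_mapping_error() produced, which is not a proper errno. A user-space sketch of the contract (hypothetical names):

    #include <stdio.h>
    #include <errno.h>

    /* Returns 0 on success, -ENOMEM on failure -- never a raw status
     * value from an intermediate check. */
    static int map_page(int fail, unsigned long *addr)
    {
        *addr = fail ? 0 : 0x1000;
        if (*addr == 0)
            return -ENOMEM;
        return 0;
    }

    int main(void)
    {
        unsigned long addr;
        int ret = map_page(1, &addr);

        if (ret)
            fprintf(stderr, "map failed: %d\n", ret);
        return 0;
    }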
@@ -666,7 +668,7 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
 	if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
 		return;
 
-	intel_gvt_ops->vgpu_deactivate(vgpu);
+	intel_gvt_ops->vgpu_release(vgpu);
 
 	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
 					&vgpu->vdev.iommu_notifier);
@@ -1139,7 +1141,8 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
 	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
 		struct vfio_region_info info;
 		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
-		int i, ret;
+		unsigned int i;
+		int ret;
 		struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
 		size_t size;
 		int nr_areas = 1;
@@ -1224,6 +1227,10 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
 		if (info.index >= VFIO_PCI_NUM_REGIONS +
 				vgpu->vdev.num_regions)
 			return -EINVAL;
+		info.index =
+			array_index_nospec(info.index,
+					VFIO_PCI_NUM_REGIONS +
+					vgpu->vdev.num_regions);
 
 		i = info.index - VFIO_PCI_NUM_REGIONS;
 
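array_index_nospec() re-bounds info.index after the range check so that a mispredicted branch cannot speculatively use an out-of-range, attacker-influenced index (Spectre v1). A simplified user-space approximation of the branch-free clamp; the kernel's real helper lives in <linux/nospec.h> and is architecture-tuned, so this is only illustrative:

    #include <stdio.h>

    /* Simplified version of the kernel's generic array_index_mask_nospec():
     * evaluates to ~0UL when 0 <= index < size and 0 otherwise, without a
     * branch (relies on arithmetic right shift, as the kernel does). */
    static unsigned long index_mask(unsigned long index, unsigned long size)
    {
        return ~(long)(index | (size - 1 - index)) >> (sizeof(long) * 8 - 1);
    }

    int main(void)
    {
        unsigned long table[4] = { 10, 20, 30, 40 };
        unsigned long idx = 2;

        if (idx < 4) {
            idx &= index_mask(idx, 4); /* clamp defeats speculation past the check */
            printf("%lu\n", table[idx]);
        }
        return 0;
    }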
@@ -1250,11 +1257,13 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
 					&sparse->header, sizeof(*sparse) +
 					(sparse->nr_areas *
 						sizeof(*sparse->areas)));
-			kfree(sparse);
-			if (ret)
+			if (ret) {
+				kfree(sparse);
 				return ret;
+			}
 			break;
 		default:
+			kfree(sparse);
 			return -EINVAL;
 		}
 	}

@@ -1270,6 +1279,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
 					sizeof(info), caps.buf,
 					caps.size)) {
 				kfree(caps.buf);
+				kfree(sparse);
 				return -EFAULT;
 			}
 			info.cap_offset = sizeof(info);

@@ -1278,6 +1288,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
 			kfree(caps.buf);
 		}
 
+		kfree(sparse);
 		return copy_to_user((void __user *)arg, &info, minsz) ?
 			-EFAULT : 0;
 	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
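Yi's leak fix makes every exit path of the VFIO_DEVICE_GET_REGION_INFO branch dispose of sparse exactly once: the error return, the default case, the copy_to_user failure, and the final return all free it. Since kfree(NULL) is a no-op, the unconditional free before the last return stays correct even on paths where sparse was never allocated. A sketch of the pattern:

    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>

    /* Every return path frees 'buf' exactly once; free(NULL) is a no-op,
     * which keeps the paths where nothing was allocated uniform. */
    static int get_info(int want_buf, int fail)
    {
        char *buf = NULL;

        if (want_buf) {
            buf = malloc(64);
            if (!buf)
                return -ENOMEM;
        }

        if (fail) {
            free(buf);
            return -EINVAL;
        }

        /* ... use buf ... */
        free(buf);
        return 0;
    }

    int main(void)
    {
        printf("%d %d %d\n", get_info(1, 0), get_info(1, 1), get_info(0, 0));
        return 0;
    }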
@@ -1615,7 +1626,6 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
 	kvmgt_protect_table_init(info);
 	gvt_cache_init(vgpu);
 
-	mutex_init(&vgpu->dmabuf_lock);
 	init_completion(&vgpu->vblank_done);
 
 	info->track_node.track_write = kvmgt_page_track_write;
drivers/gpu/drm/i915/gvt/scheduler.c

@@ -784,7 +784,8 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 	kunmap(page);
 }
 
-static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
+void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
+				unsigned long engine_mask)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;

@@ -879,7 +880,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 		 * cleaned up during the resetting process later, so doing
 		 * the workload clean up here doesn't have any impact.
 		 **/
-		clean_workloads(vgpu, ENGINE_MASK(ring_id));
+		intel_vgpu_clean_workloads(vgpu, ENGINE_MASK(ring_id));
 	}
 
 	workload->complete(workload);

@@ -1081,7 +1082,7 @@ void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
 	if (!s->active)
 		return;
 
-	clean_workloads(vgpu, engine_mask);
+	intel_vgpu_clean_workloads(vgpu, engine_mask);
 	s->ops->reset(vgpu, engine_mask);
 }
drivers/gpu/drm/i915/gvt/scheduler.h

@@ -158,4 +158,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 
 void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
 
+void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
+				unsigned long engine_mask);
+
 #endif
drivers/gpu/drm/i915/gvt/vgpu.c

@@ -222,7 +222,7 @@ void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
  * @vgpu: virtual GPU
  *
  * This function is called when user wants to deactivate a virtual GPU.
- * All virtual GPU runtime information will be destroyed.
+ * The virtual GPU will be stopped.
  *
  */
 void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)

@@ -238,11 +238,29 @@ void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
 	}
 
 	intel_vgpu_stop_schedule(vgpu);
-	intel_vgpu_dmabuf_cleanup(vgpu);
 
 	mutex_unlock(&vgpu->vgpu_lock);
 }
 
+/**
+ * intel_gvt_release_vgpu - release a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when user wants to release a virtual GPU.
+ * The virtual GPU will be stopped and all runtime information will be
+ * destroyed.
+ *
+ */
+void intel_gvt_release_vgpu(struct intel_vgpu *vgpu)
+{
+	intel_gvt_deactivate_vgpu(vgpu);
+
+	mutex_lock(&vgpu->vgpu_lock);
+	intel_vgpu_clean_workloads(vgpu, ALL_ENGINES);
+	intel_vgpu_dmabuf_cleanup(vgpu);
+	mutex_unlock(&vgpu->vgpu_lock);
+}
+
 /**
  * intel_gvt_destroy_vgpu - destroy a virtual GPU
  * @vgpu: virtual GPU
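The new intel_gvt_release_vgpu() splits teardown into two phases: deactivate only stops the vGPU (and remains usable on its own), while release additionally destroys runtime state such as pending workloads and dmabuf objects under vgpu_lock. A reduced sketch of the deactivate/release split (hypothetical names):

    #include <stdio.h>

    struct vgpu { int active; int workloads; };

    /* Phase 1: stop the vGPU; safe to call on its own. */
    static void vgpu_deactivate(struct vgpu *v)
    {
        v->active = 0;
        printf("deactivated\n");
    }

    /* Phase 2: deactivate, then destroy runtime state. */
    static void vgpu_release(struct vgpu *v)
    {
        vgpu_deactivate(v);
        v->workloads = 0;
        printf("released\n");
    }

    int main(void)
    {
        struct vgpu v = { .active = 1, .workloads = 3 };
        vgpu_release(&v);
        return 0;
    }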
@@ -361,6 +379,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 	vgpu->gvt = gvt;
 	vgpu->sched_ctl.weight = param->weight;
 	mutex_init(&vgpu->vgpu_lock);
+	mutex_init(&vgpu->dmabuf_lock);
 	INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
 	INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
 	idr_init(&vgpu->object_idr);
drivers/gpu/drm/i915/i915_reg.h

@@ -9201,6 +9201,7 @@ enum skl_power_gate {
 #define  TRANS_MSA_10_BPC		(2 << 5)
 #define  TRANS_MSA_12_BPC		(3 << 5)
 #define  TRANS_MSA_16_BPC		(4 << 5)
+#define  TRANS_MSA_CEA_RANGE		(1 << 3)
 
 /* LCPLL Control */
 #define LCPLL_CTL			_MMIO(0x130040)
drivers/gpu/drm/i915/intel_ddi.c

@@ -1685,6 +1685,10 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
 	WARN_ON(transcoder_is_dsi(cpu_transcoder));
 
 	temp = TRANS_MSA_SYNC_CLK;
+
+	if (crtc_state->limited_color_range)
+		temp |= TRANS_MSA_CEA_RANGE;
+
 	switch (crtc_state->pipe_bpp) {
 	case 18:
 		temp |= TRANS_MSA_6_BPC;
drivers/gpu/drm/i915/intel_ringbuffer.c

@@ -387,8 +387,18 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
 		mmio = RING_HWS_PGA(engine->mmio_base);
 	}
 
-	if (INTEL_GEN(dev_priv) >= 6)
-		I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
+	if (INTEL_GEN(dev_priv) >= 6) {
+		u32 mask = ~0u;
+
+		/*
+		 * Keep the render interrupt unmasked as this papers over
+		 * lost interrupts following a reset.
+		 */
+		if (engine->id == RCS)
+			mask &= ~BIT(0);
+
+		I915_WRITE(RING_HWSTAM(engine->mmio_base), mask);
+	}
 
 	I915_WRITE(mmio, engine->status_page.ggtt_offset);
 	POSTING_READ(mmio);
drivers/gpu/drm/i915/intel_uncore.c

@@ -359,8 +359,8 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
 }
 
 /* Note callers must have acquired the PUNIT->PMIC bus, before calling this. */
-static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
-					 bool restore)
+static unsigned int
+intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv)
 {
 	unsigned long irqflags;
 	struct intel_uncore_forcewake_domain *domain;

@@ -412,20 +412,11 @@ static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
 		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
 
 	fw_domains_reset(dev_priv, dev_priv->uncore.fw_domains);
-
-	if (restore) { /* If reset with a user forcewake, try to restore */
-		if (fw)
-			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
-
-		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
-			dev_priv->uncore.fifo_count =
-				fifo_free_entries(dev_priv);
-	}
-
-	if (!restore)
-		assert_forcewakes_inactive(dev_priv);
+	assert_forcewakes_inactive(dev_priv);
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+	return fw; /* track the lost user forcewake domains */
 }
 
 static u64 gen9_edram_size(struct drm_i915_private *dev_priv)

@@ -534,7 +525,7 @@ check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
 }
 
 static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
-					  bool restore_forcewake)
+					  unsigned int restore_forcewake)
 {
 	/* clear out unclaimed reg detection bit */
 	if (check_for_unclaimed_mmio(dev_priv))

@@ -549,7 +540,17 @@ static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
 	}
 
 	iosf_mbi_punit_acquire();
-	intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
+	intel_uncore_forcewake_reset(dev_priv);
+	if (restore_forcewake) {
+		spin_lock_irq(&dev_priv->uncore.lock);
+		dev_priv->uncore.funcs.force_wake_get(dev_priv,
+						      restore_forcewake);
+
+		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
+			dev_priv->uncore.fifo_count =
+				fifo_free_entries(dev_priv);
+		spin_unlock_irq(&dev_priv->uncore.lock);
+	}
 	iosf_mbi_punit_release();
 }

@@ -558,13 +559,18 @@ void intel_uncore_suspend(struct drm_i915_private *dev_priv)
 	iosf_mbi_punit_acquire();
 	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
 		&dev_priv->uncore.pmic_bus_access_nb);
-	intel_uncore_forcewake_reset(dev_priv, false);
+	dev_priv->uncore.fw_domains_saved =
+		intel_uncore_forcewake_reset(dev_priv);
 	iosf_mbi_punit_release();
 }
 
 void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
 {
-	__intel_uncore_early_sanitize(dev_priv, true);
+	unsigned int restore_forcewake;
+
+	restore_forcewake = fetch_and_zero(&dev_priv->uncore.fw_domains_saved);
+	__intel_uncore_early_sanitize(dev_priv, restore_forcewake);
+
 	iosf_mbi_register_pmic_bus_access_notifier(
 		&dev_priv->uncore.pmic_bus_access_nb);
 	i915_check_and_clear_faults(dev_priv);
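The suspend/resume hunks make intel_uncore_forcewake_reset() report which user-held forcewake domains it dropped; suspend stashes that bitmask in fw_domains_saved, and resume re-acquires exactly those domains via fetch_and_zero(). A user-space sketch of the save-and-restore-on-resume pattern (fetch_and_zero approximated with a plain read-and-clear):

    #include <stdio.h>

    static unsigned int held_domains;  /* currently held forcewake domains */
    static unsigned int domains_saved; /* stashed across suspend */

    /* Drop everything, but report what was held so it can be restored. */
    static unsigned int forcewake_reset(void)
    {
        unsigned int lost = held_domains;

        held_domains = 0;
        return lost;
    }

    static void suspend(void)
    {
        domains_saved = forcewake_reset();
    }

    static void resume(void)
    {
        /* The kernel's fetch_and_zero() reads and clears in one step;
         * a plain read-and-clear suffices for this sketch. */
        unsigned int restore = domains_saved;

        domains_saved = 0;
        held_domains |= restore;
    }

    int main(void)
    {
        held_domains = 0x3;
        suspend();
        printf("after suspend: %#x\n", held_domains);
        resume();
        printf("after resume:  %#x\n", held_domains);
        return 0;
    }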
@@ -1545,7 +1551,7 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
 
 	intel_uncore_edram_detect(dev_priv);
 	intel_uncore_fw_domains_init(dev_priv);
-	__intel_uncore_early_sanitize(dev_priv, false);
+	__intel_uncore_early_sanitize(dev_priv, 0);
 
 	dev_priv->uncore.unclaimed_mmio_check = 1;
 	dev_priv->uncore.pmic_bus_access_nb.notifier_call =

@@ -1632,7 +1638,7 @@ void intel_uncore_fini(struct drm_i915_private *dev_priv)
 	iosf_mbi_punit_acquire();
 	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
 		&dev_priv->uncore.pmic_bus_access_nb);
-	intel_uncore_forcewake_reset(dev_priv, false);
+	intel_uncore_forcewake_reset(dev_priv);
 	iosf_mbi_punit_release();
 }
drivers/gpu/drm/i915/intel_uncore.h

@@ -104,6 +104,7 @@ struct intel_uncore {
 
 	enum forcewake_domains fw_domains;
 	enum forcewake_domains fw_domains_active;
+	enum forcewake_domains fw_domains_saved; /* user domains saved for S3 */
 
 	u32 fw_set;
 	u32 fw_clear;
drivers/gpu/drm/i915/selftests/i915_gem_object.c

@@ -499,6 +499,19 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
 	return err == expected;
 }
 
+static void disable_retire_worker(struct drm_i915_private *i915)
+{
+	mutex_lock(&i915->drm.struct_mutex);
+	if (!i915->gt.active_requests++) {
+		intel_runtime_pm_get(i915);
+		i915_gem_unpark(i915);
+		intel_runtime_pm_put(i915);
+	}
+	mutex_unlock(&i915->drm.struct_mutex);
+	cancel_delayed_work_sync(&i915->gt.retire_work);
+	cancel_delayed_work_sync(&i915->gt.idle_work);
+}
+
 static int igt_mmap_offset_exhaustion(void *arg)
 {
 	struct drm_i915_private *i915 = arg;

@@ -509,12 +522,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
 	int loop, err;
 
 	/* Disable background reaper */
-	mutex_lock(&i915->drm.struct_mutex);
-	if (!i915->gt.active_requests++)
-		i915_gem_unpark(i915);
-	mutex_unlock(&i915->drm.struct_mutex);
-	cancel_delayed_work_sync(&i915->gt.retire_work);
-	cancel_delayed_work_sync(&i915->gt.idle_work);
+	disable_retire_worker(i915);
 	GEM_BUG_ON(!i915->gt.awake);
 
 	/* Trim the device mmap space to only a page */
drivers/gpu/drm/i915/selftests/intel_uncore.c

@@ -160,7 +160,7 @@ static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_pri
 	i915_reg_t reg = { offset };
 
 	iosf_mbi_punit_acquire();
-	intel_uncore_forcewake_reset(dev_priv, false);
+	intel_uncore_forcewake_reset(dev_priv);
 	iosf_mbi_punit_release();
 
 	check_for_unclaimed_mmio(dev_priv);