drm amdgpu, i915, meson, udl, tegra fixes
-----BEGIN PGP SIGNATURE-----

iQIcBAABAgAGBQJcnZWiAAoJEAx081l5xIa+0SEQAIECjILAICp/h0CitVe09fvU
NB1TimOsJwjhbJSHdUX4l/FqHRz6xczxnW3yDjteOdg5a/LLtyI+C0ZBaF/RYVK5
dXk8xNiu3Hoem/+W2shfLjTKKjxSp6Hul4Mx8BL/svClo33K76VuMrcoOAhrYq4G
dVbjB6iTSkT8IiYUsNfcddKj2S7rpObAoPindPavHqye2yzGrCDuRmYy5gxszpHs
IcKN+6f0voT0caijiXRDwQ/F97f6YghKVTizXsuwbbFa5wQuvNlZhCdT6ltORoab
n05/VA8diP+ttu85d+HGlUKtUcB01nkVqr4qAPOHq44CmLbsJKs+wp1zMN88xInT
koMR/ElG1DmwXQgbrNI7hrcJsh6N0DB9MUG2gGrz+Sa46aAYweb1c6YVdseR3o4x
cisMhIzxJ0IVlBI5fV7qxZrUrhzYzqtX7sBosVDiszs92EU8MvCcqMjIwXmOZexE
du6Exxq61Cx4xvBJIPTDWFvNHZVq9xKepCzgxmeKJmYx5yTZiS+hXJ5G+PL+JVri
I1O9esPfbXeW9Y7j/sm/pe3udoha9D6sg0/hnPNpUCGx0P7d/DhiIyUF7zEy+/CN
N3xKWxslpbwfNg6bbscxE01ToLGz/TX+R2ANcAmLQeWPFfs3CIfJzs9flncMvi8A
9reKUm+OZBeJ9q0nEYdb
=6CNL
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2019-03-29' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Weekly fixes roundup, nothing too serious, some USB device
  regressions are fixed, and i915 GVT has a bigger fix, but otherwise
  not much happening here.

  core:
   - fb bpp check regression fix
   - release/unplug fix
   - use-after-free fixes

  i915:
   - fix mmap range checks
   - fix GVT ppgtt mm LRU list access races
   - fix selftest error pointer check
   - fix a macro definition (pre-emptive for potential further backports)
   - fix one AML SKU ULX status

  amdgpu:
   - one variable refresh rate fix

  udl:
   - fix EDID reading

  tegra:
   - build/warning fixes

  meson:
   - cleanup path fixes
   - TMDS clock filter fix

  rockchip:
   - NV12 buffers and scaler fix"

* tag 'drm-fixes-2019-03-29' of git://anongit.freedesktop.org/drm/drm: (22 commits)
  drm/i915/icl: Fix VEBOX mismatch BUG_ON()
  drm/i915/selftests: Fix an IS_ERR() vs NULL check
  drm/i915: Mark AML 0x87CA as ULX
  drm/meson: fix TMDS clock filtering for DMT monitors
  drm/meson: Uninstall IRQ handler
  drm/meson: Fix invalid pointer in meson_drv_unbind()
  drm/udl: Refactor edid retrieving in UDL driver (v2)
  drm: Fix drm_release() and device unplug
  drm/fb: avoid setting 0 depth.
  drm/tegra: vic: Fix implicit function declaration warning
  drm/tegra: hub: Fix dereference before check
  drm/i915/icl: Fix the TRANS_DDI_FUNC_CTL2 bitfield macro
  drm/amd/display: Only allow VRR when vrefresh is within supported range
  drm/rockchip: vop: reset scale mode when win is disabled
  drm/vkms: fix use-after-free when drm_gem_handle_create() fails
  drm/vgem: fix use-after-free when drm_gem_handle_create() fails
  drm/i915/gvt: Add mutual lock for ppgtt mm LRU list
  drm/i915/gvt: Only assign ppgtt root at dispatch time
  drm/i915/gvt: Don't submit request for error workload dispatch
  drm/i915/gvt: stop scheduling workload when vgpu is inactive
  ...
commit 9df0ef6ca9
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -5429,9 +5429,11 @@ static void get_freesync_config_for_crtc(
 	struct amdgpu_dm_connector *aconnector =
 			to_amdgpu_dm_connector(new_con_state->base.connector);
 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
+	int vrefresh = drm_mode_vrefresh(mode);
 
 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
-					aconnector->min_vfreq <= drm_mode_vrefresh(mode);
+					vrefresh >= aconnector->min_vfreq &&
+					vrefresh <= aconnector->max_vfreq;
 
 	if (new_crtc_state->vrr_supported) {
 		new_crtc_state->stream->ignore_msa_timing_param = true;
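The change above closes the missing upper bound: previously any mode with a vrefresh at or above the panel's min_vfreq advertised VRR support, even when it exceeded max_vfreq. A minimal sketch of the resulting predicate (the helper name is hypothetical, not part of the patch):

	static bool vrr_in_range(bool freesync_capable, int vrefresh,
				 int min_vfreq, int max_vfreq)
	{
		/* VRR is only usable when the mode's nominal refresh rate
		 * falls inside the panel's supported FreeSync window. */
		return freesync_capable &&
		       vrefresh >= min_vfreq &&
		       vrefresh <= max_vfreq;
	}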
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -376,11 +376,7 @@ void drm_dev_unplug(struct drm_device *dev)
 	synchronize_srcu(&drm_unplug_srcu);
 
 	drm_dev_unregister(dev);
-
-	mutex_lock(&drm_global_mutex);
-	if (dev->open_count == 0)
-		drm_dev_put(dev);
-	mutex_unlock(&drm_global_mutex);
+	drm_dev_put(dev);
 }
 EXPORT_SYMBOL(drm_dev_unplug);
 
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1963,7 +1963,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
 			best_depth = fmt->depth;
 		}
 	}
-	if (sizes.surface_depth != best_depth) {
+	if (sizes.surface_depth != best_depth && best_depth) {
 		DRM_INFO("requested bpp %d, scaled depth down to %d",
 			 sizes.surface_bpp, best_depth);
 		sizes.surface_depth = best_depth;
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -489,11 +489,9 @@ int drm_release(struct inode *inode, struct file *filp)
 
 	drm_close_helper(filp);
 
-	if (!--dev->open_count) {
+	if (!--dev->open_count)
 		drm_lastclose(dev);
-		if (drm_dev_is_unplugged(dev))
-			drm_put_dev(dev);
-	}
+
 	mutex_unlock(&drm_global_mutex);
 
 	drm_minor_release(minor);
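Taken together, the two core hunks above rework unplug ownership: drm_dev_unplug() now always drops the reference it holds on behalf of the driver, instead of doing so only when no file handles happened to be open, and drm_release() no longer special-cases unplugged devices with drm_put_dev(). Each open file pins the device (acquired in drm_open() via drm_minor_acquire(), dropped in drm_minor_release()), so whichever drm_dev_put() runs last frees the device. A hedged sketch of a hot-unpluggable driver's disconnect path under these semantics (the example_* names are hypothetical):

	static void example_usb_disconnect(struct usb_interface *interface)
	{
		struct drm_device *drm = usb_get_intfdata(interface);

		/* Mark the device gone for drm_dev_enter()/drm_dev_exit()
		 * sections, unregister it, and drop the driver's reference;
		 * open files keep their own references, so the structure
		 * stays valid until the last one is closed. */
		drm_dev_unplug(drm);
	}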
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1441,7 +1441,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
 	}
 
 	if (index_mode)	{
-		if (guest_gma >= I915_GTT_PAGE_SIZE / sizeof(u64)) {
+		if (guest_gma >= I915_GTT_PAGE_SIZE) {
 			ret = -EFAULT;
 			goto err;
 		}
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1882,7 +1882,11 @@ struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
 	}
 
 	list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
+
+	mutex_lock(&gvt->gtt.ppgtt_mm_lock);
 	list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
+	mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
+
 	return mm;
 }
 
@@ -1967,9 +1971,10 @@ int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
 		if (ret)
 			return ret;
 
+		mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
 		list_move_tail(&mm->ppgtt_mm.lru_list,
 			       &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
-
+		mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
 	}
 
 	return 0;
@@ -1980,6 +1985,8 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
 	struct intel_vgpu_mm *mm;
 	struct list_head *pos, *n;
 
+	mutex_lock(&gvt->gtt.ppgtt_mm_lock);
+
 	list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
 		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);
 
@@ -1987,9 +1994,11 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
 			continue;
 
 		list_del_init(&mm->ppgtt_mm.lru_list);
+		mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
 		invalidate_ppgtt_mm(mm);
 		return 1;
 	}
+	mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
 	return 0;
 }
 
@@ -2659,6 +2668,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 		}
 	}
 	INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
+	mutex_init(&gvt->gtt.ppgtt_mm_lock);
 	return 0;
 }
 
@@ -2699,7 +2709,9 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
 	list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
 		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
 		if (mm->type == INTEL_GVT_MM_PPGTT) {
+			mutex_lock(&vgpu->gvt->gtt.ppgtt_mm_lock);
 			list_del_init(&mm->ppgtt_mm.lru_list);
+			mutex_unlock(&vgpu->gvt->gtt.ppgtt_mm_lock);
 			if (mm->ppgtt_mm.shadowed)
 				invalidate_ppgtt_mm(mm);
 		}
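The gtt.c hunks all enforce one new rule: the GVT-wide ppgtt_mm_lru_list_head, which is shared by every vGPU, may only be touched under the new gvt->gtt.ppgtt_mm_lock, and the lock is released before invalidate_ppgtt_mm(), which can block. Sketched in isolation:

	/* any add/move/delete on the shared LRU list holds the mutex ... */
	mutex_lock(&gvt->gtt.ppgtt_mm_lock);
	list_move_tail(&mm->ppgtt_mm.lru_list,
		       &gvt->gtt.ppgtt_mm_lru_list_head);
	mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
	/* ... but is dropped before potentially blocking work */
	invalidate_ppgtt_mm(mm);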
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -88,6 +88,7 @@ struct intel_gvt_gtt {
 	void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
 	struct list_head oos_page_use_list_head;
 	struct list_head oos_page_free_list_head;
+	struct mutex ppgtt_mm_lock;
 	struct list_head ppgtt_mm_lru_list_head;
 
 	struct page *scratch_page;
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -132,6 +132,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
 
 	{RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
 	{RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
+	{RCS, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */
 
 	{RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
 	{RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -346,7 +346,7 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
 	int i = 0;
 
 	if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
-		return -1;
+		return -EINVAL;
 
 	if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
 		px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0];
@@ -410,12 +410,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	if (workload->shadow)
 		return 0;
 
-	ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
-	if (ret < 0) {
-		gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
-		return ret;
-	}
-
 	/* pin shadow context by gvt even the shadow context will be pinned
 	 * when i915 alloc request. That is because gvt will update the guest
 	 * context from shadow context when workload is completed, and at that
@@ -678,6 +672,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+	struct i915_request *rq;
 	int ring_id = workload->ring_id;
 	int ret;
 
@@ -687,6 +684,12 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 	mutex_lock(&vgpu->vgpu_lock);
 	mutex_lock(&dev_priv->drm.struct_mutex);
 
+	ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
+	if (ret < 0) {
+		gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
+		goto err_req;
+	}
+
 	ret = intel_gvt_workload_req_alloc(workload);
 	if (ret)
 		goto err_req;
@@ -703,6 +706,14 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 
 	ret = prepare_workload(workload);
 out:
+	if (ret) {
+		/* We might still need to add request with
+		 * clean ctx to retire it properly..
+		 */
+		rq = fetch_and_zero(&workload->req);
+		i915_request_put(rq);
+	}
+
 	if (!IS_ERR_OR_NULL(workload->req)) {
 		gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
 			      ring_id, workload->req);
@@ -739,7 +750,8 @@ static struct intel_vgpu_workload *pick_next_workload(
 		goto out;
 	}
 
-	if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
+	if (!scheduler->current_vgpu->active ||
+	    list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
 		goto out;
 
 	/*
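Two behavioural changes land in scheduler.c: the shadow PPGTT root is now written into the shadow context at dispatch time, under struct_mutex, rather than at scan/shadow time, and a failed dispatch retires the request it already allocated instead of submitting it for an error workload. For reference, fetch_and_zero() used above is an i915 utility macro (from i915_utils.h) that reads a pointer and clears it in one expression; roughly, and hedged from memory:

	#define fetch_and_zero(ptr) ({			\
		typeof(*ptr) __T = *(ptr);		\
		*(ptr) = (typeof(*ptr))0;		\
		__T;					\
	})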
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2346,7 +2346,8 @@ static inline unsigned int i915_sg_segment_size(void)
 				 INTEL_DEVID(dev_priv) == 0x5915 || \
 				 INTEL_DEVID(dev_priv) == 0x591E)
 #define IS_AML_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x591C || \
-				 INTEL_DEVID(dev_priv) == 0x87C0)
+				 INTEL_DEVID(dev_priv) == 0x87C0 || \
+				 INTEL_DEVID(dev_priv) == 0x87CA)
 #define IS_SKL_GT2(dev_priv)	(IS_SKYLAKE(dev_priv) && \
 				 INTEL_INFO(dev_priv)->gt == 2)
 #define IS_SKL_GT3(dev_priv)	(IS_SKYLAKE(dev_priv) && \
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2863,7 +2863,7 @@ enum i915_power_well_id {
 #define GEN11_GT_VEBOX_VDBOX_DISABLE	_MMIO(0x9140)
 #define   GEN11_GT_VDBOX_DISABLE_MASK	0xff
 #define   GEN11_GT_VEBOX_DISABLE_SHIFT	16
-#define   GEN11_GT_VEBOX_DISABLE_MASK	(0xff << GEN11_GT_VEBOX_DISABLE_SHIFT)
+#define   GEN11_GT_VEBOX_DISABLE_MASK	(0x0f << GEN11_GT_VEBOX_DISABLE_SHIFT)
 
 #define GEN11_EU_DISABLE _MMIO(0x9134)
 #define GEN11_EU_DIS_MASK 0xFF
@@ -9243,7 +9243,7 @@ enum skl_power_gate {
 #define TRANS_DDI_FUNC_CTL2(tran)	_MMIO_TRANS2(tran, \
 						     _TRANS_DDI_FUNC_CTL2_A)
 #define  PORT_SYNC_MODE_ENABLE			(1 << 4)
-#define  PORT_SYNC_MODE_MASTER_SELECT(x)	((x) < 0)
+#define  PORT_SYNC_MODE_MASTER_SELECT(x)	((x) << 0)
 #define  PORT_SYNC_MODE_MASTER_SELECT_MASK	(0x7 << 0)
 #define  PORT_SYNC_MODE_MASTER_SELECT_SHIFT	0
 
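Both register-header fixes are one-character bugs. GEN11_GT_VEBOX_DISABLE_MASK treated the VEBOX disable fuse field as eight bits wide when, per the fix, only four are defined, so neighbouring fuse bits could leak into the decoded mask and trip the VEBOX-mismatch BUG_ON from the shortlog. PORT_SYNC_MODE_MASTER_SELECT(x) used < (compare) where << (shift) was intended, so the macro evaluated to 0 or 1 instead of placing x in bits 2:0; illustrated:

	/* before: ((2) < 0)  == 0    -- a comparison, always 0 or 1 */
	/* after:  ((2) << 0) == 2    -- the value lands in bits 2:0  */
	u32 ctl2 = PORT_SYNC_MODE_ENABLE | PORT_SYNC_MODE_MASTER_SELECT(2);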
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -455,7 +455,7 @@ static int igt_evict_contexts(void *arg)
 		struct i915_gem_context *ctx;
 
 		ctx = live_context(i915, file);
-		if (!ctx)
+		if (IS_ERR(ctx))
 			break;
 
 		/* We will need some GGTT space for the rq's context */
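live_context() reports failure via ERR_PTR(), never by returning NULL, so the old NULL test let error pointers escape. The general kernel idiom, sketched with a hypothetical ERR_PTR-returning constructor foo_create():

	#include <linux/err.h>

	struct foo *f = foo_create();

	if (IS_ERR(f))			/* a NULL check misses ERR_PTR values */
		return PTR_ERR(f);	/* recover the encoded -errno */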
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -337,12 +337,14 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
 
 	ret = drm_dev_register(drm, 0);
 	if (ret)
-		goto free_drm;
+		goto uninstall_irq;
 
 	drm_fbdev_generic_setup(drm, 32);
 
 	return 0;
 
+uninstall_irq:
+	drm_irq_uninstall(drm);
 free_drm:
 	drm_dev_put(drm);
 
@@ -356,8 +358,8 @@ static int meson_drv_bind(struct device *dev)
 
 static void meson_drv_unbind(struct device *dev)
 {
-	struct drm_device *drm = dev_get_drvdata(dev);
-	struct meson_drm *priv = drm->dev_private;
+	struct meson_drm *priv = dev_get_drvdata(dev);
+	struct drm_device *drm = priv->drm;
 
 	if (priv->canvas) {
 		meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
@@ -367,6 +369,7 @@ static void meson_drv_unbind(struct device *dev)
 	}
 
 	drm_dev_unregister(drm);
+	drm_irq_uninstall(drm);
 	drm_kms_helper_poll_fini(drm);
 	drm_mode_config_cleanup(drm);
 	drm_dev_put(drm);
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -569,7 +569,8 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
 	DRM_DEBUG_DRIVER("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
 
 	/* If sink max TMDS clock, we reject the mode */
-	if (mode->clock > connector->display_info.max_tmds_clock)
+	if (connector->display_info.max_tmds_clock &&
+	    mode->clock > connector->display_info.max_tmds_clock)
 		return MODE_BAD;
 
 	/* Check against non-VIC supported modes */
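display_info.max_tmds_clock is zero when the sink's EDID does not advertise a maximum TMDS clock, which is typical of DMT-only monitors; comparing every mode against zero therefore rejected them all. The corrected guard, restated generically, treats zero as "no limit reported" rather than "limit of 0 kHz":

	/* zero means the sink reported no limit, not a limit of 0 kHz */
	if (info->max_tmds_clock && mode->clock > info->max_tmds_clock)
		return MODE_BAD;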
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -541,6 +541,18 @@ static void vop_core_clks_disable(struct vop *vop)
 	clk_disable(vop->hclk);
 }
 
+static void vop_win_disable(struct vop *vop, const struct vop_win_data *win)
+{
+	if (win->phy->scl && win->phy->scl->ext) {
+		VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, SCALE_NONE);
+		VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, SCALE_NONE);
+		VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, SCALE_NONE);
+		VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, SCALE_NONE);
+	}
+
+	VOP_WIN_SET(vop, win, enable, 0);
+}
+
 static int vop_enable(struct drm_crtc *crtc)
 {
 	struct vop *vop = to_vop(crtc);
@@ -586,7 +598,7 @@ static int vop_enable(struct drm_crtc *crtc)
 		struct vop_win *vop_win = &vop->win[i];
 		const struct vop_win_data *win = vop_win->data;
 
-		VOP_WIN_SET(vop, win, enable, 0);
+		vop_win_disable(vop, win);
 	}
 	spin_unlock(&vop->reg_lock);
 
@@ -735,7 +747,7 @@ static void vop_plane_atomic_disable(struct drm_plane *plane,
 
 	spin_lock(&vop->reg_lock);
 
-	VOP_WIN_SET(vop, win, enable, 0);
+	vop_win_disable(vop, win);
 
 	spin_unlock(&vop->reg_lock);
 }
@@ -1622,7 +1634,7 @@ static int vop_initial(struct vop *vop)
 		int channel = i * 2 + 1;
 
 		VOP_WIN_SET(vop, win, channel, (channel + 1) << 4 | channel);
-		VOP_WIN_SET(vop, win, enable, 0);
+		vop_win_disable(vop, win);
 		VOP_WIN_SET(vop, win, gate, 1);
 	}
 
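The four rockchip hunks introduce and use a single helper: disabling a window now also resets its four scaler mode fields to SCALE_NONE, so a stale scaler setup (left behind by, say, a scaled NV12 plane) cannot affect the window when it is next enabled; this is the "NV12 buffers and scaler fix" from the pull summary. All three former bare-disable sites funnel through it:

	/* instead of the bare VOP_WIN_SET(vop, win, enable, 0) */
	vop_win_disable(vop, win);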
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -378,14 +378,16 @@ static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
 static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
 					      struct drm_plane_state *old_state)
 {
-	struct tegra_dc *dc = to_tegra_dc(old_state->crtc);
 	struct tegra_plane *p = to_tegra_plane(plane);
+	struct tegra_dc *dc;
 	u32 value;
 
 	/* rien ne va plus */
 	if (!old_state || !old_state->crtc)
 		return;
 
+	dc = to_tegra_dc(old_state->crtc);
+
 	/*
 	 * XXX Legacy helpers seem to sometimes call ->atomic_disable() even
 	 * on planes that are already disabled. Make sure we fallback to the
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -106,6 +106,7 @@ static int vic_boot(struct vic *vic)
 	if (vic->booted)
 		return 0;
 
+#ifdef CONFIG_IOMMU_API
 	if (vic->config->supports_sid) {
 		struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev);
 		u32 value;
@@ -121,6 +122,7 @@ static int vic_boot(struct vic *vic)
 			vic_writel(vic, value, VIC_THI_STREAMID1);
 		}
 	}
+#endif
 
 	/* setup clockgating registers */
 	vic_writel(vic, CG_IDLE_CG_DLY_CNT(4) |
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -18,18 +18,19 @@
 #include "udl_connector.h"
 #include "udl_drv.h"
 
-static bool udl_get_edid_block(struct udl_device *udl, int block_idx,
-			       u8 *buff)
+static int udl_get_edid_block(void *data, u8 *buf, unsigned int block,
+			      size_t len)
 {
 	int ret, i;
 	u8 *read_buff;
+	struct udl_device *udl = data;
 
 	read_buff = kmalloc(2, GFP_KERNEL);
 	if (!read_buff)
-		return false;
+		return -1;
 
-	for (i = 0; i < EDID_LENGTH; i++) {
-		int bval = (i + block_idx * EDID_LENGTH) << 8;
+	for (i = 0; i < len; i++) {
+		int bval = (i + block * EDID_LENGTH) << 8;
 		ret = usb_control_msg(udl->udev,
 				      usb_rcvctrlpipe(udl->udev, 0),
 				      (0x02), (0x80 | (0x02 << 5)), bval,
@@ -37,60 +38,13 @@ static bool udl_get_edid_block(struct udl_device *udl, int block_idx,
 		if (ret < 1) {
 			DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
 			kfree(read_buff);
-			return false;
+			return -1;
 		}
-		buff[i] = read_buff[1];
+		buf[i] = read_buff[1];
 	}
 
 	kfree(read_buff);
-	return true;
-}
-
-static bool udl_get_edid(struct udl_device *udl, u8 **result_buff,
-			 int *result_buff_size)
-{
-	int i, extensions;
-	u8 *block_buff = NULL, *buff_ptr;
-
-	block_buff = kmalloc(EDID_LENGTH, GFP_KERNEL);
-	if (block_buff == NULL)
-		return false;
-
-	if (udl_get_edid_block(udl, 0, block_buff) &&
-	    memchr_inv(block_buff, 0, EDID_LENGTH)) {
-		extensions = ((struct edid *)block_buff)->extensions;
-		if (extensions > 0) {
-			/* we have to read all extensions one by one */
-			*result_buff_size = EDID_LENGTH * (extensions + 1);
-			*result_buff = kmalloc(*result_buff_size, GFP_KERNEL);
-			buff_ptr = *result_buff;
-			if (buff_ptr == NULL) {
-				kfree(block_buff);
-				return false;
-			}
-			memcpy(buff_ptr, block_buff, EDID_LENGTH);
-			kfree(block_buff);
-			buff_ptr += EDID_LENGTH;
-			for (i = 1; i < extensions; ++i) {
-				if (udl_get_edid_block(udl, i, buff_ptr)) {
-					buff_ptr += EDID_LENGTH;
-				} else {
-					kfree(*result_buff);
-					*result_buff = NULL;
-					return false;
-				}
-			}
-			return true;
-		}
-		/* we have only base edid block */
-		*result_buff = block_buff;
-		*result_buff_size = EDID_LENGTH;
-		return true;
-	}
-
-	kfree(block_buff);
-
-	return false;
+	return 0;
 }
 
 static int udl_get_modes(struct drm_connector *connector)
@@ -122,8 +76,6 @@ static enum drm_mode_status udl_mode_valid(struct drm_connector *connector,
 static enum drm_connector_status
 udl_detect(struct drm_connector *connector, bool force)
 {
-	u8 *edid_buff = NULL;
-	int edid_buff_size = 0;
 	struct udl_device *udl = connector->dev->dev_private;
 	struct udl_drm_connector *udl_connector =
 				container_of(connector,
@@ -136,12 +88,10 @@ udl_detect(struct drm_connector *connector, bool force)
 		udl_connector->edid = NULL;
 	}
 
-
-	if (!udl_get_edid(udl, &edid_buff, &edid_buff_size))
+	udl_connector->edid = drm_do_get_edid(connector, udl_get_edid_block, udl);
+	if (!udl_connector->edid)
 		return connector_status_disconnected;
 
-	udl_connector->edid = (struct edid *)edid_buff;
-
 	return connector_status_connected;
 }
 
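The udl refactor deletes the driver's hand-rolled extension-block walker and plugs a block-read callback into the core helper instead. The drm_do_get_edid() contract, sketched with hypothetical my_* names: the callback copies len bytes of EDID block 'block' into buf and returns 0 on success, while the helper walks the base block plus any extensions and validates checksums:

	static int my_get_edid_block(void *data, u8 *buf,
				     unsigned int block, size_t len)
	{
		struct my_device *mydev = data;	/* passed through verbatim */

		return my_read_edid_block(mydev, block, buf, len);
	}

	/* in ->detect() or ->get_modes() */
	edid = drm_do_get_edid(connector, my_get_edid_block, mydev);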
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -191,13 +191,9 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
 	ret = drm_gem_handle_create(file, &obj->base, handle);
 	drm_gem_object_put_unlocked(&obj->base);
 	if (ret)
-		goto err;
+		return ERR_PTR(ret);
 
 	return &obj->base;
-
-err:
-	__vgem_gem_destroy(obj);
-	return ERR_PTR(ret);
 }
 
 static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -111,11 +111,8 @@ struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
 
 	ret = drm_gem_handle_create(file, &obj->gem, handle);
 	drm_gem_object_put_unlocked(&obj->gem);
-	if (ret) {
-		drm_gem_object_release(&obj->gem);
-		kfree(obj);
+	if (ret)
 		return ERR_PTR(ret);
-	}
 
 	return &obj->gem;
 }
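The vgem and vkms fixes are the same bug: drm_gem_handle_create() publishes the object, and the drm_gem_object_put_unlocked() that follows drops the creator's reference, so when handle creation fails that put already frees the object and the old error paths (__vgem_gem_destroy(), or drm_gem_object_release() plus kfree()) freed it a second time. The corrected ownership pattern, restated:

	ret = drm_gem_handle_create(file, &obj->base, handle);
	drm_gem_object_put_unlocked(&obj->base);  /* drop creator's ref either way */
	if (ret)
		return ERR_PTR(ret);	/* obj may already be gone: don't touch it */

	return &obj->base;		/* the handle now owns the object */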