Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes from Dave Airlie:
 "Fixes for i915, nouveau and radeon:

   - i915: haswell stability, modeset rework fallout, ums fix
   - nouveau: misc fixes from code rework
   - radeon: pll rework fixes, more 2 level PTE cleanups.
   - core: warning fixes on 32-bit."

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (31 commits)
  nouveau: fix warning on 32-bit build.
  drm/nouveau/bios: fix typo in error message
  drm/nouveau: only call ttm_agp_tt_create when __OS_HAS_AGP
  drm/nv50/fb: fix double free of vram mm
  drm/nouveau/pm: do not stop reclocking if failing to set the fan speed
  drm/nouveau/pm: fix a typo related to the move to the therm subdev
  drm/nouveau/hwmon: fix the initialization condition
  drm: fix warning on 32-bit.
  drm: radeon: fix printk format warning
  drm/radeon: fix spelling typos in debugging output
  drm/radeon: Don't destroy I2C Bus Rec in radeon_ext_tmds_enc_destroy().
  drm/radeon: check if pcie gen 2 is already enabled (v2)
  drm/radeon/cayman: set VM max pfn at MC init
  drm/radeon: separate pt alloc from lru add
  drm/radeon: don't add the IB pool to all VMs v2
  drm/radeon: allocate page tables on demand v4
  drm/radeon: update comments to clarify VM setup (v2)
  drm/radeon: allocate PPLLs from low to high
  drm/radeon: fix compilation with backlight disabled
  drm/radeon: use %zu for formatting size_t
  ...
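For readers who want to fetch the same branch locally, a pull of this kind is created with a single git command; the exact invocation below is an assumption reconstructed from the branch name and URL in the merge line above, not something recorded in the commit itself:

    # assumed invocation, based on the branch/URL named in the merge line
    git pull git://people.freedesktop.org/~airlied/linux drm-fixes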
commit 75fa29c700
@@ -667,7 +667,7 @@ static int intel_gtt_init(void)
gtt_map_size = intel_private.base.gtt_total_entries * 4;

intel_private.gtt = NULL;
if (INTEL_GTT_GEN < 6)
if (INTEL_GTT_GEN < 6 && INTEL_GTT_GEN > 2)
intel_private.gtt = ioremap_wc(intel_private.gtt_bus_addr,
gtt_map_size);
if (intel_private.gtt == NULL)
@@ -239,7 +239,7 @@ int drm_vma_info(struct seq_file *m, void *data)
mutex_lock(&dev->struct_mutex);
seq_printf(m, "vma use count: %d, high_memory = %pK, 0x%pK\n",
atomic_read(&dev->vma_count),
high_memory, (void *)virt_to_phys(high_memory));
high_memory, (void *)(unsigned long)virt_to_phys(high_memory));

list_for_each_entry(pt, &dev->vmalist, head) {
vma = pt->vma;
@@ -303,10 +303,10 @@ static bool ch7xxx_get_hw_state(struct intel_dvo_device *dvo)

ch7xxx_readb(dvo, CH7xxx_PM, &val);

if (val & CH7xxx_PM_FPD)
return false;
else
if (val & (CH7xxx_PM_DVIL | CH7xxx_PM_DVIP))
return true;
else
return false;
}

static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
@@ -1341,9 +1341,14 @@ int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
{
struct scatterlist *sg = obj->pages->sgl;
while (n >= SG_MAX_SINGLE_ALLOC) {
int nents = obj->pages->nents;
while (nents > SG_MAX_SINGLE_ALLOC) {
if (n < SG_MAX_SINGLE_ALLOC - 1)
break;

sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1);
n -= SG_MAX_SINGLE_ALLOC - 1;
nents -= SG_MAX_SINGLE_ALLOC - 1;
}
return sg_page(sg+n);
}
@@ -1427,7 +1432,7 @@ int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_idle(struct drm_device *dev);
int i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
struct drm_i915_gem_request *request);
u32 *seqno);
int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
@@ -1955,11 +1955,12 @@ i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
int
i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
struct drm_i915_gem_request *request)
u32 *out_seqno)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
uint32_t seqno;
struct drm_i915_gem_request *request;
u32 request_ring_position;
u32 seqno;
int was_empty;
int ret;
@@ -1974,11 +1975,9 @@ i915_add_request(struct intel_ring_buffer *ring,
if (ret)
return ret;

if (request == NULL) {
request = kmalloc(sizeof(*request), GFP_KERNEL);
if (request == NULL)
return -ENOMEM;
}
request = kmalloc(sizeof(*request), GFP_KERNEL);
if (request == NULL)
return -ENOMEM;

seqno = i915_gem_next_request_seqno(ring);
@@ -2030,6 +2029,8 @@ i915_add_request(struct intel_ring_buffer *ring,
}
}

if (out_seqno)
*out_seqno = seqno;
return 0;
}
@@ -3959,6 +3960,9 @@ i915_gem_init_hw(struct drm_device *dev)
if (!intel_enable_gtt())
return -EIO;

if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);

i915_gem_l3_remap(dev);

i915_gem_init_swizzling(dev);
@@ -4098,7 +4102,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
}

BUG_ON(!list_empty(&dev_priv->mm.active_list));
BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
mutex_unlock(&dev->struct_mutex);

ret = drm_irq_install(dev);
@@ -521,7 +521,7 @@
*/
# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
#define _3D_CHICKEN3 0x02090
#define _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL (1 << 5)
#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)

#define MI_MODE 0x0209c
# define VS_TIMER_DISPATCH (1 << 6)
@ -3253,6 +3253,16 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
|
|||
|
||||
if (HAS_PCH_CPT(dev))
|
||||
intel_cpt_verify_modeset(dev, intel_crtc->pipe);
|
||||
|
||||
/*
|
||||
* There seems to be a race in PCH platform hw (at least on some
|
||||
* outputs) where an enabled pipe still completes any pageflip right
|
||||
* away (as if the pipe is off) instead of waiting for vblank. As soon
|
||||
* as the first vblank happend, everything works as expected. Hence just
|
||||
* wait for one vblank before returning to avoid strange things
|
||||
* happening.
|
||||
*/
|
||||
intel_wait_for_vblank(dev, intel_crtc->pipe);
|
||||
}
|
||||
|
||||
static void ironlake_crtc_disable(struct drm_crtc *crtc)
|
||||
|
@ -7892,8 +7902,7 @@ static struct intel_quirk intel_quirks[] = {
|
|||
/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
|
||||
{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
|
||||
|
||||
/* 855 & before need to leave pipe A & dpll A up */
|
||||
{ 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
|
||||
/* 830/845 need to leave pipe A & dpll A up */
|
||||
{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
|
||||
{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
|
||||
|
||||
|
@ -8049,29 +8058,42 @@ static void intel_enable_pipe_a(struct drm_device *dev)
|
|||
|
||||
}
|
||||
|
||||
static bool
|
||||
intel_check_plane_mapping(struct intel_crtc *crtc)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
|
||||
u32 reg, val;
|
||||
|
||||
if (dev_priv->num_pipe == 1)
|
||||
return true;
|
||||
|
||||
reg = DSPCNTR(!crtc->plane);
|
||||
val = I915_READ(reg);
|
||||
|
||||
if ((val & DISPLAY_PLANE_ENABLE) &&
|
||||
(!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void intel_sanitize_crtc(struct intel_crtc *crtc)
|
||||
{
|
||||
struct drm_device *dev = crtc->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 reg, val;
|
||||
u32 reg;
|
||||
|
||||
/* Clear any frame start delays used for debugging left by the BIOS */
|
||||
reg = PIPECONF(crtc->pipe);
|
||||
I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
|
||||
|
||||
/* We need to sanitize the plane -> pipe mapping first because this will
|
||||
* disable the crtc (and hence change the state) if it is wrong. */
|
||||
if (!HAS_PCH_SPLIT(dev)) {
|
||||
* disable the crtc (and hence change the state) if it is wrong. Note
|
||||
* that gen4+ has a fixed plane -> pipe mapping. */
|
||||
if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
|
||||
struct intel_connector *connector;
|
||||
bool plane;
|
||||
|
||||
reg = DSPCNTR(crtc->plane);
|
||||
val = I915_READ(reg);
|
||||
|
||||
if ((val & DISPLAY_PLANE_ENABLE) == 0 &&
|
||||
(!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
|
||||
goto ok;
|
||||
|
||||
DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
|
||||
crtc->base.base.id);
|
||||
|
||||
|
@ -8095,7 +8117,6 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
|
|||
WARN_ON(crtc->active);
|
||||
crtc->base.enabled = false;
|
||||
}
|
||||
ok:
|
||||
|
||||
if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
|
||||
crtc->pipe == PIPE_A && !crtc->active) {
|
||||
|
|
|
@@ -2369,8 +2369,9 @@ static void
intel_dp_destroy(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct intel_dp *intel_dp = intel_attached_dp(connector);

if (intel_dpd_is_edp(dev))
if (is_edp(intel_dp))
intel_panel_destroy_backlight(dev);

drm_sysfs_connector_remove(connector);
@ -209,7 +209,6 @@ static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
|
|||
}
|
||||
|
||||
static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
|
||||
struct drm_i915_gem_request *request,
|
||||
void (*tail)(struct intel_overlay *))
|
||||
{
|
||||
struct drm_device *dev = overlay->dev;
|
||||
|
@ -218,12 +217,10 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
|
|||
int ret;
|
||||
|
||||
BUG_ON(overlay->last_flip_req);
|
||||
ret = i915_add_request(ring, NULL, request);
|
||||
if (ret) {
|
||||
kfree(request);
|
||||
return ret;
|
||||
}
|
||||
overlay->last_flip_req = request->seqno;
|
||||
ret = i915_add_request(ring, NULL, &overlay->last_flip_req);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
overlay->flip_tail = tail;
|
||||
ret = i915_wait_seqno(ring, overlay->last_flip_req);
|
||||
if (ret)
|
||||
|
@ -240,7 +237,6 @@ static int intel_overlay_on(struct intel_overlay *overlay)
|
|||
struct drm_device *dev = overlay->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
|
||||
struct drm_i915_gem_request *request;
|
||||
int ret;
|
||||
|
||||
BUG_ON(overlay->active);
|
||||
|
@ -248,17 +244,9 @@ static int intel_overlay_on(struct intel_overlay *overlay)
|
|||
|
||||
WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
|
||||
|
||||
request = kzalloc(sizeof(*request), GFP_KERNEL);
|
||||
if (request == NULL) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = intel_ring_begin(ring, 4);
|
||||
if (ret) {
|
||||
kfree(request);
|
||||
goto out;
|
||||
}
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
|
||||
intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
|
||||
|
@ -266,9 +254,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
|
|||
intel_ring_emit(ring, MI_NOOP);
|
||||
intel_ring_advance(ring);
|
||||
|
||||
ret = intel_overlay_do_wait_request(overlay, request, NULL);
|
||||
out:
|
||||
return ret;
|
||||
return intel_overlay_do_wait_request(overlay, NULL);
|
||||
}
|
||||
|
||||
/* overlay needs to be enabled in OCMD reg */
|
||||
|
@ -278,17 +264,12 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
|
|||
struct drm_device *dev = overlay->dev;
|
||||
drm_i915_private_t *dev_priv = dev->dev_private;
|
||||
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
|
||||
struct drm_i915_gem_request *request;
|
||||
u32 flip_addr = overlay->flip_addr;
|
||||
u32 tmp;
|
||||
int ret;
|
||||
|
||||
BUG_ON(!overlay->active);
|
||||
|
||||
request = kzalloc(sizeof(*request), GFP_KERNEL);
|
||||
if (request == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
if (load_polyphase_filter)
|
||||
flip_addr |= OFC_UPDATE;
|
||||
|
||||
|
@ -298,22 +279,14 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
|
|||
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
|
||||
|
||||
ret = intel_ring_begin(ring, 2);
|
||||
if (ret) {
|
||||
kfree(request);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
|
||||
intel_ring_emit(ring, flip_addr);
|
||||
intel_ring_advance(ring);
|
||||
|
||||
ret = i915_add_request(ring, NULL, request);
|
||||
if (ret) {
|
||||
kfree(request);
|
||||
return ret;
|
||||
}
|
||||
|
||||
overlay->last_flip_req = request->seqno;
|
||||
return 0;
|
||||
return i915_add_request(ring, NULL, &overlay->last_flip_req);
|
||||
}
|
||||
|
||||
static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
|
||||
|
@ -349,15 +322,10 @@ static int intel_overlay_off(struct intel_overlay *overlay)
|
|||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
|
||||
u32 flip_addr = overlay->flip_addr;
|
||||
struct drm_i915_gem_request *request;
|
||||
int ret;
|
||||
|
||||
BUG_ON(!overlay->active);
|
||||
|
||||
request = kzalloc(sizeof(*request), GFP_KERNEL);
|
||||
if (request == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
/* According to intel docs the overlay hw may hang (when switching
|
||||
* off) without loading the filter coeffs. It is however unclear whether
|
||||
* this applies to the disabling of the overlay or to the switching off
|
||||
|
@ -365,10 +333,9 @@ static int intel_overlay_off(struct intel_overlay *overlay)
|
|||
flip_addr |= OFC_UPDATE;
|
||||
|
||||
ret = intel_ring_begin(ring, 6);
|
||||
if (ret) {
|
||||
kfree(request);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* wait for overlay to go idle */
|
||||
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
|
||||
intel_ring_emit(ring, flip_addr);
|
||||
|
@ -379,8 +346,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
|
|||
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
|
||||
intel_ring_advance(ring);
|
||||
|
||||
return intel_overlay_do_wait_request(overlay, request,
|
||||
intel_overlay_off_tail);
|
||||
return intel_overlay_do_wait_request(overlay, intel_overlay_off_tail);
|
||||
}
|
||||
|
||||
/* recover from an interruption due to a signal
|
||||
|
@ -425,24 +391,16 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
|
|||
return 0;
|
||||
|
||||
if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
|
||||
struct drm_i915_gem_request *request;
|
||||
|
||||
/* synchronous slowpath */
|
||||
request = kzalloc(sizeof(*request), GFP_KERNEL);
|
||||
if (request == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = intel_ring_begin(ring, 2);
|
||||
if (ret) {
|
||||
kfree(request);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
|
||||
intel_ring_emit(ring, MI_NOOP);
|
||||
intel_ring_advance(ring);
|
||||
|
||||
ret = intel_overlay_do_wait_request(overlay, request,
|
||||
ret = intel_overlay_do_wait_request(overlay,
|
||||
intel_overlay_release_old_vid_tail);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
|
|
@@ -3442,8 +3442,8 @@ static void gen6_init_clock_gating(struct drm_device *dev)
GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

/* Bspec says we need to always set all mask bits. */
I915_WRITE(_3D_CHICKEN, (0xFFFF << 16) |
_3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL);
I915_WRITE(_3D_CHICKEN3, (0xFFFF << 16) |
_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL);

/*
* According to the spec the following bits should be
@@ -43,7 +43,7 @@ dcb_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
*ver = nv_ro08(bios, dcb);

if (*ver >= 0x41) {
nv_warn(bios, "DCB *ver 0x%02x unknown\n", *ver);
nv_warn(bios, "DCB version 0x%02x unknown\n", *ver);
return 0x0000;
} else
if (*ver >= 0x30) {
@@ -277,7 +277,6 @@ nv50_fb_dtor(struct nouveau_object *object)
__free_page(priv->r100c08_page);
}

nouveau_mm_fini(&priv->base.vram);
nouveau_fb_destroy(&priv->base);
}
@@ -134,7 +134,7 @@ nouveau_therm_fan_sense(struct nouveau_therm *therm)
end = ptimer->read(ptimer);

if (cycles == 5) {
tach = (u64)60000000000;
tach = (u64)60000000000ULL;
do_div(tach, (end - start));
return tach;
} else
@@ -456,6 +456,7 @@ static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
uint32_t page_flags, struct page *dummy_read)
{
#if __OS_HAS_AGP
struct nouveau_drm *drm = nouveau_bdev(bdev);
struct drm_device *dev = drm->dev;
@@ -463,6 +464,7 @@ nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
page_flags, dummy_read);
}
#endif

return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
}
@@ -52,7 +52,7 @@ nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl,
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_pm *pm = nouveau_pm(dev);
struct nouveau_therm *therm = nouveau_therm(drm);
struct nouveau_therm *therm = nouveau_therm(drm->device);
int ret;

/*XXX: not on all boards, we should control based on temperature
@@ -64,7 +64,6 @@ nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl,
ret = therm->fan_set(therm, perflvl->fanspeed);
if (ret && ret != -ENODEV) {
NV_ERROR(drm, "fanspeed set failed: %d\n", ret);
return ret;
}
}
@@ -706,8 +705,7 @@ nouveau_hwmon_init(struct drm_device *dev)
struct device *hwmon_dev;
int ret = 0;

if (!therm || !therm->temp_get || !therm->attr_get ||
!therm->attr_set || therm->temp_get(therm) < 0)
if (!therm || !therm->temp_get || !therm->attr_get || !therm->attr_set)
return -ENODEV;

hwmon_dev = hwmon_device_register(&dev->pdev->dev);
@@ -1690,10 +1690,10 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
}
/* all other cases */
pll_in_use = radeon_get_pll_use_mask(crtc);
if (!(pll_in_use & (1 << ATOM_PPLL2)))
return ATOM_PPLL2;
if (!(pll_in_use & (1 << ATOM_PPLL1)))
return ATOM_PPLL1;
if (!(pll_in_use & (1 << ATOM_PPLL2)))
return ATOM_PPLL2;
DRM_ERROR("unable to allocate a PPLL\n");
return ATOM_PPLL_INVALID;
} else {
@@ -1715,10 +1715,10 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
}
/* all other cases */
pll_in_use = radeon_get_pll_use_mask(crtc);
if (!(pll_in_use & (1 << ATOM_PPLL2)))
return ATOM_PPLL2;
if (!(pll_in_use & (1 << ATOM_PPLL1)))
return ATOM_PPLL1;
if (!(pll_in_use & (1 << ATOM_PPLL2)))
return ATOM_PPLL2;
DRM_ERROR("unable to allocate a PPLL\n");
return ATOM_PPLL_INVALID;
} else {
@@ -3431,9 +3431,14 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
if (!(mask & DRM_PCIE_SPEED_50))
return;

speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
if (speed_cntl & LC_CURRENT_DATA_RATE) {
DRM_INFO("PCIE gen 2 link speeds already enabled\n");
return;
}

DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");

speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
(speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
@@ -770,9 +770,13 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
WREG32(0x15DC, 0);

/* empty context1-7 */
/* Assign the pt base to something valid for now; the pts used for
* the VMs are determined by the application and setup and assigned
* on the fly in the vm part of radeon_gart.c
*/
for (i = 1; i < 8; i++) {
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), 0);
WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn);
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
rdev->gart.table_addr >> 12);
}
@@ -1572,12 +1576,6 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
if (vm == NULL)
return;

radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (vm->id << 2), 0));
radeon_ring_write(ring, 0);

radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (vm->id << 2), 0));
radeon_ring_write(ring, vm->last_pfn);

radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0));
radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
@@ -3703,6 +3703,12 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
if (!(mask & DRM_PCIE_SPEED_50))
return;

speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
if (speed_cntl & LC_CURRENT_DATA_RATE) {
DRM_INFO("PCIE gen 2 link speeds already enabled\n");
return;
}

DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");

/* 55 nm r6xx asics */
@@ -663,9 +663,14 @@ struct radeon_vm {
struct list_head list;
struct list_head va;
unsigned id;
unsigned last_pfn;
u64 pd_gpu_addr;
struct radeon_sa_bo *sa_bo;

/* contains the page directory */
struct radeon_sa_bo *page_directory;
uint64_t pd_gpu_addr;

/* array of page tables, one for each page directory entry */
struct radeon_sa_bo **page_tables;

struct mutex mutex;
/* last fence for cs using this vm */
struct radeon_fence *fence;
@@ -1843,9 +1848,10 @@ extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size
*/
int radeon_vm_manager_init(struct radeon_device *rdev);
void radeon_vm_manager_fini(struct radeon_device *rdev);
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm);
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
struct radeon_vm *vm, int ring);
void radeon_vm_fence(struct radeon_device *rdev,
@@ -201,7 +201,7 @@ static int radeon_atif_verify_interface(acpi_handle handle,

size = *(u16 *) info->buffer.pointer;
if (size < 12) {
DRM_INFO("ATIF buffer is too small: %lu\n", size);
DRM_INFO("ATIF buffer is too small: %zu\n", size);
err = -EINVAL;
goto out;
}
@@ -370,6 +370,7 @@ int radeon_atif_handler(struct radeon_device *rdev,

radeon_set_backlight_level(rdev, enc, req.backlight_level);

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
if (rdev->is_atom_bios) {
struct radeon_encoder_atom_dig *dig = enc->enc_priv;
backlight_force_update(dig->bl_dev,
@@ -379,6 +380,7 @@ int radeon_atif_handler(struct radeon_device *rdev,
backlight_force_update(dig->bl_dev,
BACKLIGHT_UPDATE_HOTKEY);
}
#endif
}
}
/* TODO: check other events */
@@ -485,7 +487,7 @@ static int radeon_atcs_verify_interface(acpi_handle handle,

size = *(u16 *) info->buffer.pointer;
if (size < 8) {
DRM_INFO("ATCS buffer is too small: %lu\n", size);
DRM_INFO("ATCS buffer is too small: %zu\n", size);
err = -EINVAL;
goto out;
}
@@ -148,7 +148,7 @@ static int radeon_atpx_verify_interface(struct radeon_atpx *atpx)

size = *(u16 *) info->buffer.pointer;
if (size < 8) {
printk("ATPX buffer is too small: %lu\n", size);
printk("ATPX buffer is too small: %zu\n", size);
err = -EINVAL;
goto out;
}
@@ -478,6 +478,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
}

out:
radeon_vm_add_to_lru(rdev, vm);
mutex_unlock(&vm->mutex);
mutex_unlock(&rdev->vm_manager.lock);
return r;
@@ -1018,6 +1018,10 @@ int radeon_device_init(struct radeon_device *rdev,
return r;
/* initialize vm here */
mutex_init(&rdev->vm_manager.lock);
/* Adjust VM size here.
* Currently set to 4GB ((1 << 20) 4k pages).
* Max GPUVM size for cayman and SI is 40 bits.
*/
rdev->vm_manager.max_pfn = 1 << 20;
INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
@ -422,6 +422,18 @@ void radeon_gart_fini(struct radeon_device *rdev)
|
|||
* TODO bind a default page at vm initialization for default address
|
||||
*/
|
||||
|
||||
/**
|
||||
* radeon_vm_num_pde - return the number of page directory entries
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
*
|
||||
* Calculate the number of page directory entries (cayman+).
|
||||
*/
|
||||
static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
|
||||
{
|
||||
return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE;
|
||||
}
|
||||
|
||||
/**
|
||||
* radeon_vm_directory_size - returns the size of the page directory in bytes
|
||||
*
|
||||
|
@ -431,7 +443,7 @@ void radeon_gart_fini(struct radeon_device *rdev)
|
|||
*/
|
||||
static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
|
||||
{
|
||||
return (rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE) * 8;
|
||||
return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -451,11 +463,11 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
|
|||
|
||||
if (!rdev->vm_manager.enabled) {
|
||||
/* allocate enough for 2 full VM pts */
|
||||
size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
|
||||
size += RADEON_GPU_PAGE_ALIGN(rdev->vm_manager.max_pfn * 8);
|
||||
size = radeon_vm_directory_size(rdev);
|
||||
size += rdev->vm_manager.max_pfn * 8;
|
||||
size *= 2;
|
||||
r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
|
||||
size,
|
||||
RADEON_GPU_PAGE_ALIGN(size),
|
||||
RADEON_GEM_DOMAIN_VRAM);
|
||||
if (r) {
|
||||
dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
|
||||
|
@ -476,7 +488,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
|
|||
|
||||
/* restore page table */
|
||||
list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) {
|
||||
if (vm->sa_bo == NULL)
|
||||
if (vm->page_directory == NULL)
|
||||
continue;
|
||||
|
||||
list_for_each_entry(bo_va, &vm->va, vm_list) {
|
||||
|
@ -500,16 +512,25 @@ static void radeon_vm_free_pt(struct radeon_device *rdev,
|
|||
struct radeon_vm *vm)
|
||||
{
|
||||
struct radeon_bo_va *bo_va;
|
||||
int i;
|
||||
|
||||
if (!vm->sa_bo)
|
||||
if (!vm->page_directory)
|
||||
return;
|
||||
|
||||
list_del_init(&vm->list);
|
||||
radeon_sa_bo_free(rdev, &vm->sa_bo, vm->fence);
|
||||
radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
|
||||
|
||||
list_for_each_entry(bo_va, &vm->va, vm_list) {
|
||||
bo_va->valid = false;
|
||||
}
|
||||
|
||||
if (vm->page_tables == NULL)
|
||||
return;
|
||||
|
||||
for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
|
||||
radeon_sa_bo_free(rdev, &vm->page_tables[i], vm->fence);
|
||||
|
||||
kfree(vm->page_tables);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -545,6 +566,35 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
|
|||
rdev->vm_manager.enabled = false;
|
||||
}
|
||||
|
||||
/**
|
||||
* radeon_vm_evict - evict page table to make room for new one
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @vm: VM we want to allocate something for
|
||||
*
|
||||
* Evict a VM from the lru, making sure that it isn't @vm. (cayman+).
|
||||
* Returns 0 for success, -ENOMEM for failure.
|
||||
*
|
||||
* Global and local mutex must be locked!
|
||||
*/
|
||||
int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
|
||||
{
|
||||
struct radeon_vm *vm_evict;
|
||||
|
||||
if (list_empty(&rdev->vm_manager.lru_vm))
|
||||
return -ENOMEM;
|
||||
|
||||
vm_evict = list_first_entry(&rdev->vm_manager.lru_vm,
|
||||
struct radeon_vm, list);
|
||||
if (vm_evict == vm)
|
||||
return -ENOMEM;
|
||||
|
||||
mutex_lock(&vm_evict->mutex);
|
||||
radeon_vm_free_pt(rdev, vm_evict);
|
||||
mutex_unlock(&vm_evict->mutex);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* radeon_vm_alloc_pt - allocates a page table for a VM
|
||||
*
|
||||
|
@ -552,57 +602,71 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
|
|||
* @vm: vm to bind
|
||||
*
|
||||
* Allocate a page table for the requested vm (cayman+).
|
||||
* Also starts to populate the page table.
|
||||
* Returns 0 for success, error for failure.
|
||||
*
|
||||
* Global and local mutex must be locked!
|
||||
*/
|
||||
int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
|
||||
{
|
||||
struct radeon_vm *vm_evict;
|
||||
int r;
|
||||
unsigned pd_size, pts_size;
|
||||
u64 *pd_addr;
|
||||
int tables_size;
|
||||
int r;
|
||||
|
||||
if (vm == NULL) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* allocate enough to cover the current VM size */
|
||||
tables_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
|
||||
tables_size += RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8);
|
||||
|
||||
if (vm->sa_bo != NULL) {
|
||||
/* update lru */
|
||||
list_del_init(&vm->list);
|
||||
list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
|
||||
if (vm->page_directory != NULL) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
retry:
|
||||
r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
|
||||
tables_size, RADEON_GPU_PAGE_SIZE, false);
|
||||
pd_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
|
||||
r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
|
||||
&vm->page_directory, pd_size,
|
||||
RADEON_GPU_PAGE_SIZE, false);
|
||||
if (r == -ENOMEM) {
|
||||
if (list_empty(&rdev->vm_manager.lru_vm)) {
|
||||
r = radeon_vm_evict(rdev, vm);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
|
||||
mutex_lock(&vm_evict->mutex);
|
||||
radeon_vm_free_pt(rdev, vm_evict);
|
||||
mutex_unlock(&vm_evict->mutex);
|
||||
goto retry;
|
||||
|
||||
} else if (r) {
|
||||
return r;
|
||||
}
|
||||
|
||||
pd_addr = radeon_sa_bo_cpu_addr(vm->sa_bo);
|
||||
vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->sa_bo);
|
||||
memset(pd_addr, 0, tables_size);
|
||||
vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory);
|
||||
|
||||
/* Initially clear the page directory */
|
||||
pd_addr = radeon_sa_bo_cpu_addr(vm->page_directory);
|
||||
memset(pd_addr, 0, pd_size);
|
||||
|
||||
pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *);
|
||||
vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
|
||||
|
||||
if (vm->page_tables == NULL) {
|
||||
DRM_ERROR("Cannot allocate memory for page table array\n");
|
||||
radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* radeon_vm_add_to_lru - add VMs page table to LRU list
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @vm: vm to add to LRU
|
||||
*
|
||||
* Add the allocated page table to the LRU list (cayman+).
|
||||
*
|
||||
* Global mutex must be locked!
|
||||
*/
|
||||
void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm)
|
||||
{
|
||||
list_del_init(&vm->list);
|
||||
list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
|
||||
return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo,
|
||||
&rdev->ring_tmp_bo.bo->tbo.mem);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -793,20 +857,6 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
|
|||
}
|
||||
|
||||
mutex_lock(&vm->mutex);
|
||||
if (last_pfn > vm->last_pfn) {
|
||||
/* release mutex and lock in right order */
|
||||
mutex_unlock(&vm->mutex);
|
||||
mutex_lock(&rdev->vm_manager.lock);
|
||||
mutex_lock(&vm->mutex);
|
||||
/* and check again */
|
||||
if (last_pfn > vm->last_pfn) {
|
||||
/* grow va space 32M by 32M */
|
||||
unsigned align = ((32 << 20) >> 12) - 1;
|
||||
radeon_vm_free_pt(rdev, vm);
|
||||
vm->last_pfn = (last_pfn + align) & ~align;
|
||||
}
|
||||
mutex_unlock(&rdev->vm_manager.lock);
|
||||
}
|
||||
head = &vm->va;
|
||||
last_offset = 0;
|
||||
list_for_each_entry(tmp, &vm->va, vm_list) {
|
||||
|
@ -864,6 +914,155 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
|
|||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* radeon_vm_update_pdes - make sure that page directory is valid
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @vm: requested vm
|
||||
* @start: start of GPU address range
|
||||
* @end: end of GPU address range
|
||||
*
|
||||
* Allocates new page tables if necessary
|
||||
* and updates the page directory (cayman+).
|
||||
* Returns 0 for success, error for failure.
|
||||
*
|
||||
* Global and local mutex must be locked!
|
||||
*/
|
||||
static int radeon_vm_update_pdes(struct radeon_device *rdev,
|
||||
struct radeon_vm *vm,
|
||||
uint64_t start, uint64_t end)
|
||||
{
|
||||
static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
|
||||
|
||||
uint64_t last_pde = ~0, last_pt = ~0;
|
||||
unsigned count = 0;
|
||||
uint64_t pt_idx;
|
||||
int r;
|
||||
|
||||
start = (start / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
|
||||
end = (end / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
|
||||
|
||||
/* walk over the address space and update the page directory */
|
||||
for (pt_idx = start; pt_idx <= end; ++pt_idx) {
|
||||
uint64_t pde, pt;
|
||||
|
||||
if (vm->page_tables[pt_idx])
|
||||
continue;
|
||||
|
||||
retry:
|
||||
r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
|
||||
&vm->page_tables[pt_idx],
|
||||
RADEON_VM_PTE_COUNT * 8,
|
||||
RADEON_GPU_PAGE_SIZE, false);
|
||||
|
||||
if (r == -ENOMEM) {
|
||||
r = radeon_vm_evict(rdev, vm);
|
||||
if (r)
|
||||
return r;
|
||||
goto retry;
|
||||
} else if (r) {
|
||||
return r;
|
||||
}
|
||||
|
||||
pde = vm->pd_gpu_addr + pt_idx * 8;
|
||||
|
||||
pt = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
|
||||
|
||||
if (((last_pde + 8 * count) != pde) ||
|
||||
((last_pt + incr * count) != pt)) {
|
||||
|
||||
if (count) {
|
||||
radeon_asic_vm_set_page(rdev, last_pde,
|
||||
last_pt, count, incr,
|
||||
RADEON_VM_PAGE_VALID);
|
||||
}
|
||||
|
||||
count = 1;
|
||||
last_pde = pde;
|
||||
last_pt = pt;
|
||||
} else {
|
||||
++count;
|
||||
}
|
||||
}
|
||||
|
||||
if (count) {
|
||||
radeon_asic_vm_set_page(rdev, last_pde, last_pt, count,
|
||||
incr, RADEON_VM_PAGE_VALID);
|
||||
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* radeon_vm_update_ptes - make sure that page tables are valid
|
||||
*
|
||||
* @rdev: radeon_device pointer
|
||||
* @vm: requested vm
|
||||
* @start: start of GPU address range
|
||||
* @end: end of GPU address range
|
||||
* @dst: destination address to map to
|
||||
* @flags: mapping flags
|
||||
*
|
||||
* Update the page tables in the range @start - @end (cayman+).
|
||||
*
|
||||
* Global and local mutex must be locked!
|
||||
*/
|
||||
static void radeon_vm_update_ptes(struct radeon_device *rdev,
|
||||
struct radeon_vm *vm,
|
||||
uint64_t start, uint64_t end,
|
||||
uint64_t dst, uint32_t flags)
|
||||
{
|
||||
static const uint64_t mask = RADEON_VM_PTE_COUNT - 1;
|
||||
|
||||
uint64_t last_pte = ~0, last_dst = ~0;
|
||||
unsigned count = 0;
|
||||
uint64_t addr;
|
||||
|
||||
start = start / RADEON_GPU_PAGE_SIZE;
|
||||
end = end / RADEON_GPU_PAGE_SIZE;
|
||||
|
||||
/* walk over the address space and update the page tables */
|
||||
for (addr = start; addr < end; ) {
|
||||
uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE;
|
||||
unsigned nptes;
|
||||
uint64_t pte;
|
||||
|
||||
if ((addr & ~mask) == (end & ~mask))
|
||||
nptes = end - addr;
|
||||
else
|
||||
nptes = RADEON_VM_PTE_COUNT - (addr & mask);
|
||||
|
||||
pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
|
||||
pte += (addr & mask) * 8;
|
||||
|
||||
if (((last_pte + 8 * count) != pte) ||
|
||||
((count + nptes) > 1 << 11)) {
|
||||
|
||||
if (count) {
|
||||
radeon_asic_vm_set_page(rdev, last_pte,
|
||||
last_dst, count,
|
||||
RADEON_GPU_PAGE_SIZE,
|
||||
flags);
|
||||
}
|
||||
|
||||
count = nptes;
|
||||
last_pte = pte;
|
||||
last_dst = dst;
|
||||
} else {
|
||||
count += nptes;
|
||||
}
|
||||
|
||||
addr += nptes;
|
||||
dst += nptes * RADEON_GPU_PAGE_SIZE;
|
||||
}
|
||||
|
||||
if (count) {
|
||||
radeon_asic_vm_set_page(rdev, last_pte, last_dst, count,
|
||||
RADEON_GPU_PAGE_SIZE, flags);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* radeon_vm_bo_update_pte - map a bo into the vm page table
|
||||
*
|
||||
|
@ -887,12 +1086,11 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
|
|||
struct radeon_semaphore *sem = NULL;
|
||||
struct radeon_bo_va *bo_va;
|
||||
unsigned nptes, npdes, ndw;
|
||||
uint64_t pe, addr;
|
||||
uint64_t pfn;
|
||||
uint64_t addr;
|
||||
int r;
|
||||
|
||||
/* nothing to do if vm isn't bound */
|
||||
if (vm->sa_bo == NULL)
|
||||
if (vm->page_directory == NULL)
|
||||
return 0;
|
||||
|
||||
bo_va = radeon_vm_bo_find(vm, bo);
|
||||
|
@ -939,25 +1137,29 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
|
|||
}
|
||||
}
|
||||
|
||||
/* estimate number of dw needed */
|
||||
/* reserve space for 32-bit padding */
|
||||
ndw = 32;
|
||||
|
||||
nptes = radeon_bo_ngpu_pages(bo);
|
||||
|
||||
pfn = (bo_va->soffset / RADEON_GPU_PAGE_SIZE);
|
||||
/* assume two extra pdes in case the mapping overlaps the borders */
|
||||
npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2;
|
||||
|
||||
/* handle cases where a bo spans several pdes */
|
||||
npdes = (ALIGN(pfn + nptes, RADEON_VM_PTE_COUNT) -
|
||||
(pfn & ~(RADEON_VM_PTE_COUNT - 1))) >> RADEON_VM_BLOCK_SIZE;
|
||||
/* estimate number of dw needed */
|
||||
/* semaphore, fence and padding */
|
||||
ndw = 32;
|
||||
|
||||
if (RADEON_VM_BLOCK_SIZE > 11)
|
||||
/* reserve space for one header for every 2k dwords */
|
||||
ndw += (nptes >> 11) * 3;
|
||||
else
|
||||
/* reserve space for one header for
|
||||
every (1 << BLOCK_SIZE) entries */
|
||||
ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 3;
|
||||
|
||||
/* reserve space for one header for every 2k dwords */
|
||||
ndw += (nptes >> 11) * 3;
|
||||
/* reserve space for pte addresses */
|
||||
ndw += nptes * 2;
|
||||
|
||||
/* reserve space for one header for every 2k dwords */
|
||||
ndw += (npdes >> 11) * 3;
|
||||
|
||||
/* reserve space for pde addresses */
|
||||
ndw += npdes * 2;
|
||||
|
||||
|
@ -971,22 +1173,14 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
|
|||
radeon_fence_note_sync(vm->fence, ridx);
|
||||
}
|
||||
|
||||
/* update page table entries */
|
||||
pe = vm->pd_gpu_addr;
|
||||
pe += radeon_vm_directory_size(rdev);
|
||||
pe += (bo_va->soffset / RADEON_GPU_PAGE_SIZE) * 8;
|
||||
r = radeon_vm_update_pdes(rdev, vm, bo_va->soffset, bo_va->eoffset);
|
||||
if (r) {
|
||||
radeon_ring_unlock_undo(rdev, ring);
|
||||
return r;
|
||||
}
|
||||
|
||||
radeon_asic_vm_set_page(rdev, pe, addr, nptes,
|
||||
RADEON_GPU_PAGE_SIZE, bo_va->flags);
|
||||
|
||||
/* update page directory entries */
|
||||
addr = pe;
|
||||
|
||||
pe = vm->pd_gpu_addr;
|
||||
pe += ((bo_va->soffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE) * 8;
|
||||
|
||||
radeon_asic_vm_set_page(rdev, pe, addr, npdes,
|
||||
RADEON_VM_PTE_COUNT * 8, RADEON_VM_PAGE_VALID);
|
||||
radeon_vm_update_ptes(rdev, vm, bo_va->soffset, bo_va->eoffset,
|
||||
addr, bo_va->flags);
|
||||
|
||||
radeon_fence_unref(&vm->fence);
|
||||
r = radeon_fence_emit(rdev, &vm->fence, ridx);
|
||||
|
@ -997,6 +1191,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
|
|||
radeon_ring_unlock_commit(rdev, ring);
|
||||
radeon_semaphore_free(rdev, &sem, vm->fence);
|
||||
radeon_fence_unref(&vm->last_flush);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1056,31 +1251,15 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
|
|||
* @rdev: radeon_device pointer
|
||||
* @vm: requested vm
|
||||
*
|
||||
* Init @vm (cayman+).
|
||||
* Map the IB pool and any other shared objects into the VM
|
||||
* by default as it's used by all VMs.
|
||||
* Returns 0 for success, error for failure.
|
||||
* Init @vm fields (cayman+).
|
||||
*/
|
||||
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
|
||||
void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
|
||||
{
|
||||
struct radeon_bo_va *bo_va;
|
||||
int r;
|
||||
|
||||
vm->id = 0;
|
||||
vm->fence = NULL;
|
||||
vm->last_pfn = 0;
|
||||
mutex_init(&vm->mutex);
|
||||
INIT_LIST_HEAD(&vm->list);
|
||||
INIT_LIST_HEAD(&vm->va);
|
||||
|
||||
/* map the ib pool buffer at 0 in virtual address space, set
|
||||
* read only
|
||||
*/
|
||||
bo_va = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo);
|
||||
r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
|
||||
RADEON_VM_PAGE_READABLE |
|
||||
RADEON_VM_PAGE_SNOOPED);
|
||||
return r;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1102,17 +1281,6 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
|
|||
radeon_vm_free_pt(rdev, vm);
|
||||
mutex_unlock(&rdev->vm_manager.lock);
|
||||
|
||||
/* remove all bo at this point non are busy any more because unbind
|
||||
* waited for the last vm fence to signal
|
||||
*/
|
||||
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
|
||||
if (!r) {
|
||||
bo_va = radeon_vm_bo_find(vm, rdev->ring_tmp_bo.bo);
|
||||
list_del_init(&bo_va->bo_list);
|
||||
list_del_init(&bo_va->vm_list);
|
||||
radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
|
||||
kfree(bo_va);
|
||||
}
|
||||
if (!list_empty(&vm->va)) {
|
||||
dev_err(rdev->dev, "still active bo inside vm\n");
|
||||
}
|
||||
|
|
|
@@ -419,6 +419,7 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
/* new gpu have virtual address space support */
if (rdev->family >= CHIP_CAYMAN) {
struct radeon_fpriv *fpriv;
struct radeon_bo_va *bo_va;
int r;

fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
@@ -426,7 +427,15 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
return -ENOMEM;
}

r = radeon_vm_init(rdev, &fpriv->vm);
radeon_vm_init(rdev, &fpriv->vm);

/* map the ib pool buffer read only into
* virtual address space */
bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
rdev->ring_tmp_bo.bo);
r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
RADEON_VM_PAGE_READABLE |
RADEON_VM_PAGE_SNOOPED);
if (r) {
radeon_vm_fini(rdev, &fpriv->vm);
kfree(fpriv);
@@ -454,6 +463,17 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
/* new gpu have virtual address space support */
if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
struct radeon_fpriv *fpriv = file_priv->driver_priv;
struct radeon_bo_va *bo_va;
int r;

r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (!r) {
bo_va = radeon_vm_bo_find(&fpriv->vm,
rdev->ring_tmp_bo.bo);
if (bo_va)
radeon_vm_bo_rmv(rdev, bo_va);
radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
}

radeon_vm_fini(rdev, &fpriv->vm);
kfree(fpriv);
@ -269,27 +269,6 @@ static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
|
|||
.disable = radeon_legacy_encoder_disable,
|
||||
};
|
||||
|
||||
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
|
||||
|
||||
static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd)
|
||||
{
|
||||
struct radeon_backlight_privdata *pdata = bl_get_data(bd);
|
||||
uint8_t level;
|
||||
|
||||
/* Convert brightness to hardware level */
|
||||
if (bd->props.brightness < 0)
|
||||
level = 0;
|
||||
else if (bd->props.brightness > RADEON_MAX_BL_LEVEL)
|
||||
level = RADEON_MAX_BL_LEVEL;
|
||||
else
|
||||
level = bd->props.brightness;
|
||||
|
||||
if (pdata->negative)
|
||||
level = RADEON_MAX_BL_LEVEL - level;
|
||||
|
||||
return level;
|
||||
}
|
||||
|
||||
u8
|
||||
radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder)
|
||||
{
|
||||
|
@ -331,6 +310,27 @@ radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 leve
|
|||
radeon_legacy_lvds_update(&radeon_encoder->base, dpms_mode);
|
||||
}
|
||||
|
||||
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
|
||||
|
||||
static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd)
|
||||
{
|
||||
struct radeon_backlight_privdata *pdata = bl_get_data(bd);
|
||||
uint8_t level;
|
||||
|
||||
/* Convert brightness to hardware level */
|
||||
if (bd->props.brightness < 0)
|
||||
level = 0;
|
||||
else if (bd->props.brightness > RADEON_MAX_BL_LEVEL)
|
||||
level = RADEON_MAX_BL_LEVEL;
|
||||
else
|
||||
level = bd->props.brightness;
|
||||
|
||||
if (pdata->negative)
|
||||
level = RADEON_MAX_BL_LEVEL - level;
|
||||
|
||||
return level;
|
||||
}
|
||||
|
||||
static int radeon_legacy_backlight_update_status(struct backlight_device *bd)
|
||||
{
|
||||
struct radeon_backlight_privdata *pdata = bl_get_data(bd);
|
||||
|
@@ -991,11 +991,7 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
if (tmds) {
if (tmds->i2c_bus)
radeon_i2c_destroy(tmds->i2c_bus);
}
/* don't destroy the i2c bus record here, this will be done in radeon_i2c_fini */
kfree(radeon_encoder->enc_priv);
drm_encoder_cleanup(encoder);
kfree(radeon_encoder);
@@ -305,7 +305,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
#if DRM_DEBUG_CODE
if (ring->count_dw <= 0) {
DRM_ERROR("radeon: writting more dword to ring than expected !\n");
DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
}
#endif
ring->ring[ring->wptr++] = v;
@@ -2407,12 +2407,13 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
WREG32(0x15DC, 0);

/* empty context1-15 */
/* FIXME start with 4G, once using 2 level pt switch to full
* vm size space
*/
/* set vm size, must be a multiple of 4 */
WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
/* Assign the pt base to something valid for now; the pts used for
* the VMs are determined by the application and setup and assigned
* on the fly in the vm part of radeon_gart.c
*/
for (i = 1; i < 16; i++) {
if (i < 8)
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),