Merge tag 'drm-intel-next-2013-07-12' of git://people.freedesktop.org/~danvet/drm-intel into drm-next

Highlights:
- follow-up refactoring after the shared dpll rework that landed in 3.11
- oddball prep cleanups from Ben for ppgtt
- encoder->get_config state tracking infrastructure from Jesse
- used by the experimental fastboot support from Jesse (disabled by default)
- make the error state file official and add it to our sysfs interface (Mika)
- drm_mm prep changes from Ben, prepares to embed the drm_mm_node (which
  will be used by the vma rework later on)
- interrupt handling rework, follow-up cleanups to the VECS enabling, hpd
  storm handling and fifo underrun reporting
- big pile of smaller cleanups, code improvements and related stuff

* tag 'drm-intel-next-2013-07-12' of git://people.freedesktop.org/~danvet/drm-intel: (72 commits)
  drm/i915: clear DPLL reg when disabling i9xx dplls
  drm/i915: Fix up cpt pixel multiplier enable sequence
  drm/i915: clean up vlv ->pre_pll_enable and pll enable sequence
  drm/i915: move error state to own compilation unit
  drm/i915: Don't attempt to read an unitialized stack value
  drm/i915: Use for_each_pipe() when possible
  drm/i915: don't enable PM_VEBOX_CS_ERROR_INTERRUPT
  drm/i915: unify ring irq refcounts (again)
  drm/i915: kill dev_priv->rps.lock
  drm/i915: queue work outside spinlock in hsw_pm_irq_handler
  drm/i915: streamline hsw_pm_irq_handler
  drm/i915: irq handlers don't need interrupt-safe spinlocks
  drm/i915: kill lpt pch transcoder->crtc mapping code for fifo underruns
  drm/i915: improve GEN7_ERR_INT clearing for fifo underrun reporting
  drm/i915: improve SERR_INT clearing for fifo underrun reporting
  drm/i915: extract ibx_display_interrupt_update
  drm/i915: remove unused members from drm_i915_private
  drm/i915: don't frob mm.suspended when not using ums
  drm/i915: Fix VLV DP RBR/HDMI/DAC PLL LPF coefficients
  drm/i915: WARN if the bios reserved range is bigger than stolen size
  ...

Conflicts:
	drivers/gpu/drm/i915/i915_gem.c
This commit is contained in: commit e13af9a834

Changed files:
  drivers/gpu/drm/drm_mm.c
  drivers/gpu/drm/i915/Makefile
  drivers/gpu/drm/i915/i915_debugfs.c
  drivers/gpu/drm/i915/i915_dma.c
  drivers/gpu/drm/i915/i915_drv.c
  drivers/gpu/drm/i915/i915_drv.h
  drivers/gpu/drm/i915/i915_gem.c
  drivers/gpu/drm/i915/i915_gem_context.c
  drivers/gpu/drm/i915/i915_gem_evict.c
  drivers/gpu/drm/i915/i915_gem_execbuffer.c
  drivers/gpu/drm/i915/i915_gem_gtt.c
  drivers/gpu/drm/i915/i915_gem_stolen.c
  drivers/gpu/drm/i915/i915_gem_tiling.c
  drivers/gpu/drm/i915/i915_gpu_error.c
  drivers/gpu/drm/i915/i915_irq.c
  drivers/gpu/drm/i915/i915_reg.h
  drivers/gpu/drm/i915/i915_sysfs.c
  drivers/gpu/drm/i915/i915_trace.h
  drivers/gpu/drm/i915/intel_display.c
  drivers/gpu/drm/i915/intel_dp.c
  drivers/gpu/drm/i915/intel_drv.h
  drivers/gpu/drm/i915/intel_dvo.c
  drivers/gpu/drm/i915/intel_fb.c
  drivers/gpu/drm/i915/intel_lvds.c
  drivers/gpu/drm/i915/intel_overlay.c
  drivers/gpu/drm/i915/intel_pm.c
  drivers/gpu/drm/i915/intel_ringbuffer.c
  drivers/gpu/drm/i915/intel_ringbuffer.h
  drivers/gpu/drm/i915/intel_sdvo.c
  drivers/gpu/drm/i915/intel_sprite.c
  include/drm
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -147,33 +147,27 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
 	}
 }
 
-struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
-					unsigned long start,
-					unsigned long size,
-					bool atomic)
+int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 {
-	struct drm_mm_node *hole, *node;
-	unsigned long end = start + size;
+	struct drm_mm_node *hole;
+	unsigned long end = node->start + node->size;
 	unsigned long hole_start;
 	unsigned long hole_end;
 
+	BUG_ON(node == NULL);
+
 	/* Find the relevant hole to add our node to */
 	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
-		if (hole_start > start || hole_end < end)
+		if (hole_start > node->start || hole_end < end)
 			continue;
 
-		node = drm_mm_kmalloc(mm, atomic);
-		if (unlikely(node == NULL))
-			return NULL;
-
-		node->start = start;
-		node->size = size;
 		node->mm = mm;
 		node->allocated = 1;
 
 		INIT_LIST_HEAD(&node->hole_stack);
 		list_add(&node->node_list, &hole->node_list);
 
-		if (start == hole_start) {
+		if (node->start == hole_start) {
 			hole->hole_follows = 0;
 			list_del_init(&hole->hole_stack);
 		}
@@ -184,13 +178,14 @@ struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
 			node->hole_follows = 1;
 		}
 
-		return node;
+		return 0;
 	}
 
-	WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size);
-	return NULL;
+	WARN(1, "no hole found for node 0x%lx + 0x%lx\n",
+	     node->start, node->size);
+	return -ENOSPC;
 }
-EXPORT_SYMBOL(drm_mm_create_block);
+EXPORT_SYMBOL(drm_mm_reserve_node);
 
 struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
 					     unsigned long size,
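The drm_mm hunk above replaces the allocating drm_mm_create_block() with
drm_mm_reserve_node(), which takes a caller-provided node with start and size
already filled in -- the prep step for embedding drm_mm_node directly in
objects instead of pointing at a kmalloc'ed one. A minimal caller-side sketch
of the new pattern (my_vma and my_reserve_range are hypothetical names, not
from this patch):

#include <drm/drm_mm.h>

struct my_vma {				/* hypothetical container */
	struct drm_mm_node node;	/* embedded, no separate kmalloc */
};

static int my_reserve_range(struct drm_mm *mm, struct my_vma *vma,
			    unsigned long start, unsigned long size)
{
	vma->node.start = start;
	vma->node.size = size;
	/* Fails with -ENOSPC if no hole covers [start, start + size). */
	return drm_mm_reserve_node(mm, &vma->node);
}

Because the node's storage now belongs to the caller, the allocator no longer
needs an "atomic" kmalloc flag, and failure is reported as an errno rather
than a NULL pointer.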
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -5,6 +5,7 @@
 ccflags-y := -Iinclude/drm
 i915-y := i915_drv.o i915_dma.o i915_irq.o \
 	  i915_debugfs.o \
+	  i915_gpu_error.o \
 	  i915_suspend.o \
 	  i915_gem.o \
 	  i915_gem_context.o \
@ -30,7 +30,6 @@
|
|||
#include <linux/debugfs.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/export.h>
|
||||
#include <generated/utsrelease.h>
|
||||
#include <drm/drmP.h>
|
||||
#include "intel_drv.h"
|
||||
#include "intel_ringbuffer.h"
|
||||
|
@ -90,16 +89,6 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
|
|||
}
|
||||
}
|
||||
|
||||
static const char *cache_level_str(int type)
|
||||
{
|
||||
switch (type) {
|
||||
case I915_CACHE_NONE: return " uncached";
|
||||
case I915_CACHE_LLC: return " snooped (LLC)";
|
||||
case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
|
||||
default: return "";
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
|
||||
{
|
||||
|
@ -113,7 +102,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
|
|||
obj->last_read_seqno,
|
||||
obj->last_write_seqno,
|
||||
obj->last_fenced_seqno,
|
||||
cache_level_str(obj->cache_level),
|
||||
i915_cache_level_str(obj->cache_level),
|
||||
obj->dirty ? " dirty" : "",
|
||||
obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
|
||||
if (obj->base.name)
|
||||
|
@ -122,9 +111,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
|
|||
seq_printf(m, " (pinned x %d)", obj->pin_count);
|
||||
if (obj->fence_reg != I915_FENCE_REG_NONE)
|
||||
seq_printf(m, " (fence: %d)", obj->fence_reg);
|
||||
if (obj->gtt_space != NULL)
|
||||
seq_printf(m, " (gtt offset: %08x, size: %08x)",
|
||||
obj->gtt_offset, (unsigned int)obj->gtt_space->size);
|
||||
if (i915_gem_obj_ggtt_bound(obj))
|
||||
seq_printf(m, " (gtt offset: %08lx, size: %08x)",
|
||||
i915_gem_obj_ggtt_offset(obj), (unsigned int)i915_gem_obj_ggtt_size(obj));
|
||||
if (obj->stolen)
|
||||
seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
|
||||
if (obj->pin_mappable || obj->fault_mappable) {
|
||||
|
@ -157,11 +146,11 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
|
|||
|
||||
switch (list) {
|
||||
case ACTIVE_LIST:
|
||||
seq_printf(m, "Active:\n");
|
||||
seq_puts(m, "Active:\n");
|
||||
head = &dev_priv->mm.active_list;
|
||||
break;
|
||||
case INACTIVE_LIST:
|
||||
seq_printf(m, "Inactive:\n");
|
||||
seq_puts(m, "Inactive:\n");
|
||||
head = &dev_priv->mm.inactive_list;
|
||||
break;
|
||||
default:
|
||||
|
@ -171,11 +160,11 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
|
|||
|
||||
total_obj_size = total_gtt_size = count = 0;
|
||||
list_for_each_entry(obj, head, mm_list) {
|
||||
seq_printf(m, " ");
|
||||
seq_puts(m, " ");
|
||||
describe_obj(m, obj);
|
||||
seq_printf(m, "\n");
|
||||
seq_putc(m, '\n');
|
||||
total_obj_size += obj->base.size;
|
||||
total_gtt_size += obj->gtt_space->size;
|
||||
total_gtt_size += i915_gem_obj_ggtt_size(obj);
|
||||
count++;
|
||||
}
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
@ -187,10 +176,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
|
|||
|
||||
#define count_objects(list, member) do { \
|
||||
list_for_each_entry(obj, list, member) { \
|
||||
size += obj->gtt_space->size; \
|
||||
size += i915_gem_obj_ggtt_size(obj); \
|
||||
++count; \
|
||||
if (obj->map_and_fenceable) { \
|
||||
mappable_size += obj->gtt_space->size; \
|
||||
mappable_size += i915_gem_obj_ggtt_size(obj); \
|
||||
++mappable_count; \
|
||||
} \
|
||||
} \
|
||||
|
@ -209,7 +198,7 @@ static int per_file_stats(int id, void *ptr, void *data)
|
|||
stats->count++;
|
||||
stats->total += obj->base.size;
|
||||
|
||||
if (obj->gtt_space) {
|
||||
if (i915_gem_obj_ggtt_bound(obj)) {
|
||||
if (!list_empty(&obj->ring_list))
|
||||
stats->active += obj->base.size;
|
||||
else
|
||||
|
@ -222,7 +211,7 @@ static int per_file_stats(int id, void *ptr, void *data)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int i915_gem_object_info(struct seq_file *m, void* data)
|
||||
static int i915_gem_object_info(struct seq_file *m, void *data)
|
||||
{
|
||||
struct drm_info_node *node = (struct drm_info_node *) m->private;
|
||||
struct drm_device *dev = node->minor->dev;
|
||||
|
@ -267,11 +256,11 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
|
|||
size = count = mappable_size = mappable_count = 0;
|
||||
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
|
||||
if (obj->fault_mappable) {
|
||||
size += obj->gtt_space->size;
|
||||
size += i915_gem_obj_ggtt_size(obj);
|
||||
++count;
|
||||
}
|
||||
if (obj->pin_mappable) {
|
||||
mappable_size += obj->gtt_space->size;
|
||||
mappable_size += i915_gem_obj_ggtt_size(obj);
|
||||
++mappable_count;
|
||||
}
|
||||
if (obj->madv == I915_MADV_DONTNEED) {
|
||||
|
@ -290,7 +279,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
|
|||
dev_priv->gtt.total,
|
||||
dev_priv->gtt.mappable_end - dev_priv->gtt.start);
|
||||
|
||||
seq_printf(m, "\n");
|
||||
seq_putc(m, '\n');
|
||||
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
|
||||
struct file_stats stats;
|
||||
|
||||
|
@ -310,7 +299,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int i915_gem_gtt_info(struct seq_file *m, void* data)
|
||||
static int i915_gem_gtt_info(struct seq_file *m, void *data)
|
||||
{
|
||||
struct drm_info_node *node = (struct drm_info_node *) m->private;
|
||||
struct drm_device *dev = node->minor->dev;
|
||||
|
@ -329,11 +318,11 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
|
|||
if (list == PINNED_LIST && obj->pin_count == 0)
|
||||
continue;
|
||||
|
||||
seq_printf(m, " ");
|
||||
seq_puts(m, " ");
|
||||
describe_obj(m, obj);
|
||||
seq_printf(m, "\n");
|
||||
seq_putc(m, '\n');
|
||||
total_obj_size += obj->base.size;
|
||||
total_gtt_size += obj->gtt_space->size;
|
||||
total_gtt_size += i915_gem_obj_ggtt_size(obj);
|
||||
count++;
|
||||
}
|
||||
|
||||
|
@ -371,20 +360,22 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
|
|||
pipe, plane);
|
||||
}
|
||||
if (work->enable_stall_check)
|
||||
seq_printf(m, "Stall check enabled, ");
|
||||
seq_puts(m, "Stall check enabled, ");
|
||||
else
|
||||
seq_printf(m, "Stall check waiting for page flip ioctl, ");
|
||||
seq_puts(m, "Stall check waiting for page flip ioctl, ");
|
||||
seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
|
||||
|
||||
if (work->old_fb_obj) {
|
||||
struct drm_i915_gem_object *obj = work->old_fb_obj;
|
||||
if (obj)
|
||||
seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
|
||||
seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n",
|
||||
i915_gem_obj_ggtt_offset(obj));
|
||||
}
|
||||
if (work->pending_flip_obj) {
|
||||
struct drm_i915_gem_object *obj = work->pending_flip_obj;
|
||||
if (obj)
|
||||
seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
|
||||
seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n",
|
||||
i915_gem_obj_ggtt_offset(obj));
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&dev->event_lock, flags);
|
||||
|
@ -424,7 +415,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
|
|||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
if (count == 0)
|
||||
seq_printf(m, "No requests\n");
|
||||
seq_puts(m, "No requests\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -574,10 +565,10 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
|
|||
seq_printf(m, "Fence %d, pin count = %d, object = ",
|
||||
i, dev_priv->fence_regs[i].pin_count);
|
||||
if (obj == NULL)
|
||||
seq_printf(m, "unused");
|
||||
seq_puts(m, "unused");
|
||||
else
|
||||
describe_obj(m, obj);
|
||||
seq_printf(m, "\n");
|
||||
seq_putc(m, '\n');
|
||||
}
|
||||
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
@ -606,361 +597,6 @@ static int i915_hws_info(struct seq_file *m, void *data)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static const char *ring_str(int ring)
|
||||
{
|
||||
switch (ring) {
|
||||
case RCS: return "render";
|
||||
case VCS: return "bsd";
|
||||
case BCS: return "blt";
|
||||
case VECS: return "vebox";
|
||||
default: return "";
|
||||
}
|
||||
}
|
||||
|
||||
static const char *pin_flag(int pinned)
|
||||
{
|
||||
if (pinned > 0)
|
||||
return " P";
|
||||
else if (pinned < 0)
|
||||
return " p";
|
||||
else
|
||||
return "";
|
||||
}
|
||||
|
||||
static const char *tiling_flag(int tiling)
|
||||
{
|
||||
switch (tiling) {
|
||||
default:
|
||||
case I915_TILING_NONE: return "";
|
||||
case I915_TILING_X: return " X";
|
||||
case I915_TILING_Y: return " Y";
|
||||
}
|
||||
}
|
||||
|
||||
static const char *dirty_flag(int dirty)
|
||||
{
|
||||
return dirty ? " dirty" : "";
|
||||
}
|
||||
|
||||
static const char *purgeable_flag(int purgeable)
|
||||
{
|
||||
return purgeable ? " purgeable" : "";
|
||||
}
|
||||
|
||||
static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
|
||||
{
|
||||
|
||||
if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
|
||||
e->err = -ENOSPC;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (e->bytes == e->size - 1 || e->err)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
|
||||
unsigned len)
|
||||
{
|
||||
if (e->pos + len <= e->start) {
|
||||
e->pos += len;
|
||||
return false;
|
||||
}
|
||||
|
||||
/* First vsnprintf needs to fit in its entirety for memmove */
|
||||
if (len >= e->size) {
|
||||
e->err = -EIO;
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void __i915_error_advance(struct drm_i915_error_state_buf *e,
|
||||
unsigned len)
|
||||
{
|
||||
/* If this is first printf in this window, adjust it so that
|
||||
* start position matches start of the buffer
|
||||
*/
|
||||
|
||||
if (e->pos < e->start) {
|
||||
const size_t off = e->start - e->pos;
|
||||
|
||||
/* Should not happen but be paranoid */
|
||||
if (off > len || e->bytes) {
|
||||
e->err = -EIO;
|
||||
return;
|
||||
}
|
||||
|
||||
memmove(e->buf, e->buf + off, len - off);
|
||||
e->bytes = len - off;
|
||||
e->pos = e->start;
|
||||
return;
|
||||
}
|
||||
|
||||
e->bytes += len;
|
||||
e->pos += len;
|
||||
}
|
||||
|
||||
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
|
||||
const char *f, va_list args)
|
||||
{
|
||||
unsigned len;
|
||||
|
||||
if (!__i915_error_ok(e))
|
||||
return;
|
||||
|
||||
/* Seek the first printf which is hits start position */
|
||||
if (e->pos < e->start) {
|
||||
len = vsnprintf(NULL, 0, f, args);
|
||||
if (!__i915_error_seek(e, len))
|
||||
return;
|
||||
}
|
||||
|
||||
len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
|
||||
if (len >= e->size - e->bytes)
|
||||
len = e->size - e->bytes - 1;
|
||||
|
||||
__i915_error_advance(e, len);
|
||||
}
|
||||
|
||||
static void i915_error_puts(struct drm_i915_error_state_buf *e,
|
||||
const char *str)
|
||||
{
|
||||
unsigned len;
|
||||
|
||||
if (!__i915_error_ok(e))
|
||||
return;
|
||||
|
||||
len = strlen(str);
|
||||
|
||||
/* Seek the first printf which is hits start position */
|
||||
if (e->pos < e->start) {
|
||||
if (!__i915_error_seek(e, len))
|
||||
return;
|
||||
}
|
||||
|
||||
if (len >= e->size - e->bytes)
|
||||
len = e->size - e->bytes - 1;
|
||||
memcpy(e->buf + e->bytes, str, len);
|
||||
|
||||
__i915_error_advance(e, len);
|
||||
}
|
||||
|
||||
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
|
||||
{
|
||||
va_list args;
|
||||
|
||||
va_start(args, f);
|
||||
i915_error_vprintf(e, f, args);
|
||||
va_end(args);
|
||||
}
|
||||
|
||||
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
|
||||
#define err_puts(e, s) i915_error_puts(e, s)
|
||||
|
||||
static void print_error_buffers(struct drm_i915_error_state_buf *m,
|
||||
const char *name,
|
||||
struct drm_i915_error_buffer *err,
|
||||
int count)
|
||||
{
|
||||
err_printf(m, "%s [%d]:\n", name, count);
|
||||
|
||||
while (count--) {
|
||||
err_printf(m, " %08x %8u %02x %02x %x %x",
|
||||
err->gtt_offset,
|
||||
err->size,
|
||||
err->read_domains,
|
||||
err->write_domain,
|
||||
err->rseqno, err->wseqno);
|
||||
err_puts(m, pin_flag(err->pinned));
|
||||
err_puts(m, tiling_flag(err->tiling));
|
||||
err_puts(m, dirty_flag(err->dirty));
|
||||
err_puts(m, purgeable_flag(err->purgeable));
|
||||
err_puts(m, err->ring != -1 ? " " : "");
|
||||
err_puts(m, ring_str(err->ring));
|
||||
err_puts(m, cache_level_str(err->cache_level));
|
||||
|
||||
if (err->name)
|
||||
err_printf(m, " (name: %d)", err->name);
|
||||
if (err->fence_reg != I915_FENCE_REG_NONE)
|
||||
err_printf(m, " (fence: %d)", err->fence_reg);
|
||||
|
||||
err_puts(m, "\n");
|
||||
err++;
|
||||
}
|
||||
}
|
||||
|
||||
static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
|
||||
struct drm_device *dev,
|
||||
struct drm_i915_error_state *error,
|
||||
unsigned ring)
|
||||
{
|
||||
BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
|
||||
err_printf(m, "%s command stream:\n", ring_str(ring));
|
||||
err_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
|
||||
err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
|
||||
err_printf(m, " CTL: 0x%08x\n", error->ctl[ring]);
|
||||
err_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
|
||||
err_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
|
||||
err_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
|
||||
err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
|
||||
if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
|
||||
err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
|
||||
|
||||
if (INTEL_INFO(dev)->gen >= 4)
|
||||
err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
|
||||
err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
|
||||
err_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
|
||||
if (INTEL_INFO(dev)->gen >= 6) {
|
||||
err_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
|
||||
err_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
|
||||
err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
|
||||
error->semaphore_mboxes[ring][0],
|
||||
error->semaphore_seqno[ring][0]);
|
||||
err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
|
||||
error->semaphore_mboxes[ring][1],
|
||||
error->semaphore_seqno[ring][1]);
|
||||
}
|
||||
err_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
|
||||
err_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
|
||||
err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
|
||||
err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
|
||||
}
|
||||
|
||||
struct i915_error_state_file_priv {
|
||||
struct drm_device *dev;
|
||||
struct drm_i915_error_state *error;
|
||||
};
|
||||
|
||||
|
||||
static int i915_error_state(struct i915_error_state_file_priv *error_priv,
|
||||
struct drm_i915_error_state_buf *m)
|
||||
|
||||
{
|
||||
struct drm_device *dev = error_priv->dev;
|
||||
drm_i915_private_t *dev_priv = dev->dev_private;
|
||||
struct drm_i915_error_state *error = error_priv->error;
|
||||
struct intel_ring_buffer *ring;
|
||||
int i, j, page, offset, elt;
|
||||
|
||||
if (!error) {
|
||||
err_printf(m, "no error state collected\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
|
||||
error->time.tv_usec);
|
||||
err_printf(m, "Kernel: " UTS_RELEASE "\n");
|
||||
err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
|
||||
err_printf(m, "EIR: 0x%08x\n", error->eir);
|
||||
err_printf(m, "IER: 0x%08x\n", error->ier);
|
||||
err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
|
||||
err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
|
||||
err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
|
||||
err_printf(m, "CCID: 0x%08x\n", error->ccid);
|
||||
|
||||
for (i = 0; i < dev_priv->num_fence_regs; i++)
|
||||
err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
|
||||
err_printf(m, " INSTDONE_%d: 0x%08x\n", i,
|
||||
error->extra_instdone[i]);
|
||||
|
||||
if (INTEL_INFO(dev)->gen >= 6) {
|
||||
err_printf(m, "ERROR: 0x%08x\n", error->error);
|
||||
err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
|
||||
}
|
||||
|
||||
if (INTEL_INFO(dev)->gen == 7)
|
||||
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
|
||||
|
||||
for_each_ring(ring, dev_priv, i)
|
||||
i915_ring_error_state(m, dev, error, i);
|
||||
|
||||
if (error->active_bo)
|
||||
print_error_buffers(m, "Active",
|
||||
error->active_bo,
|
||||
error->active_bo_count);
|
||||
|
||||
if (error->pinned_bo)
|
||||
print_error_buffers(m, "Pinned",
|
||||
error->pinned_bo,
|
||||
error->pinned_bo_count);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
|
||||
struct drm_i915_error_object *obj;
|
||||
|
||||
if ((obj = error->ring[i].batchbuffer)) {
|
||||
err_printf(m, "%s --- gtt_offset = 0x%08x\n",
|
||||
dev_priv->ring[i].name,
|
||||
obj->gtt_offset);
|
||||
offset = 0;
|
||||
for (page = 0; page < obj->page_count; page++) {
|
||||
for (elt = 0; elt < PAGE_SIZE/4; elt++) {
|
||||
err_printf(m, "%08x : %08x\n", offset,
|
||||
obj->pages[page][elt]);
|
||||
offset += 4;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (error->ring[i].num_requests) {
|
||||
err_printf(m, "%s --- %d requests\n",
|
||||
dev_priv->ring[i].name,
|
||||
error->ring[i].num_requests);
|
||||
for (j = 0; j < error->ring[i].num_requests; j++) {
|
||||
err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
|
||||
error->ring[i].requests[j].seqno,
|
||||
error->ring[i].requests[j].jiffies,
|
||||
error->ring[i].requests[j].tail);
|
||||
}
|
||||
}
|
||||
|
||||
if ((obj = error->ring[i].ringbuffer)) {
|
||||
err_printf(m, "%s --- ringbuffer = 0x%08x\n",
|
||||
dev_priv->ring[i].name,
|
||||
obj->gtt_offset);
|
||||
offset = 0;
|
||||
for (page = 0; page < obj->page_count; page++) {
|
||||
for (elt = 0; elt < PAGE_SIZE/4; elt++) {
|
||||
err_printf(m, "%08x : %08x\n",
|
||||
offset,
|
||||
obj->pages[page][elt]);
|
||||
offset += 4;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
obj = error->ring[i].ctx;
|
||||
if (obj) {
|
||||
err_printf(m, "%s --- HW Context = 0x%08x\n",
|
||||
dev_priv->ring[i].name,
|
||||
obj->gtt_offset);
|
||||
offset = 0;
|
||||
for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
|
||||
err_printf(m, "[%04x] %08x %08x %08x %08x\n",
|
||||
offset,
|
||||
obj->pages[0][elt],
|
||||
obj->pages[0][elt+1],
|
||||
obj->pages[0][elt+2],
|
||||
obj->pages[0][elt+3]);
|
||||
offset += 16;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (error->overlay)
|
||||
intel_overlay_print_error_state(m, error->overlay);
|
||||
|
||||
if (error->display)
|
||||
intel_display_print_error_state(m, dev, error->display);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
i915_error_state_write(struct file *filp,
|
||||
const char __user *ubuf,
|
||||
|
@ -986,9 +622,7 @@ i915_error_state_write(struct file *filp,
|
|||
static int i915_error_state_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct drm_device *dev = inode->i_private;
|
||||
drm_i915_private_t *dev_priv = dev->dev_private;
|
||||
struct i915_error_state_file_priv *error_priv;
|
||||
unsigned long flags;
|
||||
|
||||
error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
|
||||
if (!error_priv)
|
||||
|
@ -996,11 +630,7 @@ static int i915_error_state_open(struct inode *inode, struct file *file)
|
|||
|
||||
error_priv->dev = dev;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
|
||||
error_priv->error = dev_priv->gpu_error.first_error;
|
||||
if (error_priv->error)
|
||||
kref_get(&error_priv->error->ref);
|
||||
spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
|
||||
i915_error_state_get(dev, error_priv);
|
||||
|
||||
file->private_data = error_priv;
|
||||
|
||||
|
@ -1011,8 +641,7 @@ static int i915_error_state_release(struct inode *inode, struct file *file)
|
|||
{
|
||||
struct i915_error_state_file_priv *error_priv = file->private_data;
|
||||
|
||||
if (error_priv->error)
|
||||
kref_put(&error_priv->error->ref, i915_error_state_free);
|
||||
i915_error_state_put(error_priv);
|
||||
kfree(error_priv);
|
||||
|
||||
return 0;
|
||||
|
@ -1025,41 +654,16 @@ static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
|
|||
struct drm_i915_error_state_buf error_str;
|
||||
loff_t tmp_pos = 0;
|
||||
ssize_t ret_count = 0;
|
||||
int ret = 0;
|
||||
int ret;
|
||||
|
||||
memset(&error_str, 0, sizeof(error_str));
|
||||
ret = i915_error_state_buf_init(&error_str, count, *pos);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* We need to have enough room to store any i915_error_state printf
|
||||
* so that we can move it to start position.
|
||||
*/
|
||||
error_str.size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
|
||||
error_str.buf = kmalloc(error_str.size,
|
||||
GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);
|
||||
|
||||
if (error_str.buf == NULL) {
|
||||
error_str.size = PAGE_SIZE;
|
||||
error_str.buf = kmalloc(error_str.size, GFP_TEMPORARY);
|
||||
}
|
||||
|
||||
if (error_str.buf == NULL) {
|
||||
error_str.size = 128;
|
||||
error_str.buf = kmalloc(error_str.size, GFP_TEMPORARY);
|
||||
}
|
||||
|
||||
if (error_str.buf == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
error_str.start = *pos;
|
||||
|
||||
ret = i915_error_state(error_priv, &error_str);
|
||||
ret = i915_error_state_to_str(&error_str, error_priv);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
if (error_str.bytes == 0 && error_str.err) {
|
||||
ret = error_str.err;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
|
||||
error_str.buf,
|
||||
error_str.bytes);
|
||||
|
@ -1069,7 +673,7 @@ static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
|
|||
else
|
||||
*pos = error_str.start + ret_count;
|
||||
out:
|
||||
kfree(error_str.buf);
|
||||
i915_error_state_buf_release(&error_str);
|
||||
return ret ?: ret_count;
|
||||
}
|
||||
|
||||
|
@ -1246,7 +850,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
|
|||
(freq_sts >> 8) & 0xff));
|
||||
mutex_unlock(&dev_priv->rps.hw_lock);
|
||||
} else {
|
||||
seq_printf(m, "no P-state info available\n");
|
||||
seq_puts(m, "no P-state info available\n");
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -1341,28 +945,28 @@ static int ironlake_drpc_info(struct seq_file *m)
|
|||
seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
|
||||
seq_printf(m, "Render standby enabled: %s\n",
|
||||
(rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
|
||||
seq_printf(m, "Current RS state: ");
|
||||
seq_puts(m, "Current RS state: ");
|
||||
switch (rstdbyctl & RSX_STATUS_MASK) {
|
||||
case RSX_STATUS_ON:
|
||||
seq_printf(m, "on\n");
|
||||
seq_puts(m, "on\n");
|
||||
break;
|
||||
case RSX_STATUS_RC1:
|
||||
seq_printf(m, "RC1\n");
|
||||
seq_puts(m, "RC1\n");
|
||||
break;
|
||||
case RSX_STATUS_RC1E:
|
||||
seq_printf(m, "RC1E\n");
|
||||
seq_puts(m, "RC1E\n");
|
||||
break;
|
||||
case RSX_STATUS_RS1:
|
||||
seq_printf(m, "RS1\n");
|
||||
seq_puts(m, "RS1\n");
|
||||
break;
|
||||
case RSX_STATUS_RS2:
|
||||
seq_printf(m, "RS2 (RC6)\n");
|
||||
seq_puts(m, "RS2 (RC6)\n");
|
||||
break;
|
||||
case RSX_STATUS_RS3:
|
||||
seq_printf(m, "RC3 (RC6+)\n");
|
||||
seq_puts(m, "RC3 (RC6+)\n");
|
||||
break;
|
||||
default:
|
||||
seq_printf(m, "unknown\n");
|
||||
seq_puts(m, "unknown\n");
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -1377,8 +981,7 @@ static int gen6_drpc_info(struct seq_file *m)
|
|||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
|
||||
unsigned forcewake_count;
|
||||
int count=0, ret;
|
||||
|
||||
int count = 0, ret;
|
||||
|
||||
ret = mutex_lock_interruptible(&dev->struct_mutex);
|
||||
if (ret)
|
||||
|
@ -1389,8 +992,8 @@ static int gen6_drpc_info(struct seq_file *m)
|
|||
spin_unlock_irq(&dev_priv->gt_lock);
|
||||
|
||||
if (forcewake_count) {
|
||||
seq_printf(m, "RC information inaccurate because somebody "
|
||||
"holds a forcewake reference \n");
|
||||
seq_puts(m, "RC information inaccurate because somebody "
|
||||
"holds a forcewake reference \n");
|
||||
} else {
|
||||
/* NB: we cannot use forcewake, else we read the wrong values */
|
||||
while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
|
||||
|
@ -1423,25 +1026,25 @@ static int gen6_drpc_info(struct seq_file *m)
|
|||
yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
|
||||
seq_printf(m, "Deepest RC6 Enabled: %s\n",
|
||||
yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
|
||||
seq_printf(m, "Current RC state: ");
|
||||
seq_puts(m, "Current RC state: ");
|
||||
switch (gt_core_status & GEN6_RCn_MASK) {
|
||||
case GEN6_RC0:
|
||||
if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
|
||||
seq_printf(m, "Core Power Down\n");
|
||||
seq_puts(m, "Core Power Down\n");
|
||||
else
|
||||
seq_printf(m, "on\n");
|
||||
seq_puts(m, "on\n");
|
||||
break;
|
||||
case GEN6_RC3:
|
||||
seq_printf(m, "RC3\n");
|
||||
seq_puts(m, "RC3\n");
|
||||
break;
|
||||
case GEN6_RC6:
|
||||
seq_printf(m, "RC6\n");
|
||||
seq_puts(m, "RC6\n");
|
||||
break;
|
||||
case GEN6_RC7:
|
||||
seq_printf(m, "RC7\n");
|
||||
seq_puts(m, "RC7\n");
|
||||
break;
|
||||
default:
|
||||
seq_printf(m, "Unknown\n");
|
||||
seq_puts(m, "Unknown\n");
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -1485,43 +1088,46 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
|
|||
drm_i915_private_t *dev_priv = dev->dev_private;
|
||||
|
||||
if (!I915_HAS_FBC(dev)) {
|
||||
seq_printf(m, "FBC unsupported on this chipset\n");
|
||||
seq_puts(m, "FBC unsupported on this chipset\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (intel_fbc_enabled(dev)) {
|
||||
seq_printf(m, "FBC enabled\n");
|
||||
seq_puts(m, "FBC enabled\n");
|
||||
} else {
|
||||
seq_printf(m, "FBC disabled: ");
|
||||
switch (dev_priv->no_fbc_reason) {
|
||||
seq_puts(m, "FBC disabled: ");
|
||||
switch (dev_priv->fbc.no_fbc_reason) {
|
||||
case FBC_NO_OUTPUT:
|
||||
seq_printf(m, "no outputs");
|
||||
seq_puts(m, "no outputs");
|
||||
break;
|
||||
case FBC_STOLEN_TOO_SMALL:
|
||||
seq_printf(m, "not enough stolen memory");
|
||||
seq_puts(m, "not enough stolen memory");
|
||||
break;
|
||||
case FBC_UNSUPPORTED_MODE:
|
||||
seq_printf(m, "mode not supported");
|
||||
seq_puts(m, "mode not supported");
|
||||
break;
|
||||
case FBC_MODE_TOO_LARGE:
|
||||
seq_printf(m, "mode too large");
|
||||
seq_puts(m, "mode too large");
|
||||
break;
|
||||
case FBC_BAD_PLANE:
|
||||
seq_printf(m, "FBC unsupported on plane");
|
||||
seq_puts(m, "FBC unsupported on plane");
|
||||
break;
|
||||
case FBC_NOT_TILED:
|
||||
seq_printf(m, "scanout buffer not tiled");
|
||||
seq_puts(m, "scanout buffer not tiled");
|
||||
break;
|
||||
case FBC_MULTIPLE_PIPES:
|
||||
seq_printf(m, "multiple pipes are enabled");
|
||||
seq_puts(m, "multiple pipes are enabled");
|
||||
break;
|
||||
case FBC_MODULE_PARAM:
|
||||
seq_printf(m, "disabled per module param (default off)");
|
||||
seq_puts(m, "disabled per module param (default off)");
|
||||
break;
|
||||
case FBC_CHIP_DEFAULT:
|
||||
seq_puts(m, "disabled per chip default");
|
||||
break;
|
||||
default:
|
||||
seq_printf(m, "unknown reason");
|
||||
seq_puts(m, "unknown reason");
|
||||
}
|
||||
seq_printf(m, "\n");
|
||||
seq_putc(m, '\n');
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -1604,7 +1210,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
|
|||
int gpu_freq, ia_freq;
|
||||
|
||||
if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
|
||||
seq_printf(m, "unsupported on this chipset\n");
|
||||
seq_puts(m, "unsupported on this chipset\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1612,7 +1218,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
|
||||
seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
|
||||
|
||||
for (gpu_freq = dev_priv->rps.min_delay;
|
||||
gpu_freq <= dev_priv->rps.max_delay;
|
||||
|
@ -1701,7 +1307,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
|
|||
fb->base.bits_per_pixel,
|
||||
atomic_read(&fb->base.refcount.refcount));
|
||||
describe_obj(m, fb->obj);
|
||||
seq_printf(m, "\n");
|
||||
seq_putc(m, '\n');
|
||||
mutex_unlock(&dev->mode_config.mutex);
|
||||
|
||||
mutex_lock(&dev->mode_config.fb_lock);
|
||||
|
@ -1716,7 +1322,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
|
|||
fb->base.bits_per_pixel,
|
||||
atomic_read(&fb->base.refcount.refcount));
|
||||
describe_obj(m, fb->obj);
|
||||
seq_printf(m, "\n");
|
||||
seq_putc(m, '\n');
|
||||
}
|
||||
mutex_unlock(&dev->mode_config.fb_lock);
|
||||
|
||||
|
@ -1736,22 +1342,22 @@ static int i915_context_status(struct seq_file *m, void *unused)
|
|||
return ret;
|
||||
|
||||
if (dev_priv->ips.pwrctx) {
|
||||
seq_printf(m, "power context ");
|
||||
seq_puts(m, "power context ");
|
||||
describe_obj(m, dev_priv->ips.pwrctx);
|
||||
seq_printf(m, "\n");
|
||||
seq_putc(m, '\n');
|
||||
}
|
||||
|
||||
if (dev_priv->ips.renderctx) {
|
||||
seq_printf(m, "render context ");
|
||||
seq_puts(m, "render context ");
|
||||
describe_obj(m, dev_priv->ips.renderctx);
|
||||
seq_printf(m, "\n");
|
||||
seq_putc(m, '\n');
|
||||
}
|
||||
|
||||
for_each_ring(ring, dev_priv, i) {
|
||||
if (ring->default_context) {
|
||||
seq_printf(m, "HW default context %s ring ", ring->name);
|
||||
describe_obj(m, ring->default_context->obj);
|
||||
seq_printf(m, "\n");
|
||||
seq_putc(m, '\n');
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1778,7 +1384,7 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
|
|||
|
||||
static const char *swizzle_string(unsigned swizzle)
|
||||
{
|
||||
switch(swizzle) {
|
||||
switch (swizzle) {
|
||||
case I915_BIT_6_SWIZZLE_NONE:
|
||||
return "none";
|
||||
case I915_BIT_6_SWIZZLE_9:
|
||||
|
@ -1868,7 +1474,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
|
|||
if (dev_priv->mm.aliasing_ppgtt) {
|
||||
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
|
||||
|
||||
seq_printf(m, "aliasing PPGTT:\n");
|
||||
seq_puts(m, "aliasing PPGTT:\n");
|
||||
seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
|
||||
}
|
||||
seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
|
||||
|
@ -1886,7 +1492,7 @@ static int i915_dpio_info(struct seq_file *m, void *data)
|
|||
|
||||
|
||||
if (!IS_VALLEYVIEW(dev)) {
|
||||
seq_printf(m, "unsupported\n");
|
||||
seq_puts(m, "unsupported\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -2356,61 +1962,35 @@ static struct drm_info_list i915_debugfs_list[] = {
|
|||
};
|
||||
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
|
||||
|
||||
struct i915_debugfs_files {
|
||||
const char *name;
|
||||
const struct file_operations *fops;
|
||||
} i915_debugfs_files[] = {
|
||||
{"i915_wedged", &i915_wedged_fops},
|
||||
{"i915_max_freq", &i915_max_freq_fops},
|
||||
{"i915_min_freq", &i915_min_freq_fops},
|
||||
{"i915_cache_sharing", &i915_cache_sharing_fops},
|
||||
{"i915_ring_stop", &i915_ring_stop_fops},
|
||||
{"i915_gem_drop_caches", &i915_drop_caches_fops},
|
||||
{"i915_error_state", &i915_error_state_fops},
|
||||
{"i915_next_seqno", &i915_next_seqno_fops},
|
||||
};
|
||||
|
||||
int i915_debugfs_init(struct drm_minor *minor)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = i915_debugfs_create(minor->debugfs_root, minor,
|
||||
"i915_wedged",
|
||||
&i915_wedged_fops);
|
||||
if (ret)
|
||||
return ret;
|
||||
int ret, i;
|
||||
|
||||
ret = i915_forcewake_create(minor->debugfs_root, minor);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = i915_debugfs_create(minor->debugfs_root, minor,
|
||||
"i915_max_freq",
|
||||
&i915_max_freq_fops);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = i915_debugfs_create(minor->debugfs_root, minor,
|
||||
"i915_min_freq",
|
||||
&i915_min_freq_fops);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = i915_debugfs_create(minor->debugfs_root, minor,
|
||||
"i915_cache_sharing",
|
||||
&i915_cache_sharing_fops);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = i915_debugfs_create(minor->debugfs_root, minor,
|
||||
"i915_ring_stop",
|
||||
&i915_ring_stop_fops);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = i915_debugfs_create(minor->debugfs_root, minor,
|
||||
"i915_gem_drop_caches",
|
||||
&i915_drop_caches_fops);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = i915_debugfs_create(minor->debugfs_root, minor,
|
||||
"i915_error_state",
|
||||
&i915_error_state_fops);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = i915_debugfs_create(minor->debugfs_root, minor,
|
||||
"i915_next_seqno",
|
||||
&i915_next_seqno_fops);
|
||||
if (ret)
|
||||
return ret;
|
||||
for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
|
||||
ret = i915_debugfs_create(minor->debugfs_root, minor,
|
||||
i915_debugfs_files[i].name,
|
||||
i915_debugfs_files[i].fops);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return drm_debugfs_create_files(i915_debugfs_list,
|
||||
I915_DEBUGFS_ENTRIES,
|
||||
|
@ -2419,26 +1999,18 @@ int i915_debugfs_init(struct drm_minor *minor)
|
|||
|
||||
void i915_debugfs_cleanup(struct drm_minor *minor)
|
||||
{
|
||||
int i;
|
||||
|
||||
drm_debugfs_remove_files(i915_debugfs_list,
|
||||
I915_DEBUGFS_ENTRIES, minor);
|
||||
drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
|
||||
1, minor);
|
||||
drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
|
||||
1, minor);
|
||||
drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
|
||||
1, minor);
|
||||
drm_debugfs_remove_files((struct drm_info_list *) &i915_min_freq_fops,
|
||||
1, minor);
|
||||
drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
|
||||
1, minor);
|
||||
drm_debugfs_remove_files((struct drm_info_list *) &i915_drop_caches_fops,
|
||||
1, minor);
|
||||
drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
|
||||
1, minor);
|
||||
drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
|
||||
1, minor);
|
||||
drm_debugfs_remove_files((struct drm_info_list *) &i915_next_seqno_fops,
|
||||
1, minor);
|
||||
for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
|
||||
struct drm_info_list *info_list =
|
||||
(struct drm_info_list *) i915_debugfs_files[i].fops;
|
||||
|
||||
drm_debugfs_remove_files(info_list, 1, minor);
|
||||
}
|
||||
}
|
||||
|
||||
#endif /* CONFIG_DEBUG_FS */
|
||||
|
|
|
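The error-state buffer helpers removed above (they move to i915_gpu_error.c)
implement a read-window scheme over a fixed buffer: printf output generated
before the reader's file position is counted but discarded, and only bytes
falling inside the requested window are stored. A simplified standalone
illustration of that windowing idea (not the kernel implementation; struct
win_buf and win_printf are invented names):

#include <stdarg.h>
#include <stdio.h>
#include <string.h>

struct win_buf {
	char buf[256];
	size_t size;	/* capacity of buf */
	size_t start;	/* file position the reader asked for */
	size_t pos;	/* bytes of output generated so far */
	size_t bytes;	/* bytes actually stored in buf */
};

static void win_printf(struct win_buf *e, const char *f, ...)
{
	char tmp[128];
	va_list args;
	size_t len, off = 0;

	va_start(args, f);
	len = vsnprintf(tmp, sizeof(tmp), f, args);
	va_end(args);

	if (e->pos + len <= e->start) {	/* entirely before the window */
		e->pos += len;
		return;
	}
	if (e->pos < e->start)		/* straddles the window start */
		off = e->start - e->pos;

	len -= off;
	if (len > e->size - e->bytes)	/* clamp to remaining space */
		len = e->size - e->bytes;
	memcpy(e->buf + e->bytes, tmp + off, len);
	e->bytes += len;
	e->pos = e->start + e->bytes;
}

int main(void)
{
	struct win_buf e = { .size = sizeof(e.buf), .start = 10 };

	win_printf(&e, "0123456789");	/* all before the window: skipped */
	win_printf(&e, "ABCDEF");	/* lands inside the window */
	printf("%.*s\n", (int)e.bytes, e.buf);	/* prints: ABCDEF */
	return 0;
}

This is why the kernel version needs the memmove in __i915_error_advance: the
first printf that straddles the window start must be shifted so the buffer
begins exactly at the reader's offset.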
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1323,10 +1323,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	/* Always safe in the mode setting case. */
 	/* FIXME: do pre/post-mode set stuff in core KMS code */
 	dev->vblank_disable_allowed = 1;
-	if (INTEL_INFO(dev)->num_pipes == 0) {
-		dev_priv->mm.suspended = 0;
+	if (INTEL_INFO(dev)->num_pipes == 0)
 		return 0;
-	}
 
 	ret = intel_fbdev_init(dev);
 	if (ret)
@@ -1352,9 +1350,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
 	drm_kms_helper_poll_init(dev);
 
-	/* We're off and running w/KMS */
-	dev_priv->mm.suspended = 0;
-
 	return 0;
 
 cleanup_gem:
@@ -1558,8 +1553,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		goto out_rmmap;
 	}
 
-	dev_priv->mm.gtt_mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
-						 aperture_size);
+	dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
+					      aperture_size);
 
 	/* The i915 workqueue is primarily used for batched retirement of
 	 * requests (and thus managing bo) once the task has been completed
@@ -1612,7 +1607,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->gpu_error.lock);
-	spin_lock_init(&dev_priv->rps.lock);
 	spin_lock_init(&dev_priv->backlight.lock);
 	mutex_init(&dev_priv->dpio_lock);
 
@@ -1629,9 +1623,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		goto out_gem_unload;
 	}
 
-	/* Start out suspended */
-	dev_priv->mm.suspended = 1;
-
 	if (HAS_POWER_WELL(dev))
 		i915_init_power_well(dev);
 
@@ -1641,6 +1632,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 			DRM_ERROR("failed to init modeset\n");
 			goto out_gem_unload;
 		}
+	} else {
+		/* Start out suspended in ums mode. */
+		dev_priv->ums.mm_suspended = 1;
 	}
 
 	i915_setup_sysfs(dev);
@@ -1667,7 +1661,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	intel_teardown_mchbar(dev);
 	destroy_workqueue(dev_priv->wq);
 out_mtrrfree:
-	arch_phys_wc_del(dev_priv->mm.gtt_mtrr);
+	arch_phys_wc_del(dev_priv->gtt.mtrr);
 	io_mapping_free(dev_priv->gtt.mappable);
 	dev_priv->gtt.gtt_remove(dev);
 out_rmmap:
@@ -1705,7 +1699,7 @@ int i915_driver_unload(struct drm_device *dev)
 	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
 
 	io_mapping_free(dev_priv->gtt.mappable);
-	arch_phys_wc_del(dev_priv->mm.gtt_mtrr);
+	arch_phys_wc_del(dev_priv->gtt.mtrr);
 
 	acpi_video_unregister();
 
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -132,6 +132,11 @@ int i915_enable_ips __read_mostly = 1;
 module_param_named(enable_ips, i915_enable_ips, int, 0600);
 MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
 
+bool i915_fastboot __read_mostly = 0;
+module_param_named(fastboot, i915_fastboot, bool, 0600);
+MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time "
+		 "(default: false)");
+
 static struct drm_driver driver;
 extern int intel_agp_enabled;
 
@@ -551,7 +556,11 @@ static int i915_drm_freeze(struct drm_device *dev)
 
 	/* If KMS is active, we do the leavevt stuff here */
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		int error = i915_gem_idle(dev);
+		int error;
+
+		mutex_lock(&dev->struct_mutex);
+		error = i915_gem_idle(dev);
+		mutex_unlock(&dev->struct_mutex);
 		if (error) {
 			dev_err(&dev->pdev->dev,
 				"GEM idle failed, resume might fail\n");
@@ -656,7 +665,6 @@ static int __i915_drm_thaw(struct drm_device *dev)
 		intel_init_pch_refclk(dev);
 
 	mutex_lock(&dev->struct_mutex);
-	dev_priv->mm.suspended = 0;
 
 	error = i915_gem_init_hw(dev);
 	mutex_unlock(&dev->struct_mutex);
@@ -793,28 +801,29 @@ static int i965_reset_complete(struct drm_device *dev)
 static int i965_do_reset(struct drm_device *dev)
 {
 	int ret;
-	u8 gdrst;
 
 	/*
 	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
 	 * well as the reset bit (GR/bit 0). Setting the GR bit
 	 * triggers the reset; when done, the hardware will clear it.
 	 */
-	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
 	pci_write_config_byte(dev->pdev, I965_GDRST,
-			      gdrst | GRDOM_RENDER |
-			      GRDOM_RESET_ENABLE);
+			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
 	ret = wait_for(i965_reset_complete(dev), 500);
 	if (ret)
 		return ret;
 
 	/* We can't reset render&media without also resetting display ... */
-	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
 	pci_write_config_byte(dev->pdev, I965_GDRST,
-			      gdrst | GRDOM_MEDIA |
-			      GRDOM_RESET_ENABLE);
+			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
 
-	return wait_for(i965_reset_complete(dev), 500);
+	ret = wait_for(i965_reset_complete(dev), 500);
+	if (ret)
+		return ret;
+
+	pci_write_config_byte(dev->pdev, I965_GDRST, 0);
+
+	return 0;
 }
 
 static int ironlake_do_reset(struct drm_device *dev)
@@ -955,11 +964,11 @@ int i915_reset(struct drm_device *dev)
 	 * switched away).
 	 */
 	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
-			!dev_priv->mm.suspended) {
+			!dev_priv->ums.mm_suspended) {
 		struct intel_ring_buffer *ring;
 		int i;
 
-		dev_priv->mm.suspended = 0;
+		dev_priv->ums.mm_suspended = 0;
 
 		i915_gem_init_swizzling(dev);
 
@ -144,6 +144,7 @@ enum intel_dpll_id {
|
|||
|
||||
struct intel_dpll_hw_state {
|
||||
uint32_t dpll;
|
||||
uint32_t dpll_md;
|
||||
uint32_t fp0;
|
||||
uint32_t fp1;
|
||||
};
|
||||
|
@ -156,6 +157,8 @@ struct intel_shared_dpll {
|
|||
/* should match the index in the dev_priv->shared_dplls array */
|
||||
enum intel_dpll_id id;
|
||||
struct intel_dpll_hw_state hw_state;
|
||||
void (*mode_set)(struct drm_i915_private *dev_priv,
|
||||
struct intel_shared_dpll *pll);
|
||||
void (*enable)(struct drm_i915_private *dev_priv,
|
||||
struct intel_shared_dpll *pll);
|
||||
void (*disable)(struct drm_i915_private *dev_priv,
|
||||
|
@ -364,6 +367,7 @@ struct drm_i915_display_funcs {
|
|||
* fills out the pipe-config with the hw state. */
|
||||
bool (*get_pipe_config)(struct intel_crtc *,
|
||||
struct intel_crtc_config *);
|
||||
void (*get_clock)(struct intel_crtc *, struct intel_crtc_config *);
|
||||
int (*crtc_mode_set)(struct drm_crtc *crtc,
|
||||
int x, int y,
|
||||
struct drm_framebuffer *old_fb);
|
||||
|
@ -462,8 +466,12 @@ struct i915_gtt {
|
|||
void __iomem *gsm;
|
||||
|
||||
bool do_idle_maps;
|
||||
dma_addr_t scratch_page_dma;
|
||||
struct page *scratch_page;
|
||||
struct {
|
||||
dma_addr_t addr;
|
||||
struct page *page;
|
||||
} scratch;
|
||||
|
||||
int mtrr;
|
||||
|
||||
/* global gtt ops */
|
||||
int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
|
||||
|
@ -477,21 +485,17 @@ struct i915_gtt {
|
|||
struct sg_table *st,
|
||||
unsigned int pg_start,
|
||||
enum i915_cache_level cache_level);
|
||||
gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev,
|
||||
dma_addr_t addr,
|
||||
gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
|
||||
enum i915_cache_level level);
|
||||
};
|
||||
#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT)
|
||||
|
||||
#define I915_PPGTT_PD_ENTRIES 512
|
||||
#define I915_PPGTT_PT_ENTRIES 1024
|
||||
struct i915_hw_ppgtt {
|
||||
struct drm_device *dev;
|
||||
unsigned num_pd_entries;
|
||||
struct page **pt_pages;
|
||||
uint32_t pd_offset;
|
||||
dma_addr_t *pt_dma_addr;
|
||||
dma_addr_t scratch_page_dma_addr;
|
||||
|
||||
/* pte functions, mirroring the interface of the global gtt. */
|
||||
void (*clear_range)(struct i915_hw_ppgtt *ppgtt,
|
||||
|
@ -501,8 +505,7 @@ struct i915_hw_ppgtt {
|
|||
struct sg_table *st,
|
||||
unsigned int pg_start,
|
||||
enum i915_cache_level cache_level);
|
||||
gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev,
|
||||
dma_addr_t addr,
|
||||
gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
|
||||
enum i915_cache_level level);
|
||||
int (*enable)(struct drm_device *dev);
|
||||
void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
|
||||
|
@ -528,17 +531,36 @@ struct i915_hw_context {
|
|||
struct i915_ctx_hang_stats hang_stats;
|
||||
};
|
||||
|
||||
enum no_fbc_reason {
|
||||
FBC_NO_OUTPUT, /* no outputs enabled to compress */
|
||||
FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
|
||||
FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
|
||||
FBC_MODE_TOO_LARGE, /* mode too large for compression */
|
||||
FBC_BAD_PLANE, /* fbc not supported on plane */
|
||||
FBC_NOT_TILED, /* buffer not tiled */
|
||||
FBC_MULTIPLE_PIPES, /* more than one pipe active */
|
||||
FBC_MODULE_PARAM,
|
||||
struct i915_fbc {
|
||||
unsigned long size;
|
||||
unsigned int fb_id;
|
||||
enum plane plane;
|
||||
int y;
|
||||
|
||||
struct drm_mm_node *compressed_fb;
|
||||
struct drm_mm_node *compressed_llb;
|
||||
|
||||
struct intel_fbc_work {
|
||||
struct delayed_work work;
|
||||
struct drm_crtc *crtc;
|
||||
struct drm_framebuffer *fb;
|
||||
int interval;
|
||||
} *fbc_work;
|
||||
|
||||
enum {
|
||||
FBC_NO_OUTPUT, /* no outputs enabled to compress */
|
||||
FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
|
||||
FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
|
||||
FBC_MODE_TOO_LARGE, /* mode too large for compression */
|
||||
FBC_BAD_PLANE, /* fbc not supported on plane */
|
||||
FBC_NOT_TILED, /* buffer not tiled */
|
||||
FBC_MULTIPLE_PIPES, /* more than one pipe active */
|
||||
FBC_MODULE_PARAM,
|
||||
FBC_CHIP_DEFAULT, /* disabled by default on this chip */
|
||||
} no_fbc_reason;
|
||||
};
|
||||
|
||||
|
||||
enum intel_pch {
|
||||
PCH_NONE = 0, /* No PCH present */
|
||||
PCH_IBX, /* Ibexpeak PCH */
|
||||
|
@ -721,12 +743,12 @@ struct i915_suspend_saved_registers {
|
|||
};
|
||||
|
||||
struct intel_gen6_power_mgmt {
|
||||
/* work and pm_iir are protected by dev_priv->irq_lock */
|
||||
struct work_struct work;
|
||||
struct delayed_work vlv_work;
|
||||
u32 pm_iir;
|
||||
/* lock - irqsave spinlock that protectects the work_struct and
|
||||
* pm_iir. */
|
||||
spinlock_t lock;
|
||||
|
||||
/* On vlv we need to manually drop to Vmin with a delayed work. */
|
||||
struct delayed_work vlv_work;
|
||||
|
||||
/* The below variables an all the rps hw state are protected by
|
||||
* dev->struct mutext. */
|
||||
|
@ -792,6 +814,18 @@ struct i915_dri1_state {
|
|||
uint32_t counter;
|
||||
};
|
||||
|
||||
struct i915_ums_state {
|
||||
/**
|
||||
* Flag if the X Server, and thus DRM, is not currently in
|
||||
* control of the device.
|
||||
*
|
||||
* This is set between LeaveVT and EnterVT. It needs to be
|
||||
* replaced with a semaphore. It also needs to be
|
||||
* transitioned away from for kernel modesetting.
|
||||
*/
|
||||
int mm_suspended;
|
||||
};
|
||||
|
||||
struct intel_l3_parity {
|
||||
u32 *remap_info;
|
||||
struct work_struct error_work;
|
||||
|
@ -815,8 +849,6 @@ struct i915_gem_mm {
|
|||
/** Usable portion of the GTT for GEM */
|
||||
unsigned long stolen_base; /* limited to low memory (32-bit) */
|
||||
|
||||
int gtt_mtrr;
|
||||
|
||||
/** PPGTT used for aliasing the PPGTT with the GTT */
|
||||
struct i915_hw_ppgtt *aliasing_ppgtt;
|
||||
|
||||
|
@ -864,16 +896,6 @@ struct i915_gem_mm {
|
|||
*/
|
||||
bool interruptible;
|
||||
|
||||
/**
|
||||
* Flag if the X Server, and thus DRM, is not currently in
|
||||
* control of the device.
|
||||
*
|
||||
* This is set between LeaveVT and EnterVT. It needs to be
|
||||
* replaced with a semaphore. It also needs to be
|
||||
* transitioned away from for kernel modesetting.
|
||||
*/
|
||||
int suspended;
|
||||
|
||||
/** Bit 6 swizzling required for X tiling */
|
||||
uint32_t bit_6_swizzle_x;
|
||||
/** Bit 6 swizzling required for Y tiling */
|
||||
|
@ -896,6 +918,11 @@ struct drm_i915_error_state_buf {
|
|||
loff_t pos;
|
||||
};
|
||||
|
||||
struct i915_error_state_file_priv {
|
||||
struct drm_device *dev;
|
||||
struct drm_i915_error_state *error;
|
||||
};
|
||||
|
||||
struct i915_gpu_error {
|
||||
/* For hangcheck timer */
|
||||
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
|
||||
|
@ -1058,12 +1085,7 @@ typedef struct drm_i915_private {
|
|||
|
||||
int num_plane;
|
||||
|
||||
unsigned long cfb_size;
|
||||
unsigned int cfb_fb;
|
||||
enum plane cfb_plane;
|
||||
int cfb_y;
|
||||
struct intel_fbc_work *fbc_work;
|
||||
|
||||
struct i915_fbc fbc;
|
||||
struct intel_opregion opregion;
|
||||
struct intel_vbt_data vbt;
|
||||
|
||||
|
@ -1080,8 +1102,6 @@ typedef struct drm_i915_private {
|
|||
} backlight;
|
||||
|
||||
/* LVDS info */
|
||||
struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
|
||||
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
|
||||
bool no_aux_handshake;
|
||||
|
||||
struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
|
||||
|
@ -1141,11 +1161,6 @@ typedef struct drm_i915_private {
|
|||
/* Haswell power well */
|
||||
struct i915_power_well power_well;
|
||||
|
||||
enum no_fbc_reason no_fbc_reason;
|
||||
|
||||
struct drm_mm_node *compressed_fb;
|
||||
struct drm_mm_node *compressed_llb;
|
||||
|
||||
struct i915_gpu_error gpu_error;
|
||||
|
||||
struct drm_i915_gem_object *vlv_pctx;
|
||||
|
@ -1172,6 +1187,8 @@ typedef struct drm_i915_private {
|
|||
/* Old dri1 support infrastructure, beware the dragons ya fools entering
|
||||
* here! */
|
||||
struct i915_dri1_state dri1;
|
||||
/* Old ums support infrastructure, same warning applies. */
|
||||
struct i915_ums_state ums;
|
drivers/gpu/drm/i915/i915_drv.h

 } drm_i915_private_t;

 /* Iterate over initialised rings */

@@ -1186,7 +1203,7 @@ enum hdmi_force_audio {
 	HDMI_AUDIO_ON,			/* force turn on HDMI audio */
 };

-#define I915_GTT_RESERVED ((struct drm_mm_node *)0x1)
+#define I915_GTT_OFFSET_NONE ((u32)-1)

 struct drm_i915_gem_object_ops {
 	/* Interface between the GEM object and its backing storage.

@@ -1212,7 +1229,7 @@ struct drm_i915_gem_object {
 	const struct drm_i915_gem_object_ops *ops;

 	/** Current space allocated to this object in the GTT, if any. */
-	struct drm_mm_node *gtt_space;
+	struct drm_mm_node gtt_space;
 	/** Stolen memory for this object, instead of being backed by shmem. */
 	struct drm_mm_node *stolen;
 	struct list_head global_list;

@@ -1313,13 +1330,6 @@ struct drm_i915_gem_object {
 	unsigned long exec_handle;
 	struct drm_i915_gem_exec_object2 *exec_entry;

-	/**
-	 * Current offset of the object in GTT space.
-	 *
-	 * This is the same as gtt_space->start
-	 */
-	uint32_t gtt_offset;
-
 	struct intel_ring_buffer *ring;

 	/** Breadcrumb of last rendering to the buffer. */

@@ -1345,6 +1355,37 @@ struct drm_i915_gem_object {

 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)

+/* Offset of the first PTE pointing to this object */
+static inline unsigned long
+i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
+{
+	return o->gtt_space.start;
+}
+
+/* Whether or not this object is currently mapped by the translation tables */
+static inline bool
+i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *o)
+{
+	return drm_mm_node_allocated(&o->gtt_space);
+}
+
+/* The size used in the translation tables may be larger than the actual size of
+ * the object on GEN2/GEN3 because of the way tiling is handled. See
+ * i915_gem_get_gtt_size() for more details.
+ */
+static inline unsigned long
+i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
+{
+	return o->gtt_space.size;
+}
+
+static inline void
+i915_gem_obj_ggtt_set_color(struct drm_i915_gem_object *o,
+			    enum i915_cache_level color)
+{
+	o->gtt_space.color = color;
+}
+
 /**
  * Request queue structure.
  *

@@ -1542,6 +1583,7 @@ extern int i915_enable_ppgtt __read_mostly;
 extern unsigned int i915_preliminary_hw_support __read_mostly;
 extern int i915_disable_power_well __read_mostly;
 extern int i915_enable_ips __read_mostly;
+extern bool i915_fastboot __read_mostly;

 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
 extern int i915_resume(struct drm_device *dev);

@@ -1585,21 +1627,12 @@ extern void intel_hpd_init(struct drm_device *dev);
 extern void intel_gt_init(struct drm_device *dev);
 extern void intel_gt_reset(struct drm_device *dev);

-void i915_error_state_free(struct kref *error_ref);
-
 void
 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);

 void
 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);

-#ifdef CONFIG_DEBUG_FS
-extern void i915_destroy_error_state(struct drm_device *dev);
-#else
-#define i915_destroy_error_state(x)
-#endif
-

 /* i915_gem.c */
 int i915_gem_init_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);

@@ -1910,8 +1943,27 @@ void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
 /* i915_debugfs.c */
 int i915_debugfs_init(struct drm_minor *minor);
 void i915_debugfs_cleanup(struct drm_minor *minor);

+/* i915_gpu_error.c */
+__printf(2, 3)
+void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
+int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
+			    const struct i915_error_state_file_priv *error);
+int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
+			      size_t count, loff_t pos);
+static inline void i915_error_state_buf_release(
+	struct drm_i915_error_state_buf *eb)
+{
+	kfree(eb->buf);
+}
+void i915_capture_error_state(struct drm_device *dev);
+void i915_error_state_get(struct drm_device *dev,
+			  struct i915_error_state_file_priv *error_priv);
+void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
+void i915_destroy_error_state(struct drm_device *dev);
+
+void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
+const char *i915_cache_level_str(int type);

 /* i915_suspend.c */
 extern int i915_save_state(struct drm_device *dev);

@@ -1991,7 +2043,6 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);

 /* overlay */
-#ifdef CONFIG_DEBUG_FS
 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
 extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
					    struct intel_overlay_error_state *error);

@@ -2000,7 +2051,6 @@ extern struct intel_display_error_state *intel_display_capture_error_state(struc
 extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
					    struct drm_device *dev,
					    struct intel_display_error_state *error);
-#endif

 /* On SNB platform, before reading ring registers forcewake bit
  * must be set to prevent GT core from power down and stale values being
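The helpers added to i915_drv.h above are what lets the rest of this series drop the cached obj->gtt_offset field. A minimal sketch of how a call site migrates (example_dump_object() and its debug message are illustrative, not part of the series):

/* Illustrative only: a call site before/after the helper conversion. */
static void example_dump_object(struct drm_i915_gem_object *obj)
{
	/* Old style, removed by this series (valid only while bound):
	 *	u32 offset = obj->gtt_offset;
	 */

	/* New style: query the drm_mm_node embedded in the object. */
	if (i915_gem_obj_ggtt_bound(obj))
		DRM_DEBUG("obj at 0x%08lx, 0x%lx bytes\n",
			  i915_gem_obj_ggtt_offset(obj),
			  i915_gem_obj_ggtt_size(obj));
}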
drivers/gpu/drm/i915/i915_gem.c

@@ -135,7 +135,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
 static inline bool
 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
 {
-	return obj->gtt_space && !obj->active;
+	return i915_gem_obj_ggtt_bound(obj) && !obj->active;
 }

 int
@@ -178,7 +178,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 	mutex_lock(&dev->struct_mutex);
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (obj->pin_count)
-			pinned += obj->gtt_space->size;
+			pinned += i915_gem_obj_ggtt_size(obj);
 	mutex_unlock(&dev->struct_mutex);

 	args->aper_size = dev_priv->gtt.total;

@@ -422,7 +422,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
		 * anyway again before the next pread happens. */
		if (obj->cache_level == I915_CACHE_NONE)
			needs_clflush = 1;
-		if (obj->gtt_space) {
+		if (i915_gem_obj_ggtt_bound(obj)) {
			ret = i915_gem_object_set_to_gtt_domain(obj, false);
			if (ret)
				return ret;

@@ -609,7 +609,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
 	user_data = to_user_ptr(args->data_ptr);
 	remain = args->size;

-	offset = obj->gtt_offset + args->offset;
+	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;

 	while (remain > 0) {
		/* Operation in this page

@@ -739,7 +739,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
		 * right away and we therefore have to clflush anyway. */
		if (obj->cache_level == I915_CACHE_NONE)
			needs_clflush_after = 1;
-		if (obj->gtt_space) {
+		if (i915_gem_obj_ggtt_bound(obj)) {
			ret = i915_gem_object_set_to_gtt_domain(obj, true);
			if (ret)
				return ret;

@@ -1360,8 +1360,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)

 	obj->fault_mappable = true;

-	pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) +
-		page_offset;
+	pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
+	pfn >>= PAGE_SHIFT;
+	pfn += page_offset;

 	/* Finally, remap it using the new GTT offset */
 	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);

@@ -1667,7 +1668,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 	if (obj->pages == NULL)
		return 0;

-	BUG_ON(obj->gtt_space);
+	BUG_ON(i915_gem_obj_ggtt_bound(obj));

 	if (obj->pages_pin_count)
		return -EBUSY;

@@ -2085,7 +2086,7 @@ int __i915_add_request(struct intel_ring_buffer *ring,
 	trace_i915_gem_request_add(ring, request->seqno);
 	ring->outstanding_lazy_request = 0;

-	if (!dev_priv->mm.suspended) {
+	if (!dev_priv->ums.mm_suspended) {
		if (i915_enable_hangcheck) {
			mod_timer(&dev_priv->gpu_error.hangcheck_timer,
				  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));

@@ -2121,8 +2122,8 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)

 static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj)
 {
-	if (acthd >= obj->gtt_offset &&
-	    acthd < obj->gtt_offset + obj->base.size)
+	if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
+	    acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
		return true;

 	return false;

@@ -2180,11 +2181,11 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring,

 	if (ring->hangcheck.action != wait &&
	    i915_request_guilty(request, acthd, &inside)) {
-		DRM_ERROR("%s hung %s bo (0x%x ctx %d) at 0x%x\n",
+		DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
			  ring->name,
			  inside ? "inside" : "flushing",
			  request->batch_obj ?
-			  request->batch_obj->gtt_offset : 0,
+			  i915_gem_obj_ggtt_offset(request->batch_obj) : 0,
			  request->ctx ? request->ctx->id : 0,
			  acthd);

@@ -2390,7 +2391,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
		idle &= list_empty(&ring->request_list);
 	}

-	if (!dev_priv->mm.suspended && !idle)
+	if (!dev_priv->ums.mm_suspended && !idle)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
				   round_jiffies_up_relative(HZ));
 	if (idle)

@@ -2585,7 +2586,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
 	int ret;

-	if (obj->gtt_space == NULL)
+	if (!i915_gem_obj_ggtt_bound(obj))
		return 0;

 	if (obj->pin_count)

@@ -2624,9 +2625,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	/* Avoid an unnecessary call to unbind on rebind. */
 	obj->map_and_fenceable = true;

-	drm_mm_put_block(obj->gtt_space);
-	obj->gtt_space = NULL;
-	obj->gtt_offset = 0;
+	drm_mm_remove_node(&obj->gtt_space);

 	return 0;
 }

@@ -2681,12 +2680,12 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
 	POSTING_READ(fence_reg);

 	if (obj) {
-		u32 size = obj->gtt_space->size;
+		u32 size = i915_gem_obj_ggtt_size(obj);
		uint64_t val;

-		val = (uint64_t)((obj->gtt_offset + size - 4096) &
+		val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
				 0xfffff000) << 32;
-		val |= obj->gtt_offset & 0xfffff000;
+		val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
		val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
		if (obj->tiling_mode == I915_TILING_Y)
			val |= 1 << I965_FENCE_TILING_Y_SHIFT;

@@ -2710,15 +2709,15 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
 	u32 val;

 	if (obj) {
-		u32 size = obj->gtt_space->size;
+		u32 size = i915_gem_obj_ggtt_size(obj);
		int pitch_val;
		int tile_width;

-		WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
+		WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
		     (size & -size) != size ||
-		     (obj->gtt_offset & (size - 1)),
-		     "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
-		     obj->gtt_offset, obj->map_and_fenceable, size);
+		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+		     "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+		     i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);

		if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
			tile_width = 128;

@@ -2729,7 +2728,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
		pitch_val = obj->stride / tile_width;
		pitch_val = ffs(pitch_val) - 1;

-		val = obj->gtt_offset;
+		val = i915_gem_obj_ggtt_offset(obj);
		if (obj->tiling_mode == I915_TILING_Y)
			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
		val |= I915_FENCE_SIZE_BITS(size);

@@ -2754,19 +2753,19 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg,
 	uint32_t val;

 	if (obj) {
-		u32 size = obj->gtt_space->size;
+		u32 size = i915_gem_obj_ggtt_size(obj);
		uint32_t pitch_val;

-		WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
+		WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
		     (size & -size) != size ||
-		     (obj->gtt_offset & (size - 1)),
-		     "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
-		     obj->gtt_offset, size);
+		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+		     "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
+		     i915_gem_obj_ggtt_offset(obj), size);

		pitch_val = obj->stride / 128;
		pitch_val = ffs(pitch_val) - 1;

-		val = obj->gtt_offset;
+		val = i915_gem_obj_ggtt_offset(obj);
		if (obj->tiling_mode == I915_TILING_Y)
			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
		val |= I830_FENCE_SIZE_BITS(size);

@@ -2983,7 +2982,7 @@ static bool i915_gem_valid_gtt_space(struct drm_device *dev,
 	if (HAS_LLC(dev))
		return true;

-	if (gtt_space == NULL)
+	if (!drm_mm_node_allocated(gtt_space))
		return true;

 	if (list_empty(&gtt_space->node_list))

@@ -3016,8 +3015,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev)

		if (obj->cache_level != obj->gtt_space->color) {
			printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
-			       obj->gtt_space->start,
-			       obj->gtt_space->start + obj->gtt_space->size,
+			       i915_gem_obj_ggtt_offset(obj),
+			       i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
			       obj->cache_level,
			       obj->gtt_space->color);
			err++;

@@ -3028,8 +3027,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
					     obj->gtt_space,
					     obj->cache_level)) {
			printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
-			       obj->gtt_space->start,
-			       obj->gtt_space->start + obj->gtt_space->size,
+			       i915_gem_obj_ggtt_offset(obj),
+			       i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
			       obj->cache_level);
			err++;
			continue;

@@ -3051,7 +3050,6 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 {
 	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_mm_node *node;
 	u32 size, fence_size, fence_alignment, unfenced_alignment;
 	bool mappable, fenceable;
 	size_t gtt_max = map_and_fenceable ?

@@ -3096,14 +3094,9 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,

 	i915_gem_object_pin_pages(obj);

-	node = kzalloc(sizeof(*node), GFP_KERNEL);
-	if (node == NULL) {
-		i915_gem_object_unpin_pages(obj);
-		return -ENOMEM;
-	}
-
 search_free:
-	ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
+	ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space,
+						  &obj->gtt_space,
						  size, alignment,
						  obj->cache_level, 0, gtt_max);
 	if (ret) {

@@ -3115,34 +3108,31 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
			goto search_free;

		i915_gem_object_unpin_pages(obj);
-		kfree(node);
		return ret;
 	}
-	if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
+	if (WARN_ON(!i915_gem_valid_gtt_space(dev, &obj->gtt_space,
+					      obj->cache_level))) {
		i915_gem_object_unpin_pages(obj);
-		drm_mm_put_block(node);
+		drm_mm_remove_node(&obj->gtt_space);
		return -EINVAL;
 	}

 	ret = i915_gem_gtt_prepare_object(obj);
 	if (ret) {
		i915_gem_object_unpin_pages(obj);
-		drm_mm_put_block(node);
+		drm_mm_remove_node(&obj->gtt_space);
		return ret;
 	}

 	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
 	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

-	obj->gtt_space = node;
-	obj->gtt_offset = node->start;
-
 	fenceable =
-		node->size == fence_size &&
-		(node->start & (fence_alignment - 1)) == 0;
+		i915_gem_obj_ggtt_size(obj) == fence_size &&
+		(i915_gem_obj_ggtt_offset(obj) & (fence_alignment - 1)) == 0;

-	mappable =
-		obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end;
+	mappable = i915_gem_obj_ggtt_offset(obj) + obj->base.size <=
+		dev_priv->gtt.mappable_end;

 	obj->map_and_fenceable = mappable && fenceable;

@@ -3244,7 +3234,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	int ret;

 	/* Not valid to be called on unbound objects. */
-	if (obj->gtt_space == NULL)
+	if (!i915_gem_obj_ggtt_bound(obj))
		return -EINVAL;

 	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)

@@ -3303,13 +3293,13 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
		return -EBUSY;
 	}

-	if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
+	if (!i915_gem_valid_gtt_space(dev, &obj->gtt_space, cache_level)) {
		ret = i915_gem_object_unbind(obj);
		if (ret)
			return ret;
 	}

-	if (obj->gtt_space) {
+	if (i915_gem_obj_ggtt_bound(obj)) {
		ret = i915_gem_object_finish_gpu(obj);
		if (ret)
			return ret;

@@ -3332,7 +3322,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
			i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
					       obj, cache_level);

-		obj->gtt_space->color = cache_level;
+		i915_gem_obj_ggtt_set_color(obj, cache_level);
 	}

 	if (cache_level == I915_CACHE_NONE) {

@@ -3613,14 +3603,14 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 	if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
		return -EBUSY;

-	if (obj->gtt_space != NULL) {
-		if ((alignment && obj->gtt_offset & (alignment - 1)) ||
+	if (i915_gem_obj_ggtt_bound(obj)) {
+		if ((alignment && i915_gem_obj_ggtt_offset(obj) & (alignment - 1)) ||
		    (map_and_fenceable && !obj->map_and_fenceable)) {
			WARN(obj->pin_count,
			     "bo is already pinned with incorrect alignment:"
-			     " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
+			     " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
			     " obj->map_and_fenceable=%d\n",
-			     obj->gtt_offset, alignment,
+			     i915_gem_obj_ggtt_offset(obj), alignment,
			     map_and_fenceable,
			     obj->map_and_fenceable);
			ret = i915_gem_object_unbind(obj);

@@ -3629,7 +3619,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
		}
 	}

-	if (obj->gtt_space == NULL) {
+	if (!i915_gem_obj_ggtt_bound(obj)) {
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;

		ret = i915_gem_object_bind_to_gtt(obj, alignment,

@@ -3655,7 +3645,7 @@ void
 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
 {
 	BUG_ON(obj->pin_count == 0);
-	BUG_ON(obj->gtt_space == NULL);
+	BUG_ON(!i915_gem_obj_ggtt_bound(obj));

 	if (--obj->pin_count == 0)
		obj->pin_mappable = false;

@@ -3705,7 +3695,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
	 * as the X server doesn't manage domains yet
	 */
 	i915_gem_object_flush_cpu_write_domain(obj);
-	args->offset = obj->gtt_offset;
+	args->offset = i915_gem_obj_ggtt_offset(obj);
 out:
 	drm_gem_object_unreference(&obj->base);
 unlock:

@@ -3974,9 +3964,7 @@ i915_gem_idle(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret;

-	mutex_lock(&dev->struct_mutex);
-
-	if (dev_priv->mm.suspended) {
+	if (dev_priv->ums.mm_suspended) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
 	}

@@ -3992,18 +3980,11 @@ i915_gem_idle(struct drm_device *dev)
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_gem_evict_everything(dev);

-	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
-	 * We need to replace this with a semaphore, or something.
-	 * And not confound mm.suspended!
-	 */
-	dev_priv->mm.suspended = 1;
 	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);

 	i915_kernel_lost_context(dev);
 	i915_gem_cleanup_ringbuffer(dev);

-	mutex_unlock(&dev->struct_mutex);
-
 	/* Cancel the retire work handler, which should be idle now. */
 	cancel_delayed_work_sync(&dev_priv->mm.retire_work);

@@ -4213,7 +4194,7 @@ int
 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;

 	if (drm_core_check_feature(dev, DRIVER_MODESET))

@@ -4225,7 +4206,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 	}

 	mutex_lock(&dev->struct_mutex);
-	dev_priv->mm.suspended = 0;
+	dev_priv->ums.mm_suspended = 0;

 	ret = i915_gem_init_hw(dev);
 	if (ret != 0) {

@@ -4245,7 +4226,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 cleanup_ringbuffer:
 	mutex_lock(&dev->struct_mutex);
 	i915_gem_cleanup_ringbuffer(dev);
-	dev_priv->mm.suspended = 1;
+	dev_priv->ums.mm_suspended = 1;
 	mutex_unlock(&dev->struct_mutex);

 	return ret;

@@ -4255,11 +4236,26 @@ int
 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

 	drm_irq_uninstall(dev);
-	return i915_gem_idle(dev);
+
+	mutex_lock(&dev->struct_mutex);
+	ret = i915_gem_idle(dev);
+
+	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
+	 * We need to replace this with a semaphore, or something.
+	 * And not confound ums.mm_suspended!
+	 */
+	if (ret != 0)
+		dev_priv->ums.mm_suspended = 1;
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
 }

 void
@@ -4270,9 +4266,11 @@ i915_gem_lastclose(struct drm_device *dev)
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

+	mutex_lock(&dev->struct_mutex);
 	ret = i915_gem_idle(dev);
 	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
+	mutex_unlock(&dev->struct_mutex);
 }

 static void
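The bind/unbind hunks above carry the main payoff of embedding the drm_mm_node: there is no separately allocated node to manage. A condensed sketch of the new lifecycle, with the fence/mappable bookkeeping of the real i915_gem_object_bind_to_gtt() trimmed (bind_example() is illustrative only):

/* Illustrative only: embedded-node bind/unbind, error paths trimmed. */
static int bind_example(struct drm_i915_private *dev_priv,
			struct drm_i915_gem_object *obj,
			u32 size, u32 alignment, size_t gtt_max)
{
	/* Bind: insert the node that lives inside the object... */
	int ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space,
						      &obj->gtt_space,
						      size, alignment,
						      obj->cache_level,
						      0, gtt_max);
	if (ret)
		return ret;

	/* ...and unbind is a plain remove: no drm_mm_put_block(), no kfree(). */
	drm_mm_remove_node(&obj->gtt_space);
	return 0;
}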
drivers/gpu/drm/i915/i915_gem_context.c

@@ -377,7 +377,7 @@ mi_set_context(struct intel_ring_buffer *ring,

 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_emit(ring, MI_SET_CONTEXT);
-	intel_ring_emit(ring, new_context->obj->gtt_offset |
+	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
			MI_MM_SPACE_GTT |
			MI_SAVE_EXT_STATE_EN |
			MI_RESTORE_EXT_STATE_EN |
drivers/gpu/drm/i915/i915_gem_evict.c

@@ -38,7 +38,7 @@ mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
		return false;

 	list_add(&obj->exec_list, unwind);
-	return drm_mm_scan_add_block(obj->gtt_space);
+	return drm_mm_scan_add_block(&obj->gtt_space);
 }

 int
@@ -107,7 +107,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
				       struct drm_i915_gem_object,
				       exec_list);

-		ret = drm_mm_scan_remove_block(obj->gtt_space);
+		ret = drm_mm_scan_remove_block(&obj->gtt_space);
		BUG_ON(ret);

		list_del_init(&obj->exec_list);

@@ -127,7 +127,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
		obj = list_first_entry(&unwind_list,
				       struct drm_i915_gem_object,
				       exec_list);
-		if (drm_mm_scan_remove_block(obj->gtt_space)) {
+		if (drm_mm_scan_remove_block(&obj->gtt_space)) {
			list_move(&obj->exec_list, &eviction_list);
			drm_gem_object_reference(&obj->base);
			continue;
drivers/gpu/drm/i915/i915_gem_execbuffer.c

@@ -188,7 +188,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
		return -ENOENT;

 	target_i915_obj = to_intel_bo(target_obj);
-	target_offset = target_i915_obj->gtt_offset;
+	target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);

 	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them

@@ -280,7 +280,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
		return ret;

 	/* Map the page containing the relocation we're going to perform.  */
-	reloc->offset += obj->gtt_offset;
+	reloc->offset += i915_gem_obj_ggtt_offset(obj);
 	reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
					      reloc->offset & PAGE_MASK);
 	reloc_entry = (uint32_t __iomem *)

@@ -436,8 +436,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
		obj->has_aliasing_ppgtt_mapping = 1;
 	}

-	if (entry->offset != obj->gtt_offset) {
-		entry->offset = obj->gtt_offset;
+	if (entry->offset != i915_gem_obj_ggtt_offset(obj)) {
+		entry->offset = i915_gem_obj_ggtt_offset(obj);
		*need_reloc = true;
 	}

@@ -458,7 +458,7 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_gem_exec_object2 *entry;

-	if (!obj->gtt_space)
+	if (!i915_gem_obj_ggtt_bound(obj))
		return;

 	entry = obj->exec_entry;

@@ -530,7 +530,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
		struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
		bool need_fence, need_mappable;

-		if (!obj->gtt_space)
+		if (!i915_gem_obj_ggtt_bound(obj))
			continue;

		need_fence =

@@ -539,7 +539,8 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
			obj->tiling_mode != I915_TILING_NONE;
		need_mappable = need_fence || need_reloc_mappable(obj);

-		if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
+		if ((entry->alignment &&
+		     i915_gem_obj_ggtt_offset(obj) & (entry->alignment - 1)) ||
		    (need_mappable && !obj->map_and_fenceable))
			ret = i915_gem_object_unbind(obj);
		else

@@ -550,7 +551,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,

	/* Bind fresh objects */
	list_for_each_entry(obj, objects, exec_list) {
-		if (obj->gtt_space)
+		if (i915_gem_obj_ggtt_bound(obj))
			continue;

		ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);

@@ -972,7 +973,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (ret)
		goto pre_mutex_err;

-	if (dev_priv->mm.suspended) {
+	if (dev_priv->ums.mm_suspended) {
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;

@@ -1058,7 +1059,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		goto err;
 	}

-	exec_start = batch_obj->gtt_offset + args->batch_start_offset;
+	exec_start = i915_gem_obj_ggtt_offset(batch_obj) + args->batch_start_offset;
 	exec_len = args->batch_len;
 	if (cliprects) {
		for (i = 0; i < args->num_cliprects; i++) {
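For the relocation hunks above, the interesting part is that the CPU-side patching now derives the target dword's address from the embedded node. A trimmed sketch of the write path (domain management and the PPGTT-errata handling of the real i915_gem_execbuffer_relocate_entry() are omitted; write_reloc_example() is illustrative):

/* Illustrative only: patching one relocation through the GTT window. */
static void write_reloc_example(struct drm_i915_private *dev_priv,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_relocation_entry *reloc,
				uint32_t target_offset)
{
	/* Absolute GGTT address of the dword to patch. */
	uint32_t offset = i915_gem_obj_ggtt_offset(obj) + reloc->offset;
	void __iomem *reloc_page;
	uint32_t __iomem *reloc_entry;

	reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
					      offset & PAGE_MASK);
	reloc_entry = (uint32_t __iomem *)(reloc_page + (offset & ~PAGE_MASK));
	iowrite32(reloc->delta + target_offset, reloc_entry);
	io_mapping_unmap_atomic(reloc_page);
}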
drivers/gpu/drm/i915/i915_gem_gtt.c

@@ -28,6 +28,9 @@
 #include "i915_trace.h"
 #include "intel_drv.h"

+#define GEN6_PPGTT_PD_ENTRIES 512
+#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
+
 /* PPGTT stuff */
 #define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))

@@ -42,8 +45,7 @@
 #define GEN6_PTE_CACHE_LLC_MLC		(3 << 1)
 #define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)

-static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
-				      dma_addr_t addr,
+static gen6_gtt_pte_t gen6_pte_encode(dma_addr_t addr,
				      enum i915_cache_level level)
 {
 	gen6_gtt_pte_t pte = GEN6_PTE_VALID;

@@ -69,8 +71,7 @@ static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
 #define BYT_PTE_WRITEABLE		(1 << 1)
 #define BYT_PTE_SNOOPED_BY_CPU_CACHES	(1 << 2)

-static gen6_gtt_pte_t byt_pte_encode(struct drm_device *dev,
-				     dma_addr_t addr,
+static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
				     enum i915_cache_level level)
 {
 	gen6_gtt_pte_t pte = GEN6_PTE_VALID;

@@ -87,8 +88,7 @@ static gen6_gtt_pte_t byt_pte_encode(struct drm_device *dev,
 	return pte;
 }

-static gen6_gtt_pte_t hsw_pte_encode(struct drm_device *dev,
-				     dma_addr_t addr,
+static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
				     enum i915_cache_level level)
 {
 	gen6_gtt_pte_t pte = GEN6_PTE_VALID;

@@ -185,13 +185,13 @@ static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
				   unsigned first_entry,
				   unsigned num_entries)
 {
+	struct drm_i915_private *dev_priv = ppgtt->dev->dev_private;
 	gen6_gtt_pte_t *pt_vaddr, scratch_pte;
 	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
 	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
 	unsigned last_pte, i;

-	scratch_pte = ppgtt->pte_encode(ppgtt->dev,
-					ppgtt->scratch_page_dma_addr,
+	scratch_pte = ppgtt->pte_encode(dev_priv->gtt.scratch.addr,
					I915_CACHE_LLC);

 	while (num_entries) {

@@ -227,8 +227,7 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
		dma_addr_t page_addr;

		page_addr = sg_page_iter_dma_address(&sg_iter);
-		pt_vaddr[act_pte] = ppgtt->pte_encode(ppgtt->dev, page_addr,
-						      cache_level);
+		pt_vaddr[act_pte] = ppgtt->pte_encode(page_addr, cache_level);
		if (++act_pte == I915_PPGTT_PT_ENTRIES) {
			kunmap_atomic(pt_vaddr);
			act_pt++;

@@ -278,7 +277,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 	} else {
		ppgtt->pte_encode = gen6_pte_encode;
 	}
-	ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
+	ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
 	ppgtt->enable = gen6_ppgtt_enable;
 	ppgtt->clear_range = gen6_ppgtt_clear_range;
 	ppgtt->insert_entries = gen6_ppgtt_insert_entries;

@@ -348,7 +347,6 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
		return -ENOMEM;

 	ppgtt->dev = dev;
-	ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;

 	if (INTEL_INFO(dev)->gen < 8)
		ret = gen6_ppgtt_init(ppgtt);

@@ -380,7 +378,7 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
			    enum i915_cache_level cache_level)
 {
 	ppgtt->insert_entries(ppgtt, obj->pages,
-			      obj->gtt_space->start >> PAGE_SHIFT,
+			      i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
			      cache_level);
 }

@@ -388,7 +386,7 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_object *obj)
 {
 	ppgtt->clear_range(ppgtt,
-			   obj->gtt_space->start >> PAGE_SHIFT,
+			   i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
			   obj->base.size >> PAGE_SHIFT);
 }

@@ -480,7 +478,7 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,

 	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
		addr = sg_page_iter_dma_address(&sg_iter);
-		iowrite32(dev_priv->gtt.pte_encode(dev, addr, level),
+		iowrite32(dev_priv->gtt.pte_encode(addr, level),
			  &gtt_entries[i]);
		i++;
 	}

@@ -493,7 +491,7 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
	 */
 	if (i != 0)
		WARN_ON(readl(&gtt_entries[i-1])
-			!= dev_priv->gtt.pte_encode(dev, addr, level));
+			!= dev_priv->gtt.pte_encode(addr, level));

 	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates

@@ -518,8 +516,7 @@ static void gen6_ggtt_clear_range(struct drm_device *dev,
				      first_entry, num_entries, max_entries))
		num_entries = max_entries;

-	scratch_pte = dev_priv->gtt.pte_encode(dev,
-					       dev_priv->gtt.scratch_page_dma,
+	scratch_pte = dev_priv->gtt.pte_encode(dev_priv->gtt.scratch.addr,
					       I915_CACHE_LLC);
 	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);

@@ -554,7 +551,7 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
 	struct drm_i915_private *dev_priv = dev->dev_private;

 	dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
-					 obj->gtt_space->start >> PAGE_SHIFT,
+					 i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
					 cache_level);

 	obj->has_global_gtt_mapping = 1;

@@ -566,7 +563,7 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
 	struct drm_i915_private *dev_priv = dev->dev_private;

 	dev_priv->gtt.gtt_clear_range(obj->base.dev,
-				      obj->gtt_space->start >> PAGE_SHIFT,
+				      i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
				      obj->base.size >> PAGE_SHIFT);

 	obj->has_global_gtt_mapping = 0;

@@ -632,14 +629,15 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,

 	/* Mark any preallocated objects as occupied */
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
-			      obj->gtt_offset, obj->base.size);
+		int ret;
+		DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
+			      i915_gem_obj_ggtt_offset(obj), obj->base.size);

-		BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
-		obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
-						     obj->gtt_offset,
-						     obj->base.size,
-						     false);
+		WARN_ON(i915_gem_obj_ggtt_bound(obj));
+		ret = drm_mm_reserve_node(&dev_priv->mm.gtt_space,
+					  &obj->gtt_space);
+		if (ret)
+			DRM_DEBUG_KMS("Reservation failed\n");
		obj->has_global_gtt_mapping = 1;
 	}

@@ -688,7 +686,7 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
 	if (INTEL_INFO(dev)->gen <= 7) {
		/* PPGTT pdes are stolen from global gtt ptes, so shrink the
		 * aperture accordingly when using aliasing ppgtt. */
-		gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+		gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
 	}

 	i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);

@@ -699,7 +697,7 @@ void i915_gem_init_global_gtt(struct drm_device *dev)

		DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
		drm_mm_takedown(&dev_priv->mm.gtt_space);
-		gtt_size += I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+		gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
 	}
 	i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
 }

@@ -724,8 +722,8 @@ static int setup_scratch_page(struct drm_device *dev)
 #else
 	dma_addr = page_to_phys(page);
 #endif
-	dev_priv->gtt.scratch_page = page;
-	dev_priv->gtt.scratch_page_dma = dma_addr;
+	dev_priv->gtt.scratch.page = page;
+	dev_priv->gtt.scratch.addr = dma_addr;

 	return 0;
 }

@@ -733,11 +731,11 @@ static int setup_scratch_page(struct drm_device *dev)
 static void teardown_scratch_page(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	set_pages_wb(dev_priv->gtt.scratch_page, 1);
-	pci_unmap_page(dev->pdev, dev_priv->gtt.scratch_page_dma,
+	set_pages_wb(dev_priv->gtt.scratch.page, 1);
+	pci_unmap_page(dev->pdev, dev_priv->gtt.scratch.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-	put_page(dev_priv->gtt.scratch_page);
-	__free_page(dev_priv->gtt.scratch_page);
+	put_page(dev_priv->gtt.scratch.page);
+	__free_page(dev_priv->gtt.scratch.page);
 }

 static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)

@@ -849,34 +847,28 @@ int i915_gem_gtt_init(struct drm_device *dev)
 	int ret;

 	if (INTEL_INFO(dev)->gen <= 5) {
-		dev_priv->gtt.gtt_probe = i915_gmch_probe;
-		dev_priv->gtt.gtt_remove = i915_gmch_remove;
+		gtt->gtt_probe = i915_gmch_probe;
+		gtt->gtt_remove = i915_gmch_remove;
 	} else {
-		dev_priv->gtt.gtt_probe = gen6_gmch_probe;
-		dev_priv->gtt.gtt_remove = gen6_gmch_remove;
-		if (IS_HASWELL(dev)) {
-			dev_priv->gtt.pte_encode = hsw_pte_encode;
-		} else if (IS_VALLEYVIEW(dev)) {
-			dev_priv->gtt.pte_encode = byt_pte_encode;
-		} else {
-			dev_priv->gtt.pte_encode = gen6_pte_encode;
-		}
+		gtt->gtt_probe = gen6_gmch_probe;
+		gtt->gtt_remove = gen6_gmch_remove;
+		if (IS_HASWELL(dev))
+			gtt->pte_encode = hsw_pte_encode;
+		else if (IS_VALLEYVIEW(dev))
+			gtt->pte_encode = byt_pte_encode;
+		else
+			gtt->pte_encode = gen6_pte_encode;
 	}

-	ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total,
-				      &dev_priv->gtt.stolen_size,
-				      &gtt->mappable_base,
-				      &gtt->mappable_end);
+	ret = gtt->gtt_probe(dev, &gtt->total, &gtt->stolen_size,
+			     &gtt->mappable_base, &gtt->mappable_end);
 	if (ret)
		return ret;

 	/* GMADR is the PCI mmio aperture into the global GTT. */
-	DRM_INFO("Memory usable by graphics device = %zdM\n",
-		 dev_priv->gtt.total >> 20);
-	DRM_DEBUG_DRIVER("GMADR size = %ldM\n",
-			 dev_priv->gtt.mappable_end >> 20);
-	DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n",
-			 dev_priv->gtt.stolen_size >> 20);
+	DRM_INFO("Memory usable by graphics device = %zdM\n", gtt->total >> 20);
+	DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
+	DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);

 	return 0;
 }
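Dropping the struct drm_device argument (visible in the hunks above) turns the PTE encoders into pure address-and-cache-level transforms, so the GGTT and PPGTT can share one function-pointer shape. A sketch of the selection logic, mirroring the i915_gem_gtt_init() hunk (the typedef name and helper are illustrative, not from the series):

/* Illustrative only: the common encoder shape after this series. */
typedef gen6_gtt_pte_t (*pte_encode_fn)(dma_addr_t addr,
					enum i915_cache_level level);

static pte_encode_fn pick_pte_encode(struct drm_device *dev)
{
	if (IS_HASWELL(dev))
		return hsw_pte_encode;
	else if (IS_VALLEYVIEW(dev))
		return byt_pte_encode;
	else
		return gen6_pte_encode;
}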
drivers/gpu/drm/i915/i915_gem_stolen.c

@@ -46,6 +46,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct pci_dev *pdev = dev_priv->bridge_dev;
+	struct resource *r;
 	u32 base;

 	/* On the machines I have tested the Graphics Base of Stolen Memory

@@ -88,6 +89,22 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 #endif
 	}

+	if (base == 0)
+		return 0;
+
+	/* Verify that nothing else uses this physical address. Stolen
+	 * memory should be reserved by the BIOS and hidden from the
+	 * kernel. So if the region is already marked as busy, something
+	 * is seriously wrong.
+	 */
+	r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
+				    "Graphics Stolen Memory");
+	if (r == NULL) {
+		DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
+			  base, base + (uint32_t)dev_priv->gtt.stolen_size);
+		base = 0;
+	}
+
 	return base;
 }

@@ -120,7 +137,7 @@ static int i915_setup_compression(struct drm_device *dev, int size)
		if (!compressed_llb)
			goto err_fb;

-		dev_priv->compressed_llb = compressed_llb;
+		dev_priv->fbc.compressed_llb = compressed_llb;

		I915_WRITE(FBC_CFB_BASE,
			   dev_priv->mm.stolen_base + compressed_fb->start);

@@ -128,8 +145,8 @@ static int i915_setup_compression(struct drm_device *dev, int size)
			   dev_priv->mm.stolen_base + compressed_llb->start);
 	}

-	dev_priv->compressed_fb = compressed_fb;
-	dev_priv->cfb_size = size;
+	dev_priv->fbc.compressed_fb = compressed_fb;
+	dev_priv->fbc.size = size;

 	DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
		      size);

@@ -150,7 +167,7 @@ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
 	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return -ENODEV;

-	if (size < dev_priv->cfb_size)
+	if (size < dev_priv->fbc.size)
		return 0;

 	/* Release any current block */

@@ -163,16 +180,16 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;

-	if (dev_priv->cfb_size == 0)
+	if (dev_priv->fbc.size == 0)
		return;

-	if (dev_priv->compressed_fb)
-		drm_mm_put_block(dev_priv->compressed_fb);
+	if (dev_priv->fbc.compressed_fb)
+		drm_mm_put_block(dev_priv->fbc.compressed_fb);

-	if (dev_priv->compressed_llb)
-		drm_mm_put_block(dev_priv->compressed_llb);
+	if (dev_priv->fbc.compressed_llb)
+		drm_mm_put_block(dev_priv->fbc.compressed_llb);

-	dev_priv->cfb_size = 0;
+	dev_priv->fbc.size = 0;
 }

 void i915_gem_cleanup_stolen(struct drm_device *dev)

@@ -201,6 +218,9 @@ int i915_gem_init_stolen(struct drm_device *dev)
 	if (IS_VALLEYVIEW(dev))
		bios_reserved = 1024*1024; /* top 1M on VLV/BYT */

+	if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
+		return 0;
+
 	/* Basic memrange allocator for stolen space */
 	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
		    bios_reserved);

@@ -333,6 +353,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 	struct drm_mm_node *stolen;
+	int ret;

 	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

@@ -347,11 +368,16 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	if (WARN_ON(size == 0))
		return NULL;

-	stolen = drm_mm_create_block(&dev_priv->mm.stolen,
-				     stolen_offset, size,
-				     false);
-	if (stolen == NULL) {
+	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
+	if (!stolen)
+		return NULL;
+
+	stolen->start = stolen_offset;
+	stolen->size = size;
+	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
+	if (ret) {
		DRM_DEBUG_KMS("failed to allocate stolen space\n");
+		kfree(stolen);
		return NULL;
 	}

@@ -363,7 +389,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	}

 	/* Some objects just need physical mem from stolen space */
-	if (gtt_offset == -1)
+	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

 	/* To simplify the initialisation sequence between KMS and GTT,

@@ -371,25 +397,27 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
	 * setting up the GTT space. The actual reservation will occur
	 * later.
	 */
+	obj->gtt_space.start = gtt_offset;
+	obj->gtt_space.size = size;
 	if (drm_mm_initialized(&dev_priv->mm.gtt_space)) {
-		obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
-						     gtt_offset, size,
-						     false);
-		if (obj->gtt_space == NULL) {
+		ret = drm_mm_reserve_node(&dev_priv->mm.gtt_space,
+					  &obj->gtt_space);
+		if (ret) {
			DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
-			drm_gem_object_unreference(&obj->base);
-			return NULL;
+			goto unref_out;
		}
-	} else
-		obj->gtt_space = I915_GTT_RESERVED;
+	}

-	obj->gtt_offset = gtt_offset;
 	obj->has_global_gtt_mapping = 1;

 	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
 	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

 	return obj;
+
+unref_out:
+	drm_gem_object_unreference(&obj->base);
+	return NULL;
 }

 void
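Both stolen-space paths above now funnel through drm_mm_reserve_node(): the caller owns the node, fills in start and size, and the allocator claims exactly that range, which is what replaces the old drm_mm_create_block(). A condensed sketch of the pattern (claim_range() is illustrative only):

/* Illustrative only: claiming a fixed range with drm_mm_reserve_node(). */
static struct drm_mm_node *claim_range(struct drm_mm *mm,
				       unsigned long start,
				       unsigned long size)
{
	struct drm_mm_node *node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!node)
		return NULL;

	node->start = start;
	node->size = size;
	if (drm_mm_reserve_node(mm, node)) {	/* non-zero errno on overlap */
		kfree(node);
		return NULL;
	}
	return node;
}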
drivers/gpu/drm/i915/i915_gem_tiling.c

@@ -268,18 +268,18 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
		return true;

 	if (INTEL_INFO(obj->base.dev)->gen == 3) {
-		if (obj->gtt_offset & ~I915_FENCE_START_MASK)
+		if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
			return false;
 	} else {
-		if (obj->gtt_offset & ~I830_FENCE_START_MASK)
+		if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK)
			return false;
 	}

 	size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
-	if (obj->gtt_space->size != size)
+	if (i915_gem_obj_ggtt_size(obj) != size)
		return false;

-	if (obj->gtt_offset & (size - 1))
+	if (i915_gem_obj_ggtt_offset(obj) & (size - 1))
		return false;

 	return true;

@@ -359,8 +359,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
		 */

		obj->map_and_fenceable =
-			obj->gtt_space == NULL ||
-			(obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end &&
+			!i915_gem_obj_ggtt_bound(obj) ||
+			(i915_gem_obj_ggtt_offset(obj) + obj->base.size <= dev_priv->gtt.mappable_end &&
			 i915_gem_object_fence_ok(obj, args->tiling_mode));

		/* Rebind if we need a change of alignment */

@@ -369,7 +369,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
				i915_gem_get_gtt_alignment(dev, obj->base.size,
							   args->tiling_mode,
							   false);
-			if (obj->gtt_offset & (unfenced_alignment - 1))
+			if (i915_gem_obj_ggtt_offset(obj) & (unfenced_alignment - 1))
				ret = i915_gem_object_unbind(obj);
		}
drivers/gpu/drm/i915/i915_gpu_error.c (new file, 971 lines)

@@ -0,0 +1,971 @@
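The file below implements a seekable printf buffer: a debugfs/sysfs read at offset pos allocates a window, replays the whole error state through err_printf(), and keeps only the bytes from pos onward, so multi-megabyte error states can be read in chunks. Roughly how a read-side caller consumes it (the wrapper below is an illustrative sketch, not part of the file; the buf/bytes fields and the i915_error_state_buf_* helpers are declared in the i915_drv.h hunk above):

/* Illustrative only: consuming the windowed error buffer in chunks. */
static ssize_t read_error_state_example(struct i915_error_state_file_priv *priv,
					char *buf, size_t count, loff_t pos)
{
	struct drm_i915_error_state_buf error_str;
	ssize_t ret;

	ret = i915_error_state_buf_init(&error_str, count, pos);
	if (ret)
		return ret;

	/* err_printf() output before 'pos' is counted but not stored. */
	ret = i915_error_state_to_str(&error_str, priv);
	if (ret == 0) {
		memcpy(buf, error_str.buf, error_str.bytes);
		ret = error_str.bytes;
	}

	i915_error_state_buf_release(&error_str);
	return ret;
}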
/*
|
||||
* Copyright (c) 2008 Intel Corporation
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
* IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Eric Anholt <eric@anholt.net>
|
||||
* Keith Packard <keithp@keithp.com>
|
||||
* Mika Kuoppala <mika.kuoppala@intel.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#include <generated/utsrelease.h>
|
||||
#include "i915_drv.h"
|
||||
|
||||
static const char *yesno(int v)
|
||||
{
|
||||
return v ? "yes" : "no";
|
||||
}
|
||||
|
||||
static const char *ring_str(int ring)
|
||||
{
|
||||
switch (ring) {
|
||||
case RCS: return "render";
|
||||
case VCS: return "bsd";
|
||||
case BCS: return "blt";
|
||||
case VECS: return "vebox";
|
||||
default: return "";
|
||||
}
|
||||
}
|
||||
|
||||
static const char *pin_flag(int pinned)
|
||||
{
|
||||
if (pinned > 0)
|
||||
return " P";
|
||||
else if (pinned < 0)
|
||||
return " p";
|
||||
else
|
||||
return "";
|
||||
}
|
||||
|
||||
static const char *tiling_flag(int tiling)
|
||||
{
|
||||
switch (tiling) {
|
||||
default:
|
||||
case I915_TILING_NONE: return "";
|
||||
case I915_TILING_X: return " X";
|
||||
case I915_TILING_Y: return " Y";
|
||||
}
|
||||
}
|
||||
|
||||
static const char *dirty_flag(int dirty)
|
||||
{
|
||||
return dirty ? " dirty" : "";
|
||||
}
|
||||
|
||||
static const char *purgeable_flag(int purgeable)
|
||||
{
|
||||
return purgeable ? " purgeable" : "";
|
||||
}
|
||||
|
||||
static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
|
||||
{
|
||||
|
||||
if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
|
||||
e->err = -ENOSPC;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (e->bytes == e->size - 1 || e->err)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
|
||||
unsigned len)
|
||||
{
|
||||
if (e->pos + len <= e->start) {
|
||||
e->pos += len;
|
||||
return false;
|
||||
}
|
||||
|
||||
/* First vsnprintf needs to fit in its entirety for memmove */
|
||||
if (len >= e->size) {
|
||||
e->err = -EIO;
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void __i915_error_advance(struct drm_i915_error_state_buf *e,
|
||||
unsigned len)
|
||||
{
|
||||
/* If this is first printf in this window, adjust it so that
|
||||
* start position matches start of the buffer
|
||||
*/
|
||||
|
||||
if (e->pos < e->start) {
|
||||
const size_t off = e->start - e->pos;
|
||||
|
||||
/* Should not happen but be paranoid */
|
||||
if (off > len || e->bytes) {
|
||||
e->err = -EIO;
|
||||
return;
|
||||
}
|
||||
|
||||
memmove(e->buf, e->buf + off, len - off);
|
||||
e->bytes = len - off;
|
||||
e->pos = e->start;
|
||||
return;
|
||||
}
|
||||
|
||||
e->bytes += len;
|
||||
e->pos += len;
|
||||
}
|
||||
|
||||
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
|
||||
const char *f, va_list args)
|
||||
{
|
||||
unsigned len;
|
||||
|
||||
if (!__i915_error_ok(e))
|
||||
return;
|
||||
|
||||
/* Seek the first printf which is hits start position */
|
||||
if (e->pos < e->start) {
|
||||
len = vsnprintf(NULL, 0, f, args);
|
||||
if (!__i915_error_seek(e, len))
|
||||
return;
|
||||
}
|
||||
|
||||
len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
|
||||
if (len >= e->size - e->bytes)
|
||||
len = e->size - e->bytes - 1;
|
||||
|
||||
__i915_error_advance(e, len);
|
||||
}
|
||||
|
||||
static void i915_error_puts(struct drm_i915_error_state_buf *e,
|
||||
const char *str)
|
||||
{
|
||||
unsigned len;
|
||||
|
||||
if (!__i915_error_ok(e))
|
||||
return;
|
||||
|
||||
len = strlen(str);
|
||||
|
||||
/* Seek the first printf which is hits start position */
|
||||
if (e->pos < e->start) {
|
||||
if (!__i915_error_seek(e, len))
|
||||
return;
|
||||
}
|
||||
|
||||
if (len >= e->size - e->bytes)
|
||||
len = e->size - e->bytes - 1;
|
||||
memcpy(e->buf + e->bytes, str, len);
|
||||
|
||||
__i915_error_advance(e, len);
|
||||
}
|
||||
|
||||
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
|
||||
#define err_puts(e, s) i915_error_puts(e, s)
|
||||
|
||||
static void print_error_buffers(struct drm_i915_error_state_buf *m,
|
||||
const char *name,
|
||||
struct drm_i915_error_buffer *err,
|
||||
int count)
|
||||
{
|
||||
err_printf(m, "%s [%d]:\n", name, count);
|
||||
|
||||
while (count--) {
|
||||
err_printf(m, " %08x %8u %02x %02x %x %x",
|
||||
err->gtt_offset,
|
||||
err->size,
|
||||
err->read_domains,
|
||||
err->write_domain,
|
||||
err->rseqno, err->wseqno);
|
||||
err_puts(m, pin_flag(err->pinned));
|
||||
err_puts(m, tiling_flag(err->tiling));
|
||||
err_puts(m, dirty_flag(err->dirty));
|
||||
err_puts(m, purgeable_flag(err->purgeable));
|
||||
err_puts(m, err->ring != -1 ? " " : "");
|
||||
err_puts(m, ring_str(err->ring));
|
||||
err_puts(m, i915_cache_level_str(err->cache_level));
|
||||
|
||||
if (err->name)
|
||||
err_printf(m, " (name: %d)", err->name);
|
||||
if (err->fence_reg != I915_FENCE_REG_NONE)
|
||||
err_printf(m, " (fence: %d)", err->fence_reg);
|
||||
|
||||
err_puts(m, "\n");
|
||||
err++;
|
||||
}
|
||||
}
|
||||
|
||||
static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
|
||||
struct drm_device *dev,
|
||||
struct drm_i915_error_state *error,
|
||||
unsigned ring)
|
||||
{
|
||||
BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
|
||||
err_printf(m, "%s command stream:\n", ring_str(ring));
|
||||
err_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
|
||||
err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
|
||||
err_printf(m, " CTL: 0x%08x\n", error->ctl[ring]);
|
||||
err_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
|
||||
err_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
|
||||
err_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
|
||||
err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
|
||||
if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
|
||||
err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
|
||||
|
||||
if (INTEL_INFO(dev)->gen >= 4)
|
||||
err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
|
||||
err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
|
||||
err_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
|
||||
if (INTEL_INFO(dev)->gen >= 6) {
|
||||
err_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
|
||||
err_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
|
||||
err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
|
||||
error->semaphore_mboxes[ring][0],
|
||||
error->semaphore_seqno[ring][0]);
|
||||
err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
|
||||
error->semaphore_mboxes[ring][1],
|
||||
error->semaphore_seqno[ring][1]);
|
||||
}
|
||||
err_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
|
||||
err_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
|
||||
err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
|
||||
err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
|
||||
}
|
||||
|
||||
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
|
||||
{
|
||||
va_list args;
|
||||
|
||||
va_start(args, f);
|
||||
i915_error_vprintf(e, f, args);
|
||||
va_end(args);
|
||||
}
|
||||
|
||||
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
|
||||
const struct i915_error_state_file_priv *error_priv)
|
||||
{
|
||||
struct drm_device *dev = error_priv->dev;
|
||||
drm_i915_private_t *dev_priv = dev->dev_private;
|
||||
struct drm_i915_error_state *error = error_priv->error;
|
||||
struct intel_ring_buffer *ring;
|
||||
int i, j, page, offset, elt;
|
||||
|
||||
if (!error) {
|
||||
err_printf(m, "no error state collected\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
|
||||
error->time.tv_usec);
|
||||
err_printf(m, "Kernel: " UTS_RELEASE "\n");
|
||||
err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
|
||||
err_printf(m, "EIR: 0x%08x\n", error->eir);
|
||||
err_printf(m, "IER: 0x%08x\n", error->ier);
|
||||
err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
|
||||
err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
|
||||
err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
|
||||
err_printf(m, "CCID: 0x%08x\n", error->ccid);
|
||||
|
||||
for (i = 0; i < dev_priv->num_fence_regs; i++)
|
||||
err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
|
||||
err_printf(m, " INSTDONE_%d: 0x%08x\n", i,
|
||||
error->extra_instdone[i]);
|
||||
|
||||
if (INTEL_INFO(dev)->gen >= 6) {
|
||||
err_printf(m, "ERROR: 0x%08x\n", error->error);
|
||||
err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
|
||||
}
|
||||
|
||||
if (INTEL_INFO(dev)->gen == 7)
|
||||
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
|
||||
|
||||
for_each_ring(ring, dev_priv, i)
|
||||
i915_ring_error_state(m, dev, error, i);
|
||||
|
||||
if (error->active_bo)
|
||||
print_error_buffers(m, "Active",
|
||||
error->active_bo,
|
||||
error->active_bo_count);
|
||||
|
||||
if (error->pinned_bo)
|
||||
print_error_buffers(m, "Pinned",
|
||||
error->pinned_bo,
|
||||
error->pinned_bo_count);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
|
||||
struct drm_i915_error_object *obj;
|
||||
|
||||
if ((obj = error->ring[i].batchbuffer)) {
|
||||
err_printf(m, "%s --- gtt_offset = 0x%08x\n",
|
||||
dev_priv->ring[i].name,
|
||||
obj->gtt_offset);
|
||||
offset = 0;
|
||||
for (page = 0; page < obj->page_count; page++) {
|
||||
for (elt = 0; elt < PAGE_SIZE/4; elt++) {
|
||||
err_printf(m, "%08x : %08x\n", offset,
|
||||
obj->pages[page][elt]);
|
||||
offset += 4;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (error->ring[i].num_requests) {
|
||||
err_printf(m, "%s --- %d requests\n",
|
||||
dev_priv->ring[i].name,
|
||||
error->ring[i].num_requests);
|
||||
for (j = 0; j < error->ring[i].num_requests; j++) {
|
||||
err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
|
||||
error->ring[i].requests[j].seqno,
|
||||
error->ring[i].requests[j].jiffies,
|
||||
error->ring[i].requests[j].tail);
|
||||
}
|
||||
}
|
||||
|
||||
if ((obj = error->ring[i].ringbuffer)) {
|
||||
err_printf(m, "%s --- ringbuffer = 0x%08x\n",
|
||||
dev_priv->ring[i].name,
|
||||
obj->gtt_offset);
|
||||
offset = 0;
|
||||
for (page = 0; page < obj->page_count; page++) {
|
||||
for (elt = 0; elt < PAGE_SIZE/4; elt++) {
|
||||
err_printf(m, "%08x : %08x\n",
|
||||
offset,
|
||||
obj->pages[page][elt]);
|
||||
offset += 4;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
obj = error->ring[i].ctx;
|
||||
if (obj) {
|
||||
err_printf(m, "%s --- HW Context = 0x%08x\n",
|
||||
dev_priv->ring[i].name,
|
||||
obj->gtt_offset);
|
||||
offset = 0;
|
||||
for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
|
||||
err_printf(m, "[%04x] %08x %08x %08x %08x\n",
|
||||
offset,
|
||||
obj->pages[0][elt],
|
||||
obj->pages[0][elt+1],
|
||||
obj->pages[0][elt+2],
|
||||
obj->pages[0][elt+3]);
|
||||
offset += 16;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (error->overlay)
|
||||
intel_overlay_print_error_state(m, error->overlay);
|
||||
|
||||
if (error->display)
|
||||
intel_display_print_error_state(m, dev, error->display);
|
||||
|
||||
out:
|
||||
if (m->bytes == 0 && m->err)
|
||||
return m->err;
|
||||
|
||||
return 0;
|
||||
}

int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
			      size_t count, loff_t pos)
{
	memset(ebuf, 0, sizeof(*ebuf));

	/* We need to have enough room to store any i915_error_state printf
	 * so that we can move it to start position.
	 */
	ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
	ebuf->buf = kmalloc(ebuf->size,
			    GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);

	if (ebuf->buf == NULL) {
		ebuf->size = PAGE_SIZE;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL) {
		ebuf->size = 128;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL)
		return -ENOMEM;

	ebuf->start = pos;

	return 0;
}
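
The helper above degrades gracefully under memory pressure: it first asks for a buffer large enough for the whole requested read (count + 1, at least a page) without retrying or warning, then falls back to a single page, then to a 128-byte minimum before reporting -ENOMEM. A minimal standalone sketch of the same descending-fallback pattern; the try_alloc_fallback() name and the plain malloc() environment are illustrative, not part of the driver:

#include <stdlib.h>

/* Try progressively smaller sizes until one succeeds; returns NULL (the
 * caller's -ENOMEM case) only when even the smallest request fails. */
static void *try_alloc_fallback(const size_t *sizes, int n, size_t *out_size)
{
	int i;

	for (i = 0; i < n; i++) {
		void *buf = malloc(sizes[i]);
		if (buf) {
			*out_size = sizes[i];
			return buf;
		}
	}
	return NULL;
}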

static void i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

static void i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		i915_error_object_free(error->ring[i].ctx);
		kfree(error->ring[i].requests);
	}

	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error->display);
	kfree(error);
}

static struct drm_i915_error_object *
i915_error_object_create_sized(struct drm_i915_private *dev_priv,
			       struct drm_i915_gem_object *src,
			       const int num_pages)
{
	struct drm_i915_error_object *dst;
	int i;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = dst->gtt_offset = i915_gem_obj_ggtt_offset(src);
	for (i = 0; i < num_pages; i++) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (reloc_offset < dev_priv->gtt.mappable_end &&
		    src->has_global_gtt_mapping) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */

			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else if (src->stolen) {
			unsigned long offset;

			offset = dev_priv->mm.stolen_base;
			offset += src->stolen->start;
			offset += i << PAGE_SHIFT;

			memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = num_pages;

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}
#define i915_error_object_create(dev_priv, src) \
	i915_error_object_create_sized((dev_priv), (src), \
				       (src)->base.size>>PAGE_SHIFT)

static void capture_bo(struct drm_i915_error_buffer *err,
		       struct drm_i915_gem_object *obj)
{
	err->size = obj->base.size;
	err->name = obj->base.name;
	err->rseqno = obj->last_read_seqno;
	err->wseqno = obj->last_write_seqno;
	err->gtt_offset = i915_gem_obj_ggtt_offset(obj);
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (obj->pin_count > 0)
		err->pinned = 1;
	if (obj->user_pin_count > 0)
		err->pinned = -1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
}

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, mm_list) {
		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, global_list) {
		if (obj->pin_count == 0)
			continue;

		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;

	default:
		BUG();
	}
}

static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
			     struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	u32 seqno;

	if (!ring->get_seqno)
		return NULL;

	if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
		u32 acthd = I915_READ(ACTHD);

		if (WARN_ON(ring->id != RCS))
			return NULL;

		obj = ring->private;
		if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
		    acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
			return i915_error_object_create(dev_priv, obj);
	}

	seqno = ring->get_seqno(ring, false);
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->ring != ring)
			continue;

		if (i915_seqno_passed(seqno, obj->last_read_seqno))
			continue;

		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
			continue;

		/* We need to copy these to an anonymous buffer as the simplest
		 * method to avoid being overwritten by userspace.
		 */
		return i915_error_object_create(dev_priv, obj);
	}

	return NULL;
}

static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
			= I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
		error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
		error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
		if (ring->id == RCS)
			error->bbaddr = I915_READ64(BB_ADDR);
	} else {
		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);
	}

	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring, false);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);
	error->ctl[ring->id] = I915_READ_CTL(ring);

	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;
}


static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
					   struct drm_i915_error_state *error,
					   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* Currently render ring is the only HW context user */
	if (ring->id != RCS || !error->ccid)
		return;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
			ering->ctx = i915_error_object_create_sized(dev_priv,
								    obj, 1);
			break;
		}
	}
}

static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *request;
	int i, count;

	for_each_ring(ring, dev_priv, i) {
		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
			i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
			i915_error_object_create(dev_priv, ring->obj);


		i915_gem_record_active_context(ring, error, &error->ring[i]);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kmalloc(count*sizeof(struct drm_i915_error_request),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}

static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
				     struct drm_i915_error_state *error)
{
	struct drm_i915_gem_object *obj;
	int i;

	i = 0;
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
		i++;
	error->active_bo_count = i;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (obj->pin_count)
			i++;
	error->pinned_bo_count = i - error->active_bo_count;

	if (i) {
		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
					   GFP_ATOMIC);
		if (error->active_bo)
			error->pinned_bo =
				error->active_bo + error->active_bo_count;
	}

	if (error->active_bo)
		error->active_bo_count =
			capture_active_bo(error->active_bo,
					  error->active_bo_count,
					  &dev_priv->mm.active_list);

	if (error->pinned_bo)
		error->pinned_bo_count =
			capture_pinned_bo(error->pinned_bo,
					  error->pinned_bo_count,
					  &dev_priv->mm.bound_list);
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int pipe;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
	if (error)
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	DRM_INFO("capturing error event; look for more information in "
		 "/sys/class/drm/card%d/error\n", dev->primary->index);

	kref_init(&error->ref);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	if (HAS_HW_CONTEXTS(dev))
		error->ccid = I915_READ(CCID);

	if (HAS_PCH_SPLIT(dev))
		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
	else if (IS_VALLEYVIEW(dev))
		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
	else if (IS_GEN2(dev))
		error->ier = I915_READ16(IER);
	else
		error->ier = I915_READ(IER);

	if (INTEL_INFO(dev)->gen >= 6)
		error->derrmr = I915_READ(DERRMR);

	if (IS_VALLEYVIEW(dev))
		error->forcewake = I915_READ(FORCEWAKE_VLV);
	else if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ(FORCEWAKE_MT);
	else if (INTEL_INFO(dev)->gen == 6)
		error->forcewake = I915_READ(FORCEWAKE);

	if (!HAS_PCH_SPLIT(dev))
		for_each_pipe(pipe)
			error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	if (INTEL_INFO(dev)->gen == 7)
		error->err_int = I915_READ(GEN7_ERR_INT);

	i915_get_extra_instdone(dev, error->extra_instdone);

	i915_gem_capture_buffers(dev_priv, error);
	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	if (dev_priv->gpu_error.first_error == NULL) {
		dev_priv->gpu_error.first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		i915_error_state_free(&error->ref);
}

void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error_priv->error = dev_priv->gpu_error.first_error;
	if (error_priv->error)
		kref_get(&error_priv->error->ref);
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

}

void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
{
	if (error_priv->error)
		kref_put(&error_priv->error->ref, i915_error_state_free);
}

void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}

const char *i915_cache_level_str(int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return " snooped (LLC)";
	case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
	default: return "";
	}
}

/* NB: please notice the memset */
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	switch (INTEL_INFO(dev)->gen) {
	case 2:
	case 3:
		instdone[0] = I915_READ(INSTDONE);
		break;
	case 4:
	case 5:
	case 6:
		instdone[0] = I915_READ(INSTDONE_I965);
		instdone[1] = I915_READ(INSTDONE1);
		break;
	default:
		WARN_ONCE(1, "Unsupported platform\n");
	case 7:
		instdone[0] = I915_READ(GEN7_INSTDONE_1);
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
		break;
	}
}

drivers/gpu/drm/i915/i915_irq.c

@ -128,6 +128,8 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

@ -152,38 +154,66 @@ static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  bool enable)
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN_A |
					 ERR_INT_FIFO_UNDERRUN_B |
					 ERR_INT_FIFO_UNDERRUN_C);

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);

		/* Change the state _after_ we've read out the current one. */
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (!was_enabled &&
		    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
			DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
				      pipe_name(pipe));
		}
	}
}

static void ibx_set_fifo_underrun_reporting(struct intel_crtc *crtc,
/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)
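
In SDEIMR a set bit masks an interrupt off, so the helper above clears the mask bit for every interrupt being enabled and sets it for every interrupt being disabled, while leaving bits outside interrupt_mask untouched. A standalone check of just that bit arithmetic; the register value and bit positions are made up for the example:

#include <assert.h>
#include <stdint.h>

/* Same math as ibx_display_interrupt_update(), minus the MMIO access. */
static uint32_t imr_update(uint32_t imr, uint32_t mask, uint32_t enabled)
{
	imr &= ~mask;		/* clear every bit we manage... */
	imr |= ~enabled & mask;	/* ...then re-set the ones staying disabled */
	return imr;
}

int main(void)
{
	/* enable bit 0, disable bit 3, leave unmanaged bit 8 alone */
	uint32_t imr = imr_update(0x109, 0x9, 0x1);
	assert(imr == 0x108);	/* bit 0 unmasked, bit 3 masked, bit 8 kept */
	return 0;
}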

static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (crtc->pipe == PIPE_A) ? SDE_TRANSA_FIFO_UNDER :
						SDE_TRANSB_FIFO_UNDER;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~bit);
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		I915_WRITE(SDEIMR, I915_READ(SDEIMR) | bit);

	POSTING_READ(SDEIMR);
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,

@ -193,19 +223,26 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		I915_WRITE(SERR_INT, SERR_INT_TRANS_A_FIFO_UNDERRUN |
				     SERR_INT_TRANS_B_FIFO_UNDERRUN |
				     SERR_INT_TRANS_C_FIFO_UNDERRUN);

		I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~SDE_ERROR_CPT);
		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		I915_WRITE(SDEIMR, I915_READ(SDEIMR) | SDE_ERROR_CPT);
	}
		uint32_t tmp = I915_READ(SERR_INT);
		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);

	POSTING_READ(SDEIMR);
		/* Change the state _after_ we've read out the current one. */
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (!was_enabled &&
		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
				      transcoder_name(pch_transcoder));
		}
	}
}

/**

@ -243,7 +280,7 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
	if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, enable);
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

@ -269,29 +306,19 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe p;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	if (HAS_PCH_LPT(dev)) {
		crtc = NULL;
		for_each_pipe(p) {
			struct drm_crtc *c = dev_priv->pipe_to_crtc_mapping[p];
			if (intel_pipe_has_type(c, INTEL_OUTPUT_ANALOG)) {
				crtc = c;
				break;
			}
		}
		if (!crtc) {
			DRM_ERROR("PCH FIFO underrun, but no CRTC using the PCH found\n");
			return false;
		}
	} else {
		crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	}
	intel_crtc = to_intel_crtc(crtc);
	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

@ -303,7 +330,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(intel_crtc, enable);
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

@ -319,6 +346,8 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == mask)
		return;

@ -334,6 +363,8 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == 0)
		return;

@ -625,14 +656,13 @@ static void i915_hotplug_work_func(struct work_struct *work)
	drm_kms_helper_hotplug_event(dev);
}

static void ironlake_handle_rps_change(struct drm_device *dev)
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;
	unsigned long flags;

	spin_lock_irqsave(&mchdev_lock, flags);
	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

@ -660,7 +690,7 @@ static void ironlake_handle_rps_change(struct drm_device *dev)
	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock_irqrestore(&mchdev_lock, flags);
	spin_unlock(&mchdev_lock);

	return;
}

@ -689,13 +719,13 @@ static void gen6_pm_rps_work(struct work_struct *work)
	u32 pm_iir, pm_imr;
	u8 new_delay;

	spin_lock_irq(&dev_priv->rps.lock);
	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&dev_priv->rps.lock);
	spin_unlock_irq(&dev_priv->irq_lock);

	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
		return;

@ -804,18 +834,17 @@ static void ivybridge_parity_work(struct work_struct *work)
	kfree(parity_event[1]);
}

static void ivybridge_handle_parity_error(struct drm_device *dev)
static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long flags;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	spin_lock(&dev_priv->irq_lock);
	dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	spin_unlock(&dev_priv->irq_lock);

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

@ -841,15 +870,13 @@ static void snb_gt_irq_handler(struct drm_device *dev,
	}

	if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		ivybridge_handle_parity_error(dev);
		ivybridge_parity_error_irq_handler(dev);
}

/* Legacy way of handling PM interrupts */
static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
				u32 pm_iir)
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv,
				 u32 pm_iir)
{
	unsigned long flags;

	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning

@ -860,11 +887,11 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
	 */

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	spin_lock(&dev_priv->irq_lock);
	dev_priv->rps.pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
	spin_unlock(&dev_priv->irq_lock);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}

@ -928,7 +955,7 @@ static void dp_aux_irq_handler(struct drm_device *dev)
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

/* Unlike gen6_queue_rps_work() from which this function is originally derived,
/* Unlike gen6_rps_irq_handler() from which this function is originally derived,
 * we must be able to deal with other PM interrupts. This is complicated because
 * of the way in which we use the masks to defer the RPS work (which for
 * posterity is necessary because of forcewake).

@ -936,27 +963,23 @@ static void dp_aux_irq_handler(struct drm_device *dev)
static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
			       u32 pm_iir)
{
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
	if (dev_priv->rps.pm_iir) {
	if (pm_iir & GEN6_PM_RPS_EVENTS) {
		spin_lock(&dev_priv->irq_lock);
		dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
		I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
		/* never want to mask useful interrupts. (also posting read) */
		WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
	/* TODO: if queue_work is slow, move it out of the spinlock */
		spin_unlock(&dev_priv->irq_lock);

		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

	if (pm_iir & ~GEN6_PM_RPS_EVENTS) {
	if (pm_iir & PM_VEBOX_USER_INTERRUPT)
		notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

	if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
		DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
		i915_handle_error(dev_priv->dev, false);
	}
		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
			DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
			i915_handle_error(dev_priv->dev, false);
		}
	}
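
The rewritten handler keeps the irq_lock hold time short: only the pm_iir bookkeeping and the PMIMR write happen under the lock, and queue_work() runs after it is dropped, which is the point of the "queue work outside spinlock" patch in this series. A minimal userspace sketch of the same gather-under-lock, dispatch-after-unlock shape, using pthreads in place of kernel spinlocks; the names are illustrative, not the driver's:

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned pending;	/* stands in for dev_priv->rps.pm_iir */

void irq_handler(unsigned iir, void (*queue_work)(unsigned))
{
	unsigned snapshot;

	pthread_mutex_lock(&lock);
	pending |= iir;		/* cheap bookkeeping only, lock held briefly */
	snapshot = pending;
	pthread_mutex_unlock(&lock);

	queue_work(snapshot);	/* heavy lifting happens outside the lock */
}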

@ -1029,7 +1052,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
	gmbus_irq_handler(dev);

	if (pm_iir & GEN6_PM_RPS_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);
		gen6_rps_irq_handler(dev_priv, pm_iir);

	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(GEN6_PMIIR, pm_iir);

@ -1267,7 +1290,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
	if (IS_HASWELL(dev))
		hsw_pm_irq_handler(dev_priv, pm_iir);
	else if (pm_iir & GEN6_PM_RPS_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);
		gen6_rps_irq_handler(dev_priv, pm_iir);
	I915_WRITE(GEN6_PMIIR, pm_iir);
	ret = IRQ_HANDLED;
}

@ -1384,10 +1407,10 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_handle_rps_change(dev);
		ironlake_rps_change_irq_handler(dev);

	if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);
		gen6_rps_irq_handler(dev_priv, pm_iir);

	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);

@ -1470,535 +1493,6 @@ static void i915_error_work_func(struct work_struct *work)
	}
}

/* NB: please notice the memset */
static void i915_get_extra_instdone(struct drm_device *dev,
				    uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	switch(INTEL_INFO(dev)->gen) {
	case 2:
	case 3:
		instdone[0] = I915_READ(INSTDONE);
		break;
	case 4:
	case 5:
	case 6:
		instdone[0] = I915_READ(INSTDONE_I965);
		instdone[1] = I915_READ(INSTDONE1);
		break;
	default:
		WARN_ONCE(1, "Unsupported platform\n");
	case 7:
		instdone[0] = I915_READ(GEN7_INSTDONE_1);
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
		break;
	}
}

#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create_sized(struct drm_i915_private *dev_priv,
			       struct drm_i915_gem_object *src,
			       const int num_pages)
{
	struct drm_i915_error_object *dst;
	int i;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = src->gtt_offset;
	for (i = 0; i < num_pages; i++) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (reloc_offset < dev_priv->gtt.mappable_end &&
		    src->has_global_gtt_mapping) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */

			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else if (src->stolen) {
			unsigned long offset;

			offset = dev_priv->mm.stolen_base;
			offset += src->stolen->start;
			offset += i << PAGE_SHIFT;

			memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = num_pages;
	dst->gtt_offset = src->gtt_offset;

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}
#define i915_error_object_create(dev_priv, src) \
	i915_error_object_create_sized((dev_priv), (src), \
				       (src)->base.size>>PAGE_SHIFT)

static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

void
i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		i915_error_object_free(error->ring[i].ctx);
		kfree(error->ring[i].requests);
	}

	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error->display);
	kfree(error);
}
static void capture_bo(struct drm_i915_error_buffer *err,
		       struct drm_i915_gem_object *obj)
{
	err->size = obj->base.size;
	err->name = obj->base.name;
	err->rseqno = obj->last_read_seqno;
	err->wseqno = obj->last_write_seqno;
	err->gtt_offset = obj->gtt_offset;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (obj->pin_count > 0)
		err->pinned = 1;
	if (obj->user_pin_count > 0)
		err->pinned = -1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
}

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, mm_list) {
		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, global_list) {
		if (obj->pin_count == 0)
			continue;

		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;

	default:
		BUG();
	}
}

static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
			     struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	u32 seqno;

	if (!ring->get_seqno)
		return NULL;

	if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
		u32 acthd = I915_READ(ACTHD);

		if (WARN_ON(ring->id != RCS))
			return NULL;

		obj = ring->private;
		if (acthd >= obj->gtt_offset &&
		    acthd < obj->gtt_offset + obj->base.size)
			return i915_error_object_create(dev_priv, obj);
	}

	seqno = ring->get_seqno(ring, false);
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->ring != ring)
			continue;

		if (i915_seqno_passed(seqno, obj->last_read_seqno))
			continue;

		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
			continue;

		/* We need to copy these to an anonymous buffer as the simplest
		 * method to avoid being overwritten by userspace.
		 */
		return i915_error_object_create(dev_priv, obj);
	}

	return NULL;
}

static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
			= I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
		error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
		error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
		if (ring->id == RCS)
			error->bbaddr = I915_READ64(BB_ADDR);
	} else {
		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);
	}

	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring, false);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);
	error->ctl[ring->id] = I915_READ_CTL(ring);

	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;
}


static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
					   struct drm_i915_error_state *error,
					   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* Currently render ring is the only HW context user */
	if (ring->id != RCS || !error->ccid)
		return;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
			ering->ctx = i915_error_object_create_sized(dev_priv,
								    obj, 1);
		}
	}
}

static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *request;
	int i, count;

	for_each_ring(ring, dev_priv, i) {
		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
			i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
			i915_error_object_create(dev_priv, ring->obj);


		i915_gem_record_active_context(ring, error, &error->ring[i]);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kmalloc(count*sizeof(struct drm_i915_error_request),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int i, pipe;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
	if (error)
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	DRM_INFO("capturing error event; look for more information in "
		 "/sys/kernel/debug/dri/%d/i915_error_state\n",
		 dev->primary->index);

	kref_init(&error->ref);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	if (HAS_HW_CONTEXTS(dev))
		error->ccid = I915_READ(CCID);

	if (HAS_PCH_SPLIT(dev))
		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
	else if (IS_VALLEYVIEW(dev))
		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
	else if (IS_GEN2(dev))
		error->ier = I915_READ16(IER);
	else
		error->ier = I915_READ(IER);

	if (INTEL_INFO(dev)->gen >= 6)
		error->derrmr = I915_READ(DERRMR);

	if (IS_VALLEYVIEW(dev))
		error->forcewake = I915_READ(FORCEWAKE_VLV);
	else if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ(FORCEWAKE_MT);
	else if (INTEL_INFO(dev)->gen == 6)
		error->forcewake = I915_READ(FORCEWAKE);

	if (!HAS_PCH_SPLIT(dev))
		for_each_pipe(pipe)
			error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	if (INTEL_INFO(dev)->gen == 7)
		error->err_int = I915_READ(GEN7_ERR_INT);

	i915_get_extra_instdone(dev, error->extra_instdone);

	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	/* Record buffers on the active and pinned lists. */
	error->active_bo = NULL;
	error->pinned_bo = NULL;

	i = 0;
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
		i++;
	error->active_bo_count = i;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (obj->pin_count)
			i++;
	error->pinned_bo_count = i - error->active_bo_count;

	error->active_bo = NULL;
	error->pinned_bo = NULL;
	if (i) {
		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
					   GFP_ATOMIC);
		if (error->active_bo)
			error->pinned_bo =
				error->active_bo + error->active_bo_count;
	}

	if (error->active_bo)
		error->active_bo_count =
			capture_active_bo(error->active_bo,
					  error->active_bo_count,
					  &dev_priv->mm.active_list);

	if (error->pinned_bo)
		error->pinned_bo_count =
			capture_pinned_bo(error->pinned_bo,
					  error->pinned_bo_count,
					  &dev_priv->mm.bound_list);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	if (dev_priv->gpu_error.first_error == NULL) {
		dev_priv->gpu_error.first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		i915_error_state_free(&error->ref);
}

void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}
#else
#define i915_capture_error_state(x)
#endif

static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

@ -2155,10 +1649,10 @@ static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, in
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
					obj->gtt_offset;
					i915_gem_obj_ggtt_offset(obj);
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
		stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
							crtc->y * crtc->fb->pitches[0] +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

@ -2648,22 +2142,21 @@ static void ibx_hpd_irq_setup(struct drm_device *dev)
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;
	u32 mask = ~I915_READ(SDEIMR);
	u32 hotplug;
	u32 hotplug_irqs, hotplug, enabled_irqs = 0;

	if (HAS_PCH_IBX(dev)) {
		mask &= ~SDE_HOTPLUG_MASK;
		hotplug_irqs = SDE_HOTPLUG_MASK;
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				mask |= hpd_ibx[intel_encoder->hpd_pin];
				enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
	} else {
		mask &= ~SDE_HOTPLUG_MASK_CPT;
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				mask |= hpd_cpt[intel_encoder->hpd_pin];
				enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
	}

	I915_WRITE(SDEIMR, ~mask);
	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse

@ -2792,8 +2285,7 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)

	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
	if (HAS_VEBOX(dev))
		pm_irqs |= PM_VEBOX_USER_INTERRUPT |
			PM_VEBOX_CS_ERROR_INTERRUPT;
		pm_irqs |= PM_VEBOX_USER_INTERRUPT;

	/* Our enable/disable rps functions may touch these registers so
	 * make sure to set a known state for only the non-RPS bits.

@ -2817,6 +2309,7 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
	u32 gt_irqs;
	u32 enable_mask;
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
	unsigned long irqflags;

	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |

@ -2842,9 +2335,13 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
	I915_WRITE(PIPESTAT(1), 0xffff);
	POSTING_READ(VLV_IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
	i915_enable_pipestat(dev_priv, 1, pipestat_enable);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);

@ -3323,6 +2820,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 error_mask;
	unsigned long irqflags;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |

@ -3341,7 +2839,11 @@ static int i965_irq_postinstall(struct drm_device *dev)
	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/*
	 * Enable some error detection, note the instruction error mask

drivers/gpu/drm/i915/i915_reg.h

@ -363,6 +363,7 @@
#define PUNIT_REG_GPU_LFM			0xd3
#define PUNIT_REG_GPU_FREQ_REQ			0xd4
#define PUNIT_REG_GPU_FREQ_STS			0xd8
#define   GENFREQSTATUS				(1<<0)
#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ		0xdc

#define PUNIT_FUSE_BUS2				0xf6 /* bits 47:40 */

@ -680,6 +681,7 @@
#define   ERR_INT_FIFO_UNDERRUN_C	(1<<6)
#define   ERR_INT_FIFO_UNDERRUN_B	(1<<3)
#define   ERR_INT_FIFO_UNDERRUN_A	(1<<0)
#define   ERR_INT_FIFO_UNDERRUN(pipe)	(1<<(pipe*3))

#define FPGA_DBG		0x42300
#define   FPGA_DBG_RM_NOCLAIM	(1<<31)

@ -1125,7 +1127,8 @@
#define _DPLL_B	(dev_priv->info->display_mmio_offset + 0x6018)
#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
#define   DPLL_VCO_ENABLE		(1 << 31)
#define   DPLL_DVO_HIGH_SPEED		(1 << 30)
#define   DPLL_SDVO_HIGH_SPEED		(1 << 30)
#define   DPLL_DVO_2X_MODE		(1 << 30)
#define   DPLL_EXT_BUFFER_ENABLE_VLV	(1 << 30)
#define   DPLL_SYNCLOCK_ENABLE		(1 << 29)
#define   DPLL_REFA_CLK_ENABLE_VLV	(1 << 29)

@ -3880,6 +3883,7 @@
#define  SERR_INT_TRANS_C_FIFO_UNDERRUN	(1<<6)
#define  SERR_INT_TRANS_B_FIFO_UNDERRUN	(1<<3)
#define  SERR_INT_TRANS_A_FIFO_UNDERRUN	(1<<0)
#define  SERR_INT_TRANS_FIFO_UNDERRUN(pipe)	(1<<(pipe*3))

/* digital port hotplug */
#define PCH_PORT_HOTPLUG        0xc4030		/* SHOTPLUG_CTL */

drivers/gpu/drm/i915/i915_sysfs.c

@ -409,6 +409,71 @@ static const struct attribute *gen6_attrs[] = {
	NULL,
};

static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
{

	struct device *kdev = container_of(kobj, struct device, kobj);
	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
	struct drm_device *dev = minor->dev;
	struct i915_error_state_file_priv error_priv;
	struct drm_i915_error_state_buf error_str;
	ssize_t ret_count = 0;
	int ret;

	memset(&error_priv, 0, sizeof(error_priv));

	ret = i915_error_state_buf_init(&error_str, count, off);
	if (ret)
		return ret;

	error_priv.dev = dev;
	i915_error_state_get(dev, &error_priv);

	ret = i915_error_state_to_str(&error_str, &error_priv);
	if (ret)
		goto out;

	ret_count = count < error_str.bytes ? count : error_str.bytes;

	memcpy(buf, error_str.buf, ret_count);
out:
	i915_error_state_put(&error_priv);
	i915_error_state_buf_release(&error_str);

	return ret ?: ret_count;
}

static ssize_t error_state_write(struct file *file, struct kobject *kobj,
				 struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count)
{
	struct device *kdev = container_of(kobj, struct device, kobj);
	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
	struct drm_device *dev = minor->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return count;
}

static struct bin_attribute error_state_attr = {
	.attr.name = "error",
	.attr.mode = S_IRUSR | S_IWUSR,
	.size = 0,
	.read = error_state_read,
	.write = error_state_write,
};
|
||||
|
||||
void i915_setup_sysfs(struct drm_device *dev)
|
||||
{
|
||||
int ret;
|
||||
|
@ -432,10 +497,16 @@ void i915_setup_sysfs(struct drm_device *dev)
|
|||
if (ret)
|
||||
DRM_ERROR("gen6 sysfs setup failed\n");
|
||||
}
|
||||
|
||||
ret = sysfs_create_bin_file(&dev->primary->kdev.kobj,
|
||||
&error_state_attr);
|
||||
if (ret)
|
||||
DRM_ERROR("error_state sysfs setup failed\n");
|
||||
}
|
||||
|
||||
void i915_teardown_sysfs(struct drm_device *dev)
|
||||
{
|
||||
sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr);
|
||||
sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
|
||||
device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
|
||||
#ifdef CONFIG_PM
|
||||
|
|
|
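This makes the GPU error state available as a binary sysfs node named "error": reading dumps the captured state, any write clears it. A minimal userspace sketch, assuming the node for the first card lives at /sys/class/drm/card0/error (the exact path depends on the card index):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/class/drm/card0/error";	/* assumed path */
	char buf[4096];
	ssize_t n;
	int fd;

	/* Dump the captured error state, if any. */
	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);
	close(fd);

	/* Any write resets the stored error state (needs write permission). */
	fd = open(path, O_WRONLY);
	if (fd >= 0) {
		if (write(fd, "1", 1) < 0)
			perror("write");
		close(fd);
	}
	return 0;
}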
@ -46,8 +46,8 @@ TRACE_EVENT(i915_gem_object_bind,

	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->offset = obj->gtt_space->start;
			   __entry->size = obj->gtt_space->size;
			   __entry->offset = i915_gem_obj_ggtt_offset(obj);
			   __entry->size = i915_gem_obj_ggtt_size(obj);
			   __entry->mappable = mappable;
			   ),

@ -68,8 +68,8 @@ TRACE_EVENT(i915_gem_object_unbind,

	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->offset = obj->gtt_space->start;
			   __entry->size = obj->gtt_space->size;
			   __entry->offset = i915_gem_obj_ggtt_offset(obj);
			   __entry->size = i915_gem_obj_ggtt_size(obj);
			   ),

	    TP_printk("obj=%p, offset=%08x size=%x",
File diff suppressed because it is too large
@ -1360,6 +1360,13 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
	}

	pipe_config->adjusted_mode.flags |= flags;

	if (dp_to_dig_port(intel_dp)->port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}
}

static void intel_disable_dp(struct intel_encoder *encoder)
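For the CPU eDP port the link rate is read back from the DP_A PLL setting; the two frequencies map onto the standard DisplayPort link clocks, with port_clock expressed in kHz. A sketch of just that mapping (the helper name is illustrative, not driver API):

/* 1.62 GHz PLL -> reduced bit rate (162 MHz link clock),
 * otherwise 2.7 GHz -> high bit rate (270 MHz link clock). */
static int dp_pll_to_port_clock(int pll_is_160mhz)
{
	return pll_is_160mhz ? 162000 : 270000;
}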
@ -549,13 +549,6 @@ struct intel_unpin_work {
	bool enable_stall_check;
};

struct intel_fbc_work {
	struct delayed_work work;
	struct drm_crtc *crtc;
	struct drm_framebuffer *fb;
	int interval;
};

int intel_pch_rawclk(struct drm_device *dev);

int intel_connector_update_modes(struct drm_connector *connector,
@ -747,6 +740,22 @@ extern int intel_overlay_attrs(struct drm_device *dev, void *data,
extern void intel_fb_output_poll_changed(struct drm_device *dev);
extern void intel_fb_restore_mode(struct drm_device *dev);

struct intel_shared_dpll *
intel_crtc_to_shared_dpll(struct intel_crtc *crtc);

void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
			bool state);
#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state);
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
#define assert_pll_disabled(d, p) assert_pll(d, p, false)
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state);
#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
			bool state);
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)

@ -780,7 +789,6 @@ extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
extern void intel_init_pm(struct drm_device *dev);
/* FBC */
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
extern void intel_update_fbc(struct drm_device *dev);
/* IPS */
extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
@ -283,7 +283,6 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
	int pipe = intel_crtc->pipe;
	u32 dvo_val;
	u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg;
	int dpll_reg = DPLL(pipe);

	switch (dvo_reg) {
	case DVOA:
@ -314,8 +313,6 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
		dvo_val |= DVO_VSYNC_ACTIVE_HIGH;

	I915_WRITE(dpll_reg, I915_READ(dpll_reg) | DPLL_DVO_HIGH_SPEED);

	/*I915_WRITE(DVOB_SRCDIM,
	  (adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
	  (adjusted_mode->VDisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
@ -139,11 +139,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
	info->apertures->ranges[0].base = dev->mode_config.fb_base;
	info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;

	info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
	info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
	info->fix.smem_len = size;

	info->screen_base =
		ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
			   size);
	if (!info->screen_base) {
		ret = -ENOSPC;
@ -166,9 +166,9 @@ static int intelfb_create(struct drm_fb_helper *helper,

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n",
		      fb->width, fb->height,
		      obj->gtt_offset, obj);
		      i915_gem_obj_ggtt_offset(obj), obj);


	mutex_unlock(&dev->struct_mutex);
@ -115,17 +115,25 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
 * This is an exception to the general rule that mode_set doesn't turn
 * things on.
 */
static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
static void intel_pre_enable_lvds(struct intel_encoder *encoder)
{
	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *fixed_mode =
		lvds_encoder->attached_connector->base.panel.fixed_mode;
	int pipe = intel_crtc->pipe;
	int pipe = crtc->pipe;
	u32 temp;

	if (HAS_PCH_SPLIT(dev)) {
		assert_fdi_rx_pll_disabled(dev_priv, pipe);
		assert_shared_dpll_disabled(dev_priv,
					    intel_crtc_to_shared_dpll(crtc));
	} else {
		assert_pll_disabled(dev_priv, pipe);
	}

	temp = I915_READ(lvds_encoder->reg);
	temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;

@ -142,7 +150,7 @@ static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)

	/* set the corresponding LVDS_BORDER bit */
	temp &= ~LVDS_BORDER_ENABLE;
	temp |= intel_crtc->config.gmch_pfit.lvds_border_bits;
	temp |= crtc->config.gmch_pfit.lvds_border_bits;
	/* Set the B0-B3 data pairs corresponding to whether we're going to
	 * set the DPLLs for dual-channel mode or not.
	 */
@ -162,8 +170,7 @@ static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
	if (INTEL_INFO(dev)->gen == 4) {
		/* Bspec wording suggests that LVDS port dithering only exists
		 * for 18bpp panels. */
		if (intel_crtc->config.dither &&
		    intel_crtc->config.pipe_bpp == 18)
		if (crtc->config.dither && crtc->config.pipe_bpp == 18)
			temp |= LVDS_ENABLE_DITHER;
		else
			temp &= ~LVDS_ENABLE_DITHER;
@ -955,7 +962,7 @@ void intel_lvds_init(struct drm_device *dev)
			 DRM_MODE_ENCODER_LVDS);

	intel_encoder->enable = intel_enable_lvds;
	intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds;
	intel_encoder->pre_enable = intel_pre_enable_lvds;
	intel_encoder->compute_config = intel_lvds_compute_config;
	intel_encoder->disable = intel_disable_lvds;
	intel_encoder->get_hw_state = intel_lvds_get_hw_state;
@ -196,7 +196,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
		regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
	else
		regs = io_mapping_map_wc(dev_priv->gtt.mappable,
					 overlay->reg_bo->gtt_offset);
					 i915_gem_obj_ggtt_offset(overlay->reg_bo));

	return regs;
}
@ -740,7 +740,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
	swidth = params->src_w;
	swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
	sheight = params->src_h;
	iowrite32(new_bo->gtt_offset + params->offset_Y, &regs->OBUF_0Y);
	iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
	ostride = params->stride_Y;

	if (params->format & I915_OVERLAY_YUV_PLANAR) {
@ -754,8 +754,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
				      params->src_w/uv_hscale);
		swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
		sheight |= (params->src_h/uv_vscale) << 16;
		iowrite32(new_bo->gtt_offset + params->offset_U, &regs->OBUF_0U);
		iowrite32(new_bo->gtt_offset + params->offset_V, &regs->OBUF_0V);
		iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_U, &regs->OBUF_0U);
		iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_V, &regs->OBUF_0V);
		ostride |= params->stride_UV << 16;
	}

@ -1355,7 +1355,7 @@ void intel_setup_overlay(struct drm_device *dev)
			DRM_ERROR("failed to pin overlay register bo\n");
			goto out_free_bo;
		}
		overlay->flip_addr = reg_bo->gtt_offset;
		overlay->flip_addr = i915_gem_obj_ggtt_offset(reg_bo);

		ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
		if (ret) {
@ -1412,9 +1412,6 @@ void intel_cleanup_overlay(struct drm_device *dev)
	kfree(dev_priv->overlay);
}

#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>

struct intel_overlay_error_state {
	struct overlay_registers regs;
	unsigned long base;
@ -1435,7 +1432,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
			overlay->reg_bo->phys_obj->handle->vaddr;
	else
		regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						overlay->reg_bo->gtt_offset);
						i915_gem_obj_ggtt_offset(overlay->reg_bo));

	return regs;
}
@ -1468,7 +1465,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
		error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
	else
		error->base = overlay->reg_bo->gtt_offset;
		error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);

	regs = intel_overlay_map_regs_atomic(overlay);
	if (!regs)
@ -1537,4 +1534,3 @@ intel_overlay_print_error_state(struct drm_i915_error_state_buf *m,
	P(UVSCALEV);
#undef P
}
#endif
@ -30,6 +30,7 @@
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
#include <drm/i915_powerwell.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 2

@ -86,7 +87,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
	int plane, i;
	u32 fbc_ctl, fbc_ctl2;

	cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
	cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

@ -217,7 +218,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
	I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

@ -274,7 +275,7 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	I915_WRITE(IVB_FBC_RT_BASE, obj->gtt_offset);
	I915_WRITE(IVB_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj));

	I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X |
		   IVB_DPFC_CTL_FENCE_EN |
@ -325,7 +326,7 @@ static void intel_fbc_work_fn(struct work_struct *__work)
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	if (work == dev_priv->fbc_work) {
	if (work == dev_priv->fbc.fbc_work) {
		/* Double check that we haven't switched fb without cancelling
		 * the prior work.
		 */
@ -333,12 +334,12 @@ static void intel_fbc_work_fn(struct work_struct *__work)
			dev_priv->display.enable_fbc(work->crtc,
						     work->interval);

			dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
			dev_priv->cfb_fb = work->crtc->fb->base.id;
			dev_priv->cfb_y = work->crtc->y;
			dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
			dev_priv->fbc.fb_id = work->crtc->fb->base.id;
			dev_priv->fbc.y = work->crtc->y;
		}

		dev_priv->fbc_work = NULL;
		dev_priv->fbc.fbc_work = NULL;
	}
	mutex_unlock(&dev->struct_mutex);

@ -347,28 +348,28 @@ static void intel_fbc_work_fn(struct work_struct *__work)

static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
	if (dev_priv->fbc_work == NULL)
	if (dev_priv->fbc.fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	/* Synchronisation is provided by struct_mutex and checking of
	 * dev_priv->fbc_work, so we can perform the cancellation
	 * dev_priv->fbc.fbc_work, so we can perform the cancellation
	 * entirely asynchronously.
	 */
	if (cancel_delayed_work(&dev_priv->fbc_work->work))
	if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
		/* tasklet was killed before being run, clean up */
		kfree(dev_priv->fbc_work);
		kfree(dev_priv->fbc.fbc_work);

	/* Mark the work as no longer wanted so that if it does
	 * wake-up (because the work was already running and waiting
	 * for our mutex), it will discover that is no longer
	 * necessary to run.
	 */
	dev_priv->fbc_work = NULL;
	dev_priv->fbc.fbc_work = NULL;
}

void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
@ -381,6 +382,7 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (work == NULL) {
		DRM_ERROR("Failed to allocate FBC work structure\n");
		dev_priv->display.enable_fbc(crtc, interval);
		return;
	}
@ -390,9 +392,7 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
	work->interval = interval;
	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

	dev_priv->fbc_work = work;

	DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
	dev_priv->fbc.fbc_work = work;

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
@ -418,7 +418,7 @@ void intel_disable_fbc(struct drm_device *dev)
		return;

	dev_priv->display.disable_fbc(dev);
	dev_priv->cfb_plane = -1;
	dev_priv->fbc.plane = -1;
}

/**
@ -448,7 +448,6 @@ void intel_update_fbc(struct drm_device *dev)
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int enable_fbc;
	unsigned int max_hdisplay, max_vdisplay;

	if (!i915_powersave)
@ -471,7 +470,8 @@ void intel_update_fbc(struct drm_device *dev)
		    !to_intel_crtc(tmp_crtc)->primary_disabled) {
			if (crtc) {
				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
				dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
				dev_priv->fbc.no_fbc_reason =
					FBC_MULTIPLE_PIPES;
				goto out_disable;
			}
			crtc = tmp_crtc;
@ -480,7 +480,7 @@

	if (!crtc || crtc->fb == NULL) {
		DRM_DEBUG_KMS("no output, disabling\n");
		dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
		dev_priv->fbc.no_fbc_reason = FBC_NO_OUTPUT;
		goto out_disable;
	}

@ -489,23 +489,22 @@
	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	enable_fbc = i915_enable_fbc;
	if (enable_fbc < 0) {
		DRM_DEBUG_KMS("fbc set to per-chip default\n");
		enable_fbc = 1;
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			enable_fbc = 0;
	if (i915_enable_fbc < 0 &&
	    INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
		DRM_DEBUG_KMS("disabled per chip default\n");
		dev_priv->fbc.no_fbc_reason = FBC_CHIP_DEFAULT;
		goto out_disable;
	}
	if (!enable_fbc) {
	if (!i915_enable_fbc) {
		DRM_DEBUG_KMS("fbc disabled per module param\n");
		dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
		dev_priv->fbc.no_fbc_reason = FBC_MODULE_PARAM;
		goto out_disable;
	}
	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
		DRM_DEBUG_KMS("mode incompatible with compression, "
			      "disabling\n");
		dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
		dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED_MODE;
		goto out_disable;
	}

@ -519,13 +518,13 @@
	if ((crtc->mode.hdisplay > max_hdisplay) ||
	    (crtc->mode.vdisplay > max_vdisplay)) {
		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
		dev_priv->fbc.no_fbc_reason = FBC_MODE_TOO_LARGE;
		goto out_disable;
	}
	if ((IS_I915GM(dev) || IS_I945GM(dev) || IS_HASWELL(dev)) &&
	    intel_crtc->plane != 0) {
		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
		dev_priv->fbc.no_fbc_reason = FBC_BAD_PLANE;
		goto out_disable;
	}

@ -535,7 +534,7 @@
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_NOT_TILED;
		dev_priv->fbc.no_fbc_reason = FBC_NOT_TILED;
		goto out_disable;
	}

@ -545,7 +544,7 @@

	if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
		DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
		dev_priv->fbc.no_fbc_reason = FBC_STOLEN_TOO_SMALL;
		goto out_disable;
	}

@ -554,9 +553,9 @@
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (dev_priv->cfb_plane == intel_crtc->plane &&
	    dev_priv->cfb_fb == fb->base.id &&
	    dev_priv->cfb_y == crtc->y)
	if (dev_priv->fbc.plane == intel_crtc->plane &&
	    dev_priv->fbc.fb_id == fb->base.id &&
	    dev_priv->fbc.y == crtc->y)
		return;

	if (intel_fbc_enabled(dev)) {
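All of the scattered cfb_* fields, the fbc_work pointer and no_fbc_reason now live in a single dev_priv->fbc container. A sketch of the grouping implied by the new field names; the real definition sits in i915_drv.h, which is not among the hunks shown here:

/* Hypothetical reconstruction from the accesses above. */
struct i915_fbc {
	unsigned long size;			/* was dev_priv->cfb_size */
	int plane;				/* was dev_priv->cfb_plane */
	int fb_id;				/* was dev_priv->cfb_fb */
	int y;					/* was dev_priv->cfb_y */
	struct intel_fbc_work *fbc_work;	/* was dev_priv->fbc_work */
	enum no_fbc_reason no_fbc_reason;	/* was dev_priv->no_fbc_reason */
};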
@ -2468,8 +2467,8 @@ static void hsw_compute_wm_results(struct drm_device *dev,

/* Find the result with the highest level enabled. Check for enable_fbc_wm in
 * case both are at the same level. Prefer r1 in case they're the same. */
struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
					   struct hsw_wm_values *r2)
static struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
						  struct hsw_wm_values *r2)
{
	int i, val_r1 = 0, val_r2 = 0;

@ -3076,19 +3075,12 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
 */
static void vlv_update_rps_cur_delay(struct drm_i915_private *dev_priv)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(10);
	u32 pval;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	do {
		pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		if (time_after(jiffies, timeout)) {
			DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
			break;
		}
		udelay(10);
	} while (pval & 1);
	if (wait_for(((pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) & GENFREQSTATUS) == 0, 10))
		DRM_DEBUG_DRIVER("timed out waiting for Punit\n");

	pval >>= 8;
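The open-coded Punit poll loop is replaced by the driver's wait_for() helper, which evaluates a condition until it holds or a millisecond deadline passes. A rough, runnable userspace approximation of that helper's shape (the kernel version also sleeps between polls; this sketch only models the timeout logic):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Userspace model of wait_for(COND, MS): 0 on success, -110 (-ETIMEDOUT)
 * if COND stays false for MS milliseconds. */
#define wait_for(COND, MS) ({						\
	struct timespec ts__;						\
	long long now__, deadline__;					\
	int ret__ = 0;							\
	clock_gettime(CLOCK_MONOTONIC, &ts__);				\
	deadline__ = ts__.tv_sec * 1000000000LL + ts__.tv_nsec +	\
		     (MS) * 1000000LL;					\
	while (!(COND)) {						\
		clock_gettime(CLOCK_MONOTONIC, &ts__);			\
		now__ = ts__.tv_sec * 1000000000LL + ts__.tv_nsec;	\
		if (now__ > deadline__) {				\
			ret__ = -110;					\
			break;						\
		}							\
	}								\
	ret__;								\
})

static int counter;
static bool hw_ready(void)
{
	return ++counter > 1000;	/* stand-in for a status register read */
}

int main(void)
{
	if (wait_for(hw_ready(), 10))
		puts("timed out waiting for hardware");
	else
		puts("condition met");
	return 0;
}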
@ -3143,9 +3135,9 @@ static void gen6_disable_rps(struct drm_device *dev)
	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
	 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */

	spin_lock_irq(&dev_priv->rps.lock);
	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->rps.lock);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
}
@ -3162,9 +3154,9 @@ static void valleyview_disable_rps(struct drm_device *dev)
	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
	 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */

	spin_lock_irq(&dev_priv->rps.lock);
	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->rps.lock);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));

@ -3329,13 +3321,13 @@ static void gen6_enable_rps(struct drm_device *dev)

	/* requires MSI enabled */
	I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) | GEN6_PM_RPS_EVENTS);
	spin_lock_irq(&dev_priv->rps.lock);
	spin_lock_irq(&dev_priv->irq_lock);
	/* FIXME: Our interrupt enabling sequence is bonghits.
	 * dev_priv->rps.pm_iir really should be 0 here. */
	dev_priv->rps.pm_iir = 0;
	I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
	I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&dev_priv->rps.lock);
	spin_unlock_irq(&dev_priv->irq_lock);
	/* unmask all PM interrupts */
	I915_WRITE(GEN6_PMINTRMSK, 0);

@ -3482,7 +3474,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
		pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
		pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
								      pcbr_offset,
								      -1,
								      I915_GTT_OFFSET_NONE,
								      pctx_size);
		goto out;
	}
@ -3609,10 +3601,10 @@ static void valleyview_enable_rps(struct drm_device *dev)

	/* requires MSI enabled */
	I915_WRITE(GEN6_PMIER, GEN6_PM_RPS_EVENTS);
	spin_lock_irq(&dev_priv->rps.lock);
	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON(dev_priv->rps.pm_iir != 0);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock_irq(&dev_priv->rps.lock);
	spin_unlock_irq(&dev_priv->irq_lock);
	/* enable all PM interrupts */
	I915_WRITE(GEN6_PMINTRMSK, 0);

@ -3708,7 +3700,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)

	intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset |
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
			MI_MM_SPACE_GTT |
			MI_SAVE_EXT_STATE_EN |
			MI_RESTORE_EXT_STATE_EN |
@ -3731,7 +3723,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
		return;
	}

	I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN);
	I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
}
@ -440,14 +440,14 @@ static int init_ring_common(struct intel_ring_buffer *ring)
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values. */
	I915_WRITE_START(ring, obj->gtt_offset);
	I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
	I915_WRITE_CTL(ring,
			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
		     I915_READ_START(ring) == obj->gtt_offset &&
		     I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
		     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
		DRM_ERROR("%s initialization failed "
				"ctl %08x head %08x tail %08x start %08x\n",
@ -505,7 +505,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
	if (ret)
		goto err_unref;

	pc->gtt_offset = obj->gtt_offset;
	pc->gtt_offset = i915_gem_obj_ggtt_offset(obj);
	pc->cpu_page = kmap(sg_page(obj->pages->sgl));
	if (pc->cpu_page == NULL) {
		ret = -ENOMEM;
@ -836,7 +836,7 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount.gt++ == 0) {
	if (ring->irq_refcount++ == 0) {
		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
		POSTING_READ(GTIMR);
@ -854,7 +854,7 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring)
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount.gt == 0) {
	if (--ring->irq_refcount == 0) {
		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
		POSTING_READ(GTIMR);
@ -873,7 +873,7 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount.gt++ == 0) {
	if (ring->irq_refcount++ == 0) {
		dev_priv->irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE(IMR, dev_priv->irq_mask);
		POSTING_READ(IMR);
@ -891,7 +891,7 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring)
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount.gt == 0) {
	if (--ring->irq_refcount == 0) {
		dev_priv->irq_mask |= ring->irq_enable_mask;
		I915_WRITE(IMR, dev_priv->irq_mask);
		POSTING_READ(IMR);
@ -910,7 +910,7 @@ i8xx_ring_get_irq(struct intel_ring_buffer *ring)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount.gt++ == 0) {
	if (ring->irq_refcount++ == 0) {
		dev_priv->irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE16(IMR, dev_priv->irq_mask);
		POSTING_READ16(IMR);
@ -928,7 +928,7 @@ i8xx_ring_put_irq(struct intel_ring_buffer *ring)
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount.gt == 0) {
	if (--ring->irq_refcount == 0) {
		dev_priv->irq_mask |= ring->irq_enable_mask;
		I915_WRITE16(IMR, dev_priv->irq_mask);
		POSTING_READ16(IMR);
@ -1021,7 +1021,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
	gen6_gt_force_wake_get(dev_priv);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount.gt++ == 0) {
	if (ring->irq_refcount++ == 0) {
		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
			I915_WRITE_IMR(ring,
				       ~(ring->irq_enable_mask |
@ -1045,7 +1045,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount.gt == 0) {
	if (--ring->irq_refcount == 0) {
		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
			I915_WRITE_IMR(ring,
				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
@ -1070,14 +1070,14 @@ hsw_vebox_get_irq(struct intel_ring_buffer *ring)
	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	if (ring->irq_refcount.pm++ == 0) {
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		u32 pm_imr = I915_READ(GEN6_PMIMR);
		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
		I915_WRITE(GEN6_PMIMR, pm_imr & ~ring->irq_enable_mask);
		POSTING_READ(GEN6_PMIMR);
	}
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}
@ -1092,14 +1092,14 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring)
	if (!dev->irq_enabled)
		return;

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	if (--ring->irq_refcount.pm == 0) {
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		u32 pm_imr = I915_READ(GEN6_PMIMR);
		I915_WRITE_IMR(ring, ~0);
		I915_WRITE(GEN6_PMIMR, pm_imr | ring->irq_enable_mask);
		POSTING_READ(GEN6_PMIMR);
	}
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static int
@ -1144,7 +1144,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
		intel_ring_advance(ring);
	} else {
		struct drm_i915_gem_object *obj = ring->private;
		u32 cs_offset = obj->gtt_offset;
		u32 cs_offset = i915_gem_obj_ggtt_offset(obj);

		if (len > I830_BATCH_LIMIT)
			return -ENOSPC;
@ -1229,7 +1229,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
		goto err_unref;
	}

	ring->status_page.gfx_addr = obj->gtt_offset;
	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
	ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
	if (ring->status_page.page_addr == NULL) {
		ret = -ENOMEM;
@ -1316,7 +1316,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
		goto err_unpin;

	ring->virtual_start =
		ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
			   ring->size);
	if (ring->virtual_start == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
@ -2008,8 +2008,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
	ring->add_request = gen6_add_request;
	ring->get_seqno = gen6_ring_get_seqno;
	ring->set_seqno = ring_set_seqno;
	ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT |
		PM_VEBOX_CS_ERROR_INTERRUPT;
	ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
	ring->irq_get = hsw_vebox_get_irq;
	ring->irq_put = hsw_vebox_put_irq;
	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
@ -78,10 +78,7 @@ struct intel_ring_buffer {
	 */
	u32		last_retired_head;

	struct {
		u32	gt; /* protected by dev_priv->irq_lock */
		u32	pm; /* protected by dev_priv->rps.lock (sucks) */
	} irq_refcount;
	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
	u32		trace_irq_seqno;
	u32		sync_seqno[I915_NUM_RINGS-1];
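With the gt/pm split gone, every ring uses the same reference-counted enable pattern under one lock: unmask on the 0 -> 1 transition, mask again on 1 -> 0. A self-contained sketch of that pattern; mask_irq()/unmask_irq() are hypothetical stand-ins for the GTIMR/IMR/PMIMR writes above, and the driver does all of this under dev_priv->irq_lock:

#include <stdio.h>

static void unmask_irq(void) { puts("irq unmasked"); }
static void mask_irq(void)   { puts("irq masked"); }

static unsigned int irq_refcount;	/* one counter, one lock in the driver */

static void ring_get_irq(void)
{
	if (irq_refcount++ == 0)	/* first user unmasks */
		unmask_irq();
}

static void ring_put_irq(void)
{
	if (--irq_refcount == 0)	/* last user masks again */
		mask_irq();
}

int main(void)
{
	ring_get_irq();
	ring_get_irq();		/* nested get: no hardware access */
	ring_put_irq();
	ring_put_irq();		/* final put masks the interrupt */
	return 0;
}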
@ -1357,22 +1357,21 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
	}

	/* Cross check the port pixel multiplier with the sdvo encoder state. */
	intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT, &val, 1);
	switch (val) {
	case SDVO_CLOCK_RATE_MULT_1X:
		encoder_pixel_multiplier = 1;
		break;
	case SDVO_CLOCK_RATE_MULT_2X:
		encoder_pixel_multiplier = 2;
		break;
	case SDVO_CLOCK_RATE_MULT_4X:
		encoder_pixel_multiplier = 4;
		break;
	if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT,
				 &val, 1)) {
		switch (val) {
		case SDVO_CLOCK_RATE_MULT_1X:
			encoder_pixel_multiplier = 1;
			break;
		case SDVO_CLOCK_RATE_MULT_2X:
			encoder_pixel_multiplier = 2;
			break;
		case SDVO_CLOCK_RATE_MULT_4X:
			encoder_pixel_multiplier = 4;
			break;
		}
	}

	if (HAS_PCH_SPLIT(dev))
		return; /* no pixel multiplier readout support yet */

	WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier,
	     "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
	     pipe_config->pixel_multiplier, encoder_pixel_multiplier);
@ -133,7 +133,7 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,

	I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
	I915_WRITE(SPCNTR(pipe, plane), sprctl);
	I915_MODIFY_DISPBASE(SPSURF(pipe, plane), obj->gtt_offset +
	I915_MODIFY_DISPBASE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
			     sprsurf_offset);
	POSTING_READ(SPSURF(pipe, plane));
}
@ -308,7 +308,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
	if (intel_plane->can_scale)
		I915_WRITE(SPRSCALE(pipe), sprscale);
	I915_WRITE(SPRCTL(pipe), sprctl);
	I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset);
	I915_MODIFY_DISPBASE(SPRSURF(pipe),
			     i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
	POSTING_READ(SPRSURF(pipe));

	/* potentially re-enable LP watermarks */
@ -478,7 +479,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
	I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
	I915_WRITE(DVSSCALE(pipe), dvsscale);
	I915_WRITE(DVSCNTR(pipe), dvscntr);
	I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset);
	I915_MODIFY_DISPBASE(DVSSURF(pipe),
			     i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
	POSTING_READ(DVSSURF(pipe));
}
@ -138,10 +138,7 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
/*
 * Basic range manager support (drm_mm.c)
 */
extern struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
					       unsigned long start,
					       unsigned long size,
					       bool atomic);
extern int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
						    unsigned long size,
						    unsigned alignment,
@ -155,6 +152,7 @@ extern struct drm_mm_node *drm_mm_get_block_range_generic(
						unsigned long start,
						unsigned long end,
						int atomic);

static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
						   unsigned long size,
						   unsigned alignment)
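drm_mm_create_block() allocated and returned a node; the replacement drm_mm_reserve_node() instead takes a caller-owned node (typically embedded in a larger struct) with start and size already filled in, and returns 0 or a negative errno. A kernel-side usage sketch under those assumptions:

/* Sketch only: reserve a fixed [start, start + size) range. The caller
 * keeps ownership of 'node'; on success drm_mm tracks it until the node
 * is removed again. */
static int reserve_fixed_range(struct drm_mm *mm, struct drm_mm_node *node,
			       unsigned long start, unsigned long size)
{
	node->start = start;
	node->size = size;
	return drm_mm_reserve_node(mm, node);	/* fails if no hole covers it */
}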