Merge branch 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (30 commits)
  vgaarb: fix incorrect dereference of userspace pointer.
  drm/radeon/kms: retry auxch on 0x20 timeout value.
  drm/radeon: Skip dma copy test in benchmark if card doesn't have dma engine.
  drm/vmwgfx: Fix a circular locking dependency bug.
  drm/vmwgfx: Drop scanout flag compat and add execbuf ioctl parameter members. Bumps major.
  drm/vmwgfx: Report propper framebuffer_{max|min}_{width|height}
  drm/vmwgfx: Update the user-space interface.
  drm/radeon/kms: fix screen clearing before fbcon.
  nouveau: fix state detection with switchable graphics
  drm/nouveau: move dereferences after null checks
  drm/nv50: make the pgraph irq handler loop like the pre-nv50 version
  drm/nv50: delete ramfc object after disabling fifo, not before
  drm/nv50: avoid unloading pgraph context when ctxprog is running
  drm/nv50: align size of buffer object to the right boundaries.
  drm/nv50: disregard dac outputs in nv50_sor_dpms()
  drm/nv50: prevent multiple init tables being parsed at the same time
  drm/nouveau: make dp auxch xfer len check for reads only
  drm/nv40: make INIT_COMPUTE_MEM a NOP, just like nv50
  drm/nouveau: Add proper vgaarb support.
  drm/nouveau: Fix fbcon on mixed pre-NV50 + NV50 multicard.
  ...
commit 67dcabd061
@@ -90,21 +90,21 @@ int nouveau_hybrid_setup(struct drm_device *dev)
 {
 	int result;
 
-	if (nouveau_dsm(dev, NOUVEAU_DSM_ACTIVE, NOUVEAU_DSM_ACTIVE_QUERY,
+	if (nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STATE,
 			&result))
 		return -ENODEV;
 
 	NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result);
 
-	if (result & 0x1) { /* Stamina mode - disable the external GPU */
+	if (result) { /* Ensure that the external GPU is enabled */
+		nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
+		nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
+			    NULL);
+	} else { /* Stamina mode - disable the external GPU */
 		nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA,
 			    NULL);
 		nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA,
 			    NULL);
-	} else { /* Ensure that the external GPU is enabled */
-		nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
-		nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
-			    NULL);
 	}
 
 	return 0;
@@ -1865,7 +1865,7 @@ init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 
 	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
 
-	if (dev_priv->card_type >= NV_50)
+	if (dev_priv->card_type >= NV_40)
 		return 1;
 
 	/*
@@ -3765,7 +3765,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
 	 */
 
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct init_exec iexec = {true, false};
 	struct nvbios *bios = &dev_priv->VBIOS;
 	uint8_t *table = &bios->data[bios->display.script_table_ptr];
 	uint8_t *otable = NULL;
@@ -3845,8 +3844,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
 		}
 	}
 
-	bios->display.output = dcbent;
-
 	if (pxclk == 0) {
 		script = ROM16(otable[6]);
 		if (!script) {
@@ -3855,7 +3852,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
 		}
 
 		NV_TRACE(dev, "0x%04X: parsing output script 0\n", script);
-		parse_init_table(bios, script, &iexec);
+		nouveau_bios_run_init_table(dev, script, dcbent);
 	} else
 	if (pxclk == -1) {
 		script = ROM16(otable[8]);
@@ -3865,7 +3862,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
 		}
 
 		NV_TRACE(dev, "0x%04X: parsing output script 1\n", script);
-		parse_init_table(bios, script, &iexec);
+		nouveau_bios_run_init_table(dev, script, dcbent);
 	} else
 	if (pxclk == -2) {
 		if (table[4] >= 12)
@@ -3878,7 +3875,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
 		}
 
 		NV_TRACE(dev, "0x%04X: parsing output script 2\n", script);
-		parse_init_table(bios, script, &iexec);
+		nouveau_bios_run_init_table(dev, script, dcbent);
 	} else
 	if (pxclk > 0) {
 		script = ROM16(otable[table[4] + i*6 + 2]);
@@ -3890,7 +3887,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
 		}
 
 		NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script);
-		parse_init_table(bios, script, &iexec);
+		nouveau_bios_run_init_table(dev, script, dcbent);
 	} else
 	if (pxclk < 0) {
 		script = ROM16(otable[table[4] + i*6 + 4]);
@@ -3902,7 +3899,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
 		}
 
 		NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script);
-		parse_init_table(bios, script, &iexec);
+		nouveau_bios_run_init_table(dev, script, dcbent);
 	}
 
 	return 0;
@@ -5864,10 +5861,13 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nvbios *bios = &dev_priv->VBIOS;
 	struct init_exec iexec = { true, false };
+	unsigned long flags;
 
+	spin_lock_irqsave(&bios->lock, flags);
 	bios->display.output = dcbent;
 	parse_init_table(bios, table, &iexec);
 	bios->display.output = NULL;
+	spin_unlock_irqrestore(&bios->lock, flags);
 }
 
 static bool NVInitVBIOS(struct drm_device *dev)
@@ -5876,6 +5876,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
 	struct nvbios *bios = &dev_priv->VBIOS;
 
 	memset(bios, 0, sizeof(struct nvbios));
+	spin_lock_init(&bios->lock);
 	bios->dev = dev;
 
 	if (!NVShadowVBIOS(dev, bios->data))
@@ -205,6 +205,8 @@ struct nvbios {
 	struct drm_device *dev;
 	struct nouveau_bios_info pub;
 
+	spinlock_t lock;
+
 	uint8_t data[NV_PROM_SIZE];
 	unsigned int length;
 	bool execute;
@ -65,8 +65,10 @@ nouveau_bo_fixup_align(struct drm_device *dev,
|
|||
|
||||
/*
|
||||
* Some of the tile_flags have a periodic structure of N*4096 bytes,
|
||||
* align to to that as well as the page size. Overallocate memory to
|
||||
* avoid corruption of other buffer objects.
|
||||
* align to to that as well as the page size. Align the size to the
|
||||
* appropriate boundaries. This does imply that sizes are rounded up
|
||||
* 3-7 pages, so be aware of this and do not waste memory by allocating
|
||||
* many small buffers.
|
||||
*/
|
||||
if (dev_priv->card_type == NV_50) {
|
||||
uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15;
|
||||
|
@ -77,22 +79,20 @@ nouveau_bo_fixup_align(struct drm_device *dev,
|
|||
case 0x2800:
|
||||
case 0x4800:
|
||||
case 0x7a00:
|
||||
*size = roundup(*size, block_size);
|
||||
if (is_power_of_2(block_size)) {
|
||||
*size += 3 * block_size;
|
||||
for (i = 1; i < 10; i++) {
|
||||
*align = 12 * i * block_size;
|
||||
if (!(*align % 65536))
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
*size += 6 * block_size;
|
||||
for (i = 1; i < 10; i++) {
|
||||
*align = 8 * i * block_size;
|
||||
if (!(*align % 65536))
|
||||
break;
|
||||
}
|
||||
}
|
||||
*size = roundup(*size, *align);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
|
|
|
@ -278,12 +278,11 @@ nouveau_channel_free(struct nouveau_channel *chan)
|
|||
/* Ensure the channel is no longer active on the GPU */
|
||||
pfifo->reassign(dev, false);
|
||||
|
||||
if (pgraph->channel(dev) == chan) {
|
||||
pgraph->fifo_access(dev, false);
|
||||
pgraph->fifo_access(dev, false);
|
||||
if (pgraph->channel(dev) == chan)
|
||||
pgraph->unload_context(dev);
|
||||
pgraph->fifo_access(dev, true);
|
||||
}
|
||||
pgraph->destroy_context(chan);
|
||||
pgraph->fifo_access(dev, true);
|
||||
|
||||
if (pfifo->channel_id(dev) == chan->id) {
|
||||
pfifo->disable(dev);
|
||||
|
|
|
@ -88,13 +88,14 @@ nouveau_connector_destroy(struct drm_connector *drm_connector)
|
|||
{
|
||||
struct nouveau_connector *nv_connector =
|
||||
nouveau_connector(drm_connector);
|
||||
struct drm_device *dev = nv_connector->base.dev;
|
||||
|
||||
NV_DEBUG_KMS(dev, "\n");
|
||||
struct drm_device *dev;
|
||||
|
||||
if (!nv_connector)
|
||||
return;
|
||||
|
||||
dev = nv_connector->base.dev;
|
||||
NV_DEBUG_KMS(dev, "\n");
|
||||
|
||||
kfree(nv_connector->edid);
|
||||
drm_sysfs_connector_remove(drm_connector);
|
||||
drm_connector_cleanup(drm_connector);
|
||||
|
|
|
@ -502,12 +502,12 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
|
|||
break;
|
||||
}
|
||||
|
||||
if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
|
||||
ret = -EREMOTEIO;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (cmd & 1) {
|
||||
if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
|
||||
ret = -EREMOTEIO;
|
||||
goto out;
|
||||
}
|
||||
|
||||
for (i = 0; i < 4; i++) {
|
||||
data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i));
|
||||
NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]);
|
||||
|
|
|
@@ -56,7 +56,7 @@ int nouveau_vram_pushbuf;
 module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
 
 MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM");
-int nouveau_vram_notify;
+int nouveau_vram_notify = 1;
 module_param_named(vram_notify, nouveau_vram_notify, int, 0400);
 
 MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)");
@@ -75,6 +75,14 @@ MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
 int nouveau_ignorelid = 0;
 module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
 
+MODULE_PARM_DESC(noagp, "Disable all acceleration");
+int nouveau_noaccel = 0;
+module_param_named(noaccel, nouveau_noaccel, int, 0400);
+
+MODULE_PARM_DESC(noagp, "Disable fbcon acceleration");
+int nouveau_nofbaccel = 0;
+module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
+
 MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
 		 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
 		 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
@@ -678,6 +678,8 @@ extern int nouveau_reg_debug;
 extern char *nouveau_vbios;
 extern int nouveau_ctxfw;
 extern int nouveau_ignorelid;
+extern int nouveau_nofbaccel;
+extern int nouveau_noaccel;
 
 /* nouveau_state.c */
 extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
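The two options introduced above are ordinary module parameters, so a brief usage sketch (the only assumption being that the driver is loaded as the "nouveau" module): booting with nouveau.noaccel=1 on the kernel command line, or loading with "modprobe nouveau noaccel=1", skips PGRAPH/PFIFO setup entirely (see the nouveau_state.c hunk later in this diff), while nouveau.nofbaccel=1 only forces the unaccelerated fbcon path; both default to 0.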
@ -107,6 +107,34 @@ static struct fb_ops nouveau_fbcon_ops = {
|
|||
.fb_setcmap = drm_fb_helper_setcmap,
|
||||
};
|
||||
|
||||
static struct fb_ops nv04_fbcon_ops = {
|
||||
.owner = THIS_MODULE,
|
||||
.fb_check_var = drm_fb_helper_check_var,
|
||||
.fb_set_par = drm_fb_helper_set_par,
|
||||
.fb_setcolreg = drm_fb_helper_setcolreg,
|
||||
.fb_fillrect = nv04_fbcon_fillrect,
|
||||
.fb_copyarea = nv04_fbcon_copyarea,
|
||||
.fb_imageblit = nv04_fbcon_imageblit,
|
||||
.fb_sync = nouveau_fbcon_sync,
|
||||
.fb_pan_display = drm_fb_helper_pan_display,
|
||||
.fb_blank = drm_fb_helper_blank,
|
||||
.fb_setcmap = drm_fb_helper_setcmap,
|
||||
};
|
||||
|
||||
static struct fb_ops nv50_fbcon_ops = {
|
||||
.owner = THIS_MODULE,
|
||||
.fb_check_var = drm_fb_helper_check_var,
|
||||
.fb_set_par = drm_fb_helper_set_par,
|
||||
.fb_setcolreg = drm_fb_helper_setcolreg,
|
||||
.fb_fillrect = nv50_fbcon_fillrect,
|
||||
.fb_copyarea = nv50_fbcon_copyarea,
|
||||
.fb_imageblit = nv50_fbcon_imageblit,
|
||||
.fb_sync = nouveau_fbcon_sync,
|
||||
.fb_pan_display = drm_fb_helper_pan_display,
|
||||
.fb_blank = drm_fb_helper_blank,
|
||||
.fb_setcmap = drm_fb_helper_setcmap,
|
||||
};
|
||||
|
||||
static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
|
||||
u16 blue, int regno)
|
||||
{
|
||||
|
@ -267,8 +295,12 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
|
|||
dev_priv->fbdev_info = info;
|
||||
|
||||
strcpy(info->fix.id, "nouveaufb");
|
||||
info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
|
||||
FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT;
|
||||
if (nouveau_nofbaccel)
|
||||
info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED;
|
||||
else
|
||||
info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
|
||||
FBINFO_HWACCEL_FILLRECT |
|
||||
FBINFO_HWACCEL_IMAGEBLIT;
|
||||
info->fbops = &nouveau_fbcon_ops;
|
||||
info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset -
|
||||
dev_priv->vm_vram_base;
|
||||
|
@ -316,13 +348,15 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
|
|||
par->nouveau_fb = nouveau_fb;
|
||||
par->dev = dev;
|
||||
|
||||
if (dev_priv->channel) {
|
||||
if (dev_priv->channel && !nouveau_nofbaccel) {
|
||||
switch (dev_priv->card_type) {
|
||||
case NV_50:
|
||||
nv50_fbcon_accel_init(info);
|
||||
info->fbops = &nv50_fbcon_ops;
|
||||
break;
|
||||
default:
|
||||
nv04_fbcon_accel_init(info);
|
||||
info->fbops = &nv04_fbcon_ops;
|
||||
break;
|
||||
};
|
||||
}
|
||||
|
|
|
@ -40,7 +40,13 @@ int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb);
|
|||
void nouveau_fbcon_restore(void);
|
||||
void nouveau_fbcon_zfill(struct drm_device *dev);
|
||||
|
||||
void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
|
||||
void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
|
||||
void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
|
||||
int nv04_fbcon_accel_init(struct fb_info *info);
|
||||
void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
|
||||
void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
|
||||
void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
|
||||
int nv50_fbcon_accel_init(struct fb_info *info);
|
||||
|
||||
void nouveau_fbcon_gpu_lockup(struct fb_info *info);
|
||||
|
|
|
@ -925,7 +925,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
|
|||
}
|
||||
|
||||
if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
|
||||
spin_lock(&nvbo->bo.lock);
|
||||
ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
|
||||
spin_unlock(&nvbo->bo.lock);
|
||||
} else {
|
||||
ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
|
||||
if (ret == 0)
|
||||
|
|
|
@ -97,8 +97,8 @@ nouveau_grctx_prog_load(struct drm_device *dev)
|
|||
}
|
||||
|
||||
pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
|
||||
if (!pgraph->ctxprog) {
|
||||
NV_ERROR(dev, "OOM copying ctxprog\n");
|
||||
if (!pgraph->ctxvals) {
|
||||
NV_ERROR(dev, "OOM copying ctxvals\n");
|
||||
release_firmware(fw);
|
||||
nouveau_grctx_fini(dev);
|
||||
return -ENOMEM;
|
||||
|
|
|
@ -211,6 +211,20 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
|
|||
get + 4);
|
||||
}
|
||||
|
||||
if (status & NV_PFIFO_INTR_SEMAPHORE) {
|
||||
uint32_t sem;
|
||||
|
||||
status &= ~NV_PFIFO_INTR_SEMAPHORE;
|
||||
nv_wr32(dev, NV03_PFIFO_INTR_0,
|
||||
NV_PFIFO_INTR_SEMAPHORE);
|
||||
|
||||
sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
|
||||
nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
|
||||
|
||||
nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
|
||||
nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
|
||||
}
|
||||
|
||||
if (status) {
|
||||
NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
|
||||
status, chid);
|
||||
|
@ -566,86 +580,99 @@ nouveau_pgraph_irq_handler(struct drm_device *dev)
|
|||
static void
|
||||
nv50_pgraph_irq_handler(struct drm_device *dev)
|
||||
{
|
||||
uint32_t status, nsource;
|
||||
uint32_t status;
|
||||
|
||||
status = nv_rd32(dev, NV03_PGRAPH_INTR);
|
||||
nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
|
||||
while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
|
||||
uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
|
||||
|
||||
if (status & 0x00000001) {
|
||||
nouveau_pgraph_intr_notify(dev, nsource);
|
||||
status &= ~0x00000001;
|
||||
nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
|
||||
}
|
||||
if (status & 0x00000001) {
|
||||
nouveau_pgraph_intr_notify(dev, nsource);
|
||||
status &= ~0x00000001;
|
||||
nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
|
||||
}
|
||||
|
||||
if (status & 0x00000010) {
|
||||
nouveau_pgraph_intr_error(dev, nsource |
|
||||
NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
|
||||
if (status & 0x00000010) {
|
||||
nouveau_pgraph_intr_error(dev, nsource |
|
||||
NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
|
||||
|
||||
status &= ~0x00000010;
|
||||
nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
|
||||
}
|
||||
status &= ~0x00000010;
|
||||
nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
|
||||
}
|
||||
|
||||
if (status & 0x00001000) {
|
||||
nv_wr32(dev, 0x400500, 0x00000000);
|
||||
nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
|
||||
nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
|
||||
NV40_PGRAPH_INTR_EN) & ~NV_PGRAPH_INTR_CONTEXT_SWITCH);
|
||||
nv_wr32(dev, 0x400500, 0x00010001);
|
||||
if (status & 0x00001000) {
|
||||
nv_wr32(dev, 0x400500, 0x00000000);
|
||||
nv_wr32(dev, NV03_PGRAPH_INTR,
|
||||
NV_PGRAPH_INTR_CONTEXT_SWITCH);
|
||||
nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
|
||||
NV40_PGRAPH_INTR_EN) &
|
||||
~NV_PGRAPH_INTR_CONTEXT_SWITCH);
|
||||
nv_wr32(dev, 0x400500, 0x00010001);
|
||||
|
||||
nv50_graph_context_switch(dev);
|
||||
nv50_graph_context_switch(dev);
|
||||
|
||||
status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
|
||||
}
|
||||
status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
|
||||
}
|
||||
|
||||
if (status & 0x00100000) {
|
||||
nouveau_pgraph_intr_error(dev, nsource |
|
||||
NV03_PGRAPH_NSOURCE_DATA_ERROR);
|
||||
if (status & 0x00100000) {
|
||||
nouveau_pgraph_intr_error(dev, nsource |
|
||||
NV03_PGRAPH_NSOURCE_DATA_ERROR);
|
||||
|
||||
status &= ~0x00100000;
|
||||
nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
|
||||
}
|
||||
status &= ~0x00100000;
|
||||
nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
|
||||
}
|
||||
|
||||
if (status & 0x00200000) {
|
||||
int r;
|
||||
if (status & 0x00200000) {
|
||||
int r;
|
||||
|
||||
nouveau_pgraph_intr_error(dev, nsource |
|
||||
NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
|
||||
nouveau_pgraph_intr_error(dev, nsource |
|
||||
NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
|
||||
|
||||
NV_ERROR(dev, "magic set 1:\n");
|
||||
for (r = 0x408900; r <= 0x408910; r += 4)
|
||||
NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
|
||||
nv_wr32(dev, 0x408900, nv_rd32(dev, 0x408904) | 0xc0000000);
|
||||
for (r = 0x408e08; r <= 0x408e24; r += 4)
|
||||
NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
|
||||
nv_wr32(dev, 0x408e08, nv_rd32(dev, 0x408e08) | 0xc0000000);
|
||||
NV_ERROR(dev, "magic set 1:\n");
|
||||
for (r = 0x408900; r <= 0x408910; r += 4)
|
||||
NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
|
||||
nv_rd32(dev, r));
|
||||
nv_wr32(dev, 0x408900,
|
||||
nv_rd32(dev, 0x408904) | 0xc0000000);
|
||||
for (r = 0x408e08; r <= 0x408e24; r += 4)
|
||||
NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
|
||||
nv_rd32(dev, r));
|
||||
nv_wr32(dev, 0x408e08,
|
||||
nv_rd32(dev, 0x408e08) | 0xc0000000);
|
||||
|
||||
NV_ERROR(dev, "magic set 2:\n");
|
||||
for (r = 0x409900; r <= 0x409910; r += 4)
|
||||
NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
|
||||
nv_wr32(dev, 0x409900, nv_rd32(dev, 0x409904) | 0xc0000000);
|
||||
for (r = 0x409e08; r <= 0x409e24; r += 4)
|
||||
NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
|
||||
nv_wr32(dev, 0x409e08, nv_rd32(dev, 0x409e08) | 0xc0000000);
|
||||
NV_ERROR(dev, "magic set 2:\n");
|
||||
for (r = 0x409900; r <= 0x409910; r += 4)
|
||||
NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
|
||||
nv_rd32(dev, r));
|
||||
nv_wr32(dev, 0x409900,
|
||||
nv_rd32(dev, 0x409904) | 0xc0000000);
|
||||
for (r = 0x409e08; r <= 0x409e24; r += 4)
|
||||
NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
|
||||
nv_rd32(dev, r));
|
||||
nv_wr32(dev, 0x409e08,
|
||||
nv_rd32(dev, 0x409e08) | 0xc0000000);
|
||||
|
||||
status &= ~0x00200000;
|
||||
nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
|
||||
nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
|
||||
}
|
||||
status &= ~0x00200000;
|
||||
nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
|
||||
nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
|
||||
}
|
||||
|
||||
if (status) {
|
||||
NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
|
||||
nv_wr32(dev, NV03_PGRAPH_INTR, status);
|
||||
}
|
||||
if (status) {
|
||||
NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
|
||||
status);
|
||||
nv_wr32(dev, NV03_PGRAPH_INTR, status);
|
||||
}
|
||||
|
||||
{
|
||||
const int isb = (1 << 16) | (1 << 0);
|
||||
{
|
||||
const int isb = (1 << 16) | (1 << 0);
|
||||
|
||||
if ((nv_rd32(dev, 0x400500) & isb) != isb)
|
||||
nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | isb);
|
||||
nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
|
||||
if ((nv_rd32(dev, 0x400500) & isb) != isb)
|
||||
nv_wr32(dev, 0x400500,
|
||||
nv_rd32(dev, 0x400500) | isb);
|
||||
}
|
||||
}
|
||||
|
||||
nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
|
||||
nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
|
||||
}
|
||||
|
||||
static void
|
||||
|
|
|
@ -34,15 +34,20 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
|
|||
{
|
||||
struct drm_device *dev = chan->dev;
|
||||
struct nouveau_bo *ntfy = NULL;
|
||||
uint32_t flags;
|
||||
int ret;
|
||||
|
||||
ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, nouveau_vram_notify ?
|
||||
TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT,
|
||||
if (nouveau_vram_notify)
|
||||
flags = TTM_PL_FLAG_VRAM;
|
||||
else
|
||||
flags = TTM_PL_FLAG_TT;
|
||||
|
||||
ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags,
|
||||
0, 0x0000, false, true, &ntfy);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = nouveau_bo_pin(ntfy, TTM_PL_FLAG_VRAM);
|
||||
ret = nouveau_bo_pin(ntfy, flags);
|
||||
if (ret)
|
||||
goto out_err;
|
||||
|
||||
|
@ -128,6 +133,8 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
|
|||
target = NV_DMA_TARGET_PCI;
|
||||
} else {
|
||||
target = NV_DMA_TARGET_AGP;
|
||||
if (dev_priv->card_type >= NV_50)
|
||||
offset += dev_priv->vm_gart_base;
|
||||
}
|
||||
} else {
|
||||
NV_ERROR(dev, "Bad DMA target, mem_type %d!\n",
|
||||
|
|
|
@ -885,11 +885,12 @@ int
|
|||
nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
|
||||
struct nouveau_gpuobj **gpuobj_ret)
|
||||
{
|
||||
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
|
||||
struct drm_nouveau_private *dev_priv;
|
||||
struct nouveau_gpuobj *gpuobj;
|
||||
|
||||
if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
|
||||
return -EINVAL;
|
||||
dev_priv = chan->dev->dev_private;
|
||||
|
||||
gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
|
||||
if (!gpuobj)
|
||||
|
|
|
@@ -99,6 +99,7 @@
  * the card will hang early on in the X init process.
  */
 #	define NV_PMC_ENABLE_UNK13			(1<<13)
+#define NV40_PMC_GRAPH_UNITS			0x00001540
 #define NV40_PMC_BACKLIGHT			0x000015f0
 #	define NV40_PMC_BACKLIGHT_MASK			0x001f0000
 #define NV40_PMC_1700				0x00001700
@ -54,11 +54,12 @@ static void
|
|||
nouveau_sgdma_clear(struct ttm_backend *be)
|
||||
{
|
||||
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
|
||||
struct drm_device *dev = nvbe->dev;
|
||||
|
||||
NV_DEBUG(nvbe->dev, "\n");
|
||||
struct drm_device *dev;
|
||||
|
||||
if (nvbe && nvbe->pages) {
|
||||
dev = nvbe->dev;
|
||||
NV_DEBUG(dev, "\n");
|
||||
|
||||
if (nvbe->bound)
|
||||
be->func->unbind(be);
|
||||
|
||||
|
|
|
@ -310,6 +310,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
|
|||
static unsigned int
|
||||
nouveau_vga_set_decode(void *priv, bool state)
|
||||
{
|
||||
struct drm_device *dev = priv;
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
|
||||
if (dev_priv->chipset >= 0x40)
|
||||
nv_wr32(dev, 0x88054, state);
|
||||
else
|
||||
nv_wr32(dev, 0x1854, state);
|
||||
|
||||
if (state)
|
||||
return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
|
||||
VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
|
||||
|
@ -427,15 +435,19 @@ nouveau_card_init(struct drm_device *dev)
|
|||
if (ret)
|
||||
goto out_timer;
|
||||
|
||||
/* PGRAPH */
|
||||
ret = engine->graph.init(dev);
|
||||
if (ret)
|
||||
goto out_fb;
|
||||
if (nouveau_noaccel)
|
||||
engine->graph.accel_blocked = true;
|
||||
else {
|
||||
/* PGRAPH */
|
||||
ret = engine->graph.init(dev);
|
||||
if (ret)
|
||||
goto out_fb;
|
||||
|
||||
/* PFIFO */
|
||||
ret = engine->fifo.init(dev);
|
||||
if (ret)
|
||||
goto out_graph;
|
||||
/* PFIFO */
|
||||
ret = engine->fifo.init(dev);
|
||||
if (ret)
|
||||
goto out_graph;
|
||||
}
|
||||
|
||||
/* this call irq_preinstall, register irq handler and
|
||||
* call irq_postinstall
|
||||
|
@ -479,9 +491,11 @@ nouveau_card_init(struct drm_device *dev)
|
|||
out_irq:
|
||||
drm_irq_uninstall(dev);
|
||||
out_fifo:
|
||||
engine->fifo.takedown(dev);
|
||||
if (!nouveau_noaccel)
|
||||
engine->fifo.takedown(dev);
|
||||
out_graph:
|
||||
engine->graph.takedown(dev);
|
||||
if (!nouveau_noaccel)
|
||||
engine->graph.takedown(dev);
|
||||
out_fb:
|
||||
engine->fb.takedown(dev);
|
||||
out_timer:
|
||||
|
@ -518,8 +532,10 @@ static void nouveau_card_takedown(struct drm_device *dev)
|
|||
dev_priv->channel = NULL;
|
||||
}
|
||||
|
||||
engine->fifo.takedown(dev);
|
||||
engine->graph.takedown(dev);
|
||||
if (!nouveau_noaccel) {
|
||||
engine->fifo.takedown(dev);
|
||||
engine->graph.takedown(dev);
|
||||
}
|
||||
engine->fb.takedown(dev);
|
||||
engine->timer.takedown(dev);
|
||||
engine->mc.takedown(dev);
|
||||
|
@ -817,6 +833,15 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
|
|||
case NOUVEAU_GETPARAM_VM_VRAM_BASE:
|
||||
getparam->value = dev_priv->vm_vram_base;
|
||||
break;
|
||||
case NOUVEAU_GETPARAM_GRAPH_UNITS:
|
||||
/* NV40 and NV50 versions are quite different, but register
|
||||
* address is the same. User is supposed to know the card
|
||||
* family anyway... */
|
||||
if (dev_priv->chipset >= 0x40) {
|
||||
getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS);
|
||||
break;
|
||||
}
|
||||
/* FALLTHRU */
|
||||
default:
|
||||
NV_ERROR(dev, "unknown parameter %lld\n", getparam->param);
|
||||
return -EINVAL;
|
||||
|
|
|
@ -27,7 +27,7 @@
|
|||
#include "nouveau_dma.h"
|
||||
#include "nouveau_fbcon.h"
|
||||
|
||||
static void
|
||||
void
|
||||
nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
|
||||
{
|
||||
struct nouveau_fbcon_par *par = info->par;
|
||||
|
@ -54,7 +54,7 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
|
|||
FIRE_RING(chan);
|
||||
}
|
||||
|
||||
static void
|
||||
void
|
||||
nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
|
||||
{
|
||||
struct nouveau_fbcon_par *par = info->par;
|
||||
|
@ -88,7 +88,7 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
|
|||
FIRE_RING(chan);
|
||||
}
|
||||
|
||||
static void
|
||||
void
|
||||
nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
|
||||
{
|
||||
struct nouveau_fbcon_par *par = info->par;
|
||||
|
@ -307,9 +307,6 @@ nv04_fbcon_accel_init(struct fb_info *info)
|
|||
|
||||
FIRE_RING(chan);
|
||||
|
||||
info->fbops->fb_fillrect = nv04_fbcon_fillrect;
|
||||
info->fbops->fb_copyarea = nv04_fbcon_copyarea;
|
||||
info->fbops->fb_imageblit = nv04_fbcon_imageblit;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -298,14 +298,17 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
|
|||
static void
|
||||
nv50_crtc_destroy(struct drm_crtc *crtc)
|
||||
{
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
|
||||
|
||||
NV_DEBUG_KMS(dev, "\n");
|
||||
struct drm_device *dev;
|
||||
struct nouveau_crtc *nv_crtc;
|
||||
|
||||
if (!crtc)
|
||||
return;
|
||||
|
||||
dev = crtc->dev;
|
||||
nv_crtc = nouveau_crtc(crtc);
|
||||
|
||||
NV_DEBUG_KMS(dev, "\n");
|
||||
|
||||
drm_crtc_cleanup(&nv_crtc->base);
|
||||
|
||||
nv50_cursor_fini(nv_crtc);
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
#include "nouveau_dma.h"
|
||||
#include "nouveau_fbcon.h"
|
||||
|
||||
static void
|
||||
void
|
||||
nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
|
||||
{
|
||||
struct nouveau_fbcon_par *par = info->par;
|
||||
|
@ -46,7 +46,7 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
|
|||
FIRE_RING(chan);
|
||||
}
|
||||
|
||||
static void
|
||||
void
|
||||
nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
|
||||
{
|
||||
struct nouveau_fbcon_par *par = info->par;
|
||||
|
@ -81,7 +81,7 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
|
|||
FIRE_RING(chan);
|
||||
}
|
||||
|
||||
static void
|
||||
void
|
||||
nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
|
||||
{
|
||||
struct nouveau_fbcon_par *par = info->par;
|
||||
|
@ -262,9 +262,6 @@ nv50_fbcon_accel_init(struct fb_info *info)
|
|||
OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys +
|
||||
dev_priv->vm_vram_base);
|
||||
|
||||
info->fbops->fb_fillrect = nv50_fbcon_fillrect;
|
||||
info->fbops->fb_copyarea = nv50_fbcon_copyarea;
|
||||
info->fbops->fb_imageblit = nv50_fbcon_imageblit;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -317,17 +317,20 @@ void
|
|||
nv50_fifo_destroy_context(struct nouveau_channel *chan)
|
||||
{
|
||||
struct drm_device *dev = chan->dev;
|
||||
struct nouveau_gpuobj_ref *ramfc = chan->ramfc;
|
||||
|
||||
NV_DEBUG(dev, "ch%d\n", chan->id);
|
||||
|
||||
nouveau_gpuobj_ref_del(dev, &chan->ramfc);
|
||||
nouveau_gpuobj_ref_del(dev, &chan->cache);
|
||||
|
||||
/* This will ensure the channel is seen as disabled. */
|
||||
chan->ramfc = NULL;
|
||||
nv50_fifo_channel_disable(dev, chan->id, false);
|
||||
|
||||
/* Dummy channel, also used on ch 127 */
|
||||
if (chan->id == 0)
|
||||
nv50_fifo_channel_disable(dev, 127, false);
|
||||
|
||||
nouveau_gpuobj_ref_del(dev, &ramfc);
|
||||
nouveau_gpuobj_ref_del(dev, &chan->cache);
|
||||
}
|
||||
|
||||
int
|
||||
|
|
|
@ -165,6 +165,12 @@ nv50_graph_channel(struct drm_device *dev)
|
|||
uint32_t inst;
|
||||
int i;
|
||||
|
||||
/* Be sure we're not in the middle of a context switch or bad things
|
||||
* will happen, such as unloading the wrong pgraph context.
|
||||
*/
|
||||
if (!nv_wait(0x400300, 0x00000001, 0x00000000))
|
||||
NV_ERROR(dev, "Ctxprog is still running\n");
|
||||
|
||||
inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
|
||||
if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
|
||||
return NULL;
|
||||
|
@ -275,7 +281,7 @@ nv50_graph_load_context(struct nouveau_channel *chan)
|
|||
int
|
||||
nv50_graph_unload_context(struct drm_device *dev)
|
||||
{
|
||||
uint32_t inst, fifo = nv_rd32(dev, 0x400500);
|
||||
uint32_t inst;
|
||||
|
||||
inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
|
||||
if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
|
||||
|
@ -283,12 +289,10 @@ nv50_graph_unload_context(struct drm_device *dev)
|
|||
inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;
|
||||
|
||||
nouveau_wait_for_idle(dev);
|
||||
nv_wr32(dev, 0x400500, fifo & ~1);
|
||||
nv_wr32(dev, 0x400784, inst);
|
||||
nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
|
||||
nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
|
||||
nouveau_wait_for_idle(dev);
|
||||
nv_wr32(dev, 0x400500, fifo);
|
||||
|
||||
nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
|
||||
return 0;
|
||||
|
|
|
@@ -101,6 +101,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
 		struct nouveau_encoder *nvenc = nouveau_encoder(enc);
 
 		if (nvenc == nv_encoder ||
+		    nvenc->disconnect != nv50_sor_disconnect ||
 		    nvenc->dcb->or != nv_encoder->dcb->or)
 			continue;
 
@@ -1,10 +1,14 @@
 config DRM_RADEON_KMS
-	bool "Enable modesetting on radeon by default"
+	bool "Enable modesetting on radeon by default - NEW DRIVER"
 	depends on DRM_RADEON
 	help
-	  Choose this option if you want kernel modesetting enabled by default,
-	  and you have a new enough userspace to support this. Running old
-	  userspaces with this enabled will cause pain.
+	  Choose this option if you want kernel modesetting enabled by default.
+
+	  This is a completely new driver. It's only part of the existing drm
+	  for compatibility reasons. It requires an entirely different graphics
+	  stack above it and works very differently from the old drm stack.
+	  i.e. don't enable this unless you know what you are doing it may
+	  cause issues or bugs compared to the previous userspace driver stack.
 
 	  When kernel modesetting is enabled the IOCTL of radeon/drm
 	  driver are considered as invalid and an error message is printed
@ -332,11 +332,13 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
|
|||
PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args;
|
||||
int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
|
||||
unsigned char *base;
|
||||
int retry_count = 0;
|
||||
|
||||
memset(&args, 0, sizeof(args));
|
||||
|
||||
base = (unsigned char *)rdev->mode_info.atom_context->scratch;
|
||||
|
||||
retry:
|
||||
memcpy(base, req_bytes, num_bytes);
|
||||
|
||||
args.lpAuxRequest = 0;
|
||||
|
@ -347,10 +349,12 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
|
|||
|
||||
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
|
||||
|
||||
if (args.ucReplyStatus) {
|
||||
DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x\n",
|
||||
if (args.ucReplyStatus && !args.ucDataOutLen) {
|
||||
if (args.ucReplyStatus == 0x20 && retry_count < 10)
|
||||
goto retry;
|
||||
DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n",
|
||||
req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
|
||||
chan->rec.i2c_id, args.ucReplyStatus);
|
||||
chan->rec.i2c_id, args.ucReplyStatus, retry_count);
|
||||
return false;
|
||||
}
|
||||
|
||||
|
|
|
@@ -1950,6 +1950,13 @@ int r600_resume(struct radeon_device *rdev)
 		DRM_ERROR("radeon: failled testing IB (%d).\n", r);
 		return r;
 	}
 
+	r = r600_audio_init(rdev);
+	if (r) {
+		DRM_ERROR("radeon: audio resume failed\n");
+		return r;
+	}
+
 	return r;
 }
 
@@ -1957,6 +1964,7 @@ int r600_suspend(struct radeon_device *rdev)
 {
 	int r;
 
+	r600_audio_fini(rdev);
 	/* FIXME: we should wait for ring to be empty */
 	r600_cp_stop(rdev);
 	rdev->cp.ready = false;
@@ -261,7 +261,6 @@ void r600_audio_fini(struct radeon_device *rdev)
 	if (!r600_audio_chipset_supported(rdev))
 		return;
 
-	WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
-
 	del_timer(&rdev->audio_timer);
+	WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
 }
@ -287,6 +287,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
|
|||
*connector_type = DRM_MODE_CONNECTOR_DVID;
|
||||
}
|
||||
|
||||
/* XFX Pine Group device rv730 reports no VGA DDC lines
|
||||
* even though they are wired up to record 0x93
|
||||
*/
|
||||
if ((dev->pdev->device == 0x9498) &&
|
||||
(dev->pdev->subsystem_vendor == 0x1682) &&
|
||||
(dev->pdev->subsystem_device == 0x2452)) {
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
*i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
|
|
|
@ -65,31 +65,42 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
|
|||
if (r) {
|
||||
goto out_cleanup;
|
||||
}
|
||||
start_jiffies = jiffies;
|
||||
for (i = 0; i < n; i++) {
|
||||
r = radeon_fence_create(rdev, &fence);
|
||||
if (r) {
|
||||
goto out_cleanup;
|
||||
|
||||
/* r100 doesn't have dma engine so skip the test */
|
||||
if (rdev->asic->copy_dma) {
|
||||
|
||||
start_jiffies = jiffies;
|
||||
for (i = 0; i < n; i++) {
|
||||
r = radeon_fence_create(rdev, &fence);
|
||||
if (r) {
|
||||
goto out_cleanup;
|
||||
}
|
||||
|
||||
r = radeon_copy_dma(rdev, saddr, daddr,
|
||||
size / RADEON_GPU_PAGE_SIZE, fence);
|
||||
|
||||
if (r) {
|
||||
goto out_cleanup;
|
||||
}
|
||||
r = radeon_fence_wait(fence, false);
|
||||
if (r) {
|
||||
goto out_cleanup;
|
||||
}
|
||||
radeon_fence_unref(&fence);
|
||||
}
|
||||
r = radeon_copy_dma(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence);
|
||||
if (r) {
|
||||
goto out_cleanup;
|
||||
end_jiffies = jiffies;
|
||||
time = end_jiffies - start_jiffies;
|
||||
time = jiffies_to_msecs(time);
|
||||
if (time > 0) {
|
||||
i = ((n * size) >> 10) / time;
|
||||
printk(KERN_INFO "radeon: dma %u bo moves of %ukb from"
|
||||
" %d to %d in %lums (%ukb/ms %ukb/s %uM/s)\n",
|
||||
n, size >> 10,
|
||||
sdomain, ddomain, time,
|
||||
i, i * 1000, (i * 1000) / 1024);
|
||||
}
|
||||
r = radeon_fence_wait(fence, false);
|
||||
if (r) {
|
||||
goto out_cleanup;
|
||||
}
|
||||
radeon_fence_unref(&fence);
|
||||
}
|
||||
end_jiffies = jiffies;
|
||||
time = end_jiffies - start_jiffies;
|
||||
time = jiffies_to_msecs(time);
|
||||
if (time > 0) {
|
||||
i = ((n * size) >> 10) / time;
|
||||
printk(KERN_INFO "radeon: dma %u bo moves of %ukb from %d to %d"
|
||||
" in %lums (%ukb/ms %ukb/s %uM/s)\n", n, size >> 10,
|
||||
sdomain, ddomain, time, i, i * 1000, (i * 1000) / 1024);
|
||||
}
|
||||
|
||||
start_jiffies = jiffies;
|
||||
for (i = 0; i < n; i++) {
|
||||
r = radeon_fence_create(rdev, &fence);
|
||||
|
|
|
@ -580,16 +580,18 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
|
|||
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
|
||||
struct drm_encoder *encoder;
|
||||
struct drm_encoder_helper_funcs *encoder_funcs;
|
||||
bool dret;
|
||||
bool dret = false;
|
||||
enum drm_connector_status ret = connector_status_disconnected;
|
||||
|
||||
encoder = radeon_best_single_encoder(connector);
|
||||
if (!encoder)
|
||||
ret = connector_status_disconnected;
|
||||
|
||||
radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
|
||||
dret = radeon_ddc_probe(radeon_connector);
|
||||
radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
|
||||
if (radeon_connector->ddc_bus) {
|
||||
radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
|
||||
dret = radeon_ddc_probe(radeon_connector);
|
||||
radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
|
||||
}
|
||||
if (dret) {
|
||||
if (radeon_connector->edid) {
|
||||
kfree(radeon_connector->edid);
|
||||
|
@ -740,11 +742,13 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
|
|||
struct drm_mode_object *obj;
|
||||
int i;
|
||||
enum drm_connector_status ret = connector_status_disconnected;
|
||||
bool dret;
|
||||
bool dret = false;
|
||||
|
||||
radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
|
||||
dret = radeon_ddc_probe(radeon_connector);
|
||||
radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
|
||||
if (radeon_connector->ddc_bus) {
|
||||
radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
|
||||
dret = radeon_ddc_probe(radeon_connector);
|
||||
radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
|
||||
}
|
||||
if (dret) {
|
||||
if (radeon_connector->edid) {
|
||||
kfree(radeon_connector->edid);
|
||||
|
|
|
@ -278,7 +278,7 @@ static void radeon_print_display_setup(struct drm_device *dev)
|
|||
DRM_INFO(" %s\n", connector_names[connector->connector_type]);
|
||||
if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
|
||||
DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]);
|
||||
if (radeon_connector->ddc_bus)
|
||||
if (radeon_connector->ddc_bus) {
|
||||
DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
|
||||
radeon_connector->ddc_bus->rec.mask_clk_reg,
|
||||
radeon_connector->ddc_bus->rec.mask_data_reg,
|
||||
|
@ -288,6 +288,15 @@ static void radeon_print_display_setup(struct drm_device *dev)
|
|||
radeon_connector->ddc_bus->rec.en_data_reg,
|
||||
radeon_connector->ddc_bus->rec.y_clk_reg,
|
||||
radeon_connector->ddc_bus->rec.y_data_reg);
|
||||
} else {
|
||||
if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
|
||||
connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
|
||||
connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
|
||||
connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
|
||||
connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
|
||||
connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
|
||||
DRM_INFO(" DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
|
||||
}
|
||||
DRM_INFO(" Encoders:\n");
|
||||
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
|
||||
radeon_encoder = to_radeon_encoder(encoder);
|
||||
|
|
|
@@ -248,7 +248,7 @@ int radeonfb_create(struct drm_device *dev,
 	if (ret)
 		goto out_unref;
 
-	memset_io(fbptr, 0xff, aligned_size);
+	memset_io(fbptr, 0x0, aligned_size);
 
 	strcpy(info->fix.id, "radeondrmfb");
 
@@ -39,10 +39,10 @@
 #include "ttm/ttm_execbuf_util.h"
 #include "ttm/ttm_module.h"
 
-#define VMWGFX_DRIVER_DATE "20090724"
-#define VMWGFX_DRIVER_MAJOR 0
-#define VMWGFX_DRIVER_MINOR 1
-#define VMWGFX_DRIVER_PATCHLEVEL 2
+#define VMWGFX_DRIVER_DATE "20100209"
+#define VMWGFX_DRIVER_MAJOR 1
+#define VMWGFX_DRIVER_MINOR 0
+#define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
 #define VMWGFX_MAX_RELOCATIONS 2048
@@ -113,6 +113,7 @@ struct vmw_fifo_state {
 	unsigned long static_buffer_size;
 	bool using_bounce_buffer;
 	uint32_t capabilities;
+	struct mutex fifo_mutex;
 	struct rw_semaphore rwsem;
 };
 
@@ -213,7 +214,7 @@ struct vmw_private {
 	 * Fencing and IRQs.
 	 */
 
-	uint32_t fence_seq;
+	atomic_t fence_seq;
 	wait_queue_head_t fence_queue;
 	wait_queue_head_t fifo_queue;
 	atomic_t fence_queue_waiters;
@ -74,6 +74,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
|
|||
fifo->reserved_size = 0;
|
||||
fifo->using_bounce_buffer = false;
|
||||
|
||||
mutex_init(&fifo->fifo_mutex);
|
||||
init_rwsem(&fifo->rwsem);
|
||||
|
||||
/*
|
||||
|
@ -117,7 +118,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
|
|||
(unsigned int) min,
|
||||
(unsigned int) fifo->capabilities);
|
||||
|
||||
dev_priv->fence_seq = dev_priv->last_read_sequence;
|
||||
atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
|
||||
iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
|
||||
|
||||
return vmw_fifo_send_fence(dev_priv, &dummy);
|
||||
|
@ -283,7 +284,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
|
|||
uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
|
||||
int ret;
|
||||
|
||||
down_write(&fifo_state->rwsem);
|
||||
mutex_lock(&fifo_state->fifo_mutex);
|
||||
max = ioread32(fifo_mem + SVGA_FIFO_MAX);
|
||||
min = ioread32(fifo_mem + SVGA_FIFO_MIN);
|
||||
next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
|
||||
|
@ -351,7 +352,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
|
|||
}
|
||||
out_err:
|
||||
fifo_state->reserved_size = 0;
|
||||
up_write(&fifo_state->rwsem);
|
||||
mutex_unlock(&fifo_state->fifo_mutex);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -426,6 +427,7 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
|
|||
|
||||
}
|
||||
|
||||
down_write(&fifo_state->rwsem);
|
||||
if (fifo_state->using_bounce_buffer || reserveable) {
|
||||
next_cmd += bytes;
|
||||
if (next_cmd >= max)
|
||||
|
@ -437,8 +439,9 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
|
|||
if (reserveable)
|
||||
iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
|
||||
mb();
|
||||
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
|
||||
up_write(&fifo_state->rwsem);
|
||||
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
|
||||
mutex_unlock(&fifo_state->fifo_mutex);
|
||||
}
|
||||
|
||||
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
|
||||
|
@ -451,9 +454,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
|
|||
|
||||
fm = vmw_fifo_reserve(dev_priv, bytes);
|
||||
if (unlikely(fm == NULL)) {
|
||||
down_write(&fifo_state->rwsem);
|
||||
*sequence = dev_priv->fence_seq;
|
||||
up_write(&fifo_state->rwsem);
|
||||
*sequence = atomic_read(&dev_priv->fence_seq);
|
||||
ret = -ENOMEM;
|
||||
(void)vmw_fallback_wait(dev_priv, false, true, *sequence,
|
||||
false, 3*HZ);
|
||||
|
@ -461,7 +462,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
|
|||
}
|
||||
|
||||
do {
|
||||
*sequence = dev_priv->fence_seq++;
|
||||
*sequence = atomic_add_return(1, &dev_priv->fence_seq);
|
||||
} while (*sequence == 0);
|
||||
|
||||
if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
|
||||
|
|
|
@ -48,6 +48,12 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
|
|||
case DRM_VMW_PARAM_FIFO_OFFSET:
|
||||
param->value = dev_priv->mmio_start;
|
||||
break;
|
||||
case DRM_VMW_PARAM_HW_CAPS:
|
||||
param->value = dev_priv->capabilities;
|
||||
break;
|
||||
case DRM_VMW_PARAM_FIFO_CAPS:
|
||||
param->value = dev_priv->fifo.capabilities;
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("Illegal vmwgfx get param request: %d\n",
|
||||
param->param);
|
||||
|
|
|
@ -84,20 +84,13 @@ bool vmw_fence_signaled(struct vmw_private *dev_priv,
|
|||
vmw_fifo_idle(dev_priv, sequence))
|
||||
return true;
|
||||
|
||||
/**
|
||||
* Below is to signal stale fences that have wrapped.
|
||||
* First, block fence submission.
|
||||
*/
|
||||
|
||||
down_read(&fifo_state->rwsem);
|
||||
|
||||
/**
|
||||
* Then check if the sequence is higher than what we've actually
|
||||
* emitted. Then the fence is stale and signaled.
|
||||
*/
|
||||
|
||||
ret = ((dev_priv->fence_seq - sequence) > VMW_FENCE_WRAP);
|
||||
up_read(&fifo_state->rwsem);
|
||||
ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
|
||||
> VMW_FENCE_WRAP);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -127,7 +120,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
|
|||
|
||||
if (fifo_idle)
|
||||
down_read(&fifo_state->rwsem);
|
||||
signal_seq = dev_priv->fence_seq;
|
||||
signal_seq = atomic_read(&dev_priv->fence_seq);
|
||||
ret = 0;
|
||||
|
||||
for (;;) {
|
||||
|
|
|
@ -769,10 +769,10 @@ int vmw_kms_init(struct vmw_private *dev_priv)
|
|||
|
||||
drm_mode_config_init(dev);
|
||||
dev->mode_config.funcs = &vmw_kms_funcs;
|
||||
dev->mode_config.min_width = 640;
|
||||
dev->mode_config.min_height = 480;
|
||||
dev->mode_config.max_width = 2048;
|
||||
dev->mode_config.max_height = 2048;
|
||||
dev->mode_config.min_width = 1;
|
||||
dev->mode_config.min_height = 1;
|
||||
dev->mode_config.max_width = dev_priv->fb_max_width;
|
||||
dev->mode_config.max_height = dev_priv->fb_max_height;
|
||||
|
||||
ret = vmw_kms_init_legacy_display_system(dev_priv);
|
||||
|
||||
|
|
|
@ -35,11 +35,6 @@
|
|||
#define VMW_RES_SURFACE ttm_driver_type1
|
||||
#define VMW_RES_STREAM ttm_driver_type2
|
||||
|
||||
/* XXX: This isn't a real hardware flag, but just a hack for kernel to
|
||||
* know about primary surfaces. Find a better way to accomplish this.
|
||||
*/
|
||||
#define SVGA3D_SURFACE_HINT_SCANOUT (1 << 9)
|
||||
|
||||
struct vmw_user_context {
|
||||
struct ttm_base_object base;
|
||||
struct vmw_resource res;
|
||||
|
@ -579,6 +574,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
|
|||
|
||||
srf->flags = req->flags;
|
||||
srf->format = req->format;
|
||||
srf->scanout = req->scanout;
|
||||
memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
|
||||
srf->num_sizes = 0;
|
||||
for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
|
||||
|
@ -604,16 +600,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
|
|||
if (unlikely(ret != 0))
|
||||
goto out_err1;
|
||||
|
||||
if (srf->flags & SVGA3D_SURFACE_HINT_SCANOUT) {
|
||||
/* we should not send this flag down to hardware since
|
||||
* its not a official one
|
||||
*/
|
||||
srf->flags &= ~SVGA3D_SURFACE_HINT_SCANOUT;
|
||||
srf->scanout = true;
|
||||
} else {
|
||||
srf->scanout = false;
|
||||
}
|
||||
|
||||
if (srf->scanout &&
|
||||
srf->num_sizes == 1 &&
|
||||
srf->sizes[0].width == 64 &&
|
||||
|
|
|
@@ -961,7 +961,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
 		remaining -= 7;
 		pr_devel("client 0x%p called 'target'\n", priv);
 		/* if target is default */
-		if (!strncmp(buf, "default", 7))
+		if (!strncmp(kbuf, "default", 7))
 			pdev = pci_dev_get(vga_default_device());
 		else {
 			if (!vga_pci_str_to_vars(curr_pos, remaining,
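The one-character change above matters because buf is the raw userspace pointer handed to the write() handler, while kbuf is the kernel-side copy that vga_arb_write() actually parses; comparing against buf dereferences user memory directly. A minimal sketch of the general pattern, with hypothetical names rather than the driver's real code:

#include <linux/fs.h>
#include <linux/printk.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/* Sketch: copy the user buffer into kernel memory before parsing it. */
static ssize_t example_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	char kbuf[64];

	if (count >= sizeof(kbuf))
		count = sizeof(kbuf) - 1;
	if (copy_from_user(kbuf, buf, count))
		return -EFAULT;
	kbuf[count] = '\0';

	/* All parsing is done on kbuf; buf itself is never dereferenced. */
	if (!strncmp(kbuf, "default", 7))
		pr_info("example: target is the default device\n");

	return count;
}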
@@ -77,6 +77,7 @@ struct drm_nouveau_gpuobj_free {
 #define NOUVEAU_GETPARAM_PCI_PHYSICAL    10
 #define NOUVEAU_GETPARAM_CHIPSET_ID      11
 #define NOUVEAU_GETPARAM_VM_VRAM_BASE    12
+#define NOUVEAU_GETPARAM_GRAPH_UNITS     13
 struct drm_nouveau_getparam {
 	uint64_t param;
 	uint64_t value;
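NOUVEAU_GETPARAM_GRAPH_UNITS is queried through the existing getparam ioctl shown above; the nouveau_state.c hunk earlier in this diff returns the raw NV40_PMC_GRAPH_UNITS register value for it. A hedged user-space sketch, assuming libdrm's drmCommandWriteRead() helper and the DRM_NOUVEAU_GETPARAM command number from this header:

#include <stdint.h>
#include <xf86drm.h>       /* drmCommandWriteRead() from libdrm */
#include <nouveau_drm.h>   /* struct drm_nouveau_getparam, NOUVEAU_GETPARAM_* */

/* Read the GRAPH_UNITS register through the getparam ioctl (sketch). */
static int query_graph_units(int fd, uint64_t *units)
{
	struct drm_nouveau_getparam gp = {
		.param = NOUVEAU_GETPARAM_GRAPH_UNITS,
	};
	int ret;

	ret = drmCommandWriteRead(fd, DRM_NOUVEAU_GETPARAM, &gp, sizeof(gp));
	if (ret)
		return ret;   /* -EINVAL on chipsets older than NV40 */

	*units = gp.value;
	return 0;
}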
@@ -68,7 +68,8 @@
 #define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
 #define DRM_VMW_PARAM_3D               2
 #define DRM_VMW_PARAM_FIFO_OFFSET      3
-
+#define DRM_VMW_PARAM_HW_CAPS          4
+#define DRM_VMW_PARAM_FIFO_CAPS        5
 
 /**
  * struct drm_vmw_getparam_arg
@@ -181,6 +182,8 @@ struct drm_vmw_context_arg {
  * The size of the array should equal the total number of mipmap levels.
  * @shareable: Boolean whether other clients (as identified by file descriptors)
  * may reference this surface.
+ * @scanout: Boolean whether the surface is intended to be used as a
+ * scanout.
  *
  * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
  * Output data from the DRM_VMW_REF_SURFACE Ioctl.
@@ -192,7 +195,7 @@ struct drm_vmw_surface_create_req {
 	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
 	uint64_t size_addr;
 	int32_t shareable;
-	uint32_t pad64;
+	int32_t scanout;
 };
 
 /**
@@ -295,17 +298,28 @@ union drm_vmw_surface_reference_arg {
  *
  * @commands: User-space address of a command buffer cast to an uint64_t.
  * @command-size: Size in bytes of the command buffer.
+ * @throttle-us: Sleep until software is less than @throttle_us
+ * microseconds ahead of hardware. The driver may round this value
+ * to the nearest kernel tick.
  * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
 * uint64_t.
+ * @version: Allows expanding the execbuf ioctl parameters without breaking
+ * backwards compatibility, since user-space will always tell the kernel
+ * which version it uses.
+ * @flags: Execbuf flags. None currently.
  *
 * Argument to the DRM_VMW_EXECBUF Ioctl.
 */
 
+#define DRM_VMW_EXECBUF_VERSION 0
+
 struct drm_vmw_execbuf_arg {
 	uint64_t commands;
 	uint32_t command_size;
-	uint32_t pad64;
+	uint32_t throttle_us;
 	uint64_t fence_rep;
+	uint32_t version;
+	uint32_t flags;
 };
 
 /**
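The @version member is what lets the kernel keep accepting older layouts of this argument as the structure grows. A hedged sketch of how user space might fill the new structure; only the struct layout and DRM_VMW_EXECBUF_VERSION come from this header, while the DRM_VMW_EXECBUF command number and libdrm's drmCommandWrite() wrapper are assumed:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>       /* drmCommandWrite() from libdrm */
#include "vmwgfx_drm.h"    /* struct drm_vmw_execbuf_arg, DRM_VMW_EXECBUF* */

/* Submit a command buffer, advertising the execbuf ABI version (sketch). */
static int submit_commands(int fd, void *cmds, uint32_t size,
			   struct drm_vmw_fence_rep *fence)
{
	struct drm_vmw_execbuf_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.commands     = (uintptr_t)cmds;
	arg.command_size = size;
	arg.throttle_us  = 0;                      /* no throttling */
	arg.fence_rep    = (uintptr_t)fence;
	arg.version      = DRM_VMW_EXECBUF_VERSION;
	arg.flags        = 0;                      /* none defined yet */

	return drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
}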