forked from luck/tmp_suning_uos_patched
commit 80c20d543d
Merge branch 'omap-fixes-for-linus' into omap-for-linus
@@ -20,7 +20,7 @@ Description:
 		lsm:	[[subj_user=] [subj_role=] [subj_type=]
 			 [obj_user=] [obj_role=] [obj_type=]]
 
-		base: 	func:= [BPRM_CHECK][FILE_MMAP][INODE_PERMISSION]
+		base: 	func:= [BPRM_CHECK][FILE_MMAP][FILE_CHECK]
 			mask:= [MAY_READ] [MAY_WRITE] [MAY_APPEND] [MAY_EXEC]
 			fsmagic:= hex value
 			uid:= decimal value
@@ -40,11 +40,11 @@ Description:
 
 			measure func=BPRM_CHECK
 			measure func=FILE_MMAP mask=MAY_EXEC
-			measure func=INODE_PERM mask=MAY_READ uid=0
+			measure func=FILE_CHECK mask=MAY_READ uid=0
 
 		The default policy measures all executables in bprm_check,
 		all files mmapped executable in file_mmap, and all files
-		open for read by root in inode_permission.
+		open for read by root in do_filp_open.
 
 		Examples of LSM specific definitions:
 
@@ -54,8 +54,8 @@ Description:
 
 			dont_measure obj_type=var_log_t
 			dont_measure obj_type=auditd_log_t
-			measure subj_user=system_u func=INODE_PERM mask=MAY_READ
-			measure subj_role=system_r func=INODE_PERM mask=MAY_READ
+			measure subj_user=system_u func=FILE_CHECK mask=MAY_READ
+			measure subj_role=system_r func=FILE_CHECK mask=MAY_READ
 
 		Smack:
-			measure subj_user=_ func=INODE_PERM mask=MAY_READ
+			measure subj_user=_ func=FILE_CHECK mask=MAY_READ

@@ -145,8 +145,8 @@ show_sampling_rate_max: THIS INTERFACE IS DEPRECATED, DON'T USE IT.
 up_threshold: defines what the average CPU usage between the samplings
 of 'sampling_rate' needs to be for the kernel to make a decision on
 whether it should increase the frequency.  For example when it is set
-to its default value of '80' it means that between the checking
-intervals the CPU needs to be on average more than 80% in use to then
+to its default value of '95' it means that between the checking
+intervals the CPU needs to be on average more than 95% in use to then
 decide that the CPU frequency needs to be increased.
 
 ignore_nice_load: this parameter takes a value of '0' or '1'. When

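Note: a worked example of the threshold above, as a minimal sketch (the names are illustrative, not the governor's internals): with the default now at '95', a sampling window in which the CPU was 96% busy triggers an increase, while one at 90% does not — it would have under the old documented default of '80'.

    /* Illustrative only: the up_threshold decision described above. */
    static unsigned int next_freq(unsigned int load_pct, unsigned int up_threshold,
                                  unsigned int cur, unsigned int max)
    {
            return load_pct > up_threshold ? max : cur; /* scale up only past the threshold */
    }
    /* next_freq(96, 95, cur, max) == max;  next_freq(90, 95, cur, max) == cur */
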
@@ -3411,8 +3411,10 @@ S:	Maintained
 F:	drivers/scsi/sym53c8xx_2/
 
 LTP (Linux Test Project)
-M:	Subrata Modak <subrata@linux.vnet.ibm.com>
-M:	Mike Frysinger <vapier@gentoo.org>
+M:	Rishikesh K Rajak <risrajak@linux.vnet.ibm.com>
+M:	Garrett Cooper <yanegomi@gmail.com>
+M:	Mike Frysinger <vapier@gentoo.org>
+M:	Subrata Modak <subrata@linux.vnet.ibm.com>
 L:	ltp-list@lists.sourceforge.net (subscribers-only)
 W:	http://ltp.sourceforge.net/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/galak/ltp.git
@@ -3836,6 +3838,7 @@ NETWORKING DRIVERS
 L:	netdev@vger.kernel.org
 W:	http://www.linuxfoundation.org/en/Net
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6.git
 S:	Odd Fixes
 F:	drivers/net/
 F:	include/linux/if_*

Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 33
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
 NAME = Man-Eating Seals of Antiquity
 
 # *DOCUMENTATION*

@@ -961,16 +961,14 @@ static void __init omap_mux_init_list(struct omap_mux *superset)
 	while (superset->reg_offset != OMAP_MUX_TERMINATOR) {
 		struct omap_mux *entry;
 
-#ifndef CONFIG_OMAP_MUX
-		/* Skip pins that are not muxed as GPIO by bootloader */
-		if (!OMAP_MODE_GPIO(omap_mux_read(superset->reg_offset))) {
+#ifdef CONFIG_OMAP_MUX
+		if (!superset->muxnames || !superset->muxnames[0]) {
 			superset++;
 			continue;
 		}
-#endif
-
-#if defined(CONFIG_OMAP_MUX) && defined(CONFIG_DEBUG_FS)
-		if (!superset->muxnames || !superset->muxnames[0]) {
+#else
+		/* Skip pins that are not muxed as GPIO by bootloader */
+		if (!OMAP_MODE_GPIO(omap_mux_read(superset->reg_offset))) {
 			superset++;
 			continue;
 		}

@@ -1325,7 +1325,7 @@ struct platform_device *__init
 at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
 {
 	struct platform_device		*pdev;
-	struct mci_dma_slave		*slave;
+	struct mci_dma_data		*slave;
 	u32				pioa_mask;
 	u32				piob_mask;
 
@@ -1344,7 +1344,9 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
 				ARRAY_SIZE(atmel_mci0_resource)))
 		goto fail;
 
-	slave = kzalloc(sizeof(struct mci_dma_slave), GFP_KERNEL);
+	slave = kzalloc(sizeof(struct mci_dma_data), GFP_KERNEL);
+	if (!slave)
+		goto fail;
 
 	slave->sdata.dma_dev = &dw_dmac0_device.dev;
 	slave->sdata.reg_width = DW_DMA_SLAVE_WIDTH_32BIT;
@@ -1357,7 +1359,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
 
 	if (platform_device_add_data(pdev, data,
 				sizeof(struct mci_platform_data)))
-		goto fail;
+		goto fail_free;
 
 	/* CLK line is common to both slots */
 	pioa_mask = 1 << 10;
@@ -1381,7 +1383,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
 		/* Slot is unused */
 		break;
 	default:
-		goto fail;
+		goto fail_free;
 	}
 
 	select_peripheral(PIOA, pioa_mask, PERIPH_A, 0);
@@ -1408,7 +1410,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
 		break;
 	default:
 		if (!data->slot[0].bus_width)
-			goto fail;
+			goto fail_free;
 
 		data->slot[1].bus_width = 0;
 		break;
@@ -1419,9 +1421,10 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
 	platform_device_add(pdev);
 	return pdev;
 
+fail_free:
+	kfree(slave);
 fail:
 	data->dma_slave = NULL;
-	kfree(slave);
 	platform_device_put(pdev);
 	return NULL;
 }

@@ -54,6 +54,7 @@ void __init setup_arch(char **cmdline_p)
 
 	microblaze_cache_init();
 
+	invalidate_dcache();
 	enable_dcache();
 
 	invalidate_icache();

(File diff suppressed because it is too large.)

@@ -287,9 +287,9 @@ static inline int __cpu_has_fpu(void)
 static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
 {
 #ifdef __NEED_VMBITS_PROBE
-	write_c0_entryhi(0x3ffffffffffff000ULL);
+	write_c0_entryhi(0x3fffffffffffe000ULL);
 	back_to_back_c0_hazard();
-	c->vmbits = fls64(read_c0_entryhi() & 0x3ffffffffffff000ULL);
+	c->vmbits = fls64(read_c0_entryhi() & 0x3fffffffffffe000ULL);
 #endif
 }

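Note on the vmbits hunk above: a MIPS TLB entry maps an even/odd pair of pages, so with 4 KB base pages the VPN2 field of EntryHi starts at bit 13, not bit 12. The new constant is simply the old probe mask with the non-VPN2 bit 12 cleared — a quick standalone check of that arithmetic:

    #include <assert.h>
    #include <stdint.h>
    int main(void)
    {
            uint64_t old_mask = 0x3ffffffffffff000ULL;
            uint64_t new_mask = 0x3fffffffffffe000ULL;
            assert(new_mask == (old_mask & ~(1ULL << 12))); /* bit 12 cleared */
            return 0;
    }
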
@@ -1501,6 +1501,7 @@ void __cpuinit per_cpu_trap_init(void)
 		cp0_perfcount_irq = -1;
 	} else {
 		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
+		cp0_compare_irq_shift = cp0_compare_irq;
 		cp0_perfcount_irq = -1;
 	}

@@ -174,7 +174,7 @@ static void octeon_flush_cache_page(struct vm_area_struct *vma,
  * Probe Octeon's caches
  *
  */
-static void __devinit probe_octeon(void)
+static void __cpuinit probe_octeon(void)
 {
 	unsigned long icache_size;
 	unsigned long dcache_size;
@@ -235,7 +235,7 @@ static void __devinit probe_octeon(void)
  * Setup the Octeon cache flush routines
  *
  */
-void __devinit octeon_cache_init(void)
+void __cpuinit octeon_cache_init(void)
 {
 	extern unsigned long ebase;
 	extern char except_vec2_octeon;

@@ -155,7 +155,7 @@ static inline void setup_protection_map(void)
 	protection_map[15] = PAGE_SHARED;
 }
 
-void __devinit cpu_cache_init(void)
+void __cpuinit cpu_cache_init(void)
 {
 	if (cpu_has_3k_cache) {
 		extern void __weak r3k_cache_init(void);

@@ -404,7 +404,7 @@ void __init sni_rm200_i8259_irqs(void)
 	if (!rm200_pic_master)
 		return;
 	rm200_pic_slave = ioremap_nocache(0x160000a0, 4);
-	if (!rm200_pic_master) {
+	if (!rm200_pic_slave) {
 		iounmap(rm200_pic_master);
 		return;
 	}

@@ -468,7 +468,9 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
 
-	tracehook_signal_handler(sig, info, ka, regs, 0);
+	tracehook_signal_handler(sig, info, ka, regs,
+		test_thread_flag(TIF_SINGLESTEP) ||
+		test_thread_flag(TIF_BLOCKSTEP));
 
 	return 1;
 }

@@ -63,15 +63,21 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	if (huge) {
 #ifdef CONFIG_HUGETLB_PAGE
 		psize = get_slice_psize(mm, addr);
+		/* Mask the address for the correct page size */
+		addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
 #else
 		BUG();
 		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
 #endif
-	} else
+	} else {
 		psize = pte_pagesize_index(mm, addr, pte);
+		/* Mask the address for the standard page size.  If we
+		 * have a 64k page kernel, but the hardware does not
+		 * support 64k pages, this might be different from the
+		 * hardware page size encoded in the slice table. */
+		addr &= PAGE_MASK;
+	}
 
-	/* Mask the address for the correct page size */
-	addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
-
 	/* Build full vaddr */
 	if (!is_kernel_addr(addr)) {

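Note: the masking line in the hunk above rounds an address down to the start of its page for whatever page size the slice uses. A standalone sketch of the idiom with an assumed 64 KB page (shift == 16):

    #include <assert.h>
    int main(void)
    {
            unsigned long addr = 0x12345678UL;
            unsigned int shift = 16;                /* 64 KB page, illustrative */
            addr &= ~((1UL << shift) - 1);          /* round down to page start */
            assert(addr == 0x12340000UL);
            return 0;
    }
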
@@ -784,9 +784,13 @@ static void xics_set_cpu_priority(unsigned char cppr)
 {
 	struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
 
-	BUG_ON(os_cppr->index != 0);
+	/*
+	 * we only really want to set the priority when there's
+	 * just one cppr value on the stack
+	 */
+	WARN_ON(os_cppr->index != 0);
 
-	os_cppr->stack[os_cppr->index] = cppr;
+	os_cppr->stack[0] = cppr;
 
 	if (firmware_has_feature(FW_FEATURE_LPAR))
 		lpar_cppr_info(cppr);
@@ -821,8 +825,14 @@ void xics_setup_cpu(void)
 
 void xics_teardown_cpu(void)
 {
+	struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
 	int cpu = smp_processor_id();
 
+	/*
+	 * we have to reset the cppr index to 0 because we're
+	 * not going to return from the IPI
+	 */
+	os_cppr->index = 0;
 	xics_set_cpu_priority(0);
 
 	/* Clear any pending IPI request */

@@ -293,12 +293,12 @@ struct _lowcore
 	__u64	clock_comparator;		/* 0x02d0 */
 	__u32	machine_flags;			/* 0x02d8 */
 	__u32	ftrace_func;			/* 0x02dc */
-	__u8	pad_0x02f0[0x0300-0x02f0];	/* 0x02f0 */
+	__u8	pad_0x02e0[0x0300-0x02e0];	/* 0x02e0 */
 
 	/* Interrupt response block */
 	__u8	irb[64];			/* 0x0300 */
 
-	__u8	pad_0x0400[0x0e00-0x0400];	/* 0x0400 */
+	__u8	pad_0x0340[0x0e00-0x0340];	/* 0x0340 */
 
 	/*
 	 * 0xe00 contains the address of the IPL Parameter Information

@@ -132,7 +132,6 @@ ENTRY(tlb_protection_violation_store)
 	 mov	#1, r5
 
 call_handle_tlbmiss:
-	setup_frame_reg
 	mov.l	1f, r0
 	mov	r5, r8
 	mov.l	@r0, r6
@@ -365,6 +364,8 @@ handle_exception:
 	 mov.l	@k2, k2		! read out vector and keep in k2
 
 handle_exception_special:
+	setup_frame_reg
+
 	! Setup return address and jump to exception handler
 	mov.l	7f, r9		! fetch return address
 	stc	r2_bank, r0	! k2 (vector)

@@ -540,6 +540,8 @@ void dwarf_free_frame(struct dwarf_frame *frame)
 	mempool_free(frame, dwarf_frame_pool);
 }
 
+extern void ret_from_irq(void);
+
 /**
  *	dwarf_unwind_stack - unwind the stack
  *
@@ -678,6 +680,24 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
 	addr = frame->cfa + reg->addr;
 	frame->return_addr = __raw_readl(addr);
 
+	/*
+	 * Ah, the joys of unwinding through interrupts.
+	 *
+	 * Interrupts are tricky - the DWARF info needs to be _really_
+	 * accurate and unfortunately I'm seeing a lot of bogus DWARF
+	 * info. For example, I've seen interrupts occur in epilogues
+	 * just after the frame pointer (r14) had been restored. The
+	 * problem was that the DWARF info claimed that the CFA could be
+	 * reached by using the value of the frame pointer before it was
+	 * restored.
+	 *
+	 * So until the compiler can be trusted to produce reliable
+	 * DWARF info when it really matters, let's stop unwinding once
+	 * we've calculated the function that was interrupted.
+	 */
+	if (prev && prev->pc == (unsigned long)ret_from_irq)
+		frame->return_addr = 0;
+
 	return frame;
 
 bail:

@@ -70,8 +70,14 @@ ret_from_exception:
 	CFI_STARTPROC simple
 	CFI_DEF_CFA r14, 0
 	CFI_REL_OFFSET 17, 64
-	CFI_REL_OFFSET 15, 0
+	CFI_REL_OFFSET 15, 60
 	CFI_REL_OFFSET 14, 56
+	CFI_REL_OFFSET 13, 52
+	CFI_REL_OFFSET 12, 48
+	CFI_REL_OFFSET 11, 44
+	CFI_REL_OFFSET 10, 40
+	CFI_REL_OFFSET 9, 36
+	CFI_REL_OFFSET 8, 32
 	preempt_stop()
 ENTRY(ret_from_irq)
 	!

@@ -526,7 +526,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 	 * Set some valid stack frames to give to the child.
 	 */
 	childstack = (struct sparc_stackf __user *)
-		(sp & ~0x7UL);
+		(sp & ~0xfUL);
 	parentstack = (struct sparc_stackf __user *)
 		regs->u_regs[UREG_FP];
 
@@ -398,11 +398,11 @@ static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
 	} else
 		__get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));
 
-	/* Now 8-byte align the stack as this is mandatory in the
-	 * Sparc ABI due to how register windows work.  This hides
-	 * the restriction from thread libraries etc.  -DaveM
+	/* Now align the stack as this is mandatory in the Sparc ABI
+	 * due to how register windows work.  This hides the
+	 * restriction from thread libraries etc.
 	 */
-	csp &= ~7UL;
+	csp &= ~15UL;
 
 	distance = fp - psp;
 	rval = (csp - distance);

@@ -120,8 +120,8 @@ struct rt_signal_frame32 {
 };
 
 /* Align macros */
-#define SF_ALIGNEDSZ  (((sizeof(struct signal_frame32) + 7) & (~7)))
-#define RT_ALIGNEDSZ  (((sizeof(struct rt_signal_frame32) + 7) & (~7)))
+#define SF_ALIGNEDSZ  (((sizeof(struct signal_frame32) + 15) & (~15)))
+#define RT_ALIGNEDSZ  (((sizeof(struct rt_signal_frame32) + 15) & (~15)))
 
 int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
 {
@@ -420,15 +420,17 @@ static void __user *get_sigframe(struct sigaction *sa, struct pt_regs *regs, uns
 		sp = current->sas_ss_sp + current->sas_ss_size;
 	}
 
+	sp -= framesize;
+
 	/* Always align the stack frame. This handles two cases. First,
 	 * sigaltstack need not be mindful of platform specific stack
 	 * alignment. Second, if we took this signal because the stack
 	 * is not aligned properly, we'd like to take the signal cleanly
 	 * and report that.
 	 */
-	sp &= ~7UL;
+	sp &= ~15UL;
 
-	return (void __user *)(sp - framesize);
+	return (void __user *) sp;
 }
 
 static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)

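Note: the ALIGNEDSZ macros above round a size up to the next multiple of 16 by adding (align - 1) and clearing the low bits; the hunk only widens the alignment from 8 to 16 bytes. A quick standalone check of the idiom:

    #include <assert.h>
    #define ALIGN16(x) (((x) + 15) & (~15))
    int main(void)
    {
            assert(ALIGN16(1)  == 16);
            assert(ALIGN16(16) == 16);  /* already-aligned sizes are unchanged */
            assert(ALIGN16(17) == 32);
            return 0;
    }
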
@@ -267,15 +267,17 @@ static inline void __user *get_sigframe(struct sigaction *sa, struct pt_regs *re
 		sp = current->sas_ss_sp + current->sas_ss_size;
 	}
 
+	sp -= framesize;
+
 	/* Always align the stack frame. This handles two cases. First,
 	 * sigaltstack need not be mindful of platform specific stack
 	 * alignment. Second, if we took this signal because the stack
 	 * is not aligned properly, we'd like to take the signal cleanly
 	 * and report that.
 	 */
-	sp &= ~7UL;
+	sp &= ~15UL;
 
-	return (void __user *)(sp - framesize);
+	return (void __user *) sp;
 }
 
 static inline int

@@ -353,7 +353,7 @@ void do_rt_sigreturn(struct pt_regs *regs)
 /* Checks if the fp is valid */
 static int invalid_frame_pointer(void __user *fp, int fplen)
 {
-	if (((unsigned long) fp) & 7)
+	if (((unsigned long) fp) & 15)
 		return 1;
 	return 0;
 }
@@ -396,15 +396,17 @@ static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *
 		sp = current->sas_ss_sp + current->sas_ss_size;
 	}
 
+	sp -= framesize;
+
 	/* Always align the stack frame. This handles two cases. First,
 	 * sigaltstack need not be mindful of platform specific stack
 	 * alignment. Second, if we took this signal because the stack
 	 * is not aligned properly, we'd like to take the signal cleanly
 	 * and report that.
 	 */
-	sp &= ~7UL;
+	sp &= ~15UL;
 
-	return (void __user *)(sp - framesize);
+	return (void __user *) sp;
 }
 
 static inline void

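Note on the get_sigframe() hunks above: besides widening the alignment to 16 bytes, the order of operations changes. The old code aligned sp and then subtracted framesize, so the returned pointer was only aligned if framesize happened to be; the new code subtracts first and aligns last, so the returned pointer is unconditionally aligned. A minimal illustrative comparison (not the kernel code itself):

    static unsigned long frame_old(unsigned long sp, unsigned long framesize)
    {
            return (sp & ~15UL) - framesize;  /* aligned only if framesize is */
    }
    static unsigned long frame_new(unsigned long sp, unsigned long framesize)
    {
            return (sp - framesize) & ~15UL;  /* always 16-byte aligned */
    }
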
@@ -11,9 +11,9 @@
 #include <linux/irqflags.h>
 
 /* entries in ARCH_DLINFO: */
-#ifdef CONFIG_IA32_EMULATION
+#if defined(CONFIG_IA32_EMULATION) || !defined(CONFIG_X86_64)
 # define AT_VECTOR_SIZE_ARCH 2
-#else
+#else /* else it's non-compat x86-64 */
 # define AT_VECTOR_SIZE_ARCH 1
 #endif

@@ -1185,9 +1185,6 @@ static void __init acpi_process_madt(void)
 		if (!error) {
 			acpi_lapic = 1;
 
-#ifdef CONFIG_X86_BIGSMP
-			generic_bigsmp_probe();
-#endif
 			/*
 			 * Parse MADT IO-APIC entries
 			 */
@@ -1197,8 +1194,6 @@ static void __init acpi_process_madt(void)
 				acpi_ioapic = 1;
 
 				smp_found_config = 1;
-				if (apic->setup_apic_routing)
-					apic->setup_apic_routing();
 			}
 		}
 		if (error == -EINVAL) {

@@ -1641,9 +1641,7 @@ int __init APIC_init_uniprocessor(void)
 #endif
 
 	enable_IR_x2apic();
-#ifdef CONFIG_X86_64
 	default_setup_apic_routing();
-#endif
 
 	verify_local_APIC();
 	connect_bsp_APIC();
@@ -1891,21 +1889,6 @@ void __cpuinit generic_processor_info(int apicid, int version)
 	if (apicid > max_physical_apicid)
 		max_physical_apicid = apicid;
 
-#ifdef CONFIG_X86_32
-	if (num_processors > 8) {
-		switch (boot_cpu_data.x86_vendor) {
-		case X86_VENDOR_INTEL:
-			if (!APIC_XAPIC(version)) {
-				def_to_bigsmp = 0;
-				break;
-			}
-			/* If P4 and above fall through */
-		case X86_VENDOR_AMD:
-			def_to_bigsmp = 1;
-		}
-	}
-#endif
-
 #if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
 	early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
 	early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;

@@ -52,7 +52,32 @@ static int __init print_ipi_mode(void)
 }
 late_initcall(print_ipi_mode);
 
-void default_setup_apic_routing(void)
+void __init default_setup_apic_routing(void)
+{
+	int version = apic_version[boot_cpu_physical_apicid];
+
+	if (num_possible_cpus() > 8) {
+		switch (boot_cpu_data.x86_vendor) {
+		case X86_VENDOR_INTEL:
+			if (!APIC_XAPIC(version)) {
+				def_to_bigsmp = 0;
+				break;
+			}
+			/* If P4 and above fall through */
+		case X86_VENDOR_AMD:
+			def_to_bigsmp = 1;
+		}
+	}
+
+#ifdef CONFIG_X86_BIGSMP
+	generic_bigsmp_probe();
+#endif
+
+	if (apic->setup_apic_routing)
+		apic->setup_apic_routing();
+}
+
+static void setup_apic_flat_routing(void)
 {
 #ifdef CONFIG_X86_IO_APIC
 	printk(KERN_INFO
@@ -103,7 +128,7 @@ struct apic apic_default = {
 	.init_apic_ldr			= default_init_apic_ldr,
 
 	.ioapic_phys_id_map		= default_ioapic_phys_id_map,
-	.setup_apic_routing		= default_setup_apic_routing,
+	.setup_apic_routing		= setup_apic_flat_routing,
 	.multi_timer_check		= NULL,
 	.apicid_to_node			= default_apicid_to_node,
 	.cpu_to_logical_apicid		= default_cpu_to_logical_apicid,

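Note on the APIC routing hunks above: logical flat delivery can address at most 8 CPUs, and the old placement of the check counted only processors enumerated so far (num_processors). Moving the decision into default_setup_apic_routing() and keying it on num_possible_cpus() also accounts for CPUs that are declared but not yet online (e.g. hotplug), which appears to be the point of the change. A one-line sketch of the test, illustrative only:

    static int logical_flat_ok(unsigned int possible_cpus)
    {
            return possible_cpus <= 8;  /* beyond 8, bigsmp/physflat is required */
    }
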
@@ -67,7 +67,7 @@ void __init default_setup_apic_routing(void)
 	}
 #endif
 
-	if (apic == &apic_flat && num_processors > 8)
+	if (apic == &apic_flat && num_possible_cpus() > 8)
 		apic = &apic_physflat;
 
 	printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);

@@ -1356,6 +1356,7 @@ static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol)
 
 	kfree(data->powernow_table);
 	kfree(data);
+	per_cpu(powernow_data, pol->cpu) = NULL;
 
 	return 0;
 }
@@ -1375,7 +1376,7 @@ static unsigned int powernowk8_get(unsigned int cpu)
 	int err;
 
 	if (!data)
-		return -EINVAL;
+		return 0;
 
 	smp_call_function_single(cpu, query_values_on_cpu, &err, true);
 	if (err)

@@ -359,13 +359,6 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
 		x86_init.mpparse.mpc_record(1);
 	}
 
-#ifdef CONFIG_X86_BIGSMP
-	generic_bigsmp_probe();
-#endif
-
-	if (apic->setup_apic_routing)
-		apic->setup_apic_routing();
-
 	if (!num_processors)
 		printk(KERN_ERR "MPTABLE: no processors registered!\n");
 	return num_processors;

@@ -1083,9 +1083,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 	set_cpu_sibling_map(0);
 
 	enable_IR_x2apic();
-#ifdef CONFIG_X86_64
 	default_setup_apic_routing();
-#endif
 
 	if (smp_sanity_check(max_cpus) < 0) {
 		printk(KERN_INFO "SMP disabled\n");

@@ -467,6 +467,9 @@ static int pit_ioport_read(struct kvm_io_device *this,
 		return -EOPNOTSUPP;
 
 	addr &= KVM_PIT_CHANNEL_MASK;
+	if (addr == 3)
+		return 0;
+
 	s = &pit_state->channels[addr];
 
 	mutex_lock(&pit_state->lock);

@@ -670,7 +670,7 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
 {
 	static int version;
 	struct pvclock_wall_clock wc;
-	struct timespec now, sys, boot;
+	struct timespec boot;
 
 	if (!wall_clock)
 		return;
@@ -685,9 +685,7 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
 	 * wall clock specified here.  guest system time equals host
 	 * system time for us, thus we must fill in host boot time here.
 	 */
-	now = current_kernel_time();
-	ktime_get_ts(&sys);
-	boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys));
+	getboottime(&boot);
 
 	wc.sec = boot.tv_sec;
 	wc.nsec = boot.tv_nsec;
@@ -762,6 +760,7 @@ static void kvm_write_guest_time(struct kvm_vcpu *v)
 	local_irq_save(flags);
 	kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
 	ktime_get_ts(&ts);
+	monotonic_to_bootbased(&ts);
 	local_irq_restore(flags);
 
 	/* With all the info we got, fill in the values */

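Note on the wall-clock hunks above: the removed lines derived the boot time by hand as wall-clock-now minus monotonic-now; getboottime() returns that quantity directly, and the added monotonic_to_bootbased() also folds in time the host spent suspended, which the hand computation missed. A userspace analogue of the identity:

    #include <stdio.h>
    #include <time.h>
    int main(void)
    {
            struct timespec wall, mono;
            clock_gettime(CLOCK_REALTIME, &wall);   /* wall clock */
            clock_gettime(CLOCK_MONOTONIC, &mono);  /* since boot, sans suspend */
            printf("approx. boot time: %ld s since the epoch\n",
                   (long)(wall.tv_sec - mono.tv_sec));
            return 0;
    }
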
@@ -18,7 +18,7 @@ static inline pte_t gup_get_pte(pte_t *ptep)
 #else
 	/*
 	 * With get_user_pages_fast, we walk down the pagetables without taking
-	 * any locks.  For this we would like to load the pointers atoimcally,
+	 * any locks.  For this we would like to load the pointers atomically,
 	 * but that is not possible (without expensive cmpxchg8b) on PAE.  What
 	 * we do have is the guarantee that a pte will only either go from not
 	 * present to present, or present to not present or both -- it will not

@@ -42,16 +42,13 @@ static const int cfq_hist_divisor = 4;
  */
 #define CFQ_MIN_TT		(2)
 
-/*
- * Allow merged cfqqs to perform this amount of seeky I/O before
- * deciding to break the queues up again.
- */
-#define CFQQ_COOP_TOUT		(HZ)
-
 #define CFQ_SLICE_SCALE		(5)
 #define CFQ_HW_QUEUE_MIN	(5)
 #define CFQ_SERVICE_SHIFT       12
 
+#define CFQQ_SEEK_THR		8 * 1024
+#define CFQQ_SEEKY(cfqq)	((cfqq)->seek_mean > CFQQ_SEEK_THR)
+
 #define RQ_CIC(rq)		\
 	((struct cfq_io_context *) (rq)->elevator_private)
 #define RQ_CFQQ(rq)	(struct cfq_queue *) ((rq)->elevator_private2)
@@ -137,7 +134,6 @@ struct cfq_queue {
 	u64 seek_total;
 	sector_t seek_mean;
 	sector_t last_request_pos;
-	unsigned long seeky_start;
 
 	pid_t pid;
 
@@ -314,6 +310,7 @@ enum cfqq_state_flags {
 	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
 	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
 	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
+	CFQ_CFQQ_FLAG_split_coop,	/* shared cfqq will be splitted */
 	CFQ_CFQQ_FLAG_deep,		/* sync cfqq experienced large depth */
 	CFQ_CFQQ_FLAG_wait_busy,	/* Waiting for next request */
 };
@@ -342,6 +339,7 @@ CFQ_CFQQ_FNS(prio_changed);
 CFQ_CFQQ_FNS(slice_new);
 CFQ_CFQQ_FNS(sync);
 CFQ_CFQQ_FNS(coop);
+CFQ_CFQQ_FNS(split_coop);
 CFQ_CFQQ_FNS(deep);
 CFQ_CFQQ_FNS(wait_busy);
 #undef CFQ_CFQQ_FNS
@@ -1565,6 +1563,15 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	cfq_clear_cfqq_wait_request(cfqq);
 	cfq_clear_cfqq_wait_busy(cfqq);
 
+	/*
+	 * If this cfqq is shared between multiple processes, check to
+	 * make sure that those processes are still issuing I/Os within
+	 * the mean seek distance.  If not, it may be time to break the
+	 * queues apart again.
+	 */
+	if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
+		cfq_mark_cfqq_split_coop(cfqq);
+
 	/*
 	 * store what was left of this slice, if the queue idled/timed out
 	 */
@@ -1663,9 +1670,6 @@ static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
 		return cfqd->last_position - blk_rq_pos(rq);
 }
 
-#define CFQQ_SEEK_THR		8 * 1024
-#define CFQQ_SEEKY(cfqq)	((cfqq)->seek_mean > CFQQ_SEEK_THR)
-
 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 			       struct request *rq, bool for_preempt)
 {
@@ -3000,19 +3004,6 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	total = cfqq->seek_total + (cfqq->seek_samples/2);
 	do_div(total, cfqq->seek_samples);
 	cfqq->seek_mean = (sector_t)total;
-
-	/*
-	 * If this cfqq is shared between multiple processes, check to
-	 * make sure that those processes are still issuing I/Os within
-	 * the mean seek distance.  If not, it may be time to break the
-	 * queues apart again.
-	 */
-	if (cfq_cfqq_coop(cfqq)) {
-		if (CFQQ_SEEKY(cfqq) && !cfqq->seeky_start)
-			cfqq->seeky_start = jiffies;
-		else if (!CFQQ_SEEKY(cfqq))
-			cfqq->seeky_start = 0;
-	}
 }
 
 /*
@@ -3453,14 +3444,6 @@ cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic,
 	return cic_to_cfqq(cic, 1);
 }
 
-static int should_split_cfqq(struct cfq_queue *cfqq)
-{
-	if (cfqq->seeky_start &&
-	    time_after(jiffies, cfqq->seeky_start + CFQQ_COOP_TOUT))
-		return 1;
-	return 0;
-}
-
 /*
  * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
  * was the last process referring to said cfqq.
@@ -3469,9 +3452,9 @@ static struct cfq_queue *
 split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
 {
 	if (cfqq_process_refs(cfqq) == 1) {
-		cfqq->seeky_start = 0;
 		cfqq->pid = current->pid;
 		cfq_clear_cfqq_coop(cfqq);
+		cfq_clear_cfqq_split_coop(cfqq);
 		return cfqq;
 	}
 
@@ -3510,7 +3493,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 	/*
 	 * If the queue was seeky for too long, break it apart.
 	 */
-	if (cfq_cfqq_coop(cfqq) && should_split_cfqq(cfqq)) {
+	if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
 		cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
 		cfqq = split_cfqq(cic, cfqq);
 		if (!cfqq)

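Note on the cfq hunks above: the timestamp scheme (seeky_start plus the CFQQ_COOP_TOUT timeout) is replaced by a flag — __cfq_slice_expired() marks a shared queue split_coop when it is still seeky at slice expiry, and cfq_set_request() performs the split on the next request. The single CFQ_CFQQ_FNS(split_coop) line generates the mark/clear/test helpers; a simplified sketch of that macro's expansion pattern:

    /* Sketch of what CFQ_CFQQ_FNS(split_coop) expands to (simplified). */
    static inline void cfq_mark_cfqq_split_coop(struct cfq_queue *cfqq)
    {
            cfqq->flags |= (1 << CFQ_CFQQ_FLAG_split_coop);
    }
    static inline void cfq_clear_cfqq_split_coop(struct cfq_queue *cfqq)
    {
            cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_split_coop);
    }
    static inline int cfq_cfqq_split_coop(const struct cfq_queue *cfqq)
    {
            return cfqq->flags & (1 << CFQ_CFQQ_FLAG_split_coop);
    }
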
@@ -337,6 +337,9 @@ static int cciss_seq_show(struct seq_file *seq, void *v)
 	if (*pos > h->highest_lun)
 		return 0;
 
+	if (drv == NULL) /* it's possible for h->drv[] to have holes. */
+		return 0;
+
 	if (drv->heads == 0)
 		return 0;
 
@@ -808,6 +808,7 @@ static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv,
 
 exit:
 	sdio_release_host(card->func);
+	kfree(tmpbuf);
 
 	return ret;
 }

@@ -767,16 +767,19 @@ int __init agp_amd64_init(void)
 
 static int __init agp_amd64_mod_init(void)
 {
+#ifndef MODULE
 	if (gart_iommu_aperture)
 		return agp_bridges_found ? 0 : -ENODEV;
-
+#endif
 	return agp_amd64_init();
 }
 
 static void __exit agp_amd64_cleanup(void)
 {
+#ifndef MODULE
 	if (gart_iommu_aperture)
 		return;
+#endif
 	if (aperture_resource)
 		release_resource(aperture_resource);
 	pci_unregister_driver(&agp_amd64_pci_driver);

@@ -39,12 +39,12 @@
 struct tpm_inf_dev {
 	int iotype;
 
-	void __iomem *mem_base;		/* MMIO ioremap'd addr */
-	unsigned long map_base;		/* phys MMIO base */
-	unsigned long map_size;		/* MMIO region size */
-	unsigned int index_off;		/* index register offset */
+	void __iomem *mem_base;	/* MMIO ioremap'd addr */
+	unsigned long map_base;	/* phys MMIO base */
+	unsigned long map_size;	/* MMIO region size */
+	unsigned int index_off;	/* index register offset */
 
-	unsigned int data_regs;		/* Data registers */
+	unsigned int data_regs;	/* Data registers */
 	unsigned int data_size;
 
 	unsigned int config_port;	/* IO Port config index reg */
@@ -406,14 +406,14 @@ static const struct tpm_vendor_specific tpm_inf = {
 	.miscdev = {.fops = &inf_ops,},
 };
 
-static const struct pnp_device_id tpm_pnp_tbl[] = {
+static const struct pnp_device_id tpm_inf_pnp_tbl[] = {
 	/* Infineon TPMs */
 	{"IFX0101", 0},
 	{"IFX0102", 0},
 	{"", 0}
 };
 
-MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);
+MODULE_DEVICE_TABLE(pnp, tpm_inf_pnp_tbl);
 
 static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
 				       const struct pnp_device_id *dev_id)
@@ -430,7 +430,7 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
 	if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) &&
 	    !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) {
 
-	    	tpm_dev.iotype = TPM_INF_IO_PORT;
+		tpm_dev.iotype = TPM_INF_IO_PORT;
 
 		tpm_dev.config_port = pnp_port_start(dev, 0);
 		tpm_dev.config_size = pnp_port_len(dev, 0);
@@ -459,9 +459,9 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
 			goto err_last;
 		}
 	} else if (pnp_mem_valid(dev, 0) &&
-		!(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) {
+		   !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) {
 
-	    	tpm_dev.iotype = TPM_INF_IO_MEM;
+		tpm_dev.iotype = TPM_INF_IO_MEM;
 
 		tpm_dev.map_base = pnp_mem_start(dev, 0);
 		tpm_dev.map_size = pnp_mem_len(dev, 0);
@@ -563,11 +563,11 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
 			 "product id 0x%02x%02x"
 			 "%s\n",
 			 tpm_dev.iotype == TPM_INF_IO_PORT ?
-				tpm_dev.config_port :
-				tpm_dev.map_base + tpm_dev.index_off,
+			 tpm_dev.config_port :
+			 tpm_dev.map_base + tpm_dev.index_off,
 			 tpm_dev.iotype == TPM_INF_IO_PORT ?
-				tpm_dev.data_regs :
-				tpm_dev.map_base + tpm_dev.data_regs,
+			 tpm_dev.data_regs :
+			 tpm_dev.map_base + tpm_dev.data_regs,
 			 version[0], version[1],
 			 vendorid[0], vendorid[1],
 			 productid[0], productid[1], chipname);
@@ -607,20 +607,55 @@ static __devexit void tpm_inf_pnp_remove(struct pnp_dev *dev)
 			iounmap(tpm_dev.mem_base);
 			release_mem_region(tpm_dev.map_base, tpm_dev.map_size);
 		}
+		tpm_dev_vendor_release(chip);
 		tpm_remove_hardware(chip->dev);
 	}
 }
 
+static int tpm_inf_pnp_suspend(struct pnp_dev *dev, pm_message_t pm_state)
+{
+	struct tpm_chip *chip = pnp_get_drvdata(dev);
+	int rc;
+	if (chip) {
+		u8 savestate[] = {
+			0, 193,		/* TPM_TAG_RQU_COMMAND */
+			0, 0, 0, 10,	/* blob length (in bytes) */
+			0, 0, 0, 152	/* TPM_ORD_SaveState */
+		};
+		dev_info(&dev->dev, "saving TPM state\n");
+		rc = tpm_inf_send(chip, savestate, sizeof(savestate));
+		if (rc < 0) {
+			dev_err(&dev->dev, "error while saving TPM state\n");
+			return rc;
+		}
+	}
+	return 0;
+}
+
+static int tpm_inf_pnp_resume(struct pnp_dev *dev)
+{
+	/* Re-configure TPM after suspending */
+	tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR);
+	tpm_config_out(IOLIMH, TPM_INF_ADDR);
+	tpm_config_out((tpm_dev.data_regs >> 8) & 0xff, TPM_INF_DATA);
+	tpm_config_out(IOLIML, TPM_INF_ADDR);
+	tpm_config_out((tpm_dev.data_regs & 0xff), TPM_INF_DATA);
+	/* activate register */
+	tpm_config_out(TPM_DAR, TPM_INF_ADDR);
+	tpm_config_out(0x01, TPM_INF_DATA);
+	tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR);
+	/* disable RESET, LP and IRQC */
+	tpm_data_out(RESET_LP_IRQC_DISABLE, CMD);
+	return tpm_pm_resume(&dev->dev);
+}
+
 static struct pnp_driver tpm_inf_pnp_driver = {
 	.name = "tpm_inf_pnp",
 	.driver = {
 		.owner = THIS_MODULE,
-		.suspend = tpm_pm_suspend,
-		.resume = tpm_pm_resume,
 	},
-	.id_table = tpm_pnp_tbl,
+	.id_table = tpm_inf_pnp_tbl,
 	.probe = tpm_inf_pnp_probe,
-	.remove = __devexit_p(tpm_inf_pnp_remove),
+	.suspend = tpm_inf_pnp_suspend,
+	.resume = tpm_inf_pnp_resume,
+	.remove = __devexit_p(tpm_inf_pnp_remove)
 };
 
 static int __init init_inf(void)
@@ -638,5 +673,5 @@ module_exit(cleanup_inf);
 
 MODULE_AUTHOR("Marcel Selhorst <m.selhorst@sirrix.com>");
 MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2");
-MODULE_VERSION("1.9");
+MODULE_VERSION("1.9.2");
 MODULE_LICENSE("GPL");

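Note on the suspend handler added above: the 10-byte savestate blob is a raw TPM 1.2 command — a big-endian 2-byte tag, 4-byte total length, and 4-byte ordinal. Decoding the byte values used (193 = 0xC1, 152 = 0x98):

    #include <assert.h>
    int main(void)
    {
            unsigned char b[] = { 0, 193, 0, 0, 0, 10, 0, 0, 0, 152 };
            assert(((b[0] << 8) | b[1]) == 0x00c1); /* TPM_TAG_RQU_COMMAND */
            assert(b[5] == sizeof(b));              /* length covers the whole blob */
            assert(b[9] == 0x98);                   /* TPM_ORD_SaveState */
            return 0;
    }
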
@@ -554,6 +554,9 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 				(dbs_tuners_ins.up_threshold -
 				 dbs_tuners_ins.down_differential);
 
+		if (freq_next < policy->min)
+			freq_next = policy->min;
+
 		if (!dbs_tuners_ins.powersave_bias) {
 			__cpufreq_driver_target(policy, freq_next,
 					CPUFREQ_RELATION_L);

@@ -613,8 +613,6 @@ static void dma_tasklet(unsigned long data)
 	cohd_fin->pending_irqs--;
 	cohc->completed = cohd_fin->desc.cookie;
 
-	BUG_ON(cohc->nbr_active_done && cohd_fin == NULL);
-
 	if (cohc->nbr_active_done == 0)
 		return;
 
@@ -826,6 +826,7 @@ void dma_async_device_unregister(struct dma_device *device)
 		chan->dev->chan = NULL;
 		mutex_unlock(&dma_list_mutex);
 
+		device_unregister(&chan->dev->device);
 		free_percpu(chan->local);
 	}
 }
 EXPORT_SYMBOL(dma_async_device_unregister);

@@ -467,7 +467,7 @@ static int dmatest_func(void *data)
 
 	if (iterations > 0)
 		while (!kthread_should_stop()) {
-			DECLARE_WAIT_QUEUE_HEAD(wait_dmatest_exit);
+			DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
 			interruptible_sleep_on(&wait_dmatest_exit);
 		}
 
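Note on the dmatest hunk above: DECLARE_WAIT_QUEUE_HEAD() embeds a static lockdep class key, which is only valid for waitqueues with static storage duration; for an automatic (on-stack) variable the _ONSTACK variant must be used so the key is initialized at runtime. A kernel-API sketch, not compilable in isolation:

    void example(void)
    {
            DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); /* correct for stack storage */
            /* plain DECLARE_WAIT_QUEUE_HEAD(wq) here would confuse lockdep */
    }
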
@@ -249,7 +249,7 @@ int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
 	if (is_ioat_active(status) || is_ioat_idle(status))
 		ioat_suspend(chan);
 	while (is_ioat_active(status) || is_ioat_idle(status)) {
-		if (end && time_after(jiffies, end)) {
+		if (tmo && time_after(jiffies, end)) {
 			err = -ETIMEDOUT;
 			break;
 		}

@@ -761,12 +761,10 @@ static void ipu_select_buffer(enum ipu_channel channel, int buffer_n)
  * @buffer_n:	buffer number to update.
  *		0 or 1 are the only valid values.
  * @phyaddr:	buffer physical address.
- * @return:	Returns 0 on success or negative error code on failure.  This
- *              function will fail if the buffer is set to ready.
  */
 /* Called under spin_lock(_irqsave)(&ichan->lock) */
-static int ipu_update_channel_buffer(struct idmac_channel *ichan,
-				     int buffer_n, dma_addr_t phyaddr)
+static void ipu_update_channel_buffer(struct idmac_channel *ichan,
+				      int buffer_n, dma_addr_t phyaddr)
 {
 	enum ipu_channel channel = ichan->dma_chan.chan_id;
 	uint32_t reg;
@@ -806,8 +804,6 @@ static int ipu_update_channel_buffer(struct idmac_channel *ichan,
 	}
 
 	spin_unlock_irqrestore(&ipu_data.lock, flags);
-
-	return 0;
 }
 
 /* Called under spin_lock_irqsave(&ichan->lock) */
@@ -816,7 +812,6 @@ static int ipu_submit_buffer(struct idmac_channel *ichan,
 {
 	unsigned int chan_id = ichan->dma_chan.chan_id;
 	struct device *dev = &ichan->dma_chan.dev->device;
-	int ret;
 
 	if (async_tx_test_ack(&desc->txd))
 		return -EINTR;
@@ -827,14 +822,7 @@ static int ipu_submit_buffer(struct idmac_channel *ichan,
 	 * could make it conditional on status >= IPU_CHANNEL_ENABLED, but
 	 * doing it again shouldn't hurt either.
 	 */
-	ret = ipu_update_channel_buffer(ichan, buf_idx,
-					sg_dma_address(sg));
-
-	if (ret < 0) {
-		dev_err(dev, "Updating sg %p on channel 0x%x buffer %d failed!\n",
-			sg, chan_id, buf_idx);
-		return ret;
-	}
+	ipu_update_channel_buffer(ichan, buf_idx, sg_dma_address(sg));
 
 	ipu_select_buffer(chan_id, buf_idx);
 	dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n",
@@ -1379,10 +1367,11 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
 
 	if (likely(sgnew) &&
 	    ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) {
-		callback = desc->txd.callback;
-		callback_param = desc->txd.callback_param;
+		callback = descnew->txd.callback;
+		callback_param = descnew->txd.callback_param;
 		spin_unlock(&ichan->lock);
-		callback(callback_param);
+		if (callback)
+			callback(callback_param);
 		spin_lock(&ichan->lock);
 	}
 
@@ -2658,10 +2658,11 @@ static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
  * the memory system completely. A command line option allows to force-enable
  * hardware ECC later in amd64_enable_ecc_error_reporting().
  */
-static const char *ecc_warning =
-	"WARNING: ECC is disabled by BIOS. Module will NOT be loaded.\n"
-	" Either Enable ECC in the BIOS, or set 'ecc_enable_override'.\n"
-	" Also, use of the override can cause unknown side effects.\n";
+static const char *ecc_msg =
+	"ECC disabled in the BIOS or no ECC capability, module will not load.\n"
+	" Either enable ECC checking or force module loading by setting "
+	"'ecc_enable_override'.\n"
+	" (Note that use of the override may cause unknown side effects.)\n";
 
 static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
 {
@@ -2673,7 +2674,7 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
 
 	ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE);
 	if (!ecc_enabled)
-		amd64_printk(KERN_WARNING, "This node reports that Memory ECC "
+		amd64_printk(KERN_NOTICE, "This node reports that Memory ECC "
 			     "is currently disabled, set F3x%x[22] (%s).\n",
 			     K8_NBCFG, pci_name(pvt->misc_f3_ctl));
 	else
@@ -2681,13 +2682,13 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
 
 	nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id);
 	if (!nb_mce_en)
-		amd64_printk(KERN_WARNING, "NB MCE bank disabled, set MSR "
+		amd64_printk(KERN_NOTICE, "NB MCE bank disabled, set MSR "
 			     "0x%08x[4] on node %d to enable.\n",
 			     MSR_IA32_MCG_CTL, pvt->mc_node_id);
 
 	if (!ecc_enabled || !nb_mce_en) {
 		if (!ecc_enable_override) {
-			amd64_printk(KERN_WARNING, "%s", ecc_warning);
+			amd64_printk(KERN_NOTICE, "%s", ecc_msg);
 			return -ENODEV;
 		}
 		ecc_enable_override = 0;

@@ -804,8 +804,8 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
 		end   <<= (24 - PAGE_SHIFT);
 		end    |= (1 << (24 - PAGE_SHIFT)) - 1;
 
-		csrow->first_page = start >> PAGE_SHIFT;
-		csrow->last_page = end >> PAGE_SHIFT;
+		csrow->first_page = start;
+		csrow->last_page = end;
 		csrow->nr_pages = end + 1 - start;
 		csrow->grain = 8;
 		csrow->mtype = mtype;
@@ -892,10 +892,6 @@ static int __devinit mpc85xx_mc_err_probe(struct of_device *op,
 
 	mpc85xx_init_csrows(mci);
 
-#ifdef CONFIG_EDAC_DEBUG
-	edac_mc_register_mcidev_debug((struct attribute **)debug_attr);
-#endif
-
 	/* store the original error disable bits */
 	orig_ddr_err_disable =
 		in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE);

@@ -735,8 +735,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 	if (cmdbuf->num_cliprects) {
 		cliprects = kcalloc(cmdbuf->num_cliprects,
 				    sizeof(struct drm_clip_rect), GFP_KERNEL);
-		if (cliprects == NULL)
+		if (cliprects == NULL) {
+			ret = -ENOMEM;
 			goto fail_batch_free;
+		}
 
 		ret = copy_from_user(cliprects, cmdbuf->cliprects,
 				     cmdbuf->num_cliprects *

@@ -174,12 +174,42 @@ const static struct pci_device_id pciidlist[] = {
 MODULE_DEVICE_TABLE(pci, pciidlist);
 #endif
 
-static int i915_suspend(struct drm_device *dev, pm_message_t state)
+static int i915_drm_freeze(struct drm_device *dev)
+{
+	pci_save_state(dev->pdev);
+
+	/* If KMS is active, we do the leavevt stuff here */
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		int error = i915_gem_idle(dev);
+		if (error) {
+			dev_err(&dev->pdev->dev,
+				"GEM idle failed, resume might fail\n");
+			return error;
+		}
+		drm_irq_uninstall(dev);
+	}
+
+	i915_save_state(dev);
+
+	return 0;
+}
+
+static void i915_drm_suspend(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (!dev || !dev_priv) {
-		DRM_ERROR("dev: %p, dev_priv: %p\n", dev, dev_priv);
+	intel_opregion_free(dev, 1);
+
+	/* Modeset on resume, not lid events */
+	dev_priv->modeset_on_lid = 0;
+}
+
+static int i915_suspend(struct drm_device *dev, pm_message_t state)
+{
+	int error;
+
+	if (!dev || !dev->dev_private) {
+		DRM_ERROR("dev: %p\n", dev);
 		DRM_ERROR("DRM not initialized, aborting suspend.\n");
 		return -ENODEV;
 	}
@@ -187,19 +217,11 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
 	if (state.event == PM_EVENT_PRETHAW)
 		return 0;
 
-	pci_save_state(dev->pdev);
+	error = i915_drm_freeze(dev);
+	if (error)
+		return error;
 
-	/* If KMS is active, we do the leavevt stuff here */
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		if (i915_gem_idle(dev))
-			dev_err(&dev->pdev->dev,
-				"GEM idle failed, resume may fail\n");
-		drm_irq_uninstall(dev);
-	}
-
-	i915_save_state(dev);
-
-	intel_opregion_free(dev, 1);
+	i915_drm_suspend(dev);
 
 	if (state.event == PM_EVENT_SUSPEND) {
 		/* Shut down the device */
@@ -207,45 +229,45 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
 		pci_set_power_state(dev->pdev, PCI_D3hot);
 	}
 
-	/* Modeset on resume, not lid events */
-	dev_priv->modeset_on_lid = 0;
-
 	return 0;
 }
 
-static int i915_resume(struct drm_device *dev)
+static int i915_drm_thaw(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret = 0;
-
-	if (pci_enable_device(dev->pdev))
-		return -1;
-	pci_set_master(dev->pdev);
-
-	i915_restore_state(dev);
-
-	intel_opregion_init(dev, 1);
+	int error = 0;
 
 	/* KMS EnterVT equivalent */
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		mutex_lock(&dev->struct_mutex);
 		dev_priv->mm.suspended = 0;
 
-		ret = i915_gem_init_ringbuffer(dev);
-		if (ret != 0)
-			ret = -1;
+		error = i915_gem_init_ringbuffer(dev);
 		mutex_unlock(&dev->struct_mutex);
 
 		drm_irq_install(dev);
 	}
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		/* Resume the modeset for every activated CRTC */
 		drm_helper_resume_force_mode(dev);
 	}
 
 	dev_priv->modeset_on_lid = 0;
 
-	return ret;
+	return error;
+}
+
+static int i915_resume(struct drm_device *dev)
+{
+	if (pci_enable_device(dev->pdev))
+		return -EIO;
+
+	pci_set_master(dev->pdev);
+
+	i915_restore_state(dev);
+
+	intel_opregion_init(dev, 1);
+
+	return i915_drm_thaw(dev);
 }
 
 /**
@@ -386,57 +408,69 @@ i915_pci_remove(struct pci_dev *pdev)
 	drm_put_dev(dev);
 }
 
-static int
-i915_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int i915_pm_suspend(struct device *dev)
 {
-	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	int error;
 
-	return i915_suspend(dev, state);
-}
+	if (!drm_dev || !drm_dev->dev_private) {
+		dev_err(dev, "DRM not initialized, aborting suspend.\n");
+		return -ENODEV;
+	}
 
-static int
-i915_pci_resume(struct pci_dev *pdev)
-{
-	struct drm_device *dev = pci_get_drvdata(pdev);
+	error = i915_drm_freeze(drm_dev);
+	if (error)
+		return error;
 
-	return i915_resume(dev);
-}
+	i915_drm_suspend(drm_dev);
 
-static int
-i915_pm_suspend(struct device *dev)
-{
-	return i915_pci_suspend(to_pci_dev(dev), PMSG_SUSPEND);
-}
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
 
-static int
-i915_pm_resume(struct device *dev)
-{
-	return i915_pci_resume(to_pci_dev(dev));
-}
-
-static int
-i915_pm_freeze(struct device *dev)
-{
-	return i915_pci_suspend(to_pci_dev(dev), PMSG_FREEZE);
-}
-
-static int
-i915_pm_thaw(struct device *dev)
-{
-	/* thaw during hibernate, do nothing! */
-	return 0;
-}
-
-static int
-i915_pm_poweroff(struct device *dev)
+	return 0;
+}
+
+static int i915_pm_resume(struct device *dev)
 {
-	return i915_pci_suspend(to_pci_dev(dev), PMSG_HIBERNATE);
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	return i915_resume(drm_dev);
 }
 
-static int
-i915_pm_restore(struct device *dev)
+static int i915_pm_freeze(struct device *dev)
 {
-	return i915_pci_resume(to_pci_dev(dev));
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	if (!drm_dev || !drm_dev->dev_private) {
+		dev_err(dev, "DRM not initialized, aborting suspend.\n");
+		return -ENODEV;
+	}
+
+	return i915_drm_freeze(drm_dev);
+}
+
+static int i915_pm_thaw(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	return i915_drm_thaw(drm_dev);
+}
+
+static int i915_pm_poweroff(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	int error;
+
+	error = i915_drm_freeze(drm_dev);
+	if (!error)
+		i915_drm_suspend(drm_dev);
+
+	return error;
 }
 
 const struct dev_pm_ops i915_pm_ops = {
@@ -445,7 +479,7 @@ const struct dev_pm_ops i915_pm_ops = {
 	.freeze = i915_pm_freeze,
 	.thaw = i915_pm_thaw,
 	.poweroff = i915_pm_poweroff,
-	.restore = i915_pm_restore,
+	.restore = i915_pm_resume,
 };
 
 static struct vm_operations_struct i915_gem_vm_ops = {

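Note on the i915 power-management rework above: the old monolithic suspend path is split into i915_drm_freeze() (quiesce the GPU and save state) and i915_drm_suspend() (release opregion, reset the lid flag), so each dev_pm_ops phase composes just the steps it needs. A summary of the mapping the code above implements (a reviewer's reading, not a statement of the PM core's contract):

    /*
     * .suspend  -> freeze + suspend + PCI power-down   (suspend-to-RAM)
     * .freeze   -> freeze only                         (hibernate: before snapshot)
     * .thaw     -> thaw                                (hibernate: after snapshot)
     * .poweroff -> freeze + suspend                    (hibernate: final power-off)
     * .resume / .restore -> re-enable PCI device + thaw
     */
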
@@ -492,6 +492,15 @@ typedef struct drm_i915_private {
 	 */
 	struct list_head flushing_list;
 
+	/**
+	 * List of objects currently pending a GPU write flush.
+	 *
+	 * All elements on this list will belong to either the
+	 * active_list or flushing_list, last_rendering_seqno can
+	 * be used to differentiate between the two elements.
+	 */
+	struct list_head gpu_write_list;
+
 	/**
 	 * LRU list of objects which are not in the ringbuffer and
 	 * are ready to unbind, but are still in the GTT.
@@ -592,6 +601,8 @@ struct drm_i915_gem_object {
 
 	/** This object's place on the active/flushing/inactive lists */
 	struct list_head list;
+	/** This object's place on GPU write list */
+	struct list_head gpu_write_list;
 
 	/** This object's place on the fenced object LRU */
 	struct list_head fence_list;

@@ -1552,6 +1552,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 	else
 		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
 
+	BUG_ON(!list_empty(&obj_priv->gpu_write_list));
+
 	obj_priv->last_rendering_seqno = 0;
 	if (obj_priv->active) {
 		obj_priv->active = 0;
@@ -1622,7 +1624,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 		struct drm_i915_gem_object *obj_priv, *next;
 
 		list_for_each_entry_safe(obj_priv, next,
-					 &dev_priv->mm.flushing_list, list) {
+					 &dev_priv->mm.gpu_write_list,
+					 gpu_write_list) {
 			struct drm_gem_object *obj = obj_priv->obj;
 
 			if ((obj->write_domain & flush_domains) ==
@@ -1630,6 +1633,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 				uint32_t old_write_domain = obj->write_domain;
 
 				obj->write_domain = 0;
+				list_del_init(&obj_priv->gpu_write_list);
 				i915_gem_object_move_to_active(obj, seqno);
 
 				trace_i915_gem_object_change_domain(obj,
@@ -2084,8 +2088,8 @@ static int
 i915_gem_evict_everything(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	uint32_t seqno;
 	int ret;
+	uint32_t seqno;
 	bool lists_empty;
 
 	spin_lock(&dev_priv->mm.active_list_lock);
@@ -2107,6 +2111,8 @@ i915_gem_evict_everything(struct drm_device *dev)
 	if (ret)
 		return ret;
 
+	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+
 	ret = i915_gem_evict_from_inactive_list(dev);
 	if (ret)
 		return ret;
@@ -2701,7 +2707,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
 	old_write_domain = obj->write_domain;
 	i915_gem_flush(dev, 0, obj->write_domain);
 	seqno = i915_add_request(dev, NULL, obj->write_domain);
-	obj->write_domain = 0;
+	BUG_ON(obj->write_domain);
 	i915_gem_object_move_to_active(obj, seqno);
 
 	trace_i915_gem_object_change_domain(obj,
@@ -3682,8 +3688,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (args->num_cliprects != 0) {
 		cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
 				    GFP_KERNEL);
-		if (cliprects == NULL)
+		if (cliprects == NULL) {
+			ret = -ENOMEM;
 			goto pre_mutex_err;
+		}
 
 		ret = copy_from_user(cliprects,
 				     (struct drm_clip_rect __user *)
@@ -3850,16 +3858,23 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		i915_gem_flush(dev,
 			       dev->invalidate_domains,
 			       dev->flush_domains);
-		if (dev->flush_domains)
+		if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
 			(void)i915_add_request(dev, file_priv,
 					       dev->flush_domains);
 	}
 
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
+		struct drm_i915_gem_object *obj_priv = obj->driver_private;
 		uint32_t old_write_domain = obj->write_domain;
 
 		obj->write_domain = obj->pending_write_domain;
+		if (obj->write_domain)
+			list_move_tail(&obj_priv->gpu_write_list,
+				       &dev_priv->mm.gpu_write_list);
+		else
+			list_del_init(&obj_priv->gpu_write_list);
 
 		trace_i915_gem_object_change_domain(obj,
 						    obj->read_domains,
 						    old_write_domain);
@@ -4370,6 +4385,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 	obj_priv->obj = obj;
 	obj_priv->fence_reg = I915_FENCE_REG_NONE;
 	INIT_LIST_HEAD(&obj_priv->list);
+	INIT_LIST_HEAD(&obj_priv->gpu_write_list);
 	INIT_LIST_HEAD(&obj_priv->fence_list);
 	obj_priv->madv = I915_MADV_WILLNEED;
 
@@ -4821,6 +4837,7 @@ i915_gem_load(struct drm_device *dev)
 	spin_lock_init(&dev_priv->mm.active_list_lock);
 	INIT_LIST_HEAD(&dev_priv->mm.active_list);
 	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
+	INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
 	INIT_LIST_HEAD(&dev_priv->mm.request_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);

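Note on the gpu_write_list hunks above: i915_add_request() previously walked the whole flushing_list to find objects whose pending GPU writes were covered by a flush; the dedicated gpu_write_list holds exactly those objects, and membership is maintained with list_move_tail()/list_del_init(). The added BUG_ON()s lean on the property that a node deleted with list_del_init() reports empty — the same idea in a standalone sketch:

    #include <assert.h>
    struct node { struct node *next, *prev; };
    static void node_init(struct node *n)  { n->next = n->prev = n; }
    static int  node_empty(struct node *n) { return n->next == n; }
    int main(void)
    {
            struct node n;
            node_init(&n);           /* like INIT_LIST_HEAD / list_del_init */
            assert(node_empty(&n));  /* the invariant the new BUG_ON()s check */
            return 0;
    }
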
@@ -309,22 +309,22 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 	if (de_iir & DE_GSE)
 		ironlake_opregion_gse_intr(dev);
 
-	if (de_iir & DE_PLANEA_FLIP_DONE)
+	if (de_iir & DE_PLANEA_FLIP_DONE) {
 		intel_prepare_page_flip(dev, 0);
-
-	if (de_iir & DE_PLANEB_FLIP_DONE)
-		intel_prepare_page_flip(dev, 1);
-
-	if (de_iir & DE_PIPEA_VBLANK) {
-		drm_handle_vblank(dev, 0);
 		intel_finish_page_flip(dev, 0);
 	}
 
-	if (de_iir & DE_PIPEB_VBLANK) {
-		drm_handle_vblank(dev, 1);
+	if (de_iir & DE_PLANEB_FLIP_DONE) {
+		intel_prepare_page_flip(dev, 1);
 		intel_finish_page_flip(dev, 1);
 	}
 
+	if (de_iir & DE_PIPEA_VBLANK)
+		drm_handle_vblank(dev, 0);
+
+	if (de_iir & DE_PIPEB_VBLANK)
+		drm_handle_vblank(dev, 1);
+
 	/* check event from PCH */
 	if ((de_iir & DE_PCH_EVENT) &&
 	    (pch_iir & SDE_HOTPLUG_MASK)) {

@@ -338,6 +338,7 @@
 #define   FBC_CTL_PERIODIC	(1<<30)
 #define   FBC_CTL_INTERVAL_SHIFT (16)
 #define   FBC_CTL_UNCOMPRESSIBLE (1<<14)
+#define   FBC_C3_IDLE		(1<<13)
 #define   FBC_CTL_STRIDE_SHIFT	(5)
 #define   FBC_CTL_FENCENO	(1<<0)
 #define FBC_COMMAND		0x0320c

|
|||
#define IRONLAKE_DOT_MAX 350000
|
||||
#define IRONLAKE_VCO_MIN 1760000
|
||||
#define IRONLAKE_VCO_MAX 3510000
|
||||
#define IRONLAKE_N_MIN 1
|
||||
#define IRONLAKE_N_MAX 6
|
||||
#define IRONLAKE_M_MIN 79
|
||||
#define IRONLAKE_M_MAX 127
|
||||
#define IRONLAKE_M1_MIN 12
|
||||
#define IRONLAKE_M1_MAX 22
|
||||
#define IRONLAKE_M2_MIN 5
|
||||
#define IRONLAKE_M2_MAX 9
|
||||
#define IRONLAKE_P_SDVO_DAC_MIN 5
|
||||
#define IRONLAKE_P_SDVO_DAC_MAX 80
|
||||
#define IRONLAKE_P_LVDS_MIN 28
|
||||
#define IRONLAKE_P_LVDS_MAX 112
|
||||
#define IRONLAKE_P1_MIN 1
|
||||
#define IRONLAKE_P1_MAX 8
|
||||
#define IRONLAKE_P2_SDVO_DAC_SLOW 10
|
||||
#define IRONLAKE_P2_SDVO_DAC_FAST 5
|
||||
#define IRONLAKE_P2_LVDS_SLOW 14 /* single channel */
|
||||
#define IRONLAKE_P2_LVDS_FAST 7 /* double channel */
|
||||
#define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */
|
||||
|
||||
#define IRONLAKE_P_DISPLAY_PORT_MIN 10
|
||||
#define IRONLAKE_P_DISPLAY_PORT_MAX 20
|
||||
#define IRONLAKE_P2_DISPLAY_PORT_FAST 10
|
||||
#define IRONLAKE_P2_DISPLAY_PORT_SLOW 10
|
||||
#define IRONLAKE_P2_DISPLAY_PORT_LIMIT 0
|
||||
#define IRONLAKE_P1_DISPLAY_PORT_MIN 1
|
||||
#define IRONLAKE_P1_DISPLAY_PORT_MAX 2
|
||||
/* We have parameter ranges for different type of outputs. */
|
||||
|
||||
/* DAC & HDMI Refclk 120Mhz */
|
||||
#define IRONLAKE_DAC_N_MIN 1
|
||||
#define IRONLAKE_DAC_N_MAX 5
|
||||
#define IRONLAKE_DAC_M_MIN 79
|
||||
#define IRONLAKE_DAC_M_MAX 127
|
||||
#define IRONLAKE_DAC_P_MIN 5
|
||||
#define IRONLAKE_DAC_P_MAX 80
|
||||
#define IRONLAKE_DAC_P1_MIN 1
|
||||
#define IRONLAKE_DAC_P1_MAX 8
|
||||
#define IRONLAKE_DAC_P2_SLOW 10
|
||||
#define IRONLAKE_DAC_P2_FAST 5
|
||||
|
||||
/* LVDS single-channel 120Mhz refclk */
|
||||
#define IRONLAKE_LVDS_S_N_MIN 1
|
||||
#define IRONLAKE_LVDS_S_N_MAX 3
|
||||
#define IRONLAKE_LVDS_S_M_MIN 79
|
||||
#define IRONLAKE_LVDS_S_M_MAX 118
|
||||
#define IRONLAKE_LVDS_S_P_MIN 28
|
||||
#define IRONLAKE_LVDS_S_P_MAX 112
|
||||
#define IRONLAKE_LVDS_S_P1_MIN 2
|
||||
#define IRONLAKE_LVDS_S_P1_MAX 8
|
||||
#define IRONLAKE_LVDS_S_P2_SLOW 14
|
||||
#define IRONLAKE_LVDS_S_P2_FAST 14
|
||||
|
||||
/* LVDS dual-channel 120Mhz refclk */
|
||||
#define IRONLAKE_LVDS_D_N_MIN 1
|
||||
#define IRONLAKE_LVDS_D_N_MAX 3
|
||||
#define IRONLAKE_LVDS_D_M_MIN 79
|
||||
#define IRONLAKE_LVDS_D_M_MAX 127
|
||||
#define IRONLAKE_LVDS_D_P_MIN 14
|
||||
#define IRONLAKE_LVDS_D_P_MAX 56
|
||||
#define IRONLAKE_LVDS_D_P1_MIN 2
|
||||
#define IRONLAKE_LVDS_D_P1_MAX 8
|
||||
#define IRONLAKE_LVDS_D_P2_SLOW 7
|
||||
#define IRONLAKE_LVDS_D_P2_FAST 7
|
||||
|
||||
/* LVDS single-channel 100Mhz refclk */
|
||||
#define IRONLAKE_LVDS_S_SSC_N_MIN 1
|
||||
#define IRONLAKE_LVDS_S_SSC_N_MAX 2
|
||||
#define IRONLAKE_LVDS_S_SSC_M_MIN 79
|
||||
#define IRONLAKE_LVDS_S_SSC_M_MAX 126
|
||||
#define IRONLAKE_LVDS_S_SSC_P_MIN 28
|
||||
#define IRONLAKE_LVDS_S_SSC_P_MAX 112
|
||||
#define IRONLAKE_LVDS_S_SSC_P1_MIN 2
|
||||
#define IRONLAKE_LVDS_S_SSC_P1_MAX 8
|
||||
#define IRONLAKE_LVDS_S_SSC_P2_SLOW 14
|
||||
#define IRONLAKE_LVDS_S_SSC_P2_FAST 14
|
||||
|
||||
/* LVDS dual-channel 100Mhz refclk */
|
||||
#define IRONLAKE_LVDS_D_SSC_N_MIN 1
|
||||
#define IRONLAKE_LVDS_D_SSC_N_MAX 3
|
||||
#define IRONLAKE_LVDS_D_SSC_M_MIN 79
|
||||
#define IRONLAKE_LVDS_D_SSC_M_MAX 126
|
||||
#define IRONLAKE_LVDS_D_SSC_P_MIN 14
|
||||
#define IRONLAKE_LVDS_D_SSC_P_MAX 42
|
||||
#define IRONLAKE_LVDS_D_SSC_P1_MIN 2
|
||||
#define IRONLAKE_LVDS_D_SSC_P1_MAX 6
|
||||
#define IRONLAKE_LVDS_D_SSC_P2_SLOW 7
|
||||
#define IRONLAKE_LVDS_D_SSC_P2_FAST 7
|
||||
|
||||
/* DisplayPort */
|
||||
#define IRONLAKE_DP_N_MIN 1
|
||||
#define IRONLAKE_DP_N_MAX 2
|
||||
#define IRONLAKE_DP_M_MIN 81
|
||||
#define IRONLAKE_DP_M_MAX 90
|
||||
#define IRONLAKE_DP_P_MIN 10
|
||||
#define IRONLAKE_DP_P_MAX 20
|
||||
#define IRONLAKE_DP_P2_FAST 10
|
||||
#define IRONLAKE_DP_P2_SLOW 10
|
||||
#define IRONLAKE_DP_P2_LIMIT 0
|
||||
#define IRONLAKE_DP_P1_MIN 1
|
||||
#define IRONLAKE_DP_P1_MAX 2
|
||||
|
||||
static bool
|
||||
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
|
||||
|
@ -474,33 +527,78 @@ static const intel_limit_t intel_limits_pineview_lvds = {
|
|||
.find_pll = intel_find_best_PLL,
|
||||
};
|
||||
|
||||
static const intel_limit_t intel_limits_ironlake_sdvo = {
|
||||
static const intel_limit_t intel_limits_ironlake_dac = {
|
||||
.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
|
||||
.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
|
||||
.n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX },
|
||||
.m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX },
|
||||
.n = { .min = IRONLAKE_DAC_N_MIN, .max = IRONLAKE_DAC_N_MAX },
|
||||
.m = { .min = IRONLAKE_DAC_M_MIN, .max = IRONLAKE_DAC_M_MAX },
|
||||
.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
|
||||
.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
|
||||
.p = { .min = IRONLAKE_P_SDVO_DAC_MIN, .max = IRONLAKE_P_SDVO_DAC_MAX },
|
||||
.p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX },
|
||||
.p = { .min = IRONLAKE_DAC_P_MIN, .max = IRONLAKE_DAC_P_MAX },
|
||||
.p1 = { .min = IRONLAKE_DAC_P1_MIN, .max = IRONLAKE_DAC_P1_MAX },
|
||||
.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
|
||||
.p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW,
|
||||
.p2_fast = IRONLAKE_P2_SDVO_DAC_FAST },
|
||||
.p2_slow = IRONLAKE_DAC_P2_SLOW,
|
||||
.p2_fast = IRONLAKE_DAC_P2_FAST },
|
||||
.find_pll = intel_g4x_find_best_PLL,
|
||||
};
|
||||
|
||||
static const intel_limit_t intel_limits_ironlake_lvds = {
|
||||
static const intel_limit_t intel_limits_ironlake_single_lvds = {
|
||||
.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
|
||||
.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
|
||||
.n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX },
|
||||
.m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX },
|
||||
.n = { .min = IRONLAKE_LVDS_S_N_MIN, .max = IRONLAKE_LVDS_S_N_MAX },
|
||||
.m = { .min = IRONLAKE_LVDS_S_M_MIN, .max = IRONLAKE_LVDS_S_M_MAX },
|
||||
.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
|
||||
.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
|
||||
.p = { .min = IRONLAKE_P_LVDS_MIN, .max = IRONLAKE_P_LVDS_MAX },
|
||||
.p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX },
|
||||
.p = { .min = IRONLAKE_LVDS_S_P_MIN, .max = IRONLAKE_LVDS_S_P_MAX },
|
||||
.p1 = { .min = IRONLAKE_LVDS_S_P1_MIN, .max = IRONLAKE_LVDS_S_P1_MAX },
|
||||
.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
|
||||
.p2_slow = IRONLAKE_P2_LVDS_SLOW,
|
||||
.p2_fast = IRONLAKE_P2_LVDS_FAST },
|
||||
.p2_slow = IRONLAKE_LVDS_S_P2_SLOW,
|
||||
.p2_fast = IRONLAKE_LVDS_S_P2_FAST },
|
||||
.find_pll = intel_g4x_find_best_PLL,
|
||||
};
|
||||
|
||||
static const intel_limit_t intel_limits_ironlake_dual_lvds = {
|
||||
.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
|
||||
.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
|
||||
.n = { .min = IRONLAKE_LVDS_D_N_MIN, .max = IRONLAKE_LVDS_D_N_MAX },
|
||||
.m = { .min = IRONLAKE_LVDS_D_M_MIN, .max = IRONLAKE_LVDS_D_M_MAX },
|
||||
.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
|
||||
.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
|
||||
.p = { .min = IRONLAKE_LVDS_D_P_MIN, .max = IRONLAKE_LVDS_D_P_MAX },
|
||||
.p1 = { .min = IRONLAKE_LVDS_D_P1_MIN, .max = IRONLAKE_LVDS_D_P1_MAX },
|
||||
.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
|
||||
.p2_slow = IRONLAKE_LVDS_D_P2_SLOW,
|
||||
.p2_fast = IRONLAKE_LVDS_D_P2_FAST },
|
||||
.find_pll = intel_g4x_find_best_PLL,
|
||||
};
|
||||
|
||||
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
|
||||
.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
|
||||
.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
|
||||
.n = { .min = IRONLAKE_LVDS_S_SSC_N_MIN, .max = IRONLAKE_LVDS_S_SSC_N_MAX },
|
||||
.m = { .min = IRONLAKE_LVDS_S_SSC_M_MIN, .max = IRONLAKE_LVDS_S_SSC_M_MAX },
|
||||
.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
|
||||
.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
|
||||
.p = { .min = IRONLAKE_LVDS_S_SSC_P_MIN, .max = IRONLAKE_LVDS_S_SSC_P_MAX },
|
||||
.p1 = { .min = IRONLAKE_LVDS_S_SSC_P1_MIN,.max = IRONLAKE_LVDS_S_SSC_P1_MAX },
|
||||
.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
|
||||
.p2_slow = IRONLAKE_LVDS_S_SSC_P2_SLOW,
|
||||
.p2_fast = IRONLAKE_LVDS_S_SSC_P2_FAST },
|
||||
.find_pll = intel_g4x_find_best_PLL,
|
||||
};
|
||||
|
||||
static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
|
||||
.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
|
||||
.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
|
||||
.n = { .min = IRONLAKE_LVDS_D_SSC_N_MIN, .max = IRONLAKE_LVDS_D_SSC_N_MAX },
|
||||
.m = { .min = IRONLAKE_LVDS_D_SSC_M_MIN, .max = IRONLAKE_LVDS_D_SSC_M_MAX },
|
||||
.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
|
||||
.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
|
||||
.p = { .min = IRONLAKE_LVDS_D_SSC_P_MIN, .max = IRONLAKE_LVDS_D_SSC_P_MAX },
|
||||
.p1 = { .min = IRONLAKE_LVDS_D_SSC_P1_MIN,.max = IRONLAKE_LVDS_D_SSC_P1_MAX },
|
||||
.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
|
||||
.p2_slow = IRONLAKE_LVDS_D_SSC_P2_SLOW,
|
||||
.p2_fast = IRONLAKE_LVDS_D_SSC_P2_FAST },
|
||||
.find_pll = intel_g4x_find_best_PLL,
|
||||
};
|
||||
|
||||
|
@ -509,34 +607,53 @@ static const intel_limit_t intel_limits_ironlake_display_port = {
|
|||
.max = IRONLAKE_DOT_MAX },
|
||||
.vco = { .min = IRONLAKE_VCO_MIN,
|
||||
.max = IRONLAKE_VCO_MAX},
|
||||
.n = { .min = IRONLAKE_N_MIN,
|
||||
.max = IRONLAKE_N_MAX },
|
||||
.m = { .min = IRONLAKE_M_MIN,
|
||||
.max = IRONLAKE_M_MAX },
|
||||
.n = { .min = IRONLAKE_DP_N_MIN,
|
||||
.max = IRONLAKE_DP_N_MAX },
|
||||
.m = { .min = IRONLAKE_DP_M_MIN,
|
||||
.max = IRONLAKE_DP_M_MAX },
|
||||
.m1 = { .min = IRONLAKE_M1_MIN,
|
||||
.max = IRONLAKE_M1_MAX },
|
||||
.m2 = { .min = IRONLAKE_M2_MIN,
|
||||
.max = IRONLAKE_M2_MAX },
|
||||
.p = { .min = IRONLAKE_P_DISPLAY_PORT_MIN,
|
||||
.max = IRONLAKE_P_DISPLAY_PORT_MAX },
|
||||
.p1 = { .min = IRONLAKE_P1_DISPLAY_PORT_MIN,
|
||||
.max = IRONLAKE_P1_DISPLAY_PORT_MAX},
|
||||
.p2 = { .dot_limit = IRONLAKE_P2_DISPLAY_PORT_LIMIT,
|
||||
.p2_slow = IRONLAKE_P2_DISPLAY_PORT_SLOW,
|
||||
.p2_fast = IRONLAKE_P2_DISPLAY_PORT_FAST },
|
||||
.p = { .min = IRONLAKE_DP_P_MIN,
|
||||
.max = IRONLAKE_DP_P_MAX },
|
||||
.p1 = { .min = IRONLAKE_DP_P1_MIN,
|
||||
.max = IRONLAKE_DP_P1_MAX},
|
||||
.p2 = { .dot_limit = IRONLAKE_DP_P2_LIMIT,
|
||||
.p2_slow = IRONLAKE_DP_P2_SLOW,
|
||||
.p2_fast = IRONLAKE_DP_P2_FAST },
|
||||
.find_pll = intel_find_pll_ironlake_dp,
|
||||
};
|
||||
|
||||
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
|
||||
{
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
const intel_limit_t *limit;
|
||||
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
|
||||
limit = &intel_limits_ironlake_lvds;
|
||||
else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
|
||||
int refclk = 120;
|
||||
|
||||
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
|
||||
if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100)
|
||||
refclk = 100;
|
||||
|
||||
if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
|
||||
LVDS_CLKB_POWER_UP) {
|
||||
/* LVDS dual channel */
|
||||
if (refclk == 100)
|
||||
limit = &intel_limits_ironlake_dual_lvds_100m;
|
||||
else
|
||||
limit = &intel_limits_ironlake_dual_lvds;
|
||||
} else {
|
||||
if (refclk == 100)
|
||||
limit = &intel_limits_ironlake_single_lvds_100m;
|
||||
else
|
||||
limit = &intel_limits_ironlake_single_lvds;
|
||||
}
|
||||
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
|
||||
HAS_eDP)
|
||||
limit = &intel_limits_ironlake_display_port;
|
||||
else
|
||||
limit = &intel_limits_ironlake_sdvo;
|
||||
limit = &intel_limits_ironlake_dac;
|
||||
|
||||
return limit;
|
||||
}
|
||||
|
@ -914,6 +1031,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
|
|||
|
||||
/* enable it... */
|
||||
fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
|
||||
if (IS_I945GM(dev))
|
||||
fbc_ctl |= FBC_C3_IDLE; /* 945 needs special SR handling */
|
||||
fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
|
||||
fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
|
||||
if (obj_priv->tiling_mode != I915_TILING_NONE)
|
||||
|
@ -3962,7 +4081,8 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
|
|||
struct intel_unpin_work {
|
||||
struct work_struct work;
|
||||
struct drm_device *dev;
|
||||
struct drm_gem_object *obj;
|
||||
struct drm_gem_object *old_fb_obj;
|
||||
struct drm_gem_object *pending_flip_obj;
|
||||
struct drm_pending_vblank_event *event;
|
||||
int pending;
|
||||
};
|
||||
|
@ -3973,8 +4093,9 @@ static void intel_unpin_work_fn(struct work_struct *__work)
|
|||
container_of(__work, struct intel_unpin_work, work);
|
||||
|
||||
mutex_lock(&work->dev->struct_mutex);
|
||||
i915_gem_object_unpin(work->obj);
|
||||
drm_gem_object_unreference(work->obj);
|
||||
i915_gem_object_unpin(work->old_fb_obj);
|
||||
drm_gem_object_unreference(work->pending_flip_obj);
|
||||
drm_gem_object_unreference(work->old_fb_obj);
|
||||
mutex_unlock(&work->dev->struct_mutex);
|
||||
kfree(work);
|
||||
}
|
||||
|
@ -3998,7 +4119,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
|
|||
work = intel_crtc->unpin_work;
|
||||
if (work == NULL || !work->pending) {
|
||||
if (work && !work->pending) {
|
||||
obj_priv = work->obj->driver_private;
|
||||
obj_priv = work->pending_flip_obj->driver_private;
|
||||
DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
|
||||
obj_priv,
|
||||
atomic_read(&obj_priv->pending_flip));
|
||||
|
@ -4023,7 +4144,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
|
|||
|
||||
spin_unlock_irqrestore(&dev->event_lock, flags);
|
||||
|
||||
obj_priv = work->obj->driver_private;
|
||||
obj_priv = work->pending_flip_obj->driver_private;
|
||||
|
||||
/* Initial scanout buffer will have a 0 pending flip count */
|
||||
if ((atomic_read(&obj_priv->pending_flip) == 0) ||
|
||||
|
@ -4060,7 +4181,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
|
|||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
struct intel_unpin_work *work;
|
||||
unsigned long flags;
|
||||
int ret;
|
||||
int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
|
||||
int ret, pipesrc;
|
||||
RING_LOCALS;
|
||||
|
||||
work = kzalloc(sizeof *work, GFP_KERNEL);
|
||||
|
@ -4072,7 +4194,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
|
|||
work->event = event;
|
||||
work->dev = crtc->dev;
|
||||
intel_fb = to_intel_framebuffer(crtc->fb);
|
||||
work->obj = intel_fb->obj;
|
||||
work->old_fb_obj = intel_fb->obj;
|
||||
INIT_WORK(&work->work, intel_unpin_work_fn);
|
||||
|
||||
/* We borrow the event spin lock for protecting unpin_work */
|
||||
|
@ -4100,14 +4222,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
|
|||
return ret;
|
||||
}
|
||||
|
||||
/* Reference the old fb object for the scheduled work. */
|
||||
drm_gem_object_reference(work->obj);
|
||||
/* Reference the objects for the scheduled work. */
|
||||
drm_gem_object_reference(work->old_fb_obj);
|
||||
drm_gem_object_reference(obj);
|
||||
|
||||
crtc->fb = fb;
|
||||
i915_gem_object_flush_write_domain(obj);
|
||||
drm_vblank_get(dev, intel_crtc->pipe);
|
||||
obj_priv = obj->driver_private;
|
||||
atomic_inc(&obj_priv->pending_flip);
|
||||
work->pending_flip_obj = obj;
|
||||
|
||||
BEGIN_LP_RING(4);
|
||||
OUT_RING(MI_DISPLAY_FLIP |
|
||||
|
@ -4115,7 +4239,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
|
|||
OUT_RING(fb->pitch);
|
||||
if (IS_I965G(dev)) {
|
||||
OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
|
||||
OUT_RING((fb->width << 16) | fb->height);
|
||||
pipesrc = I915_READ(pipesrc_reg);
|
||||
OUT_RING(pipesrc & 0x0fff0fff);
|
||||
} else {
|
||||
OUT_RING(obj_priv->gtt_offset);
|
||||
OUT_RING(MI_NOOP);
|
||||
|
|
|
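The limit tables in this hunk only bound a divider search; the dot clock itself comes out of the i9xx-style divider formula used elsewhere in this driver. A minimal sketch of that computation, under the assumption that Ironlake follows the same m/n/p relation (struct and function names here are illustrative, not part of the diff):

/* Assumed relation: m = 5*(m1+2) + (m2+2), p = p1*p2,
 * vco = refclk * m / (n + 2), dot = vco / p, with refclk in kHz.
 * A candidate divider set is accepted only if every field and the
 * derived vco/dot land inside the chosen intel_limit_t ranges. */
struct sketch_clock { int n, m1, m2, p1, p2, m, p, vco, dot; };

static void sketch_intel_clock(int refclk, struct sketch_clock *c)
{
	c->m = 5 * (c->m1 + 2) + (c->m2 + 2);
	c->p = c->p1 * c->p2;
	c->vco = refclk * c->m / (c->n + 2);	/* kHz */
	c->dot = c->vco / c->p;			/* kHz */
}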
@@ -148,7 +148,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_pin(fbo, PAGE_SIZE);
	ret = i915_gem_object_pin(fbo, 64*1024);
	if (ret) {
		DRM_ERROR("failed to pin fb: %d\n", ret);
		goto out_unref;
@@ -90,21 +90,21 @@ int nouveau_hybrid_setup(struct drm_device *dev)
{
	int result;

	if (nouveau_dsm(dev, NOUVEAU_DSM_ACTIVE, NOUVEAU_DSM_ACTIVE_QUERY,
	if (nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STATE,
			&result))
		return -ENODEV;

	NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result);

	if (result & 0x1) {	/* Stamina mode - disable the external GPU */
	if (result) {		/* Ensure that the external GPU is enabled */
		nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
		nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
			    NULL);
	} else {		/* Stamina mode - disable the external GPU */
		nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA,
			    NULL);
		nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA,
			    NULL);
	} else {		/* Ensure that the external GPU is enabled */
		nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
		nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
			    NULL);
	}

	return 0;
@@ -1865,7 +1865,7 @@ init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)

	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;

	if (dev_priv->card_type >= NV_50)
	if (dev_priv->card_type >= NV_40)
		return 1;

	/*

@@ -3765,7 +3765,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
	 */

	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct init_exec iexec = {true, false};
	struct nvbios *bios = &dev_priv->VBIOS;
	uint8_t *table = &bios->data[bios->display.script_table_ptr];
	uint8_t *otable = NULL;

@@ -3845,8 +3844,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
		}
	}

	bios->display.output = dcbent;

	if (pxclk == 0) {
		script = ROM16(otable[6]);
		if (!script) {

@@ -3855,7 +3852,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
		}

		NV_TRACE(dev, "0x%04X: parsing output script 0\n", script);
		parse_init_table(bios, script, &iexec);
		nouveau_bios_run_init_table(dev, script, dcbent);
	} else
	if (pxclk == -1) {
		script = ROM16(otable[8]);

@@ -3865,7 +3862,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
		}

		NV_TRACE(dev, "0x%04X: parsing output script 1\n", script);
		parse_init_table(bios, script, &iexec);
		nouveau_bios_run_init_table(dev, script, dcbent);
	} else
	if (pxclk == -2) {
		if (table[4] >= 12)

@@ -3878,7 +3875,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
		}

		NV_TRACE(dev, "0x%04X: parsing output script 2\n", script);
		parse_init_table(bios, script, &iexec);
		nouveau_bios_run_init_table(dev, script, dcbent);
	} else
	if (pxclk > 0) {
		script = ROM16(otable[table[4] + i*6 + 2]);

@@ -3890,7 +3887,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
		}

		NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script);
		parse_init_table(bios, script, &iexec);
		nouveau_bios_run_init_table(dev, script, dcbent);
	} else
	if (pxclk < 0) {
		script = ROM16(otable[table[4] + i*6 + 4]);

@@ -3902,7 +3899,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
		}

		NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script);
		parse_init_table(bios, script, &iexec);
		nouveau_bios_run_init_table(dev, script, dcbent);
	}

	return 0;

@@ -5864,10 +5861,13 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvbios *bios = &dev_priv->VBIOS;
	struct init_exec iexec = { true, false };
	unsigned long flags;

	spin_lock_irqsave(&bios->lock, flags);
	bios->display.output = dcbent;
	parse_init_table(bios, table, &iexec);
	bios->display.output = NULL;
	spin_unlock_irqrestore(&bios->lock, flags);
}

static bool NVInitVBIOS(struct drm_device *dev)

@@ -5876,6 +5876,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
	struct nvbios *bios = &dev_priv->VBIOS;

	memset(bios, 0, sizeof(struct nvbios));
	spin_lock_init(&bios->lock);
	bios->dev = dev;

	if (!NVShadowVBIOS(dev, bios->data))
@@ -205,6 +205,8 @@ struct nvbios {
	struct drm_device *dev;
	struct nouveau_bios_info pub;

	spinlock_t lock;

	uint8_t data[NV_PROM_SIZE];
	unsigned int length;
	bool execute;
@@ -65,8 +65,10 @@ nouveau_bo_fixup_align(struct drm_device *dev,

	/*
	 * Some of the tile_flags have a periodic structure of N*4096 bytes,
	 * align to to that as well as the page size. Overallocate memory to
	 * avoid corruption of other buffer objects.
	 * align to to that as well as the page size. Align the size to the
	 * appropriate boundaries. This does imply that sizes are rounded up
	 * 3-7 pages, so be aware of this and do not waste memory by allocating
	 * many small buffers.
	 */
	if (dev_priv->card_type == NV_50) {
		uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15;

@@ -77,22 +79,20 @@ nouveau_bo_fixup_align(struct drm_device *dev,
		case 0x2800:
		case 0x4800:
		case 0x7a00:
			*size = roundup(*size, block_size);
			if (is_power_of_2(block_size)) {
				*size += 3 * block_size;
				for (i = 1; i < 10; i++) {
					*align = 12 * i * block_size;
					if (!(*align % 65536))
						break;
				}
			} else {
				*size += 6 * block_size;
				for (i = 1; i < 10; i++) {
					*align = 8 * i * block_size;
					if (!(*align % 65536))
						break;
				}
			}
			*size = roundup(*size, *align);
			break;
		default:
			break;
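The alignment loop in this hunk is easier to see in isolation: it walks multiples of 12x (or 8x) the tile block size until it finds one that is also 64KiB-aligned. A hedged standalone sketch of that same search, assuming the step factors and target from the hunk (the helper name is illustrative, not the driver's):

#include <stdbool.h>
#include <stdint.h>

static uint32_t sketch_pick_align(uint32_t block_size, bool power_of_2)
{
	/* 12 * block_size steps for power-of-two block sizes, 8 * otherwise,
	 * mirroring the two branches above. */
	uint32_t step = (power_of_2 ? 12 : 8) * block_size;
	uint32_t align = step;
	int i;

	for (i = 1; i < 10; i++) {
		align = i * step;
		if ((align % 65536) == 0)	/* 64KiB-aligned: done */
			break;
	}
	return align;	/* falls back to 9 * step if nothing divides evenly */
}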
@@ -278,12 +278,11 @@ nouveau_channel_free(struct nouveau_channel *chan)
	/* Ensure the channel is no longer active on the GPU */
	pfifo->reassign(dev, false);

	if (pgraph->channel(dev) == chan) {
		pgraph->fifo_access(dev, false);
	pgraph->fifo_access(dev, false);
	if (pgraph->channel(dev) == chan)
		pgraph->unload_context(dev);
		pgraph->fifo_access(dev, true);
	}
	pgraph->destroy_context(chan);
	pgraph->fifo_access(dev, true);

	if (pfifo->channel_id(dev) == chan->id) {
		pfifo->disable(dev);
@@ -88,13 +88,14 @@ nouveau_connector_destroy(struct drm_connector *drm_connector)
{
	struct nouveau_connector *nv_connector =
		nouveau_connector(drm_connector);
	struct drm_device *dev = nv_connector->base.dev;

	NV_DEBUG_KMS(dev, "\n");
	struct drm_device *dev;

	if (!nv_connector)
		return;

	dev = nv_connector->base.dev;
	NV_DEBUG_KMS(dev, "\n");

	kfree(nv_connector->edid);
	drm_sysfs_connector_remove(drm_connector);
	drm_connector_cleanup(drm_connector);
@@ -502,12 +502,12 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
		break;
	}

	if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
		ret = -EREMOTEIO;
		goto out;
	}

	if (cmd & 1) {
		if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
			ret = -EREMOTEIO;
			goto out;
		}

		for (i = 0; i < 4; i++) {
			data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i));
			NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]);
@@ -56,7 +56,7 @@ int nouveau_vram_pushbuf;
module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);

MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM");
int nouveau_vram_notify;
int nouveau_vram_notify = 1;
module_param_named(vram_notify, nouveau_vram_notify, int, 0400);

MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)");

@@ -75,6 +75,14 @@ MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
int nouveau_ignorelid = 0;
module_param_named(ignorelid, nouveau_ignorelid, int, 0400);

MODULE_PARM_DESC(noagp, "Disable all acceleration");
int nouveau_noaccel = 0;
module_param_named(noaccel, nouveau_noaccel, int, 0400);

MODULE_PARM_DESC(noagp, "Disable fbcon acceleration");
int nouveau_nofbaccel = 0;
module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);

MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
		 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
		 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
@@ -678,6 +678,8 @@ extern int nouveau_reg_debug;
extern char *nouveau_vbios;
extern int nouveau_ctxfw;
extern int nouveau_ignorelid;
extern int nouveau_nofbaccel;
extern int nouveau_noaccel;

/* nouveau_state.c */
extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
@@ -107,6 +107,34 @@ static struct fb_ops nouveau_fbcon_ops = {
	.fb_setcmap = drm_fb_helper_setcmap,
};

static struct fb_ops nv04_fbcon_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_setcolreg = drm_fb_helper_setcolreg,
	.fb_fillrect = nv04_fbcon_fillrect,
	.fb_copyarea = nv04_fbcon_copyarea,
	.fb_imageblit = nv04_fbcon_imageblit,
	.fb_sync = nouveau_fbcon_sync,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
};

static struct fb_ops nv50_fbcon_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_setcolreg = drm_fb_helper_setcolreg,
	.fb_fillrect = nv50_fbcon_fillrect,
	.fb_copyarea = nv50_fbcon_copyarea,
	.fb_imageblit = nv50_fbcon_imageblit,
	.fb_sync = nouveau_fbcon_sync,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
};

static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
				    u16 blue, int regno)
{

@@ -267,8 +295,12 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
	dev_priv->fbdev_info = info;

	strcpy(info->fix.id, "nouveaufb");
	info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
		      FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT;
	if (nouveau_nofbaccel)
		info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED;
	else
		info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
			      FBINFO_HWACCEL_FILLRECT |
			      FBINFO_HWACCEL_IMAGEBLIT;
	info->fbops = &nouveau_fbcon_ops;
	info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset -
			       dev_priv->vm_vram_base;

@@ -316,13 +348,15 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
	par->nouveau_fb = nouveau_fb;
	par->dev = dev;

	if (dev_priv->channel) {
	if (dev_priv->channel && !nouveau_nofbaccel) {
		switch (dev_priv->card_type) {
		case NV_50:
			nv50_fbcon_accel_init(info);
			info->fbops = &nv50_fbcon_ops;
			break;
		default:
			nv04_fbcon_accel_init(info);
			info->fbops = &nv04_fbcon_ops;
			break;
		};
	}
@@ -40,7 +40,13 @@ int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb);
void nouveau_fbcon_restore(void);
void nouveau_fbcon_zfill(struct drm_device *dev);

void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
int nv04_fbcon_accel_init(struct fb_info *info);
void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
int nv50_fbcon_accel_init(struct fb_info *info);

void nouveau_fbcon_gpu_lockup(struct fb_info *info);
@@ -925,7 +925,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
	}

	if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
		spin_lock(&nvbo->bo.lock);
		ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
		spin_unlock(&nvbo->bo.lock);
	} else {
		ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
		if (ret == 0)
@@ -97,8 +97,8 @@ nouveau_grctx_prog_load(struct drm_device *dev)
		}

		pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
		if (!pgraph->ctxprog) {
			NV_ERROR(dev, "OOM copying ctxprog\n");
		if (!pgraph->ctxvals) {
			NV_ERROR(dev, "OOM copying ctxvals\n");
			release_firmware(fw);
			nouveau_grctx_fini(dev);
			return -ENOMEM;
@@ -211,6 +211,20 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
					get + 4);
		}

		if (status & NV_PFIFO_INTR_SEMAPHORE) {
			uint32_t sem;

			status &= ~NV_PFIFO_INTR_SEMAPHORE;
			nv_wr32(dev, NV03_PFIFO_INTR_0,
				NV_PFIFO_INTR_SEMAPHORE);

			sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
			nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);

			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
		}

		if (status) {
			NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
				status, chid);

@@ -566,86 +580,99 @@ nouveau_pgraph_irq_handler(struct drm_device *dev)
static void
nv50_pgraph_irq_handler(struct drm_device *dev)
{
	uint32_t status, nsource;
	uint32_t status;

	status = nv_rd32(dev, NV03_PGRAPH_INTR);
	nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);

	if (status & 0x00000001) {
		nouveau_pgraph_intr_notify(dev, nsource);
		status &= ~0x00000001;
		nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
	}
		if (status & 0x00000001) {
			nouveau_pgraph_intr_notify(dev, nsource);
			status &= ~0x00000001;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
		}

	if (status & 0x00000010) {
		nouveau_pgraph_intr_error(dev, nsource |
					  NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
		if (status & 0x00000010) {
			nouveau_pgraph_intr_error(dev, nsource |
						  NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);

		status &= ~0x00000010;
		nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
	}
			status &= ~0x00000010;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
		}

	if (status & 0x00001000) {
		nv_wr32(dev, 0x400500, 0x00000000);
		nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
		nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
			NV40_PGRAPH_INTR_EN) & ~NV_PGRAPH_INTR_CONTEXT_SWITCH);
		nv_wr32(dev, 0x400500, 0x00010001);
		if (status & 0x00001000) {
			nv_wr32(dev, 0x400500, 0x00000000);
			nv_wr32(dev, NV03_PGRAPH_INTR,
				NV_PGRAPH_INTR_CONTEXT_SWITCH);
			nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
				NV40_PGRAPH_INTR_EN) &
				~NV_PGRAPH_INTR_CONTEXT_SWITCH);
			nv_wr32(dev, 0x400500, 0x00010001);

		nv50_graph_context_switch(dev);
			nv50_graph_context_switch(dev);

		status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
	}
			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
		}

	if (status & 0x00100000) {
		nouveau_pgraph_intr_error(dev, nsource |
					  NV03_PGRAPH_NSOURCE_DATA_ERROR);
		if (status & 0x00100000) {
			nouveau_pgraph_intr_error(dev, nsource |
						  NV03_PGRAPH_NSOURCE_DATA_ERROR);

		status &= ~0x00100000;
		nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
	}
			status &= ~0x00100000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
		}

	if (status & 0x00200000) {
		int r;
		if (status & 0x00200000) {
			int r;

		nouveau_pgraph_intr_error(dev, nsource |
					  NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
			nouveau_pgraph_intr_error(dev, nsource |
						  NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);

		NV_ERROR(dev, "magic set 1:\n");
		for (r = 0x408900; r <= 0x408910; r += 4)
			NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
		nv_wr32(dev, 0x408900, nv_rd32(dev, 0x408904) | 0xc0000000);
		for (r = 0x408e08; r <= 0x408e24; r += 4)
			NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
		nv_wr32(dev, 0x408e08, nv_rd32(dev, 0x408e08) | 0xc0000000);
			NV_ERROR(dev, "magic set 1:\n");
			for (r = 0x408900; r <= 0x408910; r += 4)
				NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
					 nv_rd32(dev, r));
			nv_wr32(dev, 0x408900,
				nv_rd32(dev, 0x408904) | 0xc0000000);
			for (r = 0x408e08; r <= 0x408e24; r += 4)
				NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
					 nv_rd32(dev, r));
			nv_wr32(dev, 0x408e08,
				nv_rd32(dev, 0x408e08) | 0xc0000000);

		NV_ERROR(dev, "magic set 2:\n");
		for (r = 0x409900; r <= 0x409910; r += 4)
			NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
		nv_wr32(dev, 0x409900, nv_rd32(dev, 0x409904) | 0xc0000000);
		for (r = 0x409e08; r <= 0x409e24; r += 4)
			NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
		nv_wr32(dev, 0x409e08, nv_rd32(dev, 0x409e08) | 0xc0000000);
			NV_ERROR(dev, "magic set 2:\n");
			for (r = 0x409900; r <= 0x409910; r += 4)
				NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
					 nv_rd32(dev, r));
			nv_wr32(dev, 0x409900,
				nv_rd32(dev, 0x409904) | 0xc0000000);
			for (r = 0x409e08; r <= 0x409e24; r += 4)
				NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
					 nv_rd32(dev, r));
			nv_wr32(dev, 0x409e08,
				nv_rd32(dev, 0x409e08) | 0xc0000000);

		status &= ~0x00200000;
		nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
		nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
	}
			status &= ~0x00200000;
			nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
		}

	if (status) {
		NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
		nv_wr32(dev, NV03_PGRAPH_INTR, status);
	}
		if (status) {
			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
				status);
			nv_wr32(dev, NV03_PGRAPH_INTR, status);
		}

	{
		const int isb = (1 << 16) | (1 << 0);
		{
			const int isb = (1 << 16) | (1 << 0);

		if ((nv_rd32(dev, 0x400500) & isb) != isb)
			nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | isb);
		nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
			if ((nv_rd32(dev, 0x400500) & isb) != isb)
				nv_wr32(dev, 0x400500,
					nv_rd32(dev, 0x400500) | isb);
		}
	}

	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
	nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
}

static void
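The PGRAPH rework above replaces a single read of NV03_PGRAPH_INTR with a loop that re-reads the register until it comes back zero, acking each source as it is handled. A generic sketch of that level-triggered pattern, with all register accessors reduced to placeholder functions (nothing here is driver API):

#include <stdint.h>

uint32_t read_intr(void);	/* placeholder for nv_rd32(dev, INTR) */
void write_intr(uint32_t bits);	/* placeholder for nv_wr32(dev, INTR, bits) */
void handle_source(uint32_t bit);

static void sketch_irq_loop(void)
{
	uint32_t status;

	/* Re-read after each pass: new sources may assert while we are
	 * servicing the previous batch, so loop until the line is quiet. */
	while ((status = read_intr()) != 0) {
		uint32_t bit;

		for (bit = 1; bit != 0; bit <<= 1) {
			if (status & bit) {
				handle_source(bit);
				write_intr(bit);	/* ack this source */
				status &= ~bit;
			}
		}
	}
}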
@@ -34,15 +34,20 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_bo *ntfy = NULL;
	uint32_t flags;
	int ret;

	ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, nouveau_vram_notify ?
			      TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT,
	if (nouveau_vram_notify)
		flags = TTM_PL_FLAG_VRAM;
	else
		flags = TTM_PL_FLAG_TT;

	ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags,
			      0, 0x0000, false, true, &ntfy);
	if (ret)
		return ret;

	ret = nouveau_bo_pin(ntfy, TTM_PL_FLAG_VRAM);
	ret = nouveau_bo_pin(ntfy, flags);
	if (ret)
		goto out_err;

@@ -128,6 +133,8 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
			target = NV_DMA_TARGET_PCI;
		} else {
			target = NV_DMA_TARGET_AGP;
			if (dev_priv->card_type >= NV_50)
				offset += dev_priv->vm_gart_base;
		}
	} else {
		NV_ERROR(dev, "Bad DMA target, mem_type %d!\n",
@@ -885,11 +885,12 @@ int
nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
		      struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_nouveau_private *dev_priv;
	struct nouveau_gpuobj *gpuobj;

	if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
		return -EINVAL;
	dev_priv = chan->dev->dev_private;

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
@@ -99,6 +99,7 @@
 * the card will hang early on in the X init process.
 */
#	define NV_PMC_ENABLE_UNK13	(1<<13)
#define NV40_PMC_GRAPH_UNITS	0x00001540
#define NV40_PMC_BACKLIGHT	0x000015f0
#	define NV40_PMC_BACKLIGHT_MASK	0x001f0000
#define NV40_PMC_1700		0x00001700
@@ -54,11 +54,12 @@ static void
nouveau_sgdma_clear(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;

	NV_DEBUG(nvbe->dev, "\n");
	struct drm_device *dev;

	if (nvbe && nvbe->pages) {
		dev = nvbe->dev;
		NV_DEBUG(dev, "\n");

		if (nvbe->bound)
			be->func->unbind(be);

@@ -310,6 +310,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
static unsigned int
nouveau_vga_set_decode(void *priv, bool state)
{
	struct drm_device *dev = priv;
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->chipset >= 0x40)
		nv_wr32(dev, 0x88054, state);
	else
		nv_wr32(dev, 0x1854, state);

	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;

@@ -427,15 +435,19 @@ nouveau_card_init(struct drm_device *dev)
	if (ret)
		goto out_timer;

	/* PGRAPH */
	ret = engine->graph.init(dev);
	if (ret)
		goto out_fb;
	if (nouveau_noaccel)
		engine->graph.accel_blocked = true;
	else {
		/* PGRAPH */
		ret = engine->graph.init(dev);
		if (ret)
			goto out_fb;

	/* PFIFO */
	ret = engine->fifo.init(dev);
	if (ret)
		goto out_graph;
		/* PFIFO */
		ret = engine->fifo.init(dev);
		if (ret)
			goto out_graph;
	}

	/* this call irq_preinstall, register irq handler and
	 * call irq_postinstall

@@ -479,9 +491,11 @@ nouveau_card_init(struct drm_device *dev)
out_irq:
	drm_irq_uninstall(dev);
out_fifo:
	engine->fifo.takedown(dev);
	if (!nouveau_noaccel)
		engine->fifo.takedown(dev);
out_graph:
	engine->graph.takedown(dev);
	if (!nouveau_noaccel)
		engine->graph.takedown(dev);
out_fb:
	engine->fb.takedown(dev);
out_timer:

@@ -518,8 +532,10 @@ static void nouveau_card_takedown(struct drm_device *dev)
		dev_priv->channel = NULL;
	}

	engine->fifo.takedown(dev);
	engine->graph.takedown(dev);
	if (!nouveau_noaccel) {
		engine->fifo.takedown(dev);
		engine->graph.takedown(dev);
	}
	engine->fb.takedown(dev);
	engine->timer.takedown(dev);
	engine->mc.takedown(dev);

@@ -817,6 +833,15 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
	case NOUVEAU_GETPARAM_VM_VRAM_BASE:
		getparam->value = dev_priv->vm_vram_base;
		break;
	case NOUVEAU_GETPARAM_GRAPH_UNITS:
		/* NV40 and NV50 versions are quite different, but register
		 * address is the same. User is supposed to know the card
		 * family anyway... */
		if (dev_priv->chipset >= 0x40) {
			getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS);
			break;
		}
		/* FALLTHRU */
	default:
		NV_ERROR(dev, "unknown parameter %lld\n", getparam->param);
		return -EINVAL;
@@ -27,7 +27,7 @@
#include "nouveau_dma.h"
#include "nouveau_fbcon.h"

static void
void
nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	struct nouveau_fbcon_par *par = info->par;

@@ -54,7 +54,7 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
	FIRE_RING(chan);
}

static void
void
nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	struct nouveau_fbcon_par *par = info->par;

@@ -88,7 +88,7 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
	FIRE_RING(chan);
}

static void
void
nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
	struct nouveau_fbcon_par *par = info->par;

@@ -307,9 +307,6 @@ nv04_fbcon_accel_init(struct fb_info *info)

	FIRE_RING(chan);

	info->fbops->fb_fillrect = nv04_fbcon_fillrect;
	info->fbops->fb_copyarea = nv04_fbcon_copyarea;
	info->fbops->fb_imageblit = nv04_fbcon_imageblit;
	return 0;
}

@@ -298,14 +298,17 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
static void
nv50_crtc_destroy(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);

	NV_DEBUG_KMS(dev, "\n");
	struct drm_device *dev;
	struct nouveau_crtc *nv_crtc;

	if (!crtc)
		return;

	dev = crtc->dev;
	nv_crtc = nouveau_crtc(crtc);

	NV_DEBUG_KMS(dev, "\n");

	drm_crtc_cleanup(&nv_crtc->base);

	nv50_cursor_fini(nv_crtc);
@@ -3,7 +3,7 @@
#include "nouveau_dma.h"
#include "nouveau_fbcon.h"

static void
void
nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	struct nouveau_fbcon_par *par = info->par;

@@ -46,7 +46,7 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
	FIRE_RING(chan);
}

static void
void
nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	struct nouveau_fbcon_par *par = info->par;

@@ -81,7 +81,7 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
	FIRE_RING(chan);
}

static void
void
nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
	struct nouveau_fbcon_par *par = info->par;

@@ -262,9 +262,6 @@ nv50_fbcon_accel_init(struct fb_info *info)
	OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys +
		 dev_priv->vm_vram_base);

	info->fbops->fb_fillrect = nv50_fbcon_fillrect;
	info->fbops->fb_copyarea = nv50_fbcon_copyarea;
	info->fbops->fb_imageblit = nv50_fbcon_imageblit;
	return 0;
}

@@ -317,17 +317,20 @@ void
nv50_fifo_destroy_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj_ref *ramfc = chan->ramfc;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	nouveau_gpuobj_ref_del(dev, &chan->ramfc);
	nouveau_gpuobj_ref_del(dev, &chan->cache);

	/* This will ensure the channel is seen as disabled. */
	chan->ramfc = NULL;
	nv50_fifo_channel_disable(dev, chan->id, false);

	/* Dummy channel, also used on ch 127 */
	if (chan->id == 0)
		nv50_fifo_channel_disable(dev, 127, false);

	nouveau_gpuobj_ref_del(dev, &ramfc);
	nouveau_gpuobj_ref_del(dev, &chan->cache);
}

int
@@ -165,6 +165,12 @@ nv50_graph_channel(struct drm_device *dev)
	uint32_t inst;
	int i;

	/* Be sure we're not in the middle of a context switch or bad things
	 * will happen, such as unloading the wrong pgraph context.
	 */
	if (!nv_wait(0x400300, 0x00000001, 0x00000000))
		NV_ERROR(dev, "Ctxprog is still running\n");

	inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
	if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
		return NULL;

@@ -275,7 +281,7 @@ nv50_graph_load_context(struct nouveau_channel *chan)
int
nv50_graph_unload_context(struct drm_device *dev)
{
	uint32_t inst, fifo = nv_rd32(dev, 0x400500);
	uint32_t inst;

	inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
	if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))

@@ -283,12 +289,10 @@ nv50_graph_unload_context(struct drm_device *dev)
	inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;

	nouveau_wait_for_idle(dev);
	nv_wr32(dev, 0x400500, fifo & ~1);
	nv_wr32(dev, 0x400784, inst);
	nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
	nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
	nouveau_wait_for_idle(dev);
	nv_wr32(dev, 0x400500, fifo);

	nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
	return 0;
@@ -101,6 +101,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
		struct nouveau_encoder *nvenc = nouveau_encoder(enc);

		if (nvenc == nv_encoder ||
		    nvenc->disconnect != nv50_sor_disconnect ||
		    nvenc->dcb->or != nv_encoder->dcb->or)
			continue;

@@ -1,10 +1,14 @@
config DRM_RADEON_KMS
	bool "Enable modesetting on radeon by default"
	bool "Enable modesetting on radeon by default - NEW DRIVER"
	depends on DRM_RADEON
	help
	  Choose this option if you want kernel modesetting enabled by default,
	  and you have a new enough userspace to support this. Running old
	  userspaces with this enabled will cause pain.
	  Choose this option if you want kernel modesetting enabled by default.

	  This is a completely new driver. It's only part of the existing drm
	  for compatibility reasons. It requires an entirely different graphics
	  stack above it and works very differently from the old drm stack.
	  i.e. don't enable this unless you know what you are doing it may
	  cause issues or bugs compared to the previous userspace driver stack.

	  When kernel modesetting is enabled the IOCTL of radeon/drm
	  driver are considered as invalid and an error message is printed
@@ -332,11 +332,13 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
	PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args;
	int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
	unsigned char *base;
	int retry_count = 0;

	memset(&args, 0, sizeof(args));

	base = (unsigned char *)rdev->mode_info.atom_context->scratch;

retry:
	memcpy(base, req_bytes, num_bytes);

	args.lpAuxRequest = 0;

@@ -347,10 +349,12 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);

	if (args.ucReplyStatus) {
		DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x\n",
	if (args.ucReplyStatus && !args.ucDataOutLen) {
		if (args.ucReplyStatus == 0x20 && retry_count < 10)
			goto retry;
		DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n",
			  req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
			  chan->rec.i2c_id, args.ucReplyStatus);
			  chan->rec.i2c_id, args.ucReplyStatus, retry_count);
		return false;
	}

@@ -1950,6 +1950,13 @@ int r600_resume(struct radeon_device *rdev)
		DRM_ERROR("radeon: failled testing IB (%d).\n", r);
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		DRM_ERROR("radeon: audio resume failed\n");
		return r;
	}

	return r;
}

@@ -1957,6 +1964,7 @@ int r600_suspend(struct radeon_device *rdev)
{
	int r;

	r600_audio_fini(rdev);
	/* FIXME: we should wait for ring to be empty */
	r600_cp_stop(rdev);
	rdev->cp.ready = false;
@@ -261,7 +261,6 @@ void r600_audio_fini(struct radeon_device *rdev)
	if (!r600_audio_chipset_supported(rdev))
		return;

	WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);

	del_timer(&rdev->audio_timer);
	WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
}
@@ -287,6 +287,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
			*connector_type = DRM_MODE_CONNECTOR_DVID;
	}

	/* XFX Pine Group device rv730 reports no VGA DDC lines
	 * even though they are wired up to record 0x93
	 */
	if ((dev->pdev->device == 0x9498) &&
	    (dev->pdev->subsystem_vendor == 0x1682) &&
	    (dev->pdev->subsystem_device == 0x2452)) {
		struct radeon_device *rdev = dev->dev_private;
		*i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
	}
	return true;
}

@@ -65,31 +65,42 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
	if (r) {
		goto out_cleanup;
	}
	start_jiffies = jiffies;
	for (i = 0; i < n; i++) {
		r = radeon_fence_create(rdev, &fence);
		if (r) {
			goto out_cleanup;

	/* r100 doesn't have dma engine so skip the test */
	if (rdev->asic->copy_dma) {

		start_jiffies = jiffies;
		for (i = 0; i < n; i++) {
			r = radeon_fence_create(rdev, &fence);
			if (r) {
				goto out_cleanup;
			}

			r = radeon_copy_dma(rdev, saddr, daddr,
					    size / RADEON_GPU_PAGE_SIZE, fence);

			if (r) {
				goto out_cleanup;
			}
			r = radeon_fence_wait(fence, false);
			if (r) {
				goto out_cleanup;
			}
			radeon_fence_unref(&fence);
		}
		r = radeon_copy_dma(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence);
		if (r) {
			goto out_cleanup;
		end_jiffies = jiffies;
		time = end_jiffies - start_jiffies;
		time = jiffies_to_msecs(time);
		if (time > 0) {
			i = ((n * size) >> 10) / time;
			printk(KERN_INFO "radeon: dma %u bo moves of %ukb from"
			       " %d to %d in %lums (%ukb/ms %ukb/s %uM/s)\n",
			       n, size >> 10,
			       sdomain, ddomain, time,
			       i, i * 1000, (i * 1000) / 1024);
		}
		r = radeon_fence_wait(fence, false);
		if (r) {
			goto out_cleanup;
		}
		radeon_fence_unref(&fence);
	}
	end_jiffies = jiffies;
	time = end_jiffies - start_jiffies;
	time = jiffies_to_msecs(time);
	if (time > 0) {
		i = ((n * size) >> 10) / time;
		printk(KERN_INFO "radeon: dma %u bo moves of %ukb from %d to %d"
		       " in %lums (%ukb/ms %ukb/s %uM/s)\n", n, size >> 10,
		       sdomain, ddomain, time, i, i * 1000, (i * 1000) / 1024);
	}

	start_jiffies = jiffies;
	for (i = 0; i < n; i++) {
		r = radeon_fence_create(rdev, &fence);
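The throughput report in this hunk is plain arithmetic on a jiffies delta converted to milliseconds: n copies of size bytes in time ms gives KiB/ms. A standalone sketch of the same math (hypothetical helper, no driver types assumed):

#include <stdio.h>

static void sketch_report(unsigned n, unsigned size, unsigned long time_ms)
{
	if (time_ms > 0) {
		/* (n * size) >> 10 converts total bytes to KiB */
		unsigned kb_per_ms = ((n * size) >> 10) / time_ms;

		printf("%u moves of %ukb in %lums (%ukb/ms %uM/s)\n",
		       n, size >> 10, time_ms, kb_per_ms,
		       (kb_per_ms * 1000) / 1024);
	}
}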
@@ -580,16 +580,18 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
	struct drm_encoder *encoder;
	struct drm_encoder_helper_funcs *encoder_funcs;
	bool dret;
	bool dret = false;
	enum drm_connector_status ret = connector_status_disconnected;

	encoder = radeon_best_single_encoder(connector);
	if (!encoder)
		ret = connector_status_disconnected;

	radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
	dret = radeon_ddc_probe(radeon_connector);
	radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
	if (radeon_connector->ddc_bus) {
		radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
		dret = radeon_ddc_probe(radeon_connector);
		radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
	}
	if (dret) {
		if (radeon_connector->edid) {
			kfree(radeon_connector->edid);

@@ -740,11 +742,13 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
	struct drm_mode_object *obj;
	int i;
	enum drm_connector_status ret = connector_status_disconnected;
	bool dret;
	bool dret = false;

	radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
	dret = radeon_ddc_probe(radeon_connector);
	radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
	if (radeon_connector->ddc_bus) {
		radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
		dret = radeon_ddc_probe(radeon_connector);
		radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
	}
	if (dret) {
		if (radeon_connector->edid) {
			kfree(radeon_connector->edid);
@@ -278,7 +278,7 @@ static void radeon_print_display_setup(struct drm_device *dev)
		DRM_INFO("  %s\n", connector_names[connector->connector_type]);
		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
			DRM_INFO("  %s\n", hpd_names[radeon_connector->hpd.hpd]);
		if (radeon_connector->ddc_bus)
		if (radeon_connector->ddc_bus) {
			DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				 radeon_connector->ddc_bus->rec.mask_clk_reg,
				 radeon_connector->ddc_bus->rec.mask_data_reg,

@@ -288,6 +288,15 @@ static void radeon_print_display_setup(struct drm_device *dev)
				 radeon_connector->ddc_bus->rec.en_data_reg,
				 radeon_connector->ddc_bus->rec.y_clk_reg,
				 radeon_connector->ddc_bus->rec.y_data_reg);
		} else {
			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
				DRM_INFO("  DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
		}
		DRM_INFO("  Encoders:\n");
		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
			radeon_encoder = to_radeon_encoder(encoder);
@@ -248,7 +248,7 @@ int radeonfb_create(struct drm_device *dev,
	if (ret)
		goto out_unref;

	memset_io(fbptr, 0xff, aligned_size);
	memset_io(fbptr, 0x0, aligned_size);

	strcpy(info->fix.id, "radeondrmfb");

@ -39,10 +39,10 @@
|
|||
#include "ttm/ttm_execbuf_util.h"
|
||||
#include "ttm/ttm_module.h"
|
||||
|
||||
#define VMWGFX_DRIVER_DATE "20090724"
|
||||
#define VMWGFX_DRIVER_MAJOR 0
|
||||
#define VMWGFX_DRIVER_MINOR 1
|
||||
#define VMWGFX_DRIVER_PATCHLEVEL 2
|
||||
#define VMWGFX_DRIVER_DATE "20100209"
|
||||
#define VMWGFX_DRIVER_MAJOR 1
|
||||
#define VMWGFX_DRIVER_MINOR 0
|
||||
#define VMWGFX_DRIVER_PATCHLEVEL 0
|
||||
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
|
||||
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
|
||||
#define VMWGFX_MAX_RELOCATIONS 2048
|
||||
|
@@ -113,6 +113,7 @@ struct vmw_fifo_state {
	unsigned long static_buffer_size;
	bool using_bounce_buffer;
	uint32_t capabilities;
	struct mutex fifo_mutex;
	struct rw_semaphore rwsem;
};
@@ -213,7 +214,7 @@ struct vmw_private {
	 * Fencing and IRQs.
	 */

	uint32_t fence_seq;
	atomic_t fence_seq;
	wait_queue_head_t fence_queue;
	wait_queue_head_t fifo_queue;
	atomic_t fence_queue_waiters;
@@ -74,6 +74,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
	fifo->reserved_size = 0;
	fifo->using_bounce_buffer = false;

	mutex_init(&fifo->fifo_mutex);
	init_rwsem(&fifo->rwsem);

	/*
@@ -117,7 +118,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
		 (unsigned int) min,
		 (unsigned int) fifo->capabilities);

	dev_priv->fence_seq = dev_priv->last_read_sequence;
	atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
	iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);

	return vmw_fifo_send_fence(dev_priv, &dummy);
@@ -283,7 +284,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
	int ret;

	down_write(&fifo_state->rwsem);
	mutex_lock(&fifo_state->fifo_mutex);
	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
@@ -351,7 +352,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
	}
out_err:
	fifo_state->reserved_size = 0;
	up_write(&fifo_state->rwsem);
	mutex_unlock(&fifo_state->fifo_mutex);
	return NULL;
}
@@ -426,6 +427,7 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
	}

	down_write(&fifo_state->rwsem);
	if (fifo_state->using_bounce_buffer || reserveable) {
		next_cmd += bytes;
		if (next_cmd >= max)
@@ -437,8 +439,9 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
	if (reserveable)
		iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	up_write(&fifo_state->rwsem);
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	mutex_unlock(&fifo_state->fifo_mutex);
}

int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
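Reading the vmwgfx FIFO hunks together: reservation and commit are now serialized by the new fifo_mutex, while the rwsem is narrowed to briefly excluding fence-state readers around the commit window. The sketch below is one reading of that split, with simplified names; it is not the driver's literal code:

	/* Illustrative lock split, assuming the reading above. */
	#include <linux/mutex.h>
	#include <linux/rwsem.h>

	struct fifo {
		struct mutex fifo_mutex;   /* serializes reserve()/commit() */
		struct rw_semaphore rwsem; /* write-held while commands land */
	};

	static void fifo_commit_sketch(struct fifo *f)
	{
		down_write(&f->rwsem);        /* block fence readers briefly */
		/* ... advance next_cmd, clear the reserved count ... */
		up_write(&f->rwsem);
		mutex_unlock(&f->fifo_mutex); /* taken in the matching reserve() */
	}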
@@ -451,9 +454,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
	fm = vmw_fifo_reserve(dev_priv, bytes);
	if (unlikely(fm == NULL)) {
		down_write(&fifo_state->rwsem);
		*sequence = dev_priv->fence_seq;
		up_write(&fifo_state->rwsem);
		*sequence = atomic_read(&dev_priv->fence_seq);
		ret = -ENOMEM;
		(void)vmw_fallback_wait(dev_priv, false, true, *sequence,
					false, 3*HZ);
@@ -461,7 +462,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
	}

	do {
		*sequence = dev_priv->fence_seq++;
		*sequence = atomic_add_return(1, &dev_priv->fence_seq);
	} while (*sequence == 0);

	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
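The fence counter change is the classic lock-free sequence-allocation idiom: atomic_add_return() hands each caller a unique post-increment value, and the loop skips 0 so "no fence" can keep a reserved meaning. A standalone sketch of the idiom (illustrative, era-appropriate include):

	/* Wrap-skipping atomic sequence allocation (sketch). */
	#include <linux/types.h>
	#include <asm/atomic.h>   /* <linux/atomic.h> on modern kernels */

	static atomic_t seq = ATOMIC_INIT(0);

	static u32 next_sequence(void)
	{
		u32 s;

		do {
			s = atomic_add_return(1, &seq); /* unique per caller */
		} while (s == 0);                       /* 0 stays "no fence" */
		return s;
	}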
@@ -48,6 +48,12 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
	case DRM_VMW_PARAM_FIFO_OFFSET:
		param->value = dev_priv->mmio_start;
		break;
	case DRM_VMW_PARAM_HW_CAPS:
		param->value = dev_priv->capabilities;
		break;
	case DRM_VMW_PARAM_FIFO_CAPS:
		param->value = dev_priv->fifo.capabilities;
		break;
	default:
		DRM_ERROR("Illegal vmwgfx get param request: %d\n",
			  param->param);
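The two new cases let userspace discover device and FIFO capabilities before committing to a feature set. A hypothetical userspace probe via libdrm might look like this; the wrapper call and struct layout are assumptions from the UAPI header, not taken from this diff:

	/* Hypothetical capability probe; names assumed, not from the diff. */
	#include <stdint.h>
	#include <xf86drm.h>
	#include "vmwgfx_drm.h"   /* assumed UAPI header with the structs */

	static int vmw_get_param(int fd, uint32_t param, uint64_t *value)
	{
		struct drm_vmw_getparam_arg arg = { .param = param };
		int ret = drmCommandWriteRead(fd, DRM_VMW_GET_PARAM,
					      &arg, sizeof(arg));

		if (ret == 0)
			*value = arg.value;   /* e.g. DRM_VMW_PARAM_FIFO_CAPS */
		return ret;
	}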
@@ -84,20 +84,13 @@ bool vmw_fence_signaled(struct vmw_private *dev_priv,
	    vmw_fifo_idle(dev_priv, sequence))
		return true;

	/**
	 * Below is to signal stale fences that have wrapped.
	 * First, block fence submission.
	 */

	down_read(&fifo_state->rwsem);

	/**
	 * Then check if the sequence is higher than what we've actually
	 * emitted. Then the fence is stale and signaled.
	 */

	ret = ((dev_priv->fence_seq - sequence) > VMW_FENCE_WRAP);
	up_read(&fifo_state->rwsem);
	ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
	       > VMW_FENCE_WRAP);

	return ret;
}
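With the counter atomic, the stale-fence test no longer needs the rwsem at all: it relies on unsigned wraparound arithmetic. With 32-bit sequences, (emitted - queried) is small for recently emitted fences and huge for fences from a previous epoch, so a single threshold comparison classifies stale fences locklessly. A minimal sketch, with an illustrative threshold in place of VMW_FENCE_WRAP:

	/* Serial-number arithmetic sketch; threshold value is illustrative. */
	#include <stdbool.h>
	#include <stdint.h>

	#define FENCE_WRAP (1u << 24)

	static bool fence_is_stale(uint32_t emitted, uint32_t queried)
	{
		/* Unsigned subtraction wraps, so an older-epoch fence
		 * yields a large distance and reads as already signaled. */
		return (uint32_t)(emitted - queried) > FENCE_WRAP;
	}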
@@ -127,7 +120,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,

	if (fifo_idle)
		down_read(&fifo_state->rwsem);
	signal_seq = dev_priv->fence_seq;
	signal_seq = atomic_read(&dev_priv->fence_seq);
	ret = 0;

	for (;;) {
@@ -769,10 +769,10 @@ int vmw_kms_init(struct vmw_private *dev_priv)

	drm_mode_config_init(dev);
	dev->mode_config.funcs = &vmw_kms_funcs;
	dev->mode_config.min_width = 640;
	dev->mode_config.min_height = 480;
	dev->mode_config.max_width = 2048;
	dev->mode_config.max_height = 2048;
	dev->mode_config.min_width = 1;
	dev->mode_config.min_height = 1;
	dev->mode_config.max_width = dev_priv->fb_max_width;
	dev->mode_config.max_height = dev_priv->fb_max_height;

	ret = vmw_kms_init_legacy_display_system(dev_priv);
@@ -35,11 +35,6 @@
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2

/* XXX: This isn't a real hardware flag, but just a hack for kernel to
 * know about primary surfaces. Find a better way to accomplish this.
 */
#define SVGA3D_SURFACE_HINT_SCANOUT (1 << 9)

struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
@@ -579,6 +574,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,

	srf->flags = req->flags;
	srf->format = req->format;
	srf->scanout = req->scanout;
	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
@@ -604,16 +600,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
	if (unlikely(ret != 0))
		goto out_err1;

	if (srf->flags & SVGA3D_SURFACE_HINT_SCANOUT) {
		/* we should not send this flag down to hardware since
		 * its not a official one
		 */
		srf->flags &= ~SVGA3D_SURFACE_HINT_SCANOUT;
		srf->scanout = true;
	} else {
		srf->scanout = false;
	}

	if (srf->scanout &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
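The two surface hunks replace an in-band hack with an explicit field: instead of smuggling "this is a scanout surface" through a fake hardware flag bit that then had to be masked off before reaching the device, the ioctl request now carries a dedicated scanout member. A schematic before/after with simplified structs (not the driver's code):

	/* Illustrative before/after of moving a hint out of a flags word. */
	#include <stdbool.h>
	#include <stdint.h>

	#define HINT_SCANOUT (1u << 9)   /* the old fake "hardware" bit */

	struct surface { uint32_t flags; bool scanout; };

	/* Before: decode and strip the smuggled bit. */
	static void init_old(struct surface *s, uint32_t req_flags)
	{
		s->scanout = req_flags & HINT_SCANOUT;
		s->flags = req_flags & ~HINT_SCANOUT; /* keep hw flags clean */
	}

	/* After: the request says it outright. */
	static void init_new(struct surface *s, uint32_t req_flags, bool req_scanout)
	{
		s->flags = req_flags;
		s->scanout = req_scanout;
	}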
@@ -961,7 +961,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
		remaining -= 7;
		pr_devel("client 0x%p called 'target'\n", priv);
		/* if target is default */
		if (!strncmp(buf, "default", 7))
		if (!strncmp(kbuf, "default", 7))
			pdev = pci_dev_get(vga_default_device());
		else {
			if (!vga_pci_str_to_vars(curr_pos, remaining,
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>

/* include interfaces to usb layer */
#include <linux/usb.h>
@@ -31,8 +32,8 @@
#define CMD_I2C_IO_END (1<<1)

/* i2c bit delay, default is 10us -> 100kHz */
static int delay = 10;
module_param(delay, int, 0);
static unsigned short delay = 10;
module_param(delay, ushort, 0);
MODULE_PARM_DESC(delay, "bit delay in microseconds, "
		 "e.g. 10 for 100kHz (default is 100kHz)");
@@ -109,7 +110,7 @@ static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)

static u32 usb_func(struct i2c_adapter *adapter)
{
	u32 func;
	__le32 func;

	/* get functionality from adapter */
	if (usb_read(adapter, CMD_GET_FUNC, 0, 0, &func, sizeof(func)) !=
@@ -118,7 +119,7 @@ static u32 usb_func(struct i2c_adapter *adapter)
		return 0;
	}

	return func;
	return le32_to_cpu(func);
}

/* This is the actual algorithm we define */
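Declaring the buffer as __le32 and converting with le32_to_cpu() makes the wire format explicit: the device always sends little-endian, and the conversion is a no-op on little-endian hosts but a byte swap on big-endian ones (it also lets sparse flag missing conversions). A sketch of the same pattern outside this driver:

	/* Endian-explicit read of a little-endian wire value (sketch). */
	#include <linux/types.h>
	#include <linux/string.h>
	#include <asm/byteorder.h>

	static u32 read_wire_u32(const void *wire_buf)
	{
		__le32 raw;

		memcpy(&raw, wire_buf, sizeof(raw)); /* bytes as sent by device */
		return le32_to_cpu(raw);             /* host byte order */
	}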
@@ -216,8 +217,7 @@ static int i2c_tiny_usb_probe(struct usb_interface *interface,
		 "i2c-tiny-usb at bus %03d device %03d",
		 dev->usb_dev->bus->busnum, dev->usb_dev->devnum);

	if (usb_write(&dev->adapter, CMD_SET_DELAY,
		      cpu_to_le16(delay), 0, NULL, 0) != 0) {
	if (usb_write(&dev->adapter, CMD_SET_DELAY, delay, 0, NULL, 0) != 0) {
		dev_err(&dev->adapter.dev,
			"failure setting delay to %dus\n", delay);
		retval = -EIO;
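The probe hunk is the complementary endianness fix: usb_control_msg() already converts its value and index arguments to little-endian internally when building the setup packet, so pre-swapping delay with cpu_to_le16() swapped it twice on big-endian hosts and programmed a garbage delay. Schematically (a hedged sketch, not the driver's code; the request id is assumed):

	#include <linux/usb.h>

	/* usb_control_msg() takes native-endian value/index and converts
	 * them itself, so callers must not pre-swap. */
	static int set_delay_sketch(struct usb_device *udev, u16 delay_us)
	{
		/* WRONG (old code): swapped here AND inside usb_control_msg(),
		 * i.e. a double swap on big-endian:
		 *   usb_control_msg(..., cpu_to_le16(delay_us), 0, ...);
		 */

		/* RIGHT (new code): pass the native value, convert once. */
		return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				       0x10 /* assumed request id */,
				       USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
				       delay_us, 0, NULL, 0, 1000);
	}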
@@ -2115,9 +2115,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
	if (ret)
		goto err1;

	if (cma_loopback_addr(addr)) {
		ret = cma_bind_loopback(id_priv);
	} else if (!cma_zero_addr(addr)) {
	if (!cma_any_addr(addr)) {
		ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
		if (ret)
			goto err1;