Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:
 "Misc fixes:

   - unwinder fixes

   - AMD CPU topology enumeration fixes

   - microcode loader fixes

   - x86 embedded platform fixes

   - fix for a bootup crash that may trigger when clearcpuid= is used
     with invalid values"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mpx: Use compatible types in comparison to fix sparse error
  x86/tsc: Add the Intel Denverton Processor to native_calibrate_tsc()
  x86/entry: Fix the end of the stack for newly forked tasks
  x86/unwind: Include __schedule() in stack traces
  x86/unwind: Disable KASAN checks for non-current tasks
  x86/unwind: Silence warnings for non-current tasks
  x86/microcode/intel: Use correct buffer size for saving microcode data
  x86/microcode/intel: Fix allocation size of struct ucode_patch
  x86/microcode/intel: Add a helper which gives the microcode revision
  x86/microcode: Use native CPUID to tickle out microcode revision
  x86/CPU: Add native CPUID variants returning a single datum
  x86/boot: Add missing declaration of string functions
  x86/CPU/AMD: Fix Bulldozer topology
  x86/platform/intel-mid: Rename 'spidev' to 'mrfld_spidev'
  x86/cpu: Fix typo in the comment for Anniedale
  x86/cpu: Fix bootup crashes by sanitizing the argument of the 'clearcpuid=' command-line option
@@ -14,6 +14,7 @@
 #include <linux/types.h>
 #include "ctype.h"
+#include "string.h"

 int memcmp(const void *s1, const void *s2, size_t len)
 {
@@ -18,4 +18,13 @@ int memcmp(const void *s1, const void *s2, size_t len);
 #define memset(d,c,l) __builtin_memset(d,c,l)
 #define memcmp	__builtin_memcmp

+extern int strcmp(const char *str1, const char *str2);
+extern int strncmp(const char *cs, const char *ct, size_t count);
+extern size_t strlen(const char *s);
+extern char *strstr(const char *s1, const char *s2);
+extern size_t strnlen(const char *s, size_t maxlen);
+extern unsigned int atou(const char *s);
+extern unsigned long long simple_strtoull(const char *cp, char **endp,
+					   unsigned int base);
+
 #endif /* BOOT_STRING_H */
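Aside: a quick illustration of how the declarations added above could be used by
the boot stub's option parsing. parse_mem_option() is an invented example, not
part of this patch; only simple_strtoull() comes from the header.

	static unsigned long long parse_mem_option(const char *arg)
	{
		char *end;
		unsigned long long val = simple_strtoull(arg, &end, 0);

		/* no digits parsed at all */
		return (end == arg) ? 0 : val;
	}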
@@ -254,23 +254,6 @@ ENTRY(__switch_to_asm)
 	jmp	__switch_to
 END(__switch_to_asm)

-/*
- * The unwinder expects the last frame on the stack to always be at the same
- * offset from the end of the page, which allows it to validate the stack.
- * Calling schedule_tail() directly would break that convention because its an
- * asmlinkage function so its argument has to be pushed on the stack. This
- * wrapper creates a proper "end of stack" frame header before the call.
- */
-ENTRY(schedule_tail_wrapper)
-	FRAME_BEGIN
-
-	pushl	%eax
-	call	schedule_tail
-	popl	%eax
-
-	FRAME_END
-	ret
-ENDPROC(schedule_tail_wrapper)
 /*
  * A newly forked process directly context switches into this address.
  *
@@ -279,15 +262,24 @@ ENDPROC(schedule_tail_wrapper)
  * edi: kernel thread arg
  */
 ENTRY(ret_from_fork)
-	call	schedule_tail_wrapper
+	FRAME_BEGIN			/* help unwinder find end of stack */
+
+	/*
+	 * schedule_tail() is asmlinkage so we have to put its 'prev' argument
+	 * on the stack.
+	 */
+	pushl	%eax
+	call	schedule_tail
+	popl	%eax

 	testl	%ebx, %ebx
 	jnz	1f		/* kernel threads are uncommon */

 2:
 	/* When we fork, we trace the syscall return in the child, too. */
-	movl	%esp, %eax
+	leal	FRAME_OFFSET(%esp), %eax
 	call	syscall_return_slowpath
+	FRAME_END
 	jmp	restore_all

 	/* kernel thread */
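Aside: a simplified sketch (not the unwinder's actual code) of the convention
the comment above describes. The frame created by FRAME_BEGIN for the first
call sits at a fixed offset below the task's pt_regs, which is how the unwinder
recognises the final, "end of stack" frame. regs_base stands in for
task_pt_regs(task) and is an assumption of this sketch.

	static int is_end_of_stack_frame(unsigned long bp, unsigned long regs_base)
	{
		/* a frame header is a saved bp plus a return address */
		return bp == regs_base - 2 * sizeof(long);
	}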
@@ -36,6 +36,7 @@
 #include <asm/smap.h>
 #include <asm/pgtable_types.h>
 #include <asm/export.h>
+#include <asm/frame.h>
 #include <linux/err.h>

 .code64
@@ -408,17 +409,19 @@ END(__switch_to_asm)
  * r12: kernel thread arg
  */
 ENTRY(ret_from_fork)
+	FRAME_BEGIN			/* help unwinder find end of stack */
 	movq	%rax, %rdi
-	call	schedule_tail			/* rdi: 'prev' task parameter */
+	call	schedule_tail			/* rdi: 'prev' task parameter */

-	testq	%rbx, %rbx			/* from kernel_thread? */
-	jnz	1f				/* kernel threads are uncommon */
+	testq	%rbx, %rbx			/* from kernel_thread? */
+	jnz	1f				/* kernel threads are uncommon */

 2:
-	movq	%rsp, %rdi
+	leaq	FRAME_OFFSET(%rsp), %rdi	/* pt_regs pointer */
 	call	syscall_return_slowpath	/* returns with IRQs disabled */
 	TRACE_IRQS_ON			/* user mode is traced as IRQS on */
 	SWAPGS
+	FRAME_END
 	jmp	restore_regs_and_iret

 1:
@@ -57,7 +57,7 @@
 #define INTEL_FAM6_ATOM_SILVERMONT2	0x4D /* Avaton/Rangely */
 #define INTEL_FAM6_ATOM_AIRMONT		0x4C /* CherryTrail / Braswell */
 #define INTEL_FAM6_ATOM_MERRIFIELD	0x4A /* Tangier */
-#define INTEL_FAM6_ATOM_MOOREFIELD	0x5A /* Annidale */
+#define INTEL_FAM6_ATOM_MOOREFIELD	0x5A /* Anniedale */
 #define INTEL_FAM6_ATOM_GOLDMONT	0x5C
 #define INTEL_FAM6_ATOM_DENVERTON	0x5F /* Goldmont Microserver */
@@ -52,6 +52,21 @@ struct extended_sigtable {

 #define exttable_size(et) ((et)->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE)

+static inline u32 intel_get_microcode_revision(void)
+{
+	u32 rev, dummy;
+
+	native_wrmsrl(MSR_IA32_UCODE_REV, 0);
+
+	/* As documented in the SDM: Do a CPUID 1 here */
+	native_cpuid_eax(1);
+
+	/* get the current revision from MSR 0x8B */
+	native_rdmsr(MSR_IA32_UCODE_REV, dummy, rev);
+
+	return rev;
+}
+
 #ifdef CONFIG_MICROCODE_INTEL
 extern void __init load_ucode_intel_bsp(void);
 extern void load_ucode_intel_ap(void);
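Aside: a minimal usage sketch for the new helper. It mirrors the call sites
converted later in this diff; check_applied() itself is an invented name.

	static int check_applied(struct microcode_intel *mc)
	{
		u32 rev = intel_get_microcode_revision();

		/* the loader treats a mismatch with the patch header as a failed update */
		return rev == mc->hdr.rev ? 0 : -1;
	}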
@@ -219,6 +219,24 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
 	    : "memory");
 }

+#define native_cpuid_reg(reg)					\
+static inline unsigned int native_cpuid_##reg(unsigned int op)	\
+{								\
+	unsigned int eax = op, ebx, ecx = 0, edx;		\
+								\
+	native_cpuid(&eax, &ebx, &ecx, &edx);			\
+								\
+	return reg;						\
+}
+
+/*
+ * Native CPUID functions returning a single datum.
+ */
+native_cpuid_reg(eax)
+native_cpuid_reg(ebx)
+native_cpuid_reg(ecx)
+native_cpuid_reg(edx)
+
 static inline void load_cr3(pgd_t *pgdir)
 {
 	write_cr3(__pa(pgdir));
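Aside: for illustration, native_cpuid_reg(eax) above expands to roughly the
following helper (a sketch of the expansion, not a verbatim preprocessor dump):

	static inline unsigned int native_cpuid_eax(unsigned int op)
	{
		unsigned int eax = op, ebx, ecx = 0, edx;

		native_cpuid(&eax, &ebx, &ecx, &edx);

		return eax;
	}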
@@ -58,7 +58,7 @@ get_frame_pointer(struct task_struct *task, struct pt_regs *regs)
 	if (task == current)
 		return __builtin_frame_address(0);

-	return (unsigned long *)((struct inactive_task_frame *)task->thread.sp)->bp;
+	return &((struct inactive_task_frame *)task->thread.sp)->bp;
 }
 #else
 static inline unsigned long *
@@ -36,7 +36,10 @@ static inline void prepare_switch_to(struct task_struct *prev,

 asmlinkage void ret_from_fork(void);

-/* data that is pointed to by thread.sp */
+/*
+ * This is the structure pointed to by thread.sp for an inactive task. The
+ * order of the fields must match the code in __switch_to_asm().
+ */
 struct inactive_task_frame {
 #ifdef CONFIG_X86_64
 	unsigned long r15;
@@ -48,6 +51,11 @@ struct inactive_task_frame {
 	unsigned long di;
 #endif
 	unsigned long bx;
+
+	/*
+	 * These two fields must be together. They form a stack frame header,
+	 * needed by get_frame_pointer().
+	 */
 	unsigned long bp;
 	unsigned long ret_addr;
 };
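Aside: a sketch of how the frame header described above is consumed. It
restates get_frame_pointer() from the earlier hunk in simplified form;
first_frame_of() is an invented name.

	static unsigned long *first_frame_of(struct task_struct *task)
	{
		struct inactive_task_frame *frame = (void *)task->thread.sp;

		/* &frame->bp points at a {saved bp, return address} pair */
		return &frame->bp;
	}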
@@ -309,15 +309,8 @@ static void amd_get_topology(struct cpuinfo_x86 *c)

 	/* get information required for multi-node processors */
 	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
-		u32 eax, ebx, ecx, edx;
-
-		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
-		node_id = ecx & 7;
-
-		/* get compute unit information */
-		smp_num_siblings = ((ebx >> 8) & 3) + 1;
-		c->x86_max_cores /= smp_num_siblings;
-		c->cpu_core_id = ebx & 0xff;
+		node_id = cpuid_ecx(0x8000001e) & 7;

 		/*
 		 * We may have multiple LLCs if L3 caches exist, so check if we
@@ -1221,7 +1221,7 @@ static __init int setup_disablecpuid(char *arg)
 {
 	int bit;

-	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
+	if (get_option(&arg, &bit) && bit >= 0 && bit < NCAPINTS * 32)
 		setup_clear_cpu_cap(bit);
 	else
 		return 0;
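Aside: a standalone illustration (not kernel code) of the bug this hunk fixes.
get_option() parses a signed integer, so a negative value such as
"clearcpuid=-1" passed the old 'bit < NCAPINTS*32' test and was then used to
index the capability bitmap out of bounds. The NCAPINTS value below is an
assumption made for the example.

	#include <stdio.h>

	#define NCAPINTS 18	/* assumed number of capability words */

	static int old_check(int bit) { return bit < NCAPINTS * 32; }
	static int new_check(int bit) { return bit >= 0 && bit < NCAPINTS * 32; }

	int main(void)
	{
		printf("bit=-1: old=%d new=%d\n", old_check(-1), new_check(-1));
		return 0;
	}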
@@ -14,6 +14,7 @@
 #include <asm/bugs.h>
 #include <asm/cpu.h>
 #include <asm/intel-family.h>
+#include <asm/microcode_intel.h>

 #ifdef CONFIG_X86_64
 #include <linux/topology.h>
@@ -78,14 +79,8 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

-	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) {
-		unsigned lower_word;
-
-		wrmsr(MSR_IA32_UCODE_REV, 0, 0);
-		/* Required by the SDM */
-		sync_core();
-		rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode);
-	}
+	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
+		c->microcode = intel_get_microcode_revision();

 	/*
 	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
@@ -150,7 +150,7 @@ static struct ucode_patch *__alloc_microcode_buf(void *data, unsigned int size)
 {
 	struct ucode_patch *p;

-	p = kzalloc(size, GFP_KERNEL);
+	p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL);
 	if (!p)
 		return ERR_PTR(-ENOMEM);
@@ -368,26 +368,6 @@ scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save)
 	return patch;
 }

-static void cpuid_1(void)
-{
-	/*
-	 * According to the Intel SDM, Volume 3, 9.11.7:
-	 *
-	 *   CPUID returns a value in a model specific register in
-	 *   addition to its usual register return values. The
-	 *   semantics of CPUID cause it to deposit an update ID value
-	 *   in the 64-bit model-specific register at address 08BH
-	 *   (IA32_BIOS_SIGN_ID). If no update is present in the
-	 *   processor, the value in the MSR remains unmodified.
-	 *
-	 * Use native_cpuid -- this code runs very early and we don't
-	 * want to mess with paravirt.
-	 */
-	unsigned int eax = 1, ebx, ecx = 0, edx;
-
-	native_cpuid(&eax, &ebx, &ecx, &edx);
-}
-
 static int collect_cpu_info_early(struct ucode_cpu_info *uci)
 {
 	unsigned int val[2];
@@ -410,15 +390,8 @@ static int collect_cpu_info_early(struct ucode_cpu_info *uci)
 		native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
 		csig.pf = 1 << ((val[1] >> 18) & 7);
 	}
-	native_wrmsrl(MSR_IA32_UCODE_REV, 0);
-
-	/* As documented in the SDM: Do a CPUID 1 here */
-	cpuid_1();
-
-	/* get the current revision from MSR 0x8B */
-	native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);

-	csig.rev = val[1];
+	csig.rev = intel_get_microcode_revision();

 	uci->cpu_sig = csig;
 	uci->valid = 1;
@@ -602,7 +575,7 @@ static inline void print_ucode(struct ucode_cpu_info *uci)
 static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
 {
 	struct microcode_intel *mc;
-	unsigned int val[2];
+	u32 rev;

 	mc = uci->mc;
 	if (!mc)
@@ -610,21 +583,16 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)

 	/* write microcode via MSR 0x79 */
 	native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
-	native_wrmsrl(MSR_IA32_UCODE_REV, 0);
-
-	/* As documented in the SDM: Do a CPUID 1 here */
-	cpuid_1();
-
-	/* get the current revision from MSR 0x8B */
-	native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
-	if (val[1] != mc->hdr.rev)
+
+	rev = intel_get_microcode_revision();
+	if (rev != mc->hdr.rev)
 		return -1;

 #ifdef CONFIG_X86_64
 	/* Flush global tlb. This is precaution. */
 	flush_tlb_early();
 #endif
-	uci->cpu_sig.rev = val[1];
+	uci->cpu_sig.rev = rev;

 	if (early)
 		print_ucode(uci);
@@ -804,8 +772,8 @@ static int apply_microcode_intel(int cpu)
 	struct microcode_intel *mc;
 	struct ucode_cpu_info *uci;
 	struct cpuinfo_x86 *c;
-	unsigned int val[2];
 	static int prev_rev;
+	u32 rev;

 	/* We should bind the task to the CPU */
 	if (WARN_ON(raw_smp_processor_id() != cpu))
@@ -822,33 +790,28 @@ static int apply_microcode_intel(int cpu)

 	/* write microcode via MSR 0x79 */
 	wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
-	wrmsrl(MSR_IA32_UCODE_REV, 0);
-
-	/* As documented in the SDM: Do a CPUID 1 here */
-	cpuid_1();
-
-	/* get the current revision from MSR 0x8B */
-	rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
-
-	if (val[1] != mc->hdr.rev) {
+
+	rev = intel_get_microcode_revision();
+
+	if (rev != mc->hdr.rev) {
 		pr_err("CPU%d update to revision 0x%x failed\n",
 		       cpu, mc->hdr.rev);
 		return -1;
 	}

-	if (val[1] != prev_rev) {
+	if (rev != prev_rev) {
 		pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n",
-			val[1],
+			rev,
 			mc->hdr.date & 0xffff,
 			mc->hdr.date >> 24,
 			(mc->hdr.date >> 16) & 0xff);
-		prev_rev = val[1];
+		prev_rev = rev;
 	}

 	c = &cpu_data(cpu);

-	uci->cpu_sig.rev = val[1];
-	c->microcode = val[1];
+	uci->cpu_sig.rev = rev;
+	c->microcode = rev;

 	return 0;
 }
@@ -860,7 +823,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 	u8 *ucode_ptr = data, *new_mc = NULL, *mc = NULL;
 	int new_rev = uci->cpu_sig.rev;
 	unsigned int leftover = size;
-	unsigned int curr_mc_size = 0;
+	unsigned int curr_mc_size = 0, new_mc_size = 0;
 	unsigned int csig, cpf;

 	while (leftover) {
@@ -901,6 +864,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 			vfree(new_mc);
 			new_rev = mc_header.rev;
 			new_mc  = mc;
+			new_mc_size = mc_size;
 			mc = NULL;	/* trigger new vmalloc */
 		}

@@ -926,7 +890,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 	 * permanent memory. So it will be loaded early when a CPU is hot added
 	 * or resumes.
 	 */
-	save_mc_for_early(new_mc, curr_mc_size);
+	save_mc_for_early(new_mc, new_mc_size);

 	pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
 		 cpu, new_rev, uci->cpu_sig.rev);
@@ -694,6 +694,7 @@ unsigned long native_calibrate_tsc(void)
 		crystal_khz = 24000;	/* 24.0 MHz */
 		break;
 	case INTEL_FAM6_SKYLAKE_X:
+	case INTEL_FAM6_ATOM_DENVERTON:
 		crystal_khz = 25000;	/* 25.0 MHz */
 		break;
 	case INTEL_FAM6_ATOM_GOLDMONT:
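Aside: a sketch of how the crystal frequency chosen above feeds TSC
calibration. native_calibrate_tsc() reads the TSC/crystal clock ratio from
CPUID leaf 0x15 (denominator in EAX, numerator in EBX); the helper below is
illustrative only.

	static unsigned long tsc_khz_from_crystal(unsigned long crystal_khz,
						  unsigned int num, unsigned int den)
	{
		return den ? crystal_khz * num / den : 0;
	}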
@@ -6,6 +6,21 @@

 #define FRAME_HEADER_SIZE (sizeof(long) * 2)

+/*
+ * This disables KASAN checking when reading a value from another task's stack,
+ * since the other task could be running on another CPU and could have poisoned
+ * the stack in the meantime.
+ */
+#define READ_ONCE_TASK_STACK(task, x)			\
+({							\
+	unsigned long val;				\
+	if (task == current)				\
+		val = READ_ONCE(x);			\
+	else						\
+		val = READ_ONCE_NOCHECK(x);		\
+	val;						\
+})
+
 static void unwind_dump(struct unwind_state *state, unsigned long *sp)
 {
 	static bool dumped_before = false;
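Aside: a minimal usage sketch for the macro added above. The next two hunks
convert the unwinder's stack reads to it; read_stack_slot() is an invented
wrapper.

	static unsigned long read_stack_slot(struct unwind_state *state,
					     unsigned long *addr)
	{
		/* KASAN checking is skipped when state->task is not 'current' */
		return READ_ONCE_TASK_STACK(state->task, *addr);
	}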
@@ -48,7 +63,8 @@ unsigned long unwind_get_return_address(struct unwind_state *state)
 	if (state->regs && user_mode(state->regs))
 		return 0;

-	addr = ftrace_graph_ret_addr(state->task, &state->graph_idx, *addr_p,
+	addr = READ_ONCE_TASK_STACK(state->task, *addr_p);
+	addr = ftrace_graph_ret_addr(state->task, &state->graph_idx, addr,
 				     addr_p);

 	return __kernel_text_address(addr) ? addr : 0;
@@ -162,7 +178,7 @@ bool unwind_next_frame(struct unwind_state *state)
 	if (state->regs)
 		next_bp = (unsigned long *)state->regs->bp;
 	else
-		next_bp = (unsigned long *)*state->bp;
+		next_bp = (unsigned long *)READ_ONCE_TASK_STACK(state->task, *state->bp);

 	/* is the next frame pointer an encoded pointer to pt_regs? */
 	regs = decode_frame_pointer(next_bp);
@@ -207,6 +223,16 @@ bool unwind_next_frame(struct unwind_state *state)
 	return true;

 bad_address:
+	/*
+	 * When unwinding a non-current task, the task might actually be
+	 * running on another CPU, in which case it could be modifying its
+	 * stack while we're reading it. This is generally not a problem and
+	 * can be ignored as long as the caller understands that unwinding
+	 * another task will not always succeed.
+	 */
+	if (state->task != current)
+		goto the_end;
+
 	if (state->regs) {
 		printk_deferred_once(KERN_WARNING
 			"WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n",
@@ -293,7 +293,7 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs)
 	 * We were not able to extract an address from the instruction,
 	 * probably because there was something invalid in it.
 	 */
-	if (info->si_addr == (void *)-1) {
+	if (info->si_addr == (void __user *)-1) {
 		err = -EINVAL;
 		goto err_out;
 	}
@@ -15,7 +15,7 @@ obj-$(subst m,y,$(CONFIG_INTEL_MID_POWER_BUTTON)) += platform_msic_power_btn.o
 obj-$(subst m,y,$(CONFIG_GPIO_INTEL_PMIC)) += platform_pmic_gpio.o
 obj-$(subst m,y,$(CONFIG_INTEL_MFLD_THERMAL)) += platform_msic_thermal.o
 # SPI Devices
-obj-$(subst m,y,$(CONFIG_SPI_SPIDEV)) += platform_spidev.o
+obj-$(subst m,y,$(CONFIG_SPI_SPIDEV)) += platform_mrfld_spidev.o
 # I2C Devices
 obj-$(subst m,y,$(CONFIG_SENSORS_EMC1403)) += platform_emc1403.o
 obj-$(subst m,y,$(CONFIG_SENSORS_LIS3LV02D)) += platform_lis331.o
@@ -11,6 +11,7 @@
  * of the License.
  */

+#include <linux/err.h>
 #include <linux/init.h>
 #include <linux/sfi.h>
 #include <linux/spi/pxa2xx_spi.h>
@@ -34,6 +35,9 @@ static void __init *spidev_platform_data(void *info)
 {
 	struct spi_board_info *spi_info = info;

+	if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
+		return ERR_PTR(-ENODEV);
+
 	spi_info->mode = SPI_MODE_0;
 	spi_info->controller_data = &spidev_spi_chip;