Merge branch 'linus' into timers/urgent
Merge in Linus's branch which already has timers/core merged.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 1f815faec4
@@ -162,9 +162,9 @@ over a rather long period of time, but improvements are always welcome!
when publicizing a pointer to a structure that can
be traversed by an RCU read-side critical section.

5. If call_rcu(), or a related primitive such as call_rcu_bh() or
call_rcu_sched(), is used, the callback function must be
written to be called from softirq context. In particular,
5. If call_rcu(), or a related primitive such as call_rcu_bh(),
call_rcu_sched(), or call_srcu() is used, the callback function
must be written to be called from softirq context. In particular,
it cannot block.

6. Since synchronize_rcu() can block, it cannot be called from
@@ -202,11 +202,12 @@ over a rather long period of time, but improvements are always welcome!
updater uses call_rcu_sched() or synchronize_sched(), then
the corresponding readers must disable preemption, possibly
by calling rcu_read_lock_sched() and rcu_read_unlock_sched().
If the updater uses synchronize_srcu(), the the corresponding
readers must use srcu_read_lock() and srcu_read_unlock(),
and with the same srcu_struct. The rules for the expedited
primitives are the same as for their non-expedited counterparts.
Mixing things up will result in confusion and broken kernels.
If the updater uses synchronize_srcu() or call_srcu(),
the the corresponding readers must use srcu_read_lock() and
srcu_read_unlock(), and with the same srcu_struct. The rules for
the expedited primitives are the same as for their non-expedited
counterparts. Mixing things up will result in confusion and
broken kernels.

One exception to this rule: rcu_read_lock() and rcu_read_unlock()
may be substituted for rcu_read_lock_bh() and rcu_read_unlock_bh()
@@ -333,14 +334,14 @@ over a rather long period of time, but improvements are always welcome!
victim CPU from ever going offline.)

14. SRCU (srcu_read_lock(), srcu_read_unlock(), srcu_dereference(),
synchronize_srcu(), and synchronize_srcu_expedited()) may only
be invoked from process context. Unlike other forms of RCU, it
-is- permissible to block in an SRCU read-side critical section
(demarked by srcu_read_lock() and srcu_read_unlock()), hence the
"SRCU": "sleepable RCU". Please note that if you don't need
to sleep in read-side critical sections, you should be using
RCU rather than SRCU, because RCU is almost always faster and
easier to use than is SRCU.
synchronize_srcu(), synchronize_srcu_expedited(), and call_srcu())
may only be invoked from process context. Unlike other forms of
RCU, it -is- permissible to block in an SRCU read-side critical
section (demarked by srcu_read_lock() and srcu_read_unlock()),
hence the "SRCU": "sleepable RCU". Please note that if you
don't need to sleep in read-side critical sections, you should be
using RCU rather than SRCU, because RCU is almost always faster
and easier to use than is SRCU.

If you need to enter your read-side critical section in a
hardirq or exception handler, and then exit that same read-side
@@ -353,8 +354,8 @@ over a rather long period of time, but improvements are always welcome!
cleanup_srcu_struct(). These are passed a "struct srcu_struct"
that defines the scope of a given SRCU domain. Once initialized,
the srcu_struct is passed to srcu_read_lock(), srcu_read_unlock()
synchronize_srcu(), and synchronize_srcu_expedited(). A given
synchronize_srcu() waits only for SRCU read-side critical
synchronize_srcu(), synchronize_srcu_expedited(), and call_srcu().
A given synchronize_srcu() waits only for SRCU read-side critical
sections governed by srcu_read_lock() and srcu_read_unlock()
calls that have been passed the same srcu_struct. This property
is what makes sleeping read-side critical sections tolerable --
@@ -374,7 +375,7 @@ over a rather long period of time, but improvements are always welcome!
requiring SRCU's read-side deadlock immunity or low read-side
realtime latency.

Note that, rcu_assign_pointer() relates to SRCU just as they do
Note that, rcu_assign_pointer() relates to SRCU just as it does
to other forms of RCU.

15. The whole point of call_rcu(), synchronize_rcu(), and friends

@@ -79,8 +79,6 @@ complete. Pseudo-code using rcu_barrier() is as follows:
2. Execute rcu_barrier().
3. Allow the module to be unloaded.

Quick Quiz #1: Why is there no srcu_barrier()?

The rcutorture module makes use of rcu_barrier in its exit function
as follows:

@@ -162,7 +160,7 @@ for any pre-existing callbacks to complete.
Then lines 55-62 print status and do operation-specific cleanup, and
then return, permitting the module-unload operation to be completed.

Quick Quiz #2: Is there any other situation where rcu_barrier() might
Quick Quiz #1: Is there any other situation where rcu_barrier() might
be required?

Your module might have additional complications. For example, if your
@@ -242,7 +240,7 @@ reaches zero, as follows:
4 complete(&rcu_barrier_completion);
5 }

Quick Quiz #3: What happens if CPU 0's rcu_barrier_func() executes
Quick Quiz #2: What happens if CPU 0's rcu_barrier_func() executes
immediately (thus incrementing rcu_barrier_cpu_count to the
value one), but the other CPU's rcu_barrier_func() invocations
are delayed for a full grace period? Couldn't this result in
@@ -259,12 +257,7 @@ so that your module may be safely unloaded.

Answers to Quick Quizzes

Quick Quiz #1: Why is there no srcu_barrier()?

Answer: Since there is no call_srcu(), there can be no outstanding SRCU
callbacks. Therefore, there is no need to wait for them.

Quick Quiz #2: Is there any other situation where rcu_barrier() might
Quick Quiz #1: Is there any other situation where rcu_barrier() might
be required?

Answer: Interestingly enough, rcu_barrier() was not originally
@@ -278,7 +271,7 @@ Answer: Interestingly enough, rcu_barrier() was not originally
implementing rcutorture, and found that rcu_barrier() solves
this problem as well.

Quick Quiz #3: What happens if CPU 0's rcu_barrier_func() executes
Quick Quiz #2: What happens if CPU 0's rcu_barrier_func() executes
immediately (thus incrementing rcu_barrier_cpu_count to the
value one), but the other CPU's rcu_barrier_func() invocations
are delayed for a full grace period? Couldn't this result in

@@ -174,11 +174,20 @@ torture_type The type of RCU to test, with string values as follows:
and synchronize_rcu_bh_expedited().

"srcu": srcu_read_lock(), srcu_read_unlock() and
call_srcu().

"srcu_sync": srcu_read_lock(), srcu_read_unlock() and
synchronize_srcu().

"srcu_expedited": srcu_read_lock(), srcu_read_unlock() and
synchronize_srcu_expedited().

"srcu_raw": srcu_read_lock_raw(), srcu_read_unlock_raw(),
and call_srcu().

"srcu_raw_sync": srcu_read_lock_raw(), srcu_read_unlock_raw(),
and synchronize_srcu().

"sched": preempt_disable(), preempt_enable(), and
call_rcu_sched().

@@ -833,9 +833,9 @@ sched: Critical sections Grace period Barrier

SRCU: Critical sections Grace period Barrier

srcu_read_lock synchronize_srcu N/A
srcu_read_unlock synchronize_srcu_expedited
srcu_read_lock_raw
srcu_read_lock synchronize_srcu srcu_barrier
srcu_read_unlock call_srcu
srcu_read_lock_raw synchronize_srcu_expedited
srcu_read_unlock_raw
srcu_dereference

@@ -1626,3 +1626,5 @@ MX6Q_PAD_SD2_DAT3__PCIE_CTRL_MUX_11 1587
MX6Q_PAD_SD2_DAT3__GPIO_1_12 1588
MX6Q_PAD_SD2_DAT3__SJC_DONE 1589
MX6Q_PAD_SD2_DAT3__ANATOP_TESTO_3 1590
MX6Q_PAD_ENET_RX_ER__ANATOP_USBOTG_ID 1591
MX6Q_PAD_GPIO_1__ANATOP_USBOTG_ID 1592

@@ -86,7 +86,7 @@ There is also a gitweb interface available at
http://www.kernel.org/git/?p=utils/kernel/kexec/kexec-tools.git

More information about kexec-tools can be found at
http://www.kernel.org/pub/linux/utils/kernel/kexec/README.html
http://horms.net/projects/kexec/

3) Unpack the tarball with the tar command, as follows:

@@ -2367,6 +2367,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
Set maximum number of finished RCU callbacks to process
in one batch.

rcutree.fanout_leaf= [KNL,BOOT]
Increase the number of CPUs assigned to each
leaf rcu_node structure. Useful for very large
systems.

rcutree.qhimark= [KNL,BOOT]
Set threshold of queued
RCU callbacks over which batch limiting is disabled.

@@ -3433,13 +3433,14 @@ S: Supported
F: drivers/idle/i7300_idle.c

IEEE 802.15.4 SUBSYSTEM
M: Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
M: Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
M: Sergey Lapin <slapin@ossfans.org>
L: linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers)
W: http://apps.sourceforge.net/trac/linux-zigbee
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git
S: Maintained
F: net/ieee802154/
F: net/mac802154/
F: drivers/ieee802154/

IIO SUBSYSTEM AND DRIVERS
@@ -5564,7 +5565,7 @@ F: Documentation/networking/LICENSE.qla3xxx
F: drivers/net/ethernet/qlogic/qla3xxx.*

QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
M: Anirban Chakraborty <anirban.chakraborty@qlogic.com>
M: Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
M: Sony Chacko <sony.chacko@qlogic.com>
M: linux-driver@qlogic.com
L: netdev@vger.kernel.org
@@ -5572,7 +5573,6 @@ S: Supported
F: drivers/net/ethernet/qlogic/qlcnic/

QLOGIC QLGE 10Gb ETHERNET DRIVER
M: Anirban Chakraborty <anirban.chakraborty@qlogic.com>
M: Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
M: Ron Mercer <ron.mercer@qlogic.com>
M: linux-driver@qlogic.com

Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 5
SUBLEVEL = 0
EXTRAVERSION = -rc7
EXTRAVERSION =
NAME = Saber-toothed Squirrel

# *DOCUMENTATION*

@ -43,8 +43,8 @@ gic: interrupt-controller@ec801000 {
|
|||
|
||||
pmu {
|
||||
compatible = "arm,cortex-a9-pmu";
|
||||
interrupts = <0 8 0x04
|
||||
0 9 0x04>;
|
||||
interrupts = <0 6 0x04
|
||||
0 7 0x04>;
|
||||
};
|
||||
|
||||
L2: l2-cache {
|
||||
|
@ -119,8 +119,8 @@ fsmc: flash@b0000000 {
|
|||
gmac0: eth@e2000000 {
|
||||
compatible = "st,spear600-gmac";
|
||||
reg = <0xe2000000 0x8000>;
|
||||
interrupts = <0 23 0x4
|
||||
0 24 0x4>;
|
||||
interrupts = <0 33 0x4
|
||||
0 34 0x4>;
|
||||
interrupt-names = "macirq", "eth_wake_irq";
|
||||
status = "disabled";
|
||||
};
|
||||
|
@ -202,6 +202,7 @@ gpio1: gpio@e0680000 {
|
|||
kbd@e0300000 {
|
||||
compatible = "st,spear300-kbd";
|
||||
reg = <0xe0300000 0x1000>;
|
||||
interrupts = <0 52 0x4>;
|
||||
status = "disabled";
|
||||
};
|
||||
|
||||
|
@ -224,7 +225,7 @@ rtc@e0580000 {
|
|||
serial@e0000000 {
|
||||
compatible = "arm,pl011", "arm,primecell";
|
||||
reg = <0xe0000000 0x1000>;
|
||||
interrupts = <0 36 0x4>;
|
||||
interrupts = <0 35 0x4>;
|
||||
status = "disabled";
|
||||
};
|
||||
|
||||
|
|
|
@ -15,8 +15,8 @@
|
|||
/include/ "spear320.dtsi"
|
||||
|
||||
/ {
|
||||
model = "ST SPEAr300 Evaluation Board";
|
||||
compatible = "st,spear300-evb", "st,spear300";
|
||||
model = "ST SPEAr320 Evaluation Board";
|
||||
compatible = "st,spear320-evb", "st,spear320";
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
|
||||
|
@ -26,7 +26,7 @@ memory {
|
|||
|
||||
ahb {
|
||||
pinmux@b3000000 {
|
||||
st,pinmux-mode = <3>;
|
||||
st,pinmux-mode = <4>;
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&state_default>;
|
||||
|
||||
|
|
|
@ -181,6 +181,7 @@ i2c@d0200000 {
|
|||
timer@f0000000 {
|
||||
compatible = "st,spear-timer";
|
||||
reg = <0xf0000000 0x400>;
|
||||
interrupt-parent = <&vic0>;
|
||||
interrupts = <16>;
|
||||
};
|
||||
};
|
||||
|
|
|
@ -87,7 +87,7 @@ void __init spear3xx_map_io(void)
|
|||
|
||||
static void __init spear3xx_timer_init(void)
|
||||
{
|
||||
char pclk_name[] = "pll3_48m_clk";
|
||||
char pclk_name[] = "pll3_clk";
|
||||
struct clk *gpt_clk, *pclk;
|
||||
|
||||
spear3xx_clk_init();
|
||||
|
|
|
@ -423,7 +423,7 @@ void __init spear6xx_map_io(void)
|
|||
|
||||
static void __init spear6xx_timer_init(void)
|
||||
{
|
||||
char pclk_name[] = "pll3_48m_clk";
|
||||
char pclk_name[] = "pll3_clk";
|
||||
struct clk *gpt_clk, *pclk;
|
||||
|
||||
spear6xx_clk_init();
|
||||
|
|
|
@ -1091,7 +1091,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t
|
|||
while (--i)
|
||||
if (pages[i])
|
||||
__free_pages(pages[i], 0);
|
||||
if (array_size < PAGE_SIZE)
|
||||
if (array_size <= PAGE_SIZE)
|
||||
kfree(pages);
|
||||
else
|
||||
vfree(pages);
|
||||
|
@ -1106,7 +1106,7 @@ static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t s
|
|||
for (i = 0; i < count; i++)
|
||||
if (pages[i])
|
||||
__free_pages(pages[i], 0);
|
||||
if (array_size < PAGE_SIZE)
|
||||
if (array_size <= PAGE_SIZE)
|
||||
kfree(pages);
|
||||
else
|
||||
vfree(pages);
|
||||
|
|
|
@ -180,9 +180,7 @@ void __cpuinit start_secondary(void)
|
|||
|
||||
notify_cpu_starting(cpu);
|
||||
|
||||
ipi_call_lock();
|
||||
set_cpu_online(cpu, true);
|
||||
ipi_call_unlock();
|
||||
|
||||
local_irq_enable();
|
||||
|
||||
|
|
|
@ -382,7 +382,6 @@ smp_callin (void)
|
|||
set_numa_node(cpu_to_node_map[cpuid]);
|
||||
set_numa_mem(local_memory_node(cpu_to_node_map[cpuid]));
|
||||
|
||||
ipi_call_lock_irq();
|
||||
spin_lock(&vector_lock);
|
||||
/* Setup the per cpu irq handling data structures */
|
||||
__setup_vector_irq(cpuid);
|
||||
|
@ -390,7 +389,6 @@ smp_callin (void)
|
|||
set_cpu_online(cpuid, true);
|
||||
per_cpu(cpu_state, cpuid) = CPU_ONLINE;
|
||||
spin_unlock(&vector_lock);
|
||||
ipi_call_unlock_irq();
|
||||
|
||||
smp_setup_percpu_timer();
|
||||
|
||||
|
|
|
@ -43,9 +43,9 @@ endif
|
|||
|
||||
OBJCOPYFLAGS += -R .empty_zero_page
|
||||
|
||||
suffix_$(CONFIG_KERNEL_GZIP) = gz
|
||||
suffix_$(CONFIG_KERNEL_BZIP2) = bz2
|
||||
suffix_$(CONFIG_KERNEL_LZMA) = lzma
|
||||
suffix-$(CONFIG_KERNEL_GZIP) = gz
|
||||
suffix-$(CONFIG_KERNEL_BZIP2) = bz2
|
||||
suffix-$(CONFIG_KERNEL_LZMA) = lzma
|
||||
|
||||
$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y) FORCE
|
||||
$(call if_changed,ld)
|
||||
|
|
|
@ -28,7 +28,7 @@ static unsigned long free_mem_ptr;
|
|||
static unsigned long free_mem_end_ptr;
|
||||
|
||||
#ifdef CONFIG_KERNEL_BZIP2
|
||||
static void *memset(void *s, int c, size_t n)
|
||||
void *memset(void *s, int c, size_t n)
|
||||
{
|
||||
char *ss = s;
|
||||
|
||||
|
@ -39,6 +39,16 @@ static void *memset(void *s, int c, size_t n)
|
|||
#endif
|
||||
|
||||
#ifdef CONFIG_KERNEL_GZIP
|
||||
void *memcpy(void *dest, const void *src, size_t n)
|
||||
{
|
||||
char *d = dest;
|
||||
const char *s = src;
|
||||
while (n--)
|
||||
*d++ = *s++;
|
||||
|
||||
return dest;
|
||||
}
|
||||
|
||||
#define BOOT_HEAP_SIZE 0x10000
|
||||
#include "../../../../lib/decompress_inflate.c"
|
||||
#endif
|
||||
|
|
|
@ -113,9 +113,6 @@ struct pt_regs {
|
|||
|
||||
#define PTRACE_OLDSETOPTIONS 21
|
||||
|
||||
/* options set using PTRACE_SETOPTIONS */
|
||||
#define PTRACE_O_TRACESYSGOOD 0x00000001
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#include <asm/m32r.h> /* M32R_PSW_BSM, M32R_PSW_BPM */
|
||||
|
|
|
@ -79,11 +79,6 @@ static __inline__ int cpu_number_map(int cpu)
|
|||
return cpu;
|
||||
}
|
||||
|
||||
static __inline__ unsigned int num_booting_cpus(void)
|
||||
{
|
||||
return cpumask_weight(&cpu_callout_map);
|
||||
}
|
||||
|
||||
extern void smp_send_timer(void);
|
||||
extern unsigned long send_IPI_mask_phys(const cpumask_t*, int, int);
|
||||
|
||||
|
|
|
@ -591,17 +591,16 @@ void user_enable_single_step(struct task_struct *child)
|
|||
|
||||
if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
|
||||
!= sizeof(insn))
|
||||
return -EIO;
|
||||
return;
|
||||
|
||||
compute_next_pc(insn, pc, &next_pc, child);
|
||||
if (next_pc & 0x80000000)
|
||||
return -EIO;
|
||||
return;
|
||||
|
||||
if (embed_debug_trap(child, next_pc))
|
||||
return -EIO;
|
||||
return;
|
||||
|
||||
invalidate_cache();
|
||||
return 0;
|
||||
}
|
||||
|
||||
void user_disable_single_step(struct task_struct *child)
|
||||
|
|
|
@ -286,7 +286,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
|
|||
case -ERESTARTNOINTR:
|
||||
regs->r0 = regs->orig_r0;
|
||||
if (prev_insn(regs) < 0)
|
||||
return -EFAULT;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -288,6 +288,7 @@ config MIPS_MALTA
|
|||
select SYS_HAS_CPU_MIPS32_R1
|
||||
select SYS_HAS_CPU_MIPS32_R2
|
||||
select SYS_HAS_CPU_MIPS64_R1
|
||||
select SYS_HAS_CPU_MIPS64_R2
|
||||
select SYS_HAS_CPU_NEVADA
|
||||
select SYS_HAS_CPU_RM7000
|
||||
select SYS_HAS_EARLY_PRINTK
|
||||
|
@ -1423,6 +1424,7 @@ config CPU_SB1
|
|||
config CPU_CAVIUM_OCTEON
|
||||
bool "Cavium Octeon processor"
|
||||
depends on SYS_HAS_CPU_CAVIUM_OCTEON
|
||||
select ARCH_SPARSEMEM_ENABLE
|
||||
select CPU_HAS_PREFETCH
|
||||
select CPU_SUPPORTS_64BIT_KERNEL
|
||||
select SYS_SUPPORTS_SMP
|
||||
|
|
|
@ -21,6 +21,7 @@ config BCM47XX_BCMA
|
|||
select BCMA
|
||||
select BCMA_HOST_SOC
|
||||
select BCMA_DRIVER_MIPS
|
||||
select BCMA_HOST_PCI if PCI
|
||||
select BCMA_DRIVER_PCI_HOSTMODE if PCI
|
||||
default y
|
||||
help
|
||||
|
|
|
@ -79,11 +79,11 @@ static int __init config_pcmcia_cs(unsigned int cs,
|
|||
return ret;
|
||||
}
|
||||
|
||||
static const __initdata struct {
|
||||
static const struct {
|
||||
unsigned int cs;
|
||||
unsigned int base;
|
||||
unsigned int size;
|
||||
} pcmcia_cs[3] = {
|
||||
} pcmcia_cs[3] __initconst = {
|
||||
{
|
||||
.cs = MPI_CS_PCMCIA_COMMON,
|
||||
.base = BCM_PCMCIA_COMMON_BASE_PA,
|
||||
|
|
|
@ -82,10 +82,6 @@ config CAVIUM_OCTEON_LOCK_L2_MEMCPY
|
|||
help
|
||||
Lock the kernel's implementation of memcpy() into L2.
|
||||
|
||||
config ARCH_SPARSEMEM_ENABLE
|
||||
def_bool y
|
||||
select SPARSEMEM_STATIC
|
||||
|
||||
config IOMMU_HELPER
|
||||
bool
|
||||
|
||||
|
|
|
@ -185,7 +185,6 @@ static void __cpuinit octeon_init_secondary(void)
|
|||
octeon_init_cvmcount();
|
||||
|
||||
octeon_irq_setup_secondary();
|
||||
raw_local_irq_enable();
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -233,6 +232,7 @@ static void octeon_smp_finish(void)
|
|||
|
||||
/* to generate the first CPU timer interrupt */
|
||||
write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
|
||||
local_irq_enable();
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -17,7 +17,6 @@
|
|||
#include <linux/irqflags.h>
|
||||
#include <linux/types.h>
|
||||
#include <asm/barrier.h>
|
||||
#include <asm/bug.h>
|
||||
#include <asm/byteorder.h> /* sigh ... */
|
||||
#include <asm/cpu-features.h>
|
||||
#include <asm/sgidefs.h>
|
||||
|
|
|
@ -8,6 +8,7 @@
|
|||
#ifndef __ASM_CMPXCHG_H
|
||||
#define __ASM_CMPXCHG_H
|
||||
|
||||
#include <linux/bug.h>
|
||||
#include <linux/irqflags.h>
|
||||
#include <asm/war.h>
|
||||
|
||||
|
|
|
@ -94,6 +94,7 @@
|
|||
#define PRID_IMP_24KE 0x9600
|
||||
#define PRID_IMP_74K 0x9700
|
||||
#define PRID_IMP_1004K 0x9900
|
||||
#define PRID_IMP_M14KC 0x9c00
|
||||
|
||||
/*
|
||||
* These are the PRID's for when 23:16 == PRID_COMP_SIBYTE
|
||||
|
@ -260,12 +261,12 @@ enum cpu_type_enum {
|
|||
*/
|
||||
CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_1004K, CPU_74K,
|
||||
CPU_ALCHEMY, CPU_PR4450, CPU_BMIPS32, CPU_BMIPS3300, CPU_BMIPS4350,
|
||||
CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC,
|
||||
CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC, CPU_M14KC,
|
||||
|
||||
/*
|
||||
* MIPS64 class processors
|
||||
*/
|
||||
CPU_5KC, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2,
|
||||
CPU_5KC, CPU_5KE, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2,
|
||||
CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS, CPU_CAVIUM_OCTEON2,
|
||||
CPU_XLR, CPU_XLP,
|
||||
|
||||
|
@ -288,7 +289,7 @@ enum cpu_type_enum {
|
|||
#define MIPS_CPU_ISA_M64R2 0x00000100
|
||||
|
||||
#define MIPS_CPU_ISA_32BIT (MIPS_CPU_ISA_I | MIPS_CPU_ISA_II | \
|
||||
MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 )
|
||||
MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2)
|
||||
#define MIPS_CPU_ISA_64BIT (MIPS_CPU_ISA_III | MIPS_CPU_ISA_IV | \
|
||||
MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)
|
||||
|
||||
|
|
|
@ -206,7 +206,7 @@
|
|||
|
||||
#define GIC_VPE_EIC_SHADOW_SET_BASE 0x0100
|
||||
#define GIC_VPE_EIC_SS(intr) \
|
||||
(GIC_EIC_SHADOW_SET_BASE + (4 * intr))
|
||||
(GIC_VPE_EIC_SHADOW_SET_BASE + (4 * intr))
|
||||
|
||||
#define GIC_VPE_EIC_VEC_BASE 0x0800
|
||||
#define GIC_VPE_EIC_VEC(intr) \
|
||||
|
@ -330,6 +330,17 @@ struct gic_intr_map {
|
|||
#define GIC_FLAG_TRANSPARENT 0x02
|
||||
};
|
||||
|
||||
/*
|
||||
* This is only used in EIC mode. This helps to figure out which
|
||||
* shared interrupts we need to process when we get a vector interrupt.
|
||||
*/
|
||||
#define GIC_MAX_SHARED_INTR 0x5
|
||||
struct gic_shared_intr_map {
|
||||
unsigned int num_shared_intr;
|
||||
unsigned int intr_list[GIC_MAX_SHARED_INTR];
|
||||
unsigned int local_intr_mask;
|
||||
};
|
||||
|
||||
extern void gic_init(unsigned long gic_base_addr,
|
||||
unsigned long gic_addrspace_size, struct gic_intr_map *intrmap,
|
||||
unsigned int intrmap_size, unsigned int irqbase);
|
||||
|
@ -338,5 +349,7 @@ extern unsigned int gic_get_int(void);
|
|||
extern void gic_send_ipi(unsigned int intr);
|
||||
extern unsigned int plat_ipi_call_int_xlate(unsigned int);
|
||||
extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
|
||||
extern void gic_bind_eic_interrupt(int irq, int set);
|
||||
extern unsigned int gic_get_timer_pending(void);
|
||||
|
||||
#endif /* _ASM_GICREGS_H */
|
||||
|
|
|
@ -251,7 +251,7 @@ struct f_format { /* FPU register format */
|
|||
unsigned int func : 6;
|
||||
};
|
||||
|
||||
struct ma_format { /* FPU multipy and add format (MIPS IV) */
|
||||
struct ma_format { /* FPU multiply and add format (MIPS IV) */
|
||||
unsigned int opcode : 6;
|
||||
unsigned int fr : 5;
|
||||
unsigned int ft : 5;
|
||||
|
@ -324,7 +324,7 @@ struct f_format { /* FPU register format */
|
|||
unsigned int opcode : 6;
|
||||
};
|
||||
|
||||
struct ma_format { /* FPU multipy and add format (MIPS IV) */
|
||||
struct ma_format { /* FPU multiply and add format (MIPS IV) */
|
||||
unsigned int fmt : 2;
|
||||
unsigned int func : 4;
|
||||
unsigned int fd : 5;
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
#include <linux/types.h>
|
||||
|
||||
#include <asm/addrspace.h>
|
||||
#include <asm/bug.h>
|
||||
#include <asm/byteorder.h>
|
||||
#include <asm/cpu.h>
|
||||
#include <asm/cpu-features.h>
|
||||
|
|
|
@ -136,6 +136,7 @@ extern void free_irqno(unsigned int irq);
|
|||
* IE7. Since R2 their number has to be read from the c0_intctl register.
|
||||
*/
|
||||
#define CP0_LEGACY_COMPARE_IRQ 7
|
||||
#define CP0_LEGACY_PERFCNT_IRQ 7
|
||||
|
||||
extern int cp0_compare_irq;
|
||||
extern int cp0_compare_irq_shift;
|
||||
|
|
|
@ -99,7 +99,7 @@
|
|||
#define CKCTL_6368_USBH_CLK_EN (1 << 15)
|
||||
#define CKCTL_6368_DISABLE_GLESS_EN (1 << 16)
|
||||
#define CKCTL_6368_NAND_CLK_EN (1 << 17)
|
||||
#define CKCTL_6368_IPSEC_CLK_EN (1 << 17)
|
||||
#define CKCTL_6368_IPSEC_CLK_EN (1 << 18)
|
||||
|
||||
#define CKCTL_6368_ALL_SAFE_EN (CKCTL_6368_SWPKT_USB_EN | \
|
||||
CKCTL_6368_SWPKT_SAR_EN | \
|
||||
|
|
|
@ -86,6 +86,16 @@
|
|||
#define GIC_CPU_INT4 4 /* . */
|
||||
#define GIC_CPU_INT5 5 /* Core Interrupt 5 */
|
||||
|
||||
/* MALTA GIC local interrupts */
|
||||
#define GIC_INT_TMR (GIC_CPU_INT5)
|
||||
#define GIC_INT_PERFCTR (GIC_CPU_INT5)
|
||||
|
||||
/* GIC constants */
|
||||
/* Add 2 to convert non-eic hw int # to eic vector # */
|
||||
#define GIC_CPU_TO_VEC_OFFSET (2)
|
||||
/* If we map an intr to pin X, GIC will actually generate vector X+1 */
|
||||
#define GIC_PIN_TO_VEC_OFFSET (1)
|
||||
|
||||
#define GIC_EXT_INTR(x) x
|
||||
|
||||
/* External Interrupts used for IPI */
|
||||
|
|
|
@ -48,7 +48,7 @@
|
|||
#define CP0_VPECONF0 $1, 2
|
||||
#define CP0_VPECONF1 $1, 3
|
||||
#define CP0_YQMASK $1, 4
|
||||
#define CP0_VPESCHEDULE $1, 5
|
||||
#define CP0_VPESCHEDULE $1, 5
|
||||
#define CP0_VPESCHEFBK $1, 6
|
||||
#define CP0_TCSTATUS $2, 1
|
||||
#define CP0_TCBIND $2, 2
|
||||
|
|
|
@ -22,7 +22,7 @@ struct task_struct;
|
|||
* switch_to(n) should switch tasks to task nr n, first
|
||||
* checking that n isn't the current task, in which case it does nothing.
|
||||
*/
|
||||
extern asmlinkage void *resume(void *last, void *next, void *next_ti);
|
||||
extern asmlinkage void *resume(void *last, void *next, void *next_ti, u32 __usedfpu);
|
||||
|
||||
extern unsigned int ll_bit;
|
||||
extern struct task_struct *ll_task;
|
||||
|
@ -66,11 +66,13 @@ do { \
|
|||
|
||||
#define switch_to(prev, next, last) \
|
||||
do { \
|
||||
u32 __usedfpu; \
|
||||
__mips_mt_fpaff_switch_to(prev); \
|
||||
if (cpu_has_dsp) \
|
||||
__save_dsp(prev); \
|
||||
__clear_software_ll_bit(); \
|
||||
(last) = resume(prev, next, task_thread_info(next)); \
|
||||
__usedfpu = test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU); \
|
||||
(last) = resume(prev, next, task_thread_info(next), __usedfpu); \
|
||||
} while (0)
|
||||
|
||||
#define finish_arch_switch(prev) \
|
||||
|
|
|
@ -60,6 +60,8 @@ struct thread_info {
|
|||
register struct thread_info *__current_thread_info __asm__("$28");
|
||||
#define current_thread_info() __current_thread_info
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
||||
/* thread information allocation */
|
||||
#if defined(CONFIG_PAGE_SIZE_4KB) && defined(CONFIG_32BIT)
|
||||
#define THREAD_SIZE_ORDER (1)
|
||||
|
@ -85,8 +87,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
|
|||
|
||||
#define STACK_WARN (THREAD_SIZE / 8)
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
||||
#define PREEMPT_ACTIVE 0x10000000
|
||||
|
||||
/*
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
* Copyright (C) xxxx the Anonymous
|
||||
* Copyright (C) 1994 - 2006 Ralf Baechle
|
||||
* Copyright (C) 2003, 2004 Maciej W. Rozycki
|
||||
* Copyright (C) 2001, 2004 MIPS Inc.
|
||||
* Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
|
@ -199,6 +199,7 @@ void __init check_wait(void)
|
|||
cpu_wait = rm7k_wait_irqoff;
|
||||
break;
|
||||
|
||||
case CPU_M14KC:
|
||||
case CPU_24K:
|
||||
case CPU_34K:
|
||||
case CPU_1004K:
|
||||
|
@ -810,6 +811,10 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
|
|||
c->cputype = CPU_5KC;
|
||||
__cpu_name[cpu] = "MIPS 5Kc";
|
||||
break;
|
||||
case PRID_IMP_5KE:
|
||||
c->cputype = CPU_5KE;
|
||||
__cpu_name[cpu] = "MIPS 5KE";
|
||||
break;
|
||||
case PRID_IMP_20KC:
|
||||
c->cputype = CPU_20KC;
|
||||
__cpu_name[cpu] = "MIPS 20Kc";
|
||||
|
@ -831,6 +836,10 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
|
|||
c->cputype = CPU_74K;
|
||||
__cpu_name[cpu] = "MIPS 74Kc";
|
||||
break;
|
||||
case PRID_IMP_M14KC:
|
||||
c->cputype = CPU_M14KC;
|
||||
__cpu_name[cpu] = "MIPS M14Kc";
|
||||
break;
|
||||
case PRID_IMP_1004K:
|
||||
c->cputype = CPU_1004K;
|
||||
__cpu_name[cpu] = "MIPS 1004Kc";
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*
|
||||
* Copyright (C) 1996, 97, 98, 99, 2000, 01, 03, 04, 05 by Ralf Baechle
|
||||
* Copyright (C) 1996, 97, 98, 99, 2000, 01, 03, 04, 05, 12 by Ralf Baechle
|
||||
* Copyright (C) 1999, 2000, 01 Silicon Graphics, Inc.
|
||||
*/
|
||||
#include <linux/interrupt.h>
|
||||
|
@ -34,6 +34,12 @@ EXPORT_SYMBOL(memmove);
|
|||
|
||||
EXPORT_SYMBOL(kernel_thread);
|
||||
|
||||
/*
|
||||
* Functions that operate on entire pages. Mostly used by memory management.
|
||||
*/
|
||||
EXPORT_SYMBOL(clear_page);
|
||||
EXPORT_SYMBOL(copy_page);
|
||||
|
||||
/*
|
||||
* Userspace access stuff.
|
||||
*/
|
||||
|
|
|
@ -31,7 +31,7 @@
|
|||
|
||||
/*
|
||||
* task_struct *resume(task_struct *prev, task_struct *next,
|
||||
* struct thread_info *next_ti)
|
||||
* struct thread_info *next_ti, int usedfpu)
|
||||
*/
|
||||
.align 7
|
||||
LEAF(resume)
|
||||
|
|
|
@ -162,11 +162,6 @@ static unsigned int counters_total_to_per_cpu(unsigned int counters)
|
|||
return counters >> vpe_shift();
|
||||
}
|
||||
|
||||
static unsigned int counters_per_cpu_to_total(unsigned int counters)
|
||||
{
|
||||
return counters << vpe_shift();
|
||||
}
|
||||
|
||||
#else /* !CONFIG_MIPS_MT_SMP */
|
||||
#define vpe_id() 0
|
||||
|
||||
|
|
|
@ -43,7 +43,7 @@
|
|||
|
||||
/*
|
||||
* task_struct *resume(task_struct *prev, task_struct *next,
|
||||
* struct thread_info *next_ti) )
|
||||
* struct thread_info *next_ti, int usedfpu)
|
||||
*/
|
||||
LEAF(resume)
|
||||
mfc0 t1, CP0_STATUS
|
||||
|
@ -51,18 +51,9 @@ LEAF(resume)
|
|||
cpu_save_nonscratch a0
|
||||
sw ra, THREAD_REG31(a0)
|
||||
|
||||
/*
|
||||
* check if we need to save FPU registers
|
||||
*/
|
||||
lw t3, TASK_THREAD_INFO(a0)
|
||||
lw t0, TI_FLAGS(t3)
|
||||
li t1, _TIF_USEDFPU
|
||||
and t2, t0, t1
|
||||
beqz t2, 1f
|
||||
nor t1, zero, t1
|
||||
beqz a3, 1f
|
||||
|
||||
and t0, t0, t1
|
||||
sw t0, TI_FLAGS(t3)
|
||||
PTR_L t3, TASK_THREAD_INFO(a0)
|
||||
|
||||
/*
|
||||
* clear saved user stack CU1 bit
|
||||
|
|
|
@ -41,7 +41,7 @@
|
|||
|
||||
/*
|
||||
* task_struct *resume(task_struct *prev, task_struct *next,
|
||||
* struct thread_info *next_ti)
|
||||
* struct thread_info *next_ti, int usedfpu)
|
||||
*/
|
||||
.align 5
|
||||
LEAF(resume)
|
||||
|
@ -53,16 +53,10 @@
|
|||
/*
|
||||
* check if we need to save FPU registers
|
||||
*/
|
||||
|
||||
beqz a3, 1f
|
||||
|
||||
PTR_L t3, TASK_THREAD_INFO(a0)
|
||||
LONG_L t0, TI_FLAGS(t3)
|
||||
li t1, _TIF_USEDFPU
|
||||
and t2, t0, t1
|
||||
beqz t2, 1f
|
||||
nor t1, zero, t1
|
||||
|
||||
and t0, t0, t1
|
||||
LONG_S t0, TI_FLAGS(t3)
|
||||
|
||||
/*
|
||||
* clear saved user stack CU1 bit
|
||||
*/
|
||||
|
|
|
@ -15,7 +15,6 @@
|
|||
#include <linux/smp.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/cpumask.h>
|
||||
#include <linux/reboot.h>
|
||||
|
@ -197,13 +196,6 @@ static void bmips_init_secondary(void)
|
|||
|
||||
write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0));
|
||||
#endif
|
||||
|
||||
/* make sure there won't be a timer interrupt for a little while */
|
||||
write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
|
||||
|
||||
irq_enable_hazard();
|
||||
set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ1 | IE_IRQ5 | ST0_IE);
|
||||
irq_enable_hazard();
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -212,6 +204,13 @@ static void bmips_init_secondary(void)
|
|||
static void bmips_smp_finish(void)
|
||||
{
|
||||
pr_info("SMP: CPU%d is running\n", smp_processor_id());
|
||||
|
||||
/* make sure there won't be a timer interrupt for a little while */
|
||||
write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
|
||||
|
||||
irq_enable_hazard();
|
||||
set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ1 | IE_IRQ5 | ST0_IE);
|
||||
irq_enable_hazard();
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -122,13 +122,21 @@ asmlinkage __cpuinit void start_secondary(void)
|
|||
|
||||
notify_cpu_starting(cpu);
|
||||
|
||||
mp_ops->smp_finish();
|
||||
set_cpu_online(cpu, true);
|
||||
|
||||
set_cpu_sibling_map(cpu);
|
||||
|
||||
cpu_set(cpu, cpu_callin_map);
|
||||
|
||||
synchronise_count_slave();
|
||||
|
||||
/*
|
||||
* irq will be enabled in ->smp_finish(), enabling it too early
|
||||
* is dangerous.
|
||||
*/
|
||||
WARN_ON_ONCE(!irqs_disabled());
|
||||
mp_ops->smp_finish();
|
||||
|
||||
cpu_idle();
|
||||
}
|
||||
|
||||
|
@ -196,8 +204,6 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
|
|||
while (!cpu_isset(cpu, cpu_callin_map))
|
||||
udelay(100);
|
||||
|
||||
set_cpu_online(cpu, true);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -322,7 +322,7 @@ int __init smtc_build_cpu_map(int start_cpu_slot)
|
|||
|
||||
/*
|
||||
* Common setup before any secondaries are started
|
||||
* Make sure all CPU's are in a sensible state before we boot any of the
|
||||
* Make sure all CPUs are in a sensible state before we boot any of the
|
||||
* secondaries.
|
||||
*
|
||||
* For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly
|
||||
|
@ -340,12 +340,12 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
|
|||
/*
|
||||
* TCContext gets an offset from the base of the IPIQ array
|
||||
* to be used in low-level code to detect the presence of
|
||||
* an active IPI queue
|
||||
* an active IPI queue.
|
||||
*/
|
||||
write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16);
|
||||
/* Bind tc to vpe */
|
||||
write_tc_c0_tcbind(vpe);
|
||||
/* In general, all TCs should have the same cpu_data indications */
|
||||
/* In general, all TCs should have the same cpu_data indications. */
|
||||
memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
|
||||
/* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */
|
||||
if (cpu_data[0].cputype == CPU_34K ||
|
||||
|
@ -358,8 +358,8 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
|
|||
}
|
||||
|
||||
/*
|
||||
* Tweak to get Count registes in as close a sync as possible.
|
||||
* Value seems good for 34K-class cores.
|
||||
* Tweak to get Count registes in as close a sync as possible. The
|
||||
* value seems good for 34K-class cores.
|
||||
*/
|
||||
|
||||
#define CP0_SKEW 8
|
||||
|
@ -615,7 +615,6 @@ void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
|
|||
|
||||
void smtc_init_secondary(void)
|
||||
{
|
||||
local_irq_enable();
|
||||
}
|
||||
|
||||
void smtc_smp_finish(void)
|
||||
|
@ -631,6 +630,8 @@ void smtc_smp_finish(void)
|
|||
if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id))
|
||||
write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
|
||||
|
||||
local_irq_enable();
|
||||
|
||||
printk("TC %d going on-line as CPU %d\n",
|
||||
cpu_data[smp_processor_id()].tc_id, smp_processor_id());
|
||||
}
|
||||
|
|
|
@ -111,7 +111,6 @@ void __cpuinit synchronise_count_master(void)
|
|||
void __cpuinit synchronise_count_slave(void)
|
||||
{
|
||||
int i;
|
||||
unsigned long flags;
|
||||
unsigned int initcount;
|
||||
int ncpus;
|
||||
|
||||
|
@ -123,8 +122,6 @@ void __cpuinit synchronise_count_slave(void)
|
|||
return;
|
||||
#endif
|
||||
|
||||
local_irq_save(flags);
|
||||
|
||||
/*
|
||||
* Not every cpu is online at the time this gets called,
|
||||
* so we first wait for the master to say everyone is ready
|
||||
|
@ -154,7 +151,5 @@ void __cpuinit synchronise_count_slave(void)
|
|||
}
|
||||
/* Arrange for an interrupt in a short while */
|
||||
write_c0_compare(read_c0_count() + COUNTON);
|
||||
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
#undef NR_LOOPS
|
||||
|
|
|
@ -132,6 +132,9 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
|
|||
unsigned long ra = regs->regs[31];
|
||||
unsigned long pc = regs->cp0_epc;
|
||||
|
||||
if (!task)
|
||||
task = current;
|
||||
|
||||
if (raw_show_trace || !__kernel_text_address(pc)) {
|
||||
show_raw_backtrace(sp);
|
||||
return;
|
||||
|
@ -1249,6 +1252,7 @@ static inline void parity_protection_init(void)
|
|||
break;
|
||||
|
||||
case CPU_5KC:
|
||||
case CPU_5KE:
|
||||
write_c0_ecc(0x80000000);
|
||||
back_to_back_c0_hazard();
|
||||
/* Set the PE bit (bit 31) in the c0_errctl register. */
|
||||
|
@ -1498,6 +1502,7 @@ extern void flush_tlb_handlers(void);
|
|||
* Timer interrupt
|
||||
*/
|
||||
int cp0_compare_irq;
|
||||
EXPORT_SYMBOL_GPL(cp0_compare_irq);
|
||||
int cp0_compare_irq_shift;
|
||||
|
||||
/*
|
||||
|
@ -1597,7 +1602,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
|
|||
cp0_perfcount_irq = -1;
|
||||
} else {
|
||||
cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
|
||||
cp0_compare_irq_shift = cp0_compare_irq;
|
||||
cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
|
||||
cp0_perfcount_irq = -1;
|
||||
}
|
||||
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
#include <asm/asm-offsets.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/thread_info.h>
|
||||
#include <asm-generic/vmlinux.lds.h>
|
||||
|
||||
#undef mips
|
||||
|
@ -72,7 +73,7 @@ SECTIONS
|
|||
.data : { /* Data */
|
||||
. = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */
|
||||
|
||||
INIT_TASK_DATA(PAGE_SIZE)
|
||||
INIT_TASK_DATA(THREAD_SIZE)
|
||||
NOSAVE_DATA
|
||||
CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
|
||||
READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
|
||||
|
|
|
@ -3,8 +3,8 @@
|
|||
#
|
||||
|
||||
obj-y += cache.o dma-default.o extable.o fault.o \
|
||||
gup.o init.o mmap.o page.o tlbex.o \
|
||||
tlbex-fault.o uasm.o
|
||||
gup.o init.o mmap.o page.o page-funcs.o \
|
||||
tlbex.o tlbex-fault.o uasm.o
|
||||
|
||||
obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o
|
||||
obj-$(CONFIG_64BIT) += pgtable-64.o
|
||||
|
|
|
@ -977,7 +977,7 @@ static void __cpuinit probe_pcache(void)
|
|||
c->icache.linesz = 2 << lsize;
|
||||
else
|
||||
c->icache.linesz = lsize;
|
||||
c->icache.sets = 64 << ((config1 >> 22) & 7);
|
||||
c->icache.sets = 32 << (((config1 >> 22) + 1) & 7);
|
||||
c->icache.ways = 1 + ((config1 >> 16) & 7);
|
||||
|
||||
icache_size = c->icache.sets *
|
||||
|
@ -997,7 +997,7 @@ static void __cpuinit probe_pcache(void)
|
|||
c->dcache.linesz = 2 << lsize;
|
||||
else
|
||||
c->dcache.linesz= lsize;
|
||||
c->dcache.sets = 64 << ((config1 >> 13) & 7);
|
||||
c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7);
|
||||
c->dcache.ways = 1 + ((config1 >> 7) & 7);
|
||||
|
||||
dcache_size = c->dcache.sets *
|
||||
|
@ -1051,6 +1051,7 @@ static void __cpuinit probe_pcache(void)
|
|||
case CPU_R14000:
|
||||
break;
|
||||
|
||||
case CPU_M14KC:
|
||||
case CPU_24K:
|
||||
case CPU_34K:
|
||||
case CPU_74K:
|
||||
|
|
arch/mips/mm/page-funcs.S (new file)
@ -0,0 +1,50 @@
|
|||
/*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*
|
||||
* Micro-assembler generated clear_page/copy_page functions.
|
||||
*
|
||||
* Copyright (C) 2012 MIPS Technologies, Inc.
|
||||
* Copyright (C) 2012 Ralf Baechle <ralf@linux-mips.org>
|
||||
*/
|
||||
#include <asm/asm.h>
|
||||
#include <asm/regdef.h>
|
||||
|
||||
#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
|
||||
#define cpu_clear_page_function_name clear_page_cpu
|
||||
#define cpu_copy_page_function_name copy_page_cpu
|
||||
#else
|
||||
#define cpu_clear_page_function_name clear_page
|
||||
#define cpu_copy_page_function_name copy_page
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Maximum sizes:
|
||||
*
|
||||
* R4000 128 bytes S-cache: 0x058 bytes
|
||||
* R4600 v1.7: 0x05c bytes
|
||||
* R4600 v2.0: 0x060 bytes
|
||||
* With prefetching, 16 word strides 0x120 bytes
|
||||
*/
|
||||
EXPORT(__clear_page_start)
|
||||
LEAF(cpu_clear_page_function_name)
|
||||
1: j 1b /* Dummy, will be replaced. */
|
||||
.space 288
|
||||
END(cpu_clear_page_function_name)
|
||||
EXPORT(__clear_page_end)
|
||||
|
||||
/*
|
||||
* Maximum sizes:
|
||||
*
|
||||
* R4000 128 bytes S-cache: 0x11c bytes
|
||||
* R4600 v1.7: 0x080 bytes
|
||||
* R4600 v2.0: 0x07c bytes
|
||||
* With prefetching, 16 word strides 0x540 bytes
|
||||
*/
|
||||
EXPORT(__copy_page_start)
|
||||
LEAF(cpu_copy_page_function_name)
|
||||
1: j 1b /* Dummy, will be replaced. */
|
||||
.space 1344
|
||||
END(cpu_copy_page_function_name)
|
||||
EXPORT(__copy_page_end)
|
|
@ -6,6 +6,7 @@
|
|||
* Copyright (C) 2003, 04, 05 Ralf Baechle (ralf@linux-mips.org)
|
||||
* Copyright (C) 2007 Maciej W. Rozycki
|
||||
* Copyright (C) 2008 Thiemo Seufer
|
||||
* Copyright (C) 2012 MIPS Technologies, Inc.
|
||||
*/
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
|
@ -71,45 +72,6 @@ static struct uasm_reloc __cpuinitdata relocs[5];
|
|||
#define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010)
|
||||
#define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020)
|
||||
|
||||
/*
|
||||
* Maximum sizes:
|
||||
*
|
||||
* R4000 128 bytes S-cache: 0x058 bytes
|
||||
* R4600 v1.7: 0x05c bytes
|
||||
* R4600 v2.0: 0x060 bytes
|
||||
* With prefetching, 16 word strides 0x120 bytes
|
||||
*/
|
||||
|
||||
static u32 clear_page_array[0x120 / 4];
|
||||
|
||||
#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
|
||||
void clear_page_cpu(void *page) __attribute__((alias("clear_page_array")));
|
||||
#else
|
||||
void clear_page(void *page) __attribute__((alias("clear_page_array")));
|
||||
#endif
|
||||
|
||||
EXPORT_SYMBOL(clear_page);
|
||||
|
||||
/*
|
||||
* Maximum sizes:
|
||||
*
|
||||
* R4000 128 bytes S-cache: 0x11c bytes
|
||||
* R4600 v1.7: 0x080 bytes
|
||||
* R4600 v2.0: 0x07c bytes
|
||||
* With prefetching, 16 word strides 0x540 bytes
|
||||
*/
|
||||
static u32 copy_page_array[0x540 / 4];
|
||||
|
||||
#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
|
||||
void
|
||||
copy_page_cpu(void *to, void *from) __attribute__((alias("copy_page_array")));
|
||||
#else
|
||||
void copy_page(void *to, void *from) __attribute__((alias("copy_page_array")));
|
||||
#endif
|
||||
|
||||
EXPORT_SYMBOL(copy_page);
|
||||
|
||||
|
||||
static int pref_bias_clear_store __cpuinitdata;
|
||||
static int pref_bias_copy_load __cpuinitdata;
|
||||
static int pref_bias_copy_store __cpuinitdata;
|
||||
|
@ -282,10 +244,15 @@ static inline void __cpuinit build_clear_pref(u32 **buf, int off)
|
|||
}
|
||||
}
|
||||
|
||||
extern u32 __clear_page_start;
|
||||
extern u32 __clear_page_end;
|
||||
extern u32 __copy_page_start;
|
||||
extern u32 __copy_page_end;
|
||||
|
||||
void __cpuinit build_clear_page(void)
|
||||
{
|
||||
int off;
|
||||
u32 *buf = (u32 *)&clear_page_array;
|
||||
u32 *buf = &__clear_page_start;
|
||||
struct uasm_label *l = labels;
|
||||
struct uasm_reloc *r = relocs;
|
||||
int i;
|
||||
|
@ -356,17 +323,17 @@ void __cpuinit build_clear_page(void)
|
|||
uasm_i_jr(&buf, RA);
|
||||
uasm_i_nop(&buf);
|
||||
|
||||
BUG_ON(buf > clear_page_array + ARRAY_SIZE(clear_page_array));
|
||||
BUG_ON(buf > &__clear_page_end);
|
||||
|
||||
uasm_resolve_relocs(relocs, labels);
|
||||
|
||||
pr_debug("Synthesized clear page handler (%u instructions).\n",
|
||||
(u32)(buf - clear_page_array));
|
||||
(u32)(buf - &__clear_page_start));
|
||||
|
||||
pr_debug("\t.set push\n");
|
||||
pr_debug("\t.set noreorder\n");
|
||||
for (i = 0; i < (buf - clear_page_array); i++)
|
||||
pr_debug("\t.word 0x%08x\n", clear_page_array[i]);
|
||||
for (i = 0; i < (buf - &__clear_page_start); i++)
|
||||
pr_debug("\t.word 0x%08x\n", (&__clear_page_start)[i]);
|
||||
pr_debug("\t.set pop\n");
|
||||
}
|
||||
|
||||
|
@ -427,7 +394,7 @@ static inline void build_copy_store_pref(u32 **buf, int off)
|
|||
void __cpuinit build_copy_page(void)
|
||||
{
|
||||
int off;
|
||||
u32 *buf = (u32 *)&copy_page_array;
|
||||
u32 *buf = &__copy_page_start;
|
||||
struct uasm_label *l = labels;
|
||||
struct uasm_reloc *r = relocs;
|
||||
int i;
|
||||
|
@ -595,21 +562,23 @@ void __cpuinit build_copy_page(void)
|
|||
uasm_i_jr(&buf, RA);
|
||||
uasm_i_nop(&buf);
|
||||
|
||||
BUG_ON(buf > copy_page_array + ARRAY_SIZE(copy_page_array));
|
||||
BUG_ON(buf > &__copy_page_end);
|
||||
|
||||
uasm_resolve_relocs(relocs, labels);
|
||||
|
||||
pr_debug("Synthesized copy page handler (%u instructions).\n",
|
||||
(u32)(buf - copy_page_array));
|
||||
(u32)(buf - &__copy_page_start));
|
||||
|
||||
pr_debug("\t.set push\n");
|
||||
pr_debug("\t.set noreorder\n");
|
||||
for (i = 0; i < (buf - copy_page_array); i++)
|
||||
pr_debug("\t.word 0x%08x\n", copy_page_array[i]);
|
||||
for (i = 0; i < (buf - &__copy_page_start); i++)
|
||||
pr_debug("\t.word 0x%08x\n", (&__copy_page_start)[i]);
|
||||
pr_debug("\t.set pop\n");
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
|
||||
extern void clear_page_cpu(void *page);
|
||||
extern void copy_page_cpu(void *to, void *from);
|
||||
|
||||
/*
|
||||
* Pad descriptors to cacheline, since each is exclusively owned by a
|
||||
|
|
|
@ -9,6 +9,7 @@
|
|||
* Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki
|
||||
* Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
|
||||
* Copyright (C) 2008, 2009 Cavium Networks, Inc.
|
||||
* Copyright (C) 2011 MIPS Technologies, Inc.
|
||||
*
|
||||
* ... and the days got worse and worse and now you see
|
||||
* I've gone completly out of my mind.
|
||||
|
@ -494,6 +495,7 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
|
|||
case CPU_R14000:
|
||||
case CPU_4KC:
|
||||
case CPU_4KEC:
|
||||
case CPU_M14KC:
|
||||
case CPU_SB1:
|
||||
case CPU_SB1A:
|
||||
case CPU_4KSC:
|
||||
|
|
|
@ -241,8 +241,9 @@ void __init mips_pcibios_init(void)
|
|||
return;
|
||||
}
|
||||
|
||||
if (controller->io_resource->start < 0x00001000UL) /* FIXME */
|
||||
controller->io_resource->start = 0x00001000UL;
|
||||
/* Change start address to avoid conflicts with ACPI and SMB devices */
|
||||
if (controller->io_resource->start < 0x00002000UL)
|
||||
controller->io_resource->start = 0x00002000UL;
|
||||
|
||||
iomem_resource.end &= 0xfffffffffULL; /* 64 GB */
|
||||
ioport_resource.end = controller->io_resource->end;
|
||||
|
@ -253,7 +254,7 @@ void __init mips_pcibios_init(void)
|
|||
}
|
||||
|
||||
/* Enable PCI 2.1 compatibility in PIIX4 */
|
||||
static void __init quirk_dlcsetup(struct pci_dev *dev)
|
||||
static void __devinit quirk_dlcsetup(struct pci_dev *dev)
|
||||
{
|
||||
u8 odlc, ndlc;
|
||||
(void) pci_read_config_byte(dev, 0x82, &odlc);
|
||||
|
|
|
@ -111,7 +111,7 @@ static void __init pci_clock_check(void)
|
|||
unsigned int __iomem *jmpr_p =
|
||||
(unsigned int *) ioremap(MALTA_JMPRS_REG, sizeof(unsigned int));
|
||||
int jmpr = (__raw_readl(jmpr_p) >> 2) & 0x07;
|
||||
static const int pciclocks[] __initdata = {
|
||||
static const int pciclocks[] __initconst = {
|
||||
33, 20, 25, 30, 12, 16, 37, 10
|
||||
};
|
||||
int pciclock = pciclocks[jmpr];
|
||||
|
|
|
@ -82,8 +82,10 @@ void __init prom_free_prom_memory(void)
|
|||
|
||||
void xlp_mmu_init(void)
|
||||
{
|
||||
/* enable extended TLB and Large Fixed TLB */
|
||||
write_c0_config6(read_c0_config6() | 0x24);
|
||||
current_cpu_data.tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1;
|
||||
|
||||
/* set page mask of Fixed TLB in config7 */
|
||||
write_c0_config7(PM_DEFAULT_MASK >>
|
||||
(13 + (ffz(PM_DEFAULT_MASK >> 13) / 2)));
|
||||
}
|
||||
|
@ -100,6 +102,10 @@ void __init prom_init(void)
|
|||
nlm_common_ebase = read_c0_ebase() & (~((1 << 12) - 1));
|
||||
#ifdef CONFIG_SMP
|
||||
nlm_wakeup_secondary_cpus(0xffffffff);
|
||||
|
||||
/* update TLB size after waking up threads */
|
||||
current_cpu_data.tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1;
|
||||
|
||||
register_smp_ops(&nlm_smp_ops);
|
||||
#endif
|
||||
}
|
||||
|
|
|
@ -78,6 +78,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
|
|||
|
||||
switch (current_cpu_type()) {
|
||||
case CPU_5KC:
|
||||
case CPU_M14KC:
|
||||
case CPU_20KC:
|
||||
case CPU_24K:
|
||||
case CPU_25KF:
|
||||
|
|
|
@ -322,6 +322,10 @@ static int __init mipsxx_init(void)
|
|||
|
||||
op_model_mipsxx_ops.num_counters = counters;
|
||||
switch (current_cpu_type()) {
|
||||
case CPU_M14KC:
|
||||
op_model_mipsxx_ops.cpu_type = "mips/M14Kc";
|
||||
break;
|
||||
|
||||
case CPU_20KC:
|
||||
op_model_mipsxx_ops.cpu_type = "mips/20K";
|
||||
break;
|
||||
|
|
|
@ -48,7 +48,7 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void __init loongson2e_nec_fixup(struct pci_dev *pdev)
|
||||
static void __devinit loongson2e_nec_fixup(struct pci_dev *pdev)
|
||||
{
|
||||
unsigned int val;
|
||||
|
||||
|
@ -60,7 +60,7 @@ static void __init loongson2e_nec_fixup(struct pci_dev *pdev)
|
|||
pci_write_config_dword(pdev, 0xe4, 1 << 5);
|
||||
}
|
||||
|
||||
static void __init loongson2e_686b_func0_fixup(struct pci_dev *pdev)
|
||||
static void __devinit loongson2e_686b_func0_fixup(struct pci_dev *pdev)
|
||||
{
|
||||
unsigned char c;
|
||||
|
||||
|
@ -135,7 +135,7 @@ static void __init loongson2e_686b_func0_fixup(struct pci_dev *pdev)
|
|||
printk(KERN_INFO"via686b fix: ISA bridge done\n");
|
||||
}
|
||||
|
||||
static void __init loongson2e_686b_func1_fixup(struct pci_dev *pdev)
|
||||
static void __devinit loongson2e_686b_func1_fixup(struct pci_dev *pdev)
|
||||
{
|
||||
printk(KERN_INFO"via686b fix: IDE\n");
|
||||
|
||||
|
@ -168,19 +168,19 @@ static void __init loongson2e_686b_func1_fixup(struct pci_dev *pdev)
|
|||
printk(KERN_INFO"via686b fix: IDE done\n");
|
||||
}
|
||||
|
||||
static void __init loongson2e_686b_func2_fixup(struct pci_dev *pdev)
|
||||
static void __devinit loongson2e_686b_func2_fixup(struct pci_dev *pdev)
|
||||
{
|
||||
/* irq routing */
|
||||
pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, 10);
|
||||
}
|
||||
|
||||
static void __init loongson2e_686b_func3_fixup(struct pci_dev *pdev)
|
||||
static void __devinit loongson2e_686b_func3_fixup(struct pci_dev *pdev)
|
||||
{
|
||||
/* irq routing */
|
||||
pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, 11);
|
||||
}
|
||||
|
||||
static void __init loongson2e_686b_func5_fixup(struct pci_dev *pdev)
|
||||
static void __devinit loongson2e_686b_func5_fixup(struct pci_dev *pdev)
|
||||
{
|
||||
unsigned int val;
|
||||
unsigned char c;
|
||||
|
|
|
@ -96,21 +96,21 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
|
|||
}
|
||||
|
||||
/* CS5536 SPEC. fixup */
|
||||
static void __init loongson_cs5536_isa_fixup(struct pci_dev *pdev)
|
||||
static void __devinit loongson_cs5536_isa_fixup(struct pci_dev *pdev)
|
||||
{
|
||||
/* the uart1 and uart2 interrupt in PIC is enabled as default */
|
||||
pci_write_config_dword(pdev, PCI_UART1_INT_REG, 1);
|
||||
pci_write_config_dword(pdev, PCI_UART2_INT_REG, 1);
|
||||
}
|
||||
|
||||
static void __init loongson_cs5536_ide_fixup(struct pci_dev *pdev)
|
||||
static void __devinit loongson_cs5536_ide_fixup(struct pci_dev *pdev)
|
||||
{
|
||||
/* setting the mutex pin as IDE function */
|
||||
pci_write_config_dword(pdev, PCI_IDE_CFG_REG,
|
||||
CS5536_IDE_FLASH_SIGNATURE);
|
||||
}
|
||||
|
||||
static void __init loongson_cs5536_acc_fixup(struct pci_dev *pdev)
|
||||
static void __devinit loongson_cs5536_acc_fixup(struct pci_dev *pdev)
|
||||
{
|
||||
/* enable the AUDIO interrupt in PIC */
|
||||
pci_write_config_dword(pdev, PCI_ACC_INT_REG, 1);
|
||||
|
@ -118,14 +118,14 @@ static void __init loongson_cs5536_acc_fixup(struct pci_dev *pdev)
|
|||
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xc0);
|
||||
}
|
||||
|
||||
static void __init loongson_cs5536_ohci_fixup(struct pci_dev *pdev)
|
||||
static void __devinit loongson_cs5536_ohci_fixup(struct pci_dev *pdev)
|
||||
{
|
||||
/* enable the OHCI interrupt in PIC */
|
||||
/* THE OHCI, EHCI, UDC, OTG are shared with interrupt in PIC */
|
||||
pci_write_config_dword(pdev, PCI_OHCI_INT_REG, 1);
|
||||
}
|
||||
|
||||
static void __init loongson_cs5536_ehci_fixup(struct pci_dev *pdev)
|
||||
static void __devinit loongson_cs5536_ehci_fixup(struct pci_dev *pdev)
|
||||
{
|
||||
u32 hi, lo;
|
||||
|
||||
|
@ -137,7 +137,7 @@ static void __init loongson_cs5536_ehci_fixup(struct pci_dev *pdev)
|
|||
pci_write_config_dword(pdev, PCI_EHCI_FLADJ_REG, 0x2000);
|
||||
}
|
||||
|
||||
static void __init loongson_nec_fixup(struct pci_dev *pdev)
|
||||
static void __devinit loongson_nec_fixup(struct pci_dev *pdev)
|
||||
{
|
||||
unsigned int val;
|
||||
|
||||
|
|
|
@ -49,10 +49,10 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void __init malta_piix_func0_fixup(struct pci_dev *pdev)
|
||||
static void __devinit malta_piix_func0_fixup(struct pci_dev *pdev)
|
||||
{
|
||||
unsigned char reg_val;
|
||||
static int piixirqmap[16] __initdata = { /* PIIX PIRQC[A:D] irq mappings */
|
||||
static int piixirqmap[16] __devinitdata = { /* PIIX PIRQC[A:D] irq mappings */
|
||||
0, 0, 0, 3,
|
||||
4, 5, 6, 7,
|
||||
0, 9, 10, 11,
|
||||
|
@ -83,7 +83,7 @@ static void __init malta_piix_func0_fixup(struct pci_dev *pdev)
|
|||
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0,
|
||||
malta_piix_func0_fixup);
|
||||
|
||||
static void __init malta_piix_func1_fixup(struct pci_dev *pdev)
|
||||
static void __devinit malta_piix_func1_fixup(struct pci_dev *pdev)
|
||||
{
|
||||
unsigned char reg_val;
|
||||
|
||||
|
|
|
@ -22,13 +22,13 @@
|
|||
|
||||
#include <asm/vr41xx/mpc30x.h>
|
||||
|
||||
static const int internal_func_irqs[] __initdata = {
|
||||
static const int internal_func_irqs[] __initconst = {
|
||||
VRC4173_CASCADE_IRQ,
|
||||
VRC4173_AC97_IRQ,
|
||||
VRC4173_USB_IRQ,
|
||||
};
|
||||
|
||||
static const int irq_tab_mpc30x[] __initdata = {
|
||||
static const int irq_tab_mpc30x[] __initconst = {
|
||||
[12] = VRC4173_PCMCIA1_IRQ,
|
||||
[13] = VRC4173_PCMCIA2_IRQ,
|
||||
[29] = MQ200_IRQ,
|
||||
|
|
|
@ -15,7 +15,7 @@
|
|||
* Set the BCM1250, etc. PCI host bridge's TRDY timeout
|
||||
* to the finite max.
|
||||
*/
|
||||
static void __init quirk_sb1250_pci(struct pci_dev *dev)
|
||||
static void __devinit quirk_sb1250_pci(struct pci_dev *dev)
|
||||
{
|
||||
pci_write_config_byte(dev, 0x40, 0xff);
|
||||
}
|
||||
|
@ -25,7 +25,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_PCI,
|
|||
/*
|
||||
* The BCM1250, etc. PCI/HT bridge reports as a host bridge.
|
||||
*/
|
||||
static void __init quirk_sb1250_ht(struct pci_dev *dev)
|
||||
static void __devinit quirk_sb1250_ht(struct pci_dev *dev)
|
||||
{
|
||||
dev->class = PCI_CLASS_BRIDGE_PCI << 8;
|
||||
}
|
||||
|
@ -35,7 +35,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_HT,
|
|||
/*
|
||||
* Set the SP1011 HT/PCI bridge's TRDY timeout to the finite max.
|
||||
*/
|
||||
static void __init quirk_sp1011(struct pci_dev *dev)
|
||||
static void __devinit quirk_sp1011(struct pci_dev *dev)
|
||||
{
|
||||
pci_write_config_byte(dev, 0x64, 0xff);
|
||||
}
|
||||
|
|
|
@@ -495,7 +495,7 @@ irqreturn_t tx4927_pcierr_interrupt(int irq, void *dev_id)
}

#ifdef CONFIG_TOSHIBA_FPCIB0
static void __init tx4927_quirk_slc90e66_bridge(struct pci_dev *dev)
static void __devinit tx4927_quirk_slc90e66_bridge(struct pci_dev *dev)
{
struct tx4927_pcic_reg __iomem *pcicptr = pci_bus_to_pcicptr(dev->bus);

@@ -212,7 +212,7 @@ static inline void pci_enable_swapping(struct pci_dev *dev)
bridge->b_widget.w_tflush; /* Flush */
}

static void __init pci_fixup_ioc3(struct pci_dev *d)
static void __devinit pci_fixup_ioc3(struct pci_dev *d)
{
pci_disable_swapping(d);
}

@@ -41,6 +41,7 @@
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/console.h>
#include <linux/pci_regs.h>

#include <asm/io.h>

@@ -156,35 +157,55 @@ struct pci_controller nlm_pci_controller = {
.io_offset = 0x00000000UL,
};

/*
* The top level PCIe links on the XLS PCIe controller appear as
* bridges. Given a device, this function finds which link it is
* on.
*/
static struct pci_dev *xls_get_pcie_link(const struct pci_dev *dev)
{
struct pci_bus *bus, *p;

/* Find the bridge on bus 0 */
bus = dev->bus;
for (p = bus->parent; p && p->number != 0; p = p->parent)
bus = p;

return p ? bus->self : NULL;
}

static int get_irq_vector(const struct pci_dev *dev)
{
struct pci_dev *lnk;

if (!nlm_chip_is_xls())
return PIC_PCIX_IRQ; /* for XLR just one IRQ*/
return PIC_PCIX_IRQ; /* for XLR just one IRQ */

/*
* For XLS PCIe, there is an IRQ per Link, find out which
* link the device is on to assign interrupts
*/
if (dev->bus->self == NULL)
*/
lnk = xls_get_pcie_link(dev);
if (lnk == NULL)
return 0;

switch (dev->bus->self->devfn) {
case 0x0:
switch (PCI_SLOT(lnk->devfn)) {
case 0:
return PIC_PCIE_LINK0_IRQ;
case 0x8:
case 1:
return PIC_PCIE_LINK1_IRQ;
case 0x10:
case 2:
if (nlm_chip_is_xls_b())
return PIC_PCIE_XLSB0_LINK2_IRQ;
else
return PIC_PCIE_LINK2_IRQ;
case 0x18:
case 3:
if (nlm_chip_is_xls_b())
return PIC_PCIE_XLSB0_LINK3_IRQ;
else
return PIC_PCIE_LINK3_IRQ;
}
WARN(1, "Unexpected devfn %d\n", dev->bus->self->devfn);
WARN(1, "Unexpected devfn %d\n", lnk->devfn);
return 0;
}
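The reworked switch above keys on PCI_SLOT(lnk->devfn) instead of raw devfn values. devfn packs slot and function as (slot << 3) | function, so the old cases 0x0, 0x8, 0x10 and 0x18 are exactly slots 0-3 at function 0. A small illustration of that encoding (not part of the commit; the helper name is made up):

#include <linux/pci.h>
#include <linux/printk.h>

/* Print the slot/function split for the devfn values the old switch matched. */
static void __init show_devfn_encoding(void)
{
        unsigned int devfn;

        for (devfn = 0; devfn <= 0x18; devfn += 8)
                pr_info("devfn %#x -> slot %u, function %u\n",
                        devfn, PCI_SLOT(devfn), PCI_FUNC(devfn));
}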
@@ -202,7 +223,27 @@ void arch_teardown_msi_irq(unsigned int irq)
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
struct msi_msg msg;
struct pci_dev *lnk;
int irq, ret;
u16 val;

/* MSI not supported on XLR */
if (!nlm_chip_is_xls())
return 1;

/*
* Enable MSI on the XLS PCIe controller bridge which was disabled
* at enumeration, the bridge MSI capability is at 0x50
*/
lnk = xls_get_pcie_link(dev);
if (lnk == NULL)
return 1;

pci_read_config_word(lnk, 0x50 + PCI_MSI_FLAGS, &val);
if ((val & PCI_MSI_FLAGS_ENABLE) == 0) {
val |= PCI_MSI_FLAGS_ENABLE;
pci_write_config_word(lnk, 0x50 + PCI_MSI_FLAGS, val);
}

irq = get_irq_vector(dev);
if (irq <= 0)

@@ -327,7 +368,7 @@ static int __init pcibios_init(void)
}
} else {
/* XLR PCI controller ACK */
irq_set_handler_data(PIC_PCIE_XLSB0_LINK3_IRQ, xlr_pci_ack);
irq_set_handler_data(PIC_PCIX_IRQ, xlr_pci_ack);
}

return 0;
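The MSI setup above re-enables the bridge's MSI capability by poking a fixed config offset (0x50 on this controller). As a hedged sketch of the same idea in a more generic form, locating the capability instead of assuming its offset (function name made up, error handling omitted):

#include <linux/pci.h>

/* Locate the MSI capability on a bridge and set its enable bit if clear. */
static void example_enable_bridge_msi(struct pci_dev *bridge)
{
        int pos = pci_find_capability(bridge, PCI_CAP_ID_MSI);
        u16 flags;

        if (!pos)
                return;
        pci_read_config_word(bridge, pos + PCI_MSI_FLAGS, &flags);
        if (!(flags & PCI_MSI_FLAGS_ENABLE))
                pci_write_config_word(bridge, pos + PCI_MSI_FLAGS,
                                      flags | PCI_MSI_FLAGS_ENABLE);
}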
@@ -115,11 +115,11 @@ static void yos_send_ipi_mask(const struct cpumask *mask, unsigned int action)
*/
static void __cpuinit yos_init_secondary(void)
{
set_c0_status(ST0_CO | ST0_IE | ST0_IM);
}

static void __cpuinit yos_smp_finish(void)
{
set_c0_status(ST0_CO | ST0_IM | ST0_IE);
}

/* Hook for after all CPUs are online */

@@ -28,7 +28,7 @@

#define CALLIOPE_ADDR(x) (CALLIOPE_IO_BASE + (x))

const struct register_map calliope_register_map __initdata = {
const struct register_map calliope_register_map __initconst = {
.eic_slow0_strt_add = {.phys = CALLIOPE_ADDR(0x800000)},
.eic_cfg_bits = {.phys = CALLIOPE_ADDR(0x800038)},
.eic_ready_status = {.phys = CALLIOPE_ADDR(0x80004c)},

@@ -28,7 +28,7 @@

#define CRONUS_ADDR(x) (CRONUS_IO_BASE + (x))

const struct register_map cronus_register_map __initdata = {
const struct register_map cronus_register_map __initconst = {
.eic_slow0_strt_add = {.phys = CRONUS_ADDR(0x000000)},
.eic_cfg_bits = {.phys = CRONUS_ADDR(0x000038)},
.eic_ready_status = {.phys = CRONUS_ADDR(0x00004C)},

@@ -23,7 +23,7 @@
#include <linux/init.h>
#include <asm/mach-powertv/asic.h>

const struct register_map gaia_register_map __initdata = {
const struct register_map gaia_register_map __initconst = {
.eic_slow0_strt_add = {.phys = GAIA_IO_BASE + 0x000000},
.eic_cfg_bits = {.phys = GAIA_IO_BASE + 0x000038},
.eic_ready_status = {.phys = GAIA_IO_BASE + 0x00004C},

@@ -28,7 +28,7 @@

#define ZEUS_ADDR(x) (ZEUS_IO_BASE + (x))

const struct register_map zeus_register_map __initdata = {
const struct register_map zeus_register_map __initconst = {
.eic_slow0_strt_add = {.phys = ZEUS_ADDR(0x000000)},
.eic_cfg_bits = {.phys = ZEUS_ADDR(0x000038)},
.eic_ready_status = {.phys = ZEUS_ADDR(0x00004c)},
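The register-map tables above switch from __initdata to __initconst. Both annotations place the object in init memory that is freed after boot; the const variant exists so that const-qualified data lands in a read-only init section and the compiler does not report a section type conflict. A minimal sketch (illustrative names only):

#include <linux/init.h>

/* const init-only table: discarded after boot, read-only while it exists */
static const int example_table[] __initconst = { 1, 2, 3 };

/* non-const init-only scratch value */
static int example_counter __initdata;

static int __init example_init(void)
{
        example_counter = example_table[0];
        return 0;
}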
@@ -269,7 +269,7 @@ txx9_i8259_irq_setup(int irq)
return err;
}

static void __init quirk_slc90e66_bridge(struct pci_dev *dev)
static void __devinit quirk_slc90e66_bridge(struct pci_dev *dev)
{
int irq; /* PCI/ISA Bridge interrupt */
u8 reg_64;

@@ -459,10 +459,11 @@ static int handle_signal(int sig,
else
ret = setup_frame(sig, ka, oldset, regs);
if (ret)
return;
return ret;

signal_delivered(sig, info, ka, regs,
test_thread_flag(TIF_SINGLESTEP));
test_thread_flag(TIF_SINGLESTEP));
return 0;
}

/*
@@ -876,9 +876,7 @@ static void __init smp_online(void)

notify_cpu_starting(cpu);

ipi_call_lock();
set_cpu_online(cpu, true);
ipi_call_unlock();

local_irq_enable();
}

@@ -300,9 +300,7 @@ smp_cpu_init(int cpunum)

notify_cpu_starting(cpunum);

ipi_call_lock();
set_cpu_online(cpunum, true);
ipi_call_unlock();

/* Initialise the idle task for this CPU */
atomic_inc(&init_mm.mm_count);

@@ -571,7 +571,6 @@ void __devinit start_secondary(void *unused)
if (system_state == SYSTEM_RUNNING)
vdso_data->processorCount++;
#endif
ipi_call_lock();
notify_cpu_starting(cpu);
set_cpu_online(cpu, true);
/* Update sibling maps */

@@ -601,7 +600,6 @@ void __devinit start_secondary(void *unused)
of_node_put(np);
}
of_node_put(l2_cache);
ipi_call_unlock();

local_irq_enable();

@@ -717,9 +717,7 @@ static void __cpuinit smp_start_secondary(void *cpuvoid)
init_cpu_vtimer();
pfault_init();
notify_cpu_starting(smp_processor_id());
ipi_call_lock();
set_cpu_online(smp_processor_id(), true);
ipi_call_unlock();
local_irq_enable();
/* cpu_idle will call schedule for us */
cpu_idle();

@@ -103,8 +103,6 @@ void __cpuinit smp_callin(void)
if (cheetah_pcache_forced_on)
cheetah_enable_pcache();

local_irq_enable();

callin_flag = 1;
__asm__ __volatile__("membar #Sync\n\t"
"flush %%g6" : : : "memory");

@@ -124,9 +122,8 @@ void __cpuinit smp_callin(void)
while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
rmb();

ipi_call_lock_irq();
set_cpu_online(cpuid, true);
ipi_call_unlock_irq();
local_irq_enable();

/* idle thread is expected to have preempt disabled */
preempt_disable();

@@ -1308,9 +1305,7 @@ int __cpu_disable(void)
mdelay(1);
local_irq_disable();

ipi_call_lock();
set_cpu_online(cpu, false);
ipi_call_unlock();

cpu_map_rebuild();

@@ -198,17 +198,7 @@ void __cpuinit online_secondary(void)

notify_cpu_starting(smp_processor_id());

/*
* We need to hold call_lock, so there is no inconsistency
* between the time smp_call_function() determines number of
* IPI recipients, and the time when the determination is made
* for which cpus receive the IPI. Holding this
* lock helps us to not include this cpu in a currently in progress
* smp_call_function().
*/
ipi_call_lock();
set_cpu_online(smp_processor_id(), 1);
ipi_call_unlock();
__get_cpu_var(cpu_state) = CPU_ONLINE;

/* Set up tile-specific state for this cpu. */
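All of these hunks drop the ipi_call_lock()/ipi_call_unlock() (and _irq) brackets that used to surround set_cpu_online() in the per-architecture CPU bring-up and teardown paths. A rough sketch of the pattern they converge on (simplified; the surrounding architecture hooks vary):

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/irqflags.h>

/* Simplified secondary-CPU bring-up: mark the CPU online, no IPI lock. */
static void example_start_secondary(unsigned int cpu)
{
        notify_cpu_starting(cpu);
        set_cpu_online(cpu, true);
        local_irq_enable();
}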
@@ -49,6 +49,9 @@ else
KBUILD_AFLAGS += -m64
KBUILD_CFLAGS += -m64

# Use -mpreferred-stack-boundary=3 if supported.
KBUILD_CFLAGS += $(call cc-option,-mno-sse -mpreferred-stack-boundary=3)

# FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
@@ -75,23 +75,54 @@ static inline int alternatives_text_reserved(void *start, void *end)
}
#endif /* CONFIG_SMP */

#define OLDINSTR(oldinstr) "661:\n\t" oldinstr "\n662:\n"

#define b_replacement(number) "663"#number
#define e_replacement(number) "664"#number

#define alt_slen "662b-661b"
#define alt_rlen(number) e_replacement(number)"f-"b_replacement(number)"f"

#define ALTINSTR_ENTRY(feature, number) \
" .long 661b - .\n" /* label */ \
" .long " b_replacement(number)"f - .\n" /* new instruction */ \
" .word " __stringify(feature) "\n" /* feature bit */ \
" .byte " alt_slen "\n" /* source len */ \
" .byte " alt_rlen(number) "\n" /* replacement len */

#define DISCARD_ENTRY(number) /* rlen <= slen */ \
" .byte 0xff + (" alt_rlen(number) ") - (" alt_slen ")\n"

#define ALTINSTR_REPLACEMENT(newinstr, feature, number) /* replacement */ \
b_replacement(number)":\n\t" newinstr "\n" e_replacement(number) ":\n\t"

/* alternative assembly primitive: */
#define ALTERNATIVE(oldinstr, newinstr, feature) \
\
"661:\n\t" oldinstr "\n662:\n" \
".section .altinstructions,\"a\"\n" \
" .long 661b - .\n" /* label */ \
" .long 663f - .\n" /* new instruction */ \
" .word " __stringify(feature) "\n" /* feature bit */ \
" .byte 662b-661b\n" /* sourcelen */ \
" .byte 664f-663f\n" /* replacementlen */ \
".previous\n" \
".section .discard,\"aw\",@progbits\n" \
" .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
".previous\n" \
".section .altinstr_replacement, \"ax\"\n" \
"663:\n\t" newinstr "\n664:\n" /* replacement */ \
".previous"
OLDINSTR(oldinstr) \
".section .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(feature, 1) \
".previous\n" \
".section .discard,\"aw\",@progbits\n" \
DISCARD_ENTRY(1) \
".previous\n" \
".section .altinstr_replacement, \"ax\"\n" \
ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
".previous"

#define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
OLDINSTR(oldinstr) \
".section .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(feature1, 1) \
ALTINSTR_ENTRY(feature2, 2) \
".previous\n" \
".section .discard,\"aw\",@progbits\n" \
DISCARD_ENTRY(1) \
DISCARD_ENTRY(2) \
".previous\n" \
".section .altinstr_replacement, \"ax\"\n" \
ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
".previous"

/*
* This must be included *after* the definition of ALTERNATIVE due to

@@ -139,6 +170,19 @@ static inline int alternatives_text_reserved(void *start, void *end)
asm volatile (ALTERNATIVE("call %P[old]", "call %P[new]", feature) \
: output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input)

/*
* Like alternative_call, but there are two features and respective functions.
* If CPU has feature2, function2 is used.
* Otherwise, if CPU has feature1, function1 is used.
* Otherwise, old function is used.
*/
#define alternative_call_2(oldfunc, newfunc1, feature1, newfunc2, feature2, \
output, input...) \
asm volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", feature1,\
"call %P[new2]", feature2) \
: output : [old] "i" (oldfunc), [new1] "i" (newfunc1), \
[new2] "i" (newfunc2), ## input)

/*
* use this macro(s) if you need more than one output parameter
* in alternative_io
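For a sense of how these building blocks are used elsewhere in the same header, the simpler alternative() wrapper places ALTERNATIVE() inside an asm volatile so one of two instruction sequences is patched in at boot depending on a CPU feature bit. A hedged usage sketch (mirroring how the kernel defines its 32-bit memory barrier; treat it as an illustration, not part of this commit):

#include <asm/alternative.h>
#include <asm/cpufeature.h>

static inline void example_mb(void)
{
        /* Use MFENCE when SSE2 is present, otherwise a locked add as the
         * fallback; apply_alternatives() rewrites the chosen bytes at boot. */
        alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2);
}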
@@ -306,7 +306,8 @@ struct apic {
unsigned long (*check_apicid_used)(physid_mask_t *map, int apicid);
unsigned long (*check_apicid_present)(int apicid);

void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
void (*vector_allocation_domain)(int cpu, struct cpumask *retmask,
const struct cpumask *mask);
void (*init_apic_ldr)(void);

void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap);

@@ -331,9 +332,9 @@ struct apic {
unsigned long (*set_apic_id)(unsigned int id);
unsigned long apic_id_mask;

unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
const struct cpumask *andmask);
int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
const struct cpumask *andmask,
unsigned int *apicid);

/* ipi */
void (*send_IPI_mask)(const struct cpumask *mask, int vector);

@@ -537,6 +538,11 @@ static inline const struct cpumask *default_target_cpus(void)
#endif
}

static inline const struct cpumask *online_target_cpus(void)
{
return cpu_online_mask;
}

DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);

@@ -586,21 +592,50 @@ static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)

#endif

static inline unsigned int
default_cpu_mask_to_apicid(const struct cpumask *cpumask)
static inline int
flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
const struct cpumask *andmask,
unsigned int *apicid)
{
return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
unsigned long cpu_mask = cpumask_bits(cpumask)[0] &
cpumask_bits(andmask)[0] &
cpumask_bits(cpu_online_mask)[0] &
APIC_ALL_CPUS;

if (likely(cpu_mask)) {
*apicid = (unsigned int)cpu_mask;
return 0;
} else {
return -EINVAL;
}
}

static inline unsigned int
extern int
default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
const struct cpumask *andmask)
{
unsigned long mask1 = cpumask_bits(cpumask)[0];
unsigned long mask2 = cpumask_bits(andmask)[0];
unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];
const struct cpumask *andmask,
unsigned int *apicid);

return (unsigned int)(mask1 & mask2 & mask3);
static inline void
flat_vector_allocation_domain(int cpu, struct cpumask *retmask,
const struct cpumask *mask)
{
/* Careful. Some cpus do not strictly honor the set of cpus
* specified in the interrupt destination when using lowest
* priority interrupt delivery mode.
*
* In particular there was a hyperthreading cpu observed to
* deliver interrupts to the wrong hyperthread when only one
* hyperthread was specified in the interrupt desitination.
*/
cpumask_clear(retmask);
cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
}

static inline void
default_vector_allocation_domain(int cpu, struct cpumask *retmask,
const struct cpumask *mask)
{
cpumask_copy(retmask, cpumask_of(cpu));
}

static inline unsigned long default_check_apicid_used(physid_mask_t *map, int apicid)
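The cpu_mask_to_apicid_and() callback above changes from returning an APIC ID to returning an error code, with the ID delivered through an out-parameter, so an empty cpumask intersection can be reported as -EINVAL rather than a bogus ID. A hedged sketch of the resulting calling pattern (caller name is made up):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <asm/apic.h>

/* Pick a destination APIC ID for the intersection of two masks. */
static int example_pick_dest(const struct cpumask *a, const struct cpumask *b,
                             unsigned int *dest_apicid)
{
        return apic->cpu_mask_to_apicid_and(a, b, dest_apicid);
}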
@@ -4,9 +4,7 @@
enum reboot_type {
BOOT_TRIPLE = 't',
BOOT_KBD = 'k',
#ifdef CONFIG_X86_32
BOOT_BIOS = 'b',
#endif
BOOT_ACPI = 'a',
BOOT_EFI = 'e',
BOOT_CF9 = 'p',

@@ -99,7 +99,7 @@ static irqreturn_t floppy_hardint(int irq, void *dev_id)
virtual_dma_residue += virtual_dma_count;
virtual_dma_count = 0;
#ifdef TRACE_FLPY_INT
printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
printk(KERN_DEBUG "count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
virtual_dma_count, virtual_dma_residue, calls, bytes,
dma_wait);
calls = 0;

@@ -313,8 +313,8 @@ struct kvm_pmu {
u64 counter_bitmask[2];
u64 global_ctrl_mask;
u8 version;
struct kvm_pmc gp_counters[X86_PMC_MAX_GENERIC];
struct kvm_pmc fixed_counters[X86_PMC_MAX_FIXED];
struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
struct irq_work irq_work;
u64 reprogram_pmi;
};
@@ -115,8 +115,8 @@ notrace static inline int native_write_msr_safe(unsigned int msr,

extern unsigned long long native_read_tsc(void);

extern int native_rdmsr_safe_regs(u32 regs[8]);
extern int native_wrmsr_safe_regs(u32 regs[8]);
extern int rdmsr_safe_regs(u32 regs[8]);
extern int wrmsr_safe_regs(u32 regs[8]);

static __always_inline unsigned long long __native_read_tsc(void)
{

@@ -187,43 +187,6 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
return err;
}

static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
u32 gprs[8] = { 0 };
int err;

gprs[1] = msr;
gprs[7] = 0x9c5a203a;

err = native_rdmsr_safe_regs(gprs);

*p = gprs[0] | ((u64)gprs[2] << 32);

return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
u32 gprs[8] = { 0 };

gprs[0] = (u32)val;
gprs[1] = msr;
gprs[2] = val >> 32;
gprs[7] = 0x9c5a203a;

return native_wrmsr_safe_regs(gprs);
}

static inline int rdmsr_safe_regs(u32 regs[8])
{
return native_rdmsr_safe_regs(regs);
}

static inline int wrmsr_safe_regs(u32 regs[8])
{
return native_wrmsr_safe_regs(regs);
}

#define rdtscl(low) \
((low) = (u32)__native_read_tsc())

@@ -237,6 +200,8 @@ do { \
(high) = (u32)(_l >> 32); \
} while (0)

#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))

#define rdtscp(low, high, aux) \
do { \
unsigned long long _val = native_read_tscp(&(aux)); \

@@ -248,8 +213,7 @@ do { \

#endif /* !CONFIG_PARAVIRT */

#define checking_wrmsrl(msr, val) wrmsr_safe((msr), (u32)(val), \
#define wrmsrl_safe(msr, val) wrmsr_safe((msr), (u32)(val), \
(u32)((val) >> 32))

#define write_tsc(val1, val2) wrmsr(MSR_IA32_TSC, (val1), (val2))
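Besides dropping the AMD-specific *_amd_safe() helpers, this hunk renames checking_wrmsrl() to wrmsrl_safe(). The helper still returns non-zero instead of faulting when the MSR write is rejected, so probe-style callers can simply test the result. A small hedged sketch (names made up):

#include <linux/errno.h>
#include <asm/msr.h>

/* Try to program an MSR; report -EIO if the CPU rejects the access. */
static int example_try_msr(unsigned int msr, u64 value)
{
        if (wrmsrl_safe(msr, value))
                return -EIO;
        return 0;
}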
@@ -44,28 +44,14 @@ struct nmiaction {
const char *name;
};

#define register_nmi_handler(t, fn, fg, n) \
#define register_nmi_handler(t, fn, fg, n, init...) \
({ \
static struct nmiaction fn##_na = { \
static struct nmiaction init fn##_na = { \
.handler = (fn), \
.name = (n), \
.flags = (fg), \
}; \
__register_nmi_handler((t), &fn##_na); \
})

/*
* For special handlers that register/unregister in the
* init section only. This should be considered rare.
*/
#define register_nmi_handler_initonly(t, fn, fg, n) \
({ \
static struct nmiaction fn##_na __initdata = { \
.handler = (fn), \
.name = (n), \
.flags = (fg), \
}; \
__register_nmi_handler((t), &fn##_na); \
__register_nmi_handler((t), &fn##_na); \
})

int __register_nmi_handler(unsigned int, struct nmiaction *);
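register_nmi_handler() gains an optional trailing argument that is spliced into the static struct nmiaction definition, which is what lets the separate register_nmi_handler_initonly() variant go away: an init-only caller can simply pass __initdata. A hedged usage sketch (handler body is a stand-in):

#include <linux/init.h>
#include <asm/nmi.h>

static int __init example_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
        return NMI_DONE;        /* not ours, let other handlers look at it */
}

static int __init example_register(void)
{
        /* The extra __initdata argument lands on the static nmiaction. */
        return register_nmi_handler(NMI_LOCAL, example_nmi_handler, 0,
                                    "example", __initdata);
}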
@@ -128,21 +128,11 @@ static inline u64 paravirt_read_msr(unsigned msr, int *err)
return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}

static inline int paravirt_rdmsr_regs(u32 *regs)
{
return PVOP_CALL1(int, pv_cpu_ops.rdmsr_regs, regs);
}

static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}

static inline int paravirt_wrmsr_regs(u32 *regs)
{
return PVOP_CALL1(int, pv_cpu_ops.wrmsr_regs, regs);
}

/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2) \
do { \

@@ -176,9 +166,6 @@ do { \
_err; \
})

#define rdmsr_safe_regs(regs) paravirt_rdmsr_regs(regs)
#define wrmsr_safe_regs(regs) paravirt_wrmsr_regs(regs)

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
int err;

@@ -186,32 +173,6 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
*p = paravirt_read_msr(msr, &err);
return err;
}
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
u32 gprs[8] = { 0 };
int err;

gprs[1] = msr;
gprs[7] = 0x9c5a203a;

err = paravirt_rdmsr_regs(gprs);

*p = gprs[0] | ((u64)gprs[2] << 32);

return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
u32 gprs[8] = { 0 };

gprs[0] = (u32)val;
gprs[1] = msr;
gprs[2] = val >> 32;
gprs[7] = 0x9c5a203a;

return paravirt_wrmsr_regs(gprs);
}

static inline u64 paravirt_read_tsc(void)
{

@@ -252,6 +213,8 @@ do { \
high = _l >> 32; \
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))

static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);

@@ -153,9 +153,7 @@ struct pv_cpu_ops {
/* MSR, PMC and TSR operations.
err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */
u64 (*read_msr)(unsigned int msr, int *err);
int (*rdmsr_regs)(u32 *regs);
int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
int (*wrmsr_regs)(u32 *regs);

u64 (*read_tsc)(void);
u64 (*read_pmc)(int counter);
@@ -7,9 +7,13 @@
#undef DEBUG

#ifdef DEBUG
#define DBG(x...) printk(x)
#define DBG(fmt, ...) printk(fmt, ##__VA_ARGS__)
#else
#define DBG(x...)
#define DBG(fmt, ...) \
do { \
if (0) \
printk(fmt, ##__VA_ARGS__); \
} while (0)
#endif

#define PCI_PROBE_BIOS 0x0001
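The disabled branch of DBG() now expands to an if (0) printk() wrapped in do { } while (0) instead of expanding to nothing. The call is optimized away, but the compiler still type-checks the format string and arguments even when debugging is off. A minimal illustration of the idiom (macro name is made up):

#include <linux/printk.h>

#define EXAMPLE_DBG(fmt, ...)                           \
do {                                                    \
        if (0)                                          \
                printk(fmt, ##__VA_ARGS__);             \
} while (0)

static void example(void)
{
        /* Compiles to nothing, but "%d" vs. a pointer would still warn. */
        EXAMPLE_DBG("value=%d\n", 42);
}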
@@ -5,11 +5,10 @@
* Performance event hw details:
*/

#define X86_PMC_MAX_GENERIC 32
#define X86_PMC_MAX_FIXED 3
#define INTEL_PMC_MAX_GENERIC 32
#define INTEL_PMC_MAX_FIXED 3
#define INTEL_PMC_IDX_FIXED 32

#define X86_PMC_IDX_GENERIC 0
#define X86_PMC_IDX_FIXED 32
#define X86_PMC_IDX_MAX 64

#define MSR_ARCH_PERFMON_PERFCTR0 0xc1

@@ -48,8 +47,7 @@
(X86_RAW_EVENT_MASK | \
AMD64_EVENTSEL_EVENT)
#define AMD64_NUM_COUNTERS 4
#define AMD64_NUM_COUNTERS_F15H 6
#define AMD64_NUM_COUNTERS_MAX AMD64_NUM_COUNTERS_F15H
#define AMD64_NUM_COUNTERS_CORE 6

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)

@@ -121,16 +119,16 @@ struct x86_pmu_capability {

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0 0x309
#define X86_PMC_IDX_FIXED_INSTRUCTIONS (X86_PMC_IDX_FIXED + 0)
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS (INTEL_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a
#define X86_PMC_IDX_FIXED_CPU_CYCLES (X86_PMC_IDX_FIXED + 1)
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES (INTEL_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b
#define X86_PMC_IDX_FIXED_REF_CYCLES (X86_PMC_IDX_FIXED + 2)
#define X86_PMC_MSK_FIXED_REF_CYCLES (1ULL << X86_PMC_IDX_FIXED_REF_CYCLES)
#define INTEL_PMC_IDX_FIXED_REF_CYCLES (INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES (1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)

/*
* We model BTS tracing as another fixed-mode PMC.

@@ -139,7 +137,7 @@ struct x86_pmu_capability {
* values are used by actual fixed events and higher values are used
* to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
*/
#define X86_PMC_IDX_FIXED_BTS (X86_PMC_IDX_FIXED + 16)
#define INTEL_PMC_IDX_FIXED_BTS (INTEL_PMC_IDX_FIXED + 16)

/*
* IBS cpuid feature detection

@@ -234,6 +232,7 @@ struct perf_guest_switch_msr {

extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern void perf_check_microcode(void);
#else
static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{

@@ -247,6 +246,7 @@ static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
}

static inline void perf_events_lapic_init(void) { }
static inline void perf_check_microcode(void) { }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
@@ -2,9 +2,9 @@
#define _ASM_X86_PGTABLE_2LEVEL_H

#define pte_ERROR(e) \
printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
pr_err("%s:%d: bad pte %08lx\n", __FILE__, __LINE__, (e).pte_low)
#define pgd_ERROR(e) \
printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
pr_err("%s:%d: bad pgd %08lx\n", __FILE__, __LINE__, pgd_val(e))

/*
* Certain architectures need to do special things when PTEs

@@ -9,13 +9,13 @@
*/

#define pte_ERROR(e) \
printk("%s:%d: bad pte %p(%08lx%08lx).\n", \
pr_err("%s:%d: bad pte %p(%08lx%08lx)\n", \
__FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
#define pmd_ERROR(e) \
printk("%s:%d: bad pmd %p(%016Lx).\n", \
pr_err("%s:%d: bad pmd %p(%016Lx)\n", \
__FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e) \
printk("%s:%d: bad pgd %p(%016Lx).\n", \
pr_err("%s:%d: bad pgd %p(%016Lx)\n", \
__FILE__, __LINE__, &(e), pgd_val(e))

/* Rules for using set_pte: the pte being assigned *must* be

@@ -26,16 +26,16 @@ extern pgd_t init_level4_pgt[];
extern void paging_init(void);

#define pte_ERROR(e) \
printk("%s:%d: bad pte %p(%016lx).\n", \
pr_err("%s:%d: bad pte %p(%016lx)\n", \
__FILE__, __LINE__, &(e), pte_val(e))
#define pmd_ERROR(e) \
printk("%s:%d: bad pmd %p(%016lx).\n", \
pr_err("%s:%d: bad pmd %p(%016lx)\n", \
__FILE__, __LINE__, &(e), pmd_val(e))
#define pud_ERROR(e) \
printk("%s:%d: bad pud %p(%016lx).\n", \
pr_err("%s:%d: bad pud %p(%016lx)\n", \
__FILE__, __LINE__, &(e), pud_val(e))
#define pgd_ERROR(e) \
printk("%s:%d: bad pgd %p(%016lx).\n", \
pr_err("%s:%d: bad pgd %p(%016lx)\n", \
__FILE__, __LINE__, &(e), pgd_val(e))

struct mm_struct;
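The conversions above swap bare printk() calls for pr_err(), which expands to printk(KERN_ERR pr_fmt(fmt), ...). Besides forcing an explicit log level, that lets a file pick up a common prefix by defining pr_fmt(). A minimal sketch (prefix and function are hypothetical):

/* pr_fmt() must be defined before the first kernel header is included. */
#define pr_fmt(fmt) "pgtable: " fmt
#include <linux/printk.h>

static void example_report(unsigned long val)
{
        pr_err("bad entry %08lx\n", val);   /* logs "pgtable: bad entry ..." at KERN_ERR */
}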
@@ -21,8 +21,9 @@ struct real_mode_header {
u32 wakeup_header;
#endif
/* APM/BIOS reboot */
#ifdef CONFIG_X86_32
u32 machine_real_restart_asm;
#ifdef CONFIG_X86_64
u32 machine_real_restart_seg;
#endif
};

@@ -18,8 +18,8 @@ extern struct machine_ops machine_ops;

void native_machine_crash_shutdown(struct pt_regs *regs);
void native_machine_shutdown(void);
void machine_real_restart(unsigned int type);
/* These must match dispatch_table in reboot_32.S */
void __noreturn machine_real_restart(unsigned int type);
/* These must match dispatch in arch/x86/realmore/rm/reboot.S */
#define MRR_BIOS 0
#define MRR_APM 1