commit a5ad5742f6
Merge branch 'akpm' (patches from Andrew)

Merge even more updates from Andrew Morton:

 - a kernel-wide sweep of show_stack()
 - pagetable cleanups
 - abstract out accesses to mmap_sem - prep for mmap_sem scalability work
 - hch's user access work

Subsystems affected by this patch series: debug, mm/pagemap, mm/maccess,
mm/documentation.

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (93 commits)
  include/linux/cache.h: expand documentation over __read_mostly
  maccess: return -ERANGE when probe_kernel_read() fails
  x86: use non-set_fs based maccess routines
  maccess: allow architectures to provide kernel probing directly
  maccess: move user access routines together
  maccess: always use strict semantics for probe_kernel_read
  maccess: remove strncpy_from_unsafe
  tracing/kprobes: handle mixed kernel/userspace probes better
  bpf: rework the compat kernel probe handling
  bpf: bpf_seq_printf(): handle potentially unsafe format string better
  bpf: handle the compat string in bpf_trace_copy_string better
  bpf: factor out a bpf_trace_copy_string helper
  maccess: unify the probe kernel arch hooks
  maccess: remove probe_read_common and probe_write_common
  maccess: rename strnlen_unsafe_user to strnlen_user_nofault
  maccess: rename strncpy_from_unsafe_strict to strncpy_from_kernel_nofault
  maccess: rename strncpy_from_unsafe_user to strncpy_from_user_nofault
  maccess: update the top of file comment
  maccess: clarify kerneldoc comments
  maccess: remove duplicate kerneldoc comments
  ...
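A rough sketch of the "abstract out accesses to mmap_sem" part of this series
(the wrapper names are the ones visible in the hunks below; the two helper
functions and their names are illustrative only, not code from this merge):

    #include <linux/mm.h>   /* find_vma(); assumed to pull in the new mmap_lock wrappers */

    /* Before: callers open-code the rw_semaphore embedded in struct mm_struct. */
    static bool vma_exists_old(struct mm_struct *mm, unsigned long addr)
    {
            bool found;

            down_read(&mm->mmap_sem);
            found = find_vma(mm, addr) != NULL;
            up_read(&mm->mmap_sem);
            return found;
    }

    /* After: callers go through the mmap_lock wrappers, so the lock
     * implementation can change later without touching every call site again. */
    static bool vma_exists_new(struct mm_struct *mm, unsigned long addr)
    {
            bool found;

            mmap_read_lock(mm);
            found = find_vma(mm, addr) != NULL;
            mmap_read_unlock(mm);
            return found;
    }

The same substitution repeats throughout the diff: down_read()/up_read() become
mmap_read_lock()/mmap_read_unlock(), down_write_killable() becomes
mmap_write_lock_killable(), and down_read_trylock() becomes mmap_read_trylock().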
@@ -364,19 +364,19 @@ follows:

 2) for querying the policy, we do not need to take an extra reference on the
    target task's task policy nor vma policies because we always acquire the
-   task's mm's mmap_sem for read during the query. The set_mempolicy() and
-   mbind() APIs [see below] always acquire the mmap_sem for write when
+   task's mm's mmap_lock for read during the query. The set_mempolicy() and
+   mbind() APIs [see below] always acquire the mmap_lock for write when
    installing or replacing task or vma policies. Thus, there is no possibility
    of a task or thread freeing a policy while another task or thread is
    querying it.

 3) Page allocation usage of task or vma policy occurs in the fault path where
-   we hold them mmap_sem for read. Again, because replacing the task or vma
-   policy requires that the mmap_sem be held for write, the policy can't be
+   we hold them mmap_lock for read. Again, because replacing the task or vma
+   policy requires that the mmap_lock be held for write, the policy can't be
    freed out from under us while we're using it for page allocation.

 4) Shared policies require special consideration. One task can replace a
-   shared memory policy while another task, with a distinct mmap_sem, is
+   shared memory policy while another task, with a distinct mmap_lock, is
    querying or allocating a page based on the policy. To resolve this
    potential race, the shared policy infrastructure adds an extra reference
    to the shared policy during lookup while holding a spin lock on the shared
@@ -33,7 +33,7 @@ memory ranges) provides two primary functionalities:
 The real advantage of userfaults if compared to regular virtual memory
 management of mremap/mprotect is that the userfaults in all their
 operations never involve heavyweight structures like vmas (in fact the
-``userfaultfd`` runtime load never takes the mmap_sem for writing).
+``userfaultfd`` runtime load never takes the mmap_lock for writing).

 Vmas are not suitable for page- (or hugepage) granular fault tracking
 when dealing with virtual address spaces that could span
@@ -615,7 +615,7 @@ prototypes::
 locking rules:

 =============  ========  ===========================
-ops            mmap_sem  PageLocked(page)
+ops            mmap_lock PageLocked(page)
 =============  ========  ===========================
 open:          yes
 close:         yes
@@ -191,15 +191,15 @@ The usage pattern is::

  again:
       range.notifier_seq = mmu_interval_read_begin(&interval_sub);
-      down_read(&mm->mmap_sem);
+      mmap_read_lock(mm);
       ret = hmm_range_fault(&range);
       if (ret) {
-          up_read(&mm->mmap_sem);
+          mmap_read_unlock(mm);
           if (ret == -EBUSY)
                  goto again;
           return ret;
       }
-      up_read(&mm->mmap_sem);
+      mmap_read_unlock(mm);

       take_lock(driver->update);
       if (mmu_interval_read_retry(&ni, range.notifier_seq) {
@@ -98,9 +98,9 @@ split_huge_page() or split_huge_pmd() has a cost.

 To make pagetable walks huge pmd aware, all you need to do is to call
 pmd_trans_huge() on the pmd returned by pmd_offset. You must hold the
-mmap_sem in read (or write) mode to be sure a huge pmd cannot be
+mmap_lock in read (or write) mode to be sure a huge pmd cannot be
 created from under you by khugepaged (khugepaged collapse_huge_page
-takes the mmap_sem in write mode in addition to the anon_vma lock). If
+takes the mmap_lock in write mode in addition to the anon_vma lock). If
 pmd_trans_huge returns false, you just fallback in the old code
 paths. If instead pmd_trans_huge returns true, you have to take the
 page table lock (pmd_lock()) and re-run pmd_trans_huge. Taking the
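The walk that paragraph describes looks roughly like this (a sketch only, not
code from this series; walk_one_pmd() and the empty branches are placeholders,
while the locking calls are the ones the text names):

    #include <linux/mm.h>
    #include <linux/huge_mm.h>

    static void walk_one_pmd(struct mm_struct *mm, pmd_t *pmd)
    {
            mmap_read_lock(mm);     /* keeps khugepaged from collapsing a huge pmd under us */
            if (pmd_trans_huge(*pmd)) {
                    spinlock_t *ptl = pmd_lock(mm, pmd);

                    if (pmd_trans_huge(*pmd)) {     /* re-check under the page table lock */
                            /* operate on the huge pmd */
                    }
                    spin_unlock(ptl);
            } else {
                    /* fall back to the regular pte-level code path */
            }
            mmap_read_unlock(mm);
    }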
@@ -16,7 +16,6 @@

 #include <asm/console.h>
 #include <asm/hwrpb.h>
-#include <asm/pgtable.h>
 #include <asm/io.h>

 #include <stdarg.h>

@@ -18,7 +18,6 @@

 #include <asm/console.h>
 #include <asm/hwrpb.h>
-#include <asm/pgtable.h>
 #include <asm/io.h>

 #include <stdarg.h>

@@ -14,7 +14,6 @@

 #include <asm/console.h>
 #include <asm/hwrpb.h>
-#include <asm/pgtable.h>

 #include <stdarg.h>


@@ -7,7 +7,6 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <asm/compiler.h>
-#include <asm/pgtable.h>
 #include <asm/machvec.h>
 #include <asm/hwrpb.h>

@ -276,15 +276,6 @@ extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) &= ~_PAGE_FOW; return
|
|||
extern inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= __DIRTY_BITS; return pte; }
|
||||
extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= __ACCESS_BITS; return pte; }
|
||||
|
||||
#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))
|
||||
|
||||
/* to find an entry in a kernel page-table-directory */
|
||||
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
|
||||
|
||||
/* to find an entry in a page-table-directory. */
|
||||
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
|
||||
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
|
||||
|
||||
/*
|
||||
* The smp_read_barrier_depends() in the following functions are required to
|
||||
* order the load of *dir (the pointer in the top level page table) with any
|
||||
|
@ -305,6 +296,7 @@ extern inline pmd_t * pmd_offset(pud_t * dir, unsigned long address)
|
|||
smp_read_barrier_depends(); /* see above */
|
||||
return ret;
|
||||
}
|
||||
#define pmd_offset pmd_offset
|
||||
|
||||
/* Find an entry in the third-level page table.. */
|
||||
extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
|
||||
|
@ -314,9 +306,7 @@ extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
|
|||
smp_read_barrier_depends(); /* see above */
|
||||
return ret;
|
||||
}
|
||||
|
||||
#define pte_offset_map(dir,addr) pte_offset_kernel((dir),(addr))
|
||||
#define pte_unmap(pte) do { } while (0)
|
||||
#define pte_offset_kernel pte_offset_kernel
|
||||
|
||||
extern pgd_t swapper_pg_dir[1024];
|
||||
|
||||
|
@ -355,8 +345,6 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
|
|||
|
||||
extern void paging_init(void);
|
||||
|
||||
#include <asm-generic/pgtable.h>
|
||||
|
||||
/* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT. */
|
||||
#define HAVE_ARCH_UNMAPPED_AREA
|
||||
|
||||
|
|
|
@ -37,7 +37,6 @@
|
|||
#include <asm/reg.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/hwrpb.h>
|
||||
#include <asm/fpu.h>
|
||||
|
||||
|
|
|
@ -2,8 +2,6 @@
|
|||
#include <linux/interrupt.h>
|
||||
#include <linux/io.h>
|
||||
|
||||
#include <asm/pgtable.h>
|
||||
|
||||
/* Prototypes of functions used across modules here in this directory. */
|
||||
|
||||
#define vucp volatile unsigned char *
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
#include <linux/audit.h>
|
||||
|
||||
#include <linux/uaccess.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/fpu.h>
|
||||
|
||||
#include "proto.h"
|
||||
|
|
|
@ -55,7 +55,6 @@ static struct notifier_block alpha_panic_block = {
|
|||
};
|
||||
|
||||
#include <linux/uaccess.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/hwrpb.h>
|
||||
#include <asm/dma.h>
|
||||
#include <asm/mmu_context.h>
|
||||
|
|
|
@ -36,7 +36,6 @@
|
|||
|
||||
#include <asm/io.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
|
|
@ -23,7 +23,6 @@
|
|||
#include <asm/dma.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/core_cia.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
||||
|
|
|
@ -23,7 +23,6 @@
|
|||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/core_apecs.h>
|
||||
#include <asm/core_cia.h>
|
||||
#include <asm/core_lca.h>
|
||||
|
|
|
@ -26,7 +26,6 @@
|
|||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/core_tsunami.h>
|
||||
#include <asm/hwrpb.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
|
|
@ -22,7 +22,6 @@
|
|||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/core_apecs.h>
|
||||
#include <asm/core_lca.h>
|
||||
#include <asm/hwrpb.h>
|
||||
|
|
|
@ -23,7 +23,6 @@
|
|||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/core_tsunami.h>
|
||||
#include <asm/hwrpb.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
|
|
@ -25,7 +25,6 @@
|
|||
#include <asm/dma.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
||||
#include "proto.h"
|
||||
|
|
|
@ -18,7 +18,6 @@
|
|||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/core_marvel.h>
|
||||
#include <asm/hwrpb.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
|
|
@ -22,7 +22,6 @@
|
|||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/core_cia.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
||||
|
|
|
@ -23,7 +23,6 @@
|
|||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/core_apecs.h>
|
||||
#include <asm/core_cia.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
|
|
@ -40,7 +40,6 @@
|
|||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/core_irongate.h>
|
||||
#include <asm/hwrpb.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
|
|
@ -24,7 +24,6 @@
|
|||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/core_apecs.h>
|
||||
#include <asm/core_cia.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
|
|
@ -21,7 +21,6 @@
|
|||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/core_mcpcia.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
||||
|
|
|
@ -23,7 +23,6 @@
|
|||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/core_cia.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
||||
|
|
|
@ -22,7 +22,6 @@
|
|||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/core_polaris.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
||||
|
|
|
@ -21,7 +21,6 @@
|
|||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/core_t2.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
||||
|
|
|
@ -25,7 +25,6 @@
|
|||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/core_apecs.h>
|
||||
#include <asm/core_lca.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
|
|
@ -22,7 +22,6 @@
|
|||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/core_cia.h>
|
||||
#include <asm/hwrpb.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
|
|
@ -21,7 +21,6 @@
|
|||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/core_cia.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
||||
|
|
|
@ -26,7 +26,6 @@
|
|||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/core_titan.h>
|
||||
#include <asm/hwrpb.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
|
|
@ -20,7 +20,6 @@
|
|||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/core_wildfire.h>
|
||||
#include <asm/hwrpb.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
|
|
@@ -121,10 +121,10 @@ dik_show_code(unsigned int *pc)
 }

 static void
-dik_show_trace(unsigned long *sp)
+dik_show_trace(unsigned long *sp, const char *loglvl)
 {
        long i = 0;
-       printk("Trace:\n");
+       printk("%sTrace:\n", loglvl);
        while (0x1ff8 & (unsigned long) sp) {
                extern char _stext[], _etext[];
                unsigned long tmp = *sp;
@@ -133,24 +133,24 @@ dik_show_trace(unsigned long *sp)
                        continue;
                if (tmp >= (unsigned long) &_etext)
                        continue;
-               printk("[<%lx>] %pSR\n", tmp, (void *)tmp);
+               printk("%s[<%lx>] %pSR\n", loglvl, tmp, (void *)tmp);
                if (i > 40) {
-                       printk(" ...");
+                       printk("%s ...", loglvl);
                        break;
                }
        }
-       printk("\n");
+       printk("%s\n", loglvl);
 }

 static int kstack_depth_to_print = 24;

-void show_stack(struct task_struct *task, unsigned long *sp)
+void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
 {
        unsigned long *stack;
        int i;

        /*
-        * debugging aid: "show_stack(NULL);" prints the
+        * debugging aid: "show_stack(NULL, NULL, KERN_EMERG);" prints the
         * back trace for this cpu.
         */
        if(sp==NULL)
@@ -163,14 +163,14 @@ void show_stack(struct task_struct *task, unsigned long *sp)
                if ((i % 4) == 0) {
                        if (i)
                                pr_cont("\n");
-                       printk(" ");
+                       printk("%s ", loglvl);
                } else {
                        pr_cont(" ");
                }
                pr_cont("%016lx", *stack++);
        }
        pr_cont("\n");
-       dik_show_trace(sp);
+       dik_show_trace(sp, loglvl);
 }

 void
@@ -184,7 +184,7 @@ die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
        printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err);
        dik_show_regs(regs, r9_15);
        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
-       dik_show_trace((unsigned long *)(regs+1));
+       dik_show_trace((unsigned long *)(regs+1), KERN_DEFAULT);
        dik_show_code((unsigned int *)regs->pc);

        if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
@@ -625,7 +625,7 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
        printk("gp = %016lx sp = %p\n", regs->gp, regs+1);

        dik_show_code((unsigned int *)pc);
-       dik_show_trace((unsigned long *)(regs+1));
+       dik_show_trace((unsigned long *)(regs+1), KERN_DEFAULT);

        if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
                printk("die_if_kernel recursion detected.\n");
@@ -957,12 +957,12 @@ do_entUnaUser(void __user * va, unsigned long opcode,
                si_code = SEGV_ACCERR;
        else {
                struct mm_struct *mm = current->mm;
-               down_read(&mm->mmap_sem);
+               mmap_read_lock(mm);
                if (find_vma(mm, (unsigned long)va))
                        si_code = SEGV_ACCERR;
                else
                        si_code = SEGV_MAPERR;
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
        }
        send_sig_fault(SIGSEGV, si_code, va, 0, current);
        return;
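The traps.c changes above are one instance of the series' show_stack() sweep:
each stack printer grows a const char *loglvl parameter, prefixes its printk()
output with it, and callers pick the severity explicitly. A minimal
illustration (the two helpers are hypothetical; the format string and the
KERN_EMERG usage are copied from the hunks above):

    #include <linux/printk.h>
    #include <linux/sched/debug.h>          /* show_stack() */

    /* Dump the current CPU's backtrace at emergency severity. */
    static void dump_this_cpu(void)
    {
            show_stack(NULL, NULL, KERN_EMERG);
    }

    /* Inside a printer, the level is threaded through to every printk(). */
    static void print_frame(unsigned long pc, const char *loglvl)
    {
            printk("%s[<%lx>] %pSR\n", loglvl, pc, (void *)pc);
    }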
|
|
@ -117,7 +117,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
|
|||
if (user_mode(regs))
|
||||
flags |= FAULT_FLAG_USER;
|
||||
retry:
|
||||
down_read(&mm->mmap_sem);
|
||||
mmap_read_lock(mm);
|
||||
vma = find_vma(mm, address);
|
||||
if (!vma)
|
||||
goto bad_area;
|
||||
|
@ -171,7 +171,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
|
|||
if (fault & VM_FAULT_RETRY) {
|
||||
flags |= FAULT_FLAG_TRIED;
|
||||
|
||||
/* No need to up_read(&mm->mmap_sem) as we would
|
||||
/* No need to mmap_read_unlock(mm) as we would
|
||||
* have already released it in __lock_page_or_retry
|
||||
* in mm/filemap.c.
|
||||
*/
|
||||
|
@ -180,14 +180,14 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
|
|||
}
|
||||
}
|
||||
|
||||
up_read(&mm->mmap_sem);
|
||||
mmap_read_unlock(mm);
|
||||
|
||||
return;
|
||||
|
||||
/* Something tried to access memory that isn't in our memory map.
|
||||
Fix it, but check if it's kernel or user first. */
|
||||
bad_area:
|
||||
up_read(&mm->mmap_sem);
|
||||
mmap_read_unlock(mm);
|
||||
|
||||
if (user_mode(regs))
|
||||
goto do_sigsegv;
|
||||
|
@ -211,14 +211,14 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
|
|||
/* We ran out of memory, or some other thing happened to us that
|
||||
made us unable to handle the page fault gracefully. */
|
||||
out_of_memory:
|
||||
up_read(&mm->mmap_sem);
|
||||
mmap_read_unlock(mm);
|
||||
if (!user_mode(regs))
|
||||
goto no_context;
|
||||
pagefault_out_of_memory();
|
||||
return;
|
||||
|
||||
do_sigbus:
|
||||
up_read(&mm->mmap_sem);
|
||||
mmap_read_unlock(mm);
|
||||
/* Send a sigbus, regardless of whether we were in kernel
|
||||
or user mode. */
|
||||
force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *) address, 0);
|
||||
|
|
|
@ -24,7 +24,6 @@
|
|||
#include <linux/gfp.h>
|
||||
|
||||
#include <linux/uaccess.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/hwrpb.h>
|
||||
#include <asm/dma.h>
|
||||
|
|
|
@ -13,7 +13,8 @@
|
|||
struct task_struct;
|
||||
|
||||
void show_regs(struct pt_regs *regs);
|
||||
void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs);
|
||||
void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs,
|
||||
const char *loglvl);
|
||||
void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
|
||||
unsigned long address);
|
||||
void die(const char *str, struct pt_regs *regs, unsigned long address);
|
||||
|
|
|
@ -248,9 +248,6 @@
|
|||
extern char empty_zero_page[PAGE_SIZE];
|
||||
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
|
||||
|
||||
#define pte_unmap(pte) do { } while (0)
|
||||
#define pte_unmap_nested(pte) do { } while (0)
|
||||
|
||||
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
|
||||
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
|
||||
|
||||
|
@ -282,18 +279,6 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
|
|||
|
||||
/* Don't use virt_to_pfn for macros below: could cause truncations for PAE40*/
|
||||
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
|
||||
#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
|
||||
|
||||
/*
|
||||
* pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
|
||||
* and returns ptr to PTE entry corresponding to @addr
|
||||
*/
|
||||
#define pte_offset(dir, addr) ((pte_t *)(pmd_page_vaddr(*dir)) +\
|
||||
__pte_index(addr))
|
||||
|
||||
/* No mapping of Page Tables in high mem etc, so following same as above */
|
||||
#define pte_offset_kernel(dir, addr) pte_offset(dir, addr)
|
||||
#define pte_offset_map(dir, addr) pte_offset(dir, addr)
|
||||
|
||||
/* Zoo of pte_xxx function */
|
||||
#define pte_read(pte) (pte_val(pte) & _PAGE_READ)
|
||||
|
@ -331,13 +316,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
|
|||
set_pte(ptep, pteval);
|
||||
}
|
||||
|
||||
/*
|
||||
* All kernel related VM pages are in init's mm.
|
||||
*/
|
||||
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
|
||||
#define pgd_index(addr) ((addr) >> PGDIR_SHIFT)
|
||||
#define pgd_offset(mm, addr) (((mm)->pgd)+pgd_index(addr))
|
||||
|
||||
/*
|
||||
* Macro to quickly access the PGD entry, utlising the fact that some
|
||||
* arch may cache the pointer to Page Directory of "current" task
|
||||
|
@ -390,8 +368,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
|
|||
#include <asm/hugepage.h>
|
||||
#endif
|
||||
|
||||
#include <asm-generic/pgtable.h>
|
||||
|
||||
/* to cope with aliasing VIPT cache */
|
||||
#define HAVE_ARCH_UNMAPPED_AREA
|
||||
|
||||
|
|
|
@ -90,10 +90,10 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
|
|||
if (unlikely(ret != -EFAULT))
|
||||
goto fail;
|
||||
|
||||
down_read(¤t->mm->mmap_sem);
|
||||
mmap_read_lock(current->mm);
|
||||
ret = fixup_user_fault(current, current->mm, (unsigned long) uaddr,
|
||||
FAULT_FLAG_WRITE, NULL);
|
||||
up_read(¤t->mm->mmap_sem);
|
||||
mmap_read_unlock(current->mm);
|
||||
|
||||
if (likely(!ret))
|
||||
goto again;
|
||||
|
|
|
@ -158,9 +158,11 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
|
|||
/* Call-back which plugs into unwinding core to dump the stack in
|
||||
* case of panic/OOPs/BUG etc
|
||||
*/
|
||||
static int __print_sym(unsigned int address, void *unused)
|
||||
static int __print_sym(unsigned int address, void *arg)
|
||||
{
|
||||
printk(" %pS\n", (void *)address);
|
||||
const char *loglvl = arg;
|
||||
|
||||
printk("%s %pS\n", loglvl, (void *)address);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -217,17 +219,18 @@ static int __get_first_nonsched(unsigned int address, void *unused)
|
|||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
||||
noinline void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs)
|
||||
noinline void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs,
|
||||
const char *loglvl)
|
||||
{
|
||||
pr_info("\nStack Trace:\n");
|
||||
arc_unwind_core(tsk, regs, __print_sym, NULL);
|
||||
printk("%s\nStack Trace:\n", loglvl);
|
||||
arc_unwind_core(tsk, regs, __print_sym, (void *)loglvl);
|
||||
}
|
||||
EXPORT_SYMBOL(show_stacktrace);
|
||||
|
||||
/* Expected by sched Code */
|
||||
void show_stack(struct task_struct *tsk, unsigned long *sp)
|
||||
void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
|
||||
{
|
||||
show_stacktrace(tsk, NULL);
|
||||
show_stacktrace(tsk, NULL, loglvl);
|
||||
}
|
||||
|
||||
/* Another API expected by schedular, shows up in "ps" as Wait Channel
|
||||
|
|
|
@ -89,7 +89,7 @@ static void show_faulting_vma(unsigned long address)
|
|||
/* can't use print_vma_addr() yet as it doesn't check for
|
||||
* non-inclusive vma
|
||||
*/
|
||||
down_read(&active_mm->mmap_sem);
|
||||
mmap_read_lock(active_mm);
|
||||
vma = find_vma(active_mm, address);
|
||||
|
||||
/* check against the find_vma( ) behaviour which returns the next VMA
|
||||
|
@ -111,7 +111,7 @@ static void show_faulting_vma(unsigned long address)
|
|||
} else
|
||||
pr_info(" @No matching VMA found\n");
|
||||
|
||||
up_read(&active_mm->mmap_sem);
|
||||
mmap_read_unlock(active_mm);
|
||||
}
|
||||
|
||||
static void show_ecr_verbose(struct pt_regs *regs)
|
||||
|
@ -240,5 +240,5 @@ void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
|
|||
|
||||
/* Show stack trace if this Fatality happened in kernel mode */
|
||||
if (!user_mode(regs))
|
||||
show_stacktrace(current, regs);
|
||||
show_stacktrace(current, regs, KERN_DEFAULT);
|
||||
}
|
||||
|
|
|
@ -107,7 +107,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
|
|||
flags |= FAULT_FLAG_WRITE;
|
||||
|
||||
retry:
|
||||
down_read(&mm->mmap_sem);
|
||||
mmap_read_lock(mm);
|
||||
|
||||
vma = find_vma(mm, address);
|
||||
if (!vma)
|
||||
|
@ -141,7 +141,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
|
|||
}
|
||||
|
||||
/*
|
||||
* Fault retry nuances, mmap_sem already relinquished by core mm
|
||||
* Fault retry nuances, mmap_lock already relinquished by core mm
|
||||
*/
|
||||
if (unlikely((fault & VM_FAULT_RETRY) &&
|
||||
(flags & FAULT_FLAG_ALLOW_RETRY))) {
|
||||
|
@ -150,7 +150,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
|
|||
}
|
||||
|
||||
bad_area:
|
||||
up_read(&mm->mmap_sem);
|
||||
mmap_read_unlock(mm);
|
||||
|
||||
/*
|
||||
* Major/minor page fault accounting
|
||||
|
|
|
@ -6,8 +6,8 @@
|
|||
#include <linux/memblock.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/pgtable.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
||||
|
@ -92,17 +92,9 @@ EXPORT_SYMBOL(kunmap_atomic_high);
|
|||
|
||||
static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
|
||||
{
|
||||
pgd_t *pgd_k;
|
||||
p4d_t *p4d_k;
|
||||
pud_t *pud_k;
|
||||
pmd_t *pmd_k;
|
||||
pmd_t *pmd_k = pmd_off_k(kvaddr);
|
||||
pte_t *pte_k;
|
||||
|
||||
pgd_k = pgd_offset_k(kvaddr);
|
||||
p4d_k = p4d_offset(pgd_k, kvaddr);
|
||||
pud_k = pud_offset(p4d_k, kvaddr);
|
||||
pmd_k = pmd_offset(pud_k, kvaddr);
|
||||
|
||||
pte_k = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
|
||||
if (!pte_k)
|
||||
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
|
||||
|
|
|
@ -33,9 +33,9 @@
|
|||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
#include <linux/pgtable.h>
|
||||
#include <asm/entry.h>
|
||||
#include <asm/mmu.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/arcregs.h>
|
||||
#include <asm/cache.h>
|
||||
#include <asm/processor.h>
|
||||
|
|
|
@ -82,7 +82,8 @@ void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int,
|
|||
struct pt_regs *),
|
||||
int sig, int code, const char *name);
|
||||
|
||||
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
|
||||
extern asmlinkage void c_backtrace(unsigned long fp, int pmode,
|
||||
const char *loglvl);
|
||||
|
||||
struct mm_struct;
|
||||
void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr);
|
||||
|
|
|
@ -13,7 +13,6 @@
|
|||
#include <asm/highmem.h>
|
||||
#include <asm/mach/map.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/ptrace.h>
|
||||
|
||||
#ifdef CONFIG_EFI
|
||||
|
|
|
@ -6,8 +6,8 @@
|
|||
#define FIXADDR_END 0xfff00000UL
|
||||
#define FIXADDR_TOP (FIXADDR_END - PAGE_SIZE)
|
||||
|
||||
#include <linux/pgtable.h>
|
||||
#include <asm/kmap_types.h>
|
||||
#include <asm/pgtable.h>
|
||||
|
||||
enum fixed_addresses {
|
||||
FIX_EARLYCON_MEM_BASE,
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
#define __ASM_IDMAP_H
|
||||
|
||||
#include <linux/compiler.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <linux/pgtable.h>
|
||||
|
||||
/* Tag a function as requiring to be executed via an identity mapping. */
|
||||
#define __idmap __section(.idmap.text) noinline notrace
|
||||
|
|
|
@ -187,6 +187,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
|
|||
{
|
||||
return (pmd_t *)pud;
|
||||
}
|
||||
#define pmd_offset pmd_offset
|
||||
|
||||
#define pmd_large(pmd) (pmd_val(pmd) & 2)
|
||||
#define pmd_leaf(pmd) (pmd_val(pmd) & 2)
|
||||
|
|
|
@ -133,13 +133,6 @@ static inline pmd_t *pud_page_vaddr(pud_t pud)
|
|||
return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
|
||||
}
|
||||
|
||||
/* Find an entry in the second-level page table.. */
|
||||
#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
|
||||
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
|
||||
{
|
||||
return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
|
||||
}
|
||||
|
||||
#define pmd_bad(pmd) (!(pmd_val(pmd) & 2))
|
||||
|
||||
#define copy_pmd(pmdpd,pmdps) \
|
||||
|
|
|
@ -22,7 +22,6 @@
|
|||
#define pgd_bad(pgd) (0)
|
||||
#define pgd_clear(pgdp)
|
||||
#define kern_addr_valid(addr) (1)
|
||||
#define pmd_offset(a, b) ((void *)0)
|
||||
/* FIXME */
|
||||
/*
|
||||
* PMD_SHIFT determines the size of the area a second-level page table can map
|
||||
|
@ -73,8 +72,6 @@ extern unsigned int kobjsize(const void *objp);
|
|||
|
||||
#define FIRST_USER_ADDRESS 0UL
|
||||
|
||||
#include <asm-generic/pgtable.h>
|
||||
|
||||
#else
|
||||
|
||||
/*
|
||||
|
|
|
@ -166,14 +166,6 @@ extern struct page *empty_zero_page;
|
|||
|
||||
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
|
||||
|
||||
/* to find an entry in a page-table-directory */
|
||||
#define pgd_index(addr) ((addr) >> PGDIR_SHIFT)
|
||||
|
||||
#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
|
||||
|
||||
/* to find an entry in a kernel page-table-directory */
|
||||
#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
|
||||
|
||||
#define pmd_none(pmd) (!pmd_val(pmd))
|
||||
|
||||
static inline pte_t *pmd_page_vaddr(pmd_t pmd)
|
||||
|
@ -183,21 +175,6 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
|
|||
|
||||
#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
|
||||
|
||||
#ifndef CONFIG_HIGHPTE
|
||||
#define __pte_map(pmd) pmd_page_vaddr(*(pmd))
|
||||
#define __pte_unmap(pte) do { } while (0)
|
||||
#else
|
||||
#define __pte_map(pmd) (pte_t *)kmap_atomic(pmd_page(*(pmd)))
|
||||
#define __pte_unmap(pte) kunmap_atomic(pte)
|
||||
#endif
|
||||
|
||||
#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
|
||||
|
||||
#define pte_offset_kernel(pmd,addr) (pmd_page_vaddr(*(pmd)) + pte_index(addr))
|
||||
|
||||
#define pte_offset_map(pmd,addr) (__pte_map(pmd) + pte_index(addr))
|
||||
#define pte_unmap(pte) __pte_unmap(pte)
|
||||
|
||||
#define pte_pfn(pte) ((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
|
||||
#define pfn_pte(pfn,prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot))
|
||||
|
||||
|
@ -339,8 +316,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
|
|||
/* FIXME: this is not correct */
|
||||
#define kern_addr_valid(addr) (1)
|
||||
|
||||
#include <asm-generic/pgtable.h>
|
||||
|
||||
/*
|
||||
* We provide our own arch_get_unmapped_area to cope with VIPT caches.
|
||||
*/
|
||||
|
|
|
@ -29,7 +29,8 @@ static inline int __in_irqentry_text(unsigned long ptr)
|
|||
}
|
||||
|
||||
extern void __init early_trap_init(void *);
|
||||
extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame);
|
||||
extern void dump_backtrace_entry(unsigned long where, unsigned long from,
|
||||
unsigned long frame, const char *loglvl);
|
||||
extern void ptrace_break(struct pt_regs *regs);
|
||||
|
||||
extern void *vectors_page;
|
||||
|
|
|
@ -36,7 +36,8 @@ extern struct unwind_table *unwind_table_add(unsigned long start,
|
|||
unsigned long text_addr,
|
||||
unsigned long text_size);
|
||||
extern void unwind_table_del(struct unwind_table *tab);
|
||||
extern void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk);
|
||||
extern void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk,
|
||||
const char *loglvl);
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
*/
|
||||
#include <linux/linkage.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/pgtable.h>
|
||||
|
||||
#include <asm/assembler.h>
|
||||
#include <asm/cp15.h>
|
||||
|
@ -18,7 +19,6 @@
|
|||
#include <asm/asm-offsets.h>
|
||||
#include <asm/memory.h>
|
||||
#include <asm/thread_info.h>
|
||||
#include <asm/pgtable.h>
|
||||
|
||||
#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING)
|
||||
#include CONFIG_DEBUG_LL_INCLUDE
|
||||
|
|
|
@ -10,7 +10,6 @@
|
|||
#include <linux/io.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/memblock.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <linux/of_fdt.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/mmu_context.h>
|
||||
|
|
|
@ -17,7 +17,6 @@
|
|||
#include <linux/string.h>
|
||||
#include <linux/gfp.h>
|
||||
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/sections.h>
|
||||
#include <asm/smp_plat.h>
|
||||
#include <asm/unwind.h>
|
||||
|
|
|
@ -431,7 +431,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
|
|||
npages = 1; /* for sigpage */
|
||||
npages += vdso_total_pages;
|
||||
|
||||
if (down_write_killable(&mm->mmap_sem))
|
||||
if (mmap_write_lock_killable(mm))
|
||||
return -EINTR;
|
||||
hint = sigpage_addr(mm, npages);
|
||||
addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0);
|
||||
|
@ -458,7 +458,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
|
|||
arm_install_vdso(mm, addr + PAGE_SIZE);
|
||||
|
||||
up_fail:
|
||||
up_write(&mm->mmap_sem);
|
||||
mmap_write_unlock(mm);
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -25,7 +25,6 @@
|
|||
#include <linux/tracehook.h>
|
||||
#include <linux/unistd.h>
|
||||
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/traps.h>
|
||||
|
||||
#define CREATE_TRACE_POINTS
|
||||
|
|
|
@ -37,7 +37,6 @@
|
|||
#include <asm/idmap.h>
|
||||
#include <asm/topology.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/procinfo.h>
|
||||
#include <asm/processor.h>
|
||||
|
|
|
@ -2,12 +2,12 @@
|
|||
#include <linux/init.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/mm_types.h>
|
||||
#include <linux/pgtable.h>
|
||||
|
||||
#include <asm/bugs.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/idmap.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/memory.h>
|
||||
#include <asm/smp_plat.h>
|
||||
#include <asm/suspend.h>
|
||||
|
|
|
@ -97,12 +97,12 @@ static void set_segfault(struct pt_regs *regs, unsigned long addr)
|
|||
{
|
||||
int si_code;
|
||||
|
||||
down_read(¤t->mm->mmap_sem);
|
||||
mmap_read_lock(current->mm);
|
||||
if (find_vma(current->mm, addr) == NULL)
|
||||
si_code = SEGV_MAPERR;
|
||||
else
|
||||
si_code = SEGV_ACCERR;
|
||||
up_read(¤t->mm->mmap_sem);
|
||||
mmap_read_unlock(current->mm);
|
||||
|
||||
pr_debug("SWP{B} emulation: access caused memory abort!\n");
|
||||
arm_notify_die("Illegal memory access", regs,
|
||||
|
|
|
@ -62,21 +62,24 @@ __setup("user_debug=", user_debug_setup);
|
|||
|
||||
static void dump_mem(const char *, const char *, unsigned long, unsigned long);
|
||||
|
||||
void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
|
||||
void dump_backtrace_entry(unsigned long where, unsigned long from,
|
||||
unsigned long frame, const char *loglvl)
|
||||
{
|
||||
unsigned long end = frame + 4 + sizeof(struct pt_regs);
|
||||
|
||||
#ifdef CONFIG_KALLSYMS
|
||||
printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
|
||||
printk("%s[<%08lx>] (%ps) from [<%08lx>] (%pS)\n",
|
||||
loglvl, where, (void *)where, from, (void *)from);
|
||||
#else
|
||||
printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
|
||||
printk("%sFunction entered at [<%08lx>] from [<%08lx>]\n",
|
||||
loglvl, where, from);
|
||||
#endif
|
||||
|
||||
if (in_entry_text(from) && end <= ALIGN(frame, THREAD_SIZE))
|
||||
dump_mem("", "Exception stack", frame + 4, end);
|
||||
dump_mem(loglvl, "Exception stack", frame + 4, end);
|
||||
}
|
||||
|
||||
void dump_backtrace_stm(u32 *stack, u32 instruction)
|
||||
void dump_backtrace_stm(u32 *stack, u32 instruction, const char *loglvl)
|
||||
{
|
||||
char str[80], *p;
|
||||
unsigned int x;
|
||||
|
@ -88,12 +91,12 @@ void dump_backtrace_stm(u32 *stack, u32 instruction)
|
|||
if (++x == 6) {
|
||||
x = 0;
|
||||
p = str;
|
||||
printk("%s\n", str);
|
||||
printk("%s%s\n", loglvl, str);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (p != str)
|
||||
printk("%s\n", str);
|
||||
printk("%s%s\n", loglvl, str);
|
||||
}
|
||||
|
||||
#ifndef CONFIG_ARM_UNWIND
|
||||
|
@ -201,17 +204,19 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
|
|||
}
|
||||
|
||||
#ifdef CONFIG_ARM_UNWIND
|
||||
static inline void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
|
||||
static inline void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
|
||||
const char *loglvl)
|
||||
{
|
||||
unwind_backtrace(regs, tsk);
|
||||
unwind_backtrace(regs, tsk, loglvl);
|
||||
}
|
||||
#else
|
||||
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
|
||||
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
|
||||
const char *loglvl)
|
||||
{
|
||||
unsigned int fp, mode;
|
||||
int ok = 1;
|
||||
|
||||
printk("Backtrace: ");
|
||||
printk("%sBacktrace: ", loglvl);
|
||||
|
||||
if (!tsk)
|
||||
tsk = current;
|
||||
|
@ -238,13 +243,13 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
|
|||
pr_cont("\n");
|
||||
|
||||
if (ok)
|
||||
c_backtrace(fp, mode);
|
||||
c_backtrace(fp, mode, loglvl);
|
||||
}
|
||||
#endif
|
||||
|
||||
void show_stack(struct task_struct *tsk, unsigned long *sp)
|
||||
void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
|
||||
{
|
||||
dump_backtrace(NULL, tsk);
|
||||
dump_backtrace(NULL, tsk, loglvl);
|
||||
barrier();
|
||||
}
|
||||
|
||||
|
@ -288,7 +293,7 @@ static int __die(const char *str, int err, struct pt_regs *regs)
|
|||
if (!user_mode(regs) || in_interrupt()) {
|
||||
dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
|
||||
THREAD_SIZE + (unsigned long)task_stack_page(tsk));
|
||||
dump_backtrace(regs, tsk);
|
||||
dump_backtrace(regs, tsk, KERN_EMERG);
|
||||
dump_instr(KERN_EMERG, regs);
|
||||
}
|
||||
|
||||
|
@ -663,10 +668,10 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
|
|||
if (user_debug & UDBG_SYSCALL) {
|
||||
pr_err("[%d] %s: arm syscall %d\n",
|
||||
task_pid_nr(current), current->comm, no);
|
||||
dump_instr("", regs);
|
||||
dump_instr(KERN_ERR, regs);
|
||||
if (user_mode(regs)) {
|
||||
__show_regs(regs);
|
||||
c_backtrace(frame_pointer(regs), processor_mode(regs));
|
||||
c_backtrace(frame_pointer(regs), processor_mode(regs), KERN_ERR);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -455,7 +455,8 @@ int unwind_frame(struct stackframe *frame)
|
|||
return URC_OK;
|
||||
}
|
||||
|
||||
void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk)
|
||||
void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk,
|
||||
const char *loglvl)
|
||||
{
|
||||
struct stackframe frame;
|
||||
|
||||
|
@ -493,7 +494,7 @@ void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk)
|
|||
urc = unwind_frame(&frame);
|
||||
if (urc < 0)
|
||||
break;
|
||||
dump_backtrace_entry(where, frame.pc, frame.sp - 4);
|
||||
dump_backtrace_entry(where, frame.pc, frame.sp - 4, loglvl);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -240,7 +240,7 @@ static int install_vvar(struct mm_struct *mm, unsigned long addr)
|
|||
return PTR_ERR_OR_ZERO(vma);
|
||||
}
|
||||
|
||||
/* assumes mmap_sem is write-locked */
|
||||
/* assumes mmap_lock is write-locked */
|
||||
void arm_install_vdso(struct mm_struct *mm, unsigned long addr)
|
||||
{
|
||||
struct vm_area_struct *vma;
|
||||
|
|
|
@ -8,13 +8,13 @@
|
|||
#include "vmlinux-xip.lds.S"
|
||||
#else
|
||||
|
||||
#include <linux/pgtable.h>
|
||||
#include <asm-generic/vmlinux.lds.h>
|
||||
#include <asm/cache.h>
|
||||
#include <asm/thread_info.h>
|
||||
#include <asm/memory.h>
|
||||
#include <asm/mpu.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgtable.h>
|
||||
|
||||
#include "vmlinux.lds.h"
|
||||
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
#define sv_pc r6
|
||||
#define mask r7
|
||||
#define sv_lr r8
|
||||
#define loglvl r9
|
||||
|
||||
ENTRY(c_backtrace)
|
||||
|
||||
|
@ -99,6 +100,7 @@ ENDPROC(c_backtrace)
|
|||
@ to ensure 8 byte alignment
|
||||
movs frame, r0 @ if frame pointer is zero
|
||||
beq no_frame @ we have no stack frames
|
||||
mov loglvl, r2
|
||||
tst r1, #0x10 @ 26 or 32-bit mode?
|
||||
moveq mask, #0xfc000003
|
||||
movne mask, #0 @ mask for 32-bit
|
||||
|
@ -167,6 +169,7 @@ finished_setup:
|
|||
mov r1, sv_lr
|
||||
mov r2, frame
|
||||
bic r1, r1, mask @ mask PC/LR for the mode
|
||||
mov r3, loglvl
|
||||
bl dump_backtrace_entry
|
||||
|
||||
/*
|
||||
|
@ -183,6 +186,7 @@ finished_setup:
|
|||
ldr r0, [frame] @ locals are stored in
|
||||
@ the preceding frame
|
||||
subeq r0, r0, #4
|
||||
mov r2, loglvl
|
||||
bleq dump_backtrace_stm @ dump saved registers
|
||||
|
||||
/*
|
||||
|
@ -196,7 +200,8 @@ finished_setup:
|
|||
bhi for_each_frame
|
||||
|
||||
1006: adr r0, .Lbad
|
||||
mov r1, frame
|
||||
mov r1, loglvl
|
||||
mov r2, frame
|
||||
bl printk
|
||||
no_frame: ldmfd sp!, {r4 - r9, fp, pc}
|
||||
ENDPROC(c_backtrace)
|
||||
|
@ -209,7 +214,7 @@ ENDPROC(c_backtrace)
|
|||
.long 1005b, 1006b
|
||||
.popsection
|
||||
|
||||
.Lbad: .asciz "Backtrace aborted due to bad frame pointer <%p>\n"
|
||||
.Lbad: .asciz "%sBacktrace aborted due to bad frame pointer <%p>\n"
|
||||
.align
|
||||
.Lopcode: .word 0xe92d4800 >> 11 @ stmfd sp!, {... fp, lr}
|
||||
.word 0x0b000000 @ bl if these bits are set
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
#define sv_pc r6
|
||||
#define mask r7
|
||||
#define offset r8
|
||||
#define loglvl r9
|
||||
|
||||
ENTRY(c_backtrace)
|
||||
|
||||
|
@ -25,9 +26,10 @@ ENTRY(c_backtrace)
|
|||
ret lr
|
||||
ENDPROC(c_backtrace)
|
||||
#else
|
||||
stmfd sp!, {r4 - r8, lr} @ Save an extra register so we have a location...
|
||||
stmfd sp!, {r4 - r9, lr} @ Save an extra register so we have a location...
|
||||
movs frame, r0 @ if frame pointer is zero
|
||||
beq no_frame @ we have no stack frames
|
||||
mov loglvl, r2
|
||||
|
||||
tst r1, #0x10 @ 26 or 32-bit mode?
|
||||
ARM( moveq mask, #0xfc000003 )
|
||||
|
@ -73,6 +75,7 @@ for_each_frame: tst frame, mask @ Check for address exceptions
|
|||
ldr r1, [frame, #-4] @ get saved lr
|
||||
mov r2, frame
|
||||
bic r1, r1, mask @ mask PC/LR for the mode
|
||||
mov r3, loglvl
|
||||
bl dump_backtrace_entry
|
||||
|
||||
ldr r1, [sv_pc, #-4] @ if stmfd sp!, {args} exists,
|
||||
|
@ -80,12 +83,14 @@ for_each_frame: tst frame, mask @ Check for address exceptions
|
|||
teq r3, r1, lsr #11
|
||||
ldreq r0, [frame, #-8] @ get sp
|
||||
subeq r0, r0, #4 @ point at the last arg
|
||||
mov r2, loglvl
|
||||
bleq dump_backtrace_stm @ dump saved registers
|
||||
|
||||
1004: ldr r1, [sv_pc, #0] @ if stmfd sp!, {..., fp, ip, lr, pc}
|
||||
ldr r3, .Ldsi @ instruction exists,
|
||||
teq r3, r1, lsr #11
|
||||
subeq r0, frame, #16
|
||||
mov r2, loglvl
|
||||
bleq dump_backtrace_stm @ dump saved registers
|
||||
|
||||
teq sv_fp, #0 @ zero saved fp means
|
||||
|
@ -96,9 +101,10 @@ for_each_frame: tst frame, mask @ Check for address exceptions
|
|||
bhi for_each_frame
|
||||
|
||||
1006: adr r0, .Lbad
|
||||
mov r1, frame
|
||||
mov r1, loglvl
|
||||
mov r2, frame
|
||||
bl printk
|
||||
no_frame: ldmfd sp!, {r4 - r8, pc}
|
||||
no_frame: ldmfd sp!, {r4 - r9, pc}
|
||||
ENDPROC(c_backtrace)
|
||||
|
||||
.pushsection __ex_table,"a"
|
||||
|
@ -109,7 +115,7 @@ ENDPROC(c_backtrace)
|
|||
.long 1004b, 1006b
|
||||
.popsection
|
||||
|
||||
.Lbad: .asciz "Backtrace aborted due to bad frame pointer <%p>\n"
|
||||
.Lbad: .asciz "%sBacktrace aborted due to bad frame pointer <%p>\n"
|
||||
.align
|
||||
.Ldsi: .word 0xe92dd800 >> 11 @ stmfd sp!, {... fp, ip, lr, pc}
|
||||
.word 0xe92d0000 >> 11 @ stmfd sp!, {}
|
||||
|
|
|
@ -101,7 +101,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
|
|||
atomic = faulthandler_disabled();
|
||||
|
||||
if (!atomic)
|
||||
down_read(¤t->mm->mmap_sem);
|
||||
mmap_read_lock(current->mm);
|
||||
while (n) {
|
||||
pte_t *pte;
|
||||
spinlock_t *ptl;
|
||||
|
@ -109,11 +109,11 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
|
|||
|
||||
while (!pin_page_for_write(to, &pte, &ptl)) {
|
||||
if (!atomic)
|
||||
up_read(¤t->mm->mmap_sem);
|
||||
mmap_read_unlock(current->mm);
|
||||
if (__put_user(0, (char __user *)to))
|
||||
goto out;
|
||||
if (!atomic)
|
||||
down_read(¤t->mm->mmap_sem);
|
||||
mmap_read_lock(current->mm);
|
||||
}
|
||||
|
||||
tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
|
||||
|
@ -133,7 +133,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
|
|||
spin_unlock(ptl);
|
||||
}
|
||||
if (!atomic)
|
||||
up_read(¤t->mm->mmap_sem);
|
||||
mmap_read_unlock(current->mm);
|
||||
|
||||
out:
|
||||
return n;
|
||||
|
@ -170,17 +170,17 @@ __clear_user_memset(void __user *addr, unsigned long n)
|
|||
return 0;
|
||||
}
|
||||
|
||||
down_read(¤t->mm->mmap_sem);
|
||||
mmap_read_lock(current->mm);
|
||||
while (n) {
|
||||
pte_t *pte;
|
||||
spinlock_t *ptl;
|
||||
int tocopy;
|
||||
|
||||
while (!pin_page_for_write(addr, &pte, &ptl)) {
|
||||
up_read(¤t->mm->mmap_sem);
|
||||
mmap_read_unlock(current->mm);
|
||||
if (__put_user(0, (char __user *)addr))
|
||||
goto out;
|
||||
down_read(¤t->mm->mmap_sem);
|
||||
mmap_read_lock(current->mm);
|
||||
}
|
||||
|
||||
tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
|
||||
|
@ -198,7 +198,7 @@ __clear_user_memset(void __user *addr, unsigned long n)
|
|||
else
|
||||
spin_unlock(ptl);
|
||||
}
|
||||
up_read(¤t->mm->mmap_sem);
|
||||
mmap_read_unlock(current->mm);
|
||||
|
||||
out:
|
||||
return n;
|
||||
|
|
|
@ -17,7 +17,6 @@
|
|||
#include <asm/irq.h>
|
||||
#include <asm/setup.h>
|
||||
#include <asm/mach-types.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/system_misc.h>
|
||||
|
||||
|
|
|
@ -14,7 +14,6 @@
|
|||
#include <linux/spinlock.h>
|
||||
#include <video/vga.h>
|
||||
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/mach-types.h>
|
||||
|
|
|
@ -8,7 +8,6 @@
|
|||
#include <linux/mm.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/pinctrl/machine.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/mach/map.h>
|
||||
|
||||
#include "common.h"
|
||||
|
|
|
@ -8,7 +8,6 @@
|
|||
#include <linux/mm.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/pinctrl/machine.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/mach/map.h>
|
||||
|
||||
#include "common.h"
|
||||
|
|
|
@ -13,7 +13,6 @@
|
|||
#include <linux/io.h>
|
||||
#include <linux/pinctrl/machine.h>
|
||||
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/system_misc.h>
|
||||
#include <asm/hardware/cache-l2x0.h>
|
||||
#include <asm/mach/map.h>
|
||||
|
|
|
@ -21,10 +21,10 @@
|
|||
#include <linux/stat.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/pgtable.h>
|
||||
|
||||
#include <asm/mach-types.h>
|
||||
#include <asm/mach/time.h>
|
||||
#include <asm/pgtable.h>
|
||||
|
||||
#include "hardware.h"
|
||||
#include "cm.h"
|
||||
|
|
|
@ -17,7 +17,6 @@
|
|||
#include <linux/serial_core.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/gpio/machine.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/mach/map.h>
|
||||
#include <asm/setup.h>
|
||||
|
|
|
@ -31,7 +31,6 @@
|
|||
#include <asm/mach/time.h>
|
||||
#include <asm/mach-types.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgtable.h>
|
||||
|
||||
#include "hardware.h"
|
||||
#include "irqs.h"
|
||||
|
|
|
@ -27,7 +27,6 @@
|
|||
#include <asm/mach/time.h>
|
||||
#include <asm/mach-types.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgtable.h>
|
||||
|
||||
#include "hardware.h"
|
||||
#include "irqs.h"
|
||||
|
|
|
@ -35,7 +35,6 @@
|
|||
#include <asm/mach/time.h>
|
||||
#include <asm/mach-types.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgtable.h>
|
||||
|
||||
#include "hardware.h"
|
||||
#include "irqs.h"
|
||||
|
|
|
@ -33,7 +33,6 @@
|
|||
#include <mach/hardware.h>
|
||||
#include <mach/io.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/exception.h>
|
||||
#include <asm/irq.h>
|
||||
|
|
|
@ -12,11 +12,11 @@
|
|||
#include <linux/init.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/pgtable.h>
|
||||
|
||||
#include <asm/smp_plat.h>
|
||||
#include <asm/prom.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/pgtable.h>
|
||||
|
||||
#include "keystone.h"
|
||||
|
||||
|
|
|
@ -34,7 +34,6 @@
|
|||
#include <asm/setup.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgtable-hwdef.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
||||
#include <asm/mach/arch.h>
|
||||
|
@ -633,7 +632,7 @@ static void __init map_sa1100_gpio_regs( void )
|
|||
int prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO);
|
||||
pmd_t *pmd;
|
||||
|
||||
pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset_k(virt), virt), virt), virt);
|
||||
pmd = pmd_off_k(virt);
|
||||
*pmd = __pmd(phys | prot);
|
||||
flush_pmd_entry(pmd);
|
||||
}
|
||||
|
|
|
@ -22,11 +22,11 @@
|
|||
#include <linux/gpio.h>
|
||||
#include <linux/leds.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pgtable.h>
|
||||
|
||||
#include <asm/mach-types.h>
|
||||
#include <asm/setup.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgtable.h>
|
||||
|
||||
#include <asm/mach/arch.h>
|
||||
#include <asm/mach/flash.h>
|
||||
|
|
|
@ -10,7 +10,7 @@
|
|||
#ifndef __MACH_TEGRA_IOMAP_H
|
||||
#define __MACH_TEGRA_IOMAP_H
|
||||
|
||||
#include <asm/pgtable.h>
|
||||
#include <linux/pgtable.h>
|
||||
#include <linux/sizes.h>
|
||||
|
||||
#define TEGRA_IRAM_BASE 0x40000000
|
||||
|
|
|
@ -24,13 +24,13 @@
|
|||
#include <linux/irqchip/arm-gic.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/sys_soc.h>
|
||||
#include <linux/pgtable.h>
|
||||
|
||||
#include <asm/mach/arch.h>
|
||||
#include <asm/mach/map.h>
|
||||
#include <asm/mach/time.h>
|
||||
#include <asm/mach-types.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/smp_scu.h>
|
||||
#include <asm/system_info.h>
|
||||
#include <asm/hardware/cache-l2x0.h>
|
||||
|
|
|
@ -14,7 +14,6 @@
|
|||
#include <linux/mm.h>
|
||||
#include <linux/highmem.h>
|
||||
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/cacheflush.h>
|
||||
|
||||
|
|
|
@ -9,7 +9,6 @@
|
|||
#include <linux/mm.h>
|
||||
#include <linux/highmem.h>
|
||||
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/shmparam.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/cacheflush.h>
|
||||
|
|
|
@ -14,7 +14,6 @@
|
|||
#include <linux/mm.h>
|
||||
#include <linux/highmem.h>
|
||||
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/cacheflush.h>
|
||||
|
||||
|
|
|
@ -16,7 +16,6 @@
|
|||
#include <asm/domain.h>
|
||||
#include <asm/fixmap.h>
|
||||
#include <asm/memory.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/ptdump.h>
|
||||
|
||||
static struct addr_marker address_markers[] = {
|
||||
|
|
|
@ -17,7 +17,6 @@
|
|||
#include <asm/bugs.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/cachetype.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
||||
#include "mm.h"
|
||||
|
|
|
@ -18,7 +18,6 @@
|
|||
#include <linux/highmem.h>
|
||||
#include <linux/perf_event.h>
|
||||
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/system_misc.h>
|
||||
#include <asm/system_info.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
@ -272,11 +271,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
|
|||
* validly references user space from well defined areas of the code,
|
||||
* we can bug out early if this is from code which shouldn't.
|
||||
*/
|
||||
if (!down_read_trylock(&mm->mmap_sem)) {
|
||||
if (!mmap_read_trylock(mm)) {
|
||||
if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
|
||||
goto no_context;
|
||||
retry:
|
||||
down_read(&mm->mmap_sem);
|
||||
mmap_read_lock(mm);
|
||||
} else {
|
||||
/*
|
||||
* The above down_read_trylock() might have succeeded in
|
||||
|
@ -294,7 +293,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
|
|||
fault = __do_page_fault(mm, addr, fsr, flags, tsk);
|
||||
|
||||
/* If we need to retry but a fatal signal is pending, handle the
|
||||
* signal first. We do not need to release the mmap_sem because
|
||||
* signal first. We do not need to release the mmap_lock because
|
||||
* it would already be released in __lock_page_or_retry in
|
||||
* mm/filemap.c. */
|
||||
if (fault_signal_pending(fault, regs)) {
|
||||
|
@ -326,7 +325,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
|
|||
}
|
||||
}
|
||||
|
||||
up_read(&mm->mmap_sem);
|
||||
mmap_read_unlock(mm);
|
||||
|
||||
/*
|
||||
* Handle the "normal" case first - VM_FAULT_MAJOR
|
||||
|
|
|
@ -18,7 +18,7 @@
|
|||
static inline void set_fixmap_pte(int idx, pte_t pte)
|
||||
{
|
||||
unsigned long vaddr = __fix_to_virt(idx);
|
||||
pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
|
||||
pte_t *ptep = virt_to_kpte(vaddr);
|
||||
|
||||
set_pte_ext(ptep, pte, 0);
|
||||
local_flush_tlb_kernel_page(vaddr);
|
||||
|
@ -26,7 +26,7 @@ static inline void set_fixmap_pte(int idx, pte_t pte)
|
|||
|
||||
static inline pte_t get_fixmap_pte(unsigned long vaddr)
|
||||
{
|
||||
pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
|
||||
pte_t *ptep = virt_to_kpte(vaddr);
|
||||
|
||||
return *ptep;
|
||||
}
|
||||
|
|
|
@ -3,12 +3,12 @@
|
|||
#include <linux/kernel.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/mm_types.h>
|
||||
#include <linux/pgtable.h>
|
||||
|
||||
#include <asm/cputype.h>
|
||||
#include <asm/idmap.h>
|
||||
#include <asm/hwcap.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/sections.h>
|
||||
#include <asm/system_info.h>
|
||||
|
||||
|
|
|
@ -141,16 +141,8 @@ void __check_vmalloc_seq(struct mm_struct *mm)
|
|||
static void unmap_area_sections(unsigned long virt, unsigned long size)
|
||||
{
|
||||
unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
|
||||
pgd_t *pgd;
|
||||
p4d_t *p4d;
|
||||
pud_t *pud;
|
||||
pmd_t *pmdp;
|
||||
pmd_t *pmdp = pmd_off_k(addr);
|
||||
|
||||
flush_cache_vunmap(addr, end);
|
||||
pgd = pgd_offset_k(addr);
|
||||
p4d = p4d_offset(pgd, addr);
|
||||
pud = pud_offset(p4d, addr);
|
||||
pmdp = pmd_offset(pud, addr);
|
||||
do {
|
||||
pmd_t pmd = *pmdp;
|
||||
|
||||
|
@ -191,10 +183,7 @@ remap_area_sections(unsigned long virt, unsigned long pfn,
|
|||
size_t size, const struct mem_type *type)
|
||||
{
|
||||
unsigned long addr = virt, end = virt + size;
|
||||
pgd_t *pgd;
|
||||
p4d_t *p4d;
|
||||
pud_t *pud;
|
||||
pmd_t *pmd;
|
||||
pmd_t *pmd = pmd_off_k(addr);
|
||||
|
||||
/*
|
||||
* Remove and free any PTE-based mapping, and
|
||||
|
@ -202,10 +191,6 @@ remap_area_sections(unsigned long virt, unsigned long pfn,
|
|||
*/
|
||||
unmap_area_sections(virt, size);
|
||||
|
||||
pgd = pgd_offset_k(addr);
|
||||
p4d = p4d_offset(pgd, addr);
|
||||
pud = pud_offset(p4d, addr);
|
||||
pmd = pmd_offset(pud, addr);
|
||||
do {
|
||||
pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
|
||||
pfn += SZ_1M >> PAGE_SHIFT;
|
||||
|
@ -225,21 +210,13 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
|
|||
size_t size, const struct mem_type *type)
|
||||
{
|
||||
unsigned long addr = virt, end = virt + size;
|
||||
pgd_t *pgd;
|
||||
p4d_t *p4d;
|
||||
pud_t *pud;
|
||||
pmd_t *pmd;
|
||||
pmd_t *pmd = pmd_off_k(addr);
|
||||
|
||||
/*
|
||||
* Remove and free any PTE-based mapping, and
|
||||
* sync the current kernel mapping.
|
||||
*/
|
||||
unmap_area_sections(virt, size);
|
||||
|
||||
pgd = pgd_offset_k(virt);
|
||||
p4d = p4d_offset(pgd, addr);
|
||||
pud = pud_offset(p4d, addr);
|
||||
pmd = pmd_offset(pud, addr);
|
||||
do {
|
||||
unsigned long super_pmd_val, i;
|
||||
|
||||
|
|
|
@ -2,8 +2,7 @@
|
|||
#ifdef CONFIG_MMU
|
||||
#include <linux/list.h>
|
||||
#include <linux/vmalloc.h>
|
||||
|
||||
#include <asm/pgtable.h>
|
||||
#include <linux/pgtable.h>
|
||||
|
||||
/* the upper-most page table pointer */
|
||||
extern pmd_t *top_pmd;
|
||||
|
@ -36,11 +35,6 @@ static inline pte_t get_top_pte(unsigned long va)
|
|||
return *ptep;
|
||||
}
|
||||
|
||||
static inline pmd_t *pmd_off_k(unsigned long virt)
|
||||
{
|
||||
return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(virt), virt), virt), virt);
|
||||
}
|
||||
|
||||
struct mem_type {
|
||||
pteval_t prot_pte;
|
||||
pteval_t prot_pte_s2;
|
||||
|
|
|
@ -356,12 +356,7 @@ static pte_t *pte_offset_late_fixmap(pmd_t *dir, unsigned long addr)
|
|||
|
||||
static inline pmd_t * __init fixmap_pmd(unsigned long addr)
|
||||
{
|
||||
pgd_t *pgd = pgd_offset_k(addr);
|
||||
p4d_t *p4d = p4d_offset(pgd, addr);
|
||||
pud_t *pud = pud_offset(p4d, addr);
|
||||
pmd_t *pmd = pmd_offset(pud, addr);
|
||||
|
||||
return pmd;
|
||||
return pmd_off_k(addr);
|
||||
}
|
||||
|
||||
void __init early_fixmap_init(void)
|
||||
|
|
|
@ -5,7 +5,6 @@
|
|||
#include <linux/mm.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/set_memory.h>
|
||||
|
||||
|
|
|
@ -11,11 +11,11 @@
|
|||
*/
|
||||
#include <linux/linkage.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/pgtable.h>
|
||||
#include <asm/assembler.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/hwcap.h>
|
||||
#include <asm/pgtable-hwdef.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/ptrace.h>
|
||||
|
||||
#include "proc-macros.S"
|
||||
|
|
Some files were not shown because too many files have changed in this diff.