mm: add vmf_insert_pfn_pmd()
Similar to vm_insert_pfn(), but for PMDs rather than PTEs. The 'vmf_' prefix instead of 'vm_' prefix is intended to indicate that it returns a VMF_ value rather than an errno (which would only have to be converted into a VMF_ value anyway).

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Cc: Hillf Danton <dhillf@gmail.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Jan Kara <jack@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent fc43704437 | commit 5cad465d7f
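As an illustration of the calling convention described above (not part of this patch): a minimal sketch of how a driver's PMD-sized fault handler for a device-memory (VM_PFNMAP) mapping might use the new helper. The handler signature is only an assumption about how such a hook could look, and my_device, my_vma_to_dev() and my_dev_addr_to_pfn() are hypothetical helpers.

#include <linux/mm.h>
#include <linux/huge_mm.h>

/* Illustrative only -- hypothetical driver code, not part of this patch. */
static int my_dev_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, unsigned int flags)
{
	struct my_device *dev = my_vma_to_dev(vma);		/* hypothetical */
	unsigned long pfn = my_dev_addr_to_pfn(dev, addr);	/* hypothetical */
	bool write = flags & FAULT_FLAG_WRITE;

	/*
	 * vmf_insert_pfn_pmd() already returns a VM_FAULT_ code
	 * (VM_FAULT_NOPAGE on success, VM_FAULT_SIGBUS on failure), so the
	 * handler can propagate it directly, with no errno conversion.
	 */
	return vmf_insert_pfn_pmd(vma, addr, pmd, pfn, write);
}

The last line is the point of the sketch: because the return value is already a VM_FAULT_ code, the fault path needs no translation step, which is exactly what the 'vmf_' prefix is meant to signal.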
@@ -33,6 +33,8 @@ extern int move_huge_pmd(struct vm_area_struct *vma,
 extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			unsigned long addr, pgprot_t newprot,
 			int prot_numa);
+int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *,
+			unsigned long pfn, bool write);
 
 enum transparent_hugepage_flag {
 	TRANSPARENT_HUGEPAGE_FLAG,
@@ -869,6 +869,49 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 					flags);
 }
 
+static int insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
+		pmd_t *pmd, unsigned long pfn, pgprot_t prot, bool write)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	pmd_t entry;
+	spinlock_t *ptl;
+
+	ptl = pmd_lock(mm, pmd);
+	if (pmd_none(*pmd)) {
+		entry = pmd_mkhuge(pfn_pmd(pfn, prot));
+		if (write) {
+			entry = pmd_mkyoung(pmd_mkdirty(entry));
+			entry = maybe_pmd_mkwrite(entry, vma);
+		}
+		set_pmd_at(mm, addr, pmd, entry);
+		update_mmu_cache_pmd(vma, addr, pmd);
+	}
+	spin_unlock(ptl);
+	return VM_FAULT_NOPAGE;
+}
+
+int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
+		pmd_t *pmd, unsigned long pfn, bool write)
+{
+	pgprot_t pgprot = vma->vm_page_prot;
+	/*
+	 * If we had pmd_special, we could avoid all these restrictions,
+	 * but we need to be consistent with PTEs and architectures that
+	 * can't support a 'special' bit.
+	 */
+	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
+	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
+						(VM_PFNMAP|VM_MIXEDMAP));
+	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
+	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
+
+	if (addr < vma->vm_start || addr >= vma->vm_end)
+		return VM_FAULT_SIGBUS;
+	if (track_pfn_insert(vma, &pgprot, pfn))
+		return VM_FAULT_SIGBUS;
+	return insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write);
+}
+
 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
 		struct vm_area_struct *vma)