s390/mm,kvm: flush gmap address space with IDTE
The __tlb_flush_mm() helper uses a global flush if the mm struct has a
gmap structure attached to it. Replace the global flush with two
individual flushes by means of the IDTE instruction if only a single
gmap is attached to the mm.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent d5dcafee5f
commit 44b6cc8130
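
The new mm->context.gmap_asce field encodes the gmap situation for the TLB flush path in a single word: 0 means no gmap is attached, -1UL means more than one gmap is attached, and any other value is the ASCE of the only attached gmap. A minimal userspace sketch of that encoding follows; gmap_asce_for() and the driver are illustrative names, not kernel code:

	#include <stdio.h>

	/* Sketch only: mirrors the encoding, not the kernel's list handling. */
	static unsigned long gmap_asce_for(int nr_gmaps, unsigned long single_asce)
	{
		if (nr_gmaps == 0)
			return 0;		/* no guest address space attached */
		if (nr_gmaps == 1)
			return single_asce;	/* the one gmap's ASCE */
		return -1UL;			/* ambiguous: force a full flush */
	}

	int main(void)
	{
		printf("no gmap:   %lx\n", gmap_asce_for(0, 0x1000));
		printf("one gmap:  %lx\n", gmap_asce_for(1, 0x1000));
		printf("two gmaps: %lx\n", gmap_asce_for(2, 0x1000));
		return 0;
	}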
arch/s390/include/asm/mmu.h
@@ -12,6 +12,7 @@ typedef struct {
 	struct list_head pgtable_list;
 	spinlock_t gmap_lock;
 	struct list_head gmap_list;
+	unsigned long gmap_asce;
 	unsigned long asce;
 	unsigned long asce_limit;
 	unsigned long vdso_base;
arch/s390/include/asm/mmu_context.h
@@ -21,6 +21,7 @@ static inline int init_new_context(struct task_struct *tsk,
 	INIT_LIST_HEAD(&mm->context.gmap_list);
 	cpumask_clear(&mm->context.cpu_attach_mask);
 	atomic_set(&mm->context.flush_count, 0);
+	mm->context.gmap_asce = 0;
 	mm->context.flush_mm = 0;
 #ifdef CONFIG_PGSTE
 	mm->context.alloc_pgste = page_table_allocate_pgste;
arch/s390/include/asm/tlbflush.h
@@ -60,18 +60,25 @@ static inline void __tlb_flush_full(struct mm_struct *mm)
 	preempt_enable();
 }
 
-/*
- * Flush TLB entries for a specific ASCE on all CPUs. Should never be used
- * when more than one asce (e.g. gmap) ran on this mm.
- */
-static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
+static inline void __tlb_flush_mm(struct mm_struct *mm)
 {
+	unsigned long gmap_asce;
+
+	/*
+	 * If the machine has IDTE we prefer to do a per mm flush
+	 * on all cpus instead of doing a local flush if the mm
+	 * only ran on the local cpu.
+	 */
 	preempt_disable();
 	atomic_inc(&mm->context.flush_count);
-	if (MACHINE_HAS_IDTE)
-		__tlb_flush_idte(asce);
-	else
-		__tlb_flush_global();
+	gmap_asce = READ_ONCE(mm->context.gmap_asce);
+	if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
+		if (gmap_asce)
+			__tlb_flush_idte(gmap_asce);
+		__tlb_flush_idte(mm->context.asce);
+	} else {
+		__tlb_flush_full(mm);
+	}
 	/* Reset TLB flush mask */
 	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
 	atomic_dec(&mm->context.flush_count);
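
The rewritten __tlb_flush_mm() above consumes the gmap_asce encoding: with IDTE available and at most one gmap attached it issues one IDTE flush per ASCE (the gmap first, then the mm itself), and otherwise falls back to __tlb_flush_full(). A standalone sketch of the same decision, with the flush primitives stubbed out as printf() calls and MACHINE_HAS_IDTE modeled as a plain flag (illustration only, not the kernel implementation):

	#include <stdio.h>

	static void flush_idte(unsigned long asce) { printf("IDTE flush, asce %lx\n", asce); }
	static void flush_full(void)               { printf("full (all-CPU) flush\n"); }

	static void tlb_flush_mm(int has_idte, unsigned long gmap_asce, unsigned long mm_asce)
	{
		if (has_idte && gmap_asce != -1UL) {
			if (gmap_asce)			/* one gmap: flush its ASCE first */
				flush_idte(gmap_asce);
			flush_idte(mm_asce);		/* then the mm's own ASCE */
		} else {
			flush_full();			/* no IDTE, or several gmaps */
		}
	}

	int main(void)
	{
		tlb_flush_mm(1, 0, 0x2000);		/* no gmap: one IDTE flush */
		tlb_flush_mm(1, 0x1000, 0x2000);	/* one gmap: two IDTE flushes */
		tlb_flush_mm(1, -1UL, 0x2000);		/* several gmaps: full flush */
		return 0;
	}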
@@ -92,7 +99,7 @@ static inline void __tlb_flush_kernel(void)
 /*
  * Flush TLB entries for a specific ASCE on all CPUs.
  */
-static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
+static inline void __tlb_flush_mm(struct mm_struct *mm)
 {
 	__tlb_flush_local();
 }
@@ -103,19 +110,6 @@ static inline void __tlb_flush_kernel(void)
 }
 #endif
 
-static inline void __tlb_flush_mm(struct mm_struct * mm)
-{
-	/*
-	 * If the machine has IDTE we prefer to do a per mm flush
-	 * on all cpus instead of doing a local flush if the mm
-	 * only ran on the local cpu.
-	 */
-	if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
-		__tlb_flush_asce(mm, mm->context.asce);
-	else
-		__tlb_flush_full(mm);
-}
-
 static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
 {
 	if (mm->context.flush_mm) {
arch/s390/mm/gmap.c
@@ -94,6 +94,7 @@ static struct gmap *gmap_alloc(unsigned long limit)
 struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
 {
 	struct gmap *gmap;
+	unsigned long gmap_asce;
 
 	gmap = gmap_alloc(limit);
 	if (!gmap)
@@ -101,6 +102,11 @@ struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
 	gmap->mm = mm;
 	spin_lock(&mm->context.gmap_lock);
 	list_add_rcu(&gmap->list, &mm->context.gmap_list);
+	if (list_is_singular(&mm->context.gmap_list))
+		gmap_asce = gmap->asce;
+	else
+		gmap_asce = -1UL;
+	WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
 	spin_unlock(&mm->context.gmap_lock);
 	return gmap;
 }
@@ -230,6 +236,7 @@ EXPORT_SYMBOL_GPL(gmap_put);
 void gmap_remove(struct gmap *gmap)
 {
 	struct gmap *sg, *next;
+	unsigned long gmap_asce;
 
 	/* Remove all shadow gmaps linked to this gmap */
 	if (!list_empty(&gmap->children)) {
@@ -243,6 +250,14 @@ void gmap_remove(struct gmap *gmap)
 	/* Remove gmap from the pre-mm list */
 	spin_lock(&gmap->mm->context.gmap_lock);
 	list_del_rcu(&gmap->list);
+	if (list_empty(&gmap->mm->context.gmap_list))
+		gmap_asce = 0;
+	else if (list_is_singular(&gmap->mm->context.gmap_list))
+		gmap_asce = list_first_entry(&gmap->mm->context.gmap_list,
+					     struct gmap, list)->asce;
+	else
+		gmap_asce = -1UL;
+	WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce);
 	spin_unlock(&gmap->mm->context.gmap_lock);
 	synchronize_rcu();
 	/* Put reference */
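
Taken together, gmap_create() and gmap_remove() keep gmap_asce consistent under gmap_lock, while __tlb_flush_mm() reads it locklessly via READ_ONCE(). The state transitions can be walked through in a small userspace simulation; all names below are illustrative stand-ins for the kernel's list-based logic:

	#include <stdio.h>

	static unsigned long gmap_asce;	/* stands in for mm->context.gmap_asce */
	static int nr_gmaps;

	static void create(unsigned long asce)
	{
		nr_gmaps++;
		/* list_is_singular() right after list_add_rcu() */
		gmap_asce = (nr_gmaps == 1) ? asce : -1UL;
		printf("create %lx -> gmap_asce=%lx\n", asce, gmap_asce);
	}

	static void remove_one(unsigned long remaining_asce)
	{
		nr_gmaps--;
		if (nr_gmaps == 0)
			gmap_asce = 0;			/* list_empty() */
		else if (nr_gmaps == 1)
			gmap_asce = remaining_asce;	/* list_is_singular() */
		else
			gmap_asce = -1UL;
		printf("remove -> gmap_asce=%lx\n", gmap_asce);
	}

	int main(void)
	{
		create(0x1000);		/* -> 0x1000 (single gmap)  */
		create(0x3000);		/* -> -1UL  (two gmaps)     */
		remove_one(0x1000);	/* -> 0x1000 again          */
		remove_one(0);		/* -> 0     (no gmap left)  */
		return 0;
	}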