mm: workingset: add vmstat counter for shadow nodes

Make it easier to catch bugs in the shadow node shrinker by adding a
counter for the shadow nodes in circulation.

[akpm@linux-foundation.org: assert that irqs are disabled, for __inc_lruvec_page_state()]
[akpm@linux-foundation.org: s/WARN_ON_ONCE/VM_WARN_ON_ONCE/, per Johannes]
Link: http://lkml.kernel.org/r/20181009184732.762-4-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 68d48e6a2d (parent 505802a535)
Author: Johannes Weiner, 2018-10-26 15:06:39 -07:00
Committed by: Linus Torvalds

 3 files changed, 14 insertions(+), 2 deletions(-)
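
Once this patch is applied, the counter is exported to userspace as the "workingset_nodes" line in /proc/vmstat (see the mm/vmstat.c hunk below). As a minimal userspace sketch, not part of this commit, the following program reads that line back, assuming only the standard "name value" per-line layout of /proc/vmstat:

/*
 * Illustrative only: print the workingset_nodes counter exposed
 * through /proc/vmstat by this patch.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f) {
		perror("/proc/vmstat");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "workingset_nodes ", 17))
			fputs(line, stdout);	/* e.g. "workingset_nodes 1024" */
	fclose(f);
	return 0;
}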

--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h

@@ -161,6 +161,7 @@ enum node_stat_item {
 	NR_SLAB_UNRECLAIMABLE,
 	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
 	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
+	WORKINGSET_NODES,
 	WORKINGSET_REFAULT,
 	WORKINGSET_ACTIVATE,
 	WORKINGSET_RESTORE,

--- a/mm/vmstat.c
+++ b/mm/vmstat.c

@@ -1143,6 +1143,7 @@ const char * const vmstat_text[] = {
 	"nr_slab_unreclaimable",
 	"nr_isolated_anon",
 	"nr_isolated_file",
+	"workingset_nodes",
 	"workingset_refault",
 	"workingset_activate",
 	"workingset_restore",

--- a/mm/workingset.c
+++ b/mm/workingset.c

@@ -377,12 +377,20 @@ void workingset_update_node(struct radix_tree_node *node)
 	 * already where they should be. The list_empty() test is safe
 	 * as node->private_list is protected by the i_pages lock.
 	 */
+	VM_WARN_ON_ONCE(!irqs_disabled()); /* For __inc_lruvec_page_state */
+
 	if (node->count && node->count == node->exceptional) {
-		if (list_empty(&node->private_list))
+		if (list_empty(&node->private_list)) {
 			list_lru_add(&shadow_nodes, &node->private_list);
+			__inc_lruvec_page_state(virt_to_page(node),
+						WORKINGSET_NODES);
+		}
 	} else {
-		if (!list_empty(&node->private_list))
+		if (!list_empty(&node->private_list)) {
 			list_lru_del(&shadow_nodes, &node->private_list);
+			__dec_lruvec_page_state(virt_to_page(node),
+						WORKINGSET_NODES);
+		}
 	}
 }
 
@@ -473,6 +481,8 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
 	}
 
 	list_lru_isolate(lru, item);
+	__dec_lruvec_page_state(virt_to_page(node), WORKINGSET_NODES);
+
 	spin_unlock(lru_lock);
 
 	/*