mm/hugetlb: count file_region to be added when regions_needed != NULL

There are only two use cases of the function add_reservation_in_range():

    * count file_region and return the number in regions_needed
    * do the real list operation without counting

This means it is not necessary to have two parameters to distinguish these
two cases.

Just use regions_needed to separate them.

Signed-off-by: Wei Yang <richard.weiyang@linux.alibaba.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Baoquan He <bhe@redhat.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Link: https://lkml.kernel.org/r/20200831022351.20916-5-richard.weiyang@linux.alibaba.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Wei Yang 2020-10-13 16:56:30 -07:00 committed by Linus Torvalds
parent d3ec7b6e09
commit 972a3da355

View File

@ -321,16 +321,17 @@ static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
} }
} }
/* Must be called with resv->lock held. Calling this with count_only == true /*
* will count the number of pages to be added but will not modify the linked * Must be called with resv->lock held.
* list. If regions_needed != NULL and count_only == true, then regions_needed *
* will indicate the number of file_regions needed in the cache to carry out to * Calling this with regions_needed != NULL will count the number of pages
* add the regions for this range. * to be added but will not modify the linked list. And regions_needed will
* indicate the number of file_regions needed in the cache to carry out to add
* the regions for this range.
*/ */
static long add_reservation_in_range(struct resv_map *resv, long f, long t, static long add_reservation_in_range(struct resv_map *resv, long f, long t,
struct hugetlb_cgroup *h_cg, struct hugetlb_cgroup *h_cg,
struct hstate *h, long *regions_needed, struct hstate *h, long *regions_needed)
bool count_only)
{ {
long add = 0; long add = 0;
struct list_head *head = &resv->regions; struct list_head *head = &resv->regions;
@ -366,14 +367,14 @@ static long add_reservation_in_range(struct resv_map *resv, long f, long t,
*/ */
if (rg->from > last_accounted_offset) { if (rg->from > last_accounted_offset) {
add += rg->from - last_accounted_offset; add += rg->from - last_accounted_offset;
if (!count_only) { if (!regions_needed) {
nrg = get_file_region_entry_from_cache( nrg = get_file_region_entry_from_cache(
resv, last_accounted_offset, rg->from); resv, last_accounted_offset, rg->from);
record_hugetlb_cgroup_uncharge_info(h_cg, h, record_hugetlb_cgroup_uncharge_info(h_cg, h,
resv, nrg); resv, nrg);
list_add(&nrg->link, rg->link.prev); list_add(&nrg->link, rg->link.prev);
coalesce_file_region(resv, nrg); coalesce_file_region(resv, nrg);
} else if (regions_needed) } else
*regions_needed += 1; *regions_needed += 1;
} }
@ -385,13 +386,13 @@ static long add_reservation_in_range(struct resv_map *resv, long f, long t,
*/ */
if (last_accounted_offset < t) { if (last_accounted_offset < t) {
add += t - last_accounted_offset; add += t - last_accounted_offset;
if (!count_only) { if (!regions_needed) {
nrg = get_file_region_entry_from_cache( nrg = get_file_region_entry_from_cache(
resv, last_accounted_offset, t); resv, last_accounted_offset, t);
record_hugetlb_cgroup_uncharge_info(h_cg, h, resv, nrg); record_hugetlb_cgroup_uncharge_info(h_cg, h, resv, nrg);
list_add(&nrg->link, rg->link.prev); list_add(&nrg->link, rg->link.prev);
coalesce_file_region(resv, nrg); coalesce_file_region(resv, nrg);
} else if (regions_needed) } else
*regions_needed += 1; *regions_needed += 1;
} }
@ -484,8 +485,8 @@ static long region_add(struct resv_map *resv, long f, long t,
retry: retry:
/* Count how many regions are actually needed to execute this add. */ /* Count how many regions are actually needed to execute this add. */
add_reservation_in_range(resv, f, t, NULL, NULL, &actual_regions_needed, add_reservation_in_range(resv, f, t, NULL, NULL,
true); &actual_regions_needed);
/* /*
* Check for sufficient descriptors in the cache to accommodate * Check for sufficient descriptors in the cache to accommodate
@ -513,7 +514,7 @@ static long region_add(struct resv_map *resv, long f, long t,
goto retry; goto retry;
} }
add = add_reservation_in_range(resv, f, t, h_cg, h, NULL, false); add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);
resv->adds_in_progress -= in_regions_needed; resv->adds_in_progress -= in_regions_needed;
@ -549,9 +550,9 @@ static long region_chg(struct resv_map *resv, long f, long t,
spin_lock(&resv->lock); spin_lock(&resv->lock);
/* Count how many hugepages in this range are NOT respresented. */ /* Count how many hugepages in this range are NOT represented. */
chg = add_reservation_in_range(resv, f, t, NULL, NULL, chg = add_reservation_in_range(resv, f, t, NULL, NULL,
out_regions_needed, true); out_regions_needed);
if (*out_regions_needed == 0) if (*out_regions_needed == 0)
*out_regions_needed = 1; *out_regions_needed = 1;