forked from luck/tmp_suning_uos_patched
xen/swiotlb: don't initialize swiotlb twice on arm64
On arm64, swiotlb is often (though not always) already initialized by mem_init. We don't want to initialize it twice, which would trigger a second memory allocation. Moreover, the second memory pool is typically made of high pages and ends up replacing the original memory pool of low pages. As a side effect of this change, it is possible to have low pages in swiotlb-xen on arm64.

Signed-off-by: Stefano Stabellini <stefanos@xilinx.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
This commit is contained in:
parent
063b8271ec
commit
4e7372e0dc
|
@ -211,6 +211,15 @@ int __ref xen_swiotlb_init(int verbose, bool early)
|
|||
retry:
|
||||
bytes = xen_set_nslabs(xen_io_tlb_nslabs);
|
||||
order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
|
||||
|
||||
/*
|
||||
* IO TLB memory already allocated. Just use it.
|
||||
*/
|
||||
if (io_tlb_start != 0) {
|
||||
xen_io_tlb_start = phys_to_virt(io_tlb_start);
|
||||
goto end;
|
||||
}
|
||||
|
||||
/*
|
||||
* Get IO TLB memory from any location.
|
||||
*/
|
||||
|
@ -237,7 +246,6 @@ int __ref xen_swiotlb_init(int verbose, bool early)
|
|||
m_ret = XEN_SWIOTLB_ENOMEM;
|
||||
goto error;
|
||||
}
|
||||
xen_io_tlb_end = xen_io_tlb_start + bytes;
|
||||
/*
|
||||
* And replace that memory with pages under 4GB.
|
||||
*/
|
||||
|
@ -264,6 +272,8 @@ int __ref xen_swiotlb_init(int verbose, bool early)
|
|||
} else
|
||||
rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
|
||||
|
||||
end:
|
||||
xen_io_tlb_end = xen_io_tlb_start + bytes;
|
||||
if (!rc)
|
||||
swiotlb_set_max_segment(PAGE_SIZE);
|
||||
|
||||
|
|
Loading…
Reference in New Issue
Block a user