dma-direct: don't over-decrypt memory

commit 4a37f3dd9a83186cb88d44808ab35b78375082c9 upstream.

The original x86 sev_alloc() only called set_memory_decrypted() on
memory returned by alloc_pages_node(), so the page order calculation
fell out of that logic. However, the common dma-direct code has several
potential allocators, not all of which are guaranteed to round up the
underlying allocation to a power-of-two size, so carrying over that
calculation for the encryption/decryption size was a mistake. Fix it by
rounding to a *number* of pages, rather than an order.

Until recently there was an even worse interaction with DMA_DIRECT_REMAP
where we could have ended up decrypting part of the next adjacent
vmalloc area, only averted by no architecture actually supporting both
configs at once. Don't ask how I found that one out...

Fixes: c10f07aa27 ("dma/direct: Handle force decryption for DMA coherent buffers in common code")
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: David Rientjes <rientjes@google.com>
[ backport the functional change without all the prior refactoring ]
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
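
For illustration only (not part of the patch), here is a minimal userspace sketch of why the two expressions differ: get_order() rounds a size up to a power-of-two number of pages, while PFN_UP() rounds it up to the exact number of pages. PAGE_SHIFT and both helpers below are simplified stand-ins for the kernel macros, assuming a 4 KiB page size.

/*
 * Userspace demonstration only -- PAGE_SHIFT, PFN_UP() and get_order()
 * are simplified stand-ins for the kernel macros, assuming 4 KiB pages.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Round a byte count up to a number of pages (what the fix uses). */
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

/* Smallest order such that 2^order pages cover size (what the old code used). */
static unsigned int get_order(unsigned long size)
{
	unsigned int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned long size = 5 * PAGE_SIZE;	/* e.g. a 20 KiB allocation */

	/* Prints 8: the old calculation rounds 5 pages up to the next power of two. */
	printf("1 << get_order(size) = %lu pages\n", 1UL << get_order(size));
	/* Prints 5: the fixed calculation covers exactly the allocated pages. */
	printf("PFN_UP(size)         = %lu pages\n", PFN_UP(size));
	return 0;
}

With the old expression, set_memory_decrypted() could therefore be applied to trailing pages the allocator never actually handed out.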
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -188,7 +188,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 			goto out_free_pages;
 		if (force_dma_unencrypted(dev)) {
 			err = set_memory_decrypted((unsigned long)ret,
-						   1 << get_order(size));
+						   PFN_UP(size));
 			if (err)
 				goto out_free_pages;
 		}
@@ -210,7 +210,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	ret = page_address(page);
 	if (force_dma_unencrypted(dev)) {
 		err = set_memory_decrypted((unsigned long)ret,
-					   1 << get_order(size));
+					   PFN_UP(size));
 		if (err)
 			goto out_free_pages;
 	}
@@ -231,7 +231,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 out_encrypt_pages:
 	if (force_dma_unencrypted(dev)) {
 		err = set_memory_encrypted((unsigned long)page_address(page),
-					   1 << get_order(size));
+					   PFN_UP(size));
 		/* If memory cannot be re-encrypted, it must be leaked */
 		if (err)
 			return NULL;
@@ -244,8 +244,6 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 void dma_direct_free(struct device *dev, size_t size,
 		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
 {
-	unsigned int page_order = get_order(size);
-
 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
 	    !force_dma_unencrypted(dev)) {
 		/* cpu_addr is a struct page cookie, not a kernel address */
@@ -266,7 +264,7 @@ void dma_direct_free(struct device *dev, size_t size,
 		return;
 
 	if (force_dma_unencrypted(dev))
-		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
+		set_memory_encrypted((unsigned long)cpu_addr, PFN_UP(size));
 
 	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
 		vunmap(cpu_addr);
@@ -302,8 +300,7 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 
 	ret = page_address(page);
 	if (force_dma_unencrypted(dev)) {
-		if (set_memory_decrypted((unsigned long)ret,
-				1 << get_order(size)))
+		if (set_memory_decrypted((unsigned long)ret, PFN_UP(size)))
 			goto out_free_pages;
 	}
 	memset(ret, 0, size);
@@ -318,7 +315,6 @@ void dma_direct_free_pages(struct device *dev, size_t size,
 		struct page *page, dma_addr_t dma_addr,
 		enum dma_data_direction dir)
 {
-	unsigned int page_order = get_order(size);
 	void *vaddr = page_address(page);
 
 	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
@@ -327,7 +323,7 @@ void dma_direct_free_pages(struct device *dev, size_t size,
 		return;
 
 	if (force_dma_unencrypted(dev))
-		set_memory_encrypted((unsigned long)vaddr, 1 << page_order);
+		set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));
 
 	dma_free_contiguous(dev, page, size);
 }