s390: allow absolute memory access for /dev/mem
Currently /dev/mem for s390 provides only real memory access. This means that the CPU prefix pages are swapped. The prefix swap for real memory works as follows:

Each CPU owns a prefix register that points to a page-aligned memory location "P". If this CPU accesses the address range [0,0x1fff], it is translated by the hardware to [P,P+0x1fff]. Accordingly, if this CPU accesses the address range [P,P+0x1fff], it is translated by the hardware to [0,0x1fff]. Therefore, if [P,P+0x1fff] or [0,0x1fff] is read from the current /dev/mem device, the incorrectly swapped memory content is returned.

With this patch the /dev/mem architecture code is modified to provide absolute memory access. This is done via the arch-specific functions xlate_dev_mem_ptr() and unxlate_dev_mem_ptr(). For swapped pages on s390 the function xlate_dev_mem_ptr() now returns a new buffer with a copy of the requested absolute memory. If such a buffer was allocated, the unxlate_dev_mem_ptr() function frees it after the /dev/mem code has called copy_to_user().

Signed-off-by: Michael Holzheu <holzheu@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
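To make the prefix swap described above concrete, here is a minimal user-space C sketch of the address translation only. The prefix_translate() helper, the 8 KB LOWCORE_SIZE constant and the example prefix value are invented for illustration; they are not part of the patch, but the logic mirrors the get_swapped() helper added below.

#include <stdio.h>

#define LOWCORE_SIZE 0x2000UL	/* the [0, 0x1fff] range described above */

/*
 * Model of the hardware prefixing: accesses to [0, 0x1fff] go to
 * [P, P+0x1fff] and vice versa; all other addresses are unchanged.
 */
static unsigned long prefix_translate(unsigned long addr, unsigned long P)
{
	if (addr < LOWCORE_SIZE)
		return addr + P;
	if (addr >= P && addr < P + LOWCORE_SIZE)
		return addr - P;
	return addr;
}

int main(void)
{
	unsigned long P = 0x10000;	/* example prefix register value */

	printf("0x0     -> 0x%lx\n", prefix_translate(0x0, P));      /* 0x10000 */
	printf("0x10000 -> 0x%lx\n", prefix_translate(P, P));        /* 0x0 */
	printf("0x30000 -> 0x%lx\n", prefix_translate(0x30000, P));  /* unchanged */
	return 0;
}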
This commit is contained in:
commit b2a68c2356 (parent 6022afc060)
arch/s390/include/asm/io.h

@@ -38,11 +38,8 @@ static inline void * phys_to_virt(unsigned long address)
 	return (void *) address;
 }
 
-/*
- * Convert a physical pointer to a virtual kernel pointer for /dev/mem
- * access
- */
-#define xlate_dev_mem_ptr(p)	__va(p)
+void *xlate_dev_mem_ptr(unsigned long phys);
+void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
 
 /*
  * Convert a virtual cached pointer to an uncached pointer
arch/s390/mm/maccess.c

@@ -12,6 +12,7 @@
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/gfp.h>
+#include <linux/cpu.h>
 #include <asm/ctl_reg.h>
 
 /*
@@ -166,3 +167,69 @@ int copy_from_user_real(void *dest, void __user *src, size_t count)
 	free_page((unsigned long) buf);
 	return rc;
 }
+
+/*
+ * Check if physical address is within prefix or zero page
+ */
+static int is_swapped(unsigned long addr)
+{
+	unsigned long lc;
+	int cpu;
+
+	if (addr < sizeof(struct _lowcore))
+		return 1;
+	for_each_online_cpu(cpu) {
+		lc = (unsigned long) lowcore_ptr[cpu];
+		if (addr > lc + sizeof(struct _lowcore) - 1 || addr < lc)
+			continue;
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * Return swapped prefix or zero page address
+ */
+static unsigned long get_swapped(unsigned long addr)
+{
+	unsigned long prefix = store_prefix();
+
+	if (addr < sizeof(struct _lowcore))
+		return addr + prefix;
+	if (addr >= prefix && addr < prefix + sizeof(struct _lowcore))
+		return addr - prefix;
+	return addr;
+}
+
+/*
+ * Convert a physical pointer for /dev/mem access
+ *
+ * For swapped prefix pages a new buffer is returned that contains a copy of
+ * the absolute memory. The buffer size is maximum one page large.
+ */
+void *xlate_dev_mem_ptr(unsigned long addr)
+{
+	void *bounce = (void *) addr;
+	unsigned long size;
+
+	get_online_cpus();
+	preempt_disable();
+	if (is_swapped(addr)) {
+		size = PAGE_SIZE - (addr & ~PAGE_MASK);
+		bounce = (void *) __get_free_page(GFP_ATOMIC);
+		if (bounce)
+			memcpy_real(bounce, (void *) get_swapped(addr), size);
+	}
+	preempt_enable();
+	put_online_cpus();
+	return bounce;
+}
+
+/*
+ * Free converted buffer for /dev/mem access (if necessary)
+ */
+void unxlate_dev_mem_ptr(unsigned long addr, void *buf)
+{
+	if ((void *) addr != buf)
+		free_page((unsigned long) buf);
+}
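For context, the generic /dev/mem read code is the consumer of these two hooks. The following is a hedged sketch of the calling convention described in the commit message (translate, copy_to_user(), then unxlate to free a possible bounce buffer); the helper name read_mem_chunk() is invented for illustration and this is not the literal drivers/char/mem.c implementation.

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/uaccess.h>	/* copy_to_user() */
#include <asm/io.h>		/* xlate_dev_mem_ptr(), unxlate_dev_mem_ptr() */

/*
 * Simplified sketch: translate one chunk of physical memory, copy it to
 * user space, then release the (possibly allocated) bounce buffer. The
 * real /dev/mem code loops over the request and performs more checks.
 */
static ssize_t read_mem_chunk(unsigned long p, char __user *buf, size_t sz)
{
	void *ptr;

	ptr = xlate_dev_mem_ptr(p);	/* may be a bounce buffer on s390 */
	if (!ptr)
		return -EFAULT;

	if (copy_to_user(buf, ptr, sz)) {
		unxlate_dev_mem_ptr(p, ptr);	/* free bounce buffer, if any */
		return -EFAULT;
	}

	unxlate_dev_mem_ptr(p, ptr);
	return sz;
}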