xenbus: Support HVM backends
Add HVM implementations of xenbus_(map,unmap)_ring_v(alloc,free) so that
ring mappings can be done without using GNTMAP_contains_pte, which is not
supported on HVM. This also removes the need to use vmlist_lock on PV by
tracking the allocated xenbus rings.

Signed-off-by: Daniel De Graaf <dgdegra@tycho.nsa.gov>
[v1: Fix compile error when XENBUS_FRONTEND is defined as module]
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent cb85f123cd
commit 2c5d37d30f
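For reference, the exported entry points keep their existing signatures; only the backing implementation is selected at runtime. Below is a minimal sketch of how a backend driver consumes this API after the patch; the function my_backend_connect and its surrounding logic are illustrative, not part of this commit.

#include <xen/xenbus.h>

/* Hypothetical caller: map the frontend's shared ring page by grant
 * reference. After this patch the same call works in both PV and HVM
 * domains; ring_ops, selected once in xenbus_ring_ops_init(), dispatches
 * to the _pv or _hvm implementation. */
static int my_backend_connect(struct xenbus_device *dev, int ring_ref)
{
        void *ring;     /* virtual address of the mapped ring page */
        int err;

        err = xenbus_map_ring_valloc(dev, ring_ref, &ring);
        if (err)        /* grant errors were already reported via xenbus_dev_fatal() */
                return err;

        /* ... set up and use the shared ring at 'ring' ... */

        /* On disconnect, the unmap path finds the tracked mapping by vaddr. */
        return xenbus_unmap_ring_vfree(dev, ring);
}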
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -32,15 +32,39 @@
 #include <linux/slab.h>
 #include <linux/types.h>
+#include <linux/spinlock.h>
 #include <linux/vmalloc.h>
 #include <linux/export.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/page.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/event_channel.h>
+#include <xen/balloon.h>
 #include <xen/events.h>
 #include <xen/grant_table.h>
 #include <xen/xenbus.h>
+#include <xen/xen.h>
 
+#include "xenbus_probe.h"
+
+struct xenbus_map_node {
+        struct list_head next;
+        union {
+                struct vm_struct *area; /* PV */
+                struct page *page;      /* HVM */
+        };
+        grant_handle_t handle;
+};
+
+static DEFINE_SPINLOCK(xenbus_valloc_lock);
+static LIST_HEAD(xenbus_valloc_pages);
+
+struct xenbus_ring_ops {
+        int (*map)(struct xenbus_device *dev, int gnt, void **vaddr);
+        int (*unmap)(struct xenbus_device *dev, void *vaddr);
+};
+
+static const struct xenbus_ring_ops *ring_ops __read_mostly;
+
 const char *xenbus_strstate(enum xenbus_state state)
 {
@@ -435,21 +459,35 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
  * XenbusStateClosing and the error message will be saved in XenStore.
  */
 int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
+{
+        return ring_ops->map(dev, gnt_ref, vaddr);
+}
+EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
+
+static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
+                                     int gnt_ref, void **vaddr)
 {
         struct gnttab_map_grant_ref op = {
                 .flags = GNTMAP_host_map | GNTMAP_contains_pte,
                 .ref   = gnt_ref,
                 .dom   = dev->otherend_id,
         };
+        struct xenbus_map_node *node;
         struct vm_struct *area;
         pte_t *pte;
 
         *vaddr = NULL;
 
-        area = alloc_vm_area(PAGE_SIZE, &pte);
-        if (!area)
+        node = kzalloc(sizeof(*node), GFP_KERNEL);
+        if (!node)
                 return -ENOMEM;
 
+        area = alloc_vm_area(PAGE_SIZE, &pte);
+        if (!area) {
+                kfree(node);
+                return -ENOMEM;
+        }
+
         op.host_addr = arbitrary_virt_to_machine(pte).maddr;
 
         if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
@@ -457,19 +495,59 @@ int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
 
         if (op.status != GNTST_okay) {
                 free_vm_area(area);
+                kfree(node);
                 xenbus_dev_fatal(dev, op.status,
                                  "mapping in shared page %d from domain %d",
                                  gnt_ref, dev->otherend_id);
                 return op.status;
         }
 
-        /* Stuff the handle in an unused field */
-        area->phys_addr = (unsigned long)op.handle;
+        node->handle = op.handle;
+        node->area = area;
+
+        spin_lock(&xenbus_valloc_lock);
+        list_add(&node->next, &xenbus_valloc_pages);
+        spin_unlock(&xenbus_valloc_lock);
 
         *vaddr = area->addr;
         return 0;
 }
-EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
+
+static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
+                                      int gnt_ref, void **vaddr)
+{
+        struct xenbus_map_node *node;
+        int err;
+        void *addr;
+
+        *vaddr = NULL;
+
+        node = kzalloc(sizeof(*node), GFP_KERNEL);
+        if (!node)
+                return -ENOMEM;
+
+        err = alloc_xenballooned_pages(1, &node->page, false /* lowmem */);
+        if (err)
+                goto out_err;
+
+        addr = pfn_to_kaddr(page_to_pfn(node->page));
+
+        err = xenbus_map_ring(dev, gnt_ref, &node->handle, addr);
+        if (err)
+                goto out_err;
+
+        spin_lock(&xenbus_valloc_lock);
+        list_add(&node->next, &xenbus_valloc_pages);
+        spin_unlock(&xenbus_valloc_lock);
+
+        *vaddr = addr;
+        return 0;
+
+ out_err:
+        free_xenballooned_pages(1, &node->page);
+        kfree(node);
+        return err;
+}
 
 
 /**
@@ -525,32 +603,36 @@ EXPORT_SYMBOL_GPL(xenbus_map_ring);
  */
 int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
 {
-        struct vm_struct *area;
+        return ring_ops->unmap(dev, vaddr);
+}
+EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
+
+static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
+{
+        struct xenbus_map_node *node;
         struct gnttab_unmap_grant_ref op = {
                 .host_addr = (unsigned long)vaddr,
         };
         unsigned int level;
 
-        /* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr)
-         * method so that we don't have to muck with vmalloc internals here.
-         * We could force the user to hang on to their struct vm_struct from
-         * xenbus_map_ring_valloc, but these 6 lines considerably simplify
-         * this API.
-         */
-        read_lock(&vmlist_lock);
-        for (area = vmlist; area != NULL; area = area->next) {
-                if (area->addr == vaddr)
-                        break;
+        spin_lock(&xenbus_valloc_lock);
+        list_for_each_entry(node, &xenbus_valloc_pages, next) {
+                if (node->area->addr == vaddr) {
+                        list_del(&node->next);
+                        goto found;
+                }
         }
-        read_unlock(&vmlist_lock);
+        node = NULL;
+ found:
+        spin_unlock(&xenbus_valloc_lock);
 
-        if (!area) {
+        if (!node) {
                 xenbus_dev_error(dev, -ENOENT,
                                  "can't find mapped virtual address %p", vaddr);
                 return GNTST_bad_virt_addr;
         }
 
-        op.handle = (grant_handle_t)area->phys_addr;
+        op.handle = node->handle;
         op.host_addr = arbitrary_virt_to_machine(
                 lookup_address((unsigned long)vaddr, &level)).maddr;
 
@@ -558,16 +640,50 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
                 BUG();
 
         if (op.status == GNTST_okay)
-                free_vm_area(area);
+                free_vm_area(node->area);
         else
                 xenbus_dev_error(dev, op.status,
                                  "unmapping page at handle %d error %d",
-                                 (int16_t)area->phys_addr, op.status);
+                                 node->handle, op.status);
 
+        kfree(node);
         return op.status;
 }
-EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
+
+static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
+{
+        int rv;
+        struct xenbus_map_node *node;
+        void *addr;
+
+        spin_lock(&xenbus_valloc_lock);
+        list_for_each_entry(node, &xenbus_valloc_pages, next) {
+                addr = pfn_to_kaddr(page_to_pfn(node->page));
+                if (addr == vaddr) {
+                        list_del(&node->next);
+                        goto found;
+                }
+        }
+        node = NULL;
+ found:
+        spin_unlock(&xenbus_valloc_lock);
+
+        if (!node) {
+                xenbus_dev_error(dev, -ENOENT,
+                                 "can't find mapped virtual address %p", vaddr);
+                return GNTST_bad_virt_addr;
+        }
+
+        rv = xenbus_unmap_ring(dev, node->handle, addr);
+
+        if (!rv)
+                free_xenballooned_pages(1, &node->page);
+        else
+                WARN(1, "Leaking %p\n", vaddr);
+
+        kfree(node);
+        return rv;
+}
 
 /**
  * xenbus_unmap_ring
@@ -617,3 +733,21 @@ enum xenbus_state xenbus_read_driver_state(const char *path)
         return result;
 }
 EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
+
+static const struct xenbus_ring_ops ring_ops_pv = {
+        .map = xenbus_map_ring_valloc_pv,
+        .unmap = xenbus_unmap_ring_vfree_pv,
+};
+
+static const struct xenbus_ring_ops ring_ops_hvm = {
+        .map = xenbus_map_ring_valloc_hvm,
+        .unmap = xenbus_unmap_ring_vfree_hvm,
+};
+
+void __init xenbus_ring_ops_init(void)
+{
+        if (xen_pv_domain())
+                ring_ops = &ring_ops_pv;
+        else
+                ring_ops = &ring_ops_hvm;
+}
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -730,6 +730,8 @@ static int __init xenbus_init(void)
         if (!xen_domain())
                 return -ENODEV;
 
+        xenbus_ring_ops_init();
+
         if (xen_hvm_domain()) {
                 uint64_t v = 0;
                 err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
--- a/drivers/xen/xenbus/xenbus_probe.h
+++ b/drivers/xen/xenbus/xenbus_probe.h
@@ -76,4 +76,6 @@ extern void xenbus_otherend_changed(struct xenbus_watch *watch,
 extern int xenbus_read_otherend_details(struct xenbus_device *xendev,
                                         char *id_node, char *path_node);
 
+void xenbus_ring_ops_init(void);
+
 #endif