xen/xenbus: Rename *RING_PAGE* to *RING_GRANT*
Linux may use a page size different from the grant size, so make clear that the order is actually a number of grants.

Signed-off-by: Julien Grall <julien.grall@citrix.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
parent d5f985c834
commit 9cce2914e2
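The rename matters because a Xen grant always covers XEN_PAGE_SIZE (4KB), while the Linux page size can be larger (for example 64KB on some arm64 configurations), so "number of pages" and "number of grants" are not the same quantity. The sketch below is illustrative only and not part of the patch; the constants are assumptions chosen for the example.

/*
 * Illustrative sketch, not part of the patch: a shared ring of order N is
 * granted as 2^N frames of XEN_PAGE_SIZE (4KB) each, regardless of the
 * kernel's own page size.  With 64KB Linux pages, one Linux page backs
 * several grants, which is why the limits are expressed in grants.
 */
#include <stdio.h>

#define XEN_PAGE_SIZE        4096U   /* grant granularity, fixed at 4KB */
#define LINUX_PAGE_SIZE      65536U  /* assumption: e.g. arm64 with 64KB pages */
#define MAX_RING_GRANT_ORDER 4U      /* mirrors XENBUS_MAX_RING_GRANT_ORDER */

int main(void)
{
        unsigned int order;

        for (order = 0; order <= MAX_RING_GRANT_ORDER; order++) {
                unsigned int nr_grants  = 1U << order;
                unsigned int ring_bytes = nr_grants * XEN_PAGE_SIZE;
                /* Linux pages needed to back the ring, rounded up. */
                unsigned int nr_pages =
                        (ring_bytes + LINUX_PAGE_SIZE - 1) / LINUX_PAGE_SIZE;

                printf("order %u: %2u grants, %6u bytes, %u Linux page(s)\n",
                       order, nr_grants, ring_bytes, nr_pages);
        }
        return 0;
}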
@@ -87,7 +87,7 @@ MODULE_PARM_DESC(max_persistent_grants,
  * Maximum order of pages to be used for the shared ring between front and
  * backend, 4KB page granularity is used.
  */
-unsigned int xen_blkif_max_ring_order = XENBUS_MAX_RING_PAGE_ORDER;
+unsigned int xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
 module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, S_IRUGO);
 MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
 /*
@@ -1446,10 +1446,10 @@ static int __init xen_blkif_init(void)
        if (!xen_domain())
                return -ENODEV;
 
-       if (xen_blkif_max_ring_order > XENBUS_MAX_RING_PAGE_ORDER) {
+       if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
                pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
-                       xen_blkif_max_ring_order, XENBUS_MAX_RING_PAGE_ORDER);
-               xen_blkif_max_ring_order = XENBUS_MAX_RING_PAGE_ORDER;
+                       xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
+               xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
        }
 
        rc = xen_blkif_interface_init();
@@ -829,7 +829,7 @@ static void connect(struct backend_info *be)
 static int connect_ring(struct backend_info *be)
 {
        struct xenbus_device *dev = be->dev;
-       unsigned int ring_ref[XENBUS_MAX_RING_PAGES];
+       unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
        unsigned int evtchn, nr_grefs, ring_page_order;
        unsigned int pers_grants;
        char protocol[64] = "";
@@ -111,7 +111,7 @@ MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the
        __CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)
 
 #define BLK_MAX_RING_SIZE \
-       __CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * XENBUS_MAX_RING_PAGES)
+       __CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * XENBUS_MAX_RING_GRANTS)
 
 /*
  * ring-ref%i i=(-1UL) would take 11 characters + 'ring-ref' is 8, so 19
@@ -133,7 +133,7 @@ struct blkfront_info
        int vdevice;
        blkif_vdev_t handle;
        enum blkif_state connected;
-       int ring_ref[XENBUS_MAX_RING_PAGES];
+       int ring_ref[XENBUS_MAX_RING_GRANTS];
        unsigned int nr_ring_pages;
        struct blkif_front_ring ring;
        unsigned int evtchn, irq;
@@ -1413,7 +1413,7 @@ static int setup_blkring(struct xenbus_device *dev,
        struct blkif_sring *sring;
        int err, i;
        unsigned long ring_size = info->nr_ring_pages * XEN_PAGE_SIZE;
-       grant_ref_t gref[XENBUS_MAX_RING_PAGES];
+       grant_ref_t gref[XENBUS_MAX_RING_GRANTS];
 
        for (i = 0; i < info->nr_ring_pages; i++)
                info->ring_ref[i] = GRANT_INVALID_REF;
@@ -2284,9 +2284,9 @@ static int __init xlblk_init(void)
        if (!xen_domain())
                return -ENODEV;
 
-       if (xen_blkif_max_ring_order > XENBUS_MAX_RING_PAGE_ORDER) {
+       if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
                pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
-                       xen_blkif_max_ring_order, XENBUS_MAX_RING_PAGE_ORDER);
+                       xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
                xen_blkif_max_ring_order = 0;
        }
 
@@ -56,11 +56,11 @@ struct xenbus_map_node {
                        struct vm_struct *area;
                } pv;
                struct {
-                       struct page *pages[XENBUS_MAX_RING_PAGES];
+                       struct page *pages[XENBUS_MAX_RING_GRANTS];
                        void *addr;
                } hvm;
        };
-       grant_handle_t handles[XENBUS_MAX_RING_PAGES];
+       grant_handle_t handles[XENBUS_MAX_RING_GRANTS];
        unsigned int nr_handles;
 };
 
@@ -479,12 +479,12 @@ static int __xenbus_map_ring(struct xenbus_device *dev,
                             unsigned int flags,
                             bool *leaked)
 {
-       struct gnttab_map_grant_ref map[XENBUS_MAX_RING_PAGES];
-       struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_PAGES];
+       struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
+       struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
        int i, j;
        int err = GNTST_okay;
 
-       if (nr_grefs > XENBUS_MAX_RING_PAGES)
+       if (nr_grefs > XENBUS_MAX_RING_GRANTS)
                return -EINVAL;
 
        for (i = 0; i < nr_grefs; i++) {
@@ -540,15 +540,15 @@ static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
 {
        struct xenbus_map_node *node;
        struct vm_struct *area;
-       pte_t *ptes[XENBUS_MAX_RING_PAGES];
-       phys_addr_t phys_addrs[XENBUS_MAX_RING_PAGES];
+       pte_t *ptes[XENBUS_MAX_RING_GRANTS];
+       phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
        int err = GNTST_okay;
        int i;
        bool leaked;
 
        *vaddr = NULL;
 
-       if (nr_grefs > XENBUS_MAX_RING_PAGES)
+       if (nr_grefs > XENBUS_MAX_RING_GRANTS)
                return -EINVAL;
 
        node = kzalloc(sizeof(*node), GFP_KERNEL);
@@ -602,10 +602,10 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
        void *addr;
        bool leaked = false;
        /* Why do we need two arrays? See comment of __xenbus_map_ring */
-       phys_addr_t phys_addrs[XENBUS_MAX_RING_PAGES];
-       unsigned long addrs[XENBUS_MAX_RING_PAGES];
+       phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
+       unsigned long addrs[XENBUS_MAX_RING_GRANTS];
 
-       if (nr_grefs > XENBUS_MAX_RING_PAGES)
+       if (nr_grefs > XENBUS_MAX_RING_GRANTS)
                return -EINVAL;
 
        *vaddr = NULL;
@@ -686,10 +686,10 @@ int xenbus_map_ring(struct xenbus_device *dev, grant_ref_t *gnt_refs,
                    unsigned int nr_grefs, grant_handle_t *handles,
                    unsigned long *vaddrs, bool *leaked)
 {
-       phys_addr_t phys_addrs[XENBUS_MAX_RING_PAGES];
+       phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
        int i;
 
-       if (nr_grefs > XENBUS_MAX_RING_PAGES)
+       if (nr_grefs > XENBUS_MAX_RING_GRANTS)
                return -EINVAL;
 
        for (i = 0; i < nr_grefs; i++)
@@ -722,7 +722,7 @@ EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
 static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
 {
        struct xenbus_map_node *node;
-       struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_PAGES];
+       struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
        unsigned int level;
        int i;
        bool leaked = false;
@@ -787,7 +787,7 @@ static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
        int rv;
        struct xenbus_map_node *node;
        void *addr;
-       unsigned long addrs[XENBUS_MAX_RING_PAGES];
+       unsigned long addrs[XENBUS_MAX_RING_GRANTS];
        int i;
 
        spin_lock(&xenbus_valloc_lock);
@@ -840,11 +840,11 @@ int xenbus_unmap_ring(struct xenbus_device *dev,
                      grant_handle_t *handles, unsigned int nr_handles,
                      unsigned long *vaddrs)
 {
-       struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_PAGES];
+       struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
        int i;
        int err;
 
-       if (nr_handles > XENBUS_MAX_RING_PAGES)
+       if (nr_handles > XENBUS_MAX_RING_GRANTS)
                return -EINVAL;
 
        for (i = 0; i < nr_handles; i++)
@@ -46,8 +46,8 @@
 #include <xen/interface/io/xenbus.h>
 #include <xen/interface/io/xs_wire.h>
 
-#define XENBUS_MAX_RING_PAGE_ORDER 4
-#define XENBUS_MAX_RING_PAGES (1U << XENBUS_MAX_RING_PAGE_ORDER)
+#define XENBUS_MAX_RING_GRANT_ORDER 4
+#define XENBUS_MAX_RING_GRANTS (1U << XENBUS_MAX_RING_GRANT_ORDER)
 #define INVALID_GRANT_HANDLE (~0U)
 
 /* Register callback to watch this node. */
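The header hunk above is where the limit actually lives: every bounds check in this series compares against XENBUS_MAX_RING_GRANTS (16 grants, i.e. a 64KB ring at 4KB per grant). Below is a minimal sketch of the kind of validation the callers perform, modelled on the clamp in xen_blkif_init(); the helper name is made up for this example and does not appear in the patch.

/*
 * Illustrative sketch, not from the patch: clamp an out-of-range ring
 * order the way xen_blkif_init() does, then convert it to the number of
 * grant references the other end must provide (one grant per 4KB).
 */
#define XENBUS_MAX_RING_GRANT_ORDER 4
#define XENBUS_MAX_RING_GRANTS      (1U << XENBUS_MAX_RING_GRANT_ORDER)

static unsigned int ring_order_to_nr_grefs(unsigned int order)
{
        if (order > XENBUS_MAX_RING_GRANT_ORDER)
                order = XENBUS_MAX_RING_GRANT_ORDER;  /* clamp to the supported maximum */

        return 1U << order;  /* never exceeds XENBUS_MAX_RING_GRANTS */
}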