
[PATCH 1/6 v2] xenbus: Support HVM backends
The initial version lacked the list_del in xenbus_unmap_ring_vfree_hvm.

-------------------------------------------------------->8

Add HVM implementations of xenbus_(map,unmap)_ring_v(alloc,free) so
that ring mappings can be done without using GNTMAP_contains_pte, which
is not supported on HVM.

Signed-off-by: Daniel De Graaf <dgdegra@tycho.nsa.gov>
---
drivers/xen/xenbus/xenbus_client.c | 155 +++++++++++++++++++++++++++++-------
1 files changed, 125 insertions(+), 30 deletions(-)

diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 52bc57f..4b2fbcc 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -32,15 +32,26 @@

#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
+#include <xen/balloon.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>

+struct xenbus_map_node {
+ struct list_head next;
+ struct page *page;
+ grant_handle_t handle;
+};
+
+static DEFINE_SPINLOCK(xenbus_valloc_lock);
+static LIST_HEAD(xenbus_valloc_pages);
+
const char *xenbus_strstate(enum xenbus_state state)
{
static const char *const name[] = {
@@ -419,21 +430,8 @@ int xenbus_free_evtchn(struct xenbus_device *dev, int port)
EXPORT_SYMBOL_GPL(xenbus_free_evtchn);


-/**
- * xenbus_map_ring_valloc
- * @dev: xenbus device
- * @gnt_ref: grant reference
- * @vaddr: pointer to address to be filled out by mapping
- *
- * Based on Rusty Russell's skeleton driver's map_page.
- * Map a page of memory into this domain from another domain's grant table.
- * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
- * page to that address, and sets *vaddr to that address.
- * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
- * or -ENOMEM on error. If an error is returned, device will switch to
- * XenbusStateClosing and the error message will be saved in XenStore.
- */
-int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
+static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
+ int gnt_ref, void **vaddr)
{
struct gnttab_map_grant_ref op = {
.flags = GNTMAP_host_map | GNTMAP_contains_pte,
@@ -468,6 +466,64 @@ int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
*vaddr = area->addr;
return 0;
}
+
+static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
+ int gnt_ref, void **vaddr)
+{
+ struct xenbus_map_node *node;
+ int err;
+ void *addr;
+
+ *vaddr = NULL;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ err = alloc_xenballooned_pages(1, &node->page, false);
+ if (err)
+ goto out_err;
+
+ addr = pfn_to_kaddr(page_to_pfn(node->page));
+
+ err = xenbus_map_ring(dev, gnt_ref, &node->handle, addr);
+ if (err)
+ goto out_err;
+
+ spin_lock(&xenbus_valloc_lock);
+ list_add(&node->next, &xenbus_valloc_pages);
+ spin_unlock(&xenbus_valloc_lock);
+
+ *vaddr = addr;
+ return 0;
+
+ out_err:
+ free_xenballooned_pages(1, &node->page);
+ kfree(node);
+ return err;
+}
+
+/**
+ * xenbus_map_ring_valloc
+ * @dev: xenbus device
+ * @gnt_ref: grant reference
+ * @vaddr: pointer to address to be filled out by mapping
+ *
+ * Based on Rusty Russell's skeleton driver's map_page.
+ * Map a page of memory into this domain from another domain's grant table.
+ * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
+ * page to that address, and sets *vaddr to that address.
+ * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
+ * or -ENOMEM on error. If an error is returned, device will switch to
+ * XenbusStateClosing and the error message will be saved in XenStore.
+ */
+int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
+{
+ if (xen_pv_domain())
+ return xenbus_map_ring_valloc_pv(dev, gnt_ref, vaddr);
+ else
+ return xenbus_map_ring_valloc_hvm(dev, gnt_ref, vaddr);
+}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);


@@ -509,20 +565,7 @@ int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
}
EXPORT_SYMBOL_GPL(xenbus_map_ring);

-
-/**
- * xenbus_unmap_ring_vfree
- * @dev: xenbus device
- * @vaddr: addr to unmap
- *
- * Based on Rusty Russell's skeleton driver's unmap_page.
- * Unmap a page of memory in this domain that was imported from another domain.
- * Use xenbus_unmap_ring_vfree if you mapped in your memory with
- * xenbus_map_ring_valloc (it will free the virtual address space).
- * Returns 0 on success and returns GNTST_* on error
- * (see xen/include/interface/grant_table.h).
- */
-int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
+static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
{
struct vm_struct *area;
struct gnttab_unmap_grant_ref op = {
@@ -565,8 +608,60 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)

return op.status;
}
-EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);

+static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
+{
+ int rv;
+ struct xenbus_map_node *node;
+ void *addr;
+
+ spin_lock(&xenbus_valloc_lock);
+ list_for_each_entry(node, &xenbus_valloc_pages, next) {
+ addr = pfn_to_kaddr(page_to_pfn(node->page));
+ if (addr == vaddr) {
+ list_del(&node->next);
+ goto found;
+ }
+ }
+ node = NULL;
+ found:
+ spin_unlock(&xenbus_valloc_lock);
+
+ if (!node) {
+ xenbus_dev_error(dev, -ENOENT,
+ "can't find mapped virtual address %p", vaddr);
+ return -ENOENT;
+ }
+
+ rv = xenbus_unmap_ring(dev, node->handle, addr);
+
+ if (!rv)
+ free_xenballooned_pages(1, &node->page);
+
+ kfree(node);
+ return rv;
+}
+
+/**
+ * xenbus_unmap_ring_vfree
+ * @dev: xenbus device
+ * @vaddr: addr to unmap
+ *
+ * Based on Rusty Russell's skeleton driver's unmap_page.
+ * Unmap a page of memory in this domain that was imported from another domain.
+ * Use xenbus_unmap_ring_vfree if you mapped in your memory with
+ * xenbus_map_ring_valloc (it will free the virtual address space).
+ * Returns 0 on success and returns GNTST_* on error
+ * (see xen/include/interface/grant_table.h).
+ */
+int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
+{
+ if (xen_pv_domain())
+ return xenbus_unmap_ring_vfree_pv(dev, vaddr);
+ else
+ return xenbus_unmap_ring_vfree_hvm(dev, vaddr);
+}
+EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);

/**
* xenbus_unmap_ring
--
1.7.6.4
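
For context, a backend driver would consume these helpers roughly as in the
sketch below. This is a minimal illustration, not part of the patch: the
connect_ring() name and the surrounding error handling are placeholder
assumptions; only the xenbus_map_ring_valloc()/xenbus_unmap_ring_vfree()
signatures come from the code above.

#include <xen/xenbus.h>

/* Hypothetical caller; gnt_ref is the grant reference advertised by the
 * frontend via xenstore. */
static int connect_ring(struct xenbus_device *dev, int gnt_ref)
{
        void *vaddr;
        int err;

        /* On PV this maps the grant into a vmalloc area using
         * GNTMAP_contains_pte; on HVM it maps the grant over a
         * ballooned page, so no PTE manipulation is needed. */
        err = xenbus_map_ring_valloc(dev, gnt_ref, &vaddr);
        if (err)
                return err;

        /* ... use vaddr as the shared ring ... */

        /* Tears down the mapping and frees the vmalloc area (PV) or
         * returns the page to the balloon (HVM). */
        return xenbus_unmap_ring_vfree(dev, vaddr);
}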


Re: [PATCH 1/6 v2] xenbus: Support HVM backends
On Thu, Oct 20, 2011 at 12:48:04PM -0400, Daniel De Graaf wrote:
> [...]
>
> +static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
> + int gnt_ref, void **vaddr)
> +{
> + struct xenbus_map_node *node;
> + int err;
> + void *addr;
> +
> + *vaddr = NULL;
> +
> + node = kzalloc(sizeof(*node), GFP_KERNEL);
> + if (!node)
> + return -ENOMEM;
> +
> + err = alloc_xenballooned_pages(1, &node->page, false);

Add /* lowmem */ on the 'false' parameter.
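
Presumably the intent is for the call to read:

        err = alloc_xenballooned_pages(1, &node->page, false /* lowmem */);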

