Mailing List Archive

[linux-2.6.18-xen] Remove xencomm page size limit.
# HG changeset patch
# User kfraser@localhost.localdomain
# Date 1188311595 -3600
# Node ID 7419a3be82737bc2de83749030759e104c92e1ea
# Parent b5fdf02c38f4765697196f5fad5d1262f2c157f4
Remove xencomm page size limit.

Currently xencomm has a page size limit, so a domain with a large
amount of memory (e.g. ~100GB) can't be created.

The Xen-side xencomm now accepts a struct xencomm_desc whose address
array crosses a page boundary. Thus it is no longer necessary to
allocate a single page just to avoid crossing a page boundary; we can
allocate exactly-sized memory instead. Note that struct xencomm_desc
itself must not cross a page boundary, and the slab allocator returns
sizeof(void*)-aligned pointers. Where sizeof(*desc) > sizeof(void*),
e.g. in a 32-bit environment, the pointer returned by the slab
allocator doesn't guarantee that struct xencomm_desc avoids crossing a
page boundary, so in that case we fall back to the page allocator.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
---
drivers/xen/core/xencomm.c | 57 +++++++++++++++++++++++++++++++++------------
1 files changed, 43 insertions(+), 14 deletions(-)

diff -r b5fdf02c38f4 -r 7419a3be8273 drivers/xen/core/xencomm.c
--- a/drivers/xen/core/xencomm.c Thu Aug 16 13:44:51 2007 -0600
+++ b/drivers/xen/core/xencomm.c Tue Aug 28 15:33:15 2007 +0100
@@ -68,25 +68,54 @@ static int xencomm_init(struct xencomm_d
return 0;
}

-/* XXX use slab allocator */
-static struct xencomm_desc *xencomm_alloc(gfp_t gfp_mask)
-{
- struct xencomm_desc *desc;
-
- desc = (struct xencomm_desc *)__get_free_page(gfp_mask);
- if (desc == NULL)
- return NULL;
-
- desc->nr_addrs = (PAGE_SIZE - sizeof(struct xencomm_desc)) /
+static struct xencomm_desc *xencomm_alloc(gfp_t gfp_mask,
+ void *buffer, unsigned long bytes)
+{
+ struct xencomm_desc *desc;
+ unsigned long buffer_ulong = (unsigned long)buffer;
+ unsigned long start = buffer_ulong & PAGE_MASK;
+ unsigned long end = (buffer_ulong + bytes) | ~PAGE_MASK;
+ unsigned long nr_addrs = (end - start + 1) >> PAGE_SHIFT;
+ unsigned long size = sizeof(*desc) +
+ sizeof(desc->address[0]) * nr_addrs;
+
+ /*
+ * slab allocator returns at least sizeof(void*) aligned pointer.
+ * When sizeof(*desc) > sizeof(void*), struct xencomm_desc might
+ * cross page boundary.
+ */
+ if (sizeof(*desc) > sizeof(void*)) {
+ unsigned long order = get_order(size);
+ desc = (struct xencomm_desc *)__get_free_pages(gfp_mask,
+ order);
+ if (desc == NULL)
+ return NULL;
+
+ desc->nr_addrs =
+ ((PAGE_SIZE << order) - sizeof(struct xencomm_desc)) /
sizeof(*desc->address);
-
+ } else {
+ desc = kmalloc(size, gfp_mask);
+ if (desc == NULL)
+ return NULL;
+
+ desc->nr_addrs = nr_addrs;
+ }
return desc;
}

void xencomm_free(struct xencomm_handle *desc)
{
- if (desc && !((ulong)desc & XENCOMM_INLINE_FLAG))
- free_page((unsigned long)__va(desc));
+ if (desc && !((ulong)desc & XENCOMM_INLINE_FLAG)) {
+ struct xencomm_desc *desc__ = (struct xencomm_desc*)desc;
+ if (sizeof(*desc__) > sizeof(void*)) {
+ unsigned long size = sizeof(*desc__) +
+ sizeof(desc__->address[0]) * desc__->nr_addrs;
+ unsigned long order = get_order(size);
+ free_pages((unsigned long)__va(desc), order);
+ } else
+ kfree(__va(desc));
+ }
}

static int xencomm_create(void *buffer, unsigned long bytes, struct xencomm_desc **ret, gfp_t gfp_mask)
@@ -105,7 +134,7 @@ static int xencomm_create(void *buffer,

BUG_ON(buffer == NULL); /* 'bytes' is non-zero */

- desc = xencomm_alloc(gfp_mask);
+ desc = xencomm_alloc(gfp_mask, buffer, bytes);
if (!desc) {
printk("%s failure\n", "xencomm_alloc");
return -ENOMEM;

_______________________________________________
Xen-changelog mailing list
Xen-changelog@lists.xensource.com
http://lists.xensource.com/xen-changelog