[linux-2.6.18-xen] ia64, xencomm: support XEN_SYSCTL_topologyinfo and XEN_SYSCTL_numainfo
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1275890255 -3600
# Node ID 8af51a7a64c72b66debc8f312affc37d156649d6
# Parent b7eb9756e5225835e02fd2f5f10f65439bd9ff08
ia64, xencomm: support XEN_SYSCTL_topologyinfo and XEN_SYSCTL_numainfo

Signed-off-by: KUWAMURA Shin'ya <kuwa@jp.fujitsu.com>
---
arch/ia64/xen/xcom_privcmd.c | 88 +++++++++++++++++++++++++++++++++++++------
1 files changed, 76 insertions(+), 12 deletions(-)

diff -r b7eb9756e522 -r 8af51a7a64c7 arch/ia64/xen/xcom_privcmd.c
--- a/arch/ia64/xen/xcom_privcmd.c Mon Jun 07 06:57:11 2010 +0100
+++ b/arch/ia64/xen/xcom_privcmd.c Mon Jun 07 06:57:35 2010 +0100
@@ -87,6 +87,7 @@ xencomm_privcmd_sysctl(privcmd_hypercall
struct xencomm_handle *op_desc;
struct xencomm_handle *desc = NULL;
struct xencomm_handle *desc1 = NULL;
+ struct xencomm_handle *desc2 = NULL;
int ret = 0;

user_op = (xen_sysctl_t __user *)hypercall->arg[0];
@@ -111,6 +112,7 @@ xencomm_privcmd_sysctl(privcmd_hypercall
(void *)desc);
break;
case XEN_SYSCTL_tbuf_op:
+ case XEN_SYSCTL_physinfo:
case XEN_SYSCTL_sched_id:
case XEN_SYSCTL_availheap:
break;
@@ -183,18 +185,6 @@ xencomm_privcmd_sysctl(privcmd_hypercall
(void *)desc);
break;

- case XEN_SYSCTL_physinfo:
- desc = xencomm_map(
- xen_guest_handle(kern_op.u.physinfo.cpu_to_node),
- kern_op.u.physinfo.max_cpu_id * sizeof(uint32_t));
- if (xen_guest_handle(kern_op.u.physinfo.cpu_to_node) != NULL &&
- kern_op.u.physinfo.max_cpu_id > 0 && desc == NULL)
- return -ENOMEM;
-
- set_xen_guest_handle(kern_op.u.physinfo.cpu_to_node,
- (void *)desc);
- break;
-
case XEN_SYSCTL_get_pmstat:
if (kern_op.u.get_pmstat.type == PMSTAT_get_pxstat) {
struct pm_px_stat *getpx =
@@ -219,6 +209,79 @@ xencomm_privcmd_sysctl(privcmd_hypercall
}
break;

+ case XEN_SYSCTL_topologyinfo:
+ {
+ xen_sysctl_topologyinfo_t *info = &kern_op.u.topologyinfo;
+ unsigned long size =
+ (info->max_cpu_index + 1) * sizeof(uint32_t);
+
+ desc = xencomm_map(xen_guest_handle(info->cpu_to_core), size);
+ if (xen_guest_handle(info->cpu_to_core) != NULL &&
+ info->max_cpu_index > 0 && desc == NULL)
+ return -ENOMEM;
+
+ set_xen_guest_handle(info->cpu_to_core, (void *)desc);
+
+ desc1 = xencomm_map(
+ xen_guest_handle(info->cpu_to_socket), size);
+ if (xen_guest_handle(info->cpu_to_socket) != NULL &&
+ info->max_cpu_index > 0 && desc1 == NULL) {
+ xencomm_free(desc);
+ return -ENOMEM;
+ }
+
+ set_xen_guest_handle(info->cpu_to_socket, (void *)desc1);
+
+ desc2 = xencomm_map(xen_guest_handle(info->cpu_to_node), size);
+ if (xen_guest_handle(info->cpu_to_node) != NULL &&
+ info->max_cpu_index > 0 && desc2 == NULL) {
+ xencomm_free(desc1);
+ xencomm_free(desc);
+ return -ENOMEM;
+ }
+
+ set_xen_guest_handle(info->cpu_to_node, (void *)desc2);
+ break;
+ }
+
+ case XEN_SYSCTL_numainfo:
+ {
+ xen_sysctl_numainfo_t *info = &kern_op.u.numainfo;
+ uint32_t max = info->max_node_index;
+
+ desc = xencomm_map(xen_guest_handle(info->node_to_memsize),
+ (max + 1) * sizeof(uint64_t));
+ if (xen_guest_handle(info->node_to_memsize) != NULL &&
+ desc == NULL)
+ return -ENOMEM;
+
+ set_xen_guest_handle(info->node_to_memsize, (void *)desc);
+
+ desc1 = xencomm_map(xen_guest_handle(info->node_to_memfree),
+ (max + 1) * sizeof(uint64_t));
+ if (xen_guest_handle(info->node_to_memfree) != NULL &&
+ desc1 == NULL) {
+ xencomm_free(desc);
+ return -ENOMEM;
+ }
+
+ set_xen_guest_handle(info->node_to_memfree, (void *)desc1);
+
+ desc2 = xencomm_map(
+ xen_guest_handle(info->node_to_node_distance),
+ (max + 1) * (max + 1) * sizeof(uint32_t));
+ if (xen_guest_handle(info->node_to_node_distance) != NULL &&
+ desc2 == NULL) {
+ xencomm_free(desc1);
+ xencomm_free(desc);
+ return -ENOMEM;
+ }
+
+ set_xen_guest_handle(info->node_to_node_distance,
+ (void *)desc2);
+ break;
+ }
+
default:
printk("%s: unknown sysctl cmd %d\n", __func__, kern_op.cmd);
return -ENOSYS;
@@ -249,6 +312,7 @@ xencomm_privcmd_sysctl(privcmd_hypercall

xencomm_free(desc);
xencomm_free(desc1);
+ xencomm_free(desc2);
return ret;
}


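Background note (not part of the changeset): on ia64, paravirtualized callers cannot hand the hypervisor raw guest-virtual pointers, so xcom_privcmd.c wraps every buffer referenced by a sysctl in a xencomm descriptor before forwarding the hypercall. The patch above applies that existing map/rewrite/unwind pattern to the three per-CPU arrays of XEN_SYSCTL_topologyinfo and the three per-node arrays of XEN_SYSCTL_numainfo, freeing desc, desc1 and desc2 once the hypercall completes. The following sketch distills the per-array step for one handle; wrap_cpu_to_core() is a hypothetical helper invented for illustration, built only from the calls the patch itself uses (xencomm_map, xen_guest_handle, set_xen_guest_handle), and is kernel-context code, not a standalone program.

/*
 * Illustrative sketch only: the per-array translation step that the
 * topologyinfo/numainfo cases above repeat for each guest handle.
 */
static int wrap_cpu_to_core(xen_sysctl_topologyinfo_t *info,
                            struct xencomm_handle **descp)
{
        unsigned long size = (info->max_cpu_index + 1) * sizeof(uint32_t);

        /* Build a xencomm descriptor covering the caller's array. */
        *descp = xencomm_map(xen_guest_handle(info->cpu_to_core), size);
        if (xen_guest_handle(info->cpu_to_core) != NULL &&
            info->max_cpu_index > 0 && *descp == NULL)
                return -ENOMEM; /* caller unwinds any earlier mappings */

        /* Point the hypervisor at the descriptor, not the raw pointer. */
        set_xen_guest_handle(info->cpu_to_core, (void *)*descp);
        return 0;
}

After the hypercall returns, each descriptor is released with xencomm_free(), which is why the patch also adds xencomm_free(desc2) to the common exit path.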
_______________________________________________
Xen-changelog mailing list
Xen-changelog@lists.xensource.com
http://lists.xensource.com/xen-changelog