
[xen-unstable] sysctl: Return max_node_id rather than nr_nodes from physinfo command.
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1262772835 0
# Node ID a1d0a575b4bace0a79ecfb48704b24fc9ae7866b
# Parent bec36e63fb0e82f78ff9cfb1737b8a09cf621d05
sysctl: Return max_node_id rather than nr_nodes from physinfo command.

Python extension continues to synthesise a nr_nodes value.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
---
tools/python/xen/lowlevel/xc/xc.c |   95 ++++++++++++++++++--------------------
xen/arch/ia64/xen/dom0_ops.c      |    2
xen/arch/x86/sysctl.c             |    2
xen/include/public/sysctl.h       |    4 -
xen/include/xen/nodemask.h        |   32 ++++++++----
5 files changed, 73 insertions(+), 62 deletions(-)
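
For context, the xc.c hunk below stops trusting a hypervisor-supplied node
count: the hypervisor now reports max_node_id, and the Python extension
counts populated nodes itself. A minimal sketch of the same derivation
against the 2010-era libxc interface, for illustration only (probe_nr_nodes
is a hypothetical helper, not part of libxc, and the patch's additional
CPU-occupancy test is elided here):

/* Count populated NUMA nodes from physinfo, mirroring the nr_nodes
 * synthesis performed in pyxc_physinfo() below. Illustrative sketch. */
#include <stdint.h>
#include <xenctrl.h>

static int probe_nr_nodes(int xc_handle)
{
    xc_physinfo_t info = { 0 };
    uint64_t free_heap;
    int i, nr_nodes = 0;

    if ( xc_physinfo(xc_handle, &info) != 0 )
        return -1;

    for ( i = 0; i <= info.max_node_id; i++ )
    {
        /* Treat a node as existing if it has any heap memory; the patch
         * also treats nodes that own CPUs as existing. */
        if ( (xc_availheap(xc_handle, 0, 0, i, &free_heap) == 0) &&
             (free_heap != 0) )
            nr_nodes++;
    }

    return nr_nodes;
}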

diff -r bec36e63fb0e -r a1d0a575b4ba tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c Wed Jan 06 09:39:01 2010 +0000
+++ b/tools/python/xen/lowlevel/xc/xc.c Wed Jan 06 10:13:55 2010 +0000
@@ -1078,7 +1078,7 @@ static PyObject *pyxc_physinfo(XcObject
#define MAX_CPU_ID 255
xc_physinfo_t info;
char cpu_cap[128], virt_caps[128], *p;
- int i, j, max_cpu_id;
+ int i, j, max_cpu_id, nr_nodes = 0;
uint64_t free_heap;
PyObject *ret_obj, *node_to_cpu_obj, *node_to_memory_obj;
PyObject *node_to_dma32_mem_obj;
@@ -1105,8 +1105,53 @@ static PyObject *pyxc_physinfo(XcObject
if ( p != virt_caps )
*(p-1) = '\0';

+ max_cpu_id = info.max_cpu_id;
+ if ( max_cpu_id > MAX_CPU_ID )
+ max_cpu_id = MAX_CPU_ID;
+
+ /* Construct node-to-* lists. */
+ node_to_cpu_obj = PyList_New(0);
+ node_to_memory_obj = PyList_New(0);
+ node_to_dma32_mem_obj = PyList_New(0);
+ for ( i = 0; i <= info.max_node_id; i++ )
+ {
+ int node_exists = 0;
+ PyObject *pyint;
+
+ /* CPUs. */
+ PyObject *cpus = PyList_New(0);
+ for ( j = 0; j <= max_cpu_id; j++ )
+ {
+ if ( i != map[j] )
+ continue;
+ pyint = PyInt_FromLong(j);
+ PyList_Append(cpus, pyint);
+ Py_DECREF(pyint);
+ node_exists = 1;
+ }
+ PyList_Append(node_to_cpu_obj, cpus);
+ Py_DECREF(cpus);
+
+ /* Memory. */
+ xc_availheap(self->xc_handle, 0, 0, i, &free_heap);
+ node_exists = node_exists || (free_heap != 0);
+ pyint = PyInt_FromLong(free_heap / 1024);
+ PyList_Append(node_to_memory_obj, pyint);
+ Py_DECREF(pyint);
+
+ /* DMA memory. */
+ xc_availheap(self->xc_handle, 0, 32, i, &free_heap);
+ pyint = PyInt_FromLong(free_heap / 1024);
+ PyList_Append(node_to_dma32_mem_obj, pyint);
+ Py_DECREF(pyint);
+
+ if ( node_exists )
+ nr_nodes++;
+ }
+
ret_obj = Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:l,s:l,s:l,s:i,s:s:s:s}",
- "nr_nodes", info.nr_nodes,
+ "nr_nodes", nr_nodes,
+ "max_node_id", info.max_node_id,
"max_cpu_id", info.max_cpu_id,
"threads_per_core", info.threads_per_core,
"cores_per_socket", info.cores_per_socket,
@@ -1117,52 +1162,6 @@ static PyObject *pyxc_physinfo(XcObject
"cpu_khz", info.cpu_khz,
"hw_caps", cpu_cap,
"virt_caps", virt_caps);
-
- max_cpu_id = info.max_cpu_id;
- if ( max_cpu_id > MAX_CPU_ID )
- max_cpu_id = MAX_CPU_ID;
-
- /* Construct node-to-cpu lists. */
- node_to_cpu_obj = PyList_New(0);
-
- /* Make a list for each node. */
- for ( i = 0; i < info.nr_nodes; i++ )
- {
- PyObject *cpus = PyList_New(0);
- for ( j = 0; j <= max_cpu_id; j++ )
- if ( i == map[j]) {
- PyObject *pyint = PyInt_FromLong(j);
- PyList_Append(cpus, pyint);
- Py_DECREF(pyint);
- }
- PyList_Append(node_to_cpu_obj, cpus);
- Py_DECREF(cpus);
- }
-
- node_to_memory_obj = PyList_New(0);
-
- for ( i = 0; i < info.nr_nodes; i++ )
- {
- PyObject *pyint;
-
- xc_availheap(self->xc_handle, 0, 0, i, &free_heap);
- pyint = PyInt_FromLong(free_heap / 1024);
- PyList_Append(node_to_memory_obj, pyint);
- Py_DECREF(pyint);
- }
-
- /* DMA memory. */
- node_to_dma32_mem_obj = PyList_New(0);
-
- for ( i = 0; i < info.nr_nodes; i++ )
- {
- PyObject *pyint;
- xc_availheap(self->xc_handle, 0, 32, i, &free_heap);
- pyint = PyInt_FromLong(free_heap / 1024);
- PyList_Append(node_to_dma32_mem_obj, pyint);
- Py_DECREF(pyint);
- }
-
PyDict_SetItemString(ret_obj, "node_to_cpu", node_to_cpu_obj);
Py_DECREF(node_to_cpu_obj);
PyDict_SetItemString(ret_obj, "node_to_memory", node_to_memory_obj);
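
A note on the refcount discipline visible throughout the hunk above:
PyList_Append() takes its own reference to the appended object, so the
caller must drop its reference immediately afterwards or the object leaks.
A self-contained illustration of the idiom (append_long is a hypothetical
helper, written against the Python 2 C API used here):

/* Append a C long to a Python list without leaking a reference. */
#include <Python.h>

static int append_long(PyObject *list, long v)
{
    PyObject *pyint = PyInt_FromLong(v);    /* new reference */
    if ( pyint == NULL )
        return -1;
    if ( PyList_Append(list, pyint) != 0 )  /* list takes its own ref */
    {
        Py_DECREF(pyint);
        return -1;
    }
    Py_DECREF(pyint);                       /* drop ours; list keeps one */
    return 0;
}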
diff -r bec36e63fb0e -r a1d0a575b4ba xen/arch/ia64/xen/dom0_ops.c
--- a/xen/arch/ia64/xen/dom0_ops.c Wed Jan 06 09:39:01 2010 +0000
+++ b/xen/arch/ia64/xen/dom0_ops.c Wed Jan 06 10:13:55 2010 +0000
@@ -715,12 +715,12 @@ long arch_do_sysctl(xen_sysctl_t *op, XE
pi->cores_per_socket =
cpus_weight(per_cpu(cpu_core_map, 0)) / pi->threads_per_core;
pi->nr_cpus = (u32)num_online_cpus();
- pi->nr_nodes = num_online_nodes();
pi->total_pages = total_pages;
pi->free_pages = avail_domheap_pages();
pi->scrub_pages = 0;
pi->cpu_khz = local_cpu_data->proc_freq / 1000;

+ pi->max_node_id = last_node(node_online_map);
pi->max_cpu_id = last_cpu(cpu_online_map);
max_array_ent = min_t(uint32_t, max_array_ent, pi->max_cpu_id);

diff -r bec36e63fb0e -r a1d0a575b4ba xen/arch/x86/sysctl.c
--- a/xen/arch/x86/sysctl.c Wed Jan 06 09:39:01 2010 +0000
+++ b/xen/arch/x86/sysctl.c Wed Jan 06 10:13:55 2010 +0000
@@ -64,7 +64,6 @@ long arch_do_sysctl(
pi->cores_per_socket =
cpus_weight(per_cpu(cpu_core_map, 0)) / pi->threads_per_core;
pi->nr_cpus = (u32)num_online_cpus();
- pi->nr_nodes = num_online_nodes();
pi->total_pages = total_pages;
pi->free_pages = avail_domheap_pages();
pi->scrub_pages = 0;
@@ -75,6 +74,7 @@ long arch_do_sysctl(
if ( iommu_enabled )
pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm_directio;

+ pi->max_node_id = last_node(node_online_map);
pi->max_cpu_id = last_cpu(cpu_online_map);
max_array_ent = min_t(uint32_t, max_array_ent, pi->max_cpu_id);

diff -r bec36e63fb0e -r a1d0a575b4ba xen/include/public/sysctl.h
--- a/xen/include/public/sysctl.h Wed Jan 06 09:39:01 2010 +0000
+++ b/xen/include/public/sysctl.h Wed Jan 06 10:13:55 2010 +0000
@@ -34,7 +34,7 @@
#include "xen.h"
#include "domctl.h"

-#define XEN_SYSCTL_INTERFACE_VERSION 0x00000006
+#define XEN_SYSCTL_INTERFACE_VERSION 0x00000007

/*
* Read console content from Xen buffer ring.
@@ -94,7 +94,7 @@ struct xen_sysctl_physinfo {
uint32_t threads_per_core;
uint32_t cores_per_socket;
uint32_t nr_cpus;
- uint32_t nr_nodes;
+ uint32_t max_node_id;
uint32_t cpu_khz;
uint64_aligned_t total_pages;
uint64_aligned_t free_pages;
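
Replacing nr_nodes with max_node_id changes the meaning of struct
xen_sysctl_physinfo without changing its size, which is why the interface
version is bumped above. An illustrative fragment (not from the patch) of
how an out-of-tree consumer might key off that bump; it assumes the Xen
public headers are on the include path and that __XEN_TOOLS__ is defined,
as tools builds do:

/* Illustrative compile-time guard for the renamed physinfo field. */
#define __XEN_TOOLS__ 1
#include <xen/sysctl.h>

#if XEN_SYSCTL_INTERFACE_VERSION >= 0x00000007
/* Interface 7+: valid node IDs are 0 .. pi->max_node_id. */
#define PHYSINFO_NODE_BOUND(pi) ((pi)->max_node_id + 1)
#else
/* Interface <= 6: the hypervisor reported a direct node count. */
#define PHYSINFO_NODE_BOUND(pi) ((pi)->nr_nodes)
#endif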
diff -r bec36e63fb0e -r a1d0a575b4ba xen/include/xen/nodemask.h
--- a/xen/include/xen/nodemask.h Wed Jan 06 09:39:01 2010 +0000
+++ b/xen/include/xen/nodemask.h Wed Jan 06 10:13:55 2010 +0000
@@ -38,6 +38,7 @@
*
* int first_node(mask) Number lowest set bit, or MAX_NUMNODES
* int next_node(node, mask) Next node past 'node', or MAX_NUMNODES
+ * int last_node(mask) Number highest set bit, or MAX_NUMNODES
* int first_unset_node(mask) First node not set in mask, or
* MAX_NUMNODES.
*
@@ -214,16 +215,27 @@ static inline void __nodes_shift_left(no
/* FIXME: better would be to fix all architectures to never return
> MAX_NUMNODES, then the silly min_ts could be dropped. */

-#define first_node(src) __first_node(&(src))
-static inline int __first_node(const nodemask_t *srcp)
-{
- return min_t(int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES));
-}
-
-#define next_node(n, src) __next_node((n), &(src))
-static inline int __next_node(int n, const nodemask_t *srcp)
-{
- return min_t(int,MAX_NUMNODES,find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
+#define first_node(src) __first_node(&(src), MAX_NUMNODES)
+static inline int __first_node(const nodemask_t *srcp, int nbits)
+{
+ return min_t(int, nbits, find_first_bit(srcp->bits, nbits));
+}
+
+#define next_node(n, src) __next_node((n), &(src), MAX_NUMNODES)
+static inline int __next_node(int n, const nodemask_t *srcp, int nbits)
+{
+ return min_t(int, nbits, find_next_bit(srcp->bits, nbits, n+1));
+}
+
+#define last_node(src) __last_node(&(src), MAX_NUMNODES)
+static inline int __last_node(const nodemask_t *srcp, int nbits)
+{
+ int node, pnode = nbits;
+ for (node = __first_node(srcp, nbits);
+ node < nbits;
+ node = __next_node(node, srcp, nbits))
+ pnode = node;
+ return pnode;
}

#define nodemask_of_node(node) \

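The new last_node() above walks the mask via first_node()/next_node() and
remembers the last node seen, i.e. the highest set bit. A self-contained
toy (not Xen code; it uses a single-word mask rather than Xen's real
nodemask_t) showing why the highest node ID, rather than a node count,
must bound per-node arrays once node numbering can be sparse:

/* Toy demonstration: with nodes 0 and 2 online the node count is 2,
 * but valid node IDs reach 2, so arrays indexed by node need
 * last_node() + 1 entries, not nr_nodes entries. */
#include <stdio.h>

#define MAX_NUMNODES 64

typedef struct { unsigned long long bits; } nodemask_t;

static int last_node(nodemask_t mask)
{
    int node, pnode = MAX_NUMNODES;  /* MAX_NUMNODES if mask is empty */
    for ( node = 0; node < MAX_NUMNODES; node++ )
        if ( mask.bits & (1ULL << node) )
            pnode = node;
    return pnode;
}

int main(void)
{
    nodemask_t online = { (1ULL << 0) | (1ULL << 2) };  /* node 1 absent */
    printf("nr_nodes = 2, max_node_id = %d\n", last_node(online));
    return 0;
}
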
_______________________________________________
Xen-changelog mailing list
Xen-changelog@lists.xensource.com
http://lists.xensource.com/xen-changelog