--- a/xen/common/numa.c
+++ b/xen/common/numa.c
@@ -382,7 +382,7 @@ static bool __init numa_process_nodes(paddr_t start, paddr_t end)
* 0 if memnodmap[] too small (or shift too small)
* -1 if node overlap or lost ram (shift too big)
*/
-static int __init populate_memnodemap(const struct node *nodes,
+static int __init populate_memnodemap(const struct node *numa_nodes,
                                       unsigned int numnodes, unsigned int shift,
                                       const nodeid_t *nodeids)
{
@@ -393,8 +393,8 @@ static int __init populate_memnodemap(const struct node *nodes,
 
     for ( i = 0; i < numnodes; i++ )
     {
-        unsigned long spdx = paddr_to_pdx(nodes[i].start);
-        unsigned long epdx = paddr_to_pdx(nodes[i].end - 1);
+        unsigned long spdx = paddr_to_pdx(numa_nodes[i].start);
+        unsigned long epdx = paddr_to_pdx(numa_nodes[i].end - 1);
 
         if ( spdx > epdx )
             continue;
@@ -440,7 +440,7 @@ static int __init allocate_cachealigned_memnodemap(void)
* The LSB of all start addresses in the node map is the value of the
* maximum possible shift.
*/
-static unsigned int __init extract_lsb_from_nodes(const struct node *nodes,
+static unsigned int __init extract_lsb_from_nodes(const struct node *numa_nodes,
                                                   nodeid_t numnodes,
                                                   const nodeid_t *nodeids)
{
@@ -449,8 +449,8 @@ static unsigned int __init extract_lsb_from_nodes(const struct node *nodes,
 
     for ( i = 0; i < numnodes; i++ )
     {
-        unsigned long spdx = paddr_to_pdx(nodes[i].start);
-        unsigned long epdx = paddr_to_pdx(nodes[i].end - 1) + 1;
+        unsigned long spdx = paddr_to_pdx(numa_nodes[i].start);
+        unsigned long epdx = paddr_to_pdx(numa_nodes[i].end - 1) + 1;
 
         if ( spdx >= epdx )
             continue;
@@ -475,10 +475,10 @@ static unsigned int __init extract_lsb_from_nodes(const struct node *nodes,
     return i;
 }
 
-int __init compute_hash_shift(const struct node *nodes,
+int __init compute_hash_shift(const struct node *numa_nodes,
                               unsigned int numnodes, const nodeid_t *nodeids)
 {
-    unsigned int shift = extract_lsb_from_nodes(nodes, numnodes, nodeids);
+    unsigned int shift = extract_lsb_from_nodes(numa_nodes, numnodes, nodeids);
 
     if ( memnodemapsize <= ARRAY_SIZE(_memnodemap) )
         memnodemap = _memnodemap;
@@ -487,7 +487,7 @@ int __init compute_hash_shift(const struct node *nodes,
     printk(KERN_DEBUG "NUMA: Using %u for the hash shift\n", shift);
 
-    if ( populate_memnodemap(nodes, numnodes, shift, nodeids) != 1 )
+    if ( populate_memnodemap(numa_nodes, numnodes, shift, nodeids) != 1 )
{
printk(KERN_INFO "Your memory is not aligned you need to "
"rebuild your hypervisor with a bigger NODEMAPSIZE "
@@ -541,7 +541,7 @@ static int __init numa_emulation(unsigned long start_pfn,
{
int ret;
unsigned int i;
- struct node nodes[MAX_NUMNODES];
+ struct node numa_nodes[MAX_NUMNODES];
     uint64_t sz = pfn_to_paddr(end_pfn - start_pfn) / numa_fake;
 
     /* Kludge needed for the hash function */
@@ -556,22 +556,22 @@ static int __init numa_emulation(unsigned long start_pfn,
sz = x;
     }
 
-    memset(&nodes, 0, sizeof(nodes));
+    memset(&numa_nodes, 0, sizeof(numa_nodes));
for ( i = 0; i < numa_fake; i++ )
{
- nodes[i].start = pfn_to_paddr(start_pfn) + i * sz;
+ numa_nodes[i].start = pfn_to_paddr(start_pfn) + i * sz;
if ( i == numa_fake - 1 )
- sz = pfn_to_paddr(end_pfn) - nodes[i].start;
+ sz = pfn_to_paddr(end_pfn) - numa_nodes[i].start;
- nodes[i].end = nodes[i].start + sz;
+ numa_nodes[i].end = numa_nodes[i].start + sz;
         printk(KERN_INFO "Faking node %u at %"PRIx64"-%"PRIx64" (%"PRIu64"MB)\n",
-               i, nodes[i].start, nodes[i].end,
-               (nodes[i].end - nodes[i].start) >> 20);
+               i, numa_nodes[i].start, numa_nodes[i].end,
+               (numa_nodes[i].end - numa_nodes[i].start) >> 20);
node_set_online(i);
     }
 
-    ret = compute_hash_shift(nodes, numa_fake, NULL);
+    ret = compute_hash_shift(numa_nodes, numa_fake, NULL);
if ( ret < 0 )
{
         printk(KERN_ERR "No NUMA hash function found. Emulation disabled.\n");
@@ -580,7 +580,7 @@ static int __init numa_emulation(unsigned long start_pfn,
memnode_shift = ret;
for_each_online_node ( i )
-        setup_node_bootmem(i, nodes[i].start, nodes[i].end);
+        setup_node_bootmem(i, numa_nodes[i].start, numa_nodes[i].end);
 
     numa_init_array();