Mailing List Archive

[IA64] fix name conflict (map_domain_page)
# HG changeset patch
# User awilliam@xenbuild.aw
# Node ID 9eb9fa8a9933721dc42fe547d9c681a5142b5eaa
# Parent 9c6cd777259b388a58c7f506c5f176707ddac43b
[IA64] fix name conflict (map_domain_page)

One is defined in xen/include/xen/domain_page.h.
The other is defined in xen/arch/ia64/xen/domain.c.
This patch renames the one defined in xen/arch/ia64/xen/domain.c.

For consistency its family is also renamed.
map_new_domain_page() -> assign_new_domain_page()
map_domain_page() -> assign_domain_page()
map_domain_io_page() -> assign_domain_io_page()

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>

diff -r 9c6cd777259b -r 9eb9fa8a9933 xen/arch/ia64/vmx/vmx_init.c
--- a/xen/arch/ia64/vmx/vmx_init.c Thu Feb 9 19:59:50 2006
+++ b/xen/arch/ia64/vmx/vmx_init.c Thu Feb 9 20:35:11 2006
@@ -317,7 +317,7 @@
for (j = io_ranges[i].start;
j < io_ranges[i].start + io_ranges[i].size;
j += PAGE_SIZE)
- map_domain_page(d, j, io_ranges[i].type);
+ assign_domain_page(d, j, io_ranges[i].type);
}

conf_nr = VMX_CONFIG_PAGES(d);
@@ -334,14 +334,14 @@
for (i = 0;
i < (end < MMIO_START ? end : MMIO_START);
i += PAGE_SIZE, pgnr++)
- map_domain_page(d, i, pgnr << PAGE_SHIFT);
+ assign_domain_page(d, i, pgnr << PAGE_SHIFT);

/* Map normal memory beyond 4G */
if (unlikely(end > MMIO_START)) {
start = 4 * MEM_G;
end = start + (end - 3 * MEM_G);
for (i = start; i < end; i += PAGE_SIZE, pgnr++)
- map_domain_page(d, i, pgnr << PAGE_SHIFT);
+ assign_domain_page(d, i, pgnr << PAGE_SHIFT);
}

d->arch.max_pfn = end >> PAGE_SHIFT;
@@ -356,7 +356,7 @@
/* Map guest firmware */
pgnr = page_to_mfn(page);
for (i = GFW_START; i < GFW_START + GFW_SIZE; i += PAGE_SIZE, pgnr++)
- map_domain_page(d, i, pgnr << PAGE_SHIFT);
+ assign_domain_page(d, i, pgnr << PAGE_SHIFT);

if (unlikely((page = alloc_domheap_pages(d, 1, 0)) == NULL)) {
printk("Could not allocate order=1 pages for vmx contig alloc\n");
@@ -365,9 +365,9 @@

/* Map for shared I/O page and xenstore */
pgnr = page_to_mfn(page);
- map_domain_page(d, IO_PAGE_START, pgnr << PAGE_SHIFT);
+ assign_domain_page(d, IO_PAGE_START, pgnr << PAGE_SHIFT);
pgnr++;
- map_domain_page(d, STORE_PAGE_START, pgnr << PAGE_SHIFT);
+ assign_domain_page(d, STORE_PAGE_START, pgnr << PAGE_SHIFT);

set_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags);
return 0;
diff -r 9c6cd777259b -r 9eb9fa8a9933 xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c Thu Feb 9 19:59:50 2006
+++ b/xen/arch/ia64/xen/domain.c Thu Feb 9 20:35:11 2006
@@ -389,7 +389,7 @@
}

/* allocate new page for domain and map it to the specified metaphysical addr */
-struct page * map_new_domain_page(struct domain *d, unsigned long mpaddr)
+struct page * assign_new_domain_page(struct domain *d, unsigned long mpaddr)
{
struct mm_struct *mm = d->arch.mm;
struct page *p = (struct page *)0;
@@ -400,7 +400,7 @@
extern unsigned long vhpt_paddr, vhpt_pend;

if (!mm->pgd) {
- printk("map_new_domain_page: domain pgd must exist!\n");
+ printk("assign_new_domain_page: domain pgd must exist!\n");
return(p);
}
pgd = pgd_offset(mm,mpaddr);
@@ -428,21 +428,21 @@
if (p) memset(__va(page_to_maddr(p)),0,PAGE_SIZE);
}
if (unlikely(!p)) {
-printf("map_new_domain_page: Can't alloc!!!! Aaaargh!\n");
+printf("assign_new_domain_page: Can't alloc!!!! Aaaargh!\n");
return(p);
}
if (unlikely(page_to_maddr(p) > vhpt_paddr && page_to_maddr(p) < vhpt_pend)) {
- printf("map_new_domain_page: reassigned vhpt page %p!!\n",page_to_maddr(p));
+ printf("assign_new_domain_page: reassigned vhpt page %p!!\n",page_to_maddr(p));
}
set_pte(pte, pfn_pte(page_to_maddr(p) >> PAGE_SHIFT,
__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
}
- else printk("map_new_domain_page: mpaddr %lx already mapped!\n",mpaddr);
+ else printk("assign_new_domain_page: mpaddr %lx already mapped!\n",mpaddr);
return p;
}

/* map a physical address to the specified metaphysical addr */
-void map_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr)
+void assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr)
{
struct mm_struct *mm = d->arch.mm;
pgd_t *pgd;
@@ -451,7 +451,7 @@
pte_t *pte;

if (!mm->pgd) {
- printk("map_domain_page: domain pgd must exist!\n");
+ printk("assign_domain_page: domain pgd must exist!\n");
return;
}
pgd = pgd_offset(mm,mpaddr);
@@ -472,11 +472,11 @@
set_pte(pte, pfn_pte(physaddr >> PAGE_SHIFT,
__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
}
- else printk("map_domain_page: mpaddr %lx already mapped!\n",mpaddr);
+ else printk("assign_domain_page: mpaddr %lx already mapped!\n",mpaddr);
}
#if 0
/* map a physical address with specified I/O flag */
-void map_domain_io_page(struct domain *d, unsigned long mpaddr, unsigned long flags)
+void assign_domain_io_page(struct domain *d, unsigned long mpaddr, unsigned long flags)
{
struct mm_struct *mm = d->arch.mm;
pgd_t *pgd;
@@ -486,7 +486,7 @@
pte_t io_pte;

if (!mm->pgd) {
- printk("map_domain_page: domain pgd must exist!\n");
+ printk("assign_domain_page: domain pgd must exist!\n");
return;
}
ASSERT(flags & GPFN_IO_MASK);
@@ -509,7 +509,7 @@
pte_val(io_pte) = flags;
set_pte(pte, io_pte);
}
- else printk("map_domain_page: mpaddr %lx already mapped!\n",mpaddr);
+ else printk("assign_domain_page: mpaddr %lx already mapped!\n",mpaddr);
}
#endif
void mpafoo(unsigned long mpaddr)
@@ -557,7 +557,7 @@
}
/* if lookup fails and mpaddr is "legal", "create" the page */
if ((mpaddr >> PAGE_SHIFT) < d->max_pages) {
- if (map_new_domain_page(d,mpaddr)) goto tryagain;
+ if (assign_new_domain_page(d,mpaddr)) goto tryagain;
}
printk("lookup_domain_mpa: bad mpa %p (> %p\n",
mpaddr,d->max_pages<<PAGE_SHIFT);
@@ -655,7 +655,7 @@
else
#endif
while (memsz > 0) {
- p = map_new_domain_page(d,dom_mpaddr);
+ p = assign_new_domain_page(d,dom_mpaddr);
if (unlikely(!p)) BUG();
dom_imva = __va(page_to_maddr(p));
if (filesz > 0) {
diff -r 9c6cd777259b -r 9eb9fa8a9933 xen/include/asm-ia64/grant_table.h
--- a/xen/include/asm-ia64/grant_table.h Thu Feb 9 19:59:50 2006
+++ b/xen/include/asm-ia64/grant_table.h Thu Feb 9 20:35:11 2006
@@ -17,7 +17,7 @@
#define gnttab_shared_gmfn(d, t, i) \
( ((d) == dom0) ? \
((virt_to_maddr((t)->shared) >> PAGE_SHIFT) + (i)) : \
- (map_domain_page((d), 1UL<<40, virt_to_maddr((t)->shared)), \
+ (assign_domain_page((d), 1UL<<40, virt_to_maddr((t)->shared)), \
1UL << (40 - PAGE_SHIFT)) \
)


_______________________________________________
Xen-changelog mailing list
Xen-changelog@lists.xensource.com
http://lists.xensource.com/xen-changelog