[xen-unstable] [IA64] Extend interfaces to use itir instead of logps
# HG changeset patch
# User Alex Williamson <alex.williamson@hp.com>
# Date 1185835127 21600
# Node ID 255abff9d1f75f1dd1502e5764c736835232712f
# Parent 85c2f2d754ef2a0f557470ff19518e1cf06310b5
[IA64] Extend interfaces to use itir instead of logps

Changed some interfaces to pass the whole cr.itir value instead of only its
logps (log2 page size) field when handling itc_i/itc_d and vhpt_insert.
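
For reference: cr.itir carries more than the page size.  Per the Itanium
architecture, bits 7:2 hold ps (the log2 page size) and bits 31:8 hold the
protection key, so passing only logps silently dropped the key.  The sketch
below shows the shape of the ia64_itir_t union and the IA64_ITIR_PS_KEY
macro this patch relies on; the names match the patch, but the exact
definitions here are a reconstruction (the real ones live in Xen's ia64
headers), not part of this diff:

typedef unsigned long u64;        /* as in the Xen ia64 headers */

typedef union {                   /* assumed layout, per the ITIR format */
    u64 itir;                     /* whole register image */
    struct {
        u64 rv3 :  2;             /* 1:0   reserved */
        u64 ps  :  6;             /* 7:2   log2 page size */
        u64 key : 24;             /* 31:8  protection key */
        u64 rv4 : 32;             /* 63:32 reserved */
    };
} ia64_itir_t;

/* Pack a page size and key into an itir image (ps at bit 2, key at bit 8). */
#define IA64_ITIR_PS_KEY(ps, key) \
    ((((u64)(ps)) << 2) | (((u64)(key)) << 8))

With the whole register image passed around, callees recover the page size
as _itir.ps where they previously took a bare logps, and the key reaches
ia64_itc() and the VHPT entries intact.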

Signed-off-by: Dietmar Hahn <dietmar.hahn@fujitsu-siemens.com>
---
 xen/arch/ia64/vmx/vmmu.c                       |    4 -
 xen/arch/ia64/vmx/vtlb.c                       |    4 -
 xen/arch/ia64/xen/faults.c                     |   10 ++--
 xen/arch/ia64/xen/mm.c                         |   15 +++---
 xen/arch/ia64/xen/vcpu.c                       |   56 +++++++++++++------------
 xen/arch/ia64/xen/vhpt.c                       |   20 ++++----
 xen/include/asm-ia64/linux-xen/asm/processor.h |   15 ++++++
 xen/include/asm-ia64/mm.h                      |    3 -
 xen/include/asm-ia64/vhpt.h                    |    4 -
 9 files changed, 78 insertions(+), 53 deletions(-)

diff -r 85c2f2d754ef -r 255abff9d1f7 xen/arch/ia64/vmx/vmmu.c
--- a/xen/arch/ia64/vmx/vmmu.c Mon Jul 30 16:10:17 2007 -0600
+++ b/xen/arch/ia64/vmx/vmmu.c Mon Jul 30 16:38:47 2007 -0600
@@ -232,10 +232,10 @@ void machine_tlb_insert(struct vcpu *v,
 
     psr = ia64_clear_ic();
     if ( cl == ISIDE_TLB ) {
-        ia64_itc(1, mtlb.ifa, mtlb.page_flags, mtlb.ps);
+        ia64_itc(1, mtlb.ifa, mtlb.page_flags, IA64_ITIR_PS_KEY(mtlb.ps, 0));
     }
     else {
-        ia64_itc(2, mtlb.ifa, mtlb.page_flags, mtlb.ps);
+        ia64_itc(2, mtlb.ifa, mtlb.page_flags, IA64_ITIR_PS_KEY(mtlb.ps, 0));
     }
     ia64_set_psr(psr);
     ia64_srlz_i();
diff -r 85c2f2d754ef -r 255abff9d1f7 xen/arch/ia64/vmx/vtlb.c
--- a/xen/arch/ia64/vmx/vtlb.c Mon Jul 30 16:10:17 2007 -0600
+++ b/xen/arch/ia64/vmx/vtlb.c Mon Jul 30 16:38:47 2007 -0600
@@ -199,7 +199,7 @@ void thash_vhpt_insert(VCPU *v, u64 pte,
     } else {
         phy_pte &= ~PAGE_FLAGS_RV_MASK;
         psr = ia64_clear_ic();
-        ia64_itc(type + 1, va, phy_pte, itir_ps(itir));
+        ia64_itc(type + 1, va, phy_pte, itir);
         ia64_set_psr(psr);
         ia64_srlz_i();
     }
@@ -562,7 +562,7 @@ int thash_purge_and_insert(VCPU *v, u64
         u64 psr;
         phy_pte &= ~PAGE_FLAGS_RV_MASK;
         psr = ia64_clear_ic();
-        ia64_itc(type + 1, ifa, phy_pte, ps);
+        ia64_itc(type + 1, ifa, phy_pte, IA64_ITIR_PS_KEY(ps, 0));
         ia64_set_psr(psr);
         ia64_srlz_i();
         // ps < mrr.ps, this is not supported
diff -r 85c2f2d754ef -r 255abff9d1f7 xen/arch/ia64/xen/faults.c
--- a/xen/arch/ia64/xen/faults.c Mon Jul 30 16:10:17 2007 -0600
+++ b/xen/arch/ia64/xen/faults.c Mon Jul 30 16:38:47 2007 -0600
@@ -168,7 +168,7 @@ void ia64_do_page_fault(unsigned long ad
     unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
     IA64FAULT fault;
     int is_ptc_l_needed = 0;
-    u64 logps;
+    ia64_itir_t _itir = {.itir = itir};
 
     if ((isr & IA64_ISR_SP)
         || ((isr & IA64_ISR_NA)
@@ -190,14 +190,14 @@ void ia64_do_page_fault(unsigned long ad
         struct p2m_entry entry;
         unsigned long m_pteval;
         m_pteval = translate_domain_pte(pteval, address, itir,
-                                        &logps, &entry);
+                                        &(_itir.itir), &entry);
         vcpu_itc_no_srlz(current, is_data ? 2 : 1, address,
-                         m_pteval, pteval, logps, &entry);
+                         m_pteval, pteval, _itir.itir, &entry);
         if ((fault == IA64_USE_TLB && !current->arch.dtlb.pte.p) ||
             p2m_entry_retry(&entry)) {
             /* dtlb has been purged in-between. This dtlb was
                matching. Undo the work. */
-            vcpu_flush_tlb_vhpt_range(address, logps);
+            vcpu_flush_tlb_vhpt_range(address, _itir.ps);
 
             // the stale entry which we inserted above
             // may remains in tlb cache.
@@ -209,7 +209,7 @@ void ia64_do_page_fault(unsigned long ad
     }
 
     if (is_ptc_l_needed)
-        vcpu_ptc_l(current, address, logps);
+        vcpu_ptc_l(current, address, _itir.ps);
     if (!guest_mode(regs)) {
         /* The fault occurs inside Xen. */
         if (!ia64_done_with_exception(regs)) {
diff -r 85c2f2d754ef -r 255abff9d1f7 xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c Mon Jul 30 16:10:17 2007 -0600
+++ b/xen/arch/ia64/xen/mm.c Mon Jul 30 16:38:47 2007 -0600
@@ -448,11 +448,11 @@ gmfn_to_mfn_foreign(struct domain *d, un
 // address, convert the pte for a physical address for (possibly different)
 // Xen PAGE_SIZE and return modified pte.  (NOTE: TLB insert should use
 // PAGE_SIZE!)
-u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* logps,
+u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* itir,
                          struct p2m_entry* entry)
 {
     struct domain *d = current->domain;
-    ia64_itir_t itir = {.itir = itir__};
+    ia64_itir_t _itir = {.itir = itir__};
     u64 mask, mpaddr, pteval2;
     u64 arflags;
     u64 arflags2;
@@ -461,13 +461,14 @@ u64 translate_domain_pte(u64 pteval, u64
     pteval &= ((1UL << 53) - 1);  // ignore [63:53] bits
 
     // FIXME address had better be pre-validated on insert
-    mask = ~itir_mask(itir.itir);
+    mask = ~itir_mask(_itir.itir);
     mpaddr = ((pteval & _PAGE_PPN_MASK) & ~mask) | (address & mask);
 
-    if (itir.ps > PAGE_SHIFT)
-        itir.ps = PAGE_SHIFT;
-
-    *logps = itir.ps;
+    if (_itir.ps > PAGE_SHIFT)
+        _itir.ps = PAGE_SHIFT;
+
+    ((ia64_itir_t*)itir)->itir = _itir.itir; /* Copy the whole register. */
+    ((ia64_itir_t*)itir)->ps = _itir.ps;     /* Overwrite ps part! */
 
     pteval2 = lookup_domain_mpa(d, mpaddr, entry);

diff -r 85c2f2d754ef -r 255abff9d1f7 xen/arch/ia64/xen/vcpu.c
--- a/xen/arch/ia64/xen/vcpu.c Mon Jul 30 16:10:17 2007 -0600
+++ b/xen/arch/ia64/xen/vcpu.c Mon Jul 30 16:38:47 2007 -0600
@@ -2200,23 +2200,25 @@ IA64FAULT vcpu_set_dtr(VCPU * vcpu, u64
 
 void
 vcpu_itc_no_srlz(VCPU * vcpu, u64 IorD, u64 vaddr, u64 pte,
-                 u64 mp_pte, u64 logps, struct p2m_entry *entry)
-{
+                 u64 mp_pte, u64 itir, struct p2m_entry *entry)
+{
+    ia64_itir_t _itir = {.itir = itir};
     unsigned long psr;
-    unsigned long ps = (vcpu->domain == dom0) ? logps : PAGE_SHIFT;
-
-    check_xen_space_overlap("itc", vaddr, 1UL << logps);
+    unsigned long ps = (vcpu->domain == dom0) ? _itir.ps : PAGE_SHIFT;
+
+    check_xen_space_overlap("itc", vaddr, 1UL << _itir.ps);
 
     // FIXME, must be inlined or potential for nested fault here!
-    if ((vcpu->domain == dom0) && (logps < PAGE_SHIFT))
+    if ((vcpu->domain == dom0) && (_itir.ps < PAGE_SHIFT))
         panic_domain(NULL, "vcpu_itc_no_srlz: domain trying to use "
                      "smaller page size!\n");
 
-    BUG_ON(logps > PAGE_SHIFT);
+    BUG_ON(_itir.ps > PAGE_SHIFT);
     vcpu_tlb_track_insert_or_dirty(vcpu, vaddr, entry);
     psr = ia64_clear_ic();
     pte &= ~(_PAGE_RV2 | _PAGE_RV1);  // Mask out the reserved bits.
-    ia64_itc(IorD, vaddr, pte, ps);   // FIXME: look for bigger mappings
+    // FIXME: look for bigger mappings
+    ia64_itc(IorD, vaddr, pte, IA64_ITIR_PS_KEY(ps, _itir.key));
     ia64_set_psr(psr);
     // ia64_srlz_i(); // no srls req'd, will rfi later
     if (vcpu->domain == dom0 && ((vaddr >> 61) == 7)) {
@@ -2224,39 +2226,42 @@ vcpu_itc_no_srlz(VCPU * vcpu, u64 IorD,
         // addresses never get flushed. More work needed if this
         // ever happens.
         //printk("vhpt_insert(%p,%p,%p)\n",vaddr,pte,1L<<logps);
-        if (logps > PAGE_SHIFT)
-            vhpt_multiple_insert(vaddr, pte, logps);
+        if (_itir.ps > PAGE_SHIFT)
+            vhpt_multiple_insert(vaddr, pte, _itir.itir);
         else
-            vhpt_insert(vaddr, pte, logps << 2);
+            vhpt_insert(vaddr, pte, _itir.itir);
     }
     // even if domain pagesize is larger than PAGE_SIZE, just put
     // PAGE_SIZE mapping in the vhpt for now, else purging is complicated
-    else
-        vhpt_insert(vaddr, pte, PAGE_SHIFT << 2);
+    else {
+        _itir.ps = PAGE_SHIFT;
+        vhpt_insert(vaddr, pte, _itir.itir);
+    }
 }
 
 IA64FAULT vcpu_itc_d(VCPU * vcpu, u64 pte, u64 itir, u64 ifa)
 {
-    unsigned long pteval, logps = itir_ps(itir);
+    unsigned long pteval;
     BOOLEAN swap_rr0 = (!(ifa >> 61) && PSCB(vcpu, metaphysical_mode));
     struct p2m_entry entry;
-
-    if (logps < PAGE_SHIFT)
+    ia64_itir_t _itir = {.itir = itir};
+
+    if (_itir.ps < PAGE_SHIFT)
         panic_domain(NULL, "vcpu_itc_d: domain trying to use "
                      "smaller page size!\n");
 
 again:
     //itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
-    pteval = translate_domain_pte(pte, ifa, itir, &logps, &entry);
+    pteval = translate_domain_pte(pte, ifa, itir, &(_itir.itir), &entry);
     if (!pteval)
         return IA64_ILLOP_FAULT;
     if (swap_rr0)
         set_one_rr(0x0, PSCB(vcpu, rrs[0]));
-    vcpu_itc_no_srlz(vcpu, 2, ifa, pteval, pte, logps, &entry);
+    vcpu_itc_no_srlz(vcpu, 2, ifa, pteval, pte, _itir.itir, &entry);
     if (swap_rr0)
         set_metaphysical_rr0();
     if (p2m_entry_retry(&entry)) {
-        vcpu_flush_tlb_vhpt_range(ifa, logps);
+        vcpu_flush_tlb_vhpt_range(ifa, _itir.ps);
         goto again;
     }
     vcpu_set_tr_entry(&PSCBX(vcpu, dtlb), pte, itir, ifa);
@@ -2265,25 +2270,26 @@ IA64FAULT vcpu_itc_d(VCPU * vcpu, u64 pt
 
 IA64FAULT vcpu_itc_i(VCPU * vcpu, u64 pte, u64 itir, u64 ifa)
 {
-    unsigned long pteval, logps = itir_ps(itir);
+    unsigned long pteval;
     BOOLEAN swap_rr0 = (!(ifa >> 61) && PSCB(vcpu, metaphysical_mode));
     struct p2m_entry entry;
-
-    if (logps < PAGE_SHIFT)
+    ia64_itir_t _itir = {.itir = itir};
+
+    if (_itir.ps < PAGE_SHIFT)
         panic_domain(NULL, "vcpu_itc_i: domain trying to use "
                      "smaller page size!\n");
 again:
     //itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
-    pteval = translate_domain_pte(pte, ifa, itir, &logps, &entry);
+    pteval = translate_domain_pte(pte, ifa, itir, &(_itir.itir), &entry);
     if (!pteval)
         return IA64_ILLOP_FAULT;
     if (swap_rr0)
         set_one_rr(0x0, PSCB(vcpu, rrs[0]));
-    vcpu_itc_no_srlz(vcpu, 1, ifa, pteval, pte, logps, &entry);
+    vcpu_itc_no_srlz(vcpu, 1, ifa, pteval, pte, _itir.itir, &entry);
    if (swap_rr0)
         set_metaphysical_rr0();
     if (p2m_entry_retry(&entry)) {
-        vcpu_flush_tlb_vhpt_range(ifa, logps);
+        vcpu_flush_tlb_vhpt_range(ifa, _itir.ps);
         goto again;
     }
     vcpu_set_tr_entry(&PSCBX(vcpu, itlb), pte, itir, ifa);
diff -r 85c2f2d754ef -r 255abff9d1f7 xen/arch/ia64/xen/vhpt.c
--- a/xen/arch/ia64/xen/vhpt.c Mon Jul 30 16:10:17 2007 -0600
+++ b/xen/arch/ia64/xen/vhpt.c Mon Jul 30 16:38:47 2007 -0600
@@ -71,7 +71,7 @@ vhpt_erase(unsigned long vhpt_maddr)
     // initialize cache too???
 }
 
-void vhpt_insert (unsigned long vadr, unsigned long pte, unsigned long logps)
+void vhpt_insert (unsigned long vadr, unsigned long pte, unsigned long itir)
 {
     struct vhpt_lf_entry *vlfe = (struct vhpt_lf_entry *)ia64_thash(vadr);
     unsigned long tag = ia64_ttag (vadr);
@@ -80,21 +80,23 @@ void vhpt_insert (unsigned long vadr, un
      * because the processor may support speculative VHPT walk. */
     vlfe->ti_tag = INVALID_TI_TAG;
     wmb();
-    vlfe->itir = logps;
+    vlfe->itir = itir;
     vlfe->page_flags = pte | _PAGE_P;
     *(volatile unsigned long*)&vlfe->ti_tag = tag;
 }
 
-void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte, unsigned long logps)
-{
-    unsigned long mask = (1L << logps) - 1;
+void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte,
+                          unsigned long itir)
+{
+    ia64_itir_t _itir = {.itir = itir};
+    unsigned long mask = (1L << _itir.ps) - 1;
     int i;
 
-    if (logps-PAGE_SHIFT > 10 && !running_on_sim) {
+    if (_itir.ps-PAGE_SHIFT > 10 && !running_on_sim) {
         // if this happens, we may want to revisit this algorithm
         panic("vhpt_multiple_insert:logps-PAGE_SHIFT>10,spinning..\n");
     }
-    if (logps-PAGE_SHIFT > 2) {
+    if (_itir.ps-PAGE_SHIFT > 2) {
         // FIXME: Should add counter here to see how often this
         // happens (e.g. for 16MB pages!) and determine if it
         // is a performance problem. On a quick look, it takes
@@ -109,8 +111,8 @@ void vhpt_multiple_insert(unsigned long
     }
     vaddr &= ~mask;
     pte = ((pte & _PFN_MASK) & ~mask) | (pte & ~_PFN_MASK);
-    for (i = 1L << (logps-PAGE_SHIFT); i > 0; i--) {
-        vhpt_insert(vaddr,pte,logps<<2);
+    for (i = 1L << (_itir.ps-PAGE_SHIFT); i > 0; i--) {
+        vhpt_insert(vaddr, pte, _itir.itir);
         vaddr += PAGE_SIZE;
     }
 }
diff -r 85c2f2d754ef -r 255abff9d1f7 xen/include/asm-ia64/linux-xen/asm/processor.h
--- a/xen/include/asm-ia64/linux-xen/asm/processor.h Mon Jul 30 16:10:17 2007 -0600
+++ b/xen/include/asm-ia64/linux-xen/asm/processor.h Mon Jul 30 16:38:47 2007 -0600
@@ -533,6 +533,20 @@ ia64_itr (__u64 target_mask, __u64 tr_nu
  * Insert a translation into the instruction and/or data translation
  * cache.
  */
+#ifdef XEN
+static inline void
+ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte, __u64 itir)
+{
+    ia64_setreg(_IA64_REG_CR_ITIR, itir);
+    ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
+    ia64_stop();
+    /* as per EAS2.6, itc must be the last instruction in an instruction group */
+    if (target_mask & 0x1)
+        ia64_itci(pte);
+    if (target_mask & 0x2)
+        ia64_itcd(pte);
+}
+#else
 static inline void
 ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
           __u64 log_page_size)
@@ -546,6 +560,7 @@ ia64_itc (__u64 target_mask, __u64 vmadd
     if (target_mask & 0x2)
         ia64_itcd(pte);
 }
+#endif
 
 /*
  * Purge a range of addresses from instruction and/or data translation
diff -r 85c2f2d754ef -r 255abff9d1f7 xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h Mon Jul 30 16:10:17 2007 -0600
+++ b/xen/include/asm-ia64/mm.h Mon Jul 30 16:38:47 2007 -0600
@@ -447,7 +447,8 @@ extern unsigned long dom0vp_expose_p2m(s
 
 extern volatile unsigned long *mpt_table;
 extern unsigned long gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);
-extern u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* logps, struct p2m_entry* entry);
+extern u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__,
+                                u64* itir, struct p2m_entry* entry);
 #define machine_to_phys_mapping mpt_table
 
 #define INVALID_M2P_ENTRY (~0UL)
diff -r 85c2f2d754ef -r 255abff9d1f7 xen/include/asm-ia64/vhpt.h
--- a/xen/include/asm-ia64/vhpt.h Mon Jul 30 16:10:17 2007 -0600
+++ b/xen/include/asm-ia64/vhpt.h Mon Jul 30 16:38:47 2007 -0600
@@ -38,9 +38,9 @@ extern void vhpt_init (void);
 extern void vhpt_init (void);
 extern void gather_vhpt_stats(void);
 extern void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte,
-                                 unsigned long logps);
+                                 unsigned long itir);
 extern void vhpt_insert (unsigned long vadr, unsigned long pte,
-                         unsigned long logps);
+                         unsigned long itir);
 void local_vhpt_flush(void);
 extern void vcpu_vhpt_flush(struct vcpu* v);


_______________________________________________
Xen-changelog mailing list
Xen-changelog@lists.xensource.com
http://lists.xensource.com/xen-changelog