Cleanup VTLB code
# HG changeset patch
# User djm@kirby.fc.hp.com
# Node ID b57ed8182812de2c521d791a6806e740910c460b
# Parent f998426f9069aa9e4e060ceb48a3cd9cfc1231d9
Cleanup VTLB code
Signed-off-by: Anthony Xu <anthony.xu@intel.com>
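
In short: machine_tlb_insert() now inserts a single page with ia64_itc()
instead of hand-saving the region register, ITIR and IFA around itc.i/itc.d;
machine_thash() and machine_ttag() drop their rid and ps arguments and work
under the region registers already installed for the guest rather than
temporarily rewriting rr0 (ttag collapses to the bare ia64_ttag instruction);
machine_tlb_purge() likewise drops rid; and thash_purge_all() gains a
thash_purge_line() helper so a full purge also empties the VHPT side and
flushes the machine TLB. Annotated sketches follow the per-file diffs below.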

diff -r f998426f9069 -r b57ed8182812 xen/arch/ia64/vmx/vmmu.c
--- a/xen/arch/ia64/vmx/vmmu.c Thu Dec 15 22:07:47 2005
+++ b/xen/arch/ia64/vmx/vmmu.c Thu Dec 15 22:09:19 2005
@@ -246,117 +246,30 @@
*/
void machine_tlb_insert(struct vcpu *d, thash_data_t *tlb)
{
-#if 0
- u64 saved_itir, saved_ifa;
-#endif
- u64 saved_rr;
- u64 pages;
u64 psr;
thash_data_t mtlb;
- ia64_rr vrr;
unsigned int cl = tlb->cl;

mtlb.ifa = tlb->vadr;
mtlb.itir = tlb->itir & ~ITIR_RV_MASK;
- vrr = vmmu_get_rr(d,mtlb.ifa);
//vmx_vcpu_get_rr(d, mtlb.ifa, &vrr.value);
- pages = PSIZE(vrr.ps) >> PAGE_SHIFT;
mtlb.page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK;
- mtlb.ppn = get_mfn(DOMID_SELF,tlb->ppn, pages);
+ mtlb.ppn = get_mfn(DOMID_SELF,tlb->ppn, 1);
if (mtlb.ppn == INVALID_MFN)
panic("Machine tlb insert with invalid mfn number.\n");

psr = ia64_clear_ic();
-#if 0
- saved_itir = ia64_getreg(_IA64_REG_CR_ITIR);
- saved_ifa = ia64_getreg(_IA64_REG_CR_IFA);
-#endif
- saved_rr = ia64_get_rr(mtlb.ifa);
- ia64_setreg(_IA64_REG_CR_ITIR, mtlb.itir);
- ia64_setreg(_IA64_REG_CR_IFA, mtlb.ifa);
- /* Only access memory stack which is mapped by TR,
- * after rr is switched.
- */
- ia64_set_rr(mtlb.ifa, vmx_vrrtomrr(d, vrr.rrval));
- ia64_srlz_d();
if ( cl == ISIDE_TLB ) {
- ia64_itci(mtlb.page_flags);
- ia64_srlz_i();
+ ia64_itc(1, mtlb.ifa, mtlb.page_flags, mtlb.ps);
}
else {
- ia64_itcd(mtlb.page_flags);
- ia64_srlz_d();
- }
- ia64_set_rr(mtlb.ifa,saved_rr);
- ia64_srlz_d();
-#if 0
- ia64_setreg(_IA64_REG_CR_IFA, saved_ifa);
- ia64_setreg(_IA64_REG_CR_ITIR, saved_itir);
-#endif
+ ia64_itc(2, mtlb.ifa, mtlb.page_flags, mtlb.ps);
+ }
ia64_set_psr(psr);
ia64_srlz_i();
-}
-
-
-u64 machine_thash(PTA pta, u64 va, u64 rid, u64 ps)
-{
- u64 saved_pta, saved_rr0;
- u64 hash_addr, tag;
- unsigned long psr;
- struct vcpu *v = current;
- ia64_rr vrr;
-
- saved_pta = ia64_getreg(_IA64_REG_CR_PTA);
- saved_rr0 = ia64_get_rr(0);
- vrr.rrval = saved_rr0;
- vrr.rid = rid;
- vrr.ps = ps;
-
- va = (va << 3) >> 3; // set VRN to 0.
- // TODO: Set to enforce lazy mode
- local_irq_save(psr);
- ia64_setreg(_IA64_REG_CR_PTA, pta.val);
- ia64_set_rr(0, vmx_vrrtomrr(v, vrr.rrval));
- ia64_srlz_d();
-
- hash_addr = ia64_thash(va);
- ia64_setreg(_IA64_REG_CR_PTA, saved_pta);
-
- ia64_set_rr(0, saved_rr0);
- ia64_srlz_d();
- ia64_set_psr(psr);
- return hash_addr;
-}
-
-u64 machine_ttag(PTA pta, u64 va, u64 rid, u64 ps)
-{
- u64 saved_pta, saved_rr0;
- u64 hash_addr, tag;
- u64 psr;
- struct vcpu *v = current;
- ia64_rr vrr;
-
- // TODO: Set to enforce lazy mode
- saved_pta = ia64_getreg(_IA64_REG_CR_PTA);
- saved_rr0 = ia64_get_rr(0);
- vrr.rrval = saved_rr0;
- vrr.rid = rid;
- vrr.ps = ps;
-
- va = (va << 3) >> 3; // set VRN to 0.
- local_irq_save(psr);
- ia64_setreg(_IA64_REG_CR_PTA, pta.val);
- ia64_set_rr(0, vmx_vrrtomrr(v, vrr.rrval));
- ia64_srlz_d();
-
- tag = ia64_ttag(va);
- ia64_setreg(_IA64_REG_CR_PTA, saved_pta);
-
- ia64_set_rr(0, saved_rr0);
- ia64_srlz_d();
- local_irq_restore(psr);
- return tag;
-}
+ return;
+}
+
/*
* Purge machine tlb.
* INPUT
@@ -365,25 +278,52 @@
* size: bits format (1<<size) for the address range to purge.
*
*/
-void machine_tlb_purge(u64 rid, u64 va, u64 ps)
-{
- u64 saved_rr0;
- u64 psr;
- ia64_rr vrr;
-
- va = (va << 3) >> 3; // set VRN to 0.
- saved_rr0 = ia64_get_rr(0);
- vrr.rrval = saved_rr0;
- vrr.rid = rid;
- vrr.ps = ps;
- local_irq_save(psr);
- ia64_set_rr( 0, vmx_vrrtomrr(current,vrr.rrval) );
- ia64_srlz_d();
+void machine_tlb_purge(u64 va, u64 ps)
+{
+// u64 psr;
+// psr = ia64_clear_ic();
ia64_ptcl(va, ps << 2);
- ia64_set_rr( 0, saved_rr0 );
- ia64_srlz_d();
- local_irq_restore(psr);
-}
+// ia64_set_psr(psr);
+// ia64_srlz_i();
+// return;
+}
+
+u64 machine_thash(PTA pta, u64 va)
+{
+ u64 saved_pta;
+ u64 hash_addr, tag;
+ unsigned long psr;
+ struct vcpu *v = current;
+ ia64_rr vrr;
+
+ saved_pta = ia64_getreg(_IA64_REG_CR_PTA);
+ psr = ia64_clear_ic();
+ ia64_setreg(_IA64_REG_CR_PTA, pta.val);
+ hash_addr = ia64_thash(va);
+ ia64_setreg(_IA64_REG_CR_PTA, saved_pta);
+ ia64_set_psr(psr);
+ ia64_srlz_i();
+ return hash_addr;
+}
+
+u64 machine_ttag(PTA pta, u64 va)
+{
+// u64 saved_pta;
+// u64 hash_addr, tag;
+// u64 psr;
+// struct vcpu *v = current;
+
+// saved_pta = ia64_getreg(_IA64_REG_CR_PTA);
+// psr = ia64_clear_ic();
+// ia64_setreg(_IA64_REG_CR_PTA, pta.val);
+// tag = ia64_ttag(va);
+ return ia64_ttag(va);
+// ia64_setreg(_IA64_REG_CR_PTA, saved_pta);
+// ia64_set_psr(psr);
+// ia64_srlz_i();
+// return tag;
+}
+


int vhpt_enabled(VCPU *vcpu, uint64_t vadr, vhpt_ref_t ref)
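
The reworked machine_thash() is the whole vmmu.c cleanup in miniature: swap
the guest PTA in, let the hardware compute the hash, swap the host PTA back.
Here is the same code as in the hunk above with editorial comments; the
PSR.ic behaviour is inferred from how the surrounding code uses
ia64_clear_ic(), not stated as an architectural reference, and the unused
tag/v/vrr locals from the hunk are dropped:

u64 machine_thash(PTA pta, u64 va)
{
    u64 saved_pta, hash_addr;
    unsigned long psr;

    saved_pta = ia64_getreg(_IA64_REG_CR_PTA); /* remember the host PTA     */
    psr = ia64_clear_ic();          /* suppress interruption collection
                                       while cr.pta temporarily holds the
                                       guest's value                        */
    ia64_setreg(_IA64_REG_CR_PTA, pta.val);    /* install the guest PTA     */
    hash_addr = ia64_thash(va);     /* hardware VHPT hash address for va    */
    ia64_setreg(_IA64_REG_CR_PTA, saved_pta);  /* restore the host PTA      */
    ia64_set_psr(psr);
    ia64_srlz_i();                  /* serialize before returning           */
    return hash_addr;
}
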
diff -r f998426f9069 -r b57ed8182812 xen/arch/ia64/vmx/vtlb.c
--- a/xen/arch/ia64/vmx/vtlb.c Thu Dec 15 22:07:47 2005
+++ b/xen/arch/ia64/vmx/vtlb.c Thu Dec 15 22:09:19 2005
@@ -68,8 +68,7 @@
static int __is_translated(thash_data_t *tlb, u64 rid, u64 va, CACHE_LINE_TYPE cl)
{
u64 size1,sa1,ea1;
-
- if ( tlb->rid != rid || tlb->cl != cl )
+ if ( tlb->rid != rid || (!tlb->tc && tlb->cl != cl) )
return 0;
size1 = PSIZE(tlb->ps);
sa1 = tlb->vadr & ~(size1-1); // mask the low address bits
@@ -89,7 +88,7 @@
{
uint64_t size1,size2,sa1,ea1,ea2;

- if ( entry->invalid || entry->rid != rid || entry->cl != cl ) {
+ if ( entry->invalid || entry->rid != rid || (!entry->tc && entry->cl != cl) ) {
return 0;
}
size1=PSIZE(entry->ps);
@@ -292,8 +291,7 @@
if ( mfn == INVALID_MFN ) return 0;

// TODO with machine discontinuous address space issue.
- vhpt->etag = (hcb->vs->tag_func)( hcb->pta,
- tlb->vadr, tlb->rid, tlb->ps);
+ vhpt->etag = (hcb->vs->tag_func)( hcb->pta, tlb->vadr);
//vhpt->ti = 0;
vhpt->itir = tlb->itir & ~ITIR_RV_MASK;
vhpt->page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK;
@@ -331,6 +329,17 @@
rep_tr(hcb, entry, idx);
return ;
}
+thash_data_t *vtlb_alloc_chain(thash_cb_t *hcb,thash_data_t *entry)
+{
+ thash_data_t *cch;
+
+ cch = cch_alloc(hcb);
+ if(cch == NULL){
+ thash_purge_all(hcb);
+ }
+ return cch;
+}
+

thash_data_t *__alloc_chain(thash_cb_t *hcb,thash_data_t *entry)
{
@@ -365,15 +374,14 @@
u64 gppn;
u64 ppns, ppne;

- hash_table = (hcb->hash_func)(hcb->pta,
- va, entry->rid, entry->ps);
+ hash_table = (hcb->hash_func)(hcb->pta, va);
if( INVALID_ENTRY(hcb, hash_table) ) {
*hash_table = *entry;
hash_table->next = 0;
}
else {
// TODO: Add collision chain length limitation.
- cch = __alloc_chain(hcb,entry);
+ cch = vtlb_alloc_chain(hcb,entry);
if(cch == NULL){
*hash_table = *entry;
hash_table->next = 0;
@@ -415,8 +423,7 @@
if ( !__tlb_to_vhpt(hcb, entry, va, &vhpt_entry) ) {
panic("Can't convert to machine VHPT entry\n");
}
- hash_table = (hcb->hash_func)(hcb->pta,
- va, entry->rid, entry->ps);
+ hash_table = (hcb->hash_func)(hcb->pta, va);
if( INVALID_ENTRY(hcb, hash_table) ) {
*hash_table = vhpt_entry;
hash_table->next = 0;
@@ -581,9 +588,7 @@
priv->rid = rid;
vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
priv->ps = vrr.ps;
- hash_table = (hcb->hash_func)(hcb->pta,
- priv->_curva, rid, priv->ps);
-
+ hash_table = (hcb->hash_func)(hcb->pta, priv->_curva);
priv->s_sect = s_sect;
priv->cl = cl;
priv->_tr_idx = 0;
@@ -605,11 +610,8 @@
priv->rid = rid;
vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
priv->ps = vrr.ps;
- hash_table = (hcb->hash_func)( hcb->pta,
- priv->_curva, rid, priv->ps);
- tag = (hcb->vs->tag_func)( hcb->pta,
- priv->_curva, rid, priv->ps);
-
+ hash_table = (hcb->hash_func)( hcb->pta, priv->_curva);
+ tag = (hcb->vs->tag_func)( hcb->pta, priv->_curva);
priv->tag = tag;
priv->hash_base = hash_table;
priv->cur_cch = hash_table;
@@ -671,8 +673,7 @@
}
}
priv->_curva += rr_psize;
- priv->hash_base = (hcb->hash_func)( hcb->pta,
- priv->_curva, priv->rid, priv->ps);
+ priv->hash_base = (hcb->hash_func)( hcb->pta, priv->_curva);
priv->cur_cch = priv->hash_base;
}
return NULL;
@@ -697,10 +698,8 @@
}
}
priv->_curva += rr_psize;
- priv->hash_base = (hcb->hash_func)( hcb->pta,
- priv->_curva, priv->rid, priv->ps);
- priv->tag = (hcb->vs->tag_func)( hcb->pta,
- priv->_curva, priv->rid, priv->ps);
+ priv->hash_base = (hcb->hash_func)( hcb->pta, priv->_curva);
+ priv->tag = (hcb->vs->tag_func)( hcb->pta, priv->_curva);
priv->cur_cch = priv->hash_base;
}
return NULL;
@@ -771,7 +770,26 @@
#endif
(hcb->ins_hash)(hcb, in, in->vadr);
}
-
+/*
+ * Purge one hash line (including the entry in the hash table).
+ * Can only be called by thash_purge_all.
+ * Input:
+ * hash: the head of the collision chain (the hash-table slot)
+ *
+ */
+static void thash_purge_line(thash_cb_t *hcb, thash_data_t *hash)
+{
+ thash_data_t *prev, *next;
+ if ( INVALID_ENTRY(hcb, hash) ) return;
+ next=hash->next;
+ while ( next ) {
+ prev=next;
+ next=next->next;
+ cch_free(hcb, prev);
+ }
+ // Then hash table itself.
+ INVALIDATE_HASH(hcb, hash);
+}
/*
* Purge all TCs or VHPT entries including those in Hash table.
*
@@ -792,10 +810,17 @@
#endif

hash_table = (thash_data_t*)((u64)hcb->hash + hcb->hash_sz);
-
for (--hash_table;(u64)hash_table >= (u64)hcb->hash;hash_table--) {
- thash_rem_line(hcb, hash_table);
- }
+ thash_purge_line(hcb, hash_table);
+ }
+ if(hcb->ht == THASH_TLB) {
+ hcb = hcb->ts->vhpt;
+ hash_table = (thash_data_t*)((u64)hcb->hash + hcb->hash_sz);
+ for (--hash_table;(u64)hash_table >= (u64)hcb->hash;hash_table--) {
+ thash_purge_line(hcb, hash_table);
+ }
+ }
+ local_flush_tlb_all();
}


@@ -826,7 +851,7 @@
if ( cch ) return cch;

vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
- hash_table = (hcb->hash_func)( hcb->pta,va, rid, vrr.ps);
+ hash_table = (hcb->hash_func)( hcb->pta, va);

if ( INVALID_ENTRY(hcb, hash_table ) )
return NULL;
@@ -893,7 +918,7 @@

s_sect.v = 0;
thash_purge_entries(hcb->ts->vhpt, entry, s_sect);
- machine_tlb_purge(entry->rid, entry->vadr, entry->ps);
+ machine_tlb_purge(entry->vadr, entry->ps);
}

/*
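
On the vtlb.c side, the core of the new thash_purge_line() is an ordinary
teardown of one singly linked collision chain. Below is a minimal,
self-contained model of that walk; entry_t, cch_free_model() and the
invalid flag are hypothetical simplified stand-ins for thash_data_t,
cch_free() and INVALIDATE_HASH(), and only the traversal mirrors the patch:

#include <stdlib.h>

typedef struct entry {
    struct entry *next;
    int invalid;
} entry_t;

static void cch_free_model(entry_t *e)    /* stands in for cch_free()       */
{
    free(e);
}

static void purge_line_model(entry_t *hash)
{
    entry_t *prev, *next;

    next = hash->next;
    while (next) {                 /* free every chained entry,             */
        prev = next;
        next = next->next;         /* advancing before the free so we       */
        cch_free_model(prev);      /* never read memory already released    */
    }
    hash->next = NULL;
    hash->invalid = 1;             /* stands in for INVALIDATE_HASH()       */
}

thash_purge_all() calls this for every line of the TLB hash, repeats it for
the attached VHPT when hcb->ht == THASH_TLB, and finishes with
local_flush_tlb_all() so no stale machine translations survive the purge.
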
diff -r f998426f9069 -r b57ed8182812 xen/include/asm-ia64/vmmu.h
--- a/xen/include/asm-ia64/vmmu.h Thu Dec 15 22:07:47 2005
+++ b/xen/include/asm-ia64/vmmu.h Thu Dec 15 22:09:19 2005
@@ -148,8 +148,8 @@
/*
* Use to calculate the HASH index of thash_data_t.
*/
-typedef u64 *(THASH_FN)(PTA pta, u64 va, u64 rid, u64 ps);
-typedef u64 *(TTAG_FN)(PTA pta, u64 va, u64 rid, u64 ps);
+typedef u64 *(THASH_FN)(PTA pta, u64 va);
+typedef u64 *(TTAG_FN)(PTA pta, u64 va);
typedef u64 *(GET_MFN_FN)(domid_t d, u64 gpfn, u64 pages);
typedef void *(REM_NOTIFIER_FN)(struct hash_cb *hcb, thash_data_t *entry);
typedef void (RECYCLE_FN)(struct hash_cb *hc, u64 para);
@@ -329,8 +329,8 @@

#define ITIR_RV_MASK (((1UL<<32)-1)<<32 | 0x3)
#define PAGE_FLAGS_RV_MASK (0x2 | (0x3UL<<50)|(((1UL<<11)-1)<<53))
-extern u64 machine_ttag(PTA pta, u64 va, u64 rid, u64 ps);
-extern u64 machine_thash(PTA pta, u64 va, u64 rid, u64 ps);
+extern u64 machine_ttag(PTA pta, u64 va);
+extern u64 machine_thash(PTA pta, u64 va);
extern void purge_machine_tc_by_domid(domid_t domid);
extern void machine_tlb_insert(struct vcpu *d, thash_data_t *tlb);
extern ia64_rr vmmu_get_rr(struct vcpu *vcpu, u64 va);
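
Finally, narrowing THASH_FN/TTAG_FN in vmmu.h is what forces every indirect
call through hcb->hash_func and hcb->vs->tag_func in vtlb.c to change in the
same changeset: calls of the form

    (hcb->hash_func)(hcb->pta, va, rid, ps);

become

    (hcb->hash_func)(hcb->pta, va);

and any out-of-tree caller of machine_thash(), machine_ttag() or
machine_tlb_purge() must likewise move to the shorter argument lists, since
calling through a function pointer with a mismatched type is undefined
behaviour.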
