[xen staging] x86/iommu: add common page-table allocator
commit 15bc9a1ef51c4e0f9786eb143e9d43a657e7d239
Author: Paul Durrant <pdurrant@amazon.com>
AuthorDate: Tue Aug 4 14:41:57 2020 +0100
Commit: Andrew Cooper <andrew.cooper3@citrix.com>
CommitDate: Fri Aug 14 16:14:21 2020 +0100

x86/iommu: add common page-table allocator

Instead of having separate page-table allocation functions in the VT-d and AMD
IOMMU code, a common allocation function in the general x86 code can be used.

This patch adds such a function, iommu_alloc_pgtable(), which places each
page-table page it allocates onto a per-domain list. The pages on this list
are then freed by iommu_free_pgtables(), which is called by
domain_relinquish_resources() after PCI devices have been de-assigned.
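
For illustration only (a sketch, not part of the commit; the function name
example_set_next_level() and the flag EXAMPLE_PRESENT_FLAG are hypothetical),
a driver-side caller of the new allocator might look like this:

    /* Hypothetical caller sketch: obtain a page-table page from the
     * common allocator and point a parent entry at it. The page comes
     * back zeroed and, where the IOMMU requires it, cache-synced. */
    static int example_set_next_level(struct domain *d, uint64_t *pde)
    {
        struct page_info *pg = iommu_alloc_pgtable(d);

        if ( !pg )
            return -ENOMEM;

        /* No matching free: the page is already on the per-domain list
         * that iommu_free_pgtables() consumes at domain teardown. */
        *pde = mfn_to_maddr(page_to_mfn(pg)) | EXAMPLE_PRESENT_FLAG;

        return 0;
    }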

Signed-off-by: Paul Durrant <pdurrant@amazon.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
---
xen/arch/x86/domain.c               |  9 ++++++-
xen/drivers/passthrough/x86/iommu.c | 51 +++++++++++++++++++++++++++++++++++++
xen/include/asm-x86/iommu.h         |  7 +++++
3 files changed, 66 insertions(+), 1 deletion(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index f8084dc9e3..d1ecc7b83b 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -2153,7 +2153,8 @@ int domain_relinquish_resources(struct domain *d)
         d->arch.rel_priv = PROG_ ## x; /* Fallthrough */ case PROG_ ## x
 
     enum {
-        PROG_paging = 1,
+        PROG_iommu_pagetables = 1,
+        PROG_paging,
         PROG_vcpu_pagetables,
         PROG_shared,
         PROG_xen,
@@ -2168,6 +2169,12 @@ int domain_relinquish_resources(struct domain *d)
         if ( ret )
             return ret;
 
+    PROGRESS(iommu_pagetables):
+
+        ret = iommu_free_pgtables(d);
+        if ( ret )
+            return ret;
+
     PROGRESS(paging):
 
         /* Tear down paging-assistance stuff. */
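
The placement of PROG_iommu_pagetables first in the enum means IOMMU page
tables are torn down before paging assistance. A minimal model of the
restartable pattern used here (simplified stand-in code, not Xen's; ERESTART
and all names below are illustrative) would be:

    /* Preemptible teardown: record the stage reached, return -ERESTART
     * so the hypercall can be continued, resume at that stage later. */
    #define ERESTART 85              /* stand-in for Xen's errno value */

    enum prog { PROG_none, PROG_iommu, PROG_done };
    struct teardown { enum prog prog; unsigned int pages_left; };

    static int free_batch(struct teardown *t)
    {
        unsigned int batch = t->pages_left < 256 ? t->pages_left : 256;

        t->pages_left -= batch;                 /* free up to 256 pages */
        return t->pages_left ? -ERESTART : 0;   /* more left? ask again */
    }

    static int relinquish(struct teardown *t)
    {
        switch ( t->prog )
        {
        case PROG_none:
            t->prog = PROG_iommu;
            /* fall through */
        case PROG_iommu:
            if ( free_batch(t) == -ERESTART )
                return -ERESTART;    /* caller re-invokes relinquish() */
            t->prog = PROG_done;
            /* fall through */
        case PROG_done:
            break;
        }
        return 0;
    }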
diff --git a/xen/drivers/passthrough/x86/iommu.c b/xen/drivers/passthrough/x86/iommu.c
index a12109a1de..aea07e47c4 100644
--- a/xen/drivers/passthrough/x86/iommu.c
+++ b/xen/drivers/passthrough/x86/iommu.c
@@ -140,6 +140,9 @@ int arch_iommu_domain_init(struct domain *d)

     spin_lock_init(&hd->arch.mapping_lock);
 
+    INIT_PAGE_LIST_HEAD(&hd->arch.pgtables.list);
+    spin_lock_init(&hd->arch.pgtables.lock);
+
     return 0;
 }

@@ -257,6 +260,54 @@ void __hwdom_init arch_iommu_hwdom_init(struct domain *d)
         return;
 }
 
+int iommu_free_pgtables(struct domain *d)
+{
+    struct domain_iommu *hd = dom_iommu(d);
+    struct page_info *pg;
+    unsigned int done = 0;
+
+    while ( (pg = page_list_remove_head(&hd->arch.pgtables.list)) )
+    {
+        free_domheap_page(pg);
+
+        if ( !(++done & 0xff) && general_preempt_check() )
+            return -ERESTART;
+    }
+
+    return 0;
+}
+
+struct page_info *iommu_alloc_pgtable(struct domain *d)
+{
+    struct domain_iommu *hd = dom_iommu(d);
+    unsigned int memflags = 0;
+    struct page_info *pg;
+    void *p;
+
+#ifdef CONFIG_NUMA
+    if ( hd->node != NUMA_NO_NODE )
+        memflags = MEMF_node(hd->node);
+#endif
+
+    pg = alloc_domheap_page(NULL, memflags);
+    if ( !pg )
+        return NULL;
+
+    p = __map_domain_page(pg);
+    clear_page(p);
+
+    if ( hd->platform_ops->sync_cache )
+        iommu_vcall(hd->platform_ops, sync_cache, p, PAGE_SIZE);
+
+    unmap_domain_page(p);
+
+    spin_lock(&hd->arch.pgtables.lock);
+    page_list_add(pg, &hd->arch.pgtables.list);
+    spin_unlock(&hd->arch.pgtables.lock);
+
+    return pg;
+}
+
 /*
  * Local variables:
  * mode: C
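
Two properties of the new functions are worth noting. iommu_free_pgtables()
pops pages off the list without taking pgtables.lock: it only runs after PCI
devices have been de-assigned, so no new page tables can be allocated
concurrently. And its preemption check fires once every 256 pages, since
!(++done & 0xff) is true exactly when done reaches a multiple of 0x100. A
freestanding illustration (not Xen code):

    /* Illustration: done == 256, 512, 768, ... trigger the check; all
     * other iterations skip general_preempt_check() entirely. */
    static unsigned int count_preempt_checks(unsigned int pages)
    {
        unsigned int done = 0, checks = 0, i;

        for ( i = 0; i < pages; i++ )
            if ( !(++done & 0xff) )
                checks++;

        return checks;    /* count_preempt_checks(1024) == 4 */
    }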
diff --git a/xen/include/asm-x86/iommu.h b/xen/include/asm-x86/iommu.h
index 8ce97c981f..970eb06ffa 100644
--- a/xen/include/asm-x86/iommu.h
+++ b/xen/include/asm-x86/iommu.h
@@ -46,6 +46,10 @@ typedef uint64_t daddr_t;
 struct arch_iommu
 {
     spinlock_t mapping_lock;            /* io page table lock */
+    struct {
+        struct page_list_head list;
+        spinlock_t lock;
+    } pgtables;
 
     union {
         /* Intel VT-d */
@@ -131,6 +135,9 @@ int pi_update_irte(const struct pi_desc *pi_desc, const struct pirq *pirq,
         iommu_vcall(ops, sync_cache, addr, size);       \
 })
 
+int __must_check iommu_free_pgtables(struct domain *d);
+struct page_info *__must_check iommu_alloc_pgtable(struct domain *d);
+
 #endif /* !__ARCH_X86_IOMMU_H__ */
 /*
  * Local variables:
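
A short note on the declarations above (illustrative snippet, not part of the
patch): __must_check expands to the compiler's warn_unused_result attribute,
so a caller that discards either return value draws a diagnostic:

    iommu_free_pgtables(d);              /* warning: result ignored */
    pg = iommu_alloc_pgtable(d);         /* OK: result consumed */
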
--
generated by git-patchbot for /home/xen/git/xen.git#staging