
[PATCH 5 of 6] amd iommu: add iotlb invalidation command
# HG changeset patch
# User Wei Wang <wei.wang2@amd.com>
# Date 1319472700 -7200
# Node ID 8ec947b278afaf89acadf905237c95ba7b64524a
# Parent 3dc5e805d9142cbe1074610879e258b6fab43409
amd iommu: add iotlb invalidation command
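
ATS capable devices cache IOMMU translations in a device local IOTLB,
so stale entries must be invalidated there as well whenever a p2m
mapping changes. Implement the INVALIDATE_IOTLB_PAGES command and send
it to every ATS enabled device behind an IOTLB capable IOMMU when
flushing IOMMU pages.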

Signed-off-by: Wei Wang <wei.wang2@amd.com>

diff -r 3dc5e805d914 -r 8ec947b278af xen/drivers/passthrough/amd/iommu_map.c
--- a/xen/drivers/passthrough/amd/iommu_map.c Mon Oct 24 18:11:36 2011 +0200
+++ b/xen/drivers/passthrough/amd/iommu_map.c Mon Oct 24 18:11:40 2011 +0200
@@ -23,6 +23,7 @@
 #include <xen/hvm/iommu.h>
 #include <asm/amd-iommu.h>
 #include <asm/hvm/svm/amd-iommu-proto.h>
+#include "../x86/ats.h"
 
 static int queue_iommu_command(struct amd_iommu *iommu, u32 cmd[])
 {
@@ -128,6 +129,80 @@ static void invalidate_iommu_pages(struc
     send_iommu_command(iommu, cmd);
 }
 
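+/* Tell an ATS device to invalidate IOTLB entries (INVALIDATE_IOTLB_PAGES). */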
+static void invalidate_iotlb_pages(struct amd_iommu *iommu,
+                                   u16 maxpend, u32 pasid, u16 queueid,
+                                   u64 io_addr, u16 dev_id, u16 order)
+{
+    u64 addr_lo, addr_hi;
+    u32 cmd[4], entry;
+    int sflag = 0;
+
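+    /* Only 4K (order 0), 2M (order 9) and 1G (order 18) ranges are valid. */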
+    ASSERT(order == 0 || order == 9 || order == 18);
+
+    if ( order || (io_addr == INV_IOMMU_ALL_PAGES_ADDRESS) )
+        sflag = 1;
+
+    /* If sflag == 1, the size of the invalidate command is determined
+       by the first zero bit in the address starting from Address[12]. */
+    if ( order )
+    {
+        u64 mask = 1ULL << (order - 1 + PAGE_SHIFT);
+        io_addr &= ~mask;
+        io_addr |= mask - 1;
+    }
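+    /* e.g. for order 9 (2M): bits [19:12] are set and bit 20 is cleared,
+       so the first zero bit from Address[12] marks a 512-page range. */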
+
+    addr_lo = io_addr & DMA_32BIT_MASK;
+    addr_hi = io_addr >> 32;
+
+    set_field_in_reg_u32(dev_id, 0,
+                         IOMMU_INV_IOTLB_PAGES_DEVICE_ID_MASK,
+                         IOMMU_INV_IOTLB_PAGES_DEVICE_ID_SHIFT, &entry);
+
+    set_field_in_reg_u32(maxpend, entry,
+                         IOMMU_INV_IOTLB_PAGES_MAXPEND_MASK,
+                         IOMMU_INV_IOTLB_PAGES_MAXPEND_SHIFT, &entry);
+
+    set_field_in_reg_u32(pasid & 0xff, entry,
+                         IOMMU_INV_IOTLB_PAGES_PASID1_MASK,
+                         IOMMU_INV_IOTLB_PAGES_PASID1_SHIFT, &entry);
+    cmd[0] = entry;
+
+    set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_IOTLB_PAGES, 0,
+                         IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT,
+                         &entry);
+
+    set_field_in_reg_u32(pasid >> 8, entry,
+                         IOMMU_INV_IOTLB_PAGES_PASID2_MASK,
+                         IOMMU_INV_IOTLB_PAGES_PASID2_SHIFT,
+                         &entry);
+
+    set_field_in_reg_u32(queueid, entry,
+                         IOMMU_INV_IOTLB_PAGES_QUEUEID_MASK,
+                         IOMMU_INV_IOTLB_PAGES_QUEUEID_SHIFT,
+                         &entry);
+    cmd[1] = entry;
+
+    set_field_in_reg_u32(sflag, 0,
+                         IOMMU_INV_IOTLB_PAGES_S_FLAG_MASK,
+                         IOMMU_INV_IOTLB_PAGES_S_FLAG_SHIFT, &entry);
+
+    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, entry,
+                         IOMMU_INV_IOTLB_PAGES_ADDR_LOW_MASK,
+                         IOMMU_INV_IOTLB_PAGES_ADDR_LOW_SHIFT, &entry);
+    cmd[2] = entry;
+
+    set_field_in_reg_u32((u32)addr_hi, 0,
+                         IOMMU_INV_IOTLB_PAGES_ADDR_HIGH_MASK,
+                         IOMMU_INV_IOTLB_PAGES_ADDR_HIGH_SHIFT, &entry);
+    cmd[3] = entry;
+
+    send_iommu_command(iommu, cmd);
+}
+
 void flush_command_buffer(struct amd_iommu *iommu)
 {
     u32 cmd[4], status;
@@ -896,6 +971,63 @@ int amd_iommu_reserve_domain_unity_map(s
     return 0;
 }
 
+void amd_iommu_flush_iotlb(struct pci_dev *pdev,
+                           uint64_t gaddr, unsigned int order)
+{
+    unsigned long flags;
+    struct amd_iommu *iommu;
+    unsigned int bdf, req_id, queueid, maxpend;
+    struct pci_ats_dev *ats_pdev;
+
+    if ( !ats_enabled )
+        return;
+
+    ats_pdev = get_ats_device(pdev->seg, pdev->bus, pdev->devfn);
+    if ( ats_pdev == NULL )
+        return;
+    if ( !pci_ats_enabled(ats_pdev->seg,
+                          ats_pdev->bus, ats_pdev->devfn) )
+        return;
+
+    bdf = (ats_pdev->bus << 8) | ats_pdev->devfn;
+    iommu = find_iommu_for_device(ats_pdev->seg, bdf);
+
+    if ( !iommu )
+    {
+        AMD_IOMMU_DEBUG("%s: Failed to find iommu for device %04x:%02x:%02x.%u\n",
+                        __func__, ats_pdev->seg, ats_pdev->bus,
+                        PCI_SLOT(ats_pdev->devfn),
+                        PCI_FUNC(ats_pdev->devfn));
+        return;
+    }
+
+    if ( !iommu->iotlb_support )
+        return;
+
+    req_id = get_dma_requestor_id(iommu->seg, bdf);
+    queueid = req_id;
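+    /* MaxPend is derived from the device's ATS invalidate queue depth. */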
+    maxpend = (ats_pdev->ats_queue_depth + 32) & 0xff;
+
+    /* send INVALIDATE_IOTLB_PAGES command */
+    spin_lock_irqsave(&iommu->lock, flags);
+    invalidate_iotlb_pages(iommu, maxpend, 0, queueid,
+                           gaddr, req_id, order);
+    flush_command_buffer(iommu);
+    spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+void amd_iommu_flush_all_iotlbs(struct domain *d,
+                                uint64_t gaddr, unsigned int order)
+{
+    struct pci_dev *pdev;
+
+    if ( !ats_enabled )
+        return;
+
+    for_each_pdev( d, pdev )
+        amd_iommu_flush_iotlb(pdev, gaddr, order);
+}

 /* Flush iommu cache after p2m changes. */
 static void _amd_iommu_flush_pages(struct domain *d,
@@ -914,6 +1046,9 @@ static void _amd_iommu_flush_pages(struc
         flush_command_buffer(iommu);
         spin_unlock_irqrestore(&iommu->lock, flags);
     }
+
+    if ( ats_enabled )
+        amd_iommu_flush_all_iotlbs(d, gaddr, order);
 }
 
 void amd_iommu_flush_all_pages(struct domain *d)
diff -r 3dc5e805d914 -r 8ec947b278af xen/include/asm-x86/hvm/svm/amd-iommu-defs.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h Mon Oct 24 18:11:36 2011 +0200
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h Mon Oct 24 18:11:40 2011 +0200
@@ -233,6 +233,24 @@
 #define IOMMU_INV_INT_TABLE_DEVICE_ID_MASK 0x0000FFFF
 #define IOMMU_INV_INT_TABLE_DEVICE_ID_SHIFT 0
 
+/* INVALIDATE_IOTLB_PAGES command */
+#define IOMMU_INV_IOTLB_PAGES_MAXPEND_MASK 0xff000000
+#define IOMMU_INV_IOTLB_PAGES_MAXPEND_SHIFT 24
+#define IOMMU_INV_IOTLB_PAGES_PASID1_MASK 0x00ff0000
+#define IOMMU_INV_IOTLB_PAGES_PASID1_SHIFT 16
+#define IOMMU_INV_IOTLB_PAGES_PASID2_MASK 0x0fff0000
+#define IOMMU_INV_IOTLB_PAGES_PASID2_SHIFT 16
+#define IOMMU_INV_IOTLB_PAGES_QUEUEID_MASK 0x0000ffff
+#define IOMMU_INV_IOTLB_PAGES_QUEUEID_SHIFT 0
+#define IOMMU_INV_IOTLB_PAGES_DEVICE_ID_MASK 0x0000FFFF
+#define IOMMU_INV_IOTLB_PAGES_DEVICE_ID_SHIFT 0
+#define IOMMU_INV_IOTLB_PAGES_ADDR_LOW_MASK 0xFFFFF000
+#define IOMMU_INV_IOTLB_PAGES_ADDR_LOW_SHIFT 12
+#define IOMMU_INV_IOTLB_PAGES_ADDR_HIGH_MASK 0xFFFFFFFF
+#define IOMMU_INV_IOTLB_PAGES_ADDR_HIGH_SHIFT 0
+#define IOMMU_INV_IOTLB_PAGES_S_FLAG_MASK 0x00000001
+#define IOMMU_INV_IOTLB_PAGES_S_FLAG_SHIFT 0
+
 /* Event Log */
 #define IOMMU_EVENT_LOG_BASE_LOW_OFFSET 0x10
 #define IOMMU_EVENT_LOG_BASE_HIGH_OFFSET 0x14
diff -r 3dc5e805d914 -r 8ec947b278af xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h Mon Oct 24 18:11:36 2011 +0200
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h Mon Oct 24 18:11:40 2011 +0200
@@ -55,6 +55,8 @@ int amd_iommu_unmap_page(struct domain *
 void amd_iommu_flush_pages(struct domain *d, unsigned long gfn,
                            unsigned int order);
 void amd_iommu_flush_all_pages(struct domain *d);
+void amd_iommu_flush_iotlb(struct pci_dev *pdev,
+                           uint64_t gaddr, unsigned int order);
 
 u64 amd_iommu_get_next_table_from_pte(u32 *entry);
 int amd_iommu_reserve_domain_unity_map(struct domain *domain,


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel