
[PATCH v3 08/13] viridian: add ExProcessorMasks variants of the flush hypercalls
From: Paul Durrant <pdurrant@amazon.com>

The Microsoft Hypervisor TLFS specifies variants of the already implemented
HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE/LIST hypercalls that take a 'Virtual
Processor Set' as an argument rather than a simple 64-bit mask.
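
For reference, a Virtual Processor Set is a variable-length structure. A
minimal sketch of its layout, following the TLFS-derived definitions Xen
already carries in hyperv-tlfs.h (the field comments here are assumptions
of mine, not quoted from the header):

  struct hv_vpset {
      u64 format;            /* HV_GENERIC_SET_SPARSE_4K, HV_GENERIC_SET_ALL, ... */
      u64 valid_bank_mask;   /* bit n set => a mask for VPs n*64..n*64+63 follows */
      u64 bank_contents[];   /* one 64-bit VP mask per set bit, in bank order */
  };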

This patch adds a new hvcall_flush_ex() function to implement these
(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE/LIST_EX) hypercalls. It makes use of
two new helper functions, hv_vpset_nr_banks() and hv_vpset_to_vpmask(),
which respectively determine the size of the Virtual Processor Set (so
that it can be copied from guest memory) and parse it into a
hypercall_vpmask.
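
As an illustrative (hypothetical) example of what those helpers compute
for a sparse set:

  /* Hypothetical guest input: banks 0 and 2 present. */
  set.format = HV_GENERIC_SET_SPARSE_4K;
  set.valid_bank_mask = 0x5;   /* hv_vpset_nr_banks() == hweight64(0x5) == 2 */
  set.bank_contents[0] = 0x1;  /* bank 0 => VP 0 */
  set.bank_contents[1] = 0x2;  /* bank 2 => VP 129 (each bank covers 64 VPs) */

Here HV_VPSET_SIZE(2) == 32, so 32 bytes are copied from guest memory and
hv_vpset_to_vpmask() ends up setting bits 0 and 129 in the
hypercall_vpmask.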

NOTE: A guest should not yet issue these hypercalls as 'ExProcessorMasks'
support needs to be advertised via CPUID. This will be done in a
subsequent patch.

Signed-off-by: Paul Durrant <pdurrant@amazon.com>
---
Cc: Wei Liu <wl@xen.org>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: "Roger Pau Monné" <roger.pau@citrix.com>

v3:
- Adjust one of the helper macros
- A few more consts and type tweaks
- Adjust prototype of new function

v2:
- Add helper macros to define mask and struct sizes
- Use a union to determine the size of 'hypercall_vpset'
- Use hweight64() in hv_vpset_nr_banks()
- Sanity check size before hvm_copy_from_guest_phys()
---
xen/arch/x86/hvm/viridian/viridian.c | 141 +++++++++++++++++++++++++++
1 file changed, 141 insertions(+)

diff --git a/xen/arch/x86/hvm/viridian/viridian.c b/xen/arch/x86/hvm/viridian/viridian.c
index 4867a1bd140b..5d0b49012360 100644
--- a/xen/arch/x86/hvm/viridian/viridian.c
+++ b/xen/arch/x86/hvm/viridian/viridian.c
@@ -577,6 +577,69 @@ static unsigned int vpmask_nr(const struct hypercall_vpmask *vpmask)
     return bitmap_weight(vpmask->mask, HVM_MAX_VCPUS);
 }
 
+#define HV_VPSET_BANK_SIZE \
+    sizeof_field(struct hv_vpset, bank_contents[0])
+
+#define HV_VPSET_SIZE(banks) \
+    (offsetof(struct hv_vpset, bank_contents) + \
+     ((banks) * HV_VPSET_BANK_SIZE))
+
+#define HV_VPSET_MAX_BANKS \
+    (sizeof_field(struct hv_vpset, valid_bank_mask) * 8)
+
+union hypercall_vpset {
+    struct hv_vpset set;
+    uint8_t pad[HV_VPSET_SIZE(HV_VPSET_MAX_BANKS)];
+};
+
+static DEFINE_PER_CPU(union hypercall_vpset, hypercall_vpset);
+
+static unsigned int hv_vpset_nr_banks(const struct hv_vpset *vpset)
+{
+    return hweight64(vpset->valid_bank_mask);
+}
+
+static int hv_vpset_to_vpmask(const struct hv_vpset *set,
+                              struct hypercall_vpmask *vpmask)
+{
+#define NR_VPS_PER_BANK (HV_VPSET_BANK_SIZE * 8)
+
+    switch ( set->format )
+    {
+    case HV_GENERIC_SET_ALL:
+        vpmask_fill(vpmask);
+        return 0;
+
+    case HV_GENERIC_SET_SPARSE_4K:
+    {
+        uint64_t bank_mask;
+        unsigned int vp, bank = 0;
+
+        vpmask_empty(vpmask);
+        for ( vp = 0, bank_mask = set->valid_bank_mask;
+              bank_mask;
+              vp += NR_VPS_PER_BANK, bank_mask >>= 1 )
+        {
+            if ( bank_mask & 1 )
+            {
+                uint64_t mask = set->bank_contents[bank];
+
+                vpmask_set(vpmask, vp, mask);
+                bank++;
+            }
+        }
+        return 0;
+    }
+
+    default:
+        break;
+    }
+
+    return -EINVAL;
+
+#undef NR_VPS_PER_BANK
+}
+
 /*
  * Windows should not issue the hypercalls requiring this callback in the
  * case where vcpu_id would exceed the size of the mask.
@@ -657,6 +720,78 @@ static int hvcall_flush(const union hypercall_input *input,
     return 0;
 }
 
+static int hvcall_flush_ex(const union hypercall_input *input,
+                           union hypercall_output *output,
+                           paddr_t input_params_gpa,
+                           paddr_t output_params_gpa)
+{
+    struct hypercall_vpmask *vpmask = &this_cpu(hypercall_vpmask);
+    struct {
+        uint64_t address_space;
+        uint64_t flags;
+        struct hv_vpset set;
+    } input_params;
+
+    /* These hypercalls should never use the fast-call convention. */
+    if ( input->fast )
+        return -EINVAL;
+
+    /* Get input parameters. */
+    if ( hvm_copy_from_guest_phys(&input_params, input_params_gpa,
+                                  sizeof(input_params)) != HVMTRANS_okay )
+        return -EINVAL;
+
+    if ( input_params.flags & HV_FLUSH_ALL_PROCESSORS )
+        vpmask_fill(vpmask);
+    else
+    {
+        union hypercall_vpset *vpset = &this_cpu(hypercall_vpset);
+        struct hv_vpset *set = &vpset->set;
+        size_t size;
+        int rc;
+
+        *set = input_params.set;
+        if ( set->format == HV_GENERIC_SET_SPARSE_4K )
+        {
+            unsigned long offset = offsetof(typeof(input_params),
+                                            set.bank_contents);
+
+            size = sizeof(*set->bank_contents) * hv_vpset_nr_banks(set);
+
+            if ( offsetof(typeof(*vpset), set.bank_contents[0]) + size >
+                 sizeof(*vpset) )
+            {
+                ASSERT_UNREACHABLE();
+                return -EINVAL;
+            }
+
+            if ( hvm_copy_from_guest_phys(&set->bank_contents[0],
+                                          input_params_gpa + offset,
+                                          size) != HVMTRANS_okay )
+                return -EINVAL;
+
+            size += sizeof(*set);
+        }
+        else
+            size = sizeof(*set);
+
+        rc = hv_vpset_to_vpmask(set, vpmask);
+        if ( rc )
+            return rc;
+    }
+
+    /*
+     * A false return means that another vcpu is currently trying
+     * a similar operation, so back off.
+     */
+    if ( !paging_flush_tlb(need_flush, vpmask) )
+        return -ERESTART;
+
+    output->rep_complete = input->rep_count;
+
+    return 0;
+}
+
 static void send_ipi(struct hypercall_vpmask *vpmask, uint8_t vector)
 {
     struct domain *currd = current->domain;
@@ -770,6 +905,12 @@ int viridian_hypercall(struct cpu_user_regs *regs)
                           output_params_gpa);
         break;
 
+    case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
+    case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
+        rc = hvcall_flush_ex(&input, &output, input_params_gpa,
+                             output_params_gpa);
+        break;
+
     case HVCALL_SEND_IPI:
         rc = hvcall_ipi(&input, &output, input_params_gpa,
                         output_params_gpa);
--
2.20.1