hyperv-next for v6.18

-----BEGIN PGP SIGNATURE-----
 
 iQFHBAABCgAxFiEEIbPD0id6easf0xsudhRwX5BBoF4FAmjkpakTHHdlaS5saXVA
 a2VybmVsLm9yZwAKCRB2FHBfkEGgXip5B/48MvTFJ1qwRGPVzevZQ8Z4SDogEREp
 69VS/xRf1YCIzyXyanwqf1dXLq8NAqicSp6ewpJAmNA55/9O0cwT2EtohjeGCu61
 krPIvS3KT7xI0uSEniBdhBtALYBscnQ0e3cAbLNzL7bwA6Q6OmvoIawpBADgE/cW
 aZNCK9jy+WUqtXc6lNtkJtST0HWGDn0h04o2hjqIkZ+7ewjuEEJBUUB/JZwJ41Od
 UxbID0PAcn9O4n/u/Y/GH65MX+ddrdCgPHEGCLAGAKT24lou3NzVv445OuCw0c4W
 ilALIRb9iea56ZLVBW5O82+7g9Ag41LGq+841MNlZjeRNONGykaUpTWZ
 =OR26
 -----END PGP SIGNATURE-----

Merge tag 'hyperv-next-signed-20251006' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux

Pull hyperv updates from Wei Liu:

 - Unify guest entry code for KVM and MSHV (Sean Christopherson)

 - Switch Hyper-V MSI domain to use msi_create_parent_irq_domain()
   (Nam Cao)

 - Add CONFIG_HYPERV_VMBUS and limit the semantics of CONFIG_HYPERV
   (Mukesh Rathor)

 - Add kexec/kdump support on Azure CVMs (Vitaly Kuznetsov)

 - Deprecate hyperv_fb in favor of Hyper-V DRM driver (Prasanna
   Kumar T S M)

 - Miscellaneous enhancements, fixes and cleanups (Abhishek Tiwari,
   Alok Tiwari, Nuno Das Neves, Wei Liu, Roman Kisel, Michael Kelley)

* tag 'hyperv-next-signed-20251006' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux:
  hyperv: Remove the spurious null directive line
  MAINTAINERS: Mark hyperv_fb driver Obsolete
  fbdev/hyperv_fb: deprecate this in favor of Hyper-V DRM driver
  Drivers: hv: Make CONFIG_HYPERV bool
  Drivers: hv: Add CONFIG_HYPERV_VMBUS option
  Drivers: hv: vmbus: Fix typos in vmbus_drv.c
  Drivers: hv: vmbus: Fix sysfs output format for ring buffer index
  Drivers: hv: vmbus: Clean up sscanf format specifier in target_cpu_store()
  x86/hyperv: Switch to msi_create_parent_irq_domain()
  mshv: Use common "entry virt" APIs to do work in root before running guest
  entry: Rename "kvm" entry code assets to "virt" to genericize APIs
  entry/kvm: KVM: Move KVM details related to signal/-EINTR into KVM proper
  mshv: Handle NEED_RESCHED_LAZY before transferring to guest
  x86/hyperv: Add kexec/kdump support on Azure CVMs
  Drivers: hv: Simplify data structures for VMBus channel close message
  Drivers: hv: util: Cosmetic changes for hv_utils_transport.c
  mshv: Add support for a new parent partition configuration
  clocksource: hyper-v: Skip unnecessary checks for the root partition
  hyperv: Add missing field to hv_output_map_device_interrupt
pull/1354/merge
Linus Torvalds 2025-10-07 08:40:15 -07:00
commit 2215336295
45 changed files with 449 additions and 193 deletions

View File

@ -10390,7 +10390,7 @@ L: linux-kernel@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git core/entry
F: include/linux/entry-common.h
F: include/linux/entry-kvm.h
F: include/linux/entry-virt.h
F: include/linux/irq-entry-common.h
F: kernel/entry/
@ -11604,7 +11604,6 @@ F: drivers/pci/controller/pci-hyperv-intf.c
F: drivers/pci/controller/pci-hyperv.c
F: drivers/scsi/storvsc_drv.c
F: drivers/uio/uio_hv_generic.c
F: drivers/video/fbdev/hyperv_fb.c
F: include/asm-generic/mshyperv.h
F: include/clocksource/hyperv_timer.h
F: include/hyperv/hvgdk.h
@ -11618,6 +11617,16 @@ F: include/uapi/linux/hyperv.h
F: net/vmw_vsock/hyperv_transport.c
F: tools/hv/
HYPER-V FRAMEBUFFER DRIVER
M: "K. Y. Srinivasan" <kys@microsoft.com>
M: Haiyang Zhang <haiyangz@microsoft.com>
M: Wei Liu <wei.liu@kernel.org>
M: Dexuan Cui <decui@microsoft.com>
L: linux-hyperv@vger.kernel.org
S: Obsolete
T: git git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux.git
F: drivers/video/fbdev/hyperv_fb.c
HYPERBUS SUPPORT
M: Vignesh Raghavendra <vigneshr@ti.com>
R: Tudor Ambarus <tudor.ambarus@linaro.org>

View File

@ -25,7 +25,7 @@ menuconfig KVM
select HAVE_KVM_CPU_RELAX_INTERCEPT
select KVM_MMIO
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select KVM_XFER_TO_GUEST_WORK
select VIRT_XFER_TO_GUEST_WORK
select KVM_VFIO
select HAVE_KVM_DIRTY_RING_ACQ_REL
select NEED_KVM_DIRTY_RING_WITH_BITMAP

View File

@ -6,7 +6,6 @@
#include <linux/bug.h>
#include <linux/cpu_pm.h>
#include <linux/entry-kvm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
@ -1183,7 +1182,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
/*
* Check conditions before entering the guest
*/
ret = xfer_to_guest_mode_handle_work(vcpu);
ret = kvm_xfer_to_guest_mode_handle_work(vcpu);
if (!ret)
ret = 1;

View File

@ -31,7 +31,7 @@ config KVM
select KVM_GENERIC_HARDWARE_ENABLING
select KVM_GENERIC_MMU_NOTIFIER
select KVM_MMIO
select KVM_XFER_TO_GUEST_WORK
select VIRT_XFER_TO_GUEST_WORK
select SCHED_INFO
select GUEST_PERF_EVENTS if PERF_EVENTS
help

View File

@ -4,7 +4,6 @@
*/
#include <linux/kvm_host.h>
#include <linux/entry-kvm.h>
#include <asm/fpu.h>
#include <asm/lbt.h>
#include <asm/loongarch.h>
@ -251,7 +250,7 @@ static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
/*
* Check conditions before entering the guest
*/
ret = xfer_to_guest_mode_handle_work(vcpu);
ret = kvm_xfer_to_guest_mode_handle_work(vcpu);
if (ret < 0)
return ret;

View File

@ -30,7 +30,7 @@ config KVM
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select KVM_GENERIC_HARDWARE_ENABLING
select KVM_MMIO
select KVM_XFER_TO_GUEST_WORK
select VIRT_XFER_TO_GUEST_WORK
select KVM_GENERIC_MMU_NOTIFIER
select SCHED_INFO
select GUEST_PERF_EVENTS if PERF_EVENTS

View File

@ -7,7 +7,6 @@
*/
#include <linux/bitops.h>
#include <linux/entry-kvm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
@ -911,7 +910,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
run->exit_reason = KVM_EXIT_UNKNOWN;
while (ret > 0) {
/* Check conditions before entering the guest */
ret = xfer_to_guest_mode_handle_work(vcpu);
ret = kvm_xfer_to_guest_mode_handle_work(vcpu);
if (ret)
continue;
ret = 1;

View File

@ -11,6 +11,7 @@
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/export.h>
#include <linux/irqchip/irq-msi-lib.h>
#include <asm/mshyperv.h>
static int hv_map_interrupt(union hv_device_id device_id, bool level,
@ -289,59 +290,99 @@ static void hv_teardown_msi_irq(struct pci_dev *dev, struct irq_data *irqd)
(void)hv_unmap_msi_interrupt(dev, &old_entry);
}
static void hv_msi_free_irq(struct irq_domain *domain,
struct msi_domain_info *info, unsigned int virq)
{
struct irq_data *irqd = irq_get_irq_data(virq);
struct msi_desc *desc;
if (!irqd)
return;
desc = irq_data_get_msi_desc(irqd);
if (!desc || !desc->irq || WARN_ON_ONCE(!dev_is_pci(desc->dev)))
return;
hv_teardown_msi_irq(to_pci_dev(desc->dev), irqd);
}
/*
* IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
* which implement the MSI or MSI-X Capability Structure.
*/
static struct irq_chip hv_pci_msi_controller = {
.name = "HV-PCI-MSI",
.irq_unmask = pci_msi_unmask_irq,
.irq_mask = pci_msi_mask_irq,
.irq_ack = irq_chip_ack_parent,
.irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_compose_msi_msg = hv_irq_compose_msi_msg,
.irq_set_affinity = msi_domain_set_affinity,
.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MOVE_DEFERRED,
.irq_set_affinity = irq_chip_set_affinity_parent,
};
static struct msi_domain_ops pci_msi_domain_ops = {
.msi_free = hv_msi_free_irq,
.msi_prepare = pci_msi_prepare,
static bool hv_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
struct irq_domain *real_parent, struct msi_domain_info *info)
{
struct irq_chip *chip = info->chip;
if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
return false;
chip->flags |= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MOVE_DEFERRED;
info->ops->msi_prepare = pci_msi_prepare;
return true;
}
#define HV_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | MSI_FLAG_PCI_MSIX)
#define HV_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS)
static struct msi_parent_ops hv_msi_parent_ops = {
.supported_flags = HV_MSI_FLAGS_SUPPORTED,
.required_flags = HV_MSI_FLAGS_REQUIRED,
.bus_select_token = DOMAIN_BUS_NEXUS,
.bus_select_mask = MATCH_PCI_MSI,
.chip_flags = MSI_CHIP_FLAG_SET_ACK,
.prefix = "HV-",
.init_dev_msi_info = hv_init_dev_msi_info,
};
static struct msi_domain_info hv_pci_msi_domain_info = {
.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
MSI_FLAG_PCI_MSIX,
.ops = &pci_msi_domain_ops,
.chip = &hv_pci_msi_controller,
.handler = handle_edge_irq,
.handler_name = "edge",
static int hv_msi_domain_alloc(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs,
void *arg)
{
/*
* TODO: The allocation bits of hv_irq_compose_msi_msg(), i.e. everything except
* entry_to_msi_msg() should be in here.
*/
int ret;
ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, arg);
if (ret)
return ret;
for (int i = 0; i < nr_irqs; ++i) {
irq_domain_set_info(d, virq + i, 0, &hv_pci_msi_controller, NULL,
handle_edge_irq, NULL, "edge");
}
return 0;
}
static void hv_msi_domain_free(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs)
{
for (int i = 0; i < nr_irqs; ++i) {
struct irq_data *irqd = irq_domain_get_irq_data(d, virq);
struct msi_desc *desc;
desc = irq_data_get_msi_desc(irqd);
if (!desc || !desc->irq || WARN_ON_ONCE(!dev_is_pci(desc->dev)))
continue;
hv_teardown_msi_irq(to_pci_dev(desc->dev), irqd);
}
irq_domain_free_irqs_top(d, virq, nr_irqs);
}
static const struct irq_domain_ops hv_msi_domain_ops = {
.select = msi_lib_irq_domain_select,
.alloc = hv_msi_domain_alloc,
.free = hv_msi_domain_free,
};
struct irq_domain * __init hv_create_pci_msi_domain(void)
{
struct irq_domain *d = NULL;
struct fwnode_handle *fn;
fn = irq_domain_alloc_named_fwnode("HV-PCI-MSI");
if (fn)
d = pci_msi_create_irq_domain(fn, &hv_pci_msi_domain_info, x86_vector_domain);
struct irq_domain_info info = {
.fwnode = irq_domain_alloc_named_fwnode("HV-PCI-MSI"),
.ops = &hv_msi_domain_ops,
.parent = x86_vector_domain,
};
if (info.fwnode)
d = msi_create_parent_irq_domain(&info, &hv_msi_parent_ops);
/* No point in going further if we can't get an irq domain */
BUG_ON(!d);

View File

@ -462,6 +462,195 @@ void hv_ivm_msr_read(u64 msr, u64 *value)
hv_ghcb_msr_read(msr, value);
}
/*
* Keep track of the PFN regions which were shared with the host. The access
* must be revoked upon kexec/kdump (see hv_ivm_clear_host_access()).
*/
struct hv_enc_pfn_region {
struct list_head list;
u64 pfn;
int count;
};
static LIST_HEAD(hv_list_enc);
static DEFINE_RAW_SPINLOCK(hv_list_enc_lock);
static int hv_list_enc_add(const u64 *pfn_list, int count)
{
struct hv_enc_pfn_region *ent;
unsigned long flags;
u64 pfn;
int i;
for (i = 0; i < count; i++) {
pfn = pfn_list[i];
raw_spin_lock_irqsave(&hv_list_enc_lock, flags);
/* Check if the PFN already exists in some region first */
list_for_each_entry(ent, &hv_list_enc, list) {
if ((ent->pfn <= pfn) && (ent->pfn + ent->count - 1 >= pfn))
/* Nothing to do - pfn is already in the list */
goto unlock_done;
}
/*
* Check if the PFN is adjacent to an existing region. Growing
* a region can make it adjacent to another one but merging is
* not (yet) implemented for simplicity. A PFN cannot be added
* to two regions to keep the logic in hv_list_enc_remove()
* correct.
*/
list_for_each_entry(ent, &hv_list_enc, list) {
if (ent->pfn + ent->count == pfn) {
/* Grow existing region up */
ent->count++;
goto unlock_done;
} else if (pfn + 1 == ent->pfn) {
/* Grow existing region down */
ent->pfn--;
ent->count++;
goto unlock_done;
}
}
raw_spin_unlock_irqrestore(&hv_list_enc_lock, flags);
/* No adjacent region found -- create a new one */
ent = kzalloc(sizeof(struct hv_enc_pfn_region), GFP_KERNEL);
if (!ent)
return -ENOMEM;
ent->pfn = pfn;
ent->count = 1;
raw_spin_lock_irqsave(&hv_list_enc_lock, flags);
list_add(&ent->list, &hv_list_enc);
unlock_done:
raw_spin_unlock_irqrestore(&hv_list_enc_lock, flags);
}
return 0;
}
static int hv_list_enc_remove(const u64 *pfn_list, int count)
{
struct hv_enc_pfn_region *ent, *t;
struct hv_enc_pfn_region new_region;
unsigned long flags;
u64 pfn;
int i;
for (i = 0; i < count; i++) {
pfn = pfn_list[i];
raw_spin_lock_irqsave(&hv_list_enc_lock, flags);
list_for_each_entry_safe(ent, t, &hv_list_enc, list) {
if (pfn == ent->pfn + ent->count - 1) {
/* Removing tail pfn */
ent->count--;
if (!ent->count) {
list_del(&ent->list);
kfree(ent);
}
goto unlock_done;
} else if (pfn == ent->pfn) {
/* Removing head pfn */
ent->count--;
ent->pfn++;
if (!ent->count) {
list_del(&ent->list);
kfree(ent);
}
goto unlock_done;
} else if (pfn > ent->pfn && pfn < ent->pfn + ent->count - 1) {
/*
* Removing a pfn in the middle. Cut off the tail
* of the existing region and create a template for
* the new one.
*/
new_region.pfn = pfn + 1;
new_region.count = ent->count - (pfn - ent->pfn + 1);
ent->count = pfn - ent->pfn;
goto unlock_split;
}
}
unlock_done:
raw_spin_unlock_irqrestore(&hv_list_enc_lock, flags);
continue;
unlock_split:
raw_spin_unlock_irqrestore(&hv_list_enc_lock, flags);
ent = kzalloc(sizeof(struct hv_enc_pfn_region), GFP_KERNEL);
if (!ent)
return -ENOMEM;
ent->pfn = new_region.pfn;
ent->count = new_region.count;
raw_spin_lock_irqsave(&hv_list_enc_lock, flags);
list_add(&ent->list, &hv_list_enc);
raw_spin_unlock_irqrestore(&hv_list_enc_lock, flags);
}
return 0;
}
/* Stop new private<->shared conversions */
static void hv_vtom_kexec_begin(void)
{
if (!IS_ENABLED(CONFIG_KEXEC_CORE))
return;
/*
* Crash kernel reaches here with interrupts disabled: can't wait for
* conversions to finish.
*
* If race happened, just report and proceed.
*/
if (!set_memory_enc_stop_conversion())
pr_warn("Failed to stop shared<->private conversions\n");
}
static void hv_vtom_kexec_finish(void)
{
struct hv_gpa_range_for_visibility *input;
struct hv_enc_pfn_region *ent;
unsigned long flags;
u64 hv_status;
int cur, i;
local_irq_save(flags);
input = *this_cpu_ptr(hyperv_pcpu_input_arg);
if (unlikely(!input))
goto out;
list_for_each_entry(ent, &hv_list_enc, list) {
for (i = 0, cur = 0; i < ent->count; i++) {
input->gpa_page_list[cur] = ent->pfn + i;
cur++;
if (cur == HV_MAX_MODIFY_GPA_REP_COUNT || i == ent->count - 1) {
input->partition_id = HV_PARTITION_ID_SELF;
input->host_visibility = VMBUS_PAGE_NOT_VISIBLE;
input->reserved0 = 0;
input->reserved1 = 0;
hv_status = hv_do_rep_hypercall(
HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY,
cur, 0, input, NULL);
WARN_ON_ONCE(!hv_result_success(hv_status));
cur = 0;
}
}
}
out:
local_irq_restore(flags);
}
/*
* hv_mark_gpa_visibility - Set pages visible to host via hvcall.
*
@ -475,6 +664,7 @@ static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
struct hv_gpa_range_for_visibility *input;
u64 hv_status;
unsigned long flags;
int ret;
/* no-op if partition isolation is not enabled */
if (!hv_is_isolation_supported())
@ -486,6 +676,13 @@ static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
return -EINVAL;
}
if (visibility == VMBUS_PAGE_NOT_VISIBLE)
ret = hv_list_enc_remove(pfn, count);
else
ret = hv_list_enc_add(pfn, count);
if (ret)
return ret;
local_irq_save(flags);
input = *this_cpu_ptr(hyperv_pcpu_input_arg);
@ -506,8 +703,18 @@ static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
if (hv_result_success(hv_status))
return 0;
if (visibility == VMBUS_PAGE_NOT_VISIBLE)
ret = hv_list_enc_add(pfn, count);
else
return -EFAULT;
ret = hv_list_enc_remove(pfn, count);
/*
* There's no good way to recover from -ENOMEM here, the accounting is
* wrong either way.
*/
WARN_ON_ONCE(ret);
return -EFAULT;
}
/*
@ -669,6 +876,8 @@ void __init hv_vtom_init(void)
x86_platform.guest.enc_tlb_flush_required = hv_vtom_tlb_flush_required;
x86_platform.guest.enc_status_change_prepare = hv_vtom_clear_present;
x86_platform.guest.enc_status_change_finish = hv_vtom_set_host_visibility;
x86_platform.guest.enc_kexec_begin = hv_vtom_kexec_begin;
x86_platform.guest.enc_kexec_finish = hv_vtom_kexec_finish;
/* Set WB as the default cache mode. */
guest_force_mtrr_state(NULL, 0, MTRR_TYPE_WRBACK);

View File

@ -565,6 +565,11 @@ static void __init ms_hyperv_init_platform(void)
machine_ops.crash_shutdown = hv_machine_crash_shutdown;
#endif
#endif
/*
* HV_ACCESS_TSC_INVARIANT is always zero for the root partition. Root
* partition doesn't need to write to synthetic MSR to enable invariant
* TSC feature. It sees what the hardware provides.
*/
if (ms_hyperv.features & HV_ACCESS_TSC_INVARIANT) {
/*
* Writing to synthetic MSR 0x40000118 updates/changes the
@ -636,8 +641,12 @@ static void __init ms_hyperv_init_platform(void)
* TSC should be marked as unstable only after Hyper-V
* clocksource has been initialized. This ensures that the
* stability of the sched_clock is not altered.
*
* HV_ACCESS_TSC_INVARIANT is always zero for the root partition. No
* need to check for it.
*/
if (!(ms_hyperv.features & HV_ACCESS_TSC_INVARIANT))
if (!hv_root_partition() &&
!(ms_hyperv.features & HV_ACCESS_TSC_INVARIANT))
mark_tsc_unstable("running on Hyper-V");
hardlockup_detector_disable();

View File

@ -40,7 +40,7 @@ config KVM_X86
select HAVE_KVM_MSI
select HAVE_KVM_CPU_RELAX_INTERCEPT
select HAVE_KVM_NO_POLL
select KVM_XFER_TO_GUEST_WORK
select VIRT_XFER_TO_GUEST_WORK
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select KVM_VFIO
select HAVE_KVM_PM_NOTIFIER if PM

View File

@ -28,7 +28,6 @@
#include <linux/slab.h>
#include <linux/tboot.h>
#include <linux/trace_events.h>
#include <linux/entry-kvm.h>
#include <asm/apic.h>
#include <asm/asm.h>

View File

@ -59,7 +59,6 @@
#include <linux/sched/stat.h>
#include <linux/sched/isolation.h>
#include <linux/mem_encrypt.h>
#include <linux/entry-kvm.h>
#include <linux/suspend.h>
#include <linux/smp.h>
@ -11635,7 +11634,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
if (__xfer_to_guest_mode_work_pending()) {
kvm_vcpu_srcu_read_unlock(vcpu);
r = xfer_to_guest_mode_handle_work(vcpu);
r = kvm_xfer_to_guest_mode_handle_work(vcpu);
kvm_vcpu_srcu_read_lock(vcpu);
if (r)
return r;

View File

@ -161,7 +161,7 @@ obj-$(CONFIG_SOUNDWIRE) += soundwire/
# Virtualization drivers
obj-$(CONFIG_VIRT_DRIVERS) += virt/
obj-$(subst m,y,$(CONFIG_HYPERV)) += hv/
obj-$(CONFIG_HYPERV) += hv/
obj-$(CONFIG_PM_DEVFREQ) += devfreq/
obj-$(CONFIG_EXTCON) += extcon/

View File

@ -549,14 +549,22 @@ static void __init hv_init_tsc_clocksource(void)
union hv_reference_tsc_msr tsc_msr;
/*
* When running as a guest partition:
*
* If Hyper-V offers TSC_INVARIANT, then the virtualized TSC correctly
* handles frequency and offset changes due to live migration,
* pause/resume, and other VM management operations. So lower the
* Hyper-V Reference TSC rating, causing the generic TSC to be used.
* TSC_INVARIANT is not offered on ARM64, so the Hyper-V Reference
* TSC will be preferred over the virtualized ARM64 arch counter.
*
* When running as the root partition:
*
* There is no HV_ACCESS_TSC_INVARIANT feature. Always lower the rating
* of the Hyper-V Reference TSC.
*/
if (ms_hyperv.features & HV_ACCESS_TSC_INVARIANT) {
if ((ms_hyperv.features & HV_ACCESS_TSC_INVARIANT) ||
hv_root_partition()) {
hyperv_cs_tsc.rating = 250;
hyperv_cs_msr.rating = 245;
}

View File

@ -400,7 +400,7 @@ source "drivers/gpu/drm/tyr/Kconfig"
config DRM_HYPERV
tristate "DRM Support for Hyper-V synthetic video device"
depends on DRM && PCI && HYPERV
depends on DRM && PCI && HYPERV_VMBUS
select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER

View File

@ -1171,7 +1171,7 @@ config GREENASIA_FF
config HID_HYPERV_MOUSE
tristate "Microsoft Hyper-V mouse driver"
depends on HYPERV
depends on HYPERV_VMBUS
help
Select this option to enable the Hyper-V mouse driver.

View File

@ -3,13 +3,14 @@
menu "Microsoft Hyper-V guest support"
config HYPERV
tristate "Microsoft Hyper-V client drivers"
bool "Microsoft Hyper-V core hypervisor support"
depends on (X86 && X86_LOCAL_APIC && HYPERVISOR_GUEST) \
|| (ARM64 && !CPU_BIG_ENDIAN)
select PARAVIRT
select X86_HV_CALLBACK_VECTOR if X86
select OF_EARLY_FLATTREE if OF
select SYSFB if EFI && !HYPERV_VTL_MODE
select IRQ_MSI_LIB if X86
help
Select this option to run Linux as a Hyper-V client operating
system.
@ -44,18 +45,25 @@ config HYPERV_TIMER
config HYPERV_UTILS
tristate "Microsoft Hyper-V Utilities driver"
depends on HYPERV && CONNECTOR && NLS
depends on HYPERV_VMBUS && CONNECTOR && NLS
depends on PTP_1588_CLOCK_OPTIONAL
help
Select this option to enable the Hyper-V Utilities.
config HYPERV_BALLOON
tristate "Microsoft Hyper-V Balloon driver"
depends on HYPERV
depends on HYPERV_VMBUS
select PAGE_REPORTING
help
Select this option to enable Hyper-V Balloon driver.
config HYPERV_VMBUS
tristate "Microsoft Hyper-V VMBus driver"
depends on HYPERV
default HYPERV
help
Select this option to enable Hyper-V Vmbus driver.
config MSHV_ROOT
tristate "Microsoft Hyper-V root partition support"
depends on HYPERV && (X86_64 || ARM64)
@ -66,6 +74,7 @@ config MSHV_ROOT
# no particular order, making it impossible to reassemble larger pages
depends on PAGE_SIZE_4KB
select EVENTFD
select VIRT_XFER_TO_GUEST_WORK
default n
help
Select this option to enable support for booting and running as root

View File

@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_HYPERV) += hv_vmbus.o
obj-$(CONFIG_HYPERV_VMBUS) += hv_vmbus.o
obj-$(CONFIG_HYPERV_UTILS) += hv_utils.o
obj-$(CONFIG_HYPERV_BALLOON) += hv_balloon.o
obj-$(CONFIG_MSHV_ROOT) += mshv_root.o
@ -16,5 +16,5 @@ mshv_root-y := mshv_root_main.o mshv_synic.o mshv_eventfd.o mshv_irq.o \
mshv_root_hv_call.o mshv_portid_table.o
# Code that must be built-in
obj-$(subst m,y,$(CONFIG_HYPERV)) += hv_common.o
obj-$(CONFIG_HYPERV) += hv_common.o
obj-$(subst m,y,$(CONFIG_MSHV_ROOT)) += hv_proc.o mshv_common.o

View File

@ -925,7 +925,7 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
/* Send a closing message */
msg = &channel->close_msg.msg;
msg = &channel->close_msg;
msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
msg->child_relid = channel->offermsg.child_relid;

View File

@ -257,7 +257,7 @@ static void hv_kmsg_dump_register(void)
static inline bool hv_output_page_exists(void)
{
return hv_root_partition() || IS_ENABLED(CONFIG_HYPERV_VTL_MODE);
return hv_parent_partition() || IS_ENABLED(CONFIG_HYPERV_VTL_MODE);
}
void __init hv_get_partition_id(void)
@ -377,7 +377,7 @@ int __init hv_common_init(void)
BUG_ON(!hyperv_pcpu_output_arg);
}
if (hv_root_partition()) {
if (hv_parent_partition()) {
hv_synic_eventring_tail = alloc_percpu(u8 *);
BUG_ON(!hv_synic_eventring_tail);
}
@ -531,7 +531,7 @@ int hv_common_cpu_init(unsigned int cpu)
if (msr_vp_index > hv_max_vp_index)
hv_max_vp_index = msr_vp_index;
if (hv_root_partition()) {
if (hv_parent_partition()) {
synic_eventring_tail = (u8 **)this_cpu_ptr(hv_synic_eventring_tail);
*synic_eventring_tail = kcalloc(HV_SYNIC_SINT_COUNT,
sizeof(u8), flags);
@ -558,7 +558,7 @@ int hv_common_cpu_die(unsigned int cpu)
* originally allocated memory is reused in hv_common_cpu_init().
*/
if (hv_root_partition()) {
if (hv_parent_partition()) {
synic_eventring_tail = this_cpu_ptr(hv_synic_eventring_tail);
kfree(*synic_eventring_tail);
*synic_eventring_tail = NULL;
@ -729,13 +729,17 @@ void hv_identify_partition_type(void)
* the root partition setting if also a Confidential VM.
*/
if ((ms_hyperv.priv_high & HV_CREATE_PARTITIONS) &&
(ms_hyperv.priv_high & HV_CPU_MANAGEMENT) &&
!(ms_hyperv.priv_high & HV_ISOLATION)) {
pr_info("Hyper-V: running as root partition\n");
if (IS_ENABLED(CONFIG_MSHV_ROOT))
hv_curr_partition_type = HV_PARTITION_TYPE_ROOT;
else
if (!IS_ENABLED(CONFIG_MSHV_ROOT)) {
pr_crit("Hyper-V: CONFIG_MSHV_ROOT not enabled!\n");
} else if (ms_hyperv.priv_high & HV_CPU_MANAGEMENT) {
pr_info("Hyper-V: running as root partition\n");
hv_curr_partition_type = HV_PARTITION_TYPE_ROOT;
} else {
pr_info("Hyper-V: running as L1VH partition\n");
hv_curr_partition_type = HV_PARTITION_TYPE_L1VH;
}
}
}

View File

@ -129,8 +129,7 @@ static int hvt_op_open(struct inode *inode, struct file *file)
* device gets released.
*/
hvt->mode = HVUTIL_TRANSPORT_CHARDEV;
}
else if (hvt->mode == HVUTIL_TRANSPORT_NETLINK) {
} else if (hvt->mode == HVUTIL_TRANSPORT_NETLINK) {
/*
* We're switching from netlink communication to using char
* device. Issue the reset first.
@ -195,7 +194,7 @@ static void hvt_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
}
spin_unlock(&hvt_list_lock);
if (!hvt_found) {
pr_warn("hvt_cn_callback: spurious message received!\n");
pr_warn("%s: spurious message received!\n", __func__);
return;
}
@ -210,7 +209,7 @@ static void hvt_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
if (hvt->mode == HVUTIL_TRANSPORT_NETLINK)
hvt_found->on_msg(msg->data, msg->len);
else
pr_warn("hvt_cn_callback: unexpected netlink message!\n");
pr_warn("%s: unexpected netlink message!\n", __func__);
mutex_unlock(&hvt->lock);
}
@ -260,8 +259,9 @@ int hvutil_transport_send(struct hvutil_transport *hvt, void *msg, int len,
hvt->outmsg_len = len;
hvt->on_read = on_read_cb;
wake_up_interruptible(&hvt->outmsg_q);
} else
} else {
ret = -ENOMEM;
}
out_unlock:
mutex_unlock(&hvt->lock);
return ret;

View File

@ -25,6 +25,4 @@ int hv_call_set_vp_registers(u32 vp_index, u64 partition_id, u16 count,
int hv_call_get_partition_property(u64 partition_id, u64 property_code,
u64 *property_value);
int mshv_do_pre_guest_mode_work(ulong th_flags);
#endif /* _MSHV_H */

View File

@ -138,25 +138,3 @@ int hv_call_get_partition_property(u64 partition_id,
return 0;
}
EXPORT_SYMBOL_GPL(hv_call_get_partition_property);
/*
* Handle any pre-processing before going into the guest mode on this cpu, most
* notably call schedule(). Must be invoked with both preemption and
* interrupts enabled.
*
* Returns: 0 on success, -errno on error.
*/
int mshv_do_pre_guest_mode_work(ulong th_flags)
{
if (th_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
return -EINTR;
if (th_flags & _TIF_NEED_RESCHED)
schedule();
if (th_flags & _TIF_NOTIFY_RESUME)
resume_user_mode_work(NULL);
return 0;
}
EXPORT_SYMBOL_GPL(mshv_do_pre_guest_mode_work);

View File

@ -8,6 +8,7 @@
* Authors: Microsoft Linux virtualization team
*/
#include <linux/entry-virt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/fs.h>
@ -37,12 +38,6 @@ MODULE_AUTHOR("Microsoft");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V root partition VMM interface /dev/mshv");
/* TODO move this to mshyperv.h when needed outside driver */
static inline bool hv_parent_partition(void)
{
return hv_root_partition();
}
/* TODO move this to another file when debugfs code is added */
enum hv_stats_vp_counters { /* HV_THREAD_COUNTER */
#if defined(CONFIG_X86)
@ -487,28 +482,6 @@ mshv_vp_wait_for_hv_kick(struct mshv_vp *vp)
return 0;
}
static int mshv_pre_guest_mode_work(struct mshv_vp *vp)
{
const ulong work_flags = _TIF_NOTIFY_SIGNAL | _TIF_SIGPENDING |
_TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME;
ulong th_flags;
th_flags = read_thread_flags();
while (th_flags & work_flags) {
int ret;
/* nb: following will call schedule */
ret = mshv_do_pre_guest_mode_work(th_flags);
if (ret)
return ret;
th_flags = read_thread_flags();
}
return 0;
}
/* Must be called with interrupts enabled */
static long mshv_run_vp_with_root_scheduler(struct mshv_vp *vp)
{
@ -529,9 +502,11 @@ static long mshv_run_vp_with_root_scheduler(struct mshv_vp *vp)
u32 flags = 0;
struct hv_output_dispatch_vp output;
ret = mshv_pre_guest_mode_work(vp);
if (ret)
break;
if (__xfer_to_guest_mode_work_pending()) {
ret = xfer_to_guest_mode_handle_work();
if (ret)
break;
}
if (vp->run.flags.intercept_suspend)
flags |= HV_DISPATCH_VP_FLAG_CLEAR_INTERCEPT_SUSPEND;
@ -2074,9 +2049,13 @@ static int __init hv_retrieve_scheduler_type(enum hv_scheduler_type *out)
/* Retrieve and stash the supported scheduler type */
static int __init mshv_retrieve_scheduler_type(struct device *dev)
{
int ret;
int ret = 0;
if (hv_l1vh_partition())
hv_scheduler_type = HV_SCHEDULER_TYPE_CORE_SMT;
else
ret = hv_retrieve_scheduler_type(&hv_scheduler_type);
ret = hv_retrieve_scheduler_type(&hv_scheduler_type);
if (ret)
return ret;
@ -2203,9 +2182,6 @@ static int __init mshv_root_partition_init(struct device *dev)
{
int err;
if (mshv_retrieve_scheduler_type(dev))
return -ENODEV;
err = root_scheduler_init(dev);
if (err)
return err;
@ -2227,7 +2203,7 @@ static int __init mshv_parent_partition_init(void)
struct device *dev;
union hv_hypervisor_version_info version_info;
if (!hv_root_partition() || is_kdump_kernel())
if (!hv_parent_partition() || is_kdump_kernel())
return -ENODEV;
if (hv_get_hypervisor_version(&version_info))
@ -2264,7 +2240,12 @@ static int __init mshv_parent_partition_init(void)
mshv_cpuhp_online = ret;
ret = mshv_root_partition_init(dev);
ret = mshv_retrieve_scheduler_type(dev);
if (ret)
goto remove_cpu_state;
if (hv_root_partition())
ret = mshv_root_partition_init(dev);
if (ret)
goto remove_cpu_state;

View File

@ -322,7 +322,7 @@ static ssize_t out_read_index_show(struct device *dev,
&outbound);
if (ret < 0)
return ret;
return sysfs_emit(buf, "%d\n", outbound.current_read_index);
return sysfs_emit(buf, "%u\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);
@ -341,7 +341,7 @@ static ssize_t out_write_index_show(struct device *dev,
&outbound);
if (ret < 0)
return ret;
return sysfs_emit(buf, "%d\n", outbound.current_write_index);
return sysfs_emit(buf, "%u\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);
@ -1742,7 +1742,7 @@ static ssize_t target_cpu_store(struct vmbus_channel *channel,
u32 target_cpu;
ssize_t ret;
if (sscanf(buf, "%uu", &target_cpu) != 1)
if (sscanf(buf, "%u", &target_cpu) != 1)
return -EIO;
cpus_read_lock();
@ -1947,7 +1947,7 @@ static const struct kobj_type vmbus_chan_ktype = {
* is running.
* For example, HV_NIC device is used either by uio_hv_generic or hv_netvsc at any given point of
* time, and "ring" sysfs is needed only when uio_hv_generic is bound to that device. To avoid
* exposing the ring buffer by default, this function is reponsible to enable visibility of
* exposing the ring buffer by default, this function is responsible to enable visibility of
* ring for userspace to use.
* Note: Race conditions can happen with userspace and it is not encouraged to create new
* use-cases for this. This was added to maintain backward compatibility, while solving
@ -2110,7 +2110,7 @@ int vmbus_device_register(struct hv_device *child_device_obj)
ret = vmbus_add_channel_kobj(child_device_obj,
child_device_obj->channel);
if (ret) {
pr_err("Unable to register primary channeln");
pr_err("Unable to register primary channel\n");
goto err_kset_unregister;
}
hv_debug_add_dev_dir(child_device_obj);

View File

@ -276,8 +276,8 @@ config SERIO_OLPC_APSP
config HYPERV_KEYBOARD
tristate "Microsoft Synthetic Keyboard driver"
depends on HYPERV
default HYPERV
depends on HYPERV_VMBUS
default HYPERV_VMBUS
help
Select this option to enable the Hyper-V Keyboard driver.

View File

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config HYPERV_NET
tristate "Microsoft Hyper-V virtual network driver"
depends on HYPERV
depends on HYPERV_VMBUS
select UCS2_STRING
select NLS
help

View File

@ -221,7 +221,7 @@ config PCI_LABEL
config PCI_HYPERV
tristate "Hyper-V PCI Frontend"
depends on ((X86 && X86_64) || ARM64) && HYPERV && PCI_MSI && SYSFS
depends on ((X86 && X86_64) || ARM64) && HYPERV_VMBUS && PCI_MSI && SYSFS
select PCI_HYPERV_INTERFACE
select IRQ_MSI_LIB
help

View File

@ -589,7 +589,7 @@ config XEN_SCSI_FRONTEND
config HYPERV_STORAGE
tristate "Microsoft Hyper-V virtual storage driver"
depends on SCSI && HYPERV
depends on SCSI && HYPERV_VMBUS
depends on m || SCSI_FC_ATTRS != m
default HYPERV
help

View File

@ -140,7 +140,7 @@ config UIO_MF624
config UIO_HV_GENERIC
tristate "Generic driver for Hyper-V VMBus"
depends on HYPERV
depends on HYPERV_VMBUS
help
Generic driver that you can bind, dynamically, to any
Hyper-V VMBus device. It is useful to provide direct access

View File

@ -1773,13 +1773,16 @@ config FB_BROADSHEET
a bridge adapter.
config FB_HYPERV
tristate "Microsoft Hyper-V Synthetic Video support"
depends on FB && HYPERV
tristate "Microsoft Hyper-V Synthetic Video support (DEPRECATED)"
depends on FB && HYPERV_VMBUS
select DMA_CMA if HAVE_DMA_CONTIGUOUS && CMA
select FB_IOMEM_HELPERS_DEFERRED
help
This framebuffer driver supports Microsoft Hyper-V Synthetic Video.
This driver is deprecated, please use the Hyper-V DRM driver at
drivers/gpu/drm/hyperv (CONFIG_DRM_HYPERV) instead.
config FB_SIMPLE
tristate "Simple framebuffer support"
depends on FB

View File

@ -1357,6 +1357,8 @@ static int __init hvfb_drv_init(void)
{
int ret;
pr_warn("Deprecated: use Hyper-V DRM driver instead\n");
if (fb_modesetting_disabled("hyper_fb"))
return -ENODEV;

View File

@ -31,6 +31,7 @@
/*
 * Role the local partition plays under the Hyper-V hypervisor.
 * NOTE(review): semantics inferred from names — GUEST is an ordinary guest,
 * ROOT is the root (parent) partition, L1VH appears to be an L1 virtualization
 * host; confirm against the Hyper-V TLFS before relying on this.
 */
enum hv_partition_type {
	HV_PARTITION_TYPE_GUEST,
	HV_PARTITION_TYPE_ROOT,
	HV_PARTITION_TYPE_L1VH,
};
struct ms_hyperv_info {
@ -162,6 +163,7 @@ static inline u64 hv_generate_guest_id(u64 kernel_version)
return guest_id;
}
#if IS_ENABLED(CONFIG_HYPERV_VMBUS)
/* Free the message slot and signal end-of-message if required */
static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
{
@ -197,6 +199,10 @@ static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
}
}
extern int vmbus_interrupt;
extern int vmbus_irq;
#endif /* CONFIG_HYPERV_VMBUS */
int hv_get_hypervisor_version(union hv_hypervisor_version_info *info);
void hv_setup_vmbus_handler(void (*handler)(void));
@ -210,9 +216,6 @@ void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs));
void hv_remove_crash_handler(void);
void hv_setup_mshv_handler(void (*handler)(void));
extern int vmbus_interrupt;
extern int vmbus_irq;
#if IS_ENABLED(CONFIG_HYPERV)
/*
* Hypervisor's notion of virtual processor ID is different from
@ -354,12 +357,22 @@ static inline bool hv_root_partition(void)
{
return hv_curr_partition_type == HV_PARTITION_TYPE_ROOT;
}
static inline bool hv_l1vh_partition(void)
{
return hv_curr_partition_type == HV_PARTITION_TYPE_L1VH;
}
static inline bool hv_parent_partition(void)
{
return hv_root_partition() || hv_l1vh_partition();
}
int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages);
int hv_call_add_logical_proc(int node, u32 lp_index, u32 acpi_id);
int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags);
#else /* CONFIG_MSHV_ROOT */
/* MSHV root support compiled out: never a root, L1VH, or parent partition. */
static inline bool hv_root_partition(void) { return false; }
static inline bool hv_l1vh_partition(void) { return false; }
static inline bool hv_parent_partition(void) { return false; }
static inline int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages)
{
return -EOPNOTSUPP;

View File

@ -597,8 +597,6 @@ struct ms_hyperv_tsc_page { /* HV_REFERENCE_TSC_PAGE */
#define HV_SYNIC_SINT_AUTO_EOI (1ULL << 17)
#define HV_SYNIC_SINT_VECTOR_MASK (0xFF)
#
/* Hyper-V defined statically assigned SINTs */
#define HV_SYNIC_INTERCEPTION_SINT_INDEX 0x00000000
#define HV_SYNIC_IOMMU_FAULT_SINT_INDEX 0x00000001

View File

@ -301,6 +301,7 @@ struct hv_input_map_device_interrupt {
/* HV_OUTPUT_MAP_DEVICE_INTERRUPT */
struct hv_output_map_device_interrupt {
	struct hv_interrupt_entry interrupt_entry;
	/*
	 * NOTE(review): name suggests a deprecated extended-status area kept
	 * for ABI/layout compatibility with the hypercall output — confirm
	 * size and meaning against the Hyper-V TLFS hypercall definition.
	 */
	u64 ext_status_deprecated[5];
} __packed;
/* HV_INPUT_UNMAP_DEVICE_INTERRUPT */

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_ENTRYKVM_H
#define __LINUX_ENTRYKVM_H
#ifndef __LINUX_ENTRYVIRT_H
#define __LINUX_ENTRYVIRT_H
#include <linux/static_call_types.h>
#include <linux/resume_user_mode.h>
@ -10,7 +10,7 @@
#include <linux/tick.h>
/* Transfer to guest mode work */
#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
#ifdef CONFIG_VIRT_XFER_TO_GUEST_WORK
#ifndef ARCH_XFER_TO_GUEST_MODE_WORK
# define ARCH_XFER_TO_GUEST_MODE_WORK (0)
@ -21,8 +21,6 @@
_TIF_NOTIFY_SIGNAL | _TIF_NOTIFY_RESUME | \
ARCH_XFER_TO_GUEST_MODE_WORK)
struct kvm_vcpu;
/**
* arch_xfer_to_guest_mode_handle_work - Architecture specific xfer to guest
* mode work handling function.
@ -32,12 +30,10 @@ struct kvm_vcpu;
* Invoked from xfer_to_guest_mode_handle_work(). Defaults to NOOP. Can be
* replaced by architecture specific code.
*/
static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu,
unsigned long ti_work);
static inline int arch_xfer_to_guest_mode_handle_work(unsigned long ti_work);
#ifndef arch_xfer_to_guest_mode_work
static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu,
unsigned long ti_work)
static inline int arch_xfer_to_guest_mode_handle_work(unsigned long ti_work)
{
return 0;
}
@ -46,11 +42,10 @@ static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu,
/**
* xfer_to_guest_mode_handle_work - Check and handle pending work which needs
* to be handled before going to guest mode
* @vcpu: Pointer to current's VCPU data
*
* Returns: 0 or an error code
*/
int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu);
int xfer_to_guest_mode_handle_work(void);
/**
* xfer_to_guest_mode_prepare - Perform last minute preparation work that
@ -95,6 +90,6 @@ static inline bool xfer_to_guest_mode_work_pending(void)
lockdep_assert_irqs_disabled();
return __xfer_to_guest_mode_work_pending();
}
#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */
#endif /* CONFIG_VIRT_XFER_TO_GUEST_WORK */
#endif

View File

@ -707,11 +707,6 @@ struct vmbus_channel_msginfo {
unsigned char msg[];
};
struct vmbus_close_msg {
struct vmbus_channel_msginfo info;
struct vmbus_channel_close_channel msg;
};
enum vmbus_device_type {
HV_IDE = 0,
HV_SCSI,
@ -800,7 +795,7 @@ struct vmbus_channel {
struct hv_ring_buffer_info outbound; /* send to parent */
struct hv_ring_buffer_info inbound; /* receive from parent */
struct vmbus_close_msg close_msg;
struct vmbus_channel_close_channel close_msg;
/* Statistics */
u64 interrupts; /* Host to Guest interrupts */

View File

@ -2,7 +2,7 @@
#ifndef __KVM_HOST_H
#define __KVM_HOST_H
#include <linux/entry-virt.h>
#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
@ -2450,13 +2450,24 @@ static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
}
#endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */
#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
#ifdef CONFIG_VIRT_XFER_TO_GUEST_WORK
/*
 * Record a signal-induced exit to userspace: report KVM_EXIT_INTR in the
 * vCPU's run structure and bump the signal_exits statistic.
 */
static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
{
	vcpu->run->exit_reason = KVM_EXIT_INTR;
	vcpu->stat.signal_exits++;
}
#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */
/*
 * Handle pending entry work before transferring to guest mode.  On failure
 * the generic code is expected to return -EINTR only; warn on anything else
 * and report the signal exit to userspace via @vcpu's run structure.
 * Returns 0 on success or the (negative) error from the generic helper.
 */
static inline int kvm_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu)
{
	int ret = xfer_to_guest_mode_handle_work();

	if (!ret)
		return 0;

	WARN_ON_ONCE(ret != -EINTR);
	kvm_handle_signal_exit(vcpu);
	return ret;
}
#endif /* CONFIG_VIRT_XFER_TO_GUEST_WORK */
/*
* If more than one page is being (un)accounted, @virt must be the address of

View File

@ -129,7 +129,7 @@ static inline void rcu_sysrq_start(void) { }
static inline void rcu_sysrq_end(void) { }
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */
#if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK))
#if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_VIRT_XFER_TO_GUEST_WORK))
void rcu_irq_work_resched(void);
#else
static __always_inline void rcu_irq_work_resched(void) { }

View File

@ -14,4 +14,4 @@ CFLAGS_common.o += -fno-stack-protector
obj-$(CONFIG_GENERIC_IRQ_ENTRY) += common.o
obj-$(CONFIG_GENERIC_SYSCALL) += syscall-common.o syscall_user_dispatch.o
obj-$(CONFIG_KVM_XFER_TO_GUEST_WORK) += kvm.o
obj-$(CONFIG_VIRT_XFER_TO_GUEST_WORK) += virt.o

View File

@ -1,17 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/entry-kvm.h>
#include <linux/kvm_host.h>
#include <linux/entry-virt.h>
static int xfer_to_guest_mode_work(struct kvm_vcpu *vcpu, unsigned long ti_work)
static int xfer_to_guest_mode_work(unsigned long ti_work)
{
do {
int ret;
if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) {
kvm_handle_signal_exit(vcpu);
if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
return -EINTR;
}
if (ti_work & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY))
schedule();
@ -19,7 +16,7 @@ static int xfer_to_guest_mode_work(struct kvm_vcpu *vcpu, unsigned long ti_work)
if (ti_work & _TIF_NOTIFY_RESUME)
resume_user_mode_work(NULL);
ret = arch_xfer_to_guest_mode_handle_work(vcpu, ti_work);
ret = arch_xfer_to_guest_mode_handle_work(ti_work);
if (ret)
return ret;
@ -28,7 +25,7 @@ static int xfer_to_guest_mode_work(struct kvm_vcpu *vcpu, unsigned long ti_work)
return 0;
}
int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu)
int xfer_to_guest_mode_handle_work(void)
{
unsigned long ti_work;
@ -44,6 +41,6 @@ int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu)
if (!(ti_work & XFER_TO_GUEST_MODE_WORK))
return 0;
return xfer_to_guest_mode_work(vcpu, ti_work);
return xfer_to_guest_mode_work(ti_work);
}
EXPORT_SYMBOL_GPL(xfer_to_guest_mode_handle_work);

View File

@ -573,7 +573,7 @@ void rcutorture_format_gp_seqs(unsigned long long seqs, char *cp, size_t len)
}
EXPORT_SYMBOL_GPL(rcutorture_format_gp_seqs);
#if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK))
#if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_VIRT_XFER_TO_GUEST_WORK))
/*
* An empty function that will trigger a reschedule on
* IRQ tail once IRQs get re-enabled on userspace/guest resume.
@ -602,7 +602,7 @@ noinstr void rcu_irq_work_resched(void)
if (IS_ENABLED(CONFIG_GENERIC_ENTRY) && !(current->flags & PF_VCPU))
return;
if (IS_ENABLED(CONFIG_KVM_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU))
if (IS_ENABLED(CONFIG_VIRT_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU))
return;
instrumentation_begin();
@ -611,7 +611,7 @@ noinstr void rcu_irq_work_resched(void)
}
instrumentation_end();
}
#endif /* #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)) */
#endif /* #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_VIRT_XFER_TO_GUEST_WORK)) */
#ifdef CONFIG_PROVE_RCU
/**

View File

@ -72,7 +72,7 @@ config VIRTIO_VSOCKETS_COMMON
config HYPERV_VSOCKETS
tristate "Hyper-V transport for Virtual Sockets"
depends on VSOCKETS && HYPERV
depends on VSOCKETS && HYPERV_VMBUS
help
This module implements a Hyper-V transport for Virtual Sockets.

View File

@ -87,7 +87,7 @@ config HAVE_KVM_VCPU_RUN_PID_CHANGE
config HAVE_KVM_NO_POLL
bool
config KVM_XFER_TO_GUEST_WORK
config VIRT_XFER_TO_GUEST_WORK
bool
config HAVE_KVM_PM_NOTIFIER