KVM: Implement barriers before accessing kvm->buses[] on SRCU read paths
This ensures that, if a VCPU has "observed" that an IO registration has occurred, the instruction currently being trapped or emulated will also observe the IO registration. At the same time, enforce that kvm_get_bus() is used only on the update side, ensuring that a long-term reference cannot be obtained by an SRCU reader. Signed-off-by: Keir Fraser <keirf@google.com> Signed-off-by: Marc Zyngier <maz@kernel.org>
parent
11490b5ec6
commit
7788255aba
|
|
@ -5785,6 +5785,13 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
|
|||
if (kvm_test_request(KVM_REQ_EVENT, vcpu))
|
||||
return 1;
|
||||
|
||||
/*
|
||||
* Ensure that any updates to kvm->buses[] observed by the
|
||||
* previous instruction (emulated or otherwise) are also
|
||||
* visible to the instruction KVM is about to emulate.
|
||||
*/
|
||||
smp_rmb();
|
||||
|
||||
if (!kvm_emulate_instruction(vcpu, 0))
|
||||
return 0;
|
||||
|
||||
|
|
|
|||
|
|
@ -966,11 +966,15 @@ static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
|
|||
return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
|
||||
}
|
||||
|
||||
/*
|
||||
* Get a bus reference under the update-side lock. No long-term SRCU reader
|
||||
* references are permitted, to avoid stale reads vs concurrent IO
|
||||
* registrations.
|
||||
*/
|
||||
static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
|
||||
{
|
||||
return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
|
||||
lockdep_is_held(&kvm->slots_lock) ||
|
||||
!refcount_read(&kvm->users_count));
|
||||
return rcu_dereference_protected(kvm->buses[idx],
|
||||
lockdep_is_held(&kvm->slots_lock));
|
||||
}
|
||||
|
||||
static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
|
||||
|
|
|
|||
|
|
@ -1103,6 +1103,14 @@ void __weak kvm_arch_create_vm_debugfs(struct kvm *kvm)
|
|||
{
|
||||
}
|
||||
|
||||
/* Called only on cleanup and destruction paths when there are no users. */
|
||||
static inline struct kvm_io_bus *kvm_get_bus_for_destruction(struct kvm *kvm,
|
||||
enum kvm_bus idx)
|
||||
{
|
||||
return rcu_dereference_protected(kvm->buses[idx],
|
||||
!refcount_read(&kvm->users_count));
|
||||
}
|
||||
|
||||
static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
|
||||
{
|
||||
struct kvm *kvm = kvm_arch_alloc_vm();
|
||||
|
|
@ -1228,7 +1236,7 @@ out_err_no_disable:
|
|||
out_err_no_arch_destroy_vm:
|
||||
WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
|
||||
for (i = 0; i < KVM_NR_BUSES; i++)
|
||||
kfree(kvm_get_bus(kvm, i));
|
||||
kfree(kvm_get_bus_for_destruction(kvm, i));
|
||||
kvm_free_irq_routing(kvm);
|
||||
out_err_no_irq_routing:
|
||||
cleanup_srcu_struct(&kvm->irq_srcu);
|
||||
|
|
@ -1276,7 +1284,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
|
|||
|
||||
kvm_free_irq_routing(kvm);
|
||||
for (i = 0; i < KVM_NR_BUSES; i++) {
|
||||
struct kvm_io_bus *bus = kvm_get_bus(kvm, i);
|
||||
struct kvm_io_bus *bus = kvm_get_bus_for_destruction(kvm, i);
|
||||
|
||||
if (bus)
|
||||
kvm_io_bus_destroy(bus);
|
||||
|
|
@ -5843,6 +5851,18 @@ static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
|
|||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static struct kvm_io_bus *kvm_get_bus_srcu(struct kvm *kvm, enum kvm_bus idx)
|
||||
{
|
||||
/*
|
||||
* Ensure that any updates to kvm_buses[] observed by the previous vCPU
|
||||
* machine instruction are also visible to the vCPU machine instruction
|
||||
* that triggered this call.
|
||||
*/
|
||||
smp_mb__after_srcu_read_lock();
|
||||
|
||||
return srcu_dereference(kvm->buses[idx], &kvm->srcu);
|
||||
}
|
||||
|
||||
int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
|
||||
int len, const void *val)
|
||||
{
|
||||
|
|
@ -5855,7 +5875,7 @@ int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
|
|||
.len = len,
|
||||
};
|
||||
|
||||
bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
|
||||
bus = kvm_get_bus_srcu(vcpu->kvm, bus_idx);
|
||||
if (!bus)
|
||||
return -ENOMEM;
|
||||
r = __kvm_io_bus_write(vcpu, bus, &range, val);
|
||||
|
|
@ -5874,7 +5894,7 @@ int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
|
|||
.len = len,
|
||||
};
|
||||
|
||||
bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
|
||||
bus = kvm_get_bus_srcu(vcpu->kvm, bus_idx);
|
||||
if (!bus)
|
||||
return -ENOMEM;
|
||||
|
||||
|
|
@ -5924,7 +5944,7 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
|
|||
.len = len,
|
||||
};
|
||||
|
||||
bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
|
||||
bus = kvm_get_bus_srcu(vcpu->kvm, bus_idx);
|
||||
if (!bus)
|
||||
return -ENOMEM;
|
||||
r = __kvm_io_bus_read(vcpu, bus, &range, val);
|
||||
|
|
@ -6033,7 +6053,7 @@ struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
|
|||
|
||||
srcu_idx = srcu_read_lock(&kvm->srcu);
|
||||
|
||||
bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
|
||||
bus = kvm_get_bus_srcu(kvm, bus_idx);
|
||||
if (!bus)
|
||||
goto out_unlock;
|
||||
|
||||
|
|
|
|||
Loading…
Reference in New Issue