KVM: Drop @atomic param from gfn=>pfn and hva=>pfn APIs

Drop @atomic from the myriad "to_pfn" APIs now that all callers pass
"false", and remove a comment blurb about KVM running only the "GUP fast"
part in atomic context.

No functional change intended.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Tested-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010182427.1434605-13-seanjc@google.com>
Sean Christopherson, 2024-10-10 11:23:14 -07:00 (committed by Paolo Bonzini)
commit e2d2ca71ac, parent 6419bc5207
9 changed files with 23 additions and 41 deletions
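
For a quick view of the API change, here are the old and new __gfn_to_pfn_memslot() prototypes (as they appear in the header hunk below), plus a representative call site taken from the user_mem_abort() hunk. This is purely illustrative and not part of the patch itself:

  /* Old: @atomic sat between @gfn and @interruptible, and every caller passed false. */
  kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
                                 bool atomic, bool interruptible, bool *async,
                                 bool write_fault, bool *writable, hva_t *hva);

  /* New: @atomic is dropped; the remaining parameters keep their order. */
  kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
                                 bool interruptible, bool *async,
                                 bool write_fault, bool *writable, hva_t *hva);

  /* A typical call site, before and after: */
  pfn = __gfn_to_pfn_memslot(memslot, gfn, false /* atomic */, false /* interruptible */,
                             NULL /* async */, write_fault, &writable, NULL /* hva */);
  pfn = __gfn_to_pfn_memslot(memslot, gfn, false /* interruptible */, NULL /* async */,
                             write_fault, &writable, NULL /* hva */);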

@@ -135,8 +135,8 @@ We dirty-log for gfn1, that means gfn2 is lost in dirty-bitmap.
 For direct sp, we can easily avoid it since the spte of direct sp is fixed
 to gfn.  For indirect sp, we disabled fast page fault for simplicity.
 
-A solution for indirect sp could be to pin the gfn, for example via
-gfn_to_pfn_memslot_atomic, before the cmpxchg.  After the pinning:
+A solution for indirect sp could be to pin the gfn before the cmpxchg.  After
+the pinning:
 
 - We have held the refcount of pfn; that means the pfn can not be freed and
   be reused for another gfn.

@@ -1570,7 +1570,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	mmu_seq = vcpu->kvm->mmu_invalidate_seq;
 	mmap_read_unlock(current->mm);
 
-	pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL,
+	pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
 				   write_fault, &writable, NULL);
 	if (pfn == KVM_PFN_ERR_HWPOISON) {
 		kvm_send_hwpoison_signal(hva, vma_shift);

@@ -613,7 +613,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
 		write_ok = true;
 	} else {
 		/* Call KVM generic code to do the slow-path check */
-		pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL,
+		pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
 					   writing, &write_ok, NULL);
 		if (is_error_noslot_pfn(pfn))
 			return -EFAULT;

@@ -852,7 +852,7 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
 		unsigned long pfn;
 
 		/* Call KVM generic code to do the slow-path check */
-		pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL,
+		pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
 					   writing, upgrade_p, NULL);
 		if (is_error_noslot_pfn(pfn))
 			return -EFAULT;

@@ -4387,9 +4387,9 @@ static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 		return kvm_faultin_pfn_private(vcpu, fault);
 
 	async = false;
-	fault->pfn = __gfn_to_pfn_memslot(fault->slot, fault->gfn, false, false,
-					  &async, fault->write,
-					  &fault->map_writable, &fault->hva);
+	fault->pfn = __gfn_to_pfn_memslot(fault->slot, fault->gfn, false, &async,
+					  fault->write, &fault->map_writable,
+					  &fault->hva);
 	if (!async)
 		return RET_PF_CONTINUE; /* *pfn has correct page already */
 
@@ -4409,9 +4409,9 @@ static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 	 * to wait for IO.  Note, gup always bails if it is unable to quickly
 	 * get a page and a fatal signal, i.e. SIGKILL, is pending.
 	 */
-	fault->pfn = __gfn_to_pfn_memslot(fault->slot, fault->gfn, false, true,
-					  NULL, fault->write,
-					  &fault->map_writable, &fault->hva);
+	fault->pfn = __gfn_to_pfn_memslot(fault->slot, fault->gfn, true, NULL,
+					  fault->write, &fault->map_writable,
+					  &fault->hva);
 	return RET_PF_CONTINUE;
 }

@@ -1232,9 +1232,8 @@ kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
 		      bool *writable);
 kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn);
-kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn);
 kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
-			       bool atomic, bool interruptible, bool *async,
+			       bool interruptible, bool *async,
 			       bool write_fault, bool *writable, hva_t *hva);
 
 void kvm_release_pfn_clean(kvm_pfn_t pfn);

@@ -2756,8 +2756,7 @@ static inline int check_user_page_hwpoison(unsigned long addr)
 
 /*
  * The fast path to get the writable pfn which will be stored in @pfn,
- * true indicates success, otherwise false is returned.  It's also the
- * only part that runs if we can in atomic context.
+ * true indicates success, otherwise false is returned.
  */
 static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
 			    bool *writable, kvm_pfn_t *pfn)
@@ -2922,7 +2921,6 @@ out:
 /*
  * Pin guest page in memory and return its pfn.
  * @addr: host virtual address which maps memory to the guest
- * @atomic: whether this function is forbidden from sleeping
  * @interruptible: whether the process can be interrupted by non-fatal signals
  * @async: whether this function need to wait IO complete if the
  *         host page is not in the memory
@@ -2934,22 +2932,16 @@ out:
  * 2): @write_fault = false && @writable, @writable will tell the caller
  *     whether the mapping is writable.
  */
-kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible,
-		     bool *async, bool write_fault, bool *writable)
+kvm_pfn_t hva_to_pfn(unsigned long addr, bool interruptible, bool *async,
+		     bool write_fault, bool *writable)
 {
 	struct vm_area_struct *vma;
 	kvm_pfn_t pfn;
 	int npages, r;
 
-	/* we can do it either atomically or asynchronously, not both */
-	BUG_ON(atomic && async);
-
 	if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
 		return pfn;
 
-	if (atomic)
-		return KVM_PFN_ERR_FAULT;
-
 	npages = hva_to_pfn_slow(addr, async, write_fault, interruptible,
 				 writable, &pfn);
 	if (npages == 1)
@@ -2986,7 +2978,7 @@ exit:
 }
 
 kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
-			       bool atomic, bool interruptible, bool *async,
+			       bool interruptible, bool *async,
 			       bool write_fault, bool *writable, hva_t *hva)
 {
 	unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
@@ -3008,33 +3000,24 @@ kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
 		writable = NULL;
 	}
 
-	return hva_to_pfn(addr, atomic, interruptible, async, write_fault,
-			  writable);
+	return hva_to_pfn(addr, interruptible, async, write_fault, writable);
 }
 EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);
 
 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
 			  bool *writable)
 {
-	return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, false,
-				    NULL, write_fault, writable, NULL);
+	return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL,
+				    write_fault, writable, NULL);
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
 
 kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
 {
-	return __gfn_to_pfn_memslot(slot, gfn, false, false, NULL, true,
-				    NULL, NULL);
+	return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL, NULL);
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);
 
-kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn)
-{
-	return __gfn_to_pfn_memslot(slot, gfn, true, false, NULL, true,
-				    NULL, NULL);
-}
-EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
-
 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
 {
 	return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
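
With @atomic gone, hva_to_pfn() reduces to a straight fast-then-slow GUP sequence. A condensed sketch of the resulting flow, pieced together from the hunk above (error handling and the later vma-based fallback are omitted):

  /* GUP-fast only; per the comment above, it only grabs a writable pfn. */
  if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
          return pfn;

  /* The "if (atomic) return KVM_PFN_ERR_FAULT;" bail-out used to sit here. */

  /* Full GUP: may sleep, fault the page in, and honor @interruptible/@async. */
  npages = hva_to_pfn_slow(addr, async, write_fault, interruptible,
                           writable, &pfn);
  if (npages == 1)
          return pfn;
  /* ... the remaining error and vma fallback handling is unchanged ... */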

@@ -20,8 +20,8 @@
 #define KVM_MMU_UNLOCK(kvm)		spin_unlock(&(kvm)->mmu_lock)
 #endif /* KVM_HAVE_MMU_RWLOCK */
 
-kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible,
-		     bool *async, bool write_fault, bool *writable);
+kvm_pfn_t hva_to_pfn(unsigned long addr, bool interruptible, bool *async,
+		     bool write_fault, bool *writable);
 
 #ifdef CONFIG_HAVE_KVM_PFNCACHE
 void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,

@@ -198,7 +198,7 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
 		}
 
 		/* We always request a writeable mapping */
-		new_pfn = hva_to_pfn(gpc->uhva, false, false, NULL, true, NULL);
+		new_pfn = hva_to_pfn(gpc->uhva, false, NULL, true, NULL);
 		if (is_error_noslot_pfn(new_pfn))
 			goto out_error;