KVM: Add kvm_faultin_pfn() to specifically service guest page faults
Add a new dedicated API, kvm_faultin_pfn(), for servicing guest page faults, i.e. for getting pages/pfns that will be mapped into the guest via an mmu_notifier-protected KVM MMU. Keep struct kvm_follow_pfn buried in internal code, as having __kvm_faultin_pfn() take "out" params is actually cleaner for several architectures, e.g. it allows the caller to have its own "page fault" structure without having to marshal data to/from kvm_follow_pfn. Long term, common KVM would ideally provide a kvm_page_fault structure, a la x86's struct of the same name. But all architectures need to be converted to a common API before that can happen. Tested-by: Alex Bennée <alex.bennee@linaro.org> Signed-off-by: Sean Christopherson <seanjc@google.com> Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> Message-ID: <20241010182427.1434605-44-seanjc@google.com>
(branch: pull/1085/head)
parent
21dd877060
commit
1c7b627e93
|
|
@ -1231,6 +1231,18 @@ static inline void kvm_release_page_unused(struct page *page)
|
||||||
void kvm_release_page_clean(struct page *page);
|
void kvm_release_page_clean(struct page *page);
|
||||||
void kvm_release_page_dirty(struct page *page);
|
void kvm_release_page_dirty(struct page *page);
|
||||||
|
|
||||||
|
kvm_pfn_t __kvm_faultin_pfn(const struct kvm_memory_slot *slot, gfn_t gfn,
|
||||||
|
unsigned int foll, bool *writable,
|
||||||
|
struct page **refcounted_page);
|
||||||
|
|
||||||
|
static inline kvm_pfn_t kvm_faultin_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
|
||||||
|
bool write, bool *writable,
|
||||||
|
struct page **refcounted_page)
|
||||||
|
{
|
||||||
|
return __kvm_faultin_pfn(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn,
|
||||||
|
write ? FOLL_WRITE : 0, writable, refcounted_page);
|
||||||
|
}
|
||||||
|
|
||||||
kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
|
kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
|
||||||
kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
|
kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
|
||||||
bool *writable);
|
bool *writable);
|
||||||
|
|
|
||||||
|
|
@ -3092,6 +3092,28 @@ kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(gfn_to_pfn);
|
EXPORT_SYMBOL_GPL(gfn_to_pfn);
|
||||||
|
|
||||||
|
kvm_pfn_t __kvm_faultin_pfn(const struct kvm_memory_slot *slot, gfn_t gfn,
|
||||||
|
unsigned int foll, bool *writable,
|
||||||
|
struct page **refcounted_page)
|
||||||
|
{
|
||||||
|
struct kvm_follow_pfn kfp = {
|
||||||
|
.slot = slot,
|
||||||
|
.gfn = gfn,
|
||||||
|
.flags = foll,
|
||||||
|
.map_writable = writable,
|
||||||
|
.refcounted_page = refcounted_page,
|
||||||
|
};
|
||||||
|
|
||||||
|
if (WARN_ON_ONCE(!writable || !refcounted_page))
|
||||||
|
return KVM_PFN_ERR_FAULT;
|
||||||
|
|
||||||
|
*writable = false;
|
||||||
|
*refcounted_page = NULL;
|
||||||
|
|
||||||
|
return kvm_follow_pfn(&kfp);
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(__kvm_faultin_pfn);
|
||||||
|
|
||||||
int kvm_prefetch_pages(struct kvm_memory_slot *slot, gfn_t gfn,
|
int kvm_prefetch_pages(struct kvm_memory_slot *slot, gfn_t gfn,
|
||||||
struct page **pages, int nr_pages)
|
struct page **pages, int nr_pages)
|
||||||
{
|
{
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue