KVM: pfncache: Add a map helper function
There is a pfncache unmap helper but mapping is open-coded. Arguably this is fine because mapping is done in only one place, hva_to_pfn_retry(), but adding the helper does make that function more readable.

No functional change intended.

Signed-off-by: Paul Durrant <pdurrant@amazon.com>
Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>
Link: https://lore.kernel.org/r/20240215152916.1158-2-paul@xen.org
Signed-off-by: Sean Christopherson <seanjc@google.com>
parent db7d6fbc10
commit f39b80e3ff
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -96,17 +96,32 @@ bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
 }
 EXPORT_SYMBOL_GPL(kvm_gpc_check);
 
-static void gpc_unmap_khva(kvm_pfn_t pfn, void *khva)
+static void *gpc_map(kvm_pfn_t pfn)
+{
+	if (pfn_valid(pfn))
+		return kmap(pfn_to_page(pfn));
+
+#ifdef CONFIG_HAS_IOMEM
+	return memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
+#else
+	return NULL;
+#endif
+}
+
+static void gpc_unmap(kvm_pfn_t pfn, void *khva)
 {
 	/* Unmap the old pfn/page if it was mapped before. */
-	if (!is_error_noslot_pfn(pfn) && khva) {
-		if (pfn_valid(pfn))
-			kunmap(pfn_to_page(pfn));
+	if (is_error_noslot_pfn(pfn) || !khva)
+		return;
+
+	if (pfn_valid(pfn)) {
+		kunmap(pfn_to_page(pfn));
+		return;
+	}
+
 #ifdef CONFIG_HAS_IOMEM
-		else
-			memunmap(khva);
+	memunmap(khva);
 #endif
-	}
 }
 
 static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_seq)
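A minimal usage sketch of the new helper pair (illustrative only, not part of the patch; the caller and the -EFAULT error value are hypothetical). gpc_map() uses kmap() when the pfn is backed by a struct page and falls back to memremap() for MMIO/PMEM ranges; gpc_unmap() undoes whichever mapping was made and tolerates error pfns and a NULL khva:

	/* Hypothetical caller: map a pfn, use it, then unmap. */
	void *khva = gpc_map(pfn);

	if (!khva)
		return -EFAULT;	/* no struct page and !CONFIG_HAS_IOMEM, or memremap() failed */

	/* ... read/write through khva ... */

	gpc_unmap(pfn, khva);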
@@ -175,7 +190,7 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
 		 * the existing mapping and didn't create a new one.
 		 */
 		if (new_khva != old_khva)
-			gpc_unmap_khva(new_pfn, new_khva);
+			gpc_unmap(new_pfn, new_khva);
 
 		kvm_release_pfn_clean(new_pfn);
 
@@ -193,15 +208,11 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
 	 * too must be done outside of gpc->lock!
 	 */
 	if (gpc->usage & KVM_HOST_USES_PFN) {
-		if (new_pfn == gpc->pfn) {
+		if (new_pfn == gpc->pfn)
 			new_khva = old_khva;
-		} else if (pfn_valid(new_pfn)) {
-			new_khva = kmap(pfn_to_page(new_pfn));
-#ifdef CONFIG_HAS_IOMEM
-		} else {
-			new_khva = memremap(pfn_to_hpa(new_pfn), PAGE_SIZE, MEMREMAP_WB);
-#endif
-		}
+		else
+			new_khva = gpc_map(new_pfn);
+
 		if (!new_khva) {
 			kvm_release_pfn_clean(new_pfn);
 			goto out_error;
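One design note on this hunk: folding the #ifdef chain into gpc_map() preserves the old failure behavior. On a kernel without CONFIG_HAS_IOMEM, gpc_map() returns NULL for a pfn with no struct page, so the existing !new_khva check still releases the pfn and jumps to out_error, exactly as when new_khva was left NULL before.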
@@ -326,7 +337,7 @@ out_unlock:
 	mutex_unlock(&gpc->refresh_lock);
 
 	if (unmap_old)
-		gpc_unmap_khva(old_pfn, old_khva);
+		gpc_unmap(old_pfn, old_khva);
 
 	return ret;
 }
@@ -412,7 +423,7 @@ void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
 		list_del(&gpc->list);
 		spin_unlock(&kvm->gpc_lock);
 
-		gpc_unmap_khva(old_pfn, old_khva);
+		gpc_unmap(old_pfn, old_khva);
 	}
 }
 EXPORT_SYMBOL_GPL(kvm_gpc_deactivate);