KVM: Export KVM-internal symbols for sub-modules only

Rework the vast majority of KVM's exports to expose symbols only to KVM
submodules, i.e. to x86's kvm-{amd,intel}.ko and PPC's kvm-{pr,hv}.ko.
With few exceptions, KVM's exported APIs are intended (and safe) for KVM-
internal usage only.

Keep kvm_get_kvm(), kvm_get_kvm_safe(), and kvm_put_kvm() as normal
exports, as they are needed by VFIO, and are generally safe for external
usage (though ideally even the get/put APIs would be KVM-internal, and
VFIO would pin a VM by grabbing a reference to its associated file).
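
As a purely hypothetical sketch of the file-based pinning alluded to above
(example_use_vm_via_file() and vm_fd are illustrative names, not existing
VFIO code), an external user could keep the VM alive by holding a reference
to the file backing its VM fd rather than calling kvm_get_kvm():

  #include <linux/file.h>

  /*
   * Illustrative only: instead of kvm_get_kvm()/kvm_put_kvm(), pin the VM
   * via the file backing its VM fd; the VM cannot be destroyed while the
   * file reference is held.
   */
  static void example_use_vm_via_file(unsigned int vm_fd)
  {
          struct file *vm_file = fget(vm_fd);     /* take a file reference */

          if (!vm_file)
                  return;

          /* ... interact with the VM while it is pinned ... */

          fput(vm_file);                          /* drop the pin */
  }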

Implement a framework in kvm_types.h in anticipation of providing a macro
to restrict KVM-specific kernel exports, i.e. to provide symbol exports
for KVM if and only if KVM is built as one or more modules.
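
As a rough illustration (assuming both kvm-amd.ko and kvm-intel.ko are built
as modules on x86, per the new asm/kvm_types.h below), a common-code export
such as

  EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_halt);

expands to approximately

  EXPORT_SYMBOL_FOR_MODULES(kvm_vcpu_halt, "kvm-amd,kvm-intel");

i.e. the symbol is visible only to those modules, and the export disappears
entirely when KVM is built-in, where no export is needed.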

Link: https://lore.kernel.org/r/20250919003303.1355064-3-seanjc@google.com
Cc: Nathan Chancellor <nathan@kernel.org>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

@ -3,7 +3,6 @@ generated-y += syscall_table_32.h
generated-y += syscall_table_64.h
generated-y += syscall_table_spu.h
generic-y += agp.h
generic-y += kvm_types.h
generic-y += mcs_spinlock.h
generic-y += qrwlock.h
generic-y += early_ioremap.h

@ -0,0 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_PPC_KVM_TYPES_H
#define _ASM_PPC_KVM_TYPES_H
#if IS_MODULE(CONFIG_KVM_BOOK3S_64_PR) && IS_MODULE(CONFIG_KVM_BOOK3S_64_HV)
#define KVM_SUB_MODULES kvm-pr,kvm-hv
#elif IS_MODULE(CONFIG_KVM_BOOK3S_64_PR)
#define KVM_SUB_MODULES kvm-pr
#elif IS_MODULE(CONFIG_KVM_BOOK3S_64_HV)
#define KVM_SUB_MODULES kvm-hv
#else
#undef KVM_SUB_MODULES
#endif
#endif

@ -2,6 +2,16 @@
#ifndef _ASM_X86_KVM_TYPES_H
#define _ASM_X86_KVM_TYPES_H
#if IS_MODULE(CONFIG_KVM_AMD) && IS_MODULE(CONFIG_KVM_INTEL)
#define KVM_SUB_MODULES kvm-amd,kvm-intel
#elif IS_MODULE(CONFIG_KVM_AMD)
#define KVM_SUB_MODULES kvm-amd
#elif IS_MODULE(CONFIG_KVM_INTEL)
#define KVM_SUB_MODULES kvm-intel
#else
#undef KVM_SUB_MODULES
#endif
#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 40
#endif /* _ASM_X86_KVM_TYPES_H */

@ -3,6 +3,23 @@
#ifndef __KVM_TYPES_H__
#define __KVM_TYPES_H__
#include <linux/bits.h>
#include <linux/export.h>
#include <linux/types.h>
#include <asm/kvm_types.h>
#ifdef KVM_SUB_MODULES
#define EXPORT_SYMBOL_FOR_KVM_INTERNAL(symbol) \
EXPORT_SYMBOL_FOR_MODULES(symbol, __stringify(KVM_SUB_MODULES))
#else
#define EXPORT_SYMBOL_FOR_KVM_INTERNAL(symbol)
#endif
#ifndef __ASSEMBLER__
#include <linux/mutex.h>
#include <linux/spinlock_types.h>
struct kvm;
struct kvm_async_pf;
struct kvm_device_ops;
@ -19,13 +36,6 @@ struct kvm_memslots;
enum kvm_mr_change;
#include <linux/bits.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/spinlock_types.h>
#include <asm/kvm_types.h>
/*
* Address types:
*
@ -116,5 +126,6 @@ struct kvm_vcpu_stat_generic {
};
#define KVM_STATS_NAME_SIZE 48
#endif /* !__ASSEMBLER__ */
#endif /* __KVM_TYPES_H__ */

@ -525,7 +525,7 @@ bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
return false;
}
EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_irq_has_notifier);
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi)
{

@ -702,7 +702,7 @@ out:
fput(file);
return r;
}
EXPORT_SYMBOL_GPL(kvm_gmem_get_pfn);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_gmem_get_pfn);
#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_POPULATE
long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long npages,
@ -785,5 +785,5 @@ put_folio_and_exit:
fput(file);
return ret && !i ? ret : i;
}
EXPORT_SYMBOL_GPL(kvm_gmem_populate);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_gmem_populate);
#endif

@ -77,22 +77,22 @@ MODULE_LICENSE("GPL");
/* Architectures should define their poll value according to the halt latency */
unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
module_param(halt_poll_ns, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(halt_poll_ns);
/* Default doubles per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_grow = 2;
module_param(halt_poll_ns_grow, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(halt_poll_ns_grow);
/* The start value to grow halt_poll_ns from */
unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
module_param(halt_poll_ns_grow_start, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(halt_poll_ns_grow_start);
/* Default halves per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_shrink = 2;
module_param(halt_poll_ns_shrink, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(halt_poll_ns_shrink);
/*
* Allow direct access (from KVM or the CPU) without MMU notifier protection
@ -170,7 +170,7 @@ void vcpu_load(struct kvm_vcpu *vcpu)
kvm_arch_vcpu_load(vcpu, cpu);
put_cpu();
}
EXPORT_SYMBOL_GPL(vcpu_load);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(vcpu_load);
void vcpu_put(struct kvm_vcpu *vcpu)
{
@ -180,7 +180,7 @@ void vcpu_put(struct kvm_vcpu *vcpu)
__this_cpu_write(kvm_running_vcpu, NULL);
preempt_enable();
}
EXPORT_SYMBOL_GPL(vcpu_put);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(vcpu_put);
/* TODO: merge with kvm_arch_vcpu_should_kick */
static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
@ -288,7 +288,7 @@ bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
return called;
}
EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_make_all_cpus_request);
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
@ -309,7 +309,7 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
|| kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
++kvm->stat.generic.remote_tlb_flush;
}
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_flush_remote_tlbs);
void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
{
@ -499,7 +499,7 @@ void kvm_destroy_vcpus(struct kvm *kvm)
atomic_set(&kvm->online_vcpus, 0);
}
EXPORT_SYMBOL_GPL(kvm_destroy_vcpus);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_destroy_vcpus);
#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
@ -1365,7 +1365,7 @@ void kvm_put_kvm_no_destroy(struct kvm *kvm)
{
WARN_ON(refcount_dec_and_test(&kvm->users_count));
}
EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_put_kvm_no_destroy);
static int kvm_vm_release(struct inode *inode, struct file *filp)
{
@ -1397,7 +1397,7 @@ out_unlock:
}
return -EINTR;
}
EXPORT_SYMBOL_GPL(kvm_trylock_all_vcpus);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_trylock_all_vcpus);
int kvm_lock_all_vcpus(struct kvm *kvm)
{
@ -1422,7 +1422,7 @@ out_unlock:
}
return r;
}
EXPORT_SYMBOL_GPL(kvm_lock_all_vcpus);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_lock_all_vcpus);
void kvm_unlock_all_vcpus(struct kvm *kvm)
{
@ -1434,7 +1434,7 @@ void kvm_unlock_all_vcpus(struct kvm *kvm)
kvm_for_each_vcpu(i, vcpu, kvm)
mutex_unlock(&vcpu->mutex);
}
EXPORT_SYMBOL_GPL(kvm_unlock_all_vcpus);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_unlock_all_vcpus);
/*
* Allocation size is twice as large as the actual dirty bitmap size.
@ -2142,7 +2142,7 @@ int kvm_set_internal_memslot(struct kvm *kvm,
return kvm_set_memory_region(kvm, mem);
}
EXPORT_SYMBOL_GPL(kvm_set_internal_memslot);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_internal_memslot);
static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region2 *mem)
@ -2201,7 +2201,7 @@ int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
*is_dirty = 1;
return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_dirty_log);
#else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
/**
@ -2636,7 +2636,7 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
return __gfn_to_memslot(kvm_memslots(kvm), gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_memslot);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(gfn_to_memslot);
struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
{
@ -2670,7 +2670,7 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
return NULL;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_memslot);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_gfn_to_memslot);
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
@ -2678,7 +2678,7 @@ bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
return kvm_is_visible_memslot(memslot);
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_is_visible_gfn);
bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
@ -2686,7 +2686,7 @@ bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
return kvm_is_visible_memslot(memslot);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_is_visible_gfn);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_is_visible_gfn);
unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
{
@ -2743,19 +2743,19 @@ unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
{
return gfn_to_hva_many(slot, gfn, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(gfn_to_hva_memslot);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(gfn_to_hva);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
{
return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_gfn_to_hva);
/*
* Return the hva of a @gfn and the R/W attribute if possible.
@ -2819,7 +2819,7 @@ void kvm_release_page_clean(struct page *page)
kvm_set_page_accessed(page);
put_page(page);
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_release_page_clean);
void kvm_release_page_dirty(struct page *page)
{
@ -2829,7 +2829,7 @@ void kvm_release_page_dirty(struct page *page)
kvm_set_page_dirty(page);
kvm_release_page_clean(page);
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_release_page_dirty);
static kvm_pfn_t kvm_resolve_pfn(struct kvm_follow_pfn *kfp, struct page *page,
struct follow_pfnmap_args *map, bool writable)
@ -3073,7 +3073,7 @@ kvm_pfn_t __kvm_faultin_pfn(const struct kvm_memory_slot *slot, gfn_t gfn,
return kvm_follow_pfn(&kfp);
}
EXPORT_SYMBOL_GPL(__kvm_faultin_pfn);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_faultin_pfn);
int kvm_prefetch_pages(struct kvm_memory_slot *slot, gfn_t gfn,
struct page **pages, int nr_pages)
@ -3090,7 +3090,7 @@ int kvm_prefetch_pages(struct kvm_memory_slot *slot, gfn_t gfn,
return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages);
}
EXPORT_SYMBOL_GPL(kvm_prefetch_pages);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_prefetch_pages);
/*
* Don't use this API unless you are absolutely, positively certain that KVM
@ -3112,7 +3112,7 @@ struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn, bool write)
(void)kvm_follow_pfn(&kfp);
return refcounted_page;
}
EXPORT_SYMBOL_GPL(__gfn_to_page);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(__gfn_to_page);
int __kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
bool writable)
@ -3146,7 +3146,7 @@ int __kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
return map->hva ? 0 : -EFAULT;
}
EXPORT_SYMBOL_GPL(__kvm_vcpu_map);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_vcpu_map);
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map)
{
@ -3174,7 +3174,7 @@ void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map)
map->page = NULL;
map->pinned_page = NULL;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_unmap);
static int next_segment(unsigned long len, int offset)
{
@ -3210,7 +3210,7 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
return __kvm_read_guest_page(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_read_guest_page);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
int offset, int len)
@ -3219,7 +3219,7 @@ int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
return __kvm_read_guest_page(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_read_guest_page);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
@ -3239,7 +3239,7 @@ int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
}
return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_read_guest);
int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
{
@ -3259,7 +3259,7 @@ int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned l
}
return 0;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_read_guest);
static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
void *data, int offset, unsigned long len)
@ -3290,7 +3290,7 @@ int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_read_guest_atomic);
/* Copy @len bytes from @data into guest memory at '(@gfn * PAGE_SIZE) + @offset' */
static int __kvm_write_guest_page(struct kvm *kvm,
@ -3320,7 +3320,7 @@ int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_write_guest_page);
int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
const void *data, int offset, int len)
@ -3329,7 +3329,7 @@ int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_write_guest_page);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
unsigned long len)
@ -3350,7 +3350,7 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
}
return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_write_guest);
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
unsigned long len)
@ -3371,7 +3371,7 @@ int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
}
return 0;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_write_guest);
static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
struct gfn_to_hva_cache *ghc,
@ -3420,7 +3420,7 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
struct kvm_memslots *slots = kvm_memslots(kvm);
return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_gfn_to_hva_cache_init);
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned int offset,
@ -3451,14 +3451,14 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_write_guest_offset_cached);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned long len)
{
return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
}
EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_write_guest_cached);
int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned int offset,
@ -3488,14 +3488,14 @@ int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_offset_cached);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_read_guest_offset_cached);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned long len)
{
return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_read_guest_cached);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
@ -3515,7 +3515,7 @@ int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
}
return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_clear_guest);
void mark_page_dirty_in_slot(struct kvm *kvm,
const struct kvm_memory_slot *memslot,
@ -3540,7 +3540,7 @@ void mark_page_dirty_in_slot(struct kvm *kvm,
set_bit_le(rel_gfn, memslot->dirty_bitmap);
}
}
EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(mark_page_dirty_in_slot);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
@ -3549,7 +3549,7 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
memslot = gfn_to_memslot(kvm, gfn);
mark_page_dirty_in_slot(kvm, memslot, gfn);
}
EXPORT_SYMBOL_GPL(mark_page_dirty);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(mark_page_dirty);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
{
@ -3558,7 +3558,7 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_mark_page_dirty);
void kvm_sigset_activate(struct kvm_vcpu *vcpu)
{
@ -3795,7 +3795,7 @@ out:
trace_kvm_vcpu_wakeup(halt_ns, waited, vcpu_valid_wakeup(vcpu));
}
EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_halt);
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
{
@ -3807,7 +3807,7 @@ bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
return false;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_wake_up);
#ifndef CONFIG_S390
/*
@ -3859,7 +3859,7 @@ void __kvm_vcpu_kick(struct kvm_vcpu *vcpu, bool wait)
out:
put_cpu();
}
EXPORT_SYMBOL_GPL(__kvm_vcpu_kick);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_vcpu_kick);
#endif /* !CONFIG_S390 */
int kvm_vcpu_yield_to(struct kvm_vcpu *target)
@ -3882,7 +3882,7 @@ int kvm_vcpu_yield_to(struct kvm_vcpu *target)
return ret;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_yield_to);
/*
* Helper that checks whether a VCPU is eligible for directed yield.
@ -4037,7 +4037,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
/* Ensure vcpu is not eligible during next spinloop */
kvm_vcpu_set_dy_eligible(me, false);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_on_spin);
static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff)
{
@ -5019,7 +5019,7 @@ bool kvm_are_all_memslots_empty(struct kvm *kvm)
return true;
}
EXPORT_SYMBOL_GPL(kvm_are_all_memslots_empty);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_are_all_memslots_empty);
static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
struct kvm_enable_cap *cap)
@ -5474,7 +5474,7 @@ bool file_is_kvm(struct file *file)
{
return file && file->f_op == &kvm_vm_fops;
}
EXPORT_SYMBOL_GPL(file_is_kvm);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(file_is_kvm);
static int kvm_dev_ioctl_create_vm(unsigned long type)
{
@ -5569,10 +5569,10 @@ static struct miscdevice kvm_dev = {
#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
bool enable_virt_at_load = true;
module_param(enable_virt_at_load, bool, 0444);
EXPORT_SYMBOL_GPL(enable_virt_at_load);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_virt_at_load);
__visible bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_rebooting);
static DEFINE_PER_CPU(bool, virtualization_enabled);
static DEFINE_MUTEX(kvm_usage_lock);
@ -5723,7 +5723,7 @@ err_cpuhp:
--kvm_usage_count;
return r;
}
EXPORT_SYMBOL_GPL(kvm_enable_virtualization);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_enable_virtualization);
void kvm_disable_virtualization(void)
{
@ -5736,7 +5736,7 @@ void kvm_disable_virtualization(void)
cpuhp_remove_state(CPUHP_AP_KVM_ONLINE);
kvm_arch_disable_virtualization();
}
EXPORT_SYMBOL_GPL(kvm_disable_virtualization);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_disable_virtualization);
static int kvm_init_virtualization(void)
{
@ -5885,7 +5885,7 @@ int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
r = __kvm_io_bus_write(vcpu, bus, &range, val);
return r < 0 ? r : 0;
}
EXPORT_SYMBOL_GPL(kvm_io_bus_write);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_io_bus_write);
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
gpa_t addr, int len, const void *val, long cookie)
@ -5954,7 +5954,7 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
r = __kvm_io_bus_read(vcpu, bus, &range, val);
return r < 0 ? r : 0;
}
EXPORT_SYMBOL_GPL(kvm_io_bus_read);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_io_bus_read);
static void __free_bus(struct rcu_head *rcu)
{
@ -6078,7 +6078,7 @@ out_unlock:
return iodev;
}
EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_io_bus_get_dev);
static int kvm_debugfs_open(struct inode *inode, struct file *file,
int (*get)(void *, u64 *), int (*set)(void *, u64),
@ -6415,7 +6415,7 @@ struct kvm_vcpu *kvm_get_running_vcpu(void)
return vcpu;
}
EXPORT_SYMBOL_GPL(kvm_get_running_vcpu);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_running_vcpu);
/**
* kvm_get_running_vcpus - get the per-CPU array of currently running vcpus.
@ -6550,7 +6550,7 @@ err_cpu_kick_mask:
kmem_cache_destroy(kvm_vcpu_cache);
return r;
}
EXPORT_SYMBOL_GPL(kvm_init);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_init);
void kvm_exit(void)
{
@ -6573,4 +6573,4 @@ void kvm_exit(void)
kvm_async_pf_deinit();
kvm_irqfd_exit();
}
EXPORT_SYMBOL_GPL(kvm_exit);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_exit);