xen: replace XENFEAT_auto_translated_physmap with xen_pv_domain()
Instead of testing the XENFEAT_auto_translated_physmap feature, just use !xen_pv_domain(), which is equivalent. This has the advantage that a kernel not built with CONFIG_XEN_PV will be smaller due to dead code elimination.

Reviewed-by: Jason Andryuk <jason.andryuk@amd.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
Message-ID: <20250826145608.10352-3-jgross@suse.com>
parent 34c605fe53
commit 0f4283123f
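The size argument in the commit message rests on xen_pv_domain() folding to a compile-time constant when CONFIG_XEN_PV is disabled. Below is a minimal, simplified sketch of that mechanism, assuming the usual definitions from include/xen/xen.h (not the verbatim header; details may differ in the tree this commit applies to):

/* Simplified, self-contained sketch of the mechanism; the real definitions
 * live in include/xen/xen.h and arch code and may differ in detail. */

enum xen_domain_type { XEN_NATIVE, XEN_PV_DOMAIN, XEN_HVM_DOMAIN };
extern enum xen_domain_type xen_domain_type;

#ifdef CONFIG_XEN_PV
#define xen_pv_domain()	(xen_domain_type == XEN_PV_DOMAIN)
#else
#define xen_pv_domain()	0	/* compile-time constant: PV support not built in */
#endif

/*
 * With CONFIG_XEN_PV=n a caller such as
 *
 *	if (!xen_pv_domain())
 *		return pfn;
 *	... PV-only p2m handling ...
 *
 * reduces to "if (1) return pfn;", so the PV-only tail is dead code the
 * compiler can drop.  The old check, xen_feature(XENFEAT_auto_translated_physmap),
 * reads a feature table filled in at boot and can never be folded away.
 */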
@@ -12,9 +12,9 @@
 #include <asm/extable.h>
 #include <asm/page.h>
 
+#include <xen/xen.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/grant_table.h>
-#include <xen/features.h>
 
 /* Xen machine address */
 typedef struct xmaddr {
@@ -162,7 +162,7 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn)
 	 * pfn_to_mfn. This will have to be removed when we figured
 	 * out which call.
 	 */
-	if (xen_feature(XENFEAT_auto_translated_physmap))
+	if (!xen_pv_domain())
 		return pfn;
 
 	mfn = __pfn_to_mfn(pfn);
@@ -175,7 +175,7 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn)
 
 static inline int phys_to_machine_mapping_valid(unsigned long pfn)
 {
-	if (xen_feature(XENFEAT_auto_translated_physmap))
+	if (!xen_pv_domain())
 		return 1;
 
 	return __pfn_to_mfn(pfn) != INVALID_P2M_ENTRY;
@@ -210,7 +210,7 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn)
 	 * gfn_to_pfn. This will have to be removed when we figure
 	 * out which call.
 	 */
-	if (xen_feature(XENFEAT_auto_translated_physmap))
+	if (!xen_pv_domain())
 		return mfn;
 
 	pfn = mfn_to_pfn_no_overrides(mfn);
@@ -242,7 +242,7 @@ static inline xpaddr_t machine_to_phys(xmaddr_t machine)
 /* Pseudo-physical <-> Guest conversion */
 static inline unsigned long pfn_to_gfn(unsigned long pfn)
 {
-	if (xen_feature(XENFEAT_auto_translated_physmap))
+	if (!xen_pv_domain())
 		return pfn;
 	else
 		return pfn_to_mfn(pfn);
@@ -250,7 +250,7 @@ static inline unsigned long pfn_to_gfn(unsigned long pfn)
 
 static inline unsigned long gfn_to_pfn(unsigned long gfn)
 {
-	if (xen_feature(XENFEAT_auto_translated_physmap))
+	if (!xen_pv_domain())
 		return gfn;
 	else
 		return mfn_to_pfn(gfn);
@@ -284,7 +284,7 @@ static inline unsigned long bfn_to_local_pfn(unsigned long mfn)
 {
 	unsigned long pfn;
 
-	if (xen_feature(XENFEAT_auto_translated_physmap))
+	if (!xen_pv_domain())
 		return mfn;
 
 	pfn = mfn_to_pfn(mfn);
@@ -41,7 +41,7 @@ EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
 int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
 			       int nr, struct page **pages)
 {
-	if (xen_feature(XENFEAT_auto_translated_physmap))
+	if (!xen_pv_domain())
 		return xen_xlate_unmap_gfn_range(vma, nr, pages);
 
 	if (!pages)
@@ -686,7 +686,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
 	int i, ret = 0;
 	pte_t *pte;
 
-	if (xen_feature(XENFEAT_auto_translated_physmap))
+	if (!xen_pv_domain())
 		return 0;
 
 	if (kmap_ops) {
@@ -769,7 +769,7 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
 {
 	int i, ret = 0;
 
-	if (xen_feature(XENFEAT_auto_translated_physmap))
+	if (!xen_pv_domain())
 		return 0;
 
 	for (i = 0; i < count; i++) {
@@ -302,7 +302,7 @@ static enum bp_state reserve_additional_memory(void)
 	 * are not restored since this region is now known not to
 	 * conflict with any devices.
 	 */
-	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+	if (xen_pv_domain()) {
 		unsigned long pfn, i;
 
 		pfn = PFN_DOWN(resource->start);
@@ -626,7 +626,7 @@ int xen_alloc_ballooned_pages(unsigned int nr_pages, struct page **pages)
 	 */
 	BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
 
-	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+	if (xen_pv_domain()) {
 		ret = xen_alloc_p2m_entry(page_to_pfn(page));
 		if (ret < 0)
 			goto out_undo;
@@ -1183,7 +1183,7 @@ static int __init gntdev_init(void)
 	if (!xen_domain())
 		return -ENODEV;
 
-	use_ptemod = !xen_feature(XENFEAT_auto_translated_physmap);
+	use_ptemod = xen_pv_domain();
 
 	err = misc_register(&gntdev_miscdev);
 	if (err != 0) {
@@ -1449,7 +1449,7 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
 	unsigned int nr_gframes = end_idx + 1;
 	int rc;
 
-	if (xen_feature(XENFEAT_auto_translated_physmap)) {
+	if (!xen_pv_domain()) {
 		struct xen_add_to_physmap xatp;
 		unsigned int i = end_idx;
 		rc = 0;
@@ -1570,7 +1570,7 @@ static int gnttab_setup(void)
 	if (max_nr_gframes < nr_grant_frames)
 		return -ENOSYS;
 
-	if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
+	if (!xen_pv_domain() && gnttab_shared.addr == NULL) {
 		gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
 		if (gnttab_shared.addr == NULL) {
 			pr_warn("gnttab share frames is not mapped!\n");
@@ -1588,7 +1588,7 @@ int gnttab_resume(void)
 
 int gnttab_suspend(void)
 {
-	if (!xen_feature(XENFEAT_auto_translated_physmap))
+	if (xen_pv_domain())
 		gnttab_interface->unmap_frames();
 	return 0;
 }
@@ -271,7 +271,7 @@ static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
 	struct mmap_gfn_state state;
 
 	/* We only support privcmd_ioctl_mmap_batch for non-auto-translated. */
-	if (xen_feature(XENFEAT_auto_translated_physmap))
+	if (!xen_pv_domain())
 		return -ENOSYS;
 
 	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
@@ -353,7 +353,7 @@ static int mmap_batch_fn(void *data, int nr, void *state)
 	struct page **cur_pages = NULL;
 	int ret;
 
-	if (xen_feature(XENFEAT_auto_translated_physmap))
+	if (!xen_pv_domain())
 		cur_pages = &pages[st->index];
 
 	BUG_ON(nr < 0);
@@ -535,7 +535,7 @@ static long privcmd_ioctl_mmap_batch(
 		ret = -EINVAL;
 		goto out_unlock;
 	}
-	if (xen_feature(XENFEAT_auto_translated_physmap)) {
+	if (!xen_pv_domain()) {
 		ret = alloc_empty_pages(vma, nr_pages);
 		if (ret < 0)
 			goto out_unlock;
@@ -779,8 +779,7 @@ static long privcmd_ioctl_mmap_resource(struct file *file,
 		goto out;
 	}
 
-	if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
-	    xen_feature(XENFEAT_auto_translated_physmap)) {
+	if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) && !xen_pv_domain()) {
 		unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE);
 		struct page **pages;
 		unsigned int i;
@@ -811,8 +810,7 @@ static long privcmd_ioctl_mmap_resource(struct file *file,
 	if (rc)
 		goto out;
 
-	if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
-	    xen_feature(XENFEAT_auto_translated_physmap)) {
+	if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) && !xen_pv_domain()) {
 		rc = xen_remap_vma_range(vma, kdata.addr, kdata.num << PAGE_SHIFT);
 	} else {
 		unsigned int domid =
@@ -1591,7 +1589,7 @@ static void privcmd_close(struct vm_area_struct *vma)
 	int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
 	int rc;
 
-	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
+	if (xen_pv_domain() || !numpgs || !pages)
 		return;
 
 	rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
@@ -105,7 +105,7 @@ static int fill_list(unsigned int nr_pages)
 	 * are not restored since this region is now known not to
 	 * conflict with any devices.
 	 */
-	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+	if (xen_pv_domain()) {
 		xen_pfn_t pfn = PFN_DOWN(res->start);
 
 		for (i = 0; i < alloc_pages; i++) {
@@ -184,7 +184,7 @@ int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
 		pages[i] = pg;
 
 #ifdef CONFIG_XEN_HAVE_PVMMU
-		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+		if (xen_pv_domain()) {
 			ret = xen_alloc_p2m_entry(page_to_pfn(pg));
 			if (ret < 0) {
 				unsigned int j;
@@ -955,7 +955,7 @@ static const struct xenbus_ring_ops ring_ops_hvm = {
 void __init xenbus_ring_ops_init(void)
 {
 #ifdef CONFIG_XEN_PV
-	if (!xen_feature(XENFEAT_auto_translated_physmap))
+	if (xen_pv_domain())
 		ring_ops = &ring_ops_pv;
 	else
 #endif
@@ -164,7 +164,7 @@ gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr,
 {
 	if (flags & GNTMAP_contains_pte)
 		map->host_addr = addr;
-	else if (xen_feature(XENFEAT_auto_translated_physmap))
+	else if (!xen_pv_domain())
 		map->host_addr = __pa(addr);
 	else
 		map->host_addr = addr;
@@ -181,7 +181,7 @@ gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, phys_addr_t addr,
 {
 	if (flags & GNTMAP_contains_pte)
 		unmap->host_addr = addr;
-	else if (xen_feature(XENFEAT_auto_translated_physmap))
+	else if (!xen_pv_domain())
 		unmap->host_addr = __pa(addr);
 	else
 		unmap->host_addr = addr;
@@ -39,7 +39,7 @@ static inline void xenmem_reservation_va_mapping_update(unsigned long count,
 							 xen_pfn_t *frames)
 {
 #ifdef CONFIG_XEN_HAVE_PVMMU
-	if (!xen_feature(XENFEAT_auto_translated_physmap))
+	if (xen_pv_domain())
 		__xenmem_reservation_va_mapping_update(count, pages, frames);
 #endif
 }
@@ -48,7 +48,7 @@ static inline void xenmem_reservation_va_mapping_reset(unsigned long count,
 							struct page **pages)
 {
 #ifdef CONFIG_XEN_HAVE_PVMMU
-	if (!xen_feature(XENFEAT_auto_translated_physmap))
+	if (xen_pv_domain())
 		__xenmem_reservation_va_mapping_reset(count, pages);
 #endif
 }
@@ -6,6 +6,7 @@
 #include <linux/notifier.h>
 #include <linux/efi.h>
 #include <linux/virtio_anchor.h>
+#include <xen/xen.h>
 #include <xen/features.h>
 #include <asm/xen/interface.h>
 #include <xen/interface/vcpu.h>
@@ -116,7 +117,7 @@ static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
 					     unsigned int domid,
 					     struct page **pages)
 {
-	if (xen_feature(XENFEAT_auto_translated_physmap))
+	if (!xen_pv_domain())
 		return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
 						 prot, domid, pages);
 
@@ -150,7 +151,7 @@ static inline int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
 					     int nr, int *err_ptr,
 					     pgprot_t prot, unsigned int domid)
 {
-	if (xen_feature(XENFEAT_auto_translated_physmap))
+	if (!xen_pv_domain())
 		return -EOPNOTSUPP;
 
 	return xen_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid,
@@ -175,7 +176,7 @@ static inline int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
 					     pgprot_t prot, unsigned int domid,
 					     struct page **pages)
 {
-	if (xen_feature(XENFEAT_auto_translated_physmap))
+	if (!xen_pv_domain())
 		return -EOPNOTSUPP;
 
 	return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false);