iommufd/selftest: Add coverage for the new mmap interface

Extend the loopback test to a new mmap page.

Link: https://patch.msgid.link/r/b02b1220c955c3cf9ea5dd9fe9349ab1b4f8e20b.1752126748.git.nicolinc@nvidia.com
Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
Reviewed-by: Pranjal Shrivastava <praan@google.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Nicolin Chen 2025-07-09 22:59:10 -07:00 committed by Jason Gunthorpe
parent 56e9a0d8e5
commit 80478a2b45
4 changed files with 59 additions and 1 deletion
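
For orientation, the sketch below (not part of the patch) shows how userspace is expected to consume the two new output fields once the vIOMMU allocation has filled in struct iommu_viommu_selftest. It assumes the iommufd file descriptor and the IOMMU_VIOMMU_ALLOC call with IOMMU_VIOMMU_TYPE_SELFTEST driver data have already been set up as in the selftest hunks below; check_mmap_page() is a hypothetical helper name, and the struct is redeclared here only to keep the example self-contained.

#include <stdint.h>
#include <sys/mman.h>
#include <linux/types.h>

/* Mirrors the selftest uAPI struct as extended by this patch */
struct iommu_viommu_selftest {
	__u32 in_data;
	__u32 out_data;
	__aligned_u64 out_mmap_offset;
	__aligned_u64 out_mmap_length;
};

/*
 * @iommufd is the /dev/iommu fd; @data was filled by the vIOMMU allocation.
 * The offset and length passed to mmap() must match exactly what the driver
 * reported; any other combination is rejected with ENXIO.
 */
static int check_mmap_page(int iommufd, const struct iommu_viommu_selftest *data)
{
	uint32_t *page;
	int ret = 0;

	page = mmap(NULL, data->out_mmap_length, PROT_READ | PROT_WRITE,
		    MAP_SHARED, iommufd, data->out_mmap_offset);
	if (page == MAP_FAILED)
		return -1;

	/* Loopback check: the mock driver wrote in_data into the first u32 */
	if (*page != data->in_data)
		ret = -1;

	/*
	 * While the mapping exists, destroying the owning vIOMMU object is
	 * expected to fail with EBUSY (see the selftest below).
	 */
	munmap(page, data->out_mmap_length);
	return ret;
}

The selftest below exercises the same ENXIO and EBUSY paths directly via test_err_mmap() and _test_ioctl_destroy().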


@@ -232,12 +232,16 @@ struct iommu_hwpt_invalidate_selftest {
  *	(IOMMU_VIOMMU_TYPE_SELFTEST)
  * @in_data: Input random data from user space
  * @out_data: Output data (matching @in_data) to user space
+ * @out_mmap_offset: The offset argument for mmap syscall
+ * @out_mmap_length: The length argument for mmap syscall
  *
  * Simply set @out_data=@in_data for a loopback test
  */
 struct iommu_viommu_selftest {
 	__u32 in_data;
 	__u32 out_data;
+	__aligned_u64 out_mmap_offset;
+	__aligned_u64 out_mmap_length;
 };
 
 /* Should not be equal to any defined value in enum iommu_viommu_invalidate_data_type */


@@ -152,6 +152,9 @@ struct mock_viommu {
 	struct mock_iommu_domain *s2_parent;
 	struct mock_hw_queue *hw_queue[IOMMU_TEST_HW_QUEUE_MAX];
 	struct mutex queue_mutex;
+
+	unsigned long mmap_offset;
+	u32 *page; /* Mmap page to test u32 type of in_data */
 };
 
 static inline struct mock_viommu *to_mock_viommu(struct iommufd_viommu *viommu)
@@ -689,6 +692,10 @@ static void mock_viommu_destroy(struct iommufd_viommu *viommu)
 	if (refcount_dec_and_test(&mock_iommu->users))
 		complete(&mock_iommu->complete);
 
+	if (mock_viommu->mmap_offset)
+		iommufd_viommu_destroy_mmap(&mock_viommu->core,
+					    mock_viommu->mmap_offset);
+	free_page((unsigned long)mock_viommu->page);
 	mutex_destroy(&mock_viommu->queue_mutex);
 
 	/* iommufd core frees mock_viommu and viommu */
@@ -887,11 +894,28 @@ static int mock_viommu_init(struct iommufd_viommu *viommu,
 		if (rc)
 			return rc;
 
+		/* Allocate two pages */
+		mock_viommu->page =
+			(u32 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
+		if (!mock_viommu->page)
+			return -ENOMEM;
+
+		rc = iommufd_viommu_alloc_mmap(&mock_viommu->core,
+					       __pa(mock_viommu->page),
+					       PAGE_SIZE * 2,
+					       &mock_viommu->mmap_offset);
+		if (rc)
+			goto err_free_page;
+
+		/* For loopback tests on both the page and out_data */
+		*mock_viommu->page = data.in_data;
 		data.out_data = data.in_data;
+		data.out_mmap_length = PAGE_SIZE * 2;
+		data.out_mmap_offset = mock_viommu->mmap_offset;
 		rc = iommu_copy_struct_to_user(
 			user_data, &data, IOMMU_VIOMMU_TYPE_SELFTEST, out_data);
 		if (rc)
-			return rc;
+			goto err_destroy_mmap;
 	}
 
 	refcount_inc(&mock_iommu->users);
@@ -900,6 +924,13 @@ static int mock_viommu_init(struct iommufd_viommu *viommu,
 
 	viommu->ops = &mock_viommu_ops;
 	return 0;
+
+err_destroy_mmap:
+	iommufd_viommu_destroy_mmap(&mock_viommu->core,
+				    mock_viommu->mmap_offset);
+err_free_page:
+	free_page((unsigned long)mock_viommu->page);
+	return rc;
 }
 
 static const struct iommu_ops mock_ops = {


@@ -2799,6 +2799,7 @@ TEST_F(iommufd_viommu, viommu_alloc_with_data)
 	struct iommu_viommu_selftest data = {
 		.in_data = 0xbeef,
 	};
+	uint32_t *test;
 
 	if (!self->device_id)
 		SKIP(return, "Skipping test for variant no_viommu");
@@ -2807,6 +2808,24 @@ TEST_F(iommufd_viommu, viommu_alloc_with_data)
 			      IOMMU_VIOMMU_TYPE_SELFTEST, &data, sizeof(data),
 			      &self->viommu_id);
 	ASSERT_EQ(data.out_data, data.in_data);
+
+	/* Negative mmap tests -- offset and length cannot be changed */
+	test_err_mmap(ENXIO, data.out_mmap_length,
+		      data.out_mmap_offset + PAGE_SIZE);
+	test_err_mmap(ENXIO, data.out_mmap_length,
+		      data.out_mmap_offset + PAGE_SIZE * 2);
+	test_err_mmap(ENXIO, data.out_mmap_length / 2, data.out_mmap_offset);
+	test_err_mmap(ENXIO, data.out_mmap_length * 2, data.out_mmap_offset);
+
+	/* Now do a correct mmap for a loopback test */
+	test = mmap(NULL, data.out_mmap_length, PROT_READ | PROT_WRITE,
+		    MAP_SHARED, self->fd, data.out_mmap_offset);
+	ASSERT_NE(MAP_FAILED, test);
+	ASSERT_EQ(data.in_data, *test);
+
+	/* The owner of the mmap region should be blocked */
+	EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, self->viommu_id));
+	munmap(test, data.out_mmap_length);
 }
 
 TEST_F(iommufd_viommu, vdevice_alloc)


@@ -56,6 +56,10 @@ static unsigned long PAGE_SIZE;
 #define offsetofend(TYPE, MEMBER) \
 	(offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
 
+#define test_err_mmap(_errno, length, offset) \
+	EXPECT_ERRNO(_errno, (long)mmap(NULL, length, PROT_READ | PROT_WRITE, \
+					MAP_SHARED, self->fd, offset))
+
 static inline void *memfd_mmap(size_t length, int prot, int flags, int *mfd_p)
 {
 	int mfd_flags = (flags & MAP_HUGETLB) ? MFD_HUGETLB : 0;