xen: swiotlb: Convert mapping routine to rely on physical address

Switch to the .map_phys callback instead of .map_page, so the grant DMA
mapping routine takes a physical address rather than a struct page and
offset pair.
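
The core of the conversion is that a physical address already encodes
what .map_page received as separate page/offset arguments. A minimal
sketch of the idea (illustrative only; the exact code is in the diff
below):

	unsigned long offset = offset_in_page(phys);	/* was the 'offset' argument */
	struct page *page = phys_to_page(phys);		/* was the 'page' argument */

Since phys_to_page() is only meaningful for struct-page backed memory,
the new callback also rejects DMA_ATTR_MMIO mappings up front.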

Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lore.kernel.org/r/20251015-remove-map-page-v5-13-3bbfe3a25cdf@kernel.org
Leon Romanovsky 2025-10-15 12:12:59 +03:00 committed by Marek Szyprowski
parent 33d2c5ee10
commit 936a9f0cb1
1 changed file with 12 additions and 8 deletions


diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c
--- a/drivers/xen/grant-dma-ops.c
+++ b/drivers/xen/grant-dma-ops.c
@@ -163,18 +163,22 @@ static void xen_grant_dma_free_pages(struct device *dev, size_t size,
 	xen_grant_dma_free(dev, size, page_to_virt(vaddr), dma_handle, 0);
 }
 
-static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
-					 unsigned long offset, size_t size,
+static dma_addr_t xen_grant_dma_map_phys(struct device *dev, phys_addr_t phys,
+					 size_t size,
 					 enum dma_data_direction dir,
 					 unsigned long attrs)
 {
 	struct xen_grant_dma_data *data;
+	unsigned long offset = offset_in_page(phys);
 	unsigned long dma_offset = xen_offset_in_page(offset),
 		      pfn_offset = XEN_PFN_DOWN(offset);
 	unsigned int i, n_pages = XEN_PFN_UP(dma_offset + size);
 	grant_ref_t grant;
 	dma_addr_t dma_handle;
 
+	if (unlikely(attrs & DMA_ATTR_MMIO))
+		return DMA_MAPPING_ERROR;
+
 	if (WARN_ON(dir == DMA_NONE))
 		return DMA_MAPPING_ERROR;
 
@@ -190,7 +194,7 @@ static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
 
 	for (i = 0; i < n_pages; i++) {
 		gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
-				pfn_to_gfn(page_to_xen_pfn(page) + i + pfn_offset),
+				pfn_to_gfn(page_to_xen_pfn(phys_to_page(phys)) + i + pfn_offset),
 				dir == DMA_TO_DEVICE);
 	}
 
@@ -199,7 +203,7 @@ static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
 	return dma_handle;
 }
 
-static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+static void xen_grant_dma_unmap_phys(struct device *dev, dma_addr_t dma_handle,
 				     size_t size, enum dma_data_direction dir,
 				     unsigned long attrs)
 {
@@ -242,7 +246,7 @@ static void xen_grant_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 		return;
 
 	for_each_sg(sg, s, nents, i)
-		xen_grant_dma_unmap_page(dev, s->dma_address, sg_dma_len(s), dir,
+		xen_grant_dma_unmap_phys(dev, s->dma_address, sg_dma_len(s), dir,
 				attrs);
 }
 
@@ -257,7 +261,7 @@ static int xen_grant_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		return -EINVAL;
 
 	for_each_sg(sg, s, nents, i) {
-		s->dma_address = xen_grant_dma_map_page(dev, sg_page(s), s->offset,
+		s->dma_address = xen_grant_dma_map_phys(dev, sg_phys(s),
 							s->length, dir, attrs);
 		if (s->dma_address == DMA_MAPPING_ERROR)
 			goto out;
@@ -286,8 +290,8 @@ static const struct dma_map_ops xen_grant_dma_ops = {
 	.free_pages = xen_grant_dma_free_pages,
 	.mmap = dma_common_mmap,
 	.get_sgtable = dma_common_get_sgtable,
-	.map_page = xen_grant_dma_map_page,
-	.unmap_page = xen_grant_dma_unmap_page,
+	.map_phys = xen_grant_dma_map_phys,
+	.unmap_phys = xen_grant_dma_unmap_phys,
 	.map_sg = xen_grant_dma_map_sg,
 	.unmap_sg = xen_grant_dma_unmap_sg,
 	.dma_supported = xen_grant_dma_supported,