drm/xe/pf: Clear all LMTT pages on alloc
Our LMEM buffer objects are not cleared by default on alloc and during
VF provisioning we only set up LMTT PTEs for the actually provisioned
LMEM range. But beyond that valid range we might leave some stale data
that could either point to some other VFs' allocations or even to the
PF pages.

Explicitly clear all new LMTT pages to avoid the risk that a malicious
VF would try to exploit that gap.

While at it, add asserts to catch any undesired PTE overwrites and
low-level debug traces to track the LMTT PT life-cycle.

Fixes: b1d2040582 ("drm/xe/pf: Introduce Local Memory Translation Table")
Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: Lukasz Laguna <lukasz.laguna@intel.com>
Reviewed-by: Michał Winiarski <michal.winiarski@intel.com>
Reviewed-by: Piotr Piórkowski <piotr.piorkowski@intel.com>
Link: https://lore.kernel.org/r/20250701220052.1612-1-michal.wajdeczko@intel.com
(cherry picked from commit 3fae6918a3)
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
parent d7b8f8e208
commit 705a412a36
@@ -78,6 +78,9 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level
 	}
 
 	lmtt_assert(lmtt, xe_bo_is_vram(bo));
+	lmtt_debug(lmtt, "level=%u addr=%#llx\n", level, (u64)xe_bo_main_addr(bo, XE_PAGE_SIZE));
+
+	xe_map_memset(lmtt_to_xe(lmtt), &bo->vmap, 0, 0, bo->size);
 
 	pt->level = level;
 	pt->bo = bo;
@@ -91,6 +94,9 @@ out:
 
 static void lmtt_pt_free(struct xe_lmtt_pt *pt)
 {
+	lmtt_debug(&pt->bo->tile->sriov.pf.lmtt, "level=%u addr=%llx\n",
+		   pt->level, (u64)xe_bo_main_addr(pt->bo, XE_PAGE_SIZE));
+
 	xe_bo_unpin_map_no_vm(pt->bo);
 	kfree(pt);
 }
@@ -226,9 +232,14 @@ static void lmtt_write_pte(struct xe_lmtt *lmtt, struct xe_lmtt_pt *pt,
 
 	switch (lmtt->ops->lmtt_pte_size(level)) {
 	case sizeof(u32):
+		lmtt_assert(lmtt, !overflows_type(pte, u32));
+		lmtt_assert(lmtt, !pte || !iosys_map_rd(&pt->bo->vmap, idx * sizeof(u32), u32));
+
 		xe_map_wr(lmtt_to_xe(lmtt), &pt->bo->vmap, idx * sizeof(u32), u32, pte);
 		break;
 	case sizeof(u64):
+		lmtt_assert(lmtt, !pte || !iosys_map_rd(&pt->bo->vmap, idx * sizeof(u64), u64));
+
 		xe_map_wr(lmtt_to_xe(lmtt), &pt->bo->vmap, idx * sizeof(u64), u64, pte);
 		break;
 	default:
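The change follows a generic "clear on alloc, assert before overwrite" pattern for page tables. Below is a minimal, self-contained C sketch of that pattern only; pt_alloc(), pt_write_entry() and PT_ENTRIES are hypothetical names for illustration, while the real driver uses the xe_bo/xe_map/lmtt_* helpers shown in the hunks above.

	/*
	 * Illustrative sketch, not Xe driver code: zero a freshly allocated
	 * page table before use, and assert that a non-zero entry only lands
	 * in a slot that is still empty.
	 */
	#include <assert.h>
	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>

	#define PT_ENTRIES 512	/* hypothetical table size */

	struct pt {
		uint64_t entries[PT_ENTRIES];
	};

	static struct pt *pt_alloc(void)
	{
		struct pt *pt = malloc(sizeof(*pt));

		if (!pt)
			return NULL;
		/* Clear the whole table so no stale data can ever be walked. */
		memset(pt, 0, sizeof(*pt));
		return pt;
	}

	static void pt_write_entry(struct pt *pt, unsigned int idx, uint64_t pte)
	{
		assert(idx < PT_ENTRIES);
		/* A valid PTE must not silently overwrite an existing one. */
		assert(!pte || !pt->entries[idx]);
		pt->entries[idx] = pte;
	}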