memregion: Support fine-grained invalidate by cpu_cache_invalidate_memregion()
Extend cpu_cache_invalidate_memregion() to support invalidating a
particular range of memory by introducing start and length parameters.
Control over the type of invalidation is left for when use cases turn
up; for now everything is Clean and Invalidate.

Where the range is unknown, use the provided cpu_cache_invalidate_all()
helper to document the intent more clearly than passing (0, -1) to
cpu_cache_invalidate_memregion().

Signed-off-by: Yicong Yang <yangyicong@hisilicon.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Acked-by: Davidlohr Bueso <dave@stgolabs.net>
Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Conor Dooley <conor.dooley@microchip.com>
parent f49ae86483
commit b43652d867
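For illustration, a minimal sketch of how a caller is expected to use the
extended interface. The functions example_invalidate_range() and
example_invalidate_unknown_range(), and the assumption that the caller has a
struct resource describing the affected memory, are hypothetical and not part
of this patch:

#include <linux/ioport.h>
#include <linux/memregion.h>

/* Hypothetical caller: the affected physical range is known. */
static int example_invalidate_range(struct resource *res)
{
	if (!cpu_cache_has_invalidate_memregion())
		return -ENXIO;

	/* Clean and Invalidate only the range whose contents changed. */
	return cpu_cache_invalidate_memregion(res->start, resource_size(res));
}

/* Hypothetical caller: the affected range is not known. */
static void example_invalidate_unknown_range(void)
{
	/*
	 * Documents the intent more clearly than open-coding
	 * cpu_cache_invalidate_memregion(0, -1).
	 */
	if (cpu_cache_has_invalidate_memregion())
		cpu_cache_invalidate_all();
}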
@@ -368,7 +368,7 @@ bool cpu_cache_has_invalidate_memregion(void)
 }
 EXPORT_SYMBOL_NS_GPL(cpu_cache_has_invalidate_memregion, "DEVMEM");
 
-int cpu_cache_invalidate_memregion(void)
+int cpu_cache_invalidate_memregion(phys_addr_t start, size_t len)
 {
 	if (WARN_ON_ONCE(!cpu_cache_has_invalidate_memregion()))
 		return -ENXIO;
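The hunk above changes only the function prototype. As a rough sketch, an
architecture with targeted cache maintenance could honour the new parameters
along these lines; arch_clean_inval_pa_range() and arch_clean_inval_all() are
hypothetical per-arch primitives, not something this series provides:

/*
 * Sketch only: assumes hypothetical per-arch Clean+Invalidate helpers
 * arch_clean_inval_pa_range() and arch_clean_inval_all().
 */
int cpu_cache_invalidate_memregion(phys_addr_t start, size_t len)
{
	if (WARN_ON_ONCE(!cpu_cache_has_invalidate_memregion()))
		return -ENXIO;

	if (len == (size_t)-1)
		arch_clean_inval_all();			/* range unknown: flush everything */
	else
		arch_clean_inval_pa_range(start, len);	/* narrow to the given range */

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cpu_cache_invalidate_memregion, "DEVMEM");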
@@ -228,7 +228,10 @@ static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
 		return -ENXIO;
 	}
 
-	cpu_cache_invalidate_memregion();
+	if (!cxlr->params.res)
+		return -ENXIO;
+	cpu_cache_invalidate_memregion(cxlr->params.res->start,
+				       resource_size(cxlr->params.res));
 	return 0;
 }
 
@@ -110,7 +110,7 @@ static void nd_region_remove(struct device *dev)
 	 * here is ok.
 	 */
 	if (cpu_cache_has_invalidate_memregion())
-		cpu_cache_invalidate_memregion();
+		cpu_cache_invalidate_all();
 }
 
 static int child_notify(struct device *dev, void *data)
@@ -90,7 +90,7 @@ static int nd_region_invalidate_memregion(struct nd_region *nd_region)
 		}
 	}
 
-	cpu_cache_invalidate_memregion();
+	cpu_cache_invalidate_all();
 out:
 	for (i = 0; i < nd_region->ndr_mappings; i++) {
 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
@@ -27,6 +27,9 @@ static inline void memregion_free(int id)
 /**
  * cpu_cache_invalidate_memregion - drop any CPU cached data for
  *	memregion
+ * @start: start physical address of the target memory region.
+ * @len: length of the target memory region. -1 for all the regions of
+ *	the target type.
  *
  * Perform cache maintenance after a memory event / operation that
  * changes the contents of physical memory in a cache-incoherent manner.
@@ -45,7 +48,7 @@ static inline void memregion_free(int id)
  * the cache maintenance.
  */
 #ifdef CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
-int cpu_cache_invalidate_memregion(void);
+int cpu_cache_invalidate_memregion(phys_addr_t start, size_t len);
 bool cpu_cache_has_invalidate_memregion(void);
 #else
 static inline bool cpu_cache_has_invalidate_memregion(void)
@@ -53,10 +56,16 @@ static inline bool cpu_cache_has_invalidate_memregion(void)
 	return false;
 }
 
-static inline int cpu_cache_invalidate_memregion(void)
+static inline int cpu_cache_invalidate_memregion(phys_addr_t start, size_t len)
 {
 	WARN_ON_ONCE("CPU cache invalidation required");
 	return -ENXIO;
 }
 #endif
+
+static inline int cpu_cache_invalidate_all(void)
+{
+	return cpu_cache_invalidate_memregion(0, -1);
+}
+
 #endif /* _MEMREGION_H_ */