drm/msm/a6xx: Avoid gx gbif halt during rpm suspend
As per the downstream driver, the gx gbif halt is required only during the recovery sequence, so let's avoid it during a regular rpm suspend.

Signed-off-by: Akhil P Oommen <quic_akhilpo@quicinc.com>
Patchwork: https://patchwork.freedesktop.org/patch/515279/
Link: https://lore.kernel.org/r/20221216223253.1.Ice9c47bfeb1fddb8dc377a3491a043a3ee7fca7d@changeid
Signed-off-by: Rob Clark <robdclark@chromium.org>
parent 13ef096e34
commit f4a75b5933
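For orientation before the hunks: the patch gates the GX-side GBIF halt in a6xx_bus_clear_pending_transactions() on a new gx_off argument. The recovery path (a6xx_gmu_force_off()) keeps requesting it unconditionally, while the regular shutdown path (a6xx_gmu_shutdown()) forwards the new a6xx_gpu->hung flag, which a6xx_recover() sets before triggering the rpm suspend and clears after re-init. A condensed paraphrase of the gating follows; it is not the full function body, and the untouched parts of the function are omitted:

/*
 * Condensed paraphrase of the post-patch logic; see the hunks below for
 * the exact context and the unchanged remainder of the function.
 */
static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu,
		bool gx_off)
{
	struct msm_gpu *gpu = &adreno_gpu->base;

	if (gx_off) {
		/* Halt the gx side of GBIF only on the recovery path */
		gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1);
		spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1);
	}

	/* Halt new client requests on GBIF (unchanged behaviour) */
	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
}

/*
 * Callers after this patch:
 *   a6xx_gmu_force_off():  a6xx_bus_clear_pending_transactions(adreno_gpu, true);
 *   a6xx_gmu_shutdown():   a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung);
 */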
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -876,7 +876,8 @@ static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
 #define GBIF_CLIENT_HALT_MASK BIT(0)
 #define GBIF_ARB_HALT_MASK BIT(1)
 
-static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
+static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu,
+		bool gx_off)
 {
 	struct msm_gpu *gpu = &adreno_gpu->base;
 
@@ -889,9 +890,11 @@ static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
 		return;
 	}
 
-	/* Halt the gx side of GBIF */
-	gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1);
-	spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1);
+	if (gx_off) {
+		/* Halt the gx side of GBIF */
+		gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1);
+		spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1);
+	}
 
 	/* Halt new client requests on GBIF */
 	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
@@ -929,7 +932,7 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
 	/* Halt the gmu cm3 core */
 	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
 
-	a6xx_bus_clear_pending_transactions(adreno_gpu);
+	a6xx_bus_clear_pending_transactions(adreno_gpu, true);
 
 	/* Reset GPU core blocks */
 	gpu_write(gpu, REG_A6XX_RBBM_SW_RESET_CMD, 1);
@@ -1083,7 +1086,7 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
 		return;
 	}
 
-	a6xx_bus_clear_pending_transactions(adreno_gpu);
+	a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung);
 
 	/* tell the GMU we want to slumber */
 	ret = a6xx_gmu_notify_slumber(gmu);
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1277,6 +1277,12 @@ static void a6xx_recover(struct msm_gpu *gpu)
 	if (hang_debug)
 		a6xx_dump(gpu);
 
+	/*
+	 * To handle recovery specific sequences during the rpm suspend we are
+	 * about to trigger
+	 */
+	a6xx_gpu->hung = true;
+
 	/* Halt SQE first */
 	gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3);
 
@@ -1319,6 +1325,7 @@ static void a6xx_recover(struct msm_gpu *gpu)
 	mutex_unlock(&gpu->active_lock);
 
 	msm_gpu_hw_init(gpu);
+	a6xx_gpu->hung = false;
 }
 
 static const char *a6xx_uche_fault_block(struct msm_gpu *gpu, u32 mid)
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -32,6 +32,7 @@ struct a6xx_gpu {
 	void *llc_slice;
 	void *htw_llc_slice;
 	bool have_mmu500;
+	bool hung;
 };
 
 #define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)