Merge tag 'amd-drm-next-6.19-2025-12-02' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-next-6.19-2025-12-02:

amdgpu:
- Unified MES fix
- SMU 11 unbalanced irq fix
- Fix for driver reloading on APUs
- pp_table sysfs fix
- Fix memory leak in fence handling
- HDMI fix
- DC cursor fixes
- eDP panel parsing fix
- Brightness fix
- DC analog fixes
- EDID retry fixes
- UserQ fixes
- RAS fixes
- IP discovery fix
- Add missing locking in amdgpu_ttm_access_memory_sdma()
- Smart Power OLED fix
- PRT and page fault fixes for GC 6-8
- VMID reservation fix
- ACP platform device fix
- Add missing vm fault handling for GC 11-12
- VPE fix

amdkfd:
- Partitioning fix

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patch.msgid.link/20251202220101.2039347-1-alexander.deucher@amd.com
commit 0692602def
@ -302,17 +302,19 @@ static int acp_hw_init(struct amdgpu_ip_block *ip_block)
|
|||
adev->acp.acp_res[2].end = adev->acp.acp_res[2].start;
|
||||
|
||||
adev->acp.acp_cell[0].name = "acp_audio_dma";
|
||||
adev->acp.acp_cell[0].id = 0;
|
||||
adev->acp.acp_cell[0].num_resources = 3;
|
||||
adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
|
||||
adev->acp.acp_cell[0].platform_data = &adev->asic_type;
|
||||
adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);
|
||||
|
||||
adev->acp.acp_cell[1].name = "designware-i2s";
|
||||
adev->acp.acp_cell[1].id = 1;
|
||||
adev->acp.acp_cell[1].num_resources = 1;
|
||||
adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
|
||||
adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
|
||||
adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);
|
||||
r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, 2);
|
||||
r = mfd_add_devices(adev->acp.parent, 0, adev->acp.acp_cell, 2, NULL, 0, NULL);
|
||||
if (r)
|
||||
goto failure;
|
||||
r = device_for_each_child(adev->acp.parent, &adev->acp.acp_genpd->gpd,
|
||||
|
|
@ -410,30 +412,34 @@ static int acp_hw_init(struct amdgpu_ip_block *ip_block)
|
|||
adev->acp.acp_res[4].end = adev->acp.acp_res[4].start;
|
||||
|
||||
adev->acp.acp_cell[0].name = "acp_audio_dma";
|
||||
adev->acp.acp_cell[0].id = 0;
|
||||
adev->acp.acp_cell[0].num_resources = 5;
|
||||
adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
|
||||
adev->acp.acp_cell[0].platform_data = &adev->asic_type;
|
||||
adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);
|
||||
|
||||
adev->acp.acp_cell[1].name = "designware-i2s";
|
||||
adev->acp.acp_cell[1].id = 1;
|
||||
adev->acp.acp_cell[1].num_resources = 1;
|
||||
adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
|
||||
adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
|
||||
adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);
|
||||
|
||||
adev->acp.acp_cell[2].name = "designware-i2s";
|
||||
adev->acp.acp_cell[2].id = 2;
|
||||
adev->acp.acp_cell[2].num_resources = 1;
|
||||
adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2];
|
||||
adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
|
||||
adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);
|
||||
|
||||
adev->acp.acp_cell[3].name = "designware-i2s";
|
||||
adev->acp.acp_cell[3].id = 3;
|
||||
adev->acp.acp_cell[3].num_resources = 1;
|
||||
adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3];
|
||||
adev->acp.acp_cell[3].platform_data = &i2s_pdata[2];
|
||||
adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data);
|
||||
|
||||
r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, ACP_DEVS);
|
||||
r = mfd_add_devices(adev->acp.parent, 0, adev->acp.acp_cell, ACP_DEVS, NULL, 0, NULL);
|
||||
if (r)
|
||||
goto failure;
|
||||
|
||||
|
|
|
|||
|
|
@ -2665,6 +2665,8 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
|
|||
chip_name = "navi12";
|
||||
break;
|
||||
case CHIP_CYAN_SKILLFISH:
|
||||
if (adev->discovery.bin)
|
||||
return 0;
|
||||
chip_name = "cyan_skillfish";
|
||||
break;
|
||||
}
|
||||
|
|
@@ -3680,6 +3682,20 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
			"failed to release exclusive mode on fini\n");
	}

	/*
	 * Driver reload on the APU can fail due to firmware validation because
	 * the PSP is always running, as it is shared across the whole SoC.
	 * This same issue does not occur on dGPU because it has a mechanism
	 * that checks whether the PSP is running. A solution for those issues
	 * in the APU is to trigger a GPU reset, but this should be done during
	 * the unload phase to avoid adding boot latency and screen flicker.
	 */
	if ((adev->flags & AMD_IS_APU) && !adev->gmc.is_app_apu) {
		r = amdgpu_asic_reset(adev);
		if (r)
			dev_err(adev->dev, "asic reset on %s failed\n", __func__);
	}

	return 0;
}
@@ -597,6 +597,9 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
		/* reserve engine 5 for firmware */
		if (adev->enable_mes)
			vm_inv_engs[i] &= ~(1 << 5);
		/* reserve engine 6 for uni mes */
		if (adev->enable_uni_mes)
			vm_inv_engs[i] &= ~(1 << 6);
		/* reserve mmhub engine 3 for firmware */
		if (adev->enable_umsch_mm)
			vm_inv_engs[i] &= ~(1 << 3);
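For illustration only (not part of the patch): the reservations above just clear bits in the per-hub engine bitmap before an invalidation engine is handed out. A minimal userspace sketch, assuming a find-first-set style pick, which is an assumption for clarity rather than the in-tree allocator:

/*
 * Sketch: engines 3, 5 and 6 are removed from the candidate mask
 * (UMSCH, firmware MES, unified MES), then the lowest remaining
 * engine is chosen. __builtin_ctz() stands in for a kernel ffs().
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t vm_inv_engs = 0x1FF;		/* engines 0..8 available */

	vm_inv_engs &= ~(1u << 5);		/* reserved for firmware (MES) */
	vm_inv_engs &= ~(1u << 6);		/* reserved for unified MES */
	vm_inv_engs &= ~(1u << 3);		/* reserved for UMSCH (mmhub) */

	printf("remaining mask 0x%03x, first free engine %d\n",
	       vm_inv_engs, __builtin_ctz(vm_inv_engs));
	return 0;
}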
@@ -86,6 +86,11 @@ enum amdgpu_memory_partition {

#define AMDGPU_MAX_MEM_RANGES 8

#define AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY	0x80
#define AMDGPU_GMC9_FAULT_SOURCE_DATA_READ	0x40
#define AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE	0x20
#define AMDGPU_GMC9_FAULT_SOURCE_DATA_EXE	0x10

/*
 * GMC page fault information
 */
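Not taken from the patch itself, just a self-contained illustration of how the new names replace the magic 0x80/0x20 constants when decoding an interrupt vector's src_data[1] (the same test the gmc_v9/v10/v11/v12 hunks below perform); the sample value is made up:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY 0x80
#define AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE 0x20

int main(void)
{
	uint32_t src_data1 = 0xa3;	/* hypothetical IV entry payload */

	/* mirrors: retry_fault / write_fault derivation in the GMC handlers */
	bool retry_fault = !!(src_data1 & AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY);
	bool write_fault = !!(src_data1 & AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE);

	printf("retry=%d write=%d\n", retry_fault, write_fault);
	return 0;
}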
@ -224,6 +224,7 @@ err_fence:
|
|||
kfree((*job)->hw_fence);
|
||||
err_job:
|
||||
kfree(*job);
|
||||
*job = NULL;
|
||||
|
||||
return r;
|
||||
}
|
||||
|
|
@ -245,7 +246,10 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
|
|||
if (r) {
|
||||
if (entity)
|
||||
drm_sched_job_cleanup(&(*job)->base);
|
||||
kfree((*job)->hw_vm_fence);
|
||||
kfree((*job)->hw_fence);
|
||||
kfree(*job);
|
||||
*job = NULL;
|
||||
}
|
||||
|
||||
return r;
|
||||
|
|
|
|||
|
|
@ -150,6 +150,8 @@ static void amdgpu_ras_critical_region_fini(struct amdgpu_device *adev);
|
|||
|
||||
#ifdef CONFIG_X86_MCE_AMD
|
||||
static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
|
||||
static void
|
||||
amdgpu_unregister_bad_pages_mca_notifier(struct amdgpu_device *adev);
|
||||
struct mce_notifier_adev_list {
|
||||
struct amdgpu_device *devs[MAX_GPU_INSTANCE];
|
||||
int num_gpu;
|
||||
|
|
@ -3954,7 +3956,9 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
|
|||
mutex_unlock(&con->recovery_lock);
|
||||
|
||||
amdgpu_ras_critical_region_init(adev);
|
||||
|
||||
#ifdef CONFIG_X86_MCE_AMD
|
||||
amdgpu_unregister_bad_pages_mca_notifier(adev);
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
/* recovery end */
|
||||
|
|
@ -4988,6 +4992,28 @@ static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
|
|||
notifier_registered = true;
|
||||
}
|
||||
}
|
||||
static void amdgpu_unregister_bad_pages_mca_notifier(struct amdgpu_device *adev)
|
||||
{
|
||||
int i, j;
|
||||
|
||||
if (!notifier_registered && !mce_adev_list.num_gpu)
|
||||
return;
|
||||
for (i = 0, j = 0; i < mce_adev_list.num_gpu; i++) {
|
||||
if (mce_adev_list.devs[i] == adev)
|
||||
mce_adev_list.devs[i] = NULL;
|
||||
if (!mce_adev_list.devs[i])
|
||||
++j;
|
||||
}
|
||||
|
||||
if (j == mce_adev_list.num_gpu) {
|
||||
mce_adev_list.num_gpu = 0;
|
||||
/* Unregister x86 notifier with MCE subsystem. */
|
||||
if (notifier_registered) {
|
||||
mce_unregister_decode_chain(&amdgpu_bad_page_nb);
|
||||
notifier_registered = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
|
||||
|
|
|
|||
|
|
@@ -520,9 +520,14 @@ static ssize_t amdgpu_ras_cper_debugfs_read(struct file *f, char __user *buf,
		return -ENOMEM;

	if (!(*offset)) {
		/* Need at least 12 bytes for the header on the first read */
		if (size < ring_header_size)
			return -EINVAL;

		if (copy_to_user(buf, ring_header, ring_header_size))
			return -EFAULT;
		buf += ring_header_size;
		size -= ring_header_size;
	}

	r = amdgpu_ras_mgr_handle_ras_cmd(ring->adev,
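A hedged userspace sketch of why the first read has to cover the ring header: with the new check, an initial read shorter than the 12-byte header returns -EINVAL instead of handing back a truncated header. The debugfs path below is hypothetical, not taken from this series:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/debug/dri/0/ras/cper", O_RDONLY); /* hypothetical node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* a sufficiently large first read returns the header followed by CPER records */
	n = read(fd, buf, sizeof(buf));
	if (n < 0)
		perror("read");
	else
		printf("read %zd bytes (header + records)\n", n);
	close(fd);
	return 0;
}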
@@ -1329,7 +1329,7 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
	    mem->mem_type == AMDGPU_PL_MMIO_REMAP)) {
		flags |= AMDGPU_PTE_SYSTEM;

		if (ttm->caching == ttm_cached)
		if (ttm && ttm->caching == ttm_cached)
			flags |= AMDGPU_PTE_SNOOPED;
	}
@ -1486,6 +1486,7 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
|
|||
if (r)
|
||||
goto out;
|
||||
|
||||
mutex_lock(&adev->mman.gtt_window_lock);
|
||||
amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm);
|
||||
src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) +
|
||||
src_mm.start;
|
||||
|
|
@ -1500,6 +1501,7 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
|
|||
WARN_ON(job->ibs[0].length_dw > num_dw);
|
||||
|
||||
fence = amdgpu_job_submit(job);
|
||||
mutex_unlock(&adev->mman.gtt_window_lock);
|
||||
|
||||
if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
|
||||
r = -ETIMEDOUT;
|
||||
|
|
|
|||
|
|
@ -1069,7 +1069,7 @@ amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
|
|||
}
|
||||
|
||||
/* Prepare a TLB flush fence to be attached to PTs */
|
||||
if (!params->unlocked && vm->is_compute_context) {
|
||||
if (!params->unlocked) {
|
||||
amdgpu_vm_tlb_fence_create(params->adev, vm, fence);
|
||||
|
||||
/* Makes sure no PD/PT is freed before the flush */
|
||||
|
|
@ -2093,7 +2093,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
|
|||
struct amdgpu_bo *bo = before->bo_va->base.bo;
|
||||
|
||||
amdgpu_vm_it_insert(before, &vm->va);
|
||||
if (before->flags & AMDGPU_PTE_PRT_FLAG(adev))
|
||||
if (before->flags & AMDGPU_VM_PAGE_PRT)
|
||||
amdgpu_vm_prt_get(adev);
|
||||
|
||||
if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
|
||||
|
|
@ -2108,7 +2108,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
|
|||
struct amdgpu_bo *bo = after->bo_va->base.bo;
|
||||
|
||||
amdgpu_vm_it_insert(after, &vm->va);
|
||||
if (after->flags & AMDGPU_PTE_PRT_FLAG(adev))
|
||||
if (after->flags & AMDGPU_VM_PAGE_PRT)
|
||||
amdgpu_vm_prt_get(adev);
|
||||
|
||||
if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
|
||||
|
|
@@ -2916,8 +2916,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
	switch (args->in.op) {
	case AMDGPU_VM_OP_RESERVE_VMID:
		/* We only have requirement to reserve vmid from gfxhub */
		amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB(0));
		break;
		return amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB(0));
	case AMDGPU_VM_OP_UNRESERVE_VMID:
		amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB(0));
		break;
@ -156,6 +156,9 @@ static int cik_ih_irq_init(struct amdgpu_device *adev)
|
|||
/* enable irqs */
|
||||
cik_ih_enable_interrupts(adev);
|
||||
|
||||
if (adev->irq.ih_soft.ring_size)
|
||||
adev->irq.ih_soft.enabled = true;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
@ -192,6 +195,9 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev,
|
|||
|
||||
wptr = le32_to_cpu(*ih->wptr_cpu);
|
||||
|
||||
if (ih == &adev->irq.ih_soft)
|
||||
goto out;
|
||||
|
||||
if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
|
||||
wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
|
||||
/* When a ring buffer overflow happen start parsing interrupt
|
||||
|
|
@ -211,6 +217,8 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev,
|
|||
tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
|
||||
WREG32(mmIH_RB_CNTL, tmp);
|
||||
}
|
||||
|
||||
out:
|
||||
return (wptr & ih->ptr_mask);
|
||||
}
|
||||
|
||||
|
|
@ -306,6 +314,10 @@ static int cik_ih_sw_init(struct amdgpu_ip_block *ip_block)
|
|||
if (r)
|
||||
return r;
|
||||
|
||||
r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
r = amdgpu_irq_init(adev);
|
||||
|
||||
return r;
|
||||
|
|
|
|||
|
|
@ -157,6 +157,9 @@ static int cz_ih_irq_init(struct amdgpu_device *adev)
|
|||
/* enable interrupts */
|
||||
cz_ih_enable_interrupts(adev);
|
||||
|
||||
if (adev->irq.ih_soft.ring_size)
|
||||
adev->irq.ih_soft.enabled = true;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
@ -194,6 +197,9 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev,
|
|||
|
||||
wptr = le32_to_cpu(*ih->wptr_cpu);
|
||||
|
||||
if (ih == &adev->irq.ih_soft)
|
||||
goto out;
|
||||
|
||||
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
|
||||
goto out;
|
||||
|
||||
|
|
@ -297,6 +303,10 @@ static int cz_ih_sw_init(struct amdgpu_ip_block *ip_block)
|
|||
if (r)
|
||||
return r;
|
||||
|
||||
r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
r = amdgpu_irq_init(adev);
|
||||
|
||||
return r;
|
||||
|
|
|
|||
|
|
@ -5874,9 +5874,9 @@ static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
|
|||
if (flags & AMDGPU_IB_PREEMPTED)
|
||||
control |= INDIRECT_BUFFER_PRE_RESUME(1);
|
||||
|
||||
if (vmid)
|
||||
if (vmid && !ring->adev->gfx.rs64_enable)
|
||||
gfx_v11_0_ring_emit_de_meta(ring,
|
||||
(!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
|
||||
!amdgpu_sriov_vf(ring->adev) && (flags & AMDGPU_IB_PREEMPTED));
|
||||
}
|
||||
|
||||
amdgpu_ring_write(ring, header);
|
||||
|
|
|
|||
|
|
@ -103,8 +103,10 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
|
|||
uint32_t vmhub_index = entry->client_id == SOC15_IH_CLIENTID_VMC ?
|
||||
AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0);
|
||||
struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index];
|
||||
bool retry_fault = !!(entry->src_data[1] & 0x80);
|
||||
bool write_fault = !!(entry->src_data[1] & 0x20);
|
||||
bool retry_fault = !!(entry->src_data[1] &
|
||||
AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY);
|
||||
bool write_fault = !!(entry->src_data[1] &
|
||||
AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE);
|
||||
struct amdgpu_task_info *task_info;
|
||||
uint32_t status = 0;
|
||||
u64 addr;
|
||||
|
|
|
|||
|
|
@ -103,12 +103,41 @@ static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
|
|||
uint32_t vmhub_index = entry->client_id == SOC21_IH_CLIENTID_VMC ?
|
||||
AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0);
|
||||
struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index];
|
||||
bool retry_fault = !!(entry->src_data[1] &
|
||||
AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY);
|
||||
bool write_fault = !!(entry->src_data[1] &
|
||||
AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE);
|
||||
uint32_t status = 0;
|
||||
u64 addr;
|
||||
|
||||
addr = (u64)entry->src_data[0] << 12;
|
||||
addr |= ((u64)entry->src_data[1] & 0xf) << 44;
|
||||
|
||||
if (retry_fault) {
|
||||
/* Returning 1 here also prevents sending the IV to the KFD */
|
||||
|
||||
/* Process it only if it's the first fault for this address */
|
||||
if (entry->ih != &adev->irq.ih_soft &&
|
||||
amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
|
||||
entry->timestamp))
|
||||
return 1;
|
||||
|
||||
/* Delegate it to a different ring if the hardware hasn't
|
||||
* already done it.
|
||||
*/
|
||||
if (entry->ih == &adev->irq.ih) {
|
||||
amdgpu_irq_delegate(adev, entry, 8);
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Try to handle the recoverable page faults by filling page
|
||||
* tables
|
||||
*/
|
||||
if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr,
|
||||
entry->timestamp, write_fault))
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (!amdgpu_sriov_vf(adev)) {
|
||||
/*
|
||||
* Issue a dummy read to wait for the status register to
|
||||
|
|
|
|||
|
|
@ -91,6 +91,10 @@ static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev,
|
|||
struct amdgpu_iv_entry *entry)
|
||||
{
|
||||
struct amdgpu_vmhub *hub;
|
||||
bool retry_fault = !!(entry->src_data[1] &
|
||||
AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY);
|
||||
bool write_fault = !!(entry->src_data[1] &
|
||||
AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE);
|
||||
uint32_t status = 0;
|
||||
u64 addr;
|
||||
|
||||
|
|
@ -102,6 +106,31 @@ static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev,
|
|||
else
|
||||
hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
|
||||
|
||||
if (retry_fault) {
|
||||
/* Returning 1 here also prevents sending the IV to the KFD */
|
||||
|
||||
/* Process it only if it's the first fault for this address */
|
||||
if (entry->ih != &adev->irq.ih_soft &&
|
||||
amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
|
||||
entry->timestamp))
|
||||
return 1;
|
||||
|
||||
/* Delegate it to a different ring if the hardware hasn't
|
||||
* already done it.
|
||||
*/
|
||||
if (entry->ih == &adev->irq.ih) {
|
||||
amdgpu_irq_delegate(adev, entry, 8);
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Try to handle the recoverable page faults by filling page
|
||||
* tables
|
||||
*/
|
||||
if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr,
|
||||
entry->timestamp, write_fault))
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (!amdgpu_sriov_vf(adev)) {
|
||||
/*
|
||||
* Issue a dummy read to wait for the status register to
|
||||
|
|
|
|||
|
|
@ -610,23 +610,21 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
|
|||
}
|
||||
|
||||
static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
|
||||
u32 status, u32 addr, u32 mc_client)
|
||||
u32 status, u32 addr)
|
||||
{
|
||||
u32 mc_id;
|
||||
u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
|
||||
u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
|
||||
PROTECTIONS);
|
||||
char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
|
||||
(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
|
||||
|
||||
mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
|
||||
MEMORY_CLIENT_ID);
|
||||
|
||||
dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
|
||||
dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from %d\n",
|
||||
protections, vmid, addr,
|
||||
REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
|
||||
MEMORY_CLIENT_RW) ?
|
||||
"write" : "read", block, mc_client, mc_id);
|
||||
"write" : "read", mc_id);
|
||||
}
|
||||
|
||||
static const u32 mc_cg_registers[] = {
|
||||
|
|
@ -1072,6 +1070,12 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
|
|||
{
|
||||
u32 addr, status;
|
||||
|
||||
/* Delegate to the soft IRQ handler ring */
|
||||
if (adev->irq.ih_soft.enabled && entry->ih != &adev->irq.ih_soft) {
|
||||
amdgpu_irq_delegate(adev, entry, 4);
|
||||
return 1;
|
||||
}
|
||||
|
||||
addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
|
||||
status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
|
||||
WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
|
||||
|
|
@ -1079,6 +1083,10 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
|
|||
if (!addr && !status)
|
||||
return 0;
|
||||
|
||||
amdgpu_vm_update_fault_cache(adev, entry->pasid,
|
||||
((u64)addr) << AMDGPU_GPU_PAGE_SHIFT,
|
||||
status, AMDGPU_GFXHUB(0));
|
||||
|
||||
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
|
||||
gmc_v6_0_set_fault_enable_default(adev, false);
|
||||
|
||||
|
|
@ -1089,7 +1097,7 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
|
|||
addr);
|
||||
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
|
||||
status);
|
||||
gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
|
||||
gmc_v6_0_vm_decode_fault(adev, status, addr);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
|
|
|||
|
|
@ -1261,6 +1261,12 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
|
|||
{
|
||||
u32 addr, status, mc_client, vmid;
|
||||
|
||||
/* Delegate to the soft IRQ handler ring */
|
||||
if (adev->irq.ih_soft.enabled && entry->ih != &adev->irq.ih_soft) {
|
||||
amdgpu_irq_delegate(adev, entry, 4);
|
||||
return 1;
|
||||
}
|
||||
|
||||
addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
|
||||
status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
|
||||
mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
|
||||
|
|
|
|||
|
|
@ -1439,6 +1439,12 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/* Delegate to the soft IRQ handler ring */
|
||||
if (adev->irq.ih_soft.enabled && entry->ih != &adev->irq.ih_soft) {
|
||||
amdgpu_irq_delegate(adev, entry, 4);
|
||||
return 1;
|
||||
}
|
||||
|
||||
addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
|
||||
status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
|
||||
mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
|
||||
|
|
|
|||
|
|
@ -544,8 +544,10 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
|
|||
struct amdgpu_irq_src *source,
|
||||
struct amdgpu_iv_entry *entry)
|
||||
{
|
||||
bool retry_fault = !!(entry->src_data[1] & 0x80);
|
||||
bool write_fault = !!(entry->src_data[1] & 0x20);
|
||||
bool retry_fault = !!(entry->src_data[1] &
|
||||
AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY);
|
||||
bool write_fault = !!(entry->src_data[1] &
|
||||
AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE);
|
||||
uint32_t status = 0, cid = 0, rw = 0, fed = 0;
|
||||
struct amdgpu_task_info *task_info;
|
||||
struct amdgpu_vmhub *hub;
|
||||
|
|
|
|||
|
|
@ -157,6 +157,9 @@ static int iceland_ih_irq_init(struct amdgpu_device *adev)
|
|||
/* enable interrupts */
|
||||
iceland_ih_enable_interrupts(adev);
|
||||
|
||||
if (adev->irq.ih_soft.ring_size)
|
||||
adev->irq.ih_soft.enabled = true;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
@ -194,6 +197,9 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev,
|
|||
|
||||
wptr = le32_to_cpu(*ih->wptr_cpu);
|
||||
|
||||
if (ih == &adev->irq.ih_soft)
|
||||
goto out;
|
||||
|
||||
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
|
||||
goto out;
|
||||
|
||||
|
|
@ -296,6 +302,10 @@ static int iceland_ih_sw_init(struct amdgpu_ip_block *ip_block)
|
|||
if (r)
|
||||
return r;
|
||||
|
||||
r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
r = amdgpu_irq_init(adev);
|
||||
|
||||
return r;
|
||||
|
|
|
|||
|
|
@ -1390,7 +1390,7 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
|
|||
adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
|
||||
break;
|
||||
case IP_VERSION(6, 0, 3):
|
||||
if ((adev->sdma.instance[0].fw_version >= 27) && !adev->sdma.disable_uq)
|
||||
if (adev->sdma.instance[0].fw_version >= 29 && !adev->sdma.disable_uq)
|
||||
adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
|
||||
break;
|
||||
case IP_VERSION(6, 1, 0):
|
||||
|
|
|
|||
|
|
@ -96,6 +96,9 @@ static int si_ih_irq_init(struct amdgpu_device *adev)
|
|||
pci_set_master(adev->pdev);
|
||||
si_ih_enable_interrupts(adev);
|
||||
|
||||
if (adev->irq.ih_soft.ring_size)
|
||||
adev->irq.ih_soft.enabled = true;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
@ -112,6 +115,9 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev,
|
|||
|
||||
wptr = le32_to_cpu(*ih->wptr_cpu);
|
||||
|
||||
if (ih == &adev->irq.ih_soft)
|
||||
goto out;
|
||||
|
||||
if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
|
||||
wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
|
||||
dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
|
||||
|
|
@ -127,6 +133,8 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev,
|
|||
tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
|
||||
WREG32(IH_RB_CNTL, tmp);
|
||||
}
|
||||
|
||||
out:
|
||||
return (wptr & ih->ptr_mask);
|
||||
}
|
||||
|
||||
|
|
@ -175,6 +183,10 @@ static int si_ih_sw_init(struct amdgpu_ip_block *ip_block)
|
|||
if (r)
|
||||
return r;
|
||||
|
||||
r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
return amdgpu_irq_init(adev);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -853,10 +853,6 @@ static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
|
|||
{
|
||||
u32 sol_reg;
|
||||
|
||||
/* CP hangs in IGT reloading test on RN, reset to WA */
|
||||
if (adev->asic_type == CHIP_RENOIR)
|
||||
return true;
|
||||
|
||||
if (amdgpu_gmc_need_reset_on_init(adev))
|
||||
return true;
|
||||
if (amdgpu_psp_tos_reload_needed(adev))
|
||||
|
|
|
|||
|
|
@ -159,6 +159,9 @@ static int tonga_ih_irq_init(struct amdgpu_device *adev)
|
|||
/* enable interrupts */
|
||||
tonga_ih_enable_interrupts(adev);
|
||||
|
||||
if (adev->irq.ih_soft.ring_size)
|
||||
adev->irq.ih_soft.enabled = true;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
@ -196,6 +199,9 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev,
|
|||
|
||||
wptr = le32_to_cpu(*ih->wptr_cpu);
|
||||
|
||||
if (ih == &adev->irq.ih_soft)
|
||||
goto out;
|
||||
|
||||
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
|
||||
goto out;
|
||||
|
||||
|
|
@ -306,6 +312,10 @@ static int tonga_ih_sw_init(struct amdgpu_ip_block *ip_block)
|
|||
if (r)
|
||||
return r;
|
||||
|
||||
r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
adev->irq.ih.use_doorbell = true;
|
||||
adev->irq.ih.doorbell_index = adev->doorbell_index.ih;
|
||||
|
||||
|
|
|
|||
|
|
@ -141,7 +141,7 @@ static int vcn_v4_0_3_late_init(struct amdgpu_ip_block *ip_block)
|
|||
adev->vcn.supported_reset =
|
||||
amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
|
||||
|
||||
if (amdgpu_dpm_reset_vcn_is_supported(adev))
|
||||
if (amdgpu_dpm_reset_vcn_is_supported(adev) && !amdgpu_sriov_vf(adev))
|
||||
adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
|
||||
return 0;
|
||||
|
|
|
|||
|
|
@ -122,7 +122,9 @@ static int vcn_v5_0_1_late_init(struct amdgpu_ip_block *ip_block)
|
|||
|
||||
switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
|
||||
case IP_VERSION(13, 0, 12):
|
||||
if ((adev->psp.sos.fw_version >= 0x00450025) && amdgpu_dpm_reset_vcn_is_supported(adev))
|
||||
if ((adev->psp.sos.fw_version >= 0x00450025) &&
|
||||
amdgpu_dpm_reset_vcn_is_supported(adev) &&
|
||||
!amdgpu_sriov_vf(adev))
|
||||
adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
break;
|
||||
default:
|
||||
|
|
|
|||
|
|
@ -530,7 +530,9 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
|
|||
sysfs_show_32bit_prop(buffer, offs, "sdma_fw_version",
|
||||
dev->gpu->kfd->sdma_fw_version);
|
||||
sysfs_show_64bit_prop(buffer, offs, "unique_id",
|
||||
dev->gpu->xcp ?
|
||||
dev->gpu->xcp &&
|
||||
(dev->gpu->xcp->xcp_mgr->mode !=
|
||||
AMDGPU_SPX_PARTITION_MODE) ?
|
||||
dev->gpu->xcp->unique_id :
|
||||
dev->gpu->adev->unique_id);
|
||||
sysfs_show_32bit_prop(buffer, offs, "num_xcc",
|
||||
|
|
|
|||
|
|
@ -3932,6 +3932,97 @@ void amdgpu_dm_update_connector_after_detect(
|
|||
mutex_unlock(&dev->mode_config.mutex);
|
||||
}
|
||||
|
||||
static bool are_sinks_equal(const struct dc_sink *sink1, const struct dc_sink *sink2)
|
||||
{
|
||||
if (!sink1 || !sink2)
|
||||
return false;
|
||||
if (sink1->sink_signal != sink2->sink_signal)
|
||||
return false;
|
||||
|
||||
if (sink1->dc_edid.length != sink2->dc_edid.length)
|
||||
return false;
|
||||
|
||||
if (memcmp(sink1->dc_edid.raw_edid, sink2->dc_edid.raw_edid,
|
||||
sink1->dc_edid.length) != 0)
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* DOC: hdmi_hpd_debounce_work
|
||||
*
|
||||
* HDMI HPD debounce delay in milliseconds. When an HDMI display toggles HPD
|
||||
* (such as during power save transitions), this delay determines how long to
|
||||
* wait before processing the HPD event. This allows distinguishing between a
|
||||
* physical unplug (>hdmi_hpd_debounce_delay)
|
||||
* and a spontaneous RX HPD toggle (<hdmi_hpd_debounce_delay).
|
||||
*
|
||||
* If the toggle is less than this delay, the driver compares sink capabilities
|
||||
* and permits a hotplug event if they changed.
|
||||
*
|
||||
* The default value of 1500ms was chosen based on experimental testing with
|
||||
* various monitors that exhibit spontaneous HPD toggling behavior.
|
||||
*/
|
||||
static void hdmi_hpd_debounce_work(struct work_struct *work)
|
||||
{
|
||||
struct amdgpu_dm_connector *aconnector =
|
||||
container_of(to_delayed_work(work), struct amdgpu_dm_connector,
|
||||
hdmi_hpd_debounce_work);
|
||||
struct drm_connector *connector = &aconnector->base;
|
||||
struct drm_device *dev = connector->dev;
|
||||
struct amdgpu_device *adev = drm_to_adev(dev);
|
||||
struct dc *dc = aconnector->dc_link->ctx->dc;
|
||||
bool fake_reconnect = false;
|
||||
bool reallow_idle = false;
|
||||
bool ret = false;
|
||||
guard(mutex)(&aconnector->hpd_lock);
|
||||
|
||||
/* Re-detect the display */
|
||||
scoped_guard(mutex, &adev->dm.dc_lock) {
|
||||
if (dc->caps.ips_support && dc->ctx->dmub_srv->idle_allowed) {
|
||||
dc_allow_idle_optimizations(dc, false);
|
||||
reallow_idle = true;
|
||||
}
|
||||
ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
|
||||
}
|
||||
|
||||
if (ret) {
|
||||
/* Apply workaround delay for certain panels */
|
||||
apply_delay_after_dpcd_poweroff(adev, aconnector->dc_sink);
|
||||
/* Compare sinks to determine if this was a spontaneous HPD toggle */
|
||||
if (are_sinks_equal(aconnector->dc_link->local_sink, aconnector->hdmi_prev_sink)) {
|
||||
/*
|
||||
* Sinks match - this was a spontaneous HDMI HPD toggle.
|
||||
*/
|
||||
drm_dbg_kms(dev, "HDMI HPD: Sink unchanged after debounce, internal re-enable\n");
|
||||
fake_reconnect = true;
|
||||
}
|
||||
|
||||
/* Update connector state */
|
||||
amdgpu_dm_update_connector_after_detect(aconnector);
|
||||
|
||||
drm_modeset_lock_all(dev);
|
||||
dm_restore_drm_connector_state(dev, connector);
|
||||
drm_modeset_unlock_all(dev);
|
||||
|
||||
/* Only notify OS if sink actually changed */
|
||||
if (!fake_reconnect && aconnector->base.force == DRM_FORCE_UNSPECIFIED)
|
||||
drm_kms_helper_hotplug_event(dev);
|
||||
}
|
||||
|
||||
/* Release the cached sink reference */
|
||||
if (aconnector->hdmi_prev_sink) {
|
||||
dc_sink_release(aconnector->hdmi_prev_sink);
|
||||
aconnector->hdmi_prev_sink = NULL;
|
||||
}
|
||||
|
||||
scoped_guard(mutex, &adev->dm.dc_lock) {
|
||||
if (reallow_idle && dc->caps.ips_support)
|
||||
dc_allow_idle_optimizations(dc, true);
|
||||
}
|
||||
}
|
||||
|
||||
static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
|
||||
{
|
||||
struct drm_connector *connector = &aconnector->base;
|
||||
|
|
@ -3941,6 +4032,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
|
|||
struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
|
||||
struct dc *dc = aconnector->dc_link->ctx->dc;
|
||||
bool ret = false;
|
||||
bool debounce_required = false;
|
||||
|
||||
if (adev->dm.disable_hpd_irq)
|
||||
return;
|
||||
|
|
@ -3963,6 +4055,14 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
|
|||
if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
|
||||
drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
|
||||
|
||||
/*
|
||||
* Check for HDMI disconnect with debounce enabled.
|
||||
*/
|
||||
debounce_required = (aconnector->hdmi_hpd_debounce_delay_ms > 0 &&
|
||||
dc_is_hdmi_signal(aconnector->dc_link->connector_signal) &&
|
||||
new_connection_type == dc_connection_none &&
|
||||
aconnector->dc_link->local_sink != NULL);
|
||||
|
||||
if (aconnector->base.force && new_connection_type == dc_connection_none) {
|
||||
emulated_link_detect(aconnector->dc_link);
|
||||
|
||||
|
|
@ -3972,7 +4072,34 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
|
|||
|
||||
if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
|
||||
drm_kms_helper_connector_hotplug_event(connector);
|
||||
} else if (debounce_required) {
|
||||
/*
|
||||
* HDMI disconnect detected - schedule delayed work instead of
|
||||
* processing immediately. This allows us to coalesce spurious
|
||||
* HDMI signals from physical unplugs.
|
||||
*/
|
||||
drm_dbg_kms(dev, "HDMI HPD: Disconnect detected, scheduling debounce work (%u ms)\n",
|
||||
aconnector->hdmi_hpd_debounce_delay_ms);
|
||||
|
||||
/* Cache the current sink for later comparison */
|
||||
if (aconnector->hdmi_prev_sink)
|
||||
dc_sink_release(aconnector->hdmi_prev_sink);
|
||||
aconnector->hdmi_prev_sink = aconnector->dc_link->local_sink;
|
||||
if (aconnector->hdmi_prev_sink)
|
||||
dc_sink_retain(aconnector->hdmi_prev_sink);
|
||||
|
||||
/* Schedule delayed detection. */
|
||||
if (mod_delayed_work(system_wq,
|
||||
&aconnector->hdmi_hpd_debounce_work,
|
||||
msecs_to_jiffies(aconnector->hdmi_hpd_debounce_delay_ms)))
|
||||
drm_dbg_kms(dev, "HDMI HPD: Re-scheduled debounce work\n");
|
||||
|
||||
} else {
|
||||
|
||||
/* If the aconnector->hdmi_hpd_debounce_work is scheduled, exit early */
|
||||
if (delayed_work_pending(&aconnector->hdmi_hpd_debounce_work))
|
||||
return;
|
||||
|
||||
scoped_guard(mutex, &adev->dm.dc_lock) {
|
||||
dc_exit_ips_for_hw_access(dc);
|
||||
ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
|
||||
|
|
@ -4998,6 +5125,21 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
|
|||
struct dc_link *link;
|
||||
u32 brightness;
|
||||
bool rc, reallow_idle = false;
|
||||
struct drm_connector *connector;
|
||||
|
||||
list_for_each_entry(connector, &dm->ddev->mode_config.connector_list, head) {
|
||||
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
|
||||
|
||||
if (aconnector->bl_idx != bl_idx)
|
||||
continue;
|
||||
|
||||
/* if connector is off, save the brightness for next time it's on */
|
||||
if (!aconnector->base.encoder) {
|
||||
dm->brightness[bl_idx] = user_brightness;
|
||||
dm->actual_brightness[bl_idx] = 0;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
amdgpu_dm_update_backlight_caps(dm, bl_idx);
|
||||
caps = &dm->backlight_caps[bl_idx];
|
||||
|
|
@ -7594,6 +7736,13 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
|
|||
if (aconnector->mst_mgr.dev)
|
||||
drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
|
||||
|
||||
/* Cancel and flush any pending HDMI HPD debounce work */
|
||||
cancel_delayed_work_sync(&aconnector->hdmi_hpd_debounce_work);
|
||||
if (aconnector->hdmi_prev_sink) {
|
||||
dc_sink_release(aconnector->hdmi_prev_sink);
|
||||
aconnector->hdmi_prev_sink = NULL;
|
||||
}
|
||||
|
||||
if (aconnector->bl_idx != -1) {
|
||||
backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]);
|
||||
dm->backlight_dev[aconnector->bl_idx] = NULL;
|
||||
|
|
@ -8690,8 +8839,14 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect
|
|||
if (!(amdgpu_freesync_vid_mode && drm_edid))
|
||||
return;
|
||||
|
||||
if (!amdgpu_dm_connector->dc_sink || amdgpu_dm_connector->dc_sink->edid_caps.analog ||
|
||||
!dc_supports_vrr(amdgpu_dm_connector->dc_sink->ctx->dce_version))
|
||||
if (!amdgpu_dm_connector->dc_sink || !amdgpu_dm_connector->dc_link)
|
||||
return;
|
||||
|
||||
if (!dc_supports_vrr(amdgpu_dm_connector->dc_sink->ctx->dce_version))
|
||||
return;
|
||||
|
||||
if (dc_connector_supports_analog(amdgpu_dm_connector->dc_link->link_id.id) &&
|
||||
amdgpu_dm_connector->dc_sink->edid_caps.analog)
|
||||
return;
|
||||
|
||||
if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
|
||||
|
|
@ -8703,11 +8858,11 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
|
|||
{
|
||||
struct amdgpu_dm_connector *amdgpu_dm_connector =
|
||||
to_amdgpu_dm_connector(connector);
|
||||
struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
|
||||
struct drm_encoder *encoder;
|
||||
const struct drm_edid *drm_edid = amdgpu_dm_connector->drm_edid;
|
||||
struct dc_link_settings *verified_link_cap =
|
||||
&amdgpu_dm_connector->dc_link->verified_link_cap;
|
||||
const struct dc *dc = amdgpu_dm_connector->dc_link->dc;
|
||||
struct dc_link_settings *verified_link_cap = &dc_link->verified_link_cap;
|
||||
const struct dc *dc = dc_link->dc;
|
||||
|
||||
encoder = amdgpu_dm_connector_to_encoder(connector);
|
||||
|
||||
|
|
@ -8718,7 +8873,9 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
|
|||
amdgpu_dm_connector->num_modes +=
|
||||
drm_add_modes_noedid(connector, 1920, 1080);
|
||||
|
||||
if (amdgpu_dm_connector->dc_sink && amdgpu_dm_connector->dc_sink->edid_caps.analog) {
|
||||
if (amdgpu_dm_connector->dc_sink &&
|
||||
amdgpu_dm_connector->dc_sink->edid_caps.analog &&
|
||||
dc_connector_supports_analog(dc_link->link_id.id)) {
|
||||
/* Analog monitor connected by DAC load detection.
|
||||
* Add common modes. It will be up to the user to select one that works.
|
||||
*/
|
||||
|
|
@ -8773,6 +8930,10 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
|
|||
mutex_init(&aconnector->hpd_lock);
|
||||
mutex_init(&aconnector->handle_mst_msg_ready);
|
||||
|
||||
aconnector->hdmi_hpd_debounce_delay_ms = AMDGPU_DM_HDMI_HPD_DEBOUNCE_MS;
|
||||
INIT_DELAYED_WORK(&aconnector->hdmi_hpd_debounce_work, hdmi_hpd_debounce_work);
|
||||
aconnector->hdmi_prev_sink = NULL;
|
||||
|
||||
/*
|
||||
* configure support HPD hot plug connector_>polled default value is 0
|
||||
* which means HPD hot plug not supported
|
||||
|
|
|
|||
|
|
@ -59,6 +59,7 @@
|
|||
|
||||
#define AMDGPU_HDR_MULT_DEFAULT (0x100000000LL)
|
||||
|
||||
#define AMDGPU_DM_HDMI_HPD_DEBOUNCE_MS 1500
|
||||
/*
|
||||
#include "include/amdgpu_dal_power_if.h"
|
||||
#include "amdgpu_dm_irq.h"
|
||||
|
|
@ -819,6 +820,11 @@ struct amdgpu_dm_connector {
|
|||
bool pack_sdp_v1_3;
|
||||
enum adaptive_sync_type as_type;
|
||||
struct amdgpu_hdmi_vsdb_info vsdb_info;
|
||||
|
||||
/* HDMI HPD debounce support */
|
||||
unsigned int hdmi_hpd_debounce_delay_ms;
|
||||
struct delayed_work hdmi_hpd_debounce_work;
|
||||
struct dc_sink *hdmi_prev_sink;
|
||||
};
|
||||
|
||||
static inline void amdgpu_dm_set_mst_status(uint8_t *status,
|
||||
|
|
|
|||
|
|
@@ -998,8 +998,8 @@ enum dc_edid_status dm_helpers_read_local_edid(
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct drm_connector *connector = &aconnector->base;
	struct i2c_adapter *ddc;
	int retry = 3;
	enum dc_edid_status edid_status;
	int retry = 25;
	enum dc_edid_status edid_status = EDID_NO_RESPONSE;
	const struct drm_edid *drm_edid;
	const struct edid *edid;

@@ -1029,7 +1029,7 @@ enum dc_edid_status dm_helpers_read_local_edid(
		}

		if (!drm_edid)
			return EDID_NO_RESPONSE;
			continue;

		edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw()
		if (!edid ||

@@ -1047,7 +1047,7 @@ enum dc_edid_status dm_helpers_read_local_edid(
				&sink->dc_edid,
				&sink->edid_caps);

	} while (edid_status == EDID_BAD_CHECKSUM && --retry > 0);
	} while ((edid_status == EDID_BAD_CHECKSUM || edid_status == EDID_NO_RESPONSE) && --retry > 0);

	if (edid_status != EDID_OK)
		DRM_ERROR("EDID err: %d, on connector: %s",
@@ -884,26 +884,28 @@ struct dsc_mst_fairness_params {
};

#if defined(CONFIG_DRM_AMD_DC_FP)
static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link)
static uint64_t kbps_to_pbn(int kbps, bool is_peak_pbn)
{
	u8 link_coding_cap;
	uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B;
	uint64_t effective_kbps = (uint64_t)kbps;

	link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link);
	if (link_coding_cap == DP_128b_132b_ENCODING)
		fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B;
	if (is_peak_pbn) { // add 0.6% (1006/1000) overhead into effective kbps
		effective_kbps *= 1006;
		effective_kbps = div_u64(effective_kbps, 1000);
	}

	return fec_overhead_multiplier_x1000;
	return (uint64_t) DIV64_U64_ROUND_UP(effective_kbps * 64, (54 * 8 * 1000));
}

static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000)
static uint32_t pbn_to_kbps(unsigned int pbn, bool with_margin)
{
	u64 peak_kbps = kbps;
	uint64_t pbn_effective = (uint64_t)pbn;

	peak_kbps *= 1006;
	peak_kbps *= fec_overhead_multiplier_x1000;
	peak_kbps = div_u64(peak_kbps, 1000 * 1000);
	return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
	if (with_margin) // deduct 0.6% (994/1000) overhead from effective pbn
		pbn_effective *= (1000000 / PEAK_FACTOR_X1000);
	else
		pbn_effective *= 1000;

	return DIV_U64_ROUND_UP(pbn_effective * 8 * 54, 64);
}

static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,
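For reference, a standalone sketch of the arithmetic the new helpers encode, with plain C stand-ins for div_u64()/DIV64_U64_ROUND_UP() and an arbitrary example stream rate: one PBN represents 54/64 MB/s, so pbn = ceil(kbps * 64 / (54 * 8 * 1000)), with an optional 0.6% (1006/1000) overhead added for the peak value.

#include <stdint.h>
#include <stdio.h>

/* one PBN is 54/64 MBps, i.e. (54 * 8 * 1000) / 64 kbps */
static uint64_t kbps_to_pbn(uint64_t kbps, int is_peak_pbn)
{
	if (is_peak_pbn)			/* add 0.6% overhead, as in the patch */
		kbps = kbps * 1006 / 1000;
	return (kbps * 64 + (54 * 8 * 1000) - 1) / (54 * 8 * 1000);	/* round up */
}

int main(void)
{
	uint64_t stream_kbps = 12960000;	/* example stream rate, ~12.96 Gbit/s */

	printf("%llu kbps -> %llu PBN, %llu PBN with 0.6%% margin\n",
	       (unsigned long long)stream_kbps,
	       (unsigned long long)kbps_to_pbn(stream_kbps, 0),
	       (unsigned long long)kbps_to_pbn(stream_kbps, 1));
	return 0;
}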
@ -974,7 +976,7 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
|
|||
dc_dsc_get_default_config_option(param.sink->ctx->dc, &dsc_options);
|
||||
dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16;
|
||||
|
||||
kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
|
||||
kbps = pbn_to_kbps(pbn, false);
|
||||
dc_dsc_compute_config(
|
||||
param.sink->ctx->dc->res_pool->dscs[0],
|
||||
¶m.sink->dsc_caps.dsc_dec_caps,
|
||||
|
|
@ -1003,12 +1005,11 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
|
|||
int link_timeslots_used;
|
||||
int fair_pbn_alloc;
|
||||
int ret = 0;
|
||||
uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
if (vars[i + k].dsc_enabled) {
|
||||
initial_slack[i] =
|
||||
kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn;
|
||||
kbps_to_pbn(params[i].bw_range.max_kbps, false) - vars[i + k].pbn;
|
||||
bpp_increased[i] = false;
|
||||
remaining_to_increase += 1;
|
||||
} else {
|
||||
|
|
@ -1104,7 +1105,6 @@ static int try_disable_dsc(struct drm_atomic_state *state,
|
|||
int next_index;
|
||||
int remaining_to_try = 0;
|
||||
int ret;
|
||||
uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
|
||||
int var_pbn;
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
|
|
@ -1137,7 +1137,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
|
|||
|
||||
DRM_DEBUG_DRIVER("MST_DSC index #%d, try no compression\n", next_index);
|
||||
var_pbn = vars[next_index].pbn;
|
||||
vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
|
||||
vars[next_index].pbn = kbps_to_pbn(params[next_index].bw_range.stream_kbps, true);
|
||||
ret = drm_dp_atomic_find_time_slots(state,
|
||||
params[next_index].port->mgr,
|
||||
params[next_index].port,
|
||||
|
|
@ -1197,7 +1197,6 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
|
|||
int count = 0;
|
||||
int i, k, ret;
|
||||
bool debugfs_overwrite = false;
|
||||
uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
|
||||
struct drm_connector_state *new_conn_state;
|
||||
|
||||
memset(params, 0, sizeof(params));
|
||||
|
|
@ -1278,7 +1277,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
|
|||
DRM_DEBUG_DRIVER("MST_DSC Try no compression\n");
|
||||
for (i = 0; i < count; i++) {
|
||||
vars[i + k].aconnector = params[i].aconnector;
|
||||
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
|
||||
vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false);
|
||||
vars[i + k].dsc_enabled = false;
|
||||
vars[i + k].bpp_x16 = 0;
|
||||
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
|
||||
|
|
@ -1300,7 +1299,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
|
|||
DRM_DEBUG_DRIVER("MST_DSC Try max compression\n");
|
||||
for (i = 0; i < count; i++) {
|
||||
if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
|
||||
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000);
|
||||
vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.min_kbps, false);
|
||||
vars[i + k].dsc_enabled = true;
|
||||
vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
|
||||
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
|
||||
|
|
@ -1308,7 +1307,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
|
|||
if (ret < 0)
|
||||
return ret;
|
||||
} else {
|
||||
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
|
||||
vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false);
|
||||
vars[i + k].dsc_enabled = false;
|
||||
vars[i + k].bpp_x16 = 0;
|
||||
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
|
||||
|
|
@ -1763,18 +1762,6 @@ clean_exit:
|
|||
return ret;
|
||||
}
|
||||
|
||||
static uint32_t kbps_from_pbn(unsigned int pbn)
|
||||
{
|
||||
uint64_t kbps = (uint64_t)pbn;
|
||||
|
||||
kbps *= (1000000 / PEAK_FACTOR_X1000);
|
||||
kbps *= 8;
|
||||
kbps *= 54;
|
||||
kbps /= 64;
|
||||
|
||||
return (uint32_t)kbps;
|
||||
}
|
||||
|
||||
static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
|
||||
struct dc_dsc_bw_range *bw_range)
|
||||
{
|
||||
|
|
@ -1873,7 +1860,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
|
|||
dc_link_get_highest_encoding_format(stream->link));
|
||||
cur_link_settings = stream->link->verified_link_cap;
|
||||
root_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, &cur_link_settings);
|
||||
virtual_channel_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);
|
||||
virtual_channel_bw_in_kbps = pbn_to_kbps(aconnector->mst_output_port->full_pbn, true);
|
||||
|
||||
/* pick the end to end bw bottleneck */
|
||||
end_to_end_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
|
||||
|
|
@ -1926,7 +1913,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
|
|||
immediate_upstream_port = aconnector->mst_output_port->parent->port_parent;
|
||||
|
||||
if (immediate_upstream_port) {
|
||||
virtual_channel_bw_in_kbps = kbps_from_pbn(immediate_upstream_port->full_pbn);
|
||||
virtual_channel_bw_in_kbps = pbn_to_kbps(immediate_upstream_port->full_pbn, true);
|
||||
virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
|
||||
} else {
|
||||
/* For topology LCT 1 case - only one mstb*/
|
||||
|
|
|
|||
|
|
@@ -805,7 +805,7 @@ static enum bp_result bios_parser_dac_load_detection(

	if (bp_params.device_id == ATOM_DEVICE_CRT1_SUPPORT)
		device_id_mask = ATOM_S0_CRT1_MASK;
	else if (bp_params.device_id == ATOM_DEVICE_CRT1_SUPPORT)
	else if (bp_params.device_id == ATOM_DEVICE_CRT2_SUPPORT)
		device_id_mask = ATOM_S0_CRT2_MASK;
	else
		return BP_RESULT_UNSUPPORTED;
@@ -1480,10 +1480,10 @@ static enum bp_result get_embedded_panel_info_v2_1(
	/* not provided by VBIOS */
	info->lcd_timing.misc_info.HORIZONTAL_CUT_OFF = 0;

	info->lcd_timing.misc_info.H_SYNC_POLARITY = ~(uint32_t) (lvds->lcd_timing.miscinfo
		& ATOM_HSYNC_POLARITY);
	info->lcd_timing.misc_info.V_SYNC_POLARITY = ~(uint32_t) (lvds->lcd_timing.miscinfo
		& ATOM_VSYNC_POLARITY);
	info->lcd_timing.misc_info.H_SYNC_POLARITY = !(lvds->lcd_timing.miscinfo &
		ATOM_HSYNC_POLARITY);
	info->lcd_timing.misc_info.V_SYNC_POLARITY = !(lvds->lcd_timing.miscinfo &
		ATOM_VSYNC_POLARITY);

	/* not provided by VBIOS */
	info->lcd_timing.misc_info.VERTICAL_CUT_OFF = 0;
@ -303,6 +303,7 @@ static bool create_links(
|
|||
link->link_id.id = CONNECTOR_ID_VIRTUAL;
|
||||
link->link_id.enum_id = ENUM_ID_1;
|
||||
link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
|
||||
link->replay_settings.config.replay_version = DC_REPLAY_VERSION_UNSUPPORTED;
|
||||
link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
|
||||
|
||||
if (!link->link_enc) {
|
||||
|
|
@ -2146,6 +2147,14 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
|
|||
if (!dcb->funcs->is_accelerated_mode(dcb)) {
|
||||
disable_vbios_mode_if_required(dc, context);
|
||||
dc->hwss.enable_accelerated_mode(dc, context);
|
||||
} else if (get_seamless_boot_stream_count(dc->current_state) > 0) {
|
||||
/* If the previous Stream still retains the apply seamless boot flag,
|
||||
* it means the OS has not actually performed a flip yet.
|
||||
* At this point, if we receive dc_commit_streams again, we should
|
||||
* once more check whether the actual HW timing matches what the OS
|
||||
* has provided
|
||||
*/
|
||||
disable_vbios_mode_if_required(dc, context);
|
||||
}
|
||||
|
||||
if (dc->hwseq->funcs.wait_for_pipe_update_if_needed) {
|
||||
|
|
@ -6003,6 +6012,12 @@ bool dc_smart_power_oled_enable(const struct dc_link *link, bool enable, uint16_
|
|||
if (pipe_ctx)
|
||||
otg_inst = pipe_ctx->stream_res.tg->inst;
|
||||
|
||||
// before enable smart power OLED, we need to call set pipe for DMUB to set ABM config
|
||||
if (enable) {
|
||||
if (dc->hwss.set_pipe && pipe_ctx)
|
||||
dc->hwss.set_pipe(pipe_ctx);
|
||||
}
|
||||
|
||||
// fill in cmd
|
||||
memset(&cmd, 0, sizeof(cmd));
|
||||
|
||||
|
|
@ -6511,6 +6526,567 @@ void dc_get_power_feature_status(struct dc *dc, int primary_otg_inst,
|
|||
out_data->fams = dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching;
|
||||
}
|
||||
|
||||
bool dc_capture_register_software_state(struct dc *dc, struct dc_register_software_state *state)
|
||||
{
|
||||
struct dc_state *context;
|
||||
struct resource_context *res_ctx;
|
||||
int i;
|
||||
|
||||
if (!dc || !dc->current_state || !state) {
|
||||
if (state)
|
||||
state->state_valid = false;
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Initialize the state structure */
|
||||
memset(state, 0, sizeof(struct dc_register_software_state));
|
||||
|
||||
context = dc->current_state;
|
||||
res_ctx = &context->res_ctx;
|
||||
|
||||
/* Count active pipes and streams */
|
||||
state->active_pipe_count = 0;
|
||||
state->active_stream_count = context->stream_count;
|
||||
|
||||
for (i = 0; i < dc->res_pool->pipe_count; i++) {
|
||||
if (res_ctx->pipe_ctx[i].stream)
|
||||
state->active_pipe_count++;
|
||||
}
|
||||
|
||||
/* Capture HUBP programming state for each pipe */
|
||||
for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
|
||||
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
|
||||
|
||||
state->hubp[i].valid_stream = false;
|
||||
if (!pipe_ctx->stream)
|
||||
continue;
|
||||
|
||||
state->hubp[i].valid_stream = true;
|
||||
|
||||
/* HUBP register programming variables */
|
||||
if (pipe_ctx->stream_res.tg)
|
||||
state->hubp[i].vtg_sel = pipe_ctx->stream_res.tg->inst;
|
||||
|
||||
state->hubp[i].hubp_clock_enable = (pipe_ctx->plane_res.hubp != NULL) ? 1 : 0;
|
||||
|
||||
state->hubp[i].valid_plane_state = false;
|
||||
if (pipe_ctx->plane_state) {
|
||||
state->hubp[i].valid_plane_state = true;
|
||||
state->hubp[i].surface_pixel_format = pipe_ctx->plane_state->format;
|
||||
state->hubp[i].rotation_angle = pipe_ctx->plane_state->rotation;
|
||||
state->hubp[i].h_mirror_en = pipe_ctx->plane_state->horizontal_mirror ? 1 : 0;
|
||||
|
||||
/* Surface size */
|
||||
if (pipe_ctx->plane_state->plane_size.surface_size.width > 0) {
|
||||
state->hubp[i].surface_size_width = pipe_ctx->plane_state->plane_size.surface_size.width;
|
||||
state->hubp[i].surface_size_height = pipe_ctx->plane_state->plane_size.surface_size.height;
|
||||
}
|
||||
|
||||
/* Viewport dimensions from scaler data */
|
||||
if (pipe_ctx->plane_state->src_rect.width > 0) {
|
||||
state->hubp[i].pri_viewport_width = pipe_ctx->plane_state->src_rect.width;
|
||||
state->hubp[i].pri_viewport_height = pipe_ctx->plane_state->src_rect.height;
|
||||
state->hubp[i].pri_viewport_x_start = pipe_ctx->plane_state->src_rect.x;
|
||||
state->hubp[i].pri_viewport_y_start = pipe_ctx->plane_state->src_rect.y;
|
||||
}
|
||||
|
||||
/* DCC settings */
|
||||
state->hubp[i].surface_dcc_en = (pipe_ctx->plane_state->dcc.enable) ? 1 : 0;
|
||||
state->hubp[i].surface_dcc_ind_64b_blk = pipe_ctx->plane_state->dcc.independent_64b_blks;
|
||||
state->hubp[i].surface_dcc_ind_128b_blk = pipe_ctx->plane_state->dcc.dcc_ind_blk;
|
||||
|
||||
/* Surface pitch */
|
||||
state->hubp[i].surface_pitch = pipe_ctx->plane_state->plane_size.surface_pitch;
|
||||
state->hubp[i].meta_pitch = pipe_ctx->plane_state->dcc.meta_pitch;
|
||||
state->hubp[i].chroma_pitch = pipe_ctx->plane_state->plane_size.chroma_pitch;
|
||||
state->hubp[i].meta_pitch_c = pipe_ctx->plane_state->dcc.meta_pitch_c;
|
||||
|
||||
/* Surface addresses - primary */
|
||||
state->hubp[i].primary_surface_address_low = pipe_ctx->plane_state->address.grph.addr.low_part;
|
||||
state->hubp[i].primary_surface_address_high = pipe_ctx->plane_state->address.grph.addr.high_part;
|
||||
state->hubp[i].primary_meta_surface_address_low = pipe_ctx->plane_state->address.grph.meta_addr.low_part;
|
||||
state->hubp[i].primary_meta_surface_address_high = pipe_ctx->plane_state->address.grph.meta_addr.high_part;
|
||||
|
||||
/* TMZ settings */
|
||||
state->hubp[i].primary_surface_tmz = pipe_ctx->plane_state->address.tmz_surface;
|
||||
state->hubp[i].primary_meta_surface_tmz = pipe_ctx->plane_state->address.tmz_surface;
|
||||
|
||||
/* Tiling configuration */
|
||||
state->hubp[i].min_dc_gfx_version9 = false;
|
||||
if (pipe_ctx->plane_state->tiling_info.gfxversion >= DcGfxVersion9) {
|
||||
state->hubp[i].min_dc_gfx_version9 = true;
|
||||
state->hubp[i].sw_mode = pipe_ctx->plane_state->tiling_info.gfx9.swizzle;
|
||||
state->hubp[i].num_pipes = pipe_ctx->plane_state->tiling_info.gfx9.num_pipes;
|
||||
state->hubp[i].num_banks = pipe_ctx->plane_state->tiling_info.gfx9.num_banks;
|
||||
state->hubp[i].pipe_interleave = pipe_ctx->plane_state->tiling_info.gfx9.pipe_interleave;
|
||||
state->hubp[i].num_shader_engines = pipe_ctx->plane_state->tiling_info.gfx9.num_shader_engines;
|
||||
state->hubp[i].num_rb_per_se = pipe_ctx->plane_state->tiling_info.gfx9.num_rb_per_se;
|
||||
state->hubp[i].num_pkrs = pipe_ctx->plane_state->tiling_info.gfx9.num_pkrs;
|
||||
}
|
||||
}
|
||||
|
||||
/* DML Request Size Configuration */
|
||||
if (pipe_ctx->rq_regs.rq_regs_l.chunk_size > 0) {
|
||||
state->hubp[i].rq_chunk_size = pipe_ctx->rq_regs.rq_regs_l.chunk_size;
|
||||
state->hubp[i].rq_min_chunk_size = pipe_ctx->rq_regs.rq_regs_l.min_chunk_size;
|
||||
state->hubp[i].rq_meta_chunk_size = pipe_ctx->rq_regs.rq_regs_l.meta_chunk_size;
|
||||
state->hubp[i].rq_min_meta_chunk_size = pipe_ctx->rq_regs.rq_regs_l.min_meta_chunk_size;
|
||||
state->hubp[i].rq_dpte_group_size = pipe_ctx->rq_regs.rq_regs_l.dpte_group_size;
|
||||
state->hubp[i].rq_mpte_group_size = pipe_ctx->rq_regs.rq_regs_l.mpte_group_size;
|
||||
state->hubp[i].rq_swath_height_l = pipe_ctx->rq_regs.rq_regs_l.swath_height;
|
||||
state->hubp[i].rq_pte_row_height_l = pipe_ctx->rq_regs.rq_regs_l.pte_row_height_linear;
|
||||
}
|
||||
|
||||
/* Chroma request size configuration */
|
||||
if (pipe_ctx->rq_regs.rq_regs_c.chunk_size > 0) {
|
||||
state->hubp[i].rq_chunk_size_c = pipe_ctx->rq_regs.rq_regs_c.chunk_size;
|
||||
state->hubp[i].rq_min_chunk_size_c = pipe_ctx->rq_regs.rq_regs_c.min_chunk_size;
|
||||
state->hubp[i].rq_meta_chunk_size_c = pipe_ctx->rq_regs.rq_regs_c.meta_chunk_size;
|
||||
state->hubp[i].rq_min_meta_chunk_size_c = pipe_ctx->rq_regs.rq_regs_c.min_meta_chunk_size;
|
||||
state->hubp[i].rq_dpte_group_size_c = pipe_ctx->rq_regs.rq_regs_c.dpte_group_size;
|
||||
state->hubp[i].rq_mpte_group_size_c = pipe_ctx->rq_regs.rq_regs_c.mpte_group_size;
|
||||
state->hubp[i].rq_swath_height_c = pipe_ctx->rq_regs.rq_regs_c.swath_height;
|
||||
state->hubp[i].rq_pte_row_height_c = pipe_ctx->rq_regs.rq_regs_c.pte_row_height_linear;
|
||||
}
|
||||
|
||||
/* DML expansion modes */
|
||||
state->hubp[i].drq_expansion_mode = pipe_ctx->rq_regs.drq_expansion_mode;
|
||||
state->hubp[i].prq_expansion_mode = pipe_ctx->rq_regs.prq_expansion_mode;
|
||||
state->hubp[i].mrq_expansion_mode = pipe_ctx->rq_regs.mrq_expansion_mode;
|
||||
state->hubp[i].crq_expansion_mode = pipe_ctx->rq_regs.crq_expansion_mode;
|
||||
|
||||
/* DML DLG parameters - nominal */
|
||||
state->hubp[i].dst_y_per_vm_vblank = pipe_ctx->dlg_regs.dst_y_per_vm_vblank;
|
||||
state->hubp[i].dst_y_per_row_vblank = pipe_ctx->dlg_regs.dst_y_per_row_vblank;
|
||||
state->hubp[i].dst_y_per_vm_flip = pipe_ctx->dlg_regs.dst_y_per_vm_flip;
|
||||
state->hubp[i].dst_y_per_row_flip = pipe_ctx->dlg_regs.dst_y_per_row_flip;
|
||||
|
||||
/* DML prefetch settings */
|
||||
state->hubp[i].dst_y_prefetch = pipe_ctx->dlg_regs.dst_y_prefetch;
|
||||
state->hubp[i].vratio_prefetch = pipe_ctx->dlg_regs.vratio_prefetch;
|
||||
state->hubp[i].vratio_prefetch_c = pipe_ctx->dlg_regs.vratio_prefetch_c;
|
||||
|
||||
/* TTU parameters */
|
||||
state->hubp[i].qos_level_low_wm = pipe_ctx->ttu_regs.qos_level_low_wm;
|
||||
state->hubp[i].qos_level_high_wm = pipe_ctx->ttu_regs.qos_level_high_wm;
|
||||
state->hubp[i].qos_level_flip = pipe_ctx->ttu_regs.qos_level_flip;
|
||||
state->hubp[i].min_ttu_vblank = pipe_ctx->ttu_regs.min_ttu_vblank;
|
||||
}
|
||||
|
||||
	/* Capture HUBBUB programming state */
	if (dc->res_pool->hubbub) {
		/* Individual DET buffer sizes - software state variables that program DET registers */
		for (i = 0; i < 4 && i < dc->res_pool->pipe_count; i++) {
			uint32_t det_size = res_ctx->pipe_ctx[i].det_buffer_size_kb;
			switch (i) {
			case 0:
				state->hubbub.det0_size = det_size;
				break;
			case 1:
				state->hubbub.det1_size = det_size;
				break;
			case 2:
				state->hubbub.det2_size = det_size;
				break;
			case 3:
				state->hubbub.det3_size = det_size;
				break;
			}
		}

		/* Compression buffer configuration - software state that programs COMPBUF_SIZE register */
		// TODO: Handle logic for legacy DCN pre-DCN401
		state->hubbub.compbuf_size = context->bw_ctx.bw.dcn.arb_regs.compbuf_size;
	}
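
/*
 * Illustrative sketch only (not part of the change above): the det0..det3
 * switch could equally be written with an array of destination pointers,
 * since each slot is filled from the same per-pipe det_buffer_size_kb.
 * The helper name below is hypothetical.
 */
static inline void capture_det_sizes_sketch(struct dc_register_software_state *state,
					    const struct resource_context *res_ctx,
					    unsigned int pipe_count)
{
	uint32_t *det_size[4] = {
		&state->hubbub.det0_size,
		&state->hubbub.det1_size,
		&state->hubbub.det2_size,
		&state->hubbub.det3_size,
	};
	unsigned int i;

	for (i = 0; i < 4 && i < pipe_count; i++)
		*det_size[i] = res_ctx->pipe_ctx[i].det_buffer_size_kb;
}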
|
||||
|
||||
/* Capture DPP programming state for each pipe */
|
||||
for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
|
||||
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
|
||||
|
||||
if (!pipe_ctx->stream)
|
||||
continue;
|
||||
|
||||
state->dpp[i].dpp_clock_enable = (pipe_ctx->plane_res.dpp != NULL) ? 1 : 0;
|
||||
|
||||
if (pipe_ctx->plane_state && pipe_ctx->plane_res.scl_data.recout.width > 0) {
|
||||
/* Access dscl_prog_data directly - this contains the actual software state used for register programming */
|
||||
struct dscl_prog_data *dscl_data = &pipe_ctx->plane_res.scl_data.dscl_prog_data;
|
||||
|
||||
/* Recout (Rectangle of Interest) configuration - software state that programs RECOUT registers */
|
||||
state->dpp[i].recout_start_x = dscl_data->recout.x;
|
||||
state->dpp[i].recout_start_y = dscl_data->recout.y;
|
||||
state->dpp[i].recout_width = dscl_data->recout.width;
|
||||
state->dpp[i].recout_height = dscl_data->recout.height;
|
||||
|
||||
/* MPC (Multiple Pipe/Plane Combiner) size - software state that programs MPC_SIZE registers */
|
||||
state->dpp[i].mpc_width = dscl_data->mpc_size.width;
|
||||
state->dpp[i].mpc_height = dscl_data->mpc_size.height;
|
||||
|
||||
			/* DSCL mode - software state that programs SCL_MODE registers */
			state->dpp[i].dscl_mode = dscl_data->dscl_mode;

			/* Scaler ratios - software state that programs scale ratio registers (use actual programmed ratios) */
			state->dpp[i].horz_ratio_int = dscl_data->ratios.h_scale_ratio >> 19; // Extract integer part from programmed ratio
			state->dpp[i].vert_ratio_int = dscl_data->ratios.v_scale_ratio >> 19; // Extract integer part from programmed ratio

			/* Basic scaler taps - software state that programs tap control registers (use actual programmed taps) */
			state->dpp[i].h_taps = dscl_data->taps.h_taps + 1; // dscl_prog_data.taps stores (taps - 1), so add 1 back
			state->dpp[i].v_taps = dscl_data->taps.v_taps + 1; // dscl_prog_data.taps stores (taps - 1), so add 1 back
		}
	}
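
/*
 * Illustrative sketch only (not part of the change above): the ">> 19" used
 * for horz_ratio_int/vert_ratio_int assumes the programmed scale ratio keeps
 * 19 fractional bits, so the integer and fractional parts split as shown.
 * The helper name and the 19-bit assumption are illustrative.
 */
static inline void split_scale_ratio_sketch(uint32_t ratio,
					    uint32_t *int_part,
					    uint32_t *frac_part)
{
	*int_part = ratio >> 19;		/* whole-number portion */
	*frac_part = ratio & ((1u << 19) - 1);	/* 19 fractional bits */
}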
|
||||
|
||||
/* Capture essential clock state for underflow analysis */
|
||||
if (dc->clk_mgr && dc->clk_mgr->clks.dispclk_khz > 0) {
|
||||
/* Core display clocks affecting bandwidth and timing */
|
||||
state->dccg.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
|
||||
|
||||
/* Per-pipe clock configuration - only capture what's essential */
|
||||
for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
|
||||
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
|
||||
if (pipe_ctx->stream) {
|
||||
/* Essential clocks that directly affect underflow risk */
|
||||
state->dccg.dppclk_khz[i] = dc->clk_mgr->clks.dppclk_khz;
|
||||
state->dccg.pixclk_khz[i] = pipe_ctx->stream->timing.pix_clk_100hz / 10;
|
||||
state->dccg.dppclk_enable[i] = 1;
|
||||
|
||||
/* DP stream clock only for DP signals */
|
||||
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
|
||||
pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
|
||||
state->dccg.dpstreamclk_enable[i] = 1;
|
||||
} else {
|
||||
state->dccg.dpstreamclk_enable[i] = 0;
|
||||
}
|
||||
} else {
|
||||
/* Inactive pipe - no clocks */
|
||||
state->dccg.dppclk_khz[i] = 0;
|
||||
state->dccg.pixclk_khz[i] = 0;
|
||||
state->dccg.dppclk_enable[i] = 0;
|
||||
if (i < 4) {
|
||||
state->dccg.dpstreamclk_enable[i] = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* DSC clock state - only when actually using DSC */
|
||||
for (i = 0; i < MAX_PIPES; i++) {
|
||||
struct pipe_ctx *pipe_ctx = (i < dc->res_pool->pipe_count) ? &res_ctx->pipe_ctx[i] : NULL;
|
||||
if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->timing.dsc_cfg.num_slices_h > 0) {
|
||||
state->dccg.dscclk_khz[i] = 400000; /* Typical DSC clock frequency */
|
||||
} else {
|
||||
state->dccg.dscclk_khz[i] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/* SYMCLK32 LE Control - only the essential HPO state for underflow analysis */
|
||||
for (i = 0; i < 2; i++) {
|
||||
state->dccg.symclk32_le_enable[i] = 0; /* Default: disabled */
|
||||
}
|
||||
|
||||
}
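
/*
 * Illustrative sketch only (not part of the change above): stream timings
 * store the pixel clock in 100 Hz units, which is why the capture above
 * divides pix_clk_100hz by 10 to obtain kHz. The helper name is hypothetical.
 */
static inline uint32_t pix_clk_100hz_to_khz_sketch(uint32_t pix_clk_100hz)
{
	return pix_clk_100hz / 10;	/* 100 Hz units -> kHz */
}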
|
||||
|
||||
	/* Capture essential DSC configuration for underflow analysis */
	for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];

		if (pipe_ctx->stream && pipe_ctx->stream->timing.dsc_cfg.num_slices_h > 0) {
			/* DSC is enabled - capture essential configuration */
			state->dsc[i].dsc_clock_enable = 1;

			/* DSC configuration affecting bandwidth and timing */
			struct dc_dsc_config *dsc_cfg = &pipe_ctx->stream->timing.dsc_cfg;
			state->dsc[i].dsc_num_slices_h = dsc_cfg->num_slices_h;
			state->dsc[i].dsc_num_slices_v = dsc_cfg->num_slices_v;
			state->dsc[i].dsc_bits_per_pixel = dsc_cfg->bits_per_pixel;

			/* OPP pipe source for DSC forwarding */
			if (pipe_ctx->stream_res.opp) {
				state->dsc[i].dscrm_dsc_forward_enable = 1;
				state->dsc[i].dscrm_dsc_opp_pipe_source = pipe_ctx->stream_res.opp->inst;
			} else {
				state->dsc[i].dscrm_dsc_forward_enable = 0;
				state->dsc[i].dscrm_dsc_opp_pipe_source = 0;
			}
		} else {
			/* DSC not enabled - clear all fields */
			memset(&state->dsc[i], 0, sizeof(state->dsc[i]));
		}
	}
|
||||
|
||||
/* Capture MPC programming state - comprehensive register field coverage */
|
||||
for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
|
||||
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
|
||||
|
||||
if (pipe_ctx->plane_state && pipe_ctx->stream) {
|
||||
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
|
||||
|
||||
/* MPCC blending tree and mode control - capture actual blend configuration */
|
||||
state->mpc.mpcc_mode[i] = (plane_state->blend_tf.type != TF_TYPE_BYPASS) ? 1 : 0;
|
||||
state->mpc.mpcc_alpha_blend_mode[i] = plane_state->per_pixel_alpha ? 1 : 0;
|
||||
state->mpc.mpcc_alpha_multiplied_mode[i] = plane_state->pre_multiplied_alpha ? 1 : 0;
|
||||
state->mpc.mpcc_blnd_active_overlap_only[i] = 0; /* Default - no overlap restriction */
|
||||
state->mpc.mpcc_global_alpha[i] = plane_state->global_alpha_value;
|
||||
state->mpc.mpcc_global_gain[i] = plane_state->global_alpha ? 255 : 0;
|
||||
state->mpc.mpcc_bg_bpc[i] = 8; /* Standard 8-bit background */
|
||||
state->mpc.mpcc_bot_gain_mode[i] = 0; /* Standard gain mode */
|
||||
|
||||
/* MPCC blending tree connections - capture tree topology */
|
||||
if (pipe_ctx->bottom_pipe) {
|
||||
state->mpc.mpcc_bot_sel[i] = pipe_ctx->bottom_pipe->pipe_idx;
|
||||
} else {
|
||||
state->mpc.mpcc_bot_sel[i] = 0xF; /* No bottom connection */
|
||||
}
|
||||
state->mpc.mpcc_top_sel[i] = pipe_ctx->pipe_idx; /* This pipe's DPP ID */
|
||||
|
||||
/* MPCC output gamma control - capture gamma programming */
|
||||
if (plane_state->gamma_correction.type != GAMMA_CS_TFM_1D && plane_state->gamma_correction.num_entries > 0) {
|
||||
state->mpc.mpcc_ogam_mode[i] = 1; /* Gamma enabled */
|
||||
state->mpc.mpcc_ogam_select[i] = 0; /* Bank A selection */
|
||||
state->mpc.mpcc_ogam_pwl_disable[i] = 0; /* PWL enabled */
|
||||
} else {
|
||||
state->mpc.mpcc_ogam_mode[i] = 0; /* Bypass mode */
|
||||
state->mpc.mpcc_ogam_select[i] = 0;
|
||||
state->mpc.mpcc_ogam_pwl_disable[i] = 1; /* PWL disabled */
|
||||
}
|
||||
|
||||
/* MPCC pipe assignment and operational status */
|
||||
if (pipe_ctx->stream_res.opp) {
|
||||
state->mpc.mpcc_opp_id[i] = pipe_ctx->stream_res.opp->inst;
|
||||
} else {
|
||||
state->mpc.mpcc_opp_id[i] = 0xF; /* No OPP assignment */
|
||||
}
|
||||
|
||||
/* MPCC status indicators - active pipe state */
|
||||
state->mpc.mpcc_idle[i] = 0; /* Active pipe - not idle */
|
||||
state->mpc.mpcc_busy[i] = 1; /* Active pipe - busy processing */
|
||||
|
||||
} else {
|
||||
/* Pipe not active - set disabled/idle state for all fields */
|
||||
state->mpc.mpcc_mode[i] = 0;
|
||||
state->mpc.mpcc_alpha_blend_mode[i] = 0;
|
||||
state->mpc.mpcc_alpha_multiplied_mode[i] = 0;
|
||||
state->mpc.mpcc_blnd_active_overlap_only[i] = 0;
|
||||
state->mpc.mpcc_global_alpha[i] = 0;
|
||||
state->mpc.mpcc_global_gain[i] = 0;
|
||||
state->mpc.mpcc_bg_bpc[i] = 0;
|
||||
state->mpc.mpcc_bot_gain_mode[i] = 0;
|
||||
state->mpc.mpcc_bot_sel[i] = 0xF; /* No bottom connection */
|
||||
state->mpc.mpcc_top_sel[i] = 0xF; /* No top connection */
|
||||
state->mpc.mpcc_ogam_mode[i] = 0; /* Bypass */
|
||||
state->mpc.mpcc_ogam_select[i] = 0;
|
||||
state->mpc.mpcc_ogam_pwl_disable[i] = 1; /* PWL disabled */
|
||||
state->mpc.mpcc_opp_id[i] = 0xF; /* No OPP assignment */
|
||||
state->mpc.mpcc_idle[i] = 1; /* Idle */
|
||||
state->mpc.mpcc_busy[i] = 0; /* Not busy */
|
||||
}
|
||||
}
|
||||
|
||||
/* Capture OPP programming state for each pipe - comprehensive register field coverage */
|
||||
for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
|
||||
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
|
||||
|
||||
if (!pipe_ctx->stream)
|
||||
continue;
|
||||
|
||||
if (pipe_ctx->stream_res.opp) {
|
||||
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
|
||||
|
||||
/* OPP Pipe Control */
|
||||
state->opp[i].opp_pipe_clock_enable = 1; /* Active pipe has clock enabled */
|
||||
|
||||
/* Display Pattern Generator (DPG) Control - 19 fields */
|
||||
if (pipe_ctx->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
|
||||
state->opp[i].dpg_enable = 1;
|
||||
} else {
|
||||
/* Video mode - DPG disabled */
|
||||
state->opp[i].dpg_enable = 0;
|
||||
}
|
||||
|
||||
/* Format Control (FMT) - 18 fields */
|
||||
state->opp[i].fmt_pixel_encoding = timing->pixel_encoding;
|
||||
|
||||
/* Chroma subsampling mode based on pixel encoding */
|
||||
if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) {
|
||||
state->opp[i].fmt_subsampling_mode = 1; /* 4:2:0 subsampling */
|
||||
} else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
|
||||
state->opp[i].fmt_subsampling_mode = 2; /* 4:2:2 subsampling */
|
||||
} else {
|
||||
state->opp[i].fmt_subsampling_mode = 0; /* No subsampling (4:4:4) */
|
||||
}
|
||||
|
||||
state->opp[i].fmt_cbcr_bit_reduction_bypass = (timing->pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
|
||||
state->opp[i].fmt_stereosync_override = (timing->timing_3d_format != TIMING_3D_FORMAT_NONE) ? 1 : 0;
|
||||
|
||||
/* Dithering control based on bit depth */
|
||||
if (timing->display_color_depth < COLOR_DEPTH_121212) {
|
||||
state->opp[i].fmt_spatial_dither_frame_counter_max = 15; /* Typical frame counter max */
|
||||
state->opp[i].fmt_spatial_dither_frame_counter_bit_swap = 0; /* No bit swapping */
|
||||
state->opp[i].fmt_spatial_dither_enable = 1;
|
||||
state->opp[i].fmt_spatial_dither_mode = 0; /* Spatial dithering mode */
|
||||
state->opp[i].fmt_spatial_dither_depth = timing->display_color_depth;
|
||||
state->opp[i].fmt_temporal_dither_enable = 0; /* Spatial dithering preferred */
|
||||
} else {
|
||||
state->opp[i].fmt_spatial_dither_frame_counter_max = 0;
|
||||
state->opp[i].fmt_spatial_dither_frame_counter_bit_swap = 0;
|
||||
state->opp[i].fmt_spatial_dither_enable = 0;
|
||||
state->opp[i].fmt_spatial_dither_mode = 0;
|
||||
state->opp[i].fmt_spatial_dither_depth = 0;
|
||||
state->opp[i].fmt_temporal_dither_enable = 0;
|
||||
}
|
||||
|
||||
/* Truncation control for bit depth reduction */
|
||||
if (timing->display_color_depth < COLOR_DEPTH_121212) {
|
||||
state->opp[i].fmt_truncate_enable = 1;
|
||||
state->opp[i].fmt_truncate_depth = timing->display_color_depth;
|
||||
state->opp[i].fmt_truncate_mode = 0; /* Round mode */
|
||||
} else {
|
||||
state->opp[i].fmt_truncate_enable = 0;
|
||||
state->opp[i].fmt_truncate_depth = 0;
|
||||
state->opp[i].fmt_truncate_mode = 0;
|
||||
}
|
||||
|
||||
/* Data clamping control */
|
||||
state->opp[i].fmt_clamp_data_enable = 1; /* Clamping typically enabled */
|
||||
state->opp[i].fmt_clamp_color_format = timing->pixel_encoding;
|
||||
|
||||
/* Dynamic expansion for limited range content */
|
||||
if (timing->pixel_encoding != PIXEL_ENCODING_RGB) {
|
||||
state->opp[i].fmt_dynamic_exp_enable = 1; /* YCbCr typically needs expansion */
|
||||
state->opp[i].fmt_dynamic_exp_mode = 0; /* Standard expansion */
|
||||
} else {
|
||||
state->opp[i].fmt_dynamic_exp_enable = 0; /* RGB typically full range */
|
||||
state->opp[i].fmt_dynamic_exp_mode = 0;
|
||||
}
|
||||
|
||||
/* Legacy field for compatibility */
|
||||
state->opp[i].fmt_bit_depth_control = timing->display_color_depth;
|
||||
|
||||
/* Output Buffer (OPPBUF) Control - 6 fields */
|
||||
state->opp[i].oppbuf_active_width = timing->h_addressable;
|
||||
state->opp[i].oppbuf_pixel_repetition = 0; /* No pixel repetition by default */
|
||||
|
||||
/* Multi-Stream Output (MSO) / ODM segmentation */
|
||||
if (pipe_ctx->next_odm_pipe) {
|
||||
state->opp[i].oppbuf_display_segmentation = 1; /* Segmented display */
|
||||
state->opp[i].oppbuf_overlap_pixel_num = 0; /* ODM overlap pixels */
|
||||
} else {
|
||||
state->opp[i].oppbuf_display_segmentation = 0; /* Single segment */
|
||||
state->opp[i].oppbuf_overlap_pixel_num = 0;
|
||||
}
|
||||
|
||||
/* 3D/Stereo control */
|
||||
if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE) {
|
||||
state->opp[i].oppbuf_3d_vact_space1_size = 30; /* Typical stereo blanking */
|
||||
state->opp[i].oppbuf_3d_vact_space2_size = 30;
|
||||
} else {
|
||||
state->opp[i].oppbuf_3d_vact_space1_size = 0;
|
||||
state->opp[i].oppbuf_3d_vact_space2_size = 0;
|
||||
}
|
||||
|
||||
/* DSC Forward Config - 3 fields */
|
||||
if (timing->dsc_cfg.num_slices_h > 0) {
|
||||
state->opp[i].dscrm_dsc_forward_enable = 1;
|
||||
state->opp[i].dscrm_dsc_opp_pipe_source = pipe_ctx->stream_res.opp->inst;
|
||||
state->opp[i].dscrm_dsc_forward_enable_status = 1; /* Status follows enable */
|
||||
} else {
|
||||
state->opp[i].dscrm_dsc_forward_enable = 0;
|
||||
state->opp[i].dscrm_dsc_opp_pipe_source = 0;
|
||||
state->opp[i].dscrm_dsc_forward_enable_status = 0;
|
||||
}
|
||||
} else {
|
||||
/* No OPP resource - set all fields to disabled state */
|
||||
memset(&state->opp[i], 0, sizeof(state->opp[i]));
|
||||
}
|
||||
}
|
||||
|
||||
/* Capture OPTC programming state for each pipe - comprehensive register field coverage */
|
||||
for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
|
||||
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
|
||||
|
||||
if (!pipe_ctx->stream)
|
||||
continue;
|
||||
|
||||
if (pipe_ctx->stream_res.tg) {
|
||||
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
|
||||
|
||||
state->optc[i].otg_master_inst = pipe_ctx->stream_res.tg->inst;
|
||||
|
||||
/* OTG_CONTROL register - 5 fields */
|
||||
state->optc[i].otg_master_enable = 1; /* Active stream */
|
||||
state->optc[i].otg_disable_point_cntl = 0; /* Normal operation */
|
||||
state->optc[i].otg_start_point_cntl = 0; /* Normal start */
|
||||
state->optc[i].otg_field_number_cntl = (timing->flags.INTERLACE) ? 1 : 0;
|
||||
state->optc[i].otg_out_mux = 0; /* Direct output */
|
||||
|
||||
/* OTG Horizontal Timing - 7 fields */
|
||||
state->optc[i].otg_h_total = timing->h_total;
|
||||
state->optc[i].otg_h_blank_start = timing->h_addressable;
|
||||
state->optc[i].otg_h_blank_end = timing->h_total - timing->h_front_porch;
|
||||
state->optc[i].otg_h_sync_start = timing->h_addressable + timing->h_front_porch;
|
||||
state->optc[i].otg_h_sync_end = timing->h_addressable + timing->h_front_porch + timing->h_sync_width;
|
||||
state->optc[i].otg_h_sync_polarity = timing->flags.HSYNC_POSITIVE_POLARITY ? 0 : 1;
|
||||
state->optc[i].otg_h_timing_div_mode = (pipe_ctx->next_odm_pipe) ? 1 : 0; /* ODM divide mode */
|
||||
|
||||
/* OTG Vertical Timing - 7 fields */
|
||||
state->optc[i].otg_v_total = timing->v_total;
|
||||
state->optc[i].otg_v_blank_start = timing->v_addressable;
|
||||
state->optc[i].otg_v_blank_end = timing->v_total - timing->v_front_porch;
|
||||
state->optc[i].otg_v_sync_start = timing->v_addressable + timing->v_front_porch;
|
||||
state->optc[i].otg_v_sync_end = timing->v_addressable + timing->v_front_porch + timing->v_sync_width;
|
||||
state->optc[i].otg_v_sync_polarity = timing->flags.VSYNC_POSITIVE_POLARITY ? 0 : 1;
|
||||
state->optc[i].otg_v_sync_mode = 0; /* Normal sync mode */
|
||||
|
||||
/* Initialize remaining core fields with appropriate defaults */
|
||||
// TODO: Update logic for accurate vtotal min/max
|
||||
state->optc[i].otg_v_total_max = timing->v_total + 100; /* Typical DRR range */
|
||||
state->optc[i].otg_v_total_min = timing->v_total - 50;
|
||||
state->optc[i].otg_v_total_mid = timing->v_total;
|
||||
|
||||
/* ODM configuration */
|
||||
// TODO: Update logic to have complete ODM mappings (e.g. 3:1 and 4:1) stored in single pipe
|
||||
if (pipe_ctx->next_odm_pipe) {
|
||||
state->optc[i].optc_seg0_src_sel = pipe_ctx->stream_res.opp ? pipe_ctx->stream_res.opp->inst : 0;
|
||||
state->optc[i].optc_seg1_src_sel = pipe_ctx->next_odm_pipe->stream_res.opp ? pipe_ctx->next_odm_pipe->stream_res.opp->inst : 0;
|
||||
state->optc[i].optc_num_of_input_segment = 1; /* 2 segments - 1 */
|
||||
} else {
|
||||
state->optc[i].optc_seg0_src_sel = pipe_ctx->stream_res.opp ? pipe_ctx->stream_res.opp->inst : 0;
|
||||
state->optc[i].optc_seg1_src_sel = 0;
|
||||
state->optc[i].optc_num_of_input_segment = 0; /* Single segment */
|
||||
}
|
||||
|
||||
/* DSC configuration */
|
||||
if (timing->dsc_cfg.num_slices_h > 0) {
|
||||
state->optc[i].optc_dsc_mode = 1; /* DSC enabled */
|
||||
state->optc[i].optc_dsc_bytes_per_pixel = timing->dsc_cfg.bits_per_pixel / 16; /* Convert to bytes */
|
||||
state->optc[i].optc_dsc_slice_width = timing->h_addressable / timing->dsc_cfg.num_slices_h;
|
||||
} else {
|
||||
state->optc[i].optc_dsc_mode = 0;
|
||||
state->optc[i].optc_dsc_bytes_per_pixel = 0;
|
||||
state->optc[i].optc_dsc_slice_width = 0;
|
||||
}
|
||||
|
||||
/* Essential control fields */
|
||||
state->optc[i].otg_stereo_enable = (timing->timing_3d_format != TIMING_3D_FORMAT_NONE) ? 1 : 0;
|
||||
state->optc[i].otg_interlace_enable = timing->flags.INTERLACE ? 1 : 0;
|
||||
state->optc[i].otg_clock_enable = 1; /* OTG clock enabled */
|
||||
state->optc[i].vtg0_enable = 1; /* VTG enabled for timing generation */
|
||||
|
||||
/* Initialize other key fields to defaults */
|
||||
state->optc[i].optc_input_pix_clk_en = 1;
|
||||
state->optc[i].optc_segment_width = (pipe_ctx->next_odm_pipe) ? (timing->h_addressable / 2) : timing->h_addressable;
|
||||
state->optc[i].otg_vready_offset = 1;
|
||||
state->optc[i].otg_vstartup_start = timing->v_addressable + 10;
|
||||
state->optc[i].otg_vupdate_offset = 0;
|
||||
state->optc[i].otg_vupdate_width = 5;
|
||||
} else {
|
||||
/* No timing generator resource - initialize all fields to 0 */
|
||||
memset(&state->optc[i], 0, sizeof(state->optc[i]));
|
||||
}
|
||||
}
|
||||
|
||||
	state->state_valid = true;
	return true;
}

void dc_log_preos_dmcub_info(const struct dc *dc)
{
	dc_dmub_srv_log_preos_dmcub_info(dc->ctx->dmub_srv);
}
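
/*
 * Illustrative sketch only (not part of the change above): consumers of the
 * captured snapshot are expected to gate on state_valid before reading any
 * per-block fields. The helper below is hypothetical and only demonstrates
 * that pattern using fields declared in dc_register_software_state.
 */
static bool snapshot_has_active_hubp_sketch(const struct dc_register_software_state *state)
{
	int i;

	if (!state->state_valid)
		return false;

	for (i = 0; i < MAX_PIPES; i++) {
		if (state->hubp[i].valid_stream && state->hubp[i].hubp_clock_enable)
			return true;
	}
	return false;
}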
|
||||
|
|
|
|||
|
|
@ -265,7 +265,7 @@ void color_space_to_black_color(
|
|||
black_color_format[BLACK_COLOR_FORMAT_RGB_LIMITED];
|
||||
break;
|
||||
|
||||
/**
|
||||
/*
|
||||
* Remove default and add case for all color space
|
||||
* so when we forget to add new color space
|
||||
* compiler will give a warning
|
||||
|
|
@ -1293,6 +1293,9 @@ void hwss_execute_sequence(struct dc *dc,
	case HUBP_MEM_PROGRAM_VIEWPORT:
		hwss_hubp_mem_program_viewport(params);
		break;
	case ABORT_CURSOR_OFFLOAD_UPDATE:
		hwss_abort_cursor_offload_update(params);
		break;
	case SET_CURSOR_ATTRIBUTE:
		hwss_set_cursor_attribute(params);
		break;
|
||||
|
|
@ -1318,7 +1321,7 @@ void hwss_execute_sequence(struct dc *dc,
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add OPTC pipe control lock to block sequence
|
||||
*/
|
||||
void hwss_add_optc_pipe_control_lock(struct block_sequence_state *seq_state,
|
||||
|
|
@ -1335,7 +1338,7 @@ void hwss_add_optc_pipe_control_lock(struct block_sequence_state *seq_state,
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add HUBP set flip control GSL to block sequence
|
||||
*/
|
||||
void hwss_add_hubp_set_flip_control_gsl(struct block_sequence_state *seq_state,
|
||||
|
|
@ -1350,7 +1353,7 @@ void hwss_add_hubp_set_flip_control_gsl(struct block_sequence_state *seq_state,
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add HUBP program triplebuffer to block sequence
|
||||
*/
|
||||
void hwss_add_hubp_program_triplebuffer(struct block_sequence_state *seq_state,
|
||||
|
|
@ -1367,7 +1370,7 @@ void hwss_add_hubp_program_triplebuffer(struct block_sequence_state *seq_state,
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add HUBP update plane address to block sequence
|
||||
*/
|
||||
void hwss_add_hubp_update_plane_addr(struct block_sequence_state *seq_state,
|
||||
|
|
@ -1382,7 +1385,7 @@ void hwss_add_hubp_update_plane_addr(struct block_sequence_state *seq_state,
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add DPP set input transfer function to block sequence
|
||||
*/
|
||||
void hwss_add_dpp_set_input_transfer_func(struct block_sequence_state *seq_state,
|
||||
|
|
@ -1399,7 +1402,7 @@ void hwss_add_dpp_set_input_transfer_func(struct block_sequence_state *seq_state
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add DPP program gamut remap to block sequence
|
||||
*/
|
||||
void hwss_add_dpp_program_gamut_remap(struct block_sequence_state *seq_state,
|
||||
|
|
@ -1412,7 +1415,7 @@ void hwss_add_dpp_program_gamut_remap(struct block_sequence_state *seq_state,
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add DPP program bias and scale to block sequence
|
||||
*/
|
||||
void hwss_add_dpp_program_bias_and_scale(struct block_sequence_state *seq_state, struct pipe_ctx *pipe_ctx)
|
||||
|
|
@ -1424,7 +1427,7 @@ void hwss_add_dpp_program_bias_and_scale(struct block_sequence_state *seq_state,
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add OPTC program manual trigger to block sequence
|
||||
*/
|
||||
void hwss_add_optc_program_manual_trigger(struct block_sequence_state *seq_state,
|
||||
|
|
@ -1437,7 +1440,7 @@ void hwss_add_optc_program_manual_trigger(struct block_sequence_state *seq_state
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add DPP set output transfer function to block sequence
|
||||
*/
|
||||
void hwss_add_dpp_set_output_transfer_func(struct block_sequence_state *seq_state,
|
||||
|
|
@ -1454,7 +1457,7 @@ void hwss_add_dpp_set_output_transfer_func(struct block_sequence_state *seq_stat
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add MPC update visual confirm to block sequence
|
||||
*/
|
||||
void hwss_add_mpc_update_visual_confirm(struct block_sequence_state *seq_state,
|
||||
|
|
@ -1471,7 +1474,7 @@ void hwss_add_mpc_update_visual_confirm(struct block_sequence_state *seq_state,
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add MPC power on MPC mem PWR to block sequence
|
||||
*/
|
||||
void hwss_add_mpc_power_on_mpc_mem_pwr(struct block_sequence_state *seq_state,
|
||||
|
|
@ -1488,7 +1491,7 @@ void hwss_add_mpc_power_on_mpc_mem_pwr(struct block_sequence_state *seq_state,
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add MPC set output CSC to block sequence
|
||||
*/
|
||||
void hwss_add_mpc_set_output_csc(struct block_sequence_state *seq_state,
|
||||
|
|
@ -1507,7 +1510,7 @@ void hwss_add_mpc_set_output_csc(struct block_sequence_state *seq_state,
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add MPC set OCSC default to block sequence
|
||||
*/
|
||||
void hwss_add_mpc_set_ocsc_default(struct block_sequence_state *seq_state,
|
||||
|
|
@ -1526,7 +1529,7 @@ void hwss_add_mpc_set_ocsc_default(struct block_sequence_state *seq_state,
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add DMUB send DMCUB command to block sequence
|
||||
*/
|
||||
void hwss_add_dmub_send_dmcub_cmd(struct block_sequence_state *seq_state,
|
||||
|
|
@ -1543,7 +1546,7 @@ void hwss_add_dmub_send_dmcub_cmd(struct block_sequence_state *seq_state,
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add DMUB SubVP save surface address to block sequence
|
||||
*/
|
||||
void hwss_add_dmub_subvp_save_surf_addr(struct block_sequence_state *seq_state,
|
||||
|
|
@ -1560,7 +1563,7 @@ void hwss_add_dmub_subvp_save_surf_addr(struct block_sequence_state *seq_state,
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add HUBP wait for DCC meta propagation to block sequence
|
||||
*/
|
||||
void hwss_add_hubp_wait_for_dcc_meta_prop(struct block_sequence_state *seq_state,
|
||||
|
|
@ -1575,7 +1578,7 @@ void hwss_add_hubp_wait_for_dcc_meta_prop(struct block_sequence_state *seq_state
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add HUBP wait pipe read start to block sequence
|
||||
*/
|
||||
void hwss_add_hubp_wait_pipe_read_start(struct block_sequence_state *seq_state,
|
||||
|
|
@ -1588,7 +1591,7 @@ void hwss_add_hubp_wait_pipe_read_start(struct block_sequence_state *seq_state,
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add HWS apply update flags for phantom to block sequence
|
||||
*/
|
||||
void hwss_add_hws_apply_update_flags_for_phantom(struct block_sequence_state *seq_state,
|
||||
|
|
@ -1601,7 +1604,7 @@ void hwss_add_hws_apply_update_flags_for_phantom(struct block_sequence_state *se
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add HWS update phantom VP position to block sequence
|
||||
*/
|
||||
void hwss_add_hws_update_phantom_vp_position(struct block_sequence_state *seq_state,
|
||||
|
|
@ -1618,7 +1621,7 @@ void hwss_add_hws_update_phantom_vp_position(struct block_sequence_state *seq_st
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add OPTC set ODM combine to block sequence
|
||||
*/
|
||||
void hwss_add_optc_set_odm_combine(struct block_sequence_state *seq_state,
|
||||
|
|
@ -1636,7 +1639,7 @@ void hwss_add_optc_set_odm_combine(struct block_sequence_state *seq_state,
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add OPTC set ODM bypass to block sequence
|
||||
*/
|
||||
void hwss_add_optc_set_odm_bypass(struct block_sequence_state *seq_state,
|
||||
|
|
@ -1659,7 +1662,7 @@ void hwss_send_dmcub_cmd(union block_sequence_params *params)
|
|||
dc_wake_and_execute_dmub_cmd(ctx, cmd, wait_type);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add TG program global sync to block sequence
|
||||
*/
|
||||
void hwss_add_tg_program_global_sync(struct block_sequence_state *seq_state,
|
||||
|
|
@ -1682,7 +1685,7 @@ void hwss_add_tg_program_global_sync(struct block_sequence_state *seq_state,
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add TG wait for state to block sequence
|
||||
*/
|
||||
void hwss_add_tg_wait_for_state(struct block_sequence_state *seq_state,
|
||||
|
|
@ -1697,7 +1700,7 @@ void hwss_add_tg_wait_for_state(struct block_sequence_state *seq_state,
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add TG set VTG params to block sequence
|
||||
*/
|
||||
void hwss_add_tg_set_vtg_params(struct block_sequence_state *seq_state,
|
||||
|
|
@ -1714,7 +1717,7 @@ void hwss_add_tg_set_vtg_params(struct block_sequence_state *seq_state,
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add TG setup vertical interrupt2 to block sequence
|
||||
*/
|
||||
void hwss_add_tg_setup_vertical_interrupt2(struct block_sequence_state *seq_state,
|
||||
|
|
@ -1728,7 +1731,7 @@ void hwss_add_tg_setup_vertical_interrupt2(struct block_sequence_state *seq_stat
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add DPP set HDR multiplier to block sequence
|
||||
*/
|
||||
void hwss_add_dpp_set_hdr_multiplier(struct block_sequence_state *seq_state,
|
||||
|
|
@ -1742,7 +1745,7 @@ void hwss_add_dpp_set_hdr_multiplier(struct block_sequence_state *seq_state,
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add HUBP program DET size to block sequence
|
||||
*/
|
||||
void hwss_add_hubp_program_det_size(struct block_sequence_state *seq_state,
|
||||
|
|
@ -1785,7 +1788,7 @@ void hwss_add_hubbub_force_pstate_change_control(struct block_sequence_state *se
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add HUBP program DET segments to block sequence
|
||||
*/
|
||||
void hwss_add_hubp_program_det_segments(struct block_sequence_state *seq_state,
|
||||
|
|
@ -1802,7 +1805,7 @@ void hwss_add_hubp_program_det_segments(struct block_sequence_state *seq_state,
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add OPP set dynamic expansion to block sequence
|
||||
*/
|
||||
void hwss_add_opp_set_dyn_expansion(struct block_sequence_state *seq_state,
|
||||
|
|
@ -1821,7 +1824,7 @@ void hwss_add_opp_set_dyn_expansion(struct block_sequence_state *seq_state,
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add OPP program FMT to block sequence
|
||||
*/
|
||||
void hwss_add_opp_program_fmt(struct block_sequence_state *seq_state,
|
||||
|
|
@ -1852,7 +1855,7 @@ void hwss_add_opp_program_left_edge_extra_pixel(struct block_sequence_state *seq
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add ABM set pipe to block sequence
|
||||
*/
|
||||
void hwss_add_abm_set_pipe(struct block_sequence_state *seq_state,
|
||||
|
|
@ -1867,7 +1870,7 @@ void hwss_add_abm_set_pipe(struct block_sequence_state *seq_state,
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add ABM set level to block sequence
|
||||
*/
|
||||
void hwss_add_abm_set_level(struct block_sequence_state *seq_state,
|
||||
|
|
@ -1882,7 +1885,7 @@ void hwss_add_abm_set_level(struct block_sequence_state *seq_state,
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add TG enable CRTC to block sequence
|
||||
*/
|
||||
void hwss_add_tg_enable_crtc(struct block_sequence_state *seq_state,
|
||||
|
|
@ -1895,7 +1898,7 @@ void hwss_add_tg_enable_crtc(struct block_sequence_state *seq_state,
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add HUBP wait flip pending to block sequence
|
||||
*/
|
||||
void hwss_add_hubp_wait_flip_pending(struct block_sequence_state *seq_state,
|
||||
|
|
@ -1912,7 +1915,7 @@ void hwss_add_hubp_wait_flip_pending(struct block_sequence_state *seq_state,
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add TG wait double buffer pending to block sequence
|
||||
*/
|
||||
void hwss_add_tg_wait_double_buffer_pending(struct block_sequence_state *seq_state,
|
||||
|
|
@ -3076,6 +3079,15 @@ void hwss_hubp_mem_program_viewport(union block_sequence_params *params)
|
|||
hubp->funcs->mem_program_viewport(hubp, viewport, viewport_c);
|
||||
}
|
||||
|
||||
void hwss_abort_cursor_offload_update(union block_sequence_params *params)
{
	struct dc *dc = params->abort_cursor_offload_update_params.dc;
	struct pipe_ctx *pipe_ctx = params->abort_cursor_offload_update_params.pipe_ctx;

	if (dc && dc->hwss.abort_cursor_offload_update)
		dc->hwss.abort_cursor_offload_update(dc, pipe_ctx);
}
|
||||
|
||||
void hwss_set_cursor_attribute(union block_sequence_params *params)
|
||||
{
|
||||
struct dc *dc = params->set_cursor_attribute_params.dc;
|
||||
|
|
@ -3270,7 +3282,7 @@ void hwss_add_opp_set_disp_pattern_generator(struct block_sequence_state *seq_st
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add MPC update blending to block sequence
|
||||
*/
|
||||
void hwss_add_mpc_update_blending(struct block_sequence_state *seq_state,
|
||||
|
|
@ -3287,7 +3299,7 @@ void hwss_add_mpc_update_blending(struct block_sequence_state *seq_state,
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add MPC insert plane to block sequence
|
||||
*/
|
||||
void hwss_add_mpc_insert_plane(struct block_sequence_state *seq_state,
|
||||
|
|
@ -3312,7 +3324,7 @@ void hwss_add_mpc_insert_plane(struct block_sequence_state *seq_state,
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add MPC assert idle MPCC to block sequence
|
||||
*/
|
||||
void hwss_add_mpc_assert_idle_mpcc(struct block_sequence_state *seq_state,
|
||||
|
|
@ -3327,7 +3339,7 @@ void hwss_add_mpc_assert_idle_mpcc(struct block_sequence_state *seq_state,
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Helper function to add HUBP set blank to block sequence
|
||||
*/
|
||||
void hwss_add_hubp_set_blank(struct block_sequence_state *seq_state,
|
||||
|
|
@ -3934,6 +3946,18 @@ void hwss_add_hubp_mem_program_viewport(struct block_sequence_state *seq_state,
|
|||
}
|
||||
}
|
||||
|
||||
void hwss_add_abort_cursor_offload_update(struct block_sequence_state *seq_state,
		struct dc *dc,
		struct pipe_ctx *pipe_ctx)
{
	if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
		seq_state->steps[*seq_state->num_steps].func = ABORT_CURSOR_OFFLOAD_UPDATE;
		seq_state->steps[*seq_state->num_steps].params.abort_cursor_offload_update_params.dc = dc;
		seq_state->steps[*seq_state->num_steps].params.abort_cursor_offload_update_params.pipe_ctx = pipe_ctx;
		(*seq_state->num_steps)++;
	}
}
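
/*
 * Illustrative sketch only (not part of the change above): a caller queues the
 * abort step with the helper above, and hwss_execute_sequence() later
 * dispatches ABORT_CURSOR_OFFLOAD_UPDATE to hwss_abort_cursor_offload_update().
 * The wrapper name below is hypothetical.
 */
static void queue_abort_cursor_offload_sketch(struct block_sequence_state *seq_state,
					      struct dc *dc,
					      struct pipe_ctx *pipe_ctx)
{
	/* the helper bounds-checks against MAX_HWSS_BLOCK_SEQUENCE_SIZE internally */
	hwss_add_abort_cursor_offload_update(seq_state, dc, pipe_ctx);
}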
|
||||
|
||||
void hwss_add_set_cursor_attribute(struct block_sequence_state *seq_state,
|
||||
struct dc *dc,
|
||||
struct pipe_ctx *pipe_ctx)
|
||||
|
|
|
|||
|
|
@ -99,6 +99,40 @@

#define UNABLE_TO_SPLIT -1

static void capture_pipe_topology_data(struct dc *dc, int plane_idx, int slice_idx, int stream_idx,
		int dpp_inst, int opp_inst, int tg_inst, bool is_phantom_pipe)
{
	struct pipe_topology_snapshot *current_snapshot = &dc->debug_data.topology_history.snapshots[dc->debug_data.topology_history.current_snapshot_index];

	if (current_snapshot->line_count >= MAX_PIPES)
		return;

	current_snapshot->pipe_log_lines[current_snapshot->line_count].is_phantom_pipe = is_phantom_pipe;
	current_snapshot->pipe_log_lines[current_snapshot->line_count].plane_idx = plane_idx;
	current_snapshot->pipe_log_lines[current_snapshot->line_count].slice_idx = slice_idx;
	current_snapshot->pipe_log_lines[current_snapshot->line_count].stream_idx = stream_idx;
	current_snapshot->pipe_log_lines[current_snapshot->line_count].dpp_inst = dpp_inst;
	current_snapshot->pipe_log_lines[current_snapshot->line_count].opp_inst = opp_inst;
	current_snapshot->pipe_log_lines[current_snapshot->line_count].tg_inst = tg_inst;

	current_snapshot->line_count++;
}
|
||||
|
||||
static void start_new_topology_snapshot(struct dc *dc, struct dc_state *state)
{
	// Move to next snapshot slot (circular buffer)
	dc->debug_data.topology_history.current_snapshot_index = (dc->debug_data.topology_history.current_snapshot_index + 1) % MAX_TOPOLOGY_SNAPSHOTS;

	// Clear the new snapshot
	struct pipe_topology_snapshot *current_snapshot = &dc->debug_data.topology_history.snapshots[dc->debug_data.topology_history.current_snapshot_index];
	memset(current_snapshot, 0, sizeof(*current_snapshot));

	// Set metadata
	current_snapshot->timestamp_us = dm_get_timestamp(dc->ctx);
	current_snapshot->stream_count = state->stream_count;
	current_snapshot->phantom_stream_count = state->phantom_stream_count;
}
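
/*
 * Illustrative sketch only (not part of the change above): because
 * current_snapshot_index advances modulo MAX_TOPOLOGY_SNAPSHOTS, the oldest
 * entry sits in the slot right after the current one. The helper below is
 * hypothetical and only shows the wrap-around walk over the ring.
 */
static int count_topology_snapshots_sketch(const struct pipe_topology_history *hist)
{
	int idx = (hist->current_snapshot_index + 1) % MAX_TOPOLOGY_SNAPSHOTS;
	int n, used = 0;

	for (n = 0; n < MAX_TOPOLOGY_SNAPSHOTS; n++) {
		if (hist->snapshots[idx].line_count)
			used++;		/* slot has been written at least once */
		idx = (idx + 1) % MAX_TOPOLOGY_SNAPSHOTS;
	}
	return used;
}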
|
||||
|
||||
enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
|
||||
{
|
||||
enum dce_version dc_version = DCE_VERSION_UNKNOWN;
|
||||
|
|
@ -2311,10 +2345,11 @@ bool resource_is_odm_topology_changed(const struct pipe_ctx *otg_master_a,
|
|||
|
||||
static void resource_log_pipe(struct dc *dc, struct pipe_ctx *pipe,
|
||||
int stream_idx, int slice_idx, int plane_idx, int slice_count,
|
||||
bool is_primary)
|
||||
bool is_primary, bool is_phantom_pipe)
|
||||
{
|
||||
DC_LOGGER_INIT(dc->ctx->logger);
|
||||
|
||||
// new format for logging: bit storing code
|
||||
if (slice_idx == 0 && plane_idx == 0 && is_primary) {
|
||||
/* case 0 (OTG master pipe with plane) */
|
||||
DC_LOG_DC(" | plane%d slice%d stream%d|",
|
||||
|
|
@ -2323,6 +2358,10 @@ static void resource_log_pipe(struct dc *dc, struct pipe_ctx *pipe,
|
|||
pipe->plane_res.dpp->inst,
|
||||
pipe->stream_res.opp->inst,
|
||||
pipe->stream_res.tg->inst);
|
||||
capture_pipe_topology_data(dc, plane_idx, slice_idx, stream_idx,
|
||||
pipe->plane_res.dpp->inst,
|
||||
pipe->stream_res.opp->inst,
|
||||
pipe->stream_res.tg->inst, is_phantom_pipe);
|
||||
} else if (slice_idx == 0 && plane_idx == -1) {
|
||||
/* case 1 (OTG master pipe without plane) */
|
||||
DC_LOG_DC(" | slice%d stream%d|",
|
||||
|
|
@ -2331,6 +2370,10 @@ static void resource_log_pipe(struct dc *dc, struct pipe_ctx *pipe,
|
|||
pipe->stream_res.opp->inst,
|
||||
pipe->stream_res.opp->inst,
|
||||
pipe->stream_res.tg->inst);
|
||||
capture_pipe_topology_data(dc, 0xF, slice_idx, stream_idx,
|
||||
pipe->plane_res.dpp->inst,
|
||||
pipe->stream_res.opp->inst,
|
||||
pipe->stream_res.tg->inst, is_phantom_pipe);
|
||||
} else if (slice_idx != 0 && plane_idx == 0 && is_primary) {
|
||||
/* case 2 (OPP head pipe with plane) */
|
||||
DC_LOG_DC(" | plane%d slice%d | |",
|
||||
|
|
@ -2338,27 +2381,43 @@ static void resource_log_pipe(struct dc *dc, struct pipe_ctx *pipe,
|
|||
DC_LOG_DC(" |DPP%d----OPP%d----| |",
|
||||
pipe->plane_res.dpp->inst,
|
||||
pipe->stream_res.opp->inst);
|
||||
capture_pipe_topology_data(dc, plane_idx, slice_idx, stream_idx,
|
||||
pipe->plane_res.dpp->inst,
|
||||
pipe->stream_res.opp->inst,
|
||||
pipe->stream_res.tg->inst, is_phantom_pipe);
|
||||
} else if (slice_idx != 0 && plane_idx == -1) {
|
||||
/* case 3 (OPP head pipe without plane) */
|
||||
DC_LOG_DC(" | slice%d | |", slice_idx);
|
||||
DC_LOG_DC(" |DPG%d----OPP%d----| |",
|
||||
pipe->plane_res.dpp->inst,
|
||||
pipe->stream_res.opp->inst);
|
||||
capture_pipe_topology_data(dc, 0xF, slice_idx, stream_idx,
|
||||
pipe->plane_res.dpp->inst,
|
||||
pipe->stream_res.opp->inst,
|
||||
pipe->stream_res.tg->inst, is_phantom_pipe);
|
||||
} else if (slice_idx == slice_count - 1) {
|
||||
/* case 4 (DPP pipe in last slice) */
|
||||
DC_LOG_DC(" | plane%d | |", plane_idx);
|
||||
DC_LOG_DC(" |DPP%d----| |",
|
||||
pipe->plane_res.dpp->inst);
|
||||
capture_pipe_topology_data(dc, plane_idx, slice_idx, stream_idx,
|
||||
pipe->plane_res.dpp->inst,
|
||||
pipe->stream_res.opp->inst,
|
||||
pipe->stream_res.tg->inst, is_phantom_pipe);
|
||||
} else {
|
||||
/* case 5 (DPP pipe not in last slice) */
|
||||
DC_LOG_DC(" | plane%d | | |", plane_idx);
|
||||
DC_LOG_DC(" |DPP%d----| | |",
|
||||
pipe->plane_res.dpp->inst);
|
||||
capture_pipe_topology_data(dc, plane_idx, slice_idx, stream_idx,
|
||||
pipe->plane_res.dpp->inst,
|
||||
pipe->stream_res.opp->inst,
|
||||
pipe->stream_res.tg->inst, is_phantom_pipe);
|
||||
}
|
||||
}
|
||||
|
||||
static void resource_log_pipe_for_stream(struct dc *dc, struct dc_state *state,
|
||||
struct pipe_ctx *otg_master, int stream_idx)
|
||||
struct pipe_ctx *otg_master, int stream_idx, bool is_phantom_pipe)
|
||||
{
|
||||
struct pipe_ctx *opp_heads[MAX_PIPES];
|
||||
struct pipe_ctx *dpp_pipes[MAX_PIPES];
|
||||
|
|
@ -2384,12 +2443,12 @@ static void resource_log_pipe_for_stream(struct dc *dc, struct dc_state *state,
|
|||
resource_log_pipe(dc, dpp_pipes[dpp_idx],
|
||||
stream_idx, slice_idx,
|
||||
plane_idx, slice_count,
|
||||
is_primary);
|
||||
is_primary, is_phantom_pipe);
|
||||
}
|
||||
} else {
|
||||
resource_log_pipe(dc, opp_heads[slice_idx],
|
||||
stream_idx, slice_idx, plane_idx,
|
||||
slice_count, true);
|
||||
slice_count, true, is_phantom_pipe);
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -2420,6 +2479,10 @@ void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state)
|
|||
struct pipe_ctx *otg_master;
|
||||
int stream_idx, phantom_stream_idx;
|
||||
DC_LOGGER_INIT(dc->ctx->logger);
|
||||
bool is_phantom_pipe = false;
|
||||
|
||||
// Start a new snapshot for this topology update
|
||||
start_new_topology_snapshot(dc, state);
|
||||
|
||||
DC_LOG_DC(" pipe topology update");
|
||||
DC_LOG_DC(" ________________________");
|
||||
|
|
@ -2433,9 +2496,10 @@ void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state)
|
|||
if (!otg_master)
|
||||
continue;
|
||||
|
||||
resource_log_pipe_for_stream(dc, state, otg_master, stream_idx);
|
||||
resource_log_pipe_for_stream(dc, state, otg_master, stream_idx, is_phantom_pipe);
|
||||
}
|
||||
if (state->phantom_stream_count > 0) {
|
||||
is_phantom_pipe = true;
|
||||
DC_LOG_DC(" | (phantom pipes) |");
|
||||
for (stream_idx = 0; stream_idx < state->stream_count; stream_idx++) {
|
||||
if (state->stream_status[stream_idx].mall_stream_config.type != SUBVP_MAIN)
|
||||
|
|
@ -2448,7 +2512,7 @@ void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state)
|
|||
if (!otg_master)
|
||||
continue;
|
||||
|
||||
resource_log_pipe_for_stream(dc, state, otg_master, stream_idx);
|
||||
resource_log_pipe_for_stream(dc, state, otg_master, stream_idx, is_phantom_pipe);
|
||||
}
|
||||
}
|
||||
DC_LOG_DC(" |________________________|\n");
|
||||
|
|
|
|||
|
|
@ -737,9 +737,14 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
{
	uint8_t i;
	bool ret = false;
	struct dc *dc = stream->ctx->dc;
	struct resource_context *res_ctx =
		&dc->current_state->res_ctx;
	struct dc *dc;
	struct resource_context *res_ctx;

	if (!stream->ctx)
		return false;

	dc = stream->ctx->dc;
	res_ctx = &dc->current_state->res_ctx;

	dc_exit_ips_for_hw_access(dc);
|
||||
|
||||
|
|
|
|||
|
|
@ -63,7 +63,7 @@ struct dcn_dsc_reg_state;
struct dcn_optc_reg_state;
struct dcn_dccg_reg_state;

#define DC_VER "3.2.358"
#define DC_VER "3.2.359"

/**
 * MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC

@ -899,6 +899,7 @@ struct dc_debug_data {
	uint32_t ltFailCount;
	uint32_t i2cErrorCount;
	uint32_t auxErrorCount;
	struct pipe_topology_history topology_history;
};
|
||||
|
||||
struct dc_phy_addr_space_config {
|
||||
|
|
@ -2792,4 +2793,491 @@ void dc_get_underflow_debug_data_for_otg(struct dc *dc, int primary_otg_inst, st
|
|||
|
||||
void dc_get_power_feature_status(struct dc *dc, int primary_otg_inst, struct power_features *out_data);
|
||||
|
||||
/**
|
||||
* Software state variables used to program register fields across the display pipeline
|
||||
*/
|
||||
struct dc_register_software_state {
|
||||
/* HUBP register programming variables for each pipe */
|
||||
struct {
|
||||
bool valid_plane_state;
|
||||
bool valid_stream;
|
||||
bool min_dc_gfx_version9;
|
||||
uint32_t vtg_sel; /* DCHUBP_CNTL->HUBP_VTG_SEL from pipe_ctx->stream_res.tg->inst */
|
||||
uint32_t hubp_clock_enable; /* HUBP_CLK_CNTL->HUBP_CLOCK_ENABLE from power management */
|
||||
uint32_t surface_pixel_format; /* DCSURF_SURFACE_CONFIG->SURFACE_PIXEL_FORMAT from plane_state->format */
|
||||
uint32_t rotation_angle; /* DCSURF_SURFACE_CONFIG->ROTATION_ANGLE from plane_state->rotation */
|
||||
uint32_t h_mirror_en; /* DCSURF_SURFACE_CONFIG->H_MIRROR_EN from plane_state->horizontal_mirror */
|
||||
uint32_t surface_dcc_en; /* DCSURF_SURFACE_CONTROL->PRIMARY_SURFACE_DCC_EN from dcc->enable */
|
||||
uint32_t surface_size_width; /* HUBP_SIZE->SURFACE_SIZE_WIDTH from plane_size.surface_size.width */
|
||||
uint32_t surface_size_height; /* HUBP_SIZE->SURFACE_SIZE_HEIGHT from plane_size.surface_size.height */
|
||||
uint32_t pri_viewport_width; /* DCSURF_PRI_VIEWPORT_DIMENSION->PRI_VIEWPORT_WIDTH from scaler_data.viewport.width */
|
||||
uint32_t pri_viewport_height; /* DCSURF_PRI_VIEWPORT_DIMENSION->PRI_VIEWPORT_HEIGHT from scaler_data.viewport.height */
|
||||
uint32_t pri_viewport_x_start; /* DCSURF_PRI_VIEWPORT_START->PRI_VIEWPORT_X_START from scaler_data.viewport.x */
|
||||
uint32_t pri_viewport_y_start; /* DCSURF_PRI_VIEWPORT_START->PRI_VIEWPORT_Y_START from scaler_data.viewport.y */
|
||||
uint32_t cursor_enable; /* CURSOR_CONTROL->CURSOR_ENABLE from cursor_attributes.enable */
|
||||
uint32_t cursor_width; /* CURSOR_SETTINGS->CURSOR_WIDTH from cursor_position.width */
|
||||
uint32_t cursor_height; /* CURSOR_SETTINGS->CURSOR_HEIGHT from cursor_position.height */
|
||||
|
||||
/* Additional DCC configuration */
|
||||
uint32_t surface_dcc_ind_64b_blk; /* DCSURF_SURFACE_CONTROL->PRIMARY_SURFACE_DCC_IND_64B_BLK from dcc.independent_64b_blks */
|
||||
uint32_t surface_dcc_ind_128b_blk; /* DCSURF_SURFACE_CONTROL->PRIMARY_SURFACE_DCC_IND_128B_BLK from dcc.independent_128b_blks */
|
||||
|
||||
/* Surface pitch configuration */
|
||||
uint32_t surface_pitch; /* DCSURF_SURFACE_PITCH->PITCH from plane_size.surface_pitch */
|
||||
uint32_t meta_pitch; /* DCSURF_SURFACE_PITCH->META_PITCH from dcc.meta_pitch */
|
||||
uint32_t chroma_pitch; /* DCSURF_SURFACE_PITCH_C->PITCH_C from plane_size.chroma_pitch */
|
||||
uint32_t meta_pitch_c; /* DCSURF_SURFACE_PITCH_C->META_PITCH_C from dcc.meta_pitch_c */
|
||||
|
||||
/* Surface addresses */
|
||||
uint32_t primary_surface_address_low; /* DCSURF_PRIMARY_SURFACE_ADDRESS->PRIMARY_SURFACE_ADDRESS from address.grph.addr.low_part */
|
||||
uint32_t primary_surface_address_high; /* DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH->PRIMARY_SURFACE_ADDRESS_HIGH from address.grph.addr.high_part */
|
||||
uint32_t primary_meta_surface_address_low; /* DCSURF_PRIMARY_META_SURFACE_ADDRESS->PRIMARY_META_SURFACE_ADDRESS from address.grph.meta_addr.low_part */
|
||||
uint32_t primary_meta_surface_address_high; /* DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH->PRIMARY_META_SURFACE_ADDRESS_HIGH from address.grph.meta_addr.high_part */
|
||||
|
||||
/* TMZ configuration */
|
||||
uint32_t primary_surface_tmz; /* DCSURF_SURFACE_CONTROL->PRIMARY_SURFACE_TMZ from address.tmz_surface */
|
||||
uint32_t primary_meta_surface_tmz; /* DCSURF_SURFACE_CONTROL->PRIMARY_META_SURFACE_TMZ from address.tmz_surface */
|
||||
|
||||
/* Tiling configuration */
|
||||
uint32_t sw_mode; /* DCSURF_TILING_CONFIG->SW_MODE from tiling_info.gfx9.swizzle */
|
||||
uint32_t num_pipes; /* DCSURF_ADDR_CONFIG->NUM_PIPES from tiling_info.gfx9.num_pipes */
|
||||
uint32_t num_banks; /* DCSURF_ADDR_CONFIG->NUM_BANKS from tiling_info.gfx9.num_banks */
|
||||
uint32_t pipe_interleave; /* DCSURF_ADDR_CONFIG->PIPE_INTERLEAVE from tiling_info.gfx9.pipe_interleave */
|
||||
uint32_t num_shader_engines; /* DCSURF_ADDR_CONFIG->NUM_SE from tiling_info.gfx9.num_shader_engines */
|
||||
uint32_t num_rb_per_se; /* DCSURF_ADDR_CONFIG->NUM_RB_PER_SE from tiling_info.gfx9.num_rb_per_se */
|
||||
uint32_t num_pkrs; /* DCSURF_ADDR_CONFIG->NUM_PKRS from tiling_info.gfx9.num_pkrs */
|
||||
|
||||
/* DML Request Size Configuration - Luma */
|
||||
uint32_t rq_chunk_size; /* DCHUBP_REQ_SIZE_CONFIG->CHUNK_SIZE from rq_regs.rq_regs_l.chunk_size */
|
||||
uint32_t rq_min_chunk_size; /* DCHUBP_REQ_SIZE_CONFIG->MIN_CHUNK_SIZE from rq_regs.rq_regs_l.min_chunk_size */
|
||||
uint32_t rq_meta_chunk_size; /* DCHUBP_REQ_SIZE_CONFIG->META_CHUNK_SIZE from rq_regs.rq_regs_l.meta_chunk_size */
|
||||
uint32_t rq_min_meta_chunk_size; /* DCHUBP_REQ_SIZE_CONFIG->MIN_META_CHUNK_SIZE from rq_regs.rq_regs_l.min_meta_chunk_size */
|
||||
uint32_t rq_dpte_group_size; /* DCHUBP_REQ_SIZE_CONFIG->DPTE_GROUP_SIZE from rq_regs.rq_regs_l.dpte_group_size */
|
||||
uint32_t rq_mpte_group_size; /* DCHUBP_REQ_SIZE_CONFIG->MPTE_GROUP_SIZE from rq_regs.rq_regs_l.mpte_group_size */
|
||||
uint32_t rq_swath_height_l; /* DCHUBP_REQ_SIZE_CONFIG->SWATH_HEIGHT_L from rq_regs.rq_regs_l.swath_height */
|
||||
uint32_t rq_pte_row_height_l; /* DCHUBP_REQ_SIZE_CONFIG->PTE_ROW_HEIGHT_L from rq_regs.rq_regs_l.pte_row_height */
|
||||
|
||||
/* DML Request Size Configuration - Chroma */
|
||||
uint32_t rq_chunk_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->CHUNK_SIZE_C from rq_regs.rq_regs_c.chunk_size */
|
||||
uint32_t rq_min_chunk_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->MIN_CHUNK_SIZE_C from rq_regs.rq_regs_c.min_chunk_size */
|
||||
uint32_t rq_meta_chunk_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->META_CHUNK_SIZE_C from rq_regs.rq_regs_c.meta_chunk_size */
|
||||
uint32_t rq_min_meta_chunk_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->MIN_META_CHUNK_SIZE_C from rq_regs.rq_regs_c.min_meta_chunk_size */
|
||||
uint32_t rq_dpte_group_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->DPTE_GROUP_SIZE_C from rq_regs.rq_regs_c.dpte_group_size */
|
||||
uint32_t rq_mpte_group_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->MPTE_GROUP_SIZE_C from rq_regs.rq_regs_c.mpte_group_size */
|
||||
uint32_t rq_swath_height_c; /* DCHUBP_REQ_SIZE_CONFIG_C->SWATH_HEIGHT_C from rq_regs.rq_regs_c.swath_height */
|
||||
uint32_t rq_pte_row_height_c; /* DCHUBP_REQ_SIZE_CONFIG_C->PTE_ROW_HEIGHT_C from rq_regs.rq_regs_c.pte_row_height */
|
||||
|
||||
/* DML Expansion Modes */
|
||||
uint32_t drq_expansion_mode; /* DCN_EXPANSION_MODE->DRQ_EXPANSION_MODE from rq_regs.drq_expansion_mode */
|
||||
uint32_t prq_expansion_mode; /* DCN_EXPANSION_MODE->PRQ_EXPANSION_MODE from rq_regs.prq_expansion_mode */
|
||||
uint32_t mrq_expansion_mode; /* DCN_EXPANSION_MODE->MRQ_EXPANSION_MODE from rq_regs.mrq_expansion_mode */
|
||||
uint32_t crq_expansion_mode; /* DCN_EXPANSION_MODE->CRQ_EXPANSION_MODE from rq_regs.crq_expansion_mode */
|
||||
|
||||
/* DML DLG parameters - nominal */
|
||||
uint32_t dst_y_per_vm_vblank; /* NOM_PARAMETERS_0->DST_Y_PER_VM_VBLANK from dlg_regs.dst_y_per_vm_vblank */
|
||||
uint32_t dst_y_per_row_vblank; /* NOM_PARAMETERS_0->DST_Y_PER_ROW_VBLANK from dlg_regs.dst_y_per_row_vblank */
|
||||
uint32_t dst_y_per_vm_flip; /* NOM_PARAMETERS_1->DST_Y_PER_VM_FLIP from dlg_regs.dst_y_per_vm_flip */
|
||||
uint32_t dst_y_per_row_flip; /* NOM_PARAMETERS_1->DST_Y_PER_ROW_FLIP from dlg_regs.dst_y_per_row_flip */
|
||||
|
||||
/* DML prefetch settings */
|
||||
uint32_t dst_y_prefetch; /* PREFETCH_SETTINS->DST_Y_PREFETCH from dlg_regs.dst_y_prefetch */
|
||||
uint32_t vratio_prefetch; /* PREFETCH_SETTINS->VRATIO_PREFETCH from dlg_regs.vratio_prefetch */
|
||||
uint32_t vratio_prefetch_c; /* PREFETCH_SETTINS_C->VRATIO_PREFETCH_C from dlg_regs.vratio_prefetch_c */
|
||||
|
||||
/* TTU parameters */
|
||||
uint32_t qos_level_low_wm; /* TTU_CNTL1->QoSLevelLowWaterMark from ttu_regs.qos_level_low_wm */
|
||||
uint32_t qos_level_high_wm; /* TTU_CNTL1->QoSLevelHighWaterMark from ttu_regs.qos_level_high_wm */
|
||||
uint32_t qos_level_flip; /* TTU_CNTL2->QoS_LEVEL_FLIP_L from ttu_regs.qos_level_flip */
|
||||
uint32_t min_ttu_vblank; /* DCN_GLOBAL_TTU_CNTL->MIN_TTU_VBLANK from ttu_regs.min_ttu_vblank */
|
||||
} hubp[MAX_PIPES];
|
||||
|
||||
/* HUBBUB register programming variables */
|
||||
struct {
|
||||
/* Individual DET buffer control per pipe - software state that programs DET registers */
|
||||
uint32_t det0_size; /* DCHUBBUB_DET0_CTRL->DET0_SIZE from hubbub->funcs->program_det_size(hubbub, 0, det_buffer_size_kb) */
|
||||
uint32_t det1_size; /* DCHUBBUB_DET1_CTRL->DET1_SIZE from hubbub->funcs->program_det_size(hubbub, 1, det_buffer_size_kb) */
|
||||
uint32_t det2_size; /* DCHUBBUB_DET2_CTRL->DET2_SIZE from hubbub->funcs->program_det_size(hubbub, 2, det_buffer_size_kb) */
|
||||
uint32_t det3_size; /* DCHUBBUB_DET3_CTRL->DET3_SIZE from hubbub->funcs->program_det_size(hubbub, 3, det_buffer_size_kb) */
|
||||
|
||||
/* Compression buffer control - software state that programs COMPBUF registers */
|
||||
uint32_t compbuf_size; /* DCHUBBUB_COMPBUF_CTRL->COMPBUF_SIZE from hubbub->funcs->program_compbuf_size(hubbub, compbuf_size_kb, safe_to_increase) */
|
||||
uint32_t compbuf_reserved_space_64b; /* COMPBUF_RESERVED_SPACE->COMPBUF_RESERVED_SPACE_64B from hubbub2->pixel_chunk_size / 32 */
|
||||
uint32_t compbuf_reserved_space_zs; /* COMPBUF_RESERVED_SPACE->COMPBUF_RESERVED_SPACE_ZS from hubbub2->pixel_chunk_size / 128 */
|
||||
} hubbub;
|
||||
|
||||
/* DPP register programming variables for each pipe (simplified for available fields) */
|
||||
struct {
|
||||
uint32_t dpp_clock_enable; /* DPP_CONTROL->DPP_CLOCK_ENABLE from dppclk_enable */
|
||||
|
||||
/* Recout (Rectangle of Interest) configuration */
|
||||
uint32_t recout_start_x; /* RECOUT_START->RECOUT_START_X from pipe_ctx->plane_res.scl_data.recout.x */
|
||||
uint32_t recout_start_y; /* RECOUT_START->RECOUT_START_Y from pipe_ctx->plane_res.scl_data.recout.y */
|
||||
uint32_t recout_width; /* RECOUT_SIZE->RECOUT_WIDTH from pipe_ctx->plane_res.scl_data.recout.width */
|
||||
uint32_t recout_height; /* RECOUT_SIZE->RECOUT_HEIGHT from pipe_ctx->plane_res.scl_data.recout.height */
|
||||
|
||||
/* MPC (Multiple Pipe/Plane Combiner) size configuration */
|
||||
uint32_t mpc_width; /* MPC_SIZE->MPC_WIDTH from pipe_ctx->plane_res.scl_data.h_active */
|
||||
uint32_t mpc_height; /* MPC_SIZE->MPC_HEIGHT from pipe_ctx->plane_res.scl_data.v_active */
|
||||
|
||||
/* DSCL mode configuration */
|
||||
uint32_t dscl_mode; /* SCL_MODE->DSCL_MODE from pipe_ctx->plane_res.scl_data.dscl_prog_data.dscl_mode */
|
||||
|
||||
/* Scaler ratios (simplified to integer parts) */
|
||||
uint32_t horz_ratio_int; /* SCL_HORZ_FILTER_SCALE_RATIO->SCL_H_SCALE_RATIO integer part from ratios.horz */
|
||||
uint32_t vert_ratio_int; /* SCL_VERT_FILTER_SCALE_RATIO->SCL_V_SCALE_RATIO integer part from ratios.vert */
|
||||
|
||||
/* Basic scaler taps */
|
||||
uint32_t h_taps; /* SCL_TAP_CONTROL->SCL_H_NUM_TAPS from taps.h_taps */
|
||||
uint32_t v_taps; /* SCL_TAP_CONTROL->SCL_V_NUM_TAPS from taps.v_taps */
|
||||
} dpp[MAX_PIPES];
|
||||
|
||||
/* DCCG register programming variables */
|
||||
struct {
|
||||
/* Core Display Clock Control */
|
||||
uint32_t dispclk_khz; /* DENTIST_DISPCLK_CNTL->DENTIST_DISPCLK_WDIVIDER from clk_mgr.dispclk_khz */
|
||||
uint32_t dc_mem_global_pwr_req_dis; /* DC_MEM_GLOBAL_PWR_REQ_CNTL->DC_MEM_GLOBAL_PWR_REQ_DIS from memory power management settings */
|
||||
|
||||
/* DPP Clock Control - 4 fields per pipe */
|
||||
uint32_t dppclk_khz[MAX_PIPES]; /* DPPCLK_CTRL->DPPCLK_R_GATE_DISABLE from dpp_clocks[pipe] */
|
||||
uint32_t dppclk_enable[MAX_PIPES]; /* DPPCLK_CTRL->DPPCLK0_EN,DPPCLK1_EN,DPPCLK2_EN,DPPCLK3_EN from dccg31_update_dpp_dto() */
|
||||
uint32_t dppclk_dto_enable[MAX_PIPES]; /* DPPCLK_DTO_CTRL->DPPCLK_DTO_ENABLE from dccg->dpp_clock_gated[dpp_inst] state */
|
||||
uint32_t dppclk_dto_phase[MAX_PIPES]; /* DPPCLK0_DTO_PARAM->DPPCLK0_DTO_PHASE from phase calculation req_dppclk/ref_dppclk */
|
||||
uint32_t dppclk_dto_modulo[MAX_PIPES]; /* DPPCLK0_DTO_PARAM->DPPCLK0_DTO_MODULO from modulo = 0xff */
|
||||
|
||||
/* DSC Clock Control - 4 fields per DSC resource */
|
||||
uint32_t dscclk_khz[MAX_PIPES]; /* DSCCLK_DTO_CTRL->DSCCLK_DTO_ENABLE from dsc_clocks */
|
||||
uint32_t dscclk_dto_enable[MAX_PIPES]; /* DSCCLK_DTO_CTRL->DSCCLK0_DTO_ENABLE,DSCCLK1_DTO_ENABLE,DSCCLK2_DTO_ENABLE,DSCCLK3_DTO_ENABLE */
|
||||
uint32_t dscclk_dto_phase[MAX_PIPES]; /* DSCCLK0_DTO_PARAM->DSCCLK0_DTO_PHASE from dccg31_enable_dscclk() */
|
||||
uint32_t dscclk_dto_modulo[MAX_PIPES]; /* DSCCLK0_DTO_PARAM->DSCCLK0_DTO_MODULO from dccg31_enable_dscclk() */
|
||||
|
||||
/* Pixel Clock Control - per pipe */
|
||||
uint32_t pixclk_khz[MAX_PIPES]; /* PIXCLK_RESYNC_CNTL->PIXCLK_RESYNC_ENABLE from stream.timing.pix_clk_100hz */
|
||||
uint32_t otg_pixel_rate_div[MAX_PIPES]; /* OTG_PIXEL_RATE_DIV->OTG_PIXEL_RATE_DIV from OTG pixel rate divider control */
|
||||
uint32_t dtbclk_dto_enable[MAX_PIPES]; /* OTG0_PIXEL_RATE_CNTL->DTBCLK_DTO_ENABLE from dccg31_set_dtbclk_dto() */
|
||||
uint32_t pipe_dto_src_sel[MAX_PIPES]; /* OTG0_PIXEL_RATE_CNTL->PIPE_DTO_SRC_SEL from dccg31_set_dtbclk_dto() source selection */
|
||||
uint32_t dtbclk_dto_div[MAX_PIPES]; /* OTG0_PIXEL_RATE_CNTL->DTBCLK_DTO_DIV from dtbdto_div calculation */
|
||||
uint32_t otg_add_pixel[MAX_PIPES]; /* OTG0_PIXEL_RATE_CNTL->OTG_ADD_PIXEL from dccg31_otg_add_pixel() */
|
||||
uint32_t otg_drop_pixel[MAX_PIPES]; /* OTG0_PIXEL_RATE_CNTL->OTG_DROP_PIXEL from dccg31_otg_drop_pixel() */
|
||||
|
||||
/* DTBCLK DTO Control - 4 DTOs */
|
||||
uint32_t dtbclk_dto_modulo[4]; /* DTBCLK_DTO0_MODULO->DTBCLK_DTO0_MODULO from dccg31_set_dtbclk_dto() modulo calculation */
|
||||
uint32_t dtbclk_dto_phase[4]; /* DTBCLK_DTO0_PHASE->DTBCLK_DTO0_PHASE from phase calculation pixclk_khz/ref_dtbclk_khz */
|
||||
uint32_t dtbclk_dto_dbuf_en; /* DTBCLK_DTO_DBUF_EN->DTBCLK DTO data buffer enable */
|
||||
|
||||
/* DP Stream Clock Control - 4 pipes */
|
||||
uint32_t dpstreamclk_enable[MAX_PIPES]; /* DPSTREAMCLK_CNTL->DPSTREAMCLK_PIPE0_EN,DPSTREAMCLK_PIPE1_EN,DPSTREAMCLK_PIPE2_EN,DPSTREAMCLK_PIPE3_EN */
|
||||
uint32_t dp_dto_modulo[4]; /* DP_DTO0_MODULO->DP_DTO0_MODULO from DP stream DTO programming */
|
||||
uint32_t dp_dto_phase[4]; /* DP_DTO0_PHASE->DP_DTO0_PHASE from DP stream DTO programming */
|
||||
uint32_t dp_dto_dbuf_en; /* DP_DTO_DBUF_EN->DP DTO data buffer enable */
|
||||
|
||||
/* PHY Symbol Clock Control - 5 PHYs (A,B,C,D,E) */
|
||||
uint32_t phy_symclk_force_en[5]; /* PHYASYMCLK_CLOCK_CNTL->PHYASYMCLK_FORCE_EN from dccg31_set_physymclk() force_enable */
|
||||
uint32_t phy_symclk_force_src_sel[5]; /* PHYASYMCLK_CLOCK_CNTL->PHYASYMCLK_FORCE_SRC_SEL from dccg31_set_physymclk() clk_src */
|
||||
uint32_t phy_symclk_gate_disable[5]; /* DCCG_GATE_DISABLE_CNTL2->PHYASYMCLK_GATE_DISABLE from debug.root_clock_optimization.bits.physymclk */
|
||||
|
||||
/* SYMCLK32 SE Control - 4 instances */
|
||||
uint32_t symclk32_se_src_sel[4]; /* SYMCLK32_SE_CNTL->SYMCLK32_SE0_SRC_SEL from dccg31_enable_symclk32_se() with get_phy_mux_symclk() mapping */
|
||||
uint32_t symclk32_se_enable[4]; /* SYMCLK32_SE_CNTL->SYMCLK32_SE0_EN from dccg31_enable_symclk32_se() enable */
|
||||
uint32_t symclk32_se_gate_disable[4]; /* DCCG_GATE_DISABLE_CNTL3->SYMCLK32_SE0_GATE_DISABLE from debug.root_clock_optimization.bits.symclk32_se */
|
||||
|
||||
/* SYMCLK32 LE Control - 2 instances */
|
||||
uint32_t symclk32_le_src_sel[2]; /* SYMCLK32_LE_CNTL->SYMCLK32_LE0_SRC_SEL from dccg31_enable_symclk32_le() phyd32clk source */
|
||||
uint32_t symclk32_le_enable[2]; /* SYMCLK32_LE_CNTL->SYMCLK32_LE0_EN from dccg31_enable_symclk32_le() enable */
|
||||
uint32_t symclk32_le_gate_disable[2]; /* DCCG_GATE_DISABLE_CNTL3->SYMCLK32_LE0_GATE_DISABLE from debug.root_clock_optimization.bits.symclk32_le */
|
||||
|
||||
/* DPIA Clock Control */
|
||||
uint32_t dpiaclk_540m_dto_modulo; /* DPIACLK_540M_DTO_MODULO->DPIA 540MHz DTO modulo */
|
||||
uint32_t dpiaclk_540m_dto_phase; /* DPIACLK_540M_DTO_PHASE->DPIA 540MHz DTO phase */
|
||||
uint32_t dpiaclk_810m_dto_modulo; /* DPIACLK_810M_DTO_MODULO->DPIA 810MHz DTO modulo */
|
||||
uint32_t dpiaclk_810m_dto_phase; /* DPIACLK_810M_DTO_PHASE->DPIA 810MHz DTO phase */
|
||||
uint32_t dpiaclk_dto_cntl; /* DPIACLK_DTO_CNTL->DPIA clock DTO control */
|
||||
uint32_t dpiasymclk_cntl; /* DPIASYMCLK_CNTL->DPIA symbol clock control */
|
||||
|
||||
/* Clock Gating Control */
|
||||
uint32_t dccg_gate_disable_cntl; /* DCCG_GATE_DISABLE_CNTL->Clock gate disable control from dccg31_init() */
|
||||
uint32_t dpstreamclk_gate_disable; /* DCCG_GATE_DISABLE_CNTL3->DPSTREAMCLK_GATE_DISABLE from debug.root_clock_optimization.bits.dpstream */
|
||||
uint32_t dpstreamclk_root_gate_disable; /* DCCG_GATE_DISABLE_CNTL3->DPSTREAMCLK_ROOT_GATE_DISABLE from debug.root_clock_optimization.bits.dpstream */
|
||||
|
||||
/* VSync Control */
|
||||
uint32_t vsync_cnt_ctrl; /* DCCG_VSYNC_CNT_CTRL->VSync counter control */
|
||||
uint32_t vsync_cnt_int_ctrl; /* DCCG_VSYNC_CNT_INT_CTRL->VSync counter interrupt control */
|
||||
uint32_t vsync_otg_latch_value[6]; /* DCCG_VSYNC_OTG0_LATCH_VALUE->OTG0 VSync latch value (for OTG0-5) */
|
||||
|
||||
/* Time Base Control */
|
||||
uint32_t microsecond_time_base_div; /* MICROSECOND_TIME_BASE_DIV->Microsecond time base divider */
|
||||
uint32_t millisecond_time_base_div; /* MILLISECOND_TIME_BASE_DIV->Millisecond time base divider */
|
||||
} dccg;
|
||||
|
||||
/* DSC essential configuration for underflow analysis */
|
||||
struct {
|
||||
/* DSC active state - critical for bandwidth analysis */
|
||||
uint32_t dsc_clock_enable; /* DSC enabled - affects bandwidth requirements */
|
||||
|
||||
/* DSC configuration affecting bandwidth and timing */
|
||||
uint32_t dsc_num_slices_h; /* Horizontal slice count - affects throughput */
|
||||
uint32_t dsc_num_slices_v; /* Vertical slice count - affects throughput */
|
||||
uint32_t dsc_bits_per_pixel; /* Compression ratio - affects bandwidth */
|
||||
|
||||
/* OPP integration - affects pipeline flow */
|
||||
uint32_t dscrm_dsc_forward_enable; /* DSC forwarding to OPP enabled */
|
||||
uint32_t dscrm_dsc_opp_pipe_source; /* Which OPP receives DSC output */
|
||||
} dsc[MAX_PIPES];
|
||||
|
||||
/* MPC register programming variables */
|
||||
struct {
|
||||
/* MPCC blending tree and mode control */
|
||||
uint32_t mpcc_mode[MAX_PIPES]; /* MPCC_CONTROL->MPCC_MODE from blend_cfg.blend_mode */
|
||||
uint32_t mpcc_alpha_blend_mode[MAX_PIPES]; /* MPCC_CONTROL->MPCC_ALPHA_BLND_MODE from blend_cfg.alpha_mode */
|
||||
uint32_t mpcc_alpha_multiplied_mode[MAX_PIPES]; /* MPCC_CONTROL->MPCC_ALPHA_MULTIPLIED_MODE from blend_cfg.pre_multiplied_alpha */
|
||||
uint32_t mpcc_blnd_active_overlap_only[MAX_PIPES]; /* MPCC_CONTROL->MPCC_BLND_ACTIVE_OVERLAP_ONLY from blend_cfg.overlap_only */
|
||||
uint32_t mpcc_global_alpha[MAX_PIPES]; /* MPCC_CONTROL->MPCC_GLOBAL_ALPHA from blend_cfg.global_alpha */
|
||||
uint32_t mpcc_global_gain[MAX_PIPES]; /* MPCC_CONTROL->MPCC_GLOBAL_GAIN from blend_cfg.global_gain */
|
||||
uint32_t mpcc_bg_bpc[MAX_PIPES]; /* MPCC_CONTROL->MPCC_BG_BPC from background color depth */
|
||||
uint32_t mpcc_bot_gain_mode[MAX_PIPES]; /* MPCC_CONTROL->MPCC_BOT_GAIN_MODE from bottom layer gain control */
|
||||
|
||||
/* MPCC blending tree connections */
|
||||
uint32_t mpcc_bot_sel[MAX_PIPES]; /* MPCC_BOT_SEL->MPCC_BOT_SEL from mpcc_state->bot_sel */
|
||||
uint32_t mpcc_top_sel[MAX_PIPES]; /* MPCC_TOP_SEL->MPCC_TOP_SEL from mpcc_state->dpp_id */
|
||||
|
||||
/* MPCC output gamma control */
|
||||
uint32_t mpcc_ogam_mode[MAX_PIPES]; /* MPCC_OGAM_CONTROL->MPCC_OGAM_MODE from output gamma mode */
|
||||
uint32_t mpcc_ogam_select[MAX_PIPES]; /* MPCC_OGAM_CONTROL->MPCC_OGAM_SELECT from gamma LUT bank selection */
|
||||
uint32_t mpcc_ogam_pwl_disable[MAX_PIPES]; /* MPCC_OGAM_CONTROL->MPCC_OGAM_PWL_DISABLE from PWL control */
|
||||
|
||||
/* MPCC pipe assignment and status */
|
||||
uint32_t mpcc_opp_id[MAX_PIPES]; /* MPCC_OPP_ID->MPCC_OPP_ID from mpcc_state->opp_id */
|
||||
uint32_t mpcc_idle[MAX_PIPES]; /* MPCC_STATUS->MPCC_IDLE from mpcc idle status */
|
||||
uint32_t mpcc_busy[MAX_PIPES]; /* MPCC_STATUS->MPCC_BUSY from mpcc busy status */
|
||||
|
||||
/* MPC output processing */
|
||||
uint32_t mpc_out_csc_mode; /* MPC_OUT_CSC_COEF->MPC_OUT_CSC_MODE from output_csc */
|
||||
uint32_t mpc_out_gamma_mode; /* MPC_OUT_GAMMA_LUT->MPC_OUT_GAMMA_MODE from output_gamma */
|
||||
} mpc;
|
||||
|
||||
/* OPP register programming variables for each pipe */
|
||||
struct {
|
||||
/* Display Pattern Generator (DPG) Control - 19 fields from DPG_CONTROL register */
|
||||
uint32_t dpg_enable; /* DPG_CONTROL->DPG_EN from test_pattern parameter (enable/disable) */
|
||||
|
||||
/* Format Control (FMT) - 18 fields from FMT_CONTROL register */
|
||||
uint32_t fmt_pixel_encoding; /* FMT_CONTROL->FMT_PIXEL_ENCODING from clamping->pixel_encoding */
|
||||
uint32_t fmt_subsampling_mode; /* FMT_CONTROL->FMT_SUBSAMPLING_MODE from force_chroma_subsampling_1tap */
|
||||
uint32_t fmt_cbcr_bit_reduction_bypass; /* FMT_CONTROL->FMT_CBCR_BIT_REDUCTION_BYPASS from pixel_encoding bypass control */
|
||||
uint32_t fmt_stereosync_override; /* FMT_CONTROL->FMT_STEREOSYNC_OVERRIDE from stereo timing override */
|
||||
uint32_t fmt_spatial_dither_frame_counter_max; /* FMT_CONTROL->FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX from fmt_bit_depth->flags */
|
||||
uint32_t fmt_spatial_dither_frame_counter_bit_swap; /* FMT_CONTROL->FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP from dither control */
|
||||
uint32_t fmt_truncate_enable; /* FMT_CONTROL->FMT_TRUNCATE_EN from fmt_bit_depth->flags.TRUNCATE_ENABLED */
|
||||
uint32_t fmt_truncate_depth; /* FMT_CONTROL->FMT_TRUNCATE_DEPTH from fmt_bit_depth->flags.TRUNCATE_DEPTH */
|
||||
uint32_t fmt_truncate_mode; /* FMT_CONTROL->FMT_TRUNCATE_MODE from fmt_bit_depth->flags.TRUNCATE_MODE */
|
||||
uint32_t fmt_spatial_dither_enable; /* FMT_CONTROL->FMT_SPATIAL_DITHER_EN from fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED */
|
||||
uint32_t fmt_spatial_dither_mode; /* FMT_CONTROL->FMT_SPATIAL_DITHER_MODE from fmt_bit_depth->flags.SPATIAL_DITHER_MODE */
|
||||
uint32_t fmt_spatial_dither_depth; /* FMT_CONTROL->FMT_SPATIAL_DITHER_DEPTH from fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH */
|
||||
uint32_t fmt_temporal_dither_enable; /* FMT_CONTROL->FMT_TEMPORAL_DITHER_EN from fmt_bit_depth->flags.TEMPORAL_DITHER_ENABLED */
|
||||
uint32_t fmt_clamp_data_enable; /* FMT_CONTROL->FMT_CLAMP_DATA_EN from clamping->clamping_range enable */
|
||||
uint32_t fmt_clamp_color_format; /* FMT_CONTROL->FMT_CLAMP_COLOR_FORMAT from clamping->color_format */
|
||||
uint32_t fmt_dynamic_exp_enable; /* FMT_CONTROL->FMT_DYNAMIC_EXP_EN from color_sp/color_dpth/signal */
|
||||
uint32_t fmt_dynamic_exp_mode; /* FMT_CONTROL->FMT_DYNAMIC_EXP_MODE from color space mode mapping */
|
||||
uint32_t fmt_bit_depth_control; /* Legacy field - kept for compatibility */
|
||||
|
||||
/* OPP Pipe Control - 1 field from OPP_PIPE_CONTROL register */
|
||||
uint32_t opp_pipe_clock_enable; /* OPP_PIPE_CONTROL->OPP_PIPE_CLOCK_EN from enable parameter (bool) */
|
||||
|
||||
/* OPP CRC Control - 3 fields from OPP_PIPE_CRC_CONTROL register */
|
||||
uint32_t opp_crc_enable; /* OPP_PIPE_CRC_CONTROL->CRC_EN from CRC enable control */
|
||||
uint32_t opp_crc_select_source; /* OPP_PIPE_CRC_CONTROL->CRC_SELECT_SOURCE from CRC source selection */
|
||||
uint32_t opp_crc_stereo_cont; /* OPP_PIPE_CRC_CONTROL->CRC_STEREO_CONT from stereo continuous CRC */
|
||||
|
||||
/* Output Buffer (OPPBUF) Control - 6 fields from OPPBUF_CONTROL register */
|
||||
uint32_t oppbuf_active_width; /* OPPBUF_CONTROL->OPPBUF_ACTIVE_WIDTH from oppbuf_params->active_width */
|
||||
uint32_t oppbuf_pixel_repetition; /* OPPBUF_CONTROL->OPPBUF_PIXEL_REPETITION from oppbuf_params->pixel_repetition */
|
||||
uint32_t oppbuf_display_segmentation; /* OPPBUF_CONTROL->OPPBUF_DISPLAY_SEGMENTATION from oppbuf_params->mso_segmentation */
|
||||
uint32_t oppbuf_overlap_pixel_num; /* OPPBUF_CONTROL->OPPBUF_OVERLAP_PIXEL_NUM from oppbuf_params->mso_overlap_pixel_num */
|
||||
uint32_t oppbuf_3d_vact_space1_size; /* OPPBUF_CONTROL->OPPBUF_3D_VACT_SPACE1_SIZE from 3D timing space1_size */
|
||||
uint32_t oppbuf_3d_vact_space2_size; /* OPPBUF_CONTROL->OPPBUF_3D_VACT_SPACE2_SIZE from 3D timing space2_size */
|
||||
|
||||
/* DSC Forward Config - 3 fields from DSCRM_DSC_FORWARD_CONFIG register */
|
||||
uint32_t dscrm_dsc_forward_enable; /* DSCRM_DSC_FORWARD_CONFIG->DSCRM_DSC_FORWARD_EN from DSC forward enable control */
|
||||
uint32_t dscrm_dsc_opp_pipe_source; /* DSCRM_DSC_FORWARD_CONFIG->DSCRM_DSC_OPP_PIPE_SOURCE from opp_pipe parameter */
|
||||
uint32_t dscrm_dsc_forward_enable_status; /* DSCRM_DSC_FORWARD_CONFIG->DSCRM_DSC_FORWARD_EN_STATUS from DSC forward status (read-only) */
|
||||
} opp[MAX_PIPES];
|
||||
|
||||
/* OPTC register programming variables for each pipe */
|
||||
struct {
|
||||
uint32_t otg_master_inst;
|
||||
|
||||
/* OTG_CONTROL register - 5 fields for OTG control */
|
||||
uint32_t otg_master_enable; /* OTG_CONTROL->OTG_MASTER_EN from timing enable/disable control */
|
||||
uint32_t otg_disable_point_cntl; /* OTG_CONTROL->OTG_DISABLE_POINT_CNTL from disable timing control */
|
||||
uint32_t otg_start_point_cntl; /* OTG_CONTROL->OTG_START_POINT_CNTL from start timing control */
|
||||
uint32_t otg_field_number_cntl; /* OTG_CONTROL->OTG_FIELD_NUMBER_CNTL from interlace field control */
|
||||
uint32_t otg_out_mux; /* OTG_CONTROL->OTG_OUT_MUX from output mux selection */
|
||||
|
||||
/* OTG Horizontal Timing - 7 fields */
|
||||
uint32_t otg_h_total; /* OTG_H_TOTAL->OTG_H_TOTAL from dc_crtc_timing->h_total */
|
||||
uint32_t otg_h_blank_start; /* OTG_H_BLANK_START_END->OTG_H_BLANK_START from dc_crtc_timing->h_front_porch */
|
||||
uint32_t otg_h_blank_end; /* OTG_H_BLANK_START_END->OTG_H_BLANK_END from dc_crtc_timing->h_addressable_video_pixel_width */
|
||||
uint32_t otg_h_sync_start; /* OTG_H_SYNC_A->OTG_H_SYNC_A_START from dc_crtc_timing->h_sync_width */
|
||||
uint32_t otg_h_sync_end; /* OTG_H_SYNC_A->OTG_H_SYNC_A_END from calculated sync end position */
|
||||
uint32_t otg_h_sync_polarity; /* OTG_H_SYNC_A_CNTL->OTG_H_SYNC_A_POL from dc_crtc_timing->flags.HSYNC_POSITIVE_POLARITY */
|
||||
uint32_t otg_h_timing_div_mode; /* OTG_H_TIMING_CNTL->OTG_H_TIMING_DIV_MODE from horizontal timing division mode */
|
||||
|
||||
/* OTG Vertical Timing - 7 fields */
|
||||
uint32_t otg_v_total; /* OTG_V_TOTAL->OTG_V_TOTAL from dc_crtc_timing->v_total */
|
||||
uint32_t otg_v_blank_start; /* OTG_V_BLANK_START_END->OTG_V_BLANK_START from dc_crtc_timing->v_front_porch */
|
||||
uint32_t otg_v_blank_end; /* OTG_V_BLANK_START_END->OTG_V_BLANK_END from dc_crtc_timing->v_addressable_video_line_width */
|
||||
uint32_t otg_v_sync_start; /* OTG_V_SYNC_A->OTG_V_SYNC_A_START from dc_crtc_timing->v_sync_width */
|
||||
uint32_t otg_v_sync_end; /* OTG_V_SYNC_A->OTG_V_SYNC_A_END from calculated sync end position */
|
||||
uint32_t otg_v_sync_polarity; /* OTG_V_SYNC_A_CNTL->OTG_V_SYNC_A_POL from dc_crtc_timing->flags.VSYNC_POSITIVE_POLARITY */
|
||||
uint32_t otg_v_sync_mode; /* OTG_V_SYNC_A_CNTL->OTG_V_SYNC_MODE from sync mode selection */
|
||||
|
||||
/* OTG DRR (Dynamic Refresh Rate) Control - 8 fields */
|
||||
uint32_t otg_v_total_max; /* OTG_V_TOTAL_MAX->OTG_V_TOTAL_MAX from drr_params->vertical_total_max */
|
||||
uint32_t otg_v_total_min; /* OTG_V_TOTAL_MIN->OTG_V_TOTAL_MIN from drr_params->vertical_total_min */
|
||||
uint32_t otg_v_total_mid; /* OTG_V_TOTAL_MID->OTG_V_TOTAL_MID from drr_params->vertical_total_mid */
|
||||
uint32_t otg_v_total_max_sel; /* OTG_V_TOTAL_CONTROL->OTG_V_TOTAL_MAX_SEL from DRR max selection enable */
|
||||
uint32_t otg_v_total_min_sel; /* OTG_V_TOTAL_CONTROL->OTG_V_TOTAL_MIN_SEL from DRR min selection enable */
|
||||
uint32_t otg_vtotal_mid_replacing_max_en; /* OTG_V_TOTAL_CONTROL->OTG_VTOTAL_MID_REPLACING_MAX_EN from DRR mid-frame enable */
|
||||
uint32_t otg_vtotal_mid_frame_num; /* OTG_V_TOTAL_CONTROL->OTG_VTOTAL_MID_FRAME_NUM from drr_params->vertical_total_mid_frame_num */
|
||||
uint32_t otg_set_v_total_min_mask; /* OTG_V_TOTAL_CONTROL->OTG_SET_V_TOTAL_MIN_MASK from DRR trigger mask */
|
||||
uint32_t otg_force_lock_on_event; /* OTG_V_TOTAL_CONTROL->OTG_FORCE_LOCK_ON_EVENT from DRR force lock control */
|
||||
|
||||
/* OPTC Data Source and ODM - 6 fields */
|
||||
uint32_t optc_seg0_src_sel; /* OPTC_DATA_SOURCE_SELECT->OPTC_SEG0_SRC_SEL from opp_id[0] ODM segment 0 source */
|
||||
uint32_t optc_seg1_src_sel; /* OPTC_DATA_SOURCE_SELECT->OPTC_SEG1_SRC_SEL from opp_id[1] ODM segment 1 source */
|
||||
uint32_t optc_seg2_src_sel; /* OPTC_DATA_SOURCE_SELECT->OPTC_SEG2_SRC_SEL from opp_id[2] ODM segment 2 source */
|
||||
uint32_t optc_seg3_src_sel; /* OPTC_DATA_SOURCE_SELECT->OPTC_SEG3_SRC_SEL from opp_id[3] ODM segment 3 source */
|
||||
uint32_t optc_num_of_input_segment; /* OPTC_DATA_SOURCE_SELECT->OPTC_NUM_OF_INPUT_SEGMENT from opp_cnt-1 number of input segments */
|
||||
uint32_t optc_mem_sel; /* OPTC_MEMORY_CONFIG->OPTC_MEM_SEL from memory_mask ODM memory selection */
|
||||
|
||||
/* OPTC Data Format and DSC - 4 fields */
|
||||
uint32_t optc_data_format; /* OPTC_DATA_FORMAT_CONTROL->OPTC_DATA_FORMAT from data format selection */
|
||||
uint32_t optc_dsc_mode; /* OPTC_DATA_FORMAT_CONTROL->OPTC_DSC_MODE from dsc_mode parameter */
|
||||
uint32_t optc_dsc_bytes_per_pixel; /* OPTC_BYTES_PER_PIXEL->OPTC_DSC_BYTES_PER_PIXEL from dsc_bytes_per_pixel parameter */
|
||||
uint32_t optc_segment_width; /* OPTC_WIDTH_CONTROL->OPTC_SEGMENT_WIDTH from segment_width parameter */
|
||||
uint32_t optc_dsc_slice_width; /* OPTC_WIDTH_CONTROL->OPTC_DSC_SLICE_WIDTH from dsc_slice_width parameter */
|
||||
|
||||
/* OPTC Clock and Underflow Control - 4 fields */
|
||||
uint32_t optc_input_pix_clk_en; /* OPTC_INPUT_CLOCK_CONTROL->OPTC_INPUT_PIX_CLK_EN from pixel clock enable */
|
||||
uint32_t optc_underflow_occurred_status; /* OPTC_INPUT_GLOBAL_CONTROL->OPTC_UNDERFLOW_OCCURRED_STATUS from underflow status (read-only) */
|
||||
uint32_t optc_underflow_clear; /* OPTC_INPUT_GLOBAL_CONTROL->OPTC_UNDERFLOW_CLEAR from underflow clear control */
|
||||
uint32_t otg_clock_enable; /* OTG_CLOCK_CONTROL->OTG_CLOCK_EN from OTG clock enable */
|
||||
uint32_t otg_clock_gate_dis; /* OTG_CLOCK_CONTROL->OTG_CLOCK_GATE_DIS from clock gate disable */
|
||||
|
||||
/* OTG Stereo and 3D Control - 6 fields */
|
||||
uint32_t otg_stereo_enable; /* OTG_STEREO_CONTROL->OTG_STEREO_EN from stereo enable control */
|
||||
uint32_t otg_stereo_sync_output_line_num; /* OTG_STEREO_CONTROL->OTG_STEREO_SYNC_OUTPUT_LINE_NUM from timing->stereo_3d_format line num */
|
||||
uint32_t otg_stereo_sync_output_polarity; /* OTG_STEREO_CONTROL->OTG_STEREO_SYNC_OUTPUT_POLARITY from stereo polarity control */
|
||||
uint32_t otg_3d_structure_en; /* OTG_3D_STRUCTURE_CONTROL->OTG_3D_STRUCTURE_EN from 3D structure enable */
|
||||
uint32_t otg_3d_structure_v_update_mode; /* OTG_3D_STRUCTURE_CONTROL->OTG_3D_STRUCTURE_V_UPDATE_MODE from 3D vertical update mode */
|
||||
uint32_t otg_3d_structure_stereo_sel_ovr; /* OTG_3D_STRUCTURE_CONTROL->OTG_3D_STRUCTURE_STEREO_SEL_OVR from 3D stereo selection override */
|
||||
uint32_t otg_interlace_enable; /* OTG_INTERLACE_CONTROL->OTG_INTERLACE_ENABLE from dc_crtc_timing->flags.INTERLACE */
|
||||
|
||||
/* OTG GSL (Global Sync Lock) Control - 5 fields */
|
||||
uint32_t otg_gsl0_en; /* OTG_GSL_CONTROL->OTG_GSL0_EN from GSL group 0 enable */
|
||||
uint32_t otg_gsl1_en; /* OTG_GSL_CONTROL->OTG_GSL1_EN from GSL group 1 enable */
|
||||
uint32_t otg_gsl2_en; /* OTG_GSL_CONTROL->OTG_GSL2_EN from GSL group 2 enable */
|
||||
uint32_t otg_gsl_master_en; /* OTG_GSL_CONTROL->OTG_GSL_MASTER_EN from GSL master enable */
|
||||
uint32_t otg_gsl_master_mode; /* OTG_GSL_CONTROL->OTG_GSL_MASTER_MODE from gsl_params->gsl_master mode */
|
||||
|
||||
/* OTG DRR Advanced Control - 4 fields */
|
||||
uint32_t otg_v_total_last_used_by_drr; /* OTG_DRR_CONTROL->OTG_V_TOTAL_LAST_USED_BY_DRR from last used DRR V_TOTAL (read-only) */
|
||||
uint32_t otg_drr_trigger_window_start_x; /* OTG_DRR_TRIGGER_WINDOW->OTG_DRR_TRIGGER_WINDOW_START_X from window_start parameter */
|
||||
uint32_t otg_drr_trigger_window_end_x; /* OTG_DRR_TRIGGER_WINDOW->OTG_DRR_TRIGGER_WINDOW_END_X from window_end parameter */
|
||||
uint32_t otg_drr_v_total_change_limit; /* OTG_DRR_V_TOTAL_CHANGE->OTG_DRR_V_TOTAL_CHANGE_LIMIT from limit parameter */
|
||||
|
||||
/* OTG DSC Position Control - 2 fields */
|
||||
uint32_t otg_dsc_start_position_x; /* OTG_DSC_START_POSITION->OTG_DSC_START_POSITION_X from DSC start X position */
|
||||
uint32_t otg_dsc_start_position_line_num; /* OTG_DSC_START_POSITION->OTG_DSC_START_POSITION_LINE_NUM from DSC start line number */
|
||||
|
||||
/* OTG Double Buffer Control - 2 fields */
|
||||
uint32_t otg_drr_timing_dbuf_update_mode; /* OTG_DOUBLE_BUFFER_CONTROL->OTG_DRR_TIMING_DBUF_UPDATE_MODE from DRR double buffer mode */
|
||||
uint32_t otg_blank_data_double_buffer_en; /* OTG_DOUBLE_BUFFER_CONTROL->OTG_BLANK_DATA_DOUBLE_BUFFER_EN from blank data double buffer enable */
|
||||
|
||||
/* OTG Vertical Interrupts - 6 fields */
|
||||
uint32_t otg_vertical_interrupt0_int_enable; /* OTG_VERTICAL_INTERRUPT0_CONTROL->OTG_VERTICAL_INTERRUPT0_INT_ENABLE from interrupt 0 enable */
|
||||
uint32_t otg_vertical_interrupt0_line_start; /* OTG_VERTICAL_INTERRUPT0_POSITION->OTG_VERTICAL_INTERRUPT0_LINE_START from start_line parameter */
|
||||
uint32_t otg_vertical_interrupt1_int_enable; /* OTG_VERTICAL_INTERRUPT1_CONTROL->OTG_VERTICAL_INTERRUPT1_INT_ENABLE from interrupt 1 enable */
|
||||
uint32_t otg_vertical_interrupt1_line_start; /* OTG_VERTICAL_INTERRUPT1_POSITION->OTG_VERTICAL_INTERRUPT1_LINE_START from start_line parameter */
|
||||
uint32_t otg_vertical_interrupt2_int_enable; /* OTG_VERTICAL_INTERRUPT2_CONTROL->OTG_VERTICAL_INTERRUPT2_INT_ENABLE from interrupt 2 enable */
|
||||
uint32_t otg_vertical_interrupt2_line_start; /* OTG_VERTICAL_INTERRUPT2_POSITION->OTG_VERTICAL_INTERRUPT2_LINE_START from start_line parameter */
|
||||
|
||||
/* OTG Global Sync Parameters - 6 fields */
|
||||
uint32_t otg_vready_offset; /* OTG_VREADY_PARAM->OTG_VREADY_OFFSET from vready_offset parameter */
|
||||
uint32_t otg_vstartup_start; /* OTG_VSTARTUP_PARAM->OTG_VSTARTUP_START from vstartup_start parameter */
|
||||
uint32_t otg_vupdate_offset; /* OTG_VUPDATE_PARAM->OTG_VUPDATE_OFFSET from vupdate_offset parameter */
|
||||
uint32_t otg_vupdate_width; /* OTG_VUPDATE_PARAM->OTG_VUPDATE_WIDTH from vupdate_width parameter */
|
||||
uint32_t master_update_lock_vupdate_keepout_start_offset; /* OTG_VUPDATE_KEEPOUT->MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET from pstate_keepout start */
|
||||
uint32_t master_update_lock_vupdate_keepout_end_offset; /* OTG_VUPDATE_KEEPOUT->MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET from pstate_keepout end */
|
||||
|
||||
/* OTG Manual Trigger Control - 11 fields */
|
||||
uint32_t otg_triga_source_select; /* OTG_TRIGA_CNTL->OTG_TRIGA_SOURCE_SELECT from trigger A source selection */
|
||||
uint32_t otg_triga_source_pipe_select; /* OTG_TRIGA_CNTL->OTG_TRIGA_SOURCE_PIPE_SELECT from trigger A pipe selection */
|
||||
uint32_t otg_triga_rising_edge_detect_cntl; /* OTG_TRIGA_CNTL->OTG_TRIGA_RISING_EDGE_DETECT_CNTL from trigger A rising edge detect */
|
||||
uint32_t otg_triga_falling_edge_detect_cntl; /* OTG_TRIGA_CNTL->OTG_TRIGA_FALLING_EDGE_DETECT_CNTL from trigger A falling edge detect */
|
||||
uint32_t otg_triga_polarity_select; /* OTG_TRIGA_CNTL->OTG_TRIGA_POLARITY_SELECT from trigger A polarity selection */
|
||||
uint32_t otg_triga_frequency_select; /* OTG_TRIGA_CNTL->OTG_TRIGA_FREQUENCY_SELECT from trigger A frequency selection */
|
||||
uint32_t otg_triga_delay; /* OTG_TRIGA_CNTL->OTG_TRIGA_DELAY from trigger A delay */
|
||||
uint32_t otg_triga_clear; /* OTG_TRIGA_CNTL->OTG_TRIGA_CLEAR from trigger A clear */
|
||||
uint32_t otg_triga_manual_trig; /* OTG_TRIGA_MANUAL_TRIG->OTG_TRIGA_MANUAL_TRIG from manual trigger A */
|
||||
uint32_t otg_trigb_source_select; /* OTG_TRIGB_CNTL->OTG_TRIGB_SOURCE_SELECT from trigger B source selection */
|
||||
uint32_t otg_trigb_polarity_select; /* OTG_TRIGB_CNTL->OTG_TRIGB_POLARITY_SELECT from trigger B polarity selection */
|
||||
uint32_t otg_trigb_manual_trig; /* OTG_TRIGB_MANUAL_TRIG->OTG_TRIGB_MANUAL_TRIG from manual trigger B */
|
||||
|
||||
/* OTG Static Screen and Update Control - 6 fields */
|
||||
uint32_t otg_static_screen_event_mask; /* OTG_STATIC_SCREEN_CONTROL->OTG_STATIC_SCREEN_EVENT_MASK from event_triggers parameter */
|
||||
uint32_t otg_static_screen_frame_count; /* OTG_STATIC_SCREEN_CONTROL->OTG_STATIC_SCREEN_FRAME_COUNT from num_frames parameter */
|
||||
uint32_t master_update_lock; /* OTG_MASTER_UPDATE_LOCK->MASTER_UPDATE_LOCK from update lock control */
|
||||
uint32_t master_update_mode; /* OTG_MASTER_UPDATE_MODE->MASTER_UPDATE_MODE from update mode selection */
|
||||
uint32_t otg_force_count_now_mode; /* OTG_FORCE_COUNT_NOW_CNTL->OTG_FORCE_COUNT_NOW_MODE from force count mode */
|
||||
uint32_t otg_force_count_now_clear; /* OTG_FORCE_COUNT_NOW_CNTL->OTG_FORCE_COUNT_NOW_CLEAR from force count clear */
|
||||
|
||||
/* VTG Control - 3 fields */
|
||||
uint32_t vtg0_enable; /* CONTROL->VTG0_ENABLE from VTG enable control */
|
||||
uint32_t vtg0_fp2; /* CONTROL->VTG0_FP2 from VTG front porch 2 */
|
||||
uint32_t vtg0_vcount_init; /* CONTROL->VTG0_VCOUNT_INIT from VTG vertical count init */
|
||||
|
||||
/* OTG Status (Read-Only) - 12 fields */
|
||||
uint32_t otg_v_blank; /* OTG_STATUS->OTG_V_BLANK from vertical blank status (read-only) */
|
||||
uint32_t otg_v_active_disp; /* OTG_STATUS->OTG_V_ACTIVE_DISP from vertical active display (read-only) */
|
||||
uint32_t otg_frame_count; /* OTG_STATUS_FRAME_COUNT->OTG_FRAME_COUNT from frame count (read-only) */
|
||||
uint32_t otg_horz_count; /* OTG_STATUS_POSITION->OTG_HORZ_COUNT from horizontal position (read-only) */
|
||||
uint32_t otg_vert_count; /* OTG_STATUS_POSITION->OTG_VERT_COUNT from vertical position (read-only) */
|
||||
uint32_t otg_horz_count_hv; /* OTG_STATUS_HV_COUNT->OTG_HORZ_COUNT from horizontal count (read-only) */
|
||||
uint32_t otg_vert_count_nom; /* OTG_STATUS_HV_COUNT->OTG_VERT_COUNT_NOM from vertical count nominal (read-only) */
|
||||
uint32_t otg_flip_pending; /* OTG_PIPE_UPDATE_STATUS->OTG_FLIP_PENDING from flip pending status (read-only) */
|
||||
uint32_t otg_dc_reg_update_pending; /* OTG_PIPE_UPDATE_STATUS->OTG_DC_REG_UPDATE_PENDING from DC register update pending (read-only) */
|
||||
uint32_t otg_cursor_update_pending; /* OTG_PIPE_UPDATE_STATUS->OTG_CURSOR_UPDATE_PENDING from cursor update pending (read-only) */
|
||||
uint32_t otg_vupdate_keepout_status; /* OTG_PIPE_UPDATE_STATUS->OTG_VUPDATE_KEEPOUT_STATUS from VUPDATE keepout status (read-only) */
|
||||
	} optc[MAX_PIPES];

	/* Metadata */
	uint32_t active_pipe_count;
	uint32_t active_stream_count;
	bool state_valid;
};

/**
 * dc_capture_register_software_state() - Capture software state for register programming
 * @dc: DC context containing current display configuration
 * @state: Pointer to dc_register_software_state structure to populate
 *
 * Extracts all software state variables that are used to program hardware register
 * fields across the display driver pipeline. This provides a complete snapshot
 * of the software configuration that drives hardware register programming.
 *
 * The function traverses the DC context and extracts values from:
 * - Stream configurations (timing, format, DSC settings)
 * - Plane states (surface format, rotation, scaling, cursor)
 * - Pipe contexts (resource allocation, blending, viewport)
 * - Clock manager (display clocks, DPP clocks, pixel clocks)
 * - Resource context (DET buffer allocation, ODM configuration)
 *
 * This is essential for underflow debugging as it captures the exact software
 * state that determines how registers are programmed, allowing analysis of
 * whether underflow is caused by incorrect register programming or timing issues.
 *
 * Return: true if state was successfully captured, false on error
 */
bool dc_capture_register_software_state(struct dc *dc, struct dc_register_software_state *state);

#endif /* DC_INTERFACE_H_ */
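As a rough illustration of how this snapshot interface might be consumed, the sketch below captures the state and logs the metadata fields. The wrapping function, the kzalloc-based allocation, and the log call are assumptions made for the example; only dc_capture_register_software_state() and the metadata members come from the header above.

#include <linux/slab.h>

/* Illustrative consumer only; not part of the interface above. */
static void example_dump_register_sw_state(struct dc *dc)
{
	struct dc_register_software_state *state;

	/* The snapshot struct is large, so allocate it rather than using the stack. */
	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return;

	if (dc_capture_register_software_state(dc, state) && state->state_valid)
		DC_LOG_DEBUG("sw state: %u active pipes, %u active streams\n",
			     state->active_pipe_count, state->active_stream_count);

	kfree(state);
}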
@@ -1157,6 +1157,16 @@ struct dprx_states {
	bool cable_id_written;
};

union dpcd_panel_replay_capability_supported {
	struct {
		unsigned char PANEL_REPLAY_SUPPORT :1;
		unsigned char SELECTIVE_UPDATE_SUPPORT :1;
		unsigned char EARLY_TRANSPORT_SUPPORT :1;
		unsigned char RESERVED :5;
	} bits;
	unsigned char raw;
};
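For readers unfamiliar with how these DPCD capability unions are used, here is a minimal, self-contained sketch of decoding the raw byte read from DP_PANEL_REPLAY_CAPABILITY_SUPPORT (0x0b0). The hard-coded byte and the standalone program are illustrative only; the union layout is the one added above.

#include <stdio.h>

/* Union layout copied from the hunk above. Note that C bitfield ordering is
 * implementation-defined, so this mirrors what the driver relies on rather
 * than something guaranteed for arbitrary compilers.
 */
union dpcd_panel_replay_capability_supported {
	struct {
		unsigned char PANEL_REPLAY_SUPPORT :1;
		unsigned char SELECTIVE_UPDATE_SUPPORT :1;
		unsigned char EARLY_TRANSPORT_SUPPORT :1;
		unsigned char RESERVED :5;
	} bits;
	unsigned char raw;
};

int main(void)
{
	/* 0x07 is a made-up value standing in for a DPCD read of offset 0x0b0. */
	union dpcd_panel_replay_capability_supported caps = { .raw = 0x07 };

	printf("panel replay:%u selective update:%u early transport:%u\n",
	       caps.bits.PANEL_REPLAY_SUPPORT,
	       caps.bits.SELECTIVE_UPDATE_SUPPORT,
	       caps.bits.EARLY_TRANSPORT_SUPPORT);
	return 0;
}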

enum dpcd_downstream_port_max_bpc {
	DOWN_STREAM_MAX_8BPC = 0,
	DOWN_STREAM_MAX_10BPC,
@@ -1280,6 +1290,7 @@ struct dpcd_caps {
	struct edp_psr_info psr_info;

	struct replay_info pr_info;
	union dpcd_panel_replay_capability_supported pr_caps_supported;
	uint16_t edp_oled_emission_rate;
	union dp_receive_port0_cap receive_port0_cap;
	/* Indicates the number of SST links supported by MSO (Multi-Stream Output) */
@@ -1346,6 +1357,31 @@ union dpcd_replay_configuration {
	unsigned char raw;
};

union panel_replay_enable_and_configuration_1 {
	struct {
		unsigned char PANEL_REPLAY_ENABLE :1;
		unsigned char PANEL_REPLAY_CRC_ENABLE :1;
		unsigned char IRQ_HPD_ASSDP_MISSING :1;
		unsigned char IRQ_HPD_VSCSDP_UNCORRECTABLE_ERROR :1;
		unsigned char IRQ_HPD_RFB_ERROR :1;
		unsigned char IRQ_HPD_ACTIVE_FRAME_CRC_ERROR :1;
		unsigned char PANEL_REPLAY_SELECTIVE_UPDATE_ENABLE :1;
		unsigned char PANEL_REPLAY_EARLY_TRANSPORT_ENABLE :1;
	} bits;
	unsigned char raw;
};

union panel_replay_enable_and_configuration_2 {
	struct {
		unsigned char SINK_REFRESH_RATE_UNLOCK_GRANTED :1;
		unsigned char RESERVED :1;
		unsigned char SU_Y_GRANULARITY_EXT_VALUE_ENABLED :1;
		unsigned char SU_Y_GRANULARITY_EXT_VALUE :4;
		unsigned char SU_REGION_SCAN_LINE_CAPTURE_INDICATION :1;
	} bits;
	unsigned char raw;
};
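To make the second configuration register concrete, here is a small hedged example of packing it before a DPCD write. The chosen field values are arbitrary and are not what the driver programs by default (the panel replay setup path later in this series clears these bits); only the union layout and the DPCD offset 0x1b1 come from this series.

/* Example only: arbitrary values, not the driver's defaults. */
static unsigned char example_pack_pr_config2(void)
{
	union panel_replay_enable_and_configuration_2 cfg2 = { .raw = 0 };

	cfg2.bits.SU_Y_GRANULARITY_EXT_VALUE_ENABLED = 1;
	cfg2.bits.SU_Y_GRANULARITY_EXT_VALUE = 0x4;	/* 4-bit granularity code */

	return cfg2.raw;	/* caller writes this to DPCD 0x1b1 */
}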

union dpcd_alpm_configuration {
	struct {
		unsigned char ENABLE : 1;
@@ -941,6 +941,12 @@ enum dc_psr_version {
	DC_PSR_VERSION_UNSUPPORTED = 0xFFFFFFFF,
};

enum dc_replay_version {
	DC_FREESYNC_REPLAY = 0,
	DC_VESA_PANEL_REPLAY = 1,
	DC_REPLAY_VERSION_UNSUPPORTED = 0XFF,
};

/* Possible values of display_endpoint_id.endpoint */
enum display_endpoint_type {
	DISPLAY_ENDPOINT_PHY = 0, /* Physical connector. */
@@ -1093,6 +1099,7 @@ enum replay_FW_Message_type {
	Replay_Set_Residency_Frameupdate_Timer,
	Replay_Set_Pseudo_VTotal,
	Replay_Disabled_Adaptive_Sync_SDP,
	Replay_Set_Version,
	Replay_Set_General_Cmd,
};
|
||||
|
||||
|
|
@ -1128,6 +1135,8 @@ union replay_low_refresh_rate_enable_options {
|
|||
};
|
||||
|
||||
struct replay_config {
|
||||
/* Replay version */
|
||||
enum dc_replay_version replay_version;
|
||||
/* Replay feature is supported */
|
||||
bool replay_supported;
|
||||
/* Replay caps support DPCD & EDID caps*/
|
||||
|
|
|
|||
|
|
@ -1143,7 +1143,8 @@ void dce_aud_wall_dto_setup(
|
|||
REG_UPDATE(DCCG_AUDIO_DTO1_PHASE,
|
||||
DCCG_AUDIO_DTO1_PHASE, clock_info.audio_dto_phase);
|
||||
|
||||
REG_UPDATE(DCCG_AUDIO_DTO_SOURCE,
|
||||
if (aud->masks->DCCG_AUDIO_DTO2_USE_512FBR_DTO)
|
||||
REG_UPDATE(DCCG_AUDIO_DTO_SOURCE,
|
||||
DCCG_AUDIO_DTO2_USE_512FBR_DTO, 1);
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1568,7 +1568,7 @@ void dce110_stream_encoder_construct(
|
|||
enc110->se_mask = se_mask;
|
||||
}
|
||||
|
||||
static const struct stream_encoder_funcs dce110_an_str_enc_funcs = {0};
|
||||
static const struct stream_encoder_funcs dce110_an_str_enc_funcs = {};
|
||||
|
||||
void dce110_analog_stream_encoder_construct(
|
||||
struct dce110_stream_encoder *enc110,
|
||||
|
|
|
|||
|
|
@ -387,6 +387,19 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub,
|
|||
cmd.replay_disabled_adaptive_sync_sdp.data.force_disabled =
|
||||
cmd_element->disabled_adaptive_sync_sdp_data.force_disabled;
|
||||
break;
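/*
 * Note on the Replay_Set_Version case below: payload_bytes is the size of the
 * command body only, i.e. sizeof(struct dmub_rb_cmd_replay_set_version) minus
 * sizeof(struct dmub_cmd_header), presumably so the firmware knows how much
 * body data follows the fixed-size header. This is an explanatory note added
 * for this write-up, not a comment from the patch itself.
 */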
|
||||
case Replay_Set_Version:
|
||||
//Header
|
||||
cmd.replay_set_version.header.sub_type =
|
||||
DMUB_CMD__REPLAY_SET_VERSION;
|
||||
cmd.replay_set_version.header.payload_bytes =
|
||||
sizeof(struct dmub_rb_cmd_replay_set_version) -
|
||||
sizeof(struct dmub_cmd_header);
|
||||
//Cmd Body
|
||||
cmd.replay_set_version.replay_set_version_data.panel_inst =
|
||||
cmd_element->version_data.panel_inst;
|
||||
cmd.replay_set_version.replay_set_version_data.version =
|
||||
cmd_element->version_data.version;
|
||||
break;
|
||||
case Replay_Set_General_Cmd:
|
||||
//Header
|
||||
cmd.replay_set_general_cmd.header.sub_type =
|
||||
|
|
|
|||
|
|
@ -1893,7 +1893,7 @@ struct display_mode_lib_scratch_st {
|
|||
struct CalculatePrefetchSchedule_params_st CalculatePrefetchSchedule_params;
|
||||
};
|
||||
|
||||
/// @brief Represent the overall soc/ip enviroment. It contains data structure represent the soc/ip characteristic and also structures that hold calculation output
|
||||
/// @brief Represent the overall soc/ip environment. It contains data structure represent the soc/ip characteristic and also structures that hold calculation output
|
||||
struct display_mode_lib_st {
|
||||
dml_uint_t project;
|
||||
|
||||
|
|
|
|||
|
|
@ -685,7 +685,6 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
|
|||
uint32_t early_control = 0;
|
||||
struct timing_generator *tg = pipe_ctx->stream_res.tg;
|
||||
|
||||
link_hwss->setup_stream_attribute(pipe_ctx);
|
||||
link_hwss->setup_stream_encoder(pipe_ctx);
|
||||
|
||||
dc->hwss.update_info_frame(pipe_ctx);
|
||||
|
|
@ -1103,6 +1102,9 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
|
|||
if (!pipe_ctx->stream)
|
||||
return;
|
||||
|
||||
if (dc_is_rgb_signal(pipe_ctx->stream->signal))
|
||||
return;
|
||||
|
||||
dc = pipe_ctx->stream->ctx->dc;
|
||||
clk_mgr = dc->clk_mgr;
|
||||
link_hwss = get_link_hwss(pipe_ctx->stream->link, &pipe_ctx->link_res);
|
||||
|
|
@ -1139,6 +1141,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx)
|
|||
if (!pipe_ctx || !pipe_ctx->stream)
|
||||
return;
|
||||
|
||||
if (dc_is_rgb_signal(pipe_ctx->stream->signal))
|
||||
return;
|
||||
|
||||
dc = pipe_ctx->stream->ctx->dc;
|
||||
clk_mgr = dc->clk_mgr;
|
||||
link_hwss = get_link_hwss(pipe_ctx->stream->link, &pipe_ctx->link_res);
|
||||
|
|
@ -1193,8 +1198,7 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
|
|||
pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets(
|
||||
pipe_ctx->stream_res.stream_enc);
|
||||
|
||||
if (!dc_is_rgb_signal(pipe_ctx->stream->signal))
|
||||
dc->hwss.disable_audio_stream(pipe_ctx);
|
||||
dc->hwss.disable_audio_stream(pipe_ctx);
|
||||
|
||||
link_hwss->reset_stream_encoder(pipe_ctx);
|
||||
|
||||
|
|
|
|||
|
|
@ -614,6 +614,14 @@ void dcn20_dpp_pg_control(
|
|||
* DOMAIN11_PGFSM_PWR_STATUS, pwr_status,
|
||||
* 1, 1000);
|
||||
*/
|
||||
|
||||
/* Force disable cursor on plane powerdown on DPP 5 using dpp_force_disable_cursor */
|
||||
if (!power_on) {
|
||||
struct dpp *dpp5 = hws->ctx->dc->res_pool->dpps[dpp_inst];
|
||||
if (dpp5 && dpp5->funcs->dpp_force_disable_cursor)
|
||||
dpp5->funcs->dpp_force_disable_cursor(dpp5);
|
||||
}
|
||||
|
||||
break;
|
||||
default:
|
||||
BREAK_TO_DEBUGGER();
|
||||
|
|
@ -3055,8 +3063,6 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
|
|||
link_enc->transmitter - TRANSMITTER_UNIPHY_A);
|
||||
}
|
||||
|
||||
link_hwss->setup_stream_attribute(pipe_ctx);
|
||||
|
||||
if (dc->res_pool->dccg->funcs->set_pixel_rate_div)
|
||||
dc->res_pool->dccg->funcs->set_pixel_rate_div(
|
||||
dc->res_pool->dccg,
|
||||
|
|
|
|||
|
|
@ -974,8 +974,6 @@ void dcn401_enable_stream(struct pipe_ctx *pipe_ctx)
|
|||
}
|
||||
}
|
||||
|
||||
link_hwss->setup_stream_attribute(pipe_ctx);
|
||||
|
||||
if (dc->res_pool->dccg->funcs->set_pixel_rate_div) {
|
||||
dc->res_pool->dccg->funcs->set_pixel_rate_div(
|
||||
dc->res_pool->dccg,
|
||||
|
|
@ -3675,6 +3673,8 @@ void dcn401_update_dchubp_dpp_sequence(struct dc *dc,
|
|||
pipe_ctx->update_flags.bits.scaler || viewport_changed == true) &&
|
||||
pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
|
||||
|
||||
hwss_add_abort_cursor_offload_update(seq_state, dc, pipe_ctx);
|
||||
|
||||
hwss_add_set_cursor_attribute(seq_state, dc, pipe_ctx);
|
||||
|
||||
/* Step 15: Cursor position setup */
|
||||
|
|
|
|||
|
|
@ -696,6 +696,11 @@ struct hubp_program_mcache_id_and_split_coordinate_params {
|
|||
struct mcache_regs_struct *mcache_regs;
|
||||
};
|
||||
|
||||
struct abort_cursor_offload_update_params {
|
||||
struct dc *dc;
|
||||
struct pipe_ctx *pipe_ctx;
|
||||
};
|
||||
|
||||
struct set_cursor_attribute_params {
|
||||
struct dc *dc;
|
||||
struct pipe_ctx *pipe_ctx;
|
||||
|
|
@ -842,6 +847,7 @@ union block_sequence_params {
|
|||
struct mpc_insert_plane_params mpc_insert_plane_params;
|
||||
struct dpp_set_scaler_params dpp_set_scaler_params;
|
||||
struct hubp_mem_program_viewport_params hubp_mem_program_viewport_params;
|
||||
struct abort_cursor_offload_update_params abort_cursor_offload_update_params;
|
||||
struct set_cursor_attribute_params set_cursor_attribute_params;
|
||||
struct set_cursor_position_params set_cursor_position_params;
|
||||
struct set_cursor_sdr_white_level_params set_cursor_sdr_white_level_params;
|
||||
|
|
@ -960,6 +966,7 @@ enum block_sequence_func {
|
|||
MPC_INSERT_PLANE,
|
||||
DPP_SET_SCALER,
|
||||
HUBP_MEM_PROGRAM_VIEWPORT,
|
||||
ABORT_CURSOR_OFFLOAD_UPDATE,
|
||||
SET_CURSOR_ATTRIBUTE,
|
||||
SET_CURSOR_POSITION,
|
||||
SET_CURSOR_SDR_WHITE_LEVEL,
|
||||
|
|
@ -1565,6 +1572,8 @@ void hwss_dpp_set_scaler(union block_sequence_params *params);
|
|||
|
||||
void hwss_hubp_mem_program_viewport(union block_sequence_params *params);
|
||||
|
||||
void hwss_abort_cursor_offload_update(union block_sequence_params *params);
|
||||
|
||||
void hwss_set_cursor_attribute(union block_sequence_params *params);
|
||||
|
||||
void hwss_set_cursor_position(union block_sequence_params *params);
|
||||
|
|
@ -1961,6 +1970,10 @@ void hwss_add_hubp_mem_program_viewport(struct block_sequence_state *seq_state,
|
|||
const struct rect *viewport,
|
||||
const struct rect *viewport_c);
|
||||
|
||||
void hwss_add_abort_cursor_offload_update(struct block_sequence_state *seq_state,
|
||||
struct dc *dc,
|
||||
struct pipe_ctx *pipe_ctx);
|
||||
|
||||
void hwss_add_set_cursor_attribute(struct block_sequence_state *seq_state,
|
||||
struct dc *dc,
|
||||
struct pipe_ctx *pipe_ctx);
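The hunks above wire a new ABORT_CURSOR_OFFLOAD_UPDATE step into the block-sequence machinery: a params struct, a union member, an enum value, a handler declaration, and an add helper. A condensed sketch of how the handler side presumably unpacks its parameters is shown below; the handler body and the dc_abort_cursor_offload() helper are assumptions for illustration, only the params layout and function names come from the hunks.

/* Sketch only: the actual body of hwss_abort_cursor_offload_update() is not
 * part of this diff; dc_abort_cursor_offload() is a hypothetical helper.
 */
void hwss_abort_cursor_offload_update(union block_sequence_params *params)
{
	struct dc *dc = params->abort_cursor_offload_update_params.dc;
	struct pipe_ctx *pipe_ctx = params->abort_cursor_offload_update_params.pipe_ctx;

	if (!dc || !pipe_ctx)
		return;

	dc_abort_cursor_offload(dc, pipe_ctx); /* hypothetical helper */
}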
|
||||
|
|
|
|||
|
|
@@ -79,6 +79,32 @@
#define MAX_HPO_DP2_ENCODERS 4
#define MAX_HPO_DP2_LINK_ENCODERS 4

/* Pipe topology snapshot structures */
#define MAX_TOPOLOGY_SNAPSHOTS 4

struct pipe_topology_line {
	bool is_phantom_pipe;
	int plane_idx;
	int slice_idx;
	int stream_idx;
	int dpp_inst;
	int opp_inst;
	int tg_inst;
};

struct pipe_topology_snapshot {
	struct pipe_topology_line pipe_log_lines[MAX_PIPES];
	int line_count;
	uint64_t timestamp_us;
	int stream_count;
	int phantom_stream_count;
};

struct pipe_topology_history {
	struct pipe_topology_snapshot snapshots[MAX_TOPOLOGY_SNAPSHOTS];
	int current_snapshot_index;
};
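These structures suggest a small ring buffer of topology snapshots. A hedged sketch of how one snapshot might be recorded into pipe_topology_history follows; the wrap-around indexing is an assumption, only the struct fields come from the hunk above.

/* Illustration only: record one snapshot into the history ring buffer. */
static void example_record_topology_snapshot(struct pipe_topology_history *hist,
					     const struct pipe_topology_snapshot *snap)
{
	int idx = hist->current_snapshot_index;

	hist->snapshots[idx] = *snap;
	/* Advance and wrap so the newest MAX_TOPOLOGY_SNAPSHOTS entries are kept. */
	hist->current_snapshot_index = (idx + 1) % MAX_TOPOLOGY_SNAPSHOTS;
}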
|
||||
|
||||
struct gamma_curve {
|
||||
uint32_t offset;
|
||||
uint32_t segments_num;
|
||||
|
|
|
|||
|
|
@ -1224,8 +1224,6 @@ static bool detect_link_and_local_sink(struct dc_link *link,
|
|||
break;
|
||||
}
|
||||
|
||||
sink->edid_caps.analog &= dc_connector_supports_analog(link->link_id.id);
|
||||
|
||||
// Check if edid is the same
|
||||
if ((prev_sink) &&
|
||||
(edid_status == EDID_THE_SAME || edid_status == EDID_OK))
|
||||
|
|
|
|||
|
|
@ -2226,7 +2226,11 @@ static enum dc_status enable_link(
|
|||
{
|
||||
enum dc_status status = DC_ERROR_UNEXPECTED;
|
||||
struct dc_stream_state *stream = pipe_ctx->stream;
|
||||
struct dc_link *link = stream->link;
|
||||
struct dc_link *link = NULL;
|
||||
|
||||
if (stream == NULL)
|
||||
return DC_ERROR_UNEXPECTED;
|
||||
link = stream->link;
|
||||
|
||||
/* There's some scenarios where driver is unloaded with display
|
||||
* still enabled. When driver is reloaded, it may cause a display
|
||||
|
|
@ -2374,8 +2378,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
|
|||
set_avmute(pipe_ctx, true);
|
||||
}
|
||||
|
||||
if (!dc_is_rgb_signal(pipe_ctx->stream->signal))
|
||||
dc->hwss.disable_audio_stream(pipe_ctx);
|
||||
dc->hwss.disable_audio_stream(pipe_ctx);
|
||||
|
||||
update_psp_stream_config(pipe_ctx, true);
|
||||
dc->hwss.blank_stream(pipe_ctx);
|
||||
|
|
@ -2464,6 +2467,7 @@ void link_set_dpms_on(
|
|||
struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
|
||||
enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO;
|
||||
struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg;
|
||||
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
|
||||
bool apply_edp_fast_boot_optimization =
|
||||
pipe_ctx->stream->apply_edp_fast_boot_optimization;
|
||||
|
||||
|
|
@ -2508,6 +2512,8 @@ void link_set_dpms_on(
|
|||
pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, otg_out_dest);
|
||||
}
|
||||
|
||||
link_hwss->setup_stream_attribute(pipe_ctx);
|
||||
|
||||
pipe_ctx->stream->apply_edp_fast_boot_optimization = false;
|
||||
|
||||
// Enable VPG before building infoframe
|
||||
|
|
@ -2660,8 +2666,7 @@ void link_set_dpms_on(
|
|||
enable_stream_features(pipe_ctx);
|
||||
update_psp_stream_config(pipe_ctx, false);
|
||||
|
||||
if (!dc_is_rgb_signal(pipe_ctx->stream->signal))
|
||||
dc->hwss.enable_audio_stream(pipe_ctx);
|
||||
dc->hwss.enable_audio_stream(pipe_ctx);
|
||||
|
||||
if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
|
||||
set_avmute(pipe_ctx, false);
|
||||
|
|
|
|||
|
|
@ -529,16 +529,16 @@ static bool construct_phy(struct dc_link *link,
|
|||
enc_init_data.transmitter = translate_encoder_to_transmitter(enc_init_data.encoder);
|
||||
enc_init_data.analog_engine = find_analog_engine(link);
|
||||
|
||||
if (!transmitter_supported(enc_init_data.transmitter) &&
|
||||
!analog_engine_supported(enc_init_data.analog_engine)) {
|
||||
DC_LOG_WARNING("link_id %d has unsupported encoder\n", link->link_id.id);
|
||||
return false;
|
||||
}
|
||||
|
||||
link->ep_type = DISPLAY_ENDPOINT_PHY;
|
||||
|
||||
DC_LOG_DC("BIOS object table - link_id: %d", link->link_id.id);
|
||||
|
||||
if (!transmitter_supported(enc_init_data.transmitter) &&
|
||||
!analog_engine_supported(enc_init_data.analog_engine)) {
|
||||
DC_LOG_WARNING("link_id %d has unsupported encoder\n", link->link_id.id);
|
||||
goto unsupported_fail;
|
||||
}
|
||||
|
||||
if (bios->funcs->get_disp_connector_caps_info) {
|
||||
bios->funcs->get_disp_connector_caps_info(bios, link->link_id, &disp_connect_caps_info);
|
||||
link->is_internal_display = disp_connect_caps_info.INTERNAL_DISPLAY;
|
||||
|
|
@ -787,6 +787,7 @@ static bool construct_phy(struct dc_link *link,
|
|||
|
||||
link->psr_settings.psr_vtotal_control_support = false;
|
||||
link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
|
||||
link->replay_settings.config.replay_version = DC_REPLAY_VERSION_UNSUPPORTED;
|
||||
|
||||
DC_LOG_DC("BIOS object table - %s finished successfully.\n", __func__);
|
||||
return true;
|
||||
|
|
@ -805,6 +806,7 @@ create_fail:
|
|||
link->hpd_gpio = NULL;
|
||||
}
|
||||
|
||||
unsupported_fail:
|
||||
DC_LOG_DC("BIOS object table - %s failed.\n", __func__);
|
||||
return false;
|
||||
}
|
||||
|
|
@ -868,6 +870,7 @@ static bool construct_dpia(struct dc_link *link,
|
|||
/* TODO: Create link encoder */
|
||||
|
||||
link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
|
||||
link->replay_settings.config.replay_version = DC_REPLAY_VERSION_UNSUPPORTED;
|
||||
|
||||
return true;
|
||||
|
||||
|
|
|
|||
|
|
@@ -1713,7 +1713,7 @@ static bool retrieve_link_cap(struct dc_link *link)
	union edp_configuration_cap edp_config_cap;
	union dp_downstream_port_present ds_port = { 0 };
	enum dc_status status = DC_ERROR_UNEXPECTED;
	uint32_t read_dpcd_retry_cnt = 3;
	uint32_t read_dpcd_retry_cnt = 20;
	int i;
	struct dp_sink_hw_fw_revision dp_hw_fw_revision;
	const uint32_t post_oui_delay = 30; // 30ms
|
||||
|
|
@ -1756,12 +1756,13 @@ static bool retrieve_link_cap(struct dc_link *link)
|
|||
}
|
||||
|
||||
dpcd_set_source_specific_data(link);
|
||||
/* Sink may need to configure internals based on vendor, so allow some
|
||||
* time before proceeding with possibly vendor specific transactions
|
||||
*/
|
||||
msleep(post_oui_delay);
|
||||
|
||||
for (i = 0; i < read_dpcd_retry_cnt; i++) {
|
||||
/*
|
||||
* Sink may need to configure internals based on vendor, so allow some
|
||||
* time before proceeding with possibly vendor specific transactions
|
||||
*/
|
||||
msleep(post_oui_delay);
|
||||
status = core_link_read_dpcd(
|
||||
link,
|
||||
DP_DPCD_REV,
|
||||
|
|
@ -2091,6 +2092,11 @@ static bool retrieve_link_cap(struct dc_link *link)
|
|||
link->dpcd_caps.max_uncompressed_pixel_rate_cap.raw,
|
||||
sizeof(link->dpcd_caps.max_uncompressed_pixel_rate_cap.raw));
|
||||
|
||||
core_link_read_dpcd(link,
|
||||
DP_PANEL_REPLAY_CAPABILITY_SUPPORT,
|
||||
&link->dpcd_caps.pr_caps_supported.raw,
|
||||
sizeof(link->dpcd_caps.pr_caps_supported.raw));
|
||||
|
||||
/* Read DP tunneling information. */
|
||||
status = dpcd_get_tunneling_device_data(link);
|
||||
if (status != DC_OK)
|
||||
|
|
|
|||
|
|
@ -949,7 +949,7 @@ bool edp_set_replay_allow_active(struct dc_link *link, const bool *allow_active,
|
|||
/* Set power optimization flag */
|
||||
if (power_opts && link->replay_settings.replay_power_opt_active != *power_opts) {
|
||||
if (replay != NULL && link->replay_settings.replay_feature_enabled &&
|
||||
replay->funcs->replay_set_power_opt) {
|
||||
replay->funcs->replay_set_power_opt) {
|
||||
replay->funcs->replay_set_power_opt(replay, *power_opts, panel_inst);
|
||||
link->replay_settings.replay_power_opt_active = *power_opts;
|
||||
}
|
||||
|
|
@ -984,7 +984,117 @@ bool edp_get_replay_state(const struct dc_link *link, uint64_t *state)
|
|||
return true;
|
||||
}
|
||||
|
||||
bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream)
|
||||
static bool edp_setup_panel_replay(struct dc_link *link, const struct dc_stream_state *stream)
|
||||
{
|
||||
/* To-do: Setup Replay */
|
||||
struct dc *dc;
|
||||
struct dmub_replay *replay;
|
||||
int i;
|
||||
unsigned int panel_inst;
|
||||
struct replay_context replay_context = { 0 };
|
||||
unsigned int lineTimeInNs = 0;
|
||||
|
||||
union panel_replay_enable_and_configuration_1 pr_config_1 = { 0 };
|
||||
union panel_replay_enable_and_configuration_2 pr_config_2 = { 0 };
|
||||
|
||||
union dpcd_alpm_configuration alpm_config;
|
||||
|
||||
replay_context.controllerId = CONTROLLER_ID_UNDEFINED;
|
||||
|
||||
if (!link)
|
||||
return false;
|
||||
|
||||
//Clear Panel Replay enable & config
|
||||
dm_helpers_dp_write_dpcd(link->ctx, link,
|
||||
DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_1,
|
||||
(uint8_t *)&(pr_config_1.raw), sizeof(uint8_t));
|
||||
|
||||
dm_helpers_dp_write_dpcd(link->ctx, link,
|
||||
DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_2,
|
||||
(uint8_t *)&(pr_config_2.raw), sizeof(uint8_t));
|
||||
|
||||
if (!(link->replay_settings.config.replay_supported))
|
||||
return false;
|
||||
|
||||
dc = link->ctx->dc;
|
||||
|
||||
//not sure should keep or not
|
||||
replay = dc->res_pool->replay;
|
||||
|
||||
if (!replay)
|
||||
return false;
|
||||
|
||||
if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
|
||||
return false;
|
||||
|
||||
replay_context.aux_inst = link->ddc->ddc_pin->hw_info.ddc_channel;
|
||||
replay_context.digbe_inst = link->link_enc->transmitter;
|
||||
replay_context.digfe_inst = link->link_enc->preferred_engine;
|
||||
|
||||
for (i = 0; i < MAX_PIPES; i++) {
|
||||
if (dc->current_state->res_ctx.pipe_ctx[i].stream
|
||||
== stream) {
|
||||
/* dmcu -1 for all controller id values,
|
||||
* therefore +1 here
|
||||
*/
|
||||
replay_context.controllerId =
|
||||
dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg->inst + 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
	lineTimeInNs =
		((stream->timing.h_total * 1000000) /
		(stream->timing.pix_clk_100hz / 10)) + 1;

	replay_context.line_time_in_ns = lineTimeInNs;
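	/*
	 * Worked example (illustrative numbers, not from this patch): for a
	 * timing with h_total = 2200 and pix_clk_100hz = 1485000 (148.5 MHz),
	 * line time = 2200 * 1000000 / 148500 + 1 = 14815 ns, i.e. roughly
	 * 14.8 us per line reported to the Replay firmware.
	 */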
|
||||
|
||||
link->replay_settings.replay_feature_enabled =
|
||||
replay->funcs->replay_copy_settings(replay, link, &replay_context, panel_inst);
|
||||
|
||||
if (link->replay_settings.replay_feature_enabled) {
|
||||
pr_config_1.bits.PANEL_REPLAY_ENABLE = 1;
|
||||
pr_config_1.bits.PANEL_REPLAY_CRC_ENABLE = 1;
|
||||
pr_config_1.bits.IRQ_HPD_ASSDP_MISSING = 1;
|
||||
pr_config_1.bits.IRQ_HPD_VSCSDP_UNCORRECTABLE_ERROR = 1;
|
||||
pr_config_1.bits.IRQ_HPD_RFB_ERROR = 1;
|
||||
pr_config_1.bits.IRQ_HPD_ACTIVE_FRAME_CRC_ERROR = 1;
|
||||
pr_config_1.bits.PANEL_REPLAY_SELECTIVE_UPDATE_ENABLE = 1;
|
||||
pr_config_1.bits.PANEL_REPLAY_EARLY_TRANSPORT_ENABLE = 1;
|
||||
|
||||
pr_config_2.bits.SINK_REFRESH_RATE_UNLOCK_GRANTED = 0;
|
||||
pr_config_2.bits.SU_Y_GRANULARITY_EXT_VALUE_ENABLED = 0;
|
||||
pr_config_2.bits.SU_REGION_SCAN_LINE_CAPTURE_INDICATION = 0;
|
||||
|
||||
dm_helpers_dp_write_dpcd(link->ctx, link,
|
||||
DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_1,
|
||||
(uint8_t *)&(pr_config_1.raw), sizeof(uint8_t));
|
||||
|
||||
dm_helpers_dp_write_dpcd(link->ctx, link,
|
||||
DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_2,
|
||||
(uint8_t *)&(pr_config_2.raw), sizeof(uint8_t));
|
||||
|
||||
//ALPM Setup
|
||||
memset(&alpm_config, 0, sizeof(alpm_config));
|
||||
alpm_config.bits.ENABLE = link->replay_settings.config.alpm_mode != DC_ALPM_UNSUPPORTED ? 1 : 0;
|
||||
|
||||
if (link->replay_settings.config.alpm_mode == DC_ALPM_AUXLESS) {
|
||||
alpm_config.bits.ALPM_MODE_SEL = 1;
|
||||
alpm_config.bits.ACDS_PERIOD_DURATION = 1;
|
||||
}
|
||||
|
||||
dm_helpers_dp_write_dpcd(
|
||||
link->ctx,
|
||||
link,
|
||||
DP_RECEIVER_ALPM_CONFIG,
|
||||
&alpm_config.raw,
|
||||
sizeof(alpm_config.raw));
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool edp_setup_freesync_replay(struct dc_link *link, const struct dc_stream_state *stream)
|
||||
{
|
||||
/* To-do: Setup Replay */
|
||||
struct dc *dc;
|
||||
|
|
@@ -1080,6 +1190,18 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
	return true;
}

bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream)
{
	if (!link)
		return false;
	if (link->replay_settings.config.replay_version == DC_VESA_PANEL_REPLAY)
		return edp_setup_panel_replay(link, stream);
	else if (link->replay_settings.config.replay_version == DC_FREESYNC_REPLAY)
		return edp_setup_freesync_replay(link, stream);
	else
		return false;
}
|
||||
|
||||
/*
|
||||
* This is general Interface for Replay to set an 32 bit variable to dmub
|
||||
* replay_FW_Message_type: Indicates which instruction or variable pass to DMUB
|
||||
|
|
|
|||
|
|
@ -227,7 +227,8 @@ void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context);
|
|||
#define LE_DCN401_REG_LIST_RI(id) \
|
||||
LE_DCN3_REG_LIST_RI(id), \
|
||||
SRI_ARR(DP_DPHY_INTERNAL_CTRL, DP, id), \
|
||||
SRI_ARR(DIG_BE_CLK_CNTL, DIG, id)
|
||||
SRI_ARR(DIG_BE_CLK_CNTL, DIG, id),\
|
||||
SR_ARR(DIO_CLK_CNTL, id)
|
||||
|
||||
/* DPP */
|
||||
#define DPP_REG_LIST_DCN401_COMMON_RI(id) \
|
||||
|
|
|
|||
|
|
@ -44,11 +44,6 @@ static void virtual_stream_encoder_dvi_set_stream_attribute(
|
|||
struct dc_crtc_timing *crtc_timing,
|
||||
bool is_dual_link) {}
|
||||
|
||||
static void virtual_stream_encoder_lvds_set_stream_attribute(
|
||||
struct stream_encoder *enc,
|
||||
struct dc_crtc_timing *crtc_timing)
|
||||
{}
|
||||
|
||||
static void virtual_stream_encoder_set_throttled_vcp_size(
|
||||
struct stream_encoder *enc,
|
||||
struct fixed31_32 avg_time_slots_per_mtp)
|
||||
|
|
@ -120,8 +115,6 @@ static const struct stream_encoder_funcs virtual_str_enc_funcs = {
|
|||
virtual_stream_encoder_hdmi_set_stream_attribute,
|
||||
.dvi_set_stream_attribute =
|
||||
virtual_stream_encoder_dvi_set_stream_attribute,
|
||||
.lvds_set_stream_attribute =
|
||||
virtual_stream_encoder_lvds_set_stream_attribute,
|
||||
.set_throttled_vcp_size =
|
||||
virtual_stream_encoder_set_throttled_vcp_size,
|
||||
.update_hdmi_info_packets =
|
||||
|
|
|
|||
|
|
@ -2647,6 +2647,7 @@ struct dmub_cmd_fams2_global_config {
|
|||
|
||||
union dmub_cmd_fams2_config {
|
||||
struct dmub_cmd_fams2_global_config global;
|
||||
// coverity[cert_dcl37_c_violation:FALSE] errno.h, stddef.h, stdint.h not included in atombios.h
|
||||
struct dmub_fams2_stream_static_state stream; //v0
|
||||
union {
|
||||
struct dmub_fams2_cmd_stream_static_base_state base;
|
||||
|
|
|
|||
|
|
@ -30,6 +30,22 @@
|
|||
#ifndef DP_SINK_HW_REVISION_START // can remove this once the define gets into linux drm_dp_helper.h
|
||||
#define DP_SINK_HW_REVISION_START 0x409
|
||||
#endif
|
||||
/* Panel Replay*/
|
||||
#ifndef DP_PANEL_REPLAY_CAPABILITY_SUPPORT // can remove this once the define gets into linux drm_dp_helper.h
|
||||
#define DP_PANEL_REPLAY_CAPABILITY_SUPPORT 0x0b0
|
||||
#endif /* DP_PANEL_REPLAY_CAPABILITY_SUPPORT */
|
||||
#ifndef DP_PANEL_REPLAY_CAPABILITY // can remove this once the define gets into linux drm_dp_helper.h
|
||||
#define DP_PANEL_REPLAY_CAPABILITY 0x0b1
|
||||
#endif /* DP_PANEL_REPLAY_CAPABILITY */
|
||||
#ifndef DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_1 // can remove this once the define gets into linux drm_dp_helper.h
|
||||
#define DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_1 0x1b0
|
||||
#endif /* DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_1 */
|
||||
#ifndef DP_PANEL_REPLAY_ENABLE // can remove this once the define gets into linux drm_dp_helper.h
|
||||
#define DP_PANEL_REPLAY_ENABLE (1 << 0)
|
||||
#endif /* DP_PANEL_REPLAY_ENABLE */
|
||||
#ifndef DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_2 // can remove this once the define gets into linux drm_dp_helper.h
|
||||
#define DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_2 0x1b1
|
||||
#endif /* DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_2 */
|
||||
|
||||
enum dpcd_revision {
|
||||
DPCD_REV_10 = 0x10,
|
||||
|
|
|
|||
|
|
@ -1037,6 +1037,9 @@ void calculate_replay_link_off_frame_count(struct dc_link *link,
|
|||
uint8_t max_link_off_frame_count = 0;
|
||||
uint16_t max_deviation_line = 0, pixel_deviation_per_line = 0;
|
||||
|
||||
if (!link || link->replay_settings.config.replay_version != DC_FREESYNC_REPLAY)
|
||||
return;
|
||||
|
||||
max_deviation_line = link->dpcd_caps.pr_info.max_deviation_line;
|
||||
pixel_deviation_per_line = link->dpcd_caps.pr_info.pixel_deviation_per_line;
|
||||
|
||||
|
|
|
|||
|
|
@ -1187,8 +1187,11 @@ int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_pp_table)
                return 0;
        if (!table)
                return -EINVAL;

        if (amdgpu_sriov_vf(adev) || !pp_funcs->get_pp_table || adev->scpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,

@ -1715,7 +1718,10 @@ int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->set_pp_table)
        if (!buf || !size)
                return -EINVAL;

        if (amdgpu_sriov_vf(adev) || !pp_funcs->set_pp_table || adev->scpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
@ -2506,7 +2506,7 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
        AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
        AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
        AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
        AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
        AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC),
        AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
                        .attr_update = pp_dpm_clk_default_attr_update),
        AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,

@ -2638,6 +2638,15 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
                if (amdgpu_dpm_get_apu_thermal_limit(adev, &limit) ==
                    -EOPNOTSUPP)
                        *states = ATTR_STATE_UNSUPPORTED;
        } else if (DEVICE_ATTR_IS(pp_table)) {
                int ret;
                char *tmp = NULL;

                ret = amdgpu_dpm_get_pp_table(adev, &tmp);
                if (ret == -EOPNOTSUPP || !tmp)
                        *states = ATTR_STATE_UNSUPPORTED;
                else
                        *states = ATTR_STATE_SUPPORTED;
        }

        switch (gc_ver) {
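
Together with the amdgpu_dpm_get_pp_table()/amdgpu_dpm_set_pp_table() hunks above, -EINVAL is now reserved for bad caller arguments while -EOPNOTSUPP means the table is unavailable on this device or configuration, and default_attr_update() uses that distinction to hide the pp_table sysfs file. A minimal sketch of the same probe-then-hide idea, with a hypothetical helper name:

/*
 * Hypothetical helper, not the driver's actual function: -EOPNOTSUPP from the
 * backend (or no table) hides the attribute; -EINVAL only signals bad
 * arguments and does not affect visibility.
 */
static bool sketch_pp_table_visible(struct amdgpu_device *adev)
{
        char *table = NULL;
        int ret;

        ret = amdgpu_dpm_get_pp_table(adev, &table);

        return ret != -EOPNOTSUPP && table;
}
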
@ -631,9 +631,12 @@ static int pp_dpm_get_pp_table(void *handle, char **table)
{
        struct pp_hwmgr *hwmgr = handle;

        if (!hwmgr || !hwmgr->pm_en || !hwmgr->soft_pp_table)
        if (!hwmgr || !hwmgr->pm_en || !table)
                return -EINVAL;

        if (!hwmgr->soft_pp_table)
                return -EOPNOTSUPP;

        *table = (char *)hwmgr->soft_pp_table;
        return hwmgr->soft_pp_table_size;
}
@ -634,7 +634,7 @@ static int smu_sys_get_pp_table(void *handle,
                return -EOPNOTSUPP;

        if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
                return -EINVAL;
                return -EOPNOTSUPP;

        if (smu_table->hardcode_pptable)
                *table = smu_table->hardcode_pptable;

@ -1669,9 +1669,12 @@ static int smu_smc_hw_setup(struct smu_context *smu)
                if (adev->in_suspend && smu_is_dpm_running(smu)) {
                        dev_info(adev->dev, "dpm has been enabled\n");
                        ret = smu_system_features_control(smu, true);
                        if (ret)
                        if (ret) {
                                dev_err(adev->dev, "Failed system features control!\n");
                                return ret;
                                return ret;
                        }

                        return smu_enable_thermal_alert(smu);
                }
                break;
        default:
@ -1022,7 +1022,12 @@ int smu_v11_0_enable_thermal_alert(struct smu_context *smu)

int smu_v11_0_disable_thermal_alert(struct smu_context *smu)
{
        return amdgpu_irq_put(smu->adev, &smu->irq_source, 0);
        int ret = 0;

        if (smu->smu_table.thermal_controller_type)
                ret = amdgpu_irq_put(smu->adev, &smu->irq_source, 0);

        return ret;
}

static uint16_t convert_to_vddc(uint8_t vid)
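
If the enable path takes the interrupt reference only when a thermal controller is present, the disable path must mirror the same condition, otherwise the amdgpu_irq_get()/amdgpu_irq_put() reference counts end up unbalanced. An illustrative sketch of that pairing, with hypothetical helper names:

/*
 * Hypothetical helpers: whatever condition gates amdgpu_irq_get() on enable
 * must gate amdgpu_irq_put() on disable to keep the IRQ refcount balanced.
 */
static int sketch_thermal_irq_enable(struct smu_context *smu)
{
        if (!smu->smu_table.thermal_controller_type)
                return 0;       /* no thermal controller: take no reference */

        return amdgpu_irq_get(smu->adev, &smu->irq_source, 0);
}

static int sketch_thermal_irq_disable(struct smu_context *smu)
{
        if (!smu->smu_table.thermal_controller_type)
                return 0;       /* nothing was taken, so nothing to drop */

        return amdgpu_irq_put(smu->adev, &smu->irq_source, 0);
}
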
@ -360,13 +360,6 @@ static bool radeon_fence_is_signaled(struct dma_fence *f)
        if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
                return true;

        if (down_read_trylock(&rdev->exclusive_lock)) {
                radeon_fence_process(rdev, ring);
                up_read(&rdev->exclusive_lock);

                if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
                        return true;
        }
        return false;
}
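
The hunk drops the trylock-and-process path, which appears to leave the .signaled callback as a pure query of the sequence number already recorded for the ring, with no side effects on fence processing. A generic, hypothetical sketch of that pattern for a dma_fence_ops.signaled implementation (not radeon code):

/*
 * Hypothetical fence type and callback: only read already-recorded state,
 * never drive fence processing from the signaled query itself.
 */
#include <linux/dma-fence.h>

struct sketch_fence {
        struct dma_fence base;
        atomic64_t *last_signaled;      /* advanced elsewhere, e.g. by the IRQ handler */
        u64 seq;
};

static bool sketch_fence_signaled(struct dma_fence *f)
{
        struct sketch_fence *fence = container_of(f, struct sketch_fence, base);

        return atomic64_read(fence->last_signaled) >= fence->seq;
}
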