Merge tag 'drm-misc-next-2025-10-02' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-next
drm-misc-next for v6.19:

UAPI Changes:

Cross-subsystem Changes:
- fbcon cleanups.
- Make drivers depend on FB_TILEBLITTING instead of selecting it, and hide
  FB_MODE_HELPERS.

Core Changes:
- More preparations for rust.
- Throttle dirty worker with vblank.
- Use drm_for_each_bridge_in_chain_scoped in drm's bridge code and assorted
  fixes.
- Ensure drm_client_modeset tests are enabled in UML.
- Rename ttm_bo_put to ttm_bo_fini, as a further step in removing the TTM bo
  refcount.
- Add POST_LT_ADJ_REQ training sequence.
- Show list of removed but still allocated bridges.
- Add a simulated vblank interrupt for hardware without it, and add some
  helpers to use them in vkms and hypervdrm.

Driver Changes:
- Assorted small fixes, cleanups and updates to host1x, tegra, panthor,
  amdxdna, gud, vc4, ssd130x, ivpu, panfrost, sysfb, bridge/sn65dsi86,
  solomon, ast, tidss.
- Convert drivers from using .round_rate() to .determine_rate().
- Add support for KD116N3730A07/A12, chromebook mt8189, JT101TM023,
  LQ079L1SX01, raspberrypi 5" panels.
- Improve reclocking on tegra186+ with nouveau.
- Improve runtime pm in amdxdna.
- Add support for HTX_PAI in imx.
- Use a helper to calculate dumb buffer sizes in most drivers.

Signed-off-by: Simona Vetter <simona.vetter@ffwll.ch>
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://lore.kernel.org/r/b412fb91-8545-466a-8102-d89c0f2758a7@linux.intel.com
commit 6200442de0
@@ -167,7 +167,7 @@ ForEachMacros:
   - 'drm_connector_for_each_possible_encoder'
   - 'drm_exec_for_each_locked_object'
   - 'drm_exec_for_each_locked_object_reverse'
-  - 'drm_for_each_bridge_in_chain'
+  - 'drm_for_each_bridge_in_chain_scoped'
   - 'drm_for_each_connector_iter'
   - 'drm_for_each_crtc'
   - 'drm_for_each_crtc_reverse'
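The entry swap above reflects this cycle's bridge-iterator rework: the scoped variant manages the bridge reference for the caller, so an early return cannot leak it. A hedged usage sketch, assuming the new macro keeps the (encoder, bridge) arguments of the drm_for_each_bridge_in_chain() it replaces here and declares the cursor itself:

/* Sketch only: assumes drm_for_each_bridge_in_chain_scoped(encoder, bridge)
 * declares `bridge` itself and drops its reference when the loop scope ends.
 */
static bool chain_has_hdmi_bridge(struct drm_encoder *encoder)
{
	drm_for_each_bridge_in_chain_scoped(encoder, bridge) {
		if (bridge->type == DRM_MODE_CONNECTOR_HDMIA)
			return true; /* early return is safe in the scoped form */
	}

	return false;
}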
@@ -49,6 +49,10 @@ properties:
     $ref: /schemas/graph.yaml#/properties/port
     description: HDMI output port
 
+  port@2:
+    $ref: /schemas/graph.yaml#/properties/port
+    description: Parallel audio input port
+
 required:
   - port@0
   - port@1

@@ -98,5 +102,13 @@ examples:
                 remote-endpoint = <&hdmi0_con>;
             };
         };
+
+        port@2 {
+            reg = <2>;
+
+            endpoint {
+                remote-endpoint = <&pai_to_hdmi_tx>;
+            };
+        };
     };
 };
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/imx/fsl,imx8mp-hdmi-pai.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX8MP HDMI Parallel Audio Interface
+
+maintainers:
+  - Shengjiu Wang <shengjiu.wang@nxp.com>
+
+description:
+  The HDMI TX Parallel Audio Interface (HTX_PAI) is a bridge between the
+  Audio Subsystem and the HDMI TX Controller.
+
+properties:
+  compatible:
+    const: fsl,imx8mp-hdmi-pai
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  clock-names:
+    const: apb
+
+  power-domains:
+    maxItems: 1
+
+  port:
+    $ref: /schemas/graph.yaml#/properties/port
+    description: Output to the HDMI TX controller.
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+  - power-domains
+  - port
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/imx8mp-clock.h>
+    #include <dt-bindings/power/imx8mp-power.h>
+
+    audio-bridge@32fc4800 {
+        compatible = "fsl,imx8mp-hdmi-pai";
+        reg = <0x32fc4800 0x800>;
+        interrupt-parent = <&irqsteer_hdmi>;
+        interrupts = <14>;
+        clocks = <&clk IMX8MP_CLK_HDMI_APB>;
+        clock-names = "apb";
+        power-domains = <&hdmi_blk_ctrl IMX8MP_HDMIBLK_PD_PAI>;
+
+        port {
+            pai_to_hdmi_tx: endpoint {
+                remote-endpoint = <&hdmi_tx_from_pai>;
+            };
+        };
+    };
@@ -20,6 +20,7 @@ properties:
       - bananapi,lhr050h41
       - bestar,bsd1218-a101kl68
       - feixin,k101-im2byl02
+      - raspberrypi,dsi-5inch
      - raspberrypi,dsi-7inch
       - startek,kd050hdfia020
       - tdo,tl050hdv35

@@ -30,6 +31,7 @@ properties:
     maxItems: 1
 
   backlight: true
+  port: true
   power-supply: true
   reset-gpios: true
   rotation: true
@@ -184,6 +184,8 @@ properties:
       - innolux,n156bge-l21
         # Innolux Corporation 7.0" WSVGA (1024x600) TFT LCD panel
       - innolux,zj070na-01p
+        # JuTouch Technology Co., Ltd. 10" JT101TM023 WXGA (1280 x 800) LVDS panel
+      - jutouch,jt101tm023
         # Kaohsiung Opto-Electronics Inc. 5.7" QVGA (320 x 240) TFT LCD panel
       - koe,tx14d24vm1bpa
         # Kaohsiung Opto-Electronics. TX31D200VM0BAA 12.3" HSXGA LVDS panel
@@ -0,0 +1,99 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/sharp,lq079l1sx01.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sharp Microelectronics 7.9" WQXGA TFT LCD panel
+
+maintainers:
+  - Svyatoslav Ryhel <clamor95@gmail.com>
+
+description: >
+  This panel requires a dual-channel DSI host to operate; it supports
+  only left-right split mode, where each channel drives the left or
+  right half of the screen, and only video mode.
+
+  Each of the DSI channels controls a separate DSI peripheral.
+  The peripheral driven by the first link (DSI-LINK1), the left one,
+  is considered the primary peripheral and controls the device.
+
+allOf:
+  - $ref: panel-common-dual.yaml#
+
+properties:
+  compatible:
+    const: sharp,lq079l1sx01
+
+  reg:
+    maxItems: 1
+
+  avdd-supply:
+    description: regulator that supplies the analog voltage
+
+  vddio-supply:
+    description: regulator that supplies the I/O voltage
+
+  vsp-supply:
+    description: positive boost supply regulator
+
+  vsn-supply:
+    description: negative boost supply regulator
+
+  reset-gpios:
+    maxItems: 1
+
+  backlight: true
+  ports: true
+
+required:
+  - compatible
+  - reg
+  - avdd-supply
+  - vddio-supply
+  - ports
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    dsi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@0 {
+            compatible = "sharp,lq079l1sx01";
+            reg = <0>;
+
+            reset-gpios = <&gpio 59 GPIO_ACTIVE_LOW>;
+
+            avdd-supply = <&avdd_lcd>;
+            vddio-supply = <&vdd_lcd_io>;
+            vsp-supply = <&vsp_5v5_lcd>;
+            vsn-supply = <&vsn_5v5_lcd>;
+
+            backlight = <&backlight>;
+
+            ports {
+                #address-cells = <1>;
+                #size-cells = <0>;
+
+                port@0 {
+                    reg = <0>;
+                    panel_in0: endpoint {
+                        remote-endpoint = <&dsi0_out>;
+                    };
+                };
+
+                port@1 {
+                    reg = <1>;
+                    panel_in1: endpoint {
+                        remote-endpoint = <&dsi1_out>;
+                    };
+                };
+            };
+        };
+    };
+...
@@ -835,6 +835,8 @@ patternProperties:
     description: JOZ BV
   "^jty,.*":
     description: JTY
+  "^jutouch,.*":
+    description: JuTouch Technology Co., Ltd.
   "^kam,.*":
     description: Kamstrup A/S
   "^karo,.*":
@@ -92,6 +92,18 @@ GEM Atomic Helper Reference
 .. kernel-doc:: drivers/gpu/drm/drm_gem_atomic_helper.c
    :export:
 
+VBLANK Helper Reference
+-----------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_vblank_helper.c
+   :doc: overview
+
+.. kernel-doc:: include/drm/drm_vblank_helper.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_vblank_helper.c
+   :export:
+
 Simple KMS Helper Reference
 ===========================
 
@@ -623,6 +623,43 @@ Contact: Thomas Zimmermann <tzimmermann@suse.de>, Simona Vetter
 
 Level: Advanced
 
+Implement a new DUMB_CREATE2 ioctl
+----------------------------------
+
+The current DUMB_CREATE ioctl is not well defined. Instead of a pixel and
+framebuffer format, it only accepts a color mode of vague semantics. Assuming
+a linear framebuffer, the color mode gives an idea of the supported pixel
+format. But userspace effectively has to guess the correct values. It really
+only works reliably with framebuffers in XRGB8888. Userspace has begun to
+work around these limitations by computing an arbitrary format's buffer size
+and requesting it in terms of XRGB8888 pixels.
+
+One possible solution is a new ioctl DUMB_CREATE2. It should accept a DRM
+format and a format modifier to resolve the color mode's ambiguity. As
+framebuffers can be multi-planar, the new ioctl has to return the buffer size,
+pitch and GEM handle for each individual color plane.
+
+In the first step, the new ioctl can be limited to the current features of
+the existing DUMB_CREATE. Individual drivers can then be extended to support
+multi-planar formats. Rockchip might require this and would be a good candidate.
+
+It might also be helpful for userspace to query information about the size of
+a potential buffer, if allocated. Userspace would supply geometry and format;
+the kernel would return minimal allocation sizes and scanline pitch. There is
+interest in allocating that memory from another device and providing it to the
+DRM driver (say via dma-buf).
+
+Another requested feature is the ability to allocate a buffer by size, without
+a format. Accelerators use this for their buffer allocation and it could
+likely be generalized.
+
+In addition to the kernel implementation, there must be user-space support
+for the new ioctl. There's code in Mesa that might be able to use the new
+call.
+
+Contact: Thomas Zimmermann <tzimmermann@suse.de>
+
+Level: Advanced
+
 Better Testing
 ==============
 
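To make the todo entry above concrete, a hypothetical argument struct for such an ioctl could look as follows. This is a sketch only: every field name and the fixed four-plane count are invented here to illustrate the proposal and are not upstream UAPI.

/* Hypothetical DUMB_CREATE2 argument layout -- illustration only. */
struct drm_mode_create_dumb2 {
	__u32 width;
	__u32 height;
	__u32 format;       /* DRM_FORMAT_* fourcc instead of a color mode */
	__u64 modifier;     /* DRM_FORMAT_MOD_* to resolve layout ambiguity */
	__u32 flags;
	/* returned by the kernel, one entry per color plane */
	__u32 handles[4];
	__u32 pitches[4];
	__u64 sizes[4];
};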
@@ -14,6 +14,7 @@ amdxdna-y := \
 	amdxdna_mailbox.o \
 	amdxdna_mailbox_helper.o \
 	amdxdna_pci_drv.o \
+	amdxdna_pm.o \
 	amdxdna_sysfs.o \
 	amdxdna_ubuf.o \
 	npu1_regs.o \
@@ -21,6 +21,7 @@
 #include "amdxdna_gem.h"
 #include "amdxdna_mailbox.h"
 #include "amdxdna_pci_drv.h"
+#include "amdxdna_pm.h"
 
 static bool force_cmdlist;
 module_param(force_cmdlist, bool, 0600);

@@ -88,7 +89,7 @@ static int aie2_hwctx_restart(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hw
 		goto out;
 	}
 
-	ret = aie2_config_cu(hwctx);
+	ret = aie2_config_cu(hwctx, NULL);
 	if (ret) {
 		XDNA_ERR(xdna, "Config cu failed, ret %d", ret);
 		goto out;

@@ -167,14 +168,11 @@ static int aie2_hwctx_resume_cb(struct amdxdna_hwctx *hwctx, void *arg)
 
 int aie2_hwctx_resume(struct amdxdna_client *client)
 {
-	struct amdxdna_dev *xdna = client->xdna;
-
 	/*
 	 * The resume path cannot guarantee that mailbox channel can be
 	 * regenerated. If this happen, when submit message to this
 	 * mailbox channel, error will return.
 	 */
-	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
 	return amdxdna_hwctx_walk(client, NULL, aie2_hwctx_resume_cb);
 }
 

@@ -184,6 +182,8 @@ aie2_sched_notify(struct amdxdna_sched_job *job)
 	struct dma_fence *fence = job->fence;
 
 	trace_xdna_job(&job->base, job->hwctx->name, "signaled fence", job->seq);
+
+	amdxdna_pm_suspend_put(job->hwctx->client->xdna);
 	job->hwctx->priv->completed++;
 	dma_fence_signal(fence);
 

@@ -531,7 +531,7 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
 		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
 		.credit_limit = HWCTX_MAX_CMDS,
 		.timeout = msecs_to_jiffies(HWCTX_MAX_TIMEOUT),
-		.name = hwctx->name,
+		.name = "amdxdna_js",
 		.dev = xdna->ddev.dev,
 	};
 	struct drm_gpu_scheduler *sched;

@@ -697,6 +697,14 @@ void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx)
 	kfree(hwctx->cus);
 }
 
+static int aie2_config_cu_resp_handler(void *handle, void __iomem *data, size_t size)
+{
+	struct amdxdna_hwctx *hwctx = handle;
+
+	amdxdna_pm_suspend_put(hwctx->client->xdna);
+	return 0;
+}
+
 static int aie2_hwctx_cu_config(struct amdxdna_hwctx *hwctx, void *buf, u32 size)
 {
 	struct amdxdna_hwctx_param_config_cu *config = buf;

@@ -728,10 +736,14 @@ static int aie2_hwctx_cu_config(struct amdxdna_hwctx *hwctx, void *buf, u32 size
 	if (!hwctx->cus)
 		return -ENOMEM;
 
-	ret = aie2_config_cu(hwctx);
+	ret = amdxdna_pm_resume_get(xdna);
+	if (ret)
+		goto free_cus;
+
+	ret = aie2_config_cu(hwctx, aie2_config_cu_resp_handler);
 	if (ret) {
 		XDNA_ERR(xdna, "Config CU to firmware failed, ret %d", ret);
-		goto free_cus;
+		goto pm_suspend_put;
 	}
 
 	wmb(); /* To avoid locking in command submit when check status */

@@ -739,6 +751,8 @@ static int aie2_hwctx_cu_config(struct amdxdna_hwctx *hwctx, void *buf, u32 size
 
 	return 0;
 
+pm_suspend_put:
+	amdxdna_pm_suspend_put(xdna);
 free_cus:
 	kfree(hwctx->cus);
 	hwctx->cus = NULL;

@@ -862,11 +876,15 @@ int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
 		goto free_chain;
 	}
 
+	ret = amdxdna_pm_resume_get(xdna);
+	if (ret)
+		goto cleanup_job;
+
 retry:
 	ret = drm_gem_lock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
 	if (ret) {
 		XDNA_WARN(xdna, "Failed to lock BOs, ret %d", ret);
-		goto cleanup_job;
+		goto suspend_put;
 	}
 
 	for (i = 0; i < job->bo_cnt; i++) {

@@ -874,7 +892,7 @@ retry:
 		if (ret) {
 			XDNA_WARN(xdna, "Failed to reserve fences %d", ret);
 			drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
-			goto cleanup_job;
+			goto suspend_put;
 		}
 	}
 

@@ -889,12 +907,12 @@ retry:
 				msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
 		} else if (time_after(jiffies, timeout)) {
 			ret = -ETIME;
-			goto cleanup_job;
+			goto suspend_put;
 		}
 
 		ret = aie2_populate_range(abo);
 		if (ret)
-			goto cleanup_job;
+			goto suspend_put;
 		goto retry;
 	}
 }

@@ -920,6 +938,8 @@ retry:
 
 	return 0;
 
+suspend_put:
+	amdxdna_pm_suspend_put(xdna);
 cleanup_job:
 	drm_sched_job_cleanup(&job->base);
 free_chain:
@@ -37,7 +37,7 @@ static int aie2_send_mgmt_msg_wait(struct amdxdna_dev_hdl *ndev,
 	if (!ndev->mgmt_chann)
 		return -ENODEV;
 
-	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+	drm_WARN_ON(&xdna->ddev, xdna->rpm_on && !mutex_is_locked(&xdna->dev_lock));
 	ret = xdna_send_msg_wait(xdna, ndev->mgmt_chann, msg);
 	if (ret == -ETIME) {
 		xdna_mailbox_stop_channel(ndev->mgmt_chann);

@@ -377,15 +377,17 @@ int aie2_register_asyn_event_msg(struct amdxdna_dev_hdl *ndev, dma_addr_t addr,
 	return xdna_mailbox_send_msg(ndev->mgmt_chann, &msg, TX_TIMEOUT);
 }
 
-int aie2_config_cu(struct amdxdna_hwctx *hwctx)
+int aie2_config_cu(struct amdxdna_hwctx *hwctx,
+		   int (*notify_cb)(void *, void __iomem *, size_t))
 {
 	struct mailbox_channel *chann = hwctx->priv->mbox_chann;
 	struct amdxdna_dev *xdna = hwctx->client->xdna;
 	u32 shift = xdna->dev_info->dev_mem_buf_shift;
-	DECLARE_AIE2_MSG(config_cu, MSG_OP_CONFIG_CU);
+	struct config_cu_req req = { 0 };
+	struct xdna_mailbox_msg msg;
 	struct drm_gem_object *gobj;
 	struct amdxdna_gem_obj *abo;
-	int ret, i;
+	int i;
 
 	if (!chann)
 		return -ENODEV;

@@ -423,18 +425,12 @@ int aie2_config_cu(struct amdxdna_hwctx *hwctx,
 	}
 	req.num_cus = hwctx->cus->num_cus;
 
-	ret = xdna_send_msg_wait(xdna, chann, &msg);
-	if (ret == -ETIME)
-		aie2_destroy_context(xdna->dev_handle, hwctx);
-
-	if (resp.status == AIE2_STATUS_SUCCESS) {
-		XDNA_DBG(xdna, "Configure %d CUs, ret %d", req.num_cus, ret);
-		return 0;
-	}
-
-	XDNA_ERR(xdna, "Command opcode 0x%x failed, status 0x%x ret %d",
-		 msg.opcode, resp.status, ret);
-	return ret;
+	msg.send_data = (u8 *)&req;
+	msg.send_size = sizeof(req);
+	msg.handle = hwctx;
+	msg.opcode = MSG_OP_CONFIG_CU;
+	msg.notify_cb = notify_cb;
+	return xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
 }
 
 int aie2_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
@@ -25,6 +25,7 @@
 #include "amdxdna_gem.h"
 #include "amdxdna_mailbox.h"
 #include "amdxdna_pci_drv.h"
+#include "amdxdna_pm.h"
 
 static int aie2_max_col = XRS_MAX_COL;
 module_param(aie2_max_col, uint, 0600);

@@ -223,15 +224,6 @@ static int aie2_mgmt_fw_init(struct amdxdna_dev_hdl *ndev)
 		return ret;
 	}
 
-	if (!ndev->async_events)
-		return 0;
-
-	ret = aie2_error_async_events_send(ndev);
-	if (ret) {
-		XDNA_ERR(ndev->xdna, "Send async events failed");
-		return ret;
-	}
-
 	return 0;
 }
 

@@ -257,6 +249,8 @@ static int aie2_mgmt_fw_query(struct amdxdna_dev_hdl *ndev)
 		return ret;
 	}
 
+	ndev->total_col = min(aie2_max_col, ndev->metadata.cols);
+
 	return 0;
 }
 

@@ -338,6 +332,7 @@ static void aie2_hw_stop(struct amdxdna_dev *xdna)
 	ndev->mbox = NULL;
 	aie2_psp_stop(ndev->psp_hdl);
 	aie2_smu_fini(ndev);
+	aie2_error_async_events_free(ndev);
 	pci_disable_device(pdev);
 
 	ndev->dev_status = AIE2_DEV_INIT;

@@ -424,6 +419,18 @@ static int aie2_hw_start(struct amdxdna_dev *xdna)
 		goto destroy_mgmt_chann;
 	}
 
+	ret = aie2_mgmt_fw_query(ndev);
+	if (ret) {
+		XDNA_ERR(xdna, "failed to query fw, ret %d", ret);
+		goto destroy_mgmt_chann;
+	}
+
+	ret = aie2_error_async_events_alloc(ndev);
+	if (ret) {
+		XDNA_ERR(xdna, "Allocate async events failed, ret %d", ret);
+		goto destroy_mgmt_chann;
+	}
+
 	ndev->dev_status = AIE2_DEV_START;
 
 	return 0;

@@ -459,7 +466,6 @@ static int aie2_hw_resume(struct amdxdna_dev *xdna)
 	struct amdxdna_client *client;
 	int ret;
 
-	guard(mutex)(&xdna->dev_lock);
 	ret = aie2_hw_start(xdna);
 	if (ret) {
 		XDNA_ERR(xdna, "Start hardware failed, %d", ret);

@@ -565,13 +571,6 @@ static int aie2_init(struct amdxdna_dev *xdna)
 		goto release_fw;
 	}
 
-	ret = aie2_mgmt_fw_query(ndev);
-	if (ret) {
-		XDNA_ERR(xdna, "Query firmware failed, ret %d", ret);
-		goto stop_hw;
-	}
-	ndev->total_col = min(aie2_max_col, ndev->metadata.cols);
-
 	xrs_cfg.clk_list.num_levels = ndev->max_dpm_level + 1;
 	for (i = 0; i < xrs_cfg.clk_list.num_levels; i++)
 		xrs_cfg.clk_list.cu_clk_list[i] = ndev->priv->dpm_clk_tbl[i].hclk;

@@ -587,30 +586,10 @@ static int aie2_init(struct amdxdna_dev *xdna)
 		goto stop_hw;
 	}
 
-	ret = aie2_error_async_events_alloc(ndev);
-	if (ret) {
-		XDNA_ERR(xdna, "Allocate async events failed, ret %d", ret);
-		goto stop_hw;
-	}
-
-	ret = aie2_error_async_events_send(ndev);
-	if (ret) {
-		XDNA_ERR(xdna, "Send async events failed, ret %d", ret);
-		goto async_event_free;
-	}
-
-	/* Issue a command to make sure firmware handled async events */
-	ret = aie2_query_firmware_version(ndev, &ndev->xdna->fw_ver);
-	if (ret) {
-		XDNA_ERR(xdna, "Re-query firmware version failed");
-		goto async_event_free;
-	}
-
 	release_firmware(fw);
+	amdxdna_pm_init(xdna);
 	return 0;
 
-async_event_free:
-	aie2_error_async_events_free(ndev);
 stop_hw:
 	aie2_hw_stop(xdna);
 release_fw:

@@ -621,10 +600,8 @@ release_fw:
 
 static void aie2_fini(struct amdxdna_dev *xdna)
 {
-	struct amdxdna_dev_hdl *ndev = xdna->dev_handle;
-
+	amdxdna_pm_fini(xdna);
 	aie2_hw_stop(xdna);
-	aie2_error_async_events_free(ndev);
 }
 
 static int aie2_get_aie_status(struct amdxdna_client *client,

@@ -856,6 +833,10 @@ static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_i
 	if (!drm_dev_enter(&xdna->ddev, &idx))
 		return -ENODEV;
 
+	ret = amdxdna_pm_resume_get(xdna);
+	if (ret)
+		goto dev_exit;
+
 	switch (args->param) {
 	case DRM_AMDXDNA_QUERY_AIE_STATUS:
 		ret = aie2_get_aie_status(client, args);

@@ -882,8 +863,11 @@ static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_i
 		XDNA_ERR(xdna, "Not supported request parameter %u", args->param);
 		ret = -EOPNOTSUPP;
 	}
 
+	amdxdna_pm_suspend_put(xdna);
 	XDNA_DBG(xdna, "Got param %d", args->param);
+
+dev_exit:
 	drm_dev_exit(idx);
 	return ret;
 }

@@ -898,6 +882,12 @@ static int aie2_query_ctx_status_array(struct amdxdna_client *client,
 
 	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
 
+	if (args->element_size > SZ_4K || args->num_element > SZ_1K) {
+		XDNA_DBG(xdna, "Invalid element size %d or number of element %d",
+			 args->element_size, args->num_element);
+		return -EINVAL;
+	}
+
 	array_args.element_size = min(args->element_size,
 				      sizeof(struct amdxdna_drm_hwctx_entry));
 	array_args.buffer = args->buffer;

@@ -926,6 +916,10 @@ static int aie2_get_array(struct amdxdna_client *client,
 	if (!drm_dev_enter(&xdna->ddev, &idx))
 		return -ENODEV;
 
+	ret = amdxdna_pm_resume_get(xdna);
+	if (ret)
+		goto dev_exit;
+
 	switch (args->param) {
 	case DRM_AMDXDNA_HW_CONTEXT_ALL:
 		ret = aie2_query_ctx_status_array(client, args);

@@ -934,8 +928,11 @@ static int aie2_get_array(struct amdxdna_client *client,
 		XDNA_ERR(xdna, "Not supported request parameter %u", args->param);
 		ret = -EOPNOTSUPP;
 	}
 
+	amdxdna_pm_suspend_put(xdna);
 	XDNA_DBG(xdna, "Got param %d", args->param);
+
+dev_exit:
 	drm_dev_exit(idx);
 	return ret;
 }

@@ -974,6 +971,10 @@ static int aie2_set_state(struct amdxdna_client *client,
 	if (!drm_dev_enter(&xdna->ddev, &idx))
 		return -ENODEV;
 
+	ret = amdxdna_pm_resume_get(xdna);
+	if (ret)
+		goto dev_exit;
+
 	switch (args->param) {
 	case DRM_AMDXDNA_SET_POWER_MODE:
 		ret = aie2_set_power_mode(client, args);

@@ -984,6 +985,8 @@ static int aie2_set_state(struct amdxdna_client *client,
 		break;
 	}
 
+	amdxdna_pm_suspend_put(xdna);
+dev_exit:
 	drm_dev_exit(idx);
 	return ret;
 }
@@ -272,7 +272,8 @@ int aie2_map_host_buf(struct amdxdna_dev_hdl *ndev, u32 context_id, u64 addr, u6
 int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf, u32 size, u32 *cols_filled);
 int aie2_register_asyn_event_msg(struct amdxdna_dev_hdl *ndev, dma_addr_t addr, u32 size,
 				 void *handle, int (*cb)(void*, void __iomem *, size_t));
-int aie2_config_cu(struct amdxdna_hwctx *hwctx);
+int aie2_config_cu(struct amdxdna_hwctx *hwctx,
+		   int (*notify_cb)(void *, void __iomem *, size_t));
 int aie2_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
 		 int (*notify_cb)(void *, void __iomem *, size_t));
 int aie2_cmdlist_single_execbuf(struct amdxdna_hwctx *hwctx,
@@ -11,6 +11,7 @@
 
 #include "aie2_pci.h"
 #include "amdxdna_pci_drv.h"
+#include "amdxdna_pm.h"
 
 #define SMU_RESULT_OK 1
 

@@ -59,12 +60,16 @@ int npu1_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
 	u32 freq;
 	int ret;
 
+	ret = amdxdna_pm_resume_get(ndev->xdna);
+	if (ret)
+		return ret;
+
 	ret = aie2_smu_exec(ndev, AIE2_SMU_SET_MPNPUCLK_FREQ,
 			    ndev->priv->dpm_clk_tbl[dpm_level].npuclk, &freq);
 	if (ret) {
 		XDNA_ERR(ndev->xdna, "Set npu clock to %d failed, ret %d\n",
 			 ndev->priv->dpm_clk_tbl[dpm_level].npuclk, ret);
-		return ret;
+		goto suspend_put;
 	}
 	ndev->npuclk_freq = freq;
 

@@ -73,8 +78,10 @@ int npu1_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
 	if (ret) {
 		XDNA_ERR(ndev->xdna, "Set h clock to %d failed, ret %d\n",
 			 ndev->priv->dpm_clk_tbl[dpm_level].hclk, ret);
-		return ret;
+		goto suspend_put;
 	}
 
+	amdxdna_pm_suspend_put(ndev->xdna);
 	ndev->hclk_freq = freq;
 	ndev->dpm_level = dpm_level;
 

@@ -82,26 +89,35 @@ int npu1_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
 		 ndev->npuclk_freq, ndev->hclk_freq);
 
 	return 0;
+
+suspend_put:
+	amdxdna_pm_suspend_put(ndev->xdna);
+	return ret;
 }
 
 int npu4_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
 {
 	int ret;
 
+	ret = amdxdna_pm_resume_get(ndev->xdna);
+	if (ret)
+		return ret;
+
 	ret = aie2_smu_exec(ndev, AIE2_SMU_SET_HARD_DPMLEVEL, dpm_level, NULL);
 	if (ret) {
 		XDNA_ERR(ndev->xdna, "Set hard dpm level %d failed, ret %d ",
 			 dpm_level, ret);
-		return ret;
+		goto suspend_put;
 	}
 
 	ret = aie2_smu_exec(ndev, AIE2_SMU_SET_SOFT_DPMLEVEL, dpm_level, NULL);
 	if (ret) {
 		XDNA_ERR(ndev->xdna, "Set soft dpm level %d failed, ret %d",
 			 dpm_level, ret);
-		return ret;
+		goto suspend_put;
 	}
 
+	amdxdna_pm_suspend_put(ndev->xdna);
 	ndev->npuclk_freq = ndev->priv->dpm_clk_tbl[dpm_level].npuclk;
 	ndev->hclk_freq = ndev->priv->dpm_clk_tbl[dpm_level].hclk;
 	ndev->dpm_level = dpm_level;

@@ -110,6 +126,10 @@ int npu4_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
 		 ndev->npuclk_freq, ndev->hclk_freq);
 
 	return 0;
+
+suspend_put:
+	amdxdna_pm_suspend_put(ndev->xdna);
+	return ret;
 }
 
 int aie2_smu_init(struct amdxdna_dev_hdl *ndev)
@@ -161,19 +161,14 @@ int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct dr
 	if (args->ext || args->ext_flags)
 		return -EINVAL;
 
-	if (!drm_dev_enter(dev, &idx))
-		return -ENODEV;
-
 	hwctx = kzalloc(sizeof(*hwctx), GFP_KERNEL);
-	if (!hwctx) {
-		ret = -ENOMEM;
-		goto exit;
-	}
+	if (!hwctx)
+		return -ENOMEM;
 
 	if (copy_from_user(&hwctx->qos, u64_to_user_ptr(args->qos_p), sizeof(hwctx->qos))) {
 		XDNA_ERR(xdna, "Access QoS info failed");
-		ret = -EFAULT;
-		goto free_hwctx;
+		kfree(hwctx);
+		return -EFAULT;
 	}
 
 	hwctx->client = client;

@@ -181,30 +176,36 @@ int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct dr
 	hwctx->num_tiles = args->num_tiles;
 	hwctx->mem_size = args->mem_size;
 	hwctx->max_opc = args->max_opc;
 
+	guard(mutex)(&xdna->dev_lock);
+
+	if (!drm_dev_enter(dev, &idx)) {
+		ret = -ENODEV;
+		goto free_hwctx;
+	}
+
+	ret = xdna->dev_info->ops->hwctx_init(hwctx);
+	if (ret) {
+		XDNA_ERR(xdna, "Init hwctx failed, ret %d", ret);
+		goto dev_exit;
+	}
+
+	hwctx->name = kasprintf(GFP_KERNEL, "hwctx.%d.%d", client->pid, hwctx->fw_ctx_id);
+	if (!hwctx->name) {
+		ret = -ENOMEM;
+		goto fini_hwctx;
+	}
+
 	ret = xa_alloc_cyclic(&client->hwctx_xa, &hwctx->id, hwctx,
 			      XA_LIMIT(AMDXDNA_INVALID_CTX_HANDLE + 1, MAX_HWCTX_ID),
 			      &client->next_hwctxid, GFP_KERNEL);
 	if (ret < 0) {
 		XDNA_ERR(xdna, "Allocate hwctx ID failed, ret %d", ret);
-		goto free_hwctx;
+		goto free_name;
 	}
 
-	hwctx->name = kasprintf(GFP_KERNEL, "hwctx.%d.%d", client->pid, hwctx->id);
-	if (!hwctx->name) {
-		ret = -ENOMEM;
-		goto rm_id;
-	}
-
-	mutex_lock(&xdna->dev_lock);
-	ret = xdna->dev_info->ops->hwctx_init(hwctx);
-	if (ret) {
-		mutex_unlock(&xdna->dev_lock);
-		XDNA_ERR(xdna, "Init hwctx failed, ret %d", ret);
-		goto free_name;
-	}
-
 	args->handle = hwctx->id;
 	args->syncobj_handle = hwctx->syncobj_hdl;
-	mutex_unlock(&xdna->dev_lock);
 
 	atomic64_set(&hwctx->job_submit_cnt, 0);
 	atomic64_set(&hwctx->job_free_cnt, 0);

@@ -214,12 +215,12 @@ int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct dr
 
 free_name:
 	kfree(hwctx->name);
-rm_id:
-	xa_erase(&client->hwctx_xa, hwctx->id);
+fini_hwctx:
+	xdna->dev_info->ops->hwctx_fini(hwctx);
+dev_exit:
+	drm_dev_exit(idx);
 free_hwctx:
 	kfree(hwctx);
-exit:
-	drm_dev_exit(idx);
 	return ret;
 }
 

@@ -431,11 +432,6 @@ int amdxdna_cmd_submit(struct amdxdna_client *client,
 		goto unlock_srcu;
 	}
 
-	if (hwctx->status != HWCTX_STAT_READY) {
-		XDNA_ERR(xdna, "HW Context is not ready");
-		ret = -EINVAL;
-		goto unlock_srcu;
-	}
-
 	job->hwctx = hwctx;
 	job->mm = current->mm;
 
@@ -392,35 +392,33 @@ static const struct dma_buf_ops amdxdna_dmabuf_ops = {
 	.vunmap = drm_gem_dmabuf_vunmap,
 };
 
-static int amdxdna_gem_obj_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+static int amdxdna_gem_obj_vmap(struct amdxdna_gem_obj *abo, void **vaddr)
 {
-	struct amdxdna_gem_obj *abo = to_xdna_obj(obj);
-
-	iosys_map_clear(map);
-
-	dma_resv_assert_held(obj->resv);
+	struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
+	int ret;
 
 	if (is_import_bo(abo))
-		dma_buf_vmap(abo->dma_buf, map);
+		ret = dma_buf_vmap_unlocked(abo->dma_buf, &map);
 	else
-		drm_gem_shmem_object_vmap(obj, map);
+		ret = drm_gem_vmap(to_gobj(abo), &map);
 
-	if (!map->vaddr)
-		return -ENOMEM;
-
-	return 0;
+	*vaddr = map.vaddr;
+	return ret;
 }
 
-static void amdxdna_gem_obj_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
+static void amdxdna_gem_obj_vunmap(struct amdxdna_gem_obj *abo)
 {
-	struct amdxdna_gem_obj *abo = to_xdna_obj(obj);
+	struct iosys_map map;
 
-	dma_resv_assert_held(obj->resv);
+	if (!abo->mem.kva)
+		return;
+
+	iosys_map_set_vaddr(&map, abo->mem.kva);
+
 	if (is_import_bo(abo))
-		dma_buf_vunmap(abo->dma_buf, map);
+		dma_buf_vunmap_unlocked(abo->dma_buf, &map);
 	else
-		drm_gem_shmem_object_vunmap(obj, map);
+		drm_gem_vunmap(to_gobj(abo), &map);
 }
 
 static struct dma_buf *amdxdna_gem_prime_export(struct drm_gem_object *gobj, int flags)

@@ -455,7 +453,6 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
 {
 	struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
 	struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
-	struct iosys_map map = IOSYS_MAP_INIT_VADDR(abo->mem.kva);
 
 	XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);
 

@@ -468,7 +465,7 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
 	if (abo->type == AMDXDNA_BO_DEV_HEAP)
 		drm_mm_takedown(&abo->mm);
 
-	drm_gem_vunmap(gobj, &map);
+	amdxdna_gem_obj_vunmap(abo);
 	mutex_destroy(&abo->lock);
 
 	if (is_import_bo(abo)) {

@@ -489,8 +486,8 @@ static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {
 	.pin = drm_gem_shmem_object_pin,
 	.unpin = drm_gem_shmem_object_unpin,
 	.get_sg_table = drm_gem_shmem_object_get_sg_table,
-	.vmap = amdxdna_gem_obj_vmap,
-	.vunmap = amdxdna_gem_obj_vunmap,
+	.vmap = drm_gem_shmem_object_vmap,
+	.vunmap = drm_gem_shmem_object_vunmap,
 	.mmap = amdxdna_gem_obj_mmap,
 	.vm_ops = &drm_gem_shmem_vm_ops,
 	.export = amdxdna_gem_prime_export,

@@ -663,7 +660,6 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev,
 			    struct drm_file *filp)
 {
 	struct amdxdna_client *client = filp->driver_priv;
-	struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
 	struct amdxdna_dev *xdna = to_xdna_dev(dev);
 	struct amdxdna_gem_obj *abo;
 	int ret;

@@ -692,12 +688,11 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev,
 	abo->mem.dev_addr = client->xdna->dev_info->dev_mem_base;
 	drm_mm_init(&abo->mm, abo->mem.dev_addr, abo->mem.size);
 
-	ret = drm_gem_vmap(to_gobj(abo), &map);
+	ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva);
 	if (ret) {
 		XDNA_ERR(xdna, "Vmap heap bo failed, ret %d", ret);
 		goto release_obj;
 	}
-	abo->mem.kva = map.vaddr;
 
 	client->dev_heap = abo;
 	drm_gem_object_get(to_gobj(abo));

@@ -748,7 +743,6 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev,
 			  struct amdxdna_drm_create_bo *args,
 			  struct drm_file *filp)
 {
-	struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
 	struct amdxdna_dev *xdna = to_xdna_dev(dev);
 	struct amdxdna_gem_obj *abo;
 	int ret;

@@ -770,12 +764,11 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev,
 	abo->type = AMDXDNA_BO_CMD;
 	abo->client = filp->driver_priv;
 
-	ret = drm_gem_vmap(to_gobj(abo), &map);
+	ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva);
 	if (ret) {
 		XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret);
 		goto release_obj;
 	}
-	abo->mem.kva = map.vaddr;
 
 	return abo;
 
@@ -194,7 +194,8 @@ static void mailbox_release_msg(struct mailbox_channel *mb_chann,
 {
 	MB_DBG(mb_chann, "msg_id 0x%x msg opcode 0x%x",
 	       mb_msg->pkg.header.id, mb_msg->pkg.header.opcode);
-	mb_msg->notify_cb(mb_msg->handle, NULL, 0);
+	if (mb_msg->notify_cb)
+		mb_msg->notify_cb(mb_msg->handle, NULL, 0);
 	kfree(mb_msg);
 }
 

@@ -248,7 +249,7 @@ mailbox_get_resp(struct mailbox_channel *mb_chann, struct xdna_msg_header *heade
 {
 	struct mailbox_msg *mb_msg;
 	int msg_id;
-	int ret;
+	int ret = 0;
 
 	msg_id = header->id;
 	if (!mailbox_validate_msgid(msg_id)) {

@@ -265,9 +266,11 @@ mailbox_get_resp(struct mailbox_channel *mb_chann, struct xdna_msg_header *heade
 
 	MB_DBG(mb_chann, "opcode 0x%x size %d id 0x%x",
 	       header->opcode, header->total_size, header->id);
-	ret = mb_msg->notify_cb(mb_msg->handle, data, header->total_size);
-	if (unlikely(ret))
-		MB_ERR(mb_chann, "Message callback ret %d", ret);
+	if (mb_msg->notify_cb) {
+		ret = mb_msg->notify_cb(mb_msg->handle, data, header->total_size);
+		if (unlikely(ret))
+			MB_ERR(mb_chann, "Message callback ret %d", ret);
+	}
 
 	kfree(mb_msg);
 	return ret;
@@ -13,13 +13,11 @@
 #include <drm/gpu_scheduler.h>
 #include <linux/iommu.h>
 #include <linux/pci.h>
-#include <linux/pm_runtime.h>
 
 #include "amdxdna_ctx.h"
 #include "amdxdna_gem.h"
 #include "amdxdna_pci_drv.h"
-
-#define AMDXDNA_AUTOSUSPEND_DELAY	5000 /* milliseconds */
+#include "amdxdna_pm.h"
 
 MODULE_FIRMWARE("amdnpu/1502_00/npu.sbin");
 MODULE_FIRMWARE("amdnpu/17f0_10/npu.sbin");

@@ -61,17 +59,9 @@ static int amdxdna_drm_open(struct drm_device *ddev, struct drm_file *filp)
 	struct amdxdna_client *client;
 	int ret;
 
-	ret = pm_runtime_resume_and_get(ddev->dev);
-	if (ret) {
-		XDNA_ERR(xdna, "Failed to get rpm, ret %d", ret);
-		return ret;
-	}
-
 	client = kzalloc(sizeof(*client), GFP_KERNEL);
-	if (!client) {
-		ret = -ENOMEM;
-		goto put_rpm;
-	}
+	if (!client)
+		return -ENOMEM;
 
 	client->pid = pid_nr(rcu_access_pointer(filp->pid));
 	client->xdna = xdna;

@@ -106,9 +96,6 @@ unbind_sva:
 	iommu_sva_unbind_device(client->sva);
 failed:
 	kfree(client);
-put_rpm:
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
 
 	return ret;
 }

@@ -130,8 +117,6 @@ static void amdxdna_drm_close(struct drm_device *ddev, struct drm_file *filp)
 
 	XDNA_DBG(xdna, "pid %d closed", client->pid);
 	kfree(client);
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
 }
 
 static int amdxdna_flush(struct file *f, fl_owner_t id)

@@ -310,19 +295,12 @@ static int amdxdna_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto failed_dev_fini;
 	}
 
-	pm_runtime_set_autosuspend_delay(dev, AMDXDNA_AUTOSUSPEND_DELAY);
-	pm_runtime_use_autosuspend(dev);
-	pm_runtime_allow(dev);
-
 	ret = drm_dev_register(&xdna->ddev, 0);
 	if (ret) {
 		XDNA_ERR(xdna, "DRM register failed, ret %d", ret);
-		pm_runtime_forbid(dev);
 		goto failed_sysfs_fini;
 	}
 
-	pm_runtime_mark_last_busy(dev);
-	pm_runtime_put_autosuspend(dev);
 	return 0;
 
 failed_sysfs_fini:

@@ -339,14 +317,10 @@ destroy_notifier_wq:
 static void amdxdna_remove(struct pci_dev *pdev)
 {
 	struct amdxdna_dev *xdna = pci_get_drvdata(pdev);
-	struct device *dev = &pdev->dev;
 	struct amdxdna_client *client;
 
 	destroy_workqueue(xdna->notifier_wq);
 
-	pm_runtime_get_noresume(dev);
-	pm_runtime_forbid(dev);
-
 	drm_dev_unplug(&xdna->ddev);
 	amdxdna_sysfs_fini(xdna);
 

@@ -365,29 +339,9 @@ static void amdxdna_remove(struct pci_dev *pdev)
 	mutex_unlock(&xdna->dev_lock);
 }
 
-static int amdxdna_pmops_suspend(struct device *dev)
-{
-	struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
-
-	if (!xdna->dev_info->ops->suspend)
-		return -EOPNOTSUPP;
-
-	return xdna->dev_info->ops->suspend(xdna);
-}
-
-static int amdxdna_pmops_resume(struct device *dev)
-{
-	struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
-
-	if (!xdna->dev_info->ops->resume)
-		return -EOPNOTSUPP;
-
-	return xdna->dev_info->ops->resume(xdna);
-}
-
 static const struct dev_pm_ops amdxdna_pm_ops = {
-	SYSTEM_SLEEP_PM_OPS(amdxdna_pmops_suspend, amdxdna_pmops_resume)
-	RUNTIME_PM_OPS(amdxdna_pmops_suspend, amdxdna_pmops_resume, NULL)
+	SYSTEM_SLEEP_PM_OPS(amdxdna_pm_suspend, amdxdna_pm_resume)
+	RUNTIME_PM_OPS(amdxdna_pm_suspend, amdxdna_pm_resume, NULL)
 };
 
 static struct pci_driver amdxdna_pci_driver = {
@@ -6,6 +6,7 @@
 #ifndef _AMDXDNA_PCI_DRV_H_
 #define _AMDXDNA_PCI_DRV_H_
 
+#include <drm/drm_print.h>
 #include <linux/workqueue.h>
 #include <linux/xarray.h>
 

@@ -99,6 +100,7 @@ struct amdxdna_dev {
 	struct amdxdna_fw_ver	fw_ver;
 	struct rw_semaphore	notifier_lock; /* for mmu notifier*/
 	struct workqueue_struct *notifier_wq;
+	bool			rpm_on;
 };
 
 /*
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_drv.h>
+#include <linux/pm_runtime.h>
+
+#include "amdxdna_pm.h"
+
+#define AMDXDNA_AUTOSUSPEND_DELAY	5000 /* milliseconds */
+
+int amdxdna_pm_suspend(struct device *dev)
+{
+	struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev));
+	int ret = -EOPNOTSUPP;
+	bool rpm;
+
+	if (xdna->dev_info->ops->suspend) {
+		rpm = xdna->rpm_on;
+		xdna->rpm_on = false;
+		ret = xdna->dev_info->ops->suspend(xdna);
+		xdna->rpm_on = rpm;
+	}
+
+	XDNA_DBG(xdna, "Suspend done ret %d", ret);
+	return ret;
+}
+
+int amdxdna_pm_resume(struct device *dev)
+{
+	struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev));
+	int ret = -EOPNOTSUPP;
+	bool rpm;
+
+	if (xdna->dev_info->ops->resume) {
+		rpm = xdna->rpm_on;
+		xdna->rpm_on = false;
+		ret = xdna->dev_info->ops->resume(xdna);
+		xdna->rpm_on = rpm;
+	}
+
+	XDNA_DBG(xdna, "Resume done ret %d", ret);
+	return ret;
+}
+
+int amdxdna_pm_resume_get(struct amdxdna_dev *xdna)
+{
+	struct device *dev = xdna->ddev.dev;
+	int ret;
+
+	if (!xdna->rpm_on)
+		return 0;
+
+	ret = pm_runtime_resume_and_get(dev);
+	if (ret) {
+		XDNA_ERR(xdna, "Resume failed: %d", ret);
+		pm_runtime_set_suspended(dev);
+	}
+
+	return ret;
+}
+
+void amdxdna_pm_suspend_put(struct amdxdna_dev *xdna)
+{
+	struct device *dev = xdna->ddev.dev;
+
+	if (!xdna->rpm_on)
+		return;
+
+	pm_runtime_put_autosuspend(dev);
+}
+
+void amdxdna_pm_init(struct amdxdna_dev *xdna)
+{
+	struct device *dev = xdna->ddev.dev;
+
+	pm_runtime_set_active(dev);
+	pm_runtime_set_autosuspend_delay(dev, AMDXDNA_AUTOSUSPEND_DELAY);
+	pm_runtime_use_autosuspend(dev);
+	pm_runtime_allow(dev);
+	pm_runtime_put_autosuspend(dev);
+	xdna->rpm_on = true;
+}
+
+void amdxdna_pm_fini(struct amdxdna_dev *xdna)
+{
+	struct device *dev = xdna->ddev.dev;
+
+	xdna->rpm_on = false;
+	pm_runtime_get_noresume(dev);
+	pm_runtime_forbid(dev);
+}
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AMDXDNA_PM_H_
+#define _AMDXDNA_PM_H_
+
+#include "amdxdna_pci_drv.h"
+
+int amdxdna_pm_suspend(struct device *dev);
+int amdxdna_pm_resume(struct device *dev);
+int amdxdna_pm_resume_get(struct amdxdna_dev *xdna);
+void amdxdna_pm_suspend_put(struct amdxdna_dev *xdna);
+void amdxdna_pm_init(struct amdxdna_dev *xdna);
+void amdxdna_pm_fini(struct amdxdna_dev *xdna);
+
+#endif /* _AMDXDNA_PM_H_ */
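The new helpers give amdxdna a single reference-counted bracket around hardware access, and the hunks above wrap every ioctl and DPM path in it. A minimal sketch of the calling pattern (mirroring aie2_get_info above; do_hw_work() is a placeholder, not a driver function):

/* Sketch of the runtime-PM bracket this series adds around ioctl paths. */
static int example_ioctl_body(struct amdxdna_dev *xdna)
{
	int ret;

	ret = amdxdna_pm_resume_get(xdna);	/* no-op until amdxdna_pm_init() */
	if (ret)
		return ret;

	ret = do_hw_work(xdna);			/* device guaranteed powered here */

	amdxdna_pm_suspend_put(xdna);		/* arms the 5 s autosuspend timer */
	return ret;
}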
@@ -398,35 +398,25 @@ static int dct_active_set(void *data, u64 active_percent)
 
 DEFINE_DEBUGFS_ATTRIBUTE(ivpu_dct_fops, dct_active_get, dct_active_set, "%llu\n");
 
+static void print_priority_band(struct seq_file *s, struct ivpu_hw_info *hw,
+				int band, const char *name)
+{
+	seq_printf(s, "%-9s: grace_period %9u process_grace_period %9u process_quantum %9u\n",
+		   name,
+		   hw->hws.grace_period[band],
+		   hw->hws.process_grace_period[band],
+		   hw->hws.process_quantum[band]);
+}
+
 static int priority_bands_show(struct seq_file *s, void *v)
 {
 	struct ivpu_device *vdev = s->private;
 	struct ivpu_hw_info *hw = vdev->hw;
 
-	for (int band = VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE;
-	     band < VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT; band++) {
-		switch (band) {
-		case VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE:
-			seq_puts(s, "Idle:     ");
-			break;
-
-		case VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL:
-			seq_puts(s, "Normal:   ");
-			break;
-
-		case VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS:
-			seq_puts(s, "Focus:    ");
-			break;
-
-		case VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME:
-			seq_puts(s, "Realtime: ");
-			break;
-		}
-
-		seq_printf(s, "grace_period %9u process_grace_period %9u process_quantum %9u\n",
-			   hw->hws.grace_period[band], hw->hws.process_grace_period[band],
-			   hw->hws.process_quantum[band]);
-	}
+	print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE, "Idle");
+	print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL, "Normal");
+	print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS, "Focus");
+	print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME, "Realtime");
 
 	return 0;
 }
@@ -200,6 +200,9 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
 	case DRM_IVPU_PARAM_CAPABILITIES:
 		args->value = ivpu_is_capable(vdev, args->index);
 		break;
+	case DRM_IVPU_PARAM_PREEMPT_BUFFER_SIZE:
+		args->value = ivpu_fw_preempt_buf_size(vdev);
+		break;
 	default:
 		ret = -EINVAL;
 		break;

@@ -377,8 +380,7 @@ int ivpu_boot(struct ivpu_device *vdev)
 	drm_WARN_ON(&vdev->drm, atomic_read(&vdev->job_timeout_counter));
 	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
 
-	/* Update boot params located at first 4KB of FW memory */
-	ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem));
+	ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem_bp));
 
 	ret = ivpu_hw_boot_fw(vdev);
 	if (ret) {
@ -17,15 +17,10 @@
|
|||
#include "ivpu_ipc.h"
|
||||
#include "ivpu_pm.h"
|
||||
|
||||
#define FW_GLOBAL_MEM_START (2ull * SZ_1G)
|
||||
#define FW_GLOBAL_MEM_END (3ull * SZ_1G)
|
||||
#define FW_SHARED_MEM_SIZE SZ_256M /* Must be aligned to FW_SHARED_MEM_ALIGNMENT */
|
||||
#define FW_SHARED_MEM_ALIGNMENT SZ_128K /* VPU MTRR limitation */
|
||||
#define FW_RUNTIME_MAX_SIZE SZ_512M
|
||||
#define FW_SHAVE_NN_MAX_SIZE SZ_2M
|
||||
#define FW_RUNTIME_MIN_ADDR (FW_GLOBAL_MEM_START)
|
||||
#define FW_RUNTIME_MAX_ADDR (FW_GLOBAL_MEM_END - FW_SHARED_MEM_SIZE)
|
||||
#define FW_FILE_IMAGE_OFFSET (VPU_FW_HEADER_SIZE + FW_VERSION_HEADER_SIZE)
|
||||
#define FW_PREEMPT_BUF_MIN_SIZE SZ_4K
|
||||
#define FW_PREEMPT_BUF_MAX_SIZE SZ_32M
|
||||
|
||||
#define WATCHDOG_MSS_REDIRECT 32
|
||||
#define WATCHDOG_NCE_REDIRECT 33
|
||||
|
|
@ -131,9 +126,14 @@ ivpu_fw_check_api_ver_lt(struct ivpu_device *vdev, const struct vpu_firmware_hea
|
|||
return false;
|
||||
}
|
||||
|
||||
static bool is_within_range(u64 addr, size_t size, u64 range_start, size_t range_size)
|
||||
bool ivpu_is_within_range(u64 addr, size_t size, struct ivpu_addr_range *range)
|
||||
{
|
||||
if (addr < range_start || addr + size > range_start + range_size)
|
||||
u64 addr_end;
|
||||
|
||||
if (!range || check_add_overflow(addr, size, &addr_end))
|
||||
return false;
|
||||
|
||||
if (addr < range->start || addr_end > range->end)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
|
|
@ -151,11 +151,56 @@ ivpu_fw_sched_mode_select(struct ivpu_device *vdev, const struct vpu_firmware_he
|
|||
return VPU_SCHEDULING_MODE_HW;
|
||||
}
|
||||
|
||||
static void
|
||||
ivpu_preemption_config_parse(struct ivpu_device *vdev, const struct vpu_firmware_header *fw_hdr)
|
||||
{
|
||||
struct ivpu_fw_info *fw = vdev->fw;
|
||||
u32 primary_preempt_buf_size, secondary_preempt_buf_size;
|
||||
|
||||
if (fw_hdr->preemption_buffer_1_max_size)
|
||||
primary_preempt_buf_size = fw_hdr->preemption_buffer_1_max_size;
|
||||
else
|
||||
primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size;
|
||||
|
||||
if (fw_hdr->preemption_buffer_2_max_size)
|
||||
secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_max_size;
|
||||
else
|
||||
secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size;
|
||||
|
||||
ivpu_dbg(vdev, FW_BOOT, "Preemption buffer size, primary: %u, secondary: %u\n",
|
||||
primary_preempt_buf_size, secondary_preempt_buf_size);
|
||||
|
||||
if (primary_preempt_buf_size < FW_PREEMPT_BUF_MIN_SIZE ||
|
||||
secondary_preempt_buf_size < FW_PREEMPT_BUF_MIN_SIZE) {
|
||||
ivpu_warn(vdev, "Preemption buffers size too small\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (primary_preempt_buf_size > FW_PREEMPT_BUF_MAX_SIZE ||
|
||||
secondary_preempt_buf_size > FW_PREEMPT_BUF_MAX_SIZE) {
|
||||
ivpu_warn(vdev, "Preemption buffers size too big\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (fw->sched_mode != VPU_SCHEDULING_MODE_HW)
|
||||
return;
|
||||
|
||||
if (ivpu_test_mode & IVPU_TEST_MODE_MIP_DISABLE)
|
||||
return;
|
||||
|
||||
vdev->fw->primary_preempt_buf_size = ALIGN(primary_preempt_buf_size, PAGE_SIZE);
|
||||
vdev->fw->secondary_preempt_buf_size = ALIGN(secondary_preempt_buf_size, PAGE_SIZE);
|
||||
}
|
||||
|
||||
static int ivpu_fw_parse(struct ivpu_device *vdev)
|
||||
{
|
||||
struct ivpu_fw_info *fw = vdev->fw;
|
||||
const struct vpu_firmware_header *fw_hdr = (const void *)fw->file->data;
|
||||
u64 runtime_addr, image_load_addr, runtime_size, image_size;
|
||||
struct ivpu_addr_range fw_image_range;
|
||||
u64 boot_params_addr, boot_params_size;
|
||||
u64 fw_version_addr, fw_version_size;
|
||||
u64 runtime_addr, runtime_size;
|
||||
u64 image_load_addr, image_size;
|
||||
|
||||
if (fw->file->size <= FW_FILE_IMAGE_OFFSET) {
|
||||
ivpu_err(vdev, "Firmware file is too small: %zu\n", fw->file->size);
|
||||
|
|
@ -167,18 +212,37 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
runtime_addr = fw_hdr->boot_params_load_address;
|
||||
runtime_size = fw_hdr->runtime_size;
|
||||
image_load_addr = fw_hdr->image_load_address;
|
||||
image_size = fw_hdr->image_size;
|
||||
boot_params_addr = fw_hdr->boot_params_load_address;
|
||||
boot_params_size = SZ_4K;
|
||||
|
||||
if (runtime_addr < FW_RUNTIME_MIN_ADDR || runtime_addr > FW_RUNTIME_MAX_ADDR) {
|
||||
ivpu_err(vdev, "Invalid firmware runtime address: 0x%llx\n", runtime_addr);
|
||||
if (!ivpu_is_within_range(boot_params_addr, boot_params_size, &vdev->hw->ranges.runtime)) {
|
||||
ivpu_err(vdev, "Invalid boot params address: 0x%llx\n", boot_params_addr);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (runtime_size < fw->file->size || runtime_size > FW_RUNTIME_MAX_SIZE) {
|
||||
ivpu_err(vdev, "Invalid firmware runtime size: %llu\n", runtime_size);
|
||||
fw_version_addr = fw_hdr->firmware_version_load_address;
|
||||
fw_version_size = ALIGN(fw_hdr->firmware_version_size, SZ_4K);
|
||||
|
||||
if (fw_version_size != SZ_4K) {
|
||||
ivpu_err(vdev, "Invalid firmware version size: %u\n",
|
||||
fw_hdr->firmware_version_size);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!ivpu_is_within_range(fw_version_addr, fw_version_size, &vdev->hw->ranges.runtime)) {
|
||||
ivpu_err(vdev, "Invalid firmware version address: 0x%llx\n", fw_version_addr);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
runtime_addr = fw_hdr->image_load_address;
|
||||
runtime_size = fw_hdr->runtime_size - boot_params_size - fw_version_size;
|
||||
|
||||
image_load_addr = fw_hdr->image_load_address;
|
||||
image_size = fw_hdr->image_size;
|
||||
|
||||
if (!ivpu_is_within_range(runtime_addr, runtime_size, &vdev->hw->ranges.runtime)) {
|
||||
ivpu_err(vdev, "Invalid firmware runtime address: 0x%llx and size %llu\n",
|
||||
runtime_addr, runtime_size);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
|
@ -187,23 +251,25 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (image_load_addr < runtime_addr ||
|
||||
image_load_addr + image_size > runtime_addr + runtime_size) {
|
||||
ivpu_err(vdev, "Invalid firmware load address size: 0x%llx and size %llu\n",
|
||||
if (!ivpu_is_within_range(image_load_addr, image_size, &vdev->hw->ranges.runtime)) {
|
||||
ivpu_err(vdev, "Invalid firmware load address: 0x%llx and size %llu\n",
|
||||
image_load_addr, image_size);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (ivpu_hw_range_init(vdev, &fw_image_range, image_load_addr, image_size))
|
||||
return -EINVAL;
|
||||
|
||||
if (!ivpu_is_within_range(fw_hdr->entry_point, SZ_4K, &fw_image_range)) {
|
||||
ivpu_err(vdev, "Invalid entry point: 0x%llx\n", fw_hdr->entry_point);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (fw_hdr->shave_nn_fw_size > FW_SHAVE_NN_MAX_SIZE) {
|
||||
ivpu_err(vdev, "SHAVE NN firmware is too big: %u\n", fw_hdr->shave_nn_fw_size);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (fw_hdr->entry_point < image_load_addr ||
|
||||
fw_hdr->entry_point >= image_load_addr + image_size) {
|
||||
ivpu_err(vdev, "Invalid entry point: 0x%llx\n", fw_hdr->entry_point);
|
||||
return -EINVAL;
|
||||
}
|
||||
ivpu_dbg(vdev, FW_BOOT, "Header version: 0x%x, format 0x%x\n",
|
||||
fw_hdr->header_version, fw_hdr->image_format);

@@ -217,6 +283,10 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
        if (IVPU_FW_CHECK_API_COMPAT(vdev, fw_hdr, JSM, 3))
                return -EINVAL;

        fw->boot_params_addr = boot_params_addr;
        fw->boot_params_size = boot_params_size;
        fw->fw_version_addr = fw_version_addr;
        fw->fw_version_size = fw_version_size;
        fw->runtime_addr = runtime_addr;
        fw->runtime_size = runtime_size;
        fw->image_load_offset = image_load_addr - runtime_addr;

@@ -235,22 +305,13 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
        fw->sched_mode = ivpu_fw_sched_mode_select(vdev, fw_hdr);
        ivpu_info(vdev, "Scheduler mode: %s\n", fw->sched_mode ? "HW" : "OS");

        if (fw_hdr->preemption_buffer_1_max_size)
                fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_max_size;
        else
                fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size;
        ivpu_preemption_config_parse(vdev, fw_hdr);
        ivpu_dbg(vdev, FW_BOOT, "Mid-inference preemption %s supported\n",
                 ivpu_fw_preempt_buf_size(vdev) ? "is" : "is not");

        if (fw_hdr->preemption_buffer_2_max_size)
                fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_max_size;
        else
                fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size;
        ivpu_dbg(vdev, FW_BOOT, "Preemption buffer sizes: primary %u, secondary %u\n",
                 fw->primary_preempt_buf_size, fw->secondary_preempt_buf_size);

        if (fw_hdr->ro_section_start_address && !is_within_range(fw_hdr->ro_section_start_address,
                                                                 fw_hdr->ro_section_size,
                                                                 fw_hdr->image_load_address,
                                                                 fw_hdr->image_size)) {
        if (fw_hdr->ro_section_start_address &&
            !ivpu_is_within_range(fw_hdr->ro_section_start_address, fw_hdr->ro_section_size,
                                  &fw_image_range)) {
                ivpu_err(vdev, "Invalid read-only section: start address 0x%llx, size %u\n",
                         fw_hdr->ro_section_start_address, fw_hdr->ro_section_size);
                return -EINVAL;

@@ -259,12 +320,18 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
        fw->read_only_addr = fw_hdr->ro_section_start_address;
        fw->read_only_size = fw_hdr->ro_section_size;

        ivpu_dbg(vdev, FW_BOOT, "Size: file %lu image %u runtime %u shavenn %u\n",
                 fw->file->size, fw->image_size, fw->runtime_size, fw->shave_nn_size);
        ivpu_dbg(vdev, FW_BOOT, "Address: runtime 0x%llx, load 0x%llx, entry point 0x%llx\n",
                 fw->runtime_addr, image_load_addr, fw->entry_point);
        ivpu_dbg(vdev, FW_BOOT, "Boot params: address 0x%llx, size %llu\n",
                 fw->boot_params_addr, fw->boot_params_size);
        ivpu_dbg(vdev, FW_BOOT, "FW version: address 0x%llx, size %llu\n",
                 fw->fw_version_addr, fw->fw_version_size);
        ivpu_dbg(vdev, FW_BOOT, "Runtime: address 0x%llx, size %u\n",
                 fw->runtime_addr, fw->runtime_size);
        ivpu_dbg(vdev, FW_BOOT, "Image load offset: 0x%llx, size %u\n",
                 fw->image_load_offset, fw->image_size);
        ivpu_dbg(vdev, FW_BOOT, "Read-only section: address 0x%llx, size %u\n",
                 fw->read_only_addr, fw->read_only_size);
        ivpu_dbg(vdev, FW_BOOT, "FW entry point: 0x%llx\n", fw->entry_point);
        ivpu_dbg(vdev, FW_BOOT, "SHAVE NN size: %u\n", fw->shave_nn_size);

        return 0;
}

@@ -291,39 +358,33 @@ ivpu_fw_init_wa(struct ivpu_device *vdev)
        IVPU_PRINT_WA(disable_d0i3_msg);
}

static int ivpu_fw_update_global_range(struct ivpu_device *vdev)
{
        struct ivpu_fw_info *fw = vdev->fw;
        u64 start = ALIGN(fw->runtime_addr + fw->runtime_size, FW_SHARED_MEM_ALIGNMENT);
        u64 size = FW_SHARED_MEM_SIZE;

        if (start + size > FW_GLOBAL_MEM_END) {
                ivpu_err(vdev, "No space for shared region, start %lld, size %lld\n", start, size);
                return -EINVAL;
        }

        ivpu_hw_range_init(&vdev->hw->ranges.global, start, size);
        return 0;
}
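/*
 * Worked example of the alignment math above (editor's illustration, using
 * the 37xx runtime range defined later in this series: start 0x84800000,
 * size SZ_64M): runtime end = 0x84800000 + 0x04000000 = 0x88800000, which
 * is already SZ_512K-aligned, so ALIGN() returns it unchanged and the
 * shared region starts there. The -EINVAL path triggers only if
 * start + FW_SHARED_MEM_SIZE would cross FW_GLOBAL_MEM_END.
 */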

static int ivpu_fw_mem_init(struct ivpu_device *vdev)
{
        struct ivpu_fw_info *fw = vdev->fw;
        struct ivpu_addr_range fw_range;
        int log_verb_size;
        int ret;

        ret = ivpu_fw_update_global_range(vdev);
        if (ret)
                return ret;
        fw->mem_bp = ivpu_bo_create_runtime(vdev, fw->boot_params_addr, fw->boot_params_size,
                                            DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
        if (!fw->mem_bp) {
                ivpu_err(vdev, "Failed to create firmware boot params memory buffer\n");
                return -ENOMEM;
        }

        fw_range.start = fw->runtime_addr;
        fw_range.end = fw->runtime_addr + fw->runtime_size;
        fw->mem = ivpu_bo_create(vdev, &vdev->gctx, &fw_range, fw->runtime_size,
                                 DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
        fw->mem_fw_ver = ivpu_bo_create_runtime(vdev, fw->fw_version_addr, fw->fw_version_size,
                                                DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
        if (!fw->mem_fw_ver) {
                ivpu_err(vdev, "Failed to create firmware version memory buffer\n");
                ret = -ENOMEM;
                goto err_free_bp;
        }

        fw->mem = ivpu_bo_create_runtime(vdev, fw->runtime_addr, fw->runtime_size,
                                         DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
        if (!fw->mem) {
                ivpu_err(vdev, "Failed to create firmware runtime memory buffer\n");
                return -ENOMEM;
                ret = -ENOMEM;
                goto err_free_fw_ver;
        }

        ret = ivpu_mmu_context_set_pages_ro(vdev, &vdev->gctx, fw->read_only_addr,

@@ -372,6 +433,10 @@ err_free_log_crit:
        ivpu_bo_free(fw->mem_log_crit);
err_free_fw_mem:
        ivpu_bo_free(fw->mem);
err_free_fw_ver:
        ivpu_bo_free(fw->mem_fw_ver);
err_free_bp:
        ivpu_bo_free(fw->mem_bp);
        return ret;
}

@@ -387,10 +452,14 @@ static void ivpu_fw_mem_fini(struct ivpu_device *vdev)
        ivpu_bo_free(fw->mem_log_verb);
        ivpu_bo_free(fw->mem_log_crit);
        ivpu_bo_free(fw->mem);
        ivpu_bo_free(fw->mem_fw_ver);
        ivpu_bo_free(fw->mem_bp);

        fw->mem_log_verb = NULL;
        fw->mem_log_crit = NULL;
        fw->mem = NULL;
        fw->mem_fw_ver = NULL;
        fw->mem_bp = NULL;
}

int ivpu_fw_init(struct ivpu_device *vdev)

@@ -483,11 +552,6 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_
        ivpu_dbg(vdev, FW_BOOT, "boot_params.cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg = 0x%x\n",
                 boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg);

        ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_base = 0x%llx\n",
                 boot_params->global_memory_allocator_base);
        ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_size = 0x%x\n",
                 boot_params->global_memory_allocator_size);

        ivpu_dbg(vdev, FW_BOOT, "boot_params.shave_nn_fw_base = 0x%llx\n",
                 boot_params->shave_nn_fw_base);

@@ -495,10 +559,6 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_
                 boot_params->watchdog_irq_mss);
        ivpu_dbg(vdev, FW_BOOT, "boot_params.watchdog_irq_nce = 0x%x\n",
                 boot_params->watchdog_irq_nce);
        ivpu_dbg(vdev, FW_BOOT, "boot_params.host_to_vpu_irq = 0x%x\n",
                 boot_params->host_to_vpu_irq);
        ivpu_dbg(vdev, FW_BOOT, "boot_params.job_done_irq = 0x%x\n",
                 boot_params->job_done_irq);

        ivpu_dbg(vdev, FW_BOOT, "boot_params.host_version_id = 0x%x\n",
                 boot_params->host_version_id);

@@ -546,6 +606,8 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_
                 boot_params->system_time_us);
        ivpu_dbg(vdev, FW_BOOT, "boot_params.power_profile = 0x%x\n",
                 boot_params->power_profile);
        ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_uses_ecc_mca_signal = 0x%x\n",
                 boot_params->vpu_uses_ecc_mca_signal);
}

void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params)

@@ -572,6 +634,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
                return;
        }

        memset(boot_params, 0, sizeof(*boot_params));
        vdev->pm->is_warmboot = false;

        boot_params->magic = VPU_BOOT_PARAMS_MAGIC;

@@ -647,6 +710,8 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
        boot_params->d0i3_entry_vpu_ts = 0;
        if (IVPU_WA(disable_d0i2))
                boot_params->power_profile |= BIT(1);
        boot_params->vpu_uses_ecc_mca_signal =
                ivpu_hw_uses_ecc_mca_signal(vdev) ? VPU_BOOT_MCA_ECC_BOTH : 0;

        boot_params->system_time_us = ktime_to_us(ktime_get_real());
        wmb(); /* Flush WC buffers after writing bootparams */

@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020-2024 Intel Corporation
 * Copyright (C) 2020-2025 Intel Corporation
 */

#ifndef __IVPU_FW_H__

@@ -19,10 +19,16 @@ struct ivpu_fw_info {
        const struct firmware *file;
        const char *name;
        char version[FW_VERSION_STR_SIZE];
        struct ivpu_bo *mem_bp;
        struct ivpu_bo *mem_fw_ver;
        struct ivpu_bo *mem;
        struct ivpu_bo *mem_shave_nn;
        struct ivpu_bo *mem_log_crit;
        struct ivpu_bo *mem_log_verb;
        u64 boot_params_addr;
        u64 boot_params_size;
        u64 fw_version_addr;
        u64 fw_version_size;
        u64 runtime_addr;
        u32 runtime_size;
        u64 image_load_offset;

@@ -42,6 +48,7 @@ struct ivpu_fw_info {
        u64 last_heartbeat;
};

bool ivpu_is_within_range(u64 addr, size_t size, struct ivpu_addr_range *range);
int ivpu_fw_init(struct ivpu_device *vdev);
void ivpu_fw_fini(struct ivpu_device *vdev);
void ivpu_fw_load(struct ivpu_device *vdev);

@@ -52,4 +59,9 @@ static inline bool ivpu_fw_is_cold_boot(struct ivpu_device *vdev)
        return vdev->fw->entry_point == vdev->fw->cold_boot_entry_point;
}

static inline u32 ivpu_fw_preempt_buf_size(struct ivpu_device *vdev)
{
        return vdev->fw->primary_preempt_buf_size + vdev->fw->secondary_preempt_buf_size;
}
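/*
 * Usage sketch (editor's note, mirroring the check done at job submission
 * later in this series): a single user-supplied preemption buffer must be
 * able to back both regions, so callers validate against the combined size:
 *
 *     if (ivpu_bo_size(bo) < ivpu_fw_preempt_buf_size(vdev))
 *             return -EINVAL;
 */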

#endif /* __IVPU_FW_H__ */

@@ -15,6 +15,7 @@
#include <drm/drm_utils.h>

#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_mmu.h"

@@ -27,8 +28,8 @@ static const struct drm_gem_object_funcs ivpu_gem_funcs;
static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, const char *action)
{
        ivpu_dbg(vdev, BO,
                 "%6s: bo %8p vpu_addr %9llx size %8zu ctx %d has_pages %d dma_mapped %d mmu_mapped %d wc %d imported %d\n",
                 action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx_id,
                 "%6s: bo %8p size %9zu ctx %d vpu_addr %9llx pages %d sgt %d mmu_mapped %d wc %d imported %d\n",
                 action, bo, ivpu_bo_size(bo), bo->ctx_id, bo->vpu_addr,
                 (bool)bo->base.pages, (bool)bo->base.sgt, bo->mmu_mapped, bo->base.map_wc,
                 (bool)drm_gem_is_imported(&bo->base.base));
}

@@ -43,22 +44,46 @@ static inline void ivpu_bo_unlock(struct ivpu_bo *bo)
        dma_resv_unlock(bo->base.base.resv);
}

static struct sg_table *ivpu_bo_map_attachment(struct ivpu_device *vdev, struct ivpu_bo *bo)
{
        struct sg_table *sgt = bo->base.sgt;

        drm_WARN_ON(&vdev->drm, !bo->base.base.import_attach);

        ivpu_bo_lock(bo);

        if (!sgt) {
                sgt = dma_buf_map_attachment(bo->base.base.import_attach, DMA_BIDIRECTIONAL);
                if (IS_ERR(sgt))
                        ivpu_err(vdev, "Failed to map BO in IOMMU: %ld\n", PTR_ERR(sgt));
                else
                        bo->base.sgt = sgt;
        }

        ivpu_bo_unlock(bo);

        return sgt;
}

/*
 * ivpu_bo_pin() - pin the backing physical pages and map them to VPU.
 * ivpu_bo_bind() - pin the backing physical pages and map them to VPU.
 *
 * This function pins physical memory pages, then maps the physical pages
 * to IOMMU address space and finally updates the VPU MMU page tables
 * to allow the VPU to translate VPU address to IOMMU address.
 */
int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
int __must_check ivpu_bo_bind(struct ivpu_bo *bo)
{
        struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
        struct sg_table *sgt;
        int ret = 0;

        ivpu_dbg_bo(vdev, bo, "pin");
        ivpu_dbg_bo(vdev, bo, "bind");

        sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
        if (bo->base.base.import_attach)
                sgt = ivpu_bo_map_attachment(vdev, bo);
        else
                sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
        if (IS_ERR(sgt)) {
                ret = PTR_ERR(sgt);
                ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);

@@ -99,7 +124,9 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
        ret = ivpu_mmu_context_insert_node(ctx, range, ivpu_bo_size(bo), &bo->mm_node);
        if (!ret) {
                bo->ctx = ctx;
                bo->ctx_id = ctx->id;
                bo->vpu_addr = bo->mm_node.start;
                ivpu_dbg_bo(vdev, bo, "vaddr");
        } else {
                ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret);
        }

@@ -115,7 +142,7 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
{
        struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);

        lockdep_assert(dma_resv_held(bo->base.base.resv) || !kref_read(&bo->base.base.refcount));
        dma_resv_assert_held(bo->base.base.resv);

        if (bo->mmu_mapped) {
                drm_WARN_ON(&vdev->drm, !bo->ctx);

@@ -134,9 +161,14 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
                return;

        if (bo->base.sgt) {
                dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
                sg_free_table(bo->base.sgt);
                kfree(bo->base.sgt);
                if (bo->base.base.import_attach) {
                        dma_buf_unmap_attachment(bo->base.base.import_attach,
                                                 bo->base.sgt, DMA_BIDIRECTIONAL);
                } else {
                        dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
                        sg_free_table(bo->base.sgt);
                        kfree(bo->base.sgt);
                }
                bo->base.sgt = NULL;
        }
}

@@ -182,10 +214,11 @@ struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t siz
struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev,
                                             struct dma_buf *dma_buf)
{
        struct ivpu_device *vdev = to_ivpu_device(dev);
        struct device *attach_dev = dev->dev;
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;
        struct drm_gem_object *obj;
        struct ivpu_bo *bo;
        int ret;

        attach = dma_buf_attach(dma_buf, attach_dev);

@@ -194,25 +227,25 @@ struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev,

        get_dma_buf(dma_buf);

        sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt)) {
                ret = PTR_ERR(sgt);
                goto fail_detach;
        }

        obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
        obj = drm_gem_shmem_prime_import_sg_table(dev, attach, NULL);
        if (IS_ERR(obj)) {
                ret = PTR_ERR(obj);
                goto fail_unmap;
                goto fail_detach;
        }

        obj->import_attach = attach;
        obj->resv = dma_buf->resv;

        bo = to_ivpu_bo(obj);

        mutex_lock(&vdev->bo_list_lock);
        list_add_tail(&bo->bo_list_node, &vdev->bo_list);
        mutex_unlock(&vdev->bo_list_lock);

        ivpu_dbg(vdev, BO, "import: bo %8p size %9zu\n", bo, ivpu_bo_size(bo));

        return obj;

fail_unmap:
        dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
        dma_buf_detach(dma_buf, attach);
        dma_buf_put(dma_buf);

@@ -220,7 +253,7 @@ fail_detach:
        return ERR_PTR(ret);
}

static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags, u32 ctx_id)
static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags)
{
        struct drm_gem_shmem_object *shmem;
        struct ivpu_bo *bo;

@@ -238,7 +271,6 @@ static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 fla
                return ERR_CAST(shmem);

        bo = to_ivpu_bo(&shmem->base);
        bo->ctx_id = ctx_id;
        bo->base.map_wc = flags & DRM_IVPU_BO_WC;
        bo->flags = flags;

@@ -246,7 +278,7 @@ static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 fla
        list_add_tail(&bo->bo_list_node, &vdev->bo_list);
        mutex_unlock(&vdev->bo_list_lock);

        ivpu_dbg_bo(vdev, bo, "alloc");
        ivpu_dbg(vdev, BO, " alloc: bo %8p size %9llu\n", bo, size);

        return bo;
}

@@ -281,6 +313,8 @@ static void ivpu_gem_bo_free(struct drm_gem_object *obj)

        ivpu_dbg_bo(vdev, bo, "free");

        drm_WARN_ON(&vdev->drm, list_empty(&bo->bo_list_node));

        mutex_lock(&vdev->bo_list_lock);
        list_del(&bo->bo_list_node);
        mutex_unlock(&vdev->bo_list_lock);

@@ -290,11 +324,15 @@ static void ivpu_gem_bo_free(struct drm_gem_object *obj)
        drm_WARN_ON(&vdev->drm, ivpu_bo_size(bo) == 0);
        drm_WARN_ON(&vdev->drm, bo->base.vaddr);

        ivpu_bo_lock(bo);
        ivpu_bo_unbind_locked(bo);
        ivpu_bo_unlock(bo);

        drm_WARN_ON(&vdev->drm, bo->mmu_mapped);
        drm_WARN_ON(&vdev->drm, bo->ctx);

        drm_WARN_ON(obj->dev, refcount_read(&bo->base.pages_use_count) > 1);
        drm_WARN_ON(obj->dev, bo->base.base.vma_node.vm_files.rb_node);
        drm_gem_shmem_free(&bo->base);
}

@@ -326,19 +364,23 @@ int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
        if (size == 0)
                return -EINVAL;

        bo = ivpu_bo_alloc(vdev, size, args->flags, file_priv->ctx.id);
        bo = ivpu_bo_alloc(vdev, size, args->flags);
        if (IS_ERR(bo)) {
                ivpu_err(vdev, "Failed to allocate BO: %pe (ctx %u size %llu flags 0x%x)",
                         bo, file_priv->ctx.id, args->size, args->flags);
                return PTR_ERR(bo);
        }

        drm_WARN_ON(&vdev->drm, bo->base.base.handle_count != 0);

        ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
        if (ret)
        if (ret) {
                ivpu_err(vdev, "Failed to create handle for BO: %pe (ctx %u size %llu flags 0x%x)",
                         bo, file_priv->ctx.id, args->size, args->flags);
        else
        } else {
                args->vpu_addr = bo->vpu_addr;
                drm_WARN_ON(&vdev->drm, bo->base.base.handle_count != 1);
        }

        drm_gem_object_put(&bo->base.base);

@@ -360,7 +402,7 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
        drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(range->end));
        drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size));

        bo = ivpu_bo_alloc(vdev, size, flags, IVPU_GLOBAL_CONTEXT_MMU_SSID);
        bo = ivpu_bo_alloc(vdev, size, flags);
        if (IS_ERR(bo)) {
                ivpu_err(vdev, "Failed to allocate BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)",
                         bo, range->start, size, flags);

@@ -371,7 +413,7 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
        if (ret)
                goto err_put;

        ret = ivpu_bo_pin(bo);
        ret = ivpu_bo_bind(bo);
        if (ret)
                goto err_put;

@@ -391,6 +433,21 @@ err_put:
        return NULL;
}

struct ivpu_bo *ivpu_bo_create_runtime(struct ivpu_device *vdev, u64 addr, u64 size, u32 flags)
{
        struct ivpu_addr_range range;

        if (!ivpu_is_within_range(addr, size, &vdev->hw->ranges.runtime)) {
                ivpu_err(vdev, "Invalid runtime BO address 0x%llx size %llu\n", addr, size);
                return NULL;
        }

        if (ivpu_hw_range_init(vdev, &range, addr, size))
                return NULL;

        return ivpu_bo_create(vdev, &vdev->gctx, &range, size, flags);
}
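/*
 * Usage sketch (editor's note, taken from the firmware setup code earlier
 * in this series): fixed-address runtime buffers are created straight from
 * parsed header values, e.g.
 *
 *     fw->mem_bp = ivpu_bo_create_runtime(vdev, fw->boot_params_addr,
 *                                         fw->boot_params_size,
 *                                         DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
 *
 * and fail cleanly when the address falls outside the runtime range.
 */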

struct ivpu_bo *ivpu_bo_create_global(struct ivpu_device *vdev, u64 size, u32 flags)
{
        return ivpu_bo_create(vdev, &vdev->gctx, &vdev->hw->ranges.global, size, flags);

@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020-2023 Intel Corporation
 * Copyright (C) 2020-2025 Intel Corporation
 */
#ifndef __IVPU_GEM_H__
#define __IVPU_GEM_H__

@@ -24,13 +24,14 @@ struct ivpu_bo {
        bool mmu_mapped;
};

int ivpu_bo_pin(struct ivpu_bo *bo);
int ivpu_bo_bind(struct ivpu_bo *bo);
void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);

struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t size);
struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);
struct ivpu_bo *ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                               struct ivpu_addr_range *range, u64 size, u32 flags);
struct ivpu_bo *ivpu_bo_create_runtime(struct ivpu_device *vdev, u64 addr, u64 size, u32 flags);
struct ivpu_bo *ivpu_bo_create_global(struct ivpu_device *vdev, u64 size, u32 flags);
void ivpu_bo_free(struct ivpu_bo *bo);

@@ -96,4 +97,9 @@ static inline u32 cpu_to_vpu_addr(struct ivpu_bo *bo, void *cpu_addr)
        return bo->vpu_addr + (cpu_addr - ivpu_bo_vaddr(bo));
}

static inline bool ivpu_bo_is_mappable(struct ivpu_bo *bo)
{
        return bo->flags & DRM_IVPU_BO_MAPPABLE;
}

#endif /* __IVPU_GEM_H__ */

@@ -8,6 +8,8 @@
#include "ivpu_hw_btrs.h"
#include "ivpu_hw_ip.h"

#include <asm/msr-index.h>
#include <asm/msr.h>
#include <linux/dmi.h>
#include <linux/fault-inject.h>
#include <linux/pm_runtime.h>

@@ -20,6 +22,10 @@ module_param_named_unsafe(fail_hw, ivpu_fail_hw, charp, 0444);
MODULE_PARM_DESC(fail_hw, "<interval>,<probability>,<space>,<times>");
#endif

#define FW_SHARED_MEM_ALIGNMENT SZ_512K /* VPU MTRR limitation */

#define ECC_MCA_SIGNAL_ENABLE_MASK 0xff

static char *platform_to_str(u32 platform)
{
        switch (platform) {

@@ -147,19 +153,39 @@ static void priority_bands_init(struct ivpu_device *vdev)
        vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 200000;
}

int ivpu_hw_range_init(struct ivpu_device *vdev, struct ivpu_addr_range *range, u64 start, u64 size)
{
        u64 end;

        if (!range || check_add_overflow(start, size, &end)) {
                ivpu_err(vdev, "Invalid range: start 0x%llx size %llu\n", start, size);
                return -EINVAL;
        }

        range->start = start;
        range->end = end;

        return 0;
}
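/*
 * Worked example (editor's illustration, hypothetical values): with
 * start = 0xffffffffffff0000 and size = SZ_1M, check_add_overflow() detects
 * the wrap past U64_MAX, so the helper logs the range and returns -EINVAL
 * instead of silently producing end < start, as the old inline did with
 * range->end = start + size.
 */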

static void memory_ranges_init(struct ivpu_device *vdev)
{
        if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
                ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M);
                ivpu_hw_range_init(&vdev->hw->ranges.user, 0x88000000, 511 * SZ_1M);
                ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x180000000, SZ_2G);
                ivpu_hw_range_init(&vdev->hw->ranges.dma, 0x200000000, SZ_128G);
                ivpu_hw_range_init(vdev, &vdev->hw->ranges.runtime, 0x84800000, SZ_64M);
                ivpu_hw_range_init(vdev, &vdev->hw->ranges.global, 0x90000000, SZ_256M);
                ivpu_hw_range_init(vdev, &vdev->hw->ranges.user, 0xa0000000, 511 * SZ_1M);
                ivpu_hw_range_init(vdev, &vdev->hw->ranges.shave, 0x180000000, SZ_2G);
                ivpu_hw_range_init(vdev, &vdev->hw->ranges.dma, 0x200000000, SZ_128G);
        } else {
                ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M);
                ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x80000000, SZ_2G);
                ivpu_hw_range_init(&vdev->hw->ranges.user, 0x100000000, SZ_256G);
                ivpu_hw_range_init(vdev, &vdev->hw->ranges.runtime, 0x80000000, SZ_64M);
                ivpu_hw_range_init(vdev, &vdev->hw->ranges.global, 0x90000000, SZ_256M);
                ivpu_hw_range_init(vdev, &vdev->hw->ranges.shave, 0x80000000, SZ_2G);
                ivpu_hw_range_init(vdev, &vdev->hw->ranges.user, 0x100000000, SZ_256G);
                vdev->hw->ranges.dma = vdev->hw->ranges.user;
        }

        drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vdev->hw->ranges.global.start,
                                            FW_SHARED_MEM_ALIGNMENT));
}

static int wp_enable(struct ivpu_device *vdev)

@@ -373,3 +399,22 @@ irqreturn_t ivpu_hw_irq_handler(int irq, void *ptr)
        pm_runtime_mark_last_busy(vdev->drm.dev);
        return IRQ_HANDLED;
}

bool ivpu_hw_uses_ecc_mca_signal(struct ivpu_device *vdev)
{
        unsigned long long msr_integrity_caps;
        int ret;

        if (ivpu_hw_ip_gen(vdev) < IVPU_HW_IP_50XX)
                return false;

        ret = rdmsrq_safe(MSR_INTEGRITY_CAPS, &msr_integrity_caps);
        if (ret) {
                ivpu_warn(vdev, "Error reading MSR_INTEGRITY_CAPS: %d", ret);
                return false;
        }

        ivpu_dbg(vdev, MISC, "MSR_INTEGRITY_CAPS: 0x%llx\n", msr_integrity_caps);

        return msr_integrity_caps & ECC_MCA_SIGNAL_ENABLE_MASK;
}

@@ -21,6 +21,7 @@ struct ivpu_hw_info {
                bool (*ip_irq_handler)(struct ivpu_device *vdev, int irq);
        } irq;
        struct {
                struct ivpu_addr_range runtime;
                struct ivpu_addr_range global;
                struct ivpu_addr_range user;
                struct ivpu_addr_range shave;

@@ -51,6 +52,8 @@ struct ivpu_hw_info {
};

int ivpu_hw_init(struct ivpu_device *vdev);
int ivpu_hw_range_init(struct ivpu_device *vdev, struct ivpu_addr_range *range, u64 start,
                       u64 size);
int ivpu_hw_power_up(struct ivpu_device *vdev);
int ivpu_hw_power_down(struct ivpu_device *vdev);
int ivpu_hw_reset(struct ivpu_device *vdev);

@@ -60,6 +63,7 @@ void ivpu_irq_handlers_init(struct ivpu_device *vdev);
void ivpu_hw_irq_enable(struct ivpu_device *vdev);
void ivpu_hw_irq_disable(struct ivpu_device *vdev);
irqreturn_t ivpu_hw_irq_handler(int irq, void *ptr);
bool ivpu_hw_uses_ecc_mca_signal(struct ivpu_device *vdev);

static inline u32 ivpu_hw_btrs_irq_handler(struct ivpu_device *vdev, int irq)
{

@@ -71,12 +75,6 @@ static inline u32 ivpu_hw_ip_irq_handler(struct ivpu_device *vdev, int irq)
        return vdev->hw->irq.ip_irq_handler(vdev, irq);
}

static inline void ivpu_hw_range_init(struct ivpu_addr_range *range, u64 start, u64 size)
{
        range->start = start;
        range->end = start + size;
}

static inline u64 ivpu_hw_range_size(const struct ivpu_addr_range *range)
{
        return range->end - range->start;

@@ -752,7 +752,7 @@ int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable)
        }
}

void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 active_percent)
void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u8 active_percent)
{
        u32 val = 0;
        u32 cmd = enable ? DCT_ENABLE : DCT_DISABLE;

@@ -36,7 +36,7 @@ u32 ivpu_hw_btrs_dpu_freq_get(struct ivpu_device *vdev);
bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq);
bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq);
int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable);
void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 active_percent);
void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u8 active_percent);
u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev);
u32 ivpu_hw_btrs_telemetry_size_get(struct ivpu_device *vdev);
u32 ivpu_hw_btrs_telemetry_enable_get(struct ivpu_device *vdev);

@@ -34,22 +34,20 @@ static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq)
static int ivpu_preemption_buffers_create(struct ivpu_device *vdev,
                                          struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
        u64 primary_size = ALIGN(vdev->fw->primary_preempt_buf_size, PAGE_SIZE);
        u64 secondary_size = ALIGN(vdev->fw->secondary_preempt_buf_size, PAGE_SIZE);

        if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW ||
            ivpu_test_mode & IVPU_TEST_MODE_MIP_DISABLE)
        if (ivpu_fw_preempt_buf_size(vdev) == 0)
                return 0;

        cmdq->primary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.user,
                                                   primary_size, DRM_IVPU_BO_WC);
                                                   vdev->fw->primary_preempt_buf_size,
                                                   DRM_IVPU_BO_WC);
        if (!cmdq->primary_preempt_buf) {
                ivpu_err(vdev, "Failed to create primary preemption buffer\n");
                return -ENOMEM;
        }

        cmdq->secondary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.dma,
                                                     secondary_size, DRM_IVPU_BO_WC);
                                                     vdev->fw->secondary_preempt_buf_size,
                                                     DRM_IVPU_BO_WC);
        if (!cmdq->secondary_preempt_buf) {
                ivpu_err(vdev, "Failed to create secondary preemption buffer\n");
                goto err_free_primary;

@@ -66,20 +64,39 @@ err_free_primary:
static void ivpu_preemption_buffers_free(struct ivpu_device *vdev,
                                         struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
        if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
                return;

        if (cmdq->primary_preempt_buf)
                ivpu_bo_free(cmdq->primary_preempt_buf);
        if (cmdq->secondary_preempt_buf)
                ivpu_bo_free(cmdq->secondary_preempt_buf);
}

static int ivpu_preemption_job_init(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv,
                                    struct ivpu_cmdq *cmdq, struct ivpu_job *job)
{
        int ret;

        /* Use preemption buffer provided by the user space */
        if (job->primary_preempt_buf)
                return 0;

        if (!cmdq->primary_preempt_buf) {
                /* Allocate per command queue preemption buffers */
                ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq);
                if (ret)
                        return ret;
        }

        /* Use preemption buffers allocated by the kernel */
        job->primary_preempt_buf = cmdq->primary_preempt_buf;
        job->secondary_preempt_buf = cmdq->secondary_preempt_buf;

        return 0;
}

static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
{
        struct ivpu_device *vdev = file_priv->vdev;
        struct ivpu_cmdq *cmdq;
        int ret;

        cmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL);
        if (!cmdq)

@@ -89,10 +106,6 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
        if (!cmdq->mem)
                goto err_free_cmdq;

        ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq);
        if (ret)
                ivpu_warn(vdev, "Failed to allocate preemption buffers, preemption limited\n");

        return cmdq;

err_free_cmdq:

@@ -219,11 +232,13 @@ static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *
        ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id,
                                   cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));

        if (!ret)
        if (!ret) {
                ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d priority %d\n",
                         cmdq->db_id, cmdq->id, file_priv->ctx.id, cmdq->priority);
        else
        } else {
                xa_erase(&vdev->db_xa, cmdq->db_id);
                cmdq->db_id = 0;
        }

        return ret;
}

@@ -427,17 +442,14 @@ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
        if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_SUBMISSION))
                entry->flags = VPU_JOB_FLAGS_NULL_SUBMISSION_MASK;

        if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
                if (cmdq->primary_preempt_buf) {
                        entry->primary_preempt_buf_addr = cmdq->primary_preempt_buf->vpu_addr;
                        entry->primary_preempt_buf_size = ivpu_bo_size(cmdq->primary_preempt_buf);
                }
        if (job->primary_preempt_buf) {
                entry->primary_preempt_buf_addr = job->primary_preempt_buf->vpu_addr;
                entry->primary_preempt_buf_size = ivpu_bo_size(job->primary_preempt_buf);
        }

                if (cmdq->secondary_preempt_buf) {
                        entry->secondary_preempt_buf_addr = cmdq->secondary_preempt_buf->vpu_addr;
                        entry->secondary_preempt_buf_size =
                                ivpu_bo_size(cmdq->secondary_preempt_buf);
                }
        if (job->secondary_preempt_buf) {
                entry->secondary_preempt_buf_addr = job->secondary_preempt_buf->vpu_addr;
                entry->secondary_preempt_buf_size = ivpu_bo_size(job->secondary_preempt_buf);
        }

        wmb(); /* Ensure that tail is updated after filling entry */
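/*
 * Editor's sketch of the producer-side ordering the barrier above enforces
 * (illustrative names; the actual tail update follows outside this hunk):
 *
 *     entry->batch_buf_addr = ...;          // fill the job queue slot
 *     wmb();                                // slot writes before tail write
 *     jobq->header.tail = next_tail;        // publish the entry to the NPU
 *
 * Without the barrier the device could observe the new tail while the slot
 * contents are still in flight through the write-combining buffers.
 */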

@@ -661,6 +673,13 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id)
                goto err_unlock;
        }

        ret = ivpu_preemption_job_init(vdev, file_priv, cmdq, job);
        if (ret) {
                ivpu_err(vdev, "Failed to initialize preemption buffers for job %d: %d\n",
                         job->job_id, ret);
                goto err_unlock;
        }

        job->cmdq_id = cmdq->id;

        is_first_job = xa_empty(&vdev->submitted_jobs_xa);

@@ -714,7 +733,7 @@ err_unlock:

static int
ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 *buf_handles,
                                u32 buf_count, u32 commands_offset)
                                u32 buf_count, u32 commands_offset, u32 preempt_buffer_index)
{
        struct ivpu_file_priv *file_priv = job->file_priv;
        struct ivpu_device *vdev = file_priv->vdev;

@@ -732,7 +751,7 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32

                job->bos[i] = to_ivpu_bo(obj);

                ret = ivpu_bo_pin(job->bos[i]);
                ret = ivpu_bo_bind(job->bos[i]);
                if (ret)
                        return ret;
        }

@@ -750,6 +769,20 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32

        job->cmd_buf_vpu_addr = bo->vpu_addr + commands_offset;

        if (preempt_buffer_index) {
                struct ivpu_bo *preempt_bo = job->bos[preempt_buffer_index];

                if (ivpu_bo_size(preempt_bo) < ivpu_fw_preempt_buf_size(vdev)) {
                        ivpu_warn(vdev, "Preemption buffer is too small\n");
                        return -EINVAL;
                }
                if (ivpu_bo_is_mappable(preempt_bo)) {
                        ivpu_warn(vdev, "Preemption buffer cannot be mappable\n");
                        return -EINVAL;
                }
                job->primary_preempt_buf = preempt_bo;
        }

        ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, buf_count,
                                        &acquire_ctx);
        if (ret) {

@@ -780,7 +813,7 @@ unlock_reservations:

static int ivpu_submit(struct drm_file *file, struct ivpu_file_priv *file_priv, u32 cmdq_id,
                       u32 buffer_count, u32 engine, void __user *buffers_ptr, u32 cmds_offset,
                       u8 priority)
                       u32 preempt_buffer_index, u8 priority)
{
        struct ivpu_device *vdev = file_priv->vdev;
        struct ivpu_job *job;

@@ -812,7 +845,8 @@ static int ivpu_submit(struct drm_file *file, struct ivpu_file_priv *file_priv,
                goto err_exit_dev;
        }

        ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, buffer_count, cmds_offset);
        ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, buffer_count, cmds_offset,
                                              preempt_buffer_index);
        if (ret) {
                ivpu_err(vdev, "Failed to prepare job: %d\n", ret);
                goto err_destroy_job;

@@ -866,7 +900,7 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        priority = ivpu_job_to_jsm_priority(args->priority);

        return ivpu_submit(file, file_priv, 0, args->buffer_count, args->engine,
                           (void __user *)args->buffers_ptr, args->commands_offset, priority);
                           (void __user *)args->buffers_ptr, args->commands_offset, 0, priority);
}

int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)

@@ -883,6 +917,9 @@ int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *
        if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT)
                return -EINVAL;

        if (args->preempt_buffer_index >= args->buffer_count)
                return -EINVAL;

        if (!IS_ALIGNED(args->commands_offset, 8))
                return -EINVAL;

@@ -893,7 +930,8 @@ int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *
                return -EBADFD;

        return ivpu_submit(file, file_priv, args->cmdq_id, args->buffer_count, VPU_ENGINE_COMPUTE,
                           (void __user *)args->buffers_ptr, args->commands_offset, 0);
                           (void __user *)args->buffers_ptr, args->commands_offset,
                           args->preempt_buffer_index, 0);
}

int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)

@@ -1012,7 +1050,7 @@ void ivpu_context_abort_work_fn(struct work_struct *work)

        if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
                if (ivpu_jsm_reset_engine(vdev, 0))
                        return;
                        goto runtime_put;

        mutex_lock(&vdev->context_list_lock);
        xa_for_each(&vdev->context_xa, ctx_id, file_priv) {

@@ -1036,7 +1074,7 @@ void ivpu_context_abort_work_fn(struct work_struct *work)
                goto runtime_put;

        if (ivpu_jsm_hws_resume_engine(vdev, 0))
                return;
                goto runtime_put;
        /*
         * In hardware scheduling mode NPU already has stopped processing jobs
         * and won't send us any further notifications, thus we have to free job related resources

@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020-2024 Intel Corporation
 * Copyright (C) 2020-2025 Intel Corporation
 */

#ifndef __IVPU_JOB_H__

@@ -15,12 +15,17 @@ struct ivpu_device;
struct ivpu_file_priv;

/**
 * struct ivpu_cmdq - Object representing device queue used to send jobs.
 * @jobq: Pointer to job queue memory shared with the device
 * @mem: Memory allocated for the job queue, shared with device
 * @entry_count: Number of job entries in the queue
 * @db_id: Doorbell assigned to this job queue
 * @db_registered: True if doorbell is registered in device
 * struct ivpu_cmdq - Represents a command queue for submitting jobs to the VPU.
 * Tracks queue memory, preemption buffers, and metadata for job management.
 * @jobq: Pointer to job queue memory shared with the device
 * @primary_preempt_buf: Primary preemption buffer for this queue (optional)
 * @secondary_preempt_buf: Secondary preemption buffer for this queue (optional)
 * @mem: Memory allocated for the job queue, shared with device
 * @entry_count: Number of job entries in the queue
 * @id: Unique command queue ID
 * @db_id: Doorbell ID assigned to this job queue
 * @priority: Priority level of the command queue
 * @is_legacy: True if this is a legacy command queue
 */
struct ivpu_cmdq {
        struct vpu_job_queue *jobq;

@@ -35,16 +40,21 @@ struct ivpu_cmdq {
};

/**
 * struct ivpu_job - KMD object that represents batchbuffer / DMA buffer.
 * Each batch / DMA buffer is a job to be submitted and executed by the VPU FW.
 * This is a unit of execution, tracked by the job_id for
 * any status reporting from VPU FW through IPC JOB RET/DONE message.
 * @file_priv: The client that submitted this job
 * @job_id: Job ID for KMD tracking and job status reporting from VPU FW
 * @status: Status of the Job from IPC JOB RET/DONE message
 * @batch_buffer: CPU vaddr points to the batch buffer memory allocated for the job
 * @submit_status_offset: Offset within batch buffer where job completion handler
 *                        will update the job status
 * struct ivpu_job - Represents a batch or DMA buffer submitted to the VPU.
 * Each job is a unit of execution, tracked by job_id for status reporting from VPU FW.
 * The structure holds all resources and metadata needed for job submission, execution,
 * and completion handling.
 * @vdev: Pointer to the VPU device
 * @file_priv: The client context that submitted this job
 * @done_fence: Fence signaled when job completes
 * @cmd_buf_vpu_addr: VPU address of the command buffer for this job
 * @cmdq_id: Command queue ID used for submission
 * @job_id: Unique job ID for tracking and status reporting
 * @engine_idx: Engine index for job execution
 * @primary_preempt_buf: Primary preemption buffer for job
 * @secondary_preempt_buf: Secondary preemption buffer for job (optional)
 * @bo_count: Number of buffer objects associated with this job
 * @bos: Array of buffer objects used by the job (batch buffer is at index 0)
 */
struct ivpu_job {
        struct ivpu_device *vdev;

@@ -54,6 +64,8 @@ struct ivpu_job {
        u32 cmdq_id;
        u32 job_id;
        u32 engine_idx;
        struct ivpu_bo *primary_preempt_buf;
        struct ivpu_bo *secondary_preempt_buf;
        size_t bo_count;
        struct ivpu_bo *bos[] __counted_by(bo_count);
};

@@ -568,7 +568,7 @@ void ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ct
        mutex_init(&ctx->lock);

        if (!context_id) {
                start = vdev->hw->ranges.global.start;
                start = vdev->hw->ranges.runtime.start;
                end = vdev->hw->ranges.shave.end;
        } else {
                start = min_t(u64, vdev->hw->ranges.user.start, vdev->hw->ranges.shave.start);

@@ -54,7 +54,7 @@ static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev)
static void ivpu_pm_prepare_warm_boot(struct ivpu_device *vdev)
{
        struct ivpu_fw_info *fw = vdev->fw;
        struct vpu_boot_params *bp = ivpu_bo_vaddr(fw->mem);
        struct vpu_boot_params *bp = ivpu_bo_vaddr(fw->mem_bp);

        if (!bp->save_restore_ret_address) {
                ivpu_pm_prepare_cold_boot(vdev);

@@ -502,6 +502,11 @@ void ivpu_pm_irq_dct_work_fn(struct work_struct *work)
        else
                ret = ivpu_pm_dct_disable(vdev);

        if (!ret)
                ivpu_hw_btrs_dct_set_status(vdev, enable, vdev->pm->dct_active_percent);
        if (!ret) {
                /* Convert percent to U1.7 format */
                u8 val = DIV_ROUND_CLOSEST(vdev->pm->dct_active_percent * 128, 100);

                ivpu_hw_btrs_dct_set_status(vdev, enable, val);
        }
}
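/*
 * Worked example (editor's illustration): U1.7 is a fixed-point format with
 * one integer bit and seven fraction bits, so 100% maps to 128. For a 75%
 * duty cycle: DIV_ROUND_CLOSEST(75 * 128, 100) = 96, and 96 / 128 = 0.75.
 * The u8 parameter of ivpu_hw_btrs_dct_set_status() fits the full 0..128
 * range.
 */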

@@ -1,15 +1,16 @@
/* SPDX-License-Identifier: MIT */
/*
 * Copyright (c) 2020-2024, Intel Corporation.
 * Copyright (c) 2020-2025, Intel Corporation.
 */

/**
 * @addtogroup Jsm
 * @{
 */

/**
 * @file
 * @brief JSM shared definitions
 *
 * @ingroup Jsm
 * @brief JSM shared definitions
 * @{
 */
#ifndef VPU_JSM_API_H
#define VPU_JSM_API_H

@@ -22,12 +23,12 @@
/*
 * Minor version changes when API backward compatibility is preserved.
 */
#define VPU_JSM_API_VER_MINOR 29
#define VPU_JSM_API_VER_MINOR 32

/*
 * API header changed (field names, documentation, formatting) but API itself has not been changed
 */
#define VPU_JSM_API_VER_PATCH 0
#define VPU_JSM_API_VER_PATCH 5

/*
 * Index in the API version table

@@ -71,9 +72,12 @@
#define VPU_JSM_STATUS_MVNCI_OUT_OF_RESOURCES 0xAU
#define VPU_JSM_STATUS_MVNCI_NOT_IMPLEMENTED 0xBU
#define VPU_JSM_STATUS_MVNCI_INTERNAL_ERROR 0xCU
/* Job status returned when the job was preempted mid-inference */
/* @deprecated (use VPU_JSM_STATUS_PREEMPTED_MID_COMMAND instead) */
#define VPU_JSM_STATUS_PREEMPTED_MID_INFERENCE 0xDU
/* Job status returned when the job was preempted mid-command */
#define VPU_JSM_STATUS_PREEMPTED_MID_COMMAND 0xDU
#define VPU_JSM_STATUS_MVNCI_CONTEXT_VIOLATION_HW 0xEU
#define VPU_JSM_STATUS_MVNCI_PREEMPTION_TIMED_OUT 0xFU

/*
 * Host <-> VPU IPC channels.

@@ -134,11 +138,21 @@ enum {
         * 2. Native fence queues are only supported on VPU 40xx onwards.
         */
        VPU_JOB_QUEUE_FLAGS_USE_NATIVE_FENCE_MASK = (1 << 1U),

        /*
         * Enable turbo mode for testing NPU performance; not recommended for regular usage.
         */
        VPU_JOB_QUEUE_FLAGS_TURBO_MODE = (1 << 2U)
        VPU_JOB_QUEUE_FLAGS_TURBO_MODE = (1 << 2U),
        /*
         * Queue error detection mode flag
         * For 'interactive' queues (this bit not set), the FW will identify queues that have not
         * completed a job inside the TDR timeout as in error as part of engine reset sequence.
         * For 'non-interactive' queues (this bit set), the FW will identify queues that have not
         * progressed the heartbeat inside the non-interactive no-progress timeout as in error as
         * part of engine reset sequence. Additionally, there is an upper limit applied to these
         * queues: even if they progress the heartbeat, if they run longer than non-interactive
         * timeout, then the FW will also identify them as in error.
         */
        VPU_JOB_QUEUE_FLAGS_NON_INTERACTIVE = (1 << 3U)
};
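/*
 * Editor's illustration (hypothetical host-side values): a long-running
 * background queue that should be policed by the heartbeat detector rather
 * than the TDR timeout would register with
 *
 *     flags = VPU_JOB_QUEUE_FLAGS_NON_INTERACTIVE;
 *
 * while an interactive queue leaves bit 3 clear and is reset if a job
 * misses the TDR deadline.
 */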

/*

@@ -209,7 +223,7 @@ enum {
 */
#define VPU_INLINE_CMD_TYPE_FENCE_SIGNAL 0x2

/*
/**
 * Job scheduling priority bands for both hardware scheduling and OS scheduling.
 */
enum vpu_job_scheduling_priority_band {

@@ -220,16 +234,16 @@ enum vpu_job_scheduling_priority_band {
        VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT = 4,
};

/*
/**
 * Job format.
 * Jobs defines the actual workloads to be executed by a given engine.
 */
struct vpu_job_queue_entry {
        /**< Address of VPU commands batch buffer */
        /** Address of VPU commands batch buffer */
        u64 batch_buf_addr;
        /**< Job ID */
        /** Job ID */
        u32 job_id;
        /**< Flags bit field, see VPU_JOB_FLAGS_* above */
        /** Flags bit field, see VPU_JOB_FLAGS_* above */
        u32 flags;
        /**
         * Doorbell ring timestamp taken by KMD from SoC's global system clock, in

@@ -237,20 +251,20 @@ struct vpu_job_queue_entry {
         * to match other profiling timestamps.
         */
        u64 doorbell_timestamp;
        /**< Extra id for job tracking, used only in the firmware perf traces */
        /** Extra id for job tracking, used only in the firmware perf traces */
        u64 host_tracking_id;
        /**< Address of the primary preemption buffer to use for this job */
        /** Address of the primary preemption buffer to use for this job */
        u64 primary_preempt_buf_addr;
        /**< Size of the primary preemption buffer to use for this job */
        /** Size of the primary preemption buffer to use for this job */
        u32 primary_preempt_buf_size;
        /**< Size of secondary preemption buffer to use for this job */
        /** Size of secondary preemption buffer to use for this job */
        u32 secondary_preempt_buf_size;
        /**< Address of secondary preemption buffer to use for this job */
        /** Address of secondary preemption buffer to use for this job */
        u64 secondary_preempt_buf_addr;
        u64 reserved_0;
};

/*
/**
 * Inline command format.
 * Inline commands are the commands executed at scheduler level (typically,
 * synchronization directives). Inline command and job objects must be of

@@ -258,34 +272,36 @@ struct vpu_job_queue_entry {
 */
struct vpu_inline_cmd {
        u64 reserved_0;
        /* Inline command type, see VPU_INLINE_CMD_TYPE_* defines. */
        /** Inline command type, see VPU_INLINE_CMD_TYPE_* defines. */
        u32 type;
        /* Flags bit field, see VPU_JOB_FLAGS_* above. */
        /** Flags bit field, see VPU_JOB_FLAGS_* above. */
        u32 flags;
        /* Inline command payload. Depends on inline command type. */
        union {
                /* Fence (wait and signal) commands' payload. */
                struct {
                        /* Fence object handle. */
        /** Inline command payload. Depends on inline command type. */
        union payload {
                /** Fence (wait and signal) commands' payload. */
                struct fence {
                        /** Fence object handle. */
                        u64 fence_handle;
                        /* User VA of the current fence value. */
                        /** User VA of the current fence value. */
                        u64 current_value_va;
                        /* User VA of the monitored fence value (read-only). */
                        /** User VA of the monitored fence value (read-only). */
                        u64 monitored_value_va;
                        /* Value to wait for or write in fence location. */
                        /** Value to wait for or write in fence location. */
                        u64 value;
                        /* User VA of the log buffer in which to add log entry on completion. */
                        /** User VA of the log buffer in which to add log entry on completion. */
                        u64 log_buffer_va;
                        /* NPU private data. */
                        /** NPU private data. */
                        u64 npu_private_data;
                } fence;
                /* Other commands do not have a payload. */
                /* Payload definition for future inline commands can be inserted here. */
                /**
                 * Other commands do not have a payload:
                 * Payload definition for future inline commands can be inserted here.
                 */
                u64 reserved_1[6];
        } payload;
};

/*
/**
 * Job queue slots can be populated either with job objects or inline command objects.
 */
union vpu_jobq_slot {

@@ -293,7 +309,7 @@ union vpu_jobq_slot {
        struct vpu_inline_cmd inline_cmd;
};

/*
/**
 * Job queue control registers.
 */
struct vpu_job_queue_header {

@@ -301,18 +317,18 @@ struct vpu_job_queue_header {
        u32 head;
        u32 tail;
        u32 flags;
        /* Set to 1 to indicate priority_band field is valid */
        /** Set to 1 to indicate priority_band field is valid */
        u32 priority_band_valid;
        /*
        /**
         * Priority for the work of this job queue, valid only if the HWS is NOT used
         * and the `priority_band_valid` is set to 1. It is applied only during
         * the VPU_JSM_MSG_REGISTER_DB message processing.
         * The device firmware might use the `priority_band` to optimize the power
         * and the @ref priority_band_valid is set to 1. It is applied only during
         * the @ref VPU_JSM_MSG_REGISTER_DB message processing.
         * The device firmware might use the priority_band to optimize the power
         * management logic, but it will not affect the order of jobs.
         * Available priority bands: @see enum vpu_job_scheduling_priority_band
         */
        u32 priority_band;
        /* Inside realtime band assigns a further priority, limited to 0..31 range */
        /** Inside realtime band assigns a further priority, limited to 0..31 range */
        u32 realtime_priority_level;
        u32 reserved_0[9];
};

@@ -337,16 +353,16 @@ enum vpu_trace_entity_type {
        VPU_TRACE_ENTITY_TYPE_HW_COMPONENT = 2,
};

/*
/**
 * HWS specific log buffer header details.
 * Total size is 32 bytes.
 */
struct vpu_hws_log_buffer_header {
        /* Written by VPU after adding a log entry. Initialised by host to 0. */
        /** Written by VPU after adding a log entry. Initialised by host to 0. */
        u32 first_free_entry_index;
        /* Incremented by VPU every time the VPU writes the 0th entry; initialised by host to 0. */
        /** Incremented by VPU every time the VPU writes the 0th entry; initialised by host to 0. */
        u32 wraparound_count;
        /*
        /**
         * This is the number of buffers that can be stored in the log buffer provided by the host.
         * It is written by host before passing buffer to VPU. VPU should consider it read-only.
         */

@@ -354,14 +370,14 @@ struct vpu_hws_log_buffer_header {
        u64 reserved[2];
};

/*
/**
 * HWS specific log buffer entry details.
 * Total size is 32 bytes.
 */
struct vpu_hws_log_buffer_entry {
        /* VPU timestamp must be an invariant timer tick (not impacted by DVFS) */
        /** VPU timestamp must be an invariant timer tick (not impacted by DVFS) */
        u64 vpu_timestamp;
        /*
        /**
         * Operation type:
         * 0 - context state change
         * 1 - queue new work

@@ -371,7 +387,7 @@ struct vpu_hws_log_buffer_entry {
         */
        u32 operation_type;
        u32 reserved;
        /* Operation data depends on operation type */
        /** Operation data depends on operation type */
        u64 operation_data[2];
};

@@ -381,51 +397,54 @@ enum vpu_hws_native_fence_log_type {
        VPU_HWS_NATIVE_FENCE_LOG_TYPE_SIGNALS = 2
};

/* HWS native fence log buffer header. */
/** HWS native fence log buffer header. */
struct vpu_hws_native_fence_log_header {
        union {
                struct {
                        /* Index of the first free entry in buffer. */
                        /** Index of the first free entry in buffer. */
                        u32 first_free_entry_idx;
                        /* Incremented each time NPU wraps around the buffer to write next entry. */
                        /**
                         * Incremented each time NPU wraps around
                         * the buffer to write next entry.
                         */
                        u32 wraparound_count;
                };
                /* Field allowing atomic update of both fields above. */
                /** Field allowing atomic update of both fields above. */
                u64 atomic_wraparound_and_entry_idx;
        };
        /* Log buffer type, see enum vpu_hws_native_fence_log_type. */
        /** Log buffer type, see enum vpu_hws_native_fence_log_type. */
        u64 type;
        /* Allocated number of entries in the log buffer. */
        /** Allocated number of entries in the log buffer. */
        u64 entry_nb;
        u64 reserved[2];
};
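/*
 * Editor's sketch (assumed host-side consumer, not part of this header):
 * the union above lets a reader snapshot both cursor fields with a single
 * 64-bit load, avoiding a torn read between first_free_entry_idx and
 * wraparound_count. On a little-endian host:
 *
 *     u64 snap = READ_ONCE(hdr->atomic_wraparound_and_entry_idx);
 *     u32 first_free = lower_32_bits(snap);
 *     u32 wraps = upper_32_bits(snap);
 */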
|
||||
|
||||
/* Native fence log operation types. */
|
||||
/** Native fence log operation types. */
|
||||
enum vpu_hws_native_fence_log_op {
|
||||
VPU_HWS_NATIVE_FENCE_LOG_OP_SIGNAL_EXECUTED = 0,
|
||||
VPU_HWS_NATIVE_FENCE_LOG_OP_WAIT_UNBLOCKED = 1
|
||||
};
|
||||
|
||||
/* HWS native fence log entry. */
|
||||
/** HWS native fence log entry. */
|
||||
struct vpu_hws_native_fence_log_entry {
|
||||
/* Newly signaled/unblocked fence value. */
|
||||
/** Newly signaled/unblocked fence value. */
|
||||
u64 fence_value;
|
||||
/* Native fence object handle to which this operation belongs. */
|
||||
/** Native fence object handle to which this operation belongs. */
|
||||
u64 fence_handle;
|
||||
/* Operation type, see enum vpu_hws_native_fence_log_op. */
|
||||
/** Operation type, see enum vpu_hws_native_fence_log_op. */
|
||||
u64 op_type;
|
||||
u64 reserved_0;
|
||||
/*
|
||||
/**
|
||||
* VPU_HWS_NATIVE_FENCE_LOG_OP_WAIT_UNBLOCKED only: Timestamp at which fence
|
||||
* wait was started (in NPU SysTime).
|
||||
*/
|
||||
u64 fence_wait_start_ts;
|
||||
u64 reserved_1;
|
||||
/* Timestamp at which fence operation was completed (in NPU SysTime). */
|
||||
/** Timestamp at which fence operation was completed (in NPU SysTime). */
|
||||
u64 fence_end_ts;
|
||||
};
|
||||
|
||||
/* Native fence log buffer. */
|
||||
/** Native fence log buffer. */
|
||||
struct vpu_hws_native_fence_log_buffer {
|
||||
struct vpu_hws_native_fence_log_header header;
|
||||
struct vpu_hws_native_fence_log_entry entry[];
|
||||
|
|
@ -450,8 +469,21 @@ enum vpu_ipc_msg_type {
|
|||
* after preemption or when resubmitting jobs to the queue.
|
||||
*/
|
||||
VPU_JSM_MSG_ENGINE_PREEMPT = 0x1101,
|
||||
/**
|
||||
* OS scheduling doorbell register command
|
||||
* @see vpu_ipc_msg_payload_register_db
|
||||
*/
|
||||
VPU_JSM_MSG_REGISTER_DB = 0x1102,
|
||||
/**
|
||||
* OS scheduling doorbell unregister command
|
||||
* @see vpu_ipc_msg_payload_unregister_db
|
||||
*/
|
||||
VPU_JSM_MSG_UNREGISTER_DB = 0x1103,
|
||||
/**
|
||||
* Query engine heartbeat. Heartbeat is expected to increase monotonically
|
||||
* and increase while work is being progressed by NPU.
|
||||
* @see vpu_ipc_msg_payload_query_engine_hb
|
||||
*/
|
||||
VPU_JSM_MSG_QUERY_ENGINE_HB = 0x1104,
|
||||
VPU_JSM_MSG_GET_POWER_LEVEL_COUNT = 0x1105,
|
||||
VPU_JSM_MSG_GET_POWER_LEVEL = 0x1106,
|
||||
|
|
@ -477,6 +509,7 @@ enum vpu_ipc_msg_type {
|
|||
* aborted and removed from internal scheduling queues. All doorbells assigned
|
||||
* to the host_ssid are unregistered and any internal FW resources belonging to
|
||||
* the host_ssid are released.
|
||||
* @see vpu_ipc_msg_payload_ssid_release
|
||||
*/
|
||||
VPU_JSM_MSG_SSID_RELEASE = 0x110e,
|
||||
/**
|
||||
|
|
@@ -504,26 +537,51 @@ enum vpu_ipc_msg_type {
	 * @see vpu_jsm_metric_streamer_start
	 */
	VPU_JSM_MSG_METRIC_STREAMER_INFO = 0x1112,
	/** Control command: Priority band setup */
	/**
	 * Control command: Priority band setup
	 * @see vpu_ipc_msg_payload_hws_priority_band_setup
	 */
	VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP = 0x1113,
	/** Control command: Create command queue */
	/**
	 * Control command: Create command queue
	 * @see vpu_ipc_msg_payload_hws_create_cmdq
	 */
	VPU_JSM_MSG_CREATE_CMD_QUEUE = 0x1114,
	/** Control command: Destroy command queue */
	/**
	 * Control command: Destroy command queue
	 * @see vpu_ipc_msg_payload_hws_destroy_cmdq
	 */
	VPU_JSM_MSG_DESTROY_CMD_QUEUE = 0x1115,
	/** Control command: Set context scheduling properties */
	/**
	 * Control command: Set context scheduling properties
	 * @see vpu_ipc_msg_payload_hws_set_context_sched_properties
	 */
	VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES = 0x1116,
	/*
	/**
	 * Register a doorbell to notify VPU of new work. The doorbell may later be
	 * deallocated or reassigned to another context.
	 * @see vpu_jsm_hws_register_db
	 */
	VPU_JSM_MSG_HWS_REGISTER_DB = 0x1117,
	/** Control command: Log buffer setting */
	/**
	 * Control command: Log buffer setting
	 * @see vpu_ipc_msg_payload_hws_set_scheduling_log
	 */
	VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG = 0x1118,
	/* Control command: Suspend command queue. */
	/**
	 * Control command: Suspend command queue.
	 * @see vpu_ipc_msg_payload_hws_suspend_cmdq
	 */
	VPU_JSM_MSG_HWS_SUSPEND_CMDQ = 0x1119,
	/* Control command: Resume command queue */
	/**
	 * Control command: Resume command queue
	 * @see vpu_ipc_msg_payload_hws_resume_cmdq
	 */
	VPU_JSM_MSG_HWS_RESUME_CMDQ = 0x111a,
	/* Control command: Resume engine after reset */
	/**
	 * Control command: Resume engine after reset
	 * @see vpu_ipc_msg_payload_hws_resume_engine
	 */
	VPU_JSM_MSG_HWS_ENGINE_RESUME = 0x111b,
	/* Control command: Enable survivability/DCT mode */
	VPU_JSM_MSG_DCT_ENABLE = 0x111c,
@@ -540,7 +598,8 @@ enum vpu_ipc_msg_type {
	VPU_JSM_MSG_BLOB_DEINIT_DEPRECATED = VPU_JSM_MSG_GENERAL_CMD,
	/**
	 * Control dyndbg behavior by executing a dyndbg command; equivalent to
	 * Linux command: `echo '<dyndbg_cmd>' > <debugfs>/dynamic_debug/control`.
	 * Linux command:
	 * @verbatim echo '<dyndbg_cmd>' > <debugfs>/dynamic_debug/control @endverbatim
	 */
	VPU_JSM_MSG_DYNDBG_CONTROL = 0x1201,
	/**
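For context, the dyndbg command carried by VPU_JSM_MSG_DYNDBG_CONTROL uses the same syntax one would echo into <debugfs>/dynamic_debug/control. A hypothetical illustration of composing such a command string on the host side (the module name and flag are invented for the example):

#include <stdio.h>

int main(void)
{
	char dyndbg_cmd[64];

	/* e.g. enable pr_debug() output for a module */
	snprintf(dyndbg_cmd, sizeof(dyndbg_cmd), "module %s +p", "intel_vpu");
	printf("%s\n", dyndbg_cmd);
	return 0;
}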
@@ -550,15 +609,26 @@ enum vpu_ipc_msg_type {

	/* IPC Device -> Host, Job completion */
	VPU_JSM_MSG_JOB_DONE = 0x2100,
	/* IPC Device -> Host, Fence signalled */
	/**
	 * IPC Device -> Host, Fence signalled
	 * @see vpu_ipc_msg_payload_native_fence_signalled
	 */
	VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED = 0x2101,

	/* IPC Device -> Host, Async command completion */
	VPU_JSM_MSG_ASYNC_CMD_DONE = 0x2200,
	/**
	 * IPC Device -> Host, engine reset complete
	 * @see vpu_ipc_msg_payload_engine_reset_done
	 */
	VPU_JSM_MSG_ENGINE_RESET_DONE = VPU_JSM_MSG_ASYNC_CMD_DONE,
	VPU_JSM_MSG_ENGINE_PREEMPT_DONE = 0x2201,
	VPU_JSM_MSG_REGISTER_DB_DONE = 0x2202,
	VPU_JSM_MSG_UNREGISTER_DB_DONE = 0x2203,
	/**
	 * Response to query engine heartbeat.
	 * @see vpu_ipc_msg_payload_query_engine_hb_done
	 */
	VPU_JSM_MSG_QUERY_ENGINE_HB_DONE = 0x2204,
	VPU_JSM_MSG_GET_POWER_LEVEL_COUNT_DONE = 0x2205,
	VPU_JSM_MSG_GET_POWER_LEVEL_DONE = 0x2206,
@@ -575,7 +645,10 @@ enum vpu_ipc_msg_type {
	VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP = 0x220c,
	/** Response to VPU_JSM_MSG_TRACE_GET_NAME. */
	VPU_JSM_MSG_TRACE_GET_NAME_RSP = 0x220d,
	/** Response to VPU_JSM_MSG_SSID_RELEASE. */
	/**
	 * Response to VPU_JSM_MSG_SSID_RELEASE.
	 * @see vpu_ipc_msg_payload_ssid_release
	 */
	VPU_JSM_MSG_SSID_RELEASE_DONE = 0x220e,
	/**
	 * Response to VPU_JSM_MSG_METRIC_STREAMER_START.
@@ -605,29 +678,56 @@ enum vpu_ipc_msg_type {
	/**
	 * Asynchronous event sent from the VPU to the host either when the current
	 * metric buffer is full or when the VPU has collected a multiple of
	 * @notify_sample_count samples as indicated through the start command
	 * (VPU_JSM_MSG_METRIC_STREAMER_START). Returns information about collected
	 * metric data.
	 * @ref vpu_jsm_metric_streamer_start::notify_sample_count samples as indicated
	 * through the start command (VPU_JSM_MSG_METRIC_STREAMER_START). Returns
	 * information about collected metric data.
	 * @see vpu_jsm_metric_streamer_done
	 */
	VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION = 0x2213,
	/** Response to control command: Priority band setup */
	/**
	 * Response to control command: Priority band setup
	 * @see vpu_ipc_msg_payload_hws_priority_band_setup
	 */
	VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP = 0x2214,
	/** Response to control command: Create command queue */
	/**
	 * Response to control command: Create command queue
	 * @see vpu_ipc_msg_payload_hws_create_cmdq_rsp
	 */
	VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP = 0x2215,
	/** Response to control command: Destroy command queue */
	/**
	 * Response to control command: Destroy command queue
	 * @see vpu_ipc_msg_payload_hws_destroy_cmdq
	 */
	VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP = 0x2216,
	/** Response to control command: Set context scheduling properties */
	/**
	 * Response to control command: Set context scheduling properties
	 * @see vpu_ipc_msg_payload_hws_set_context_sched_properties
	 */
	VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP = 0x2217,
	/** Response to control command: Log buffer setting */
	/**
	 * Response to control command: Log buffer setting
	 * @see vpu_ipc_msg_payload_hws_set_scheduling_log
	 */
	VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP = 0x2218,
	/* IPC Device -> Host, HWS notify index entry of log buffer written */
	/**
	 * IPC Device -> Host, HWS notify index entry of log buffer written
	 * @see vpu_ipc_msg_payload_hws_scheduling_log_notification
	 */
	VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION = 0x2219,
	/* IPC Device -> Host, HWS completion of a context suspend request */
	/**
	 * IPC Device -> Host, HWS completion of a context suspend request
	 * @see vpu_ipc_msg_payload_hws_suspend_cmdq
	 */
	VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE = 0x221a,
	/* Response to control command: Resume command queue */
	/**
	 * Response to control command: Resume command queue
	 * @see vpu_ipc_msg_payload_hws_resume_cmdq
	 */
	VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP = 0x221b,
	/* Response to control command: Resume engine command response */
	/**
	 * Response to control command: Resume engine command response
	 * @see vpu_ipc_msg_payload_hws_resume_engine
	 */
	VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE = 0x221c,
	/* Response to control command: Enable survivability/DCT mode */
	VPU_JSM_MSG_DCT_ENABLE_DONE = 0x221d,
@@ -670,40 +770,44 @@ struct vpu_ipc_msg_payload_engine_preempt {
	u32 preempt_id;
};

/*
 * @brief Register doorbell command structure.
/**
 * Register doorbell command structure.
 * This structure supports doorbell registration for only OS scheduling.
 * @see VPU_JSM_MSG_REGISTER_DB
 */
struct vpu_ipc_msg_payload_register_db {
	/* Index of the doorbell to register. */
	/** Index of the doorbell to register. */
	u32 db_idx;
	/* Reserved */
	/** Reserved */
	u32 reserved_0;
	/* Virtual address in Global GTT pointing to the start of job queue. */
	/** Virtual address in Global GTT pointing to the start of job queue. */
	u64 jobq_base;
	/* Size of the job queue in bytes. */
	/** Size of the job queue in bytes. */
	u32 jobq_size;
	/* Host sub-stream ID for the context assigned to the doorbell. */
	/** Host sub-stream ID for the context assigned to the doorbell. */
	u32 host_ssid;
};

/**
 * @brief Unregister doorbell command structure.
 * Unregister doorbell command structure.
 * Request structure to unregister a doorbell for both HW and OS scheduling.
 * @see VPU_JSM_MSG_UNREGISTER_DB
 */
struct vpu_ipc_msg_payload_unregister_db {
	/* Index of the doorbell to unregister. */
	/** Index of the doorbell to unregister. */
	u32 db_idx;
	/* Reserved */
	/** Reserved */
	u32 reserved_0;
};

/**
 * Heartbeat request structure
 * @see VPU_JSM_MSG_QUERY_ENGINE_HB
 */
struct vpu_ipc_msg_payload_query_engine_hb {
	/* Engine to return heartbeat value. */
	/** Engine to return heartbeat value. */
	u32 engine_idx;
	/* Reserved */
	/** Reserved */
	u32 reserved_0;
};
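The OS-scheduling doorbell registration above ties a doorbell index to a job-queue location and a context SSID, with the reserved field kept zero. A hedged userspace mirror of the payload, purely to show the field roles (the type and helper names are assumptions, not the kernel's):

#include <stdint.h>
#include <string.h>

struct register_db_payload {	/* mirrors vpu_ipc_msg_payload_register_db */
	uint32_t db_idx;	/* doorbell to register */
	uint32_t reserved_0;	/* must stay zero */
	uint64_t jobq_base;	/* GGTT address of the job queue */
	uint32_t jobq_size;	/* job queue size in bytes */
	uint32_t host_ssid;	/* context assigned to the doorbell */
};

static void fill_register_db(struct register_db_payload *p, uint32_t db,
			     uint64_t jobq_base, uint32_t jobq_size, uint32_t ssid)
{
	memset(p, 0, sizeof(*p));	/* zeroes reserved_0 */
	p->db_idx = db;
	p->jobq_base = jobq_base;
	p->jobq_size = jobq_size;
	p->host_ssid = ssid;
}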
@@ -723,10 +827,14 @@ struct vpu_ipc_msg_payload_power_level {
	u32 reserved_0;
};

/**
 * Structure for requesting ssid release
 * @see VPU_JSM_MSG_SSID_RELEASE
 */
struct vpu_ipc_msg_payload_ssid_release {
	/* Host sub-stream ID for the context to be released. */
	/** Host sub-stream ID for the context to be released. */
	u32 host_ssid;
	/* Reserved */
	/** Reserved */
	u32 reserved_0;
};
@@ -752,7 +860,7 @@ struct vpu_jsm_metric_streamer_start {
	u64 sampling_rate;
	/**
	 * If > 0 the VPU will send a VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION message
	 * after every @notify_sample_count samples is collected or dropped by the VPU.
	 * after every @ref notify_sample_count samples is collected or dropped by the VPU.
	 * If set to UINT_MAX the VPU will only generate a notification when the metric
	 * buffer is full. If set to 0 the VPU will never generate a notification.
	 */
@@ -762,9 +870,9 @@ struct vpu_jsm_metric_streamer_start {
	 * Address and size of the buffer where the VPU will write metric data. The
	 * VPU writes all counters from enabled metric groups one after another. If
	 * there is no space left to write data at the next sample period the VPU
	 * will switch to the next buffer (@see next_buffer_addr) and will optionally
	 * send a notification to the host driver if @notify_sample_count is non-zero.
	 * If @next_buffer_addr is NULL the VPU will stop collecting metric data.
	 * will switch to the next buffer (@ref next_buffer_addr) and will optionally
	 * send a notification to the host driver if @ref notify_sample_count is non-zero.
	 * If @ref next_buffer_addr is NULL the VPU will stop collecting metric data.
	 */
	u64 buffer_addr;
	u64 buffer_size;
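The notify_sample_count field described above folds three notification policies into one value: 0 disables notifications, UINT_MAX notifies only when a buffer fills, and any other value n notifies after every n samples. A small sketch of how a host might pick the value (the helper and its parameters are illustrative, not driver API):

#include <limits.h>
#include <stdint.h>

static uint32_t choose_notify_sample_count(int want_notifications,
					   uint32_t every_n_samples)
{
	if (!want_notifications)
		return 0;		/* never notify */
	if (every_n_samples == 0)
		return UINT_MAX;	/* notify only when the buffer is full */
	return every_n_samples;		/* notify every n samples */
}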
@@ -844,38 +952,47 @@ struct vpu_ipc_msg_payload_job_done {
	u64 cmdq_id;
};

/*
/**
 * Notification message upon native fence signalling.
 * @see VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED
 */
struct vpu_ipc_msg_payload_native_fence_signalled {
	/* Engine ID. */
	/** Engine ID. */
	u32 engine_idx;
	/* Host SSID. */
	/** Host SSID. */
	u32 host_ssid;
	/* CMDQ ID */
	/** CMDQ ID */
	u64 cmdq_id;
	/* Fence object handle. */
	/** Fence object handle. */
	u64 fence_handle;
};

/**
 * vpu_ipc_msg_payload_engine_reset_done will contain an array of this structure
 * which contains which queues caused reset if FW was able to detect any error.
 * @see vpu_ipc_msg_payload_engine_reset_done
 */
struct vpu_jsm_engine_reset_context {
	/* Host SSID */
	/** Host SSID */
	u32 host_ssid;
	/* Zero Padding */
	/** Zero Padding */
	u32 reserved_0;
	/* Command queue id */
	/** Command queue id */
	u64 cmdq_id;
	/* See VPU_ENGINE_RESET_CONTEXT_* defines */
	/** See VPU_ENGINE_RESET_CONTEXT_* defines */
	u64 flags;
};

/**
 * Engine reset response.
 * @see VPU_JSM_MSG_ENGINE_RESET_DONE
 */
struct vpu_ipc_msg_payload_engine_reset_done {
	/* Engine ordinal */
	/** Engine ordinal */
	u32 engine_idx;
	/* Number of impacted contexts */
	/** Number of impacted contexts */
	u32 num_impacted_contexts;
	/* Array of impacted command queue ids and their flags */
	/** Array of impacted command queue ids and their flags */
	struct vpu_jsm_engine_reset_context
		impacted_contexts[VPU_MAX_ENGINE_RESET_IMPACTED_CONTEXTS];
};
@@ -912,12 +1029,16 @@ struct vpu_ipc_msg_payload_unregister_db_done {
	u32 reserved_0;
};

/**
 * Structure for heartbeat response
 * @see VPU_JSM_MSG_QUERY_ENGINE_HB_DONE
 */
struct vpu_ipc_msg_payload_query_engine_hb_done {
	/* Engine returning heartbeat value. */
	/** Engine returning heartbeat value. */
	u32 engine_idx;
	/* Reserved */
	/** Reserved */
	u32 reserved_0;
	/* Heartbeat value. */
	/** Heartbeat value. */
	u64 heartbeat;
};
@@ -937,7 +1058,10 @@ struct vpu_ipc_msg_payload_get_power_level_count_done {
	u8 power_limit[16];
};

/* HWS priority band setup request / response */
/**
 * HWS priority band setup request / response
 * @see VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP
 */
struct vpu_ipc_msg_payload_hws_priority_band_setup {
	/*
	 * Grace period in 100ns units when preempting another priority band for
@@ -964,15 +1088,23 @@ struct vpu_ipc_msg_payload_hws_priority_band_setup {
	 * TDR timeout value in milliseconds. Default value of 0 meaning no timeout.
	 */
	u32 tdr_timeout;
	/* Non-interactive queue timeout for no progress of heartbeat in milliseconds.
	 * Default value of 0 meaning no timeout.
	 */
	u32 non_interactive_no_progress_timeout;
	/*
	 * Non-interactive queue upper limit timeout value in milliseconds. Default
	 * value of 0 meaning no timeout.
	 */
	u32 non_interactive_timeout;
};

/*
/**
 * @brief HWS create command queue request.
 * Host will create a command queue via this command.
 * Note: Cmdq group is a handle of an object which
 * may contain one or more command queues.
 * @see VPU_JSM_MSG_CREATE_CMD_QUEUE
 * @see VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP
 */
struct vpu_ipc_msg_payload_hws_create_cmdq {
	/* Process id */
@@ -993,66 +1125,73 @@ struct vpu_ipc_msg_payload_hws_create_cmdq {
	u32 reserved_0;
};

/*
 * @brief HWS create command queue response.
 * @see VPU_JSM_MSG_CREATE_CMD_QUEUE
/**
 * HWS create command queue response.
 * @see VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP
 */
struct vpu_ipc_msg_payload_hws_create_cmdq_rsp {
	/* Process id */
	/** Process id */
	u64 process_id;
	/* Host SSID */
	/** Host SSID */
	u32 host_ssid;
	/* Engine for which queue is being created */
	/** Engine for which queue is being created */
	u32 engine_idx;
	/* Command queue group */
	/** Command queue group */
	u64 cmdq_group;
	/* Command queue id */
	/** Command queue id */
	u64 cmdq_id;
};

/* HWS destroy command queue request / response */
/**
 * HWS destroy command queue request / response
 * @see VPU_JSM_MSG_DESTROY_CMD_QUEUE
 * @see VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP
 */
struct vpu_ipc_msg_payload_hws_destroy_cmdq {
	/* Host SSID */
	/** Host SSID */
	u32 host_ssid;
	/* Zero Padding */
	/** Zero Padding */
	u32 reserved;
	/* Command queue id */
	/** Command queue id */
	u64 cmdq_id;
};

/* HWS set context scheduling properties request / response */
/**
 * HWS set context scheduling properties request / response
 * @see VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES
 * @see VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP
 */
struct vpu_ipc_msg_payload_hws_set_context_sched_properties {
	/* Host SSID */
	/** Host SSID */
	u32 host_ssid;
	/* Zero Padding */
	/** Zero Padding */
	u32 reserved_0;
	/* Command queue id */
	/** Command queue id */
	u64 cmdq_id;
	/*
	/**
	 * Priority band to assign to work of this context.
	 * Available priority bands: @see enum vpu_job_scheduling_priority_band
	 */
	u32 priority_band;
	/* Inside realtime band assigns a further priority */
	/** Inside realtime band assigns a further priority */
	u32 realtime_priority_level;
	/* Priority relative to other contexts in the same process */
	/** Priority relative to other contexts in the same process */
	s32 in_process_priority;
	/* Zero padding / Reserved */
	/** Zero padding / Reserved */
	u32 reserved_1;
	/*
	/**
	 * Context quantum relative to other contexts of same priority in the same process
	 * Minimum value supported by NPU is 1ms (10000 in 100ns units).
	 */
	u64 context_quantum;
	/* Grace period when preempting context of the same priority within the same process */
	/** Grace period when preempting context of the same priority within the same process */
	u64 grace_period_same_priority;
	/* Grace period when preempting context of a lower priority within the same process */
	/** Grace period when preempting context of a lower priority within the same process */
	u64 grace_period_lower_priority;
};
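Note the mixed units in the structure above: the timeouts are in milliseconds, while context_quantum and the grace periods are counted in 100 ns ticks, with 1 ms equal to 10000 ticks per the context_quantum comment. A one-line conversion helper for reference:

#include <stdint.h>

/* 1 ms = 10,000 ticks of 100 ns, as stated in the comment above. */
static uint64_t ms_to_100ns_ticks(uint64_t ms)
{
	return ms * 10000u;
}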
/*
 * @brief Register doorbell command structure.
/**
 * Register doorbell command structure.
 * This structure supports doorbell registration for both HW and OS scheduling.
 * Note: Queue base and size are added here so that the same structure can be used for
 * OS scheduling and HW scheduling. For OS scheduling, cmdq_id will be ignored
@@ -1061,27 +1200,27 @@ struct vpu_ipc_msg_payload_hws_set_context_sched_properties {
 * @see VPU_JSM_MSG_HWS_REGISTER_DB
 */
struct vpu_jsm_hws_register_db {
	/* Index of the doorbell to register. */
	/** Index of the doorbell to register. */
	u32 db_id;
	/* Host sub-stream ID for the context assigned to the doorbell. */
	/** Host sub-stream ID for the context assigned to the doorbell. */
	u32 host_ssid;
	/* ID of the command queue associated with the doorbell. */
	/** ID of the command queue associated with the doorbell. */
	u64 cmdq_id;
	/* Virtual address pointing to the start of command queue. */
	/** Virtual address pointing to the start of command queue. */
	u64 cmdq_base;
	/* Size of the command queue in bytes. */
	/** Size of the command queue in bytes. */
	u64 cmdq_size;
};
/*
 * @brief Structure to set another buffer to be used for scheduling-related logging.
/**
 * Structure to set another buffer to be used for scheduling-related logging.
 * The size of the logging buffer and the number of entries is defined as part of the
 * buffer itself as described next.
 * The log buffer received from the host is made up of;
 * - header: 32 bytes in size, as shown in 'struct vpu_hws_log_buffer_header'.
 * - header: 32 bytes in size, as shown in @ref vpu_hws_log_buffer_header.
 *   The header contains the number of log entries in the buffer.
 * - log entry: 0 to n-1, each log entry is 32 bytes in size, as shown in
 *   'struct vpu_hws_log_buffer_entry'.
 *   @ref vpu_hws_log_buffer_entry.
 *   The entry contains the VPU timestamp, operation type and data.
 * The host should provide the notify index value of log buffer to VPU. This is a
 * value defined within the log buffer and when written to will generate the
@@ -1095,30 +1234,30 @@ struct vpu_jsm_hws_register_db {
 * @see VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION
 */
struct vpu_ipc_msg_payload_hws_set_scheduling_log {
	/* Engine ordinal */
	/** Engine ordinal */
	u32 engine_idx;
	/* Host SSID */
	/** Host SSID */
	u32 host_ssid;
	/*
	/**
	 * VPU log buffer virtual address.
	 * Set to 0 to disable logging for this engine.
	 */
	u64 vpu_log_buffer_va;
	/*
	/**
	 * Notify index of log buffer. VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION
	 * is generated when an event log is written to this index.
	 */
	u64 notify_index;
	/*
	/**
	 * Field is now deprecated, will be removed when KMD is updated to support removal
	 */
	u32 enable_extra_events;
	/* Zero Padding */
	/** Zero Padding */
	u32 reserved_0;
};
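Since the layout described above is a 32-byte header followed by n 32-byte entries, the size of a scheduling log buffer follows directly from the entry count. A sketch of that calculation (the constants are taken from the comment above, not from a kernel header):

#include <stddef.h>

#define HWS_LOG_HEADER_BYTES	32	/* struct vpu_hws_log_buffer_header */
#define HWS_LOG_ENTRY_BYTES	32	/* struct vpu_hws_log_buffer_entry */

static size_t hws_log_buffer_bytes(size_t num_entries)
{
	return HWS_LOG_HEADER_BYTES + num_entries * HWS_LOG_ENTRY_BYTES;
}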
/*
 * @brief The scheduling log notification is generated by VPU when it writes
/**
 * The scheduling log notification is generated by VPU when it writes
 * an event into the log buffer at the notify_index. VPU notifies host with
 * VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION. This is an asynchronous
 * message from VPU to host.
@@ -1126,14 +1265,14 @@ struct vpu_ipc_msg_payload_hws_set_scheduling_log {
 * @see VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG
 */
struct vpu_ipc_msg_payload_hws_scheduling_log_notification {
	/* Engine ordinal */
	/** Engine ordinal */
	u32 engine_idx;
	/* Zero Padding */
	/** Zero Padding */
	u32 reserved_0;
};

/*
 * @brief HWS suspend command queue request and done structure.
/**
 * HWS suspend command queue request and done structure.
 * Host will request the suspend of contexts and VPU will;
 *   - Suspend all work on this context
 *   - Preempt any running work
@@ -1152,21 +1291,21 @@ struct vpu_ipc_msg_payload_hws_scheduling_log_notification {
 * @see VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE
 */
struct vpu_ipc_msg_payload_hws_suspend_cmdq {
	/* Host SSID */
	/** Host SSID */
	u32 host_ssid;
	/* Zero Padding */
	/** Zero Padding */
	u32 reserved_0;
	/* Command queue id */
	/** Command queue id */
	u64 cmdq_id;
	/*
	/**
	 * Suspend fence value - reported by the VPU suspend context
	 * completed once suspend is complete.
	 */
	u64 suspend_fence_value;
};

/*
 * @brief HWS Resume command queue request / response structure.
/**
 * HWS Resume command queue request / response structure.
 * Host will request the resume of a context;
 *   - VPU will resume all work on this context
 *   - Scheduler will allow this context to be scheduled
@@ -1174,25 +1313,25 @@ struct vpu_ipc_msg_payload_hws_suspend_cmdq {
 * @see VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP
 */
struct vpu_ipc_msg_payload_hws_resume_cmdq {
	/* Host SSID */
	/** Host SSID */
	u32 host_ssid;
	/* Zero Padding */
	/** Zero Padding */
	u32 reserved_0;
	/* Command queue id */
	/** Command queue id */
	u64 cmdq_id;
};

/*
 * @brief HWS Resume engine request / response structure.
 * After a HWS engine reset, all scheduling is stopped on VPU until a engine resume.
/**
 * HWS Resume engine request / response structure.
 * After a HWS engine reset, all scheduling is stopped on VPU until an engine resume.
 * Host shall send this command to resume scheduling of any valid queue.
 * @see VPU_JSM_MSG_HWS_RESUME_ENGINE
 * @see VPU_JSM_MSG_HWS_ENGINE_RESUME
 * @see VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE
 */
struct vpu_ipc_msg_payload_hws_resume_engine {
	/* Engine to be resumed */
	/** Engine to be resumed */
	u32 engine_idx;
	/* Reserved */
	/** Reserved */
	u32 reserved_0;
};
@@ -1326,7 +1465,7 @@ struct vpu_jsm_metric_streamer_done {
/**
 * Metric group description placed in the metric buffer after successful completion
 * of the VPU_JSM_MSG_METRIC_STREAMER_INFO command. This is followed by one or more
 * @vpu_jsm_metric_counter_descriptor records.
 * @ref vpu_jsm_metric_counter_descriptor records.
 * @see VPU_JSM_MSG_METRIC_STREAMER_INFO
 */
struct vpu_jsm_metric_group_descriptor {
@@ -150,7 +150,8 @@ drm_kms_helper-y := \
	drm_plane_helper.o \
	drm_probe_helper.o \
	drm_self_refresh_helper.o \
	drm_simple_kms_helper.o
	drm_simple_kms_helper.o \
	drm_vblank_helper.o
drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
@@ -198,7 +198,7 @@ static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
	struct amdgpu_bo *aobj = gem_to_amdgpu_bo(gobj);

	amdgpu_hmm_unregister(aobj);
	ttm_bo_put(&aobj->tbo);
	ttm_bo_fini(&aobj->tbo);
}

int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
@@ -6,7 +6,9 @@
ast-y := \
	ast_2000.o \
	ast_2100.o \
	ast_2200.o \
	ast_2300.o \
	ast_2400.o \
	ast_2500.o \
	ast_2600.o \
	ast_cursor.o \
@@ -14,7 +16,6 @@ ast-y := \
	ast_dp501.o \
	ast_dp.o \
	ast_drv.o \
	ast_main.o \
	ast_mm.o \
	ast_mode.o \
	ast_post.o \
@@ -27,6 +27,9 @@
 */

#include <linux/delay.h>
#include <linux/pci.h>

#include <drm/drm_drv.h>

#include "ast_drv.h"
#include "ast_post.h"
@@ -147,3 +150,101 @@ int ast_2000_post(struct ast_device *ast)

	return 0;
}

/*
 * Mode setting
 */

const struct ast_vbios_dclk_info ast_2000_dclk_table[] = {
	{0x2c, 0xe7, 0x03}, /* 00: VCLK25_175 */
	{0x95, 0x62, 0x03}, /* 01: VCLK28_322 */
	{0x67, 0x63, 0x01}, /* 02: VCLK31_5 */
	{0x76, 0x63, 0x01}, /* 03: VCLK36 */
	{0xee, 0x67, 0x01}, /* 04: VCLK40 */
	{0x82, 0x62, 0x01}, /* 05: VCLK49_5 */
	{0xc6, 0x64, 0x01}, /* 06: VCLK50 */
	{0x94, 0x62, 0x01}, /* 07: VCLK56_25 */
	{0x80, 0x64, 0x00}, /* 08: VCLK65 */
	{0x7b, 0x63, 0x00}, /* 09: VCLK75 */
	{0x67, 0x62, 0x00}, /* 0a: VCLK78_75 */
	{0x7c, 0x62, 0x00}, /* 0b: VCLK94_5 */
	{0x8e, 0x62, 0x00}, /* 0c: VCLK108 */
	{0x85, 0x24, 0x00}, /* 0d: VCLK135 */
	{0x67, 0x22, 0x00}, /* 0e: VCLK157_5 */
	{0x6a, 0x22, 0x00}, /* 0f: VCLK162 */
	{0x4d, 0x4c, 0x80}, /* 10: VCLK154 */
	{0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */
	{0x28, 0x49, 0x80}, /* 12: VCLK106.5 */
	{0x37, 0x49, 0x80}, /* 13: VCLK146.25 */
	{0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */
	{0x47, 0x6c, 0x80}, /* 15: VCLK71 */
	{0x25, 0x65, 0x80}, /* 16: VCLK88.75 */
	{0x77, 0x58, 0x80}, /* 17: VCLK119 */
	{0x32, 0x67, 0x80}, /* 18: VCLK85_5 */
	{0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */
	{0x3b, 0x2c, 0x81}, /* 1a: VCLK118_25 */
};

/*
 * Device initialization
 */

void ast_2000_detect_tx_chip(struct ast_device *ast, bool need_post)
{
	enum ast_tx_chip tx_chip = AST_TX_NONE;
	u8 vgacra3;

	/*
	 * VGACRA3 Enhanced Color Mode Register, check if DVO is already
	 * enabled, in that case, assume we have a SIL164 TMDS transmitter
	 *
	 * Don't make that assumption if the chip wasn't enabled and
	 * is at power-on reset, otherwise we'll incorrectly "detect" a
	 * SIL164 when there is none.
	 */
	if (!need_post) {
		vgacra3 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xff);
		if (vgacra3 & AST_IO_VGACRA3_DVO_ENABLED)
			tx_chip = AST_TX_SIL164;
	}

	__ast_device_set_tx_chip(ast, tx_chip);
}

struct drm_device *ast_2000_device_create(struct pci_dev *pdev,
					  const struct drm_driver *drv,
					  enum ast_chip chip,
					  enum ast_config_mode config_mode,
					  void __iomem *regs,
					  void __iomem *ioregs,
					  bool need_post)
{
	struct drm_device *dev;
	struct ast_device *ast;
	int ret;

	ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
	if (IS_ERR(ast))
		return ERR_CAST(ast);
	dev = &ast->base;

	ast_device_init(ast, chip, config_mode, regs, ioregs);

	ast_2000_detect_tx_chip(ast, need_post);

	if (need_post) {
		ret = ast_post_gpu(ast);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = ast_mm_init(ast);
	if (ret)
		return ERR_PTR(ret);

	ret = ast_mode_config_init(ast);
	if (ret)
		return ERR_PTR(ret);

	return dev;
}
@@ -27,6 +27,9 @@
 */

#include <linux/delay.h>
#include <linux/pci.h>

#include <drm/drm_drv.h>

#include "ast_drv.h"
#include "ast_post.h"
@@ -386,3 +389,85 @@ int ast_2100_post(struct ast_device *ast)

	return 0;
}

/*
 * Widescreen detection
 */

/* Try to detect WSXGA+ on Gen2+ */
bool __ast_2100_detect_wsxga_p(struct ast_device *ast)
{
	u8 vgacrd0 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd0);

	if (!(vgacrd0 & AST_IO_VGACRD0_VRAM_INIT_BY_BMC))
		return true;
	if (vgacrd0 & AST_IO_VGACRD0_IKVM_WIDESCREEN)
		return true;

	return false;
}

/* Try to detect WUXGA on Gen2+ */
bool __ast_2100_detect_wuxga(struct ast_device *ast)
{
	u8 vgacrd1;

	if (ast->support_fullhd) {
		vgacrd1 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd1);
		if (!(vgacrd1 & AST_IO_VGACRD1_SUPPORTS_WUXGA))
			return true;
	}

	return false;
}

static void ast_2100_detect_widescreen(struct ast_device *ast)
{
	if (__ast_2100_detect_wsxga_p(ast)) {
		ast->support_wsxga_p = true;
		if (ast->chip == AST2100)
			ast->support_fullhd = true;
	}
	if (__ast_2100_detect_wuxga(ast))
		ast->support_wuxga = true;
}

struct drm_device *ast_2100_device_create(struct pci_dev *pdev,
					  const struct drm_driver *drv,
					  enum ast_chip chip,
					  enum ast_config_mode config_mode,
					  void __iomem *regs,
					  void __iomem *ioregs,
					  bool need_post)
{
	struct drm_device *dev;
	struct ast_device *ast;
	int ret;

	ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
	if (IS_ERR(ast))
		return ERR_CAST(ast);
	dev = &ast->base;

	ast_device_init(ast, chip, config_mode, regs, ioregs);

	ast_2000_detect_tx_chip(ast, need_post);

	if (need_post) {
		ret = ast_post_gpu(ast);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = ast_mm_init(ast);
	if (ret)
		return ERR_PTR(ret);

	ast_2100_detect_widescreen(ast);

	ret = ast_mode_config_init(ast);
	if (ret)
		return ERR_PTR(ret);

	return dev;
}
@@ -0,0 +1,85 @@
// SPDX-License-Identifier: MIT
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors: Dave Airlie <airlied@redhat.com>
 */

#include <linux/pci.h>

#include <drm/drm_drv.h>

#include "ast_drv.h"

static void ast_2200_detect_widescreen(struct ast_device *ast)
{
	if (__ast_2100_detect_wsxga_p(ast)) {
		ast->support_wsxga_p = true;
		if (ast->chip == AST2200)
			ast->support_fullhd = true;
	}
	if (__ast_2100_detect_wuxga(ast))
		ast->support_wuxga = true;
}

struct drm_device *ast_2200_device_create(struct pci_dev *pdev,
					  const struct drm_driver *drv,
					  enum ast_chip chip,
					  enum ast_config_mode config_mode,
					  void __iomem *regs,
					  void __iomem *ioregs,
					  bool need_post)
{
	struct drm_device *dev;
	struct ast_device *ast;
	int ret;

	ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
	if (IS_ERR(ast))
		return ERR_CAST(ast);
	dev = &ast->base;

	ast_device_init(ast, chip, config_mode, regs, ioregs);

	ast_2000_detect_tx_chip(ast, need_post);

	if (need_post) {
		ret = ast_post_gpu(ast);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = ast_mm_init(ast);
	if (ret)
		return ERR_PTR(ret);

	ast_2200_detect_widescreen(ast);

	ret = ast_mode_config_init(ast);
	if (ret)
		return ERR_PTR(ret);

	return dev;
}
@@ -27,6 +27,12 @@
 */

#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/sizes.h>

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>

#include "ast_drv.h"
#include "ast_post.h"
@@ -1326,3 +1332,125 @@ int ast_2300_post(struct ast_device *ast)

	return 0;
}

/*
 * Device initialization
 */

void ast_2300_detect_tx_chip(struct ast_device *ast)
{
	enum ast_tx_chip tx_chip = AST_TX_NONE;
	struct drm_device *dev = &ast->base;
	u8 vgacrd1;

	/*
	 * On AST GEN4+, look at the configuration set by the SoC in
	 * the SOC scratch register #1 bits 11:8 (interestingly marked
	 * as "reserved" in the spec)
	 */
	vgacrd1 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1,
					 AST_IO_VGACRD1_TX_TYPE_MASK);
	switch (vgacrd1) {
	/*
	 * GEN4 to GEN6
	 */
	case AST_IO_VGACRD1_TX_SIL164_VBIOS:
		tx_chip = AST_TX_SIL164;
		break;
	case AST_IO_VGACRD1_TX_DP501_VBIOS:
		ast->dp501_fw_addr = drmm_kzalloc(dev, SZ_32K, GFP_KERNEL);
		if (ast->dp501_fw_addr) {
			/* backup firmware */
			if (ast_backup_fw(ast, ast->dp501_fw_addr, SZ_32K)) {
				drmm_kfree(dev, ast->dp501_fw_addr);
				ast->dp501_fw_addr = NULL;
			}
		}
		fallthrough;
	case AST_IO_VGACRD1_TX_FW_EMBEDDED_FW:
		tx_chip = AST_TX_DP501;
		break;
	/*
	 * GEN7+
	 */
	case AST_IO_VGACRD1_TX_ASTDP:
		tx_chip = AST_TX_ASTDP;
		break;
	/*
	 * Several of the listed TX chips are not explicitly supported
	 * by the ast driver. If these exist in real-world devices, they
	 * are most likely reported as VGA or SIL164 outputs. We warn here
	 * to get bug reports for these devices. If none come in for some
	 * time, we can begin to fail device probing on these values.
	 */
	case AST_IO_VGACRD1_TX_ITE66121_VBIOS:
		drm_warn(dev, "ITE IT66121 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast));
		break;
	case AST_IO_VGACRD1_TX_CH7003_VBIOS:
		drm_warn(dev, "Chrontel CH7003 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast));
		break;
	case AST_IO_VGACRD1_TX_ANX9807_VBIOS:
		drm_warn(dev, "Analogix ANX9807 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast));
		break;
	}

	__ast_device_set_tx_chip(ast, tx_chip);
}

static void ast_2300_detect_widescreen(struct ast_device *ast)
{
	if (__ast_2100_detect_wsxga_p(ast) || ast->chip == AST1300) {
		ast->support_wsxga_p = true;
		ast->support_fullhd = true;
	}
	if (__ast_2100_detect_wuxga(ast))
		ast->support_wuxga = true;
}

struct drm_device *ast_2300_device_create(struct pci_dev *pdev,
					  const struct drm_driver *drv,
					  enum ast_chip chip,
					  enum ast_config_mode config_mode,
					  void __iomem *regs,
					  void __iomem *ioregs,
					  bool need_post)
{
	struct drm_device *dev;
	struct ast_device *ast;
	int ret;

	ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
	if (IS_ERR(ast))
		return ERR_CAST(ast);
	dev = &ast->base;

	ast_device_init(ast, chip, config_mode, regs, ioregs);

	ast_2300_detect_tx_chip(ast);

	if (need_post) {
		ret = ast_post_gpu(ast);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = ast_mm_init(ast);
	if (ret)
		return ERR_PTR(ret);

	/* map reserved buffer */
	ast->dp501_fw_buf = NULL;
	if (ast->vram_size < pci_resource_len(pdev, 0)) {
		ast->dp501_fw_buf = pci_iomap_range(pdev, 0, ast->vram_size, 0);
		if (!ast->dp501_fw_buf)
			drm_info(dev, "failed to map reserved buffer!\n");
	}

	ast_2300_detect_widescreen(ast);

	ret = ast_mode_config_init(ast);
	if (ret)
		return ERR_PTR(ret);

	return dev;
}
@@ -0,0 +1,93 @@
// SPDX-License-Identifier: MIT
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors: Dave Airlie <airlied@redhat.com>
 */

#include <linux/pci.h>

#include <drm/drm_drv.h>
#include <drm/drm_print.h>

#include "ast_drv.h"

static void ast_2400_detect_widescreen(struct ast_device *ast)
{
	if (__ast_2100_detect_wsxga_p(ast) || ast->chip == AST1400) {
		ast->support_wsxga_p = true;
		ast->support_fullhd = true;
	}
	if (__ast_2100_detect_wuxga(ast))
		ast->support_wuxga = true;
}

struct drm_device *ast_2400_device_create(struct pci_dev *pdev,
					  const struct drm_driver *drv,
					  enum ast_chip chip,
					  enum ast_config_mode config_mode,
					  void __iomem *regs,
					  void __iomem *ioregs,
					  bool need_post)
{
	struct drm_device *dev;
	struct ast_device *ast;
	int ret;

	ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
	if (IS_ERR(ast))
		return ERR_CAST(ast);
	dev = &ast->base;

	ast_device_init(ast, chip, config_mode, regs, ioregs);

	ast_2300_detect_tx_chip(ast);

	if (need_post) {
		ret = ast_post_gpu(ast);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = ast_mm_init(ast);
	if (ret)
		return ERR_PTR(ret);

	/* map reserved buffer */
	ast->dp501_fw_buf = NULL;
	if (ast->vram_size < pci_resource_len(pdev, 0)) {
		ast->dp501_fw_buf = pci_iomap_range(pdev, 0, ast->vram_size, 0);
		if (!ast->dp501_fw_buf)
			drm_info(dev, "failed to map reserved buffer!\n");
	}

	ast_2400_detect_widescreen(ast);

	ret = ast_mode_config_init(ast);
	if (ret)
		return ERR_PTR(ret);

	return dev;
}
@@ -27,7 +27,9 @@
 */

#include <linux/delay.h>
#include <linux/pci.h>

#include <drm/drm_drv.h>
#include <drm/drm_print.h>

#include "ast_drv.h"
@@ -567,3 +569,99 @@ int ast_2500_post(struct ast_device *ast)

	return 0;
}

/*
 * Mode setting
 */

const struct ast_vbios_dclk_info ast_2500_dclk_table[] = {
	{0x2c, 0xe7, 0x03}, /* 00: VCLK25_175 */
	{0x95, 0x62, 0x03}, /* 01: VCLK28_322 */
	{0x67, 0x63, 0x01}, /* 02: VCLK31_5 */
	{0x76, 0x63, 0x01}, /* 03: VCLK36 */
	{0xee, 0x67, 0x01}, /* 04: VCLK40 */
	{0x82, 0x62, 0x01}, /* 05: VCLK49_5 */
	{0xc6, 0x64, 0x01}, /* 06: VCLK50 */
	{0x94, 0x62, 0x01}, /* 07: VCLK56_25 */
	{0x80, 0x64, 0x00}, /* 08: VCLK65 */
	{0x7b, 0x63, 0x00}, /* 09: VCLK75 */
	{0x67, 0x62, 0x00}, /* 0a: VCLK78_75 */
	{0x7c, 0x62, 0x00}, /* 0b: VCLK94_5 */
	{0x8e, 0x62, 0x00}, /* 0c: VCLK108 */
	{0x85, 0x24, 0x00}, /* 0d: VCLK135 */
	{0x67, 0x22, 0x00}, /* 0e: VCLK157_5 */
	{0x6a, 0x22, 0x00}, /* 0f: VCLK162 */
	{0x4d, 0x4c, 0x80}, /* 10: VCLK154 */
	{0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */
	{0x28, 0x49, 0x80}, /* 12: VCLK106.5 */
	{0x37, 0x49, 0x80}, /* 13: VCLK146.25 */
	{0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */
	{0x47, 0x6c, 0x80}, /* 15: VCLK71 */
	{0x25, 0x65, 0x80}, /* 16: VCLK88.75 */
	{0x58, 0x01, 0x42}, /* 17: VCLK119 */
	{0x32, 0x67, 0x80}, /* 18: VCLK85_5 */
	{0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */
	{0x44, 0x20, 0x43}, /* 1a: VCLK118_25 */
};

/*
 * Device initialization
 */

static void ast_2500_detect_widescreen(struct ast_device *ast)
{
	if (__ast_2100_detect_wsxga_p(ast) || ast->chip == AST2510) {
		ast->support_wsxga_p = true;
		ast->support_fullhd = true;
	}
	if (__ast_2100_detect_wuxga(ast))
		ast->support_wuxga = true;
}

struct drm_device *ast_2500_device_create(struct pci_dev *pdev,
					  const struct drm_driver *drv,
					  enum ast_chip chip,
					  enum ast_config_mode config_mode,
					  void __iomem *regs,
					  void __iomem *ioregs,
					  bool need_post)
{
	struct drm_device *dev;
	struct ast_device *ast;
	int ret;

	ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
	if (IS_ERR(ast))
		return ERR_CAST(ast);
	dev = &ast->base;

	ast_device_init(ast, chip, config_mode, regs, ioregs);

	ast_2300_detect_tx_chip(ast);

	if (need_post) {
		ret = ast_post_gpu(ast);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = ast_mm_init(ast);
	if (ret)
		return ERR_PTR(ret);

	/* map reserved buffer */
	ast->dp501_fw_buf = NULL;
	if (ast->vram_size < pci_resource_len(pdev, 0)) {
		ast->dp501_fw_buf = pci_iomap_range(pdev, 0, ast->vram_size, 0);
		if (!ast->dp501_fw_buf)
			drm_info(dev, "failed to map reserved buffer!\n");
	}

	ast_2500_detect_widescreen(ast);

	ret = ast_mode_config_init(ast);
	if (ret)
		return ERR_PTR(ret);

	return dev;
}
@@ -26,6 +26,10 @@
 * Authors: Dave Airlie <airlied@redhat.com>
 */

#include <linux/pci.h>

#include <drm/drm_drv.h>

#include "ast_drv.h"
#include "ast_post.h"

@@ -42,3 +46,62 @@ int ast_2600_post(struct ast_device *ast)

	return 0;
}

/*
 * Device initialization
 */

static void ast_2600_detect_widescreen(struct ast_device *ast)
{
	ast->support_wsxga_p = true;
	ast->support_fullhd = true;
	if (__ast_2100_detect_wuxga(ast))
		ast->support_wuxga = true;
}

struct drm_device *ast_2600_device_create(struct pci_dev *pdev,
					  const struct drm_driver *drv,
					  enum ast_chip chip,
					  enum ast_config_mode config_mode,
					  void __iomem *regs,
					  void __iomem *ioregs,
					  bool need_post)
{
	struct drm_device *dev;
	struct ast_device *ast;
	int ret;

	ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
	if (IS_ERR(ast))
		return ERR_CAST(ast);
	dev = &ast->base;

	ast_device_init(ast, chip, config_mode, regs, ioregs);

	ast_2300_detect_tx_chip(ast);

	switch (ast->tx_chip) {
	case AST_TX_ASTDP:
		ret = ast_post_gpu(ast);
		break;
	default:
		ret = 0;
		if (need_post)
			ret = ast_post_gpu(ast);
		break;
	}
	if (ret)
		return ERR_PTR(ret);

	ret = ast_mm_init(ast);
	if (ret)
		return ERR_PTR(ret);

	ast_2600_detect_widescreen(ast);

	ret = ast_mode_config_init(ast);
	if (ret)
		return ERR_PTR(ret);

	return dev;
}
@@ -37,6 +37,7 @@
#include <drm/drm_fbdev_shmem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include "ast_drv.h"
@@ -46,6 +47,32 @@ static int ast_modeset = -1;
MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
module_param_named(modeset, ast_modeset, int, 0400);

void ast_device_init(struct ast_device *ast,
		     enum ast_chip chip,
		     enum ast_config_mode config_mode,
		     void __iomem *regs,
		     void __iomem *ioregs)
{
	ast->chip = chip;
	ast->config_mode = config_mode;
	ast->regs = regs;
	ast->ioregs = ioregs;
}

void __ast_device_set_tx_chip(struct ast_device *ast, enum ast_tx_chip tx_chip)
{
	static const char * const info_str[] = {
		"analog VGA",
		"Sil164 TMDS transmitter",
		"DP501 DisplayPort transmitter",
		"ASPEED DisplayPort transmitter",
	};

	drm_info(&ast->base, "Using %s\n", info_str[tx_chip]);

	ast->tx_chip = tx_chip;
}
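__ast_device_set_tx_chip above relies on the ast_tx_chip enumerators being dense and matching the order of info_str, so the enum value doubles as a table index. A standalone illustration of the pattern (the enumerator names here are hypothetical, not the driver's):

#include <stdio.h>

enum tx_chip { TX_NONE, TX_SIL164, TX_DP501, TX_ASTDP };

static const char * const tx_names[] = {
	"analog VGA",
	"Sil164 TMDS transmitter",
	"DP501 DisplayPort transmitter",
	"ASPEED DisplayPort transmitter",
};

int main(void)
{
	enum tx_chip chip = TX_SIL164;

	printf("Using %s\n", tx_names[chip]);	/* enum doubles as index */
	return 0;
}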
/*
 * DRM driver
 */
@@ -266,7 +293,7 @@ static int ast_detect_chip(struct pci_dev *pdev,
	*chip_out = chip;
	*config_mode_out = config_mode;

	return 0;
	return __AST_CHIP_GEN(chip);
}

static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -277,6 +304,7 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	void __iomem *ioregs;
	enum ast_config_mode config_mode;
	enum ast_chip chip;
	unsigned int chip_gen;
	struct drm_device *drm;
	bool need_post = false;
@@ -349,10 +377,43 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
		return ret;

	ret = ast_detect_chip(pdev, regs, ioregs, &chip, &config_mode);
	if (ret)
	if (ret < 0)
		return ret;
	chip_gen = ret;

	drm = ast_device_create(pdev, &ast_driver, chip, config_mode, regs, ioregs, need_post);
	switch (chip_gen) {
	case 1:
		drm = ast_2000_device_create(pdev, &ast_driver, chip, config_mode,
					     regs, ioregs, need_post);
		break;
	case 2:
		drm = ast_2100_device_create(pdev, &ast_driver, chip, config_mode,
					     regs, ioregs, need_post);
		break;
	case 3:
		drm = ast_2200_device_create(pdev, &ast_driver, chip, config_mode,
					     regs, ioregs, need_post);
		break;
	case 4:
		drm = ast_2300_device_create(pdev, &ast_driver, chip, config_mode,
					     regs, ioregs, need_post);
		break;
	case 5:
		drm = ast_2400_device_create(pdev, &ast_driver, chip, config_mode,
					     regs, ioregs, need_post);
		break;
	case 6:
		drm = ast_2500_device_create(pdev, &ast_driver, chip, config_mode,
					     regs, ioregs, need_post);
		break;
	case 7:
		drm = ast_2600_device_create(pdev, &ast_driver, chip, config_mode,
					     regs, ioregs, need_post);
		break;
	default:
		dev_err(&pdev->dev, "Gen%d not supported\n", chip_gen);
		return -ENODEV;
	}
	if (IS_ERR(drm))
		return PTR_ERR(drm);
	pci_set_drvdata(pdev, drm);
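ast_detect_chip now follows the common kernel idiom of returning a negative errno on failure and a non-negative payload (here the chip generation) on success, which is why the probe path tests ret < 0 and then reuses ret as chip_gen. A self-contained sketch of the idiom (the detection logic is invented for illustration):

#include <errno.h>
#include <stdio.h>

/* Negative errno on failure, otherwise the detected generation. */
static int detect_gen(int device_present)
{
	if (!device_present)
		return -ENODEV;
	return 7;	/* e.g. a Gen7 part */
}

int main(void)
{
	int ret = detect_gen(1);

	if (ret < 0)
		return 1;
	printf("Gen%d\n", ret);
	return 0;
}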
@@ -217,14 +217,6 @@ static inline struct ast_device *to_ast_device(struct drm_device *dev)
	return container_of(dev, struct ast_device, base);
}

struct drm_device *ast_device_create(struct pci_dev *pdev,
				     const struct drm_driver *drv,
				     enum ast_chip chip,
				     enum ast_config_mode config_mode,
				     void __iomem *regs,
				     void __iomem *ioregs,
				     bool need_post);

static inline unsigned long __ast_gen(struct ast_device *ast)
{
	return __AST_CHIP_GEN(ast->chip);
@@ -415,21 +407,88 @@ struct ast_crtc_state {

int ast_mm_init(struct ast_device *ast);

/* ast_drv.c */
void ast_device_init(struct ast_device *ast,
		     enum ast_chip chip,
		     enum ast_config_mode config_mode,
		     void __iomem *regs,
		     void __iomem *ioregs);
void __ast_device_set_tx_chip(struct ast_device *ast, enum ast_tx_chip tx_chip);

/* ast_2000.c */
int ast_2000_post(struct ast_device *ast);
extern const struct ast_vbios_dclk_info ast_2000_dclk_table[];
void ast_2000_detect_tx_chip(struct ast_device *ast, bool need_post);
struct drm_device *ast_2000_device_create(struct pci_dev *pdev,
					  const struct drm_driver *drv,
					  enum ast_chip chip,
					  enum ast_config_mode config_mode,
					  void __iomem *regs,
					  void __iomem *ioregs,
					  bool need_post);

/* ast_2100.c */
int ast_2100_post(struct ast_device *ast);
bool __ast_2100_detect_wsxga_p(struct ast_device *ast);
bool __ast_2100_detect_wuxga(struct ast_device *ast);
struct drm_device *ast_2100_device_create(struct pci_dev *pdev,
					  const struct drm_driver *drv,
					  enum ast_chip chip,
					  enum ast_config_mode config_mode,
					  void __iomem *regs,
					  void __iomem *ioregs,
					  bool need_post);

/* ast_2200.c */
struct drm_device *ast_2200_device_create(struct pci_dev *pdev,
					  const struct drm_driver *drv,
					  enum ast_chip chip,
					  enum ast_config_mode config_mode,
					  void __iomem *regs,
					  void __iomem *ioregs,
					  bool need_post);

/* ast_2300.c */
int ast_2300_post(struct ast_device *ast);
void ast_2300_detect_tx_chip(struct ast_device *ast);
struct drm_device *ast_2300_device_create(struct pci_dev *pdev,
					  const struct drm_driver *drv,
					  enum ast_chip chip,
					  enum ast_config_mode config_mode,
					  void __iomem *regs,
					  void __iomem *ioregs,
					  bool need_post);

/* ast_2400.c */
struct drm_device *ast_2400_device_create(struct pci_dev *pdev,
					  const struct drm_driver *drv,
					  enum ast_chip chip,
					  enum ast_config_mode config_mode,
					  void __iomem *regs,
					  void __iomem *ioregs,
					  bool need_post);

/* ast_2500.c */
void ast_2500_patch_ahb(void __iomem *regs);
int ast_2500_post(struct ast_device *ast);
extern const struct ast_vbios_dclk_info ast_2500_dclk_table[];
struct drm_device *ast_2500_device_create(struct pci_dev *pdev,
					  const struct drm_driver *drv,
					  enum ast_chip chip,
					  enum ast_config_mode config_mode,
					  void __iomem *regs,
					  void __iomem *ioregs,
					  bool need_post);

/* ast_2600.c */
int ast_2600_post(struct ast_device *ast);
struct drm_device *ast_2600_device_create(struct pci_dev *pdev,
					  const struct drm_driver *drv,
					  enum ast_chip chip,
					  enum ast_config_mode config_mode,
					  void __iomem *regs,
					  void __iomem *ioregs,
					  bool need_post);

/* ast post */
int ast_post_gpu(struct ast_device *ast);
@@ -1,268 +0,0 @@
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors: Dave Airlie <airlied@redhat.com>
 */

#include <linux/of.h>
#include <linux/pci.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>

#include "ast_drv.h"

/* Try to detect WSXGA+ on Gen2+ */
static bool __ast_2100_detect_wsxga_p(struct ast_device *ast)
{
	u8 vgacrd0 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd0);

	if (!(vgacrd0 & AST_IO_VGACRD0_VRAM_INIT_BY_BMC))
		return true;
	if (vgacrd0 & AST_IO_VGACRD0_IKVM_WIDESCREEN)
		return true;

	return false;
}

/* Try to detect WUXGA on Gen2+ */
static bool __ast_2100_detect_wuxga(struct ast_device *ast)
{
	u8 vgacrd1;

	if (ast->support_fullhd) {
		vgacrd1 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd1);
		if (!(vgacrd1 & AST_IO_VGACRD1_SUPPORTS_WUXGA))
			return true;
	}

	return false;
}

static void ast_detect_widescreen(struct ast_device *ast)
{
	ast->support_wsxga_p = false;
	ast->support_fullhd = false;
	ast->support_wuxga = false;

	if (AST_GEN(ast) >= 7) {
		ast->support_wsxga_p = true;
		ast->support_fullhd = true;
		if (__ast_2100_detect_wuxga(ast))
			ast->support_wuxga = true;
	} else if (AST_GEN(ast) >= 6) {
		if (__ast_2100_detect_wsxga_p(ast))
			ast->support_wsxga_p = true;
		else if (ast->chip == AST2510)
			ast->support_wsxga_p = true;
		if (ast->support_wsxga_p)
			ast->support_fullhd = true;
		if (__ast_2100_detect_wuxga(ast))
			ast->support_wuxga = true;
	} else if (AST_GEN(ast) >= 5) {
		if (__ast_2100_detect_wsxga_p(ast))
			ast->support_wsxga_p = true;
		else if (ast->chip == AST1400)
			ast->support_wsxga_p = true;
		if (ast->support_wsxga_p)
			ast->support_fullhd = true;
		if (__ast_2100_detect_wuxga(ast))
			ast->support_wuxga = true;
	} else if (AST_GEN(ast) >= 4) {
		if (__ast_2100_detect_wsxga_p(ast))
			ast->support_wsxga_p = true;
		else if (ast->chip == AST1300)
			ast->support_wsxga_p = true;
		if (ast->support_wsxga_p)
			ast->support_fullhd = true;
		if (__ast_2100_detect_wuxga(ast))
			ast->support_wuxga = true;
	} else if (AST_GEN(ast) >= 3) {
		if (__ast_2100_detect_wsxga_p(ast))
			ast->support_wsxga_p = true;
		if (ast->support_wsxga_p) {
			if (ast->chip == AST2200)
				ast->support_fullhd = true;
		}
		if (__ast_2100_detect_wuxga(ast))
			ast->support_wuxga = true;
	} else if (AST_GEN(ast) >= 2) {
		if (__ast_2100_detect_wsxga_p(ast))
			ast->support_wsxga_p = true;
		if (ast->support_wsxga_p) {
			if (ast->chip == AST2100)
				ast->support_fullhd = true;
		}
		if (__ast_2100_detect_wuxga(ast))
			ast->support_wuxga = true;
	}
}

static void ast_detect_tx_chip(struct ast_device *ast, bool need_post)
{
	static const char * const info_str[] = {
		"analog VGA",
		"Sil164 TMDS transmitter",
		"DP501 DisplayPort transmitter",
		"ASPEED DisplayPort transmitter",
	};

	struct drm_device *dev = &ast->base;
	u8 vgacra3, vgacrd1;

	/* Check 3rd Tx option (digital output afaik) */
	ast->tx_chip = AST_TX_NONE;

	if (AST_GEN(ast) <= 3) {
		/*
		 * VGACRA3 Enhanced Color Mode Register, check if DVO is already
		 * enabled, in that case, assume we have a SIL164 TMDS transmitter
		 *
		 * Don't make that assumption if the chip wasn't enabled and
		 * is at power-on reset, otherwise we'll incorrectly "detect" a
		 * SIL164 when there is none.
		 */
		if (!need_post) {
			vgacra3 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xff);
			if (vgacra3 & AST_IO_VGACRA3_DVO_ENABLED)
				ast->tx_chip = AST_TX_SIL164;
		}
	} else {
		/*
		 * On AST GEN4+, look at the configuration set by the SoC in
		 * the SOC scratch register #1 bits 11:8 (interestingly marked
		 * as "reserved" in the spec)
		 */
		vgacrd1 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1,
						 AST_IO_VGACRD1_TX_TYPE_MASK);
		switch (vgacrd1) {
		/*
		 * GEN4 to GEN6
		 */
		case AST_IO_VGACRD1_TX_SIL164_VBIOS:
			ast->tx_chip = AST_TX_SIL164;
			break;
		case AST_IO_VGACRD1_TX_DP501_VBIOS:
			ast->dp501_fw_addr = drmm_kzalloc(dev, 32*1024, GFP_KERNEL);
			if (ast->dp501_fw_addr) {
				/* backup firmware */
				if (ast_backup_fw(ast, ast->dp501_fw_addr, 32*1024)) {
					drmm_kfree(dev, ast->dp501_fw_addr);
					ast->dp501_fw_addr = NULL;
				}
			}
			fallthrough;
		case AST_IO_VGACRD1_TX_FW_EMBEDDED_FW:
			ast->tx_chip = AST_TX_DP501;
			break;
		/*
		 * GEN7+
		 */
		case AST_IO_VGACRD1_TX_ASTDP:
			ast->tx_chip = AST_TX_ASTDP;
			break;
		/*
		 * Several of the listed TX chips are not explicitly supported
		 * by the ast driver. If these exist in real-world devices, they
		 * are most likely reported as VGA or SIL164 outputs. We warn here
		 * to get bug reports for these devices. If none come in for some
		 * time, we can begin to fail device probing on these values.
		 */
		case AST_IO_VGACRD1_TX_ITE66121_VBIOS:
			drm_warn(dev, "ITE IT66121 detected, 0x%x, Gen%lu\n",
				 vgacrd1, AST_GEN(ast));
			break;
		case AST_IO_VGACRD1_TX_CH7003_VBIOS:
			drm_warn(dev, "Chrontel CH7003 detected, 0x%x, Gen%lu\n",
				 vgacrd1, AST_GEN(ast));
			break;
		case AST_IO_VGACRD1_TX_ANX9807_VBIOS:
			drm_warn(dev, "Analogix ANX9807 detected, 0x%x, Gen%lu\n",
				 vgacrd1, AST_GEN(ast));
			break;
		}
	}

	drm_info(dev, "Using %s\n", info_str[ast->tx_chip]);
}

struct drm_device *ast_device_create(struct pci_dev *pdev,
				     const struct drm_driver *drv,
				     enum ast_chip chip,
				     enum ast_config_mode config_mode,
				     void __iomem *regs,
				     void __iomem *ioregs,
				     bool need_post)
{
	struct drm_device *dev;
	struct ast_device *ast;
	int ret;

	ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
	if (IS_ERR(ast))
		return ERR_CAST(ast);
	dev = &ast->base;

	ast->chip = chip;
	ast->config_mode = config_mode;
	ast->regs = regs;
	ast->ioregs = ioregs;

	ast_detect_tx_chip(ast, need_post);
	switch (ast->tx_chip) {
	case AST_TX_ASTDP:
		ret = ast_post_gpu(ast);
		break;
	default:
		ret = 0;
		if (need_post)
			ret = ast_post_gpu(ast);
		break;
	}
	if (ret)
		return ERR_PTR(ret);

	ret = ast_mm_init(ast);
	if (ret)
		return ERR_PTR(ret);

	/* map reserved buffer */
	ast->dp501_fw_buf = NULL;
	if (ast->vram_size < pci_resource_len(pdev, 0)) {
		ast->dp501_fw_buf = pci_iomap_range(pdev, 0, ast->vram_size, 0);
		if (!ast->dp501_fw_buf)
			drm_info(dev, "failed to map reserved buffer!\n");
	}

	ast_detect_widescreen(ast);

	ret = ast_mode_config_init(ast);
	if (ret)
		return ERR_PTR(ret);

	return dev;
}
@@ -373,9 +373,9 @@ static void ast_set_dclk_reg(struct ast_device *ast,
	const struct ast_vbios_dclk_info *clk_info;

	if (IS_AST_GEN6(ast) || IS_AST_GEN7(ast))
-		clk_info = &dclk_table_ast2500[vmode->dclk_index];
+		clk_info = &ast_2500_dclk_table[vmode->dclk_index];
	else
-		clk_info = &dclk_table[vmode->dclk_index];
+		clk_info = &ast_2000_dclk_table[vmode->dclk_index];

	ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xc0, 0x00, clk_info->param1);
	ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xc1, 0x00, clk_info->param2);
@@ -33,66 +33,6 @@
#define HiCModeIndex 3
#define TrueCModeIndex 4

static const struct ast_vbios_dclk_info dclk_table[] = {
	{0x2C, 0xE7, 0x03}, /* 00: VCLK25_175 */
	{0x95, 0x62, 0x03}, /* 01: VCLK28_322 */
	{0x67, 0x63, 0x01}, /* 02: VCLK31_5 */
	{0x76, 0x63, 0x01}, /* 03: VCLK36 */
	{0xEE, 0x67, 0x01}, /* 04: VCLK40 */
	{0x82, 0x62, 0x01}, /* 05: VCLK49_5 */
	{0xC6, 0x64, 0x01}, /* 06: VCLK50 */
	{0x94, 0x62, 0x01}, /* 07: VCLK56_25 */
	{0x80, 0x64, 0x00}, /* 08: VCLK65 */
	{0x7B, 0x63, 0x00}, /* 09: VCLK75 */
	{0x67, 0x62, 0x00}, /* 0A: VCLK78_75 */
	{0x7C, 0x62, 0x00}, /* 0B: VCLK94_5 */
	{0x8E, 0x62, 0x00}, /* 0C: VCLK108 */
	{0x85, 0x24, 0x00}, /* 0D: VCLK135 */
	{0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */
	{0x6A, 0x22, 0x00}, /* 0F: VCLK162 */
	{0x4d, 0x4c, 0x80}, /* 10: VCLK154 */
	{0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */
	{0x28, 0x49, 0x80}, /* 12: VCLK106.5 */
	{0x37, 0x49, 0x80}, /* 13: VCLK146.25 */
	{0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */
	{0x47, 0x6c, 0x80}, /* 15: VCLK71 */
	{0x25, 0x65, 0x80}, /* 16: VCLK88.75 */
	{0x77, 0x58, 0x80}, /* 17: VCLK119 */
	{0x32, 0x67, 0x80}, /* 18: VCLK85_5 */
	{0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */
	{0x3b, 0x2c, 0x81}, /* 1A: VCLK118_25 */
};

static const struct ast_vbios_dclk_info dclk_table_ast2500[] = {
	{0x2C, 0xE7, 0x03}, /* 00: VCLK25_175 */
	{0x95, 0x62, 0x03}, /* 01: VCLK28_322 */
	{0x67, 0x63, 0x01}, /* 02: VCLK31_5 */
	{0x76, 0x63, 0x01}, /* 03: VCLK36 */
	{0xEE, 0x67, 0x01}, /* 04: VCLK40 */
	{0x82, 0x62, 0x01}, /* 05: VCLK49_5 */
	{0xC6, 0x64, 0x01}, /* 06: VCLK50 */
	{0x94, 0x62, 0x01}, /* 07: VCLK56_25 */
	{0x80, 0x64, 0x00}, /* 08: VCLK65 */
	{0x7B, 0x63, 0x00}, /* 09: VCLK75 */
	{0x67, 0x62, 0x00}, /* 0A: VCLK78_75 */
	{0x7C, 0x62, 0x00}, /* 0B: VCLK94_5 */
	{0x8E, 0x62, 0x00}, /* 0C: VCLK108 */
	{0x85, 0x24, 0x00}, /* 0D: VCLK135 */
	{0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */
	{0x6A, 0x22, 0x00}, /* 0F: VCLK162 */
	{0x4d, 0x4c, 0x80}, /* 10: VCLK154 */
	{0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */
	{0x28, 0x49, 0x80}, /* 12: VCLK106.5 */
	{0x37, 0x49, 0x80}, /* 13: VCLK146.25 */
	{0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */
	{0x47, 0x6c, 0x80}, /* 15: VCLK71 */
	{0x25, 0x65, 0x80}, /* 16: VCLK88.75 */
	{0x58, 0x01, 0x42}, /* 17: VCLK119 */
	{0x32, 0x67, 0x80}, /* 18: VCLK85_5 */
	{0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */
	{0x44, 0x20, 0x43}, /* 1A: VCLK118_25 */
};

static const struct ast_vbios_stdtable vbios_stdtable[] = {
	/* MD_2_3_400 */
	{
@@ -18,12 +18,23 @@ config DRM_IMX8MP_DW_HDMI_BRIDGE
	depends on OF
	depends on COMMON_CLK
	select DRM_DW_HDMI
	imply DRM_IMX8MP_HDMI_PAI
	imply DRM_IMX8MP_HDMI_PVI
	imply PHY_FSL_SAMSUNG_HDMI_PHY
	help
	  Choose this to enable support for the internal HDMI encoder found
	  on the i.MX8MP SoC.

config DRM_IMX8MP_HDMI_PAI
	tristate "Freescale i.MX8MP HDMI PAI bridge support"
	depends on OF
	select DRM_DW_HDMI
	select REGMAP
	select REGMAP_MMIO
	help
	  Choose this to enable support for the internal HDMI TX Parallel
	  Audio Interface found on the Freescale i.MX8MP SoC.

config DRM_IMX8MP_HDMI_PVI
	tristate "Freescale i.MX8MP HDMI PVI bridge support"
	depends on OF
@@ -1,6 +1,7 @@
obj-$(CONFIG_DRM_IMX_LDB_HELPER) += imx-ldb-helper.o
obj-$(CONFIG_DRM_IMX_LEGACY_BRIDGE) += imx-legacy-bridge.o
obj-$(CONFIG_DRM_IMX8MP_DW_HDMI_BRIDGE) += imx8mp-hdmi-tx.o
obj-$(CONFIG_DRM_IMX8MP_HDMI_PAI) += imx8mp-hdmi-pai.o
obj-$(CONFIG_DRM_IMX8MP_HDMI_PVI) += imx8mp-hdmi-pvi.o
obj-$(CONFIG_DRM_IMX8QM_LDB) += imx8qm-ldb.o
obj-$(CONFIG_DRM_IMX8QXP_LDB) += imx8qxp-ldb.o
@@ -0,0 +1,158 @@
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2025 NXP
 */

#include <linux/bitfield.h>
#include <linux/component.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <drm/bridge/dw_hdmi.h>
#include <sound/asoundef.h>

#define HTX_PAI_CTRL		0x00
#define   ENABLE		BIT(0)

#define HTX_PAI_CTRL_EXT	0x04
#define   WTMK_HIGH_MASK	GENMASK(31, 24)
#define   WTMK_LOW_MASK		GENMASK(23, 16)
#define   NUM_CH_MASK		GENMASK(10, 8)
#define   WTMK_HIGH(n)		FIELD_PREP(WTMK_HIGH_MASK, (n))
#define   WTMK_LOW(n)		FIELD_PREP(WTMK_LOW_MASK, (n))
#define   NUM_CH(n)		FIELD_PREP(NUM_CH_MASK, (n) - 1)

#define HTX_PAI_FIELD_CTRL	0x08
#define   PRE_SEL		GENMASK(28, 24)
#define   D_SEL			GENMASK(23, 20)
#define   V_SEL			GENMASK(19, 15)
#define   U_SEL			GENMASK(14, 10)
#define   C_SEL			GENMASK(9, 5)
#define   P_SEL			GENMASK(4, 0)

struct imx8mp_hdmi_pai {
	struct regmap *regmap;
};

static void imx8mp_hdmi_pai_enable(struct dw_hdmi *dw_hdmi, int channel,
				   int width, int rate, int non_pcm,
				   int iec958)
{
	const struct dw_hdmi_plat_data *pdata = dw_hdmi_to_plat_data(dw_hdmi);
	struct imx8mp_hdmi_pai *hdmi_pai = pdata->priv_audio;
	int val;

	/* PAI set control extended */
	val = WTMK_HIGH(3) | WTMK_LOW(3);
	val |= NUM_CH(channel);
	regmap_write(hdmi_pai->regmap, HTX_PAI_CTRL_EXT, val);

	/* IEC60958 format */
	if (iec958) {
		val = FIELD_PREP_CONST(P_SEL,
				       __bf_shf(IEC958_SUBFRAME_PARITY));
		val |= FIELD_PREP_CONST(C_SEL,
					__bf_shf(IEC958_SUBFRAME_CHANNEL_STATUS));
		val |= FIELD_PREP_CONST(U_SEL,
					__bf_shf(IEC958_SUBFRAME_USER_DATA));
		val |= FIELD_PREP_CONST(V_SEL,
					__bf_shf(IEC958_SUBFRAME_VALIDITY));
		val |= FIELD_PREP_CONST(D_SEL,
					__bf_shf(IEC958_SUBFRAME_SAMPLE_24_MASK));
		val |= FIELD_PREP_CONST(PRE_SEL,
					__bf_shf(IEC958_SUBFRAME_PREAMBLE_MASK));
	} else {
		/*
		 * The allowed PCM widths are 24bit and 32bit, as they are
		 * supported by the aud2htx module.
		 * For 24bit, D_SEL = 0, select all the bits.
		 * For 32bit, D_SEL = 8, select 24bit in MSB.
		 */
		val = FIELD_PREP(D_SEL, width - 24);
	}

	regmap_write(hdmi_pai->regmap, HTX_PAI_FIELD_CTRL, val);

	/* PAI start running */
	regmap_write(hdmi_pai->regmap, HTX_PAI_CTRL, ENABLE);
}

static void imx8mp_hdmi_pai_disable(struct dw_hdmi *dw_hdmi)
{
	const struct dw_hdmi_plat_data *pdata = dw_hdmi_to_plat_data(dw_hdmi);
	struct imx8mp_hdmi_pai *hdmi_pai = pdata->priv_audio;

	/* Stop PAI */
	regmap_write(hdmi_pai->regmap, HTX_PAI_CTRL, 0);
}

static const struct regmap_config imx8mp_hdmi_pai_regmap_config = {
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.max_register = HTX_PAI_FIELD_CTRL,
};

static int imx8mp_hdmi_pai_bind(struct device *dev, struct device *master, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_hdmi_plat_data *plat_data = data;
	struct imx8mp_hdmi_pai *hdmi_pai;
	struct resource *res;
	void __iomem *base;

	hdmi_pai = devm_kzalloc(dev, sizeof(*hdmi_pai), GFP_KERNEL);
	if (!hdmi_pai)
		return -ENOMEM;

	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	hdmi_pai->regmap = devm_regmap_init_mmio_clk(dev, "apb", base,
						     &imx8mp_hdmi_pai_regmap_config);
	if (IS_ERR(hdmi_pai->regmap)) {
		dev_err(dev, "regmap init failed\n");
		return PTR_ERR(hdmi_pai->regmap);
	}

	plat_data->enable_audio = imx8mp_hdmi_pai_enable;
	plat_data->disable_audio = imx8mp_hdmi_pai_disable;
	plat_data->priv_audio = hdmi_pai;

	return 0;
}

static const struct component_ops imx8mp_hdmi_pai_ops = {
	.bind = imx8mp_hdmi_pai_bind,
};

static int imx8mp_hdmi_pai_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &imx8mp_hdmi_pai_ops);
}

static void imx8mp_hdmi_pai_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &imx8mp_hdmi_pai_ops);
}

static const struct of_device_id imx8mp_hdmi_pai_of_table[] = {
	{ .compatible = "fsl,imx8mp-hdmi-pai" },
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx8mp_hdmi_pai_of_table);

static struct platform_driver imx8mp_hdmi_pai_platform_driver = {
	.probe = imx8mp_hdmi_pai_probe,
	.remove = imx8mp_hdmi_pai_remove,
	.driver = {
		.name = "imx8mp-hdmi-pai",
		.of_match_table = imx8mp_hdmi_pai_of_table,
	},
};
module_platform_driver(imx8mp_hdmi_pai_platform_driver);

MODULE_DESCRIPTION("i.MX8MP HDMI PAI driver");
MODULE_LICENSE("GPL");
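The field helpers above make the register math easy to check by hand. Below is a minimal standalone sketch of how imx8mp_hdmi_pai_enable() composes HTX_PAI_CTRL_EXT; the stub GENMASK()/FIELD_PREP() macros and the stereo example are illustrative assumptions for userspace compilation, not part of the patch:

/*
 * Sketch only: mimics <linux/bits.h>/<linux/bitfield.h> semantics so the
 * value can be computed outside the kernel.
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)	 ((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(m, v) (((v) << __builtin_ctz(m)) & (m))

#define WTMK_HIGH_MASK	GENMASK(31, 24)
#define WTMK_LOW_MASK	GENMASK(23, 16)
#define NUM_CH_MASK	GENMASK(10, 8)

int main(void)
{
	int channel = 2; /* stereo */
	uint32_t val = FIELD_PREP(WTMK_HIGH_MASK, 3) |
		       FIELD_PREP(WTMK_LOW_MASK, 3) |
		       FIELD_PREP(NUM_CH_MASK, channel - 1);

	/* Prints 0x03030100: watermarks 3/3, NUM_CH holds channels - 1 */
	printf("HTX_PAI_CTRL_EXT = 0x%08x\n", val);
	return 0;
}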
@@ -5,11 +5,13 @@
 */

#include <linux/clk.h>
#include <linux/component.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_modes.h>
#include <drm/drm_of.h>

struct imx8mp_hdmi {
	struct dw_hdmi_plat_data plat_data;
@@ -79,10 +81,45 @@ static const struct dw_hdmi_phy_ops imx8mp_hdmi_phy_ops = {
	.update_hpd = dw_hdmi_phy_update_hpd,
};

static int imx8mp_dw_hdmi_bind(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct imx8mp_hdmi *hdmi = dev_get_drvdata(dev);
	int ret;

	ret = component_bind_all(dev, &hdmi->plat_data);
	if (ret)
		return dev_err_probe(dev, ret, "component_bind_all failed!\n");

	hdmi->dw_hdmi = dw_hdmi_probe(pdev, &hdmi->plat_data);
	if (IS_ERR(hdmi->dw_hdmi)) {
		component_unbind_all(dev, &hdmi->plat_data);
		return PTR_ERR(hdmi->dw_hdmi);
	}

	return 0;
}

static void imx8mp_dw_hdmi_unbind(struct device *dev)
{
	struct imx8mp_hdmi *hdmi = dev_get_drvdata(dev);

	dw_hdmi_remove(hdmi->dw_hdmi);

	component_unbind_all(dev, &hdmi->plat_data);
}

static const struct component_master_ops imx8mp_dw_hdmi_ops = {
	.bind = imx8mp_dw_hdmi_bind,
	.unbind = imx8mp_dw_hdmi_unbind,
};

static int imx8mp_dw_hdmi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dw_hdmi_plat_data *plat_data;
	struct component_match *match = NULL;
	struct device_node *remote;
	struct imx8mp_hdmi *hdmi;

	hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
@@ -102,20 +139,38 @@ static int imx8mp_dw_hdmi_probe(struct platform_device *pdev)
	plat_data->priv_data = hdmi;
	plat_data->phy_force_vendor = true;

-	hdmi->dw_hdmi = dw_hdmi_probe(pdev, plat_data);
-	if (IS_ERR(hdmi->dw_hdmi))
-		return PTR_ERR(hdmi->dw_hdmi);

	platform_set_drvdata(pdev, hdmi);

	/* port@2 is for hdmi_pai device */
	remote = of_graph_get_remote_node(pdev->dev.of_node, 2, 0);
	if (!remote) {
		hdmi->dw_hdmi = dw_hdmi_probe(pdev, plat_data);
		if (IS_ERR(hdmi->dw_hdmi))
			return PTR_ERR(hdmi->dw_hdmi);
	} else {
		drm_of_component_match_add(dev, &match, component_compare_of, remote);

		of_node_put(remote);

		return component_master_add_with_match(dev, &imx8mp_dw_hdmi_ops, match);
	}

	return 0;
}

static void imx8mp_dw_hdmi_remove(struct platform_device *pdev)
{
	struct imx8mp_hdmi *hdmi = platform_get_drvdata(pdev);
	struct device_node *remote;

-	dw_hdmi_remove(hdmi->dw_hdmi);
	remote = of_graph_get_remote_node(pdev->dev.of_node, 2, 0);
	if (remote) {
		of_node_put(remote);

		component_master_del(&pdev->dev, &imx8mp_dw_hdmi_ops);
	} else {
		dw_hdmi_remove(hdmi->dw_hdmi);
	}
}

static int imx8mp_dw_hdmi_pm_suspend(struct device *dev)
@@ -683,11 +683,6 @@ static void imx8qxp_ldb_remove(struct platform_device *pdev)
	pm_runtime_disable(&pdev->dev);
}

-static int imx8qxp_ldb_runtime_suspend(struct device *dev)
-{
-	return 0;
-}

static int imx8qxp_ldb_runtime_resume(struct device *dev)
{
	struct imx8qxp_ldb *imx8qxp_ldb = dev_get_drvdata(dev);

@@ -700,7 +695,7 @@ static int imx8qxp_ldb_runtime_resume(struct device *dev)
}

static const struct dev_pm_ops imx8qxp_ldb_pm_ops = {
-	RUNTIME_PM_OPS(imx8qxp_ldb_runtime_suspend, imx8qxp_ldb_runtime_resume, NULL)
+	RUNTIME_PM_OPS(NULL, imx8qxp_ldb_runtime_resume, NULL)
};

static const struct of_device_id imx8qxp_ldb_dt_ids[] = {
@@ -90,6 +90,11 @@ static int audio_hw_params(struct device *dev, void *data,
			   params->iec.status[0] & IEC958_AES0_NONAUDIO);
	dw_hdmi_set_sample_width(dw->data.hdmi, params->sample_width);

	if (daifmt->bit_fmt == SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE)
		dw_hdmi_set_sample_iec958(dw->data.hdmi, 1);
	else
		dw_hdmi_set_sample_iec958(dw->data.hdmi, 0);

	return 0;
}
@@ -177,6 +177,7 @@ struct dw_hdmi {

	spinlock_t audio_lock;
	struct mutex audio_mutex;
	unsigned int sample_iec958;
	unsigned int sample_non_pcm;
	unsigned int sample_width;
	unsigned int sample_rate;

@@ -198,6 +199,12 @@ struct dw_hdmi {
	enum drm_connector_status last_connector_result;
};

const struct dw_hdmi_plat_data *dw_hdmi_to_plat_data(struct dw_hdmi *hdmi)
{
	return hdmi->plat_data;
}
EXPORT_SYMBOL_GPL(dw_hdmi_to_plat_data);

#define HDMI_IH_PHY_STAT0_RX_SENSE \
	(HDMI_IH_PHY_STAT0_RX_SENSE0 | HDMI_IH_PHY_STAT0_RX_SENSE1 | \
	 HDMI_IH_PHY_STAT0_RX_SENSE2 | HDMI_IH_PHY_STAT0_RX_SENSE3)

@@ -712,6 +719,14 @@ void dw_hdmi_set_sample_non_pcm(struct dw_hdmi *hdmi, unsigned int non_pcm)
}
EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_non_pcm);

void dw_hdmi_set_sample_iec958(struct dw_hdmi *hdmi, unsigned int iec958)
{
	mutex_lock(&hdmi->audio_mutex);
	hdmi->sample_iec958 = iec958;
	mutex_unlock(&hdmi->audio_mutex);
}
EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_iec958);

void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate)
{
	mutex_lock(&hdmi->audio_mutex);

@@ -843,7 +858,8 @@ static void dw_hdmi_gp_audio_enable(struct dw_hdmi *hdmi)
				  hdmi->channels,
				  hdmi->sample_width,
				  hdmi->sample_rate,
-				  hdmi->sample_non_pcm);
+				  hdmi->sample_non_pcm,
+				  hdmi->sample_iec958);
}

static void dw_hdmi_gp_audio_disable(struct dw_hdmi *hdmi)
@@ -106,10 +106,21 @@
#define SN_PWM_EN_INV_REG		0xA5
#define  SN_PWM_INV_MASK		BIT(0)
#define  SN_PWM_EN_MASK			BIT(1)

#define SN_IRQ_EN_REG			0xE0
#define  IRQ_EN				BIT(0)

#define SN_IRQ_EVENTS_EN_REG		0xE6
#define  HPD_INSERTION_EN		BIT(1)
#define  HPD_REMOVAL_EN			BIT(2)

#define SN_AUX_CMD_STATUS_REG		0xF4
#define  AUX_IRQ_STATUS_AUX_RPLY_TOUT	BIT(3)
#define  AUX_IRQ_STATUS_AUX_SHORT	BIT(5)
#define  AUX_IRQ_STATUS_NAT_I2C_FAIL	BIT(6)
#define SN_IRQ_STATUS_REG		0xF5
#define  HPD_REMOVAL_STATUS		BIT(2)
#define  HPD_INSERTION_STATUS		BIT(1)

#define MIN_DSI_CLK_FREQ_MHZ	40

@@ -152,7 +163,9 @@
 * @ln_assign: Value to program to the LN_ASSIGN register.
 * @ln_polrs: Value for the 4-bit LN_POLRS field of SN_ENH_FRAME_REG.
 * @comms_enabled: If true then communication over the aux channel is enabled.
 * @hpd_enabled: If true then HPD events are enabled.
 * @comms_mutex: Protects modification of comms_enabled.
 * @hpd_mutex: Protects modification of hpd_enabled.
 *
 * @gchip: If we expose our GPIOs, this is used.
 * @gchip_output: A cache of whether we've set GPIOs to output. This

@@ -190,7 +203,9 @@ struct ti_sn65dsi86 {
	u8 ln_assign;
	u8 ln_polrs;
	bool comms_enabled;
	bool hpd_enabled;
	struct mutex comms_mutex;
	struct mutex hpd_mutex;

#if defined(CONFIG_OF_GPIO)
	struct gpio_chip gchip;
@@ -221,6 +236,23 @@ static const struct regmap_config ti_sn65dsi86_regmap_config = {
	.max_register = 0xFF,
};

static int ti_sn65dsi86_read_u8(struct ti_sn65dsi86 *pdata, unsigned int reg,
				u8 *val)
{
	int ret;
	unsigned int reg_val;

	ret = regmap_read(pdata->regmap, reg, &reg_val);
	if (ret) {
		dev_err(pdata->dev, "fail to read raw reg %#x: %d\n",
			reg, ret);
		return ret;
	}
	*val = (u8)reg_val;

	return 0;
}

static int __maybe_unused ti_sn65dsi86_read_u16(struct ti_sn65dsi86 *pdata,
						unsigned int reg, u16 *val)
{
@@ -379,6 +411,7 @@ static void ti_sn65dsi86_disable_comms(struct ti_sn65dsi86 *pdata)
static int __maybe_unused ti_sn65dsi86_resume(struct device *dev)
{
	struct ti_sn65dsi86 *pdata = dev_get_drvdata(dev);
	const struct i2c_client *client = to_i2c_client(pdata->dev);
	int ret;

	ret = regulator_bulk_enable(SN_REGULATOR_SUPPLY_NUM, pdata->supplies);

@@ -413,6 +446,13 @@ static int __maybe_unused ti_sn65dsi86_resume(struct device *dev)
	if (pdata->refclk)
		ti_sn65dsi86_enable_comms(pdata, NULL);

	if (client->irq) {
		ret = regmap_update_bits(pdata->regmap, SN_IRQ_EN_REG, IRQ_EN,
					 IRQ_EN);
		if (ret)
			dev_err(pdata->dev, "Failed to enable IRQ events: %d\n", ret);
	}

	return ret;
}
@@ -1211,6 +1251,8 @@ static void ti_sn65dsi86_debugfs_init(struct drm_bridge *bridge, struct dentry *
static void ti_sn_bridge_hpd_enable(struct drm_bridge *bridge)
{
	struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
	const struct i2c_client *client = to_i2c_client(pdata->dev);
	int ret;

	/*
	 * Device needs to be powered on before reading the HPD state

@@ -1219,11 +1261,35 @@ static void ti_sn_bridge_hpd_enable(struct drm_bridge *bridge)
	 */

	pm_runtime_get_sync(pdata->dev);

	mutex_lock(&pdata->hpd_mutex);
	pdata->hpd_enabled = true;
	mutex_unlock(&pdata->hpd_mutex);

	if (client->irq) {
		ret = regmap_set_bits(pdata->regmap, SN_IRQ_EVENTS_EN_REG,
				      HPD_REMOVAL_EN | HPD_INSERTION_EN);
		if (ret)
			dev_err(pdata->dev, "Failed to enable HPD events: %d\n", ret);
	}
}

static void ti_sn_bridge_hpd_disable(struct drm_bridge *bridge)
{
	struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
	const struct i2c_client *client = to_i2c_client(pdata->dev);
	int ret;

	if (client->irq) {
		ret = regmap_clear_bits(pdata->regmap, SN_IRQ_EVENTS_EN_REG,
					HPD_REMOVAL_EN | HPD_INSERTION_EN);
		if (ret)
			dev_err(pdata->dev, "Failed to disable HPD events: %d\n", ret);
	}

	mutex_lock(&pdata->hpd_mutex);
	pdata->hpd_enabled = false;
	mutex_unlock(&pdata->hpd_mutex);

	pm_runtime_put_autosuspend(pdata->dev);
}
@@ -1309,6 +1375,41 @@ static int ti_sn_bridge_parse_dsi_host(struct ti_sn65dsi86 *pdata)
	return 0;
}

static irqreturn_t ti_sn_bridge_interrupt(int irq, void *private)
{
	struct ti_sn65dsi86 *pdata = private;
	struct drm_device *dev = pdata->bridge.dev;
	u8 status;
	int ret;
	bool hpd_event;

	ret = ti_sn65dsi86_read_u8(pdata, SN_IRQ_STATUS_REG, &status);
	if (ret) {
		dev_err(pdata->dev, "Failed to read IRQ status: %d\n", ret);
		return IRQ_NONE;
	}

	hpd_event = status & (HPD_REMOVAL_STATUS | HPD_INSERTION_STATUS);

	dev_dbg(pdata->dev, "(SN_IRQ_STATUS_REG = %#x)\n", status);
	if (!status)
		return IRQ_NONE;

	ret = regmap_write(pdata->regmap, SN_IRQ_STATUS_REG, status);
	if (ret) {
		dev_err(pdata->dev, "Failed to clear IRQ status: %d\n", ret);
		return IRQ_NONE;
	}

	/* Only send the HPD event if we are bound with a device. */
	mutex_lock(&pdata->hpd_mutex);
	if (pdata->hpd_enabled && hpd_event)
		drm_kms_helper_hotplug_event(dev);
	mutex_unlock(&pdata->hpd_mutex);

	return IRQ_HANDLED;
}

static int ti_sn_bridge_probe(struct auxiliary_device *adev,
			      const struct auxiliary_device_id *id)
{

@@ -1931,6 +2032,7 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
	dev_set_drvdata(dev, pdata);
	pdata->dev = dev;

	mutex_init(&pdata->hpd_mutex);
	mutex_init(&pdata->comms_mutex);

	pdata->regmap = devm_regmap_init_i2c(client,
@@ -1971,6 +2073,16 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
	if (strncmp(id_buf, "68ISD ", ARRAY_SIZE(id_buf)))
		return dev_err_probe(dev, -EOPNOTSUPP, "unsupported device id\n");

	if (client->irq) {
		ret = devm_request_threaded_irq(pdata->dev, client->irq, NULL,
						ti_sn_bridge_interrupt,
						IRQF_ONESHOT,
						dev_name(pdata->dev), pdata);

		if (ret)
			return dev_err_probe(dev, ret, "failed to request interrupt\n");
	}

	/*
	 * Break ourselves up into a collection of aux devices. The only real
	 * motivation here is to solve the chicken-and-egg problem of probe
@@ -137,10 +137,9 @@ static void drm_bridge_connector_hpd_notify(struct drm_connector *connector,
{
	struct drm_bridge_connector *bridge_connector =
		to_drm_bridge_connector(connector);
-	struct drm_bridge *bridge;

	/* Notify all bridges in the pipeline of hotplug events. */
-	drm_for_each_bridge_in_chain(bridge_connector->encoder, bridge) {
+	drm_for_each_bridge_in_chain_scoped(bridge_connector->encoder, bridge) {
		if (bridge->funcs->hpd_notify)
			bridge->funcs->hpd_notify(bridge, status);
	}

@@ -639,7 +638,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
	struct drm_bridge_connector *bridge_connector;
	struct drm_connector *connector;
	struct i2c_adapter *ddc = NULL;
-	struct drm_bridge *bridge, *panel_bridge = NULL;
+	struct drm_bridge *panel_bridge = NULL;
	unsigned int supported_formats = BIT(HDMI_COLORSPACE_RGB);
	unsigned int max_bpc = 8;
	bool support_hdcp = false;

@@ -667,7 +666,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
	 * detection are available, we don't support hotplug detection at all.
	 */
	connector_type = DRM_MODE_CONNECTOR_Unknown;
-	drm_for_each_bridge_in_chain(encoder, bridge) {
+	drm_for_each_bridge_in_chain_scoped(encoder, bridge) {
		if (!bridge->interlace_allowed)
			connector->interlace_allowed = false;
		if (!bridge->ycbcr_420_allowed)

@@ -818,7 +817,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,

	if (bridge_connector->bridge_hdmi_cec &&
	    bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER) {
-		bridge = bridge_connector->bridge_hdmi_cec;
+		struct drm_bridge *bridge = bridge_connector->bridge_hdmi_cec;

		ret = drmm_connector_hdmi_cec_notifier_register(connector,
								NULL,

@@ -829,7 +828,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,

	if (bridge_connector->bridge_hdmi_cec &&
	    bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_ADAPTER) {
-		bridge = bridge_connector->bridge_hdmi_cec;
+		struct drm_bridge *bridge = bridge_connector->bridge_hdmi_cec;

		ret = drmm_connector_hdmi_cec_register(connector,
						       &drm_bridge_connector_hdmi_cec_funcs,
@@ -29,6 +29,7 @@
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/seq_file.h>

@@ -123,6 +124,14 @@ bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
}
EXPORT_SYMBOL(drm_dp_clock_recovery_ok);

bool drm_dp_post_lt_adj_req_in_progress(const u8 link_status[DP_LINK_STATUS_SIZE])
{
	u8 lane_align = dp_link_status(link_status, DP_LANE_ALIGN_STATUS_UPDATED);

	return lane_align & DP_POST_LT_ADJ_REQ_IN_PROGRESS;
}
EXPORT_SYMBOL(drm_dp_post_lt_adj_req_in_progress);
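This hunk only adds the status-bit accessor; how a driver consumes it during the POST_LT_ADJ_REQ sequence is not shown here. A hedged sketch of a polling loop (the helper name "example_wait_post_lt_adj" and the 200 ms budget are assumptions, not from this series):

/* Poll until the sink stops requesting post-LT adjustments, then
 * confirm channel equalization still holds on all lanes. */
static int example_wait_post_lt_adj(struct drm_dp_aux *aux, int lane_count)
{
	u8 link_status[DP_LINK_STATUS_SIZE];
	unsigned long timeout = jiffies + msecs_to_jiffies(200);

	do {
		if (drm_dp_dpcd_read_link_status(aux, link_status) < 0)
			return -EIO;

		if (!drm_dp_post_lt_adj_req_in_progress(link_status))
			return drm_dp_channel_eq_ok(link_status, lane_count) ?
			       0 : -EIO;

		usleep_range(1000, 2000);
	} while (time_before(jiffies, timeout));

	return -ETIMEDOUT;
}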
u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
				     int lane)
{
@@ -4128,22 +4137,61 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf
{
	int fxp, fxp_min, fxp_max, fxp_actual, f = 1;
	int ret;
-	u8 pn, pn_min, pn_max;
+	u8 pn, pn_min, pn_max, bit_count;

	if (!bl->aux_set)
		return 0;

-	ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, &pn);
+	ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, &bit_count);
	if (ret < 0) {
		drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap: %d\n",
			    aux->name, ret);
		return -ENODEV;
	}

-	pn &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
+	bit_count &= DP_EDP_PWMGEN_BIT_COUNT_MASK;

	ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min);
	if (ret < 0) {
		drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap min: %d\n",
			    aux->name, ret);
		return -ENODEV;
	}
	pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK;

	ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max);
	if (ret < 0) {
		drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap max: %d\n",
			    aux->name, ret);
		return -ENODEV;
	}
	pn_max &= DP_EDP_PWMGEN_BIT_COUNT_MASK;

	if (unlikely(pn_min > pn_max)) {
		drm_dbg_kms(aux->drm_dev, "%s: Invalid pwmgen bit count cap min/max returned: %d %d\n",
			    aux->name, pn_min, pn_max);
		return -EINVAL;
	}

	/*
	 * Per VESA eDP Spec v1.4b, section 3.3.10.2:
	 * If DP_EDP_PWMGEN_BIT_COUNT is less than DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN,
	 * the sink must use the MIN value as the effective PWM bit count.
	 * Clamp the reported value to the [MIN, MAX] capability range to ensure
	 * correct brightness scaling on compliant eDP panels.
	 * Only enable this logic if the [MIN, MAX] range is valid in regard to Spec.
	 */
	pn = bit_count;
	if (bit_count < pn_min)
		pn = clamp(bit_count, pn_min, pn_max);

	bl->max = (1 << pn) - 1;
-	if (!driver_pwm_freq_hz)
+	if (!driver_pwm_freq_hz) {
+		if (pn != bit_count)
+			goto bit_count_write_back;
+
		return 0;
+	}

	/*
	 * Set PWM Frequency divider to match desired frequency provided by the driver.

@@ -4167,21 +4215,6 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf
	 * - FxP is within 25% of desired value.
	 *   Note: 25% is arbitrary value and may need some tweak.
	 */
-	ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min);
-	if (ret < 0) {
-		drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap min: %d\n",
-			    aux->name, ret);
-		return 0;
-	}
-	ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max);
-	if (ret < 0) {
-		drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap max: %d\n",
-			    aux->name, ret);
-		return 0;
-	}
-	pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
-	pn_max &= DP_EDP_PWMGEN_BIT_COUNT_MASK;

	/* Ensure frequency is within 25% of desired value */
	fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4);
	fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4);

@@ -4199,12 +4232,17 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf
		break;
	}

bit_count_write_back:
	ret = drm_dp_dpcd_write_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, pn);
	if (ret < 0) {
		drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux pwmgen bit count: %d\n",
			    aux->name, ret);
		return 0;
	}

	if (!driver_pwm_freq_hz)
		return 0;

	bl->pwmgen_bit_count = pn;
	bl->max = (1 << pn) - 1;
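A quick worked example of the clamp introduced above: if a hypothetical panel reports DP_EDP_PWMGEN_BIT_COUNT = 8 but advertises a capability range of [11, 16], the effective bit count becomes 11 and the brightness maximum 2047 rather than 255. As a standalone sketch (the function name and values are illustrative):

static unsigned int example_effective_pwm_max(unsigned int bit_count,
					      unsigned int pn_min,
					      unsigned int pn_max)
{
	unsigned int pn = bit_count;

	/* Mirror the patch: only clamp when the sink reports too few bits. */
	if (bit_count < pn_min)
		pn = pn_min;	/* clamp(bit_count, pn_min, pn_max) */

	return (1u << pn) - 1;
}

/* example_effective_pwm_max(8, 11, 16) == 2047, not 255 */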
@@ -1308,7 +1308,6 @@ drm_atomic_add_encoder_bridges(struct drm_atomic_state *state,
			       struct drm_encoder *encoder)
{
	struct drm_bridge_state *bridge_state;
-	struct drm_bridge *bridge;

	if (!encoder)
		return 0;

@@ -1317,7 +1316,7 @@ drm_atomic_add_encoder_bridges(struct drm_atomic_state *state,
		       "Adding all bridges for [encoder:%d:%s] to %p\n",
		       encoder->base.id, encoder->name, state);

-	drm_for_each_bridge_in_chain(encoder, bridge) {
+	drm_for_each_bridge_in_chain_scoped(encoder, bridge) {
		/* Skip bridges that don't implement the atomic state hooks. */
		if (!bridge->funcs->atomic_duplicate_state)
			continue;
@@ -197,15 +197,22 @@
 * driver.
 */

/* Protect bridge_list and bridge_lingering_list */
static DEFINE_MUTEX(bridge_lock);
static LIST_HEAD(bridge_list);
static LIST_HEAD(bridge_lingering_list);

static void __drm_bridge_free(struct kref *kref)
{
	struct drm_bridge *bridge = container_of(kref, struct drm_bridge, refcount);

	mutex_lock(&bridge_lock);
	list_del(&bridge->list);
	mutex_unlock(&bridge_lock);

	if (bridge->funcs->destroy)
		bridge->funcs->destroy(bridge);

	kfree(bridge->container);
}

@@ -273,6 +280,7 @@ void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset,
		return ERR_PTR(-ENOMEM);

	bridge = container + offset;
	INIT_LIST_HEAD(&bridge->list);
	bridge->container = container;
	bridge->funcs = funcs;
	kref_init(&bridge->refcount);

@@ -286,10 +294,13 @@ void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset,
EXPORT_SYMBOL(__devm_drm_bridge_alloc);

/**
- * drm_bridge_add - add the given bridge to the global bridge list
+ * drm_bridge_add - register a bridge
 *
 * @bridge: bridge control structure
 *
 * Add the given bridge to the global list of bridges, where they can be
 * found by users via of_drm_find_bridge().
 *
 * The bridge to be added must have been allocated by
 * devm_drm_bridge_alloc().
 */

@@ -300,6 +311,14 @@ void drm_bridge_add(struct drm_bridge *bridge)

	drm_bridge_get(bridge);

	/*
	 * If the bridge was previously added and then removed, it is now
	 * in bridge_lingering_list. Remove it or bridge_lingering_list will be
	 * corrupted when adding this bridge to bridge_list below.
	 */
	if (!list_empty(&bridge->list))
		list_del_init(&bridge->list);

	mutex_init(&bridge->hpd_mutex);

	if (bridge->ops & DRM_BRIDGE_OP_HDMI)

@@ -336,14 +355,19 @@ int devm_drm_bridge_add(struct device *dev, struct drm_bridge *bridge)
EXPORT_SYMBOL(devm_drm_bridge_add);

/**
- * drm_bridge_remove - remove the given bridge from the global bridge list
+ * drm_bridge_remove - unregister a bridge
 *
 * @bridge: bridge control structure
 *
 * Remove the given bridge from the global list of registered bridges, so
 * it won't be found by users via of_drm_find_bridge(), and add it to the
 * lingering bridge list, to keep track of it until its allocated memory is
 * eventually freed.
 */
void drm_bridge_remove(struct drm_bridge *bridge)
{
	mutex_lock(&bridge_lock);
-	list_del_init(&bridge->list);
+	list_move_tail(&bridge->list, &bridge_lingering_list);
	mutex_unlock(&bridge_lock);

	mutex_destroy(&bridge->hpd_mutex);

@@ -1121,7 +1145,6 @@ drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge,
				      struct drm_atomic_state *state)
{
	struct drm_bridge_state *bridge_state, *next_bridge_state;
-	struct drm_bridge *next_bridge;
	u32 output_flags = 0;

	bridge_state = drm_atomic_get_new_bridge_state(state, bridge);

@@ -1130,7 +1153,7 @@ drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge,
	if (!bridge_state)
		return;

-	next_bridge = drm_bridge_get_next_bridge(bridge);
+	struct drm_bridge *next_bridge __free(drm_bridge_put) = drm_bridge_get_next_bridge(bridge);

	/*
	 * Let's try to apply the most common case here, that is, propagate

@@ -1432,17 +1455,20 @@ EXPORT_SYMBOL(devm_drm_put_bridge);

static void drm_bridge_debugfs_show_bridge(struct drm_printer *p,
					   struct drm_bridge *bridge,
-					   unsigned int idx)
+					   unsigned int idx,
+					   bool lingering)
{
	drm_printf(p, "bridge[%u]: %ps\n", idx, bridge->funcs);

-	drm_printf(p, "\trefcount: %u\n", kref_read(&bridge->refcount));
+	drm_printf(p, "\trefcount: %u%s\n", kref_read(&bridge->refcount),
+		   lingering ? " [lingering]" : "");

	drm_printf(p, "\ttype: [%d] %s\n",
		   bridge->type,
		   drm_get_connector_type_name(bridge->type));

-	if (bridge->of_node)
+	/* The OF node could be freed after drm_bridge_remove() */
+	if (bridge->of_node && !lingering)
		drm_printf(p, "\tOF: %pOFfc\n", bridge->of_node);

	drm_printf(p, "\tops: [0x%x]", bridge->ops);

@@ -1468,7 +1494,10 @@ static int allbridges_show(struct seq_file *m, void *data)
	mutex_lock(&bridge_lock);

	list_for_each_entry(bridge, &bridge_list, list)
-		drm_bridge_debugfs_show_bridge(&p, bridge, idx++);
+		drm_bridge_debugfs_show_bridge(&p, bridge, idx++, false);

+	list_for_each_entry(bridge, &bridge_lingering_list, list)
+		drm_bridge_debugfs_show_bridge(&p, bridge, idx++, true);

	mutex_unlock(&bridge_lock);

@@ -1480,11 +1509,10 @@ static int encoder_bridges_show(struct seq_file *m, void *data)
{
	struct drm_encoder *encoder = m->private;
	struct drm_printer p = drm_seq_file_printer(m);
-	struct drm_bridge *bridge;
	unsigned int idx = 0;

-	drm_for_each_bridge_in_chain(encoder, bridge)
-		drm_bridge_debugfs_show_bridge(&p, bridge, idx++);
+	drm_for_each_bridge_in_chain_scoped(encoder, bridge)
+		drm_bridge_debugfs_show_bridge(&p, bridge, idx++, false);

	return 0;
}
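The conversions to drm_for_each_bridge_in_chain_scoped() throughout this series share one motive: each iteration holds a bridge reference that is dropped automatically at scope exit, so an early break (or a concurrent drm_bridge_remove()) cannot leave a dangling pointer. A hedged usage sketch; the helper name and the HPD filter are illustrative, not from this patch:

static struct drm_bridge *example_first_hpd_bridge(struct drm_encoder *encoder)
{
	/* The scoped macro declares "bridge" and manages its refcount. */
	drm_for_each_bridge_in_chain_scoped(encoder, bridge) {
		if (bridge->ops & DRM_BRIDGE_OP_HPD)
			return drm_bridge_get(bridge);	/* keep our own ref */
	}

	return NULL;
}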
@@ -1293,6 +1293,50 @@ int drm_client_modeset_dpms(struct drm_client_dev *client, int mode)
}
EXPORT_SYMBOL(drm_client_modeset_dpms);

/**
 * drm_client_modeset_wait_for_vblank() - Wait for the next VBLANK to occur
 * @client: DRM client
 * @crtc_index: The index of the CRTC to wait on
 *
 * Block the caller until the given CRTC has seen a VBLANK. Do nothing
 * if the CRTC is disabled. If there's another DRM master present, fail
 * with -EBUSY.
 *
 * Returns:
 * 0 on success, or negative error code otherwise.
 */
int drm_client_modeset_wait_for_vblank(struct drm_client_dev *client, unsigned int crtc_index)
{
	struct drm_device *dev = client->dev;
	struct drm_crtc *crtc;
	int ret;

	/*
	 * Rate-limit update frequency to vblank. If there's a DRM master
	 * present, it could interfere while we're waiting for the vblank
	 * event. Don't wait in this case.
	 */
	if (!drm_master_internal_acquire(dev))
		return -EBUSY;

	crtc = client->modesets[crtc_index].crtc;

	/*
	 * Only wait for a vblank event if the CRTC is enabled, otherwise
	 * just don't do anything, not even report an error.
	 */
	ret = drm_crtc_vblank_get(crtc);
	if (!ret) {
		drm_crtc_wait_one_vblank(crtc);
		drm_crtc_vblank_put(crtc);
	}

	drm_master_internal_release(dev);

	return 0;
}
EXPORT_SYMBOL(drm_client_modeset_wait_for_vblank);
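A hedged sketch of how a DRM client might use the new helper to throttle its flush work; the function name is illustrative, and the drm_fb_helper hunk later in this merge applies the same pattern:

static void example_client_flush(struct drm_client_dev *client)
{
	/*
	 * Rate-limit to the display's refresh before pushing damage.
	 * Errors (e.g. -EBUSY while another master holds the device)
	 * are deliberately ignored: the flush simply runs unthrottled.
	 */
	drm_client_modeset_wait_for_vblank(client, 0);

	/* ... write out the damaged framebuffer region here ... */
}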
#ifdef CONFIG_DRM_KUNIT_TEST
#include "tests/drm_client_modeset_test.c"
#endif
@@ -25,6 +25,8 @@

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_dumb_buffers.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem.h>
#include <drm/drm_mode.h>

@@ -57,6 +59,134 @@
 * a hardware-specific ioctl to allocate suitable buffer objects.
 */

static int drm_mode_align_dumb(struct drm_mode_create_dumb *args,
			       unsigned long hw_pitch_align,
			       unsigned long hw_size_align)
{
	u32 pitch = args->pitch;
	u32 size;

	if (!pitch)
		return -EINVAL;

	if (hw_pitch_align)
		pitch = roundup(pitch, hw_pitch_align);

	if (!hw_size_align)
		hw_size_align = PAGE_SIZE;
	else if (!IS_ALIGNED(hw_size_align, PAGE_SIZE))
		return -EINVAL; /* TODO: handle this if necessary */

	if (check_mul_overflow(args->height, pitch, &size))
		return -EINVAL;
	size = ALIGN(size, hw_size_align);
	if (!size)
		return -EINVAL;

	args->pitch = pitch;
	args->size = size;

	return 0;
}

/**
 * drm_mode_size_dumb - Calculates the scanline and buffer sizes for dumb buffers
 * @dev: DRM device
 * @args: Parameters for the dumb buffer
 * @hw_pitch_align: Hardware scanline alignment in bytes
 * @hw_size_align: Hardware buffer-size alignment in bytes
 *
 * The helper drm_mode_size_dumb() calculates the size of the buffer
 * allocation and the scanline size for a dumb buffer. Callers have to
 * set the buffer's width, height and color mode in the argument @args.
 * The helper validates the correctness of the input and tests for
 * possible overflows. If successful, it returns the dumb buffer's
 * required scanline pitch and size in &args.
 *
 * The parameter @hw_pitch_align allows the driver to specify an
 * alignment for the scanline pitch, if the hardware requires any. The
 * calculated pitch will be a multiple of the alignment. The parameter
 * @hw_size_align allows the driver to specify an alignment for buffer
 * sizes. The provided alignment should represent requirements of the
 * graphics hardware. drm_mode_size_dumb() handles GEM-related
 * constraints automatically across all drivers and hardware. For
 * example, the returned buffer size is always a multiple of PAGE_SIZE,
 * which is required by mmap().
 *
 * Returns:
 * Zero on success, or a negative error code otherwise.
 */
int drm_mode_size_dumb(struct drm_device *dev,
		       struct drm_mode_create_dumb *args,
		       unsigned long hw_pitch_align,
		       unsigned long hw_size_align)
{
	u64 pitch = 0;
	u32 fourcc;

	/*
	 * The scanline pitch depends on the buffer width and the color
	 * format. The latter is specified as a color-mode constant for
	 * which we first have to find the corresponding color format.
	 *
	 * Different color formats can have the same color-mode constant.
	 * For example XRGB8888 and BGRX8888 both have a color mode of 32.
	 * It is possible to use different formats for dumb-buffer allocation
	 * and rendering as long as all involved formats share the same
	 * color-mode constant.
	 */
	fourcc = drm_driver_color_mode_format(dev, args->bpp);
	if (fourcc != DRM_FORMAT_INVALID) {
		const struct drm_format_info *info = drm_format_info(fourcc);

		if (!info)
			return -EINVAL;
		pitch = drm_format_info_min_pitch(info, 0, args->width);
	} else if (args->bpp) {
		/*
		 * Some userspace throws in arbitrary values for bpp and
		 * relies on the kernel to figure it out. In this case we
		 * fall back to the old method of using bpp directly. The
		 * over-commitment of memory from the rounding is acceptable
		 * for compatibility with legacy userspace. We have a number
		 * of deprecated legacy values that are explicitly supported.
		 */
		switch (args->bpp) {
		default:
			drm_warn_once(dev,
				      "Unknown color mode %u; guessing buffer size.\n",
				      args->bpp);
			fallthrough;
		/*
		 * These constants represent various YUV formats supported by
		 * drm_gem_afbc_get_bpp().
		 */
		case 12: // DRM_FORMAT_YUV420_8BIT
		case 15: // DRM_FORMAT_YUV420_10BIT
		case 30: // DRM_FORMAT_VUY101010
			fallthrough;
		/*
		 * Used by Mesa and Gstreamer to allocate NV formats and others
		 * as RGB buffers. Technically, XRGB16161616F formats are RGB,
		 * but the dumb buffers are not supposed to be used for anything
		 * beyond 32 bits per pixels.
		 */
		case 10: // DRM_FORMAT_NV{15,20,30}, DRM_FORMAT_P010
		case 64: // DRM_FORMAT_{XRGB,XBGR,ARGB,ABGR}16161616F
			pitch = args->width * DIV_ROUND_UP(args->bpp, SZ_8);
			break;
		}
	}

	if (!pitch || pitch > U32_MAX)
		return -EINVAL;

	args->pitch = pitch;

	return drm_mode_align_dumb(args, hw_pitch_align, hw_size_align);
}
EXPORT_SYMBOL(drm_mode_size_dumb);
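For a feel of the numbers: a 1920x1080 buffer with a color mode of 32 yields a pitch of 7680 bytes and a size of 8294400 bytes, which happens to be page-aligned already. A hedged sketch of a driver-side .dumb_create callback using the helper; the function name and the 64-byte pitch alignment are assumptions, and the drm_gem_dma hunk at the end of this merge shows a real conversion:

static int example_dumb_create(struct drm_file *file,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	int ret;

	/*
	 * Fills args->pitch and args->size; e.g. 1920x1080 at bpp = 32
	 * gives pitch = 7680 and size = 8294400 (a PAGE_SIZE multiple).
	 */
	ret = drm_mode_size_dumb(dev, args, SZ_64, 0);
	if (ret)
		return ret;

	/* ... allocate a GEM object of args->size and set args->handle ... */
	return 0;
}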
int drm_mode_create_dumb(struct drm_device *dev,
			 struct drm_mode_create_dumb *args,
			 struct drm_file *file_priv)

@@ -99,7 +229,30 @@ int drm_mode_create_dumb(struct drm_device *dev,
int drm_mode_create_dumb_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file_priv)
{
-	return drm_mode_create_dumb(dev, data, file_priv);
+	struct drm_mode_create_dumb *args = data;
+	int err;
+
+	err = drm_mode_create_dumb(dev, args, file_priv);
+	if (err) {
+		args->handle = 0;
+		args->pitch = 0;
+		args->size = 0;
+	}
+	return err;
}

static int drm_mode_mmap_dumb(struct drm_device *dev, struct drm_mode_map_dumb *args,
			      struct drm_file *file_priv)
{
	if (!dev->driver->dumb_create)
		return -ENOSYS;

	if (dev->driver->dumb_map_offset)
		return dev->driver->dumb_map_offset(file_priv, dev, args->handle,
						    &args->offset);
	else
		return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
					       &args->offset);
}

/**

@@ -120,17 +273,12 @@ int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
			     void *data, struct drm_file *file_priv)
{
	struct drm_mode_map_dumb *args = data;
	int err;

-	if (!dev->driver->dumb_create)
-		return -ENOSYS;
-
-	if (dev->driver->dumb_map_offset)
-		return dev->driver->dumb_map_offset(file_priv, dev,
-						    args->handle,
-						    &args->offset);
-	else
-		return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
-					       &args->offset);
+	err = drm_mode_mmap_dumb(dev, args, file_priv);
+	if (err)
+		args->offset = 0;
+	return err;
}

int drm_mode_destroy_dumb(struct drm_device *dev, u32 handle,
@@ -368,6 +368,10 @@ static void drm_fb_helper_fb_dirty(struct drm_fb_helper *helper)
	unsigned long flags;
	int ret;

	mutex_lock(&helper->lock);
	drm_client_modeset_wait_for_vblank(&helper->client, 0);
	mutex_unlock(&helper->lock);

	if (drm_WARN_ON_ONCE(dev, !helper->funcs->fb_dirty))
		return;

@@ -1068,15 +1072,9 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
			unsigned long arg)
{
	struct drm_fb_helper *fb_helper = info->par;
-	struct drm_device *dev = fb_helper->dev;
-	struct drm_crtc *crtc;
	int ret = 0;

-	mutex_lock(&fb_helper->lock);
-	if (!drm_master_internal_acquire(dev)) {
-		ret = -EBUSY;
-		goto unlock;
-	}
+	guard(mutex)(&fb_helper->lock);

	switch (cmd) {
	case FBIO_WAITFORVSYNC:

@@ -1096,28 +1094,12 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
		 * make. If we're not smart enough here, one should
		 * just consider switching the userspace to KMS.
		 */
-		crtc = fb_helper->client.modesets[0].crtc;
-
-		/*
-		 * Only wait for a vblank event if the CRTC is
-		 * enabled, otherwise just don't do anything,
-		 * not even report an error.
-		 */
-		ret = drm_crtc_vblank_get(crtc);
-		if (!ret) {
-			drm_crtc_wait_one_vblank(crtc);
-			drm_crtc_vblank_put(crtc);
-		}
-
-		ret = 0;
+		ret = drm_client_modeset_wait_for_vblank(&fb_helper->client, 0);
		break;
	default:
		ret = -ENOTTY;
	}

-	drm_master_internal_release(dev);
-unlock:
-	mutex_unlock(&fb_helper->lock);
	return ret;
}
EXPORT_SYMBOL(drm_fb_helper_ioctl);
@@ -1165,97 +1165,6 @@ void drm_fb_argb8888_to_argb4444(struct iosys_map *dst, const unsigned int *dst_
 }
 EXPORT_SYMBOL(drm_fb_argb8888_to_argb4444);
 
-/**
- * drm_fb_blit - Copy parts of a framebuffer to display memory
- * @dst: Array of display-memory addresses to copy to
- * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
- *             within @dst; can be NULL if scanlines are stored next to each other.
- * @dst_format: FOURCC code of the display's color format
- * @src: The framebuffer memory to copy from
- * @fb: The framebuffer to copy from
- * @clip: Clip rectangle area to copy
- * @state: Transform and conversion state
- *
- * This function copies parts of a framebuffer to display memory. If the
- * formats of the display and the framebuffer mismatch, the blit function
- * will attempt to convert between them during the process. The parameters @dst,
- * @dst_pitch and @src refer to arrays. Each array must have at least as many
- * entries as there are planes in @dst_format's format. Each entry stores the
- * value for the format's respective color plane at the same index.
- *
- * This function does not apply clipping on @dst (i.e. the destination is at the
- * top-left corner).
- *
- * Returns:
- * 0 on success, or
- * -EINVAL if the color-format conversion failed, or
- * a negative error code otherwise.
- */
-int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t dst_format,
-                const struct iosys_map *src, const struct drm_framebuffer *fb,
-                const struct drm_rect *clip, struct drm_format_conv_state *state)
-{
-    uint32_t fb_format = fb->format->format;
-
-    if (fb_format == dst_format) {
-        drm_fb_memcpy(dst, dst_pitch, src, fb, clip);
-        return 0;
-    } else if (fb_format == (dst_format | DRM_FORMAT_BIG_ENDIAN)) {
-        drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state);
-        return 0;
-    } else if (fb_format == (dst_format & ~DRM_FORMAT_BIG_ENDIAN)) {
-        drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state);
-        return 0;
-    } else if (fb_format == DRM_FORMAT_XRGB8888) {
-        if (dst_format == DRM_FORMAT_RGB565) {
-            drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip, state);
-            return 0;
-        } else if (dst_format == DRM_FORMAT_XRGB1555) {
-            drm_fb_xrgb8888_to_xrgb1555(dst, dst_pitch, src, fb, clip, state);
-            return 0;
-        } else if (dst_format == DRM_FORMAT_ARGB1555) {
-            drm_fb_xrgb8888_to_argb1555(dst, dst_pitch, src, fb, clip, state);
-            return 0;
-        } else if (dst_format == DRM_FORMAT_RGBA5551) {
-            drm_fb_xrgb8888_to_rgba5551(dst, dst_pitch, src, fb, clip, state);
-            return 0;
-        } else if (dst_format == DRM_FORMAT_RGB888) {
-            drm_fb_xrgb8888_to_rgb888(dst, dst_pitch, src, fb, clip, state);
-            return 0;
-        } else if (dst_format == DRM_FORMAT_BGR888) {
-            drm_fb_xrgb8888_to_bgr888(dst, dst_pitch, src, fb, clip, state);
-            return 0;
-        } else if (dst_format == DRM_FORMAT_ARGB8888) {
-            drm_fb_xrgb8888_to_argb8888(dst, dst_pitch, src, fb, clip, state);
-            return 0;
-        } else if (dst_format == DRM_FORMAT_XBGR8888) {
-            drm_fb_xrgb8888_to_xbgr8888(dst, dst_pitch, src, fb, clip, state);
-            return 0;
-        } else if (dst_format == DRM_FORMAT_ABGR8888) {
-            drm_fb_xrgb8888_to_abgr8888(dst, dst_pitch, src, fb, clip, state);
-            return 0;
-        } else if (dst_format == DRM_FORMAT_XRGB2101010) {
-            drm_fb_xrgb8888_to_xrgb2101010(dst, dst_pitch, src, fb, clip, state);
-            return 0;
-        } else if (dst_format == DRM_FORMAT_ARGB2101010) {
-            drm_fb_xrgb8888_to_argb2101010(dst, dst_pitch, src, fb, clip, state);
-            return 0;
-        } else if (dst_format == DRM_FORMAT_BGRX8888) {
-            drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state);
-            return 0;
-        } else if (dst_format == DRM_FORMAT_RGB332) {
-            drm_fb_xrgb8888_to_rgb332(dst, dst_pitch, src, fb, clip, state);
-            return 0;
-        }
-    }
-
-    drm_warn_once(fb->dev, "No conversion helper from %p4cc to %p4cc found.\n",
-                  &fb_format, &dst_format);
-
-    return -EINVAL;
-}
-EXPORT_SYMBOL(drm_fb_blit);
-
 static void drm_fb_gray8_to_gray2_line(void *dbuf, const void *sbuf, unsigned int pixels)
 {
     u8 *dbuf8 = dbuf;
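With drm_fb_blit() gone, callers pick the specific conversion helper themselves. A minimal sketch, assuming a single-plane XRGB8888-to-RGB565 update along the lines of the removed dispatcher; the function name and buffer plumbing here are illustrative only, not part of this series:

/* Illustrative caller: convert one dirty rectangle from an XRGB8888
 * framebuffer into RGB565 display memory. As in the removed dispatcher,
 * dst and dst_pitch are per-plane arrays; RGB565 has a single plane.
 */
static void example_flush_rect(struct iosys_map *dst, unsigned int dst_pitch,
                               const struct iosys_map *src,
                               const struct drm_framebuffer *fb,
                               const struct drm_rect *clip,
                               struct drm_format_conv_state *state)
{
    unsigned int pitches[DRM_FORMAT_MAX_PLANES] = { dst_pitch };

    drm_fb_xrgb8888_to_rgb565(dst, pitches, src, fb, clip, state);
}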
@@ -101,10 +101,8 @@ drm_gem_init(struct drm_device *dev)
 
     vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
                                       GFP_KERNEL);
-    if (!vma_offset_manager) {
-        DRM_ERROR("out of memory\n");
+    if (!vma_offset_manager)
         return -ENOMEM;
-    }
 
     dev->vma_offset_manager = vma_offset_manager;
     drm_vma_offset_manager_init(vma_offset_manager,
@@ -785,9 +783,10 @@ static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
 int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
                            int count, struct drm_gem_object ***objs_out)
 {
-    int ret;
-    u32 *handles;
+    struct drm_device *dev = filp->minor->dev;
     struct drm_gem_object **objs;
+    u32 *handles;
+    int ret;
 
     if (!count)
         return 0;
@@ -807,7 +806,7 @@ int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
 
     if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
         ret = -EFAULT;
-        DRM_DEBUG("Failed to copy in GEM handles\n");
+        drm_dbg_core(dev, "Failed to copy in GEM handles\n");
         goto out;
     }
 
@@ -855,12 +854,13 @@ EXPORT_SYMBOL(drm_gem_object_lookup);
 long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
                            bool wait_all, unsigned long timeout)
 {
-    long ret;
+    struct drm_device *dev = filep->minor->dev;
     struct drm_gem_object *obj;
+    long ret;
 
     obj = drm_gem_object_lookup(filep, handle);
     if (!obj) {
-        DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
+        drm_dbg_core(dev, "Failed to look up GEM BO %d\n", handle);
         return -EINVAL;
     }
 
@@ -20,6 +20,7 @@
 #include <drm/drm.h>
 #include <drm/drm_device.h>
 #include <drm/drm_drv.h>
+#include <drm/drm_dumb_buffers.h>
 #include <drm/drm_gem_dma_helper.h>
 #include <drm/drm_vma_manager.h>
 
@@ -304,9 +305,11 @@ int drm_gem_dma_dumb_create(struct drm_file *file_priv,
                             struct drm_mode_create_dumb *args)
 {
     struct drm_gem_dma_object *dma_obj;
+    int ret;
 
-    args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
-    args->size = args->pitch * args->height;
+    ret = drm_mode_size_dumb(drm, args, SZ_8, 0);
+    if (ret)
+        return ret;
 
     dma_obj = drm_gem_dma_create_with_handle(file_priv, drm, args->size,
                                              &args->handle);
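For readers unfamiliar with the new helper: drm_mode_size_dumb(dev, args, pitch_align, size_align) centralizes the pitch and size computation that each driver used to open-code, including overflow checking. A rough sketch of the arithmetic, assuming a linear single-plane layout; this is an illustration, not the helper's actual body:

static int example_size_dumb(struct drm_mode_create_dumb *args,
                             unsigned long pitch_align)
{
    u32 pitch = DIV_ROUND_UP(args->width * args->bpp, 8); /* bytes per scanline */

    if (pitch_align)
        pitch = ALIGN(pitch, pitch_align); /* e.g. SZ_8 or SZ_256 in the hunks here */

    args->pitch = pitch;
    args->size = PAGE_ALIGN((u64)pitch * args->height);

    return 0;
}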
@@ -582,7 +585,7 @@ drm_gem_dma_prime_import_sg_table_vmap(struct drm_device *dev,
 
     ret = dma_buf_vmap_unlocked(attach->dmabuf, &map);
     if (ret) {
-        DRM_ERROR("Failed to vmap PRIME buffer\n");
+        drm_err(dev, "Failed to vmap PRIME buffer\n");
         return ERR_PTR(ret);
     }
 
@@ -18,6 +18,7 @@
 #include <drm/drm.h>
 #include <drm/drm_device.h>
 #include <drm/drm_drv.h>
+#include <drm/drm_dumb_buffers.h>
 #include <drm/drm_gem_shmem_helper.h>
 #include <drm/drm_prime.h>
 #include <drm/drm_print.h>
@@ -48,6 +49,64 @@ static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
     .vm_ops = &drm_gem_shmem_vm_ops,
 };
 
+static int __drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem,
+                                size_t size, bool private, struct vfsmount *gemfs)
+{
+    struct drm_gem_object *obj = &shmem->base;
+    int ret = 0;
+
+    if (!obj->funcs)
+        obj->funcs = &drm_gem_shmem_funcs;
+
+    if (private) {
+        drm_gem_private_object_init(dev, obj, size);
+        shmem->map_wc = false; /* dma-buf mappings use always writecombine */
+    } else {
+        ret = drm_gem_object_init_with_mnt(dev, obj, size, gemfs);
+    }
+    if (ret) {
+        drm_gem_private_object_fini(obj);
+        return ret;
+    }
+
+    ret = drm_gem_create_mmap_offset(obj);
+    if (ret)
+        goto err_release;
+
+    INIT_LIST_HEAD(&shmem->madv_list);
+
+    if (!private) {
+        /*
+         * Our buffers are kept pinned, so allocating them
+         * from the MOVABLE zone is a really bad idea, and
+         * conflicts with CMA. See comments above new_inode()
+         * why this is required _and_ expected if you're
+         * going to pin these pages.
+         */
+        mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
+                             __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+    }
+
+    return 0;
+
+err_release:
+    drm_gem_object_release(obj);
+    return ret;
+}
+
+/**
+ * drm_gem_shmem_init - Initialize an allocated object.
+ * @dev: DRM device
+ * @obj: The allocated shmem GEM object.
+ *
+ * Returns:
+ * 0 on success, or a negative error code on failure.
+ */
+int drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, size_t size)
+{
+    return __drm_gem_shmem_init(dev, shmem, size, false, NULL);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_init);
+
 static struct drm_gem_shmem_object *
 __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private,
                        struct vfsmount *gemfs)
@@ -70,46 +129,13 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private,
         obj = &shmem->base;
     }
 
-    if (!obj->funcs)
-        obj->funcs = &drm_gem_shmem_funcs;
-
-    if (private) {
-        drm_gem_private_object_init(dev, obj, size);
-        shmem->map_wc = false; /* dma-buf mappings use always writecombine */
-    } else {
-        ret = drm_gem_object_init_with_mnt(dev, obj, size, gemfs);
-    }
+    ret = __drm_gem_shmem_init(dev, shmem, size, private, gemfs);
     if (ret) {
-        drm_gem_private_object_fini(obj);
-        goto err_free;
-    }
-
-    ret = drm_gem_create_mmap_offset(obj);
-    if (ret)
-        goto err_release;
-
-    INIT_LIST_HEAD(&shmem->madv_list);
-
-    if (!private) {
-        /*
-         * Our buffers are kept pinned, so allocating them
-         * from the MOVABLE zone is a really bad idea, and
-         * conflicts with CMA. See comments above new_inode()
-         * why this is required _and_ expected if you're
-         * going to pin these pages.
-         */
-        mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
-                             __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+        kfree(obj);
+        return ERR_PTR(ret);
     }
 
     return shmem;
-
-err_release:
-    drm_gem_object_release(obj);
-err_free:
-    kfree(obj);
-
-    return ERR_PTR(ret);
 }
 
 /**
  * drm_gem_shmem_create - Allocate an object with the given size
@@ -150,13 +176,13 @@ struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *de
 EXPORT_SYMBOL_GPL(drm_gem_shmem_create_with_mnt);
 
 /**
- * drm_gem_shmem_free - Free resources associated with a shmem GEM object
- * @shmem: shmem GEM object to free
+ * drm_gem_shmem_release - Release resources associated with a shmem GEM object.
+ * @shmem: shmem GEM object
  *
- * This function cleans up the GEM object state and frees the memory used to
- * store the object itself.
+ * This function cleans up the GEM object state, but does not free the memory used to store the
+ * object itself. This function is meant to be a dedicated helper for the Rust GEM bindings.
  */
-void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
+void drm_gem_shmem_release(struct drm_gem_shmem_object *shmem)
 {
     struct drm_gem_object *obj = &shmem->base;
 
@@ -183,6 +209,19 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
     }
 
     drm_gem_object_release(obj);
 }
+EXPORT_SYMBOL_GPL(drm_gem_shmem_release);
+
+/**
+ * drm_gem_shmem_free - Free resources associated with a shmem GEM object
+ * @shmem: shmem GEM object to free
+ *
+ * This function cleans up the GEM object state and frees the memory used to
+ * store the object itself.
+ */
+void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
+{
+    drm_gem_shmem_release(shmem);
+    kfree(shmem);
+}
 EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
 
@@ -518,18 +557,11 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_purge_locked);
 int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
                               struct drm_mode_create_dumb *args)
 {
-    u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+    int ret;
 
-    if (!args->pitch || !args->size) {
-        args->pitch = min_pitch;
-        args->size = PAGE_ALIGN(args->pitch * args->height);
-    } else {
-        /* ensure sane minimum values */
-        if (args->pitch < min_pitch)
-            args->pitch = min_pitch;
-        if (args->size < args->pitch * args->height)
-            args->size = PAGE_ALIGN(args->pitch * args->height);
-    }
+    ret = drm_mode_size_dumb(dev, args, SZ_8, 0);
+    if (ret)
+        return ret;
 
     return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
 }
@@ -107,7 +107,7 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
 
 static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo)
 {
-    /* We got here via ttm_bo_put(), which means that the
+    /* We got here via ttm_bo_fini(), which means that the
      * TTM buffer object in 'bo' has already been cleaned
      * up; only release the GEM object.
      */
@@ -234,11 +234,11 @@ EXPORT_SYMBOL(drm_gem_vram_create);
  * drm_gem_vram_put() - Releases a reference to a VRAM-backed GEM object
  * @gbo: the GEM VRAM object
  *
- * See ttm_bo_put() for more information.
+ * See ttm_bo_fini() for more information.
  */
 void drm_gem_vram_put(struct drm_gem_vram_object *gbo)
 {
-    ttm_bo_put(&gbo->bo);
+    ttm_bo_fini(&gbo->bo);
 }
 EXPORT_SYMBOL(drm_gem_vram_put);
 
@@ -136,8 +136,17 @@
  * vblanks after a timer has expired, which can be configured through the
  * ``vblankoffdelay`` module parameter.
  *
- * Drivers for hardware without support for vertical-blanking interrupts
- * must not call drm_vblank_init(). For such drivers, atomic helpers will
+ * Drivers for hardware without support for vertical-blanking interrupts can
+ * use DRM vblank timers to send vblank events at the rate of the current
+ * display mode's refresh. While not synchronized to the hardware's
+ * vertical-blanking regions, the timer helps DRM clients and compositors to
+ * adapt their update cycle to the display output. Drivers should set up
+ * vblanking as usual, but call drm_crtc_vblank_start_timer() and
+ * drm_crtc_vblank_cancel_timer() as part of their atomic mode setting.
+ * See also DRM vblank helpers for more information.
+ *
+ * Drivers without support for vertical-blanking interrupts nor timers must
+ * not call drm_vblank_init(). For these drivers, atomic helpers will
  * automatically generate fake vblank events as part of the display update.
  * This functionality also can be controlled by the driver by enabling and
  * disabling struct drm_crtc_state.no_vblank.
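A minimal sketch of the wiring this paragraph describes, with a hypothetical driver backing its vblank hooks by the timer helpers introduced below:

/* Hypothetical driver glue for hardware without a vblank interrupt:
 * the CRTC's enable_vblank/disable_vblank callbacks (struct
 * drm_crtc_funcs) defer to the simulated-vblank timer.
 */
static int foo_crtc_enable_vblank(struct drm_crtc *crtc)
{
    return drm_crtc_vblank_start_timer(crtc);
}

static void foo_crtc_disable_vblank(struct drm_crtc *crtc)
{
    drm_crtc_vblank_cancel_timer(crtc);
}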
@@ -508,6 +517,9 @@ static void drm_vblank_init_release(struct drm_device *dev, void *ptr)
     drm_WARN_ON(dev, READ_ONCE(vblank->enabled) &&
                 drm_core_check_feature(dev, DRIVER_MODESET));
 
+    if (vblank->vblank_timer.crtc)
+        hrtimer_cancel(&vblank->vblank_timer.timer);
+
     drm_vblank_destroy_worker(vblank);
     timer_delete_sync(&vblank->disable_timer);
 }
@@ -2162,3 +2174,159 @@ err_free:
     return ret;
 }
+
+/*
+ * VBLANK timer
+ */
+
+static enum hrtimer_restart drm_vblank_timer_function(struct hrtimer *timer)
+{
+    struct drm_vblank_crtc_timer *vtimer =
+        container_of(timer, struct drm_vblank_crtc_timer, timer);
+    struct drm_crtc *crtc = vtimer->crtc;
+    const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+    struct drm_device *dev = crtc->dev;
+    unsigned long flags;
+    ktime_t interval;
+    u64 ret_overrun;
+    bool succ;
+
+    spin_lock_irqsave(&vtimer->interval_lock, flags);
+    interval = vtimer->interval;
+    spin_unlock_irqrestore(&vtimer->interval_lock, flags);
+
+    if (!interval)
+        return HRTIMER_NORESTART;
+
+    ret_overrun = hrtimer_forward_now(&vtimer->timer, interval);
+    if (ret_overrun != 1)
+        drm_dbg_vbl(dev, "vblank timer overrun\n");
+
+    if (crtc_funcs->handle_vblank_timeout)
+        succ = crtc_funcs->handle_vblank_timeout(crtc);
+    else
+        succ = drm_crtc_handle_vblank(crtc);
+    if (!succ)
+        return HRTIMER_NORESTART;
+
+    return HRTIMER_RESTART;
+}
+
+/**
+ * drm_crtc_vblank_start_timer - Starts the vblank timer on the given CRTC
+ * @crtc: the CRTC
+ *
+ * Drivers should call this function from their CRTC's enable_vblank
+ * function to start a vblank timer. The timer will fire after the duration
+ * of a full frame. drm_crtc_vblank_cancel_timer() disables a running timer.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise.
+ */
+int drm_crtc_vblank_start_timer(struct drm_crtc *crtc)
+{
+    struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
+    struct drm_vblank_crtc_timer *vtimer = &vblank->vblank_timer;
+    unsigned long flags;
+
+    if (!vtimer->crtc) {
+        /*
+         * Set up the data structures on the first invocation.
+         */
+        vtimer->crtc = crtc;
+        spin_lock_init(&vtimer->interval_lock);
+        hrtimer_setup(&vtimer->timer, drm_vblank_timer_function,
+                      CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+    } else {
+        /*
+         * Timer should not be active. If it is, wait for the
+         * previous cancel operations to finish.
+         */
+        while (hrtimer_active(&vtimer->timer))
+            hrtimer_try_to_cancel(&vtimer->timer);
+    }
+
+    drm_calc_timestamping_constants(crtc, &crtc->mode);
+
+    spin_lock_irqsave(&vtimer->interval_lock, flags);
+    vtimer->interval = ns_to_ktime(vblank->framedur_ns);
+    spin_unlock_irqrestore(&vtimer->interval_lock, flags);
+
+    hrtimer_start(&vtimer->timer, vtimer->interval, HRTIMER_MODE_REL);
+
+    return 0;
+}
+EXPORT_SYMBOL(drm_crtc_vblank_start_timer);
+
+/**
+ * drm_crtc_vblank_cancel_timer - Cancels the given CRTC's vblank timer
+ * @crtc: the CRTC
+ *
+ * Drivers should call this function from their CRTC's disable_vblank
+ * function to stop a vblank timer.
+ */
+void drm_crtc_vblank_cancel_timer(struct drm_crtc *crtc)
+{
+    struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
+    struct drm_vblank_crtc_timer *vtimer = &vblank->vblank_timer;
+    unsigned long flags;
+
+    /*
+     * Calling hrtimer_cancel() can result in a deadlock with DRM's
+     * vblank_time_lock and hrtimers' softirq_expiry_lock. So
+     * clear interval and indicate cancellation. The timer function
+     * will cancel itself on the next invocation.
+     */
+
+    spin_lock_irqsave(&vtimer->interval_lock, flags);
+    vtimer->interval = 0;
+    spin_unlock_irqrestore(&vtimer->interval_lock, flags);
+
+    hrtimer_try_to_cancel(&vtimer->timer);
+}
+EXPORT_SYMBOL(drm_crtc_vblank_cancel_timer);
+
+/**
+ * drm_crtc_vblank_get_vblank_timeout - Returns the vblank timeout
+ * @crtc: The CRTC
+ * @vblank_time: Returns the next vblank timestamp
+ *
+ * The helper drm_crtc_vblank_get_vblank_timeout() returns the next vblank
+ * timestamp of the CRTC's vblank timer according to the timer's expiry
+ * time.
+ */
+void drm_crtc_vblank_get_vblank_timeout(struct drm_crtc *crtc, ktime_t *vblank_time)
+{
+    struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
+    struct drm_vblank_crtc_timer *vtimer = &vblank->vblank_timer;
+    u64 cur_count;
+    ktime_t cur_time;
+
+    if (!READ_ONCE(vblank->enabled)) {
+        *vblank_time = ktime_get();
+        return;
+    }
+
+    /*
+     * A concurrent vblank timeout could update the expires field before
+     * we compare it with the vblank time. Hence we'd compare the old
+     * expiry time to the new vblank time, deducing the timer had already
+     * expired. Reread until we get consistent values from both fields.
+     */
+    do {
+        cur_count = drm_crtc_vblank_count_and_time(crtc, &cur_time);
+        *vblank_time = READ_ONCE(vtimer->timer.node.expires);
+    } while (cur_count != drm_crtc_vblank_count_and_time(crtc, &cur_time));
+
+    if (drm_WARN_ON(crtc->dev, !ktime_compare(*vblank_time, cur_time)))
+        return; /* Already expired */
+
+    /*
+     * To prevent races we roll the hrtimer forward before we do any
+     * interrupt processing - this is how real hw works (the interrupt
+     * is only generated after all the vblank registers are updated)
+     * and what the vblank core expects. Therefore we need to always
+     * correct the timestamp by one frame.
+     */
+    *vblank_time = ktime_sub(*vblank_time, vtimer->interval);
+}
+EXPORT_SYMBOL(drm_crtc_vblank_get_vblank_timeout);
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: MIT
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_vblank_helper.h>
+
+/**
+ * DOC: overview
+ *
+ * The vblank helper library provides functions for supporting vertical
+ * blanking in DRM drivers.
+ *
+ * For vblank timers, several callback implementations are available.
+ * Drivers enable support for vblank timers by setting the vblank callbacks
+ * in struct &drm_crtc_funcs to the helpers provided by this library. The
+ * initializer macro DRM_CRTC_VBLANK_TIMER_FUNCS does this conveniently.
+ * The driver further has to send the VBLANK event from its atomic_flush
+ * callback and control vblank from the CRTC's atomic_enable and atomic_disable
+ * callbacks. The callbacks are located in struct &drm_crtc_helper_funcs.
+ * The vblank helper library provides implementations of these callbacks
+ * for drivers without further requirements. The initializer macro
+ * DRM_CRTC_HELPER_VBLANK_FUNCS sets them conveniently.
+ *
+ * Once the driver enables vblank support with drm_vblank_init(), each
+ * CRTC's vblank timer fires according to the programmed display mode. By
+ * default, the vblank timer invokes drm_crtc_handle_vblank(). Drivers with
+ * more specific requirements can set their own handler function in
+ * struct &drm_crtc_helper_funcs.handle_vblank_timeout.
+ */
+
+/*
+ * VBLANK helpers
+ */
+
+/**
+ * drm_crtc_vblank_atomic_flush -
+ *     Implements struct &drm_crtc_helper_funcs.atomic_flush
+ * @crtc: The CRTC
+ * @state: The atomic state to apply
+ *
+ * The helper drm_crtc_vblank_atomic_flush() implements atomic_flush of
+ * struct drm_crtc_helper_funcs for CRTCs that only need to send out a
+ * VBLANK event.
+ *
+ * See also struct &drm_crtc_helper_funcs.atomic_flush.
+ */
+void drm_crtc_vblank_atomic_flush(struct drm_crtc *crtc,
+                                  struct drm_atomic_state *state)
+{
+    struct drm_device *dev = crtc->dev;
+    struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+    struct drm_pending_vblank_event *event;
+
+    spin_lock_irq(&dev->event_lock);
+
+    event = crtc_state->event;
+    crtc_state->event = NULL;
+
+    if (event) {
+        if (drm_crtc_vblank_get(crtc) == 0)
+            drm_crtc_arm_vblank_event(crtc, event);
+        else
+            drm_crtc_send_vblank_event(crtc, event);
+    }
+
+    spin_unlock_irq(&dev->event_lock);
+}
+EXPORT_SYMBOL(drm_crtc_vblank_atomic_flush);
+
+/**
+ * drm_crtc_vblank_atomic_enable - Implements struct &drm_crtc_helper_funcs.atomic_enable
+ * @crtc: The CRTC
+ * @state: The atomic state
+ *
+ * The helper drm_crtc_vblank_atomic_enable() implements atomic_enable
+ * of struct drm_crtc_helper_funcs for CRTCs that only need to enable VBLANKs.
+ *
+ * See also struct &drm_crtc_helper_funcs.atomic_enable.
+ */
+void drm_crtc_vblank_atomic_enable(struct drm_crtc *crtc,
+                                   struct drm_atomic_state *state)
+{
+    drm_crtc_vblank_on(crtc);
+}
+EXPORT_SYMBOL(drm_crtc_vblank_atomic_enable);
+
+/**
+ * drm_crtc_vblank_atomic_disable - Implements struct &drm_crtc_helper_funcs.atomic_disable
+ * @crtc: The CRTC
+ * @state: The atomic state
+ *
+ * The helper drm_crtc_vblank_atomic_disable() implements atomic_disable
+ * of struct drm_crtc_helper_funcs for CRTCs that only need to disable VBLANKs.
+ *
+ * See also struct &drm_crtc_helper_funcs.atomic_disable.
+ */
+void drm_crtc_vblank_atomic_disable(struct drm_crtc *crtc,
+                                    struct drm_atomic_state *state)
+{
+    drm_crtc_vblank_off(crtc);
+}
+EXPORT_SYMBOL(drm_crtc_vblank_atomic_disable);
+
+/*
+ * VBLANK timer
+ */
+
+/**
+ * drm_crtc_vblank_helper_enable_vblank_timer - Implements struct &drm_crtc_funcs.enable_vblank
+ * @crtc: The CRTC
+ *
+ * The helper drm_crtc_vblank_helper_enable_vblank_timer() implements
+ * enable_vblank of struct drm_crtc_funcs for CRTCs that require
+ * a VBLANK timer. It sets up the timer on the first invocation. The
+ * started timer expires after the current frame duration. See struct
+ * &drm_vblank_crtc.framedur_ns.
+ *
+ * See also struct &drm_crtc_funcs.enable_vblank.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise.
+ */
+int drm_crtc_vblank_helper_enable_vblank_timer(struct drm_crtc *crtc)
+{
+    return drm_crtc_vblank_start_timer(crtc);
+}
+EXPORT_SYMBOL(drm_crtc_vblank_helper_enable_vblank_timer);
+
+/**
+ * drm_crtc_vblank_helper_disable_vblank_timer - Implements struct &drm_crtc_funcs.disable_vblank
+ * @crtc: The CRTC
+ *
+ * The helper drm_crtc_vblank_helper_disable_vblank_timer() implements
+ * disable_vblank of struct drm_crtc_funcs for CRTCs that require a
+ * VBLANK timer.
+ *
+ * See also struct &drm_crtc_funcs.disable_vblank.
+ */
+void drm_crtc_vblank_helper_disable_vblank_timer(struct drm_crtc *crtc)
+{
+    drm_crtc_vblank_cancel_timer(crtc);
+}
+EXPORT_SYMBOL(drm_crtc_vblank_helper_disable_vblank_timer);
+
+/**
+ * drm_crtc_vblank_helper_get_vblank_timestamp_from_timer -
+ *     Implements struct &drm_crtc_funcs.get_vblank_timestamp
+ * @crtc: The CRTC
+ * @max_error: Maximum acceptable error
+ * @vblank_time: Returns the next vblank timestamp
+ * @in_vblank_irq: True if called from drm_crtc_handle_vblank()
+ *
+ * The helper drm_crtc_vblank_helper_get_vblank_timestamp_from_timer()
+ * implements get_vblank_timestamp of struct drm_crtc_funcs for CRTCs that
+ * require a VBLANK timer. It returns the timestamp according to the timer's
+ * expiry time.
+ *
+ * See also struct &drm_crtc_funcs.get_vblank_timestamp.
+ *
+ * Returns:
+ * True on success, or false otherwise.
+ */
+bool drm_crtc_vblank_helper_get_vblank_timestamp_from_timer(struct drm_crtc *crtc,
+                                                            int *max_error,
+                                                            ktime_t *vblank_time,
+                                                            bool in_vblank_irq)
+{
+    drm_crtc_vblank_get_vblank_timeout(crtc, vblank_time);
+
+    return true;
+}
+EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp_from_timer);
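The overview above notes that drivers with more specific requirements can install their own timeout handler. A sketch of such an override, assuming a hypothetical driver helper foo_latch_shadow_registers():

/* Hypothetical struct drm_crtc_helper_funcs.handle_vblank_timeout
 * override: do per-frame bookkeeping, then let the core process the
 * simulated vblank. Returning false stops the timer.
 */
static bool foo_crtc_handle_vblank_timeout(struct drm_crtc *crtc)
{
    foo_latch_shadow_registers(crtc); /* hypothetical driver helper */

    return drm_crtc_handle_vblank(crtc);
}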
@@ -10,6 +10,7 @@
 #include <linux/shmem_fs.h>
 #include <linux/module.h>
 
+#include <drm/drm_dumb_buffers.h>
 #include <drm/drm_prime.h>
 #include <drm/drm_vma_manager.h>
 #include <drm/exynos_drm.h>

@@ -329,15 +330,16 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
     unsigned int flags;
     int ret;
 
+    ret = drm_mode_size_dumb(dev, args, 0, 0);
+    if (ret)
+        return ret;
+
     /*
      * allocate memory to be used for framebuffer.
      * - this callback would be called by user application
      *   with DRM_IOCTL_MODE_CREATE_DUMB command.
      */
 
-    args->pitch = args->width * ((args->bpp + 7) / 8);
-    args->size = args->pitch * args->height;
-
     if (is_drm_iommu_supported(dev))
         flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
     else
@@ -50,48 +50,6 @@ static const struct vm_operations_struct psb_fbdev_vm_ops = {
  * struct fb_ops
  */
 
-#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
-
-static int psb_fbdev_fb_setcolreg(unsigned int regno,
-                                  unsigned int red, unsigned int green,
-                                  unsigned int blue, unsigned int transp,
-                                  struct fb_info *info)
-{
-    struct drm_fb_helper *fb_helper = info->par;
-    struct drm_framebuffer *fb = fb_helper->fb;
-    uint32_t v;
-
-    if (!fb)
-        return -ENOMEM;
-
-    if (regno > 255)
-        return 1;
-
-    red = CMAP_TOHW(red, info->var.red.length);
-    blue = CMAP_TOHW(blue, info->var.blue.length);
-    green = CMAP_TOHW(green, info->var.green.length);
-    transp = CMAP_TOHW(transp, info->var.transp.length);
-
-    v = (red << info->var.red.offset) |
-        (green << info->var.green.offset) |
-        (blue << info->var.blue.offset) |
-        (transp << info->var.transp.offset);
-
-    if (regno < 16) {
-        switch (fb->format->cpp[0] * 8) {
-        case 16:
-            ((uint32_t *) info->pseudo_palette)[regno] = v;
-            break;
-        case 24:
-        case 32:
-            ((uint32_t *) info->pseudo_palette)[regno] = v;
-            break;
-        }
-    }
-
-    return 0;
-}
-
 static int psb_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 {
     if (vma->vm_pgoff != 0)

@@ -135,7 +93,6 @@ static const struct fb_ops psb_fbdev_fb_ops = {
     .owner = THIS_MODULE,
     __FB_DEFAULT_IOMEM_OPS_RDWR,
     DRM_FB_HELPER_DEFAULT_OPS,
-    .fb_setcolreg = psb_fbdev_fb_setcolreg,
     __FB_DEFAULT_IOMEM_OPS_DRAW,
     .fb_mmap = psb_fbdev_fb_mmap,
     .fb_destroy = psb_fbdev_fb_destroy,
@@ -561,11 +561,11 @@ static int gud_connector_add_properties(struct gud_device *gdrm, struct gud_conn
             continue; /* not a DRM property */
 
         property = gud_connector_property_lookup(connector, prop);
-        if (WARN_ON(IS_ERR(property)))
+        if (drm_WARN_ON(drm, IS_ERR(property)))
             continue;
 
         state_val = gud_connector_tv_state_val(prop, &gconn->initial_tv_state);
-        if (WARN_ON(IS_ERR(state_val)))
+        if (drm_WARN_ON(drm, IS_ERR(state_val)))
             continue;
 
         *state_val = val;

@@ -593,7 +593,7 @@ int gud_connector_fill_properties(struct drm_connector_state *connector_state,
         unsigned int *state_val;
 
         state_val = gud_connector_tv_state_val(prop, &connector_state->tv);
-        if (WARN_ON_ONCE(IS_ERR(state_val)))
+        if (drm_WARN_ON_ONCE(connector_state->connector->dev, IS_ERR(state_val)))
             return PTR_ERR(state_val);
 
         val = *state_val;

@@ -667,7 +667,7 @@ static int gud_connector_create(struct gud_device *gdrm, unsigned int index,
         return ret;
     }
 
-    if (WARN_ON(connector->index != index))
+    if (drm_WARN_ON(drm, connector->index != index))
         return -EINVAL;
 
     if (flags & GUD_CONNECTOR_FLAGS_POLL_STATUS)

@@ -61,7 +61,7 @@ static size_t gud_xrgb8888_to_r124(u8 *dst, const struct drm_format_info *format
     size_t len;
     void *buf;
 
-    WARN_ON_ONCE(format->char_per_block[0] != 1);
+    drm_WARN_ON_ONCE(fb->dev, format->char_per_block[0] != 1);
 
     /* Start on a byte boundary */
     rect->x1 = ALIGN_DOWN(rect->x1, block_width);

@@ -138,7 +138,7 @@ static size_t gud_xrgb8888_to_color(u8 *dst, const struct drm_format_info *forma
             pix = ((r >> 7) << 2) | ((g >> 7) << 1) | (b >> 7);
             break;
         default:
-            WARN_ON_ONCE(1);
+            drm_WARN_ON_ONCE(fb->dev, 1);
             return len;
         }
 

@@ -527,7 +527,7 @@ int gud_plane_atomic_check(struct drm_plane *plane,
         drm_connector_list_iter_end(&conn_iter);
     }
 
-    if (WARN_ON_ONCE(!connector_state))
+    if (drm_WARN_ON_ONCE(plane->dev, !connector_state))
         return -ENOENT;
 
     len = struct_size(req, properties,

@@ -539,7 +539,7 @@ int gud_plane_atomic_check(struct drm_plane *plane,
     gud_from_display_mode(&req->mode, mode);
 
     req->format = gud_from_fourcc(format->format);
-    if (WARN_ON_ONCE(!req->format)) {
+    if (drm_WARN_ON_ONCE(plane->dev, !req->format)) {
         ret = -EINVAL;
         goto out;
     }

@@ -561,7 +561,7 @@ int gud_plane_atomic_check(struct drm_plane *plane,
         val = new_plane_state->rotation;
         break;
     default:
-        WARN_ON_ONCE(1);
+        drm_WARN_ON_ONCE(plane->dev, 1);
         ret = -EINVAL;
         goto out;
     }
@@ -19,6 +19,8 @@
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_panic.h>
 #include <drm/drm_plane.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_vblank_helper.h>
 
 #include "hyperv_drm.h"

@@ -111,11 +113,15 @@ static void hyperv_crtc_helper_atomic_enable(struct drm_crtc *crtc,
                            crtc_state->mode.hdisplay,
                            crtc_state->mode.vdisplay,
                            plane_state->fb->pitches[0]);
+
+    drm_crtc_vblank_on(crtc);
 }
 
 static const struct drm_crtc_helper_funcs hyperv_crtc_helper_funcs = {
     .atomic_check = drm_crtc_helper_atomic_check,
+    .atomic_flush = drm_crtc_vblank_atomic_flush,
     .atomic_enable = hyperv_crtc_helper_atomic_enable,
+    .atomic_disable = drm_crtc_vblank_atomic_disable,
 };
 
 static const struct drm_crtc_funcs hyperv_crtc_funcs = {

@@ -125,6 +131,7 @@ static const struct drm_crtc_funcs hyperv_crtc_funcs = {
     .page_flip = drm_atomic_helper_page_flip,
     .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
     .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+    DRM_CRTC_VBLANK_TIMER_FUNCS,
 };
 
 static int hyperv_plane_atomic_check(struct drm_plane *plane,

@@ -321,6 +328,10 @@ int hyperv_mode_config_init(struct hyperv_drm_device *hv)
         return ret;
     }
 
+    ret = drm_vblank_init(dev, 1);
+    if (ret)
+        return ret;
+
     drm_mode_config_reset(dev);
 
     return 0;
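DRM_CRTC_VBLANK_TIMER_FUNCS presumably expands to the three timer-backed callbacks exported by the new helper library; see drm_vblank_helper.h for the authoritative definition. Roughly:

/* Assumed expansion, for illustration only: */
#define EXAMPLE_CRTC_VBLANK_TIMER_FUNCS \
    .enable_vblank = drm_crtc_vblank_helper_enable_vblank_timer, \
    .disable_vblank = drm_crtc_vblank_helper_disable_vblank_timer, \
    .get_vblank_timestamp = \
        drm_crtc_vblank_helper_get_vblank_timestamp_from_timer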
@@ -1029,7 +1029,7 @@ static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
 {
     GEM_BUG_ON(!obj->ttm.created);
 
-    ttm_bo_put(i915_gem_to_ttm(obj));
+    ttm_bo_fini(i915_gem_to_ttm(obj));
 }
 
 static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)

@@ -1325,7 +1325,7 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
      * If this function fails, it will call the destructor, but
      * our caller still owns the object. So no freeing in the
      * destructor until obj->ttm.created is true.
-     * Similarly, in delayed_destroy, we can't call ttm_bo_put()
+     * Similarly, in delayed_destroy, we can't call ttm_bo_fini()
      * until successful initialization.
      */
     ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), bo_type,
@@ -17,7 +17,9 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_drv.h>
+#include <drm/drm_dumb_buffers.h>
 #include <drm/drm_fbdev_dma.h>
+#include <drm/drm_fourcc.h>
 #include <drm/drm_gem_dma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_managed.h>

@@ -141,17 +143,32 @@ static int imx_drm_dumb_create(struct drm_file *file_priv,
                                struct drm_device *drm,
                                struct drm_mode_create_dumb *args)
 {
-    u32 width = args->width;
+    u32 fourcc;
+    const struct drm_format_info *info;
+    u64 pitch_align;
     int ret;
 
-    args->width = ALIGN(width, 8);
-
-    ret = drm_gem_dma_dumb_create(file_priv, drm, args);
+    /*
+     * Hardware requires the framebuffer width to be aligned to
+     * multiples of 8. The mode-setting code handles this, but
+     * the buffer pitch has to be aligned as well. Set the pitch
+     * alignment accordingly, so that each scanline fits into
+     * the allocated buffer.
+     */
+    fourcc = drm_driver_color_mode_format(drm, args->bpp);
+    if (fourcc == DRM_FORMAT_INVALID)
+        return -EINVAL;
+    info = drm_format_info(fourcc);
+    if (!info)
+        return -EINVAL;
+    pitch_align = drm_format_info_min_pitch(info, 0, SZ_8);
+    if (!pitch_align || pitch_align > U32_MAX)
+        return -EINVAL;
+    ret = drm_mode_size_dumb(drm, args, pitch_align, 0);
     if (ret)
         return ret;
 
-    args->width = width;
-    return ret;
+    return drm_gem_dma_dumb_create(file_priv, drm, args);
 }
 
 static const struct drm_driver imx_drm_driver = {
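To make the alignment above concrete: assuming bpp = 32 resolves to XRGB8888 (4 bytes per pixel), the minimum pitch of an SZ_8-pixel block is 32 bytes, so every scanline stays 8-pixel aligned. A worked sketch, illustrative only:

static u64 example_pitch_align(void)
{
    /* 8 pixels of XRGB8888 (4 bytes each) -> 32-byte pitch alignment;
     * for a 16-bpp format such as RGB565 it would be 16 bytes.
     */
    const struct drm_format_info *info = drm_format_info(DRM_FORMAT_XRGB8888);

    return drm_format_info_min_pitch(info, 0, SZ_8);
}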
@@ -368,17 +368,20 @@ static unsigned long clk_tve_di_recalc_rate(struct clk_hw *hw,
     return 0;
 }
 
-static long clk_tve_di_round_rate(struct clk_hw *hw, unsigned long rate,
-                                  unsigned long *prate)
+static int clk_tve_di_determine_rate(struct clk_hw *hw,
+                                     struct clk_rate_request *req)
 {
     unsigned long div;
 
-    div = *prate / rate;
+    div = req->best_parent_rate / req->rate;
     if (div >= 4)
-        return *prate / 4;
+        req->rate = req->best_parent_rate / 4;
     else if (div >= 2)
-        return *prate / 2;
-    return *prate;
+        req->rate = req->best_parent_rate / 2;
+    else
+        req->rate = req->best_parent_rate;
 
+    return 0;
 }
 
 static int clk_tve_di_set_rate(struct clk_hw *hw, unsigned long rate,

@@ -409,7 +412,7 @@ static int clk_tve_di_set_rate(struct clk_hw *hw, unsigned long rate,
 }
 
 static const struct clk_ops clk_tve_di_ops = {
-    .round_rate = clk_tve_di_round_rate,
+    .determine_rate = clk_tve_di_determine_rate,
     .set_rate = clk_tve_di_set_rate,
     .recalc_rate = clk_tve_di_recalc_rate,
 };
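The conversion pattern is the same across the drivers in this pull: .round_rate returned the chosen rate directly, while .determine_rate writes the result into req->rate and returns a status code, taking the parent rate from req->best_parent_rate. A generic sketch of the shape, not any particular driver's code:

static int example_determine_rate(struct clk_hw *hw,
                                  struct clk_rate_request *req)
{
    unsigned long div;

    if (!req->rate)
        return -EINVAL;

    /* pick the integer divider closest to the requested rate */
    div = req->best_parent_rate / req->rate;
    if (!div)
        div = 1;

    req->rate = req->best_parent_rate / div;

    return 0;
}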
@@ -134,10 +134,10 @@ static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge,
     struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
     struct drm_display_info *di = &conn_state->connector->display_info;
     struct drm_bridge_state *next_bridge_state = NULL;
-    struct drm_bridge *next_bridge;
     u32 bus_flags, bus_fmt;
 
-    next_bridge = drm_bridge_get_next_bridge(bridge);
+    struct drm_bridge *next_bridge __free(drm_bridge_put) = drm_bridge_get_next_bridge(bridge);
+
     if (next_bridge)
         next_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
                                                             next_bridge);
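The __free(drm_bridge_put) annotation comes from the scoped-cleanup machinery in linux/cleanup.h: the reference is dropped automatically when the variable leaves scope, on every return path, which is why no explicit put appears in the function. The DRM bridge headers presumably provide the hookup along these lines (see drm_bridge.h for the actual definition):

/* Assumed hookup, for illustration: */
DEFINE_FREE(drm_bridge_put, struct drm_bridge *, if (_T) drm_bridge_put(_T))

/* ...which makes the acquired reference self-releasing: */
struct drm_bridge *next_bridge __free(drm_bridge_put) =
    drm_bridge_get_next_bridge(bridge);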
@@ -6,6 +6,7 @@
 #include <linux/dma-buf.h>
 
 #include <drm/drm_debugfs.h>
+#include <drm/drm_dumb_buffers.h>
 #include <drm/drm_file.h>
 #include <drm/drm_gem.h>
 #include <drm/drm_prime.h>

@@ -57,7 +58,7 @@ static void lsdc_gem_object_free(struct drm_gem_object *obj)
     struct ttm_buffer_object *tbo = to_ttm_bo(obj);
 
     if (tbo)
-        ttm_bo_put(tbo);
+        ttm_bo_fini(tbo);
 }
 
 static int lsdc_gem_object_vmap(struct drm_gem_object *obj, struct iosys_map *map)

@@ -204,45 +205,31 @@ int lsdc_dumb_create(struct drm_file *file, struct drm_device *ddev,
     const struct lsdc_desc *descp = ldev->descp;
     u32 domain = LSDC_GEM_DOMAIN_VRAM;
     struct drm_gem_object *gobj;
-    size_t size;
-    u32 pitch;
-    u32 handle;
     int ret;
 
-    if (!args->width || !args->height)
-        return -EINVAL;
-
-    if (args->bpp != 32 && args->bpp != 16)
-        return -EINVAL;
-
-    pitch = args->width * args->bpp / 8;
-    pitch = ALIGN(pitch, descp->pitch_align);
-    size = pitch * args->height;
-    size = ALIGN(size, PAGE_SIZE);
+    ret = drm_mode_size_dumb(ddev, args, descp->pitch_align, 0);
+    if (ret)
+        return ret;
 
     /* Maximum single bo size allowed is the half vram size available */
-    if (size > ldev->vram_size / 2) {
-        drm_err(ddev, "Requesting(%zuMiB) failed\n", size >> 20);
+    if (args->size > ldev->vram_size / 2) {
+        drm_err(ddev, "Requesting(%zuMiB) failed\n", (size_t)(args->size >> PAGE_SHIFT));
         return -ENOMEM;
     }
 
-    gobj = lsdc_gem_object_create(ddev, domain, size, false, NULL, NULL);
+    gobj = lsdc_gem_object_create(ddev, domain, args->size, false, NULL, NULL);
     if (IS_ERR(gobj)) {
         drm_err(ddev, "Failed to create gem object\n");
         return PTR_ERR(gobj);
     }
 
-    ret = drm_gem_handle_create(file, gobj, &handle);
+    ret = drm_gem_handle_create(file, gobj, &args->handle);
 
     /* drop reference from allocate, handle holds it now */
     drm_gem_object_put(gobj);
     if (ret)
         return ret;
 
-    args->pitch = pitch;
-    args->size = size;
-    args->handle = handle;
-
     return 0;
 }
@@ -71,12 +71,15 @@ static int mcde_clk_div_choose_div(struct clk_hw *hw, unsigned long rate,
     return best_div;
 }
 
-static long mcde_clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
-                                    unsigned long *prate)
+static int mcde_clk_div_determine_rate(struct clk_hw *hw,
+                                       struct clk_rate_request *req)
 {
-    int div = mcde_clk_div_choose_div(hw, rate, prate, true);
+    int div = mcde_clk_div_choose_div(hw, req->rate,
+                                      &req->best_parent_rate, true);
 
-    return DIV_ROUND_UP_ULL(*prate, div);
+    req->rate = DIV_ROUND_UP_ULL(req->best_parent_rate, div);
+
+    return 0;
 }
 
 static unsigned long mcde_clk_div_recalc_rate(struct clk_hw *hw,

@@ -132,7 +135,7 @@ static int mcde_clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
 static const struct clk_ops mcde_clk_div_ops = {
     .enable = mcde_clk_div_enable,
     .recalc_rate = mcde_clk_div_recalc_rate,
-    .round_rate = mcde_clk_div_round_rate,
+    .determine_rate = mcde_clk_div_determine_rate,
     .set_rate = mcde_clk_div_set_rate,
 };
@@ -10,8 +10,10 @@
 #include <linux/shmem_fs.h>
 #include <linux/dma-buf.h>
 
+#include <drm/drm_dumb_buffers.h>
 #include <drm/drm_prime.h>
 #include <drm/drm_file.h>
+#include <drm/drm_fourcc.h>
 
 #include <trace/events/gpu_mem.h>

@@ -698,8 +700,29 @@ void msm_gem_unpin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm)
 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
                         struct drm_mode_create_dumb *args)
 {
-    args->pitch = align_pitch(args->width, args->bpp);
-    args->size = PAGE_ALIGN(args->pitch * args->height);
+    u32 fourcc;
+    const struct drm_format_info *info;
+    u64 pitch_align;
+    int ret;
+
+    /*
+     * Adreno needs pitch aligned to 32 pixels. Compute the number
+     * of bytes for a block of 32 pixels at the given color format.
+     * Use the result as pitch alignment.
+     */
+    fourcc = drm_driver_color_mode_format(dev, args->bpp);
+    if (fourcc == DRM_FORMAT_INVALID)
+        return -EINVAL;
+    info = drm_format_info(fourcc);
+    if (!info)
+        return -EINVAL;
+    pitch_align = drm_format_info_min_pitch(info, 0, SZ_32);
+    if (!pitch_align || pitch_align > U32_MAX)
+        return -EINVAL;
+    ret = drm_mode_size_dumb(dev, args, pitch_align, 0);
+    if (ret)
+        return ret;
 
     return msm_gem_new_handle(dev, file, args->size,
                               MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
 }
@@ -28,6 +28,7 @@ config DRM_NOUVEAU
     select THERMAL if ACPI && X86
     select ACPI_VIDEO if ACPI && X86
     select SND_HDA_COMPONENT if SND_HDA_CORE
+    select PM_DEVFREQ if ARCH_TEGRA
     help
       Choose this option for open-source NVIDIA support.
 

@@ -9,6 +9,8 @@ struct nvkm_device_tegra {
     struct nvkm_device device;
     struct platform_device *pdev;
 
+    void __iomem *regs;
+
     struct reset_control *rst;
     struct clk *clk;
     struct clk *clk_ref;

@@ -134,4 +134,5 @@ int gf100_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct
 int gk104_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
 int gk20a_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
 int gm20b_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
+int gp10b_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
 #endif
|
@ -57,7 +57,7 @@ nouveau_bo(struct ttm_buffer_object *bo)
|
|||
static inline void
|
||||
nouveau_bo_fini(struct nouveau_bo *bo)
|
||||
{
|
||||
ttm_bo_put(&bo->bo);
|
||||
ttm_bo_fini(&bo->bo);
|
||||
}
|
||||
|
||||
extern struct ttm_device_funcs nouveau_bo_driver;
|
||||
|
|
|
|||
|
|
@ -30,6 +30,7 @@
|
|||
#include <drm/drm_atomic_helper.h>
|
||||
#include <drm/drm_client_event.h>
|
||||
#include <drm/drm_crtc_helper.h>
|
||||
#include <drm/drm_dumb_buffers.h>
|
||||
#include <drm/drm_fourcc.h>
|
||||
#include <drm/drm_gem_framebuffer_helper.h>
|
||||
#include <drm/drm_probe_helper.h>
|
||||
|
|
@ -807,9 +808,9 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
|
|||
uint32_t domain;
|
||||
int ret;
|
||||
|
||||
args->pitch = roundup(args->width * (args->bpp / 8), 256);
|
||||
args->size = args->pitch * args->height;
|
||||
args->size = roundup(args->size, PAGE_SIZE);
|
||||
ret = drm_mode_size_dumb(dev, args, SZ_256, 0);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Use VRAM if there is any ; otherwise fallback to system memory */
|
||||
if (nouveau_drm(dev)->client.device.info.ram_size != 0)
|
||||
|
|
|
|||
|
|
@ -87,7 +87,7 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
|
|||
return;
|
||||
}
|
||||
|
||||
ttm_bo_put(&nvbo->bo);
|
||||
ttm_bo_fini(&nvbo->bo);
|
||||
|
||||
pm_runtime_mark_last_busy(dev);
|
||||
pm_runtime_put_autosuspend(dev);
|
||||
|
|
|
|||
|
|
@ -21,6 +21,8 @@
|
|||
*/
|
||||
#include "nouveau_platform.h"
|
||||
|
||||
#include <nvkm/subdev/clk/gk20a_devfreq.h>
|
||||
|
||||
static int nouveau_platform_probe(struct platform_device *pdev)
|
||||
{
|
||||
const struct nvkm_device_tegra_func *func;
|
||||
|
|
@ -40,6 +42,21 @@ static void nouveau_platform_remove(struct platform_device *pdev)
|
|||
nouveau_drm_device_remove(drm);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
static int nouveau_platform_suspend(struct device *dev)
|
||||
{
|
||||
return gk20a_devfreq_suspend(dev);
|
||||
}
|
||||
|
||||
static int nouveau_platform_resume(struct device *dev)
|
||||
{
|
||||
return gk20a_devfreq_resume(dev);
|
||||
}
|
||||
|
||||
static SIMPLE_DEV_PM_OPS(nouveau_pm_ops, nouveau_platform_suspend,
|
||||
nouveau_platform_resume);
|
||||
#endif
|
||||
|
||||
#if IS_ENABLED(CONFIG_OF)
|
||||
static const struct nvkm_device_tegra_func gk20a_platform_data = {
|
||||
.iommu_bit = 34,
|
||||
|
|
@ -81,6 +98,9 @@ struct platform_driver nouveau_platform_driver = {
|
|||
.driver = {
|
||||
.name = "nouveau",
|
||||
.of_match_table = of_match_ptr(nouveau_platform_match),
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
.pm = &nouveau_pm_ops,
|
||||
#endif
|
||||
},
|
||||
.probe = nouveau_platform_probe,
|
||||
.remove = nouveau_platform_remove,
|
||||
|
|
|
|||
|
|
@ -2280,6 +2280,7 @@ nv13b_chipset = {
|
|||
.acr = { 0x00000001, gp10b_acr_new },
|
||||
.bar = { 0x00000001, gm20b_bar_new },
|
||||
.bus = { 0x00000001, gf100_bus_new },
|
||||
.clk = { 0x00000001, gp10b_clk_new },
|
||||
.fault = { 0x00000001, gp10b_fault_new },
|
||||
.fb = { 0x00000001, gp10b_fb_new },
|
||||
.fuse = { 0x00000001, gm107_fuse_new },
|
||||
|
|
|
|||
|
|
@ -259,6 +259,10 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
|
|||
tdev->func = func;
|
||||
tdev->pdev = pdev;
|
||||
|
||||
tdev->regs = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(tdev->regs))
|
||||
return PTR_ERR(tdev->regs);
|
||||
|
||||
if (func->require_vdd) {
|
||||
tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
|
||||
if (IS_ERR(tdev->vdd)) {
|
||||
|
|
|
|||
|
|
@@ -10,6 +10,8 @@ nvkm-y += nvkm/subdev/clk/gf100.o
 nvkm-y += nvkm/subdev/clk/gk104.o
 nvkm-y += nvkm/subdev/clk/gk20a.o
 nvkm-y += nvkm/subdev/clk/gm20b.o
+nvkm-y += nvkm/subdev/clk/gp10b.o
+nvkm-$(CONFIG_PM_DEVFREQ) += nvkm/subdev/clk/gk20a_devfreq.o
 
 nvkm-y += nvkm/subdev/clk/pllnv04.o
 nvkm-y += nvkm/subdev/clk/pllgt215.o

@@ -23,6 +23,7 @@
  *
  */
 #include "priv.h"
+#include "gk20a_devfreq.h"
 #include "gk20a.h"
 
 #include <core/tegra.h>

@@ -589,6 +590,10 @@ gk20a_clk_init(struct nvkm_clk *base)
         return ret;
     }
 
+    ret = gk20a_devfreq_init(base, &clk->devfreq);
+    if (ret)
+        return ret;
+
     return 0;
 }

@@ -118,6 +118,7 @@ struct gk20a_clk {
     const struct gk20a_clk_pllg_params *params;
     struct gk20a_pll pll;
     u32 parent_rate;
+    struct gk20a_devfreq *devfreq;
 
     u32 (*div_to_pl)(u32);
     u32 (*pl_to_div)(u32);
@ -0,0 +1,320 @@
|
|||
// SPDX-License-Identifier: MIT
|
||||
#include <linux/clk.h>
|
||||
#include <linux/math64.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pm_opp.h>
|
||||
|
||||
#include <drm/drm_managed.h>
|
||||
|
||||
#include <subdev/clk.h>
|
||||
|
||||
#include "nouveau_drv.h"
|
||||
#include "nouveau_chan.h"
|
||||
#include "priv.h"
|
||||
#include "gk20a_devfreq.h"
|
||||
#include "gk20a.h"
|
||||
#include "gp10b.h"
|
||||
|
||||
#define PMU_BUSY_CYCLES_NORM_MAX 1000U
|
||||
|
||||
#define PWR_PMU_IDLE_COUNTER_TOTAL 0U
|
||||
#define PWR_PMU_IDLE_COUNTER_BUSY 4U
|
||||
|
||||
#define PWR_PMU_IDLE_COUNT_REG_OFFSET 0x0010A508U
|
||||
#define PWR_PMU_IDLE_COUNT_REG_SIZE 16U
|
||||
#define PWR_PMU_IDLE_COUNT_MASK 0x7FFFFFFFU
|
||||
#define PWR_PMU_IDLE_COUNT_RESET_VALUE (0x1U << 31U)
|
||||
|
||||
#define PWR_PMU_IDLE_INTR_REG_OFFSET 0x0010A9E8U
|
||||
#define PWR_PMU_IDLE_INTR_ENABLE_VALUE 0U
|
||||
|
||||
#define PWR_PMU_IDLE_INTR_STATUS_REG_OFFSET 0x0010A9ECU
|
||||
#define PWR_PMU_IDLE_INTR_STATUS_MASK 0x00000001U
|
||||
#define PWR_PMU_IDLE_INTR_STATUS_RESET_VALUE 0x1U
|
||||
|
||||
#define PWR_PMU_IDLE_THRESHOLD_REG_OFFSET 0x0010A8A0U
|
||||
#define PWR_PMU_IDLE_THRESHOLD_REG_SIZE 4U
|
||||
#define PWR_PMU_IDLE_THRESHOLD_MAX_VALUE 0x7FFFFFFFU
|
||||
|
||||
#define PWR_PMU_IDLE_CTRL_REG_OFFSET 0x0010A50CU
|
||||
#define PWR_PMU_IDLE_CTRL_REG_SIZE 16U
|
||||
#define PWR_PMU_IDLE_CTRL_VALUE_MASK 0x3U
|
||||
#define PWR_PMU_IDLE_CTRL_VALUE_BUSY 0x2U
|
||||
#define PWR_PMU_IDLE_CTRL_VALUE_ALWAYS 0x3U
|
||||
#define PWR_PMU_IDLE_CTRL_FILTER_MASK (0x1U << 2)
|
||||
#define PWR_PMU_IDLE_CTRL_FILTER_DISABLED 0x0U
|
||||
|
||||
#define PWR_PMU_IDLE_MASK_REG_OFFSET 0x0010A504U
|
||||
#define PWR_PMU_IDLE_MASK_REG_SIZE 16U
|
||||
#define PWM_PMU_IDLE_MASK_GR_ENABLED 0x1U
|
||||
#define PWM_PMU_IDLE_MASK_CE_2_ENABLED 0x200000U
|
||||
|
||||
/**
|
||||
* struct gk20a_devfreq - Device frequency management
|
||||
*/
|
||||
struct gk20a_devfreq {
|
||||
/** @devfreq: devfreq device. */
|
||||
struct devfreq *devfreq;
|
||||
|
||||
/** @regs: Device registers. */
|
||||
void __iomem *regs;
|
||||
|
||||
/** @gov_data: Governor data. */
|
||||
struct devfreq_simple_ondemand_data gov_data;
|
||||
|
||||
/** @busy_time: Busy time. */
|
||||
ktime_t busy_time;
|
||||
|
||||
/** @total_time: Total time. */
|
||||
ktime_t total_time;
|
||||
|
||||
/** @time_last_update: Last update time. */
|
||||
ktime_t time_last_update;
|
||||
};
|
||||
|
||||
static struct gk20a_devfreq *dev_to_gk20a_devfreq(struct device *dev)
|
||||
{
|
||||
struct nouveau_drm *drm = dev_get_drvdata(dev);
|
||||
struct nvkm_subdev *subdev = nvkm_device_subdev(drm->nvkm, NVKM_SUBDEV_CLK, 0);
|
||||
struct nvkm_clk *base = nvkm_clk(subdev);
|
||||
|
||||
switch (drm->nvkm->chipset) {
|
||||
case 0x13b: return gp10b_clk(base)->devfreq; break;
|
||||
default: return gk20a_clk(base)->devfreq; break;
|
||||
}
|
||||
}
|
||||
|
||||
static void gk20a_pmu_init_perfmon_counter(struct gk20a_devfreq *gdevfreq)
|
||||
{
|
||||
u32 data;
|
||||
|
||||
// Set pmu idle intr status bit on total counter overflow
|
||||
writel(PWR_PMU_IDLE_INTR_ENABLE_VALUE,
|
||||
gdevfreq->regs + PWR_PMU_IDLE_INTR_REG_OFFSET);
|
||||
|
||||
writel(PWR_PMU_IDLE_THRESHOLD_MAX_VALUE,
|
||||
gdevfreq->regs + PWR_PMU_IDLE_THRESHOLD_REG_OFFSET +
|
||||
(PWR_PMU_IDLE_COUNTER_TOTAL * PWR_PMU_IDLE_THRESHOLD_REG_SIZE));
|
||||
|
||||
// Setup counter for total cycles
|
||||
data = readl(gdevfreq->regs + PWR_PMU_IDLE_CTRL_REG_OFFSET +
|
||||
(PWR_PMU_IDLE_COUNTER_TOTAL * PWR_PMU_IDLE_CTRL_REG_SIZE));
|
||||
data &= ~(PWR_PMU_IDLE_CTRL_VALUE_MASK | PWR_PMU_IDLE_CTRL_FILTER_MASK);
|
||||
data |= PWR_PMU_IDLE_CTRL_VALUE_ALWAYS | PWR_PMU_IDLE_CTRL_FILTER_DISABLED;
|
||||
writel(data, gdevfreq->regs + PWR_PMU_IDLE_CTRL_REG_OFFSET +
|
||||
(PWR_PMU_IDLE_COUNTER_TOTAL * PWR_PMU_IDLE_CTRL_REG_SIZE));
|
||||
|
||||
// Setup counter for busy cycles
|
||||
writel(PWM_PMU_IDLE_MASK_GR_ENABLED | PWM_PMU_IDLE_MASK_CE_2_ENABLED,
|
||||
gdevfreq->regs + PWR_PMU_IDLE_MASK_REG_OFFSET +
|
||||
(PWR_PMU_IDLE_COUNTER_BUSY * PWR_PMU_IDLE_MASK_REG_SIZE));
|
||||
|
||||
data = readl(gdevfreq->regs + PWR_PMU_IDLE_CTRL_REG_OFFSET +
|
||||
(PWR_PMU_IDLE_COUNTER_BUSY * PWR_PMU_IDLE_CTRL_REG_SIZE));
|
||||
data &= ~(PWR_PMU_IDLE_CTRL_VALUE_MASK | PWR_PMU_IDLE_CTRL_FILTER_MASK);
|
||||
data |= PWR_PMU_IDLE_CTRL_VALUE_BUSY | PWR_PMU_IDLE_CTRL_FILTER_DISABLED;
|
||||
writel(data, gdevfreq->regs + PWR_PMU_IDLE_CTRL_REG_OFFSET +
|
||||
(PWR_PMU_IDLE_COUNTER_BUSY * PWR_PMU_IDLE_CTRL_REG_SIZE));
|
||||
}
|
||||
|
||||
static u32 gk20a_pmu_read_idle_counter(struct gk20a_devfreq *gdevfreq, u32 counter_id)
|
||||
{
|
||||
u32 ret;
|
||||
|
||||
ret = readl(gdevfreq->regs + PWR_PMU_IDLE_COUNT_REG_OFFSET +
|
||||
(counter_id * PWR_PMU_IDLE_COUNT_REG_SIZE));
|
||||
|
||||
return ret & PWR_PMU_IDLE_COUNT_MASK;
|
||||
}
|
||||
|
||||
static void gk20a_pmu_reset_idle_counter(struct gk20a_devfreq *gdevfreq, u32 counter_id)
|
||||
{
|
||||
writel(PWR_PMU_IDLE_COUNT_RESET_VALUE, gdevfreq->regs + PWR_PMU_IDLE_COUNT_REG_OFFSET +
|
||||
(counter_id * PWR_PMU_IDLE_COUNT_REG_SIZE));
|
||||
}
|
||||
|
||||
static u32 gk20a_pmu_read_idle_intr_status(struct gk20a_devfreq *gdevfreq)
|
||||
{
|
||||
u32 ret;
|
||||
|
||||
ret = readl(gdevfreq->regs + PWR_PMU_IDLE_INTR_STATUS_REG_OFFSET);
|
||||
|
||||
return ret & PWR_PMU_IDLE_INTR_STATUS_MASK;
|
||||
}
|
||||
|
||||
static void gk20a_pmu_clear_idle_intr_status(struct gk20a_devfreq *gdevfreq)
|
||||
{
|
||||
writel(PWR_PMU_IDLE_INTR_STATUS_RESET_VALUE,
|
||||
gdevfreq->regs + PWR_PMU_IDLE_INTR_STATUS_REG_OFFSET);
|
||||
}
|
||||
|
||||
static void gk20a_devfreq_update_utilization(struct gk20a_devfreq *gdevfreq)
|
||||
{
|
||||
ktime_t now, last;
|
||||
u64 busy_cycles, total_cycles;
|
||||
u32 norm, intr_status;
|
||||
|
||||
now = ktime_get();
|
||||
last = gdevfreq->time_last_update;
|
||||
gdevfreq->total_time = ktime_us_delta(now, last);
|
||||
|
||||
busy_cycles = gk20a_pmu_read_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_BUSY);
|
||||
total_cycles = gk20a_pmu_read_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_TOTAL);
|
||||
intr_status = gk20a_pmu_read_idle_intr_status(gdevfreq);
|
||||
|
||||
gk20a_pmu_reset_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_BUSY);
|
||||
gk20a_pmu_reset_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_TOTAL);
|
||||
|
||||
if (intr_status != 0UL) {
|
||||
norm = PMU_BUSY_CYCLES_NORM_MAX;
|
||||
gk20a_pmu_clear_idle_intr_status(gdevfreq);
|
||||
} else if (total_cycles == 0ULL || busy_cycles > total_cycles) {
|
||||
norm = PMU_BUSY_CYCLES_NORM_MAX;
|
||||
} else {
|
||||
norm = (u32)div64_u64(busy_cycles * PMU_BUSY_CYCLES_NORM_MAX,
|
||||
total_cycles);
|
||||
}
|
||||
|
||||
gdevfreq->busy_time = div_u64(gdevfreq->total_time * norm, PMU_BUSY_CYCLES_NORM_MAX);
|
||||
gdevfreq->time_last_update = now;
|
||||
}
|
||||
|
||||
static int gk20a_devfreq_target(struct device *dev, unsigned long *freq,
|
||||
u32 flags)
|
||||
{
|
||||
struct nouveau_drm *drm = dev_get_drvdata(dev);
|
||||
struct nvkm_subdev *subdev = nvkm_device_subdev(drm->nvkm, NVKM_SUBDEV_CLK, 0);
|
||||
struct nvkm_clk *base = nvkm_clk(subdev);
|
||||
struct nvkm_pstate *pstates = base->func->pstates;
|
||||
int nr_pstates = base->func->nr_pstates;
|
||||
int i, ret;
|
||||
|
||||
for (i = 0; i < nr_pstates - 1; i++)
|
||||
if (pstates[i].base.domain[nv_clk_src_gpc] * GK20A_CLK_GPC_MDIV >= *freq)
|
||||
break;
|
||||
|
||||
ret = nvkm_clk_ustate(base, pstates[i].pstate, 0);
|
||||
ret |= nvkm_clk_ustate(base, pstates[i].pstate, 1);
|
||||
if (ret) {
|
||||
nvkm_error(subdev, "cannot update clock\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
*freq = pstates[i].base.domain[nv_clk_src_gpc] * GK20A_CLK_GPC_MDIV;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int gk20a_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
|
||||
{
|
||||
struct nouveau_drm *drm = dev_get_drvdata(dev);
|
||||
struct nvkm_subdev *subdev = nvkm_device_subdev(drm->nvkm, NVKM_SUBDEV_CLK, 0);
|
||||
struct nvkm_clk *base = nvkm_clk(subdev);
|
||||
|
||||
*freq = nvkm_clk_read(base, nv_clk_src_gpc) * GK20A_CLK_GPC_MDIV;
|
||||
|
||||
return 0;
|
||||
}
static void gk20a_devfreq_reset(struct gk20a_devfreq *gdevfreq)
{
	gk20a_pmu_reset_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_BUSY);
	gk20a_pmu_reset_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_TOTAL);
	gk20a_pmu_clear_idle_intr_status(gdevfreq);

	gdevfreq->busy_time = 0;
	gdevfreq->total_time = 0;
	gdevfreq->time_last_update = ktime_get();
}
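
/*
 * devfreq ->get_dev_status() hook: report the current frequency plus the
 * busy/total times accumulated since the previous poll, then reset the
 * counters so the next poll starts a fresh window.
 */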
static int gk20a_devfreq_get_dev_status(struct device *dev,
					struct devfreq_dev_status *status)
{
	struct nouveau_drm *drm = dev_get_drvdata(dev);
	struct gk20a_devfreq *gdevfreq = dev_to_gk20a_devfreq(dev);

	gk20a_devfreq_get_cur_freq(dev, &status->current_frequency);

	gk20a_devfreq_update_utilization(gdevfreq);

	status->busy_time = ktime_to_ns(gdevfreq->busy_time);
	status->total_time = ktime_to_ns(gdevfreq->total_time);

	gk20a_devfreq_reset(gdevfreq);

	NV_DEBUG(drm, "busy %lu total %lu %lu %% freq %lu MHz\n",
		 status->busy_time, status->total_time,
		 status->busy_time / (status->total_time / 100),
		 status->current_frequency / 1000 / 1000);

	return 0;
}
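
/* Poll every 50 ms with a delayed (rather than deferrable) timer, so
 * sampling still fires while the CPU is idle. */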
static struct devfreq_dev_profile gk20a_devfreq_profile = {
	.timer = DEVFREQ_TIMER_DELAYED,
	.polling_ms = 50,
	.target = gk20a_devfreq_target,
	.get_cur_freq = gk20a_devfreq_get_cur_freq,
	.get_dev_status = gk20a_devfreq_get_dev_status,
};
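
/*
 * Register one OPP per pstate, prime the perfmon counter, and create a
 * devfreq device driven by the simple_ondemand governor (scale up above
 * 45% utilization, with a 5% differential on the way down).
 */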
int gk20a_devfreq_init(struct nvkm_clk *base, struct gk20a_devfreq **gdevfreq)
{
	struct nvkm_device *device = base->subdev.device;
	struct nouveau_drm *drm = dev_get_drvdata(device->dev);
	struct nvkm_device_tegra *tdev = device->func->tegra(device);
	struct nvkm_pstate *pstates = base->func->pstates;
	int nr_pstates = base->func->nr_pstates;
	struct gk20a_devfreq *new_gdevfreq;
	int i;

	new_gdevfreq = drmm_kzalloc(drm->dev, sizeof(struct gk20a_devfreq), GFP_KERNEL);
	if (!new_gdevfreq)
		return -ENOMEM;

	new_gdevfreq->regs = tdev->regs;

	for (i = 0; i < nr_pstates; i++)
		dev_pm_opp_add(base->subdev.device->dev,
			       pstates[i].base.domain[nv_clk_src_gpc] * GK20A_CLK_GPC_MDIV, 0);

	gk20a_pmu_init_perfmon_counter(new_gdevfreq);
	gk20a_devfreq_reset(new_gdevfreq);

	gk20a_devfreq_profile.initial_freq =
		nvkm_clk_read(base, nv_clk_src_gpc) * GK20A_CLK_GPC_MDIV;

	new_gdevfreq->gov_data.upthreshold = 45;
	new_gdevfreq->gov_data.downdifferential = 5;

	new_gdevfreq->devfreq = devm_devfreq_add_device(device->dev,
							&gk20a_devfreq_profile,
							DEVFREQ_GOV_SIMPLE_ONDEMAND,
							&new_gdevfreq->gov_data);
	if (IS_ERR(new_gdevfreq->devfreq))
		return PTR_ERR(new_gdevfreq->devfreq);

	*gdevfreq = new_gdevfreq;

	return 0;
}
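
/* PM helpers: no-ops until the devfreq device has been set up. */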
int gk20a_devfreq_resume(struct device *dev)
{
	struct gk20a_devfreq *gdevfreq = dev_to_gk20a_devfreq(dev);

	if (!gdevfreq || !gdevfreq->devfreq)
		return 0;

	return devfreq_resume_device(gdevfreq->devfreq);
}

int gk20a_devfreq_suspend(struct device *dev)
{
	struct gk20a_devfreq *gdevfreq = dev_to_gk20a_devfreq(dev);

	if (!gdevfreq || !gdevfreq->devfreq)
		return 0;

	return devfreq_suspend_device(gdevfreq->devfreq);
}

@ -0,0 +1,24 @@
/* SPDX-License-Identifier: MIT */
#ifndef __GK20A_DEVFREQ_H__
#define __GK20A_DEVFREQ_H__

#include <linux/devfreq.h>

struct gk20a_devfreq;

#if defined(CONFIG_PM_DEVFREQ)
int gk20a_devfreq_init(struct nvkm_clk *base, struct gk20a_devfreq **devfreq);

int gk20a_devfreq_resume(struct device *dev);
int gk20a_devfreq_suspend(struct device *dev);
#else
static inline int gk20a_devfreq_init(struct nvkm_clk *base, struct gk20a_devfreq **devfreq)
{
	return 0;
}

static inline int gk20a_devfreq_resume(struct device *dev) { return 0; }
static inline int gk20a_devfreq_suspend(struct device *dev) { return 0; }
#endif /* CONFIG_PM_DEVFREQ */

#endif /* __GK20A_DEVFREQ_H__ */
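
As a rough illustration of how the suspend/resume entry points above are meant to be consumed, the sketch below wires them into a driver's dev_pm_ops. This is a minimal sketch, not code from this series: example_pm_suspend, example_pm_resume and example_pm_ops are hypothetical names.

/* Hypothetical caller; only the gk20a_devfreq_* calls come from this patch. */
#include <linux/pm.h>

#include "gk20a_devfreq.h"

static int example_pm_suspend(struct device *dev)
{
	/* Stop devfreq polling before the device powers down. */
	return gk20a_devfreq_suspend(dev);
}

static int example_pm_resume(struct device *dev)
{
	/* Restart devfreq polling once the device is back up. */
	return gk20a_devfreq_resume(dev);
}

static const struct dev_pm_ops example_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(example_pm_suspend, example_pm_resume)
};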