ASoC: stm32: sai: fix device and OF node leaks on
Merge series from Johan Hovold <johan@kernel.org>:

This series fixes device and OF node reference leaks during probe and a clock prepare imbalance on probe failures. Included is a related cleanup of an error path.
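The series itself is not shown below (the listing that follows is the diff of the merge-back from mainline), but the class of bug the cover letter describes is common in probe paths: a lookup that takes an OF node or device reference and an early return that never drops it, or a clock left prepared on failure. The following is an illustrative sketch only, not code from the STM32 SAI driver; foo_register_component() and the "codec" node name are made up::

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/of.h>
	#include <linux/platform_device.h>

	/* Hypothetical helper, declared only so the sketch is self-contained. */
	static int foo_register_component(struct platform_device *pdev,
					  struct device_node *np);

	/*
	 * Illustrative probe path showing the OF-node reference and
	 * clock-prepare balancing the cover letter talks about.
	 */
	static int foo_probe(struct platform_device *pdev)
	{
		struct device_node *codec;
		struct clk *pclk;
		int ret;

		pclk = devm_clk_get(&pdev->dev, "pclk");
		if (IS_ERR(pclk))
			return PTR_ERR(pclk);

		ret = clk_prepare_enable(pclk);
		if (ret)
			return ret;

		codec = of_get_child_by_name(pdev->dev.of_node, "codec");
		if (!codec) {
			ret = -ENODEV;
			goto err_unprepare;	/* don't leave the clock prepared */
		}

		ret = foo_register_component(pdev, codec);
		of_node_put(codec);		/* always drop the reference taken above */
		if (ret)
			goto err_unprepare;

		return 0;

	err_unprepare:
		clk_disable_unprepare(pclk);
		return ret;
	}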
commit 5d0cad4090

.mailmap | 1

@@ -438,6 +438,7 @@ Krishna Manikandan <quic_mkrishn@quicinc.com> <mkrishn@codeaurora.org>
Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski.k@gmail.com>
Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski@samsung.com>
Krzysztof Kozlowski <krzk@kernel.org> <krzysztof.kozlowski@canonical.com>
Krzysztof Kozlowski <krzk@kernel.org> <krzysztof.kozlowski@linaro.org>
Krzysztof Wilczyński <kwilczynski@kernel.org> <krzysztof.wilczynski@linux.com>
Krzysztof Wilczyński <kwilczynski@kernel.org> <kw@linux.com>
Kshitiz Godara <quic_kgodara@quicinc.com> <kgodara@codeaurora.org>
@@ -50,18 +50,20 @@ patternProperties:
      groups:
        description:
          Name of the pin group to use for the functions.
        $ref: /schemas/types.yaml#/definitions/string
        enum: [i2c0_grp, i2c1_grp, i2c2_grp, i2c3_grp, i2c4_grp,
               i2c5_grp, i2c6_grp, i2c7_grp, i2c8_grp,
               spi0_grp, spi0_cs0_grp, spi0_cs1_grp, spi0_cs2_grp,
               spi1_grp, spi2_grp, spi3_grp, spi4_grp, spi5_grp, spi6_grp,
               uart0_grp, uart1_grp, uart2_grp, uart3_grp,
               pwm0_gpio4_grp, pwm0_gpio8_grp, pwm0_gpio12_grp,
               pwm0_gpio16_grp, pwm1_gpio5_grp, pwm1_gpio9_grp,
               pwm1_gpio13_grp, pwm1_gpio17_grp, pwm2_gpio6_grp,
               pwm2_gpio10_grp, pwm2_gpio14_grp, pwm2_gpio18_grp,
               pwm3_gpio7_grp, pwm3_gpio11_grp, pwm3_gpio15_grp,
               pwm3_gpio19_grp, pcmif_out_grp, pcmif_in_grp]
        items:
          enum: [i2c0_grp, i2c1_grp, i2c2_grp, i2c3_grp, i2c4_grp,
                 i2c5_grp, i2c6_grp, i2c7_grp, i2c8_grp,
                 spi0_grp, spi0_cs0_grp, spi0_cs1_grp, spi0_cs2_grp,
                 spi1_grp, spi2_grp, spi3_grp, spi4_grp, spi5_grp, spi6_grp,
                 uart0_grp, uart1_grp, uart2_grp, uart3_grp,
                 pwm0_gpio4_grp, pwm0_gpio8_grp, pwm0_gpio12_grp,
                 pwm0_gpio16_grp, pwm1_gpio5_grp, pwm1_gpio9_grp,
                 pwm1_gpio13_grp, pwm1_gpio17_grp, pwm2_gpio6_grp,
                 pwm2_gpio10_grp, pwm2_gpio14_grp, pwm2_gpio18_grp,
                 pwm3_gpio7_grp, pwm3_gpio11_grp, pwm3_gpio15_grp,
                 pwm3_gpio19_grp, pcmif_out_grp, pcmif_in_grp]
        minItems: 1
        maxItems: 8

      drive-strength:
        enum: [2, 4, 6, 8, 16, 24, 32]
@@ -74,6 +74,7 @@ patternProperties:

  '^conf':
    type: object
    unevaluatedProperties: false
    description:
      Pinctrl node's client devices use subnodes for pin configurations,
      which in turn use the standard properties below.
@@ -400,19 +400,30 @@ can report through the rotational axes (absolute and/or relative rx, ry, rz).
All other axes retain their meaning. A device must not mix
regular directional axes and accelerometer axes on the same event node.

INPUT_PROP_HAPTIC_TOUCHPAD
--------------------------
INPUT_PROP_PRESSUREPAD
----------------------

The INPUT_PROP_PRESSUREPAD property indicates that the device provides
simulated haptic feedback (e.g. a vibrator motor situated below the surface)
instead of physical haptic feedback (e.g. a hinge). This property is only set
if the device:

The INPUT_PROP_HAPTIC_TOUCHPAD property indicates that device:
- supports simple haptic auto and manual triggering
- can differentiate between at least 5 fingers
- uses correct resolution for the X/Y (units and value)
- reports correct force per touch, and correct units for them (newtons or grams)
- follows the MT protocol type B

If the simulated haptic feedback is controllable by userspace the device must:

- support simple haptic auto and manual triggering, and
- report correct force per touch, and correct units for them (newtons or grams), and
- provide the EV_FF FF_HAPTIC force feedback effect.

Summing up, such devices follow the MS spec for input devices in
Win8 and Win8.1, and in addition support the Simple haptic controller HID table,
and report correct units for the pressure.
Win8 and Win8.1, and in addition may support the Simple haptic controller HID
table, and report correct units for the pressure.

Where applicable, this property is set in addition to INPUT_PROP_BUTTONPAD, it
does not replace that property.

Guidelines
==========
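For context, a minimal sketch of how a touchpad driver might advertise what this documentation change describes. INPUT_PROP_PRESSUREPAD is the define the text above refers to; the axis ranges, resolutions and force range below are placeholders, not values from any real device::

	#include <linux/input.h>
	#include <linux/input/mt.h>

	/*
	 * Sketch only: advertise the pressure-pad property, correct X/Y
	 * resolution, per-touch force and MT protocol type B slots.
	 */
	static int pressurepad_setup(struct input_dev *dev)
	{
		__set_bit(INPUT_PROP_POINTER, dev->propbit);
		__set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);	/* still set where applicable */
		__set_bit(INPUT_PROP_PRESSUREPAD, dev->propbit);

		/* X/Y with correct resolution (units per mm) */
		input_set_abs_params(dev, ABS_MT_POSITION_X, 0, 4095, 0, 0);
		input_set_abs_params(dev, ABS_MT_POSITION_Y, 0, 2559, 0, 0);
		input_abs_set_res(dev, ABS_MT_POSITION_X, 40);
		input_abs_set_res(dev, ABS_MT_POSITION_Y, 40);

		/* per-touch force, reported in a documented unit (e.g. grams) */
		input_set_abs_params(dev, ABS_MT_PRESSURE, 0, 1024, 0, 0);

		/* at least five distinguishable contacts, MT protocol type B */
		return input_mt_init_slots(dev, 5, INPUT_MT_POINTER);
	}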
@@ -54,6 +54,7 @@ to matching WMI devices using a struct wmi_device_id table:
::

	static const struct wmi_device_id foo_id_table[] = {
		/* Only use uppercase letters! */
		{ "936DA01F-9ABD-4D9D-80C7-02AF85C822A8", NULL },
		{ }
	};
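The table above is the example from the WMI driver-API documentation. For context, a minimal sketch of the driver registration that would consume such a table might look as follows; the foo_* callbacks and driver name are illustrative, not part of the patch::

	#include <linux/module.h>
	#include <linux/wmi.h>

	static int foo_probe(struct wmi_device *wdev, const void *context)
	{
		/* bind to the WMI device matched via foo_id_table */
		return 0;
	}

	static void foo_remove(struct wmi_device *wdev)
	{
	}

	static const struct wmi_device_id foo_id_table[] = {
		{ "936DA01F-9ABD-4D9D-80C7-02AF85C822A8", NULL },
		{ }
	};
	MODULE_DEVICE_TABLE(wmi, foo_id_table);

	static struct wmi_driver foo_driver = {
		.driver = {
			.name = "foo-wmi",
		},
		.id_table = foo_id_table,
		.probe = foo_probe,
		.remove = foo_remove,
	};
	module_wmi_driver(foo_driver);

	MODULE_LICENSE("GPL");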
MAINTAINERS | 25

@@ -3927,7 +3927,7 @@ F: crypto/async_tx/
F: include/linux/async_tx.h

AT24 EEPROM DRIVER
M: Bartosz Golaszewski <brgl@bgdev.pl>
M: Bartosz Golaszewski <brgl@kernel.org>
L: linux-i2c@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git

@@ -9267,7 +9267,6 @@ M: Ido Schimmel <idosch@nvidia.com>
L: bridge@lists.linux.dev
L: netdev@vger.kernel.org
S: Maintained
W: http://www.linuxfoundation.org/en/Net:Bridge
F: include/linux/if_bridge.h
F: include/uapi/linux/if_bridge.h
F: include/linux/netfilter_bridge/

@@ -10680,7 +10679,7 @@ F: tools/gpio/gpio-sloppy-logic-analyzer.sh

GPIO SUBSYSTEM
M: Linus Walleij <linus.walleij@linaro.org>
M: Bartosz Golaszewski <brgl@bgdev.pl>
M: Bartosz Golaszewski <brgl@kernel.org>
L: linux-gpio@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git

@@ -10697,7 +10696,7 @@ K: GPIOD_FLAGS_BIT_NONEXCLUSIVE
K: devm_gpiod_unhinge

GPIO UAPI
M: Bartosz Golaszewski <brgl@bgdev.pl>
M: Bartosz Golaszewski <brgl@kernel.org>
R: Kent Gibson <warthog618@gmail.com>
L: linux-gpio@vger.kernel.org
S: Maintained

@@ -15311,7 +15310,7 @@ F: drivers/pwm/pwm-max7360.c
F: include/linux/mfd/max7360.h

MAXIM MAX77650 PMIC MFD DRIVER
M: Bartosz Golaszewski <brgl@bgdev.pl>
M: Bartosz Golaszewski <brgl@kernel.org>
L: linux-kernel@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/*/*max77650.yaml

@@ -16207,7 +16206,7 @@ MEMORY CONTROLLER DRIVERS
M: Krzysztof Kozlowski <krzk@kernel.org>
L: linux-kernel@vger.kernel.org
S: Maintained
B: mailto:krzysztof.kozlowski@linaro.org
B: mailto:krzk@kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/krzk/linux-mem-ctrl.git
F: Documentation/devicetree/bindings/memory-controllers/
F: drivers/memory/

@@ -18782,6 +18781,10 @@ S: Maintained
F: arch/arm/*omap*/*clock*

OMAP DEVICE TREE SUPPORT
M: Aaro Koskinen <aaro.koskinen@iki.fi>
M: Andreas Kemnade <andreas@kemnade.info>
M: Kevin Hilman <khilman@baylibre.com>
M: Roger Quadros <rogerq@kernel.org>
M: Tony Lindgren <tony@atomide.com>
L: linux-omap@vger.kernel.org
L: devicetree@vger.kernel.org

@@ -19901,7 +19904,7 @@ F: drivers/pci/p2pdma.c
F: include/linux/pci-p2pdma.h

PCI POWER CONTROL
M: Bartosz Golaszewski <brgl@bgdev.pl>
M: Bartosz Golaszewski <brgl@kernel.org>
L: linux-pci@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci.git

@@ -20498,7 +20501,7 @@ F: include/linux/powercap.h
F: kernel/configs/nopm.config

POWER SEQUENCING
M: Bartosz Golaszewski <brgl@bgdev.pl>
M: Bartosz Golaszewski <brgl@kernel.org>
L: linux-pm@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git

@@ -21180,7 +21183,7 @@ F: Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml
F: drivers/i2c/busses/i2c-qcom-cci.c

QUALCOMM INTERCONNECT BWMON DRIVER
M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
M: Krzysztof Kozlowski <krzk@kernel.org>
L: linux-arm-msm@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml

@@ -21301,7 +21304,7 @@ F: Documentation/tee/qtee.rst
F: drivers/tee/qcomtee/

QUALCOMM TRUST ZONE MEMORY ALLOCATOR
M: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
M: Bartosz Golaszewski <brgl@kernel.org>
L: linux-arm-msm@vger.kernel.org
S: Maintained
F: drivers/firmware/qcom/qcom_tzmem.c

@@ -25669,7 +25672,7 @@ F: Documentation/devicetree/bindings/crypto/ti,am62l-dthev2.yaml
F: drivers/crypto/ti/

TI DAVINCI MACHINE SUPPORT
M: Bartosz Golaszewski <brgl@bgdev.pl>
M: Bartosz Golaszewski <brgl@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git
Makefile | 2

@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 18
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION = -rc7
NAME = Baby Opossum Posse

# *DOCUMENTATION*
@@ -1254,3 +1254,17 @@
	max-frequency = <25000000>;
	bus-width = <4>;
};

/*
 * FIXME: rgmii delay is introduced by MAC (configured in u-boot now)
 * instead of PCB on fuji board, so the "phy-mode" should be updated to
 * "rgmii-[tx|rx]id" when the aspeed-mac driver can handle the delay
 * properly.
 */
&mac3 {
	status = "okay";
	phy-mode = "rgmii";
	phy-handle = <&ethphy3>;
	pinctrl-names = "default";
	pinctrl-0 = <&pinctrl_rgmii4_default>;
};
@@ -55,8 +55,8 @@
mdio {
	/delete-node/ switch@1e;

	bcm54210e: ethernet-phy@0 {
		reg = <0>;
	bcm54210e: ethernet-phy@25 {
		reg = <25>;
	};
};
};
@@ -259,7 +259,7 @@
pinctrl-0 = <&pinctrl_audmux>;
status = "okay";

ssi2 {
mux-ssi2 {
	fsl,audmux-port = <1>;
	fsl,port-config = <
		(IMX_AUDMUX_V2_PTCR_SYN |

@@ -271,7 +271,7 @@
	>;
};

aud3 {
mux-aud3 {
	fsl,audmux-port = <2>;
	fsl,port-config = <
		IMX_AUDMUX_V2_PTCR_SYN
@@ -136,7 +136,7 @@
interrupt-parent = <&gpio2>;
interrupts = <8 IRQ_TYPE_EDGE_FALLING>;
reset-gpios = <&gpio2 14 GPIO_ACTIVE_LOW>;
report-rate-hz = <6>;
report-rate-hz = <60>;
/* settings valid only for Hycon touchscreen */
touchscreen-size-x = <1280>;
touchscreen-size-y = <800>;
@@ -18,11 +18,21 @@

#include "bcm2712-rpi-5-b-ovl-rp1.dts"

/ {
	aliases {
		ethernet0 = &rp1_eth;
	};
};

&pcie2 {
	#include "rp1-nexus.dtsi"
};

&rp1_eth {
	assigned-clocks = <&rp1_clocks RP1_CLK_ETH_TSU>,
			  <&rp1_clocks RP1_CLK_ETH>;
	assigned-clock-rates = <50000000>,
			       <125000000>;
	status = "okay";
	phy-mode = "rgmii-id";
	phy-handle = <&phy1>;
@@ -67,7 +67,6 @@ img_subsys: bus@58000000 {
	power-domains = <&pd IMX_SC_R_CSI_0>;
	fsl,channel = <0>;
	fsl,num-irqs = <32>;
	status = "disabled";
};

gpio0_mipi_csi0: gpio@58222000 {

@@ -144,7 +143,6 @@ img_subsys: bus@58000000 {
	power-domains = <&pd IMX_SC_R_CSI_1>;
	fsl,channel = <0>;
	fsl,num-irqs = <32>;
	status = "disabled";
};

gpio0_mipi_csi1: gpio@58242000 {
@@ -16,11 +16,20 @@
	ethernet1 = &eqos;
};

extcon_usbc: usbc {
	compatible = "linux,extcon-usb-gpio";
connector {
	compatible = "gpio-usb-b-connector", "usb-b-connector";
	id-gpios = <&gpio1 10 GPIO_ACTIVE_HIGH>;
	label = "Type-C";
	pinctrl-names = "default";
	pinctrl-0 = <&pinctrl_usb1_id>;
	id-gpios = <&gpio1 10 GPIO_ACTIVE_HIGH>;
	type = "micro";
	vbus-supply = <&reg_usb1_vbus>;

	port {
		usb_dr_connector: endpoint {
			remote-endpoint = <&usb3_dwc>;
		};
	};
};

leds {

@@ -244,9 +253,15 @@
	hnp-disable;
	srp-disable;
	dr_mode = "otg";
	extcon = <&extcon_usbc>;
	usb-role-switch;
	role-switch-default-mode = "peripheral";
	status = "okay";

	port {
		usb3_dwc: endpoint {
			remote-endpoint = <&usb_dr_connector>;
		};
	};
};

&usb_dwc3_1 {

@@ -273,7 +288,6 @@
};

&usb3_phy0 {
	vbus-supply = <&reg_usb1_vbus>;
	status = "okay";
};
@ -1886,7 +1886,7 @@
|
|||
assigned-clock-rates = <3600000000>, <100000000>, <10000000>;
|
||||
assigned-clock-parents = <0>, <0>,
|
||||
<&scmi_clk IMX95_CLK_SYSPLL1_PFD1_DIV2>;
|
||||
msi-map = <0x0 &its 0x98 0x1>;
|
||||
msi-map = <0x0 &its 0x10 0x1>;
|
||||
power-domains = <&scmi_devpd IMX95_PD_HSIO_TOP>;
|
||||
status = "disabled";
|
||||
};
|
||||
|
|
@ -1963,6 +1963,7 @@
|
|||
assigned-clock-rates = <3600000000>, <100000000>, <10000000>;
|
||||
assigned-clock-parents = <0>, <0>,
|
||||
<&scmi_clk IMX95_CLK_SYSPLL1_PFD1_DIV2>;
|
||||
msi-map = <0x0 &its 0x98 0x1>;
|
||||
power-domains = <&scmi_devpd IMX95_PD_HSIO_TOP>;
|
||||
status = "disabled";
|
||||
};
|
||||
|
|
|
|||
|
|
@ -42,6 +42,7 @@
|
|||
interrupt-parent = <&gpio>;
|
||||
interrupts = <TEGRA194_MAIN_GPIO(G, 4) IRQ_TYPE_LEVEL_LOW>;
|
||||
#phy-cells = <0>;
|
||||
wakeup-source;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
|
|
|||
|
|
@ -598,7 +598,6 @@
|
|||
pinctrl-2 = <&otp_pin>;
|
||||
resets = <&cru SRST_TSADC>;
|
||||
reset-names = "tsadc-apb";
|
||||
rockchip,grf = <&grf>;
|
||||
rockchip,hw-tshut-temp = <100000>;
|
||||
#thermal-sensor-cells = <1>;
|
||||
status = "disabled";
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@
|
|||
* Copyright (c) 2016-2017 Fuzhou Rockchip Electronics Co., Ltd
|
||||
*/
|
||||
|
||||
#include "rk3399.dtsi"
|
||||
#include "rk3399-base.dtsi"
|
||||
|
||||
/ {
|
||||
cluster0_opp: opp-table-0 {
|
||||
|
|
|
|||
|
|
@ -45,11 +45,11 @@
|
|||
|
||||
cam_dovdd_1v8: regulator-cam-dovdd-1v8 {
|
||||
compatible = "regulator-fixed";
|
||||
gpio = <&pca9670 3 GPIO_ACTIVE_LOW>;
|
||||
regulator-max-microvolt = <1800000>;
|
||||
regulator-min-microvolt = <1800000>;
|
||||
regulator-name = "cam-dovdd-1v8";
|
||||
vin-supply = <&vcc1v8_video>;
|
||||
gpio = <&pca9670 3 GPIO_ACTIVE_LOW>;
|
||||
regulator-max-microvolt = <1800000>;
|
||||
regulator-min-microvolt = <1800000>;
|
||||
regulator-name = "cam-dovdd-1v8";
|
||||
vin-supply = <&vcc1v8_video>;
|
||||
};
|
||||
|
||||
cam_dvdd_1v2: regulator-cam-dvdd-1v2 {
|
||||
|
|
|
|||
|
|
@ -120,7 +120,7 @@
|
|||
compatible = "regulator-fixed";
|
||||
regulator-name = "vcc3v3_pcie";
|
||||
enable-active-high;
|
||||
gpios = <&gpio0 RK_PB1 GPIO_ACTIVE_HIGH>;
|
||||
gpios = <&gpio4 RK_PB1 GPIO_ACTIVE_HIGH>;
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&pcie_drv>;
|
||||
regulator-always-on;
|
||||
|
|
@ -187,7 +187,7 @@
|
|||
vcc5v0_usb2b: regulator-vcc5v0-usb2b {
|
||||
compatible = "regulator-fixed";
|
||||
enable-active-high;
|
||||
gpio = <&gpio0 RK_PC4 GPIO_ACTIVE_HIGH>;
|
||||
gpio = <&gpio4 RK_PC4 GPIO_ACTIVE_HIGH>;
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&vcc5v0_usb2b_en>;
|
||||
regulator-name = "vcc5v0_usb2b";
|
||||
|
|
@ -199,7 +199,7 @@
|
|||
vcc5v0_usb2t: regulator-vcc5v0-usb2t {
|
||||
compatible = "regulator-fixed";
|
||||
enable-active-high;
|
||||
gpios = <&gpio0 RK_PD5 GPIO_ACTIVE_HIGH>;
|
||||
gpios = <&gpio3 RK_PD5 GPIO_ACTIVE_HIGH>;
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&vcc5v0_usb2t_en>;
|
||||
regulator-name = "vcc5v0_usb2t";
|
||||
|
|
|
|||
|
|
@ -789,7 +789,7 @@
|
|||
vccio1-supply = <&vccio_acodec>;
|
||||
vccio2-supply = <&vcc_1v8>;
|
||||
vccio3-supply = <&vccio_sd>;
|
||||
vccio4-supply = <&vcc_1v8>;
|
||||
vccio4-supply = <&vcca1v8_pmu>;
|
||||
vccio5-supply = <&vcc_1v8>;
|
||||
vccio6-supply = <&vcc1v8_dvp>;
|
||||
vccio7-supply = <&vcc_3v3>;
|
||||
|
|
|
|||
|
|
@ -482,6 +482,8 @@
|
|||
};
|
||||
|
||||
&i2s1_8ch {
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&i2s1m0_sclktx &i2s1m0_lrcktx &i2s1m0_sdi0 &i2s1m0_sdo0>;
|
||||
rockchip,trcm-sync-tx-only;
|
||||
status = "okay";
|
||||
};
|
||||
|
|
|
|||
|
|
@ -276,12 +276,6 @@
|
|||
opp-microvolt = <900000 900000 950000>;
|
||||
clock-latency-ns = <40000>;
|
||||
};
|
||||
|
||||
opp-2208000000 {
|
||||
opp-hz = /bits/ 64 <2208000000>;
|
||||
opp-microvolt = <950000 950000 950000>;
|
||||
clock-latency-ns = <40000>;
|
||||
};
|
||||
};
|
||||
|
||||
cluster1_opp_table: opp-table-cluster1 {
|
||||
|
|
@ -348,12 +342,6 @@
|
|||
opp-microvolt = <925000 925000 950000>;
|
||||
clock-latency-ns = <40000>;
|
||||
};
|
||||
|
||||
opp-2304000000 {
|
||||
opp-hz = /bits/ 64 <2304000000>;
|
||||
opp-microvolt = <950000 950000 950000>;
|
||||
clock-latency-ns = <40000>;
|
||||
};
|
||||
};
|
||||
|
||||
gpu_opp_table: opp-table-gpu {
|
||||
|
|
@ -2561,8 +2549,6 @@
|
|||
interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&i2c9m0_xfer>;
|
||||
resets = <&cru SRST_I2C9>, <&cru SRST_P_I2C9>;
|
||||
reset-names = "i2c", "apb";
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
status = "disabled";
|
||||
|
|
|
|||
|
|
@ -115,7 +115,7 @@
|
|||
};
|
||||
};
|
||||
|
||||
gpu_opp_table: opp-table {
|
||||
gpu_opp_table: opp-table-gpu {
|
||||
compatible = "operating-points-v2";
|
||||
|
||||
opp-300000000 {
|
||||
|
|
|
|||
|
|
@ -382,14 +382,12 @@
|
|||
cap-mmc-highspeed;
|
||||
mmc-ddr-1_8v;
|
||||
mmc-hs200-1_8v;
|
||||
mmc-hs400-1_8v;
|
||||
mmc-hs400-enhanced-strobe;
|
||||
mmc-pwrseq = <&emmc_pwrseq>;
|
||||
no-sdio;
|
||||
no-sd;
|
||||
non-removable;
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&emmc_bus8 &emmc_cmd &emmc_clk &emmc_data_strobe>;
|
||||
pinctrl-0 = <&emmc_bus8 &emmc_cmd &emmc_clk>;
|
||||
vmmc-supply = <&vcc_3v3_s3>;
|
||||
vqmmc-supply = <&vcc_1v8_s3>;
|
||||
status = "okay";
|
||||
|
|
|
|||
|
|
@ -66,7 +66,7 @@
|
|||
};
|
||||
};
|
||||
|
||||
gpu_opp_table: opp-table {
|
||||
gpu_opp_table: opp-table-gpu {
|
||||
compatible = "operating-points-v2";
|
||||
|
||||
opp-300000000 {
|
||||
|
|
|
|||
|
|
@ -14,8 +14,8 @@
|
|||
gpios = <&gpio0 RK_PC5 GPIO_ACTIVE_HIGH>;
|
||||
regulator-name = "vcc3v3_pcie20";
|
||||
regulator-boot-on;
|
||||
regulator-min-microvolt = <1800000>;
|
||||
regulator-max-microvolt = <1800000>;
|
||||
regulator-min-microvolt = <3300000>;
|
||||
regulator-max-microvolt = <3300000>;
|
||||
startup-delay-us = <50000>;
|
||||
vin-supply = <&vcc5v0_sys>;
|
||||
};
|
||||
|
|
|
|||
|
|
@ -1341,7 +1341,7 @@ CONFIG_COMMON_CLK_RS9_PCIE=y
|
|||
CONFIG_COMMON_CLK_VC3=y
|
||||
CONFIG_COMMON_CLK_VC5=y
|
||||
CONFIG_COMMON_CLK_BD718XX=m
|
||||
CONFIG_CLK_RASPBERRYPI=m
|
||||
CONFIG_CLK_RASPBERRYPI=y
|
||||
CONFIG_CLK_IMX8MM=y
|
||||
CONFIG_CLK_IMX8MN=y
|
||||
CONFIG_CLK_IMX8MP=y
|
||||
|
|
|
|||
|
|
@ -33,8 +33,8 @@ struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
|
|||
unsigned long vaddr);
|
||||
#define vma_alloc_zeroed_movable_folio vma_alloc_zeroed_movable_folio
|
||||
|
||||
void tag_clear_highpage(struct page *to);
|
||||
#define __HAVE_ARCH_TAG_CLEAR_HIGHPAGE
|
||||
bool tag_clear_highpages(struct page *to, int numpages);
|
||||
#define __HAVE_ARCH_TAG_CLEAR_HIGHPAGES
|
||||
|
||||
#define clear_user_page(page, vaddr, pg) clear_page(page)
|
||||
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
|
||||
|
|
|
|||
|
|
@ -624,6 +624,7 @@ nommu:
|
|||
kvm_timer_vcpu_load(vcpu);
|
||||
kvm_vgic_load(vcpu);
|
||||
kvm_vcpu_load_debug(vcpu);
|
||||
kvm_vcpu_load_fgt(vcpu);
|
||||
if (has_vhe())
|
||||
kvm_vcpu_load_vhe(vcpu);
|
||||
kvm_arch_vcpu_load_fp(vcpu);
|
||||
|
|
@ -642,7 +643,6 @@ nommu:
|
|||
vcpu->arch.hcr_el2 |= HCR_TWI;
|
||||
|
||||
vcpu_set_pauth_traps(vcpu);
|
||||
kvm_vcpu_load_fgt(vcpu);
|
||||
|
||||
if (is_protected_kvm_enabled()) {
|
||||
kvm_call_hyp_nvhe(__pkvm_vcpu_load,
|
||||
|
|
|
|||
|
|
@ -5609,7 +5609,11 @@ int kvm_finalize_sys_regs(struct kvm_vcpu *vcpu)
|
|||
|
||||
guard(mutex)(&kvm->arch.config_lock);
|
||||
|
||||
if (!irqchip_in_kernel(kvm)) {
|
||||
/*
|
||||
* This hacks into the ID registers, so only perform it when the
|
||||
* first vcpu runs, or the kvm_set_vm_id_reg() helper will scream.
|
||||
*/
|
||||
if (!irqchip_in_kernel(kvm) && !kvm_vm_has_ran_once(kvm)) {
|
||||
u64 val;
|
||||
|
||||
val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1) & ~ID_AA64PFR0_EL1_GIC;
|
||||
|
|
|
|||
|
|
@ -967,20 +967,21 @@ struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
|
|||
return vma_alloc_folio(flags, 0, vma, vaddr);
|
||||
}
|
||||
|
||||
void tag_clear_highpage(struct page *page)
|
||||
bool tag_clear_highpages(struct page *page, int numpages)
|
||||
{
|
||||
/*
|
||||
* Check if MTE is supported and fall back to clear_highpage().
|
||||
* get_huge_zero_folio() unconditionally passes __GFP_ZEROTAGS and
|
||||
* post_alloc_hook() will invoke tag_clear_highpage().
|
||||
* post_alloc_hook() will invoke tag_clear_highpages().
|
||||
*/
|
||||
if (!system_supports_mte()) {
|
||||
clear_highpage(page);
|
||||
return;
|
||||
}
|
||||
if (!system_supports_mte())
|
||||
return false;
|
||||
|
||||
/* Newly allocated page, shouldn't have been tagged yet */
|
||||
WARN_ON_ONCE(!try_page_mte_tagging(page));
|
||||
mte_zero_clear_page_tags(page_address(page));
|
||||
set_page_mte_tagged(page);
|
||||
/* Newly allocated pages, shouldn't have been tagged yet */
|
||||
for (int i = 0; i < numpages; i++, page++) {
|
||||
WARN_ON_ONCE(!try_page_mte_tagging(page));
|
||||
mte_zero_clear_page_tags(page_address(page));
|
||||
set_page_mte_tagged(page);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -55,6 +55,27 @@ enum cpu_type_enum {
|
|||
CPU_LAST
|
||||
};
|
||||
|
||||
static inline char *id_to_core_name(unsigned int id)
|
||||
{
|
||||
if ((id & PRID_COMP_MASK) != PRID_COMP_LOONGSON)
|
||||
return "Unknown";
|
||||
|
||||
switch (id & PRID_SERIES_MASK) {
|
||||
case PRID_SERIES_LA132:
|
||||
return "LA132";
|
||||
case PRID_SERIES_LA264:
|
||||
return "LA264";
|
||||
case PRID_SERIES_LA364:
|
||||
return "LA364";
|
||||
case PRID_SERIES_LA464:
|
||||
return "LA464";
|
||||
case PRID_SERIES_LA664:
|
||||
return "LA664";
|
||||
default:
|
||||
return "Unknown";
|
||||
}
|
||||
}
|
||||
|
||||
#endif /* !__ASSEMBLER__ */
|
||||
|
||||
/*
|
||||
|
|
|
|||
|
|
@ -10,10 +10,6 @@
|
|||
|
||||
#include <linux/types.h>
|
||||
|
||||
#ifndef __KERNEL__
|
||||
#include <stdint.h>
|
||||
#endif
|
||||
|
||||
/*
|
||||
* For PTRACE_{POKE,PEEK}USR. 0 - 31 are GPRs,
|
||||
* 32 is syscall's original ARG0, 33 is PC, 34 is BADVADDR.
|
||||
|
|
@ -41,44 +37,44 @@ struct user_pt_regs {
|
|||
} __attribute__((aligned(8)));
|
||||
|
||||
struct user_fp_state {
|
||||
uint64_t fpr[32];
|
||||
uint64_t fcc;
|
||||
uint32_t fcsr;
|
||||
__u64 fpr[32];
|
||||
__u64 fcc;
|
||||
__u32 fcsr;
|
||||
};
|
||||
|
||||
struct user_lsx_state {
|
||||
/* 32 registers, 128 bits width per register. */
|
||||
uint64_t vregs[32*2];
|
||||
__u64 vregs[32*2];
|
||||
};
|
||||
|
||||
struct user_lasx_state {
|
||||
/* 32 registers, 256 bits width per register. */
|
||||
uint64_t vregs[32*4];
|
||||
__u64 vregs[32*4];
|
||||
};
|
||||
|
||||
struct user_lbt_state {
|
||||
uint64_t scr[4];
|
||||
uint32_t eflags;
|
||||
uint32_t ftop;
|
||||
__u64 scr[4];
|
||||
__u32 eflags;
|
||||
__u32 ftop;
|
||||
};
|
||||
|
||||
struct user_watch_state {
|
||||
uint64_t dbg_info;
|
||||
__u64 dbg_info;
|
||||
struct {
|
||||
uint64_t addr;
|
||||
uint64_t mask;
|
||||
uint32_t ctrl;
|
||||
uint32_t pad;
|
||||
__u64 addr;
|
||||
__u64 mask;
|
||||
__u32 ctrl;
|
||||
__u32 pad;
|
||||
} dbg_regs[8];
|
||||
};
|
||||
|
||||
struct user_watch_state_v2 {
|
||||
uint64_t dbg_info;
|
||||
__u64 dbg_info;
|
||||
struct {
|
||||
uint64_t addr;
|
||||
uint64_t mask;
|
||||
uint32_t ctrl;
|
||||
uint32_t pad;
|
||||
__u64 addr;
|
||||
__u64 mask;
|
||||
__u32 ctrl;
|
||||
__u32 pad;
|
||||
} dbg_regs[14];
|
||||
};
|
||||
|
||||
|
|
|
|||
|
|
@ -277,7 +277,7 @@ static inline void cpu_probe_loongson(struct cpuinfo_loongarch *c, unsigned int
|
|||
uint32_t config;
|
||||
uint64_t *vendor = (void *)(&cpu_full_name[VENDOR_OFFSET]);
|
||||
uint64_t *cpuname = (void *)(&cpu_full_name[CPUNAME_OFFSET]);
|
||||
const char *core_name = "Unknown";
|
||||
const char *core_name = id_to_core_name(c->processor_id);
|
||||
|
||||
switch (BIT(fls(c->isa_level) - 1)) {
|
||||
case LOONGARCH_CPU_ISA_LA32R:
|
||||
|
|
@ -291,35 +291,23 @@ static inline void cpu_probe_loongson(struct cpuinfo_loongarch *c, unsigned int
|
|||
break;
|
||||
}
|
||||
|
||||
switch (c->processor_id & PRID_SERIES_MASK) {
|
||||
case PRID_SERIES_LA132:
|
||||
core_name = "LA132";
|
||||
break;
|
||||
case PRID_SERIES_LA264:
|
||||
core_name = "LA264";
|
||||
break;
|
||||
case PRID_SERIES_LA364:
|
||||
core_name = "LA364";
|
||||
break;
|
||||
case PRID_SERIES_LA464:
|
||||
core_name = "LA464";
|
||||
break;
|
||||
case PRID_SERIES_LA664:
|
||||
core_name = "LA664";
|
||||
break;
|
||||
}
|
||||
|
||||
pr_info("%s Processor probed (%s Core)\n", __cpu_family[cpu], core_name);
|
||||
|
||||
if (!cpu_has_iocsr)
|
||||
if (!cpu_has_iocsr) {
|
||||
__cpu_full_name[cpu] = "Unknown";
|
||||
return;
|
||||
|
||||
if (!__cpu_full_name[cpu])
|
||||
__cpu_full_name[cpu] = cpu_full_name;
|
||||
}
|
||||
|
||||
*vendor = iocsr_read64(LOONGARCH_IOCSR_VENDOR);
|
||||
*cpuname = iocsr_read64(LOONGARCH_IOCSR_CPUNAME);
|
||||
|
||||
if (!__cpu_full_name[cpu]) {
|
||||
if (((char *)vendor)[0] == 0)
|
||||
__cpu_full_name[cpu] = "Unknown";
|
||||
else
|
||||
__cpu_full_name[cpu] = cpu_full_name;
|
||||
}
|
||||
|
||||
config = iocsr_read32(LOONGARCH_IOCSR_FEATURES);
|
||||
if (config & IOCSRF_CSRIPI)
|
||||
c->options |= LOONGARCH_CPU_CSRIPI;
|
||||
|
|
|
|||
|
|
@ -237,6 +237,7 @@ void machine_crash_shutdown(struct pt_regs *regs)
|
|||
#ifdef CONFIG_SMP
|
||||
crash_smp_send_stop();
|
||||
#endif
|
||||
machine_kexec_mask_interrupts();
|
||||
cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
|
||||
|
||||
pr_info("Starting crashdump kernel...\n");
|
||||
|
|
@ -274,6 +275,7 @@ void machine_kexec(struct kimage *image)
|
|||
|
||||
/* We do not want to be bothered. */
|
||||
local_irq_disable();
|
||||
machine_kexec_mask_interrupts();
|
||||
|
||||
pr_notice("EFI boot flag: 0x%lx\n", efi_boot);
|
||||
pr_notice("Command line addr: 0x%lx\n", cmdline_ptr);
|
||||
|
|
|
|||
|
|
@ -158,35 +158,9 @@ static void __init node_mem_init(unsigned int node)
|
|||
|
||||
#ifdef CONFIG_ACPI_NUMA
|
||||
|
||||
/*
|
||||
* add_numamem_region
|
||||
*
|
||||
* Add a uasable memory region described by BIOS. The
|
||||
* routine gets each intersection between BIOS's region
|
||||
* and node's region, and adds them into node's memblock
|
||||
* pool.
|
||||
*
|
||||
*/
|
||||
static void __init add_numamem_region(u64 start, u64 end, u32 type)
|
||||
{
|
||||
u32 node = pa_to_nid(start);
|
||||
u64 size = end - start;
|
||||
static unsigned long num_physpages;
|
||||
static unsigned long num_physpages;
|
||||
|
||||
if (start >= end) {
|
||||
pr_debug("Invalid region: %016llx-%016llx\n", start, end);
|
||||
return;
|
||||
}
|
||||
|
||||
num_physpages += (size >> PAGE_SHIFT);
|
||||
pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx Bytes\n",
|
||||
node, type, start, size);
|
||||
pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
|
||||
start >> PAGE_SHIFT, end >> PAGE_SHIFT, num_physpages);
|
||||
memblock_set_node(start, size, &memblock.memory, node);
|
||||
}
|
||||
|
||||
static void __init init_node_memblock(void)
|
||||
static void __init info_node_memblock(void)
|
||||
{
|
||||
u32 mem_type;
|
||||
u64 mem_end, mem_start, mem_size;
|
||||
|
|
@ -206,12 +180,20 @@ static void __init init_node_memblock(void)
|
|||
case EFI_BOOT_SERVICES_DATA:
|
||||
case EFI_PERSISTENT_MEMORY:
|
||||
case EFI_CONVENTIONAL_MEMORY:
|
||||
add_numamem_region(mem_start, mem_end, mem_type);
|
||||
num_physpages += (mem_size >> PAGE_SHIFT);
|
||||
pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx Bytes\n",
|
||||
(u32)pa_to_nid(mem_start), mem_type, mem_start, mem_size);
|
||||
pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
|
||||
mem_start >> PAGE_SHIFT, mem_end >> PAGE_SHIFT, num_physpages);
|
||||
break;
|
||||
case EFI_PAL_CODE:
|
||||
case EFI_UNUSABLE_MEMORY:
|
||||
case EFI_ACPI_RECLAIM_MEMORY:
|
||||
add_numamem_region(mem_start, mem_end, mem_type);
|
||||
num_physpages += (mem_size >> PAGE_SHIFT);
|
||||
pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx Bytes\n",
|
||||
(u32)pa_to_nid(mem_start), mem_type, mem_start, mem_size);
|
||||
pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
|
||||
mem_start >> PAGE_SHIFT, mem_end >> PAGE_SHIFT, num_physpages);
|
||||
fallthrough;
|
||||
case EFI_RESERVED_TYPE:
|
||||
case EFI_RUNTIME_SERVICES_CODE:
|
||||
|
|
@ -249,22 +231,16 @@ int __init init_numa_memory(void)
|
|||
for (i = 0; i < NR_CPUS; i++)
|
||||
set_cpuid_to_node(i, NUMA_NO_NODE);
|
||||
|
||||
numa_reset_distance();
|
||||
nodes_clear(numa_nodes_parsed);
|
||||
nodes_clear(node_possible_map);
|
||||
nodes_clear(node_online_map);
|
||||
WARN_ON(memblock_clear_hotplug(0, PHYS_ADDR_MAX));
|
||||
|
||||
/* Parse SRAT and SLIT if provided by firmware. */
|
||||
ret = acpi_disabled ? fake_numa_init() : acpi_numa_init();
|
||||
if (!acpi_disabled)
|
||||
ret = numa_memblks_init(acpi_numa_init, false);
|
||||
else
|
||||
ret = numa_memblks_init(fake_numa_init, false);
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
node_possible_map = numa_nodes_parsed;
|
||||
if (WARN_ON(nodes_empty(node_possible_map)))
|
||||
return -EINVAL;
|
||||
|
||||
init_node_memblock();
|
||||
info_node_memblock();
|
||||
if (!memblock_validate_numa_coverage(SZ_1M))
|
||||
return -EINVAL;
|
||||
|
||||
|
|
|
|||
|
|
@ -17,6 +17,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
|
|||
{
|
||||
unsigned long n = (unsigned long) v - 1;
|
||||
unsigned int isa = cpu_data[n].isa_level;
|
||||
unsigned int prid = cpu_data[n].processor_id;
|
||||
unsigned int version = cpu_data[n].processor_id & 0xff;
|
||||
unsigned int fp_version = cpu_data[n].fpu_vers;
|
||||
|
||||
|
|
@ -37,6 +38,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
|
|||
seq_printf(m, "global_id\t\t: %d\n", cpu_data[n].global_id);
|
||||
seq_printf(m, "CPU Family\t\t: %s\n", __cpu_family[n]);
|
||||
seq_printf(m, "Model Name\t\t: %s\n", __cpu_full_name[n]);
|
||||
seq_printf(m, "PRID\t\t\t: %s (%08x)\n", id_to_core_name(prid), prid);
|
||||
seq_printf(m, "CPU Revision\t\t: 0x%02x\n", version);
|
||||
seq_printf(m, "FPU Revision\t\t: 0x%02x\n", fp_version);
|
||||
seq_printf(m, "CPU MHz\t\t\t: %llu.%02llu\n",
|
||||
|
|
|
|||
|
|
@ -1624,6 +1624,9 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i
|
|||
/* Direct jump skips 5 NOP instructions */
|
||||
else if (is_bpf_text_address((unsigned long)orig_call))
|
||||
orig_call += LOONGARCH_BPF_FENTRY_NBYTES;
|
||||
/* Module tracing not supported - cause kernel lockups */
|
||||
else if (is_module_text_address((unsigned long)orig_call))
|
||||
return -ENOTSUPP;
|
||||
|
||||
if (flags & BPF_TRAMP_F_CALL_ORIG) {
|
||||
move_addr(ctx, LOONGARCH_GPR_A0, (const u64)im);
|
||||
|
|
|
|||
|
|
@ -50,11 +50,11 @@ static int __init pcibios_init(void)
|
|||
*/
|
||||
lsize = cpu_last_level_cache_line_size();
|
||||
|
||||
BUG_ON(!lsize);
|
||||
if (lsize) {
|
||||
pci_dfl_cache_line_size = lsize >> 2;
|
||||
|
||||
pci_dfl_cache_line_size = lsize >> 2;
|
||||
|
||||
pr_debug("PCI: pci_cache_line_size set to %d bytes\n", lsize);
|
||||
pr_debug("PCI: pci_cache_line_size set to %d bytes\n", lsize);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -18,7 +18,7 @@
|
|||
|
||||
cpu@0 {
|
||||
device_type = "cpu";
|
||||
compatible = "mips,mips24KEc";
|
||||
compatible = "mips,mips34Kc";
|
||||
reg = <0>;
|
||||
};
|
||||
};
|
||||
|
|
|
|||
|
|
@ -692,7 +692,7 @@ unsigned long mips_stack_top(void)
|
|||
/* Space for the VDSO, data page & GIC user page */
|
||||
if (current->thread.abi) {
|
||||
top -= PAGE_ALIGN(current->thread.abi->vdso->size);
|
||||
top -= PAGE_SIZE;
|
||||
top -= VDSO_NR_PAGES * PAGE_SIZE;
|
||||
top -= mips_gic_present() ? PAGE_SIZE : 0;
|
||||
|
||||
/* Space to randomize the VDSO base */
|
||||
|
|
|
|||
|
|
@ -15,6 +15,7 @@
|
|||
#include <linux/mm.h>
|
||||
#include <linux/hugetlb.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/sort.h>
|
||||
|
||||
#include <asm/cpu.h>
|
||||
#include <asm/cpu-type.h>
|
||||
|
|
@ -508,54 +509,78 @@ static int __init set_ntlb(char *str)
|
|||
|
||||
__setup("ntlb=", set_ntlb);
|
||||
|
||||
/* Initialise all TLB entries with unique values */
|
||||
|
||||
/* Comparison function for EntryHi VPN fields. */
|
||||
static int r4k_vpn_cmp(const void *a, const void *b)
|
||||
{
|
||||
long v = *(unsigned long *)a - *(unsigned long *)b;
|
||||
int s = sizeof(long) > sizeof(int) ? sizeof(long) * 8 - 1: 0;
|
||||
return s ? (v != 0) | v >> s : v;
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialise all TLB entries with unique values that do not clash with
|
||||
* what we have been handed over and what we'll be using ourselves.
|
||||
*/
|
||||
static void r4k_tlb_uniquify(void)
|
||||
{
|
||||
int entry = num_wired_entries();
|
||||
unsigned long tlb_vpns[1 << MIPS_CONF1_TLBS_SIZE];
|
||||
int tlbsize = current_cpu_data.tlbsize;
|
||||
int start = num_wired_entries();
|
||||
unsigned long vpn_mask;
|
||||
int cnt, ent, idx, i;
|
||||
|
||||
vpn_mask = GENMASK(cpu_vmbits - 1, 13);
|
||||
vpn_mask |= IS_ENABLED(CONFIG_64BIT) ? 3ULL << 62 : 1 << 31;
|
||||
|
||||
htw_stop();
|
||||
|
||||
for (i = start, cnt = 0; i < tlbsize; i++, cnt++) {
|
||||
unsigned long vpn;
|
||||
|
||||
write_c0_index(i);
|
||||
mtc0_tlbr_hazard();
|
||||
tlb_read();
|
||||
tlb_read_hazard();
|
||||
vpn = read_c0_entryhi();
|
||||
vpn &= vpn_mask & PAGE_MASK;
|
||||
tlb_vpns[cnt] = vpn;
|
||||
|
||||
/* Prevent any large pages from overlapping regular ones. */
|
||||
write_c0_pagemask(read_c0_pagemask() & PM_DEFAULT_MASK);
|
||||
mtc0_tlbw_hazard();
|
||||
tlb_write_indexed();
|
||||
tlbw_use_hazard();
|
||||
}
|
||||
|
||||
sort(tlb_vpns, cnt, sizeof(tlb_vpns[0]), r4k_vpn_cmp, NULL);
|
||||
|
||||
write_c0_pagemask(PM_DEFAULT_MASK);
|
||||
write_c0_entrylo0(0);
|
||||
write_c0_entrylo1(0);
|
||||
|
||||
while (entry < current_cpu_data.tlbsize) {
|
||||
unsigned long asid_mask = cpu_asid_mask(¤t_cpu_data);
|
||||
unsigned long asid = 0;
|
||||
int idx;
|
||||
idx = 0;
|
||||
ent = tlbsize;
|
||||
for (i = start; i < tlbsize; i++)
|
||||
while (1) {
|
||||
unsigned long entryhi, vpn;
|
||||
|
||||
/* Skip wired MMID to make ginvt_mmid work */
|
||||
if (cpu_has_mmid)
|
||||
asid = MMID_KERNEL_WIRED + 1;
|
||||
entryhi = UNIQUE_ENTRYHI(ent);
|
||||
vpn = entryhi & vpn_mask & PAGE_MASK;
|
||||
|
||||
/* Check for match before using UNIQUE_ENTRYHI */
|
||||
do {
|
||||
if (cpu_has_mmid) {
|
||||
write_c0_memorymapid(asid);
|
||||
write_c0_entryhi(UNIQUE_ENTRYHI(entry));
|
||||
} else {
|
||||
write_c0_entryhi(UNIQUE_ENTRYHI(entry) | asid);
|
||||
}
|
||||
mtc0_tlbw_hazard();
|
||||
tlb_probe();
|
||||
tlb_probe_hazard();
|
||||
idx = read_c0_index();
|
||||
/* No match or match is on current entry */
|
||||
if (idx < 0 || idx == entry)
|
||||
if (idx >= cnt || vpn < tlb_vpns[idx]) {
|
||||
write_c0_entryhi(entryhi);
|
||||
write_c0_index(i);
|
||||
mtc0_tlbw_hazard();
|
||||
tlb_write_indexed();
|
||||
ent++;
|
||||
break;
|
||||
/*
|
||||
* If we hit a match, we need to try again with
|
||||
* a different ASID.
|
||||
*/
|
||||
asid++;
|
||||
} while (asid < asid_mask);
|
||||
|
||||
if (idx >= 0 && idx != entry)
|
||||
panic("Unable to uniquify TLB entry %d", idx);
|
||||
|
||||
write_c0_index(entry);
|
||||
mtc0_tlbw_hazard();
|
||||
tlb_write_indexed();
|
||||
entry++;
|
||||
}
|
||||
} else if (vpn == tlb_vpns[idx]) {
|
||||
ent++;
|
||||
} else {
|
||||
idx++;
|
||||
}
|
||||
}
|
||||
|
||||
tlbw_use_hazard();
|
||||
htw_start();
|
||||
|
|
@ -602,6 +627,7 @@ static void r4k_tlb_configure(void)
|
|||
|
||||
/* From this point on the ARC firmware is dead. */
|
||||
r4k_tlb_uniquify();
|
||||
local_flush_tlb_all();
|
||||
|
||||
/* Did I tell you that ARC SUCKS? */
|
||||
}
|
||||
|
|
|
|||
|
|
@ -241,16 +241,22 @@ mips_pci_controller:
|
|||
#endif
|
||||
|
||||
/*
|
||||
* Setup the Malta max (2GB) memory for PCI DMA in host bridge
|
||||
* in transparent addressing mode.
|
||||
* Set up memory mapping in host bridge for PCI DMA masters,
|
||||
* in transparent addressing mode. For EVA use the Malta
|
||||
* maximum of 2 GiB memory in the alias space at 0x80000000
|
||||
* as per PHYS_OFFSET. Otherwise use 256 MiB of memory in
|
||||
* the regular space, avoiding mapping the PCI MMIO window
|
||||
* for DMA as it seems to confuse the system controller's
|
||||
* logic, causing PCI MMIO to stop working.
|
||||
*/
|
||||
mask = PHYS_OFFSET | PCI_BASE_ADDRESS_MEM_PREFETCH;
|
||||
MSC_WRITE(MSC01_PCI_BAR0, mask);
|
||||
MSC_WRITE(MSC01_PCI_HEAD4, mask);
|
||||
mask = PHYS_OFFSET ? PHYS_OFFSET : 0xf0000000;
|
||||
MSC_WRITE(MSC01_PCI_BAR0,
|
||||
mask | PCI_BASE_ADDRESS_MEM_PREFETCH);
|
||||
MSC_WRITE(MSC01_PCI_HEAD4,
|
||||
PHYS_OFFSET | PCI_BASE_ADDRESS_MEM_PREFETCH);
|
||||
|
||||
mask &= MSC01_PCI_BAR0_SIZE_MSK;
|
||||
MSC_WRITE(MSC01_PCI_P2SCMSKL, mask);
|
||||
MSC_WRITE(MSC01_PCI_P2SCMAPL, mask);
|
||||
MSC_WRITE(MSC01_PCI_P2SCMAPL, PHYS_OFFSET);
|
||||
|
||||
/* Don't handle target retries indefinitely. */
|
||||
if ((data & MSC01_PCI_CFG_MAXRTRY_MSK) ==
|
||||
|
|
|
|||
|
|
@ -7,8 +7,8 @@
|
|||
|
||||
#define ANDES_VENDOR_ID 0x31e
|
||||
#define MICROCHIP_VENDOR_ID 0x029
|
||||
#define MIPS_VENDOR_ID 0x127
|
||||
#define SIFIVE_VENDOR_ID 0x489
|
||||
#define THEAD_VENDOR_ID 0x5b7
|
||||
#define MIPS_VENDOR_ID 0x722
|
||||
|
||||
#endif
|
||||
|
|
|
|||
|
|
@ -648,9 +648,9 @@ int sbi_debug_console_read(char *bytes, unsigned int num_bytes)
|
|||
|
||||
void __init sbi_init(void)
|
||||
{
|
||||
bool srst_power_off = false;
|
||||
int ret;
|
||||
|
||||
sbi_set_power_off();
|
||||
ret = sbi_get_spec_version();
|
||||
if (ret > 0)
|
||||
sbi_spec_version = ret;
|
||||
|
|
@ -683,6 +683,7 @@ void __init sbi_init(void)
|
|||
sbi_probe_extension(SBI_EXT_SRST)) {
|
||||
pr_info("SBI SRST extension detected\n");
|
||||
register_platform_power_off(sbi_srst_power_off);
|
||||
srst_power_off = true;
|
||||
sbi_srst_reboot_nb.notifier_call = sbi_srst_reboot;
|
||||
sbi_srst_reboot_nb.priority = 192;
|
||||
register_restart_handler(&sbi_srst_reboot_nb);
|
||||
|
|
@ -702,4 +703,7 @@ void __init sbi_init(void)
|
|||
__sbi_send_ipi = __sbi_send_ipi_v01;
|
||||
__sbi_rfence = __sbi_rfence_v01;
|
||||
}
|
||||
|
||||
if (!srst_power_off)
|
||||
sbi_set_power_off();
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1325,8 +1325,6 @@ static void uncore_pci_sub_driver_init(void)
|
|||
continue;
|
||||
|
||||
pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)];
|
||||
if (!pmu)
|
||||
continue;
|
||||
|
||||
if (uncore_pci_get_dev_die_info(pci_sub_dev, &die))
|
||||
continue;
|
||||
|
|
|
|||
|
|
@ -705,7 +705,11 @@ void *svm_alloc_permissions_map(unsigned long size, gfp_t gfp_mask)
|
|||
|
||||
static void svm_recalc_lbr_msr_intercepts(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
bool intercept = !(to_svm(vcpu)->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK);
|
||||
struct vcpu_svm *svm = to_svm(vcpu);
|
||||
bool intercept = !(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK);
|
||||
|
||||
if (intercept == svm->lbr_msrs_intercepted)
|
||||
return;
|
||||
|
||||
svm_set_intercept_for_msr(vcpu, MSR_IA32_LASTBRANCHFROMIP, MSR_TYPE_RW, intercept);
|
||||
svm_set_intercept_for_msr(vcpu, MSR_IA32_LASTBRANCHTOIP, MSR_TYPE_RW, intercept);
|
||||
|
|
@ -714,6 +718,8 @@ static void svm_recalc_lbr_msr_intercepts(struct kvm_vcpu *vcpu)
|
|||
|
||||
if (sev_es_guest(vcpu->kvm))
|
||||
svm_set_intercept_for_msr(vcpu, MSR_IA32_DEBUGCTLMSR, MSR_TYPE_RW, intercept);
|
||||
|
||||
svm->lbr_msrs_intercepted = intercept;
|
||||
}
|
||||
|
||||
void svm_vcpu_free_msrpm(void *msrpm)
|
||||
|
|
@ -1221,6 +1227,7 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu)
|
|||
}
|
||||
|
||||
svm->x2avic_msrs_intercepted = true;
|
||||
svm->lbr_msrs_intercepted = true;
|
||||
|
||||
svm->vmcb01.ptr = page_address(vmcb01_page);
|
||||
svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT);
|
||||
|
|
|
|||
|
|
@ -336,6 +336,7 @@ struct vcpu_svm {
|
|||
bool guest_state_loaded;
|
||||
|
||||
bool x2avic_msrs_intercepted;
|
||||
bool lbr_msrs_intercepted;
|
||||
|
||||
/* Guest GIF value, used when vGIF is not enabled */
|
||||
bool guest_gif;
|
||||
|
|
|
|||
|
|
@ -231,7 +231,7 @@ int sb_set_blocksize(struct super_block *sb, int size)
|
|||
|
||||
EXPORT_SYMBOL(sb_set_blocksize);
|
||||
|
||||
int sb_min_blocksize(struct super_block *sb, int size)
|
||||
int __must_check sb_min_blocksize(struct super_block *sb, int size)
|
||||
{
|
||||
int minsize = bdev_logical_block_size(sb->s_bdev);
|
||||
if (size < minsize)
|
||||
|
|
|
|||
|
|
@ -182,6 +182,7 @@ bool einj_initialized __ro_after_init;
|
|||
|
||||
static void __iomem *einj_param;
|
||||
static u32 v5param_size;
|
||||
static u32 v66param_size;
|
||||
static bool is_v2;
|
||||
|
||||
static void einj_exec_ctx_init(struct apei_exec_context *ctx)
|
||||
|
|
@ -283,6 +284,24 @@ static void check_vendor_extension(u64 paddr,
|
|||
acpi_os_unmap_iomem(p, sizeof(v));
|
||||
}
|
||||
|
||||
static u32 einjv2_init(struct einjv2_extension_struct *e)
|
||||
{
|
||||
if (e->revision != 1) {
|
||||
pr_info("Unknown v2 extension revision %u\n", e->revision);
|
||||
return 0;
|
||||
}
|
||||
if (e->length < sizeof(*e) || e->length > PAGE_SIZE) {
|
||||
pr_info(FW_BUG "Bad1 v2 extension length %u\n", e->length);
|
||||
return 0;
|
||||
}
|
||||
if ((e->length - sizeof(*e)) % sizeof(e->component_arr[0])) {
|
||||
pr_info(FW_BUG "Bad2 v2 extension length %u\n", e->length);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return (e->length - sizeof(*e)) / sizeof(e->component_arr[0]);
|
||||
}
|
||||
|
||||
static void __iomem *einj_get_parameter_address(void)
|
||||
{
|
||||
int i;
|
||||
|
|
@ -310,28 +329,21 @@ static void __iomem *einj_get_parameter_address(void)
|
|||
v5param_size = sizeof(v5param);
|
||||
p = acpi_os_map_iomem(pa_v5, sizeof(*p));
|
||||
if (p) {
|
||||
int offset, len;
|
||||
|
||||
memcpy_fromio(&v5param, p, v5param_size);
|
||||
acpi5 = 1;
|
||||
check_vendor_extension(pa_v5, &v5param);
|
||||
if (is_v2 && available_error_type & ACPI65_EINJV2_SUPP) {
|
||||
len = v5param.einjv2_struct.length;
|
||||
offset = offsetof(struct einjv2_extension_struct, component_arr);
|
||||
max_nr_components = (len - offset) /
|
||||
sizeof(v5param.einjv2_struct.component_arr[0]);
|
||||
/*
|
||||
* The first call to acpi_os_map_iomem above does not include the
|
||||
* component array, instead it is used to read and calculate maximum
|
||||
* number of components supported by the system. Below, the mapping
|
||||
* is expanded to include the component array.
|
||||
*/
|
||||
if (available_error_type & ACPI65_EINJV2_SUPP) {
|
||||
struct einjv2_extension_struct *e;
|
||||
|
||||
e = &v5param.einjv2_struct;
|
||||
max_nr_components = einjv2_init(e);
|
||||
|
||||
/* remap including einjv2_extension_struct */
|
||||
acpi_os_unmap_iomem(p, v5param_size);
|
||||
offset = offsetof(struct set_error_type_with_address, einjv2_struct);
|
||||
v5param_size = offset + struct_size(&v5param.einjv2_struct,
|
||||
component_arr, max_nr_components);
|
||||
p = acpi_os_map_iomem(pa_v5, v5param_size);
|
||||
v66param_size = v5param_size - sizeof(*e) + e->length;
|
||||
p = acpi_os_map_iomem(pa_v5, v66param_size);
|
||||
}
|
||||
|
||||
return p;
|
||||
}
|
||||
}
|
||||
|
|
@ -527,6 +539,7 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
|
|||
u64 param3, u64 param4)
|
||||
{
|
||||
struct apei_exec_context ctx;
|
||||
u32 param_size = is_v2 ? v66param_size : v5param_size;
|
||||
u64 val, trigger_paddr, timeout = FIRMWARE_TIMEOUT;
|
||||
int i, rc;
|
||||
|
||||
|
|
@ -539,11 +552,11 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
|
|||
if (acpi5) {
|
||||
struct set_error_type_with_address *v5param;
|
||||
|
||||
v5param = kmalloc(v5param_size, GFP_KERNEL);
|
||||
v5param = kmalloc(param_size, GFP_KERNEL);
|
||||
if (!v5param)
|
||||
return -ENOMEM;
|
||||
|
||||
memcpy_fromio(v5param, einj_param, v5param_size);
|
||||
memcpy_fromio(v5param, einj_param, param_size);
|
||||
v5param->type = type;
|
||||
if (type & ACPI5_VENDOR_BIT) {
|
||||
switch (vendor_flags) {
|
||||
|
|
@ -601,7 +614,7 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
|
|||
break;
|
||||
}
|
||||
}
|
||||
memcpy_toio(einj_param, v5param, v5param_size);
|
||||
memcpy_toio(einj_param, v5param, param_size);
|
||||
kfree(v5param);
|
||||
} else {
|
||||
rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE);
|
||||
|
|
@ -1132,9 +1145,14 @@ static void einj_remove(struct faux_device *fdev)
|
|||
struct apei_exec_context ctx;
|
||||
|
||||
if (einj_param) {
|
||||
acpi_size size = (acpi5) ?
|
||||
v5param_size :
|
||||
sizeof(struct einj_parameter);
|
||||
acpi_size size;
|
||||
|
||||
if (v66param_size)
|
||||
size = v66param_size;
|
||||
else if (acpi5)
|
||||
size = v5param_size;
|
||||
else
|
||||
size = sizeof(struct einj_parameter);
|
||||
|
||||
acpi_os_unmap_iomem(einj_param, size);
|
||||
if (vendor_errors.size)
|
||||
|
|
|
|||
|
|
@ -3006,6 +3006,16 @@ int ata_dev_configure(struct ata_device *dev)
|
|||
}
|
||||
|
||||
dev->n_sectors = ata_id_n_sectors(id);
|
||||
if (ata_id_is_locked(id)) {
|
||||
/*
|
||||
* If Security locked, set capacity to zero to prevent
|
||||
* any I/O, e.g. partition scanning, as any I/O to a
|
||||
* locked drive will result in user visible errors.
|
||||
*/
|
||||
ata_dev_info(dev,
|
||||
"Security locked, setting capacity to zero\n");
|
||||
dev->n_sectors = 0;
|
||||
}
|
||||
|
||||
/* get current R/W Multiple count setting */
|
||||
if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
|
||||
|
|
|
|||
|
|
@ -992,6 +992,13 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
|
|||
return;
|
||||
}
|
||||
|
||||
if (ata_id_is_locked(dev->id)) {
|
||||
/* Security locked */
|
||||
/* LOGICAL UNIT ACCESS NOT AUTHORIZED */
|
||||
ata_scsi_set_sense(dev, cmd, DATA_PROTECT, 0x74, 0x71);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
|
||||
ata_dev_dbg(dev,
|
||||
"Missing result TF: reporting aborted command\n");
|
||||
|
|
@ -4894,8 +4901,10 @@ void ata_scsi_dev_rescan(struct work_struct *work)
|
|||
spin_unlock_irqrestore(ap->lock, flags);
|
||||
if (do_resume) {
|
||||
ret = scsi_resume_device(sdev);
|
||||
if (ret == -EWOULDBLOCK)
|
||||
if (ret == -EWOULDBLOCK) {
|
||||
scsi_device_put(sdev);
|
||||
goto unlock_scan;
|
||||
}
|
||||
dev->flags &= ~ATA_DFLAG_RESUMING;
|
||||
}
|
||||
ret = scsi_rescan_device(sdev);
|
||||
|
|
|
|||
|
|
@ -888,12 +888,15 @@ static void device_resume_early(struct device *dev, pm_message_t state, bool asy
|
|||
TRACE_DEVICE(dev);
|
||||
TRACE_RESUME(0);
|
||||
|
||||
if (dev->power.syscore || dev->power.direct_complete)
|
||||
if (dev->power.direct_complete)
|
||||
goto Out;
|
||||
|
||||
if (!dev->power.is_late_suspended)
|
||||
goto Out;
|
||||
|
||||
if (dev->power.syscore)
|
||||
goto Skip;
|
||||
|
||||
if (!dpm_wait_for_superior(dev, async))
|
||||
goto Out;
|
||||
|
||||
|
|
@ -926,11 +929,11 @@ Run:
|
|||
|
||||
Skip:
|
||||
dev->power.is_late_suspended = false;
|
||||
pm_runtime_enable(dev);
|
||||
|
||||
Out:
|
||||
TRACE_RESUME(error);
|
||||
|
||||
pm_runtime_enable(dev);
|
||||
complete_all(&dev->power.completion);
|
||||
|
||||
if (error) {
|
||||
|
|
@ -1615,12 +1618,6 @@ static void device_suspend_late(struct device *dev, pm_message_t state, bool asy
|
|||
TRACE_DEVICE(dev);
|
||||
TRACE_SUSPEND(0);
|
||||
|
||||
/*
|
||||
* Disable runtime PM for the device without checking if there is a
|
||||
* pending resume request for it.
|
||||
*/
|
||||
__pm_runtime_disable(dev, false);
|
||||
|
||||
dpm_wait_for_subordinate(dev, async);
|
||||
|
||||
if (READ_ONCE(async_error))
|
||||
|
|
@ -1631,9 +1628,18 @@ static void device_suspend_late(struct device *dev, pm_message_t state, bool asy
|
|||
goto Complete;
|
||||
}
|
||||
|
||||
if (dev->power.syscore || dev->power.direct_complete)
|
||||
if (dev->power.direct_complete)
|
||||
goto Complete;
|
||||
|
||||
/*
|
||||
* Disable runtime PM for the device without checking if there is a
|
||||
* pending resume request for it.
|
||||
*/
|
||||
__pm_runtime_disable(dev, false);
|
||||
|
||||
if (dev->power.syscore)
|
||||
goto Skip;
|
||||
|
||||
if (dev->pm_domain) {
|
||||
info = "late power domain ";
|
||||
callback = pm_late_early_op(&dev->pm_domain->ops, state);
|
||||
|
|
@ -1664,6 +1670,7 @@ Run:
|
|||
WRITE_ONCE(async_error, error);
|
||||
dpm_save_failed_dev(dev_name(dev));
|
||||
pm_dev_err(dev, state, async ? " async late" : " late", error);
|
||||
pm_runtime_enable(dev);
|
||||
goto Complete;
|
||||
}
|
||||
dpm_propagate_wakeup_to_parent(dev);
|
||||
|
|
|
|||
|
|
@ -121,11 +121,11 @@ static SUNXI_CCU_GATE_HW(bus_r_ir_rx_clk, "bus-r-ir-rx",
|
|||
&r_apb0_clk.common.hw, 0x1cc, BIT(0), 0);
|
||||
|
||||
static SUNXI_CCU_GATE_HW(bus_r_dma_clk, "bus-r-dma",
|
||||
&r_apb0_clk.common.hw, 0x1dc, BIT(0), 0);
|
||||
&r_apb0_clk.common.hw, 0x1dc, BIT(0), CLK_IS_CRITICAL);
|
||||
static SUNXI_CCU_GATE_HW(bus_r_rtc_clk, "bus-r-rtc",
|
||||
&r_apb0_clk.common.hw, 0x20c, BIT(0), 0);
|
||||
static SUNXI_CCU_GATE_HW(bus_r_cpucfg_clk, "bus-r-cpucfg",
|
||||
&r_apb0_clk.common.hw, 0x22c, BIT(0), 0);
|
||||
&r_apb0_clk.common.hw, 0x22c, BIT(0), CLK_IS_CRITICAL);
|
||||
|
||||
static struct ccu_common *sun55i_a523_r_ccu_clks[] = {
|
||||
&r_ahb_clk.common,
|
||||
|
|
|
|||
|
|
@ -300,7 +300,7 @@ static struct ccu_nm pll_audio0_4x_clk = {
|
|||
.m = _SUNXI_CCU_DIV(16, 6),
|
||||
.sdm = _SUNXI_CCU_SDM(pll_audio0_sdm_table, BIT(24),
|
||||
0x178, BIT(31)),
|
||||
.min_rate = 180000000U,
|
||||
.min_rate = 90000000U,
|
||||
.max_rate = 3000000000U,
|
||||
.common = {
|
||||
.reg = 0x078,
|
||||
|
|
|
|||
|
|
@ -2548,10 +2548,17 @@ static int lineinfo_changed_notify(struct notifier_block *nb,
|
|||
container_of(nb, struct gpio_chardev_data, lineinfo_changed_nb);
|
||||
struct lineinfo_changed_ctx *ctx;
|
||||
struct gpio_desc *desc = data;
|
||||
struct file *fp;
|
||||
|
||||
if (!test_bit(gpio_chip_hwgpio(desc), cdev->watched_lines))
|
||||
return NOTIFY_DONE;
|
||||
|
||||
/* Keep the file descriptor alive for the duration of the notification. */
|
||||
fp = get_file_active(&cdev->fp);
|
||||
if (!fp)
|
||||
/* Chardev file descriptor was or is being released. */
|
||||
return NOTIFY_DONE;
|
||||
|
||||
/*
|
||||
* If this is called from atomic context (for instance: with a spinlock
|
||||
* taken by the atomic notifier chain), any sleeping calls must be done
|
||||
|
|
@ -2575,8 +2582,6 @@ static int lineinfo_changed_notify(struct notifier_block *nb,
|
|||
/* Keep the GPIO device alive until we emit the event. */
|
||||
ctx->gdev = gpio_device_get(desc->gdev);
|
||||
ctx->cdev = cdev;
|
||||
/* Keep the file descriptor alive too. */
|
||||
get_file(ctx->cdev->fp);
|
||||
|
||||
INIT_WORK(&ctx->work, lineinfo_changed_func);
|
||||
queue_work(ctx->gdev->line_state_wq, &ctx->work);
|
||||
|
|
|
|||
|
|
@ -3414,10 +3414,11 @@ int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
|
|||
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
|
||||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
|
||||
continue;
|
||||
/* skip CG for VCE/UVD, it's handled specially */
|
||||
/* skip CG for VCE/UVD/VPE, it's handled specially */
|
||||
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
|
||||
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
|
||||
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
|
||||
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VPE &&
|
||||
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
|
||||
adev->ip_blocks[i].version->funcs->set_powergating_state) {
|
||||
/* enable powergating to save power */
|
||||
|
|
|
|||
|
|
@ -1372,7 +1372,7 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
|
|||
mem->mem_type == AMDGPU_PL_MMIO_REMAP)) {
|
||||
flags |= AMDGPU_PTE_SYSTEM;
|
||||
|
||||
if (ttm->caching == ttm_cached)
|
||||
if (ttm && ttm->caching == ttm_cached)
|
||||
flags |= AMDGPU_PTE_SNOOPED;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -2078,7 +2078,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
|
|||
struct amdgpu_bo *bo = before->bo_va->base.bo;
|
||||
|
||||
amdgpu_vm_it_insert(before, &vm->va);
|
||||
if (before->flags & AMDGPU_PTE_PRT_FLAG(adev))
|
||||
if (before->flags & AMDGPU_VM_PAGE_PRT)
|
||||
amdgpu_vm_prt_get(adev);
|
||||
|
||||
if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
|
||||
|
|
@ -2093,7 +2093,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
|
|||
struct amdgpu_bo *bo = after->bo_va->base.bo;
|
||||
|
||||
amdgpu_vm_it_insert(after, &vm->va);
|
||||
if (after->flags & AMDGPU_PTE_PRT_FLAG(adev))
|
||||
if (after->flags & AMDGPU_VM_PAGE_PRT)
|
||||
amdgpu_vm_prt_get(adev);
|
||||
|
||||
if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
|
||||
|
|
|
|||
|
|
@ -5872,9 +5872,9 @@ static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
|
|||
if (flags & AMDGPU_IB_PREEMPTED)
|
||||
control |= INDIRECT_BUFFER_PRE_RESUME(1);
|
||||
|
||||
if (vmid)
|
||||
if (vmid && !ring->adev->gfx.rs64_enable)
|
||||
gfx_v11_0_ring_emit_de_meta(ring,
|
||||
(!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
|
||||
!amdgpu_sriov_vf(ring->adev) && (flags & AMDGPU_IB_PREEMPTED));
|
||||
}
|
||||
|
||||
amdgpu_ring_write(ring, header);
|
||||
|
|
|
|||
|
|
@ -141,7 +141,7 @@ static int vcn_v4_0_3_late_init(struct amdgpu_ip_block *ip_block)
|
|||
adev->vcn.supported_reset =
|
||||
amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
|
||||
|
||||
if (amdgpu_dpm_reset_vcn_is_supported(adev))
|
||||
if (amdgpu_dpm_reset_vcn_is_supported(adev) && !amdgpu_sriov_vf(adev))
|
||||
adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
|
||||
return 0;
|
||||
|
|
|
|||
|
|
@ -122,7 +122,9 @@ static int vcn_v5_0_1_late_init(struct amdgpu_ip_block *ip_block)
|
|||
|
||||
switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
|
||||
case IP_VERSION(13, 0, 12):
|
||||
if ((adev->psp.sos.fw_version >= 0x00450025) && amdgpu_dpm_reset_vcn_is_supported(adev))
|
||||
if ((adev->psp.sos.fw_version >= 0x00450025) &&
|
||||
amdgpu_dpm_reset_vcn_is_supported(adev) &&
|
||||
!amdgpu_sriov_vf(adev))
|
||||
adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
break;
|
||||
default:
|
||||
|
|
|
|||
|
|
@@ -3859,6 +3859,97 @@ void amdgpu_dm_update_connector_after_detect(
    update_subconnector_property(aconnector);
 }

+static bool are_sinks_equal(const struct dc_sink *sink1, const struct dc_sink *sink2)
+{
+   if (!sink1 || !sink2)
+       return false;
+   if (sink1->sink_signal != sink2->sink_signal)
+       return false;
+
+   if (sink1->dc_edid.length != sink2->dc_edid.length)
+       return false;
+
+   if (memcmp(sink1->dc_edid.raw_edid, sink2->dc_edid.raw_edid,
+              sink1->dc_edid.length) != 0)
+       return false;
+   return true;
+}
+
+/**
+ * DOC: hdmi_hpd_debounce_work
+ *
+ * HDMI HPD debounce delay in milliseconds. When an HDMI display toggles HPD
+ * (such as during power save transitions), this delay determines how long to
+ * wait before processing the HPD event. This allows distinguishing between a
+ * physical unplug (>hdmi_hpd_debounce_delay)
+ * and a spontaneous RX HPD toggle (<hdmi_hpd_debounce_delay).
+ *
+ * If the toggle is less than this delay, the driver compares sink capabilities
+ * and permits a hotplug event if they changed.
+ *
+ * The default value of 1500ms was chosen based on experimental testing with
+ * various monitors that exhibit spontaneous HPD toggling behavior.
+ */
+static void hdmi_hpd_debounce_work(struct work_struct *work)
+{
+   struct amdgpu_dm_connector *aconnector =
+       container_of(to_delayed_work(work), struct amdgpu_dm_connector,
+                    hdmi_hpd_debounce_work);
+   struct drm_connector *connector = &aconnector->base;
+   struct drm_device *dev = connector->dev;
+   struct amdgpu_device *adev = drm_to_adev(dev);
+   struct dc *dc = aconnector->dc_link->ctx->dc;
+   bool fake_reconnect = false;
+   bool reallow_idle = false;
+   bool ret = false;
+
+   guard(mutex)(&aconnector->hpd_lock);
+
+   /* Re-detect the display */
+   scoped_guard(mutex, &adev->dm.dc_lock) {
+       if (dc->caps.ips_support && dc->ctx->dmub_srv->idle_allowed) {
+           dc_allow_idle_optimizations(dc, false);
+           reallow_idle = true;
+       }
+       ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+   }
+
+   if (ret) {
+       /* Apply workaround delay for certain panels */
+       apply_delay_after_dpcd_poweroff(adev, aconnector->dc_sink);
+       /* Compare sinks to determine if this was a spontaneous HPD toggle */
+       if (are_sinks_equal(aconnector->dc_link->local_sink, aconnector->hdmi_prev_sink)) {
+           /*
+            * Sinks match - this was a spontaneous HDMI HPD toggle.
+            */
+           drm_dbg_kms(dev, "HDMI HPD: Sink unchanged after debounce, internal re-enable\n");
+           fake_reconnect = true;
+       }
+
+       /* Update connector state */
+       amdgpu_dm_update_connector_after_detect(aconnector);
+
+       drm_modeset_lock_all(dev);
+       dm_restore_drm_connector_state(dev, connector);
+       drm_modeset_unlock_all(dev);
+
+       /* Only notify OS if sink actually changed */
+       if (!fake_reconnect && aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+           drm_kms_helper_hotplug_event(dev);
+   }
+
+   /* Release the cached sink reference */
+   if (aconnector->hdmi_prev_sink) {
+       dc_sink_release(aconnector->hdmi_prev_sink);
+       aconnector->hdmi_prev_sink = NULL;
+   }
+
+   scoped_guard(mutex, &adev->dm.dc_lock) {
+       if (reallow_idle && dc->caps.ips_support)
+           dc_allow_idle_optimizations(dc, true);
+   }
+}
+
 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
 {
    struct drm_connector *connector = &aconnector->base;
@@ -3868,6 +3959,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
    struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
    struct dc *dc = aconnector->dc_link->ctx->dc;
    bool ret = false;
+   bool debounce_required = false;

    if (adev->dm.disable_hpd_irq)
        return;
@@ -3890,6 +3982,14 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
    if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
        drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");

+   /*
+    * Check for HDMI disconnect with debounce enabled.
+    */
+   debounce_required = (aconnector->hdmi_hpd_debounce_delay_ms > 0 &&
+                        dc_is_hdmi_signal(aconnector->dc_link->connector_signal) &&
+                        new_connection_type == dc_connection_none &&
+                        aconnector->dc_link->local_sink != NULL);
+
    if (aconnector->base.force && new_connection_type == dc_connection_none) {
        emulated_link_detect(aconnector->dc_link);
@@ -3899,7 +3999,34 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)

        if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
            drm_kms_helper_connector_hotplug_event(connector);
+   } else if (debounce_required) {
+       /*
+        * HDMI disconnect detected - schedule delayed work instead of
+        * processing immediately. This allows us to coalesce spurious
+        * HDMI signals from physical unplugs.
+        */
+       drm_dbg_kms(dev, "HDMI HPD: Disconnect detected, scheduling debounce work (%u ms)\n",
+                   aconnector->hdmi_hpd_debounce_delay_ms);
+
+       /* Cache the current sink for later comparison */
+       if (aconnector->hdmi_prev_sink)
+           dc_sink_release(aconnector->hdmi_prev_sink);
+       aconnector->hdmi_prev_sink = aconnector->dc_link->local_sink;
+       if (aconnector->hdmi_prev_sink)
+           dc_sink_retain(aconnector->hdmi_prev_sink);
+
+       /* Schedule delayed detection. */
+       if (mod_delayed_work(system_wq,
+                            &aconnector->hdmi_hpd_debounce_work,
+                            msecs_to_jiffies(aconnector->hdmi_hpd_debounce_delay_ms)))
+           drm_dbg_kms(dev, "HDMI HPD: Re-scheduled debounce work\n");
+
    } else {
+
+       /* If the aconnector->hdmi_hpd_debounce_work is scheduled, exit early */
+       if (delayed_work_pending(&aconnector->hdmi_hpd_debounce_work))
+           return;
+
        scoped_guard(mutex, &adev->dm.dc_lock) {
            dc_exit_ips_for_hw_access(dc);
            ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
@@ -7388,6 +7515,13 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
    if (aconnector->mst_mgr.dev)
        drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);

+   /* Cancel and flush any pending HDMI HPD debounce work */
+   cancel_delayed_work_sync(&aconnector->hdmi_hpd_debounce_work);
+   if (aconnector->hdmi_prev_sink) {
+       dc_sink_release(aconnector->hdmi_prev_sink);
+       aconnector->hdmi_prev_sink = NULL;
+   }
+
    if (aconnector->bl_idx != -1) {
        backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]);
        dm->backlight_dev[aconnector->bl_idx] = NULL;
@@ -8549,6 +8683,10 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
    mutex_init(&aconnector->hpd_lock);
    mutex_init(&aconnector->handle_mst_msg_ready);

+   aconnector->hdmi_hpd_debounce_delay_ms = AMDGPU_DM_HDMI_HPD_DEBOUNCE_MS;
+   INIT_DELAYED_WORK(&aconnector->hdmi_hpd_debounce_work, hdmi_hpd_debounce_work);
+   aconnector->hdmi_prev_sink = NULL;
+
    /*
     * configure support HPD hot plug connector_>polled default value is 0
     * which means HPD hot plug not supported
@@ -59,6 +59,7 @@

 #define AMDGPU_HDR_MULT_DEFAULT (0x100000000LL)

+#define AMDGPU_DM_HDMI_HPD_DEBOUNCE_MS 1500
 /*
 #include "include/amdgpu_dal_power_if.h"
 #include "amdgpu_dm_irq.h"
@@ -819,6 +820,11 @@ struct amdgpu_dm_connector {
    bool pack_sdp_v1_3;
    enum adaptive_sync_type as_type;
    struct amdgpu_hdmi_vsdb_info vsdb_info;
+
+   /* HDMI HPD debounce support */
+   unsigned int hdmi_hpd_debounce_delay_ms;
+   struct delayed_work hdmi_hpd_debounce_work;
+   struct dc_sink *hdmi_prev_sink;
 };

 static inline void amdgpu_dm_set_mst_status(uint8_t *status,
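The debounce plumbing above is an instance of the standard delayed-work idiom: (re)arm the work when the event fires, let the handler do the slow re-detection, and cancel the work synchronously before the owning object is torn down. A rough, self-contained sketch of that idiom follows; the my_connector/my_hpd_debounce_fn names are made up for illustration and are not the driver's own symbols.

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct my_connector {
    struct delayed_work debounce_work;
    unsigned int debounce_ms;
};

static void my_hpd_debounce_fn(struct work_struct *work)
{
    struct my_connector *c = container_of(to_delayed_work(work),
                                          struct my_connector, debounce_work);
    /* re-detect the link here, compare the cached sink with the current one, ... */
    (void)c;
}

static void my_connector_init(struct my_connector *c)
{
    c->debounce_ms = 1500;
    INIT_DELAYED_WORK(&c->debounce_work, my_hpd_debounce_fn);
}

static void my_connector_hpd_low(struct my_connector *c)
{
    /* coalesce repeated toggles: re-arm instead of queueing a second work item */
    mod_delayed_work(system_wq, &c->debounce_work,
                     msecs_to_jiffies(c->debounce_ms));
}

static void my_connector_destroy(struct my_connector *c)
{
    /* make sure the handler is not running before the connector goes away */
    cancel_delayed_work_sync(&c->debounce_work);
}

mod_delayed_work() re-arms an already-pending work item rather than queueing another instance, which is what lets repeated HPD toggles inside the debounce window collapse into a single detection pass.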
@@ -884,26 +884,28 @@ struct dsc_mst_fairness_params {
 };

 #if defined(CONFIG_DRM_AMD_DC_FP)
-static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link)
+static uint64_t kbps_to_pbn(int kbps, bool is_peak_pbn)
 {
-   u8 link_coding_cap;
-   uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B;
+   uint64_t effective_kbps = (uint64_t)kbps;

-   link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link);
-   if (link_coding_cap == DP_128b_132b_ENCODING)
-       fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B;
+   if (is_peak_pbn) { // add 0.6% (1006/1000) overhead into effective kbps
+       effective_kbps *= 1006;
+       effective_kbps = div_u64(effective_kbps, 1000);
+   }

-   return fec_overhead_multiplier_x1000;
+   return (uint64_t) DIV64_U64_ROUND_UP(effective_kbps * 64, (54 * 8 * 1000));
 }

-static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000)
+static uint32_t pbn_to_kbps(unsigned int pbn, bool with_margin)
 {
-   u64 peak_kbps = kbps;
+   uint64_t pbn_effective = (uint64_t)pbn;

-   peak_kbps *= 1006;
-   peak_kbps *= fec_overhead_multiplier_x1000;
-   peak_kbps = div_u64(peak_kbps, 1000 * 1000);
-   return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
+   if (with_margin) // deduct 0.6% (994/1000) overhead from effective pbn
+       pbn_effective *= (1000000 / PEAK_FACTOR_X1000);
+   else
+       pbn_effective *= 1000;
+
+   return DIV_U64_ROUND_UP(pbn_effective * 8 * 54, 64);
 }

 static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,
@@ -974,7 +976,7 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
    dc_dsc_get_default_config_option(param.sink->ctx->dc, &dsc_options);
    dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16;

-   kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
+   kbps = pbn_to_kbps(pbn, false);
    dc_dsc_compute_config(
        param.sink->ctx->dc->res_pool->dscs[0],
        &param.sink->dsc_caps.dsc_dec_caps,
@@ -1003,12 +1005,11 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
    int link_timeslots_used;
    int fair_pbn_alloc;
    int ret = 0;
-   uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);

    for (i = 0; i < count; i++) {
        if (vars[i + k].dsc_enabled) {
            initial_slack[i] =
-               kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn;
+               kbps_to_pbn(params[i].bw_range.max_kbps, false) - vars[i + k].pbn;
            bpp_increased[i] = false;
            remaining_to_increase += 1;
        } else {
@@ -1104,7 +1105,6 @@ static int try_disable_dsc(struct drm_atomic_state *state,
    int next_index;
    int remaining_to_try = 0;
    int ret;
-   uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
    int var_pbn;

    for (i = 0; i < count; i++) {
@@ -1137,7 +1137,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,

        DRM_DEBUG_DRIVER("MST_DSC index #%d, try no compression\n", next_index);
        var_pbn = vars[next_index].pbn;
-       vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+       vars[next_index].pbn = kbps_to_pbn(params[next_index].bw_range.stream_kbps, true);
        ret = drm_dp_atomic_find_time_slots(state,
                                            params[next_index].port->mgr,
                                            params[next_index].port,
@@ -1197,7 +1197,6 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
    int count = 0;
    int i, k, ret;
    bool debugfs_overwrite = false;
-   uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
    struct drm_connector_state *new_conn_state;

    memset(params, 0, sizeof(params));
@@ -1278,7 +1277,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
    DRM_DEBUG_DRIVER("MST_DSC Try no compression\n");
    for (i = 0; i < count; i++) {
        vars[i + k].aconnector = params[i].aconnector;
-       vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+       vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false);
        vars[i + k].dsc_enabled = false;
        vars[i + k].bpp_x16 = 0;
        ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
@@ -1300,7 +1299,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
    DRM_DEBUG_DRIVER("MST_DSC Try max compression\n");
    for (i = 0; i < count; i++) {
        if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
-           vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000);
+           vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.min_kbps, false);
            vars[i + k].dsc_enabled = true;
            vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
            ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -1308,7 +1307,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
            if (ret < 0)
                return ret;
        } else {
-           vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+           vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false);
            vars[i + k].dsc_enabled = false;
            vars[i + k].bpp_x16 = 0;
            ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -1763,18 +1762,6 @@ clean_exit:
    return ret;
 }

-static uint32_t kbps_from_pbn(unsigned int pbn)
-{
-   uint64_t kbps = (uint64_t)pbn;
-
-   kbps *= (1000000 / PEAK_FACTOR_X1000);
-   kbps *= 8;
-   kbps *= 54;
-   kbps /= 64;
-
-   return (uint32_t)kbps;
-}
-
 static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
                                           struct dc_dsc_bw_range *bw_range)
 {
@@ -1873,7 +1860,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
                    dc_link_get_highest_encoding_format(stream->link));
    cur_link_settings = stream->link->verified_link_cap;
    root_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, &cur_link_settings);
-   virtual_channel_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);
+   virtual_channel_bw_in_kbps = pbn_to_kbps(aconnector->mst_output_port->full_pbn, true);

    /* pick the end to end bw bottleneck */
    end_to_end_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
@@ -1926,7 +1913,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
        immediate_upstream_port = aconnector->mst_output_port->parent->port_parent;

        if (immediate_upstream_port) {
-           virtual_channel_bw_in_kbps = kbps_from_pbn(immediate_upstream_port->full_pbn);
+           virtual_channel_bw_in_kbps = pbn_to_kbps(immediate_upstream_port->full_pbn, true);
            virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
        } else {
            /* For topology LCT 1 case - only one mstb*/
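For reference, the PBN unit these helpers convert to is defined by DisplayPort MST as 54/64 MB/s, i.e. 54 * 8 * 1000 / 64 = 6750 kbit/s per PBN, and the 1006/1000 factor is the 0.6% margin applied to peak allocations; that is exactly the arithmetic kbps_to_pbn() and pbn_to_kbps() perform above. A small standalone sketch of the same calculation (plain userspace C, with a made-up demo_kbps_to_pbn() helper rather than the kernel function):

#include <stdint.h>
#include <stdio.h>

/* Mirror of the kernel's rounding: ceil(kbps * 64 / (54 * 8 * 1000)). */
static uint64_t demo_kbps_to_pbn(uint64_t kbps, int peak)
{
    if (peak)
        kbps = kbps * 1006 / 1000;          /* +0.6% peak-allocation margin */
    return (kbps * 64 + (54 * 8 * 1000) - 1) / (54 * 8 * 1000);
}

int main(void)
{
    /* e.g. a 154 MHz pixel clock at 30 bpp -> 4,620,000 kbit/s */
    uint64_t kbps = 154000ULL * 30;

    printf("%llu kbps -> %llu PBN\n",
           (unsigned long long)kbps,
           (unsigned long long)demo_kbps_to_pbn(kbps, 1)); /* prints 689 */
    return 0;
}

With those numbers, 4,620,000 kbit/s with the 0.6% margin rounds up to 689 PBN, since each PBN accounts for 6750 kbit/s of payload bandwidth.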
@@ -394,6 +394,8 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
    display_count = dcn35_get_active_display_cnt_wa(dc, context, &all_active_disps);
    if (new_clocks->dtbclk_en && !new_clocks->ref_dtbclk_khz)
        new_clocks->ref_dtbclk_khz = 600000;
+   else if (!new_clocks->dtbclk_en && new_clocks->ref_dtbclk_khz > 590000)
+       new_clocks->ref_dtbclk_khz = 0;

    /*
     * if it is safe to lower, but we are already in the lower state, we don't have to do anything
@@ -435,7 +437,7 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,

        actual_dtbclk = REG_READ(CLK1_CLK4_CURRENT_CNT);

-       if (actual_dtbclk) {
+       if (actual_dtbclk > 590000) {
            clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
            clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
        }
@@ -1411,7 +1411,7 @@ static void dccg35_set_dtbclk_dto(
            __func__, params->otg_inst, params->pixclk_khz,
            params->ref_dtbclk_khz, req_dtbclk_khz, phase, modulo);

-   } else {
+   } else if (!params->ref_dtbclk_khz && !req_dtbclk_khz) {
        switch (params->otg_inst) {
        case 0:
            REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, 0);
@@ -614,6 +614,14 @@ void dcn20_dpp_pg_control(
         *      DOMAIN11_PGFSM_PWR_STATUS, pwr_status,
         *      1, 1000);
         */
+
+       /* Force disable cursor on plane powerdown on DPP 5 using dpp_force_disable_cursor */
+       if (!power_on) {
+           struct dpp *dpp5 = hws->ctx->dc->res_pool->dpps[dpp_inst];
+           if (dpp5 && dpp5->funcs->dpp_force_disable_cursor)
+               dpp5->funcs->dpp_force_disable_cursor(dpp5);
+       }
+
        break;
    default:
        BREAK_TO_DEBUGGER();
@@ -1691,7 +1691,7 @@ static bool retrieve_link_cap(struct dc_link *link)
    union edp_configuration_cap edp_config_cap;
    union dp_downstream_port_present ds_port = { 0 };
    enum dc_status status = DC_ERROR_UNEXPECTED;
-   uint32_t read_dpcd_retry_cnt = 3;
+   uint32_t read_dpcd_retry_cnt = 20;
    int i;
    struct dp_sink_hw_fw_revision dp_hw_fw_revision;
    const uint32_t post_oui_delay = 30; // 30ms
@@ -1734,12 +1734,13 @@ static bool retrieve_link_cap(struct dc_link *link)
    }

    dpcd_set_source_specific_data(link);
-   /* Sink may need to configure internals based on vendor, so allow some
-    * time before proceeding with possibly vendor specific transactions
-    */
-   msleep(post_oui_delay);

    for (i = 0; i < read_dpcd_retry_cnt; i++) {
+       /*
+        * Sink may need to configure internals based on vendor, so allow some
+        * time before proceeding with possibly vendor specific transactions
+        */
+       msleep(post_oui_delay);
        status = core_link_read_dpcd(
                link,
                DP_DPCD_REV,
@@ -210,7 +210,7 @@ static struct drm_property_blob *create_in_format_blob(struct drm_device *dev,
    formats_size = sizeof(__u32) * plane->format_count;
    if (WARN_ON(!formats_size)) {
        /* 0 formats are never expected */
-       return 0;
+       return ERR_PTR(-EINVAL);
    }

    modifiers_size =
@@ -226,7 +226,7 @@ static struct drm_property_blob *create_in_format_blob(struct drm_device *dev,

    blob = drm_property_create_blob(dev, blob_size, NULL);
    if (IS_ERR(blob))
-       return NULL;
+       return blob;

    blob_data = blob->data;
    blob_data->version = FORMAT_BLOB_CURRENT;
@@ -39,14 +39,12 @@ bool intel_encoder_is_c10phy(struct intel_encoder *encoder)
    struct intel_display *display = to_intel_display(encoder);
    enum phy phy = intel_encoder_to_phy(encoder);

-   /* PTL doesn't have a PHY connected to PORT B; as such,
-    * there will never be a case where PTL uses PHY B.
-    * WCL uses PORT A and B with the C10 PHY.
-    * Reusing the condition for WCL and extending it for PORT B
-    * should not cause any issues for PTL.
-    */
-   if (display->platform.pantherlake && phy < PHY_C)
-       return true;
+   if (display->platform.pantherlake) {
+       if (display->platform.pantherlake_wildcatlake)
+           return phy <= PHY_B;
+       else
+           return phy == PHY_A;
+   }

    if ((display->platform.lunarlake || display->platform.meteorlake) && phy < PHY_C)
        return true;
@@ -1404,8 +1404,20 @@ static const struct platform_desc bmg_desc = {
    PLATFORM_GROUP(dgfx),
 };

+static const u16 wcl_ids[] = {
+   INTEL_WCL_IDS(ID),
+   0
+};
+
 static const struct platform_desc ptl_desc = {
    PLATFORM(pantherlake),
+   .subplatforms = (const struct subplatform_desc[]) {
+       {
+           SUBPLATFORM(pantherlake, wildcatlake),
+           .pciidlist = wcl_ids,
+       },
+       {},
+   }
 };

 __diag_pop();
@@ -1482,6 +1494,7 @@ static const struct {
    INTEL_LNL_IDS(INTEL_DISPLAY_DEVICE, &lnl_desc),
    INTEL_BMG_IDS(INTEL_DISPLAY_DEVICE, &bmg_desc),
    INTEL_PTL_IDS(INTEL_DISPLAY_DEVICE, &ptl_desc),
+   INTEL_WCL_IDS(INTEL_DISPLAY_DEVICE, &ptl_desc),
 };

 static const struct {
@@ -101,7 +101,9 @@ struct pci_dev;
    /* Display ver 14.1 (based on GMD ID) */ \
    func(battlemage) \
    /* Display ver 30 (based on GMD ID) */ \
-   func(pantherlake)
+   func(pantherlake) \
+   func(pantherlake_wildcatlake)


 #define __MEMBER(name) unsigned long name:1;
 #define __COUNT(x) 1 +
@@ -127,6 +127,9 @@ static bool dmc_firmware_param_disabled(struct intel_display *display)
 #define DISPLAY_VER13_DMC_MAX_FW_SIZE	0x20000
 #define DISPLAY_VER12_DMC_MAX_FW_SIZE	ICL_DMC_MAX_FW_SIZE

+#define XE3LPD_3002_DMC_PATH	DMC_PATH(xe3lpd_3002)
+MODULE_FIRMWARE(XE3LPD_3002_DMC_PATH);
+
 #define XE3LPD_DMC_PATH		DMC_PATH(xe3lpd)
 MODULE_FIRMWARE(XE3LPD_DMC_PATH);
@@ -183,9 +186,10 @@ static const char *dmc_firmware_default(struct intel_display *display, u32 *size
 {
    const char *fw_path = NULL;
    u32 max_fw_size = 0;

-   if (DISPLAY_VERx100(display) == 3002 ||
-       DISPLAY_VERx100(display) == 3000) {
+   if (DISPLAY_VERx100(display) == 3002) {
+       fw_path = XE3LPD_3002_DMC_PATH;
+       max_fw_size = XE2LPD_DMC_MAX_FW_SIZE;
+   } else if (DISPLAY_VERx100(display) == 3000) {
        fw_path = XE3LPD_DMC_PATH;
        max_fw_size = XE2LPD_DMC_MAX_FW_SIZE;
    } else if (DISPLAY_VERx100(display) == 2000) {
@@ -159,6 +159,8 @@ nvkm_falcon_fw_dtor(struct nvkm_falcon_fw *fw)
    nvkm_memory_unref(&fw->inst);
    nvkm_falcon_fw_dtor_sigs(fw);
    nvkm_firmware_dtor(&fw->fw);
+   kfree(fw->boot);
+   fw->boot = NULL;
 }

 static const struct nvkm_firmware_func
@@ -360,13 +360,6 @@ static bool radeon_fence_is_signaled(struct dma_fence *f)
    if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
        return true;

-   if (down_read_trylock(&rdev->exclusive_lock)) {
-       radeon_fence_process(rdev, ring);
-       up_read(&rdev->exclusive_lock);
-
-       if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
-           return true;
-   }
    return false;
 }
@@ -3148,6 +3148,7 @@ static int tegra_dc_couple(struct tegra_dc *dc)
        dc->client.parent = &parent->client;

        dev_dbg(dc->dev, "coupled to %s\n", dev_name(companion));
+       put_device(companion);
    }

    return 0;
@@ -913,15 +913,6 @@ static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
    u32 value;
    int err;

-   /* If the bootloader enabled DSI it needs to be disabled
-    * in order for the panel initialization commands to be
-    * properly sent.
-    */
-   value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
-
-   if (value & DSI_POWER_CONTROL_ENABLE)
-       tegra_dsi_disable(dsi);
-
    err = tegra_dsi_prepare(dsi);
    if (err < 0) {
        dev_err(dsi->dev, "failed to prepare: %d\n", err);
@@ -114,9 +114,12 @@ int tegra_drm_ioctl_channel_open(struct drm_device *drm, void *data, struct drm_
    if (err)
        goto put_channel;

-   if (supported)
+   if (supported) {
+       struct pid *pid = get_task_pid(current, PIDTYPE_TGID);
+
        context->memory_context = host1x_memory_context_alloc(
-           host, client->base.dev, get_task_pid(current, PIDTYPE_TGID));
+           host, client->base.dev, pid);
+       put_pid(pid);
+   }

    if (IS_ERR(context->memory_context)) {
        if (PTR_ERR(context->memory_context) != -EOPNOTSUPP) {
@@ -13,7 +13,6 @@ config DRM_XE
    select TMPFS
    select DRM_BUDDY
-   select DRM_CLIENT_SELECTION
    select DRM_EXEC
    select DRM_KMS_HELPER
    select DRM_KUNIT_TEST_HELPERS if DRM_XE_KUNIT_TEST != n
    select DRM_PANEL
@@ -49,7 +49,7 @@ static void read_l3cc_table(struct xe_gt *gt,
    fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
    if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
        xe_force_wake_put(gt_to_fw(gt), fw_ref);
-       KUNIT_ASSERT_TRUE_MSG(test, true, "Forcewake Failed.\n");
+       KUNIT_FAIL_AND_ABORT(test, "Forcewake Failed.\n");
    }

    for (i = 0; i < info->num_mocs_regs; i++) {
@@ -847,22 +847,6 @@ static int xe_irq_msix_init(struct xe_device *xe)
    return 0;
 }

-static irqreturn_t guc2host_irq_handler(int irq, void *arg)
-{
-   struct xe_device *xe = arg;
-   struct xe_tile *tile;
-   u8 id;
-
-   if (!atomic_read(&xe->irq.enabled))
-       return IRQ_NONE;
-
-   for_each_tile(tile, xe, id)
-       xe_guc_irq_handler(&tile->primary_gt->uc.guc,
-                          GUC_INTR_GUC2HOST);
-
-   return IRQ_HANDLED;
-}
-
 static irqreturn_t xe_irq_msix_default_hwe_handler(int irq, void *arg)
 {
    unsigned int tile_id, gt_id;
@@ -979,7 +963,7 @@ int xe_irq_msix_request_irqs(struct xe_device *xe)
    u16 msix;

    msix = GUC2HOST_MSIX;
-   err = xe_irq_msix_request_irq(xe, guc2host_irq_handler, xe,
+   err = xe_irq_msix_request_irq(xe, xe_irq_handler(xe), xe,
                                  DRIVER_NAME "-guc2host", false, &msix);
    if (err)
        return err;
@@ -375,6 +375,7 @@ static const struct pci_device_id pciidlist[] = {
    INTEL_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc),
    INTEL_BMG_IDS(INTEL_VGA_DEVICE, &bmg_desc),
    INTEL_PTL_IDS(INTEL_VGA_DEVICE, &ptl_desc),
+   INTEL_WCL_IDS(INTEL_VGA_DEVICE, &ptl_desc),
    { }
 };
 MODULE_DEVICE_TABLE(pci, pciidlist);
@@ -3369,8 +3369,10 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
                 op == DRM_XE_VM_BIND_OP_PREFETCH) ||
        XE_IOCTL_DBG(xe, prefetch_region &&
                 op != DRM_XE_VM_BIND_OP_PREFETCH) ||
-       XE_IOCTL_DBG(xe, (prefetch_region != DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC &&
-                     !(BIT(prefetch_region) & xe->info.mem_region_mask))) ||
+       XE_IOCTL_DBG(xe, (prefetch_region != DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC &&
+                     /* Guard against undefined shift in BIT(prefetch_region) */
+                     (prefetch_region >= (sizeof(xe->info.mem_region_mask) * 8) ||
+                      !(BIT(prefetch_region) & xe->info.mem_region_mask)))) ||
        XE_IOCTL_DBG(xe, obj &&
                 op == DRM_XE_VM_BIND_OP_UNMAP) ||
        XE_IOCTL_DBG(xe, (flags & DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET) &&
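The extra range check matters because, in C, shifting by a count greater than or equal to the width of the left operand is undefined behavior, so BIT(prefetch_region) is not guaranteed to evaluate to 0 for an out-of-range index. A minimal userspace illustration of the safe pattern (not kernel code; demo_valid_region() is a made-up helper):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Validate an index before using it as a shift count, mirroring the guard above. */
static bool demo_valid_region(uint32_t mask, uint32_t region)
{
    if (region >= sizeof(mask) * 8)   /* shift count must stay below the type width */
        return false;
    return (mask & ((uint32_t)1 << region)) != 0;
}

int main(void)
{
    uint32_t mask = 0x3;    /* regions 0 and 1 exist */

    printf("%d %d %d\n",
           demo_valid_region(mask, 1),    /* 1: present */
           demo_valid_region(mask, 5),    /* 0: absent */
           demo_valid_region(mask, 40));  /* 0: rejected before shifting */
    return 0;
}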
@@ -194,6 +194,8 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
        if (rc)
            goto cleanup;

+       mp2_ops->stop(privdata, cl_data->sensor_idx[i]);
+       amd_sfh_wait_for_response(privdata, cl_data->sensor_idx[i], DISABLE_SENSOR);
        writel(0, privdata->mmio + amd_get_p2c_val(privdata, 0));
        mp2_ops->start(privdata, info);
        status = amd_sfh_wait_for_response
@@ -355,6 +355,7 @@ static const struct apple_key_translation swapped_fn_leftctrl_keys[] = {

 static const struct apple_non_apple_keyboard non_apple_keyboards[] = {
    { "SONiX USB DEVICE" },
+   { "SONiX AK870 PRO" },
    { "Keychron" },
    { "AONE" },
    { "GANSS" },
@@ -553,9 +553,8 @@ static void corsair_void_add_battery(struct corsair_void_drvdata *drvdata)

    if (IS_ERR(new_supply)) {
        hid_err(drvdata->hid_dev,
-           "failed to register battery '%s' (reason: %ld)\n",
-           drvdata->battery_desc.name,
-           PTR_ERR(new_supply));
+           "failed to register battery '%s' (reason: %pe)\n",
+           drvdata->battery_desc.name, new_supply);
        return;
    }
@@ -75,7 +75,8 @@ static const __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
         */
        mouse_button_fixup(hdev, rdesc, *rsize, 20, 28, 22, 14, 8);
        break;
-   case USB_DEVICE_ID_ELECOM_M_XT3URBK:
+   case USB_DEVICE_ID_ELECOM_M_XT3URBK_00FB:
+   case USB_DEVICE_ID_ELECOM_M_XT3URBK_018F:
    case USB_DEVICE_ID_ELECOM_M_XT3DRBK:
    case USB_DEVICE_ID_ELECOM_M_XT4DRBK:
        /*
@@ -119,7 +120,8 @@ static const __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
 static const struct hid_device_id elecom_devices[] = {
    { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
    { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XGL20DLBK) },
-   { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3URBK) },
+   { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3URBK_00FB) },
+   { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3URBK_018F) },
    { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3DRBK) },
    { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT4DRBK) },
    { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1URBK) },
@@ -86,7 +86,7 @@ int hid_haptic_input_configured(struct hid_device *hdev,
    if (hi->application == HID_DG_TOUCHPAD) {
        if (haptic->auto_trigger_report &&
            haptic->manual_trigger_report) {
-           __set_bit(INPUT_PROP_HAPTIC_TOUCHPAD, hi->input->propbit);
+           __set_bit(INPUT_PROP_PRESSUREPAD, hi->input->propbit);
            return 1;
        }
        return 0;
@@ -449,7 +449,8 @@
 #define USB_VENDOR_ID_ELECOM		0x056e
 #define USB_DEVICE_ID_ELECOM_BM084	0x0061
 #define USB_DEVICE_ID_ELECOM_M_XGL20DLBK	0x00e6
-#define USB_DEVICE_ID_ELECOM_M_XT3URBK	0x00fb
+#define USB_DEVICE_ID_ELECOM_M_XT3URBK_00FB	0x00fb
+#define USB_DEVICE_ID_ELECOM_M_XT3URBK_018F	0x018f
 #define USB_DEVICE_ID_ELECOM_M_XT3DRBK	0x00fc
 #define USB_DEVICE_ID_ELECOM_M_XT4DRBK	0x00fd
 #define USB_DEVICE_ID_ELECOM_M_DT1URBK	0x00fe
@@ -718,6 +719,7 @@
 #define USB_DEVICE_ID_ITE_LENOVO_YOGA2		0x8350
 #define I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720	0x837a
 #define USB_DEVICE_ID_ITE_LENOVO_YOGA900	0x8396
+#define I2C_DEVICE_ID_ITE_LENOVO_YOGA_SLIM_7X_KEYBOARD	0x8987
 #define USB_DEVICE_ID_ITE8595		0x8595
 #define USB_DEVICE_ID_ITE_MEDION_E1239T	0xce50
@@ -1543,7 +1545,7 @@
 #define USB_VENDOR_ID_SIGNOTEC			0x2133
 #define USB_DEVICE_ID_SIGNOTEC_VIEWSONIC_PD1011	0x0018

-#define USB_VENDOR_ID_SMARTLINKTECHNOLOGY	0x4c4a
-#define USB_DEVICE_ID_SMARTLINKTECHNOLOGY_4155	0x4155
+#define USB_VENDOR_ID_JIELI_SDK_DEFAULT		0x4c4a
+#define USB_DEVICE_ID_JIELI_SDK_4155		0x4155

 #endif
@@ -399,10 +399,11 @@ static const struct hid_device_id hid_battery_quirks[] = {
    { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_CHROMEBOOK_TROGDOR_POMPOM),
      HID_BATTERY_QUIRK_AVOID_QUERY },
    /*
-    * Elan I2C-HID touchscreens seem to all report a non present battery,
-    * set HID_BATTERY_QUIRK_IGNORE for all Elan I2C-HID devices.
+    * Elan HID touchscreens seem to all report a non present battery,
+    * set HID_BATTERY_QUIRK_IGNORE for all Elan I2C and USB HID devices.
     */
    { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, HID_ANY_ID), HID_BATTERY_QUIRK_IGNORE },
+   { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, HID_ANY_ID), HID_BATTERY_QUIRK_IGNORE },
    {}
 };
@@ -148,6 +148,14 @@ static const __u8 lenovo_tpIIbtkbd_need_fixup_collection[] = {
    0x81, 0x01,     /* Input (Const,Array,Abs,No Wrap,Linear,Preferred State,No Null Position) */
 };

+static const __u8 lenovo_yoga7x_kbd_need_fixup_collection[] = {
+   0x15, 0x00,     // Logical Minimum (0)
+   0x25, 0x65,     // Logical Maximum (101)
+   0x05, 0x07,     // Usage Page (Keyboard)
+   0x19, 0x00,     // Usage Minimum (0)
+   0x29, 0xDD,     // Usage Maximum (221)
+};
+
 static const __u8 *lenovo_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                                        unsigned int *rsize)
 {
@@ -177,6 +185,13 @@ static const __u8 *lenovo_report_fixup(struct hid_device *hdev, __u8 *rdesc,
            rdesc[260] = 0x01;  /* report count (2) = 0x01 */
        }
        break;
+   case I2C_DEVICE_ID_ITE_LENOVO_YOGA_SLIM_7X_KEYBOARD:
+       if (*rsize == 176 &&
+           memcmp(&rdesc[52], lenovo_yoga7x_kbd_need_fixup_collection,
+                  sizeof(lenovo_yoga7x_kbd_need_fixup_collection)) == 0) {
+           rdesc[55] = rdesc[61];  // logical maximum = usage maximum
+       }
+       break;
    }
    return rdesc;
 }
@@ -1538,6 +1553,8 @@ static const struct hid_device_id lenovo_devices[] = {
            USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X12_TAB) },
    { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
            USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X12_TAB2) },
+   { HID_DEVICE(BUS_I2C, HID_GROUP_GENERIC,
+           USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_YOGA_SLIM_7X_KEYBOARD) },
    { }
 };
@@ -142,13 +142,13 @@ static void ntrig_report_version(struct hid_device *hdev)
    int ret;
    char buf[20];
    struct usb_device *usb_dev = hid_to_usb_dev(hdev);
-   unsigned char *data = kmalloc(8, GFP_KERNEL);
+   unsigned char *data __free(kfree) = kmalloc(8, GFP_KERNEL);

    if (!hid_is_usb(hdev))
        return;

    if (!data)
-       goto err_free;
+       return;

    ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
                          USB_REQ_CLEAR_FEATURE,
@@ -163,9 +163,6 @@ static void ntrig_report_version(struct hid_device *hdev)
        hid_info(hdev, "Firmware version: %s (%02x%02x %02x%02x)\n",
                 buf, data[2], data[3], data[4], data[5]);
    }
-
-err_free:
-   kfree(data);
 }

 static ssize_t show_phys_width(struct device *dev,
@@ -1942,6 +1942,7 @@ static int dualshock4_get_calibration_data(struct dualshock4 *ds4)
                "Failed to retrieve DualShock4 calibration info: %d\n",
                ret);
            ret = -EILSEQ;
+           kfree(buf);
            goto transfer_failed;
        } else {
            break;
@@ -1959,6 +1960,7 @@ static int dualshock4_get_calibration_data(struct dualshock4 *ds4)

        if (ret) {
            hid_warn(hdev, "Failed to retrieve DualShock4 calibration info: %d\n", ret);
+           kfree(buf);
            goto transfer_failed;
        }
    }
@@ -410,7 +410,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
 #if IS_ENABLED(CONFIG_HID_ELECOM)
    { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
    { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XGL20DLBK) },
-   { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3URBK) },
+   { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3URBK_00FB) },
+   { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3URBK_018F) },
    { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3DRBK) },
    { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT4DRBK) },
    { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1URBK) },
@@ -915,7 +916,6 @@ static const struct hid_device_id hid_ignore_list[] = {
 #endif
    { HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) },
    { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_HP_5MP_CAMERA_5473) },
-   { HID_USB_DEVICE(USB_VENDOR_ID_SMARTLINKTECHNOLOGY, USB_DEVICE_ID_SMARTLINKTECHNOLOGY_4155) },
    { }
 };
@@ -1064,6 +1064,18 @@ bool hid_ignore(struct hid_device *hdev)
                strlen(elan_acpi_id[i].id)))
                return true;
        break;
+   case USB_VENDOR_ID_JIELI_SDK_DEFAULT:
+       /*
+        * Multiple USB devices with identical IDs (mic & touchscreen).
+        * The touch screen requires hid core processing, but the
+        * microphone does not. They can be distinguished by manufacturer
+        * and serial number.
+        */
+       if (hdev->product == USB_DEVICE_ID_JIELI_SDK_4155 &&
+           strncmp(hdev->name, "SmartlinkTechnology", 19) == 0 &&
+           strncmp(hdev->uniq, "20201111000001", 14) == 0)
+           return true;
+       break;
    }

    if (hdev->type == HID_TYPE_USBMOUSE &&
@@ -1369,8 +1369,10 @@ static int uclogic_params_ugee_v2_init_event_hooks(struct hid_device *hdev,
        event_hook->hdev = hdev;
        event_hook->size = ARRAY_SIZE(reconnect_event);
        event_hook->event = kmemdup(reconnect_event, event_hook->size, GFP_KERNEL);
-       if (!event_hook->event)
+       if (!event_hook->event) {
+           kfree(event_hook);
            return -ENOMEM;
+       }

        list_add_tail(&event_hook->list, &p->event_hooks->list);
@@ -806,8 +806,8 @@ static int pidff_request_effect_upload(struct pidff_device *pidff, int efnum)

 static int pidff_needs_playback(struct pidff_device *pidff, int effect_id, int n)
 {
-   return pidff->effect[effect_id].is_infinite ||
-          pidff->effect[effect_id].loop_count != n;
+   return !pidff->effect[effect_id].is_infinite ||
+          pidff->effect[effect_id].loop_count != n;
 }

 /*
@@ -261,6 +261,12 @@ static int cros_ec_keyb_work(struct notifier_block *nb,
    case EC_MKBP_EVENT_KEY_MATRIX:
        pm_wakeup_event(ckdev->dev, 0);

+       if (!ckdev->idev) {
+           dev_warn_once(ckdev->dev,
+                         "Unexpected key matrix event\n");
+           return NOTIFY_OK;
+       }
+
        if (ckdev->ec->event_size != ckdev->cols) {
            dev_err(ckdev->dev,
                "Discarded incomplete key matrix event.\n");
@@ -158,7 +158,7 @@ static int imx_sc_key_probe(struct platform_device *pdev)
        return error;
    }

-   error = devm_add_action_or_reset(&pdev->dev, imx_sc_key_action, &priv);
+   error = devm_add_action_or_reset(&pdev->dev, imx_sc_key_action, priv);
    if (error)
        return error;
Some files were not shown because too many files have changed in this diff.