Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR (net-6.17-rc5).

No conflicts.

Adjacent changes:

include/net/sock.h
  c51613fa27 ("net: add sk->sk_drop_counters")
  5d6b58c932 ("net: lockless sock_i_ino()")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 5ef04a7b06

 .mailmap | 1 +
@@ -589,6 +589,7 @@ Nikolay Aleksandrov <razor@blackwall.org> <nikolay@redhat.com>
 Nikolay Aleksandrov <razor@blackwall.org> <nikolay@cumulusnetworks.com>
 Nikolay Aleksandrov <razor@blackwall.org> <nikolay@nvidia.com>
 Nikolay Aleksandrov <razor@blackwall.org> <nikolay@isovalent.com>
+Nobuhiro Iwamatsu <nobuhiro.iwamatsu.x90@mail.toshiba> <nobuhiro1.iwamatsu@toshiba.co.jp>
 Odelu Kukatla <quic_okukatla@quicinc.com> <okukatla@codeaurora.org>
 Oleksandr Natalenko <oleksandr@natalenko.name> <oleksandr@redhat.com>
 Oleksij Rempel <linux@rempel-privat.de> <bug-track@fisher-privat.net>
@@ -215,7 +215,7 @@ Spectre_v2 X X
 Spectre_v2_user X X * (Note 1)
 SRBDS X X X X
 SRSO X X X X
-SSB (Note 4)
+SSB X
 TAA X X X X * (Note 2)
 TSA X X X X
 =============== ============== ============ ============= ============== ============ ========
@@ -229,9 +229,6 @@ Notes:
    3 --  Disables SMT if cross-thread mitigations are fully enabled, the CPU is
          vulnerable, and STIBP is not supported

-   4 --  Speculative store bypass is always enabled by default (no kernel
-         mitigation applied) unless overridden with spec_store_bypass_disable option
-
 When an attack-vector is disabled, all mitigations for the vulnerabilities
 listed in the above table are disabled, unless mitigation is required for a
 different enabled attack-vector or a mitigation is explicitly selected via a
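Not part of the patch, but as a quick illustration of the controls this document describes: attack-vector handling is driven from the kernel command line, e.g.

    mitigations=auto,nosmt

which keeps all applicable mitigations enabled and additionally disables SMT where a cross-thread vulnerability warrants it (compare Note 3 above).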
@@ -60,7 +60,6 @@ properties:
       - const: bus
       - const: core
       - const: vsync
-      - const: lut
       - const: tbu
       - const: tbu_rt
       # MSM8996 has additional iommu clock
@@ -433,9 +433,8 @@ Threaded NAPI

 Threaded NAPI is an operating mode that uses dedicated kernel
 threads rather than software IRQ context for NAPI processing.
-The configuration is per netdevice and will affect all
-NAPI instances of that device. Each NAPI instance will spawn a separate
-thread (called ``napi/${ifc-name}-${napi-id}``).
+Each threaded NAPI instance will spawn a separate thread
+(called ``napi/${ifc-name}-${napi-id}``).

 It is recommended to pin each kernel thread to a single CPU, the same
 CPU as the CPU which services the interrupt. Note that the mapping
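As a usage sketch of the threaded mode described above (the interface name is illustrative, not from the patch): threaded NAPI can be switched on per netdevice through sysfs,

    echo 1 > /sys/class/net/eth0/threaded

after which the ``napi/eth0-<napi-id>`` kernel threads appear and can be pinned to the CPUs servicing the device's interrupts, e.g. with ``taskset``.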
@@ -2253,8 +2253,15 @@ device_setup
     Default: 0x0000
 ignore_ctl_error
     Ignore any USB-controller regarding mixer interface (default: no)
+    ``ignore_ctl_error=1`` may help when you get an error at accessing
+    the mixer element such as URB error -22. This happens on some
+    buggy USB device or the controller. This workaround corresponds to
+    the ``quirk_flags`` bit 14, too.
 autoclock
     Enable auto-clock selection for UAC2 devices (default: yes)
+lowlatency
+    Enable low latency playback mode (default: yes).
+    Could disable it to switch back to the old mode if face a regression.
 quirk_alias
     Quirk alias list, pass strings like ``0123abcd:5678beef``, which
     applies the existing quirk for the device 5678:beef to a new

@@ -2284,6 +2291,11 @@ delayed_register
     The driver prints a message like "Found post-registration device
     assignment: 1234abcd:04" for such a device, so that user can
     notice the need.
+skip_validation
+    Skip unit descriptor validation (default: no).
+    The option is used to ignores the validation errors with the hexdump
+    of the unit descriptor instead of a driver probe error, so that we
+    can check its details.
 quirk_flags
     Contains the bit flags for various device specific workarounds.
     Applied to the corresponding card index.

@@ -2307,6 +2319,16 @@ quirk_flags
     * bit 16: Set up the interface at first like UAC1
     * bit 17: Apply the generic implicit feedback sync mode
     * bit 18: Don't apply implicit feedback sync mode
+    * bit 19: Don't closed interface during setting sample rate
+    * bit 20: Force an interface reset whenever stopping & restarting
+      a stream
+    * bit 21: Do not set PCM rate (frequency) when only one rate is
+      available for the given endpoint.
+    * bit 22: Set the fixed resolution 16 for Mic Capture Volume
+    * bit 23: Set the fixed resolution 384 for Mic Capture Volume
+    * bit 24: Set minimum volume control value as mute for devices
+      where the lowest playback value represents muted state instead
+      of minimum audible volume

 This module supports multiple devices, autoprobe and hotplugging.

@@ -2314,10 +2336,9 @@ NB: ``nrpacks`` parameter can be modified dynamically via sysfs.
 Don't put the value over 20. Changing via sysfs has no sanity
 check.

-NB: ``ignore_ctl_error=1`` may help when you get an error at accessing
-the mixer element such as URB error -22. This happens on some
-buggy USB device or the controller. This workaround corresponds to
-the ``quirk_flags`` bit 14, too.
+NB: ``ignore_ctl_error=1`` just provides a quick way to work around the
+issues. If you have a buggy device that requires these quirks, please
+report it to the upstream.

 NB: ``quirk_alias`` option is provided only for testing / development.
 If you want to have a proper support, contact to upstream for
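As a usage sketch combining the options documented above (the device IDs are placeholders, not from the patch): a modprobe configuration such as

    # /etc/modprobe.d/snd-usb-audio.conf
    options snd-usb-audio quirk_alias=0123abcd:5678beef quirk_flags=0x4000

applies the existing 5678:beef quirk to device 0123:abcd and sets quirk_flags bit 14 (0x4000), the same workaround as ``ignore_ctl_error=1``.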
 MAINTAINERS | 16 ++++++++--------
@@ -931,7 +931,7 @@ F: Documentation/devicetree/bindings/dma/altr,msgdma.yaml
 F:	drivers/dma/altera-msgdma.c

 ALTERA PIO DRIVER
-M:	Mun Yew Tham <mun.yew.tham@intel.com>
+M:	Adrian Ng <adrianhoyin.ng@altera.com>
 L:	linux-gpio@vger.kernel.org
 S:	Maintained
 F:	drivers/gpio/gpio-altera.c

@@ -3526,7 +3526,7 @@ F: Documentation/devicetree/bindings/arm/ti/nspire.yaml
 F:	arch/arm/boot/dts/nspire/

 ARM/TOSHIBA VISCONTI ARCHITECTURE
-M:	Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
+M:	Nobuhiro Iwamatsu <nobuhiro.iwamatsu.x90@mail.toshiba>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Supported
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/iwamatsu/linux-visconti.git

@@ -3667,6 +3667,7 @@ F: drivers/virt/coco/arm-cca-guest/
 F:	drivers/virt/coco/pkvm-guest/
 F:	tools/testing/selftests/arm64/
 X:	arch/arm64/boot/dts/
+X:	arch/arm64/configs/defconfig

 ARROW SPEEDCHIPS XRS7000 SERIES ETHERNET SWITCH DRIVER
 M:	George McCollister <george.mccollister@gmail.com>

@@ -4205,7 +4206,7 @@ W: http://www.baycom.org/~tom/ham/ham.html
 F:	drivers/net/hamradio/baycom*

 BCACHE (BLOCK LAYER CACHE)
-M:	Coly Li <colyli@kernel.org>
+M:	Coly Li <colyli@fnnas.com>
 M:	Kent Overstreet <kent.overstreet@linux.dev>
 L:	linux-bcache@vger.kernel.org
 S:	Maintained

@@ -10389,7 +10390,7 @@ S: Maintained
 F:	drivers/input/touchscreen/goodix*

 GOOGLE ETHERNET DRIVERS
-M:	Jeroen de Borst <jeroendb@google.com>
+M:	Joshua Washington <joshwash@google.com>
 M:	Harshitha Ramamurthy <hramamurthy@google.com>
 L:	netdev@vger.kernel.org
 S:	Maintained

@@ -17850,6 +17851,7 @@ F: net/ipv6/tcp*.c
 NETWORKING [TLS]
 M:	John Fastabend <john.fastabend@gmail.com>
 M:	Jakub Kicinski <kuba@kernel.org>
+M:	Sabrina Dubroca <sd@queasysnail.net>
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	include/net/tls.h

@@ -24269,6 +24271,12 @@ S: Maintained
 F:	Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml
 F:	drivers/input/keyboard/sun4i-lradc-keys.c

+SUNDANCE NETWORK DRIVER
+M:	Denis Kirjanov <dkirjanov@suse.de>
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	drivers/net/ethernet/dlink/sundance.c
+
 SUNPLUS ETHERNET DRIVER
 M:	Wells Lu <wellslutw@gmail.com>
 L:	netdev@vger.kernel.org
 Makefile | 2 +-
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 17
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Baby Opossum Posse

 # *DOCUMENTATION*
@@ -387,6 +387,8 @@

 &sdmmc1 {
 	bus-width = <4>;
+	no-1-8-v;
+	sdhci-caps-mask = <0x0 0x00200000>;
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_sdmmc1_default>;
 	status = "okay";
@@ -272,7 +272,7 @@
 	phy-mode = "rmii";
 	phy-handle = <&phy0>;
 	assigned-clocks = <&cru SCLK_MAC_SRC>;
-	assigned-clock-rates= <50000000>;
+	assigned-clock-rates = <50000000>;
 	pinctrl-names = "default";
 	pinctrl-0 = <&rmii_pins>;
 	status = "okay";
@@ -250,9 +250,9 @@
 &i2s0 {
 	/delete-property/ pinctrl-0;
 	rockchip,trcm-sync-rx-only;
-	pinctrl-0 = <&i2s0m0_sclk_rx>,
-			<&i2s0m0_lrck_rx>,
-			<&i2s0m0_sdi0>;
+	pinctrl-0 = <&i2s0m0_sclk_rx>,
+		    <&i2s0m0_lrck_rx>,
+		    <&i2s0m0_sdi0>;
 	pinctrl-names = "default";
 	status = "okay";
 };
@@ -2,8 +2,9 @@
 #ifndef __ASM_STACKTRACE_H
 #define __ASM_STACKTRACE_H

-#include <asm/ptrace.h>
 #include <linux/llist.h>
+#include <asm/ptrace.h>
+#include <asm/sections.h>

 struct stackframe {
 	/*
@@ -1,4 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
+config ARCH_MICROCHIP
+	bool
+
 menuconfig ARCH_AT91
 	bool "AT91/Microchip SoCs"
 	depends on (CPU_LITTLE_ENDIAN && (ARCH_MULTI_V4T || ARCH_MULTI_V5)) || \

@@ -8,6 +11,7 @@ menuconfig ARCH_AT91
 	select GPIOLIB
 	select PINCTRL
 	select SOC_BUS
+	select ARCH_MICROCHIP

 if ARCH_AT91
 config SOC_SAMV7
@@ -14,6 +14,9 @@
 	#size-cells = <2>;

 	aliases {
+		serial0 = &uart0;
+		serial1 = &uart1;
+		serial2 = &uart2;
 		serial3 = &uart3;
 	};

@@ -555,6 +555,7 @@
 	pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>;
 	cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>;
 	vmmc-supply = <&reg_usdhc2_vmmc>;
+	vqmmc-supply = <&ldo5>;
 	bus-width = <4>;
 	status = "okay";
 };

@@ -609,6 +609,7 @@
 	pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>;
 	cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>;
 	vmmc-supply = <&reg_usdhc2_vmmc>;
+	vqmmc-supply = <&ldo5>;
 	bus-width = <4>;
 	status = "okay";
 };
@@ -467,6 +467,10 @@
 	status = "okay";
 };

+&reg_usdhc2_vqmmc {
+	status = "okay";
+};
+
 &sai5 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_sai5>;

@@ -876,8 +880,7 @@
 			<MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d2>,
 			<MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d2>,
 			<MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d2>,
-			<MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d2>,
-			<MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0>;
+			<MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d2>;
 	};

 	pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {

@@ -886,8 +889,7 @@
 			<MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d4>,
 			<MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d4>,
 			<MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d4>,
-			<MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d4>,
-			<MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0>;
+			<MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d4>;
 	};

 	pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {

@@ -896,8 +898,7 @@
 			<MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d4>,
 			<MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d4>,
 			<MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d4>,
-			<MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d4>,
-			<MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0>;
+			<MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d4>;
 	};

 	pinctrl_usdhc2_gpio: usdhc2-gpiogrp {
@@ -604,6 +604,10 @@
 	status = "okay";
 };

+&reg_usdhc2_vqmmc {
+	status = "okay";
+};
+
 &sai3 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_sai3>;

@@ -983,8 +987,7 @@
 			<MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d2>,
 			<MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d2>,
 			<MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d2>,
-			<MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d2>,
-			<MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0>;
+			<MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d2>;
 	};

 	pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {

@@ -993,8 +996,7 @@
 			<MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d4>,
 			<MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d4>,
 			<MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d4>,
-			<MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d4>,
-			<MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0>;
+			<MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d4>;
 	};

 	pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {

@@ -1003,8 +1005,7 @@
 			<MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d4>,
 			<MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d4>,
 			<MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d4>,
-			<MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d4>,
-			<MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0>;
+			<MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d4>;
 	};

 	pinctrl_usdhc2_gpio: usdhc2-gpiogrp {
@@ -16,13 +16,18 @@
 		reg = <0x0 0x40000000 0 0x80000000>;
 	};

-	/* identical to buck4_reg, but should never change */
-	reg_vcc3v3: regulator-vcc3v3 {
-		compatible = "regulator-fixed";
-		regulator-name = "VCC3V3";
-		regulator-min-microvolt = <3300000>;
-		regulator-max-microvolt = <3300000>;
-		regulator-always-on;
+	reg_usdhc2_vqmmc: regulator-usdhc2-vqmmc {
+		compatible = "regulator-gpio";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_reg_usdhc2_vqmmc>;
+		regulator-name = "V_SD2";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <3300000>;
+		gpios = <&gpio1 4 GPIO_ACTIVE_HIGH>;
+		states = <1800000 0x1>,
+			 <3300000 0x0>;
+		vin-supply = <&ldo5_reg>;
+		status = "disabled";
 	};
 };

@@ -173,17 +178,21 @@
 		read-only;
 		reg = <0x53>;
 		pagesize = <16>;
-		vcc-supply = <&reg_vcc3v3>;
+		vcc-supply = <&buck4_reg>;
 	};

 	m24c64: eeprom@57 {
 		compatible = "atmel,24c64";
 		reg = <0x57>;
 		pagesize = <32>;
-		vcc-supply = <&reg_vcc3v3>;
+		vcc-supply = <&buck4_reg>;
 	};
 };

+&usdhc2 {
+	vqmmc-supply = <&reg_usdhc2_vqmmc>;
+};
+
 &usdhc3 {
 	pinctrl-names = "default", "state_100mhz", "state_200mhz";
 	pinctrl-0 = <&pinctrl_usdhc3>;
@@ -193,7 +202,7 @@
 	non-removable;
 	no-sd;
 	no-sdio;
-	vmmc-supply = <&reg_vcc3v3>;
+	vmmc-supply = <&buck4_reg>;
 	vqmmc-supply = <&buck5_reg>;
 	status = "okay";
 };
@@ -233,6 +242,10 @@
 		fsl,pins = <MX8MP_IOMUXC_SD2_RESET_B__GPIO2_IO19 0x10>;
 	};

+	pinctrl_reg_usdhc2_vqmmc: regusdhc2vqmmcgrp {
+		fsl,pins = <MX8MP_IOMUXC_GPIO1_IO04__GPIO1_IO04 0xc0>;
+	};
+
 	pinctrl_usdhc3: usdhc3grp {
 		fsl,pins = <MX8MP_IOMUXC_NAND_WE_B__USDHC3_CLK 0x194>,
 			   <MX8MP_IOMUXC_NAND_WP_B__USDHC3_CMD 0x1d4>,
@@ -80,17 +80,17 @@
 	flexcan1_phy: can-phy0 {
 		compatible = "nxp,tjr1443";
 		#phy-cells = <0>;
-		max-bitrate = <1000000>;
+		max-bitrate = <8000000>;
 		enable-gpios = <&i2c6_pcal6416 6 GPIO_ACTIVE_HIGH>;
-		standby-gpios = <&i2c6_pcal6416 5 GPIO_ACTIVE_HIGH>;
+		standby-gpios = <&i2c6_pcal6416 5 GPIO_ACTIVE_LOW>;
 	};

 	flexcan2_phy: can-phy1 {
 		compatible = "nxp,tjr1443";
 		#phy-cells = <0>;
-		max-bitrate = <1000000>;
-		enable-gpios = <&i2c6_pcal6416 4 GPIO_ACTIVE_HIGH>;
-		standby-gpios = <&i2c6_pcal6416 3 GPIO_ACTIVE_HIGH>;
+		max-bitrate = <8000000>;
+		enable-gpios = <&i2c4_gpio_expander_21 4 GPIO_ACTIVE_HIGH>;
+		standby-gpios = <&i2c4_gpio_expander_21 3 GPIO_ACTIVE_LOW>;
 	};

 	reg_vref_1v8: regulator-1p8v {
@@ -1843,7 +1843,7 @@
 			     <GIC_SPI 294 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&scmi_clk IMX95_CLK_VPU>,
 			 <&vpu_blk_ctrl IMX95_CLK_VPUBLK_JPEG_ENC>;
-		assigned-clocks = <&vpu_blk_ctrl IMX95_CLK_VPUBLK_JPEG_DEC>;
+		assigned-clocks = <&vpu_blk_ctrl IMX95_CLK_VPUBLK_JPEG_ENC>;
 		assigned-clock-parents = <&scmi_clk IMX95_CLK_VPUJPEG>;
 		power-domains = <&scmi_devpd IMX95_PD_VPU>;
 	};
@@ -72,7 +72,7 @@
 	};

 	vcc_cam_avdd: regulator-vcc-cam-avdd {
-	compatible = "regulator-fixed";
+		compatible = "regulator-fixed";
 		regulator-name = "vcc_cam_avdd";
 		gpio = <&gpio3 RK_PC0 GPIO_ACTIVE_LOW>;
 		pinctrl-names = "default";

@@ -83,7 +83,7 @@
 	};

 	vcc_cam_dovdd: regulator-vcc-cam-dovdd {
-	compatible = "regulator-fixed";
+		compatible = "regulator-fixed";
 		regulator-name = "vcc_cam_dovdd";
 		gpio = <&gpio3 RK_PC1 GPIO_ACTIVE_LOW>;
 		pinctrl-names = "default";

@@ -94,7 +94,7 @@
 	};

 	vcc_cam_dvdd: regulator-vcc-cam-dvdd {
-	compatible = "regulator-fixed";
+		compatible = "regulator-fixed";
 		regulator-name = "vcc_cam_dvdd";
 		gpio = <&gpio3 RK_PC5 GPIO_ACTIVE_HIGH>;
 		enable-active-high;

@@ -106,7 +106,7 @@
 	};

 	vcc_lens_afvdd: regulator-vcc-lens-afvdd {
-	compatible = "regulator-fixed";
+		compatible = "regulator-fixed";
 		regulator-name = "vcc_lens_afvdd";
 		gpio = <&gpio3 RK_PB2 GPIO_ACTIVE_LOW>;
 		pinctrl-names = "default";
@@ -26,7 +26,7 @@
 	};

 	cam_afvdd_2v8: regulator-cam-afvdd-2v8 {
-	compatible = "regulator-fixed";
+		compatible = "regulator-fixed";
 		gpio = <&pca9670 2 GPIO_ACTIVE_LOW>;
 		regulator-max-microvolt = <2800000>;
 		regulator-min-microvolt = <2800000>;

@@ -35,7 +35,7 @@
 	};

 	cam_avdd_2v8: regulator-cam-avdd-2v8 {
-	compatible = "regulator-fixed";
+		compatible = "regulator-fixed";
 		gpio = <&pca9670 4 GPIO_ACTIVE_LOW>;
 		regulator-max-microvolt = <2800000>;
 		regulator-min-microvolt = <2800000>;

@@ -44,7 +44,7 @@
 	};

 	cam_dovdd_1v8: regulator-cam-dovdd-1v8 {
-	compatible = "regulator-fixed";
+		compatible = "regulator-fixed";
 		gpio = <&pca9670 3 GPIO_ACTIVE_LOW>;
 		regulator-max-microvolt = <1800000>;
 		regulator-min-microvolt = <1800000>;
@@ -260,6 +260,6 @@
 	status = "okay";
 };

-&usb_host_ohci{
+&usb_host_ohci {
 	status = "okay";
 };
@@ -609,7 +609,7 @@

 	bluetooth {
 		compatible = "brcm,bcm4345c5";
-		interrupts-extended = <&gpio3 RK_PA7 GPIO_ACTIVE_HIGH>;
+		interrupts-extended = <&gpio3 RK_PA7 IRQ_TYPE_LEVEL_HIGH>;
 		interrupt-names = "host-wakeup";
 		clocks = <&rk808 RK808_CLKOUT1>;
 		clock-names = "lpo";
@@ -959,6 +959,7 @@
 		reg = <0>;
 		m25p,fast-read;
 		spi-max-frequency = <10000000>;
+		vcc-supply = <&vcc_3v0>;
 	};
 };

@@ -754,6 +754,7 @@
 		compatible = "jedec,spi-nor";
 		reg = <0>;
 		spi-max-frequency = <10000000>;
+		vcc-supply = <&vcc_1v8>;
 	};
 };

@@ -26,7 +26,7 @@
 	};

 	cam_afvdd_2v8: regulator-cam-afvdd-2v8 {
-	compatible = "regulator-fixed";
+		compatible = "regulator-fixed";
 		gpio = <&pca9670 2 GPIO_ACTIVE_LOW>;
 		regulator-max-microvolt = <2800000>;
 		regulator-min-microvolt = <2800000>;

@@ -35,7 +35,7 @@
 	};

 	cam_avdd_2v8: regulator-cam-avdd-2v8 {
-	compatible = "regulator-fixed";
+		compatible = "regulator-fixed";
 		gpio = <&pca9670 4 GPIO_ACTIVE_LOW>;
 		regulator-max-microvolt = <2800000>;
 		regulator-min-microvolt = <2800000>;

@@ -44,7 +44,7 @@
 	};

 	cam_dovdd_1v8: regulator-cam-dovdd-1v8 {
-	compatible = "regulator-fixed";
+		compatible = "regulator-fixed";
 		gpio = <&pca9670 3 GPIO_ACTIVE_LOW>;
 		regulator-max-microvolt = <1800000>;
 		regulator-min-microvolt = <1800000>;
@@ -53,7 +53,7 @@
 		gpios = <&gpio4 RK_PA1 GPIO_ACTIVE_LOW>;
 		linux,default-trigger = "default-on";
 		pinctrl-names = "default";
-		pinctrl-0 =<&blue_led>;
+		pinctrl-0 = <&blue_led>;
 	};

 	led-1 {

@@ -62,7 +62,7 @@
 		gpios = <&gpio0 RK_PB7 GPIO_ACTIVE_LOW>;
 		linux,default-trigger = "heartbeat";
 		pinctrl-names = "default";
-		pinctrl-0 =<&heartbeat_led>;
+		pinctrl-0 = <&heartbeat_led>;
 	};
 };

@@ -302,8 +302,7 @@
 		     &eth1m0_tx_bus2
 		     &eth1m0_rx_bus2
 		     &eth1m0_rgmii_clk
-		     &eth1m0_rgmii_bus
-		     &ethm0_clk1_25m_out>;
+		     &eth1m0_rgmii_bus>;
 	status = "okay";
 };

@@ -784,7 +783,6 @@
 		rgmii_phy0: phy@1 {
 			compatible = "ethernet-phy-ieee802.3-c22";
 			reg = <0x1>;
-			clocks = <&cru REFCLKO25M_GMAC0_OUT>;
 			pinctrl-names = "default";
 			pinctrl-0 = <&gmac0_rst>;
 			reset-assert-us = <20000>;

@@ -797,7 +795,6 @@
 		rgmii_phy1: phy@1 {
 			compatible = "ethernet-phy-ieee802.3-c22";
 			reg = <0x1>;
-			clocks = <&cru REFCLKO25M_GMAC1_OUT>;
 			pinctrl-names = "default";
 			pinctrl-0 = <&gmac1_rst>;
 			reset-assert-us = <20000>;
@@ -250,6 +250,7 @@
 			compatible = "belling,bl24c16a", "atmel,24c16";
 			reg = <0x50>;
 			pagesize = <16>;
+			read-only;
 			vcc-supply = <&vcc_3v3_pmu>;
 		};
 	};
@@ -77,7 +77,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&hp_detect>;
 	simple-audio-card,aux-devs = <&speaker_amp>, <&headphone_amp>;
-	simple-audio-card,hp-det-gpios = <&gpio1 RK_PD3 GPIO_ACTIVE_LOW>;
+	simple-audio-card,hp-det-gpios = <&gpio1 RK_PD3 GPIO_ACTIVE_HIGH>;
 	simple-audio-card,widgets =
 		"Microphone", "Onboard Microphone",
 		"Microphone", "Microphone Jack",
@@ -365,6 +365,8 @@
 	max-frequency = <200000000>;
 	mmc-hs400-1_8v;
 	mmc-hs400-enhanced-strobe;
+	vmmc-supply = <&vcc_3v3_s3>;
+	vqmmc-supply = <&vcc_1v8_s3>;
 	status = "okay";
 };

@@ -68,6 +68,22 @@
 	status = "okay";
 };

+&pcie30phy {
+	data-lanes = <1 1 2 2>;
+};
+
+&pcie3x2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pcie3x2_rst>;
+	reset-gpios = <&gpio4 RK_PB0 GPIO_ACTIVE_HIGH>;
+	vpcie3v3-supply = <&vcc3v3_pcie30>;
+	status = "okay";
+};
+
+&pcie3x4 {
+	num-lanes = <2>;
+};
+
 &pinctrl {
 	hdmirx {
 		hdmirx_hpd: hdmirx-5v-detection {

@@ -90,11 +106,23 @@
 		};
 	};

+	pcie3 {
+		pcie3x2_rst: pcie3x2-rst {
+			rockchip,pins = <4 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+
 	sound {
 		hp_detect: hp-detect {
 			rockchip,pins = <4 RK_PC3 RK_FUNC_GPIO &pcfg_pull_none>;
 		};
 	};
+
+	usb {
+		vcc5v0_host_en: vcc5v0-host-en {
+			rockchip,pins = <1 RK_PA1 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
 };

 &vcc3v3_pcie2x1l0 {

@@ -103,3 +131,10 @@
 	pinctrl-0 = <&pcie2_0_vcc3v3_en>;
 	status = "okay";
 };
+
+&vcc5v0_host {
+	enable-active-high;
+	gpio = <&gpio1 RK_PA1 GPIO_ACTIVE_HIGH>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&vcc5v0_host_en>;
+};
@@ -28,7 +28,7 @@
 	compatible = "operating-points-v2";
 	opp-shared;

-	opp-1200000000{
+	opp-1200000000 {
 		opp-hz = /bits/ 64 <1200000000>;
 		opp-microvolt = <750000 750000 950000>;
 		clock-latency-ns = <40000>;

@@ -49,7 +49,7 @@
 	compatible = "operating-points-v2";
 	opp-shared;

-	opp-1200000000{
+	opp-1200000000 {
 		opp-hz = /bits/ 64 <1200000000>;
 		opp-microvolt = <750000 750000 950000>;
 		clock-latency-ns = <40000>;
@@ -320,9 +320,9 @@
 &i2c3 {
 	status = "okay";

-	es8388: audio-codec@10 {
+	es8388: audio-codec@11 {
 		compatible = "everest,es8388", "everest,es8328";
-		reg = <0x10>;
+		reg = <0x11>;
 		clocks = <&cru I2S1_8CH_MCLKOUT>;
 		AVDD-supply = <&vcc_3v3_s0>;
 		DVDD-supply = <&vcc_1v8_s0>;
@@ -1160,115 +1160,8 @@ u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *, enum vcpu_sysreg, u64);
 		__v;							\
 	})

-u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
-void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
-
-static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
-{
-	/*
-	 * *** VHE ONLY ***
-	 *
-	 * System registers listed in the switch are not saved on every
-	 * exit from the guest but are only saved on vcpu_put.
-	 *
-	 * SYSREGS_ON_CPU *MUST* be checked before using this helper.
-	 *
-	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
-	 * should never be listed below, because the guest cannot modify its
-	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
-	 * thread when emulating cross-VCPU communication.
-	 */
-	if (!has_vhe())
-		return false;
-
-	switch (reg) {
-	case SCTLR_EL1:		*val = read_sysreg_s(SYS_SCTLR_EL12);	break;
-	case CPACR_EL1:		*val = read_sysreg_s(SYS_CPACR_EL12);	break;
-	case TTBR0_EL1:		*val = read_sysreg_s(SYS_TTBR0_EL12);	break;
-	case TTBR1_EL1:		*val = read_sysreg_s(SYS_TTBR1_EL12);	break;
-	case TCR_EL1:		*val = read_sysreg_s(SYS_TCR_EL12);	break;
-	case TCR2_EL1:		*val = read_sysreg_s(SYS_TCR2_EL12);	break;
-	case PIR_EL1:		*val = read_sysreg_s(SYS_PIR_EL12);	break;
-	case PIRE0_EL1:		*val = read_sysreg_s(SYS_PIRE0_EL12);	break;
-	case POR_EL1:		*val = read_sysreg_s(SYS_POR_EL12);	break;
-	case ESR_EL1:		*val = read_sysreg_s(SYS_ESR_EL12);	break;
-	case AFSR0_EL1:		*val = read_sysreg_s(SYS_AFSR0_EL12);	break;
-	case AFSR1_EL1:		*val = read_sysreg_s(SYS_AFSR1_EL12);	break;
-	case FAR_EL1:		*val = read_sysreg_s(SYS_FAR_EL12);	break;
-	case MAIR_EL1:		*val = read_sysreg_s(SYS_MAIR_EL12);	break;
-	case VBAR_EL1:		*val = read_sysreg_s(SYS_VBAR_EL12);	break;
-	case CONTEXTIDR_EL1:	*val = read_sysreg_s(SYS_CONTEXTIDR_EL12);break;
-	case TPIDR_EL0:		*val = read_sysreg_s(SYS_TPIDR_EL0);	break;
-	case TPIDRRO_EL0:	*val = read_sysreg_s(SYS_TPIDRRO_EL0);	break;
-	case TPIDR_EL1:		*val = read_sysreg_s(SYS_TPIDR_EL1);	break;
-	case AMAIR_EL1:		*val = read_sysreg_s(SYS_AMAIR_EL12);	break;
-	case CNTKCTL_EL1:	*val = read_sysreg_s(SYS_CNTKCTL_EL12);	break;
-	case ELR_EL1:		*val = read_sysreg_s(SYS_ELR_EL12);	break;
-	case SPSR_EL1:		*val = read_sysreg_s(SYS_SPSR_EL12);	break;
-	case PAR_EL1:		*val = read_sysreg_par();		break;
-	case DACR32_EL2:	*val = read_sysreg_s(SYS_DACR32_EL2);	break;
-	case IFSR32_EL2:	*val = read_sysreg_s(SYS_IFSR32_EL2);	break;
-	case DBGVCR32_EL2:	*val = read_sysreg_s(SYS_DBGVCR32_EL2);	break;
-	case ZCR_EL1:		*val = read_sysreg_s(SYS_ZCR_EL12);	break;
-	case SCTLR2_EL1:	*val = read_sysreg_s(SYS_SCTLR2_EL12);	break;
-	default:		return false;
-	}
-
-	return true;
-}
-
-static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
-{
-	/*
-	 * *** VHE ONLY ***
-	 *
-	 * System registers listed in the switch are not restored on every
-	 * entry to the guest but are only restored on vcpu_load.
-	 *
-	 * SYSREGS_ON_CPU *MUST* be checked before using this helper.
-	 *
-	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
-	 * should never be listed below, because the MPIDR should only be set
-	 * once, before running the VCPU, and never changed later.
-	 */
-	if (!has_vhe())
-		return false;
-
-	switch (reg) {
-	case SCTLR_EL1:		write_sysreg_s(val, SYS_SCTLR_EL12);	break;
-	case CPACR_EL1:		write_sysreg_s(val, SYS_CPACR_EL12);	break;
-	case TTBR0_EL1:		write_sysreg_s(val, SYS_TTBR0_EL12);	break;
-	case TTBR1_EL1:		write_sysreg_s(val, SYS_TTBR1_EL12);	break;
-	case TCR_EL1:		write_sysreg_s(val, SYS_TCR_EL12);	break;
-	case TCR2_EL1:		write_sysreg_s(val, SYS_TCR2_EL12);	break;
-	case PIR_EL1:		write_sysreg_s(val, SYS_PIR_EL12);	break;
-	case PIRE0_EL1:		write_sysreg_s(val, SYS_PIRE0_EL12);	break;
-	case POR_EL1:		write_sysreg_s(val, SYS_POR_EL12);	break;
-	case ESR_EL1:		write_sysreg_s(val, SYS_ESR_EL12);	break;
-	case AFSR0_EL1:		write_sysreg_s(val, SYS_AFSR0_EL12);	break;
-	case AFSR1_EL1:		write_sysreg_s(val, SYS_AFSR1_EL12);	break;
-	case FAR_EL1:		write_sysreg_s(val, SYS_FAR_EL12);	break;
-	case MAIR_EL1:		write_sysreg_s(val, SYS_MAIR_EL12);	break;
-	case VBAR_EL1:		write_sysreg_s(val, SYS_VBAR_EL12);	break;
-	case CONTEXTIDR_EL1:	write_sysreg_s(val, SYS_CONTEXTIDR_EL12);break;
-	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	break;
-	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	break;
-	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	break;
-	case AMAIR_EL1:		write_sysreg_s(val, SYS_AMAIR_EL12);	break;
-	case CNTKCTL_EL1:	write_sysreg_s(val, SYS_CNTKCTL_EL12);	break;
-	case ELR_EL1:		write_sysreg_s(val, SYS_ELR_EL12);	break;
-	case SPSR_EL1:		write_sysreg_s(val, SYS_SPSR_EL12);	break;
-	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	break;
-	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	break;
-	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	break;
-	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	break;
-	case ZCR_EL1:		write_sysreg_s(val, SYS_ZCR_EL12);	break;
-	case SCTLR2_EL1:	write_sysreg_s(val, SYS_SCTLR2_EL12);	break;
-	default:		return false;
-	}
-
-	return true;
-}
+u64 vcpu_read_sys_reg(const struct kvm_vcpu *, enum vcpu_sysreg);
+void vcpu_write_sys_reg(struct kvm_vcpu *, u64, enum vcpu_sysreg);

 struct kvm_vm_stat {
 	struct kvm_vm_stat_generic generic;
@@ -180,6 +180,7 @@ void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 			  phys_addr_t pa, unsigned long size, bool writable);

+int kvm_handle_guest_sea(struct kvm_vcpu *vcpu);
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);

 phys_addr_t kvm_mmu_get_httbr(void);
@@ -355,6 +355,11 @@ static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walke
 	return pteref;
 }

+static inline kvm_pte_t *kvm_dereference_pteref_raw(kvm_pteref_t pteref)
+{
+	return pteref;
+}
+
 static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
 {
 	/*

@@ -384,6 +389,11 @@ static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walke
 	return rcu_dereference_check(pteref, !(walker->flags & KVM_PGTABLE_WALK_SHARED));
 }

+static inline kvm_pte_t *kvm_dereference_pteref_raw(kvm_pteref_t pteref)
+{
+	return rcu_dereference_raw(pteref);
+}
+
 static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
 {
 	if (walker->flags & KVM_PGTABLE_WALK_SHARED)

@@ -551,6 +561,26 @@ static inline int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2
  */
 void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);

+/**
+ * kvm_pgtable_stage2_destroy_range() - Destroy the unlinked range of addresses.
+ * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
+ * @addr:	Intermediate physical address at which to place the mapping.
+ * @size:	Size of the mapping.
+ *
+ * The page-table is assumed to be unreachable by any hardware walkers prior
+ * to freeing and therefore no TLB invalidation is performed.
+ */
+void kvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
+				      u64 addr, u64 size);
+
+/**
+ * kvm_pgtable_stage2_destroy_pgd() - Destroy the PGD of guest stage-2 page-table.
+ * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
+ *
+ * It is assumed that the rest of the page-table is freed before this operation.
+ */
+void kvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt);
+
 /**
  * kvm_pgtable_stage2_free_unlinked() - Free an unlinked stage-2 paging structure.
  * @mm_ops:	Memory management callbacks.
@@ -179,7 +179,9 @@ struct pkvm_mapping {

 int pkvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
 			     struct kvm_pgtable_mm_ops *mm_ops);
-void pkvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
+void pkvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
+				       u64 addr, u64 size);
+void pkvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt);
 int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
 			    enum kvm_pgtable_prot prot, void *mc,
 			    enum kvm_pgtable_walk_flags flags);
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2018 - Arm Ltd */
-
-#ifndef __ARM64_KVM_RAS_H__
-#define __ARM64_KVM_RAS_H__
-
-#include <linux/acpi.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-
-#include <asm/acpi.h>
-
-/*
- * Was this synchronous external abort a RAS notification?
- * Returns '0' for errors handled by some RAS subsystem, or -ENOENT.
- */
-static inline int kvm_handle_guest_sea(void)
-{
-	/* apei_claim_sea(NULL) expects to mask interrupts itself */
-	lockdep_assert_irqs_enabled();
-
-	return apei_claim_sea(NULL);
-}
-
-#endif /* __ARM64_KVM_RAS_H__ */
@@ -17,6 +17,13 @@
 #include <linux/refcount.h>
 #include <asm/cpufeature.h>

+enum pgtable_type {
+	TABLE_PTE,
+	TABLE_PMD,
+	TABLE_PUD,
+	TABLE_P4D,
+};
+
 typedef struct {
 	atomic64_t	id;
 #ifdef CONFIG_COMPAT
@@ -1142,9 +1142,6 @@

 #define ARM64_FEATURE_FIELD_BITS	4

-/* Defined for compatibility only, do not add new users. */
-#define ARM64_FEATURE_MASK(x)	(x##_MASK)
-
 #ifdef __ASSEMBLY__

 .macro mrs_s, rt, sreg
@@ -84,6 +84,7 @@
 #include <asm/hwcap.h>
 #include <asm/insn.h>
+#include <asm/kvm_host.h>
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
 #include <asm/mte.h>
 #include <asm/hypervisor.h>

@@ -1945,11 +1946,11 @@ static bool has_pmuv3(const struct arm64_cpu_capabilities *entry, int scope)
 extern
 void create_kpti_ng_temp_pgd(pgd_t *pgdir, phys_addr_t phys, unsigned long virt,
 			     phys_addr_t size, pgprot_t prot,
-			     phys_addr_t (*pgtable_alloc)(int), int flags);
+			     phys_addr_t (*pgtable_alloc)(enum pgtable_type), int flags);

 static phys_addr_t __initdata kpti_ng_temp_alloc;

-static phys_addr_t __init kpti_ng_pgd_alloc(int shift)
+static phys_addr_t __init kpti_ng_pgd_alloc(enum pgtable_type type)
 {
 	kpti_ng_temp_alloc -= PAGE_SIZE;
 	return kpti_ng_temp_alloc;

@@ -2269,6 +2270,24 @@ static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
 	/* Firmware may have left a deferred SError in this register. */
 	write_sysreg_s(0, SYS_DISR_EL1);
 }
+
+static bool has_rasv1p1(const struct arm64_cpu_capabilities *__unused, int scope)
+{
+	const struct arm64_cpu_capabilities rasv1p1_caps[] = {
+		{
+			ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, RAS, V1P1)
+		},
+		{
+			ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, RAS, IMP)
+		},
+		{
+			ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, RAS_frac, RASv1p1)
+		},
+	};
+
+	return (has_cpuid_feature(&rasv1p1_caps[0], scope) ||
+		(has_cpuid_feature(&rasv1p1_caps[1], scope) &&
+		 has_cpuid_feature(&rasv1p1_caps[2], scope)));
+}
 #endif /* CONFIG_ARM64_RAS_EXTN */

 #ifdef CONFIG_ARM64_PTR_AUTH

@@ -2687,6 +2706,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.cpu_enable = cpu_clear_disr,
 		ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, RAS, IMP)
 	},
+	{
+		.desc = "RASv1p1 Extension Support",
+		.capability = ARM64_HAS_RASV1P1_EXTN,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.matches = has_rasv1p1,
+	},
 #endif /* CONFIG_ARM64_RAS_EXTN */
 #ifdef CONFIG_ARM64_AMU_EXTN
 	{
@@ -2408,12 +2408,12 @@ static u64 get_hyp_id_aa64pfr0_el1(void)
 	 */
 	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

-	val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
-		 ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));
+	val &= ~(ID_AA64PFR0_EL1_CSV2 |
+		 ID_AA64PFR0_EL1_CSV3);

-	val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
+	val |= FIELD_PREP(ID_AA64PFR0_EL1_CSV2,
 			  arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED);
-	val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
+	val |= FIELD_PREP(ID_AA64PFR0_EL1_CSV3,
 			  arm64_get_meltdown_state() == SPECTRE_UNAFFECTED);

 	return val;
@@ -1420,10 +1420,10 @@ void __kvm_at_s12(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
 		return;

 	/*
-	 * If we only have a single stage of translation (E2H=0 or
-	 * TGE=1), exit early. Same thing if {VM,DC}=={0,0}.
+	 * If we only have a single stage of translation (EL2&0), exit
+	 * early. Same thing if {VM,DC}=={0,0}.
 	 */
-	if (!vcpu_el2_e2h_is_set(vcpu) || vcpu_el2_tge_is_set(vcpu) ||
+	if (compute_translation_regime(vcpu, op) == TR_EL20 ||
 	    !(vcpu_read_sys_reg(vcpu, HCR_EL2) & (HCR_VM | HCR_DC)))
 		return;

@@ -2833,7 +2833,7 @@ int kvm_inject_nested_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr)
 		  iabt ? ESR_ELx_EC_IABT_LOW : ESR_ELx_EC_DABT_LOW);
 	esr |= ESR_ELx_FSC_EXTABT | ESR_ELx_IL;

-	vcpu_write_sys_reg(vcpu, FAR_EL2, addr);
+	vcpu_write_sys_reg(vcpu, addr, FAR_EL2);

 	if (__vcpu_sys_reg(vcpu, SCTLR2_EL2) & SCTLR2_EL1_EASE)
 		return kvm_inject_nested(vcpu, esr, except_type_serror);
@@ -22,36 +22,28 @@

 static inline u64 __vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
 {
-	u64 val;
-
-	if (unlikely(vcpu_has_nv(vcpu)))
+	if (has_vhe())
 		return vcpu_read_sys_reg(vcpu, reg);
-	else if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
-		 __vcpu_read_sys_reg_from_cpu(reg, &val))
-		return val;

 	return __vcpu_sys_reg(vcpu, reg);
 }

 static inline void __vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
 {
-	if (unlikely(vcpu_has_nv(vcpu)))
+	if (has_vhe())
 		vcpu_write_sys_reg(vcpu, val, reg);
-	else if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU) ||
-		 !__vcpu_write_sys_reg_to_cpu(val, reg))
+	else
 		__vcpu_assign_sys_reg(vcpu, reg, val);
 }

 static void __vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long target_mode,
 			      u64 val)
 {
-	if (unlikely(vcpu_has_nv(vcpu))) {
+	if (has_vhe()) {
 		if (target_mode == PSR_MODE_EL1h)
 			vcpu_write_sys_reg(vcpu, val, SPSR_EL1);
 		else
 			vcpu_write_sys_reg(vcpu, val, SPSR_EL2);
-	} else if (has_vhe()) {
-		write_sysreg_el1(val, SYS_SPSR);
 	} else {
 		__vcpu_assign_sys_reg(vcpu, SPSR_EL1, val);
 	}

@@ -59,7 +51,7 @@ static void __vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long target_mode,

 static void __vcpu_write_spsr_abt(struct kvm_vcpu *vcpu, u64 val)
 {
-	if (has_vhe())
+	if (has_vhe() && vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
 		write_sysreg(val, spsr_abt);
 	else
 		vcpu->arch.ctxt.spsr_abt = val;

@@ -67,7 +59,7 @@ static void __vcpu_write_spsr_abt(struct kvm_vcpu *vcpu, u64 val)

 static void __vcpu_write_spsr_und(struct kvm_vcpu *vcpu, u64 val)
 {
-	if (has_vhe())
+	if (has_vhe() && vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
 		write_sysreg(val, spsr_und);
 	else
 		vcpu->arch.ctxt.spsr_und = val;
@@ -17,7 +17,7 @@ static inline __must_check bool nvhe_check_data_corruption(bool v)
 		bool corruption = unlikely(condition);			\
 		if (corruption) {					\
 			if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) {	\
-				BUG_ON(1);				\
+				BUG();					\
 			} else						\
 				WARN_ON(1);				\
 		}							\
@@ -253,6 +253,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)

 	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
 	*vcpu_cpsr(vcpu) = read_sysreg_el2(SYS_SPSR);
+	__vcpu_assign_sys_reg(vcpu, read_sysreg_el1(SYS_VBAR), VBAR_EL1);

 	kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);

@@ -372,6 +373,9 @@ static const struct sys_reg_desc pvm_sys_reg_descs[] = {

 	/* Debug and Trace Registers are restricted. */

+	/* Group 1 ID registers */
+	HOST_HANDLED(SYS_REVIDR_EL1),
+
 	/* AArch64 mappings of the AArch32 ID registers */
 	/* CRm=1 */
 	AARCH32(SYS_ID_PFR0_EL1),

@@ -460,6 +464,7 @@ static const struct sys_reg_desc pvm_sys_reg_descs[] = {

 	HOST_HANDLED(SYS_CCSIDR_EL1),
 	HOST_HANDLED(SYS_CLIDR_EL1),
+	HOST_HANDLED(SYS_AIDR_EL1),
 	HOST_HANDLED(SYS_CSSELR_EL1),
 	HOST_HANDLED(SYS_CTR_EL0),

@@ -1551,21 +1551,38 @@ static int stage2_free_walker(const struct kvm_pgtable_visit_ctx *ctx,
 	return 0;
 }

-void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
+void kvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
+				      u64 addr, u64 size)
 {
-	size_t pgd_sz;
 	struct kvm_pgtable_walker walker = {
 		.cb	= stage2_free_walker,
 		.flags	= KVM_PGTABLE_WALK_LEAF |
 			  KVM_PGTABLE_WALK_TABLE_POST,
 	};

-	WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
+	WARN_ON(kvm_pgtable_walk(pgt, addr, size, &walker));
+}
+
+void kvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt)
+{
+	size_t pgd_sz;
+
 	pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE;
-	pgt->mm_ops->free_pages_exact(kvm_dereference_pteref(&walker, pgt->pgd), pgd_sz);
+
+	/*
+	 * Since the pgtable is unlinked at this point, and not shared with
+	 * other walkers, safely deference pgd with kvm_dereference_pteref_raw()
+	 */
+	pgt->mm_ops->free_pages_exact(kvm_dereference_pteref_raw(pgt->pgd), pgd_sz);
 	pgt->pgd = NULL;
 }

+void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
+{
+	kvm_pgtable_stage2_destroy_range(pgt, 0, BIT(pgt->ia_bits));
+	kvm_pgtable_stage2_destroy_pgd(pgt);
+}
+
 void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level)
 {
 	kvm_pteref_t ptep = (kvm_pteref_t)pgtable;
@@ -20,7 +20,7 @@ static bool __is_be(struct kvm_vcpu *vcpu)
 	if (vcpu_mode_is_32bit(vcpu))
 		return !!(read_sysreg_el2(SYS_SPSR) & PSR_AA32_E_BIT);

-	return !!(read_sysreg(SCTLR_EL1) & SCTLR_ELx_EE);
+	return !!(read_sysreg_el1(SYS_SCTLR) & SCTLR_ELx_EE);
 }

 /*
@@ -43,8 +43,11 @@ DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
  *
  * - API/APK: they are already accounted for by vcpu_load(), and can
  *   only take effect across a load/put cycle (such as ERET)
+ *
+ * - FIEN: no way we let a guest have access to the RAS "Common Fault
+ *   Injection" thing, whatever that does
  */
-#define NV_HCR_GUEST_EXCLUDE	(HCR_TGE | HCR_API | HCR_APK)
+#define NV_HCR_GUEST_EXCLUDE	(HCR_TGE | HCR_API | HCR_APK | HCR_FIEN)

 static u64 __compute_hcr(struct kvm_vcpu *vcpu)
 {
@@ -4,19 +4,20 @@
  * Author: Christoffer Dall <c.dall@virtualopensystems.com>
  */

+#include <linux/acpi.h>
 #include <linux/mman.h>
 #include <linux/kvm_host.h>
 #include <linux/io.h>
 #include <linux/hugetlb.h>
 #include <linux/sched/signal.h>
 #include <trace/events/kvm.h>
+#include <asm/acpi.h>
 #include <asm/pgalloc.h>
 #include <asm/cacheflush.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_pgtable.h>
 #include <asm/kvm_pkvm.h>
-#include <asm/kvm_ras.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
 #include <asm/virt.h>
@@ -903,6 +904,38 @@ static int kvm_init_ipa_range(struct kvm_s2_mmu *mmu, unsigned long type)
 	return 0;
 }

+/*
+ * Assume that @pgt is valid and unlinked from the KVM MMU to free the
+ * page-table without taking the kvm_mmu_lock and without performing any
+ * TLB invalidations.
+ *
+ * Also, the range of addresses can be large enough to cause need_resched
+ * warnings, for instance on CONFIG_PREEMPT_NONE kernels. Hence, invoke
+ * cond_resched() periodically to prevent hogging the CPU for a long time
+ * and schedule something else, if required.
+ */
+static void stage2_destroy_range(struct kvm_pgtable *pgt, phys_addr_t addr,
+				 phys_addr_t end)
+{
+	u64 next;
+
+	do {
+		next = stage2_range_addr_end(addr, end);
+		KVM_PGT_FN(kvm_pgtable_stage2_destroy_range)(pgt, addr,
+							     next - addr);
+		if (next != end)
+			cond_resched();
+	} while (addr = next, addr != end);
+}
+
+static void kvm_stage2_destroy(struct kvm_pgtable *pgt)
+{
+	unsigned int ia_bits = VTCR_EL2_IPA(pgt->mmu->vtcr);
+
+	stage2_destroy_range(pgt, 0, BIT(ia_bits));
+	KVM_PGT_FN(kvm_pgtable_stage2_destroy_pgd)(pgt);
+}
+
 /**
  * kvm_init_stage2_mmu - Initialise a S2 MMU structure
  * @kvm:	The pointer to the KVM structure
@@ -979,7 +1012,7 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long t
 	return 0;

 out_destroy_pgtable:
-	KVM_PGT_FN(kvm_pgtable_stage2_destroy)(pgt);
+	kvm_stage2_destroy(pgt);
 out_free_pgtable:
 	kfree(pgt);
 	return err;

@@ -1076,7 +1109,7 @@ void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
 	write_unlock(&kvm->mmu_lock);

 	if (pgt) {
-		KVM_PGT_FN(kvm_pgtable_stage2_destroy)(pgt);
+		kvm_stage2_destroy(pgt);
 		kfree(pgt);
 	}
 }
@@ -1811,6 +1844,19 @@ static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
 	read_unlock(&vcpu->kvm->mmu_lock);
 }

+int kvm_handle_guest_sea(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * Give APEI the opportunity to claim the abort before handling it
+	 * within KVM. apei_claim_sea() expects to be called with IRQs enabled.
+	 */
+	lockdep_assert_irqs_enabled();
+	if (apei_claim_sea(NULL) == 0)
+		return 1;
+
+	return kvm_inject_serror(vcpu);
+}
+
 /**
  * kvm_handle_guest_abort - handles all 2nd stage aborts
  * @vcpu:	the VCPU pointer
@@ -1834,17 +1880,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 	gfn_t gfn;
 	int ret, idx;

-	/* Synchronous External Abort? */
-	if (kvm_vcpu_abt_issea(vcpu)) {
-		/*
-		 * For RAS the host kernel may handle this abort.
-		 * There is no need to pass the error into the guest.
-		 */
-		if (kvm_handle_guest_sea())
-			return kvm_inject_serror(vcpu);
-
-		return 1;
-	}
+	if (kvm_vcpu_abt_issea(vcpu))
+		return kvm_handle_guest_sea(vcpu);

 	esr = kvm_vcpu_get_esr(vcpu);

@@ -1287,7 +1287,10 @@ int kvm_handle_vncr_abort(struct kvm_vcpu *vcpu)
 	struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
 	u64 esr = kvm_vcpu_get_esr(vcpu);

-	BUG_ON(!(esr & ESR_ELx_VNCR_SHIFT));
+	WARN_ON_ONCE(!(esr & ESR_ELx_VNCR));
+
+	if (kvm_vcpu_abt_issea(vcpu))
+		return kvm_handle_guest_sea(vcpu);

 	if (esr_fsc_is_permission_fault(esr)) {
 		inject_vncr_perm(vcpu);
@@ -316,9 +316,16 @@ static int __pkvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 start, u64 e
 	return 0;
 }

-void pkvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
+void pkvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
+				       u64 addr, u64 size)
 {
-	__pkvm_pgtable_stage2_unmap(pgt, 0, ~(0ULL));
+	__pkvm_pgtable_stage2_unmap(pgt, addr, addr + size);
+}
+
+void pkvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt)
+{
+	/* Expected to be called after all pKVM mappings have been released. */
+	WARN_ON_ONCE(!RB_EMPTY_ROOT(&pgt->pkvm_mappings.rb_root));
 }

 int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
@ -82,43 +82,105 @@ static bool write_to_read_only(struct kvm_vcpu *vcpu,
|
|||
"sys_reg write to read-only register");
|
||||
}
|
||||
|
||||
#define PURE_EL2_SYSREG(el2) \
|
||||
case el2: { \
|
||||
*el1r = el2; \
|
||||
return true; \
|
||||
}
|
||||
enum sr_loc_attr {
|
||||
SR_LOC_MEMORY = 0, /* Register definitely in memory */
|
||||
SR_LOC_LOADED = BIT(0), /* Register on CPU, unless it cannot */
|
||||
SR_LOC_MAPPED = BIT(1), /* Register in a different CPU register */
|
||||
SR_LOC_XLATED = BIT(2), /* Register translated to fit another reg */
|
||||
SR_LOC_SPECIAL = BIT(3), /* Demanding register, implies loaded */
|
||||
};
|
||||
|
||||
#define MAPPED_EL2_SYSREG(el2, el1, fn) \
|
||||
case el2: { \
|
||||
*xlate = fn; \
|
||||
*el1r = el1; \
|
||||
return true; \
|
||||
}
|
||||
struct sr_loc {
|
||||
enum sr_loc_attr loc;
|
||||
enum vcpu_sysreg map_reg;
|
||||
u64 (*xlate)(u64);
|
||||
};
|
||||
|
||||
static bool get_el2_to_el1_mapping(unsigned int reg,
|
||||
unsigned int *el1r, u64 (**xlate)(u64))
|
||||
static enum sr_loc_attr locate_direct_register(const struct kvm_vcpu *vcpu,
|
||||
enum vcpu_sysreg reg)
|
||||
{
|
||||
switch (reg) {
|
||||
PURE_EL2_SYSREG( VPIDR_EL2 );
|
||||
PURE_EL2_SYSREG( VMPIDR_EL2 );
|
||||
PURE_EL2_SYSREG( ACTLR_EL2 );
|
||||
PURE_EL2_SYSREG( HCR_EL2 );
|
||||
PURE_EL2_SYSREG( MDCR_EL2 );
|
||||
PURE_EL2_SYSREG( HSTR_EL2 );
|
||||
PURE_EL2_SYSREG( HACR_EL2 );
|
||||
PURE_EL2_SYSREG( VTTBR_EL2 );
|
||||
PURE_EL2_SYSREG( VTCR_EL2 );
|
||||
PURE_EL2_SYSREG( TPIDR_EL2 );
|
||||
PURE_EL2_SYSREG( HPFAR_EL2 );
|
||||
PURE_EL2_SYSREG( HCRX_EL2 );
|
||||
PURE_EL2_SYSREG( HFGRTR_EL2 );
|
||||
PURE_EL2_SYSREG( HFGWTR_EL2 );
|
||||
PURE_EL2_SYSREG( HFGITR_EL2 );
|
||||
PURE_EL2_SYSREG( HDFGRTR_EL2 );
|
||||
PURE_EL2_SYSREG( HDFGWTR_EL2 );
|
||||
PURE_EL2_SYSREG( HAFGRTR_EL2 );
|
||||
PURE_EL2_SYSREG( CNTVOFF_EL2 );
|
||||
PURE_EL2_SYSREG( CNTHCTL_EL2 );
|
||||
case SCTLR_EL1:
|
||||
case CPACR_EL1:
|
||||
case TTBR0_EL1:
|
||||
case TTBR1_EL1:
|
||||
case TCR_EL1:
|
||||
case TCR2_EL1:
|
||||
case PIR_EL1:
|
||||
case PIRE0_EL1:
|
||||
case POR_EL1:
|
||||
case ESR_EL1:
|
||||
case AFSR0_EL1:
|
||||
case AFSR1_EL1:
|
||||
case FAR_EL1:
|
||||
case MAIR_EL1:
|
||||
case VBAR_EL1:
|
||||
case CONTEXTIDR_EL1:
|
||||
case AMAIR_EL1:
|
||||
case CNTKCTL_EL1:
|
||||
case ELR_EL1:
|
||||
case SPSR_EL1:
|
||||
case ZCR_EL1:
|
||||
case SCTLR2_EL1:
|
||||
/*
|
||||
* EL1 registers which have an ELx2 mapping are loaded if
|
||||
* we're not in hypervisor context.
|
||||
*/
|
||||
return is_hyp_ctxt(vcpu) ? SR_LOC_MEMORY : SR_LOC_LOADED;
|
||||
|
||||
case TPIDR_EL0:
|
||||
case TPIDRRO_EL0:
|
||||
case TPIDR_EL1:
|
||||
case PAR_EL1:
|
||||
case DACR32_EL2:
|
||||
case IFSR32_EL2:
|
||||
case DBGVCR32_EL2:
|
||||
/* These registers are always loaded, no matter what */
|
||||
return SR_LOC_LOADED;
|
||||
|
||||
default:
|
||||
/* Non-mapped EL2 registers are by definition in memory. */
|
||||
return SR_LOC_MEMORY;
|
||||
}
|
||||
}
|
||||
|
||||
static void locate_mapped_el2_register(const struct kvm_vcpu *vcpu,
|
||||
enum vcpu_sysreg reg,
|
||||
enum vcpu_sysreg map_reg,
|
||||
u64 (*xlate)(u64),
|
||||
struct sr_loc *loc)
|
||||
{
|
||||
if (!is_hyp_ctxt(vcpu)) {
|
||||
loc->loc = SR_LOC_MEMORY;
|
||||
return;
|
||||
}
|
||||
|
||||
loc->loc = SR_LOC_LOADED | SR_LOC_MAPPED;
|
||||
loc->map_reg = map_reg;
|
||||
|
||||
WARN_ON(locate_direct_register(vcpu, map_reg) != SR_LOC_MEMORY);
|
||||
|
||||
if (xlate != NULL && !vcpu_el2_e2h_is_set(vcpu)) {
|
||||
loc->loc |= SR_LOC_XLATED;
|
||||
loc->xlate = xlate;
|
||||
}
|
||||
}
|
||||
|
||||
#define MAPPED_EL2_SYSREG(r, m, t) \
|
||||
case r: { \
|
||||
locate_mapped_el2_register(vcpu, r, m, t, loc); \
|
||||
break; \
|
||||
}
|
||||
|
||||
static void locate_register(const struct kvm_vcpu *vcpu, enum vcpu_sysreg reg,
|
||||
struct sr_loc *loc)
|
||||
{
|
||||
if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU)) {
|
||||
loc->loc = SR_LOC_MEMORY;
|
||||
return;
|
||||
}
|
||||
|
||||
switch (reg) {
|
||||
MAPPED_EL2_SYSREG(SCTLR_EL2, SCTLR_EL1,
|
||||
translate_sctlr_el2_to_sctlr_el1 );
|
||||
MAPPED_EL2_SYSREG(CPTR_EL2, CPACR_EL1,
|
||||
|
|
@ -144,125 +206,189 @@ static bool get_el2_to_el1_mapping(unsigned int reg,
|
|||
MAPPED_EL2_SYSREG(ZCR_EL2, ZCR_EL1, NULL );
|
||||
MAPPED_EL2_SYSREG(CONTEXTIDR_EL2, CONTEXTIDR_EL1, NULL );
|
||||
MAPPED_EL2_SYSREG(SCTLR2_EL2, SCTLR2_EL1, NULL );
|
||||
case CNTHCTL_EL2:
|
||||
/* CNTHCTL_EL2 is super special, until we support NV2.1 */
|
||||
loc->loc = ((is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu)) ?
|
||||
SR_LOC_SPECIAL : SR_LOC_MEMORY);
|
||||
break;
|
||||
default:
|
||||
return false;
|
||||
loc->loc = locate_direct_register(vcpu, reg);
|
||||
}
|
||||
}
|
||||
|
||||
u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
static u64 read_sr_from_cpu(enum vcpu_sysreg reg)
{
u64 val = 0x8badf00d8badf00d;
u64 (*xlate)(u64) = NULL;
unsigned int el1r;

if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
goto memory_read;
switch (reg) {
case SCTLR_EL1: val = read_sysreg_s(SYS_SCTLR_EL12); break;
case CPACR_EL1: val = read_sysreg_s(SYS_CPACR_EL12); break;
case TTBR0_EL1: val = read_sysreg_s(SYS_TTBR0_EL12); break;
case TTBR1_EL1: val = read_sysreg_s(SYS_TTBR1_EL12); break;
case TCR_EL1: val = read_sysreg_s(SYS_TCR_EL12); break;
case TCR2_EL1: val = read_sysreg_s(SYS_TCR2_EL12); break;
case PIR_EL1: val = read_sysreg_s(SYS_PIR_EL12); break;
case PIRE0_EL1: val = read_sysreg_s(SYS_PIRE0_EL12); break;
case POR_EL1: val = read_sysreg_s(SYS_POR_EL12); break;
case ESR_EL1: val = read_sysreg_s(SYS_ESR_EL12); break;
case AFSR0_EL1: val = read_sysreg_s(SYS_AFSR0_EL12); break;
case AFSR1_EL1: val = read_sysreg_s(SYS_AFSR1_EL12); break;
case FAR_EL1: val = read_sysreg_s(SYS_FAR_EL12); break;
case MAIR_EL1: val = read_sysreg_s(SYS_MAIR_EL12); break;
case VBAR_EL1: val = read_sysreg_s(SYS_VBAR_EL12); break;
case CONTEXTIDR_EL1: val = read_sysreg_s(SYS_CONTEXTIDR_EL12);break;
case AMAIR_EL1: val = read_sysreg_s(SYS_AMAIR_EL12); break;
case CNTKCTL_EL1: val = read_sysreg_s(SYS_CNTKCTL_EL12); break;
case ELR_EL1: val = read_sysreg_s(SYS_ELR_EL12); break;
case SPSR_EL1: val = read_sysreg_s(SYS_SPSR_EL12); break;
case ZCR_EL1: val = read_sysreg_s(SYS_ZCR_EL12); break;
case SCTLR2_EL1: val = read_sysreg_s(SYS_SCTLR2_EL12); break;
case TPIDR_EL0: val = read_sysreg_s(SYS_TPIDR_EL0); break;
case TPIDRRO_EL0: val = read_sysreg_s(SYS_TPIDRRO_EL0); break;
case TPIDR_EL1: val = read_sysreg_s(SYS_TPIDR_EL1); break;
case PAR_EL1: val = read_sysreg_par(); break;
case DACR32_EL2: val = read_sysreg_s(SYS_DACR32_EL2); break;
case IFSR32_EL2: val = read_sysreg_s(SYS_IFSR32_EL2); break;
case DBGVCR32_EL2: val = read_sysreg_s(SYS_DBGVCR32_EL2); break;
default: WARN_ON_ONCE(1);
}

if (unlikely(get_el2_to_el1_mapping(reg, &el1r, &xlate))) {
if (!is_hyp_ctxt(vcpu))
goto memory_read;
return val;
}

static void write_sr_to_cpu(enum vcpu_sysreg reg, u64 val)
{
switch (reg) {
case SCTLR_EL1: write_sysreg_s(val, SYS_SCTLR_EL12); break;
case CPACR_EL1: write_sysreg_s(val, SYS_CPACR_EL12); break;
case TTBR0_EL1: write_sysreg_s(val, SYS_TTBR0_EL12); break;
case TTBR1_EL1: write_sysreg_s(val, SYS_TTBR1_EL12); break;
case TCR_EL1: write_sysreg_s(val, SYS_TCR_EL12); break;
case TCR2_EL1: write_sysreg_s(val, SYS_TCR2_EL12); break;
case PIR_EL1: write_sysreg_s(val, SYS_PIR_EL12); break;
case PIRE0_EL1: write_sysreg_s(val, SYS_PIRE0_EL12); break;
case POR_EL1: write_sysreg_s(val, SYS_POR_EL12); break;
case ESR_EL1: write_sysreg_s(val, SYS_ESR_EL12); break;
case AFSR0_EL1: write_sysreg_s(val, SYS_AFSR0_EL12); break;
case AFSR1_EL1: write_sysreg_s(val, SYS_AFSR1_EL12); break;
case FAR_EL1: write_sysreg_s(val, SYS_FAR_EL12); break;
case MAIR_EL1: write_sysreg_s(val, SYS_MAIR_EL12); break;
case VBAR_EL1: write_sysreg_s(val, SYS_VBAR_EL12); break;
case CONTEXTIDR_EL1: write_sysreg_s(val, SYS_CONTEXTIDR_EL12);break;
case AMAIR_EL1: write_sysreg_s(val, SYS_AMAIR_EL12); break;
case CNTKCTL_EL1: write_sysreg_s(val, SYS_CNTKCTL_EL12); break;
case ELR_EL1: write_sysreg_s(val, SYS_ELR_EL12); break;
case SPSR_EL1: write_sysreg_s(val, SYS_SPSR_EL12); break;
case ZCR_EL1: write_sysreg_s(val, SYS_ZCR_EL12); break;
case SCTLR2_EL1: write_sysreg_s(val, SYS_SCTLR2_EL12); break;
case TPIDR_EL0: write_sysreg_s(val, SYS_TPIDR_EL0); break;
case TPIDRRO_EL0: write_sysreg_s(val, SYS_TPIDRRO_EL0); break;
case TPIDR_EL1: write_sysreg_s(val, SYS_TPIDR_EL1); break;
case PAR_EL1: write_sysreg_s(val, SYS_PAR_EL1); break;
case DACR32_EL2: write_sysreg_s(val, SYS_DACR32_EL2); break;
case IFSR32_EL2: write_sysreg_s(val, SYS_IFSR32_EL2); break;
case DBGVCR32_EL2: write_sysreg_s(val, SYS_DBGVCR32_EL2); break;
default: WARN_ON_ONCE(1);
}
}

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, enum vcpu_sysreg reg)
{
struct sr_loc loc = {};

locate_register(vcpu, reg, &loc);

WARN_ON_ONCE(!has_vhe() && loc.loc != SR_LOC_MEMORY);

if (loc.loc & SR_LOC_SPECIAL) {
u64 val;

WARN_ON_ONCE(loc.loc & ~SR_LOC_SPECIAL);

/*
* CNTHCTL_EL2 requires some special treatment to
* account for the bits that can be set via CNTKCTL_EL1.
* CNTHCTL_EL2 requires some special treatment to account
* for the bits that can be set via CNTKCTL_EL1 when E2H==1.
*/
switch (reg) {
case CNTHCTL_EL2:
if (vcpu_el2_e2h_is_set(vcpu)) {
val = read_sysreg_el1(SYS_CNTKCTL);
val &= CNTKCTL_VALID_BITS;
val |= __vcpu_sys_reg(vcpu, reg) & ~CNTKCTL_VALID_BITS;
return val;
}
break;
val = read_sysreg_el1(SYS_CNTKCTL);
val &= CNTKCTL_VALID_BITS;
val |= __vcpu_sys_reg(vcpu, reg) & ~CNTKCTL_VALID_BITS;
return val;
default:
WARN_ON_ONCE(1);
}

/*
* If this register does not have an EL1 counterpart,
* then read the stored EL2 version.
*/
if (reg == el1r)
goto memory_read;

/*
* If we have a non-VHE guest and the sysreg
* requires translation to be used at EL1, use the
* in-memory copy instead.
*/
if (!vcpu_el2_e2h_is_set(vcpu) && xlate)
goto memory_read;

/* Get the current version of the EL1 counterpart. */
WARN_ON(!__vcpu_read_sys_reg_from_cpu(el1r, &val));
if (reg >= __SANITISED_REG_START__)
val = kvm_vcpu_apply_reg_masks(vcpu, reg, val);

return val;
}

/* EL1 register can't be on the CPU if the guest is in vEL2. */
if (unlikely(is_hyp_ctxt(vcpu)))
goto memory_read;
if (loc.loc & SR_LOC_LOADED) {
enum vcpu_sysreg map_reg = reg;

if (__vcpu_read_sys_reg_from_cpu(reg, &val))
return val;
if (loc.loc & SR_LOC_MAPPED)
map_reg = loc.map_reg;

if (!(loc.loc & SR_LOC_XLATED)) {
u64 val = read_sr_from_cpu(map_reg);

if (reg >= __SANITISED_REG_START__)
val = kvm_vcpu_apply_reg_masks(vcpu, reg, val);

return val;
}
}

memory_read:
return __vcpu_sys_reg(vcpu, reg);
}

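The CNTHCTL_EL2 special case above is an instance of a generic pattern: some bits of the virtual register are backed by a live hardware register while the rest lives in the in-memory shadow. A minimal sketch of the merge, assuming only that CNTKCTL_VALID_BITS selects the hardware-backed bits (the helper name is illustrative, not part of the patch):

    static u64 merge_shadowed_bits(u64 hw_val, u64 mem_val, u64 hw_mask)
    {
            /* hardware-backed bits come from the CPU, the rest from memory */
            return (hw_val & hw_mask) | (mem_val & ~hw_mask);
    }

With hw_mask == CNTKCTL_VALID_BITS this reproduces the read path above; on the write path the full value is stored in memory and the hardware simply latches only the bits it implements.
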
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, enum vcpu_sysreg reg)
{
u64 (*xlate)(u64) = NULL;
unsigned int el1r;
struct sr_loc loc = {};

if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
goto memory_write;
locate_register(vcpu, reg, &loc);

if (unlikely(get_el2_to_el1_mapping(reg, &el1r, &xlate))) {
if (!is_hyp_ctxt(vcpu))
goto memory_write;
WARN_ON_ONCE(!has_vhe() && loc.loc != SR_LOC_MEMORY);

/*
* Always store a copy of the write to memory to avoid having
* to reverse-translate virtual EL2 system registers for a
* non-VHE guest hypervisor.
*/
__vcpu_assign_sys_reg(vcpu, reg, val);
if (loc.loc & SR_LOC_SPECIAL) {

WARN_ON_ONCE(loc.loc & ~SR_LOC_SPECIAL);

switch (reg) {
case CNTHCTL_EL2:
/*
* If E2H=0, CNTHCTL_EL2 is a pure shadow register.
* Otherwise, some of the bits are backed by
* If E2H=1, some of the bits are backed by
* CNTKCTL_EL1, while the rest is kept in memory.
* Yes, this is fun stuff.
*/
if (vcpu_el2_e2h_is_set(vcpu))
write_sysreg_el1(val, SYS_CNTKCTL);
return;
write_sysreg_el1(val, SYS_CNTKCTL);
break;
default:
WARN_ON_ONCE(1);
}

/* No EL1 counterpart? We're done here. */
if (reg == el1r)
return;

if (!vcpu_el2_e2h_is_set(vcpu) && xlate)
val = xlate(val);

/* Redirect this to the EL1 version of the register. */
WARN_ON(!__vcpu_write_sys_reg_to_cpu(val, el1r));
return;
}

/* EL1 register can't be on the CPU if the guest is in vEL2. */
if (unlikely(is_hyp_ctxt(vcpu)))
goto memory_write;
if (loc.loc & SR_LOC_LOADED) {
enum vcpu_sysreg map_reg = reg;
u64 xlated_val;

if (__vcpu_write_sys_reg_to_cpu(val, reg))
return;
if (reg >= __SANITISED_REG_START__)
val = kvm_vcpu_apply_reg_masks(vcpu, reg, val);

if (loc.loc & SR_LOC_MAPPED)
map_reg = loc.map_reg;

if (loc.loc & SR_LOC_XLATED)
xlated_val = loc.xlate(val);
else
xlated_val = val;

write_sr_to_cpu(map_reg, xlated_val);

/*
* Fall through to write the backing store anyway, which
* allows translated registers to be directly read without a
* reverse translation.
*/
}

memory_write:
__vcpu_assign_sys_reg(vcpu, reg, val);
}

@@ -1584,6 +1710,7 @@ static u8 pmuver_to_perfmon(u8 pmuver)
}

static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val);
static u64 sanitise_id_aa64pfr1_el1(const struct kvm_vcpu *vcpu, u64 val);
static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val);

/* Read a sanitised cpufeature ID register by sys_reg_desc */

@@ -1606,19 +1733,7 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
val = sanitise_id_aa64pfr0_el1(vcpu, val);
break;
case SYS_ID_AA64PFR1_EL1:
if (!kvm_has_mte(vcpu->kvm)) {
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE_frac);
}

val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_RNDR_trap);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_NMI);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_GCS);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_THE);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTEX);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_PFAR);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MPAM_frac);
val = sanitise_id_aa64pfr1_el1(vcpu, val);
break;
case SYS_ID_AA64PFR2_EL1:
val &= ID_AA64PFR2_EL1_FPMR |

@@ -1628,18 +1743,18 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
break;
case SYS_ID_AA64ISAR1_EL1:
if (!vcpu_has_ptrauth(vcpu))
val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));
val &= ~(ID_AA64ISAR1_EL1_APA |
ID_AA64ISAR1_EL1_API |
ID_AA64ISAR1_EL1_GPA |
ID_AA64ISAR1_EL1_GPI);
break;
case SYS_ID_AA64ISAR2_EL1:
if (!vcpu_has_ptrauth(vcpu))
val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
val &= ~(ID_AA64ISAR2_EL1_APA3 |
ID_AA64ISAR2_EL1_GPA3);
if (!cpus_have_final_cap(ARM64_HAS_WFXT) ||
has_broken_cntvoff())
val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
val &= ~ID_AA64ISAR2_EL1_WFxT;
break;
case SYS_ID_AA64ISAR3_EL1:
val &= ID_AA64ISAR3_EL1_FPRCVT | ID_AA64ISAR3_EL1_FAMINMAX;

@@ -1655,7 +1770,7 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
ID_AA64MMFR3_EL1_S1PIE;
break;
case SYS_ID_MMFR4_EL1:
val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX);
val &= ~ID_MMFR4_EL1_CCIDX;
break;
}

@@ -1836,6 +1951,31 @@ static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
return val;
}

static u64 sanitise_id_aa64pfr1_el1(const struct kvm_vcpu *vcpu, u64 val)
{
u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

if (!kvm_has_mte(vcpu->kvm)) {
val &= ~ID_AA64PFR1_EL1_MTE;
val &= ~ID_AA64PFR1_EL1_MTE_frac;
}

if (!(cpus_have_final_cap(ARM64_HAS_RASV1P1_EXTN) &&
SYS_FIELD_GET(ID_AA64PFR0_EL1, RAS, pfr0) == ID_AA64PFR0_EL1_RAS_IMP))
val &= ~ID_AA64PFR1_EL1_RAS_frac;

val &= ~ID_AA64PFR1_EL1_SME;
val &= ~ID_AA64PFR1_EL1_RNDR_trap;
val &= ~ID_AA64PFR1_EL1_NMI;
val &= ~ID_AA64PFR1_EL1_GCS;
val &= ~ID_AA64PFR1_EL1_THE;
val &= ~ID_AA64PFR1_EL1_MTEX;
val &= ~ID_AA64PFR1_EL1_PFAR;
val &= ~ID_AA64PFR1_EL1_MPAM_frac;

return val;
}

static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
{
val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, V8P8);

@@ -2697,6 +2837,18 @@ static bool access_ras(struct kvm_vcpu *vcpu,
struct kvm *kvm = vcpu->kvm;

switch(reg_to_encoding(r)) {
case SYS_ERXPFGCDN_EL1:
case SYS_ERXPFGCTL_EL1:
case SYS_ERXPFGF_EL1:
case SYS_ERXMISC2_EL1:
case SYS_ERXMISC3_EL1:
if (!(kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, V1P1) ||
(kvm_has_feat_enum(kvm, ID_AA64PFR0_EL1, RAS, IMP) &&
kvm_has_feat(kvm, ID_AA64PFR1_EL1, RAS_frac, RASv1p1)))) {
kvm_inject_undefined(vcpu);
return false;
}
break;
default:
if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP)) {
kvm_inject_undefined(vcpu);

@@ -2929,7 +3081,6 @@ static const struct sys_reg_desc sys_reg_descs[] = {
~(ID_AA64PFR0_EL1_AMU |
ID_AA64PFR0_EL1_MPAM |
ID_AA64PFR0_EL1_SVE |
ID_AA64PFR0_EL1_RAS |
ID_AA64PFR0_EL1_AdvSIMD |
ID_AA64PFR0_EL1_FP)),
ID_FILTERED(ID_AA64PFR1_EL1, id_aa64pfr1_el1,

@@ -2943,7 +3094,6 @@ static const struct sys_reg_desc sys_reg_descs[] = {
ID_AA64PFR1_EL1_SME |
ID_AA64PFR1_EL1_RES0 |
ID_AA64PFR1_EL1_MPAM_frac |
ID_AA64PFR1_EL1_RAS_frac |
ID_AA64PFR1_EL1_MTE)),
ID_WRITABLE(ID_AA64PFR2_EL1,
ID_AA64PFR2_EL1_FPMR |

@@ -3063,8 +3213,13 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_ERXCTLR_EL1), access_ras },
{ SYS_DESC(SYS_ERXSTATUS_EL1), access_ras },
{ SYS_DESC(SYS_ERXADDR_EL1), access_ras },
{ SYS_DESC(SYS_ERXPFGF_EL1), access_ras },
{ SYS_DESC(SYS_ERXPFGCTL_EL1), access_ras },
{ SYS_DESC(SYS_ERXPFGCDN_EL1), access_ras },
{ SYS_DESC(SYS_ERXMISC0_EL1), access_ras },
{ SYS_DESC(SYS_ERXMISC1_EL1), access_ras },
{ SYS_DESC(SYS_ERXMISC2_EL1), access_ras },
{ SYS_DESC(SYS_ERXMISC3_EL1), access_ras },

MTE_REG(TFSR_EL1),
MTE_REG(TFSRE0_EL1),

@@ -50,6 +50,14 @@ bool vgic_has_its(struct kvm *kvm)

bool vgic_supports_direct_msis(struct kvm *kvm)
{
/*
* Deliberately conflate vLPI and vSGI support on GICv4.1 hardware,
* indirectly allowing userspace to control whether or not vPEs are
* allocated for the VM.
*/
if (system_supports_direct_sgis() && !vgic_supports_direct_sgis(kvm))
return false;

return kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm);
}

@@ -1091,7 +1091,7 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
len = vgic_v3_init_dist_iodev(io_device);
break;
default:
BUG_ON(1);
BUG();
}

io_device->base_addr = dist_base_address;

@@ -396,15 +396,7 @@ bool vgic_supports_direct_sgis(struct kvm *kvm);

static inline bool vgic_supports_direct_irqs(struct kvm *kvm)
{
/*
* Deliberately conflate vLPI and vSGI support on GICv4.1 hardware,
* indirectly allowing userspace to control whether or not vPEs are
* allocated for the VM.
*/
if (system_supports_direct_sgis())
return vgic_supports_direct_sgis(kvm);

return vgic_supports_direct_msis(kvm);
return vgic_supports_direct_msis(kvm) || vgic_supports_direct_sgis(kvm);
}

int vgic_v4_init(struct kvm *kvm);

@@ -47,13 +47,6 @@
#define NO_CONT_MAPPINGS BIT(1)
#define NO_EXEC_MAPPINGS BIT(2) /* assumes FEAT_HPDS is not used */

enum pgtable_type {
TABLE_PTE,
TABLE_PMD,
TABLE_PUD,
TABLE_P4D,
};

u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);

@@ -53,6 +53,7 @@ HAS_S1PIE
HAS_S1POE
HAS_SCTLR2
HAS_RAS_EXTN
HAS_RASV1P1_EXTN
HAS_RNG
HAS_SB
HAS_STAGE2_FWB

@@ -273,6 +273,7 @@ CONFIG_DM9102=m
CONFIG_ULI526X=m
CONFIG_PCMCIA_XIRCOM=m
CONFIG_DL2K=m
CONFIG_SUNDANCE=m
CONFIG_PCMCIA_FMVJ18X=m
CONFIG_E100=m
CONFIG_E1000=m

@@ -433,6 +433,7 @@ CONFIG_DM9102=m
CONFIG_ULI526X=m
CONFIG_PCMCIA_XIRCOM=m
CONFIG_DL2K=m
CONFIG_SUNDANCE=m
CONFIG_S2IO=m
CONFIG_FEC_MPC52xx=m
CONFIG_GIANFAR=m

@@ -39,6 +39,7 @@ int kvm_riscv_mmu_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
unsigned long size, bool writable, bool in_atomic)
{
int ret = 0;
pgprot_t prot;
unsigned long pfn;
phys_addr_t addr, end;
struct kvm_mmu_memory_cache pcache = {

@@ -55,10 +56,12 @@ int kvm_riscv_mmu_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,

end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
pfn = __phys_to_pfn(hpa);
prot = pgprot_noncached(PAGE_WRITE);

for (addr = gpa; addr < end; addr += PAGE_SIZE) {
map.addr = addr;
map.pte = pfn_pte(pfn, PAGE_KERNEL_IO);
map.pte = pfn_pte(pfn, prot);
map.pte = pte_mkdirty(map.pte);
map.level = 0;

if (!writable)

@@ -683,7 +683,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
}

/**
* check_vcpu_requests - check and handle pending vCPU requests
* kvm_riscv_check_vcpu_requests - check and handle pending vCPU requests
* @vcpu: the VCPU pointer
*
* Return: 1 if we should enter the guest

@@ -182,6 +182,8 @@ int kvm_riscv_vcpu_set_reg_vector(struct kvm_vcpu *vcpu,
struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
unsigned long reg_val;

if (reg_size != sizeof(reg_val))
return -EINVAL;
if (copy_from_user(&reg_val, uaddr, reg_size))
return -EFAULT;
if (reg_val != cntx->vector.vlenb)

@@ -36,6 +36,9 @@ static inline bool pgtable_l5_enabled(void)
#define pgtable_l5_enabled() cpu_feature_enabled(X86_FEATURE_LA57)
#endif /* USE_EARLY_PGTABLE_L5 */

#define ARCH_PAGE_TABLE_SYNC_MASK \
(pgtable_l5_enabled() ? PGTBL_PGD_MODIFIED : PGTBL_P4D_MODIFIED)

extern unsigned int pgdir_shift;
extern unsigned int ptrs_per_p4d;

@@ -416,6 +416,10 @@ static bool __init should_mitigate_vuln(unsigned int bug)
cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST) ||
(smt_mitigations != SMT_MITIGATIONS_OFF);

case X86_BUG_SPEC_STORE_BYPASS:
return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER);

default:
WARN(1, "Unknown bug %x\n", bug);
return false;

@@ -2710,6 +2714,11 @@ static void __init ssb_select_mitigation(void)
ssb_mode = SPEC_STORE_BYPASS_DISABLE;
break;
case SPEC_STORE_BYPASS_CMD_AUTO:
if (should_mitigate_vuln(X86_BUG_SPEC_STORE_BYPASS))
ssb_mode = SPEC_STORE_BYPASS_PRCTL;
else
ssb_mode = SPEC_STORE_BYPASS_NONE;
break;
case SPEC_STORE_BYPASS_CMD_PRCTL:
ssb_mode = SPEC_STORE_BYPASS_PRCTL;
break;

@@ -262,7 +262,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
if (c->x86_power & (1 << 8)) {
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
} else if ((c->x86_vfm >= INTEL_P4_PRESCOTT && c->x86_vfm <= INTEL_P4_WILLAMETTE) ||
} else if ((c->x86_vfm >= INTEL_P4_PRESCOTT && c->x86_vfm <= INTEL_P4_CEDARMILL) ||
(c->x86_vfm >= INTEL_CORE_YONAH && c->x86_vfm <= INTEL_IVYBRIDGE)) {
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
}

@@ -171,8 +171,28 @@ static int cmp_id(const void *key, const void *elem)
return 1;
}

static u32 cpuid_to_ucode_rev(unsigned int val)
{
union zen_patch_rev p = {};
union cpuid_1_eax c;

c.full = val;

p.stepping = c.stepping;
p.model = c.model;
p.ext_model = c.ext_model;
p.ext_fam = c.ext_fam;

return p.ucode_rev;
}

static bool need_sha_check(u32 cur_rev)
{
if (!cur_rev) {
cur_rev = cpuid_to_ucode_rev(bsp_cpuid_1_eax);
pr_info_once("No current revision, generating the lowest one: 0x%x\n", cur_rev);
}

switch (cur_rev >> 8) {
case 0x80012: return cur_rev <= 0x800126f; break;
case 0x80082: return cur_rev <= 0x800820f; break;

@@ -749,8 +769,6 @@ static struct ucode_patch *cache_find_patch(struct ucode_cpu_info *uci, u16 equi
n.equiv_cpu = equiv_cpu;
n.patch_id = uci->cpu_sig.rev;

WARN_ON_ONCE(!n.patch_id);

list_for_each_entry(p, &microcode_cache, plist)
if (patch_cpus_equivalent(p, &n, false))
return p;

@@ -81,20 +81,25 @@ static bool parse_8000_001e(struct topo_scan *tscan, bool has_topoext)

cpuid_leaf(0x8000001e, &leaf);

tscan->c->topo.initial_apicid = leaf.ext_apic_id;

/*
* If leaf 0xb is available, then the domain shifts are set
* already and nothing to do here. Only valid for family >= 0x17.
* If leaf 0xb/0x26 is available, then the APIC ID and the domain
* shifts are set already.
*/
if (!has_topoext && tscan->c->x86 >= 0x17) {
/*
* Leaf 0x80000008 set the CORE domain shift already.
* Update the SMT domain, but do not propagate it.
*/
unsigned int nthreads = leaf.core_nthreads + 1;
if (!has_topoext) {
tscan->c->topo.initial_apicid = leaf.ext_apic_id;

topology_update_dom(tscan, TOPO_SMT_DOMAIN, get_count_order(nthreads), nthreads);
/*
* Leaf 0x8000008 sets the CORE domain shift but not the
* SMT domain shift. On CPUs with family >= 0x17, there
* might be hyperthreads.
*/
if (tscan->c->x86 >= 0x17) {
/* Update the SMT domain, but do not propagate it. */
unsigned int nthreads = leaf.core_nthreads + 1;

topology_update_dom(tscan, TOPO_SMT_DOMAIN,
get_count_order(nthreads), nthreads);
}
}

store_node(tscan, leaf.nnodes_per_socket + 1, leaf.node_id);

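Worked example for the SMT domain update above (the numbers are illustrative, not from the patch): a part reporting leaf.core_nthreads == 11 has nthreads = 12, and get_count_order(12) == 4, i.e. thread IDs occupy four APIC-ID bits:

    unsigned int nthreads = 11 + 1;                 /* leaf.core_nthreads + 1 */
    unsigned int shift = get_count_order(nthreads); /* order of next pow2: 4 */

topology_update_dom() then records a 4-bit SMT domain without propagating it upward, since leaf 0x80000008 already set the CORE domain shift.
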
@@ -810,6 +810,8 @@ static int __pv_send_ipi(unsigned long *ipi_bitmap, struct kvm_apic_map *map,
if (min > map->max_apic_id)
return 0;

min = array_index_nospec(min, map->max_apic_id + 1);

for_each_set_bit(i, ipi_bitmap,
min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
if (map->phys_map[min + i]) {

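The array_index_nospec() insertion above is the standard Spectre-v1 hardening pattern: after the bounds check, clamp the possibly attacker-influenced index so that a mispredicted branch cannot be used to load out of bounds. A minimal sketch; the lookup helper is hypothetical, the types are the real ones used above:

    static struct kvm_vcpu *find_vcpu(struct kvm_apic_map *map, u32 id)
    {
            if (id > map->max_apic_id)
                    return NULL;
            /* force id into [0, max_apic_id] even under misspeculation */
            id = array_index_nospec(id, map->max_apic_id + 1);
            return map->phys_map[id] ? map->phys_map[id]->vcpu : NULL;
    }

The same pattern is applied to kvm_sched_yield() a little further down.
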
@@ -718,13 +718,6 @@ static void sev_clflush_pages(struct page *pages[], unsigned long npages)

static void sev_writeback_caches(struct kvm *kvm)
{
/*
* Note, the caller is responsible for ensuring correctness if the mask
* can be modified, e.g. if a CPU could be doing VMRUN.
*/
if (cpumask_empty(to_kvm_sev_info(kvm)->have_run_cpus))
return;

/*
* Ensure that all dirty guest tagged cache entries are written back
* before releasing the pages back to the system for use. CLFLUSH will

@@ -739,6 +732,9 @@ static void sev_writeback_caches(struct kvm *kvm)
* serializing multiple calls and having responding CPUs (to the IPI)
* mark themselves as still running if they are running (or about to
* run) a vCPU for the VM.
*
* Note, the caller is responsible for ensuring correctness if the mask
* can be modified, e.g. if a CPU could be doing VMRUN.
*/
wbnoinvd_on_cpus_mask(to_kvm_sev_info(kvm)->have_run_cpus);
}

@@ -9908,8 +9908,11 @@ static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id)
rcu_read_lock();
map = rcu_dereference(vcpu->kvm->arch.apic_map);

if (likely(map) && dest_id <= map->max_apic_id && map->phys_map[dest_id])
target = map->phys_map[dest_id]->vcpu;
if (likely(map) && dest_id <= map->max_apic_id) {
dest_id = array_index_nospec(dest_id, map->max_apic_id + 1);
if (map->phys_map[dest_id])
target = map->phys_map[dest_id]->vcpu;
}

rcu_read_unlock();

@@ -223,6 +223,24 @@ static void sync_global_pgds(unsigned long start, unsigned long end)
sync_global_pgds_l4(start, end);
}

/*
* Make kernel mappings visible in all page tables in the system.
* This is necessary except when the init task populates kernel mappings
* during the boot process. In that case, all processes originating from
* the init task copy the kernel mappings, so there is no issue.
* Otherwise, missing synchronization could lead to kernel crashes due
* to missing page table entries for certain kernel mappings.
*
* Synchronization is performed at the top level, which is the PGD in
* 5-level paging systems. In 4-level paging systems, however,
* pgd_populate() is a no-op, so synchronization is done at the P4D level.
* sync_global_pgds() handles this difference between paging levels.
*/
void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
{
sync_global_pgds(start, end);
}

/*
* NOTE: This function is marked __ref because it calls __init function
* (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.

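How this hunk fits with the ARCH_PAGE_TABLE_SYNC_MASK definition earlier: the mask tells generic mm which page-table-level modifications require a call into the arch hook. The consumer side, heavily simplified from mm/vmalloc.c and shown only for orientation, looks roughly like:

    /* mask accumulates PGTBL_*_MODIFIED bits while populating tables */
    if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
            arch_sync_kernel_mappings(start, end);

so defining the mask as PGD- or P4D-modified, depending on pgtable_l5_enabled(), is what routes vmalloc-range page-table updates into sync_global_pgds().
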
@@ -149,12 +149,15 @@ static inline void rq_qos_done_bio(struct bio *bio)
q = bdev_get_queue(bio->bi_bdev);

/*
* If a bio has BIO_QOS_xxx set, it implicitly implies that
* q->rq_qos is present. So, we skip re-checking q->rq_qos
* here as an extra optimization and directly call
* __rq_qos_done_bio().
* A BIO may carry BIO_QOS_* flags even if the associated request_queue
* does not have rq_qos enabled. This can happen with stacked block
* devices — for example, NVMe multipath, where it's possible that the
* bottom device has QoS enabled but the top device does not. Therefore,
* always verify that q->rq_qos is present and QoS is enabled before
* calling __rq_qos_done_bio().
*/
__rq_qos_done_bio(q->rq_qos, bio);
if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
__rq_qos_done_bio(q->rq_qos, bio);
}

static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)

@@ -1286,14 +1286,14 @@ static void blk_zone_wplug_bio_work(struct work_struct *work)
struct block_device *bdev;
unsigned long flags;
struct bio *bio;
bool prepared;

/*
* Submit the next plugged BIO. If we do not have any, clear
* the plugged flag.
*/
spin_lock_irqsave(&zwplug->lock, flags);

again:
spin_lock_irqsave(&zwplug->lock, flags);
bio = bio_list_pop(&zwplug->bio_list);
if (!bio) {
zwplug->flags &= ~BLK_ZONE_WPLUG_PLUGGED;

@@ -1304,13 +1304,14 @@ again:
trace_blk_zone_wplug_bio(zwplug->disk->queue, zwplug->zone_no,
bio->bi_iter.bi_sector, bio_sectors(bio));

if (!blk_zone_wplug_prepare_bio(zwplug, bio)) {
prepared = blk_zone_wplug_prepare_bio(zwplug, bio);
spin_unlock_irqrestore(&zwplug->lock, flags);

if (!prepared) {
blk_zone_wplug_bio_io_error(zwplug, bio);
goto again;
}

spin_unlock_irqrestore(&zwplug->lock, flags);

bdev = bio->bi_bdev;

/*

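The restructuring above is a common locking shape: record the decision under the spinlock, drop the lock, then act on failure, so that the error handling (and the retry via the 'again' label) never runs with the plug lock held. In generic sketch form, with hypothetical helpers:

    again:
            spin_lock_irqsave(&lock, flags);
            item = pop_next(&list);
            prepared = item ? prepare(item) : false;
            spin_unlock_irqrestore(&lock, flags);

            if (item && !prepared) {
                    fail(item);     /* safe: the lock is no longer held */
                    goto again;
            }
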
@@ -689,40 +689,50 @@ MODULE_PARM_DESC(mask_port_map,
"where <pci_dev> is the PCI ID of an AHCI controller in the "
"form \"domain:bus:dev.func\"");

static void ahci_apply_port_map_mask(struct device *dev,
struct ahci_host_priv *hpriv, char *mask_s)
static char *ahci_mask_port_ext;
module_param_named(mask_port_ext, ahci_mask_port_ext, charp, 0444);
MODULE_PARM_DESC(mask_port_ext,
"32-bits mask to ignore the external/hotplug capability of ports. "
"Valid values are: "
"\"<mask>\" to apply the same mask to all AHCI controller "
"devices, and \"<pci_dev>=<mask>,<pci_dev>=<mask>,...\" to "
"specify different masks for the controllers specified, "
"where <pci_dev> is the PCI ID of an AHCI controller in the "
"form \"domain:bus:dev.func\"");

static u32 ahci_port_mask(struct device *dev, char *mask_s)
{
unsigned int mask;

if (kstrtouint(mask_s, 0, &mask)) {
dev_err(dev, "Invalid port map mask\n");
return;
return 0;
}

hpriv->mask_port_map = mask;
return mask;
}

static void ahci_get_port_map_mask(struct device *dev,
struct ahci_host_priv *hpriv)
static u32 ahci_get_port_mask(struct device *dev, char *mask_p)
{
char *param, *end, *str, *mask_s;
char *name;
u32 mask = 0;

if (!strlen(ahci_mask_port_map))
return;
if (!mask_p || !strlen(mask_p))
return 0;

str = kstrdup(ahci_mask_port_map, GFP_KERNEL);
str = kstrdup(mask_p, GFP_KERNEL);
if (!str)
return;
return 0;

/* Handle single mask case */
if (!strchr(str, '=')) {
ahci_apply_port_map_mask(dev, hpriv, str);
mask = ahci_port_mask(dev, str);
goto free;
}

/*
* Mask list case: parse the parameter to apply the mask only if
* Mask list case: parse the parameter to get the mask only if
* the device name matches.
*/
param = str;

@@ -752,11 +762,13 @@ static void ahci_get_port_map_mask(struct device *dev,
param++;
}

ahci_apply_port_map_mask(dev, hpriv, mask_s);
mask = ahci_port_mask(dev, mask_s);
}

free:
kfree(str);

return mask;
}

static void ahci_pci_save_initial_config(struct pci_dev *pdev,

@@ -782,8 +794,10 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
}

/* Handle port map masks passed as module parameter. */
if (ahci_mask_port_map)
ahci_get_port_map_mask(&pdev->dev, hpriv);
hpriv->mask_port_map =
ahci_get_port_mask(&pdev->dev, ahci_mask_port_map);
hpriv->mask_port_ext =
ahci_get_port_mask(&pdev->dev, ahci_mask_port_ext);

ahci_save_initial_config(&pdev->dev, hpriv);
}

@@ -1757,11 +1771,20 @@ static void ahci_mark_external_port(struct ata_port *ap)
void __iomem *port_mmio = ahci_port_base(ap);
u32 tmp;

/* mark external ports (hotplug-capable, eSATA) */
/*
* Mark external ports (hotplug-capable, eSATA), unless we were asked to
* ignore this feature.
*/
tmp = readl(port_mmio + PORT_CMD);
if (((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS)) ||
(tmp & PORT_CMD_HPCP))
(tmp & PORT_CMD_HPCP)) {
if (hpriv->mask_port_ext & (1U << ap->port_no)) {
ata_port_info(ap,
"Ignoring external/hotplug capability\n");
return;
}
ap->pflags |= ATA_PFLAG_EXTERNAL;
}
}

static void ahci_update_initial_lpm_policy(struct ata_port *ap)

@@ -330,6 +330,7 @@ struct ahci_host_priv {
/* Input fields */
unsigned int flags; /* AHCI_HFLAG_* */
u32 mask_port_map; /* Mask of valid ports */
u32 mask_port_ext; /* Mask of ports ext capability */

void __iomem * mmio; /* bus-independent mem map */
u32 cap; /* cap to use */

@@ -450,7 +450,6 @@ static int xgene_ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
{
int pmp = sata_srst_pmp(link);
struct ata_port *ap = link->ap;
u32 rc;
void __iomem *port_mmio = ahci_port_base(ap);
u32 port_fbs;

@@ -463,9 +462,7 @@ static int xgene_ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
port_fbs |= pmp << PORT_FBS_DEV_OFFSET;
writel(port_fbs, port_mmio + PORT_FBS);

rc = ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);

return rc;
return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
}

/**

@@ -500,7 +497,7 @@ static int xgene_ahci_softreset(struct ata_link *link, unsigned int *class,
u32 port_fbs;
u32 port_fbs_save;
u32 retry = 1;
u32 rc;
int rc;

port_fbs_save = readl(port_mmio + PORT_FBS);

@@ -139,20 +139,26 @@ static int part_shift;

static loff_t lo_calculate_size(struct loop_device *lo, struct file *file)
{
struct kstat stat;
loff_t loopsize;
int ret;

/*
* Get the accurate file size. This provides better results than
* cached inode data, particularly for network filesystems where
* metadata may be stale.
*/
ret = vfs_getattr_nosec(&file->f_path, &stat, STATX_SIZE, 0);
if (ret)
return 0;
if (S_ISBLK(file_inode(file)->i_mode)) {
loopsize = i_size_read(file->f_mapping->host);
} else {
struct kstat stat;

/*
* Get the accurate file size. This provides better results than
* cached inode data, particularly for network filesystems where
* metadata may be stale.
*/
ret = vfs_getattr_nosec(&file->f_path, &stat, STATX_SIZE, 0);
if (ret)
return 0;

loopsize = stat.size;
}

loopsize = stat.size;
if (lo->lo_offset > 0)
loopsize -= lo->lo_offset;
/* offset is beyond i_size, weird but possible */

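The split above distinguishes block-device backing, where i_size_read() on the bdev inode is authoritative, from regular files, where the cached inode size may be stale (network filesystems in particular). A minimal sketch of the getattr-based query used for the latter, built only from the calls visible in the hunk:

    static loff_t file_size_now(struct file *file)
    {
            struct kstat stat;

            /* ask the filesystem for a fresh size, bypassing security hooks */
            if (vfs_getattr_nosec(&file->f_path, &stat, STATX_SIZE, 0))
                    return 0;
            return stat.size;
    }
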
@@ -239,6 +239,7 @@ struct ublk_device {
struct mutex cancel_mutex;
bool canceling;
pid_t ublksrv_tgid;
struct delayed_work exit_work;
};

/* header of ublk_params */

@@ -1595,12 +1596,62 @@ static void ublk_set_canceling(struct ublk_device *ub, bool canceling)
ublk_get_queue(ub, i)->canceling = canceling;
}

static int ublk_ch_release(struct inode *inode, struct file *filp)
static bool ublk_check_and_reset_active_ref(struct ublk_device *ub)
{
struct ublk_device *ub = filp->private_data;
int i, j;

if (!(ub->dev_info.flags & (UBLK_F_SUPPORT_ZERO_COPY |
UBLK_F_AUTO_BUF_REG)))
return false;

for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
struct ublk_queue *ubq = ublk_get_queue(ub, i);

for (j = 0; j < ubq->q_depth; j++) {
struct ublk_io *io = &ubq->ios[j];
unsigned int refs = refcount_read(&io->ref) +
io->task_registered_buffers;

/*
* UBLK_REFCOUNT_INIT or zero means no active
* reference
*/
if (refs != UBLK_REFCOUNT_INIT && refs != 0)
return true;

/* reset to zero if the io has no active references */
refcount_set(&io->ref, 0);
io->task_registered_buffers = 0;
}
}
return false;
}

static void ublk_ch_release_work_fn(struct work_struct *work)
{
struct ublk_device *ub =
container_of(work, struct ublk_device, exit_work.work);
struct gendisk *disk;
int i;

/*
* For zero-copy and auto buffer register modes, I/O references
* might not be dropped naturally when the daemon is killed, but
* io_uring guarantees that registered bvec kernel buffers are
* unregistered finally when freeing io_uring context, then the
* active references are dropped.
*
* Wait until active references are dropped for avoiding use-after-free
*
* registered buffer may be unregistered in io_uring's release handler,
* so have to wait by scheduling work function for avoiding the two
* file release dependency.
*/
if (ublk_check_and_reset_active_ref(ub)) {
schedule_delayed_work(&ub->exit_work, 1);
return;
}

/*
* disk isn't attached yet, either device isn't live, or it has
* been removed already, so we needn't do anything

@@ -1673,6 +1724,23 @@ unlock:
ublk_reset_ch_dev(ub);
out:
clear_bit(UB_STATE_OPEN, &ub->state);

/* put the reference grabbed in ublk_ch_release() */
ublk_put_device(ub);
}

static int ublk_ch_release(struct inode *inode, struct file *filp)
{
struct ublk_device *ub = filp->private_data;

/*
* Grab ublk device reference, so it won't be gone until we are
* really released from work function.
*/
ublk_get_device(ub);

INIT_DELAYED_WORK(&ub->exit_work, ublk_ch_release_work_fn);
schedule_delayed_work(&ub->exit_work, 0);
return 0;
}

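The release path above is a retry-via-delayed-work pattern: because the buffers may only be unregistered from io_uring's own release handler, ublk cannot wait synchronously inside its file release; instead the work re-arms itself until the references drain. Generic shape of the pattern, with hypothetical type and helper names:

    static void release_work_fn(struct work_struct *work)
    {
            struct my_dev *dev = container_of(work, struct my_dev, exit_work.work);

            if (still_has_users(dev)) {
                    /* try again one jiffy later instead of blocking here */
                    schedule_delayed_work(&dev->exit_work, 1);
                    return;
            }
            do_final_release(dev);
    }
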
@@ -380,6 +380,28 @@ static const struct file_operations force_devcoredump_fops = {
.write = force_devcd_write,
};

static void vhci_debugfs_init(struct vhci_data *data)
{
struct hci_dev *hdev = data->hdev;

debugfs_create_file("force_suspend", 0644, hdev->debugfs, data,
&force_suspend_fops);

debugfs_create_file("force_wakeup", 0644, hdev->debugfs, data,
&force_wakeup_fops);

if (IS_ENABLED(CONFIG_BT_MSFTEXT))
debugfs_create_file("msft_opcode", 0644, hdev->debugfs, data,
&msft_opcode_fops);

if (IS_ENABLED(CONFIG_BT_AOSPEXT))
debugfs_create_file("aosp_capable", 0644, hdev->debugfs, data,
&aosp_capable_fops);

debugfs_create_file("force_devcoredump", 0644, hdev->debugfs, data,
&force_devcoredump_fops);
}

static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
{
struct hci_dev *hdev;

@@ -434,22 +456,8 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
return -EBUSY;
}

debugfs_create_file("force_suspend", 0644, hdev->debugfs, data,
&force_suspend_fops);

debugfs_create_file("force_wakeup", 0644, hdev->debugfs, data,
&force_wakeup_fops);

if (IS_ENABLED(CONFIG_BT_MSFTEXT))
debugfs_create_file("msft_opcode", 0644, hdev->debugfs, data,
&msft_opcode_fops);

if (IS_ENABLED(CONFIG_BT_AOSPEXT))
debugfs_create_file("aosp_capable", 0644, hdev->debugfs, data,
&aosp_capable_fops);

debugfs_create_file("force_devcoredump", 0644, hdev->debugfs, data,
&force_devcoredump_fops);
if (!IS_ERR_OR_NULL(hdev->debugfs))
vhci_debugfs_init(data);

hci_skb_pkt_type(skb) = HCI_VENDOR_PKT;

@@ -651,6 +659,21 @@ static int vhci_open(struct inode *inode, struct file *file)
return 0;
}

static void vhci_debugfs_remove(struct hci_dev *hdev)
{
debugfs_lookup_and_remove("force_suspend", hdev->debugfs);

debugfs_lookup_and_remove("force_wakeup", hdev->debugfs);

if (IS_ENABLED(CONFIG_BT_MSFTEXT))
debugfs_lookup_and_remove("msft_opcode", hdev->debugfs);

if (IS_ENABLED(CONFIG_BT_AOSPEXT))
debugfs_lookup_and_remove("aosp_capable", hdev->debugfs);

debugfs_lookup_and_remove("force_devcoredump", hdev->debugfs);
}

static int vhci_release(struct inode *inode, struct file *file)
{
struct vhci_data *data = file->private_data;

@@ -662,6 +685,8 @@ static int vhci_release(struct inode *inode, struct file *file)
hdev = data->hdev;

if (hdev) {
if (!IS_ERR_OR_NULL(hdev->debugfs))
vhci_debugfs_remove(hdev);
hci_unregister_dev(hdev);
hci_free_dev(hdev);
}

@@ -143,6 +143,10 @@ static efi_status_t mm_communicate(u8 *comm_buf, size_t payload_size)
return var_hdr->ret_status;
}

#define COMM_BUF_SIZE(__payload_size) (MM_COMMUNICATE_HEADER_SIZE + \
MM_VARIABLE_COMMUNICATE_SIZE + \
(__payload_size))

/**
* setup_mm_hdr() - Allocate a buffer for StandAloneMM and initialize the
* header data.

@@ -150,11 +154,9 @@ static efi_status_t mm_communicate(u8 *comm_buf, size_t payload_size)
* @dptr: pointer address to store allocated buffer
* @payload_size: payload size
* @func: standAloneMM function number
* @ret: EFI return code
* Return: pointer to corresponding StandAloneMM function buffer or NULL
*/
static void *setup_mm_hdr(u8 **dptr, size_t payload_size, size_t func,
efi_status_t *ret)
static void *setup_mm_hdr(u8 **dptr, size_t payload_size, size_t func)
{
const efi_guid_t mm_var_guid = EFI_MM_VARIABLE_GUID;
struct efi_mm_communicate_header *mm_hdr;

@@ -169,17 +171,13 @@ static void *setup_mm_hdr(u8 **dptr, size_t payload_size, size_t func,
if (max_buffer_size &&
max_buffer_size < (MM_COMMUNICATE_HEADER_SIZE +
MM_VARIABLE_COMMUNICATE_SIZE + payload_size)) {
*ret = EFI_INVALID_PARAMETER;
return NULL;
}

comm_buf = kzalloc(MM_COMMUNICATE_HEADER_SIZE +
MM_VARIABLE_COMMUNICATE_SIZE + payload_size,
GFP_KERNEL);
if (!comm_buf) {
*ret = EFI_OUT_OF_RESOURCES;
comm_buf = alloc_pages_exact(COMM_BUF_SIZE(payload_size),
GFP_KERNEL | __GFP_ZERO);
if (!comm_buf)
return NULL;
}

mm_hdr = (struct efi_mm_communicate_header *)comm_buf;
memcpy(&mm_hdr->header_guid, &mm_var_guid, sizeof(mm_hdr->header_guid));

@@ -187,9 +185,7 @@ static void *setup_mm_hdr(u8 **dptr, size_t payload_size, size_t func,

var_hdr = (struct smm_variable_communicate_header *)mm_hdr->data;
var_hdr->function = func;
if (dptr)
*dptr = comm_buf;
*ret = EFI_SUCCESS;
*dptr = comm_buf;

return var_hdr->data;
}

@@ -212,10 +208,9 @@ static efi_status_t get_max_payload(size_t *size)

payload_size = sizeof(*var_payload);
var_payload = setup_mm_hdr(&comm_buf, payload_size,
SMM_VARIABLE_FUNCTION_GET_PAYLOAD_SIZE,
&ret);
SMM_VARIABLE_FUNCTION_GET_PAYLOAD_SIZE);
if (!var_payload)
return EFI_OUT_OF_RESOURCES;
return EFI_DEVICE_ERROR;

ret = mm_communicate(comm_buf, payload_size);
if (ret != EFI_SUCCESS)

@@ -239,7 +234,7 @@ static efi_status_t get_max_payload(size_t *size)
*/
*size -= 2;
out:
kfree(comm_buf);
free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
return ret;
}

@@ -259,9 +254,9 @@ static efi_status_t get_property_int(u16 *name, size_t name_size,

smm_property = setup_mm_hdr(
&comm_buf, payload_size,
SMM_VARIABLE_FUNCTION_VAR_CHECK_VARIABLE_PROPERTY_GET, &ret);
SMM_VARIABLE_FUNCTION_VAR_CHECK_VARIABLE_PROPERTY_GET);
if (!smm_property)
return EFI_OUT_OF_RESOURCES;
return EFI_DEVICE_ERROR;

memcpy(&smm_property->guid, vendor, sizeof(smm_property->guid));
smm_property->name_size = name_size;

@@ -282,7 +277,7 @@ static efi_status_t get_property_int(u16 *name, size_t name_size,
memcpy(var_property, &smm_property->property, sizeof(*var_property));

out:
kfree(comm_buf);
free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
return ret;
}

@@ -315,9 +310,9 @@ static efi_status_t tee_get_variable(u16 *name, efi_guid_t *vendor,

payload_size = MM_VARIABLE_ACCESS_HEADER_SIZE + name_size + tmp_dsize;
var_acc = setup_mm_hdr(&comm_buf, payload_size,
SMM_VARIABLE_FUNCTION_GET_VARIABLE, &ret);
SMM_VARIABLE_FUNCTION_GET_VARIABLE);
if (!var_acc)
return EFI_OUT_OF_RESOURCES;
return EFI_DEVICE_ERROR;

/* Fill in contents */
memcpy(&var_acc->guid, vendor, sizeof(var_acc->guid));

@@ -347,7 +342,7 @@ static efi_status_t tee_get_variable(u16 *name, efi_guid_t *vendor,
memcpy(data, (u8 *)var_acc->name + var_acc->name_size,
var_acc->data_size);
out:
kfree(comm_buf);
free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
return ret;
}

@@ -380,10 +375,9 @@ static efi_status_t tee_get_next_variable(unsigned long *name_size,

payload_size = MM_VARIABLE_GET_NEXT_HEADER_SIZE + out_name_size;
var_getnext = setup_mm_hdr(&comm_buf, payload_size,
SMM_VARIABLE_FUNCTION_GET_NEXT_VARIABLE_NAME,
&ret);
SMM_VARIABLE_FUNCTION_GET_NEXT_VARIABLE_NAME);
if (!var_getnext)
return EFI_OUT_OF_RESOURCES;
return EFI_DEVICE_ERROR;

/* Fill in contents */
memcpy(&var_getnext->guid, guid, sizeof(var_getnext->guid));

@@ -404,7 +398,7 @@ static efi_status_t tee_get_next_variable(unsigned long *name_size,
memcpy(name, var_getnext->name, var_getnext->name_size);

out:
kfree(comm_buf);
free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
return ret;
}

@@ -437,9 +431,9 @@ static efi_status_t tee_set_variable(efi_char16_t *name, efi_guid_t *vendor,
* the properties, if the allocation fails
*/
var_acc = setup_mm_hdr(&comm_buf, payload_size,
SMM_VARIABLE_FUNCTION_SET_VARIABLE, &ret);
SMM_VARIABLE_FUNCTION_SET_VARIABLE);
if (!var_acc)
return EFI_OUT_OF_RESOURCES;
return EFI_DEVICE_ERROR;

/*
* The API has the ability to override RO flags. If no RO check was

@@ -467,7 +461,7 @@ static efi_status_t tee_set_variable(efi_char16_t *name, efi_guid_t *vendor,
ret = mm_communicate(comm_buf, payload_size);
dev_dbg(pvt_data.dev, "Set Variable %s %d %lx\n", __FILE__, __LINE__, ret);
out:
kfree(comm_buf);
free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
return ret;
}

@@ -492,10 +486,9 @@ static efi_status_t tee_query_variable_info(u32 attributes,

payload_size = sizeof(*mm_query_info);
mm_query_info = setup_mm_hdr(&comm_buf, payload_size,
SMM_VARIABLE_FUNCTION_QUERY_VARIABLE_INFO,
&ret);
SMM_VARIABLE_FUNCTION_QUERY_VARIABLE_INFO);
if (!mm_query_info)
return EFI_OUT_OF_RESOURCES;
return EFI_DEVICE_ERROR;

mm_query_info->attr = attributes;
ret = mm_communicate(comm_buf, payload_size);

@@ -507,7 +500,7 @@ static efi_status_t tee_query_variable_info(u32 attributes,
*max_variable_size = mm_query_info->max_variable_size;

out:
kfree(comm_buf);
free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
return ret;
}

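A detail worth noting in the conversion above: unlike kfree(), free_pages_exact() must be told the allocation size again, which is why the COMM_BUF_SIZE() macro is introduced and used symmetrically at both ends. The pairing, in minimal form built from the calls shown in the hunks:

    void *buf = alloc_pages_exact(COMM_BUF_SIZE(payload_size),
                                  GFP_KERNEL | __GFP_ZERO);
    if (!buf)
            return EFI_DEVICE_ERROR;
    /* ... fill the header and communicate with StandAloneMM ... */
    free_pages_exact(buf, COMM_BUF_SIZE(payload_size));
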
@@ -137,7 +137,7 @@ static int timbgpio_irq_type(struct irq_data *d, unsigned trigger)
u32 ver;
int ret = 0;

if (offset < 0 || offset > tgpio->gpio.ngpio)
if (offset < 0 || offset >= tgpio->gpio.ngpio)
return -EINVAL;

ver = ioread32(tgpio->membase + TGPIO_VER);

@@ -88,8 +88,8 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}

r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
AMDGPU_VM_PAGE_EXECUTABLE);
AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
AMDGPU_PTE_EXECUTABLE);

if (r) {
DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);

@@ -285,6 +285,36 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
return ret;
}

static int amdgpu_dma_buf_vmap(struct dma_buf *dma_buf, struct iosys_map *map)
{
struct drm_gem_object *obj = dma_buf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
int ret;

/*
* Pin to keep buffer in place while it's vmap'ed. The actual
* domain is not that important as long as it's mappable. Using
* GTT and VRAM should be compatible with most use cases.
*/
ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM);
if (ret)
return ret;
ret = drm_gem_dmabuf_vmap(dma_buf, map);
if (ret)
amdgpu_bo_unpin(bo);

return ret;
}

static void amdgpu_dma_buf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map)
{
struct drm_gem_object *obj = dma_buf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

drm_gem_dmabuf_vunmap(dma_buf, map);
amdgpu_bo_unpin(bo);
}

const struct dma_buf_ops amdgpu_dmabuf_ops = {
.attach = amdgpu_dma_buf_attach,
.pin = amdgpu_dma_buf_pin,

@@ -294,8 +324,8 @@ const struct dma_buf_ops amdgpu_dmabuf_ops = {
.release = drm_gem_dmabuf_release,
.begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
.mmap = drm_gem_dmabuf_mmap,
.vmap = drm_gem_dmabuf_vmap,
.vunmap = drm_gem_dmabuf_vunmap,
.vmap = amdgpu_dma_buf_vmap,
.vunmap = amdgpu_dma_buf_vunmap,
};

/**

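From the importer's point of view, the new ops mean that a vmap now implies a pin for the lifetime of the mapping. A hedged usage sketch using the standard dma-buf API (not part of the patch itself):

    struct iosys_map map;
    int ret;

    ret = dma_buf_vmap(dmabuf, &map);   /* pins to GTT|VRAM, then maps */
    if (ret)
            return ret;
    /* ... CPU access through map.vaddr, buffer cannot be moved ... */
    dma_buf_vunmap(dmabuf, &map);       /* unmaps, then unpins */
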
@@ -471,6 +471,7 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
if (index == (uint64_t)-EINVAL) {
drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n");
kfree(queue);
r = -EINVAL;
goto unlock;
}

@@ -1612,9 +1612,9 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
if (!adev->gfx.disable_uq &&
adev->gfx.me_fw_version >= 2390 &&
adev->gfx.pfp_fw_version >= 2530 &&
adev->gfx.mec_fw_version >= 2600 &&
adev->gfx.me_fw_version >= 2420 &&
adev->gfx.pfp_fw_version >= 2580 &&
adev->gfx.mec_fw_version >= 2650 &&
adev->mes.fw_version[0] >= 120) {
adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs;
adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs;

@@ -4129,6 +4129,8 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
#endif
if (prop->tmz_queue)
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, TMZ_MATCH, 1);
if (!prop->kernel_queue)
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_NON_PRIV, 1);
mqd->cp_gfx_hqd_cntl = tmp;

/* set up cp_doorbell_control */

@@ -4281,8 +4283,10 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH,
prop->allow_tunneling);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
if (prop->kernel_queue) {
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
}
if (prop->tmz_queue)
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TMZ, 1);
mqd->cp_hqd_pq_control = tmp;

@@ -3026,6 +3026,8 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
#endif
if (prop->tmz_queue)
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, TMZ_MATCH, 1);
if (!prop->kernel_queue)
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_NON_PRIV, 1);
mqd->cp_gfx_hqd_cntl = tmp;

/* set up cp_doorbell_control */

@@ -3175,8 +3177,10 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
(order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
if (prop->kernel_queue) {
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
}
if (prop->tmz_queue)
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TMZ, 1);
mqd->cp_hqd_pq_control = tmp;

@@ -3458,14 +3458,16 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
effective_mode &= ~S_IWUSR;

/* not implemented yet for APUs other than GC 10.3.1 (vangogh) and 9.4.3 */
if (((adev->family == AMDGPU_FAMILY_SI) ||
((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(10, 3, 1)) &&
(gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4)))) &&
(attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr))
return 0;
if (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr) {
if (adev->family == AMDGPU_FAMILY_SI ||
((adev->flags & AMD_IS_APU) && gc_ver != IP_VERSION(10, 3, 1) &&
(gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4))) ||
(amdgpu_sriov_vf(adev) && gc_ver == IP_VERSION(11, 0, 3)))
return 0;
}

/* not implemented yet for APUs having < GC 9.3.0 (Renoir) */
if (((adev->family == AMDGPU_FAMILY_SI) ||

@ -40,7 +40,7 @@
|
|||
* mapping's backing &drm_gem_object buffers.
|
||||
*
|
||||
* &drm_gem_object buffers maintain a list of &drm_gpuva objects representing
|
||||
* all existent GPU VA mappings using this &drm_gem_object as backing buffer.
|
||||
* all existing GPU VA mappings using this &drm_gem_object as backing buffer.
|
||||
*
|
||||
* GPU VAs can be flagged as sparse, such that drivers may use GPU VAs to also
|
||||
* keep track of sparse PTEs in order to support Vulkan 'Sparse Resources'.
|
||||
|
|
@ -72,7 +72,7 @@
|
|||
* but it can also be a 'dummy' object, which can be allocated with
|
||||
* drm_gpuvm_resv_object_alloc().
|
||||
*
|
||||
* In order to connect a struct drm_gpuva its backing &drm_gem_object each
|
||||
* In order to connect a struct drm_gpuva to its backing &drm_gem_object each
|
||||
* &drm_gem_object maintains a list of &drm_gpuvm_bo structures, and each
|
||||
* &drm_gpuvm_bo contains a list of &drm_gpuva structures.
|
||||
*
|
||||
|
|
@@ -81,7 +81,7 @@
  * This is ensured by the API through drm_gpuvm_bo_obtain() and
  * drm_gpuvm_bo_obtain_prealloc() which first look into the corresponding
  * &drm_gem_object list of &drm_gpuvm_bos for an existing instance of this
- * particular combination. If not existent a new instance is created and linked
+ * particular combination. If not present, a new instance is created and linked
  * to the &drm_gem_object.
  *
  * &drm_gpuvm_bo structures, since unique for a given &drm_gpuvm, are also used
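A rough sketch of the look-up-or-create pattern that comment describes; drm_gpuvm_bo_obtain() in drm_gpuvm.c is the authoritative implementation, and locking plus error handling are largely elided here.

#include <drm/drm_gpuvm.h>

struct drm_gpuvm_bo *
sketch_bo_obtain(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj)
{
	struct drm_gpuvm_bo *vm_bo;

	/* First look for an existing instance of this gpuvm/obj combination. */
	vm_bo = drm_gpuvm_bo_find(gpuvm, obj);
	if (vm_bo)
		return vm_bo;

	/* Only create and link a new instance if none exists yet. */
	vm_bo = drm_gpuvm_bo_create(gpuvm, obj);
	if (!vm_bo)
		return ERR_PTR(-ENOMEM);

	drm_gem_gpuva_assert_lock_held(obj);
	list_add_tail(&vm_bo->list.entry.gem, &obj->gpuva.list);

	return vm_bo;
}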
@@ -108,7 +108,7 @@
  * sequence of operations to satisfy a given map or unmap request.
  *
  * Therefore the DRM GPU VA manager provides an algorithm implementing splitting
- * and merging of existent GPU VA mappings with the ones that are requested to
+ * and merging of existing GPU VA mappings with the ones that are requested to
  * be mapped or unmapped. This feature is required by the Vulkan API to
  * implement Vulkan 'Sparse Memory Bindings' - drivers UAPIs often refer to this
  * as VM BIND.

@@ -119,7 +119,7 @@
  * execute in order to integrate the new mapping cleanly into the current state
  * of the GPU VA space.
  *
- * Depending on how the new GPU VA mapping intersects with the existent mappings
+ * Depending on how the new GPU VA mapping intersects with the existing mappings
  * of the GPU VA space the &drm_gpuvm_ops callbacks contain an arbitrary amount
  * of unmap operations, a maximum of two remap operations and a single map
  * operation. The caller might receive no callback at all if no operation is

@@ -139,16 +139,16 @@
  * one unmap operation and one or two map operations, such that drivers can
  * derive the page table update delta accordingly.
  *
- * Note that there can't be more than two existent mappings to split up, one at
+ * Note that there can't be more than two existing mappings to split up, one at
  * the beginning and one at the end of the new mapping, hence there is a
  * maximum of two remap operations.
  *
  * Analogous to drm_gpuvm_sm_map() drm_gpuvm_sm_unmap() uses &drm_gpuvm_ops to
  * call back into the driver in order to unmap a range of GPU VA space. The
- * logic behind this function is way simpler though: For all existent mappings
+ * logic behind this function is way simpler though: For all existing mappings
  * enclosed by the given range unmap operations are created. For mappings which
- * are only partically located within the given range, remap operations are
- * created such that those mappings are split up and re-mapped partically.
+ * are only partially located within the given range, remap operations are
+ * created such that those mappings are split up and re-mapped partially.
  *
  * As an alternative to drm_gpuvm_sm_map() and drm_gpuvm_sm_unmap(),
  * drm_gpuvm_sm_map_ops_create() and drm_gpuvm_sm_unmap_ops_create() can be used
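The "at most two remap operations" invariant above follows from simple interval arithmetic: an existing mapping can only leave a remainder in front of and/or behind the requested range. A small self-contained illustration (not DRM code), assuming the two ranges overlap:

#include <stdint.h>
#include <stdio.h>

struct piece { uint64_t addr, range; };

/* Given an existing mapping [va, va+range) and an overlapping request
 * [req, req+req_range), at most two pieces of the existing mapping
 * survive: one before the request and one after it. */
static int surviving_pieces(uint64_t va, uint64_t range,
			    uint64_t req, uint64_t req_range,
			    struct piece out[2])
{
	uint64_t end = va + range, req_end = req + req_range;
	int n = 0;

	if (req > va)		/* remainder in front of the request */
		out[n++] = (struct piece){ va, req - va };
	if (req_end < end)	/* remainder behind the request */
		out[n++] = (struct piece){ req_end, end - req_end };
	return n;
}

int main(void)
{
	struct piece p[2];
	/* Request [2,5) punches a hole in existing [0,7): two remaps. */
	int n = surviving_pieces(0, 7, 2, 3, p);

	for (int i = 0; i < n; i++)
		printf("piece %d: addr=%llu range=%llu\n", i,
		       (unsigned long long)p[i].addr,
		       (unsigned long long)p[i].range);
	return 0;
}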
@@ -168,7 +168,7 @@
  * provided helper functions drm_gpuva_map(), drm_gpuva_remap() and
  * drm_gpuva_unmap() instead.
  *
- * The following diagram depicts the basic relationships of existent GPU VA
+ * The following diagram depicts the basic relationships of existing GPU VA
  * mappings, a newly requested mapping and the resulting mappings as implemented
  * by drm_gpuvm_sm_map() - it doesn't cover any arbitrary combinations of these.
  *

@@ -218,7 +218,7 @@
  *
  *
  * 4) Existent mapping is a left aligned subset of the requested one, hence
- *    replace the existent one.
+ *    replace the existing one.
  *
  * ::
  *

@@ -236,9 +236,9 @@
  *	and/or non-contiguous BO offset.
  *
  *
- * 5) Requested mapping's range is a left aligned subset of the existent one,
+ * 5) Requested mapping's range is a left aligned subset of the existing one,
  *    but backed by a different BO. Hence, map the requested mapping and split
- *    the existent one adjusting its BO offset.
+ *    the existing one adjusting its BO offset.
  *
  * ::
  *

@@ -271,9 +271,9 @@
  *	new: |-----|-----| (a.bo_offset=n, a'.bo_offset=n+1)
  *
  *
- * 7) Requested mapping's range is a right aligned subset of the existent one,
+ * 7) Requested mapping's range is a right aligned subset of the existing one,
  *    but backed by a different BO. Hence, map the requested mapping and split
- *    the existent one, without adjusting the BO offset.
+ *    the existing one, without adjusting the BO offset.
  *
  * ::
  *

@@ -304,7 +304,7 @@
  *
  * 9) Existent mapping is overlapped at the end by the requested mapping backed
  *    by a different BO. Hence, map the requested mapping and split up the
- *    existent one, without adjusting the BO offset.
+ *    existing one, without adjusting the BO offset.
  *
  * ::
  *

@@ -334,9 +334,9 @@
  *	new: |-----|-----------| (a'.bo_offset=n, a.bo_offset=n+1)
  *
  *
- * 11) Requested mapping's range is a centered subset of the existent one
+ * 11) Requested mapping's range is a centered subset of the existing one
  *     having a different backing BO. Hence, map the requested mapping and split
- *     up the existent one in two mappings, adjusting the BO offset of the right
+ *     up the existing one in two mappings, adjusting the BO offset of the right
  *     one accordingly.
  *
  * ::

@@ -351,7 +351,7 @@
  *	new: |-----|-----|-----| (a.bo_offset=n,b.bo_offset=m,a'.bo_offset=n+2)
  *
  *
- * 12) Requested mapping is a contiguous subset of the existent one. Split it
+ * 12) Requested mapping is a contiguous subset of the existing one. Split it
  *     up, but indicate that the backing PTEs could be kept.
  *
  * ::

@@ -367,7 +367,7 @@
  *
  *
  * 13) Existent mapping is a right aligned subset of the requested one, hence
- *     replace the existent one.
+ *     replace the existing one.
  *
  * ::
  *

@@ -386,7 +386,7 @@
  *
  *
  * 14) Existent mapping is a centered subset of the requested one, hence
- *     replace the existent one.
+ *     replace the existing one.
  *
  * ::
  *

@@ -406,7 +406,7 @@
  *
  * 15) Existent mappings is overlapped at the beginning by the requested mapping
  *     backed by a different BO. Hence, map the requested mapping and split up
- *     the existent one, adjusting its BO offset accordingly.
+ *     the existing one, adjusting its BO offset accordingly.
  *
  * ::
  *

@@ -469,8 +469,8 @@
  * make use of them.
  *
  * The below code is strictly limited to illustrate the generic usage pattern.
- * To maintain simplicitly, it doesn't make use of any abstractions for common
- * code, different (asyncronous) stages with fence signalling critical paths,
+ * To maintain simplicity, it doesn't make use of any abstractions for common
+ * code, different (asynchronous) stages with fence signalling critical paths,
  * any other helpers or error handling in terms of freeing memory and dropping
  * previously taken locks.
  *

@@ -479,7 +479,7 @@
  *	// Allocates a new &drm_gpuva.
  *	struct drm_gpuva * driver_gpuva_alloc(void);
  *
- *	// Typically drivers would embedd the &drm_gpuvm and &drm_gpuva
+ *	// Typically drivers would embed the &drm_gpuvm and &drm_gpuva
  *	// structure in individual driver structures and lock the dma-resv with
  *	// drm_exec or similar helpers.
  *	int driver_mapping_create(struct drm_gpuvm *gpuvm,

@@ -582,7 +582,7 @@
  *	.sm_step_unmap = driver_gpuva_unmap,
  * };
 *
- *	// Typically drivers would embedd the &drm_gpuvm and &drm_gpuva
+ *	// Typically drivers would embed the &drm_gpuvm and &drm_gpuva
  *	// structure in individual driver structures and lock the dma-resv with
  *	// drm_exec or similar helpers.
  *	int driver_mapping_create(struct drm_gpuvm *gpuvm,
@@ -680,7 +680,7 @@
  *
  * This helper is here to provide lockless list iteration. Lockless as in, the
  * iterator releases the lock immediately after picking the first element from
- * the list, so list insertion deletion can happen concurrently.
+ * the list, so list insertion and deletion can happen concurrently.
  *
  * Elements popped from the original list are kept in a local list, so removal
  * and is_empty checks can still happen while we're iterating the list.
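A hedged, generic sketch of the pop-under-lock idiom that comment describes; the real helper in drm_gpuvm.c additionally handles reference counting and restoring the original list, which is omitted here. The demo_* names are invented.

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_item {		/* hypothetical element type */
	struct list_head entry;
};

/* Pop elements one at a time; the lock is only held while unlinking, so
 * concurrent insertion and deletion on @src stay possible between steps. */
static void demo_drain(struct list_head *src, spinlock_t *lock,
		       struct list_head *local)
{
	struct demo_item *item;

	for (;;) {
		spin_lock(lock);
		item = list_first_entry_or_null(src, struct demo_item, entry);
		if (item)
			list_move_tail(&item->entry, local);
		spin_unlock(lock);

		if (!item)
			break;

		/* process item outside the lock */
	}
}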
@@ -1160,7 +1160,7 @@ drm_gpuvm_prepare_objects_locked(struct drm_gpuvm *gpuvm,
 }
 
 /**
- * drm_gpuvm_prepare_objects() - prepare all assoiciated BOs
+ * drm_gpuvm_prepare_objects() - prepare all associated BOs
  * @gpuvm: the &drm_gpuvm
  * @exec: the &drm_exec locking context
  * @num_fences: the amount of &dma_fences to reserve

@@ -1230,13 +1230,13 @@ drm_gpuvm_prepare_range(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
 EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_range);
 
 /**
- * drm_gpuvm_exec_lock() - lock all dma-resv of all assoiciated BOs
+ * drm_gpuvm_exec_lock() - lock all dma-resv of all associated BOs
  * @vm_exec: the &drm_gpuvm_exec wrapper
  *
  * Acquires all dma-resv locks of all &drm_gem_objects the given
  * &drm_gpuvm contains mappings of.
  *
- * Addionally, when calling this function with struct drm_gpuvm_exec::extra
+ * Additionally, when calling this function with struct drm_gpuvm_exec::extra
  * being set the driver receives the given @fn callback to lock additional
  * dma-resv in the context of the &drm_gpuvm_exec instance. Typically, drivers
  * would call drm_exec_prepare_obj() from within this callback.
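A rough usage sketch for the wrapper described above, assuming the drm_gpuvm_exec layout from drm/drm_gpuvm.h around this kernel version (exec, vm, flags, num_fences and the optional extra.fn callback); verify the field names against your tree.

#include <drm/drm_gpuvm.h>

static int lock_vm_objects(struct drm_gpuvm *gpuvm)
{
	struct drm_gpuvm_exec vm_exec = {
		.vm = gpuvm,
		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
		.num_fences = 1,
		/* .extra.fn could lock additional driver-private objects */
	};
	int ret;

	/* Acquires the dma-resv locks of all GEM objects the GPUVM maps. */
	ret = drm_gpuvm_exec_lock(&vm_exec);
	if (ret)
		return ret;

	/* ... submit work, add fences ... */

	drm_gpuvm_exec_unlock(&vm_exec);
	return 0;
}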
@@ -1293,7 +1293,7 @@ fn_lock_array(struct drm_gpuvm_exec *vm_exec)
 }
 
 /**
- * drm_gpuvm_exec_lock_array() - lock all dma-resv of all assoiciated BOs
+ * drm_gpuvm_exec_lock_array() - lock all dma-resv of all associated BOs
  * @vm_exec: the &drm_gpuvm_exec wrapper
  * @objs: additional &drm_gem_objects to lock
  * @num_objs: the number of additional &drm_gem_objects to lock

@@ -1588,7 +1588,7 @@ drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
 EXPORT_SYMBOL_GPL(drm_gpuvm_bo_find);
 
 /**
- * drm_gpuvm_bo_obtain() - obtains and instance of the &drm_gpuvm_bo for the
+ * drm_gpuvm_bo_obtain() - obtains an instance of the &drm_gpuvm_bo for the
  * given &drm_gpuvm and &drm_gem_object
  * @gpuvm: The &drm_gpuvm the @obj is mapped in.
  * @obj: The &drm_gem_object being mapped in the @gpuvm.

@@ -1624,7 +1624,7 @@ drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
 EXPORT_SYMBOL_GPL(drm_gpuvm_bo_obtain);
 
 /**
- * drm_gpuvm_bo_obtain_prealloc() - obtains and instance of the &drm_gpuvm_bo
+ * drm_gpuvm_bo_obtain_prealloc() - obtains an instance of the &drm_gpuvm_bo
  * for the given &drm_gpuvm and &drm_gem_object
  * @__vm_bo: A pre-allocated struct drm_gpuvm_bo.
  *

@@ -1688,7 +1688,7 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_bo_extobj_add);
  * @vm_bo: the &drm_gpuvm_bo to add or remove
  * @evict: indicates whether the object is evicted
  *
- * Adds a &drm_gpuvm_bo to or removes it from the &drm_gpuvms evicted list.
+ * Adds a &drm_gpuvm_bo to or removes it from the &drm_gpuvm's evicted list.
  */
 void
 drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict)

@@ -1790,7 +1790,7 @@ __drm_gpuva_remove(struct drm_gpuva *va)
 * drm_gpuva_remove() - remove a &drm_gpuva
 * @va: the &drm_gpuva to remove
 *
- * This removes the given &va from the underlaying tree.
+ * This removes the given &va from the underlying tree.
 *
 * It is safe to use this function using the safe versions of iterating the GPU
 * VA space, such as drm_gpuvm_for_each_va_safe() and

@@ -2358,7 +2358,7 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map);
 *
 * This function iterates the given range of the GPU VA space. It utilizes the
 * &drm_gpuvm_ops to call back into the driver providing the operations to
- * unmap and, if required, split existent mappings.
+ * unmap and, if required, split existing mappings.
 *
 * Drivers may use these callbacks to update the GPU VA space right away within
 * the callback. In case the driver decides to copy and store the operations for

@@ -2430,7 +2430,7 @@ static const struct drm_gpuvm_ops lock_ops = {
 * remapped, and locks+prepares (drm_exec_prepare_object()) objects that
 * will be newly mapped.
 *
- * The expected usage is:
+ * The expected usage is::
 *
 * .. code-block:: c
 *
@@ -2475,7 +2475,7 @@ static const struct drm_gpuvm_ops lock_ops = {
 * required without the earlier DRIVER_OP_MAP. This is safe because we've
 * already locked the GEM object in the earlier DRIVER_OP_MAP step.
 *
- * Returns: 0 on success or a negative error codec
+ * Returns: 0 on success or a negative error code
 */
 int
 drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,

@@ -2619,12 +2619,12 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
 * @req_offset: the offset within the &drm_gem_object
 *
 * This function creates a list of operations to perform splitting and merging
- * of existent mapping(s) with the newly requested one.
+ * of existing mapping(s) with the newly requested one.
 *
 * The list can be iterated with &drm_gpuva_for_each_op and must be processed
 * in the given order. It can contain map, unmap and remap operations, but it
 * also can be empty if no operation is required, e.g. if the requested mapping
- * already exists is the exact same way.
+ * already exists in the exact same way.
 *
 * There can be an arbitrary amount of unmap operations, a maximum of two remap
 * operations and a single map operation. The latter one represents the original
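A hedged sketch of consuming such an ops list, using the signatures as documented around this kernel version (newer trees wrap the map arguments in a request struct, so check yours); the driver-side actions are only placeholders.

#include <drm/drm_gpuvm.h>

static int sketch_vm_bind(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj,
			  u64 addr, u64 range, u64 offset)
{
	struct drm_gpuva_ops *ops;
	struct drm_gpuva_op *op;

	/* Build the split/merge plan without touching the VA space yet. */
	ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range, obj, offset);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	/* Must be processed in order: arbitrary unmaps, at most two remaps,
	 * at most one map. */
	drm_gpuva_for_each_op(op, ops) {
		switch (op->op) {
		case DRM_GPUVA_OP_MAP:
			/* driver: create the new mapping */
			break;
		case DRM_GPUVA_OP_REMAP:
			/* driver: split the existing mapping */
			break;
		case DRM_GPUVA_OP_UNMAP:
			/* driver: remove the old mapping */
			break;
		default:
			break;
		}
	}

	drm_gpuva_ops_free(gpuvm, ops);
	return 0;
}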
@@ -387,19 +387,19 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev)
 
 		of_id = of_match_node(mtk_drm_of_ids, node);
 		if (!of_id)
-			continue;
+			goto next_put_node;
 
 		pdev = of_find_device_by_node(node);
 		if (!pdev)
-			continue;
+			goto next_put_node;
 
 		drm_dev = device_find_child(&pdev->dev, NULL, mtk_drm_match);
 		if (!drm_dev)
-			continue;
+			goto next_put_device_pdev_dev;
 
 		temp_drm_priv = dev_get_drvdata(drm_dev);
 		if (!temp_drm_priv)
-			continue;
+			goto next_put_device_drm_dev;
 
 		if (temp_drm_priv->data->main_len)
 			all_drm_priv[CRTC_MAIN] = temp_drm_priv;

@@ -411,10 +411,17 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev)
 		if (temp_drm_priv->mtk_drm_bound)
 			cnt++;
 
-		if (cnt == MAX_CRTC) {
-			of_node_put(node);
+next_put_device_drm_dev:
+		put_device(drm_dev);
+
+next_put_device_pdev_dev:
+		put_device(&pdev->dev);
+
+next_put_node:
+		of_node_put(node);
+
+		if (cnt == MAX_CRTC)
 			break;
-		}
 	}
 
 	if (drm_priv->data->mmsys_dev_num == cnt) {
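The conversion from continue to a label chain above is the standard kernel unwind idiom: every acquired reference gets exactly one put site, and early exits fall through the tail of the chain instead of skipping it. A generic, hypothetical sketch of the shape (the demo_* helpers are invented stand-ins for of_find_device_by_node(), device_find_child() and their put counterparts):

#include <linux/of.h>

struct demo_a;
struct demo_b;
struct demo_a *demo_get_a(struct device_node *node);
struct demo_b *demo_get_b(struct demo_a *a);
void demo_use(struct demo_b *b);
void demo_put_a(struct demo_a *a);
void demo_put_b(struct demo_b *b);

static void demo_scan(struct device_node *parent)
{
	struct device_node *node;
	struct demo_a *a;
	struct demo_b *b;

	for_each_child_of_node(parent, node) {
		a = demo_get_a(node);
		if (!a)
			continue;	/* nothing acquired yet besides node */

		b = demo_get_b(a);
		if (!b)
			goto next_put_a;	/* drop a, then iterate */

		demo_use(b);
		demo_put_b(b);
next_put_a:
		demo_put_a(a);
		/* for_each_child_of_node() drops the node reference when
		 * advancing to the next child, so nothing more to do here. */
	}
}

The real hunk chains three labels for the three references (drm_dev, pdev, node); the principle is identical.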
@@ -1002,6 +1002,12 @@ static int mtk_dsi_host_attach(struct mipi_dsi_host *host,
 			return PTR_ERR(dsi->next_bridge);
 	}
 
+	/*
+	 * set flag to request the DSI host bridge be pre-enabled before device bridge
+	 * in the chain, so the DSI host is ready when the device bridge is pre-enabled
+	 */
+	dsi->next_bridge->pre_enable_prev_first = true;
+
 	drm_bridge_add(&dsi->bridge);
 
 	ret = component_add(host->dev, &mtk_dsi_component_ops);
@@ -182,8 +182,8 @@ static inline struct mtk_hdmi *hdmi_ctx_from_bridge(struct drm_bridge *b)
 
 static void mtk_hdmi_hw_vid_black(struct mtk_hdmi *hdmi, bool black)
 {
-	regmap_update_bits(hdmi->regs, VIDEO_SOURCE_SEL,
-			   VIDEO_CFG_4, black ? GEN_RGB : NORMAL_PATH);
+	regmap_update_bits(hdmi->regs, VIDEO_CFG_4,
+			   VIDEO_SOURCE_SEL, black ? GEN_RGB : NORMAL_PATH);
 }
 
 static void mtk_hdmi_hw_make_reg_writable(struct mtk_hdmi *hdmi, bool enable)

@@ -310,8 +310,8 @@ static void mtk_hdmi_hw_send_info_frame(struct mtk_hdmi *hdmi, u8 *buffer,
 
 static void mtk_hdmi_hw_send_aud_packet(struct mtk_hdmi *hdmi, bool enable)
 {
-	regmap_update_bits(hdmi->regs, AUDIO_PACKET_OFF,
-			   GRL_SHIFT_R2, enable ? 0 : AUDIO_PACKET_OFF);
+	regmap_update_bits(hdmi->regs, GRL_SHIFT_R2,
+			   AUDIO_PACKET_OFF, enable ? 0 : AUDIO_PACKET_OFF);
 }
 
 static void mtk_hdmi_hw_config_sys(struct mtk_hdmi *hdmi)
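Both hunks above fix the same class of bug: regmap_update_bits() takes (map, reg, mask, val), and the register offset and the field mask had been passed in swapped order. A hedged reminder sketch, with invented register and mask values:

#include <linux/bits.h>
#include <linux/regmap.h>

/* Invented offsets/bits for illustration only. */
#define DEMO_VIDEO_CFG_4	0x10	/* register offset */
#define DEMO_SOURCE_SEL		BIT(16)	/* field mask within that register */

static int demo_set_source(struct regmap *map, bool internal)
{
	/* Correct order: register first, then the mask selecting the field,
	 * then the new field value (already positioned within the mask). */
	return regmap_update_bits(map, DEMO_VIDEO_CFG_4, DEMO_SOURCE_SEL,
				  internal ? DEMO_SOURCE_SEL : 0);
}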
@@ -292,7 +292,8 @@ static void mtk_plane_atomic_disable(struct drm_plane *plane,
 	wmb(); /* Make sure the above parameter is set before update */
 	mtk_plane_state->pending.dirty = true;
 
-	mtk_crtc_plane_disable(old_state->crtc, plane);
+	if (old_state && old_state->crtc)
+		mtk_crtc_plane_disable(old_state->crtc, plane);
 }
 
 static void mtk_plane_atomic_update(struct drm_plane *plane,
@@ -11,7 +11,7 @@
 static const unsigned int *gen7_0_0_external_core_regs[] __always_unused;
 static const unsigned int *gen7_2_0_external_core_regs[] __always_unused;
 static const unsigned int *gen7_9_0_external_core_regs[] __always_unused;
-static struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] __always_unused;
+static const struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] __always_unused;
 static const u32 gen7_9_0_cx_debugbus_blocks[] __always_unused;
 
 #include "adreno_gen7_0_0_snapshot.h"
@@ -174,8 +174,15 @@ static int a6xx_crashdumper_run(struct msm_gpu *gpu,
 static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset,
 		u32 *data)
 {
-	u32 reg = A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(offset) |
-		A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(block);
+	u32 reg;
+
+	if (to_adreno_gpu(gpu)->info->family >= ADRENO_7XX_GEN1) {
+		reg = A7XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(offset) |
+			A7XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(block);
+	} else {
+		reg = A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(offset) |
+			A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(block);
+	}
 
 	gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_A, reg);
 	gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_B, reg);
@@ -198,11 +205,18 @@ static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset,
 	readl((ptr) + ((offset) << 2))
 
 /* read a value from the CX debug bus */
-static int cx_debugbus_read(void __iomem *cxdbg, u32 block, u32 offset,
+static int cx_debugbus_read(struct msm_gpu *gpu, void __iomem *cxdbg, u32 block, u32 offset,
 		u32 *data)
 {
-	u32 reg = A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(offset) |
-		A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block);
+	u32 reg;
+
+	if (to_adreno_gpu(gpu)->info->family >= ADRENO_7XX_GEN1) {
+		reg = A7XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(offset) |
+			A7XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block);
+	} else {
+		reg = A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(offset) |
+			A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block);
+	}
 
 	cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
 	cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
@@ -315,7 +329,8 @@ static void a6xx_get_debugbus_block(struct msm_gpu *gpu,
 		ptr += debugbus_read(gpu, block->id, i, ptr);
 }
 
-static void a6xx_get_cx_debugbus_block(void __iomem *cxdbg,
+static void a6xx_get_cx_debugbus_block(struct msm_gpu *gpu,
+		void __iomem *cxdbg,
 		struct a6xx_gpu_state *a6xx_state,
 		const struct a6xx_debugbus_block *block,
 		struct a6xx_gpu_state_obj *obj)
@@ -330,7 +345,7 @@ static void a6xx_get_cx_debugbus_block(void __iomem *cxdbg,
 	obj->handle = block;
 
 	for (ptr = obj->data, i = 0; i < block->count; i++)
-		ptr += cx_debugbus_read(cxdbg, block->id, i, ptr);
+		ptr += cx_debugbus_read(gpu, cxdbg, block->id, i, ptr);
 }
 
 static void a6xx_get_debugbus_blocks(struct msm_gpu *gpu,
@@ -423,8 +438,9 @@ static void a7xx_get_debugbus_blocks(struct msm_gpu *gpu,
 				a6xx_state, &a7xx_debugbus_blocks[gbif_debugbus_blocks[i]],
 				&a6xx_state->debugbus[i + debugbus_blocks_count]);
 		}
-	}
 
 	a6xx_state->nr_debugbus = total_debugbus_blocks;
+	}
 }
 
 static void a6xx_get_debugbus(struct msm_gpu *gpu,
@@ -526,7 +542,8 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu,
 		int i;
 
 		for (i = 0; i < nr_cx_debugbus_blocks; i++)
-			a6xx_get_cx_debugbus_block(cxdbg,
+			a6xx_get_cx_debugbus_block(gpu,
+						   cxdbg,
 						   a6xx_state,
 						   &cx_debugbus_blocks[i],
 						   &a6xx_state->cx_debugbus[i]);
@@ -759,15 +776,15 @@ static void a7xx_get_cluster(struct msm_gpu *gpu,
 	size_t datasize;
 	int i, regcount = 0;
 
-	/* Some clusters need a selector register to be programmed too */
-	if (cluster->sel)
-		in += CRASHDUMP_WRITE(in, cluster->sel->cd_reg, cluster->sel->val);
-
 	in += CRASHDUMP_WRITE(in, REG_A7XX_CP_APERTURE_CNTL_CD,
 		A7XX_CP_APERTURE_CNTL_CD_PIPE(cluster->pipe_id) |
 		A7XX_CP_APERTURE_CNTL_CD_CLUSTER(cluster->cluster_id) |
 		A7XX_CP_APERTURE_CNTL_CD_CONTEXT(cluster->context_id));
 
+	/* Some clusters need a selector register to be programmed too */
+	if (cluster->sel)
+		in += CRASHDUMP_WRITE(in, cluster->sel->cd_reg, cluster->sel->val);
+
 	for (i = 0; cluster->regs[i] != UINT_MAX; i += 2) {
 		int count = RANGE(cluster->regs, i);
 
@@ -1796,6 +1813,7 @@ static void a7xx_show_shader(struct a6xx_gpu_state_obj *obj,
 
 	print_name(p, " - type: ", a7xx_statetype_names[block->statetype]);
 	print_name(p, " - pipe: ", a7xx_pipe_names[block->pipeid]);
+	drm_printf(p, " - location: %d\n", block->location);
 
 	for (i = 0; i < block->num_sps; i++) {
 		drm_printf(p, " - sp: %d\n", i);
@@ -1873,6 +1891,7 @@ static void a7xx_show_dbgahb_cluster(struct a6xx_gpu_state_obj *obj,
 		print_name(p, " - pipe: ", a7xx_pipe_names[dbgahb->pipe_id]);
 		print_name(p, " - cluster-name: ", a7xx_cluster_names[dbgahb->cluster_id]);
 		drm_printf(p, " - context: %d\n", dbgahb->context_id);
+		drm_printf(p, " - location: %d\n", dbgahb->location_id);
 		a7xx_show_registers_indented(dbgahb->regs, obj->data, p, 4);
 	}
 }
@@ -419,47 +419,47 @@ static const struct a6xx_indexed_registers a6xx_indexed_reglist[] = {
 		REG_A6XX_CP_SQE_STAT_DATA, 0x33, NULL },
 	{ "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
 		REG_A6XX_CP_DRAW_STATE_DATA, 0x100, NULL },
-	{ "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
+	{ "CP_SQE_UCODE_DBG", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
 		REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x8000, NULL },
-	{ "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR,
+	{ "CP_ROQ_DBG", REG_A6XX_CP_ROQ_DBG_ADDR,
 		REG_A6XX_CP_ROQ_DBG_DATA, 0, a6xx_get_cp_roq_size},
 };
 
 static const struct a6xx_indexed_registers a7xx_indexed_reglist[] = {
 	{ "CP_SQE_STAT", REG_A6XX_CP_SQE_STAT_ADDR,
-		REG_A6XX_CP_SQE_STAT_DATA, 0x33, NULL },
+		REG_A6XX_CP_SQE_STAT_DATA, 0x40, NULL },
 	{ "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
 		REG_A6XX_CP_DRAW_STATE_DATA, 0x100, NULL },
-	{ "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
+	{ "CP_SQE_UCODE_DBG", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
 		REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x8000, NULL },
-	{ "CP_BV_SQE_STAT_ADDR", REG_A7XX_CP_BV_SQE_STAT_ADDR,
-		REG_A7XX_CP_BV_SQE_STAT_DATA, 0x33, NULL },
-	{ "CP_BV_DRAW_STATE_ADDR", REG_A7XX_CP_BV_DRAW_STATE_ADDR,
+	{ "CP_BV_SQE_STAT", REG_A7XX_CP_BV_SQE_STAT_ADDR,
+		REG_A7XX_CP_BV_SQE_STAT_DATA, 0x40, NULL },
+	{ "CP_BV_DRAW_STATE", REG_A7XX_CP_BV_DRAW_STATE_ADDR,
 		REG_A7XX_CP_BV_DRAW_STATE_DATA, 0x100, NULL },
-	{ "CP_BV_SQE_UCODE_DBG_ADDR", REG_A7XX_CP_BV_SQE_UCODE_DBG_ADDR,
+	{ "CP_BV_SQE_UCODE_DBG", REG_A7XX_CP_BV_SQE_UCODE_DBG_ADDR,
 		REG_A7XX_CP_BV_SQE_UCODE_DBG_DATA, 0x8000, NULL },
-	{ "CP_SQE_AC_STAT_ADDR", REG_A7XX_CP_SQE_AC_STAT_ADDR,
-		REG_A7XX_CP_SQE_AC_STAT_DATA, 0x33, NULL },
-	{ "CP_LPAC_DRAW_STATE_ADDR", REG_A7XX_CP_LPAC_DRAW_STATE_ADDR,
+	{ "CP_SQE_AC_STAT", REG_A7XX_CP_SQE_AC_STAT_ADDR,
+		REG_A7XX_CP_SQE_AC_STAT_DATA, 0x40, NULL },
+	{ "CP_LPAC_DRAW_STATE", REG_A7XX_CP_LPAC_DRAW_STATE_ADDR,
 		REG_A7XX_CP_LPAC_DRAW_STATE_DATA, 0x100, NULL },
-	{ "CP_SQE_AC_UCODE_DBG_ADDR", REG_A7XX_CP_SQE_AC_UCODE_DBG_ADDR,
+	{ "CP_SQE_AC_UCODE_DBG", REG_A7XX_CP_SQE_AC_UCODE_DBG_ADDR,
 		REG_A7XX_CP_SQE_AC_UCODE_DBG_DATA, 0x8000, NULL },
-	{ "CP_LPAC_FIFO_DBG_ADDR", REG_A7XX_CP_LPAC_FIFO_DBG_ADDR,
+	{ "CP_LPAC_FIFO_DBG", REG_A7XX_CP_LPAC_FIFO_DBG_ADDR,
 		REG_A7XX_CP_LPAC_FIFO_DBG_DATA, 0x40, NULL },
-	{ "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR,
+	{ "CP_ROQ_DBG", REG_A6XX_CP_ROQ_DBG_ADDR,
 		REG_A6XX_CP_ROQ_DBG_DATA, 0, a7xx_get_cp_roq_size },
 };
 
 static const struct a6xx_indexed_registers a6xx_cp_mempool_indexed = {
-	"CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
+	"CP_MEM_POOL_DBG", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
 	REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2060, NULL,
 };
 
 static const struct a6xx_indexed_registers a7xx_cp_bv_mempool_indexed[] = {
-	{ "CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
-		REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2100, NULL },
-	{ "CP_BV_MEMPOOL", REG_A7XX_CP_BV_MEM_POOL_DBG_ADDR,
-		REG_A7XX_CP_BV_MEM_POOL_DBG_DATA, 0x2100, NULL },
+	{ "CP_MEM_POOL_DBG", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
+		REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2200, NULL },
+	{ "CP_BV_MEM_POOL_DBG", REG_A7XX_CP_BV_MEM_POOL_DBG_ADDR,
+		REG_A7XX_CP_BV_MEM_POOL_DBG_DATA, 0x2200, NULL },
 };
 
 #define DEBUGBUS(_id, _count) { .id = _id, .name = #_id, .count = _count }
@@ -81,7 +81,7 @@ static const u32 gen7_0_0_debugbus_blocks[] = {
 	A7XX_DBGBUS_USPTP_7,
 };
 
-static struct gen7_shader_block gen7_0_0_shader_blocks[] = {
+static const struct gen7_shader_block gen7_0_0_shader_blocks[] = {
 	{A7XX_TP0_TMO_DATA, 0x200, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
 	{A7XX_TP0_SMO_DATA, 0x80, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
 	{A7XX_TP0_MIPMAP_BASE_DATA, 0x3c0, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
@@ -668,12 +668,19 @@ static const u32 gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers[] = {
 };
 static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers), 8));
 
-/* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_BR */
-static const u32 gen7_0_0_tpl1_noncontext_pipe_br_registers[] = {
+/* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_NONE */
+static const u32 gen7_0_0_tpl1_noncontext_pipe_none_registers[] = {
 	0x0b600, 0x0b600, 0x0b602, 0x0b602, 0x0b604, 0x0b604, 0x0b608, 0x0b60c,
 	0x0b60f, 0x0b621, 0x0b630, 0x0b633,
 	UINT_MAX, UINT_MAX,
 };
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_noncontext_pipe_none_registers), 8));
+
+/* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_BR */
+static const u32 gen7_0_0_tpl1_noncontext_pipe_br_registers[] = {
+	0x0b600, 0x0b600,
+	UINT_MAX, UINT_MAX,
+};
 static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_noncontext_pipe_br_registers), 8));
 
 /* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_LPAC */
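The register tables above all follow one shape: {first, last} u32 pairs terminated by a UINT_MAX pair, with a static_assert documenting the 8-byte (one pair) alignment the snapshot walker relies on. A minimal sketch of the same pattern, with an invented table name and values:

#include <linux/align.h>
#include <linux/build_bug.h>
#include <linux/kernel.h>

/* Invented range table: pairs of {first, last} registers, UINT_MAX-terminated. */
static const u32 demo_noncontext_registers[] = {
	0x0b600, 0x0b600, 0x0b602, 0x0b604,
	UINT_MAX, UINT_MAX,
};
/* An odd number of u32s would break the pair-wise walker, so assert that
 * the table size stays a multiple of 8 bytes (one pair). */
static_assert(IS_ALIGNED(sizeof(demo_noncontext_registers), 8));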
@@ -695,7 +702,7 @@ static const struct gen7_sel_reg gen7_0_0_rb_rbp_sel = {
 	.val = 0x9,
 };
 
-static struct gen7_cluster_registers gen7_0_0_clusters[] = {
+static const struct gen7_cluster_registers gen7_0_0_clusters[] = {
 	{ A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
 		gen7_0_0_noncontext_pipe_br_registers, },
 	{ A7XX_CLUSTER_NONE, A7XX_PIPE_BV, STATE_NON_CONTEXT,

@@ -764,7 +771,7 @@ static struct gen7_cluster_registers gen7_0_0_clusters[] = {
 		gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers, },
 };
 
-static struct gen7_sptp_cluster_registers gen7_0_0_sptp_clusters[] = {
+static const struct gen7_sptp_cluster_registers gen7_0_0_sptp_clusters[] = {
 	{ A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
 		gen7_0_0_sp_noncontext_pipe_br_hlsq_state_registers, 0xae00 },
 	{ A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,

@@ -914,7 +921,7 @@ static const u32 gen7_0_0_dpm_registers[] = {
 };
 static_assert(IS_ALIGNED(sizeof(gen7_0_0_dpm_registers), 8));
 
-static struct gen7_reg_list gen7_0_0_reg_list[] = {
+static const struct gen7_reg_list gen7_0_0_reg_list[] = {
 	{ gen7_0_0_gpu_registers, NULL },
 	{ gen7_0_0_cx_misc_registers, NULL },
 	{ gen7_0_0_dpm_registers, NULL },
@@ -95,7 +95,7 @@ static const u32 gen7_2_0_debugbus_blocks[] = {
 	A7XX_DBGBUS_CCHE_2,
 };
 
-static struct gen7_shader_block gen7_2_0_shader_blocks[] = {
+static const struct gen7_shader_block gen7_2_0_shader_blocks[] = {
 	{A7XX_TP0_TMO_DATA, 0x200, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
 	{A7XX_TP0_SMO_DATA, 0x80, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
 	{A7XX_TP0_MIPMAP_BASE_DATA, 0x3c0, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},

@@ -489,7 +489,7 @@ static const struct gen7_sel_reg gen7_2_0_rb_rbp_sel = {
 	.val = 0x9,
 };
 
-static struct gen7_cluster_registers gen7_2_0_clusters[] = {
+static const struct gen7_cluster_registers gen7_2_0_clusters[] = {
 	{ A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
 		gen7_2_0_noncontext_pipe_br_registers, },
 	{ A7XX_CLUSTER_NONE, A7XX_PIPE_BV, STATE_NON_CONTEXT,

@@ -558,7 +558,7 @@ static struct gen7_cluster_registers gen7_2_0_clusters[] = {
 		gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers, },
 };
 
-static struct gen7_sptp_cluster_registers gen7_2_0_sptp_clusters[] = {
+static const struct gen7_sptp_cluster_registers gen7_2_0_sptp_clusters[] = {
 	{ A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
 		gen7_0_0_sp_noncontext_pipe_br_hlsq_state_registers, 0xae00 },
 	{ A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,

@@ -573,6 +573,8 @@ static struct gen7_sptp_cluster_registers gen7_2_0_sptp_clusters[] = {
 		gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers, 0xaf80 },
 	{ A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
 		gen7_0_0_tpl1_noncontext_pipe_br_registers, 0xb600 },
+	{ A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_NONE, 0, A7XX_USPTP,
+		gen7_0_0_tpl1_noncontext_pipe_none_registers, 0xb600 },
 	{ A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
 		gen7_0_0_tpl1_noncontext_pipe_lpac_registers, 0xb780 },
 	{ A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,

@@ -737,7 +739,7 @@ static const u32 gen7_2_0_dpm_registers[] = {
 };
 static_assert(IS_ALIGNED(sizeof(gen7_2_0_dpm_registers), 8));
 
-static struct gen7_reg_list gen7_2_0_reg_list[] = {
+static const struct gen7_reg_list gen7_2_0_reg_list[] = {
 	{ gen7_2_0_gpu_registers, NULL },
 	{ gen7_2_0_cx_misc_registers, NULL },
 	{ gen7_2_0_dpm_registers, NULL },