Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf after rc4

Cross-merge bpf and other fixes after downstream PRs. No conflicts.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>

commit 224ee86639
@@ -562,7 +562,7 @@ The interesting knobs for XFS workqueues are as follows:

Zoned Filesystems
=================

For zoned file systems, the following attribute is exposed in:
For zoned file systems, the following attributes are exposed in:

/sys/fs/xfs/<dev>/zoned/

@@ -572,23 +572,10 @@ For zoned file systems, the following attribute is exposed in:

is limited by the capabilities of the backing zoned device, file system
size and the max_open_zones mount option.

Zoned Filesystems
=================

For zoned file systems, the following attributes are exposed in:

/sys/fs/xfs/<dev>/zoned/

max_open_zones (Min: 1 Default: Varies Max: UINTMAX)
This read-only attribute exposes the maximum number of open zones
available for data placement. The value is determined at mount time and
is limited by the capabilities of the backing zoned device, file system
size and the max_open_zones mount option.

zonegc_low_space (Min: 0 Default: 0 Max: 100)
Define a percentage for how much of the unused space that GC should keep
available for writing. A high value will reclaim more of the space
occupied by unused blocks, creating a larger buffer against write
bursts at the cost of increased write amplification. Regardless
of this value, garbage collection will always aim to free a minimum
amount of blocks to keep max_open_zones open for data placement purposes.

zonegc_low_space (Min: 0 Default: 0 Max: 100)
Define a percentage for how much of the unused space that GC should keep
available for writing. A high value will reclaim more of the space
occupied by unused blocks, creating a larger buffer against write
bursts at the cost of increased write amplification. Regardless
of this value, garbage collection will always aim to free a minimum
amount of blocks to keep max_open_zones open for data placement purposes.
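Illustrative aside, not taken from the patch above: the attributes documented
here are ordinary sysfs files, so they can be read with plain file I/O. A
minimal C sketch for reading one of them follows; the device name "sda" is an
assumption made only for this example:

    #include <stdio.h>

    /* Read the read-only max_open_zones attribute described above.
     * The device name "sda" is only an assumption for this example. */
    int main(void)
    {
            char buf[64];
            FILE *f = fopen("/sys/fs/xfs/sda/zoned/max_open_zones", "r");

            if (!f)
                    return 1;
            if (fgets(buf, sizeof(buf), f))
                    printf("max_open_zones: %s", buf);
            fclose(f);
            return 0;
    }
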
@@ -7,10 +7,10 @@ target architecture, specifically, is the 32-bit OpenRISC 1000 family (or1k).

For information about OpenRISC processors and ongoing development:

======= =============================
======= ==============================
website https://openrisc.io
email   openrisc@lists.librecores.org
======= =============================
email   linux-openrisc@vger.kernel.org
======= ==============================

---------------------------------------------------------------------

@@ -27,11 +27,11 @@ Toolchain binaries can be obtained from openrisc.io or our github releases page.

Instructions for building the different toolchains can be found on openrisc.io
or Stafford's toolchain build and release scripts.

========== =================================================
binaries   https://github.com/openrisc/or1k-gcc/releases
========== ==========================================================
binaries   https://github.com/stffrdhrn/or1k-toolchain-build/releases
toolchains https://openrisc.io/software
building   https://github.com/stffrdhrn/or1k-toolchain-build
========== =================================================
========== ==========================================================

2) Building
@@ -382,6 +382,14 @@ In case of new BPF instructions, once the changes have been accepted
into the Linux kernel, please implement support into LLVM's BPF back
end. See LLVM_ section below for further information.

Q: What "BPF_INTERNAL" symbol namespace is for?
-----------------------------------------------
A: Symbols exported as BPF_INTERNAL can only be used by BPF infrastructure
like preload kernel modules with light skeleton. Most symbols outside
of BPF_INTERNAL are not expected to be used by code outside of BPF either.
Symbols may lack the designation because they predate the namespaces,
or due to an oversight.
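Illustrative aside, not quoted from the patch: BPF_INTERNAL uses the kernel's
regular symbol-namespace machinery, so an export and its consumer would look
roughly like the sketch below. The helper function and its placement are made
up for the example; only the namespace macros are real kernel interfaces.

    /* Exporter side (e.g. somewhere under kernel/bpf/): the symbol is visible
     * only to modules that import the BPF_INTERNAL namespace. */
    #include <linux/export.h>

    void bpf_internal_helper(void)
    {
    }
    EXPORT_SYMBOL_NS_GPL(bpf_internal_helper, "BPF_INTERNAL");

    /* Consumer side (e.g. a preload module using a light skeleton): without
     * this import, modpost complains about the reference to the symbol. */
    #include <linux/module.h>

    MODULE_IMPORT_NS("BPF_INTERNAL");
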
Stable submission
=================
@@ -27,7 +27,7 @@ properties:
$ref: /schemas/types.yaml#/definitions/uint32-array
items:
- minimum: 0
maximum: 7
maximum: 31
description:
Offset in bit within the address range specified by reg.
- minimum: 1
@ -19,6 +19,7 @@ properties:
|
|||
- enum:
|
||||
- qcom,apq8064-qfprom
|
||||
- qcom,apq8084-qfprom
|
||||
- qcom,ipq5018-qfprom
|
||||
- qcom,ipq5332-qfprom
|
||||
- qcom,ipq5424-qfprom
|
||||
- qcom,ipq6018-qfprom
|
||||
|
|
@ -28,6 +29,8 @@ properties:
|
|||
- qcom,msm8226-qfprom
|
||||
- qcom,msm8916-qfprom
|
||||
- qcom,msm8917-qfprom
|
||||
- qcom,msm8937-qfprom
|
||||
- qcom,msm8960-qfprom
|
||||
- qcom,msm8974-qfprom
|
||||
- qcom,msm8976-qfprom
|
||||
- qcom,msm8996-qfprom
|
||||
|
|
@ -51,6 +54,7 @@ properties:
|
|||
- qcom,sm8450-qfprom
|
||||
- qcom,sm8550-qfprom
|
||||
- qcom,sm8650-qfprom
|
||||
- qcom,x1e80100-qfprom
|
||||
- const: qcom,qfprom
|
||||
|
||||
reg:
|
||||
|
|
|
|||
|
|
@ -14,6 +14,7 @@ properties:
|
|||
enum:
|
||||
- rockchip,px30-otp
|
||||
- rockchip,rk3308-otp
|
||||
- rockchip,rk3576-otp
|
||||
- rockchip,rk3588-otp
|
||||
|
||||
reg:
|
||||
|
|
@ -62,12 +63,34 @@ allOf:
|
|||
properties:
|
||||
clocks:
|
||||
maxItems: 3
|
||||
clock-names:
|
||||
maxItems: 3
|
||||
resets:
|
||||
maxItems: 1
|
||||
reset-names:
|
||||
items:
|
||||
- const: phy
|
||||
|
||||
- if:
|
||||
properties:
|
||||
compatible:
|
||||
contains:
|
||||
enum:
|
||||
- rockchip,rk3576-otp
|
||||
then:
|
||||
properties:
|
||||
clocks:
|
||||
maxItems: 3
|
||||
clock-names:
|
||||
maxItems: 3
|
||||
resets:
|
||||
minItems: 2
|
||||
maxItems: 2
|
||||
reset-names:
|
||||
items:
|
||||
- const: otp
|
||||
- const: apb
|
||||
|
||||
- if:
|
||||
properties:
|
||||
compatible:
|
||||
|
|
@ -78,6 +101,8 @@ allOf:
|
|||
properties:
|
||||
clocks:
|
||||
minItems: 4
|
||||
clock-names:
|
||||
minItems: 4
|
||||
resets:
|
||||
minItems: 3
|
||||
reset-names:
|
||||
|
|
|
|||
|
|
@ -17,10 +17,10 @@ OpenRISC 1000系列(或1k)。
|
|||
|
||||
关于OpenRISC处理器和正在进行中的开发的信息:
|
||||
|
||||
======= =============================
|
||||
======= ==============================
|
||||
网站 https://openrisc.io
|
||||
邮箱 openrisc@lists.librecores.org
|
||||
======= =============================
|
||||
邮箱 linux-openrisc@vger.kernel.org
|
||||
======= ==============================
|
||||
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
|
@ -36,11 +36,11 @@ OpenRISC工具链和Linux的构建指南
|
|||
工具链的构建指南可以在openrisc.io或Stafford的工具链构建和发布脚本
|
||||
中找到。
|
||||
|
||||
====== =================================================
|
||||
二进制 https://github.com/openrisc/or1k-gcc/releases
|
||||
====== ==========================================================
|
||||
二进制 https://github.com/stffrdhrn/or1k-toolchain-build/releases
|
||||
工具链 https://openrisc.io/software
|
||||
构建 https://github.com/stffrdhrn/or1k-toolchain-build
|
||||
====== =================================================
|
||||
====== ==========================================================
|
||||
|
||||
2) 构建
|
||||
|
||||
|
|
|
|||
|
|
@ -17,10 +17,10 @@ OpenRISC 1000系列(或1k)。
|
|||
|
||||
關於OpenRISC處理器和正在進行中的開發的信息:
|
||||
|
||||
======= =============================
|
||||
======= ==============================
|
||||
網站 https://openrisc.io
|
||||
郵箱 openrisc@lists.librecores.org
|
||||
======= =============================
|
||||
郵箱 linux-openrisc@vger.kernel.org
|
||||
======= ==============================
|
||||
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
|
@ -36,11 +36,11 @@ OpenRISC工具鏈和Linux的構建指南
|
|||
工具鏈的構建指南可以在openrisc.io或Stafford的工具鏈構建和發佈腳本
|
||||
中找到。
|
||||
|
||||
====== =================================================
|
||||
二進制 https://github.com/openrisc/or1k-gcc/releases
|
||||
====== ==========================================================
|
||||
二進制 https://github.com/stffrdhrn/or1k-toolchain-build/releases
|
||||
工具鏈 https://openrisc.io/software
|
||||
構建 https://github.com/stffrdhrn/or1k-toolchain-build
|
||||
====== =================================================
|
||||
====== ==========================================================
|
||||
|
||||
2) 構建
|
||||
|
||||
|
|
|
|||
MAINTAINERS
@ -3191,6 +3191,12 @@ M: Dinh Nguyen <dinguyen@kernel.org>
|
|||
S: Maintained
|
||||
F: drivers/clk/socfpga/
|
||||
|
||||
ARM/SOCFPGA DWMAC GLUE LAYER
|
||||
M: Maxime Chevallier <maxime.chevallier@bootlin.com>
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/net/socfpga-dwmac.txt
|
||||
F: drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
|
||||
|
||||
ARM/SOCFPGA EDAC BINDINGS
|
||||
M: Matthew Gerlach <matthew.gerlach@altera.com>
|
||||
S: Maintained
|
||||
|
|
@ -3867,8 +3873,9 @@ AUXILIARY BUS DRIVER
|
|||
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
R: Dave Ertman <david.m.ertman@intel.com>
|
||||
R: Ira Weiny <ira.weiny@intel.com>
|
||||
R: Leon Romanovsky <leon@kernel.org>
|
||||
S: Supported
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/driver-core/driver-core.git
|
||||
F: Documentation/driver-api/auxiliary_bus.rst
|
||||
F: drivers/base/auxiliary.c
|
||||
F: include/linux/auxiliary_bus.h
|
||||
|
|
@ -7227,7 +7234,7 @@ M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|||
M: "Rafael J. Wysocki" <rafael@kernel.org>
|
||||
M: Danilo Krummrich <dakr@kernel.org>
|
||||
S: Supported
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/driver-core/driver-core.git
|
||||
F: Documentation/core-api/kobject.rst
|
||||
F: drivers/base/
|
||||
F: fs/debugfs/
|
||||
|
|
@ -10457,14 +10464,20 @@ S: Supported
|
|||
F: drivers/infiniband/hw/hfi1
|
||||
|
||||
HFS FILESYSTEM
|
||||
M: Viacheslav Dubeyko <slava@dubeyko.com>
|
||||
M: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
|
||||
M: Yangtao Li <frank.li@vivo.com>
|
||||
L: linux-fsdevel@vger.kernel.org
|
||||
S: Orphan
|
||||
S: Maintained
|
||||
F: Documentation/filesystems/hfs.rst
|
||||
F: fs/hfs/
|
||||
|
||||
HFSPLUS FILESYSTEM
|
||||
M: Viacheslav Dubeyko <slava@dubeyko.com>
|
||||
M: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
|
||||
M: Yangtao Li <frank.li@vivo.com>
|
||||
L: linux-fsdevel@vger.kernel.org
|
||||
S: Orphan
|
||||
S: Maintained
|
||||
F: Documentation/filesystems/hfsplus.rst
|
||||
F: fs/hfsplus/
|
||||
|
||||
|
|
@ -13112,7 +13125,7 @@ KERNFS
|
|||
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
M: Tejun Heo <tj@kernel.org>
|
||||
S: Supported
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/driver-core/driver-core.git
|
||||
F: fs/kernfs/
|
||||
F: include/linux/kernfs.h
|
||||
|
||||
|
|
@ -16812,6 +16825,7 @@ F: Documentation/networking/net_cachelines/net_device.rst
|
|||
F: drivers/connector/
|
||||
F: drivers/net/
|
||||
F: drivers/ptp/
|
||||
F: drivers/s390/net/
|
||||
F: include/dt-bindings/net/
|
||||
F: include/linux/cn_proc.h
|
||||
F: include/linux/etherdevice.h
|
||||
|
|
@ -16821,6 +16835,7 @@ F: include/linux/fddidevice.h
|
|||
F: include/linux/hippidevice.h
|
||||
F: include/linux/if_*
|
||||
F: include/linux/inetdevice.h
|
||||
F: include/linux/ism.h
|
||||
F: include/linux/netdev*
|
||||
F: include/linux/platform_data/wiznet.h
|
||||
F: include/uapi/linux/cn_proc.h
|
||||
|
|
@ -18689,7 +18704,7 @@ F: drivers/pci/controller/pci-xgene-msi.c
|
|||
PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS
|
||||
M: Lorenzo Pieralisi <lpieralisi@kernel.org>
|
||||
M: Krzysztof Wilczyński <kw@linux.com>
|
||||
R: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
|
||||
M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
|
||||
R: Rob Herring <robh@kernel.org>
|
||||
L: linux-pci@vger.kernel.org
|
||||
S: Supported
|
||||
|
|
@ -18742,6 +18757,16 @@ F: include/asm-generic/pci*
|
|||
F: include/linux/of_pci.h
|
||||
F: include/linux/pci*
|
||||
F: include/uapi/linux/pci*
|
||||
|
||||
PCI SUBSYSTEM [RUST]
|
||||
M: Danilo Krummrich <dakr@kernel.org>
|
||||
R: Bjorn Helgaas <bhelgaas@google.com>
|
||||
R: Krzysztof Wilczyński <kwilczynski@kernel.org>
|
||||
L: linux-pci@vger.kernel.org
|
||||
S: Maintained
|
||||
C: irc://irc.oftc.net/linux-pci
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci.git
|
||||
F: rust/helpers/pci.c
|
||||
F: rust/kernel/pci.rs
|
||||
F: samples/rust/rust_driver_pci.rs
|
||||
|
||||
|
|
@ -21312,6 +21337,7 @@ L: linux-s390@vger.kernel.org
|
|||
L: netdev@vger.kernel.org
|
||||
S: Supported
|
||||
F: drivers/s390/net/
|
||||
F: include/linux/ism.h
|
||||
|
||||
S390 PCI SUBSYSTEM
|
||||
M: Niklas Schnelle <schnelle@linux.ibm.com>
|
||||
|
|
@ -25184,9 +25210,13 @@ S: Maintained
|
|||
F: drivers/usb/typec/mux/pi3usb30532.c
|
||||
|
||||
USB TYPEC PORT CONTROLLER DRIVERS
|
||||
M: Badhri Jagan Sridharan <badhri@google.com>
|
||||
L: linux-usb@vger.kernel.org
|
||||
S: Orphan
|
||||
F: drivers/usb/typec/tcpm/
|
||||
S: Maintained
|
||||
F: drivers/usb/typec/tcpm/tcpci.c
|
||||
F: drivers/usb/typec/tcpm/tcpm.c
|
||||
F: include/linux/usb/tcpci.h
|
||||
F: include/linux/usb/tcpm.h
|
||||
|
||||
USB TYPEC TUSB1046 MUX DRIVER
|
||||
M: Romain Gantois <romain.gantois@bootlin.com>
|
||||
|
|
|
|||
Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 15
SUBLEVEL = 0
EXTRAVERSION = -rc3
EXTRAVERSION = -rc4
NAME = Baby Opossum Posse

# *DOCUMENTATION*

@@ -1053,11 +1053,11 @@ NOSTDINC_FLAGS += -nostdinc
KBUILD_CFLAGS += $(call cc-option, -fstrict-flex-arrays=3)

#Currently, disable -Wstringop-overflow for GCC 11, globally.
KBUILD_CFLAGS-$(CONFIG_CC_NO_STRINGOP_OVERFLOW) += $(call cc-option, -Wno-stringop-overflow)
KBUILD_CFLAGS-$(CONFIG_CC_NO_STRINGOP_OVERFLOW) += $(call cc-disable-warning, stringop-overflow)
KBUILD_CFLAGS-$(CONFIG_CC_STRINGOP_OVERFLOW) += $(call cc-option, -Wstringop-overflow)

#Currently, disable -Wunterminated-string-initialization as broken
KBUILD_CFLAGS += $(call cc-option, -Wno-unterminated-string-initialization)
KBUILD_CFLAGS += $(call cc-disable-warning, unterminated-string-initialization)

# disable invalid "can't wrap" optimizations for signed / pointers
KBUILD_CFLAGS += -fno-strict-overflow
|
|
|||
|
|
@ -1588,4 +1588,9 @@ void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val);
|
|||
#define kvm_has_s1poe(k) \
|
||||
(kvm_has_feat((k), ID_AA64MMFR3_EL1, S1POE, IMP))
|
||||
|
||||
static inline bool kvm_arch_has_irq_bypass(void)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
#endif /* __ARM64_KVM_HOST_H__ */
|
||||
|
|
|
|||
|
|
@ -94,17 +94,6 @@ static inline bool kaslr_requires_kpti(void)
|
|||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Systems affected by Cavium erratum 24756 are incompatible
|
||||
* with KPTI.
|
||||
*/
|
||||
if (IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
|
||||
extern const struct midr_range cavium_erratum_27456_cpus[];
|
||||
|
||||
if (is_midr_in_range_list(cavium_erratum_27456_cpus))
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -335,7 +335,7 @@ static const struct midr_range cavium_erratum_23154_cpus[] = {
|
|||
#endif
|
||||
|
||||
#ifdef CONFIG_CAVIUM_ERRATUM_27456
|
||||
const struct midr_range cavium_erratum_27456_cpus[] = {
|
||||
static const struct midr_range cavium_erratum_27456_cpus[] = {
|
||||
/* Cavium ThunderX, T88 pass 1.x - 2.1 */
|
||||
MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
|
||||
/* Cavium ThunderX, T81 pass 1.0 */
|
||||
|
|
|
|||
|
|
@ -47,10 +47,6 @@ PROVIDE(__pi_id_aa64smfr0_override = id_aa64smfr0_override);
|
|||
PROVIDE(__pi_id_aa64zfr0_override = id_aa64zfr0_override);
|
||||
PROVIDE(__pi_arm64_sw_feature_override = arm64_sw_feature_override);
|
||||
PROVIDE(__pi_arm64_use_ng_mappings = arm64_use_ng_mappings);
|
||||
#ifdef CONFIG_CAVIUM_ERRATUM_27456
|
||||
PROVIDE(__pi_cavium_erratum_27456_cpus = cavium_erratum_27456_cpus);
|
||||
PROVIDE(__pi_is_midr_in_range_list = is_midr_in_range_list);
|
||||
#endif
|
||||
PROVIDE(__pi__ctype = _ctype);
|
||||
PROVIDE(__pi_memstart_offset_seed = memstart_offset_seed);
|
||||
|
||||
|
|
|
|||
|
|
@ -207,6 +207,29 @@ static void __init map_fdt(u64 fdt)
|
|||
dsb(ishst);
|
||||
}
|
||||
|
||||
/*
|
||||
* PI version of the Cavium Eratum 27456 detection, which makes it
|
||||
* impossible to use non-global mappings.
|
||||
*/
|
||||
static bool __init ng_mappings_allowed(void)
|
||||
{
|
||||
static const struct midr_range cavium_erratum_27456_cpus[] __initconst = {
|
||||
/* Cavium ThunderX, T88 pass 1.x - 2.1 */
|
||||
MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
|
||||
/* Cavium ThunderX, T81 pass 1.0 */
|
||||
MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
|
||||
{},
|
||||
};
|
||||
|
||||
for (const struct midr_range *r = cavium_erratum_27456_cpus; r->model; r++) {
|
||||
if (midr_is_cpu_model_range(read_cpuid_id(), r->model,
|
||||
r->rv_min, r->rv_max))
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
asmlinkage void __init early_map_kernel(u64 boot_status, void *fdt)
|
||||
{
|
||||
static char const chosen_str[] __initconst = "/chosen";
|
||||
|
|
@ -246,7 +269,7 @@ asmlinkage void __init early_map_kernel(u64 boot_status, void *fdt)
|
|||
u64 kaslr_seed = kaslr_early_init(fdt, chosen);
|
||||
|
||||
if (kaslr_seed && kaslr_requires_kpti())
|
||||
arm64_use_ng_mappings = true;
|
||||
arm64_use_ng_mappings = ng_mappings_allowed();
|
||||
|
||||
kaslr_offset |= kaslr_seed & ~(MIN_KIMG_ALIGN - 1);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -2743,11 +2743,6 @@ bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
|
|||
return irqchip_in_kernel(kvm);
|
||||
}
|
||||
|
||||
bool kvm_arch_has_irq_bypass(void)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
|
||||
struct irq_bypass_producer *prod)
|
||||
{
|
||||
|
|
|
|||
|
|
@ -73,6 +73,7 @@ config LOONGARCH
|
|||
select ARCH_SUPPORTS_RT
|
||||
select ARCH_USE_BUILTIN_BSWAP
|
||||
select ARCH_USE_CMPXCHG_LOCKREF
|
||||
select ARCH_USE_MEMTEST
|
||||
select ARCH_USE_QUEUED_RWLOCKS
|
||||
select ARCH_USE_QUEUED_SPINLOCKS
|
||||
select ARCH_WANT_DEFAULT_BPF_JIT
|
||||
|
|
|
|||
|
|
@ -22,22 +22,29 @@
|
|||
struct sigcontext;
|
||||
|
||||
#define kernel_fpu_available() cpu_has_fpu
|
||||
extern void kernel_fpu_begin(void);
|
||||
extern void kernel_fpu_end(void);
|
||||
|
||||
extern void _init_fpu(unsigned int);
|
||||
extern void _save_fp(struct loongarch_fpu *);
|
||||
extern void _restore_fp(struct loongarch_fpu *);
|
||||
void kernel_fpu_begin(void);
|
||||
void kernel_fpu_end(void);
|
||||
|
||||
extern void _save_lsx(struct loongarch_fpu *fpu);
|
||||
extern void _restore_lsx(struct loongarch_fpu *fpu);
|
||||
extern void _init_lsx_upper(void);
|
||||
extern void _restore_lsx_upper(struct loongarch_fpu *fpu);
|
||||
asmlinkage void _init_fpu(unsigned int);
|
||||
asmlinkage void _save_fp(struct loongarch_fpu *);
|
||||
asmlinkage void _restore_fp(struct loongarch_fpu *);
|
||||
asmlinkage int _save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
|
||||
asmlinkage int _restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
|
||||
|
||||
extern void _save_lasx(struct loongarch_fpu *fpu);
|
||||
extern void _restore_lasx(struct loongarch_fpu *fpu);
|
||||
extern void _init_lasx_upper(void);
|
||||
extern void _restore_lasx_upper(struct loongarch_fpu *fpu);
|
||||
asmlinkage void _save_lsx(struct loongarch_fpu *fpu);
|
||||
asmlinkage void _restore_lsx(struct loongarch_fpu *fpu);
|
||||
asmlinkage void _init_lsx_upper(void);
|
||||
asmlinkage void _restore_lsx_upper(struct loongarch_fpu *fpu);
|
||||
asmlinkage int _save_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
|
||||
asmlinkage int _restore_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
|
||||
|
||||
asmlinkage void _save_lasx(struct loongarch_fpu *fpu);
|
||||
asmlinkage void _restore_lasx(struct loongarch_fpu *fpu);
|
||||
asmlinkage void _init_lasx_upper(void);
|
||||
asmlinkage void _restore_lasx_upper(struct loongarch_fpu *fpu);
|
||||
asmlinkage int _save_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
|
||||
asmlinkage int _restore_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
|
||||
|
||||
static inline void enable_lsx(void);
|
||||
static inline void disable_lsx(void);
|
||||
|
|
|
|||
|
|
@ -12,9 +12,13 @@
|
|||
#include <asm/loongarch.h>
|
||||
#include <asm/processor.h>
|
||||
|
||||
extern void _init_lbt(void);
|
||||
extern void _save_lbt(struct loongarch_lbt *);
|
||||
extern void _restore_lbt(struct loongarch_lbt *);
|
||||
asmlinkage void _init_lbt(void);
|
||||
asmlinkage void _save_lbt(struct loongarch_lbt *);
|
||||
asmlinkage void _restore_lbt(struct loongarch_lbt *);
|
||||
asmlinkage int _save_lbt_context(void __user *regs, void __user *eflags);
|
||||
asmlinkage int _restore_lbt_context(void __user *regs, void __user *eflags);
|
||||
asmlinkage int _save_ftop_context(void __user *ftop);
|
||||
asmlinkage int _restore_ftop_context(void __user *ftop);
|
||||
|
||||
static inline int is_lbt_enabled(void)
|
||||
{
|
||||
|
|
|
|||
|
|
@ -33,9 +33,9 @@ struct pt_regs {
|
|||
unsigned long __last[];
|
||||
} __aligned(8);
|
||||
|
||||
static inline int regs_irqs_disabled(struct pt_regs *regs)
|
||||
static __always_inline bool regs_irqs_disabled(struct pt_regs *regs)
|
||||
{
|
||||
return arch_irqs_disabled_flags(regs->csr_prmd);
|
||||
return !(regs->csr_prmd & CSR_PRMD_PIE);
|
||||
}
|
||||
|
||||
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
|
||||
|
|
|
|||
|
|
@ -21,10 +21,10 @@ obj-$(CONFIG_CPU_HAS_LBT) += lbt.o
|
|||
|
||||
obj-$(CONFIG_ARCH_STRICT_ALIGN) += unaligned.o
|
||||
|
||||
CFLAGS_module.o += $(call cc-option,-Wno-override-init,)
|
||||
CFLAGS_syscall.o += $(call cc-option,-Wno-override-init,)
|
||||
CFLAGS_traps.o += $(call cc-option,-Wno-override-init,)
|
||||
CFLAGS_perf_event.o += $(call cc-option,-Wno-override-init,)
|
||||
CFLAGS_module.o += $(call cc-disable-warning, override-init)
|
||||
CFLAGS_syscall.o += $(call cc-disable-warning, override-init)
|
||||
CFLAGS_traps.o += $(call cc-disable-warning, override-init)
|
||||
CFLAGS_perf_event.o += $(call cc-disable-warning, override-init)
|
||||
|
||||
ifdef CONFIG_FUNCTION_TRACER
|
||||
ifndef CONFIG_DYNAMIC_FTRACE
|
||||
|
|
|
|||
|
|
@ -458,6 +458,7 @@ SYM_FUNC_START(_save_fp_context)
|
|||
li.w a0, 0 # success
|
||||
jr ra
|
||||
SYM_FUNC_END(_save_fp_context)
|
||||
EXPORT_SYMBOL_GPL(_save_fp_context)
|
||||
|
||||
/*
|
||||
* a0: fpregs
|
||||
|
|
@ -471,6 +472,7 @@ SYM_FUNC_START(_restore_fp_context)
|
|||
li.w a0, 0 # success
|
||||
jr ra
|
||||
SYM_FUNC_END(_restore_fp_context)
|
||||
EXPORT_SYMBOL_GPL(_restore_fp_context)
|
||||
|
||||
/*
|
||||
* a0: fpregs
|
||||
|
|
@ -484,6 +486,7 @@ SYM_FUNC_START(_save_lsx_context)
|
|||
li.w a0, 0 # success
|
||||
jr ra
|
||||
SYM_FUNC_END(_save_lsx_context)
|
||||
EXPORT_SYMBOL_GPL(_save_lsx_context)
|
||||
|
||||
/*
|
||||
* a0: fpregs
|
||||
|
|
@ -497,6 +500,7 @@ SYM_FUNC_START(_restore_lsx_context)
|
|||
li.w a0, 0 # success
|
||||
jr ra
|
||||
SYM_FUNC_END(_restore_lsx_context)
|
||||
EXPORT_SYMBOL_GPL(_restore_lsx_context)
|
||||
|
||||
/*
|
||||
* a0: fpregs
|
||||
|
|
@ -510,6 +514,7 @@ SYM_FUNC_START(_save_lasx_context)
|
|||
li.w a0, 0 # success
|
||||
jr ra
|
||||
SYM_FUNC_END(_save_lasx_context)
|
||||
EXPORT_SYMBOL_GPL(_save_lasx_context)
|
||||
|
||||
/*
|
||||
* a0: fpregs
|
||||
|
|
@ -523,6 +528,7 @@ SYM_FUNC_START(_restore_lasx_context)
|
|||
li.w a0, 0 # success
|
||||
jr ra
|
||||
SYM_FUNC_END(_restore_lasx_context)
|
||||
EXPORT_SYMBOL_GPL(_restore_lasx_context)
|
||||
|
||||
.L_fpu_fault:
|
||||
li.w a0, -EFAULT # failure
|
||||
|
|
|
|||
|
|
@ -90,6 +90,7 @@ SYM_FUNC_START(_save_lbt_context)
|
|||
li.w a0, 0 # success
|
||||
jr ra
|
||||
SYM_FUNC_END(_save_lbt_context)
|
||||
EXPORT_SYMBOL_GPL(_save_lbt_context)
|
||||
|
||||
/*
|
||||
* a0: scr
|
||||
|
|
@ -110,6 +111,7 @@ SYM_FUNC_START(_restore_lbt_context)
|
|||
li.w a0, 0 # success
|
||||
jr ra
|
||||
SYM_FUNC_END(_restore_lbt_context)
|
||||
EXPORT_SYMBOL_GPL(_restore_lbt_context)
|
||||
|
||||
/*
|
||||
* a0: ftop
|
||||
|
|
@ -120,6 +122,7 @@ SYM_FUNC_START(_save_ftop_context)
|
|||
li.w a0, 0 # success
|
||||
jr ra
|
||||
SYM_FUNC_END(_save_ftop_context)
|
||||
EXPORT_SYMBOL_GPL(_save_ftop_context)
|
||||
|
||||
/*
|
||||
* a0: ftop
|
||||
|
|
@ -150,6 +153,7 @@ SYM_FUNC_START(_restore_ftop_context)
|
|||
li.w a0, 0 # success
|
||||
jr ra
|
||||
SYM_FUNC_END(_restore_ftop_context)
|
||||
EXPORT_SYMBOL_GPL(_restore_ftop_context)
|
||||
|
||||
.L_lbt_fault:
|
||||
li.w a0, -EFAULT # failure
|
||||
|
|
|
|||
|
|
@ -51,27 +51,6 @@
|
|||
#define lock_lbt_owner() ({ preempt_disable(); pagefault_disable(); })
|
||||
#define unlock_lbt_owner() ({ pagefault_enable(); preempt_enable(); })
|
||||
|
||||
/* Assembly functions to move context to/from the FPU */
|
||||
extern asmlinkage int
|
||||
_save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
|
||||
extern asmlinkage int
|
||||
_restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
|
||||
extern asmlinkage int
|
||||
_save_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
|
||||
extern asmlinkage int
|
||||
_restore_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
|
||||
extern asmlinkage int
|
||||
_save_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
|
||||
extern asmlinkage int
|
||||
_restore_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
|
||||
|
||||
#ifdef CONFIG_CPU_HAS_LBT
|
||||
extern asmlinkage int _save_lbt_context(void __user *regs, void __user *eflags);
|
||||
extern asmlinkage int _restore_lbt_context(void __user *regs, void __user *eflags);
|
||||
extern asmlinkage int _save_ftop_context(void __user *ftop);
|
||||
extern asmlinkage int _restore_ftop_context(void __user *ftop);
|
||||
#endif
|
||||
|
||||
struct rt_sigframe {
|
||||
struct siginfo rs_info;
|
||||
struct ucontext rs_uctx;
|
||||
|
|
|
|||
|
|
@ -553,9 +553,10 @@ asmlinkage void noinstr do_ale(struct pt_regs *regs)
|
|||
die_if_kernel("Kernel ale access", regs);
|
||||
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
|
||||
#else
|
||||
bool pie = regs_irqs_disabled(regs);
|
||||
unsigned int *pc;
|
||||
|
||||
if (regs->csr_prmd & CSR_PRMD_PIE)
|
||||
if (!pie)
|
||||
local_irq_enable();
|
||||
|
||||
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr);
|
||||
|
|
@ -582,7 +583,7 @@ sigbus:
|
|||
die_if_kernel("Kernel ale access", regs);
|
||||
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
|
||||
out:
|
||||
if (regs->csr_prmd & CSR_PRMD_PIE)
|
||||
if (!pie)
|
||||
local_irq_disable();
|
||||
#endif
|
||||
irqentry_exit(regs, state);
|
||||
|
|
@ -621,12 +622,13 @@ static void bug_handler(struct pt_regs *regs)
|
|||
asmlinkage void noinstr do_bce(struct pt_regs *regs)
|
||||
{
|
||||
bool user = user_mode(regs);
|
||||
bool pie = regs_irqs_disabled(regs);
|
||||
unsigned long era = exception_era(regs);
|
||||
u64 badv = 0, lower = 0, upper = ULONG_MAX;
|
||||
union loongarch_instruction insn;
|
||||
irqentry_state_t state = irqentry_enter(regs);
|
||||
|
||||
if (regs->csr_prmd & CSR_PRMD_PIE)
|
||||
if (!pie)
|
||||
local_irq_enable();
|
||||
|
||||
current->thread.trap_nr = read_csr_excode();
|
||||
|
|
@ -692,7 +694,7 @@ asmlinkage void noinstr do_bce(struct pt_regs *regs)
|
|||
force_sig_bnderr((void __user *)badv, (void __user *)lower, (void __user *)upper);
|
||||
|
||||
out:
|
||||
if (regs->csr_prmd & CSR_PRMD_PIE)
|
||||
if (!pie)
|
||||
local_irq_disable();
|
||||
|
||||
irqentry_exit(regs, state);
|
||||
|
|
@ -710,11 +712,12 @@ bad_era:
|
|||
asmlinkage void noinstr do_bp(struct pt_regs *regs)
|
||||
{
|
||||
bool user = user_mode(regs);
|
||||
bool pie = regs_irqs_disabled(regs);
|
||||
unsigned int opcode, bcode;
|
||||
unsigned long era = exception_era(regs);
|
||||
irqentry_state_t state = irqentry_enter(regs);
|
||||
|
||||
if (regs->csr_prmd & CSR_PRMD_PIE)
|
||||
if (!pie)
|
||||
local_irq_enable();
|
||||
|
||||
if (__get_inst(&opcode, (u32 *)era, user))
|
||||
|
|
@ -780,7 +783,7 @@ asmlinkage void noinstr do_bp(struct pt_regs *regs)
|
|||
}
|
||||
|
||||
out:
|
||||
if (regs->csr_prmd & CSR_PRMD_PIE)
|
||||
if (!pie)
|
||||
local_irq_disable();
|
||||
|
||||
irqentry_exit(regs, state);
|
||||
|
|
@ -1015,6 +1018,7 @@ static void init_restore_lbt(void)
|
|||
|
||||
asmlinkage void noinstr do_lbt(struct pt_regs *regs)
|
||||
{
|
||||
bool pie = regs_irqs_disabled(regs);
|
||||
irqentry_state_t state = irqentry_enter(regs);
|
||||
|
||||
/*
|
||||
|
|
@ -1024,7 +1028,7 @@ asmlinkage void noinstr do_lbt(struct pt_regs *regs)
|
|||
* (including the user using 'MOVGR2GCSR' to turn on TM, which
|
||||
* will not trigger the BTE), we need to check PRMD first.
|
||||
*/
|
||||
if (regs->csr_prmd & CSR_PRMD_PIE)
|
||||
if (!pie)
|
||||
local_irq_enable();
|
||||
|
||||
if (!cpu_has_lbt) {
|
||||
|
|
@ -1038,7 +1042,7 @@ asmlinkage void noinstr do_lbt(struct pt_regs *regs)
|
|||
preempt_enable();
|
||||
|
||||
out:
|
||||
if (regs->csr_prmd & CSR_PRMD_PIE)
|
||||
if (!pie)
|
||||
local_irq_disable();
|
||||
|
||||
irqentry_exit(regs, state);
|
||||
|
|
|
|||
|
|
@ -21,4 +21,4 @@ kvm-y += intc/eiointc.o
|
|||
kvm-y += intc/pch_pic.o
|
||||
kvm-y += irqfd.o
|
||||
|
||||
CFLAGS_exit.o += $(call cc-option,-Wno-override-init,)
|
||||
CFLAGS_exit.o += $(call cc-disable-warning, override-init)
|
||||
|
|
|
|||
|
|
@ -111,7 +111,7 @@ static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data)
|
|||
ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val);
|
||||
srcu_read_unlock(&vcpu->kvm->srcu, idx);
|
||||
if (unlikely(ret)) {
|
||||
kvm_err("%s: : read date from addr %llx failed\n", __func__, addr);
|
||||
kvm_err("%s: : read data from addr %llx failed\n", __func__, addr);
|
||||
return ret;
|
||||
}
|
||||
/* Construct the mask by scanning the bit 27-30 */
|
||||
|
|
@ -127,7 +127,7 @@ static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data)
|
|||
ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val);
|
||||
srcu_read_unlock(&vcpu->kvm->srcu, idx);
|
||||
if (unlikely(ret))
|
||||
kvm_err("%s: : write date to addr %llx failed\n", __func__, addr);
|
||||
kvm_err("%s: : write data to addr %llx failed\n", __func__, addr);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -296,10 +296,10 @@ int kvm_arch_enable_virtualization_cpu(void)
|
|||
/*
|
||||
* Enable virtualization features granting guest direct control of
|
||||
* certain features:
|
||||
* GCI=2: Trap on init or unimplement cache instruction.
|
||||
* GCI=2: Trap on init or unimplemented cache instruction.
|
||||
* TORU=0: Trap on Root Unimplement.
|
||||
* CACTRL=1: Root control cache.
|
||||
* TOP=0: Trap on Previlege.
|
||||
* TOP=0: Trap on Privilege.
|
||||
* TOE=0: Trap on Exception.
|
||||
* TIT=0: Trap on Timer.
|
||||
*/
|
||||
|
|
|
|||
|
|
@ -294,6 +294,7 @@ static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
|
|||
vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
|
||||
|
||||
if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
|
||||
kvm_lose_pmu(vcpu);
|
||||
/* make sure the vcpu mode has been written */
|
||||
smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
|
||||
local_irq_enable();
|
||||
|
|
@ -902,6 +903,13 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
|
|||
vcpu->arch.st.guest_addr = 0;
|
||||
memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
|
||||
memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
|
||||
|
||||
/*
|
||||
* When vCPU reset, clear the ESTAT and GINTC registers
|
||||
* Other CSR registers are cleared with function _kvm_setcsr().
|
||||
*/
|
||||
kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_GINTC, 0);
|
||||
kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_ESTAT, 0);
|
||||
break;
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
|
|
|
|||
|
|
@ -47,7 +47,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
|
|||
pmd = pmd_offset(pud, addr);
|
||||
}
|
||||
}
|
||||
return (pte_t *) pmd;
|
||||
return pmd_none(pmdp_get(pmd)) ? NULL : (pte_t *) pmd;
|
||||
}
|
||||
|
||||
uint64_t pmd_to_entrylo(unsigned long pmd_val)
|
||||
|
|
|
|||
|
|
@ -65,9 +65,6 @@ void __init paging_init(void)
|
|||
{
|
||||
unsigned long max_zone_pfns[MAX_NR_ZONES];
|
||||
|
||||
#ifdef CONFIG_ZONE_DMA
|
||||
max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
|
||||
#endif
|
||||
#ifdef CONFIG_ZONE_DMA32
|
||||
max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
|
||||
#endif
|
||||
|
|
|
|||
|
|
@ -23,6 +23,9 @@
|
|||
*/
|
||||
extern void local_dcache_page_flush(struct page *page);
|
||||
extern void local_icache_page_inv(struct page *page);
|
||||
extern void local_dcache_range_flush(unsigned long start, unsigned long end);
|
||||
extern void local_dcache_range_inv(unsigned long start, unsigned long end);
|
||||
extern void local_icache_range_inv(unsigned long start, unsigned long end);
|
||||
|
||||
/*
|
||||
* Data cache flushing always happen on the local cpu. Instruction cache
|
||||
|
|
@ -38,6 +41,20 @@ extern void local_icache_page_inv(struct page *page);
|
|||
extern void smp_icache_page_inv(struct page *page);
|
||||
#endif /* CONFIG_SMP */
|
||||
|
||||
/*
|
||||
* Even if the actual block size is larger than L1_CACHE_BYTES, paddr
|
||||
* can be incremented by L1_CACHE_BYTES. When paddr is written to the
|
||||
* invalidate register, the entire cache line encompassing this address
|
||||
* is invalidated. Each subsequent reference to the same cache line will
|
||||
* not affect the invalidation process.
|
||||
*/
|
||||
#define local_dcache_block_flush(addr) \
|
||||
local_dcache_range_flush(addr, addr + L1_CACHE_BYTES)
|
||||
#define local_dcache_block_inv(addr) \
|
||||
local_dcache_range_inv(addr, addr + L1_CACHE_BYTES)
|
||||
#define local_icache_block_inv(addr) \
|
||||
local_icache_range_inv(addr, addr + L1_CACHE_BYTES)
|
||||
|
||||
/*
|
||||
* Synchronizes caches. Whenever a cpu writes executable code to memory, this
|
||||
* should be called to make sure the processor sees the newly written code.
|
||||
|
|
|
|||
|
|
@ -15,16 +15,21 @@
|
|||
#ifndef __ASM_OPENRISC_CPUINFO_H
|
||||
#define __ASM_OPENRISC_CPUINFO_H
|
||||
|
||||
#include <asm/spr.h>
|
||||
#include <asm/spr_defs.h>
|
||||
|
||||
struct cache_desc {
|
||||
u32 size;
|
||||
u32 sets;
|
||||
u32 block_size;
|
||||
u32 ways;
|
||||
};
|
||||
|
||||
struct cpuinfo_or1k {
|
||||
u32 clock_frequency;
|
||||
|
||||
u32 icache_size;
|
||||
u32 icache_block_size;
|
||||
u32 icache_ways;
|
||||
|
||||
u32 dcache_size;
|
||||
u32 dcache_block_size;
|
||||
u32 dcache_ways;
|
||||
struct cache_desc icache;
|
||||
struct cache_desc dcache;
|
||||
|
||||
u16 coreid;
|
||||
};
|
||||
|
|
@ -32,4 +37,9 @@ struct cpuinfo_or1k {
|
|||
extern struct cpuinfo_or1k cpuinfo_or1k[NR_CPUS];
|
||||
extern void setup_cpuinfo(void);
|
||||
|
||||
/*
|
||||
* Check if the cache component exists.
|
||||
*/
|
||||
extern bool cpu_cache_is_present(const unsigned int cache_type);
|
||||
|
||||
#endif /* __ASM_OPENRISC_CPUINFO_H */
|
||||
|
|
|
|||
|
|
@ -7,7 +7,7 @@ extra-y := vmlinux.lds
|
|||
|
||||
obj-y := head.o setup.o or32_ksyms.o process.o dma.o \
|
||||
traps.o time.o irq.o entry.o ptrace.o signal.o \
|
||||
sys_call_table.o unwinder.o
|
||||
sys_call_table.o unwinder.o cacheinfo.o
|
||||
|
||||
obj-$(CONFIG_SMP) += smp.o sync-timer.o
|
||||
obj-$(CONFIG_STACKTRACE) += stacktrace.o
|
||||
|
|
|
|||
|
|
@ -0,0 +1,104 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
/*
|
||||
* OpenRISC cacheinfo support
|
||||
*
|
||||
* Based on work done for MIPS and LoongArch. All original copyrights
|
||||
* apply as per the original source declaration.
|
||||
*
|
||||
* OpenRISC implementation:
|
||||
* Copyright (C) 2025 Sahil Siddiq <sahilcdq@proton.me>
|
||||
*/
|
||||
|
||||
#include <linux/cacheinfo.h>
|
||||
#include <asm/cpuinfo.h>
|
||||
#include <asm/spr.h>
|
||||
#include <asm/spr_defs.h>
|
||||
|
||||
static inline void ci_leaf_init(struct cacheinfo *this_leaf, enum cache_type type,
|
||||
unsigned int level, struct cache_desc *cache, int cpu)
|
||||
{
|
||||
this_leaf->type = type;
|
||||
this_leaf->level = level;
|
||||
this_leaf->coherency_line_size = cache->block_size;
|
||||
this_leaf->number_of_sets = cache->sets;
|
||||
this_leaf->ways_of_associativity = cache->ways;
|
||||
this_leaf->size = cache->size;
|
||||
cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
|
||||
}
|
||||
|
||||
int init_cache_level(unsigned int cpu)
|
||||
{
|
||||
struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
|
||||
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
|
||||
int leaves = 0, levels = 0;
|
||||
unsigned long upr = mfspr(SPR_UPR);
|
||||
unsigned long iccfgr, dccfgr;
|
||||
|
||||
if (!(upr & SPR_UPR_UP)) {
|
||||
printk(KERN_INFO
|
||||
"-- no UPR register... unable to detect configuration\n");
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
if (cpu_cache_is_present(SPR_UPR_DCP)) {
|
||||
dccfgr = mfspr(SPR_DCCFGR);
|
||||
cpuinfo->dcache.ways = 1 << (dccfgr & SPR_DCCFGR_NCW);
|
||||
cpuinfo->dcache.sets = 1 << ((dccfgr & SPR_DCCFGR_NCS) >> 3);
|
||||
cpuinfo->dcache.block_size = 16 << ((dccfgr & SPR_DCCFGR_CBS) >> 7);
|
||||
cpuinfo->dcache.size =
|
||||
cpuinfo->dcache.sets * cpuinfo->dcache.ways * cpuinfo->dcache.block_size;
|
||||
leaves += 1;
|
||||
printk(KERN_INFO
|
||||
"-- dcache: %d bytes total, %d bytes/line, %d set(s), %d way(s)\n",
|
||||
cpuinfo->dcache.size, cpuinfo->dcache.block_size,
|
||||
cpuinfo->dcache.sets, cpuinfo->dcache.ways);
|
||||
} else
|
||||
printk(KERN_INFO "-- dcache disabled\n");
|
||||
|
||||
if (cpu_cache_is_present(SPR_UPR_ICP)) {
|
||||
iccfgr = mfspr(SPR_ICCFGR);
|
||||
cpuinfo->icache.ways = 1 << (iccfgr & SPR_ICCFGR_NCW);
|
||||
cpuinfo->icache.sets = 1 << ((iccfgr & SPR_ICCFGR_NCS) >> 3);
|
||||
cpuinfo->icache.block_size = 16 << ((iccfgr & SPR_ICCFGR_CBS) >> 7);
|
||||
cpuinfo->icache.size =
|
||||
cpuinfo->icache.sets * cpuinfo->icache.ways * cpuinfo->icache.block_size;
|
||||
leaves += 1;
|
||||
printk(KERN_INFO
|
||||
"-- icache: %d bytes total, %d bytes/line, %d set(s), %d way(s)\n",
|
||||
cpuinfo->icache.size, cpuinfo->icache.block_size,
|
||||
cpuinfo->icache.sets, cpuinfo->icache.ways);
|
||||
} else
|
||||
printk(KERN_INFO "-- icache disabled\n");
|
||||
|
||||
if (!leaves)
|
||||
return -ENOENT;
|
||||
|
||||
levels = 1;
|
||||
|
||||
this_cpu_ci->num_leaves = leaves;
|
||||
this_cpu_ci->num_levels = levels;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int populate_cache_leaves(unsigned int cpu)
|
||||
{
|
||||
struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
|
||||
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
|
||||
struct cacheinfo *this_leaf = this_cpu_ci->info_list;
|
||||
int level = 1;
|
||||
|
||||
if (cpu_cache_is_present(SPR_UPR_DCP)) {
|
||||
ci_leaf_init(this_leaf, CACHE_TYPE_DATA, level, &cpuinfo->dcache, cpu);
|
||||
this_leaf->attributes = ((mfspr(SPR_DCCFGR) & SPR_DCCFGR_CWS) >> 8) ?
|
||||
CACHE_WRITE_BACK : CACHE_WRITE_THROUGH;
|
||||
this_leaf++;
|
||||
}
|
||||
|
||||
if (cpu_cache_is_present(SPR_UPR_ICP))
|
||||
ci_leaf_init(this_leaf, CACHE_TYPE_INST, level, &cpuinfo->icache, cpu);
|
||||
|
||||
this_cpu_ci->cpu_map_populated = true;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -17,6 +17,7 @@
|
|||
#include <linux/pagewalk.h>
|
||||
|
||||
#include <asm/cpuinfo.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/spr_defs.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
||||
|
|
@ -24,9 +25,6 @@ static int
|
|||
page_set_nocache(pte_t *pte, unsigned long addr,
|
||||
unsigned long next, struct mm_walk *walk)
|
||||
{
|
||||
unsigned long cl;
|
||||
struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
|
||||
|
||||
pte_val(*pte) |= _PAGE_CI;
|
||||
|
||||
/*
|
||||
|
|
@ -36,8 +34,7 @@ page_set_nocache(pte_t *pte, unsigned long addr,
|
|||
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
|
||||
|
||||
/* Flush page out of dcache */
|
||||
for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
|
||||
mtspr(SPR_DCBFR, cl);
|
||||
local_dcache_range_flush(__pa(addr), __pa(next));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -98,21 +95,14 @@ void arch_dma_clear_uncached(void *cpu_addr, size_t size)
|
|||
void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
unsigned long cl;
|
||||
struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
|
||||
|
||||
switch (dir) {
|
||||
case DMA_TO_DEVICE:
|
||||
/* Flush the dcache for the requested range */
|
||||
for (cl = addr; cl < addr + size;
|
||||
cl += cpuinfo->dcache_block_size)
|
||||
mtspr(SPR_DCBFR, cl);
|
||||
local_dcache_range_flush(addr, addr + size);
|
||||
break;
|
||||
case DMA_FROM_DEVICE:
|
||||
/* Invalidate the dcache for the requested range */
|
||||
for (cl = addr; cl < addr + size;
|
||||
cl += cpuinfo->dcache_block_size)
|
||||
mtspr(SPR_DCBIR, cl);
|
||||
local_dcache_range_inv(addr, addr + size);
|
||||
break;
|
||||
default:
|
||||
/*
|
||||
|
|
|
|||
|
|
@ -113,21 +113,6 @@ static void print_cpuinfo(void)
|
|||
return;
|
||||
}
|
||||
|
||||
if (upr & SPR_UPR_DCP)
|
||||
printk(KERN_INFO
|
||||
"-- dcache: %4d bytes total, %2d bytes/line, %d way(s)\n",
|
||||
cpuinfo->dcache_size, cpuinfo->dcache_block_size,
|
||||
cpuinfo->dcache_ways);
|
||||
else
|
||||
printk(KERN_INFO "-- dcache disabled\n");
|
||||
if (upr & SPR_UPR_ICP)
|
||||
printk(KERN_INFO
|
||||
"-- icache: %4d bytes total, %2d bytes/line, %d way(s)\n",
|
||||
cpuinfo->icache_size, cpuinfo->icache_block_size,
|
||||
cpuinfo->icache_ways);
|
||||
else
|
||||
printk(KERN_INFO "-- icache disabled\n");
|
||||
|
||||
if (upr & SPR_UPR_DMP)
|
||||
printk(KERN_INFO "-- dmmu: %4d entries, %lu way(s)\n",
|
||||
1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> 2),
|
||||
|
|
@ -155,8 +140,6 @@ static void print_cpuinfo(void)
|
|||
void __init setup_cpuinfo(void)
|
||||
{
|
||||
struct device_node *cpu;
|
||||
unsigned long iccfgr, dccfgr;
|
||||
unsigned long cache_set_size;
|
||||
int cpu_id = smp_processor_id();
|
||||
struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[cpu_id];
|
||||
|
||||
|
|
@ -164,20 +147,6 @@ void __init setup_cpuinfo(void)
|
|||
if (!cpu)
|
||||
panic("Couldn't find CPU%d in device tree...\n", cpu_id);
|
||||
|
||||
iccfgr = mfspr(SPR_ICCFGR);
|
||||
cpuinfo->icache_ways = 1 << (iccfgr & SPR_ICCFGR_NCW);
|
||||
cache_set_size = 1 << ((iccfgr & SPR_ICCFGR_NCS) >> 3);
|
||||
cpuinfo->icache_block_size = 16 << ((iccfgr & SPR_ICCFGR_CBS) >> 7);
|
||||
cpuinfo->icache_size =
|
||||
cache_set_size * cpuinfo->icache_ways * cpuinfo->icache_block_size;
|
||||
|
||||
dccfgr = mfspr(SPR_DCCFGR);
|
||||
cpuinfo->dcache_ways = 1 << (dccfgr & SPR_DCCFGR_NCW);
|
||||
cache_set_size = 1 << ((dccfgr & SPR_DCCFGR_NCS) >> 3);
|
||||
cpuinfo->dcache_block_size = 16 << ((dccfgr & SPR_DCCFGR_CBS) >> 7);
|
||||
cpuinfo->dcache_size =
|
||||
cache_set_size * cpuinfo->dcache_ways * cpuinfo->dcache_block_size;
|
||||
|
||||
if (of_property_read_u32(cpu, "clock-frequency",
|
||||
&cpuinfo->clock_frequency)) {
|
||||
printk(KERN_WARNING
|
||||
|
|
@ -294,14 +263,14 @@ static int show_cpuinfo(struct seq_file *m, void *v)
|
|||
unsigned int vr, cpucfgr;
|
||||
unsigned int avr;
|
||||
unsigned int version;
|
||||
#ifdef CONFIG_SMP
|
||||
struct cpuinfo_or1k *cpuinfo = v;
|
||||
seq_printf(m, "processor\t\t: %d\n", cpuinfo->coreid);
|
||||
#endif
|
||||
|
||||
vr = mfspr(SPR_VR);
|
||||
cpucfgr = mfspr(SPR_CPUCFGR);
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
seq_printf(m, "processor\t\t: %d\n", cpuinfo->coreid);
|
||||
#endif
|
||||
if (vr & SPR_VR_UVRP) {
|
||||
vr = mfspr(SPR_VR2);
|
||||
version = vr & SPR_VR2_VER;
|
||||
|
|
@ -320,14 +289,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
|
|||
seq_printf(m, "revision\t\t: %d\n", vr & SPR_VR_REV);
|
||||
}
|
||||
seq_printf(m, "frequency\t\t: %ld\n", loops_per_jiffy * HZ);
|
||||
seq_printf(m, "dcache size\t\t: %d bytes\n", cpuinfo->dcache_size);
|
||||
seq_printf(m, "dcache block size\t: %d bytes\n",
|
||||
cpuinfo->dcache_block_size);
|
||||
seq_printf(m, "dcache ways\t\t: %d\n", cpuinfo->dcache_ways);
|
||||
seq_printf(m, "icache size\t\t: %d bytes\n", cpuinfo->icache_size);
|
||||
seq_printf(m, "icache block size\t: %d bytes\n",
|
||||
cpuinfo->icache_block_size);
|
||||
seq_printf(m, "icache ways\t\t: %d\n", cpuinfo->icache_ways);
|
||||
seq_printf(m, "immu\t\t\t: %d entries, %lu ways\n",
|
||||
1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> 2),
|
||||
1 + (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTW));
|
||||
|
|
|
|||
|
|
@ -14,31 +14,70 @@
|
|||
#include <asm/spr_defs.h>
|
||||
#include <asm/cache.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/cpuinfo.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
||||
static __always_inline void cache_loop(struct page *page, const unsigned int reg)
|
||||
/*
|
||||
* Check if the cache component exists.
|
||||
*/
|
||||
bool cpu_cache_is_present(const unsigned int cache_type)
|
||||
{
|
||||
unsigned long upr = mfspr(SPR_UPR);
|
||||
unsigned long mask = SPR_UPR_UP | cache_type;
|
||||
|
||||
return !((upr & mask) ^ mask);
|
||||
}
|
||||
|
||||
static __always_inline void cache_loop(unsigned long paddr, unsigned long end,
|
||||
const unsigned short reg, const unsigned int cache_type)
|
||||
{
|
||||
if (!cpu_cache_is_present(cache_type))
|
||||
return;
|
||||
|
||||
while (paddr < end) {
|
||||
mtspr(reg, paddr);
|
||||
paddr += L1_CACHE_BYTES;
|
||||
}
|
||||
}
|
||||
|
||||
static __always_inline void cache_loop_page(struct page *page, const unsigned short reg,
|
||||
const unsigned int cache_type)
|
||||
{
|
||||
unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT;
|
||||
unsigned long line = paddr & ~(L1_CACHE_BYTES - 1);
|
||||
unsigned long end = paddr + PAGE_SIZE;
|
||||
|
||||
while (line < paddr + PAGE_SIZE) {
|
||||
mtspr(reg, line);
|
||||
line += L1_CACHE_BYTES;
|
||||
}
|
||||
paddr &= ~(L1_CACHE_BYTES - 1);
|
||||
|
||||
cache_loop(paddr, end, reg, cache_type);
|
||||
}
|
||||
|
||||
void local_dcache_page_flush(struct page *page)
|
||||
{
|
||||
cache_loop(page, SPR_DCBFR);
|
||||
cache_loop_page(page, SPR_DCBFR, SPR_UPR_DCP);
|
||||
}
|
||||
EXPORT_SYMBOL(local_dcache_page_flush);
|
||||
|
||||
void local_icache_page_inv(struct page *page)
|
||||
{
|
||||
cache_loop(page, SPR_ICBIR);
|
||||
cache_loop_page(page, SPR_ICBIR, SPR_UPR_ICP);
|
||||
}
|
||||
EXPORT_SYMBOL(local_icache_page_inv);
|
||||
|
||||
void local_dcache_range_flush(unsigned long start, unsigned long end)
|
||||
{
|
||||
cache_loop(start, end, SPR_DCBFR, SPR_UPR_DCP);
|
||||
}
|
||||
|
||||
void local_dcache_range_inv(unsigned long start, unsigned long end)
|
||||
{
|
||||
cache_loop(start, end, SPR_DCBIR, SPR_UPR_DCP);
|
||||
}
|
||||
|
||||
void local_icache_range_inv(unsigned long start, unsigned long end)
|
||||
{
|
||||
cache_loop(start, end, SPR_ICBIR, SPR_UPR_ICP);
|
||||
}
|
||||
|
||||
void update_cache(struct vm_area_struct *vma, unsigned long address,
|
||||
pte_t *pte)
|
||||
{
|
||||
|
|
@ -58,4 +97,3 @@ void update_cache(struct vm_area_struct *vma, unsigned long address,
|
|||
sync_icache_dcache(folio_page(folio, nr));
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -35,6 +35,7 @@
|
|||
#include <asm/fixmap.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/sections.h>
|
||||
#include <asm/cacheflush.h>
|
||||
|
||||
int mem_init_done;
|
||||
|
||||
|
|
@ -176,8 +177,8 @@ void __init paging_init(void)
|
|||
barrier();
|
||||
|
||||
/* Invalidate instruction caches after code modification */
|
||||
mtspr(SPR_ICBIR, 0x900);
|
||||
mtspr(SPR_ICBIR, 0xa00);
|
||||
local_icache_block_inv(0x900);
|
||||
local_icache_block_inv(0xa00);
|
||||
|
||||
/* New TLB miss handlers and kernel page tables are in now place.
|
||||
* Make sure that page flags get updated for all pages in TLB by
|
||||
|
|
|
|||
|
|
@ -34,11 +34,6 @@ static inline void flush_dcache_page(struct page *page)
|
|||
flush_dcache_folio(page_folio(page));
|
||||
}
|
||||
|
||||
/*
|
||||
* RISC-V doesn't have an instruction to flush parts of the instruction cache,
|
||||
* so instead we just flush the whole thing.
|
||||
*/
|
||||
#define flush_icache_range(start, end) flush_icache_all()
|
||||
#define flush_icache_user_page(vma, pg, addr, len) \
|
||||
do { \
|
||||
if (vma->vm_flags & VM_EXEC) \
|
||||
|
|
@ -78,6 +73,16 @@ void flush_icache_mm(struct mm_struct *mm, bool local);
|
|||
|
||||
#endif /* CONFIG_SMP */
|
||||
|
||||
/*
|
||||
* RISC-V doesn't have an instruction to flush parts of the instruction cache,
|
||||
* so instead we just flush the whole thing.
|
||||
*/
|
||||
#define flush_icache_range flush_icache_range
|
||||
static inline void flush_icache_range(unsigned long start, unsigned long end)
|
||||
{
|
||||
flush_icache_all();
|
||||
}
|
||||
|
||||
extern unsigned int riscv_cbom_block_size;
|
||||
extern unsigned int riscv_cboz_block_size;
|
||||
void riscv_init_cbo_blocksizes(void);
|
||||
|
|
|
|||
|
|
@ -9,8 +9,8 @@ CFLAGS_REMOVE_patch.o = $(CC_FLAGS_FTRACE)
|
|||
CFLAGS_REMOVE_sbi.o = $(CC_FLAGS_FTRACE)
|
||||
CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE)
|
||||
endif
|
||||
CFLAGS_syscall_table.o += $(call cc-option,-Wno-override-init,)
|
||||
CFLAGS_compat_syscall_table.o += $(call cc-option,-Wno-override-init,)
|
||||
CFLAGS_syscall_table.o += $(call cc-disable-warning, override-init)
|
||||
CFLAGS_compat_syscall_table.o += $(call cc-disable-warning, override-init)
|
||||
|
||||
ifdef CONFIG_KEXEC_CORE
|
||||
AFLAGS_kexec_relocate.o := -mcmodel=medany $(call cc-option,-mno-relax)
|
||||
|
|
|
|||
|
|
@ -167,6 +167,7 @@ void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
|
|||
/* Initialize the slot */
|
||||
void *kaddr = kmap_atomic(page);
|
||||
void *dst = kaddr + (vaddr & ~PAGE_MASK);
|
||||
unsigned long start = (unsigned long)dst;
|
||||
|
||||
memcpy(dst, src, len);
|
||||
|
||||
|
|
@ -176,13 +177,6 @@ void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
|
|||
*(uprobe_opcode_t *)dst = __BUG_INSN_32;
|
||||
}
|
||||
|
||||
flush_icache_range(start, start + len);
|
||||
kunmap_atomic(kaddr);
|
||||
|
||||
/*
|
||||
* We probably need flush_icache_user_page() but it needs vma.
|
||||
* This should work on most of architectures by default. If
|
||||
* architecture needs to do something different it can define
|
||||
* its own version of the function.
|
||||
*/
|
||||
flush_dcache_page(page);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -59,7 +59,7 @@ KBUILD_CFLAGS += $(CONFIG_CC_IMPLICIT_FALLTHROUGH)
|
|||
$(obj)/bzImage: asflags-y := $(SVGA_MODE)
|
||||
|
||||
quiet_cmd_image = BUILD $@
|
||||
cmd_image = cp $< $@; truncate -s %4K $@; cat $(obj)/vmlinux.bin >>$@
|
||||
cmd_image = (dd if=$< bs=4k conv=sync status=none; cat $(filter-out $<,$(real-prereqs))) >$@
|
||||
|
||||
$(obj)/bzImage: $(obj)/setup.bin $(obj)/vmlinux.bin FORCE
|
||||
$(call if_changed,image)
|
||||
|
|
|
|||
|
|
@ -629,7 +629,7 @@ int x86_pmu_hw_config(struct perf_event *event)
|
|||
if (event->attr.type == event->pmu->type)
|
||||
event->hw.config |= x86_pmu_get_event_config(event);
|
||||
|
||||
if (!event->attr.freq && x86_pmu.limit_period) {
|
||||
if (is_sampling_event(event) && !event->attr.freq && x86_pmu.limit_period) {
|
||||
s64 left = event->attr.sample_period;
|
||||
x86_pmu.limit_period(event, &left);
|
||||
if (left > event->attr.sample_period)
|
||||
|
|
|
|||
|
|
@ -35,6 +35,7 @@
|
|||
#include <asm/mtrr.h>
|
||||
#include <asm/msr-index.h>
|
||||
#include <asm/asm.h>
|
||||
#include <asm/irq_remapping.h>
|
||||
#include <asm/kvm_page_track.h>
|
||||
#include <asm/kvm_vcpu_regs.h>
|
||||
#include <asm/reboot.h>
|
||||
|
|
@ -2423,4 +2424,9 @@ int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages);
|
|||
*/
|
||||
#define KVM_EXIT_HYPERCALL_MBZ GENMASK_ULL(31, 1)
|
||||
|
||||
static inline bool kvm_arch_has_irq_bypass(void)
|
||||
{
|
||||
return enable_apicv && irq_remapping_cap(IRQ_POSTING_CAP);
|
||||
}
|
||||
|
||||
#endif /* _ASM_X86_KVM_HOST_H */
|
||||
|
|
|
|||
|
|
@ -6,6 +6,8 @@
|
|||
#include <linux/mm.h> /* for struct page */
|
||||
#include <linux/pagemap.h>
|
||||
|
||||
#include <asm/cpufeature.h>
|
||||
|
||||
#define __HAVE_ARCH_PTE_ALLOC_ONE
|
||||
#define __HAVE_ARCH_PGD_FREE
|
||||
#include <asm-generic/pgalloc.h>
|
||||
|
|
@ -29,16 +31,17 @@ static inline void paravirt_release_pud(unsigned long pfn) {}
|
|||
static inline void paravirt_release_p4d(unsigned long pfn) {}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
|
||||
/*
|
||||
* Instead of one PGD, we acquire two PGDs. Being order-1, it is
|
||||
* both 8k in size and 8k-aligned. That lets us just flip bit 12
|
||||
* in a pointer to swap between the two 4k halves.
|
||||
* In case of Page Table Isolation active, we acquire two PGDs instead of one.
|
||||
* Being order-1, it is both 8k in size and 8k-aligned. That lets us just
|
||||
* flip bit 12 in a pointer to swap between the two 4k halves.
|
||||
*/
|
||||
#define PGD_ALLOCATION_ORDER 1
|
||||
#else
|
||||
#define PGD_ALLOCATION_ORDER 0
|
||||
#endif
|
||||
static inline unsigned int pgd_allocation_order(void)
|
||||
{
|
||||
if (cpu_feature_enabled(X86_FEATURE_PTI))
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Allocate and free page tables.
|
||||
|
|
|
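Illustrative aside, not part of the patch: the comment above describes the PTI
trick of keeping the kernel and user PGDs in the two 4k halves of one 8k,
order-1 allocation, so switching halves is just toggling bit 12 of the pointer.
A hedged sketch of that idea, with made-up helper names rather than the
kernel's real helpers:

    #include <stdint.h>

    #define PTI_PGD_HALF 0x1000UL   /* bit 12 == 4k, the size of one half */

    /* Hypothetical helpers: the kernel half occupies the first 4k page of the
     * 8k-aligned pair, the user half the second one. */
    static inline void *pgd_user_half(void *kernel_pgd)
    {
            return (void *)((uintptr_t)kernel_pgd | PTI_PGD_HALF);
    }

    static inline void *pgd_kernel_half(void *user_pgd)
    {
            return (void *)((uintptr_t)user_pgd & ~PTI_PGD_HALF);
    }
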
|||
|
|
@ -1299,6 +1299,14 @@ void __init e820__memblock_setup(void)
|
|||
memblock_add(entry->addr, entry->size);
|
||||
}
|
||||
|
||||
/*
|
||||
* 32-bit systems are limited to 4BG of memory even with HIGHMEM and
|
||||
* to even less without it.
|
||||
* Discard memory after max_pfn - the actual limit detected at runtime.
|
||||
*/
|
||||
if (IS_ENABLED(CONFIG_X86_32))
|
||||
memblock_remove(PFN_PHYS(max_pfn), -1);
|
||||
|
||||
/* Throw away partial pages: */
|
||||
memblock_trim_memory(PAGE_SIZE);
|
||||
|
||||
|
|
|
|||
|
|
@ -42,7 +42,7 @@ static void load_segments(void)
|
|||
|
||||
static void machine_kexec_free_page_tables(struct kimage *image)
|
||||
{
|
||||
free_pages((unsigned long)image->arch.pgd, PGD_ALLOCATION_ORDER);
|
||||
free_pages((unsigned long)image->arch.pgd, pgd_allocation_order());
|
||||
image->arch.pgd = NULL;
|
||||
#ifdef CONFIG_X86_PAE
|
||||
free_page((unsigned long)image->arch.pmd0);
|
||||
|
|
@ -59,7 +59,7 @@ static void machine_kexec_free_page_tables(struct kimage *image)
|
|||
static int machine_kexec_alloc_page_tables(struct kimage *image)
|
||||
{
|
||||
image->arch.pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
|
||||
PGD_ALLOCATION_ORDER);
|
||||
pgd_allocation_order());
|
||||
#ifdef CONFIG_X86_PAE
|
||||
image->arch.pmd0 = (pmd_t *)get_zeroed_page(GFP_KERNEL);
|
||||
image->arch.pmd1 = (pmd_t *)get_zeroed_page(GFP_KERNEL);
|
||||
|
|
|
|||
|
|
@ -796,12 +796,15 @@ static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
|
|||
struct amd_svm_iommu_ir *ir;
|
||||
u64 entry;
|
||||
|
||||
if (WARN_ON_ONCE(!pi->ir_data))
|
||||
return -EINVAL;
|
||||
|
||||
/**
|
||||
* In some cases, the existing irte is updated and re-set,
|
||||
* so we need to check here if it's already been * added
|
||||
* to the ir_list.
|
||||
*/
|
||||
if (pi->ir_data && (pi->prev_ga_tag != 0)) {
|
||||
if (pi->prev_ga_tag) {
|
||||
struct kvm *kvm = svm->vcpu.kvm;
|
||||
u32 vcpu_id = AVIC_GATAG_TO_VCPUID(pi->prev_ga_tag);
|
||||
struct kvm_vcpu *prev_vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
|
||||
|
|
@ -820,7 +823,7 @@ static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
|
|||
* Allocating new amd_iommu_pi_data, which will get
|
||||
* add to the per-vcpu ir_list.
|
||||
*/
|
||||
ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_KERNEL_ACCOUNT);
|
||||
ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_ATOMIC | __GFP_ACCOUNT);
|
||||
if (!ir) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
|
|
@ -896,10 +899,10 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
|
|||
{
|
||||
struct kvm_kernel_irq_routing_entry *e;
|
||||
struct kvm_irq_routing_table *irq_rt;
|
||||
bool enable_remapped_mode = true;
|
||||
int idx, ret = 0;
|
||||
|
||||
if (!kvm_arch_has_assigned_device(kvm) ||
|
||||
!irq_remapping_cap(IRQ_POSTING_CAP))
|
||||
if (!kvm_arch_has_assigned_device(kvm) || !kvm_arch_has_irq_bypass())
|
||||
return 0;
|
||||
|
||||
pr_debug("SVM: %s: host_irq=%#x, guest_irq=%#x, set=%#x\n",
|
||||
|
|
@ -933,6 +936,8 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
|
|||
kvm_vcpu_apicv_active(&svm->vcpu)) {
|
||||
struct amd_iommu_pi_data pi;
|
||||
|
||||
enable_remapped_mode = false;
|
||||
|
||||
/* Try to enable guest_mode in IRTE */
|
||||
pi.base = __sme_set(page_to_phys(svm->avic_backing_page) &
|
||||
AVIC_HPA_MASK);
|
||||
|
|
@ -951,33 +956,6 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
|
|||
*/
|
||||
if (!ret && pi.is_guest_mode)
|
||||
svm_ir_list_add(svm, &pi);
|
||||
} else {
|
||||
/* Use legacy mode in IRTE */
|
||||
struct amd_iommu_pi_data pi;
|
||||
|
||||
/**
|
||||
* Here, pi is used to:
|
||||
* - Tell IOMMU to use legacy mode for this interrupt.
|
||||
* - Retrieve ga_tag of prior interrupt remapping data.
|
||||
*/
|
||||
pi.prev_ga_tag = 0;
|
||||
pi.is_guest_mode = false;
|
||||
ret = irq_set_vcpu_affinity(host_irq, &pi);
|
||||
|
||||
/**
|
||||
* Check if the posted interrupt was previously
|
||||
* setup with the guest_mode by checking if the ga_tag
|
||||
* was cached. If so, we need to clean up the per-vcpu
|
||||
* ir_list.
|
||||
*/
|
||||
if (!ret && pi.prev_ga_tag) {
|
||||
int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag);
|
||||
struct kvm_vcpu *vcpu;
|
||||
|
||||
vcpu = kvm_get_vcpu_by_id(kvm, id);
|
||||
if (vcpu)
|
||||
svm_ir_list_del(to_svm(vcpu), &pi);
|
||||
}
|
||||
}
|
||||
|
||||
if (!ret && svm) {
|
||||
|
|
@ -993,6 +971,34 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
|
|||
}
|
||||
|
||||
ret = 0;
|
||||
if (enable_remapped_mode) {
|
||||
/* Use legacy mode in IRTE */
|
||||
struct amd_iommu_pi_data pi;
|
||||
|
||||
/**
|
||||
* Here, pi is used to:
|
||||
* - Tell IOMMU to use legacy mode for this interrupt.
|
||||
* - Retrieve ga_tag of prior interrupt remapping data.
|
||||
*/
|
||||
pi.prev_ga_tag = 0;
|
||||
pi.is_guest_mode = false;
|
||||
ret = irq_set_vcpu_affinity(host_irq, &pi);
|
||||
|
||||
/**
|
||||
* Check if the posted interrupt was previously
|
||||
* setup with the guest_mode by checking if the ga_tag
|
||||
* was cached. If so, we need to clean up the per-vcpu
|
||||
* ir_list.
|
||||
*/
|
||||
if (!ret && pi.prev_ga_tag) {
|
||||
int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag);
|
||||
struct kvm_vcpu *vcpu;
|
||||
|
||||
vcpu = kvm_get_vcpu_by_id(kvm, id);
|
||||
if (vcpu)
|
||||
svm_ir_list_del(to_svm(vcpu), &pi);
|
||||
}
|
||||
}
|
||||
out:
|
||||
srcu_read_unlock(&kvm->irq_srcu, idx);
|
||||
return ret;
|
||||
|
|
|
|||
|
|
@ -11,6 +11,13 @@
|
|||
#undef TRACE_SYSTEM
|
||||
#define TRACE_SYSTEM kvm
|
||||
|
||||
#ifdef CREATE_TRACE_POINTS
|
||||
#define tracing_kvm_rip_read(vcpu) ({ \
|
||||
typeof(vcpu) __vcpu = vcpu; \
|
||||
__vcpu->arch.guest_state_protected ? 0 : kvm_rip_read(__vcpu); \
|
||||
})
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Tracepoint for guest mode entry.
|
||||
*/
|
||||
|
|
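The tracing_kvm_rip_read() wrapper added above keeps tracepoints from reporting the RIP of guests whose register state is protected. A small standalone model of the same GNU statement-expression trick, with a simplified vcpu struct standing in for the real one:

/* Standalone model (not the KVM macro): the statement expression evaluates the
 * vcpu argument once and reports 0 instead of the guest RIP when the register
 * state is protected (e.g. SEV-ES guests). */
#include <stdio.h>

struct vcpu {
	int guest_state_protected;
	unsigned long rip;
};

#define tracing_rip_read(vcpu) ({				\
	typeof(vcpu) __vcpu = (vcpu);				\
	__vcpu->guest_state_protected ? 0UL : __vcpu->rip;	\
})

int main(void)
{
	struct vcpu plain = { 0, 0x81000000UL };
	struct vcpu sev   = { 1, 0x81000000UL };

	printf("%#lx %#lx\n", tracing_rip_read(&plain), tracing_rip_read(&sev));
	return 0;
}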
@ -28,7 +35,7 @@ TRACE_EVENT(kvm_entry,
|
|||
|
||||
TP_fast_assign(
|
||||
__entry->vcpu_id = vcpu->vcpu_id;
|
||||
__entry->rip = kvm_rip_read(vcpu);
|
||||
__entry->rip = tracing_kvm_rip_read(vcpu);
|
||||
__entry->immediate_exit = force_immediate_exit;
|
||||
|
||||
kvm_x86_call(get_entry_info)(vcpu, &__entry->intr_info,
|
||||
|
|
@ -319,7 +326,7 @@ TRACE_EVENT(name, \
|
|||
), \
|
||||
\
|
||||
TP_fast_assign( \
|
||||
__entry->guest_rip = kvm_rip_read(vcpu); \
|
||||
__entry->guest_rip = tracing_kvm_rip_read(vcpu); \
|
||||
__entry->isa = isa; \
|
||||
__entry->vcpu_id = vcpu->vcpu_id; \
|
||||
__entry->requests = READ_ONCE(vcpu->requests); \
|
||||
|
|
@ -423,7 +430,7 @@ TRACE_EVENT(kvm_page_fault,
|
|||
|
||||
TP_fast_assign(
|
||||
__entry->vcpu_id = vcpu->vcpu_id;
|
||||
__entry->guest_rip = kvm_rip_read(vcpu);
|
||||
__entry->guest_rip = tracing_kvm_rip_read(vcpu);
|
||||
__entry->fault_address = fault_address;
|
||||
__entry->error_code = error_code;
|
||||
),
|
||||
|
|
|
|||
|
|
@ -297,6 +297,7 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
|
|||
{
|
||||
struct kvm_kernel_irq_routing_entry *e;
|
||||
struct kvm_irq_routing_table *irq_rt;
|
||||
bool enable_remapped_mode = true;
|
||||
struct kvm_lapic_irq irq;
|
||||
struct kvm_vcpu *vcpu;
|
||||
struct vcpu_data vcpu_info;
|
||||
|
|
@ -335,21 +336,8 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
|
|||
|
||||
kvm_set_msi_irq(kvm, e, &irq);
|
||||
if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
|
||||
!kvm_irq_is_postable(&irq)) {
|
||||
/*
|
||||
* Make sure the IRTE is in remapped mode if
|
||||
* we don't handle it in posted mode.
|
||||
*/
|
||||
ret = irq_set_vcpu_affinity(host_irq, NULL);
|
||||
if (ret < 0) {
|
||||
printk(KERN_INFO
|
||||
"failed to back to remapped mode, irq: %u\n",
|
||||
host_irq);
|
||||
goto out;
|
||||
}
|
||||
|
||||
!kvm_irq_is_postable(&irq))
|
||||
continue;
|
||||
}
|
||||
|
||||
vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu));
|
||||
vcpu_info.vector = irq.vector;
|
||||
|
|
@ -357,11 +345,12 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
|
|||
trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi,
|
||||
vcpu_info.vector, vcpu_info.pi_desc_addr, set);
|
||||
|
||||
if (set)
|
||||
ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
|
||||
else
|
||||
ret = irq_set_vcpu_affinity(host_irq, NULL);
|
||||
if (!set)
|
||||
continue;
|
||||
|
||||
enable_remapped_mode = false;
|
||||
|
||||
ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
|
||||
if (ret < 0) {
|
||||
printk(KERN_INFO "%s: failed to update PI IRTE\n",
|
||||
__func__);
|
||||
|
|
@ -369,6 +358,9 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
|
|||
}
|
||||
}
|
||||
|
||||
if (enable_remapped_mode)
|
||||
ret = irq_set_vcpu_affinity(host_irq, NULL);
|
||||
|
||||
ret = 0;
|
||||
out:
|
||||
srcu_read_unlock(&kvm->irq_srcu, idx);
|
||||
|
|
|
|||
|
|
@ -11098,7 +11098,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
|
|||
/*
|
||||
* Profile KVM exit RIPs:
|
||||
*/
|
||||
if (unlikely(prof_on == KVM_PROFILING)) {
|
||||
if (unlikely(prof_on == KVM_PROFILING &&
|
||||
!vcpu->arch.guest_state_protected)) {
|
||||
unsigned long rip = kvm_rip_read(vcpu);
|
||||
profile_hit(KVM_PROFILING, (void *)rip);
|
||||
}
|
||||
|
|
@ -13556,25 +13557,27 @@ bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma);
|
||||
|
||||
bool kvm_arch_has_irq_bypass(void)
|
||||
{
|
||||
return enable_apicv && irq_remapping_cap(IRQ_POSTING_CAP);
|
||||
}
|
||||
|
||||
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
|
||||
struct irq_bypass_producer *prod)
|
||||
{
|
||||
struct kvm_kernel_irqfd *irqfd =
|
||||
container_of(cons, struct kvm_kernel_irqfd, consumer);
|
||||
struct kvm *kvm = irqfd->kvm;
|
||||
int ret;
|
||||
|
||||
irqfd->producer = prod;
|
||||
kvm_arch_start_assignment(irqfd->kvm);
|
||||
|
||||
spin_lock_irq(&kvm->irqfds.lock);
|
||||
irqfd->producer = prod;
|
||||
|
||||
ret = kvm_x86_call(pi_update_irte)(irqfd->kvm,
|
||||
prod->irq, irqfd->gsi, 1);
|
||||
if (ret)
|
||||
kvm_arch_end_assignment(irqfd->kvm);
|
||||
|
||||
spin_unlock_irq(&kvm->irqfds.lock);
|
||||
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
@ -13584,9 +13587,9 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
|
|||
int ret;
|
||||
struct kvm_kernel_irqfd *irqfd =
|
||||
container_of(cons, struct kvm_kernel_irqfd, consumer);
|
||||
struct kvm *kvm = irqfd->kvm;
|
||||
|
||||
WARN_ON(irqfd->producer != prod);
|
||||
irqfd->producer = NULL;
|
||||
|
||||
/*
|
||||
* When producer of consumer is unregistered, we change back to
|
||||
|
|
@ -13594,12 +13597,18 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
|
|||
* when the irq is masked/disabled or the consumer side (KVM
|
||||
* int this case doesn't want to receive the interrupts.
|
||||
*/
|
||||
spin_lock_irq(&kvm->irqfds.lock);
|
||||
irqfd->producer = NULL;
|
||||
|
||||
ret = kvm_x86_call(pi_update_irte)(irqfd->kvm,
|
||||
prod->irq, irqfd->gsi, 0);
|
||||
if (ret)
|
||||
printk(KERN_INFO "irq bypass consumer (token %p) unregistration"
|
||||
" fails: %d\n", irqfd->consumer.token, ret);
|
||||
|
||||
spin_unlock_irq(&kvm->irqfds.lock);
|
||||
|
||||
|
||||
kvm_arch_end_assignment(irqfd->kvm);
|
||||
}
|
||||
|
||||
|
|
@ -13612,7 +13621,8 @@ int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
|
|||
bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *old,
|
||||
struct kvm_kernel_irq_routing_entry *new)
|
||||
{
|
||||
if (new->type != KVM_IRQ_ROUTING_MSI)
|
||||
if (old->type != KVM_IRQ_ROUTING_MSI ||
|
||||
new->type != KVM_IRQ_ROUTING_MSI)
|
||||
return true;
|
||||
|
||||
return !!memcmp(&old->msi, &new->msi, sizeof(new->msi));
|
||||
|
|
|
|||
|
|
@@ -996,8 +996,8 @@ AVXcode: 4
83: Grp1 Ev,Ib (1A),(es)
# CTESTSCC instructions are: CTESTB, CTESTBE, CTESTF, CTESTL, CTESTLE, CTESTNB, CTESTNBE, CTESTNL,
# CTESTNLE, CTESTNO, CTESTNS, CTESTNZ, CTESTO, CTESTS, CTESTT, CTESTZ
84: CTESTSCC (ev)
85: CTESTSCC (es) | CTESTSCC (66),(es)
84: CTESTSCC Eb,Gb (ev)
85: CTESTSCC Ev,Gv (es) | CTESTSCC Ev,Gv (66),(es)
88: POPCNT Gv,Ev (es) | POPCNT Gv,Ev (66),(es)
8f: POP2 Bq,Rq (000),(11B),(ev)
a5: SHLD Ev,Gv,CL (es) | SHLD Ev,Gv,CL (66),(es)
|
|
|
|||
|
|
@ -360,7 +360,7 @@ static inline pgd_t *_pgd_alloc(struct mm_struct *mm)
|
|||
* We allocate one page for pgd.
|
||||
*/
|
||||
if (!SHARED_KERNEL_PMD)
|
||||
return __pgd_alloc(mm, PGD_ALLOCATION_ORDER);
|
||||
return __pgd_alloc(mm, pgd_allocation_order());
|
||||
|
||||
/*
|
||||
* Now PAE kernel is not running as a Xen domain. We can allocate
|
||||
|
|
@ -380,7 +380,7 @@ static inline void _pgd_free(struct mm_struct *mm, pgd_t *pgd)
|
|||
|
||||
static inline pgd_t *_pgd_alloc(struct mm_struct *mm)
|
||||
{
|
||||
return __pgd_alloc(mm, PGD_ALLOCATION_ORDER);
|
||||
return __pgd_alloc(mm, pgd_allocation_order());
|
||||
}
|
||||
|
||||
static inline void _pgd_free(struct mm_struct *mm, pgd_t *pgd)
|
||||
|
|
|
|||
|
|
@ -73,7 +73,7 @@ int __init efi_alloc_page_tables(void)
|
|||
gfp_t gfp_mask;
|
||||
|
||||
gfp_mask = GFP_KERNEL | __GFP_ZERO;
|
||||
efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER);
|
||||
efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, pgd_allocation_order());
|
||||
if (!efi_pgd)
|
||||
goto fail;
|
||||
|
||||
|
|
@ -96,7 +96,7 @@ free_p4d:
|
|||
if (pgtable_l5_enabled())
|
||||
free_page((unsigned long)pgd_page_vaddr(*pgd));
|
||||
free_pgd:
|
||||
free_pages((unsigned long)efi_pgd, PGD_ALLOCATION_ORDER);
|
||||
free_pages((unsigned long)efi_pgd, pgd_allocation_order());
|
||||
fail:
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
|
|
|||
block/bdev.c (67 lines changed)
@@ -152,27 +152,65 @@ static void set_init_blocksize(struct block_device *bdev)
			get_order(bsize));
}

/**
 * bdev_validate_blocksize - check that this block size is acceptable
 * @bdev: blockdevice to check
 * @block_size: block size to check
 *
 * For block device users that do not use buffer heads or the block device
 * page cache, make sure that this block size can be used with the device.
 *
 * Return: On success zero is returned, negative error code on failure.
 */
int bdev_validate_blocksize(struct block_device *bdev, int block_size)
{
	if (blk_validate_block_size(block_size))
		return -EINVAL;

	/* Size cannot be smaller than the size supported by the device */
	if (block_size < bdev_logical_block_size(bdev))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(bdev_validate_blocksize);
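A standalone model of the two checks bdev_validate_blocksize() combines; the 512-to-PAGE_SIZE power-of-two range below is an assumption standing in for blk_validate_block_size():

/* Standalone model (not the kernel helpers): a block size must pass a generic
 * sanity check and must not be smaller than the device's logical block size. */
#include <stdio.h>
#include <stdbool.h>

#define PAGE_SIZE 4096

static bool blocksize_ok(unsigned int size, unsigned int logical_block_size)
{
	if (size < 512 || size > PAGE_SIZE || (size & (size - 1)))
		return false;			/* fails the generic check */
	return size >= logical_block_size;	/* cannot go below device minimum */
}

int main(void)
{
	printf("%d %d %d\n",
	       blocksize_ok(4096, 512),		/* 1: acceptable */
	       blocksize_ok(512, 4096),		/* 0: below logical block size */
	       blocksize_ok(3072, 512));	/* 0: not a power of two */
	return 0;
}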
||||
int set_blocksize(struct file *file, int size)
|
||||
{
|
||||
struct inode *inode = file->f_mapping->host;
|
||||
struct block_device *bdev = I_BDEV(inode);
|
||||
int ret;
|
||||
|
||||
if (blk_validate_block_size(size))
|
||||
return -EINVAL;
|
||||
|
||||
/* Size cannot be smaller than the size supported by the device */
|
||||
if (size < bdev_logical_block_size(bdev))
|
||||
return -EINVAL;
|
||||
ret = bdev_validate_blocksize(bdev, size);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (!file->private_data)
|
||||
return -EINVAL;
|
||||
|
||||
/* Don't change the size if it is same as current */
|
||||
if (inode->i_blkbits != blksize_bits(size)) {
|
||||
/*
|
||||
* Flush and truncate the pagecache before we reconfigure the
|
||||
* mapping geometry because folio sizes are variable now. If a
|
||||
* reader has already allocated a folio whose size is smaller
|
||||
* than the new min_order but invokes readahead after the new
|
||||
* min_order becomes visible, readahead will think there are
|
||||
* "zero" blocks per folio and crash. Take the inode and
|
||||
* invalidation locks to avoid racing with
|
||||
* read/write/fallocate.
|
||||
*/
|
||||
inode_lock(inode);
|
||||
filemap_invalidate_lock(inode->i_mapping);
|
||||
|
||||
sync_blockdev(bdev);
|
||||
kill_bdev(bdev);
|
||||
|
||||
inode->i_blkbits = blksize_bits(size);
|
||||
mapping_set_folio_min_order(inode->i_mapping, get_order(size));
|
||||
kill_bdev(bdev);
|
||||
filemap_invalidate_unlock(inode->i_mapping);
|
||||
inode_unlock(inode);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -777,13 +815,13 @@ static void blkdev_put_part(struct block_device *part)
|
|||
blkdev_put_whole(whole);
|
||||
}
|
||||
|
||||
struct block_device *blkdev_get_no_open(dev_t dev)
|
||||
struct block_device *blkdev_get_no_open(dev_t dev, bool autoload)
|
||||
{
|
||||
struct block_device *bdev;
|
||||
struct inode *inode;
|
||||
|
||||
inode = ilookup(blockdev_superblock, dev);
|
||||
if (!inode && IS_ENABLED(CONFIG_BLOCK_LEGACY_AUTOLOAD)) {
|
||||
if (!inode && autoload && IS_ENABLED(CONFIG_BLOCK_LEGACY_AUTOLOAD)) {
|
||||
blk_request_module(dev);
|
||||
inode = ilookup(blockdev_superblock, dev);
|
||||
if (inode)
|
||||
|
|
@ -1005,7 +1043,7 @@ struct file *bdev_file_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
|
|||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
|
||||
bdev = blkdev_get_no_open(dev);
|
||||
bdev = blkdev_get_no_open(dev, true);
|
||||
if (!bdev)
|
||||
return ERR_PTR(-ENXIO);
|
||||
|
||||
|
|
@ -1274,18 +1312,15 @@ void sync_bdevs(bool wait)
|
|||
*/
|
||||
void bdev_statx(const struct path *path, struct kstat *stat, u32 request_mask)
|
||||
{
|
||||
struct inode *backing_inode;
|
||||
struct block_device *bdev;
|
||||
|
||||
backing_inode = d_backing_inode(path->dentry);
|
||||
|
||||
/*
|
||||
* Note that backing_inode is the inode of a block device node file,
|
||||
* not the block device's internal inode. Therefore it is *not* valid
|
||||
* to use I_BDEV() here; the block device has to be looked up by i_rdev
|
||||
* Note that d_backing_inode() returns the block device node inode, not
|
||||
* the block device's internal inode. Therefore it is *not* valid to
|
||||
* use I_BDEV() here; the block device has to be looked up by i_rdev
|
||||
* instead.
|
||||
*/
|
||||
bdev = blkdev_get_no_open(backing_inode->i_rdev);
|
||||
bdev = blkdev_get_no_open(d_backing_inode(path->dentry)->i_rdev, false);
|
||||
if (!bdev)
|
||||
return;
|
||||
|
||||
|
|
|
|||
|
|
@ -797,7 +797,7 @@ int blkg_conf_open_bdev(struct blkg_conf_ctx *ctx)
|
|||
return -EINVAL;
|
||||
input = skip_spaces(input);
|
||||
|
||||
bdev = blkdev_get_no_open(MKDEV(major, minor));
|
||||
bdev = blkdev_get_no_open(MKDEV(major, minor), false);
|
||||
if (!bdev)
|
||||
return -ENODEV;
|
||||
if (bdev_is_partition(bdev)) {
|
||||
|
|
|
|||
|
|
@ -61,8 +61,14 @@ void blk_apply_bdi_limits(struct backing_dev_info *bdi,
|
|||
/*
|
||||
* For read-ahead of large files to be effective, we need to read ahead
|
||||
* at least twice the optimal I/O size.
|
||||
*
|
||||
* There is no hardware limitation for the read-ahead size and the user
|
||||
* might have increased the read-ahead size through sysfs, so don't ever
|
||||
* decrease it.
|
||||
*/
|
||||
bdi->ra_pages = max(lim->io_opt * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
|
||||
bdi->ra_pages = max3(bdi->ra_pages,
|
||||
lim->io_opt * 2 / PAGE_SIZE,
|
||||
VM_READAHEAD_PAGES);
|
||||
bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
|
||||
}
|
||||
|
||||
|
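The switch from max() to max3() above keeps a read-ahead value the user may have raised via sysfs, while still enforcing the two lower bounds. A standalone sketch of that computation with example values:

/* Standalone model (not kernel code): never shrink ra_pages, only grow it to at
 * least twice the optimal I/O size and the VM default. Values are assumptions. */
#include <stdio.h>

#define PAGE_SIZE		4096
#define VM_READAHEAD_PAGES	(128 * 1024 / PAGE_SIZE)	/* 32 pages */

static unsigned long max3(unsigned long a, unsigned long b, unsigned long c)
{
	unsigned long m = a > b ? a : b;
	return m > c ? m : c;
}

int main(void)
{
	unsigned long ra_pages = 256;		/* user raised it through sysfs */
	unsigned long io_opt = 64 * 1024;	/* device optimal I/O size, bytes */

	ra_pages = max3(ra_pages, io_opt * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
	printf("ra_pages = %lu (user setting preserved)\n", ra_pages);
	return 0;
}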
|
|
|||
|
|
@ -343,6 +343,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
|
|||
op = REQ_OP_ZONE_RESET;
|
||||
|
||||
/* Invalidate the page cache, including dirty pages. */
|
||||
inode_lock(bdev->bd_mapping->host);
|
||||
filemap_invalidate_lock(bdev->bd_mapping);
|
||||
ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
|
||||
if (ret)
|
||||
|
|
@ -364,8 +365,10 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
|
|||
ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors);
|
||||
|
||||
fail:
|
||||
if (cmd == BLKRESETZONE)
|
||||
if (cmd == BLKRESETZONE) {
|
||||
filemap_invalidate_unlock(bdev->bd_mapping);
|
||||
inode_unlock(bdev->bd_mapping->host);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -94,6 +94,9 @@ static inline void blk_wait_io(struct completion *done)
|
|||
wait_for_completion_io(done);
|
||||
}
|
||||
|
||||
struct block_device *blkdev_get_no_open(dev_t dev, bool autoload);
|
||||
void blkdev_put_no_open(struct block_device *bdev);
|
||||
|
||||
#define BIO_INLINE_VECS 4
|
||||
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
|
||||
gfp_t gfp_mask);
|
||||
|
|
|
|||
block/fops.c (18 lines changed)
@ -642,7 +642,7 @@ static int blkdev_open(struct inode *inode, struct file *filp)
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
bdev = blkdev_get_no_open(inode->i_rdev);
|
||||
bdev = blkdev_get_no_open(inode->i_rdev, true);
|
||||
if (!bdev)
|
||||
return -ENXIO;
|
||||
|
||||
|
|
@ -746,7 +746,14 @@ static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
|
|||
ret = direct_write_fallback(iocb, from, ret,
|
||||
blkdev_buffered_write(iocb, from));
|
||||
} else {
|
||||
/*
|
||||
* Take i_rwsem and invalidate_lock to avoid racing with
|
||||
* set_blocksize changing i_blkbits/folio order and punching
|
||||
* out the pagecache.
|
||||
*/
|
||||
inode_lock_shared(bd_inode);
|
||||
ret = blkdev_buffered_write(iocb, from);
|
||||
inode_unlock_shared(bd_inode);
|
||||
}
|
||||
|
||||
if (ret > 0)
|
||||
|
|
@ -757,6 +764,7 @@ static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
|
|||
|
||||
static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
|
||||
{
|
||||
struct inode *bd_inode = bdev_file_inode(iocb->ki_filp);
|
||||
struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
|
||||
loff_t size = bdev_nr_bytes(bdev);
|
||||
loff_t pos = iocb->ki_pos;
|
||||
|
|
@ -793,7 +801,13 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
|
|||
goto reexpand;
|
||||
}
|
||||
|
||||
/*
|
||||
* Take i_rwsem and invalidate_lock to avoid racing with set_blocksize
|
||||
* changing i_blkbits/folio order and punching out the pagecache.
|
||||
*/
|
||||
inode_lock_shared(bd_inode);
|
||||
ret = filemap_read(iocb, to, ret);
|
||||
inode_unlock_shared(bd_inode);
|
||||
|
||||
reexpand:
|
||||
if (unlikely(shorted))
|
||||
|
|
@ -836,6 +850,7 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
|
|||
if ((start | len) & (bdev_logical_block_size(bdev) - 1))
|
||||
return -EINVAL;
|
||||
|
||||
inode_lock(inode);
|
||||
filemap_invalidate_lock(inode->i_mapping);
|
||||
|
||||
/*
|
||||
|
|
@ -868,6 +883,7 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
|
|||
|
||||
fail:
|
||||
filemap_invalidate_unlock(inode->i_mapping);
|
||||
inode_unlock(inode);
|
||||
return error;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -142,6 +142,7 @@ static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
|
|||
if (err)
|
||||
return err;
|
||||
|
||||
inode_lock(bdev->bd_mapping->host);
|
||||
filemap_invalidate_lock(bdev->bd_mapping);
|
||||
err = truncate_bdev_range(bdev, mode, start, start + len - 1);
|
||||
if (err)
|
||||
|
|
@ -174,6 +175,7 @@ out_unplug:
|
|||
blk_finish_plug(&plug);
|
||||
fail:
|
||||
filemap_invalidate_unlock(bdev->bd_mapping);
|
||||
inode_unlock(bdev->bd_mapping->host);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
|
@ -199,12 +201,14 @@ static int blk_ioctl_secure_erase(struct block_device *bdev, blk_mode_t mode,
|
|||
end > bdev_nr_bytes(bdev))
|
||||
return -EINVAL;
|
||||
|
||||
inode_lock(bdev->bd_mapping->host);
|
||||
filemap_invalidate_lock(bdev->bd_mapping);
|
||||
err = truncate_bdev_range(bdev, mode, start, end - 1);
|
||||
if (!err)
|
||||
err = blkdev_issue_secure_erase(bdev, start >> 9, len >> 9,
|
||||
GFP_KERNEL);
|
||||
filemap_invalidate_unlock(bdev->bd_mapping);
|
||||
inode_unlock(bdev->bd_mapping->host);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
|
@ -236,6 +240,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode,
|
|||
return -EINVAL;
|
||||
|
||||
/* Invalidate the page cache, including dirty pages */
|
||||
inode_lock(bdev->bd_mapping->host);
|
||||
filemap_invalidate_lock(bdev->bd_mapping);
|
||||
err = truncate_bdev_range(bdev, mode, start, end);
|
||||
if (err)
|
||||
|
|
@ -246,6 +251,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode,
|
|||
|
||||
fail:
|
||||
filemap_invalidate_unlock(bdev->bd_mapping);
|
||||
inode_unlock(bdev->bd_mapping->host);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -215,8 +215,8 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
|
|||
spage = nth_page(spage, soff / PAGE_SIZE);
|
||||
soff = offset_in_page(soff);
|
||||
|
||||
n = slen / PAGE_SIZE;
|
||||
n += (offset_in_page(slen) + soff - 1) / PAGE_SIZE;
|
||||
n = (slen - 1) / PAGE_SIZE;
|
||||
n += (offset_in_page(slen - 1) + soff) / PAGE_SIZE;
|
||||
if (PageHighMem(nth_page(spage, n)) &&
|
||||
size_add(soff, slen) > PAGE_SIZE)
|
||||
break;
|
||||
|
|
@ -243,9 +243,9 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
|
|||
dpage = nth_page(dpage, doff / PAGE_SIZE);
|
||||
doff = offset_in_page(doff);
|
||||
|
||||
n = dlen / PAGE_SIZE;
|
||||
n += (offset_in_page(dlen) + doff - 1) / PAGE_SIZE;
|
||||
if (PageHighMem(dpage + n) &&
|
||||
n = (dlen - 1) / PAGE_SIZE;
|
||||
n += (offset_in_page(dlen - 1) + doff) / PAGE_SIZE;
|
||||
if (PageHighMem(nth_page(dpage, n)) &&
|
||||
size_add(doff, dlen) > PAGE_SIZE)
|
||||
break;
|
||||
dst = kmap_local_page(dpage) + doff;
|
||||
|
|
|
|||
crypto/testmgr.c (145 lines changed)
@ -58,9 +58,6 @@ module_param(fuzz_iterations, uint, 0644);
|
|||
MODULE_PARM_DESC(fuzz_iterations, "number of fuzz test iterations");
|
||||
#endif
|
||||
|
||||
/* Multibuffer is unlimited. Set arbitrary limit for testing. */
|
||||
#define MAX_MB_MSGS 16
|
||||
|
||||
#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
|
||||
|
||||
/* a perfect nop */
|
||||
|
|
@ -3329,48 +3326,27 @@ static int test_acomp(struct crypto_acomp *tfm,
|
|||
int ctcount, int dtcount)
|
||||
{
|
||||
const char *algo = crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm));
|
||||
struct scatterlist *src = NULL, *dst = NULL;
|
||||
struct acomp_req *reqs[MAX_MB_MSGS] = {};
|
||||
char *decomp_out[MAX_MB_MSGS] = {};
|
||||
char *output[MAX_MB_MSGS] = {};
|
||||
struct crypto_wait wait;
|
||||
struct acomp_req *req;
|
||||
int ret = -ENOMEM;
|
||||
unsigned int i;
|
||||
char *output, *decomp_out;
|
||||
int ret;
|
||||
struct scatterlist src, dst;
|
||||
struct acomp_req *req;
|
||||
struct crypto_wait wait;
|
||||
|
||||
src = kmalloc_array(MAX_MB_MSGS, sizeof(*src), GFP_KERNEL);
|
||||
if (!src)
|
||||
goto out;
|
||||
dst = kmalloc_array(MAX_MB_MSGS, sizeof(*dst), GFP_KERNEL);
|
||||
if (!dst)
|
||||
goto out;
|
||||
output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
|
||||
if (!output)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < MAX_MB_MSGS; i++) {
|
||||
reqs[i] = acomp_request_alloc(tfm);
|
||||
if (!reqs[i])
|
||||
goto out;
|
||||
|
||||
acomp_request_set_callback(reqs[i],
|
||||
CRYPTO_TFM_REQ_MAY_SLEEP |
|
||||
CRYPTO_TFM_REQ_MAY_BACKLOG,
|
||||
crypto_req_done, &wait);
|
||||
if (i)
|
||||
acomp_request_chain(reqs[i], reqs[0]);
|
||||
|
||||
output[i] = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
|
||||
if (!output[i])
|
||||
goto out;
|
||||
|
||||
decomp_out[i] = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
|
||||
if (!decomp_out[i])
|
||||
goto out;
|
||||
decomp_out = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
|
||||
if (!decomp_out) {
|
||||
kfree(output);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
for (i = 0; i < ctcount; i++) {
|
||||
unsigned int dlen = COMP_BUF_SIZE;
|
||||
int ilen = ctemplate[i].inlen;
|
||||
void *input_vec;
|
||||
int j;
|
||||
|
||||
input_vec = kmemdup(ctemplate[i].input, ilen, GFP_KERNEL);
|
||||
if (!input_vec) {
|
||||
|
|
@ -3378,61 +3354,70 @@ static int test_acomp(struct crypto_acomp *tfm,
|
|||
goto out;
|
||||
}
|
||||
|
||||
memset(output, 0, dlen);
|
||||
crypto_init_wait(&wait);
|
||||
sg_init_one(src, input_vec, ilen);
|
||||
sg_init_one(&src, input_vec, ilen);
|
||||
sg_init_one(&dst, output, dlen);
|
||||
|
||||
for (j = 0; j < MAX_MB_MSGS; j++) {
|
||||
sg_init_one(dst + j, output[j], dlen);
|
||||
acomp_request_set_params(reqs[j], src, dst + j, ilen, dlen);
|
||||
req = acomp_request_alloc(tfm);
|
||||
if (!req) {
|
||||
pr_err("alg: acomp: request alloc failed for %s\n",
|
||||
algo);
|
||||
kfree(input_vec);
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
req = reqs[0];
|
||||
acomp_request_set_params(req, &src, &dst, ilen, dlen);
|
||||
acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
|
||||
crypto_req_done, &wait);
|
||||
|
||||
ret = crypto_wait_req(crypto_acomp_compress(req), &wait);
|
||||
if (ret) {
|
||||
pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
|
||||
i + 1, algo, -ret);
|
||||
kfree(input_vec);
|
||||
acomp_request_free(req);
|
||||
goto out;
|
||||
}
|
||||
|
||||
ilen = req->dlen;
|
||||
dlen = COMP_BUF_SIZE;
|
||||
sg_init_one(&src, output, ilen);
|
||||
sg_init_one(&dst, decomp_out, dlen);
|
||||
crypto_init_wait(&wait);
|
||||
for (j = 0; j < MAX_MB_MSGS; j++) {
|
||||
sg_init_one(src + j, output[j], ilen);
|
||||
sg_init_one(dst + j, decomp_out[j], dlen);
|
||||
acomp_request_set_params(reqs[j], src + j, dst + j, ilen, dlen);
|
||||
acomp_request_set_params(req, &src, &dst, ilen, dlen);
|
||||
|
||||
ret = crypto_wait_req(crypto_acomp_decompress(req), &wait);
|
||||
if (ret) {
|
||||
pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
|
||||
i + 1, algo, -ret);
|
||||
kfree(input_vec);
|
||||
acomp_request_free(req);
|
||||
goto out;
|
||||
}
|
||||
|
||||
crypto_wait_req(crypto_acomp_decompress(req), &wait);
|
||||
for (j = 0; j < MAX_MB_MSGS; j++) {
|
||||
ret = reqs[j]->base.err;
|
||||
if (ret) {
|
||||
pr_err("alg: acomp: compression failed on test %d (%d) for %s: ret=%d\n",
|
||||
i + 1, j, algo, -ret);
|
||||
kfree(input_vec);
|
||||
goto out;
|
||||
}
|
||||
if (req->dlen != ctemplate[i].inlen) {
|
||||
pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n",
|
||||
i + 1, algo, req->dlen);
|
||||
ret = -EINVAL;
|
||||
kfree(input_vec);
|
||||
acomp_request_free(req);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (reqs[j]->dlen != ctemplate[i].inlen) {
|
||||
pr_err("alg: acomp: Compression test %d (%d) failed for %s: output len = %d\n",
|
||||
i + 1, j, algo, reqs[j]->dlen);
|
||||
ret = -EINVAL;
|
||||
kfree(input_vec);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (memcmp(input_vec, decomp_out[j], reqs[j]->dlen)) {
|
||||
pr_err("alg: acomp: Compression test %d (%d) failed for %s\n",
|
||||
i + 1, j, algo);
|
||||
hexdump(output[j], reqs[j]->dlen);
|
||||
ret = -EINVAL;
|
||||
kfree(input_vec);
|
||||
goto out;
|
||||
}
|
||||
if (memcmp(input_vec, decomp_out, req->dlen)) {
|
||||
pr_err("alg: acomp: Compression test %d failed for %s\n",
|
||||
i + 1, algo);
|
||||
hexdump(output, req->dlen);
|
||||
ret = -EINVAL;
|
||||
kfree(input_vec);
|
||||
acomp_request_free(req);
|
||||
goto out;
|
||||
}
|
||||
|
||||
kfree(input_vec);
|
||||
acomp_request_free(req);
|
||||
}
|
||||
|
||||
for (i = 0; i < dtcount; i++) {
|
||||
|
|
@ -3446,9 +3431,10 @@ static int test_acomp(struct crypto_acomp *tfm,
|
|||
goto out;
|
||||
}
|
||||
|
||||
memset(output, 0, dlen);
|
||||
crypto_init_wait(&wait);
|
||||
sg_init_one(src, input_vec, ilen);
|
||||
sg_init_one(dst, output[0], dlen);
|
||||
sg_init_one(&src, input_vec, ilen);
|
||||
sg_init_one(&dst, output, dlen);
|
||||
|
||||
req = acomp_request_alloc(tfm);
|
||||
if (!req) {
|
||||
|
|
@ -3459,7 +3445,7 @@ static int test_acomp(struct crypto_acomp *tfm,
|
|||
goto out;
|
||||
}
|
||||
|
||||
acomp_request_set_params(req, src, dst, ilen, dlen);
|
||||
acomp_request_set_params(req, &src, &dst, ilen, dlen);
|
||||
acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
|
||||
crypto_req_done, &wait);
|
||||
|
||||
|
|
@ -3481,10 +3467,10 @@ static int test_acomp(struct crypto_acomp *tfm,
|
|||
goto out;
|
||||
}
|
||||
|
||||
if (memcmp(output[0], dtemplate[i].output, req->dlen)) {
|
||||
if (memcmp(output, dtemplate[i].output, req->dlen)) {
|
||||
pr_err("alg: acomp: Decompression test %d failed for %s\n",
|
||||
i + 1, algo);
|
||||
hexdump(output[0], req->dlen);
|
||||
hexdump(output, req->dlen);
|
||||
ret = -EINVAL;
|
||||
kfree(input_vec);
|
||||
acomp_request_free(req);
|
||||
|
|
@ -3498,13 +3484,8 @@ static int test_acomp(struct crypto_acomp *tfm,
|
|||
ret = 0;
|
||||
|
||||
out:
|
||||
acomp_request_free(reqs[0]);
|
||||
for (i = 0; i < MAX_MB_MSGS; i++) {
|
||||
kfree(output[i]);
|
||||
kfree(decomp_out[i]);
|
||||
}
|
||||
kfree(dst);
|
||||
kfree(src);
|
||||
kfree(decomp_out);
|
||||
kfree(output);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -6373,7 +6373,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
|
|||
seq_printf(m, " node %d", buffer->target_node->debug_id);
|
||||
seq_printf(m, " size %zd:%zd offset %lx\n",
|
||||
buffer->data_size, buffer->offsets_size,
|
||||
proc->alloc.vm_start - buffer->user_data);
|
||||
buffer->user_data - proc->alloc.vm_start);
|
||||
}
|
||||
|
||||
static void print_binder_work_ilocked(struct seq_file *m,
|
||||
|
|
|
|||
|
|
@ -2453,8 +2453,8 @@ static unsigned int ata_msense_control_ata_feature(struct ata_device *dev,
|
|||
*/
|
||||
put_unaligned_be16(ATA_FEATURE_SUB_MPAGE_LEN - 4, &buf[2]);
|
||||
|
||||
if (dev->flags & ATA_DFLAG_CDL)
|
||||
buf[4] = 0x02; /* Support T2A and T2B pages */
|
||||
if (dev->flags & ATA_DFLAG_CDL_ENABLED)
|
||||
buf[4] = 0x02; /* T2A and T2B pages enabled */
|
||||
else
|
||||
buf[4] = 0;
|
||||
|
||||
|
|
@ -3886,12 +3886,11 @@ static int ata_mselect_control_spg0(struct ata_queued_cmd *qc,
|
|||
}
|
||||
|
||||
/*
|
||||
* Translate MODE SELECT control mode page, sub-pages f2h (ATA feature mode
|
||||
* Translate MODE SELECT control mode page, sub-page f2h (ATA feature mode
|
||||
* page) into a SET FEATURES command.
|
||||
*/
|
||||
static unsigned int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
|
||||
const u8 *buf, int len,
|
||||
u16 *fp)
|
||||
static int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
|
||||
const u8 *buf, int len, u16 *fp)
|
||||
{
|
||||
struct ata_device *dev = qc->dev;
|
||||
struct ata_taskfile *tf = &qc->tf;
|
||||
|
|
@ -3909,17 +3908,27 @@ static unsigned int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
|
|||
/* Check cdl_ctrl */
|
||||
switch (buf[0] & 0x03) {
|
||||
case 0:
|
||||
/* Disable CDL */
|
||||
/* Disable CDL if it is enabled */
|
||||
if (!(dev->flags & ATA_DFLAG_CDL_ENABLED))
|
||||
return 0;
|
||||
ata_dev_dbg(dev, "Disabling CDL\n");
|
||||
cdl_action = 0;
|
||||
dev->flags &= ~ATA_DFLAG_CDL_ENABLED;
|
||||
break;
|
||||
case 0x02:
|
||||
/* Enable CDL T2A/T2B: NCQ priority must be disabled */
|
||||
/*
|
||||
* Enable CDL if not already enabled. Since this is mutually
|
||||
* exclusive with NCQ priority, allow this only if NCQ priority
|
||||
* is disabled.
|
||||
*/
|
||||
if (dev->flags & ATA_DFLAG_CDL_ENABLED)
|
||||
return 0;
|
||||
if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED) {
|
||||
ata_dev_err(dev,
|
||||
"NCQ priority must be disabled to enable CDL\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
ata_dev_dbg(dev, "Enabling CDL\n");
|
||||
cdl_action = 1;
|
||||
dev->flags |= ATA_DFLAG_CDL_ENABLED;
|
||||
break;
|
||||
|
|
|
|||
|
|
@ -156,6 +156,16 @@
|
|||
* },
|
||||
* .ops = my_custom_ops,
|
||||
* };
|
||||
*
|
||||
* Please note that such custom ops approach is valid, but it is hard to implement
|
||||
* it right without global locks per-device to protect from auxiliary_drv removal
|
||||
* during call to that ops. In addition, this implementation lacks proper module
|
||||
* dependency, which causes to load/unload races between auxiliary parent and devices
|
||||
* modules.
|
||||
*
|
||||
* The most easiest way to provide these ops reliably without needing to
|
||||
* have a lock is to EXPORT_SYMBOL*() them and rely on already existing
|
||||
* modules infrastructure for validity and correct dependencies chains.
|
||||
*/
|
||||
|
||||
static const struct auxiliary_device_id *auxiliary_match_id(const struct auxiliary_device_id *id,
|
||||
|
|
|
|||
|
|
@ -73,6 +73,7 @@ static inline void subsys_put(struct subsys_private *sp)
|
|||
kset_put(&sp->subsys);
|
||||
}
|
||||
|
||||
struct subsys_private *bus_to_subsys(const struct bus_type *bus);
|
||||
struct subsys_private *class_to_subsys(const struct class *class);
|
||||
|
||||
struct driver_private {
|
||||
|
|
@ -180,6 +181,22 @@ int driver_add_groups(const struct device_driver *drv, const struct attribute_gr
|
|||
void driver_remove_groups(const struct device_driver *drv, const struct attribute_group **groups);
|
||||
void device_driver_detach(struct device *dev);
|
||||
|
||||
static inline void device_set_driver(struct device *dev, const struct device_driver *drv)
|
||||
{
|
||||
/*
|
||||
* Majority (all?) read accesses to dev->driver happens either
|
||||
* while holding device lock or in bus/driver code that is only
|
||||
* invoked when the device is bound to a driver and there is no
|
||||
* concern of the pointer being changed while it is being read.
|
||||
* However when reading device's uevent file we read driver pointer
|
||||
* without taking device lock (so we do not block there for
|
||||
* arbitrary amount of time). We use WRITE_ONCE() here to prevent
|
||||
* tearing so that READ_ONCE() can safely be used in uevent code.
|
||||
*/
|
||||
// FIXME - this cast should not be needed "soon"
|
||||
WRITE_ONCE(dev->driver, (struct device_driver *)drv);
|
||||
}
|
||||
|
||||
int devres_release_all(struct device *dev);
|
||||
void device_block_probing(void);
|
||||
void device_unblock_probing(void);
|
||||
|
|
|
|||
|
|
@ -57,7 +57,7 @@ static int __must_check bus_rescan_devices_helper(struct device *dev,
|
|||
* NULL. A call to subsys_put() must be done when finished with the pointer in
|
||||
* order for it to be properly freed.
|
||||
*/
|
||||
static struct subsys_private *bus_to_subsys(const struct bus_type *bus)
|
||||
struct subsys_private *bus_to_subsys(const struct bus_type *bus)
|
||||
{
|
||||
struct subsys_private *sp = NULL;
|
||||
struct kobject *kobj;
|
||||
|
|
|
|||
|
|
@ -2624,6 +2624,35 @@ static const char *dev_uevent_name(const struct kobject *kobj)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Try filling "DRIVER=<name>" uevent variable for a device. Because this
|
||||
* function may race with binding and unbinding the device from a driver,
|
||||
* we need to be careful. Binding is generally safe, at worst we miss the
|
||||
* fact that the device is already bound to a driver (but the driver
|
||||
* information that is delivered through uevents is best-effort, it may
|
||||
* become obsolete as soon as it is generated anyways). Unbinding is more
|
||||
* risky as driver pointer is transitioning to NULL, so READ_ONCE() should
|
||||
* be used to make sure we are dealing with the same pointer, and to
|
||||
* ensure that driver structure is not going to disappear from under us
|
||||
* we take bus' drivers klist lock. The assumption that only registered
|
||||
* driver can be bound to a device, and to unregister a driver bus code
|
||||
* will take the same lock.
|
||||
*/
|
||||
static void dev_driver_uevent(const struct device *dev, struct kobj_uevent_env *env)
|
||||
{
|
||||
struct subsys_private *sp = bus_to_subsys(dev->bus);
|
||||
|
||||
if (sp) {
|
||||
scoped_guard(spinlock, &sp->klist_drivers.k_lock) {
|
||||
struct device_driver *drv = READ_ONCE(dev->driver);
|
||||
if (drv)
|
||||
add_uevent_var(env, "DRIVER=%s", drv->name);
|
||||
}
|
||||
|
||||
subsys_put(sp);
|
||||
}
|
||||
}
|
||||
|
||||
static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
|
||||
{
|
||||
const struct device *dev = kobj_to_dev(kobj);
|
||||
|
|
@ -2655,8 +2684,8 @@ static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
|
|||
if (dev->type && dev->type->name)
|
||||
add_uevent_var(env, "DEVTYPE=%s", dev->type->name);
|
||||
|
||||
if (dev->driver)
|
||||
add_uevent_var(env, "DRIVER=%s", dev->driver->name);
|
||||
/* Add "DRIVER=%s" variable if the device is bound to a driver */
|
||||
dev_driver_uevent(dev, env);
|
||||
|
||||
/* Add common DT information about the device */
|
||||
of_device_uevent(dev, env);
|
||||
|
|
@ -2726,11 +2755,8 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
|
|||
if (!env)
|
||||
return -ENOMEM;
|
||||
|
||||
/* Synchronize with really_probe() */
|
||||
device_lock(dev);
|
||||
/* let the kset specific function add its keys */
|
||||
retval = kset->uevent_ops->uevent(&dev->kobj, env);
|
||||
device_unlock(dev);
|
||||
if (retval)
|
||||
goto out;
|
||||
|
||||
|
|
@ -3700,7 +3726,7 @@ done:
|
|||
device_pm_remove(dev);
|
||||
dpm_sysfs_remove(dev);
|
||||
DPMError:
|
||||
dev->driver = NULL;
|
||||
device_set_driver(dev, NULL);
|
||||
bus_remove_device(dev);
|
||||
BusError:
|
||||
device_remove_attrs(dev);
|
||||
|
|
|
|||
|
|
@ -550,7 +550,7 @@ static void device_unbind_cleanup(struct device *dev)
|
|||
arch_teardown_dma_ops(dev);
|
||||
kfree(dev->dma_range_map);
|
||||
dev->dma_range_map = NULL;
|
||||
dev->driver = NULL;
|
||||
device_set_driver(dev, NULL);
|
||||
dev_set_drvdata(dev, NULL);
|
||||
if (dev->pm_domain && dev->pm_domain->dismiss)
|
||||
dev->pm_domain->dismiss(dev);
|
||||
|
|
@ -629,8 +629,7 @@ static int really_probe(struct device *dev, const struct device_driver *drv)
|
|||
}
|
||||
|
||||
re_probe:
|
||||
// FIXME - this cast should not be needed "soon"
|
||||
dev->driver = (struct device_driver *)drv;
|
||||
device_set_driver(dev, drv);
|
||||
|
||||
/* If using pinctrl, bind pins now before probing */
|
||||
ret = pinctrl_bind_pins(dev);
|
||||
|
|
@ -1014,7 +1013,7 @@ static int __device_attach(struct device *dev, bool allow_async)
|
|||
if (ret == 0)
|
||||
ret = 1;
|
||||
else {
|
||||
dev->driver = NULL;
|
||||
device_set_driver(dev, NULL);
|
||||
ret = 0;
|
||||
}
|
||||
} else {
|
||||
|
|
|
|||
|
|
@ -296,7 +296,7 @@ static int delete_path(const char *nodepath)
|
|||
return err;
|
||||
}
|
||||
|
||||
static int dev_mynode(struct device *dev, struct inode *inode, struct kstat *stat)
|
||||
static int dev_mynode(struct device *dev, struct inode *inode)
|
||||
{
|
||||
/* did we create it */
|
||||
if (inode->i_private != &thread)
|
||||
|
|
@ -304,13 +304,13 @@ static int dev_mynode(struct device *dev, struct inode *inode, struct kstat *sta
|
|||
|
||||
/* does the dev_t match */
|
||||
if (is_blockdev(dev)) {
|
||||
if (!S_ISBLK(stat->mode))
|
||||
if (!S_ISBLK(inode->i_mode))
|
||||
return 0;
|
||||
} else {
|
||||
if (!S_ISCHR(stat->mode))
|
||||
if (!S_ISCHR(inode->i_mode))
|
||||
return 0;
|
||||
}
|
||||
if (stat->rdev != dev->devt)
|
||||
if (inode->i_rdev != dev->devt)
|
||||
return 0;
|
||||
|
||||
/* ours */
|
||||
|
|
@ -321,20 +321,16 @@ static int handle_remove(const char *nodename, struct device *dev)
|
|||
{
|
||||
struct path parent;
|
||||
struct dentry *dentry;
|
||||
struct kstat stat;
|
||||
struct path p;
|
||||
struct inode *inode;
|
||||
int deleted = 0;
|
||||
int err;
|
||||
int err = 0;
|
||||
|
||||
dentry = kern_path_locked(nodename, &parent);
|
||||
if (IS_ERR(dentry))
|
||||
return PTR_ERR(dentry);
|
||||
|
||||
p.mnt = parent.mnt;
|
||||
p.dentry = dentry;
|
||||
err = vfs_getattr(&p, &stat, STATX_TYPE | STATX_MODE,
|
||||
AT_STATX_SYNC_AS_STAT);
|
||||
if (!err && dev_mynode(dev, d_inode(dentry), &stat)) {
|
||||
inode = d_inode(dentry);
|
||||
if (dev_mynode(dev, inode)) {
|
||||
struct iattr newattrs;
|
||||
/*
|
||||
* before unlinking this node, reset permissions
|
||||
|
|
@ -342,7 +338,7 @@ static int handle_remove(const char *nodename, struct device *dev)
|
|||
*/
|
||||
newattrs.ia_uid = GLOBAL_ROOT_UID;
|
||||
newattrs.ia_gid = GLOBAL_ROOT_GID;
|
||||
newattrs.ia_mode = stat.mode & ~0777;
|
||||
newattrs.ia_mode = inode->i_mode & ~0777;
|
||||
newattrs.ia_valid =
|
||||
ATTR_UID|ATTR_GID|ATTR_MODE;
|
||||
inode_lock(d_inode(dentry));
|
||||
|
|
|
|||
|
|
@ -816,21 +816,6 @@ static int add_memory_block(unsigned long block_id, unsigned long state,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int __init add_boot_memory_block(unsigned long base_section_nr)
|
||||
{
|
||||
unsigned long nr;
|
||||
|
||||
for_each_present_section_nr(base_section_nr, nr) {
|
||||
if (nr >= (base_section_nr + sections_per_block))
|
||||
break;
|
||||
|
||||
return add_memory_block(memory_block_id(base_section_nr),
|
||||
MEM_ONLINE, NULL, NULL);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int add_hotplug_memory_block(unsigned long block_id,
|
||||
struct vmem_altmap *altmap,
|
||||
struct memory_group *group)
|
||||
|
|
@ -957,7 +942,7 @@ static const struct attribute_group *memory_root_attr_groups[] = {
|
|||
void __init memory_dev_init(void)
|
||||
{
|
||||
int ret;
|
||||
unsigned long block_sz, nr;
|
||||
unsigned long block_sz, block_id, nr;
|
||||
|
||||
/* Validate the configured memory block size */
|
||||
block_sz = memory_block_size_bytes();
|
||||
|
|
@ -970,15 +955,23 @@ void __init memory_dev_init(void)
|
|||
panic("%s() failed to register subsystem: %d\n", __func__, ret);
|
||||
|
||||
/*
|
||||
* Create entries for memory sections that were found
|
||||
* during boot and have been initialized
|
||||
* Create entries for memory sections that were found during boot
|
||||
* and have been initialized. Use @block_id to track the last
|
||||
* handled block and initialize it to an invalid value (ULONG_MAX)
|
||||
* to bypass the block ID matching check for the first present
|
||||
* block so that it can be covered.
|
||||
*/
|
||||
for (nr = 0; nr <= __highest_present_section_nr;
|
||||
nr += sections_per_block) {
|
||||
ret = add_boot_memory_block(nr);
|
||||
if (ret)
|
||||
panic("%s() failed to add memory block: %d\n", __func__,
|
||||
ret);
|
||||
block_id = ULONG_MAX;
|
||||
for_each_present_section_nr(0, nr) {
|
||||
if (block_id != ULONG_MAX && memory_block_id(nr) == block_id)
|
||||
continue;
|
||||
|
||||
block_id = memory_block_id(nr);
|
||||
ret = add_memory_block(block_id, MEM_ONLINE, NULL, NULL);
|
||||
if (ret) {
|
||||
panic("%s() failed to add memory block: %d\n",
|
||||
__func__, ret);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1080,6 +1080,7 @@ void software_node_notify(struct device *dev)
|
|||
if (!swnode)
|
||||
return;
|
||||
|
||||
kobject_get(&swnode->kobj);
|
||||
ret = sysfs_create_link(&dev->kobj, &swnode->kobj, "software_node");
|
||||
if (ret)
|
||||
return;
|
||||
|
|
@ -1089,8 +1090,6 @@ void software_node_notify(struct device *dev)
|
|||
sysfs_remove_link(&dev->kobj, "software_node");
|
||||
return;
|
||||
}
|
||||
|
||||
kobject_get(&swnode->kobj);
|
||||
}
|
||||
|
||||
void software_node_notify_remove(struct device *dev)
|
||||
|
|
|
|||
|
|
@ -1683,14 +1683,31 @@ static void ublk_start_cancel(struct ublk_queue *ubq)
|
|||
ublk_put_disk(disk);
|
||||
}
|
||||
|
||||
static void ublk_cancel_cmd(struct ublk_queue *ubq, struct ublk_io *io,
|
||||
static void ublk_cancel_cmd(struct ublk_queue *ubq, unsigned tag,
|
||||
unsigned int issue_flags)
|
||||
{
|
||||
struct ublk_io *io = &ubq->ios[tag];
|
||||
struct ublk_device *ub = ubq->dev;
|
||||
struct request *req;
|
||||
bool done;
|
||||
|
||||
if (!(io->flags & UBLK_IO_FLAG_ACTIVE))
|
||||
return;
|
||||
|
||||
/*
|
||||
* Don't try to cancel this command if the request is started for
|
||||
* avoiding race between io_uring_cmd_done() and
|
||||
* io_uring_cmd_complete_in_task().
|
||||
*
|
||||
* Either the started request will be aborted via __ublk_abort_rq(),
|
||||
* then this uring_cmd is canceled next time, or it will be done in
|
||||
* task work function ublk_dispatch_req() because io_uring guarantees
|
||||
* that ublk_dispatch_req() is always called
|
||||
*/
|
||||
req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
|
||||
if (req && blk_mq_request_started(req))
|
||||
return;
|
||||
|
||||
spin_lock(&ubq->cancel_lock);
|
||||
done = !!(io->flags & UBLK_IO_FLAG_CANCELED);
|
||||
if (!done)
|
||||
|
|
@ -1722,7 +1739,6 @@ static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
|
|||
struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
|
||||
struct ublk_queue *ubq = pdu->ubq;
|
||||
struct task_struct *task;
|
||||
struct ublk_io *io;
|
||||
|
||||
if (WARN_ON_ONCE(!ubq))
|
||||
return;
|
||||
|
|
@ -1737,9 +1753,8 @@ static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
|
|||
if (!ubq->canceling)
|
||||
ublk_start_cancel(ubq);
|
||||
|
||||
io = &ubq->ios[pdu->tag];
|
||||
WARN_ON_ONCE(io->cmd != cmd);
|
||||
ublk_cancel_cmd(ubq, io, issue_flags);
|
||||
WARN_ON_ONCE(ubq->ios[pdu->tag].cmd != cmd);
|
||||
ublk_cancel_cmd(ubq, pdu->tag, issue_flags);
|
||||
}
|
||||
|
||||
static inline bool ublk_queue_ready(struct ublk_queue *ubq)
|
||||
|
|
@ -1752,7 +1767,7 @@ static void ublk_cancel_queue(struct ublk_queue *ubq)
|
|||
int i;
|
||||
|
||||
for (i = 0; i < ubq->q_depth; i++)
|
||||
ublk_cancel_cmd(ubq, &ubq->ios[i], IO_URING_F_UNLOCKED);
|
||||
ublk_cancel_cmd(ubq, i, IO_URING_F_UNLOCKED);
|
||||
}
|
||||
|
||||
/* Cancel all pending commands, must be called after del_gendisk() returns */
|
||||
|
|
@ -1886,15 +1901,6 @@ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
|
|||
}
|
||||
}
|
||||
|
||||
static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
|
||||
int tag)
|
||||
{
|
||||
struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
|
||||
struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
|
||||
|
||||
ublk_queue_cmd(ubq, req);
|
||||
}
|
||||
|
||||
static inline int ublk_check_cmd_op(u32 cmd_op)
|
||||
{
|
||||
u32 ioc_type = _IOC_TYPE(cmd_op);
|
||||
|
|
@ -2103,8 +2109,9 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
|
|||
if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
|
||||
goto out;
|
||||
ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
|
||||
ublk_handle_need_get_data(ub, ub_cmd->q_id, ub_cmd->tag);
|
||||
break;
|
||||
req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
|
||||
ublk_dispatch_req(ubq, req, issue_flags);
|
||||
return -EIOCBQUEUED;
|
||||
default:
|
||||
goto out;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -315,7 +315,7 @@ static int __init misc_init(void)
|
|||
goto fail_remove;
|
||||
|
||||
err = -EIO;
|
||||
if (register_chrdev(MISC_MAJOR, "misc", &misc_fops))
|
||||
if (__register_chrdev(MISC_MAJOR, 0, MINORMASK + 1, "misc", &misc_fops))
|
||||
goto fail_printk;
|
||||
return 0;
|
||||
|
||||
|
|
|
|||
|
|
@ -1576,8 +1576,8 @@ static void handle_control_message(struct virtio_device *vdev,
|
|||
break;
|
||||
case VIRTIO_CONSOLE_RESIZE: {
|
||||
struct {
|
||||
__u16 rows;
|
||||
__u16 cols;
|
||||
__virtio16 cols;
|
||||
__virtio16 rows;
|
||||
} size;
|
||||
|
||||
if (!is_console_port(port))
|
||||
|
|
@ -1585,7 +1585,8 @@ static void handle_control_message(struct virtio_device *vdev,
|
|||
|
||||
memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt),
|
||||
sizeof(size));
|
||||
set_console_size(port, size.rows, size.cols);
|
||||
set_console_size(port, virtio16_to_cpu(vdev, size.rows),
|
||||
virtio16_to_cpu(vdev, size.cols));
|
||||
|
||||
port->cons.hvc->irq_requested = 1;
|
||||
resize_console(port);
|
||||
|
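The corrected struct reads the resize payload as two 16-bit fields, columns before rows, and converts them with virtio16_to_cpu() instead of trusting native endianness. A standalone model of that parsing, assuming a modern (little-endian) device and an example 80x25 payload:

/* Standalone model (not the driver): parse a VIRTIO_CONSOLE_RESIZE payload as
 * little-endian cols then rows; le16() plays the role of virtio16_to_cpu(). */
#include <stdio.h>
#include <stdint.h>

static uint16_t le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | p[1] << 8);
}

int main(void)
{
	const uint8_t payload[4] = { 80, 0, 25, 0 };	/* cols = 80, rows = 25 */
	uint16_t cols = le16(&payload[0]);
	uint16_t rows = le16(&payload[2]);

	printf("console resized to %u cols x %u rows\n", cols, rows);
	return 0;
}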
|
|
|||
|
|
@ -758,7 +758,7 @@ static void jr3_pci_detach(struct comedi_device *dev)
|
|||
struct jr3_pci_dev_private *devpriv = dev->private;
|
||||
|
||||
if (devpriv)
|
||||
timer_delete_sync(&devpriv->timer);
|
||||
timer_shutdown_sync(&devpriv->timer);
|
||||
|
||||
comedi_pci_detach(dev);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -76,7 +76,7 @@ config ARM_VEXPRESS_SPC_CPUFREQ
|
|||
config ARM_BRCMSTB_AVS_CPUFREQ
|
||||
tristate "Broadcom STB AVS CPUfreq driver"
|
||||
depends on (ARCH_BRCMSTB && !ARM_SCMI_CPUFREQ) || COMPILE_TEST
|
||||
default y
|
||||
default y if ARCH_BRCMSTB && !ARM_SCMI_CPUFREQ
|
||||
help
|
||||
Some Broadcom STB SoCs use a co-processor running proprietary firmware
|
||||
("AVS") to handle voltage and frequency scaling. This driver provides
|
||||
|
|
@ -88,7 +88,7 @@ config ARM_HIGHBANK_CPUFREQ
|
|||
tristate "Calxeda Highbank-based"
|
||||
depends on ARCH_HIGHBANK || COMPILE_TEST
|
||||
depends on CPUFREQ_DT && REGULATOR && PL320_MBOX
|
||||
default m
|
||||
default m if ARCH_HIGHBANK
|
||||
help
|
||||
This adds the CPUFreq driver for Calxeda Highbank SoC
|
||||
based boards.
|
||||
|
|
@ -133,7 +133,7 @@ config ARM_MEDIATEK_CPUFREQ
|
|||
config ARM_MEDIATEK_CPUFREQ_HW
|
||||
tristate "MediaTek CPUFreq HW driver"
|
||||
depends on ARCH_MEDIATEK || COMPILE_TEST
|
||||
default m
|
||||
default m if ARCH_MEDIATEK
|
||||
help
|
||||
Support for the CPUFreq HW driver.
|
||||
Some MediaTek chipsets have a HW engine to offload the steps
|
||||
|
|
@ -181,7 +181,7 @@ config ARM_RASPBERRYPI_CPUFREQ
|
|||
config ARM_S3C64XX_CPUFREQ
|
||||
bool "Samsung S3C64XX"
|
||||
depends on CPU_S3C6410 || COMPILE_TEST
|
||||
default y
|
||||
default CPU_S3C6410
|
||||
help
|
||||
This adds the CPUFreq driver for Samsung S3C6410 SoC.
|
||||
|
||||
|
|
@ -190,7 +190,7 @@ config ARM_S3C64XX_CPUFREQ
|
|||
config ARM_S5PV210_CPUFREQ
|
||||
bool "Samsung S5PV210 and S5PC110"
|
||||
depends on CPU_S5PV210 || COMPILE_TEST
|
||||
default y
|
||||
default CPU_S5PV210
|
||||
help
|
||||
This adds the CPUFreq driver for Samsung S5PV210 and
|
||||
S5PC110 SoCs.
|
||||
|
|
@ -214,7 +214,7 @@ config ARM_SCMI_CPUFREQ
|
|||
config ARM_SPEAR_CPUFREQ
|
||||
bool "SPEAr CPUFreq support"
|
||||
depends on PLAT_SPEAR || COMPILE_TEST
|
||||
default y
|
||||
default PLAT_SPEAR
|
||||
help
|
||||
This adds the CPUFreq driver support for SPEAr SOCs.
|
||||
|
||||
|
|
@ -233,7 +233,7 @@ config ARM_TEGRA20_CPUFREQ
|
|||
tristate "Tegra20/30 CPUFreq support"
|
||||
depends on ARCH_TEGRA || COMPILE_TEST
|
||||
depends on CPUFREQ_DT
|
||||
default y
|
||||
default ARCH_TEGRA
|
||||
help
|
||||
This adds the CPUFreq driver support for Tegra20/30 SOCs.
|
||||
|
||||
|
|
@ -241,7 +241,7 @@ config ARM_TEGRA124_CPUFREQ
|
|||
bool "Tegra124 CPUFreq support"
|
||||
depends on ARCH_TEGRA || COMPILE_TEST
|
||||
depends on CPUFREQ_DT
|
||||
default y
|
||||
default ARCH_TEGRA
|
||||
help
|
||||
This adds the CPUFreq driver support for Tegra124 SOCs.
|
||||
|
||||
|
|
@ -256,14 +256,14 @@ config ARM_TEGRA194_CPUFREQ
|
|||
tristate "Tegra194 CPUFreq support"
|
||||
depends on ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC || (64BIT && COMPILE_TEST)
|
||||
depends on TEGRA_BPMP
|
||||
default y
|
||||
default ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC
|
||||
help
|
||||
This adds CPU frequency driver support for Tegra194 SOCs.
|
||||
|
||||
config ARM_TI_CPUFREQ
|
||||
bool "Texas Instruments CPUFreq support"
|
||||
depends on ARCH_OMAP2PLUS || ARCH_K3 || COMPILE_TEST
|
||||
default y
|
||||
default ARCH_OMAP2PLUS || ARCH_K3
|
||||
help
|
||||
This driver enables valid OPPs on the running platform based on
|
||||
values contained within the SoC in use. Enable this in order to
|
||||
|
|
|
|||
|
|
@ -134,11 +134,17 @@ static const struct of_device_id apple_soc_cpufreq_of_match[] __maybe_unused = {
|
|||
|
||||
static unsigned int apple_soc_cpufreq_get_rate(unsigned int cpu)
|
||||
{
|
||||
struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
|
||||
struct apple_cpu_priv *priv = policy->driver_data;
|
||||
struct cpufreq_policy *policy;
|
||||
struct apple_cpu_priv *priv;
|
||||
struct cpufreq_frequency_table *p;
|
||||
unsigned int pstate;
|
||||
|
||||
policy = cpufreq_cpu_get_raw(cpu);
|
||||
if (unlikely(!policy))
|
||||
return 0;
|
||||
|
||||
priv = policy->driver_data;
|
||||
|
||||
if (priv->info->cur_pstate_mask) {
|
||||
u32 reg = readl_relaxed(priv->reg_base + APPLE_DVFS_STATUS);
|
||||
|
||||
|
|
|
|||
|
|
@ -747,7 +747,7 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
|
|||
int ret;
|
||||
|
||||
if (!policy)
|
||||
return -ENODEV;
|
||||
return 0;
|
||||
|
||||
cpu_data = policy->driver_data;
|
||||
|
||||
|
|
|
|||
|
|
@ -175,6 +175,7 @@ static const struct of_device_id blocklist[] __initconst = {
|
|||
{ .compatible = "qcom,sm8350", },
|
||||
{ .compatible = "qcom,sm8450", },
|
||||
{ .compatible = "qcom,sm8550", },
|
||||
{ .compatible = "qcom,sm8650", },
|
||||
|
||||
{ .compatible = "st,stih407", },
|
||||
{ .compatible = "st,stih410", },
|
||||
|
|
|
|||
|
|
@ -37,11 +37,17 @@ static struct cpufreq_driver scmi_cpufreq_driver;
|
|||
|
||||
static unsigned int scmi_cpufreq_get_rate(unsigned int cpu)
|
||||
{
|
||||
struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
|
||||
struct scmi_data *priv = policy->driver_data;
|
||||
struct cpufreq_policy *policy;
|
||||
struct scmi_data *priv;
|
||||
unsigned long rate;
|
||||
int ret;
|
||||
|
||||
policy = cpufreq_cpu_get_raw(cpu);
|
||||
if (unlikely(!policy))
|
||||
return 0;
|
||||
|
||||
priv = policy->driver_data;
|
||||
|
||||
ret = perf_ops->freq_get(ph, priv->domain_id, &rate, false);
|
||||
if (ret)
|
||||
return 0;
|
||||
|
|
|
|||
|
|
@ -29,9 +29,16 @@ static struct scpi_ops *scpi_ops;
|
|||
|
||||
static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)
|
||||
{
|
||||
struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
|
||||
struct scpi_data *priv = policy->driver_data;
|
||||
unsigned long rate = clk_get_rate(priv->clk);
|
||||
struct cpufreq_policy *policy;
|
||||
struct scpi_data *priv;
|
||||
unsigned long rate;
|
||||
|
||||
policy = cpufreq_cpu_get_raw(cpu);
|
||||
if (unlikely(!policy))
|
||||
return 0;
|
||||
|
||||
priv = policy->driver_data;
|
||||
rate = clk_get_rate(priv->clk);
|
||||
|
||||
return rate / 1000;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -194,7 +194,9 @@ static int sun50i_cpufreq_get_efuse(void)
|
|||
struct nvmem_cell *speedbin_nvmem;
|
||||
const struct of_device_id *match;
|
||||
struct device *cpu_dev;
|
||||
u32 *speedbin;
|
||||
void *speedbin_ptr;
|
||||
u32 speedbin = 0;
|
||||
size_t len;
|
||||
int ret;
|
||||
|
||||
cpu_dev = get_cpu_device(0);
|
||||
|
|
@ -217,14 +219,18 @@ static int sun50i_cpufreq_get_efuse(void)
|
|||
return dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem),
|
||||
"Could not get nvmem cell\n");
|
||||
|
||||
speedbin = nvmem_cell_read(speedbin_nvmem, NULL);
|
||||
speedbin_ptr = nvmem_cell_read(speedbin_nvmem, &len);
|
||||
nvmem_cell_put(speedbin_nvmem);
|
||||
if (IS_ERR(speedbin))
|
||||
return PTR_ERR(speedbin);
|
||||
if (IS_ERR(speedbin_ptr))
|
||||
return PTR_ERR(speedbin_ptr);
|
||||
|
||||
ret = opp_data->efuse_xlate(*speedbin);
|
||||
if (len <= 4)
|
||||
memcpy(&speedbin, speedbin_ptr, len);
|
||||
speedbin = le32_to_cpu(speedbin);
|
||||
|
||||
kfree(speedbin);
|
||||
ret = opp_data->efuse_xlate(speedbin);
|
||||
|
||||
kfree(speedbin_ptr);
|
||||
|
||||
return ret;
|
||||
};
|
||||
|
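The fix above reads the efuse cell into a zeroed u32 via memcpy() instead of dereferencing the nvmem buffer as a u32, so cells shorter than four bytes no longer read past their data. A standalone model of the pattern, with the cell contents invented for the example:

/* Standalone model (not kernel code): copy only the bytes the cell holds into a
 * zeroed 32-bit value, then interpret it as little-endian (le32_to_cpu). */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

static uint32_t read_speedbin(const uint8_t *cell, size_t len)
{
	uint32_t speedbin = 0;
	uint8_t b[4];

	if (len <= 4)
		memcpy(&speedbin, cell, len);	/* never overruns the cell */

	memcpy(b, &speedbin, 4);		/* le32_to_cpu() equivalent */
	return b[0] | b[1] << 8 | (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

int main(void)
{
	const uint8_t cell[2] = { 0x02, 0x00 };	/* a 2-byte efuse cell */

	printf("speedbin = %u\n", read_speedbin(cell, sizeof(cell)));
	return 0;
}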
|
|
@@ -163,6 +163,12 @@ static int atmel_sha204a_probe(struct i2c_client *client)
    i2c_priv->hwrng.name = dev_name(&client->dev);
    i2c_priv->hwrng.read = atmel_sha204a_rng_read;

    /*
     * According to review by Bill Cox [1], this HWRNG has very low entropy.
     * [1] https://www.metzdowd.com/pipermail/cryptography/2014-December/023858.html
     */
    i2c_priv->hwrng.quality = 1;

    ret = devm_hwrng_register(&client->dev, &i2c_priv->hwrng);
    if (ret)
        dev_warn(&client->dev, "failed to register RNG (%d)\n", ret);
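
The quality field tells the hwrng core roughly how many bits of entropy to credit per 1024 bits read from the device, so quality = 1 keeps the SHA204A usable as an input source while crediting it with almost no entropy. The crediting arithmetic below follows my reading of the hwrng core and should be taken as an approximation, with invented function names:

#include <stdio.h>

/* Rough mirror of the hwrng crediting rule:
 * entropy_bits ~= bytes_read * 8 * quality / 1024 */
static unsigned long credited_entropy_bits(unsigned long bytes_read,
                                           unsigned int quality)
{
    return bytes_read * 8 * quality / 1024;
}

int main(void)
{
    /* 32 bytes from a device with quality = 1 -> 0 bits credited */
    printf("quality 1:    %lu bits\n", credited_entropy_bits(32, 1));
    /* the same read from a fully trusted source (quality = 1024) -> 256 bits */
    printf("quality 1024: %lu bits\n", credited_entropy_bits(32, 1024));
    return 0;
}
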
@@ -119,7 +119,7 @@ int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port,

int cxl_ras_init(void);
void cxl_ras_exit(void);
int cxl_gpf_port_setup(struct device *dport_dev, struct cxl_port *port);
int cxl_gpf_port_setup(struct cxl_dport *dport);
int cxl_acpi_get_extended_linear_cache_size(struct resource *backing_res,
                                            int nid, resource_size_t *size);
@@ -528,13 +528,13 @@ static void *cxlctl_set_feature(struct cxl_features_state *cxlfs,
    rc = cxl_set_feature(cxl_mbox, &feat_in->uuid,
                         feat_in->version, feat_in->feat_data,
                         data_size, flags, offset, &return_code);
    *out_len = sizeof(*rpc_out);
    if (rc) {
        rpc_out->retval = return_code;
        return no_free_ptr(rpc_out);
    }

    rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;
    *out_len = sizeof(*rpc_out);

    return no_free_ptr(rpc_out);
}
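
The cxlctl_set_feature() fix is purely about ordering: *out_len is now set before the early error return, so the caller sees a valid output size on both the failure and the success path. A small stand-alone sketch of that rule, with invented names:

#include <stdio.h>
#include <stddef.h>

struct reply { int retval; };

/* Fill *out_len before any return so every exit path reports a usable size. */
static struct reply *run_command(int simulate_error, size_t *out_len,
                                 struct reply *storage)
{
    *out_len = sizeof(*storage);    /* set once, up front */

    if (simulate_error) {
        storage->retval = -1;       /* device-specific error code */
        return storage;
    }

    storage->retval = 0;            /* success */
    return storage;
}

int main(void)
{
    struct reply buf;
    size_t len = 0;

    run_command(1, &len, &buf);
    printf("error path: retval=%d, out_len=%zu\n", buf.retval, len);

    run_command(0, &len, &buf);
    printf("ok path:    retval=%d, out_len=%zu\n", buf.retval, len);
    return 0;
}
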
@@ -677,7 +677,7 @@ static void free_memdev_fwctl(void *_fwctl_dev)
    fwctl_put(fwctl_dev);
}

int devm_cxl_setup_fwctl(struct cxl_memdev *cxlmd)
int devm_cxl_setup_fwctl(struct device *host, struct cxl_memdev *cxlmd)
{
    struct cxl_dev_state *cxlds = cxlmd->cxlds;
    struct cxl_features_state *cxlfs;

@@ -700,7 +700,7 @@ int devm_cxl_setup_fwctl(struct cxl_memdev *cxlmd)
    if (rc)
        return rc;

    return devm_add_action_or_reset(&cxlmd->dev, free_memdev_fwctl,
    return devm_add_action_or_reset(host, free_memdev_fwctl,
                                    no_free_ptr(fwctl_dev));
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_fwctl, "CXL");
@@ -1072,14 +1072,20 @@ int cxl_pci_get_bandwidth(struct pci_dev *pdev, struct access_coordinate *c)
#define GPF_TIMEOUT_BASE_MAX 2
#define GPF_TIMEOUT_SCALE_MAX 7 /* 10 seconds */

u16 cxl_gpf_get_dvsec(struct device *dev, bool is_port)
u16 cxl_gpf_get_dvsec(struct device *dev)
{
    struct pci_dev *pdev;
    bool is_port = true;
    u16 dvsec;

    if (!dev_is_pci(dev))
        return 0;

    dvsec = pci_find_dvsec_capability(to_pci_dev(dev), PCI_VENDOR_ID_CXL,
    pdev = to_pci_dev(dev);
    if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ENDPOINT)
        is_port = false;

    dvsec = pci_find_dvsec_capability(pdev, PCI_VENDOR_ID_CXL,
                is_port ? CXL_DVSEC_PORT_GPF : CXL_DVSEC_DEVICE_GPF);
    if (!dvsec)
        dev_warn(dev, "%s GPF DVSEC not present\n",
@@ -1128,26 +1134,24 @@ static int update_gpf_port_dvsec(struct pci_dev *pdev, int dvsec, int phase)
    return rc;
}

int cxl_gpf_port_setup(struct device *dport_dev, struct cxl_port *port)
int cxl_gpf_port_setup(struct cxl_dport *dport)
{
    struct pci_dev *pdev;

    if (!port)
    if (!dport)
        return -EINVAL;

    if (!port->gpf_dvsec) {
    if (!dport->gpf_dvsec) {
        struct pci_dev *pdev;
        int dvsec;

        dvsec = cxl_gpf_get_dvsec(dport_dev, true);
        dvsec = cxl_gpf_get_dvsec(dport->dport_dev);
        if (!dvsec)
            return -EINVAL;

        port->gpf_dvsec = dvsec;
        dport->gpf_dvsec = dvsec;
        pdev = to_pci_dev(dport->dport_dev);
        update_gpf_port_dvsec(pdev, dport->gpf_dvsec, 1);
        update_gpf_port_dvsec(pdev, dport->gpf_dvsec, 2);
    }

    pdev = to_pci_dev(dport_dev);
    update_gpf_port_dvsec(pdev, port->gpf_dvsec, 1);
    update_gpf_port_dvsec(pdev, port->gpf_dvsec, 2);

    return 0;
}
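
The GPF rework caches the discovered DVSEC offset in the dport itself and performs the capability walk and one-time timeout programming only the first time a given dport is set up. A generic sketch of that memoize-in-the-object pattern, using hypothetical types and a stand-in for the capability scan:

#include <stdio.h>

struct dport {
    int id;
    int gpf_dvsec;  /* 0 = not discovered yet */
};

/* Stand-in for the (possibly expensive) capability-list walk. */
static int find_gpf_dvsec(const struct dport *dport)
{
    printf("scanning capabilities of dport %d\n", dport->id);
    return 0xe0;    /* pretend offset */
}

static int gpf_port_setup(struct dport *dport)
{
    if (!dport)
        return -1;

    if (!dport->gpf_dvsec) {
        int dvsec = find_gpf_dvsec(dport);

        if (!dvsec)
            return -1;
        dport->gpf_dvsec = dvsec;   /* cache for later calls */
        /* one-time programming of the port would go here */
    }

    return 0;
}

int main(void)
{
    struct dport d = { .id = 3 };

    gpf_port_setup(&d);     /* scans */
    gpf_port_setup(&d);     /* cached, no second scan */
    return 0;
}
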
@@ -1678,7 +1678,7 @@ retry:
    if (rc && rc != -EBUSY)
        return rc;

    cxl_gpf_port_setup(dport_dev, port);
    cxl_gpf_port_setup(dport);

    /* Any more ports to add between this one and the root? */
    if (!dev_is_cxl_root_child(&port->dev))
@@ -581,7 +581,6 @@ resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri
    resource_size_t rcrb = ri->base;
    void __iomem *addr;
    u32 bar0, bar1;
    u16 cmd;
    u32 id;

    if (which == CXL_RCRB_UPSTREAM)

@@ -603,7 +602,6 @@ resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri
    }

    id = readl(addr + PCI_VENDOR_ID);
    cmd = readw(addr + PCI_COMMAND);
    bar0 = readl(addr + PCI_BASE_ADDRESS_0);
    bar1 = readl(addr + PCI_BASE_ADDRESS_1);
    iounmap(addr);

@@ -618,8 +616,6 @@ resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri
        dev_err(dev, "Failed to access Downstream Port RCRB\n");
        return CXL_RESOURCE_NONE;
    }
    if (!(cmd & PCI_COMMAND_MEMORY))
        return CXL_RESOURCE_NONE;
    /* The RCRB is a Memory Window, and the MEM_TYPE_1M bit is obsolete */
    if (bar0 & (PCI_BASE_ADDRESS_MEM_TYPE_1M | PCI_BASE_ADDRESS_SPACE_IO))
        return CXL_RESOURCE_NONE;
@@ -592,7 +592,6 @@ struct cxl_dax_region {
 * @cdat: Cached CDAT data
 * @cdat_available: Should a CDAT attribute be available in sysfs
 * @pci_latency: Upstream latency in picoseconds
 * @gpf_dvsec: Cached GPF port DVSEC
 */
struct cxl_port {
    struct device dev;

@@ -616,7 +615,6 @@ struct cxl_port {
    } cdat;
    bool cdat_available;
    long pci_latency;
    int gpf_dvsec;
};

/**

@@ -664,6 +662,7 @@ struct cxl_rcrb_info {
 * @regs: Dport parsed register blocks
 * @coord: access coordinates (bandwidth and latency performance attributes)
 * @link_latency: calculated PCIe downstream latency
 * @gpf_dvsec: Cached GPF port DVSEC
 */
struct cxl_dport {
    struct device *dport_dev;

@@ -675,6 +674,7 @@ struct cxl_dport {
    struct cxl_regs regs;
    struct access_coordinate coord[ACCESS_COORDINATE_MAX];
    long link_latency;
    int gpf_dvsec;
};

/**

@@ -910,6 +910,6 @@ bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port);
#define __mock static
#endif

u16 cxl_gpf_get_dvsec(struct device *dev, bool is_port);
u16 cxl_gpf_get_dvsec(struct device *dev);

#endif /* __CXL_H__ */
@@ -1018,7 +1018,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    if (rc)
        return rc;

    rc = devm_cxl_setup_fwctl(cxlmd);
    rc = devm_cxl_setup_fwctl(&pdev->dev, cxlmd);
    if (rc)
        dev_dbg(&pdev->dev, "No CXL FWCTL setup\n");
@@ -108,7 +108,7 @@ static void cxl_nvdimm_arm_dirty_shutdown_tracking(struct cxl_nvdimm *cxl_nvd)
        return;
    }

    if (!cxl_gpf_get_dvsec(cxlds->dev, false))
    if (!cxl_gpf_get_dvsec(cxlds->dev))
        return;

    if (cxl_get_dirty_count(mds, &count)) {
@@ -1224,22 +1224,28 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
    if (!svc->intel_svc_fcs) {
        dev_err(dev, "failed to allocate %s device\n", INTEL_FCS);
        ret = -ENOMEM;
        goto err_unregister_dev;
        goto err_unregister_rsu_dev;
    }

    ret = platform_device_add(svc->intel_svc_fcs);
    if (ret) {
        platform_device_put(svc->intel_svc_fcs);
        goto err_unregister_dev;
        goto err_unregister_rsu_dev;
    }

    ret = of_platform_default_populate(dev_of_node(dev), NULL, dev);
    if (ret)
        goto err_unregister_fcs_dev;

    dev_set_drvdata(dev, svc);

    pr_info("Intel Service Layer Driver Initialized\n");

    return 0;

err_unregister_dev:
err_unregister_fcs_dev:
    platform_device_unregister(svc->intel_svc_fcs);
err_unregister_rsu_dev:
    platform_device_unregister(svc->stratix10_svc_rsu);
err_free_kfifo:
    kfifo_free(&controller->svc_fifo);
@@ -1253,6 +1259,8 @@ static void stratix10_svc_drv_remove(struct platform_device *pdev)
    struct stratix10_svc *svc = dev_get_drvdata(&pdev->dev);
    struct stratix10_svc_controller *ctrl = platform_get_drvdata(pdev);

    of_platform_depopulate(ctrl->dev);

    platform_device_unregister(svc->intel_svc_fcs);
    platform_device_unregister(svc->stratix10_svc_rsu);
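
The probe rework adds a new failure point (populating child OF devices) and renames the error labels so that each label unwinds exactly what was acquired before the jump, in reverse order. A compact stand-alone sketch of that goto-unwind convention with invented resource names:

#include <stdio.h>

static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
static void release(const char *what) { printf("release %s\n", what); }

/* Each label undoes everything acquired before the goto that reaches it. */
static int probe(int fail_at)
{
    int ret;

    ret = acquire("rsu device");
    if (ret)
        return ret;

    ret = fail_at == 1 ? -1 : acquire("fcs device");
    if (ret)
        goto err_unregister_rsu_dev;

    ret = fail_at == 2 ? -1 : acquire("child OF devices");
    if (ret)
        goto err_unregister_fcs_dev;

    return 0;

err_unregister_fcs_dev:
    release("fcs device");
err_unregister_rsu_dev:
    release("rsu device");
    return ret;
}

int main(void)
{
    probe(2);   /* fails after both devices exist; both get released */
    return 0;
}
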
@@ -43,6 +43,29 @@
#include <linux/dma-fence-array.h>
#include <linux/pci-p2pdma.h>

static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops;

/**
 * dma_buf_attach_adev - Helper to get adev of an attachment
 *
 * @attach: attachment
 *
 * Returns:
 * A struct amdgpu_device * if the attaching device is an amdgpu device or
 * partition, NULL otherwise.
 */
static struct amdgpu_device *dma_buf_attach_adev(struct dma_buf_attachment *attach)
{
    if (attach->importer_ops == &amdgpu_dma_buf_attach_ops) {
        struct drm_gem_object *obj = attach->importer_priv;
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

        return amdgpu_ttm_adev(bo->tbo.bdev);
    }

    return NULL;
}

/**
 * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
 *
@@ -54,11 +77,13 @@
static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
                                 struct dma_buf_attachment *attach)
{
    struct amdgpu_device *attach_adev = dma_buf_attach_adev(attach);
    struct drm_gem_object *obj = dmabuf->priv;
    struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
    struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

    if (pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
    if (!amdgpu_dmabuf_is_xgmi_accessible(attach_adev, bo) &&
        pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
        attach->peer2peer = false;

    amdgpu_vm_bo_update_shared(bo);
@@ -77,22 +102,32 @@ static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
{
    struct dma_buf *dmabuf = attach->dmabuf;
    struct amdgpu_bo *bo = gem_to_amdgpu_bo(dmabuf->priv);
    u32 domains = bo->preferred_domains;
    u32 domains = bo->allowed_domains;

    dma_resv_assert_held(dmabuf->resv);

    /*
     * Try pinning into VRAM to allow P2P with RDMA NICs without ODP
    /* Try pinning into VRAM to allow P2P with RDMA NICs without ODP
     * support if all attachments can do P2P. If any attachment can't do
     * P2P just pin into GTT instead.
     *
     * To avoid with conflicting pinnings between GPUs and RDMA when move
     * notifiers are disabled, only allow pinning in VRAM when move
     * notiers are enabled.
     */
    list_for_each_entry(attach, &dmabuf->attachments, node)
        if (!attach->peer2peer)
            domains &= ~AMDGPU_GEM_DOMAIN_VRAM;
    if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
        domains &= ~AMDGPU_GEM_DOMAIN_VRAM;
    } else {
        list_for_each_entry(attach, &dmabuf->attachments, node)
            if (!attach->peer2peer)
                domains &= ~AMDGPU_GEM_DOMAIN_VRAM;
    }

    if (domains & AMDGPU_GEM_DOMAIN_VRAM)
        bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

    if (WARN_ON(!domains))
        return -EINVAL;

    return amdgpu_bo_pin(bo, domains);
}
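
The pin path now starts from every allowed domain and strips VRAM either when move notifiers are compiled out or when any importer cannot do P2P; whatever survives the filtering decides where the buffer may be pinned. A small user-space sketch of that whittle-down-a-bitmask approach, with made-up flag and type names:

#include <stdio.h>
#include <stdbool.h>

#define DOMAIN_VRAM (1u << 0)
#define DOMAIN_GTT  (1u << 1)

struct attachment { bool peer2peer; };

static unsigned int pick_pin_domains(unsigned int allowed,
                                     const struct attachment *att, int n,
                                     bool move_notify_enabled)
{
    unsigned int domains = allowed;

    if (!move_notify_enabled) {
        /* without move notifiers, never pin into VRAM */
        domains &= ~DOMAIN_VRAM;
    } else {
        /* any importer that can't do P2P also rules VRAM out */
        for (int i = 0; i < n; i++)
            if (!att[i].peer2peer)
                domains &= ~DOMAIN_VRAM;
    }

    return domains; /* 0 means no valid placement: caller must fail */
}

int main(void)
{
    struct attachment atts[] = { { .peer2peer = true }, { .peer2peer = false } };
    unsigned int d = pick_pin_domains(DOMAIN_VRAM | DOMAIN_GTT, atts, 2, true);

    printf("pin domains: %s%s\n", d & DOMAIN_VRAM ? "VRAM " : "",
           d & DOMAIN_GTT ? "GTT" : "");
    return 0;
}
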
@@ -470,6 +505,9 @@ bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
    struct drm_gem_object *obj = &bo->tbo.base;
    struct drm_gem_object *gobj;

    if (!adev)
        return false;

    if (obj->import_attach) {
        struct dma_buf *dma_buf = obj->import_attach->dmabuf;
@@ -1920,26 +1920,6 @@ static enum dmub_ips_disable_type dm_get_default_ips_mode(
    switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
    case IP_VERSION(3, 5, 0):
    case IP_VERSION(3, 6, 0):
        /*
         * On DCN35 systems with Z8 enabled, it's possible for IPS2 + Z8 to
         * cause a hard hang. A fix exists for newer PMFW.
         *
         * As a workaround, for non-fixed PMFW, force IPS1+RCG as the deepest
         * IPS state in all cases, except for s0ix and all displays off (DPMS),
         * where IPS2 is allowed.
         *
         * When checking pmfw version, use the major and minor only.
         */
        if ((adev->pm.fw_version & 0x00FFFF00) < 0x005D6300)
            ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
        else if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(11, 5, 0))
            /*
             * Other ASICs with DCN35 that have residency issues with
             * IPS2 in idle.
             * We want them to use IPS2 only in display off cases.
             */
            ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
        break;
    case IP_VERSION(3, 5, 1):
        ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
        break;
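
The removed workaround gated itself on the PMFW version by masking off the low byte and comparing only the major and minor fields (fw_version & 0x00FFFF00). Assuming the packed 0x00MMmmbb layout that the mask implies (major, minor, build), here is a quick illustration of why the mask matters when the low byte is noise for the comparison:

#include <stdio.h>
#include <stdint.h>

#define MAJOR_MINOR_MASK 0x00FFFF00u

static int fw_older_than(uint32_t fw_version, uint32_t threshold)
{
    return (fw_version & MAJOR_MINOR_MASK) < (threshold & MAJOR_MINOR_MASK);
}

int main(void)
{
    uint32_t fixed_in = 0x005D6300; /* major 0x5D, minor 0x63, any build */

    printf("%d\n", fw_older_than(0x005D62FF, fixed_in)); /* 1: needs workaround */
    printf("%d\n", fw_older_than(0x005D6301, fixed_in)); /* 0: fix present */
    return 0;
}
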
@@ -3355,16 +3335,16 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
    for (k = 0; k < dc_state->stream_count; k++) {
        bundle->stream_update.stream = dc_state->streams[k];

        for (m = 0; m < dc_state->stream_status->plane_count; m++) {
        for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
            bundle->surface_updates[m].surface =
                dc_state->stream_status->plane_states[m];
                dc_state->stream_status[k].plane_states[m];
            bundle->surface_updates[m].surface->force_full_update =
                true;
        }

        update_planes_and_stream_adapter(dm->dc,
                                         UPDATE_TYPE_FULL,
                                         dc_state->stream_status->plane_count,
                                         dc_state->stream_status[k].plane_count,
                                         dc_state->streams[k],
                                         &bundle->stream_update,
                                         bundle->surface_updates);
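
The dm_gpureset_commit_state() fix is a classic pointer-versus-array slip: stream_status parallels streams, and dereferencing it without [k] always reads element 0. A minimal reproduction of the mistake next to the fix, using invented types:

#include <stdio.h>

struct status { int plane_count; };

int main(void)
{
    struct status stream_status[] = { { .plane_count = 1 }, { .plane_count = 3 } };
    int stream_count = 2;

    for (int k = 0; k < stream_count; k++) {
        /* buggy: stream_status->plane_count is stream_status[0].plane_count */
        printf("stream %d: buggy=%d fixed=%d\n", k,
               stream_status->plane_count,
               stream_status[k].plane_count);
    }
    return 0;
}
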
@@ -6521,12 +6501,12 @@ decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
                                        const struct drm_display_mode *native_mode,
                                        bool scale_enabled)
{
    if (scale_enabled) {
        copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
    } else if (native_mode->clock == drm_mode->clock &&
               native_mode->htotal == drm_mode->htotal &&
               native_mode->vtotal == drm_mode->vtotal) {
        copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
    if (scale_enabled || (
        native_mode->clock == drm_mode->clock &&
        native_mode->htotal == drm_mode->htotal &&
        native_mode->vtotal == drm_mode->vtotal)) {
        if (native_mode->crtc_clock)
            copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
    } else {
        /* no scaling nor amdgpu inserted, no need to patch */
    }
@@ -11043,6 +11023,9 @@ static bool should_reset_plane(struct drm_atomic_state *state,
        state->allow_modeset)
        return true;

    if (amdgpu_in_reset(adev) && state->allow_modeset)
        return true;

    /* Exit early if we know that we're adding or removing the plane. */
    if (old_plane_state->crtc != new_plane_state->crtc)
        return true;
@@ -918,7 +918,7 @@ dm_helpers_probe_acpi_edid(void *data, u8 *buf, unsigned int block, size_t len)
{
    struct drm_connector *connector = data;
    struct acpi_device *acpidev = ACPI_COMPANION(connector->dev->dev);
    unsigned char start = block * EDID_LENGTH;
    unsigned short start = block * EDID_LENGTH;
    struct edid *edid;
    int r;
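
Widening start matters because an EDID block is 128 bytes: for any block index above 1 the byte offset no longer fits in an unsigned char and silently wraps. A two-line demonstration of the truncation the type change avoids:

#include <stdio.h>

#define EDID_LENGTH 128

int main(void)
{
    unsigned int block = 2;                     /* third EDID block */
    unsigned char  bad  = block * EDID_LENGTH;  /* 256 wraps to 0 */
    unsigned short good = block * EDID_LENGTH;  /* 256 preserved */

    printf("unsigned char: %u, unsigned short: %u\n", bad, good);
    return 0;
}
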
@@ -195,9 +195,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
    .dcn_downspread_percent = 0.5,
    .gpuvm_min_page_size_bytes = 4096,
    .hostvm_min_page_size_bytes = 4096,
    .do_urgent_latency_adjustment = 0,
    .do_urgent_latency_adjustment = 1,
    .urgent_latency_adjustment_fabric_clock_component_us = 0,
    .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
    .urgent_latency_adjustment_fabric_clock_reference_mhz = 3000,
};

void dcn35_build_wm_range_table_fpu(struct clk_mgr *clk_mgr)
@@ -35,6 +35,17 @@
#define DC_LOGGER \
    link->ctx->logger

static void get_default_8b_10b_lttpr_aux_rd_interval(
        union training_aux_rd_interval *training_rd_interval)
{
    /* LTTPR are required to program DPCD 0000Eh to 0x4 (16ms) upon AUX
     * read reply to this register. Since old sinks with DPCD rev 1.1
     * and earlier may not support this register, assume the mandatory
     * value is programmed by the LTTPR to avoid AUX timeout issues.
     */
    training_rd_interval->raw = 0x4;
}

static int32_t get_cr_training_aux_rd_interval(struct dc_link *link,
        const struct dc_link_settings *link_settings,
        enum lttpr_mode lttpr_mode)
@@ -43,17 +54,22 @@ static int32_t get_cr_training_aux_rd_interval(struct dc_link *link,
    uint32_t wait_in_micro_secs = 100;

    memset(&training_rd_interval, 0, sizeof(training_rd_interval));
    if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING &&
        link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
        core_link_read_dpcd(
                link,
                DP_TRAINING_AUX_RD_INTERVAL,
                (uint8_t *)&training_rd_interval,
                sizeof(training_rd_interval));
        if (lttpr_mode != LTTPR_MODE_NON_TRANSPARENT)
            wait_in_micro_secs = 400;
        if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
            wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
    if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
        if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12)
            core_link_read_dpcd(
                    link,
                    DP_TRAINING_AUX_RD_INTERVAL,
                    (uint8_t *)&training_rd_interval,
                    sizeof(training_rd_interval));
        else if (dp_is_lttpr_present(link))
            get_default_8b_10b_lttpr_aux_rd_interval(&training_rd_interval);

        if (training_rd_interval.raw != 0) {
            if (lttpr_mode != LTTPR_MODE_NON_TRANSPARENT)
                wait_in_micro_secs = 400;
            if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
                wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
        }
    }
    return wait_in_micro_secs;
}
@@ -71,13 +87,15 @@ static uint32_t get_eq_training_aux_rd_interval(
                DP_128B132B_TRAINING_AUX_RD_INTERVAL,
                (uint8_t *)&training_rd_interval,
                sizeof(training_rd_interval));
    } else if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING &&
               link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
        core_link_read_dpcd(
                link,
                DP_TRAINING_AUX_RD_INTERVAL,
                (uint8_t *)&training_rd_interval,
                sizeof(training_rd_interval));
    } else if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
        if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12)
            core_link_read_dpcd(
                    link,
                    DP_TRAINING_AUX_RD_INTERVAL,
                    (uint8_t *)&training_rd_interval,
                    sizeof(training_rd_interval));
        else if (dp_is_lttpr_present(link))
            get_default_8b_10b_lttpr_aux_rd_interval(&training_rd_interval);
    }

    switch (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) {
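
Both link-training hunks add the same fallback: when the sink's DPCD is too old to expose the training interval register but an LTTPR is present, assume the mandatory 16 ms value (raw 0x4) instead of leaving the interval at zero. A simplified read-or-assume-default sketch, not the DC implementation:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Returns the raw training interval: read it when the register exists,
 * otherwise fall back to the mandatory LTTPR value (0x4 == 16 ms). */
static uint8_t training_interval(bool dpcd_has_register, bool lttpr_present,
                                 uint8_t register_value)
{
    if (dpcd_has_register)
        return register_value;
    if (lttpr_present)
        return 0x4;     /* assume the value the LTTPR must program */
    return 0;           /* no info: caller keeps its default wait */
}

int main(void)
{
    printf("old sink + LTTPR: 0x%x\n", training_interval(false, true, 0));
    printf("new sink:         0x%x\n", training_interval(true, false, 0x2));
    return 0;
}
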