Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR (net-6.17-rc8).

Conflicts:

drivers/net/can/spi/hi311x.c
  6b69680847 ("can: hi311x: fix null pointer dereference when resuming from sleep before interface was enabled")
  27ce71e1ce ("net: WQ_PERCPU added to alloc_workqueue users")
https://lore.kernel.org/72ce7599-1b5b-464a-a5de-228ff9724701@kernel.org

net/smc/smc_loopback.c
drivers/dibs/dibs_loopback.c
  a35c04de25 ("net/smc: fix warning in smc_rx_splice() when calling get_page()")
  cc21191b58 ("dibs: Move data path to dibs layer")
https://lore.kernel.org/74368a5c-48ac-4f8e-a198-40ec1ed3cf5f@kernel.org

Adjacent changes:

drivers/net/dsa/lantiq/lantiq_gswip.c
  c0054b25e2 ("net: dsa: lantiq_gswip: move gswip_add_single_port_br() call to port_setup()")
  7a1eaef0a7 ("net: dsa: lantiq_gswip: support model-specific mac_select_pcs()")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski 2025-09-11 17:37:09 -07:00
commit 203e3beb73
220 changed files with 2022 additions and 811 deletions


@ -1,5 +1,6 @@
Alan Cox <alan@lxorguk.ukuu.org.uk>
Alan Cox <root@hraefn.swansea.linux.org.uk>
Alyssa Rosenzweig <alyssa@rosenzweig.io>
Christoph Hellwig <hch@lst.de>
Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Marc Gonzalez <marc.w.gonzalez@free.fr>


@ -623,6 +623,7 @@ Paulo Alcantara <pc@manguebit.org> <palcantara@suse.com>
Paulo Alcantara <pc@manguebit.org> <pc@manguebit.com>
Pavankumar Kondeti <quic_pkondeti@quicinc.com> <pkondeti@codeaurora.org>
Peter A Jonsson <pj@ludd.ltu.se>
Peter Hilber <peter.hilber@oss.qualcomm.com> <quic_philber@quicinc.com>
Peter Oruba <peter.oruba@amd.com>
Peter Oruba <peter@oruba.de>
Pierre-Louis Bossart <pierre-louis.bossart@linux.dev> <pierre-louis.bossart@linux.intel.com>


@ -2293,7 +2293,7 @@ delayed_register
notice the need.
skip_validation
Skip unit descriptor validation (default: no).
The option is used to ignores the validation errors with the hexdump
The option is used to ignore the validation errors with the hexdump
of the unit descriptor instead of a driver probe error, so that we
can check its details.
quirk_flags


@ -1845,7 +1845,6 @@ S: Odd fixes
F: drivers/input/mouse/bcm5974.c
APPLE PCIE CONTROLLER DRIVER
M: Alyssa Rosenzweig <alyssa@rosenzweig.io>
M: Marc Zyngier <maz@kernel.org>
L: linux-pci@vger.kernel.org
S: Maintained
@ -2364,7 +2363,6 @@ F: sound/soc/codecs/ssm3515.c
ARM/APPLE MACHINE SUPPORT
M: Sven Peter <sven@kernel.org>
M: Janne Grunau <j@jannau.net>
R: Alyssa Rosenzweig <alyssa@rosenzweig.io>
R: Neal Gompa <neal@gompa.dev>
L: asahi@lists.linux.dev
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@ -22074,6 +22072,7 @@ F: drivers/infiniband/ulp/rtrs/
RUNTIME VERIFICATION (RV)
M: Steven Rostedt <rostedt@goodmis.org>
M: Gabriele Monaco <gmonaco@redhat.com>
L: linux-trace-kernel@vger.kernel.org
S: Maintained
F: Documentation/trace/rv/
@ -26819,7 +26818,7 @@ F: drivers/nvdimm/nd_virtio.c
F: drivers/nvdimm/virtio_pmem.c
VIRTIO RTC DRIVER
M: Peter Hilber <quic_philber@quicinc.com>
M: Peter Hilber <peter.hilber@oss.qualcomm.com>
L: virtualization@lists.linux.dev
S: Maintained
F: drivers/virtio/virtio_rtc_*


@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 17
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION = -rc7
NAME = Baby Opossum Posse
# *DOCUMENTATION*


@ -218,7 +218,7 @@
&usbphy {
usb0_id_det-gpios = <&pio 7 4 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PH4 */
usb0_vbus_det-gpios = <&pio 7 5 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PH5 */
usb0_vbus-supply = <&reg_usb0_vbus>;
usb0_vbus-supply = <&reg_usb0_vbus>;
usb1_vbus-supply = <&reg_usb1_vbus>;
usb2_vbus-supply = <&reg_usb2_vbus>;
status = "okay";


@ -82,7 +82,7 @@
};
&ehci0 {
status = "okay";
status = "okay";
};
&mmc1 {


@ -705,7 +705,7 @@
};
/omit-if-no-ref/
uart2_rts_cts_pi_pins: uart2-rts-cts-pi-pins{
uart2_rts_cts_pi_pins: uart2-rts-cts-pi-pins {
pins = "PI16", "PI17";
function = "uart2";
};


@ -29,7 +29,7 @@
clk_can0: clock-can0 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <40000000>;
clock-frequency = <40000000>;
};
gpio-keys {


@ -66,8 +66,10 @@
mdio0 {
#address-cells = <1>;
#size-cells = <0>;
phy0: ethernet-phy@0 {
reg = <0>;
compatible = "snps,dwmac-mdio";
phy0: ethernet-phy@4 {
reg = <4>;
rxd0-skew-ps = <0>;
rxd1-skew-ps = <0>;
rxd2-skew-ps = <0>;


@ -119,7 +119,7 @@
"Out Jack", "HPL",
"Out Jack", "HPR",
"AIN1L", "In Jack",
"AIN1L", "In Jack";
"AIN1R", "In Jack";
status = "okay";
simple-audio-card,dai-link@0 {


@ -38,7 +38,7 @@
simple-audio-card,mclk-fs = <256>;
simple-audio-card,cpu {
sound-dai = <&audio0 0>;
sound-dai = <&audio0>;
};
simple-audio-card,codec {


@ -242,7 +242,7 @@ choice
config VF_USE_PIT_TIMER
bool "Use PIT timer"
select VF_PIT_TIMER
select NXP_PIT_TIMER
help
Use SoC Periodic Interrupt Timer (PIT) as clocksource


@ -298,7 +298,7 @@
cpu-thermal {
polling-delay-passive = <250>;
polling-delay = <2000>;
thermal-sensors = <&tmu 0>;
thermal-sensors = <&tmu 1>;
trips {
cpu_alert0: trip0 {
temperature = <85000>;
@ -331,7 +331,7 @@
soc-thermal {
polling-delay-passive = <250>;
polling-delay = <2000>;
thermal-sensors = <&tmu 1>;
thermal-sensors = <&tmu 0>;
trips {
soc_alert0: trip0 {
temperature = <85000>;


@ -345,11 +345,13 @@
/* CPS Lane 1 - U32 */
sata-port@0 {
phys = <&cp1_comphy1 0>;
status = "okay";
};
/* CPS Lane 3 - U31 */
sata-port@1 {
phys = <&cp1_comphy3 1>;
status = "okay";
};
};


@ -152,11 +152,12 @@
/* SRDS #0 - SATA on M.2 connector */
&cp0_sata0 {
phys = <&cp0_comphy0 1>;
status = "okay";
/* only port 1 is available */
/delete-node/ sata-port@0;
sata-port@1 {
phys = <&cp0_comphy0 1>;
status = "okay";
};
};
/* microSD */


@ -563,11 +563,13 @@
/* SRDS #1 - SATA on M.2 (J44) */
&cp1_sata0 {
phys = <&cp1_comphy1 0>;
status = "okay";
/* only port 0 is available */
/delete-node/ sata-port@1;
sata-port@0 {
phys = <&cp1_comphy1 0>;
status = "okay";
};
};
&cp1_syscon0 {


@ -413,7 +413,13 @@
/* SRDS #0,#1,#2,#3 - PCIe */
&cp0_pcie0 {
num-lanes = <4>;
phys = <&cp0_comphy0 0>, <&cp0_comphy1 0>, <&cp0_comphy2 0>, <&cp0_comphy3 0>;
/*
* The mvebu-comphy driver does not currently know how to pass correct
* lane-count to ATF while configuring the serdes lanes.
* Rely on bootloader configuration only.
*
* phys = <&cp0_comphy0 0>, <&cp0_comphy1 0>, <&cp0_comphy2 0>, <&cp0_comphy3 0>;
*/
status = "okay";
};
@ -475,7 +481,13 @@
/* SRDS #0,#1 - PCIe */
&cp1_pcie0 {
num-lanes = <2>;
phys = <&cp1_comphy0 0>, <&cp1_comphy1 0>;
/*
* The mvebu-comphy driver does not currently know how to pass correct
* lane-count to ATF while configuring the serdes lanes.
* Rely on bootloader configuration only.
*
* phys = <&cp1_comphy0 0>, <&cp1_comphy1 0>;
*/
status = "okay";
};
@ -512,10 +524,9 @@
status = "okay";
/* only port 1 is available */
/delete-node/ sata-port@0;
sata-port@1 {
phys = <&cp1_comphy3 1>;
status = "okay";
};
};
@ -631,9 +642,8 @@
status = "okay";
/* only port 1 is available */
/delete-node/ sata-port@0;
sata-port@1 {
status = "okay";
phys = <&cp2_comphy3 1>;
};
};


@ -137,6 +137,14 @@
pinctrl-0 = <&ap_mmc0_pins>;
pinctrl-names = "default";
vqmmc-supply = <&v_1_8>;
/*
* Not stable in HS modes - phy needs "more calibration", so disable
* UHS (by preventing voltage switch), SDR104, SDR50 and DDR50 modes.
*/
no-1-8-v;
no-sd;
no-sdio;
non-removable;
status = "okay";
};


@ -731,6 +731,7 @@
spi-max-frequency = <104000000>;
spi-rx-bus-width = <4>;
spi-tx-bus-width = <1>;
vcc-supply = <&vcc_1v8_s3>;
};
};


@ -42,9 +42,8 @@
simple-audio-card,bitclock-master = <&masterdai>;
simple-audio-card,format = "i2s";
simple-audio-card,frame-master = <&masterdai>;
simple-audio-card,hp-det-gpios = <&gpio1 RK_PD5 GPIO_ACTIVE_LOW>;
simple-audio-card,hp-det-gpios = <&gpio1 RK_PD5 GPIO_ACTIVE_HIGH>;
simple-audio-card,mclk-fs = <256>;
simple-audio-card,pin-switches = "Headphones";
simple-audio-card,routing =
"Headphones", "LOUT1",
"Headphones", "ROUT1",


@ -298,6 +298,10 @@ config AS_HAS_LVZ_EXTENSION
config CC_HAS_ANNOTATE_TABLEJUMP
def_bool $(cc-option,-mannotate-tablejump)
config RUSTC_HAS_ANNOTATE_TABLEJUMP
depends on RUST
def_bool $(rustc-option,-Cllvm-args=--loongarch-annotate-tablejump)
menu "Kernel type and options"
source "kernel/Kconfig.hz"
@ -563,10 +567,14 @@ config ARCH_STRICT_ALIGN
-mstrict-align build parameter to prevent unaligned accesses.
CPUs with h/w unaligned access support:
Loongson-2K2000/2K3000/3A5000/3C5000/3D5000.
Loongson-2K2000/2K3000 and all of Loongson-3 series processors
based on LoongArch.
CPUs without h/w unaligned access support:
Loongson-2K500/2K1000.
Loongson-2K0300/2K0500/2K1000.
If you want to make sure whether to support unaligned memory access
on your hardware, please read the bit 20 (UAL) of CPUCFG1 register.
This option is enabled by default to make the kernel be able to run
on all LoongArch systems. But you can disable it manually if you want


@ -102,16 +102,21 @@ KBUILD_CFLAGS += $(call cc-option,-mthin-add-sub) $(call cc-option,-Wa$(comma)
ifdef CONFIG_OBJTOOL
ifdef CONFIG_CC_HAS_ANNOTATE_TABLEJUMP
KBUILD_CFLAGS += -mannotate-tablejump
else
KBUILD_CFLAGS += -fno-jump-tables # keep compatibility with older compilers
endif
ifdef CONFIG_RUSTC_HAS_ANNOTATE_TABLEJUMP
KBUILD_RUSTFLAGS += -Cllvm-args=--loongarch-annotate-tablejump
else
KBUILD_RUSTFLAGS += -Zno-jump-tables # keep compatibility with older compilers
endif
ifdef CONFIG_LTO_CLANG
# The annotate-tablejump option can not be passed to LLVM backend when LTO is enabled.
# Ensure it is aware of linker with LTO, '--loongarch-annotate-tablejump' also needs to
# be passed via '-mllvm' to ld.lld.
KBUILD_CFLAGS += -mannotate-tablejump
ifdef CONFIG_LTO_CLANG
KBUILD_LDFLAGS += -mllvm --loongarch-annotate-tablejump
endif
else
KBUILD_CFLAGS += -fno-jump-tables # keep compatibility with older compilers
endif
endif
KBUILD_RUSTFLAGS += --target=loongarch64-unknown-none-softfloat -Ccode-model=small


@ -10,9 +10,8 @@
#ifndef _ASM_LOONGARCH_ACENV_H
#define _ASM_LOONGARCH_ACENV_H
/*
* This header is required by ACPI core, but we have nothing to fill in
* right now. Will be updated later when needed.
*/
#ifdef CONFIG_ARCH_STRICT_ALIGN
#define ACPI_MISALIGNMENT_NOT_SUPPORTED
#endif /* CONFIG_ARCH_STRICT_ALIGN */
#endif /* _ASM_LOONGARCH_ACENV_H */


@ -16,6 +16,13 @@
*/
#define KVM_MMU_CACHE_MIN_PAGES (CONFIG_PGTABLE_LEVELS - 1)
/*
* _PAGE_MODIFIED is a SW pte bit, it records page ever written on host
* kernel, on secondary MMU it records the page writeable attribute, in
* order for fast path handling.
*/
#define KVM_PAGE_WRITEABLE _PAGE_MODIFIED
#define _KVM_FLUSH_PGTABLE 0x1
#define _KVM_HAS_PGMASK 0x2
#define kvm_pfn_pte(pfn, prot) (((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
@ -52,10 +59,10 @@ static inline void kvm_set_pte(kvm_pte_t *ptep, kvm_pte_t val)
WRITE_ONCE(*ptep, val);
}
static inline int kvm_pte_write(kvm_pte_t pte) { return pte & _PAGE_WRITE; }
static inline int kvm_pte_dirty(kvm_pte_t pte) { return pte & _PAGE_DIRTY; }
static inline int kvm_pte_young(kvm_pte_t pte) { return pte & _PAGE_ACCESSED; }
static inline int kvm_pte_huge(kvm_pte_t pte) { return pte & _PAGE_HUGE; }
static inline int kvm_pte_dirty(kvm_pte_t pte) { return pte & __WRITEABLE; }
static inline int kvm_pte_writeable(kvm_pte_t pte) { return pte & KVM_PAGE_WRITEABLE; }
static inline kvm_pte_t kvm_pte_mkyoung(kvm_pte_t pte)
{
@ -69,12 +76,12 @@ static inline kvm_pte_t kvm_pte_mkold(kvm_pte_t pte)
static inline kvm_pte_t kvm_pte_mkdirty(kvm_pte_t pte)
{
return pte | _PAGE_DIRTY;
return pte | __WRITEABLE;
}
static inline kvm_pte_t kvm_pte_mkclean(kvm_pte_t pte)
{
return pte & ~_PAGE_DIRTY;
return pte & ~__WRITEABLE;
}
static inline kvm_pte_t kvm_pte_mkhuge(kvm_pte_t pte)
@ -87,6 +94,11 @@ static inline kvm_pte_t kvm_pte_mksmall(kvm_pte_t pte)
return pte & ~_PAGE_HUGE;
}
static inline kvm_pte_t kvm_pte_mkwriteable(kvm_pte_t pte)
{
return pte | KVM_PAGE_WRITEABLE;
}
static inline int kvm_need_flush(kvm_ptw_ctx *ctx)
{
return ctx->flag & _KVM_FLUSH_PGTABLE;


@ -86,7 +86,7 @@ late_initcall(fdt_cpu_clk_init);
static ssize_t boardinfo_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sprintf(buf,
return sysfs_emit(buf,
"BIOS Information\n"
"Vendor\t\t\t: %s\n"
"Version\t\t\t: %s\n"
@ -109,6 +109,8 @@ static int __init boardinfo_init(void)
struct kobject *loongson_kobj;
loongson_kobj = kobject_create_and_add("loongson", firmware_kobj);
if (!loongson_kobj)
return -ENOMEM;
return sysfs_create_file(loongson_kobj, &boardinfo_attr.attr);
}


@ -51,12 +51,13 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
if (task == current) {
regs->regs[3] = (unsigned long)__builtin_frame_address(0);
regs->csr_era = (unsigned long)__builtin_return_address(0);
regs->regs[22] = 0;
} else {
regs->regs[3] = thread_saved_fp(task);
regs->csr_era = thread_saved_ra(task);
regs->regs[22] = task->thread.reg22;
}
regs->regs[1] = 0;
regs->regs[22] = 0;
for (unwind_start(&state, task, regs);
!unwind_done(&state) && !unwind_error(&state); unwind_next_frame(&state)) {


@ -54,6 +54,9 @@ static int __init init_vdso(void)
vdso_info.code_mapping.pages =
kcalloc(vdso_info.size / PAGE_SIZE, sizeof(struct page *), GFP_KERNEL);
if (!vdso_info.code_mapping.pages)
return -ENOMEM;
pfn = __phys_to_pfn(__pa_symbol(vdso_info.vdso));
for (i = 0; i < vdso_info.size / PAGE_SIZE; i++)
vdso_info.code_mapping.pages[i] = pfn_to_page(pfn + i);


@ -778,10 +778,8 @@ static long kvm_save_notify(struct kvm_vcpu *vcpu)
return 0;
default:
return KVM_HCALL_INVALID_CODE;
};
return KVM_HCALL_INVALID_CODE;
};
}
}
/*
* kvm_handle_lsx_disabled() - Guest used LSX while disabled in root.


@ -426,21 +426,26 @@ static int kvm_eiointc_ctrl_access(struct kvm_device *dev,
struct loongarch_eiointc *s = dev->kvm->arch.eiointc;
data = (void __user *)attr->addr;
switch (type) {
case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU:
case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE:
if (copy_from_user(&val, data, 4))
return -EFAULT;
break;
default:
break;
}
spin_lock_irqsave(&s->lock, flags);
switch (type) {
case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU:
if (copy_from_user(&val, data, 4))
ret = -EFAULT;
else {
if (val >= EIOINTC_ROUTE_MAX_VCPUS)
ret = -EINVAL;
else
s->num_cpu = val;
}
if (val >= EIOINTC_ROUTE_MAX_VCPUS)
ret = -EINVAL;
else
s->num_cpu = val;
break;
case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE:
if (copy_from_user(&s->features, data, 4))
ret = -EFAULT;
s->features = val;
if (!(s->features & BIT(EIOINTC_HAS_VIRT_EXTENSION)))
s->status |= BIT(EIOINTC_ENABLE);
break;
@ -462,19 +467,17 @@ static int kvm_eiointc_ctrl_access(struct kvm_device *dev,
static int kvm_eiointc_regs_access(struct kvm_device *dev,
struct kvm_device_attr *attr,
bool is_write)
bool is_write, int *data)
{
int addr, cpu, offset, ret = 0;
unsigned long flags;
void *p = NULL;
void __user *data;
struct loongarch_eiointc *s;
s = dev->kvm->arch.eiointc;
addr = attr->attr;
cpu = addr >> 16;
addr &= 0xffff;
data = (void __user *)attr->addr;
switch (addr) {
case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
offset = (addr - EIOINTC_NODETYPE_START) / 4;
@ -513,13 +516,10 @@ static int kvm_eiointc_regs_access(struct kvm_device *dev,
}
spin_lock_irqsave(&s->lock, flags);
if (is_write) {
if (copy_from_user(p, data, 4))
ret = -EFAULT;
} else {
if (copy_to_user(data, p, 4))
ret = -EFAULT;
}
if (is_write)
memcpy(p, data, 4);
else
memcpy(data, p, 4);
spin_unlock_irqrestore(&s->lock, flags);
return ret;
@ -527,19 +527,17 @@ static int kvm_eiointc_regs_access(struct kvm_device *dev,
static int kvm_eiointc_sw_status_access(struct kvm_device *dev,
struct kvm_device_attr *attr,
bool is_write)
bool is_write, int *data)
{
int addr, ret = 0;
unsigned long flags;
void *p = NULL;
void __user *data;
struct loongarch_eiointc *s;
s = dev->kvm->arch.eiointc;
addr = attr->attr;
addr &= 0xffff;
data = (void __user *)attr->addr;
switch (addr) {
case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_NUM_CPU:
if (is_write)
@ -561,13 +559,10 @@ static int kvm_eiointc_sw_status_access(struct kvm_device *dev,
return -EINVAL;
}
spin_lock_irqsave(&s->lock, flags);
if (is_write) {
if (copy_from_user(p, data, 4))
ret = -EFAULT;
} else {
if (copy_to_user(data, p, 4))
ret = -EFAULT;
}
if (is_write)
memcpy(p, data, 4);
else
memcpy(data, p, 4);
spin_unlock_irqrestore(&s->lock, flags);
return ret;
@ -576,11 +571,27 @@ static int kvm_eiointc_sw_status_access(struct kvm_device *dev,
static int kvm_eiointc_get_attr(struct kvm_device *dev,
struct kvm_device_attr *attr)
{
int ret, data;
switch (attr->group) {
case KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS:
return kvm_eiointc_regs_access(dev, attr, false);
ret = kvm_eiointc_regs_access(dev, attr, false, &data);
if (ret)
return ret;
if (copy_to_user((void __user *)attr->addr, &data, 4))
ret = -EFAULT;
return ret;
case KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS:
return kvm_eiointc_sw_status_access(dev, attr, false);
ret = kvm_eiointc_sw_status_access(dev, attr, false, &data);
if (ret)
return ret;
if (copy_to_user((void __user *)attr->addr, &data, 4))
ret = -EFAULT;
return ret;
default:
return -EINVAL;
}
@ -589,13 +600,21 @@ static int kvm_eiointc_get_attr(struct kvm_device *dev,
static int kvm_eiointc_set_attr(struct kvm_device *dev,
struct kvm_device_attr *attr)
{
int data;
switch (attr->group) {
case KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL:
return kvm_eiointc_ctrl_access(dev, attr);
case KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS:
return kvm_eiointc_regs_access(dev, attr, true);
if (copy_from_user(&data, (void __user *)attr->addr, 4))
return -EFAULT;
return kvm_eiointc_regs_access(dev, attr, true, &data);
case KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS:
return kvm_eiointc_sw_status_access(dev, attr, true);
if (copy_from_user(&data, (void __user *)attr->addr, 4))
return -EFAULT;
return kvm_eiointc_sw_status_access(dev, attr, true, &data);
default:
return -EINVAL;
}


@ -348,6 +348,7 @@ static int kvm_pch_pic_regs_access(struct kvm_device *dev,
struct kvm_device_attr *attr,
bool is_write)
{
char buf[8];
int addr, offset, len = 8, ret = 0;
void __user *data;
void *p = NULL;
@ -397,17 +398,23 @@ static int kvm_pch_pic_regs_access(struct kvm_device *dev,
return -EINVAL;
}
spin_lock(&s->lock);
/* write or read value according to is_write */
if (is_write) {
if (copy_from_user(p, data, len))
ret = -EFAULT;
} else {
if (copy_to_user(data, p, len))
ret = -EFAULT;
if (copy_from_user(buf, data, len))
return -EFAULT;
}
spin_lock(&s->lock);
if (is_write)
memcpy(p, buf, len);
else
memcpy(buf, p, len);
spin_unlock(&s->lock);
if (!is_write) {
if (copy_to_user(data, buf, len))
return -EFAULT;
}
return ret;
}


@ -569,7 +569,7 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ
/* Track access to pages marked old */
new = kvm_pte_mkyoung(*ptep);
if (write && !kvm_pte_dirty(new)) {
if (!kvm_pte_write(new)) {
if (!kvm_pte_writeable(new)) {
ret = -EFAULT;
goto out;
}
@ -856,9 +856,9 @@ retry:
prot_bits |= _CACHE_SUC;
if (writeable) {
prot_bits |= _PAGE_WRITE;
prot_bits = kvm_pte_mkwriteable(prot_bits);
if (write)
prot_bits |= __WRITEABLE;
prot_bits = kvm_pte_mkdirty(prot_bits);
}
/* Disable dirty logging on HugePages */
@ -904,7 +904,7 @@ retry:
kvm_release_faultin_page(kvm, page, false, writeable);
spin_unlock(&kvm->mmu_lock);
if (prot_bits & _PAGE_DIRTY)
if (kvm_pte_dirty(prot_bits))
mark_page_dirty_in_slot(kvm, memslot, gfn);
out:


@ -17,7 +17,7 @@
#cooling-cells = <2>;
};
i2c-gpio-0 {
i2c-0 {
compatible = "i2c-gpio";
sda-gpios = <&pio 3 14 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>; /* PD14/GPIO44 */
scl-gpios = <&pio 3 15 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>; /* PD15/GPIO45 */


@ -16,11 +16,11 @@
#define ZPCI_PCI_ST_FUNC_NOT_AVAIL 40
#define ZPCI_PCI_ST_ALREADY_IN_RQ_STATE 44
/* Load/Store return codes */
#define ZPCI_PCI_LS_OK 0
#define ZPCI_PCI_LS_ERR 1
#define ZPCI_PCI_LS_BUSY 2
#define ZPCI_PCI_LS_INVAL_HANDLE 3
/* PCI instruction condition codes */
#define ZPCI_CC_OK 0
#define ZPCI_CC_ERR 1
#define ZPCI_CC_BUSY 2
#define ZPCI_CC_INVAL_HANDLE 3
/* Load/Store address space identifiers */
#define ZPCI_PCIAS_MEMIO_0 0


@ -562,6 +562,24 @@ enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
extern struct ghcb *boot_ghcb;
static inline void sev_evict_cache(void *va, int npages)
{
volatile u8 val __always_unused;
u8 *bytes = va;
int page_idx;
/*
* For SEV guests, a read from the first/last cache-lines of a 4K page
* using the guest key is sufficient to cause a flush of all cache-lines
* associated with that 4K page without incurring all the overhead of a
* full CLFLUSH sequence.
*/
for (page_idx = 0; page_idx < npages; page_idx++) {
val = bytes[page_idx * PAGE_SIZE];
val = bytes[page_idx * PAGE_SIZE + PAGE_SIZE - 1];
}
}
#else /* !CONFIG_AMD_MEM_ENCRYPT */
#define snp_vmpl 0
@ -605,6 +623,7 @@ static inline int snp_send_guest_request(struct snp_msg_desc *mdesc,
static inline int snp_svsm_vtpm_send_command(u8 *buffer) { return -ENODEV; }
static inline void __init snp_secure_tsc_prepare(void) { }
static inline void __init snp_secure_tsc_init(void) { }
static inline void sev_evict_cache(void *va, int npages) {}
#endif /* CONFIG_AMD_MEM_ENCRYPT */
@ -619,24 +638,6 @@ int rmp_make_shared(u64 pfn, enum pg_level level);
void snp_leak_pages(u64 pfn, unsigned int npages);
void kdump_sev_callback(void);
void snp_fixup_e820_tables(void);
static inline void sev_evict_cache(void *va, int npages)
{
volatile u8 val __always_unused;
u8 *bytes = va;
int page_idx;
/*
* For SEV guests, a read from the first/last cache-lines of a 4K page
* using the guest key is sufficient to cause a flush of all cache-lines
* associated with that 4K page without incurring all the overhead of a
* full CLFLUSH sequence.
*/
for (page_idx = 0; page_idx < npages; page_idx++) {
val = bytes[page_idx * PAGE_SIZE];
val = bytes[page_idx * PAGE_SIZE + PAGE_SIZE - 1];
}
}
#else
static inline bool snp_probe_rmptable_info(void) { return false; }
static inline int snp_rmptable_init(void) { return -ENOSYS; }
@ -652,7 +653,6 @@ static inline int rmp_make_shared(u64 pfn, enum pg_level level) { return -ENODEV
static inline void snp_leak_pages(u64 pfn, unsigned int npages) {}
static inline void kdump_sev_callback(void) { }
static inline void snp_fixup_e820_tables(void) {}
static inline void sev_evict_cache(void *va, int npages) {}
#endif
#endif


@ -970,6 +970,12 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
}
lock_sock(sk);
if (ctx->write) {
release_sock(sk);
return -EBUSY;
}
ctx->write = true;
if (ctx->init && !ctx->more) {
if (ctx->used) {
err = -EINVAL;
@ -1019,6 +1025,8 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
continue;
}
ctx->merge = 0;
if (!af_alg_writable(sk)) {
err = af_alg_wait_for_wmem(sk, msg->msg_flags);
if (err)
@ -1058,7 +1066,6 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
ctx->used += plen;
copied += plen;
size -= plen;
ctx->merge = 0;
} else {
do {
struct page *pg;
@ -1104,6 +1111,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
unlock:
af_alg_data_wakeup(sk);
ctx->write = false;
release_sock(sk);
return copied ?: err;


@ -1330,6 +1330,7 @@ void drbd_reconsider_queue_parameters(struct drbd_device *device,
lim.max_write_zeroes_sectors = DRBD_MAX_BBIO_SECTORS;
else
lim.max_write_zeroes_sectors = 0;
lim.max_hw_wzeroes_unmap_sectors = 0;
if ((lim.discard_granularity >> SECTOR_SHIFT) >
lim.max_hw_discard_sectors) {


@ -312,7 +312,9 @@ config BT_HCIBCM4377
config BT_HCIBPA10X
tristate "HCI BPA10x USB driver"
depends on BT_HCIUART
depends on USB
select BT_HCIUART_H4
help
Bluetooth HCI BPA10x USB driver.
This driver provides support for the Digianswer BPA 100/105 Bluetooth
@ -437,8 +439,10 @@ config BT_MTKSDIO
config BT_MTKUART
tristate "MediaTek HCI UART driver"
depends on BT_HCIUART
depends on SERIAL_DEV_BUS
depends on USB || !BT_HCIBTUSB_MTK
select BT_HCIUART_H4
select BT_MTK
help
MediaTek Bluetooth HCI UART driver.
@ -483,7 +487,9 @@ config BT_VIRTIO
config BT_NXPUART
tristate "NXP protocol support"
depends on BT_HCIUART
depends on SERIAL_DEV_BUS
select BT_HCIUART_H4
select CRC32
select CRC8
help


@ -121,10 +121,6 @@ void hci_uart_set_flow_control(struct hci_uart *hu, bool enable);
void hci_uart_set_speeds(struct hci_uart *hu, unsigned int init_speed,
unsigned int oper_speed);
#ifdef CONFIG_BT_HCIUART_H4
int h4_init(void);
int h4_deinit(void);
struct h4_recv_pkt {
u8 type; /* Packet type */
u8 hlen; /* Header length */
@ -162,6 +158,10 @@ struct h4_recv_pkt {
.lsize = 2, \
.maxlen = HCI_MAX_FRAME_SIZE \
#ifdef CONFIG_BT_HCIUART_H4
int h4_init(void);
int h4_deinit(void);
struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
const unsigned char *buffer, int count,
const struct h4_recv_pkt *pkts, int pkts_count);


@ -303,6 +303,9 @@ void cpg_mstp_detach_dev(struct generic_pm_domain *unused, struct device *dev)
pm_clk_destroy(dev);
}
static struct device_node *cpg_mstp_pd_np __initdata = NULL;
static struct generic_pm_domain *cpg_mstp_pd_genpd __initdata = NULL;
void __init cpg_mstp_add_clk_domain(struct device_node *np)
{
struct generic_pm_domain *pd;
@ -324,5 +327,20 @@ void __init cpg_mstp_add_clk_domain(struct device_node *np)
pd->detach_dev = cpg_mstp_detach_dev;
pm_genpd_init(pd, &pm_domain_always_on_gov, false);
of_genpd_add_provider_simple(np, pd);
cpg_mstp_pd_np = of_node_get(np);
cpg_mstp_pd_genpd = pd;
}
static int __init cpg_mstp_pd_init_provider(void)
{
int error;
if (!cpg_mstp_pd_np)
return -ENODEV;
error = of_genpd_add_provider_simple(cpg_mstp_pd_np, cpg_mstp_pd_genpd);
of_node_put(cpg_mstp_pd_np);
return error;
}
postcore_initcall(cpg_mstp_pd_init_provider);


@ -185,7 +185,7 @@ static unsigned long ccu_mp_recalc_rate(struct clk_hw *hw,
p &= (1 << cmp->p.width) - 1;
if (cmp->common.features & CCU_FEATURE_DUAL_DIV)
rate = (parent_rate / p) / m;
rate = (parent_rate / (p + cmp->p.offset)) / m;
else
rate = (parent_rate >> p) / m;


@ -2953,6 +2953,15 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
goto err_null_driver;
}
/*
* Mark support for the scheduler's frequency invariance engine for
* drivers that implement target(), target_index() or fast_switch().
*/
if (!cpufreq_driver->setpolicy) {
static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
pr_debug("cpufreq: supports frequency invariance\n");
}
ret = subsys_interface_register(&cpufreq_interface);
if (ret)
goto err_boost_unreg;
@ -2974,21 +2983,14 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
hp_online = ret;
ret = 0;
/*
* Mark support for the scheduler's frequency invariance engine for
* drivers that implement target(), target_index() or fast_switch().
*/
if (!cpufreq_driver->setpolicy) {
static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
pr_debug("supports frequency invariance");
}
pr_debug("driver %s up and running\n", driver_data->name);
goto out;
err_if_unreg:
subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
if (!cpufreq_driver->setpolicy)
static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
remove_boost_sysfs_file();
err_null_driver:
write_lock_irqsave(&cpufreq_driver_lock, flags);


@ -2430,7 +2430,7 @@ static void __sev_firmware_shutdown(struct sev_device *sev, bool panic)
{
int error;
__sev_platform_shutdown_locked(NULL);
__sev_platform_shutdown_locked(&error);
if (sev_es_tmr) {
/*


@ -12,6 +12,7 @@
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/dibs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
@ -49,6 +50,7 @@ static int dibs_lo_register_dmb(struct dibs_dev *dibs, struct dibs_dmb *dmb,
{
struct dibs_lo_dmb_node *dmb_node, *tmp_node;
struct dibs_lo_dev *ldev;
struct folio *folio;
unsigned long flags;
int sba_idx, rc;
@ -70,13 +72,16 @@ static int dibs_lo_register_dmb(struct dibs_dev *dibs, struct dibs_dmb *dmb,
dmb_node->sba_idx = sba_idx;
dmb_node->len = dmb->dmb_len;
dmb_node->cpu_addr = kzalloc(dmb_node->len, GFP_KERNEL |
__GFP_NOWARN | __GFP_NORETRY |
__GFP_NOMEMALLOC);
if (!dmb_node->cpu_addr) {
/* not critical; fail under memory pressure and fallback to TCP */
folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC |
__GFP_NORETRY | __GFP_ZERO,
get_order(dmb_node->len));
if (!folio) {
rc = -ENOMEM;
goto err_node;
}
dmb_node->cpu_addr = folio_address(folio);
dmb_node->dma_addr = DIBS_DMA_ADDR_INVALID;
refcount_set(&dmb_node->refcnt, 1);
@ -122,7 +127,7 @@ static void __dibs_lo_unregister_dmb(struct dibs_lo_dev *ldev,
write_unlock_bh(&ldev->dmb_ht_lock);
clear_bit(dmb_node->sba_idx, ldev->sba_idx_mask);
kfree(dmb_node->cpu_addr);
folio_put(virt_to_folio(dmb_node->cpu_addr));
kfree(dmb_node);
if (atomic_dec_and_test(&ldev->dmb_cnt))


@ -41,7 +41,7 @@
/*
* ABI version history is documented in linux/firewire-cdev.h.
*/
#define FW_CDEV_KERNEL_VERSION 5
#define FW_CDEV_KERNEL_VERSION 6
#define FW_CDEV_VERSION_EVENT_REQUEST2 4
#define FW_CDEV_VERSION_ALLOCATE_REGION_END 4
#define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW 5


@ -198,7 +198,10 @@ static int tegra186_bpmp_dram_init(struct tegra_bpmp *bpmp)
err = of_reserved_mem_region_to_resource(bpmp->dev->of_node, 0, &res);
if (err < 0) {
dev_warn(bpmp->dev, "failed to parse memory region: %d\n", err);
if (err != -ENODEV)
dev_warn(bpmp->dev,
"failed to parse memory region: %d\n", err);
return err;
}


@ -942,8 +942,9 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode,
{
struct acpi_device *adev = to_acpi_device_node(fwnode);
bool can_fallback = acpi_can_fallback_to_crs(adev, con_id);
struct acpi_gpio_info info;
struct acpi_gpio_info info = {};
struct gpio_desc *desc;
int ret;
desc = __acpi_find_gpio(fwnode, con_id, idx, can_fallback, &info);
if (IS_ERR(desc))
@ -957,6 +958,12 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode,
acpi_gpio_update_gpiod_flags(dflags, &info);
acpi_gpio_update_gpiod_lookup_flags(lookupflags, &info);
/* ACPI uses hundredths of milliseconds units */
ret = gpio_set_debounce_timeout(desc, info.debounce * 10);
if (ret)
return ERR_PTR(ret);
return desc;
}
@ -992,7 +999,7 @@ int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *con_id,
int ret;
for (i = 0, idx = 0; idx <= index; i++) {
struct acpi_gpio_info info;
struct acpi_gpio_info info = {};
struct gpio_desc *desc;
/* Ignore -EPROBE_DEFER, it only matters if idx matches */


@ -317,6 +317,18 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
.ignore_wake = "PNP0C50:00@8",
},
},
{
/*
* Same as G1619-04. New model.
*/
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
DMI_MATCH(DMI_PRODUCT_NAME, "G1619-05"),
},
.driver_data = &(struct acpi_gpiolib_dmi_quirk) {
.ignore_wake = "PNP0C50:00@8",
},
},
{
/*
* Spurious wakeups from GPIO 11


@ -250,16 +250,24 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool suspend_proc)
{
if (adev->kfd.dev)
kgd2kfd_suspend(adev->kfd.dev, suspend_proc);
if (adev->kfd.dev) {
if (adev->in_s0ix)
kgd2kfd_stop_sched_all_nodes(adev->kfd.dev);
else
kgd2kfd_suspend(adev->kfd.dev, suspend_proc);
}
}
int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool resume_proc)
{
int r = 0;
if (adev->kfd.dev)
r = kgd2kfd_resume(adev->kfd.dev, resume_proc);
if (adev->kfd.dev) {
if (adev->in_s0ix)
r = kgd2kfd_start_sched_all_nodes(adev->kfd.dev);
else
r = kgd2kfd_resume(adev->kfd.dev, resume_proc);
}
return r;
}


@ -426,7 +426,9 @@ void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd);
void kgd2kfd_unlock_kfd(struct kfd_dev *kfd);
int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id);
int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd);
int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id);
int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd);
bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id);
bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry,
bool retry_fault);
@ -516,11 +518,21 @@ static inline int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id)
return 0;
}
static inline int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd)
{
return 0;
}
static inline int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
{
return 0;
}
static inline int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd)
{
return 0;
}
static inline bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
{
return false;


@ -5136,7 +5136,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
adev->in_suspend = true;
if (amdgpu_sriov_vf(adev)) {
if (!adev->in_s0ix && !adev->in_runpm)
if (!adev->in_runpm)
amdgpu_amdkfd_suspend_process(adev);
amdgpu_virt_fini_data_exchange(adev);
r = amdgpu_virt_request_full_gpu(adev, false);
@ -5156,10 +5156,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
amdgpu_device_ip_suspend_phase1(adev);
if (!adev->in_s0ix) {
amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
amdgpu_userq_suspend(adev);
}
amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
amdgpu_userq_suspend(adev);
r = amdgpu_device_evict_resources(adev);
if (r)
@ -5254,15 +5252,13 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
goto exit;
}
if (!adev->in_s0ix) {
r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
if (r)
goto exit;
r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
if (r)
goto exit;
r = amdgpu_userq_resume(adev);
if (r)
goto exit;
}
r = amdgpu_userq_resume(adev);
if (r)
goto exit;
r = amdgpu_device_ip_late_init(adev);
if (r)
@ -5275,7 +5271,7 @@ exit:
amdgpu_virt_init_data_exchange(adev);
amdgpu_virt_release_full_gpu(adev, true);
if (!adev->in_s0ix && !r && !adev->in_runpm)
if (!r && !adev->in_runpm)
r = amdgpu_amdkfd_resume_process(adev);
}


@ -1654,6 +1654,21 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
}
}
break;
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 4):
adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
if (adev->gfx.pfp_fw_version >= 102 &&
adev->gfx.mec_fw_version >= 66 &&
adev->mes.fw_version[0] >= 128) {
adev->gfx.enable_cleaner_shader = true;
r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
if (r) {
adev->gfx.enable_cleaner_shader = false;
dev_err(adev->dev, "Failed to initialize cleaner shader\n");
}
}
break;
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;


@ -1550,6 +1550,25 @@ int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id)
return ret;
}
int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd)
{
struct kfd_node *node;
int i, r;
if (!kfd->init_complete)
return 0;
for (i = 0; i < kfd->num_nodes; i++) {
node = kfd->nodes[i];
r = node->dqm->ops.unhalt(node->dqm);
if (r) {
dev_err(kfd_device, "Error in starting scheduler\n");
return r;
}
}
return 0;
}
int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
{
struct kfd_node *node;
@ -1567,6 +1586,23 @@ int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
return node->dqm->ops.halt(node->dqm);
}
int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd)
{
struct kfd_node *node;
int i, r;
if (!kfd->init_complete)
return 0;
for (i = 0; i < kfd->num_nodes; i++) {
node = kfd->nodes[i];
r = node->dqm->ops.halt(node->dqm);
if (r)
return r;
}
return 0;
}
bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
{
struct kfd_node *node;


@ -8717,7 +8717,16 @@ static int amdgpu_dm_encoder_init(struct drm_device *dev,
static void manage_dm_interrupts(struct amdgpu_device *adev,
struct amdgpu_crtc *acrtc,
struct dm_crtc_state *acrtc_state)
{
{ /*
* We cannot be sure that the frontend index maps to the same
* backend index - some even map to more than one.
* So we have to go through the CRTC to find the right IRQ.
*/
int irq_type = amdgpu_display_crtc_idx_to_irq_type(
adev,
acrtc->crtc_id);
struct drm_device *dev = adev_to_drm(adev);
struct drm_vblank_crtc_config config = {0};
struct dc_crtc_timing *timing;
int offdelay;
@ -8770,7 +8779,35 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
drm_crtc_vblank_on_config(&acrtc->base,
&config);
/* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_get.*/
switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
case IP_VERSION(3, 0, 0):
case IP_VERSION(3, 0, 2):
case IP_VERSION(3, 0, 3):
case IP_VERSION(3, 2, 0):
if (amdgpu_irq_get(adev, &adev->pageflip_irq, irq_type))
drm_err(dev, "DM_IRQ: Cannot get pageflip irq!\n");
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
if (amdgpu_irq_get(adev, &adev->vline0_irq, irq_type))
drm_err(dev, "DM_IRQ: Cannot get vline0 irq!\n");
#endif
}
} else {
/* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_put.*/
switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
case IP_VERSION(3, 0, 0):
case IP_VERSION(3, 0, 2):
case IP_VERSION(3, 0, 3):
case IP_VERSION(3, 2, 0):
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
if (amdgpu_irq_put(adev, &adev->vline0_irq, irq_type))
drm_err(dev, "DM_IRQ: Cannot put vline0 irq!\n");
#endif
if (amdgpu_irq_put(adev, &adev->pageflip_irq, irq_type))
drm_err(dev, "DM_IRQ: Cannot put pageflip irq!\n");
}
drm_crtc_vblank_off(&acrtc->base);
}
}


@ -2236,7 +2236,7 @@ static int smu_resume(struct amdgpu_ip_block *ip_block)
return ret;
}
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL && smu->od_enabled) {
ret = smu_od_edit_dpm_table(smu, PP_OD_COMMIT_DPM_TABLE, NULL, 0);
if (ret)
return ret;


@ -2677,7 +2677,7 @@ static int anx7625_i2c_probe(struct i2c_client *client)
ret = devm_request_threaded_irq(dev, platform->pdata.intp_irq,
NULL, anx7625_intr_hpd_isr,
IRQF_TRIGGER_FALLING |
IRQF_ONESHOT,
IRQF_ONESHOT | IRQF_NO_AUTOEN,
"anx7625-intp", platform);
if (ret) {
DRM_DEV_ERROR(dev, "fail to request irq\n");
@ -2746,8 +2746,10 @@ static int anx7625_i2c_probe(struct i2c_client *client)
}
/* Add work function */
if (platform->pdata.intp_irq)
if (platform->pdata.intp_irq) {
enable_irq(platform->pdata.intp_irq);
queue_work(platform->workqueue, &platform->work);
}
if (platform->pdata.audio_en)
anx7625_register_audio(dev, platform);

View File

@ -1984,8 +1984,10 @@ static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
mhdp_state = to_cdns_mhdp_bridge_state(new_state);
mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode);
if (!mhdp_state->current_mode)
return;
if (!mhdp_state->current_mode) {
ret = -EINVAL;
goto out;
}
drm_mode_set_name(mhdp_state->current_mode);


@ -2432,8 +2432,6 @@ static const struct drm_gpuvm_ops lock_ops = {
*
* The expected usage is::
*
* .. code-block:: c
*
* vm_bind {
* struct drm_exec exec;
*


@ -546,7 +546,7 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
luminance_range->max_luminance,
panel->vbt.backlight.pwm_freq_hz,
intel_dp->edp_dpcd, &current_level, &current_mode,
false);
panel->backlight.edp.vesa.luminance_control_support);
if (ret < 0)
return ret;


@ -117,6 +117,7 @@ enum xe_guc_action {
XE_GUC_ACTION_ENTER_S_STATE = 0x501,
XE_GUC_ACTION_EXIT_S_STATE = 0x502,
XE_GUC_ACTION_GLOBAL_SCHED_POLICY_CHANGE = 0x506,
XE_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV = 0x509,
XE_GUC_ACTION_SCHED_CONTEXT = 0x1000,
XE_GUC_ACTION_SCHED_CONTEXT_MODE_SET = 0x1001,
XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE = 0x1002,

View File

@ -17,6 +17,7 @@
* | 0 | 31:16 | **KEY** - KLV key identifier |
* | | | - `GuC Self Config KLVs`_ |
* | | | - `GuC Opt In Feature KLVs`_ |
* | | | - `GuC Scheduling Policies KLVs`_ |
* | | | - `GuC VGT Policy KLVs`_ |
* | | | - `GuC VF Configuration KLVs`_ |
* | | | |
@ -152,6 +153,30 @@ enum {
#define GUC_KLV_OPT_IN_FEATURE_DYNAMIC_INHIBIT_CONTEXT_SWITCH_KEY 0x4003
#define GUC_KLV_OPT_IN_FEATURE_DYNAMIC_INHIBIT_CONTEXT_SWITCH_LEN 0u
/**
* DOC: GuC Scheduling Policies KLVs
*
* `GuC KLV`_ keys available for use with UPDATE_SCHEDULING_POLICIES_KLV.
*
* _`GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD` : 0x1001
* Some platforms do not allow concurrent execution of RCS and CCS
* workloads from different address spaces. By default, the GuC prioritizes
* RCS submissions over CCS ones, which can lead to CCS workloads being
* significantly (or completely) starved of execution time. This KLV allows
* the driver to specify a quantum (in ms) and a ratio (percentage value
* between 0 and 100), and the GuC will prioritize the CCS for that
* percentage of each quantum. For example, specifying 100ms and 30% will
* make the GuC prioritize the CCS for 30ms of every 100ms.
* Note that this does not necessarly mean that RCS and CCS engines will
* only be active for their percentage of the quantum, as the restriction
* only kicks in if both classes are fully busy with non-compatible address
* spaces; i.e., if one engine is idle or running the same address space,
* a pending job on the other engine will still be submitted to the HW no
* matter what the ratio is
*/
#define GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD_KEY 0x1001
#define GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD_LEN 2u
/**
* DOC: GuC VGT Policy KLVs
*


@ -311,12 +311,16 @@ int xe_device_sysfs_init(struct xe_device *xe)
if (xe->info.platform == XE_BATTLEMAGE) {
ret = sysfs_create_files(&dev->kobj, auto_link_downgrade_attrs);
if (ret)
return ret;
goto cleanup;
ret = late_bind_create_files(dev);
if (ret)
return ret;
goto cleanup;
}
return devm_add_action_or_reset(dev, xe_device_sysfs_fini, xe);
cleanup:
xe_device_sysfs_fini(xe);
return ret;
}


@ -151,6 +151,16 @@ err_lrc:
return err;
}
static void __xe_exec_queue_fini(struct xe_exec_queue *q)
{
int i;
q->ops->fini(q);
for (i = 0; i < q->width; ++i)
xe_lrc_put(q->lrc[i]);
}
struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
u32 logical_mask, u16 width,
struct xe_hw_engine *hwe, u32 flags,
@ -181,11 +191,13 @@ struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *v
if (xe_exec_queue_uses_pxp(q)) {
err = xe_pxp_exec_queue_add(xe->pxp, q);
if (err)
goto err_post_alloc;
goto err_post_init;
}
return q;
err_post_init:
__xe_exec_queue_fini(q);
err_post_alloc:
__xe_exec_queue_free(q);
return ERR_PTR(err);
@ -283,13 +295,11 @@ void xe_exec_queue_destroy(struct kref *ref)
xe_exec_queue_put(eq);
}
q->ops->fini(q);
q->ops->destroy(q);
}
void xe_exec_queue_fini(struct xe_exec_queue *q)
{
int i;
/*
* Before releasing our ref to lrc and xef, accumulate our run ticks
* and wakeup any waiters.
@ -298,9 +308,7 @@ void xe_exec_queue_fini(struct xe_exec_queue *q)
if (q->xef && atomic_dec_and_test(&q->xef->exec_queue.pending_removal))
wake_up_var(&q->xef->exec_queue.pending_removal);
for (i = 0; i < q->width; ++i)
xe_lrc_put(q->lrc[i]);
__xe_exec_queue_fini(q);
__xe_exec_queue_free(q);
}


@ -166,8 +166,14 @@ struct xe_exec_queue_ops {
int (*init)(struct xe_exec_queue *q);
/** @kill: Kill inflight submissions for backend */
void (*kill)(struct xe_exec_queue *q);
/** @fini: Fini exec queue for submission backend */
/** @fini: Undoes the init() for submission backend */
void (*fini)(struct xe_exec_queue *q);
/**
* @destroy: Destroy exec queue for submission backend. The backend
* function must call xe_exec_queue_fini() (which will in turn call the
* fini() backend function) to ensure the queue is properly cleaned up.
*/
void (*destroy)(struct xe_exec_queue *q);
/** @set_priority: Set priority for exec queue */
int (*set_priority)(struct xe_exec_queue *q,
enum xe_exec_queue_priority priority);


@ -385,10 +385,20 @@ err_free:
return err;
}
static void execlist_exec_queue_fini_async(struct work_struct *w)
static void execlist_exec_queue_fini(struct xe_exec_queue *q)
{
struct xe_execlist_exec_queue *exl = q->execlist;
drm_sched_entity_fini(&exl->entity);
drm_sched_fini(&exl->sched);
kfree(exl);
}
static void execlist_exec_queue_destroy_async(struct work_struct *w)
{
struct xe_execlist_exec_queue *ee =
container_of(w, struct xe_execlist_exec_queue, fini_async);
container_of(w, struct xe_execlist_exec_queue, destroy_async);
struct xe_exec_queue *q = ee->q;
struct xe_execlist_exec_queue *exl = q->execlist;
struct xe_device *xe = gt_to_xe(q->gt);
@ -401,10 +411,6 @@ static void execlist_exec_queue_fini_async(struct work_struct *w)
list_del(&exl->active_link);
spin_unlock_irqrestore(&exl->port->lock, flags);
drm_sched_entity_fini(&exl->entity);
drm_sched_fini(&exl->sched);
kfree(exl);
xe_exec_queue_fini(q);
}
@ -413,10 +419,10 @@ static void execlist_exec_queue_kill(struct xe_exec_queue *q)
/* NIY */
}
static void execlist_exec_queue_fini(struct xe_exec_queue *q)
static void execlist_exec_queue_destroy(struct xe_exec_queue *q)
{
INIT_WORK(&q->execlist->fini_async, execlist_exec_queue_fini_async);
queue_work(system_unbound_wq, &q->execlist->fini_async);
INIT_WORK(&q->execlist->destroy_async, execlist_exec_queue_destroy_async);
queue_work(system_unbound_wq, &q->execlist->destroy_async);
}
static int execlist_exec_queue_set_priority(struct xe_exec_queue *q,
@ -467,6 +473,7 @@ static const struct xe_exec_queue_ops execlist_exec_queue_ops = {
.init = execlist_exec_queue_init,
.kill = execlist_exec_queue_kill,
.fini = execlist_exec_queue_fini,
.destroy = execlist_exec_queue_destroy,
.set_priority = execlist_exec_queue_set_priority,
.set_timeslice = execlist_exec_queue_set_timeslice,
.set_preempt_timeout = execlist_exec_queue_set_preempt_timeout,

View File

@ -42,7 +42,7 @@ struct xe_execlist_exec_queue {
bool has_run;
struct work_struct fini_async;
struct work_struct destroy_async;
enum xe_exec_queue_priority active_priority;
struct list_head active_link;


@ -41,6 +41,7 @@
#include "xe_gt_topology.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_guc_pc.h"
#include "xe_guc_submit.h"
#include "xe_hw_fence.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_irq.h"
@ -97,7 +98,7 @@ void xe_gt_sanitize(struct xe_gt *gt)
* FIXME: if xe_uc_sanitize is called here, on TGL driver will not
* reload
*/
gt->uc.guc.submission_state.enabled = false;
xe_guc_submit_disable(&gt->uc.guc);
}
static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)


@ -1632,7 +1632,6 @@ static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs)
u64 fair;
fair = div_u64(available, num_vfs);
fair = rounddown_pow_of_two(fair); /* XXX: ttm_vram_mgr & drm_buddy limitation */
fair = ALIGN_DOWN(fair, alignment);
#ifdef MAX_FAIR_LMEM
fair = min_t(u64, MAX_FAIR_LMEM, fair);


@ -880,9 +880,7 @@ int xe_guc_post_load_init(struct xe_guc *guc)
return ret;
}
guc->submission_state.enabled = true;
return 0;
return xe_guc_submit_enable(guc);
}
int xe_guc_reset(struct xe_guc *guc)
@ -1579,7 +1577,7 @@ void xe_guc_sanitize(struct xe_guc *guc)
{
xe_uc_fw_sanitize(&guc->fw);
xe_guc_ct_disable(&guc->ct);
guc->submission_state.enabled = false;
xe_guc_submit_disable(guc);
}
int xe_guc_reset_prepare(struct xe_guc *guc)


@ -35,8 +35,8 @@ struct xe_guc_exec_queue {
struct xe_sched_msg static_msgs[MAX_STATIC_MSG_TYPE];
/** @lr_tdr: long running TDR worker */
struct work_struct lr_tdr;
/** @fini_async: do final fini async from this worker */
struct work_struct fini_async;
/** @destroy_async: do final destroy async from this worker */
struct work_struct destroy_async;
/** @resume_time: time of last resume */
u64 resume_time;
/** @state: GuC specific state for this xe_exec_queue */


@ -32,6 +32,7 @@
#include "xe_guc_ct.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_guc_id_mgr.h"
#include "xe_guc_klv_helpers.h"
#include "xe_guc_submit_types.h"
#include "xe_hw_engine.h"
#include "xe_hw_fence.h"
@ -316,6 +317,71 @@ int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids)
return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc);
}
/*
* Given that we want to guarantee enough RCS throughput to avoid missing
* frames, we set the yield policy to 20% of each 80ms interval.
*/
#define RC_YIELD_DURATION 80 /* in ms */
#define RC_YIELD_RATIO 20 /* in percent */
static u32 *emit_render_compute_yield_klv(u32 *emit)
{
*emit++ = PREP_GUC_KLV_TAG(SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD);
*emit++ = RC_YIELD_DURATION;
*emit++ = RC_YIELD_RATIO;
return emit;
}
#define SCHEDULING_POLICY_MAX_DWORDS 16
static int guc_init_global_schedule_policy(struct xe_guc *guc)
{
u32 data[SCHEDULING_POLICY_MAX_DWORDS];
u32 *emit = data;
u32 count = 0;
int ret;
if (GUC_SUBMIT_VER(guc) < MAKE_GUC_VER(1, 1, 0))
return 0;
*emit++ = XE_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV;
if (CCS_MASK(guc_to_gt(guc)))
emit = emit_render_compute_yield_klv(emit);
count = emit - data;
if (count > 1) {
xe_assert(guc_to_xe(guc), count <= SCHEDULING_POLICY_MAX_DWORDS);
ret = xe_guc_ct_send_block(&guc->ct, data, count);
if (ret < 0) {
xe_gt_err(guc_to_gt(guc),
"failed to enable GuC sheduling policies: %pe\n",
ERR_PTR(ret));
return ret;
}
}
return 0;
}
int xe_guc_submit_enable(struct xe_guc *guc)
{
int ret;
ret = guc_init_global_schedule_policy(guc);
if (ret)
return ret;
guc->submission_state.enabled = true;
return 0;
}
void xe_guc_submit_disable(struct xe_guc *guc)
{
guc->submission_state.enabled = false;
}
static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa_count)
{
int i;
@ -1277,21 +1343,12 @@ rearm:
return DRM_GPU_SCHED_STAT_NO_HANG;
}
static void __guc_exec_queue_fini_async(struct work_struct *w)
static void guc_exec_queue_fini(struct xe_exec_queue *q)
{
struct xe_guc_exec_queue *ge =
container_of(w, struct xe_guc_exec_queue, fini_async);
struct xe_exec_queue *q = ge->q;
struct xe_guc_exec_queue *ge = q->guc;
struct xe_guc *guc = exec_queue_to_guc(q);
xe_pm_runtime_get(guc_to_xe(guc));
trace_xe_exec_queue_destroy(q);
release_guc_id(guc, q);
if (xe_exec_queue_is_lr(q))
cancel_work_sync(&ge->lr_tdr);
/* Confirm no work left behind accessing device structures */
cancel_delayed_work_sync(&ge->sched.base.work_tdr);
xe_sched_entity_fini(&ge->entity);
xe_sched_fini(&ge->sched);
@ -1300,25 +1357,43 @@ static void __guc_exec_queue_fini_async(struct work_struct *w)
* (timeline name).
*/
kfree_rcu(ge, rcu);
}
static void __guc_exec_queue_destroy_async(struct work_struct *w)
{
struct xe_guc_exec_queue *ge =
container_of(w, struct xe_guc_exec_queue, destroy_async);
struct xe_exec_queue *q = ge->q;
struct xe_guc *guc = exec_queue_to_guc(q);
xe_pm_runtime_get(guc_to_xe(guc));
trace_xe_exec_queue_destroy(q);
if (xe_exec_queue_is_lr(q))
cancel_work_sync(&ge->lr_tdr);
/* Confirm no work left behind accessing device structures */
cancel_delayed_work_sync(&ge->sched.base.work_tdr);
xe_exec_queue_fini(q);
xe_pm_runtime_put(guc_to_xe(guc));
}
static void guc_exec_queue_fini_async(struct xe_exec_queue *q)
static void guc_exec_queue_destroy_async(struct xe_exec_queue *q)
{
struct xe_guc *guc = exec_queue_to_guc(q);
struct xe_device *xe = guc_to_xe(guc);
INIT_WORK(&q->guc->fini_async, __guc_exec_queue_fini_async);
INIT_WORK(&q->guc->destroy_async, __guc_exec_queue_destroy_async);
/* We must block on kernel engines so slabs are empty on driver unload */
if (q->flags & EXEC_QUEUE_FLAG_PERMANENT || exec_queue_wedged(q))
__guc_exec_queue_fini_async(&q->guc->fini_async);
__guc_exec_queue_destroy_async(&q->guc->destroy_async);
else
queue_work(xe->destroy_wq, &q->guc->fini_async);
queue_work(xe->destroy_wq, &q->guc->destroy_async);
}
static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q)
static void __guc_exec_queue_destroy(struct xe_guc *guc, struct xe_exec_queue *q)
{
/*
* Might be done from within the GPU scheduler, need to do async as we
@ -1327,7 +1402,7 @@ static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q)
* this we and don't really care when everything is fini'd, just that it
* is.
*/
guc_exec_queue_fini_async(q);
guc_exec_queue_destroy_async(q);
}
static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg)
@ -1341,7 +1416,7 @@ static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg)
if (exec_queue_registered(q))
disable_scheduling_deregister(guc, q);
else
__guc_exec_queue_fini(guc, q);
__guc_exec_queue_destroy(guc, q);
}
static bool guc_exec_queue_allowed_to_change_state(struct xe_exec_queue *q)
@ -1574,14 +1649,14 @@ static bool guc_exec_queue_try_add_msg(struct xe_exec_queue *q,
#define STATIC_MSG_CLEANUP 0
#define STATIC_MSG_SUSPEND 1
#define STATIC_MSG_RESUME 2
static void guc_exec_queue_fini(struct xe_exec_queue *q)
static void guc_exec_queue_destroy(struct xe_exec_queue *q)
{
struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_CLEANUP;
if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && !exec_queue_wedged(q))
guc_exec_queue_add_msg(q, msg, CLEANUP);
else
__guc_exec_queue_fini(exec_queue_to_guc(q), q);
__guc_exec_queue_destroy(exec_queue_to_guc(q), q);
}
static int guc_exec_queue_set_priority(struct xe_exec_queue *q,
@ -1711,6 +1786,7 @@ static const struct xe_exec_queue_ops guc_exec_queue_ops = {
.init = guc_exec_queue_init,
.kill = guc_exec_queue_kill,
.fini = guc_exec_queue_fini,
.destroy = guc_exec_queue_destroy,
.set_priority = guc_exec_queue_set_priority,
.set_timeslice = guc_exec_queue_set_timeslice,
.set_preempt_timeout = guc_exec_queue_set_preempt_timeout,
@ -1732,7 +1808,7 @@ static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q))
xe_exec_queue_put(q);
else if (exec_queue_destroyed(q))
__guc_exec_queue_fini(guc, q);
__guc_exec_queue_destroy(guc, q);
}
if (q->guc->suspend_pending) {
set_exec_queue_suspended(q);
@ -1989,7 +2065,7 @@ static void handle_deregister_done(struct xe_guc *guc, struct xe_exec_queue *q)
if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q))
xe_exec_queue_put(q);
else
__guc_exec_queue_fini(guc, q);
__guc_exec_queue_destroy(guc, q);
}
int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)


@ -13,6 +13,8 @@ struct xe_exec_queue;
struct xe_guc;
int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids);
int xe_guc_submit_enable(struct xe_guc *guc);
void xe_guc_submit_disable(struct xe_guc *guc);
int xe_guc_submit_reset_prepare(struct xe_guc *guc);
void xe_guc_submit_reset_wait(struct xe_guc *guc);


@ -286,7 +286,7 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg
*/
static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *value)
{
u64 reg_val = 0, min, max;
u32 reg_val = 0;
struct xe_device *xe = hwmon->xe;
struct xe_reg rapl_limit, pkg_power_sku;
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
@ -294,7 +294,7 @@ static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, u32 attr, int channe
mutex_lock(&hwmon->hwmon_lock);
if (hwmon->xe->info.has_mbx_power_limits) {
xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, (u32 *)&reg_val);
xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, &reg_val);
} else {
rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
pkg_power_sku = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
@ -304,19 +304,21 @@ static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, u32 attr, int channe
/* Check if PL limits are disabled. */
if (!(reg_val & PWR_LIM_EN)) {
*value = PL_DISABLE;
drm_info(&hwmon->xe->drm, "%s disabled for channel %d, val 0x%016llx\n",
drm_info(&hwmon->xe->drm, "%s disabled for channel %d, val 0x%08x\n",
PWR_ATTR_TO_STR(attr), channel, reg_val);
goto unlock;
}
reg_val = REG_FIELD_GET(PWR_LIM_VAL, reg_val);
*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
*value = mul_u32_u32(reg_val, SF_POWER) >> hwmon->scl_shift_power;
/* For platforms with mailbox power limit support clamping would be done by pcode. */
if (!hwmon->xe->info.has_mbx_power_limits) {
reg_val = xe_mmio_read64_2x32(mmio, pkg_power_sku);
min = REG_FIELD_GET(PKG_MIN_PWR, reg_val);
max = REG_FIELD_GET(PKG_MAX_PWR, reg_val);
u64 pkg_pwr, min, max;
pkg_pwr = xe_mmio_read64_2x32(mmio, pkg_power_sku);
min = REG_FIELD_GET(PKG_MIN_PWR, pkg_pwr);
max = REG_FIELD_GET(PKG_MAX_PWR, pkg_pwr);
min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power);
max = mul_u64_u32_shr(max, SF_POWER, hwmon->scl_shift_power);
if (min && max)
@ -493,8 +495,8 @@ xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *at
{
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
u32 x, y, x_w = 2; /* 2 bits */
u64 r, tau4, out;
u32 reg_val, x, y, x_w = 2; /* 2 bits */
u64 tau4, out;
int channel = (to_sensor_dev_attr(attr)->index % 2) ? CHANNEL_PKG : CHANNEL_CARD;
u32 power_attr = (to_sensor_dev_attr(attr)->index > 1) ? PL2_HWMON_ATTR : PL1_HWMON_ATTR;
@ -505,23 +507,24 @@ xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *at
mutex_lock(&hwmon->hwmon_lock);
if (hwmon->xe->info.has_mbx_power_limits) {
ret = xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, (u32 *)&r);
ret = xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, &reg_val);
if (ret) {
drm_err(&hwmon->xe->drm,
"power interval read fail, ch %d, attr %d, r 0%llx, ret %d\n",
channel, power_attr, r, ret);
r = 0;
"power interval read fail, ch %d, attr %d, val 0x%08x, ret %d\n",
channel, power_attr, reg_val, ret);
reg_val = 0;
}
} else {
r = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel));
reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT,
channel));
}
mutex_unlock(&hwmon->hwmon_lock);
xe_pm_runtime_put(hwmon->xe);
x = REG_FIELD_GET(PWR_LIM_TIME_X, r);
y = REG_FIELD_GET(PWR_LIM_TIME_Y, r);
x = REG_FIELD_GET(PWR_LIM_TIME_X, reg_val);
y = REG_FIELD_GET(PWR_LIM_TIME_Y, reg_val);
/*
* tau = (1 + (x / 4)) * power(2,y), x = bits(23:22), y = bits(21:17)


@ -35,6 +35,10 @@ static const struct intel_dg_nvm_region regions[INTEL_DG_NVM_REGIONS] = {
static void xe_nvm_release_dev(struct device *dev)
{
struct auxiliary_device *aux = container_of(dev, struct auxiliary_device, dev);
struct intel_dg_nvm_dev *nvm = container_of(aux, struct intel_dg_nvm_dev, aux_dev);
kfree(nvm);
}
static bool xe_nvm_non_posted_erase(struct xe_device *xe)
@ -162,6 +166,5 @@ void xe_nvm_fini(struct xe_device *xe)
auxiliary_device_delete(&nvm->aux_dev);
auxiliary_device_uninit(&nvm->aux_dev);
kfree(nvm);
xe->nvm = NULL;
}


@ -44,16 +44,18 @@ int xe_tile_sysfs_init(struct xe_tile *tile)
kt->tile = tile;
err = kobject_add(&kt->base, &dev->kobj, "tile%d", tile->id);
if (err) {
kobject_put(&kt->base);
return err;
}
if (err)
goto err_object;
tile->sysfs = &kt->base;
err = xe_vram_freq_sysfs_init(tile);
if (err)
return err;
goto err_object;
return devm_add_action_or_reset(xe->drm.dev, tile_sysfs_fini, tile);
err_object:
kobject_put(&kt->base);
return err;
}


@ -240,8 +240,8 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
pfence = xe_preempt_fence_create(q, q->lr.context,
++q->lr.seqno);
if (!pfence) {
err = -ENOMEM;
if (IS_ERR(pfence)) {
err = PTR_ERR(pfence);
goto out_fini;
}


@ -597,8 +597,6 @@ config HID_LED
config HID_LENOVO
tristate "Lenovo / Thinkpad devices"
depends on ACPI
select ACPI_PLATFORM_PROFILE
select NEW_LEDS
select LEDS_CLASS
help


@ -39,8 +39,12 @@ int amd_sfh_get_report(struct hid_device *hid, int report_id, int report_type)
struct amdtp_hid_data *hid_data = hid->driver_data;
struct amdtp_cl_data *cli_data = hid_data->cli_data;
struct request_list *req_list = &cli_data->req_list;
struct amd_input_data *in_data = cli_data->in_data;
struct amd_mp2_dev *mp2;
int i;
mp2 = container_of(in_data, struct amd_mp2_dev, in_data);
guard(mutex)(&mp2->lock);
for (i = 0; i < cli_data->num_hid_devices; i++) {
if (cli_data->hid_sensor_hubs[i] == hid) {
struct request_list *new = kzalloc(sizeof(*new), GFP_KERNEL);
@ -75,6 +79,8 @@ void amd_sfh_work(struct work_struct *work)
u8 report_id, node_type;
u8 report_size = 0;
mp2 = container_of(in_data, struct amd_mp2_dev, in_data);
guard(mutex)(&mp2->lock);
req_node = list_last_entry(&req_list->list, struct request_list, list);
list_del(&req_node->list);
current_index = req_node->current_index;
@ -83,7 +89,6 @@ void amd_sfh_work(struct work_struct *work)
node_type = req_node->report_type;
kfree(req_node);
mp2 = container_of(in_data, struct amd_mp2_dev, in_data);
mp2_ops = mp2->mp2_ops;
if (node_type == HID_FEATURE_REPORT) {
report_size = mp2_ops->get_feat_rep(sensor_index, report_id,
@ -107,6 +112,8 @@ void amd_sfh_work(struct work_struct *work)
cli_data->cur_hid_dev = current_index;
cli_data->sensor_requested_cnt[current_index] = 0;
amdtp_hid_wakeup(cli_data->hid_sensor_hubs[current_index]);
if (!list_empty(&req_list->list))
schedule_delayed_work(&cli_data->work, 0);
}
void amd_sfh_work_buffer(struct work_struct *work)
@ -117,9 +124,10 @@ void amd_sfh_work_buffer(struct work_struct *work)
u8 report_size;
int i;
mp2 = container_of(in_data, struct amd_mp2_dev, in_data);
guard(mutex)(&mp2->lock);
for (i = 0; i < cli_data->num_hid_devices; i++) {
if (cli_data->sensor_sts[i] == SENSOR_ENABLED) {
mp2 = container_of(in_data, struct amd_mp2_dev, in_data);
report_size = mp2->mp2_ops->get_in_rep(i, cli_data->sensor_idx[i],
cli_data->report_id[i], in_data);
hid_input_report(cli_data->hid_sensor_hubs[i], HID_INPUT_REPORT,


@ -10,6 +10,7 @@
#ifndef AMD_SFH_COMMON_H
#define AMD_SFH_COMMON_H
#include <linux/mutex.h>
#include <linux/pci.h>
#include "amd_sfh_hid.h"
@ -59,6 +60,8 @@ struct amd_mp2_dev {
u32 mp2_acs;
struct sfh_dev_status dev_en;
struct work_struct work;
/* mp2 to protect data */
struct mutex lock;
u8 init_done;
u8 rver;
};


@ -466,6 +466,10 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
if (!privdata->cl_data)
return -ENOMEM;
rc = devm_mutex_init(&pdev->dev, &privdata->lock);
if (rc)
return rc;
privdata->sfh1_1_ops = (const struct amd_sfh1_1_ops *)id->driver_data;
if (privdata->sfh1_1_ops) {
if (boot_cpu_data.x86 >= 0x1A)


@ -974,7 +974,10 @@ static int asus_input_mapping(struct hid_device *hdev,
case 0xc4: asus_map_key_clear(KEY_KBDILLUMUP); break;
case 0xc5: asus_map_key_clear(KEY_KBDILLUMDOWN); break;
case 0xc7: asus_map_key_clear(KEY_KBDILLUMTOGGLE); break;
case 0x4e: asus_map_key_clear(KEY_FN_ESC); break;
case 0x7e: asus_map_key_clear(KEY_EMOJI_PICKER); break;
case 0x8b: asus_map_key_clear(KEY_PROG1); break; /* ProArt Creator Hub key */
case 0x6b: asus_map_key_clear(KEY_F21); break; /* ASUS touchpad toggle */
case 0x38: asus_map_key_clear(KEY_PROG1); break; /* ROG key */
case 0xba: asus_map_key_clear(KEY_PROG2); break; /* Fn+C ASUS Splendid */


@ -229,10 +229,12 @@ static int cp2112_gpio_set_unlocked(struct cp2112_device *dev,
ret = hid_hw_raw_request(hdev, CP2112_GPIO_SET, buf,
CP2112_GPIO_SET_LENGTH, HID_FEATURE_REPORT,
HID_REQ_SET_REPORT);
if (ret < 0)
if (ret != CP2112_GPIO_SET_LENGTH) {
hid_err(hdev, "error setting GPIO values: %d\n", ret);
return ret < 0 ? ret : -EIO;
}
return ret;
return 0;
}
static int cp2112_gpio_set(struct gpio_chip *chip, unsigned int offset,
@ -309,9 +311,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
* Set gpio value when output direction is already set,
* as specified in AN495, Rev. 0.2, cpt. 4.4
*/
cp2112_gpio_set_unlocked(dev, offset, value);
return 0;
return cp2112_gpio_set_unlocked(dev, offset, value);
}
static int cp2112_hid_get(struct hid_device *hdev, unsigned char report_number,


@ -32,8 +32,6 @@
#include <linux/leds.h>
#include <linux/workqueue.h>
#include <linux/platform_profile.h>
#include "hid-ids.h"
/* Userspace expects F20 for mic-mute KEY_MICMUTE does not work */
@ -734,7 +732,7 @@ static int lenovo_raw_event_TP_X12_tab(struct hid_device *hdev, u32 raw_data)
report_key_event(input, KEY_RFKILL);
return 1;
}
platform_profile_cycle();
report_key_event(input, KEY_PERFORMANCE);
return 1;
case TP_X12_RAW_HOTKEY_FN_F10:
/* TAB1 has PICKUP Phone and TAB2 use Snipping tool*/


@ -997,6 +997,8 @@ static const struct pci_device_id quicki2c_pci_tbl[] = {
{ PCI_DEVICE_DATA(INTEL, THC_PTL_H_DEVICE_ID_I2C_PORT2, &ptl_ddata) },
{ PCI_DEVICE_DATA(INTEL, THC_PTL_U_DEVICE_ID_I2C_PORT1, &ptl_ddata) },
{ PCI_DEVICE_DATA(INTEL, THC_PTL_U_DEVICE_ID_I2C_PORT2, &ptl_ddata) },
{ PCI_DEVICE_DATA(INTEL, THC_WCL_DEVICE_ID_I2C_PORT1, &ptl_ddata) },
{ PCI_DEVICE_DATA(INTEL, THC_WCL_DEVICE_ID_I2C_PORT2, &ptl_ddata) },
{ }
};
MODULE_DEVICE_TABLE(pci, quicki2c_pci_tbl);


@ -13,6 +13,8 @@
#define PCI_DEVICE_ID_INTEL_THC_PTL_H_DEVICE_ID_I2C_PORT2 0xE34A
#define PCI_DEVICE_ID_INTEL_THC_PTL_U_DEVICE_ID_I2C_PORT1 0xE448
#define PCI_DEVICE_ID_INTEL_THC_PTL_U_DEVICE_ID_I2C_PORT2 0xE44A
#define PCI_DEVICE_ID_INTEL_THC_WCL_DEVICE_ID_I2C_PORT1 0x4D48
#define PCI_DEVICE_ID_INTEL_THC_WCL_DEVICE_ID_I2C_PORT2 0x4D4A
/* Packet size value, the unit is 16 bytes */
#define MAX_PACKET_SIZE_VALUE_LNL 256


@ -976,6 +976,8 @@ static const struct pci_device_id quickspi_pci_tbl[] = {
{PCI_DEVICE_DATA(INTEL, THC_PTL_H_DEVICE_ID_SPI_PORT2, &ptl), },
{PCI_DEVICE_DATA(INTEL, THC_PTL_U_DEVICE_ID_SPI_PORT1, &ptl), },
{PCI_DEVICE_DATA(INTEL, THC_PTL_U_DEVICE_ID_SPI_PORT2, &ptl), },
{PCI_DEVICE_DATA(INTEL, THC_WCL_DEVICE_ID_SPI_PORT1, &ptl), },
{PCI_DEVICE_DATA(INTEL, THC_WCL_DEVICE_ID_SPI_PORT2, &ptl), },
{}
};
MODULE_DEVICE_TABLE(pci, quickspi_pci_tbl);


@ -19,6 +19,8 @@
#define PCI_DEVICE_ID_INTEL_THC_PTL_H_DEVICE_ID_SPI_PORT2 0xE34B
#define PCI_DEVICE_ID_INTEL_THC_PTL_U_DEVICE_ID_SPI_PORT1 0xE449
#define PCI_DEVICE_ID_INTEL_THC_PTL_U_DEVICE_ID_SPI_PORT2 0xE44B
#define PCI_DEVICE_ID_INTEL_THC_WCL_DEVICE_ID_SPI_PORT1 0x4D49
#define PCI_DEVICE_ID_INTEL_THC_WCL_DEVICE_ID_SPI_PORT2 0x4D4B
/* HIDSPI special ACPI parameters DSM methods */
#define ACPI_QUICKSPI_REVISION_NUM 2


@ -233,6 +233,7 @@ static u16 get_legacy_obj_type(u16 opcode)
{
switch (opcode) {
case MLX5_CMD_OP_CREATE_RQ:
case MLX5_CMD_OP_CREATE_RMP:
return MLX5_EVENT_QUEUE_TYPE_RQ;
case MLX5_CMD_OP_CREATE_QP:
return MLX5_EVENT_QUEUE_TYPE_QP;


@ -555,6 +555,7 @@ struct gcr3_tbl_info {
};
struct amd_io_pgtable {
seqcount_t seqcount; /* Protects root/mode update */
struct io_pgtable pgtbl;
int mode;
u64 *root;


@ -1455,12 +1455,12 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
PCI_FUNC(e->devid));
devid = e->devid;
for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
if (alias)
if (alias) {
for (dev_i = devid_start; dev_i <= devid; ++dev_i)
pci_seg->alias_table[dev_i] = devid_to;
set_dev_entry_from_acpi(iommu, devid_to, flags, ext_flags);
}
set_dev_entry_from_acpi_range(iommu, devid_start, devid, flags, ext_flags);
set_dev_entry_from_acpi(iommu, devid_to, flags, ext_flags);
break;
case IVHD_DEV_SPECIAL: {
u8 handle, type;
@ -3067,7 +3067,8 @@ static int __init early_amd_iommu_init(void)
if (!boot_cpu_has(X86_FEATURE_CX16)) {
pr_err("Failed to initialize. The CMPXCHG16B feature is required.\n");
return -EINVAL;
ret = -EINVAL;
goto out;
}
/*


@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/seqlock.h>
#include <asm/barrier.h>
@ -130,8 +131,11 @@ static bool increase_address_space(struct amd_io_pgtable *pgtable,
*pte = PM_LEVEL_PDE(pgtable->mode, iommu_virt_to_phys(pgtable->root));
write_seqcount_begin(&pgtable->seqcount);
pgtable->root = pte;
pgtable->mode += 1;
write_seqcount_end(&pgtable->seqcount);
amd_iommu_update_and_flush_device_table(domain);
pte = NULL;
@ -153,6 +157,7 @@ static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
{
unsigned long last_addr = address + (page_size - 1);
struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
unsigned int seqcount;
int level, end_lvl;
u64 *pte, *page;
@ -170,8 +175,14 @@ static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
}
level = pgtable->mode - 1;
pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
do {
seqcount = read_seqcount_begin(&pgtable->seqcount);
level = pgtable->mode - 1;
pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
} while (read_seqcount_retry(&pgtable->seqcount, seqcount));
address = PAGE_SIZE_ALIGN(address, page_size);
end_lvl = PAGE_SIZE_LEVEL(page_size);
@ -249,6 +260,7 @@ static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
unsigned long *page_size)
{
int level;
unsigned int seqcount;
u64 *pte;
*page_size = 0;
@ -256,8 +268,12 @@ static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
if (address > PM_LEVEL_SIZE(pgtable->mode))
return NULL;
level = pgtable->mode - 1;
pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
do {
seqcount = read_seqcount_begin(&pgtable->seqcount);
level = pgtable->mode - 1;
pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
} while (read_seqcount_retry(&pgtable->seqcount, seqcount));
*page_size = PTE_LEVEL_PAGE_SIZE(level);
while (level > 0) {
@ -541,6 +557,7 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
if (!pgtable->root)
return NULL;
pgtable->mode = PAGE_MODE_3_LEVEL;
seqcount_init(&pgtable->seqcount);
cfg->pgsize_bitmap = amd_iommu_pgsize_bitmap;
cfg->ias = IOMMU_IN_ADDR_BIT_SIZE;


@ -1575,6 +1575,10 @@ static void switch_to_super_page(struct dmar_domain *domain,
unsigned long lvl_pages = lvl_to_nr_pages(level);
struct dma_pte *pte = NULL;
if (WARN_ON(!IS_ALIGNED(start_pfn, lvl_pages) ||
!IS_ALIGNED(end_pfn + 1, lvl_pages)))
return;
while (start_pfn <= end_pfn) {
if (!pte)
pte = pfn_to_dma_pte(domain, start_pfn, &level,
@ -1650,7 +1654,8 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
unsigned long pages_to_remove;
pteval |= DMA_PTE_LARGE_PAGE;
pages_to_remove = min_t(unsigned long, nr_pages,
pages_to_remove = min_t(unsigned long,
round_down(nr_pages, lvl_pages),
nr_pte_to_next_page(pte) * lvl_pages);
end_pfn = iov_pfn + pages_to_remove - 1;
switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl);


@ -711,6 +711,8 @@ iommufd_hw_pagetable_detach(struct iommufd_device *idev, ioasid_t pasid)
iopt_remove_reserved_iova(&hwpt_paging->ioas->iopt, idev->dev);
mutex_unlock(&igroup->lock);
iommufd_hw_pagetable_put(idev->ictx, hwpt);
/* Caller must destroy hwpt */
return hwpt;
}
@ -1057,7 +1059,6 @@ void iommufd_device_detach(struct iommufd_device *idev, ioasid_t pasid)
hwpt = iommufd_hw_pagetable_detach(idev, pasid);
if (!hwpt)
return;
iommufd_hw_pagetable_put(idev->ictx, hwpt);
refcount_dec(&idev->obj.users);
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_detach, "IOMMUFD");


@ -393,12 +393,12 @@ static int iommufd_eventq_init(struct iommufd_eventq *eventq, char *name,
const struct file_operations *fops)
{
struct file *filep;
int fdno;
spin_lock_init(&eventq->lock);
INIT_LIST_HEAD(&eventq->deliver);
init_waitqueue_head(&eventq->wait_queue);
/* The filep is fput() by the core code during failure */
filep = anon_inode_getfile(name, fops, eventq, O_RDWR);
if (IS_ERR(filep))
return PTR_ERR(filep);
@ -408,10 +408,7 @@ static int iommufd_eventq_init(struct iommufd_eventq *eventq, char *name,
eventq->filep = filep;
refcount_inc(&eventq->obj.users);
fdno = get_unused_fd_flags(O_CLOEXEC);
if (fdno < 0)
fput(filep);
return fdno;
return get_unused_fd_flags(O_CLOEXEC);
}
static const struct file_operations iommufd_fault_fops =
@ -452,7 +449,6 @@ int iommufd_fault_alloc(struct iommufd_ucmd *ucmd)
return 0;
out_put_fdno:
put_unused_fd(fdno);
fput(fault->common.filep);
return rc;
}
@ -536,7 +532,6 @@ int iommufd_veventq_alloc(struct iommufd_ucmd *ucmd)
out_put_fdno:
put_unused_fd(fdno);
fput(veventq->common.filep);
out_abort:
iommufd_object_abort_and_destroy(ucmd->ictx, &veventq->common.obj);
out_unlock_veventqs:


@ -454,9 +454,8 @@ static inline void iommufd_hw_pagetable_put(struct iommufd_ctx *ictx,
if (hwpt->obj.type == IOMMUFD_OBJ_HWPT_PAGING) {
struct iommufd_hwpt_paging *hwpt_paging = to_hwpt_paging(hwpt);
lockdep_assert_not_held(&hwpt_paging->ioas->mutex);
if (hwpt_paging->auto_domain) {
lockdep_assert_not_held(&hwpt_paging->ioas->mutex);
iommufd_object_put_and_try_destroy(ictx, &hwpt->obj);
return;
}


@ -23,6 +23,7 @@
#include "iommufd_test.h"
struct iommufd_object_ops {
size_t file_offset;
void (*pre_destroy)(struct iommufd_object *obj);
void (*destroy)(struct iommufd_object *obj);
void (*abort)(struct iommufd_object *obj);
@ -121,6 +122,10 @@ void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj)
old = xas_store(&xas, NULL);
xa_unlock(&ictx->objects);
WARN_ON(old != XA_ZERO_ENTRY);
if (WARN_ON(!refcount_dec_and_test(&obj->users)))
return;
kfree(obj);
}
@ -131,10 +136,30 @@ void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj)
void iommufd_object_abort_and_destroy(struct iommufd_ctx *ictx,
struct iommufd_object *obj)
{
if (iommufd_object_ops[obj->type].abort)
iommufd_object_ops[obj->type].abort(obj);
const struct iommufd_object_ops *ops = &iommufd_object_ops[obj->type];
if (ops->file_offset) {
struct file **filep = ((void *)obj) + ops->file_offset;
/*
* A file should hold a users refcount while the file is open
* and put it back in its release. The file should hold a
* pointer to obj in their private data. Normal fput() is
* deferred to a workqueue and can get out of order with the
* following kfree(obj). Using the sync version ensures the
* release happens immediately. During abort we require the file
* refcount is one at this point - meaning the object alloc
* function cannot do anything to allow another thread to take a
* refcount prior to a guaranteed success.
*/
if (*filep)
__fput_sync(*filep);
}
if (ops->abort)
ops->abort(obj);
else
iommufd_object_ops[obj->type].destroy(obj);
ops->destroy(obj);
iommufd_object_abort(ictx, obj);
}
@ -550,16 +575,23 @@ static int iommufd_fops_mmap(struct file *filp, struct vm_area_struct *vma)
if (vma->vm_flags & VM_EXEC)
return -EPERM;
mtree_lock(&ictx->mt_mmap);
/* vma->vm_pgoff carries a page-shifted start position to an immap */
immap = mtree_load(&ictx->mt_mmap, vma->vm_pgoff << PAGE_SHIFT);
if (!immap)
if (!immap || !refcount_inc_not_zero(&immap->owner->users)) {
mtree_unlock(&ictx->mt_mmap);
return -ENXIO;
}
mtree_unlock(&ictx->mt_mmap);
/*
* mtree_load() returns the immap for any contained mmio_addr, so only
* allow the exact immap thing to be mapped
*/
if (vma->vm_pgoff != immap->vm_pgoff || length != immap->length)
return -ENXIO;
if (vma->vm_pgoff != immap->vm_pgoff || length != immap->length) {
rc = -ENXIO;
goto err_refcount;
}
vma->vm_pgoff = 0;
vma->vm_private_data = immap;
@ -570,10 +602,11 @@ static int iommufd_fops_mmap(struct file *filp, struct vm_area_struct *vma)
immap->mmio_addr >> PAGE_SHIFT, length,
vma->vm_page_prot);
if (rc)
return rc;
goto err_refcount;
return 0;
/* vm_ops.open won't be called for mmap itself. */
refcount_inc(&immap->owner->users);
err_refcount:
refcount_dec(&immap->owner->users);
return rc;
}
@ -651,6 +684,12 @@ void iommufd_ctx_put(struct iommufd_ctx *ictx)
}
EXPORT_SYMBOL_NS_GPL(iommufd_ctx_put, "IOMMUFD");
#define IOMMUFD_FILE_OFFSET(_struct, _filep, _obj) \
.file_offset = (offsetof(_struct, _filep) + \
BUILD_BUG_ON_ZERO(!__same_type( \
struct file *, ((_struct *)NULL)->_filep)) + \
BUILD_BUG_ON_ZERO(offsetof(_struct, _obj)))
static const struct iommufd_object_ops iommufd_object_ops[] = {
[IOMMUFD_OBJ_ACCESS] = {
.destroy = iommufd_access_destroy_object,
@ -661,6 +700,7 @@ static const struct iommufd_object_ops iommufd_object_ops[] = {
},
[IOMMUFD_OBJ_FAULT] = {
.destroy = iommufd_fault_destroy,
IOMMUFD_FILE_OFFSET(struct iommufd_fault, common.filep, common.obj),
},
[IOMMUFD_OBJ_HW_QUEUE] = {
.destroy = iommufd_hw_queue_destroy,
@ -683,6 +723,7 @@ static const struct iommufd_object_ops iommufd_object_ops[] = {
[IOMMUFD_OBJ_VEVENTQ] = {
.destroy = iommufd_veventq_destroy,
.abort = iommufd_veventq_abort,
IOMMUFD_FILE_OFFSET(struct iommufd_veventq, common.filep, common.obj),
},
[IOMMUFD_OBJ_VIOMMU] = {
.destroy = iommufd_viommu_destroy,


@ -612,6 +612,23 @@ static u64 get_iota_region_flag(struct s390_domain *domain)
}
}
static bool reg_ioat_propagate_error(int cc, u8 status)
{
/*
* If the device is in the error state the reset routine
* will register the IOAT of the newly set domain on re-enable
*/
if (cc == ZPCI_CC_ERR && status == ZPCI_PCI_ST_FUNC_NOT_AVAIL)
return false;
/*
* If the device was removed treat registration as success
* and let the subsequent error event trigger tear down.
*/
if (cc == ZPCI_CC_INVAL_HANDLE)
return false;
return cc != ZPCI_CC_OK;
}
static int s390_iommu_domain_reg_ioat(struct zpci_dev *zdev,
struct iommu_domain *domain, u8 *status)
{
@ -696,7 +713,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
/* If we fail now DMA remains blocked via blocking domain */
cc = s390_iommu_domain_reg_ioat(zdev, domain, &status);
if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL)
if (reg_ioat_propagate_error(cc, status))
return -EIO;
zdev->dma_table = s390_domain->dma_table;
zdev_s390_domain_update(zdev, domain);
@ -1032,7 +1049,8 @@ struct zpci_iommu_ctrs *zpci_get_iommu_ctrs(struct zpci_dev *zdev)
lockdep_assert_held(&zdev->dom_lock);
if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED)
if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED ||
zdev->s390_domain->type == IOMMU_DOMAIN_IDENTITY)
return NULL;
s390_domain = to_s390_domain(zdev->s390_domain);
@ -1123,12 +1141,7 @@ static int s390_attach_dev_identity(struct iommu_domain *domain,
/* If we fail now DMA remains blocked via blocking domain */
cc = s390_iommu_domain_reg_ioat(zdev, domain, &status);
/*
* If the device is undergoing error recovery the reset code
* will re-establish the new domain.
*/
if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL)
if (reg_ioat_propagate_error(cc, status))
return -EIO;
zdev_s390_domain_update(zdev, domain);


@ -73,6 +73,7 @@ static int linear_set_limits(struct mddev *mddev)
md_init_stacking_limits(&lim);
lim.max_hw_sectors = mddev->chunk_sectors;
lim.max_write_zeroes_sectors = mddev->chunk_sectors;
lim.max_hw_wzeroes_unmap_sectors = mddev->chunk_sectors;
lim.io_min = mddev->chunk_sectors << 9;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
if (err)


@ -382,6 +382,7 @@ static int raid0_set_limits(struct mddev *mddev)
md_init_stacking_limits(&lim);
lim.max_hw_sectors = mddev->chunk_sectors;
lim.max_write_zeroes_sectors = mddev->chunk_sectors;
lim.max_hw_wzeroes_unmap_sectors = mddev->chunk_sectors;
lim.io_min = mddev->chunk_sectors << 9;
lim.io_opt = lim.io_min * mddev->raid_disks;
lim.chunk_sectors = mddev->chunk_sectors;


@ -3211,6 +3211,7 @@ static int raid1_set_limits(struct mddev *mddev)
md_init_stacking_limits(&lim);
lim.max_write_zeroes_sectors = 0;
lim.max_hw_wzeroes_unmap_sectors = 0;
lim.features |= BLK_FEAT_ATOMIC_WRITES;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
if (err)

Some files were not shown because too many files have changed in this diff.