Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR (net-6.15-rc8).

Conflicts:
  80f2ab46c2 ("irdma: free iwdev->rf after removing MSI-X")
  4bcc063939 ("ice, irdma: fix an off by one in error handling code")
  c24a65b6a2 ("iidc/ice/irdma: Update IDC to support multiple consumers")
https://lore.kernel.org/20250513130630.280ee6c5@canb.auug.org.au

No extra adjacent changes.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 33e1b1b399 (Jakub Kicinski, 2025-05-22 09:42:41 -07:00)
226 changed files with 2340 additions and 1238 deletions


@ -1,6 +1,6 @@
What: /sys/bus/hid/drivers/hid-appletb-kbd/<dev>/mode
Date: September, 2023
KernelVersion: 6.5
Date: March, 2025
KernelVersion: 6.15
Contact: linux-input@vger.kernel.org
Description:
The set of keys displayed on the Touch Bar.


@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/can/microchip,mcp2510.yaml#
$id: http://devicetree.org/schemas/net/can/microchip,mcp2510.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Microchip MCP251X stand-alone CAN controller


@ -10146,6 +10146,13 @@ F: drivers/gpio/gpio-regmap.c
F: include/linux/gpio/regmap.h
K: (devm_)?gpio_regmap_(un)?register
GPIO SLOPPY LOGIC ANALYZER
M: Wolfram Sang <wsa+renesas@sang-engineering.com>
S: Supported
F: Documentation/dev-tools/gpio-sloppy-logic-analyzer.rst
F: drivers/gpio/gpio-sloppy-logic-analyzer.c
F: tools/gpio/gpio-sloppy-logic-analyzer.sh
GPIO SUBSYSTEM
M: Linus Walleij <linus.walleij@linaro.org>
M: Bartosz Golaszewski <brgl@bgdev.pl>
@ -15549,6 +15556,18 @@ S: Maintained
F: include/linux/execmem.h
F: mm/execmem.c
MEMORY MANAGEMENT - GUP (GET USER PAGES)
M: Andrew Morton <akpm@linux-foundation.org>
M: David Hildenbrand <david@redhat.com>
R: Jason Gunthorpe <jgg@nvidia.com>
R: John Hubbard <jhubbard@nvidia.com>
R: Peter Xu <peterx@redhat.com>
L: linux-mm@kvack.org
S: Maintained
W: http://www.linux-mm.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
F: mm/gup.c
MEMORY MANAGEMENT - NUMA MEMBLOCKS AND NUMA EMULATION
M: Andrew Morton <akpm@linux-foundation.org>
M: Mike Rapoport <rppt@kernel.org>
@ -18451,7 +18470,7 @@ F: include/uapi/linux/ppdev.h
PARAVIRT_OPS INTERFACE
M: Juergen Gross <jgross@suse.com>
R: Ajay Kaher <ajay.kaher@broadcom.com>
R: Alexey Makhalov <alexey.amakhalov@broadcom.com>
R: Alexey Makhalov <alexey.makhalov@broadcom.com>
R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
L: virtualization@lists.linux.dev
L: x86@kernel.org
@ -20613,8 +20632,8 @@ F: Documentation/devicetree/bindings/i2c/renesas,iic-emev2.yaml
F: drivers/i2c/busses/i2c-emev2.c
RENESAS ETHERNET AVB DRIVER
M: Paul Barker <paul.barker.ct@bp.renesas.com>
M: Niklas Söderlund <niklas.soderlund@ragnatech.se>
R: Paul Barker <paul@pbarker.dev>
L: netdev@vger.kernel.org
L: linux-renesas-soc@vger.kernel.org
S: Maintained
@ -25944,7 +25963,7 @@ F: drivers/misc/vmw_balloon.c
VMWARE HYPERVISOR INTERFACE
M: Ajay Kaher <ajay.kaher@broadcom.com>
M: Alexey Makhalov <alexey.amakhalov@broadcom.com>
M: Alexey Makhalov <alexey.makhalov@broadcom.com>
R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
L: virtualization@lists.linux.dev
L: x86@kernel.org
@ -25972,7 +25991,7 @@ F: drivers/scsi/vmw_pvscsi.h
VMWARE VIRTUAL PTP CLOCK DRIVER
M: Nick Shi <nick.shi@broadcom.com>
R: Ajay Kaher <ajay.kaher@broadcom.com>
R: Alexey Makhalov <alexey.amakhalov@broadcom.com>
R: Alexey Makhalov <alexey.makhalov@broadcom.com>
R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
L: netdev@vger.kernel.org
S: Supported


@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 15
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION = -rc7
NAME = Baby Opossum Posse
# *DOCUMENTATION*


@ -55,7 +55,7 @@ static inline void instruction_pointer_set(struct pt_regs *regs, unsigned long v
/* Query offset/name of register from its name/offset */
extern int regs_query_register_offset(const char *name);
#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last))
#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last) - sizeof(unsigned long))
/**
* regs_get_register() - get register value from its offset


@ -15,7 +15,6 @@ typedef u32 uprobe_opcode_t;
#define UPROBE_XOLBP_INSN __emit_break(BRK_UPROBE_XOLBP)
struct arch_uprobe {
unsigned long resume_era;
u32 insn[2];
u32 ixol[2];
bool simulate;


@ -16,6 +16,7 @@
#include <asm/stackframe.h>
#include <asm/thread_info.h>
.section .cpuidle.text, "ax"
.align 5
SYM_FUNC_START(__arch_cpu_idle)
/* start of idle interrupt region */
@ -31,14 +32,16 @@ SYM_FUNC_START(__arch_cpu_idle)
*/
idle 0
/* end of idle interrupt region */
1: jr ra
idle_exit:
jr ra
SYM_FUNC_END(__arch_cpu_idle)
.previous
SYM_CODE_START(handle_vint)
UNWIND_HINT_UNDEFINED
BACKUP_T0T1
SAVE_ALL
la_abs t1, 1b
la_abs t1, idle_exit
LONG_L t0, sp, PT_ERA
/* 3 instructions idle interrupt region */
ori t0, t0, 0b1100


@ -18,11 +18,28 @@ static unsigned int euen_mask = CSR_EUEN_FPEN;
static DEFINE_PER_CPU(bool, in_kernel_fpu);
static DEFINE_PER_CPU(unsigned int, euen_current);
static inline void fpregs_lock(void)
{
if (IS_ENABLED(CONFIG_PREEMPT_RT))
preempt_disable();
else
local_bh_disable();
}
static inline void fpregs_unlock(void)
{
if (IS_ENABLED(CONFIG_PREEMPT_RT))
preempt_enable();
else
local_bh_enable();
}
void kernel_fpu_begin(void)
{
unsigned int *euen_curr;
preempt_disable();
if (!irqs_disabled())
fpregs_lock();
WARN_ON(this_cpu_read(in_kernel_fpu));
@ -73,7 +90,8 @@ void kernel_fpu_end(void)
this_cpu_write(in_kernel_fpu, false);
preempt_enable();
if (!irqs_disabled())
fpregs_unlock();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);


@ -111,7 +111,7 @@ static unsigned long __init get_loops_per_jiffy(void)
return lpj;
}
static long init_offset __nosavedata;
static long init_offset;
void save_counter(void)
{


@ -42,7 +42,6 @@ int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
utask->autask.saved_trap_nr = current->thread.trap_nr;
current->thread.trap_nr = UPROBE_TRAP_NR;
instruction_pointer_set(regs, utask->xol_vaddr);
user_enable_single_step(current);
return 0;
}
@ -53,13 +52,7 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
current->thread.trap_nr = utask->autask.saved_trap_nr;
if (auprobe->simulate)
instruction_pointer_set(regs, auprobe->resume_era);
else
instruction_pointer_set(regs, utask->vaddr + LOONGARCH_INSN_SIZE);
user_disable_single_step(current);
instruction_pointer_set(regs, utask->vaddr + LOONGARCH_INSN_SIZE);
return 0;
}
@ -70,7 +63,6 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
current->thread.trap_nr = utask->autask.saved_trap_nr;
instruction_pointer_set(regs, utask->vaddr);
user_disable_single_step(current);
}
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
@ -90,7 +82,6 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
insn.word = auprobe->insn[0];
arch_simulate_insn(insn, regs);
auprobe->resume_era = regs->csr_era;
return true;
}


@ -2,6 +2,7 @@
#include <asm/fpu.h>
#include <asm/loongson.h>
#include <asm/sections.h>
#include <asm/time.h>
#include <asm/tlbflush.h>
#include <linux/suspend.h>
@ -14,6 +15,7 @@ struct pt_regs saved_regs;
void save_processor_state(void)
{
save_counter();
saved_crmd = csr_read32(LOONGARCH_CSR_CRMD);
saved_prmd = csr_read32(LOONGARCH_CSR_PRMD);
saved_euen = csr_read32(LOONGARCH_CSR_EUEN);
@ -26,6 +28,7 @@ void save_processor_state(void)
void restore_processor_state(void)
{
sync_counter();
csr_write32(saved_crmd, LOONGARCH_CSR_CRMD);
csr_write32(saved_prmd, LOONGARCH_CSR_PRMD);
csr_write32(saved_euen, LOONGARCH_CSR_EUEN);


@ -959,6 +959,102 @@ void snp_accept_memory(phys_addr_t start, phys_addr_t end)
set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);
}
static int vmgexit_ap_control(u64 event, struct sev_es_save_area *vmsa, u32 apic_id)
{
bool create = event != SVM_VMGEXIT_AP_DESTROY;
struct ghcb_state state;
unsigned long flags;
struct ghcb *ghcb;
int ret = 0;
local_irq_save(flags);
ghcb = __sev_get_ghcb(&state);
vc_ghcb_invalidate(ghcb);
if (create)
ghcb_set_rax(ghcb, vmsa->sev_features);
ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_CREATION);
ghcb_set_sw_exit_info_1(ghcb,
((u64)apic_id << 32) |
((u64)snp_vmpl << 16) |
event);
ghcb_set_sw_exit_info_2(ghcb, __pa(vmsa));
sev_es_wr_ghcb_msr(__pa(ghcb));
VMGEXIT();
if (!ghcb_sw_exit_info_1_is_valid(ghcb) ||
lower_32_bits(ghcb->save.sw_exit_info_1)) {
pr_err("SNP AP %s error\n", (create ? "CREATE" : "DESTROY"));
ret = -EINVAL;
}
__sev_put_ghcb(&state);
local_irq_restore(flags);
return ret;
}
static int snp_set_vmsa(void *va, void *caa, int apic_id, bool make_vmsa)
{
int ret;
if (snp_vmpl) {
struct svsm_call call = {};
unsigned long flags;
local_irq_save(flags);
call.caa = this_cpu_read(svsm_caa);
call.rcx = __pa(va);
if (make_vmsa) {
/* Protocol 0, Call ID 2 */
call.rax = SVSM_CORE_CALL(SVSM_CORE_CREATE_VCPU);
call.rdx = __pa(caa);
call.r8 = apic_id;
} else {
/* Protocol 0, Call ID 3 */
call.rax = SVSM_CORE_CALL(SVSM_CORE_DELETE_VCPU);
}
ret = svsm_perform_call_protocol(&call);
local_irq_restore(flags);
} else {
/*
* If the kernel runs at VMPL0, it can change the VMSA
* bit for a page using the RMPADJUST instruction.
* However, for the instruction to succeed it must
* target the permissions of a lesser privileged (higher
* numbered) VMPL level, so use VMPL1.
*/
u64 attrs = 1;
if (make_vmsa)
attrs |= RMPADJUST_VMSA_PAGE_BIT;
ret = rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
}
return ret;
}
static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa, int apic_id)
{
int err;
err = snp_set_vmsa(vmsa, NULL, apic_id, false);
if (err)
pr_err("clear VMSA page failed (%u), leaking page\n", err);
else
free_page((unsigned long)vmsa);
}
static void set_pte_enc(pte_t *kpte, int level, void *va)
{
struct pte_enc_desc d = {
@ -1005,7 +1101,8 @@ static void unshare_all_memory(void)
data = per_cpu(runtime_data, cpu);
ghcb = (unsigned long)&data->ghcb_page;
if (addr <= ghcb && ghcb <= addr + size) {
/* Handle the case of a huge page containing the GHCB page */
if (addr <= ghcb && ghcb < addr + size) {
skipped_addr = true;
break;
}
@ -1055,11 +1152,70 @@ void snp_kexec_begin(void)
pr_warn("Failed to stop shared<->private conversions\n");
}
/*
* Shutdown all APs except the one handling kexec/kdump and clearing
* the VMSA tag on AP's VMSA pages as they are not being used as
* VMSA page anymore.
*/
static void shutdown_all_aps(void)
{
struct sev_es_save_area *vmsa;
int apic_id, this_cpu, cpu;
this_cpu = get_cpu();
/*
* APs are already in HLT loop when enc_kexec_finish() callback
* is invoked.
*/
for_each_present_cpu(cpu) {
vmsa = per_cpu(sev_vmsa, cpu);
/*
* The BSP or offlined APs do not have guest allocated VMSA
* and there is no need to clear the VMSA tag for this page.
*/
if (!vmsa)
continue;
/*
* Cannot clear the VMSA tag for the currently running vCPU.
*/
if (this_cpu == cpu) {
unsigned long pa;
struct page *p;
pa = __pa(vmsa);
/*
* Mark the VMSA page of the running vCPU as offline
* so that is excluded and not touched by makedumpfile
* while generating vmcore during kdump.
*/
p = pfn_to_online_page(pa >> PAGE_SHIFT);
if (p)
__SetPageOffline(p);
continue;
}
apic_id = cpuid_to_apicid[cpu];
/*
* Issue AP destroy to ensure AP gets kicked out of guest mode
* to allow using RMPADJUST to remove the VMSA tag on it's
* VMSA page.
*/
vmgexit_ap_control(SVM_VMGEXIT_AP_DESTROY, vmsa, apic_id);
snp_cleanup_vmsa(vmsa, apic_id);
}
put_cpu();
}
void snp_kexec_finish(void)
{
struct sev_es_runtime_data *data;
unsigned long size, addr;
unsigned int level, cpu;
unsigned long size;
struct ghcb *ghcb;
pte_t *pte;
@ -1069,6 +1225,8 @@ void snp_kexec_finish(void)
if (!IS_ENABLED(CONFIG_KEXEC_CORE))
return;
shutdown_all_aps();
unshare_all_memory();
/*
@ -1085,56 +1243,13 @@ void snp_kexec_finish(void)
ghcb = &data->ghcb_page;
pte = lookup_address((unsigned long)ghcb, &level);
size = page_level_size(level);
set_pte_enc(pte, level, (void *)ghcb);
snp_set_memory_private((unsigned long)ghcb, (size / PAGE_SIZE));
/* Handle the case of a huge page containing the GHCB page */
addr = (unsigned long)ghcb & page_level_mask(level);
set_pte_enc(pte, level, (void *)addr);
snp_set_memory_private(addr, (size / PAGE_SIZE));
}
}
static int snp_set_vmsa(void *va, void *caa, int apic_id, bool make_vmsa)
{
int ret;
if (snp_vmpl) {
struct svsm_call call = {};
unsigned long flags;
local_irq_save(flags);
call.caa = this_cpu_read(svsm_caa);
call.rcx = __pa(va);
if (make_vmsa) {
/* Protocol 0, Call ID 2 */
call.rax = SVSM_CORE_CALL(SVSM_CORE_CREATE_VCPU);
call.rdx = __pa(caa);
call.r8 = apic_id;
} else {
/* Protocol 0, Call ID 3 */
call.rax = SVSM_CORE_CALL(SVSM_CORE_DELETE_VCPU);
}
ret = svsm_perform_call_protocol(&call);
local_irq_restore(flags);
} else {
/*
* If the kernel runs at VMPL0, it can change the VMSA
* bit for a page using the RMPADJUST instruction.
* However, for the instruction to succeed it must
* target the permissions of a lesser privileged (higher
* numbered) VMPL level, so use VMPL1.
*/
u64 attrs = 1;
if (make_vmsa)
attrs |= RMPADJUST_VMSA_PAGE_BIT;
ret = rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
}
return ret;
}
#define __ATTR_BASE (SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK)
#define INIT_CS_ATTRIBS (__ATTR_BASE | SVM_SELECTOR_READ_MASK | SVM_SELECTOR_CODE_MASK)
#define INIT_DS_ATTRIBS (__ATTR_BASE | SVM_SELECTOR_WRITE_MASK)
@ -1166,24 +1281,10 @@ static void *snp_alloc_vmsa_page(int cpu)
return page_address(p + 1);
}
static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa, int apic_id)
{
int err;
err = snp_set_vmsa(vmsa, NULL, apic_id, false);
if (err)
pr_err("clear VMSA page failed (%u), leaking page\n", err);
else
free_page((unsigned long)vmsa);
}
static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
{
struct sev_es_save_area *cur_vmsa, *vmsa;
struct ghcb_state state;
struct svsm_ca *caa;
unsigned long flags;
struct ghcb *ghcb;
u8 sipi_vector;
int cpu, ret;
u64 cr4;
@ -1297,33 +1398,7 @@ static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
}
/* Issue VMGEXIT AP Creation NAE event */
local_irq_save(flags);
ghcb = __sev_get_ghcb(&state);
vc_ghcb_invalidate(ghcb);
ghcb_set_rax(ghcb, vmsa->sev_features);
ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_CREATION);
ghcb_set_sw_exit_info_1(ghcb,
((u64)apic_id << 32) |
((u64)snp_vmpl << 16) |
SVM_VMGEXIT_AP_CREATE);
ghcb_set_sw_exit_info_2(ghcb, __pa(vmsa));
sev_es_wr_ghcb_msr(__pa(ghcb));
VMGEXIT();
if (!ghcb_sw_exit_info_1_is_valid(ghcb) ||
lower_32_bits(ghcb->save.sw_exit_info_1)) {
pr_err("SNP AP Creation error\n");
ret = -EINVAL;
}
__sev_put_ghcb(&state);
local_irq_restore(flags);
/* Perform cleanup if there was an error */
ret = vmgexit_ap_control(SVM_VMGEXIT_AP_CREATE, vmsa, apic_id);
if (ret) {
snp_cleanup_vmsa(vmsa, apic_id);
vmsa = NULL;


@ -2465,8 +2465,9 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs, struct perf_sample_
setup_pebs_fixed_sample_data);
}
static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int size)
static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, u64 mask)
{
u64 pebs_enabled = cpuc->pebs_enabled & mask;
struct perf_event *event;
int bit;
@ -2477,7 +2478,7 @@ static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int
* It needs to call intel_pmu_save_and_restart_reload() to
* update the event->count for this case.
*/
for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled, size) {
for_each_set_bit(bit, (unsigned long *)&pebs_enabled, X86_PMC_IDX_MAX) {
event = cpuc->events[bit];
if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
intel_pmu_save_and_restart_reload(event, 0);
@ -2512,7 +2513,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d
}
if (unlikely(base >= top)) {
intel_pmu_pebs_event_update_no_drain(cpuc, size);
intel_pmu_pebs_event_update_no_drain(cpuc, mask);
return;
}
@ -2626,7 +2627,7 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d
(hybrid(cpuc->pmu, fixed_cntr_mask64) << INTEL_PMC_IDX_FIXED);
if (unlikely(base >= top)) {
intel_pmu_pebs_event_update_no_drain(cpuc, X86_PMC_IDX_MAX);
intel_pmu_pebs_event_update_no_drain(cpuc, mask);
return;
}


@ -75,7 +75,7 @@
#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* "centaur_mcr" Centaur MCRs (= MTRRs) */
#define X86_FEATURE_K8 ( 3*32+ 4) /* Opteron, Athlon64 */
#define X86_FEATURE_ZEN5 ( 3*32+ 5) /* CPU based on Zen5 microarchitecture */
/* Free ( 3*32+ 6) */
#define X86_FEATURE_ZEN6 ( 3*32+ 6) /* CPU based on Zen6 microarchitecture */
/* Free ( 3*32+ 7) */
#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* "constant_tsc" TSC ticks at a constant rate */
#define X86_FEATURE_UP ( 3*32+ 9) /* "up" SMP kernel running on UP */


@ -116,7 +116,7 @@ enum psc_op {
#define GHCB_MSR_VMPL_REQ 0x016
#define GHCB_MSR_VMPL_REQ_LEVEL(v) \
/* GHCBData[39:32] */ \
(((u64)(v) & GENMASK_ULL(7, 0) << 32) | \
((((u64)(v) & GENMASK_ULL(7, 0)) << 32) | \
/* GHCBDdata[11:0] */ \
GHCB_MSR_VMPL_REQ)


@ -472,6 +472,11 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
case 0x60 ... 0x7f:
setup_force_cpu_cap(X86_FEATURE_ZEN5);
break;
case 0x50 ... 0x5f:
case 0x90 ... 0xaf:
case 0xc0 ... 0xcf:
setup_force_cpu_cap(X86_FEATURE_ZEN6);
break;
default:
goto warn;
}


@ -566,7 +566,7 @@ static void __init lowmem_pfn_init(void)
"only %luMB highmem pages available, ignoring highmem size of %luMB!\n"
#define MSG_HIGHMEM_TRIMMED \
"Warning: only 4GB will be used. Support for for CONFIG_HIGHMEM64G was removed!\n"
"Warning: only 4GB will be used. Support for CONFIG_HIGHMEM64G was removed!\n"
/*
* We have more RAM than fits into lowmem - we try to put it into
* highmem, also taking the highmem=x boot parameter into account:


@ -9,6 +9,7 @@
* not aware of PI.
*/
#include <linux/blk-integrity.h>
#include <linux/t10-pi.h>
#include <linux/workqueue.h>
#include "blk.h"
@ -43,6 +44,29 @@ static void bio_integrity_verify_fn(struct work_struct *work)
bio_endio(bio);
}
#define BIP_CHECK_FLAGS (BIP_CHECK_GUARD | BIP_CHECK_REFTAG | BIP_CHECK_APPTAG)
static bool bip_should_check(struct bio_integrity_payload *bip)
{
return bip->bip_flags & BIP_CHECK_FLAGS;
}
static bool bi_offload_capable(struct blk_integrity *bi)
{
switch (bi->csum_type) {
case BLK_INTEGRITY_CSUM_CRC64:
return bi->tuple_size == sizeof(struct crc64_pi_tuple);
case BLK_INTEGRITY_CSUM_CRC:
case BLK_INTEGRITY_CSUM_IP:
return bi->tuple_size == sizeof(struct t10_pi_tuple);
default:
pr_warn_once("%s: unknown integrity checksum type:%d\n",
__func__, bi->csum_type);
fallthrough;
case BLK_INTEGRITY_CSUM_NONE:
return false;
}
}
/**
* __bio_integrity_endio - Integrity I/O completion function
* @bio: Protected bio
@ -54,12 +78,12 @@ static void bio_integrity_verify_fn(struct work_struct *work)
*/
bool __bio_integrity_endio(struct bio *bio)
{
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
struct bio_integrity_payload *bip = bio_integrity(bio);
struct bio_integrity_data *bid =
container_of(bip, struct bio_integrity_data, bip);
if (bio_op(bio) == REQ_OP_READ && !bio->bi_status && bi->csum_type) {
if (bio_op(bio) == REQ_OP_READ && !bio->bi_status &&
bip_should_check(bip)) {
INIT_WORK(&bid->work, bio_integrity_verify_fn);
queue_work(kintegrityd_wq, &bid->work);
return false;
@ -84,6 +108,7 @@ bool bio_integrity_prep(struct bio *bio)
{
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
struct bio_integrity_data *bid;
bool set_flags = true;
gfp_t gfp = GFP_NOIO;
unsigned int len;
void *buf;
@ -100,19 +125,24 @@ bool bio_integrity_prep(struct bio *bio)
switch (bio_op(bio)) {
case REQ_OP_READ:
if (bi->flags & BLK_INTEGRITY_NOVERIFY)
return true;
if (bi->flags & BLK_INTEGRITY_NOVERIFY) {
if (bi_offload_capable(bi))
return true;
set_flags = false;
}
break;
case REQ_OP_WRITE:
if (bi->flags & BLK_INTEGRITY_NOGENERATE)
return true;
/*
* Zero the memory allocated to not leak uninitialized kernel
* memory to disk for non-integrity metadata where nothing else
* initializes the memory.
*/
if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE)
if (bi->flags & BLK_INTEGRITY_NOGENERATE) {
if (bi_offload_capable(bi))
return true;
set_flags = false;
gfp |= __GFP_ZERO;
} else if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE)
gfp |= __GFP_ZERO;
break;
default:
@ -137,19 +167,21 @@ bool bio_integrity_prep(struct bio *bio)
bid->bip.bip_flags |= BIP_BLOCK_INTEGRITY;
bip_set_seed(&bid->bip, bio->bi_iter.bi_sector);
if (bi->csum_type == BLK_INTEGRITY_CSUM_IP)
bid->bip.bip_flags |= BIP_IP_CHECKSUM;
if (bi->csum_type)
bid->bip.bip_flags |= BIP_CHECK_GUARD;
if (bi->flags & BLK_INTEGRITY_REF_TAG)
bid->bip.bip_flags |= BIP_CHECK_REFTAG;
if (set_flags) {
if (bi->csum_type == BLK_INTEGRITY_CSUM_IP)
bid->bip.bip_flags |= BIP_IP_CHECKSUM;
if (bi->csum_type)
bid->bip.bip_flags |= BIP_CHECK_GUARD;
if (bi->flags & BLK_INTEGRITY_REF_TAG)
bid->bip.bip_flags |= BIP_CHECK_REFTAG;
}
if (bio_integrity_add_page(bio, virt_to_page(buf), len,
offset_in_page(buf)) < len)
goto err_end_io;
/* Auto-generate integrity metadata if this is a write */
if (bio_data_dir(bio) == WRITE)
if (bio_data_dir(bio) == WRITE && bip_should_check(&bid->bip))
blk_integrity_generate(bio);
else
bid->saved_bio_iter = bio->bi_iter;


@ -611,7 +611,7 @@ struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask)
{
struct bio *bio;
if (nr_vecs > UIO_MAXIOV)
if (nr_vecs > BIO_MAX_INLINE_VECS)
return NULL;
return kmalloc(struct_size(bio, bi_inline_vecs, nr_vecs), gfp_mask);
}


@ -265,10 +265,6 @@ static int hash_accept(struct socket *sock, struct socket *newsock,
goto out_free_state;
err = crypto_ahash_import(&ctx2->req, state);
if (err) {
sock_orphan(sk2);
sock_put(sk2);
}
out_free_state:
kfree_sensitive(state);


@ -455,7 +455,7 @@ priority_bands_fops_write(struct file *file, const char __user *user_buf, size_t
if (ret < 0)
return ret;
buf[size] = '\0';
buf[ret] = '\0';
ret = sscanf(buf, "%u %u %u %u", &band, &grace_period, &process_grace_period,
&process_quantum);
if (ret != 4)


@ -231,16 +231,18 @@ static int acpi_pptt_leaf_node(struct acpi_table_header *table_hdr,
sizeof(struct acpi_table_pptt));
proc_sz = sizeof(struct acpi_pptt_processor);
while ((unsigned long)entry + proc_sz < table_end) {
/* ignore subtable types that are smaller than a processor node */
while ((unsigned long)entry + proc_sz <= table_end) {
cpu_node = (struct acpi_pptt_processor *)entry;
if (entry->type == ACPI_PPTT_TYPE_PROCESSOR &&
cpu_node->parent == node_entry)
return 0;
if (entry->length == 0)
return 0;
entry = ACPI_ADD_PTR(struct acpi_subtable_header, entry,
entry->length);
}
return 1;
}
@ -273,15 +275,18 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he
proc_sz = sizeof(struct acpi_pptt_processor);
/* find the processor structure associated with this cpuid */
while ((unsigned long)entry + proc_sz < table_end) {
while ((unsigned long)entry + proc_sz <= table_end) {
cpu_node = (struct acpi_pptt_processor *)entry;
if (entry->length == 0) {
pr_warn("Invalid zero length subtable\n");
break;
}
/* entry->length may not equal proc_sz, revalidate the processor structure length */
if (entry->type == ACPI_PPTT_TYPE_PROCESSOR &&
acpi_cpu_id == cpu_node->acpi_processor_id &&
(unsigned long)entry + entry->length <= table_end &&
entry->length == proc_sz + cpu_node->number_of_priv_resources * sizeof(u32) &&
acpi_pptt_leaf_node(table_hdr, cpu_node)) {
return (struct acpi_pptt_processor *)entry;
}


@ -1708,7 +1708,7 @@ static void ublk_cancel_cmd(struct ublk_queue *ubq, unsigned tag,
* that ublk_dispatch_req() is always called
*/
req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
if (req && blk_mq_request_started(req))
if (req && blk_mq_request_started(req) && req->tag == tag)
return;
spin_lock(&ubq->cancel_lock);


@ -3014,9 +3014,8 @@ static void btusb_coredump_qca(struct hci_dev *hdev)
static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
int ret = 0;
unsigned int skip = 0;
u8 pkt_type;
u8 *sk_ptr;
unsigned int sk_len;
u16 seqno;
u32 dump_size;
@ -3025,18 +3024,13 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
struct usb_device *udev = btdata->udev;
pkt_type = hci_skb_pkt_type(skb);
sk_ptr = skb->data;
sk_len = skb->len;
skip = sizeof(struct hci_event_hdr);
if (pkt_type == HCI_ACLDATA_PKT)
skip += sizeof(struct hci_acl_hdr);
if (pkt_type == HCI_ACLDATA_PKT) {
sk_ptr += HCI_ACL_HDR_SIZE;
sk_len -= HCI_ACL_HDR_SIZE;
}
skb_pull(skb, skip);
dump_hdr = (struct qca_dump_hdr *)skb->data;
sk_ptr += HCI_EVENT_HDR_SIZE;
sk_len -= HCI_EVENT_HDR_SIZE;
dump_hdr = (struct qca_dump_hdr *)sk_ptr;
seqno = le16_to_cpu(dump_hdr->seqno);
if (seqno == 0) {
set_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags);
@ -3056,16 +3050,15 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
btdata->qca_dump.ram_dump_size = dump_size;
btdata->qca_dump.ram_dump_seqno = 0;
sk_ptr += offsetof(struct qca_dump_hdr, data0);
sk_len -= offsetof(struct qca_dump_hdr, data0);
skb_pull(skb, offsetof(struct qca_dump_hdr, data0));
usb_disable_autosuspend(udev);
bt_dev_info(hdev, "%s memdump size(%u)\n",
(pkt_type == HCI_ACLDATA_PKT) ? "ACL" : "event",
dump_size);
} else {
sk_ptr += offsetof(struct qca_dump_hdr, data);
sk_len -= offsetof(struct qca_dump_hdr, data);
skb_pull(skb, offsetof(struct qca_dump_hdr, data));
}
if (!btdata->qca_dump.ram_dump_size) {
@ -3085,7 +3078,6 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
return ret;
}
skb_pull(skb, skb->len - sk_len);
hci_devcd_append(hdev, skb);
btdata->qca_dump.ram_dump_seqno++;
if (seqno == QCA_LAST_SEQUENCE_NUM) {
@ -3113,68 +3105,58 @@ out:
/* Return: true if the ACL packet is a dump packet, false otherwise. */
static bool acl_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
u8 *sk_ptr;
unsigned int sk_len;
struct hci_event_hdr *event_hdr;
struct hci_acl_hdr *acl_hdr;
struct qca_dump_hdr *dump_hdr;
struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
bool is_dump = false;
sk_ptr = skb->data;
sk_len = skb->len;
acl_hdr = hci_acl_hdr(skb);
if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE)
if (!clone)
return false;
sk_ptr += HCI_ACL_HDR_SIZE;
sk_len -= HCI_ACL_HDR_SIZE;
event_hdr = (struct hci_event_hdr *)sk_ptr;
acl_hdr = skb_pull_data(clone, sizeof(*acl_hdr));
if (!acl_hdr || (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE))
goto out;
if ((event_hdr->evt != HCI_VENDOR_PKT) ||
(event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
return false;
event_hdr = skb_pull_data(clone, sizeof(*event_hdr));
if (!event_hdr || (event_hdr->evt != HCI_VENDOR_PKT))
goto out;
sk_ptr += HCI_EVENT_HDR_SIZE;
sk_len -= HCI_EVENT_HDR_SIZE;
dump_hdr = skb_pull_data(clone, sizeof(*dump_hdr));
if (!dump_hdr || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
(dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
goto out;
dump_hdr = (struct qca_dump_hdr *)sk_ptr;
if ((sk_len < offsetof(struct qca_dump_hdr, data)) ||
(dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
(dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
return false;
return true;
is_dump = true;
out:
consume_skb(clone);
return is_dump;
}
/* Return: true if the event packet is a dump packet, false otherwise. */
static bool evt_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
u8 *sk_ptr;
unsigned int sk_len;
struct hci_event_hdr *event_hdr;
struct qca_dump_hdr *dump_hdr;
struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
bool is_dump = false;
sk_ptr = skb->data;
sk_len = skb->len;
event_hdr = hci_event_hdr(skb);
if ((event_hdr->evt != HCI_VENDOR_PKT)
|| (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
if (!clone)
return false;
sk_ptr += HCI_EVENT_HDR_SIZE;
sk_len -= HCI_EVENT_HDR_SIZE;
event_hdr = skb_pull_data(clone, sizeof(*event_hdr));
if (!event_hdr || (event_hdr->evt != HCI_VENDOR_PKT))
goto out;
dump_hdr = (struct qca_dump_hdr *)sk_ptr;
if ((sk_len < offsetof(struct qca_dump_hdr, data)) ||
(dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
(dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
return false;
dump_hdr = skb_pull_data(clone, sizeof(*dump_hdr));
if (!dump_hdr || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
(dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
goto out;
return true;
is_dump = true;
out:
consume_skb(clone);
return is_dump;
}
static int btusb_recv_acl_qca(struct hci_dev *hdev, struct sk_buff *skb)


@ -137,6 +137,8 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
if (!clk_data)
return -ENOMEM;
clk_data->num = S2MPS11_CLKS_NUM;
switch (hwid) {
case S2MPS11X:
s2mps11_reg = S2MPS11_REG_RTC_CTRL;
@ -186,7 +188,6 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
clk_data->hws[i] = &s2mps11_clks[i].hw;
}
clk_data->num = S2MPS11_CLKS_NUM;
of_clk_add_hw_provider(s2mps11_clks->clk_np, of_clk_hw_onecell_get,
clk_data);


@ -541,6 +541,8 @@ static struct rockchip_clk_branch rk3576_clk_branches[] __initdata = {
RK3576_CLKGATE_CON(5), 14, GFLAGS),
GATE(CLK_OTPC_AUTO_RD_G, "clk_otpc_auto_rd_g", "xin24m", 0,
RK3576_CLKGATE_CON(5), 15, GFLAGS),
GATE(CLK_OTP_PHY_G, "clk_otp_phy_g", "xin24m", 0,
RK3576_CLKGATE_CON(6), 0, GFLAGS),
COMPOSITE(CLK_MIPI_CAMERAOUT_M0, "clk_mipi_cameraout_m0", mux_24m_spll_gpll_cpll_p, 0,
RK3576_CLKSEL_CON(38), 8, 2, MFLAGS, 0, 8, DFLAGS,
RK3576_CLKGATE_CON(6), 3, GFLAGS),


@ -412,19 +412,23 @@ static const struct clk_parent_data mmc0_mmc1_parents[] = {
{ .hw = &pll_periph0_2x_clk.common.hw },
{ .hw = &pll_audio1_div2_clk.common.hw },
};
static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc0_clk, "mmc0", mmc0_mmc1_parents, 0x830,
0, 4, /* M */
8, 2, /* P */
24, 3, /* mux */
BIT(31), /* gate */
0);
static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc0_clk, "mmc0",
mmc0_mmc1_parents, 0x830,
0, 4, /* M */
8, 2, /* P */
24, 3, /* mux */
BIT(31), /* gate */
2, /* post-div */
0);
static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc1_clk, "mmc1", mmc0_mmc1_parents, 0x834,
0, 4, /* M */
8, 2, /* P */
24, 3, /* mux */
BIT(31), /* gate */
0);
static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1",
mmc0_mmc1_parents, 0x834,
0, 4, /* M */
8, 2, /* P */
24, 3, /* mux */
BIT(31), /* gate */
2, /* post-div */
0);
static const struct clk_parent_data mmc2_parents[] = {
{ .fw_name = "hosc" },
@ -433,12 +437,14 @@ static const struct clk_parent_data mmc2_parents[] = {
{ .hw = &pll_periph0_800M_clk.common.hw },
{ .hw = &pll_audio1_div2_clk.common.hw },
};
static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc2_clk, "mmc2", mmc2_parents, 0x838,
0, 4, /* M */
8, 2, /* P */
24, 3, /* mux */
BIT(31), /* gate */
0);
static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2", mmc2_parents,
0x838,
0, 4, /* M */
8, 2, /* P */
24, 3, /* mux */
BIT(31), /* gate */
2, /* post-div */
0);
static SUNXI_CCU_GATE_HWS(bus_mmc0_clk, "bus-mmc0", psi_ahb_hws,
0x84c, BIT(0), 0);


@ -52,6 +52,28 @@ struct ccu_mp {
} \
}
#define SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(_struct, _name, _parents, \
_reg, \
_mshift, _mwidth, \
_pshift, _pwidth, \
_muxshift, _muxwidth, \
_gate, _postdiv, _flags)\
struct ccu_mp _struct = { \
.enable = _gate, \
.m = _SUNXI_CCU_DIV(_mshift, _mwidth), \
.p = _SUNXI_CCU_DIV(_pshift, _pwidth), \
.mux = _SUNXI_CCU_MUX(_muxshift, _muxwidth), \
.fixed_post_div = _postdiv, \
.common = { \
.reg = _reg, \
.features = CCU_FEATURE_FIXED_POSTDIV, \
.hw.init = CLK_HW_INIT_PARENTS_DATA(_name, \
_parents, \
&ccu_mp_ops, \
_flags), \
} \
}
#define SUNXI_CCU_MP_WITH_MUX_GATE(_struct, _name, _parents, _reg, \
_mshift, _mwidth, \
_pshift, _pwidth, \
@ -109,8 +131,7 @@ struct ccu_mp {
_mshift, _mwidth, \
_pshift, _pwidth, \
_muxshift, _muxwidth, \
_gate, _features, \
_flags) \
_gate, _flags, _features) \
struct ccu_mp _struct = { \
.enable = _gate, \
.m = _SUNXI_CCU_DIV(_mshift, _mwidth), \


@ -320,8 +320,9 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
count++;
dma_resv_list_set(fobj, i, fence, usage);
/* pointer update must be visible before we extend the num_fences */
smp_store_mb(fobj->num_fences, count);
/* fence update must be visible before we extend the num_fences */
smp_wmb();
fobj->num_fences = count;
}
EXPORT_SYMBOL(dma_resv_add_fence);


@ -342,6 +342,9 @@ static void pt_cmd_callback_work(void *data, int err)
struct pt_dma_chan *chan;
unsigned long flags;
if (!desc)
return;
dma_chan = desc->vd.tx.chan;
chan = to_pt_chan(dma_chan);
@ -355,16 +358,14 @@ static void pt_cmd_callback_work(void *data, int err)
desc->status = DMA_ERROR;
spin_lock_irqsave(&chan->vc.lock, flags);
if (desc) {
if (desc->status != DMA_COMPLETE) {
if (desc->status != DMA_ERROR)
desc->status = DMA_COMPLETE;
if (desc->status != DMA_COMPLETE) {
if (desc->status != DMA_ERROR)
desc->status = DMA_COMPLETE;
dma_cookie_complete(tx_desc);
dma_descriptor_unmap(tx_desc);
} else {
tx_desc = NULL;
}
dma_cookie_complete(tx_desc);
dma_descriptor_unmap(tx_desc);
} else {
tx_desc = NULL;
}
spin_unlock_irqrestore(&chan->vc.lock, flags);


@ -841,9 +841,9 @@ static int dmatest_func(void *data)
} else {
dma_async_issue_pending(chan);
wait_event_timeout(thread->done_wait,
done->done,
msecs_to_jiffies(params->timeout));
wait_event_freezable_timeout(thread->done_wait,
done->done,
msecs_to_jiffies(params->timeout));
status = dma_async_is_tx_complete(chan, cookie, NULL,
NULL);


@ -57,7 +57,7 @@ static irqreturn_t fsl_edma3_tx_handler(int irq, void *dev_id)
intr = edma_readl_chreg(fsl_chan, ch_int);
if (!intr)
return IRQ_HANDLED;
return IRQ_NONE;
edma_writel_chreg(fsl_chan, 1, ch_int);


@ -222,7 +222,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
struct idxd_wq *wq;
struct device *dev, *fdev;
int rc = 0;
struct iommu_sva *sva;
struct iommu_sva *sva = NULL;
unsigned int pasid;
struct idxd_cdev *idxd_cdev;
@ -317,7 +317,7 @@ failed_set_pasid:
if (device_user_pasid_enabled(idxd))
idxd_xa_pasid_remove(ctx);
failed_get_pasid:
if (device_user_pasid_enabled(idxd))
if (device_user_pasid_enabled(idxd) && !IS_ERR_OR_NULL(sva))
iommu_sva_unbind_device(sva);
failed:
mutex_unlock(&wq->wq_lock);
@ -407,6 +407,9 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
if (!idxd->user_submission_safe && !capable(CAP_SYS_RAWIO))
return -EPERM;
if (current->mm != ctx->mm)
return -EPERM;
rc = check_vma(wq, vma, __func__);
if (rc < 0)
return rc;
@ -473,6 +476,9 @@ static ssize_t idxd_cdev_write(struct file *filp, const char __user *buf, size_t
ssize_t written = 0;
int i;
if (current->mm != ctx->mm)
return -EPERM;
for (i = 0; i < len/sizeof(struct dsa_hw_desc); i++) {
int rc = idxd_submit_user_descriptor(ctx, udesc + i);
@ -493,6 +499,9 @@ static __poll_t idxd_cdev_poll(struct file *filp,
struct idxd_device *idxd = wq->idxd;
__poll_t out = 0;
if (current->mm != ctx->mm)
return POLLNVAL;
poll_wait(filp, &wq->err_queue, wait);
spin_lock(&idxd->dev_lock);
if (idxd->sw_err.valid)


@ -155,6 +155,25 @@ static void idxd_cleanup_interrupts(struct idxd_device *idxd)
pci_free_irq_vectors(pdev);
}
static void idxd_clean_wqs(struct idxd_device *idxd)
{
struct idxd_wq *wq;
struct device *conf_dev;
int i;
for (i = 0; i < idxd->max_wqs; i++) {
wq = idxd->wqs[i];
if (idxd->hw.wq_cap.op_config)
bitmap_free(wq->opcap_bmap);
kfree(wq->wqcfg);
conf_dev = wq_confdev(wq);
put_device(conf_dev);
kfree(wq);
}
bitmap_free(idxd->wq_enable_map);
kfree(idxd->wqs);
}
static int idxd_setup_wqs(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
@ -169,8 +188,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev));
if (!idxd->wq_enable_map) {
kfree(idxd->wqs);
return -ENOMEM;
rc = -ENOMEM;
goto err_bitmap;
}
for (i = 0; i < idxd->max_wqs; i++) {
@ -189,10 +208,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
conf_dev->bus = &dsa_bus_type;
conf_dev->type = &idxd_wq_device_type;
rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
if (rc < 0) {
put_device(conf_dev);
if (rc < 0)
goto err;
}
mutex_init(&wq->wq_lock);
init_waitqueue_head(&wq->err_queue);
@ -203,7 +220,6 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
if (!wq->wqcfg) {
put_device(conf_dev);
rc = -ENOMEM;
goto err;
}
@ -211,9 +227,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
if (idxd->hw.wq_cap.op_config) {
wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
if (!wq->opcap_bmap) {
put_device(conf_dev);
rc = -ENOMEM;
goto err;
goto err_opcap_bmap;
}
bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
}
@ -224,15 +239,46 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
return 0;
err:
err_opcap_bmap:
kfree(wq->wqcfg);
err:
put_device(conf_dev);
kfree(wq);
while (--i >= 0) {
wq = idxd->wqs[i];
if (idxd->hw.wq_cap.op_config)
bitmap_free(wq->opcap_bmap);
kfree(wq->wqcfg);
conf_dev = wq_confdev(wq);
put_device(conf_dev);
kfree(wq);
}
bitmap_free(idxd->wq_enable_map);
err_bitmap:
kfree(idxd->wqs);
return rc;
}
static void idxd_clean_engines(struct idxd_device *idxd)
{
struct idxd_engine *engine;
struct device *conf_dev;
int i;
for (i = 0; i < idxd->max_engines; i++) {
engine = idxd->engines[i];
conf_dev = engine_confdev(engine);
put_device(conf_dev);
kfree(engine);
}
kfree(idxd->engines);
}
static int idxd_setup_engines(struct idxd_device *idxd)
{
struct idxd_engine *engine;
@ -263,6 +309,7 @@ static int idxd_setup_engines(struct idxd_device *idxd)
rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id);
if (rc < 0) {
put_device(conf_dev);
kfree(engine);
goto err;
}
@ -276,10 +323,26 @@ static int idxd_setup_engines(struct idxd_device *idxd)
engine = idxd->engines[i];
conf_dev = engine_confdev(engine);
put_device(conf_dev);
kfree(engine);
}
kfree(idxd->engines);
return rc;
}
static void idxd_clean_groups(struct idxd_device *idxd)
{
struct idxd_group *group;
int i;
for (i = 0; i < idxd->max_groups; i++) {
group = idxd->groups[i];
put_device(group_confdev(group));
kfree(group);
}
kfree(idxd->groups);
}
static int idxd_setup_groups(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
@ -310,6 +373,7 @@ static int idxd_setup_groups(struct idxd_device *idxd)
rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id);
if (rc < 0) {
put_device(conf_dev);
kfree(group);
goto err;
}
@ -334,20 +398,18 @@ static int idxd_setup_groups(struct idxd_device *idxd)
while (--i >= 0) {
group = idxd->groups[i];
put_device(group_confdev(group));
kfree(group);
}
kfree(idxd->groups);
return rc;
}
static void idxd_cleanup_internals(struct idxd_device *idxd)
{
int i;
for (i = 0; i < idxd->max_groups; i++)
put_device(group_confdev(idxd->groups[i]));
for (i = 0; i < idxd->max_engines; i++)
put_device(engine_confdev(idxd->engines[i]));
for (i = 0; i < idxd->max_wqs; i++)
put_device(wq_confdev(idxd->wqs[i]));
idxd_clean_groups(idxd);
idxd_clean_engines(idxd);
idxd_clean_wqs(idxd);
destroy_workqueue(idxd->wq);
}
@ -390,7 +452,7 @@ static int idxd_init_evl(struct idxd_device *idxd)
static int idxd_setup_internals(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
int rc, i;
int rc;
init_waitqueue_head(&idxd->cmd_waitq);
@ -421,14 +483,11 @@ static int idxd_setup_internals(struct idxd_device *idxd)
err_evl:
destroy_workqueue(idxd->wq);
err_wkq_create:
for (i = 0; i < idxd->max_groups; i++)
put_device(group_confdev(idxd->groups[i]));
idxd_clean_groups(idxd);
err_group:
for (i = 0; i < idxd->max_engines; i++)
put_device(engine_confdev(idxd->engines[i]));
idxd_clean_engines(idxd);
err_engine:
for (i = 0; i < idxd->max_wqs; i++)
put_device(wq_confdev(idxd->wqs[i]));
idxd_clean_wqs(idxd);
err_wqs:
return rc;
}
@ -528,6 +587,17 @@ static void idxd_read_caps(struct idxd_device *idxd)
idxd->hw.iaa_cap.bits = ioread64(idxd->reg_base + IDXD_IAACAP_OFFSET);
}
static void idxd_free(struct idxd_device *idxd)
{
if (!idxd)
return;
put_device(idxd_confdev(idxd));
bitmap_free(idxd->opcap_bmap);
ida_free(&idxd_ida, idxd->id);
kfree(idxd);
}
static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
{
struct device *dev = &pdev->dev;
@ -545,28 +615,34 @@ static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_d
idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type);
idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
if (idxd->id < 0)
return NULL;
goto err_ida;
idxd->opcap_bmap = bitmap_zalloc_node(IDXD_MAX_OPCAP_BITS, GFP_KERNEL, dev_to_node(dev));
if (!idxd->opcap_bmap) {
ida_free(&idxd_ida, idxd->id);
return NULL;
}
if (!idxd->opcap_bmap)
goto err_opcap;
device_initialize(conf_dev);
conf_dev->parent = dev;
conf_dev->bus = &dsa_bus_type;
conf_dev->type = idxd->data->dev_type;
rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
if (rc < 0) {
put_device(conf_dev);
return NULL;
}
if (rc < 0)
goto err_name;
spin_lock_init(&idxd->dev_lock);
spin_lock_init(&idxd->cmd_lock);
return idxd;
err_name:
put_device(conf_dev);
bitmap_free(idxd->opcap_bmap);
err_opcap:
ida_free(&idxd_ida, idxd->id);
err_ida:
kfree(idxd);
return NULL;
}
static int idxd_enable_system_pasid(struct idxd_device *idxd)
@ -1190,7 +1266,7 @@ int idxd_pci_probe_alloc(struct idxd_device *idxd, struct pci_dev *pdev,
err:
pci_iounmap(pdev, idxd->reg_base);
err_iomap:
put_device(idxd_confdev(idxd));
idxd_free(idxd);
err_idxd_alloc:
pci_disable_device(pdev);
return rc;
@ -1232,7 +1308,6 @@ static void idxd_shutdown(struct pci_dev *pdev)
static void idxd_remove(struct pci_dev *pdev)
{
struct idxd_device *idxd = pci_get_drvdata(pdev);
struct idxd_irq_entry *irq_entry;
idxd_unregister_devices(idxd);
/*
@ -1245,20 +1320,12 @@ static void idxd_remove(struct pci_dev *pdev)
get_device(idxd_confdev(idxd));
device_unregister(idxd_confdev(idxd));
idxd_shutdown(pdev);
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
idxd_device_remove_debugfs(idxd);
irq_entry = idxd_get_ie(idxd, 0);
free_irq(irq_entry->vector, irq_entry);
pci_free_irq_vectors(pdev);
idxd_cleanup(idxd);
pci_iounmap(pdev, idxd->reg_base);
if (device_user_pasid_enabled(idxd))
idxd_disable_sva(pdev);
pci_disable_device(pdev);
destroy_workqueue(idxd->wq);
perfmon_pmu_remove(idxd);
put_device(idxd_confdev(idxd));
idxd_free(idxd);
pci_disable_device(pdev);
}
static struct pci_driver idxd_pci_driver = {


@ -420,15 +420,11 @@ static struct virt_dma_desc *mtk_cqdma_find_active_desc(struct dma_chan *c,
{
struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c);
struct virt_dma_desc *vd;
unsigned long flags;
spin_lock_irqsave(&cvc->pc->lock, flags);
list_for_each_entry(vd, &cvc->pc->queue, node)
if (vd->tx.cookie == cookie) {
spin_unlock_irqrestore(&cvc->pc->lock, flags);
return vd;
}
spin_unlock_irqrestore(&cvc->pc->lock, flags);
list_for_each_entry(vd, &cvc->vc.desc_issued, node)
if (vd->tx.cookie == cookie)
@ -452,9 +448,11 @@ static enum dma_status mtk_cqdma_tx_status(struct dma_chan *c,
if (ret == DMA_COMPLETE || !txstate)
return ret;
spin_lock_irqsave(&cvc->pc->lock, flags);
spin_lock_irqsave(&cvc->vc.lock, flags);
vd = mtk_cqdma_find_active_desc(c, cookie);
spin_unlock_irqrestore(&cvc->vc.lock, flags);
spin_unlock_irqrestore(&cvc->pc->lock, flags);
if (vd) {
cvd = to_cqdma_vdesc(vd);


@ -1091,8 +1091,11 @@ static void udma_check_tx_completion(struct work_struct *work)
u32 residue_diff;
ktime_t time_diff;
unsigned long delay;
unsigned long flags;
while (1) {
spin_lock_irqsave(&uc->vc.lock, flags);
if (uc->desc) {
/* Get previous residue and time stamp */
residue_diff = uc->tx_drain.residue;
@ -1127,6 +1130,8 @@ static void udma_check_tx_completion(struct work_struct *work)
break;
}
spin_unlock_irqrestore(&uc->vc.lock, flags);
usleep_range(ktime_to_us(delay),
ktime_to_us(delay) + 10);
continue;
@ -1143,6 +1148,8 @@ static void udma_check_tx_completion(struct work_struct *work)
break;
}
spin_unlock_irqrestore(&uc->vc.lock, flags);
}
static irqreturn_t udma_ring_irq_handler(int irq, void *data)
@ -4246,7 +4253,6 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
struct udma_dev *ud = ofdma->of_dma_data;
dma_cap_mask_t mask = ud->ddev.cap_mask;
struct udma_filter_param filter_param;
struct dma_chan *chan;
@ -4278,7 +4284,7 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
}
}
chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
chan = __dma_request_channel(&ud->ddev.cap_mask, udma_dma_filter_fn, &filter_param,
ofdma->of_node);
if (!chan) {
dev_err(ud->dev, "get channel fail in %s.\n", __func__);


@ -1204,6 +1204,8 @@ static int pca953x_restore_context(struct pca953x_chip *chip)
guard(mutex)(&chip->i2c_lock);
if (chip->client->irq > 0)
enable_irq(chip->client->irq);
regcache_cache_only(chip->regmap, false);
regcache_mark_dirty(chip->regmap);
ret = pca953x_regcache_sync(chip);
@ -1216,6 +1218,10 @@ static int pca953x_restore_context(struct pca953x_chip *chip)
static void pca953x_save_context(struct pca953x_chip *chip)
{
guard(mutex)(&chip->i2c_lock);
/* Disable IRQ to prevent early triggering while regmap "cache only" is on */
if (chip->client->irq > 0)
disable_irq(chip->client->irq);
regcache_cache_only(chip->regmap, true);
}


@ -401,10 +401,15 @@ static ssize_t gpio_virtuser_direction_do_write(struct file *file,
char buf[32], *trimmed;
int ret, dir, val = 0;
ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count);
if (count >= sizeof(buf))
return -EINVAL;
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
if (ret < 0)
return ret;
buf[ret] = '\0';
trimmed = strim(buf);
if (strcmp(trimmed, "input") == 0) {
@ -623,12 +628,15 @@ static ssize_t gpio_virtuser_consumer_write(struct file *file,
char buf[GPIO_VIRTUSER_NAME_BUF_LEN + 2];
int ret;
if (count >= sizeof(buf))
return -EINVAL;
ret = simple_write_to_buffer(buf, GPIO_VIRTUSER_NAME_BUF_LEN, ppos,
user_buf, count);
if (ret < 0)
return ret;
buf[strlen(buf) - 1] = '\0';
buf[ret] = '\0';
ret = gpiod_set_consumer_name(data->ad.desc, buf);
if (ret)


@ -742,6 +742,12 @@ EXPORT_SYMBOL_GPL(gpiochip_query_valid_mask);
bool gpiochip_line_is_valid(const struct gpio_chip *gc,
unsigned int offset)
{
/*
* hog pins are requested before registering GPIO chip
*/
if (!gc->gpiodev)
return true;
/* No mask means all valid */
if (likely(!gc->gpiodev->valid_mask))
return true;


@ -109,7 +109,7 @@ int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct drm_exec exec;
int r;
drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
r = amdgpu_vm_lock_pd(vm, &exec, 0);
if (likely(!r))


@ -752,6 +752,18 @@ static int gmc_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->gmc.vram_type = vram_type;
adev->gmc.vram_vendor = vram_vendor;
/* The mall_size is already calculated as mall_size_per_umc * num_umc.
* However, for gfx1151, which features a 2-to-1 UMC mapping,
* the result must be multiplied by 2 to determine the actual mall size.
*/
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(11, 5, 1):
adev->gmc.mall_size *= 2;
break;
default:
break;
}
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):


@ -1023,6 +1023,10 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
VCN_RB1_DB_CTRL__EN_MASK);
/* Keeping one read-back to ensure all register writes are done, otherwise
* it may introduce race conditions */
RREG32_SOC15(VCN, inst_idx, regVCN_RB1_DB_CTRL);
return 0;
}
@ -1205,6 +1209,10 @@ static int vcn_v4_0_5_start(struct amdgpu_vcn_inst *vinst)
WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
/* Keeping one read-back to ensure all register writes are done, otherwise
* it may introduce race conditions */
RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
return 0;
}


@ -372,6 +372,8 @@ get_crtc_by_otg_inst(struct amdgpu_device *adev,
static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
struct dm_crtc_state *new_state)
{
if (new_state->stream->adjust.timing_adjust_pending)
return true;
if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
return true;
else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state))
@ -12763,7 +12765,8 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(
/* The reply is stored in the top nibble of the command. */
payload->reply[0] = (adev->dm.dmub_notify->aux_reply.command >> 4) & 0xF;
if (!payload->write && p_notify->aux_reply.length)
/*write req may receive a byte indicating partially written number as well*/
if (p_notify->aux_reply.length)
memcpy(payload->data, p_notify->aux_reply.data,
p_notify->aux_reply.length);


@ -62,6 +62,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
enum aux_return_code_type operation_result;
struct amdgpu_device *adev;
struct ddc_service *ddc;
uint8_t copy[16];
if (WARN_ON(msg->size > 16))
return -E2BIG;
@ -77,6 +78,11 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
(msg->request & DP_AUX_I2C_WRITE_STATUS_UPDATE) != 0;
payload.defer_delay = 0;
if (payload.write) {
memcpy(copy, msg->buffer, msg->size);
payload.data = copy;
}
result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
&operation_result);
@ -100,9 +106,9 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
*/
if (payload.write && result >= 0) {
if (result) {
/*one byte indicating partially written bytes. Force 0 to retry*/
drm_info(adev_to_drm(adev), "amdgpu: AUX partially written\n");
result = 0;
/*one byte indicating partially written bytes*/
drm_dbg_dp(adev_to_drm(adev), "amdgpu: AUX partially written\n");
result = payload.data[0];
} else if (!payload.reply[0])
/*I2C_ACK|AUX_ACK*/
result = msg->size;
@ -127,11 +133,11 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
break;
}
drm_info(adev_to_drm(adev), "amdgpu: DP AUX transfer fail:%d\n", operation_result);
drm_dbg_dp(adev_to_drm(adev), "amdgpu: DP AUX transfer fail:%d\n", operation_result);
}
if (payload.reply[0])
drm_info(adev_to_drm(adev), "amdgpu: AUX reply command not ACK: 0x%02x.",
drm_dbg_dp(adev_to_drm(adev), "amdgpu: AUX reply command not ACK: 0x%02x.",
payload.reply[0]);
return result;


@ -439,9 +439,12 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
* Don't adjust DRR while there's bandwidth optimizations pending to
* avoid conflicting with firmware updates.
*/
if (dc->ctx->dce_version > DCE_VERSION_MAX)
if (dc->optimized_required || dc->wm_optimized_required)
if (dc->ctx->dce_version > DCE_VERSION_MAX) {
if (dc->optimized_required || dc->wm_optimized_required) {
stream->adjust.timing_adjust_pending = true;
return false;
}
}
dc_exit_ips_for_hw_access(dc);
@ -3168,7 +3171,8 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->crtc_timing_adjust) {
if (stream->adjust.v_total_min != update->crtc_timing_adjust->v_total_min ||
stream->adjust.v_total_max != update->crtc_timing_adjust->v_total_max)
stream->adjust.v_total_max != update->crtc_timing_adjust->v_total_max ||
stream->adjust.timing_adjust_pending)
update->crtc_timing_adjust->timing_adjust_pending = true;
stream->adjust = *update->crtc_timing_adjust;
update->crtc_timing_adjust->timing_adjust_pending = false;


@ -195,9 +195,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.dcn_downspread_percent = 0.5,
.gpuvm_min_page_size_bytes = 4096,
.hostvm_min_page_size_bytes = 4096,
.do_urgent_latency_adjustment = 1,
.do_urgent_latency_adjustment = 0,
.urgent_latency_adjustment_fabric_clock_component_us = 0,
.urgent_latency_adjustment_fabric_clock_reference_mhz = 3000,
.urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
};
void dcn35_build_wm_range_table_fpu(struct clk_mgr *clk_mgr)


@ -910,7 +910,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
}
//TODO : Could be possibly moved to a common helper layer.
static bool dml21_wrapper_get_plane_id(const struct dc_state *context, const struct dc_plane_state *plane, unsigned int *plane_id)
static bool dml21_wrapper_get_plane_id(const struct dc_state *context, unsigned int stream_id, const struct dc_plane_state *plane, unsigned int *plane_id)
{
int i, j;
@ -918,10 +918,12 @@ static bool dml21_wrapper_get_plane_id(const struct dc_state *context, const str
return false;
for (i = 0; i < context->stream_count; i++) {
for (j = 0; j < context->stream_status[i].plane_count; j++) {
if (context->stream_status[i].plane_states[j] == plane) {
*plane_id = (i << 16) | j;
return true;
if (context->streams[i]->stream_id == stream_id) {
for (j = 0; j < context->stream_status[i].plane_count; j++) {
if (context->stream_status[i].plane_states[j] == plane) {
*plane_id = (i << 16) | j;
return true;
}
}
}
}
@ -944,14 +946,14 @@ static unsigned int map_stream_to_dml21_display_cfg(const struct dml2_context *d
return location;
}
static unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx,
static unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx, unsigned int stream_id,
const struct dc_plane_state *plane, const struct dc_state *context)
{
unsigned int plane_id;
int i = 0;
int location = -1;
if (!dml21_wrapper_get_plane_id(context, plane, &plane_id)) {
if (!dml21_wrapper_get_plane_id(context, stream_id, plane, &plane_id)) {
ASSERT(false);
return -1;
}
@ -1037,7 +1039,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
dml_dispcfg->plane_descriptors[disp_cfg_plane_location].stream_index = disp_cfg_stream_location;
} else {
for (plane_index = 0; plane_index < context->stream_status[stream_index].plane_count; plane_index++) {
disp_cfg_plane_location = map_plane_to_dml21_display_cfg(dml_ctx, context->stream_status[stream_index].plane_states[plane_index], context);
disp_cfg_plane_location = map_plane_to_dml21_display_cfg(dml_ctx, context->streams[stream_index]->stream_id, context->stream_status[stream_index].plane_states[plane_index], context);
if (disp_cfg_plane_location < 0)
disp_cfg_plane_location = dml_dispcfg->num_planes++;
@ -1048,7 +1050,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
populate_dml21_plane_config_from_plane_state(dml_ctx, &dml_dispcfg->plane_descriptors[disp_cfg_plane_location], context->stream_status[stream_index].plane_states[plane_index], context, stream_index);
dml_dispcfg->plane_descriptors[disp_cfg_plane_location].stream_index = disp_cfg_stream_location;
if (dml21_wrapper_get_plane_id(context, context->stream_status[stream_index].plane_states[plane_index], &dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[disp_cfg_plane_location]))
if (dml21_wrapper_get_plane_id(context, context->streams[stream_index]->stream_id, context->stream_status[stream_index].plane_states[plane_index], &dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[disp_cfg_plane_location]))
dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id_valid[disp_cfg_plane_location] = true;
/* apply forced pstate policy */


@ -120,10 +120,11 @@ void dpp401_set_cursor_attributes(
enum dc_cursor_color_format color_format = cursor_attributes->color_format;
int cur_rom_en = 0;
// DCN4 should always do Cursor degamma for Cursor Color modes
if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA ||
color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) {
cur_rom_en = 1;
if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) {
cur_rom_en = 1;
}
}
REG_UPDATE_3(CURSOR0_CONTROL,


@ -1980,9 +1980,9 @@ void dcn401_program_pipe(
dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size);
}
if (pipe_ctx->update_flags.raw ||
(pipe_ctx->plane_state && pipe_ctx->plane_state->update_flags.raw) ||
pipe_ctx->stream->update_flags.raw)
if (pipe_ctx->plane_state && (pipe_ctx->update_flags.raw ||
pipe_ctx->plane_state->update_flags.raw ||
pipe_ctx->stream->update_flags.raw))
dc->hwss.update_dchubp_dpp(dc, pipe_ctx, context);
if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable ||


@ -148,6 +148,7 @@ void link_blank_dp_stream(struct dc_link *link, bool hw_init)
void link_set_all_streams_dpms_off_for_link(struct dc_link *link)
{
struct pipe_ctx *pipes[MAX_PIPES];
struct dc_stream_state *streams[MAX_PIPES];
struct dc_state *state = link->dc->current_state;
uint8_t count;
int i;
@ -160,10 +161,18 @@ void link_set_all_streams_dpms_off_for_link(struct dc_link *link)
link_get_master_pipes_with_dpms_on(link, state, &count, pipes);
/* The subsequent call to dc_commit_updates_for_stream for a full update
* will release the current state and swap to a new state. Releasing the
* current state results in the stream pointers in the pipe_ctx structs
* being zero'd. Hence, cache all streams prior to dc_commit_updates_for_stream.
*/
for (i = 0; i < count; i++)
streams[i] = pipes[i]->stream;
for (i = 0; i < count; i++) {
stream_update.stream = pipes[i]->stream;
stream_update.stream = streams[i];
dc_commit_updates_for_stream(link->ctx->dc, NULL, 0,
pipes[i]->stream, &stream_update,
streams[i], &stream_update,
state);
}


@ -1118,6 +1118,10 @@ static void __drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
lockdep_assert_held(&gpusvm->notifier_lock);
if (range->flags.has_dma_mapping) {
struct drm_gpusvm_range_flags flags = {
.__flags = range->flags.__flags,
};
for (i = 0, j = 0; i < npages; j++) {
struct drm_pagemap_device_addr *addr = &range->dma_addr[j];
@ -1131,8 +1135,12 @@ static void __drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
dev, *addr);
i += 1 << addr->order;
}
range->flags.has_devmem_pages = false;
range->flags.has_dma_mapping = false;
/* WRITE_ONCE pairs with READ_ONCE for opportunistic checks */
flags.has_devmem_pages = false;
flags.has_dma_mapping = false;
WRITE_ONCE(range->flags.__flags, flags.__flags);
range->dpagemap = NULL;
}
}
@ -1334,6 +1342,7 @@ int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
int err = 0;
struct dev_pagemap *pagemap;
struct drm_pagemap *dpagemap;
struct drm_gpusvm_range_flags flags;
retry:
hmm_range.notifier_seq = mmu_interval_read_begin(notifier);
@ -1378,7 +1387,8 @@ map_pages:
*/
drm_gpusvm_notifier_lock(gpusvm);
if (range->flags.unmapped) {
flags.__flags = range->flags.__flags;
if (flags.unmapped) {
drm_gpusvm_notifier_unlock(gpusvm);
err = -EFAULT;
goto err_free;
@ -1454,6 +1464,11 @@ map_pages:
goto err_unmap;
}
if (ctx->devmem_only) {
err = -EFAULT;
goto err_unmap;
}
addr = dma_map_page(gpusvm->drm->dev,
page, 0,
PAGE_SIZE << order,
@ -1469,14 +1484,17 @@ map_pages:
}
i += 1 << order;
num_dma_mapped = i;
range->flags.has_dma_mapping = true;
flags.has_dma_mapping = true;
}
if (zdd) {
range->flags.has_devmem_pages = true;
flags.has_devmem_pages = true;
range->dpagemap = dpagemap;
}
/* WRITE_ONCE pairs with READ_ONCE for opportunistic checks */
WRITE_ONCE(range->flags.__flags, flags.__flags);
drm_gpusvm_notifier_unlock(gpusvm);
kvfree(pfns);
set_seqno:
@ -1765,6 +1783,8 @@ int drm_gpusvm_migrate_to_devmem(struct drm_gpusvm *gpusvm,
goto err_finalize;
/* Upon success bind devmem allocation to range and zdd */
devmem_allocation->timeslice_expiration = get_jiffies_64() +
msecs_to_jiffies(ctx->timeslice_ms);
zdd->devmem_allocation = devmem_allocation; /* Owns ref */
err_finalize:
@ -1985,6 +2005,13 @@ static int __drm_gpusvm_migrate_to_ram(struct vm_area_struct *vas,
void *buf;
int i, err = 0;
if (page) {
zdd = page->zone_device_data;
if (time_before64(get_jiffies_64(),
zdd->devmem_allocation->timeslice_expiration))
return 0;
}
start = ALIGN_DOWN(fault_addr, size);
end = ALIGN(fault_addr + 1, size);


@ -75,7 +75,7 @@ static void meson_encoder_hdmi_set_vclk(struct meson_encoder_hdmi *encoder_hdmi,
unsigned long long venc_freq;
unsigned long long hdmi_freq;
vclk_freq = mode->clock * 1000;
vclk_freq = mode->clock * 1000ULL;
/* For 420, pixel clock is half unlike venc clock */
if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
@ -123,7 +123,7 @@ static enum drm_mode_status meson_encoder_hdmi_mode_valid(struct drm_bridge *bri
struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
struct meson_drm *priv = encoder_hdmi->priv;
bool is_hdmi2_sink = display_info->hdmi.scdc.supported;
unsigned long long clock = mode->clock * 1000;
unsigned long long clock = mode->clock * 1000ULL;
unsigned long long phy_freq;
unsigned long long vclk_freq;
unsigned long long venc_freq;


@ -390,7 +390,10 @@ static int panel_mipi_dbi_spi_probe(struct spi_device *spi)
spi_set_drvdata(spi, drm);
drm_client_setup(drm, NULL);
if (bpp == 16)
drm_client_setup_with_fourcc(drm, DRM_FORMAT_RGB565);
else
drm_client_setup_with_fourcc(drm, DRM_FORMAT_RGB888);
return 0;
}


@ -47,6 +47,10 @@
#define MI_LRI_FORCE_POSTED REG_BIT(12)
#define MI_LRI_LEN(x) (((x) & 0xff) + 1)
#define MI_STORE_REGISTER_MEM (__MI_INSTR(0x24) | XE_INSTR_NUM_DW(4))
#define MI_SRM_USE_GGTT REG_BIT(22)
#define MI_SRM_ADD_CS_OFFSET REG_BIT(19)
#define MI_FLUSH_DW __MI_INSTR(0x26)
#define MI_FLUSH_DW_PROTECTED_MEM_EN REG_BIT(22)
#define MI_FLUSH_DW_STORE_INDEX REG_BIT(21)


@ -43,6 +43,10 @@
#define XEHPC_BCS8_RING_BASE 0x3ee000
#define GSCCS_RING_BASE 0x11a000
#define ENGINE_ID(base) XE_REG((base) + 0x8c)
#define ENGINE_INSTANCE_ID REG_GENMASK(9, 4)
#define ENGINE_CLASS_ID REG_GENMASK(2, 0)
#define RING_TAIL(base) XE_REG((base) + 0x30)
#define TAIL_ADDR REG_GENMASK(20, 3)
@ -154,6 +158,7 @@
#define STOP_RING REG_BIT(8)
#define RING_CTX_TIMESTAMP(base) XE_REG((base) + 0x3a8)
#define RING_CTX_TIMESTAMP_UDW(base) XE_REG((base) + 0x3ac)
#define CSBE_DEBUG_STATUS(base) XE_REG((base) + 0x3fc)
#define RING_FORCE_TO_NONPRIV(base, i) XE_REG(((base) + 0x4d0) + (i) * 4)


@ -157,6 +157,7 @@
#define XEHPG_SC_INSTDONE_EXTRA2 XE_REG_MCR(0x7108)
#define COMMON_SLICE_CHICKEN4 XE_REG(0x7300, XE_REG_OPTION_MASKED)
#define SBE_PUSH_CONSTANT_BEHIND_FIX_ENABLE REG_BIT(12)
#define DISABLE_TDC_LOAD_BALANCING_CALC REG_BIT(6)
#define COMMON_SLICE_CHICKEN3 XE_REG(0x7304, XE_REG_OPTION_MASKED)


@ -11,7 +11,9 @@
#define CTX_RING_TAIL (0x06 + 1)
#define CTX_RING_START (0x08 + 1)
#define CTX_RING_CTL (0x0a + 1)
#define CTX_BB_PER_CTX_PTR (0x12 + 1)
#define CTX_TIMESTAMP (0x22 + 1)
#define CTX_TIMESTAMP_UDW (0x24 + 1)
#define CTX_INDIRECT_RING_STATE (0x26 + 1)
#define CTX_PDP0_UDW (0x30 + 1)
#define CTX_PDP0_LDW (0x32 + 1)


@ -330,6 +330,8 @@ struct xe_device {
u8 has_sriov:1;
/** @info.has_usm: Device has unified shared memory support */
u8 has_usm:1;
/** @info.has_64bit_timestamp: Device supports 64-bit timestamps */
u8 has_64bit_timestamp:1;
/** @info.is_dgfx: is discrete device */
u8 is_dgfx:1;
/**


@ -830,7 +830,7 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
{
struct xe_device *xe = gt_to_xe(q->gt);
struct xe_lrc *lrc;
u32 old_ts, new_ts;
u64 old_ts, new_ts;
int idx;
/*


@ -941,7 +941,7 @@ static bool check_timeout(struct xe_exec_queue *q, struct xe_sched_job *job)
return xe_sched_invalidate_job(job, 2);
}
ctx_timestamp = xe_lrc_ctx_timestamp(q->lrc[0]);
ctx_timestamp = lower_32_bits(xe_lrc_ctx_timestamp(q->lrc[0]));
ctx_job_timestamp = xe_lrc_ctx_job_timestamp(q->lrc[0]);
/*


@ -24,6 +24,7 @@
#include "xe_hw_fence.h"
#include "xe_map.h"
#include "xe_memirq.h"
#include "xe_mmio.h"
#include "xe_sriov.h"
#include "xe_trace_lrc.h"
#include "xe_vm.h"
@ -650,6 +651,7 @@ u32 xe_lrc_pphwsp_offset(struct xe_lrc *lrc)
#define LRC_START_SEQNO_PPHWSP_OFFSET (LRC_SEQNO_PPHWSP_OFFSET + 8)
#define LRC_CTX_JOB_TIMESTAMP_OFFSET (LRC_START_SEQNO_PPHWSP_OFFSET + 8)
#define LRC_PARALLEL_PPHWSP_OFFSET 2048
#define LRC_ENGINE_ID_PPHWSP_OFFSET 2096
#define LRC_PPHWSP_SIZE SZ_4K
u32 xe_lrc_regs_offset(struct xe_lrc *lrc)
@ -684,7 +686,7 @@ static inline u32 __xe_lrc_start_seqno_offset(struct xe_lrc *lrc)
static u32 __xe_lrc_ctx_job_timestamp_offset(struct xe_lrc *lrc)
{
/* The start seqno is stored in the driver-defined portion of PPHWSP */
/* This is stored in the driver-defined portion of PPHWSP */
return xe_lrc_pphwsp_offset(lrc) + LRC_CTX_JOB_TIMESTAMP_OFFSET;
}
@ -694,11 +696,21 @@ static inline u32 __xe_lrc_parallel_offset(struct xe_lrc *lrc)
return xe_lrc_pphwsp_offset(lrc) + LRC_PARALLEL_PPHWSP_OFFSET;
}
static inline u32 __xe_lrc_engine_id_offset(struct xe_lrc *lrc)
{
return xe_lrc_pphwsp_offset(lrc) + LRC_ENGINE_ID_PPHWSP_OFFSET;
}
static u32 __xe_lrc_ctx_timestamp_offset(struct xe_lrc *lrc)
{
return __xe_lrc_regs_offset(lrc) + CTX_TIMESTAMP * sizeof(u32);
}
static u32 __xe_lrc_ctx_timestamp_udw_offset(struct xe_lrc *lrc)
{
return __xe_lrc_regs_offset(lrc) + CTX_TIMESTAMP_UDW * sizeof(u32);
}
static inline u32 __xe_lrc_indirect_ring_offset(struct xe_lrc *lrc)
{
/* Indirect ring state page is at the very end of LRC */
@ -726,8 +738,10 @@ DECL_MAP_ADDR_HELPERS(regs)
DECL_MAP_ADDR_HELPERS(start_seqno)
DECL_MAP_ADDR_HELPERS(ctx_job_timestamp)
DECL_MAP_ADDR_HELPERS(ctx_timestamp)
DECL_MAP_ADDR_HELPERS(ctx_timestamp_udw)
DECL_MAP_ADDR_HELPERS(parallel)
DECL_MAP_ADDR_HELPERS(indirect_ring)
DECL_MAP_ADDR_HELPERS(engine_id)
#undef DECL_MAP_ADDR_HELPERS
@ -742,19 +756,38 @@ u32 xe_lrc_ctx_timestamp_ggtt_addr(struct xe_lrc *lrc)
return __xe_lrc_ctx_timestamp_ggtt_addr(lrc);
}
/**
* xe_lrc_ctx_timestamp_udw_ggtt_addr() - Get ctx timestamp udw GGTT address
* @lrc: Pointer to the lrc.
*
* Returns: ctx timestamp udw GGTT address
*/
u32 xe_lrc_ctx_timestamp_udw_ggtt_addr(struct xe_lrc *lrc)
{
return __xe_lrc_ctx_timestamp_udw_ggtt_addr(lrc);
}
/**
* xe_lrc_ctx_timestamp() - Read ctx timestamp value
* @lrc: Pointer to the lrc.
*
* Returns: ctx timestamp value
*/
u32 xe_lrc_ctx_timestamp(struct xe_lrc *lrc)
u64 xe_lrc_ctx_timestamp(struct xe_lrc *lrc)
{
struct xe_device *xe = lrc_to_xe(lrc);
struct iosys_map map;
u32 ldw, udw = 0;
map = __xe_lrc_ctx_timestamp_map(lrc);
return xe_map_read32(xe, &map);
ldw = xe_map_read32(xe, &map);
if (xe->info.has_64bit_timestamp) {
map = __xe_lrc_ctx_timestamp_udw_map(lrc);
udw = xe_map_read32(xe, &map);
}
return (u64)udw << 32 | ldw;
}
/**
@ -864,7 +897,7 @@ static void *empty_lrc_data(struct xe_hw_engine *hwe)
static void xe_lrc_set_ppgtt(struct xe_lrc *lrc, struct xe_vm *vm)
{
u64 desc = xe_vm_pdp4_descriptor(vm, lrc->tile);
u64 desc = xe_vm_pdp4_descriptor(vm, gt_to_tile(lrc->gt));
xe_lrc_write_ctx_reg(lrc, CTX_PDP0_UDW, upper_32_bits(desc));
xe_lrc_write_ctx_reg(lrc, CTX_PDP0_LDW, lower_32_bits(desc));
@ -877,6 +910,65 @@ static void xe_lrc_finish(struct xe_lrc *lrc)
xe_bo_unpin(lrc->bo);
xe_bo_unlock(lrc->bo);
xe_bo_put(lrc->bo);
xe_bo_unpin_map_no_vm(lrc->bb_per_ctx_bo);
}
/*
* xe_lrc_setup_utilization() - Setup wa bb to assist in calculating active
* context run ticks.
* @lrc: Pointer to the lrc.
*
* Context Timestamp (CTX_TIMESTAMP) in the LRC accumulates the run ticks of the
* context, but only gets updated when the context switches out. In order to
* check how long a context has been active before it switches out, two things
* are required:
*
* (1) Determine if the context is running:
* To do so, we program the WA BB to set an initial value for CTX_TIMESTAMP in
* the LRC. The value chosen is 1 since 0 is the initial value when the LRC is
* initialized. During a query, we just check for this value to determine if the
* context is active. If the context switched out, it would overwrite this
* location with the actual CTX_TIMESTAMP MMIO value. Note that WA BB runs as
* the last part of context restore, so reusing this LRC location will not
* clobber anything.
*
* (2) Calculate the time that the context has been active for:
* The CTX_TIMESTAMP ticks only when the context is active. If a context is
* active, we just use the CTX_TIMESTAMP MMIO as the new value of utilization.
* While doing so, we need to read the CTX_TIMESTAMP MMIO for the specific
* engine instance. Since we do not know which instance the context is running
* on until it is scheduled, we also read the ENGINE_ID MMIO in the WA BB and
* store it in the PPHWSP.
*/
#define CONTEXT_ACTIVE 1ULL
static void xe_lrc_setup_utilization(struct xe_lrc *lrc)
{
u32 *cmd;
cmd = lrc->bb_per_ctx_bo->vmap.vaddr;
*cmd++ = MI_STORE_REGISTER_MEM | MI_SRM_USE_GGTT | MI_SRM_ADD_CS_OFFSET;
*cmd++ = ENGINE_ID(0).addr;
*cmd++ = __xe_lrc_engine_id_ggtt_addr(lrc);
*cmd++ = 0;
*cmd++ = MI_STORE_DATA_IMM | MI_SDI_GGTT | MI_SDI_NUM_DW(1);
*cmd++ = __xe_lrc_ctx_timestamp_ggtt_addr(lrc);
*cmd++ = 0;
*cmd++ = lower_32_bits(CONTEXT_ACTIVE);
if (lrc_to_xe(lrc)->info.has_64bit_timestamp) {
*cmd++ = MI_STORE_DATA_IMM | MI_SDI_GGTT | MI_SDI_NUM_DW(1);
*cmd++ = __xe_lrc_ctx_timestamp_udw_ggtt_addr(lrc);
*cmd++ = 0;
*cmd++ = upper_32_bits(CONTEXT_ACTIVE);
}
*cmd++ = MI_BATCH_BUFFER_END;
xe_lrc_write_ctx_reg(lrc, CTX_BB_PER_CTX_PTR,
xe_bo_ggtt_addr(lrc->bb_per_ctx_bo) | 1);
}
#define PVC_CTX_ASID (0x2e + 1)
@ -893,31 +985,40 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
void *init_data = NULL;
u32 arb_enable;
u32 lrc_size;
u32 bo_flags;
int err;
kref_init(&lrc->refcount);
lrc->gt = gt;
lrc->flags = 0;
lrc_size = ring_size + xe_gt_lrc_size(gt, hwe->class);
if (xe_gt_has_indirect_ring_state(gt))
lrc->flags |= XE_LRC_FLAG_INDIRECT_RING_STATE;
bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT |
XE_BO_FLAG_GGTT_INVALIDATE;
/*
* FIXME: Perma-pinning LRC as we don't yet support moving GGTT address
* via VM bind calls.
*/
lrc->bo = xe_bo_create_pin_map(xe, tile, vm, lrc_size,
ttm_bo_type_kernel,
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
XE_BO_FLAG_GGTT |
XE_BO_FLAG_GGTT_INVALIDATE);
bo_flags);
if (IS_ERR(lrc->bo))
return PTR_ERR(lrc->bo);
lrc->bb_per_ctx_bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K,
ttm_bo_type_kernel,
bo_flags);
if (IS_ERR(lrc->bb_per_ctx_bo)) {
err = PTR_ERR(lrc->bb_per_ctx_bo);
goto err_lrc_finish;
}
lrc->size = lrc_size;
lrc->tile = gt_to_tile(hwe->gt);
lrc->ring.size = ring_size;
lrc->ring.tail = 0;
lrc->ctx_timestamp = 0;
xe_hw_fence_ctx_init(&lrc->fence_ctx, hwe->gt,
hwe->fence_irq, hwe->name);
@ -990,7 +1091,10 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
xe_lrc_read_ctx_reg(lrc, CTX_CONTEXT_CONTROL) |
_MASKED_BIT_ENABLE(CTX_CTRL_PXP_ENABLE));
lrc->ctx_timestamp = 0;
xe_lrc_write_ctx_reg(lrc, CTX_TIMESTAMP, 0);
if (lrc_to_xe(lrc)->info.has_64bit_timestamp)
xe_lrc_write_ctx_reg(lrc, CTX_TIMESTAMP_UDW, 0);
if (xe->info.has_asid && vm)
xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID, vm->usm.asid);
@ -1019,6 +1123,8 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
map = __xe_lrc_start_seqno_map(lrc);
xe_map_write32(lrc_to_xe(lrc), &map, lrc->fence_ctx.next_seqno - 1);
xe_lrc_setup_utilization(lrc);
return 0;
err_lrc_finish:
@ -1238,6 +1344,21 @@ struct iosys_map xe_lrc_parallel_map(struct xe_lrc *lrc)
return __xe_lrc_parallel_map(lrc);
}
/**
* xe_lrc_engine_id() - Read engine id value
* @lrc: Pointer to the lrc.
*
* Returns: context id value
*/
static u32 xe_lrc_engine_id(struct xe_lrc *lrc)
{
struct xe_device *xe = lrc_to_xe(lrc);
struct iosys_map map;
map = __xe_lrc_engine_id_map(lrc);
return xe_map_read32(xe, &map);
}
static int instr_dw(u32 cmd_header)
{
/* GFXPIPE "SINGLE_DW" opcodes are a single dword */
@ -1684,7 +1805,7 @@ struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc)
snapshot->lrc_offset = xe_lrc_pphwsp_offset(lrc);
snapshot->lrc_size = lrc->bo->size - snapshot->lrc_offset;
snapshot->lrc_snapshot = NULL;
snapshot->ctx_timestamp = xe_lrc_ctx_timestamp(lrc);
snapshot->ctx_timestamp = lower_32_bits(xe_lrc_ctx_timestamp(lrc));
snapshot->ctx_job_timestamp = xe_lrc_ctx_job_timestamp(lrc);
return snapshot;
}
@ -1784,22 +1905,74 @@ void xe_lrc_snapshot_free(struct xe_lrc_snapshot *snapshot)
kfree(snapshot);
}
static int get_ctx_timestamp(struct xe_lrc *lrc, u32 engine_id, u64 *reg_ctx_ts)
{
u16 class = REG_FIELD_GET(ENGINE_CLASS_ID, engine_id);
u16 instance = REG_FIELD_GET(ENGINE_INSTANCE_ID, engine_id);
struct xe_hw_engine *hwe;
u64 val;
hwe = xe_gt_hw_engine(lrc->gt, class, instance, false);
if (xe_gt_WARN_ONCE(lrc->gt, !hwe || xe_hw_engine_is_reserved(hwe),
"Unexpected engine class:instance %d:%d for context utilization\n",
class, instance))
return -1;
if (lrc_to_xe(lrc)->info.has_64bit_timestamp)
val = xe_mmio_read64_2x32(&hwe->gt->mmio,
RING_CTX_TIMESTAMP(hwe->mmio_base));
else
val = xe_mmio_read32(&hwe->gt->mmio,
RING_CTX_TIMESTAMP(hwe->mmio_base));
*reg_ctx_ts = val;
return 0;
}
/**
* xe_lrc_update_timestamp() - Update ctx timestamp
* @lrc: Pointer to the lrc.
* @old_ts: Old timestamp value
*
* Populate @old_ts current saved ctx timestamp, read new ctx timestamp and
* update saved value.
* update saved value. With support for active contexts, the calculation may be
* slightly racy, so follow a read-again logic to ensure that the context is
* still active before returning the right timestamp.
*
* Returns: New ctx timestamp value
*/
u32 xe_lrc_update_timestamp(struct xe_lrc *lrc, u32 *old_ts)
u64 xe_lrc_update_timestamp(struct xe_lrc *lrc, u64 *old_ts)
{
u64 lrc_ts, reg_ts;
u32 engine_id;
*old_ts = lrc->ctx_timestamp;
lrc->ctx_timestamp = xe_lrc_ctx_timestamp(lrc);
lrc_ts = xe_lrc_ctx_timestamp(lrc);
/* CTX_TIMESTAMP mmio read is invalid on VF, so return the LRC value */
if (IS_SRIOV_VF(lrc_to_xe(lrc))) {
lrc->ctx_timestamp = lrc_ts;
goto done;
}
if (lrc_ts == CONTEXT_ACTIVE) {
engine_id = xe_lrc_engine_id(lrc);
if (!get_ctx_timestamp(lrc, engine_id, &reg_ts))
lrc->ctx_timestamp = reg_ts;
/* read lrc again to ensure context is still active */
lrc_ts = xe_lrc_ctx_timestamp(lrc);
}
/*
* If context switched out, just use the lrc_ts. Note that this needs to
* be a separate if condition.
*/
if (lrc_ts != CONTEXT_ACTIVE)
lrc->ctx_timestamp = lrc_ts;
done:
trace_xe_lrc_update_timestamp(lrc, *old_ts);
return lrc->ctx_timestamp;


@ -120,7 +120,8 @@ void xe_lrc_snapshot_print(struct xe_lrc_snapshot *snapshot, struct drm_printer
void xe_lrc_snapshot_free(struct xe_lrc_snapshot *snapshot);
u32 xe_lrc_ctx_timestamp_ggtt_addr(struct xe_lrc *lrc);
u32 xe_lrc_ctx_timestamp(struct xe_lrc *lrc);
u32 xe_lrc_ctx_timestamp_udw_ggtt_addr(struct xe_lrc *lrc);
u64 xe_lrc_ctx_timestamp(struct xe_lrc *lrc);
u32 xe_lrc_ctx_job_timestamp_ggtt_addr(struct xe_lrc *lrc);
u32 xe_lrc_ctx_job_timestamp(struct xe_lrc *lrc);
@ -136,6 +137,6 @@ u32 xe_lrc_ctx_job_timestamp(struct xe_lrc *lrc);
*
* Returns the current LRC timestamp
*/
u32 xe_lrc_update_timestamp(struct xe_lrc *lrc, u32 *old_ts);
u64 xe_lrc_update_timestamp(struct xe_lrc *lrc, u64 *old_ts);
#endif


@ -25,8 +25,8 @@ struct xe_lrc {
/** @size: size of lrc including any indirect ring state page */
u32 size;
/** @tile: tile which this LRC belongs to */
struct xe_tile *tile;
/** @gt: gt which this LRC belongs to */
struct xe_gt *gt;
/** @flags: LRC flags */
#define XE_LRC_FLAG_INDIRECT_RING_STATE 0x1
@ -52,7 +52,10 @@ struct xe_lrc {
struct xe_hw_fence_ctx fence_ctx;
/** @ctx_timestamp: readout value of CTX_TIMESTAMP on last update */
u32 ctx_timestamp;
u64 ctx_timestamp;
/** @bb_per_ctx_bo: buffer object for per context batch wa buffer */
struct xe_bo *bb_per_ctx_bo;
};
struct xe_lrc_snapshot;


@ -29,9 +29,6 @@ struct xe_modparam xe_modparam = {
module_param_named(svm_notifier_size, xe_modparam.svm_notifier_size, uint, 0600);
MODULE_PARM_DESC(svm_notifier_size, "Set the svm notifier size(in MiB), must be power of 2");
module_param_named(always_migrate_to_vram, xe_modparam.always_migrate_to_vram, bool, 0444);
MODULE_PARM_DESC(always_migrate_to_vram, "Always migrate to VRAM on GPU fault");
module_param_named_unsafe(force_execlist, xe_modparam.force_execlist, bool, 0444);
MODULE_PARM_DESC(force_execlist, "Force Execlist submission");


@ -12,7 +12,6 @@
struct xe_modparam {
bool force_execlist;
bool probe_display;
bool always_migrate_to_vram;
u32 force_vram_bar_size;
int guc_log_level;
char *guc_firmware_path;


@ -140,6 +140,7 @@ static const struct xe_graphics_desc graphics_xelpg = {
.has_indirect_ring_state = 1, \
.has_range_tlb_invalidation = 1, \
.has_usm = 1, \
.has_64bit_timestamp = 1, \
.va_bits = 48, \
.vm_max_level = 4, \
.hw_engine_mask = \
@ -668,6 +669,7 @@ static int xe_info_init(struct xe_device *xe,
xe->info.has_range_tlb_invalidation = graphics_desc->has_range_tlb_invalidation;
xe->info.has_usm = graphics_desc->has_usm;
xe->info.has_64bit_timestamp = graphics_desc->has_64bit_timestamp;
for_each_remote_tile(tile, xe, id) {
int err;


@ -21,6 +21,7 @@ struct xe_graphics_desc {
u8 has_indirect_ring_state:1;
u8 has_range_tlb_invalidation:1;
u8 has_usm:1;
u8 has_64bit_timestamp:1;
};
struct xe_media_desc {


@ -2232,11 +2232,19 @@ static void op_commit(struct xe_vm *vm,
}
case DRM_GPUVA_OP_DRIVER:
{
/* WRITE_ONCE pairs with READ_ONCE in xe_svm.c */
if (op->subop == XE_VMA_SUBOP_MAP_RANGE) {
op->map_range.range->tile_present |= BIT(tile->id);
op->map_range.range->tile_invalidated &= ~BIT(tile->id);
WRITE_ONCE(op->map_range.range->tile_present,
op->map_range.range->tile_present |
BIT(tile->id));
WRITE_ONCE(op->map_range.range->tile_invalidated,
op->map_range.range->tile_invalidated &
~BIT(tile->id));
} else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE) {
op->unmap_range.range->tile_present &= ~BIT(tile->id);
WRITE_ONCE(op->unmap_range.range->tile_present,
op->unmap_range.range->tile_present &
~BIT(tile->id));
}
break;
}


@ -234,13 +234,10 @@ static u32 get_ppgtt_flag(struct xe_sched_job *job)
static int emit_copy_timestamp(struct xe_lrc *lrc, u32 *dw, int i)
{
dw[i++] = MI_COPY_MEM_MEM | MI_COPY_MEM_MEM_SRC_GGTT |
MI_COPY_MEM_MEM_DST_GGTT;
dw[i++] = MI_STORE_REGISTER_MEM | MI_SRM_USE_GGTT | MI_SRM_ADD_CS_OFFSET;
dw[i++] = RING_CTX_TIMESTAMP(0).addr;
dw[i++] = xe_lrc_ctx_job_timestamp_ggtt_addr(lrc);
dw[i++] = 0;
dw[i++] = xe_lrc_ctx_timestamp_ggtt_addr(lrc);
dw[i++] = 0;
dw[i++] = MI_NOOP;
return i;
}


@ -227,7 +227,7 @@ struct xe_shrinker *xe_shrinker_create(struct xe_device *xe)
if (!shrinker)
return ERR_PTR(-ENOMEM);
shrinker->shrink = shrinker_alloc(0, "xe system shrinker");
shrinker->shrink = shrinker_alloc(0, "drm-xe_gem:%s", xe->drm.unique);
if (!shrinker->shrink) {
kfree(shrinker);
return ERR_PTR(-ENOMEM);


@ -15,8 +15,17 @@
static bool xe_svm_range_in_vram(struct xe_svm_range *range)
{
/* Not reliable without notifier lock */
return range->base.flags.has_devmem_pages;
/*
* Advisory only check whether the range is currently backed by VRAM
* memory.
*/
struct drm_gpusvm_range_flags flags = {
/* Pairs with WRITE_ONCE in drm_gpusvm.c */
.__flags = READ_ONCE(range->base.flags.__flags),
};
return flags.has_devmem_pages;
}
static bool xe_svm_range_has_vram_binding(struct xe_svm_range *range)
@ -645,9 +654,16 @@ void xe_svm_fini(struct xe_vm *vm)
}
static bool xe_svm_range_is_valid(struct xe_svm_range *range,
struct xe_tile *tile)
struct xe_tile *tile,
bool devmem_only)
{
return (range->tile_present & ~range->tile_invalidated) & BIT(tile->id);
/*
* Advisory only check whether the range currently has a valid mapping,
* READ_ONCE pairs with WRITE_ONCE in xe_pt.c
*/
return ((READ_ONCE(range->tile_present) &
~READ_ONCE(range->tile_invalidated)) & BIT(tile->id)) &&
(!devmem_only || xe_svm_range_in_vram(range));
}
static struct xe_vram_region *tile_to_vr(struct xe_tile *tile)
@ -712,6 +728,36 @@ unlock:
return err;
}
static bool supports_4K_migration(struct xe_device *xe)
{
if (xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
return false;
return true;
}
static bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range,
struct xe_vma *vma)
{
struct xe_vm *vm = range_to_vm(&range->base);
u64 range_size = xe_svm_range_size(range);
if (!range->base.flags.migrate_devmem)
return false;
if (xe_svm_range_in_vram(range)) {
drm_dbg(&vm->xe->drm, "Range is already in VRAM\n");
return false;
}
if (range_size <= SZ_64K && !supports_4K_migration(vm->xe)) {
drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
return false;
}
return true;
}
/**
* xe_svm_handle_pagefault() - SVM handle page fault
* @vm: The VM.
@ -735,11 +781,16 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR),
.check_pages_threshold = IS_DGFX(vm->xe) &&
IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ? SZ_64K : 0,
.devmem_only = atomic && IS_DGFX(vm->xe) &&
IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR),
.timeslice_ms = atomic && IS_DGFX(vm->xe) &&
IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ? 5 : 0,
};
struct xe_svm_range *range;
struct drm_gpusvm_range *r;
struct drm_exec exec;
struct dma_fence *fence;
int migrate_try_count = ctx.devmem_only ? 3 : 1;
ktime_t end = 0;
int err;
@ -758,24 +809,31 @@ retry:
if (IS_ERR(r))
return PTR_ERR(r);
if (ctx.devmem_only && !r->flags.migrate_devmem)
return -EACCES;
range = to_xe_range(r);
if (xe_svm_range_is_valid(range, tile))
if (xe_svm_range_is_valid(range, tile, ctx.devmem_only))
return 0;
range_debug(range, "PAGE FAULT");
/* XXX: Add migration policy, for now migrate range once */
if (!range->skip_migrate && range->base.flags.migrate_devmem &&
xe_svm_range_size(range) >= SZ_64K) {
range->skip_migrate = true;
if (--migrate_try_count >= 0 &&
xe_svm_range_needs_migrate_to_vram(range, vma)) {
err = xe_svm_alloc_vram(vm, tile, range, &ctx);
ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
if (err) {
drm_dbg(&vm->xe->drm,
"VRAM allocation failed, falling back to "
"retrying fault, asid=%u, errno=%pe\n",
vm->usm.asid, ERR_PTR(err));
goto retry;
if (migrate_try_count || !ctx.devmem_only) {
drm_dbg(&vm->xe->drm,
"VRAM allocation failed, falling back to retrying fault, asid=%u, errno=%pe\n",
vm->usm.asid, ERR_PTR(err));
goto retry;
} else {
drm_err(&vm->xe->drm,
"VRAM allocation failed, retry count exceeded, asid=%u, errno=%pe\n",
vm->usm.asid, ERR_PTR(err));
return err;
}
}
}
@ -783,15 +841,23 @@ retry:
err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, r, &ctx);
/* Corner where CPU mappings have changed */
if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {
if (err == -EOPNOTSUPP) {
range_debug(range, "PAGE FAULT - EVICT PAGES");
drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
if (migrate_try_count > 0 || !ctx.devmem_only) {
if (err == -EOPNOTSUPP) {
range_debug(range, "PAGE FAULT - EVICT PAGES");
drm_gpusvm_range_evict(&vm->svm.gpusvm,
&range->base);
}
drm_dbg(&vm->xe->drm,
"Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n",
vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
range_debug(range, "PAGE FAULT - RETRY PAGES");
goto retry;
} else {
drm_err(&vm->xe->drm,
"Get pages failed, retry count exceeded, asid=%u, gpusvm=%p, errno=%pe\n",
vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
}
drm_dbg(&vm->xe->drm,
"Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n",
vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
range_debug(range, "PAGE FAULT - RETRY PAGES");
goto retry;
}
if (err) {
range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT");
@ -815,6 +881,7 @@ retry_bind:
drm_exec_fini(&exec);
err = PTR_ERR(fence);
if (err == -EAGAIN) {
ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
range_debug(range, "PAGE FAULT - RETRY BIND");
goto retry;
}
@ -825,9 +892,6 @@ retry_bind:
}
drm_exec_fini(&exec);
if (xe_modparam.always_migrate_to_vram)
range->skip_migrate = false;
dma_fence_wait(fence, false);
dma_fence_put(fence);


@ -36,11 +36,6 @@ struct xe_svm_range {
* range. Protected by GPU SVM notifier lock.
*/
u8 tile_invalidated;
/**
* @skip_migrate: Skip migration to VRAM, protected by GPU fault handler
* locking.
*/
u8 skip_migrate :1;
};
#if IS_ENABLED(CONFIG_DRM_GPUSVM)


@ -19,12 +19,12 @@
#define __dev_name_lrc(lrc) dev_name(gt_to_xe((lrc)->fence_ctx.gt)->drm.dev)
TRACE_EVENT(xe_lrc_update_timestamp,
TP_PROTO(struct xe_lrc *lrc, uint32_t old),
TP_PROTO(struct xe_lrc *lrc, uint64_t old),
TP_ARGS(lrc, old),
TP_STRUCT__entry(
__field(struct xe_lrc *, lrc)
__field(u32, old)
__field(u32, new)
__field(u64, old)
__field(u64, new)
__string(name, lrc->fence_ctx.name)
__string(device_id, __dev_name_lrc(lrc))
),
@ -36,7 +36,7 @@ TRACE_EVENT(xe_lrc_update_timestamp,
__assign_str(name);
__assign_str(device_id);
),
TP_printk("lrc=:%p lrc->name=%s old=%u new=%u device_id:%s",
TP_printk("lrc=:%p lrc->name=%s old=%llu new=%llu device_id:%s",
__entry->lrc, __get_str(name),
__entry->old, __entry->new,
__get_str(device_id))


@ -815,6 +815,10 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_CLIP_NEGATIVE_BOUNDING_BOX))
},
{ XE_RTP_NAME("22021007897"),
XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN4, SBE_PUSH_CONSTANT_BEHIND_FIX_ENABLE))
},
/* Xe3_LPG */
{ XE_RTP_NAME("14021490052"),


@ -83,6 +83,9 @@ static int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata)
case ALS_IDX:
privdata->dev_en.is_als_present = false;
break;
case SRA_IDX:
privdata->dev_en.is_sra_present = false;
break;
}
if (cl_data->sensor_sts[i] == SENSOR_ENABLED) {
@ -134,9 +137,6 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
for (i = 0; i < cl_data->num_hid_devices; i++) {
cl_data->sensor_sts[i] = SENSOR_DISABLED;
if (cl_data->num_hid_devices == 1 && cl_data->sensor_idx[0] == SRA_IDX)
break;
if (cl_data->sensor_idx[i] == SRA_IDX) {
info.sensor_idx = cl_data->sensor_idx[i];
writel(0, privdata->mmio + amd_get_p2c_val(privdata, 0));
@ -145,8 +145,10 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
(privdata, cl_data->sensor_idx[i], ENABLE_SENSOR);
cl_data->sensor_sts[i] = (status == 0) ? SENSOR_ENABLED : SENSOR_DISABLED;
if (cl_data->sensor_sts[i] == SENSOR_ENABLED)
if (cl_data->sensor_sts[i] == SENSOR_ENABLED) {
cl_data->is_any_sensor_enabled = true;
privdata->dev_en.is_sra_present = true;
}
continue;
}
@ -238,6 +240,8 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
cleanup:
amd_sfh_hid_client_deinit(privdata);
for (i = 0; i < cl_data->num_hid_devices; i++) {
if (cl_data->sensor_idx[i] == SRA_IDX)
continue;
devm_kfree(dev, cl_data->feature_report[i]);
devm_kfree(dev, in_data->input_report[i]);
devm_kfree(dev, cl_data->report_descr[i]);


@ -38,6 +38,9 @@ dispatch_hid_bpf_device_event(struct hid_device *hdev, enum hid_report_type type
struct hid_bpf_ops *e;
int ret;
if (unlikely(hdev->bpf.destroyed))
return ERR_PTR(-ENODEV);
if (type >= HID_REPORT_TYPES)
return ERR_PTR(-EINVAL);
@ -93,6 +96,9 @@ int dispatch_hid_bpf_raw_requests(struct hid_device *hdev,
struct hid_bpf_ops *e;
int ret, idx;
if (unlikely(hdev->bpf.destroyed))
return -ENODEV;
if (rtype >= HID_REPORT_TYPES)
return -EINVAL;
@ -130,6 +136,9 @@ int dispatch_hid_bpf_output_report(struct hid_device *hdev,
struct hid_bpf_ops *e;
int ret, idx;
if (unlikely(hdev->bpf.destroyed))
return -ENODEV;
idx = srcu_read_lock(&hdev->bpf.srcu);
list_for_each_entry_srcu(e, &hdev->bpf.prog_list, list,
srcu_read_lock_held(&hdev->bpf.srcu)) {


@ -157,6 +157,7 @@ static const __u8 fixed_rdesc_vendor[] = {
ReportCount(5) // padding
Input(Const)
// Byte 4 in report - just exists so we get to be a tablet pad
UsagePage_Digitizers
Usage_Dig_BarrelSwitch // BTN_STYLUS
ReportCount(1)
ReportSize(1)


@ -41,6 +41,10 @@
#define USB_VENDOR_ID_ACTIONSTAR 0x2101
#define USB_DEVICE_ID_ACTIONSTAR_1011 0x1011
#define USB_VENDOR_ID_ADATA_XPG 0x125f
#define USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE 0x7505
#define USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE_DONGLE 0x7506
#define USB_VENDOR_ID_ADS_TECH 0x06e1
#define USB_DEVICE_ID_ADS_TECH_RADIO_SI470X 0xa155


@ -27,6 +27,8 @@
static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_GAMEPAD), HID_QUIRK_BADPAD },
{ HID_USB_DEVICE(USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR), HID_QUIRK_BADPAD },
{ HID_USB_DEVICE(USB_VENDOR_ID_ADATA_XPG, USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_ADATA_XPG, USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE_DONGLE), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016), HID_QUIRK_FULLSPEED_INTERVAL },
{ HID_USB_DEVICE(USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX), HID_QUIRK_NO_INIT_REPORTS },


@ -1150,11 +1150,9 @@ static void steam_client_ll_close(struct hid_device *hdev)
struct steam_device *steam = hdev->driver_data;
unsigned long flags;
bool connected;
spin_lock_irqsave(&steam->lock, flags);
steam->client_opened--;
connected = steam->connected && !steam->client_opened;
spin_unlock_irqrestore(&steam->lock, flags);
schedule_work(&steam->unregister_work);


@ -174,6 +174,7 @@ static void thrustmaster_interrupts(struct hid_device *hdev)
u8 ep_addr[2] = {b_ep, 0};
if (!usb_check_int_endpoints(usbif, ep_addr)) {
kfree(send_buf);
hid_err(hdev, "Unexpected non-int endpoint\n");
return;
}


@ -142,11 +142,12 @@ static int uclogic_input_configured(struct hid_device *hdev,
suffix = "System Control";
break;
}
}
if (suffix)
} else {
hi->input->name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
"%s %s", hdev->name, suffix);
if (!hi->input->name)
return -ENOMEM;
}
return 0;
}


@ -70,10 +70,16 @@ static void wacom_wac_queue_flush(struct hid_device *hdev,
{
while (!kfifo_is_empty(fifo)) {
int size = kfifo_peek_len(fifo);
u8 *buf = kzalloc(size, GFP_KERNEL);
u8 *buf;
unsigned int count;
int err;
buf = kzalloc(size, GFP_KERNEL);
if (!buf) {
kfifo_skip(fifo);
continue;
}
count = kfifo_out(fifo, buf, size);
if (count != size) {
// Hard to say what is the "right" action in this
@ -81,6 +87,7 @@ static void wacom_wac_queue_flush(struct hid_device *hdev,
// to flush seems reasonable enough, however.
hid_warn(hdev, "%s: removed fifo entry with unexpected size\n",
__func__);
kfree(buf);
continue;
}
err = hid_report_raw_event(hdev, HID_INPUT_REPORT, buf, size, false);
@ -2361,6 +2368,8 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
unsigned int connect_mask = HID_CONNECT_HIDRAW;
features->pktlen = wacom_compute_pktlen(hdev);
if (!features->pktlen)
return -ENODEV;
if (!devres_open_group(&hdev->dev, wacom, GFP_KERNEL))
return -ENOMEM;


@ -278,9 +278,11 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU) {
dev->slave = i2c_new_ccgx_ucsi(&dev->adapter, dev->irq, &dgpu_node);
if (IS_ERR(dev->slave))
if (IS_ERR(dev->slave)) {
i2c_del_adapter(&dev->adapter);
return dev_err_probe(device, PTR_ERR(dev->slave),
"register UCSI failed\n");
}
}
pm_runtime_set_autosuspend_delay(device, 1000);


@ -1352,6 +1352,9 @@ static void ib_device_notify_register(struct ib_device *device)
down_read(&devices_rwsem);
/* Mark for userspace that device is ready */
kobject_uevent(&device->dev.kobj, KOBJ_ADD);
ret = rdma_nl_notify_event(device, 0, RDMA_REGISTER_EVENT);
if (ret)
goto out;
@ -1468,10 +1471,9 @@ int ib_register_device(struct ib_device *device, const char *name,
return ret;
}
dev_set_uevent_suppress(&device->dev, false);
/* Mark for userspace that device is ready */
kobject_uevent(&device->dev.kobj, KOBJ_ADD);
ib_device_notify_register(device);
ib_device_put(device);
return 0;


@ -221,7 +221,7 @@ static int irdma_init_interrupts(struct irdma_pci_f *rf, struct iidc_rdma_core_d
break;
if (i < IRDMA_MIN_MSIX) {
for (; i > 0; i--)
while (--i >= 0)
ice_free_rdma_qvector(cdev, &rf->msix_entries[i]);
kfree(rf->msix_entries);
@ -256,6 +256,8 @@ static void irdma_remove(struct auxiliary_device *aux_dev)
irdma_ib_unregister_device(iwdev);
irdma_deinit_interrupts(iwdev->rf, cdev_info);
kfree(iwdev->rf);
pr_debug("INIT: Gen2 PF[%d] device remove success\n", PCI_FUNC(cdev_info->pdev->devfn));
}


@ -4871,5 +4871,4 @@ void irdma_ib_dealloc_device(struct ib_device *ibdev)
irdma_rt_deinit_hw(iwdev);
irdma_ctrl_deinit_hw(iwdev->rf);
kfree(iwdev->rf);
}


@ -56,11 +56,8 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata,
cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
if (err) {
vfree(cq->queue->buf);
kfree(cq->queue);
if (err)
return err;
}
cq->is_user = uresp;


@ -252,7 +252,7 @@ static void __init gicv2m_teardown(void)
static struct msi_parent_ops gicv2m_msi_parent_ops = {
.supported_flags = GICV2M_MSI_FLAGS_SUPPORTED,
.required_flags = GICV2M_MSI_FLAGS_REQUIRED,
.chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.chip_flags = MSI_CHIP_FLAG_SET_EOI,
.bus_select_token = DOMAIN_BUS_NEXUS,
.bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
.prefix = "GICv2m-",

View File

@ -203,7 +203,7 @@ static bool its_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
const struct msi_parent_ops gic_v3_its_msi_parent_ops = {
.supported_flags = ITS_MSI_FLAGS_SUPPORTED,
.required_flags = ITS_MSI_FLAGS_REQUIRED,
.chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.chip_flags = MSI_CHIP_FLAG_SET_EOI,
.bus_select_token = DOMAIN_BUS_NEXUS,
.bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
.prefix = "ITS-",


@ -197,7 +197,7 @@ static bool mbi_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
static const struct msi_parent_ops gic_v3_mbi_msi_parent_ops = {
.supported_flags = MBI_MSI_FLAGS_SUPPORTED,
.required_flags = MBI_MSI_FLAGS_REQUIRED,
.chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.chip_flags = MSI_CHIP_FLAG_SET_EOI,
.bus_select_token = DOMAIN_BUS_NEXUS,
.bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
.prefix = "MBI-",


@ -161,7 +161,7 @@ static const struct irq_domain_ops gicp_domain_ops = {
static const struct msi_parent_ops gicp_msi_parent_ops = {
.supported_flags = GICP_MSI_FLAGS_SUPPORTED,
.required_flags = GICP_MSI_FLAGS_REQUIRED,
.chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.chip_flags = MSI_CHIP_FLAG_SET_EOI,
.bus_select_token = DOMAIN_BUS_GENERIC_MSI,
.bus_select_mask = MATCH_PLATFORM_MSI,
.prefix = "GICP-",


@ -157,7 +157,7 @@ static const struct irq_domain_ops odmi_domain_ops = {
static const struct msi_parent_ops odmi_msi_parent_ops = {
.supported_flags = ODMI_MSI_FLAGS_SUPPORTED,
.required_flags = ODMI_MSI_FLAGS_REQUIRED,
.chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.chip_flags = MSI_CHIP_FLAG_SET_EOI,
.bus_select_token = DOMAIN_BUS_GENERIC_MSI,
.bus_select_mask = MATCH_PLATFORM_MSI,
.prefix = "ODMI-",


@ -208,17 +208,17 @@ skip:
}
#ifdef CONFIG_SMP
static void __imsic_local_timer_start(struct imsic_local_priv *lpriv)
static void __imsic_local_timer_start(struct imsic_local_priv *lpriv, unsigned int cpu)
{
lockdep_assert_held(&lpriv->lock);
if (!timer_pending(&lpriv->timer)) {
lpriv->timer.expires = jiffies + 1;
add_timer_on(&lpriv->timer, smp_processor_id());
add_timer_on(&lpriv->timer, cpu);
}
}
#else
static inline void __imsic_local_timer_start(struct imsic_local_priv *lpriv)
static inline void __imsic_local_timer_start(struct imsic_local_priv *lpriv, unsigned int cpu)
{
}
#endif
@ -233,7 +233,7 @@ void imsic_local_sync_all(bool force_all)
if (force_all)
bitmap_fill(lpriv->dirty_bitmap, imsic->global.nr_ids + 1);
if (!__imsic_local_sync(lpriv))
__imsic_local_timer_start(lpriv);
__imsic_local_timer_start(lpriv, smp_processor_id());
raw_spin_unlock_irqrestore(&lpriv->lock, flags);
}
@ -278,7 +278,7 @@ static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu
return;
}
__imsic_local_timer_start(lpriv);
__imsic_local_timer_start(lpriv, cpu);
}
}
#else


@ -16,6 +16,7 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include <net/netdev_queues.h>
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Kvaser AB <support@kvaser.com>");
@ -410,10 +411,13 @@ struct kvaser_pciefd_can {
void __iomem *reg_base;
struct can_berr_counter bec;
u8 cmd_seq;
u8 tx_max_count;
u8 tx_idx;
u8 ack_idx;
int err_rep_cnt;
int echo_idx;
unsigned int completed_tx_pkts;
unsigned int completed_tx_bytes;
spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */
spinlock_t echo_lock; /* Locks the message echo buffer */
struct timer_list bec_poll_timer;
struct completion start_comp, flush_comp;
};
@ -714,6 +718,9 @@ static int kvaser_pciefd_open(struct net_device *netdev)
int ret;
struct kvaser_pciefd_can *can = netdev_priv(netdev);
can->tx_idx = 0;
can->ack_idx = 0;
ret = open_candev(netdev);
if (ret)
return ret;
@ -745,21 +752,26 @@ static int kvaser_pciefd_stop(struct net_device *netdev)
timer_delete(&can->bec_poll_timer);
}
can->can.state = CAN_STATE_STOPPED;
netdev_reset_queue(netdev);
close_candev(netdev);
return ret;
}
static unsigned int kvaser_pciefd_tx_avail(const struct kvaser_pciefd_can *can)
{
return can->tx_max_count - (READ_ONCE(can->tx_idx) - READ_ONCE(can->ack_idx));
}
static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
struct kvaser_pciefd_can *can,
struct can_priv *can, u8 seq,
struct sk_buff *skb)
{
struct canfd_frame *cf = (struct canfd_frame *)skb->data;
int packet_size;
int seq = can->echo_idx;
memset(p, 0, sizeof(*p));
if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
if (can->ctrlmode & CAN_CTRLMODE_ONE_SHOT)
p->header[1] |= KVASER_PCIEFD_TPACKET_SMS;
if (cf->can_id & CAN_RTR_FLAG)
@ -782,7 +794,7 @@ static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
} else {
p->header[1] |=
FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK,
can_get_cc_dlc((struct can_frame *)cf, can->can.ctrlmode));
can_get_cc_dlc((struct can_frame *)cf, can->ctrlmode));
}
p->header[1] |= FIELD_PREP(KVASER_PCIEFD_PACKET_SEQ_MASK, seq);
@ -797,22 +809,24 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct kvaser_pciefd_can *can = netdev_priv(netdev);
unsigned long irq_flags;
struct kvaser_pciefd_tx_packet packet;
unsigned int seq = can->tx_idx & (can->can.echo_skb_max - 1);
unsigned int frame_len;
int nr_words;
u8 count;
if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
if (!netif_subqueue_maybe_stop(netdev, 0, kvaser_pciefd_tx_avail(can), 1, 1))
return NETDEV_TX_BUSY;
nr_words = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);
nr_words = kvaser_pciefd_prepare_tx_packet(&packet, &can->can, seq, skb);
spin_lock_irqsave(&can->echo_lock, irq_flags);
/* Prepare and save echo skb in internal slot */
can_put_echo_skb(skb, netdev, can->echo_idx, 0);
/* Move echo index to the next slot */
can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max;
WRITE_ONCE(can->can.echo_skb[seq], NULL);
frame_len = can_skb_get_frame_len(skb);
can_put_echo_skb(skb, netdev, seq, frame_len);
netdev_sent_queue(netdev, frame_len);
WRITE_ONCE(can->tx_idx, can->tx_idx + 1);
/* Write header to fifo */
iowrite32(packet.header[0],
@ -836,14 +850,7 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
}
count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
/* No room for a new message, stop the queue until at least one
* successful transmit
*/
if (count >= can->can.echo_skb_max || can->can.echo_skb[can->echo_idx])
netif_stop_queue(netdev);
spin_unlock_irqrestore(&can->echo_lock, irq_flags);
netif_subqueue_maybe_stop(netdev, 0, kvaser_pciefd_tx_avail(can), 1, 1);
return NETDEV_TX_OK;
}
@ -970,6 +977,8 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
can->kv_pcie = pcie;
can->cmd_seq = 0;
can->err_rep_cnt = 0;
can->completed_tx_pkts = 0;
can->completed_tx_bytes = 0;
can->bec.txerr = 0;
can->bec.rxerr = 0;
@ -983,11 +992,10 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
tx_nr_packets_max =
FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK,
ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
can->tx_max_count = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1);
can->can.clock.freq = pcie->freq;
can->can.echo_skb_max = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1);
can->echo_idx = 0;
spin_lock_init(&can->echo_lock);
can->can.echo_skb_max = roundup_pow_of_two(can->tx_max_count);
spin_lock_init(&can->lock);
can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
@ -1201,7 +1209,7 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
skb = alloc_canfd_skb(priv->dev, &cf);
if (!skb) {
priv->dev->stats.rx_dropped++;
return -ENOMEM;
return 0;
}
cf->len = can_fd_dlc2len(dlc);
@ -1213,7 +1221,7 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf);
if (!skb) {
priv->dev->stats.rx_dropped++;
return -ENOMEM;
return 0;
}
can_frame_set_cc_len((struct can_frame *)cf, dlc, priv->ctrlmode);
}
@ -1231,7 +1239,9 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
priv->dev->stats.rx_packets++;
kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
return netif_rx(skb);
netif_rx(skb);
return 0;
}
static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
@ -1510,19 +1520,21 @@ static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
netdev_dbg(can->can.dev, "Packet was flushed\n");
} else {
int echo_idx = FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[0]);
int len;
u8 count;
unsigned int len, frame_len = 0;
struct sk_buff *skb;
if (echo_idx != (can->ack_idx & (can->can.echo_skb_max - 1)))
return 0;
skb = can->can.echo_skb[echo_idx];
if (skb)
kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
len = can_get_echo_skb(can->can.dev, echo_idx, NULL);
count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
if (!skb)
return 0;
kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
len = can_get_echo_skb(can->can.dev, echo_idx, &frame_len);
if (count < can->can.echo_skb_max && netif_queue_stopped(can->can.dev))
netif_wake_queue(can->can.dev);
/* Pairs with barrier in kvaser_pciefd_start_xmit() */
smp_store_release(&can->ack_idx, can->ack_idx + 1);
can->completed_tx_pkts++;
can->completed_tx_bytes += frame_len;
if (!one_shot_fail) {
can->can.dev->stats.tx_bytes += len;
@ -1638,32 +1650,51 @@ static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
{
int pos = 0;
int res = 0;
unsigned int i;
do {
res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf);
} while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE);
/* Report ACKs in this buffer to BQL en masse for correct periods */
for (i = 0; i < pcie->nr_channels; ++i) {
struct kvaser_pciefd_can *can = pcie->can[i];
if (!can->completed_tx_pkts)
continue;
netif_subqueue_completed_wake(can->can.dev, 0,
can->completed_tx_pkts,
can->completed_tx_bytes,
kvaser_pciefd_tx_avail(can), 1);
can->completed_tx_pkts = 0;
can->completed_tx_bytes = 0;
}
return res;
}
static u32 kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
{
void __iomem *srb_cmd_reg = KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG;
u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
kvaser_pciefd_read_buffer(pcie, 0);
iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
kvaser_pciefd_read_buffer(pcie, 0);
iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, srb_cmd_reg); /* Rearm buffer */
}
if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
kvaser_pciefd_read_buffer(pcie, 1);
iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, srb_cmd_reg); /* Rearm buffer */
}
if (unlikely(irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
irq & KVASER_PCIEFD_SRB_IRQ_DUF1))
dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);
iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
return irq;
}
static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
@ -1691,29 +1722,22 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask;
u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
u32 srb_irq = 0;
u32 srb_release = 0;
int i;
if (!(pci_irq & irq_mask->all))
return IRQ_NONE;
iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
if (pci_irq & irq_mask->kcan_rx0)
srb_irq = kvaser_pciefd_receive_irq(pcie);
kvaser_pciefd_receive_irq(pcie);
for (i = 0; i < pcie->nr_channels; i++) {
if (pci_irq & irq_mask->kcan_tx[i])
kvaser_pciefd_transmit_irq(pcie->can[i]);
}
if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
srb_release |= KVASER_PCIEFD_SRB_CMD_RDB0;
if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
srb_release |= KVASER_PCIEFD_SRB_CMD_RDB1;
if (srb_release)
iowrite32(srb_release, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
return IRQ_HANDLED;
}
@ -1733,13 +1757,22 @@ static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
}
}
static void kvaser_pciefd_disable_irq_srcs(struct kvaser_pciefd *pcie)
{
unsigned int i;
/* Masking PCI_IRQ is insufficient as running ISR will unmask it */
iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG);
for (i = 0; i < pcie->nr_channels; ++i)
iowrite32(0, pcie->can[i]->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
}
static int kvaser_pciefd_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
int ret;
struct kvaser_pciefd *pcie;
const struct kvaser_pciefd_irq_mask *irq_mask;
void __iomem *irq_en_base;
pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
@ -1805,8 +1838,7 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG);
/* Enable PCI interrupts */
irq_en_base = KVASER_PCIEFD_PCI_IEN_ADDR(pcie);
iowrite32(irq_mask->all, irq_en_base);
iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
/* Ready the DMA buffers */
iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
@ -1820,8 +1852,7 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
return 0;
err_free_irq:
/* Disable PCI interrupts */
iowrite32(0, irq_en_base);
kvaser_pciefd_disable_irq_srcs(pcie);
free_irq(pcie->pci->irq, pcie);
err_pci_free_irq_vectors:
@ -1844,35 +1875,26 @@ err_disable_pci:
return ret;
}
static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie)
{
int i;
for (i = 0; i < pcie->nr_channels; i++) {
struct kvaser_pciefd_can *can = pcie->can[i];
if (can) {
iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
unregister_candev(can->can.dev);
timer_delete(&can->bec_poll_timer);
kvaser_pciefd_pwm_stop(can);
free_candev(can->can.dev);
}
}
}
static void kvaser_pciefd_remove(struct pci_dev *pdev)
{
struct kvaser_pciefd *pcie = pci_get_drvdata(pdev);
unsigned int i;
kvaser_pciefd_remove_all_ctrls(pcie);
for (i = 0; i < pcie->nr_channels; ++i) {
struct kvaser_pciefd_can *can = pcie->can[i];
/* Disable interrupts */
iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
unregister_candev(can->can.dev);
timer_delete(&can->bec_poll_timer);
kvaser_pciefd_pwm_stop(can);
}
kvaser_pciefd_disable_irq_srcs(pcie);
free_irq(pcie->pci->irq, pcie);
pci_free_irq_vectors(pcie->pci);
for (i = 0; i < pcie->nr_channels; ++i)
free_candev(pcie->can[i]->can.dev);
pci_iounmap(pdev, pcie->reg_base);
pci_release_regions(pdev);
pci_disable_device(pdev);


@ -71,12 +71,21 @@ MODULE_AUTHOR("Dario Binacchi <dario.binacchi@amarulasolutions.com>");
#define SLCAN_CMD_LEN 1
#define SLCAN_SFF_ID_LEN 3
#define SLCAN_EFF_ID_LEN 8
#define SLCAN_DATA_LENGTH_LEN 1
#define SLCAN_ERROR_LEN 1
#define SLCAN_STATE_LEN 1
#define SLCAN_STATE_BE_RXCNT_LEN 3
#define SLCAN_STATE_BE_TXCNT_LEN 3
#define SLCAN_STATE_FRAME_LEN (1 + SLCAN_CMD_LEN + \
SLCAN_STATE_BE_RXCNT_LEN + \
SLCAN_STATE_BE_TXCNT_LEN)
#define SLCAN_STATE_MSG_LEN (SLCAN_CMD_LEN + \
SLCAN_STATE_LEN + \
SLCAN_STATE_BE_RXCNT_LEN + \
SLCAN_STATE_BE_TXCNT_LEN)
#define SLCAN_ERROR_MSG_LEN_MIN (SLCAN_CMD_LEN + \
SLCAN_ERROR_LEN + \
SLCAN_DATA_LENGTH_LEN)
#define SLCAN_FRAME_MSG_LEN_MIN (SLCAN_CMD_LEN + \
SLCAN_SFF_ID_LEN + \
SLCAN_DATA_LENGTH_LEN)
struct slcan {
struct can_priv can;
@ -176,6 +185,9 @@ static void slcan_bump_frame(struct slcan *sl)
u32 tmpid;
char *cmd = sl->rbuff;
if (sl->rcount < SLCAN_FRAME_MSG_LEN_MIN)
return;
skb = alloc_can_skb(sl->dev, &cf);
if (unlikely(!skb)) {
sl->dev->stats.rx_dropped++;
@ -281,7 +293,7 @@ static void slcan_bump_state(struct slcan *sl)
return;
}
if (state == sl->can.state || sl->rcount < SLCAN_STATE_FRAME_LEN)
if (state == sl->can.state || sl->rcount != SLCAN_STATE_MSG_LEN)
return;
cmd += SLCAN_STATE_BE_RXCNT_LEN + SLCAN_CMD_LEN + 1;
@ -328,6 +340,9 @@ static void slcan_bump_err(struct slcan *sl)
bool rx_errors = false, tx_errors = false, rx_over_errors = false;
int i, len;
if (sl->rcount < SLCAN_ERROR_MSG_LEN_MIN)
return;
/* get len from sanitized ASCII value */
len = cmd[1];
if (len >= '0' && len < '9')
@ -456,8 +471,7 @@ static void slcan_bump(struct slcan *sl)
static void slcan_unesc(struct slcan *sl, unsigned char s)
{
if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */
if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
sl->rcount > 4)
if (!test_and_clear_bit(SLF_ERROR, &sl->flags))
slcan_bump(sl);
sl->rcount = 0;


@ -636,7 +636,6 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
struct airoha_queue_entry *e = &q->entry[q->tail];
struct airoha_qdma_desc *desc = &q->desc[q->tail];
u32 hash, reason, msg1 = le32_to_cpu(desc->msg1);
dma_addr_t dma_addr = le32_to_cpu(desc->addr);
struct page *page = virt_to_head_page(e->buf);
u32 desc_ctrl = le32_to_cpu(desc->ctrl);
struct airoha_gdm_port *port;
@ -645,22 +644,16 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
break;
if (!dma_addr)
break;
len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
if (!len)
break;
q->tail = (q->tail + 1) % q->ndesc;
q->queued--;
dma_sync_single_for_cpu(eth->dev, dma_addr,
dma_sync_single_for_cpu(eth->dev, e->dma_addr,
SKB_WITH_OVERHEAD(q->buf_size), dir);
len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
data_len = q->skb ? q->buf_size
: SKB_WITH_OVERHEAD(q->buf_size);
if (data_len < len)
if (!len || data_len < len)
goto free_frag;
p = airoha_qdma_get_gdm_port(eth, desc);
@ -723,9 +716,12 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
q->skb = NULL;
continue;
free_frag:
page_pool_put_full_page(q->page_pool, page, true);
dev_kfree_skb(q->skb);
q->skb = NULL;
if (q->skb) {
dev_kfree_skb(q->skb);
q->skb = NULL;
} else {
page_pool_put_full_page(q->page_pool, page, true);
}
}
airoha_qdma_fill_rx_queue(q);


@ -20,6 +20,7 @@
#include <asm/byteorder.h>
#include <linux/bitmap.h>
#include <linux/auxiliary_bus.h>
#include <net/netdev_lock.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
@ -304,14 +305,12 @@ void bnxt_ulp_irq_stop(struct bnxt *bp)
if (!ulp->msix_requested)
return;
netdev_lock(bp->dev);
ops = rcu_dereference(ulp->ulp_ops);
ops = netdev_lock_dereference(ulp->ulp_ops, bp->dev);
if (!ops || !ops->ulp_irq_stop)
return;
if (test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
reset = true;
ops->ulp_irq_stop(ulp->handle, reset);
netdev_unlock(bp->dev);
}
}
@ -330,8 +329,7 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
if (!ulp->msix_requested)
return;
netdev_lock(bp->dev);
ops = rcu_dereference(ulp->ulp_ops);
ops = netdev_lock_dereference(ulp->ulp_ops, bp->dev);
if (!ops || !ops->ulp_irq_restart)
return;
@ -343,7 +341,6 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
bnxt_fill_msix_vecs(bp, ent);
}
ops->ulp_irq_restart(ulp->handle, ent);
netdev_unlock(bp->dev);
kfree(ent);
}
}


@ -61,6 +61,8 @@ static int hbg_reset_prepare(struct hbg_priv *priv, enum hbg_reset_type type)
return -EBUSY;
}
netif_device_detach(priv->netdev);
priv->reset_type = type;
set_bit(HBG_NIC_STATE_RESETTING, &priv->state);
clear_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
@ -91,6 +93,8 @@ static int hbg_reset_done(struct hbg_priv *priv, enum hbg_reset_type type)
return ret;
}
netif_device_attach(priv->netdev);
dev_info(&priv->pdev->dev, "reset done\n");
return ret;
}
@ -117,16 +121,13 @@ void hbg_err_reset(struct hbg_priv *priv)
if (running)
dev_close(priv->netdev);
hbg_reset(priv);
/* in hbg_pci_err_detected(), we will detach first,
* so we need to attach before open
*/
if (!netif_device_present(priv->netdev))
netif_device_attach(priv->netdev);
if (hbg_reset(priv))
goto err_unlock;
if (running)
dev_open(priv->netdev, NULL);
err_unlock:
rtnl_unlock();
}
@ -160,7 +161,6 @@ static pci_ers_result_t hbg_pci_err_slot_reset(struct pci_dev *pdev)
pci_save_state(pdev);
hbg_err_reset(priv);
netif_device_attach(netdev);
return PCI_ERS_RESULT_RECOVERED;
}


@ -317,6 +317,9 @@ static void hbg_update_stats_by_info(struct hbg_priv *priv,
const struct hbg_ethtool_stats *stats;
u32 i;
if (test_bit(HBG_NIC_STATE_RESETTING, &priv->state))
return;
for (i = 0; i < info_len; i++) {
stats = &info[i];
if (!stats->reg)
