commit 1a2352ad82

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR (net-6.18-rc4).

No conflicts. Adjacent changes:

drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
  ded9813d17 ("net: stmmac: Consider Tx VLAN offload tag length for maxSDU")
  26ab9830be ("net: stmmac: replace has_xxxx with core_type")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

CREDITS | 4
@@ -2036,6 +2036,10 @@ S: Botanicka' 68a
 S: 602 00 Brno
 S: Czech Republic
 
+N: Karsten Keil
+E: isdn@linux-pingi.de
+D: ISDN subsystem maintainer
+
 N: Jakob Kemi
 E: jakob.kemi@telia.com
 D: V4L W9966 Webcam driver

@@ -180,9 +180,9 @@ allOf:
     then:
       properties:
         reg:
-          minItems: 2
+          maxItems: 2
         reg-names:
-          minItems: 2
+          maxItems: 2
     else:
       properties:
         reg:

@@ -142,7 +142,9 @@ allOf:
     required:
       - orientation-switch
     then:
-      $ref: /schemas/usb/usb-switch.yaml#
+      allOf:
+        - $ref: /schemas/usb/usb-switch.yaml#
+        - $ref: /schemas/usb/usb-switch-ports.yaml#
 
 unevaluatedProperties: false
 

@@ -125,7 +125,9 @@ allOf:
         contains:
           const: google,gs101-usb31drd-phy
     then:
-      $ref: /schemas/usb/usb-switch.yaml#
+      allOf:
+        - $ref: /schemas/usb/usb-switch.yaml#
+        - $ref: /schemas/usb/usb-switch-ports.yaml#
 
       properties:
         clocks:

@@ -197,6 +197,7 @@ allOf:
               - renesas,rcar-gen2-scif
               - renesas,rcar-gen3-scif
               - renesas,rcar-gen4-scif
+              - renesas,rcar-gen5-scif
     then:
       properties:
         interrupts:

@@ -14,9 +14,14 @@ allOf:
 
 properties:
   compatible:
-    enum:
-      - cdns,spi-r1p6
-      - xlnx,zynq-spi-r1p6
+    oneOf:
+      - enum:
+          - xlnx,zynq-spi-r1p6
+      - items:
+          - enum:
+              - xlnx,zynqmp-spi-r1p6
+              - xlnx,versal-net-spi-r1p6
+          - const: cdns,spi-r1p6
 
   reg:
     maxItems: 1

@@ -34,6 +34,7 @@ properties:
           - rockchip,rk3328-spi
           - rockchip,rk3368-spi
           - rockchip,rk3399-spi
+          - rockchip,rk3506-spi
          - rockchip,rk3528-spi
           - rockchip,rk3562-spi
           - rockchip,rk3568-spi

@@ -76,6 +76,7 @@ required:
 
 allOf:
   - $ref: usb-switch.yaml#
+  - $ref: usb-switch-ports.yaml#
 
 additionalProperties: false
 

@@ -89,13 +89,21 @@ required:
   - reg
   - "#address-cells"
   - "#size-cells"
-  - dma-ranges
   - ranges
   - clocks
   - clock-names
   - interrupts
   - power-domains
 
+allOf:
+  - if:
+      properties:
+        compatible:
+          const: fsl,imx8mp-dwc3
+    then:
+      required:
+        - dma-ranges
+
 additionalProperties: false
 
 examples:

@@ -52,6 +52,7 @@ required:
 
 allOf:
   - $ref: usb-switch.yaml#
+  - $ref: usb-switch-ports.yaml#
   - if:
       required:
         - mode-switch

@@ -46,6 +46,7 @@ required:
 
 allOf:
   - $ref: usb-switch.yaml#
+  - $ref: usb-switch-ports.yaml#
 
 additionalProperties: false
 

@@ -91,6 +91,7 @@ required:
 
 allOf:
   - $ref: usb-switch.yaml#
+  - $ref: usb-switch-ports.yaml#
 
 additionalProperties: false
 

@@ -81,6 +81,7 @@ required:
 
 allOf:
   - $ref: usb-switch.yaml#
+  - $ref: usb-switch-ports.yaml#
 
 additionalProperties: false
 

@@ -68,6 +68,7 @@ properties:
           - qcom,sm8550-dwc3
           - qcom,sm8650-dwc3
           - qcom,x1e80100-dwc3
+          - qcom,x1e80100-dwc3-mp
       - const: qcom,snps-dwc3
 
   reg:

@@ -460,8 +461,10 @@ allOf:
     then:
       properties:
         interrupts:
+          minItems: 4
           maxItems: 5
         interrupt-names:
+          minItems: 4
           items:
             - const: dwc_usb3
             - const: pwr_event

@@ -60,6 +60,7 @@ required:
 
 allOf:
   - $ref: usb-switch.yaml#
+  - $ref: usb-switch-ports.yaml#
 
 additionalProperties: false
 

@@ -11,6 +11,7 @@ maintainers:
 
 allOf:
   - $ref: usb-switch.yaml#
+  - $ref: usb-switch-ports.yaml#
 
 properties:
   compatible:

@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/usb-switch-ports.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: USB Orientation and Mode Switches Ports Graph Properties
+
+maintainers:
+  - Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+description:
+  Ports Graph properties for devices handling USB mode and orientation switching.
+
+properties:
+  port:
+    $ref: /schemas/graph.yaml#/$defs/port-base
+    description:
+      A port node to link the device to a TypeC controller for the purpose of
+      handling altmode muxing and orientation switching.
+
+    properties:
+      endpoint:
+        $ref: /schemas/graph.yaml#/$defs/endpoint-base
+        unevaluatedProperties: false
+        properties:
+          data-lanes:
+            $ref: /schemas/types.yaml#/definitions/uint32-array
+            minItems: 1
+            maxItems: 8
+            uniqueItems: true
+            items:
+              maximum: 8
+
+  ports:
+    $ref: /schemas/graph.yaml#/properties/ports
+    properties:
+      port@0:
+        $ref: /schemas/graph.yaml#/properties/port
+        description:
+          Super Speed (SS) Output endpoint to the Type-C connector
+
+      port@1:
+        $ref: /schemas/graph.yaml#/$defs/port-base
+        description:
+          Super Speed (SS) Input endpoint from the Super-Speed PHY
+        unevaluatedProperties: false
+
+        properties:
+          endpoint:
+            $ref: /schemas/graph.yaml#/$defs/endpoint-base
+            unevaluatedProperties: false
+            properties:
+              data-lanes:
+                $ref: /schemas/types.yaml#/definitions/uint32-array
+                minItems: 1
+                maxItems: 8
+                uniqueItems: true
+                items:
+                  maximum: 8
+
+oneOf:
+  - required:
+      - port
+  - required:
+      - ports
+
+additionalProperties: true

@@ -25,56 +25,4 @@ properties:
     description: Possible handler of SuperSpeed signals retiming
     type: boolean
 
-  port:
-    $ref: /schemas/graph.yaml#/$defs/port-base
-    description:
-      A port node to link the device to a TypeC controller for the purpose of
-      handling altmode muxing and orientation switching.
-
-    properties:
-      endpoint:
-        $ref: /schemas/graph.yaml#/$defs/endpoint-base
-        unevaluatedProperties: false
-        properties:
-          data-lanes:
-            $ref: /schemas/types.yaml#/definitions/uint32-array
-            minItems: 1
-            maxItems: 8
-            uniqueItems: true
-            items:
-              maximum: 8
-
-  ports:
-    $ref: /schemas/graph.yaml#/properties/ports
-    properties:
-      port@0:
-        $ref: /schemas/graph.yaml#/properties/port
-        description:
-          Super Speed (SS) Output endpoint to the Type-C connector
-
-      port@1:
-        $ref: /schemas/graph.yaml#/$defs/port-base
-        description:
-          Super Speed (SS) Input endpoint from the Super-Speed PHY
-        unevaluatedProperties: false
-
-        properties:
-          endpoint:
-            $ref: /schemas/graph.yaml#/$defs/endpoint-base
-            unevaluatedProperties: false
-            properties:
-              data-lanes:
-                $ref: /schemas/types.yaml#/definitions/uint32-array
-                minItems: 1
-                maxItems: 8
-                uniqueItems: true
-                items:
-                  maximum: 8
-
-oneOf:
-  - required:
-      - port
-  - required:
-      - ports
-
 additionalProperties: true

@@ -605,6 +605,8 @@ operations:
       reply: &pin-attrs
         attributes:
           - id
+          - module-name
+          - clock-id
           - board-label
           - panel-label
           - package-label

@@ -19,9 +19,6 @@ Userdata append support by Matthew Wood <thepacketgeek@gmail.com>, Jan 22 2024
 
 Sysdata append support by Breno Leitao <leitao@debian.org>, Jan 15 2025
 
-Please send bug reports to Matt Mackall <mpm@selenic.com>
-Satyam Sharma <satyam.sharma@gmail.com>, and Cong Wang <xiyou.wangcong@gmail.com>
-
 Introduction:
 =============
 

MAINTAINERS | 23
@@ -1997,6 +1997,10 @@ F: include/uapi/linux/if_arcnet.h
 
 ARM AND ARM64 SoC SUB-ARCHITECTURES (COMMON PARTS)
 M: Arnd Bergmann <arnd@arndb.de>
+M: Krzysztof Kozlowski <krzk@kernel.org>
+M: Alexandre Belloni <alexandre.belloni@bootlin.com>
+M: Linus Walleij <linus.walleij@linaro.org>
+R: Drew Fustini <fustini@kernel.org>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L: soc@lists.linux.dev
 S: Maintained

@@ -13112,6 +13116,15 @@ F: include/uapi/linux/io_uring.h
 F: include/uapi/linux/io_uring/
 F: io_uring/
 
+IO_URING ZCRX
+M: Pavel Begunkov <asml.silence@gmail.com>
+L: io-uring@vger.kernel.org
+L: netdev@vger.kernel.org
+T: git https://github.com/isilence/linux.git zcrx/for-next
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux.git
+S: Maintained
+F: io_uring/zcrx.*
+
 IPMI SUBSYSTEM
 M: Corey Minyard <corey@minyard.net>
 L: openipmi-developer@lists.sourceforge.net (moderated for non-subscribers)

@@ -13247,10 +13260,8 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git mast
 F: drivers/infiniband/ulp/isert
 
 ISDN/CMTP OVER BLUETOOTH
-M: Karsten Keil <isdn@linux-pingi.de>
-L: isdn4linux@listserv.isdn4linux.de (subscribers-only)
 L: netdev@vger.kernel.org
-S: Odd Fixes
+S: Orphan
 W: http://www.isdn4linux.de
 F: Documentation/isdn/
 F: drivers/isdn/capi/

@@ -13259,10 +13270,8 @@ F: include/uapi/linux/isdn/
 F: net/bluetooth/cmtp/
 
 ISDN/mISDN SUBSYSTEM
-M: Karsten Keil <isdn@linux-pingi.de>
-L: isdn4linux@listserv.isdn4linux.de (subscribers-only)
 L: netdev@vger.kernel.org
-S: Maintained
+S: Orphan
 W: http://www.isdn4linux.de
 F: drivers/isdn/Kconfig
 F: drivers/isdn/Makefile

@@ -14395,6 +14404,7 @@ F: tools/memory-model/
 
 LINUX-NEXT TREE
 M: Stephen Rothwell <sfr@canb.auug.org.au>
+M: Mark Brown <broonie@kernel.org>
 L: linux-next@vger.kernel.org
 S: Supported
 B: mailto:linux-next@vger.kernel.org and the appropriate development tree

@@ -21326,6 +21336,7 @@ F: drivers/media/platform/qcom/venus/
 QUALCOMM WCN36XX WIRELESS DRIVER
 M: Loic Poulain <loic.poulain@oss.qualcomm.com>
 L: wcn36xx@lists.infradead.org
+L: linux-wireless@vger.kernel.org
 S: Supported
 W: https://wireless.wiki.kernel.org/en/users/Drivers/wcn36xx
 F: drivers/net/wireless/ath/wcn36xx/

Makefile | 2
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 18
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Baby Opossum Posse
 
 # *DOCUMENTATION*

@@ -77,6 +77,14 @@
 	/delete-property/ pinctrl-0;
 };
 
+&pm {
+	clocks = <&firmware_clocks 5>,
+		 <&clocks BCM2835_CLOCK_PERI_IMAGE>,
+		 <&clocks BCM2835_CLOCK_H264>,
+		 <&clocks BCM2835_CLOCK_ISP>;
+	clock-names = "v3d", "peri_image", "h264", "isp";
+};
+
 &rmem {
 	/*
 	 * RPi4's co-processor will copy the board's bootloader configuration

@@ -13,7 +13,16 @@
 	clock-names = "pixel", "hdmi";
 };
 
+&pm {
+	clocks = <&firmware_clocks 5>,
+		 <&clocks BCM2835_CLOCK_PERI_IMAGE>,
+		 <&clocks BCM2835_CLOCK_H264>,
+		 <&clocks BCM2835_CLOCK_ISP>;
+	clock-names = "v3d", "peri_image", "h264", "isp";
+};
+
 &v3d {
+	clocks = <&firmware_clocks 5>;
 	power-domains = <&power RPI_POWER_DOMAIN_V3D>;
 };
 

@@ -326,6 +326,8 @@
 			      <0x7fffe000 0x2000>;
 			interrupt-controller;
 			#address-cells = <0>;
+			interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) |
+						 IRQ_TYPE_LEVEL_HIGH)>;
 			#interrupt-cells = <3>;
 		};
 

@@ -293,6 +293,7 @@ static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
 static inline pte_t pte_mkwrite_novma(pte_t pte)
 {
 	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
+	if (pte_sw_dirty(pte))
 		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
 	return pte;
 }

@@ -35,7 +35,7 @@ void copy_highpage(struct page *to, struct page *from)
 		     from != folio_page(src, 0))
 			return;
 
-		WARN_ON_ONCE(!folio_try_hugetlb_mte_tagging(dst));
+		folio_try_hugetlb_mte_tagging(dst);
 
 		/*
 		 * Populate tags for all subpages.

@@ -51,8 +51,13 @@ void copy_highpage(struct page *to, struct page *from)
 		}
 		folio_set_hugetlb_mte_tagged(dst);
 	} else if (page_mte_tagged(from)) {
-		/* It's a new page, shouldn't have been tagged yet */
-		WARN_ON_ONCE(!try_page_mte_tagging(to));
+		/*
+		 * Most of the time it's a new page that shouldn't have been
+		 * tagged yet. However, folio migration can end up reusing the
+		 * same page without untagging it. Ignore the warning if the
+		 * page is already tagged.
+		 */
+		try_page_mte_tagging(to);
 
 		mte_copy_page_tags(kto, kfrom);
 		set_page_mte_tagged(to);

@@ -47,7 +47,7 @@ static struct resource standard_io_resources[] = {
 		.name = "keyboard",
 		.start = 0x60,
 		.end = 0x6f,
-		.flags = IORESOURCE_IO | IORESOURCE_BUSY
+		.flags = IORESOURCE_IO
 	},
 	{
 		.name = "dma page reg",

@@ -213,7 +213,7 @@ void __init plat_mem_setup(void)
 
 	/* Request I/O space for devices used on the Malta board. */
 	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
-		request_resource(&ioport_resource, standard_io_resources+i);
+		insert_resource(&ioport_resource, standard_io_resources + i);
 
 	/*
 	 * Enable DMA channel 4 (cascade channel) in the PIIX4 south bridge.

@@ -230,8 +230,7 @@ void __init mips_pcibios_init(void)
 	}
 
 	/* PIIX4 ACPI starts at 0x1000 */
-	if (controller->io_resource->start < 0x00001000UL)
-		controller->io_resource->start = 0x00001000UL;
+	PCIBIOS_MIN_IO = 0x1000;
 
 	iomem_resource.end &= 0xfffffffffULL;	/* 64 GB */
 	ioport_resource.end = controller->io_resource->end;

@@ -84,15 +84,9 @@
 .endm
 
 #ifdef CONFIG_SMP
-
-#ifdef CONFIG_32BIT
-#define PER_CPU_OFFSET_SHIFT 2
-#else
-#define PER_CPU_OFFSET_SHIFT 3
-#endif
-
 .macro asm_per_cpu dst sym tmp
 	lw   \tmp, TASK_TI_CPU_NUM(tp)
-	slli \tmp, \tmp, PER_CPU_OFFSET_SHIFT
+	slli \tmp, \tmp, RISCV_LGPTR
 	la   \dst, __per_cpu_offset
 	add  \dst, \dst, \tmp
 	REG_L \tmp, 0(\dst)

@@ -31,6 +31,8 @@ struct riscv_isainfo {
 
 DECLARE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);
 
+extern const struct seq_operations cpuinfo_op;
+
 /* Per-cpu ISA extensions. */
 extern struct riscv_isainfo hart_isa[NR_CPUS];
 

@@ -42,4 +42,11 @@ static inline bool riscv_hwprobe_pair_cmp(struct riscv_hwprobe *pair,
 	return pair->value == other_pair->value;
 }
 
+#ifdef CONFIG_MMU
+void riscv_hwprobe_register_async_probe(void);
+void riscv_hwprobe_complete_async_probe(void);
+#else
+static inline void riscv_hwprobe_register_async_probe(void) {}
+static inline void riscv_hwprobe_complete_async_probe(void) {}
+#endif
 #endif

@@ -69,6 +69,8 @@ typedef struct {
 
 #define PTRS_PER_PMD (PAGE_SIZE / sizeof(pmd_t))
 
+#define MAX_POSSIBLE_PHYSMEM_BITS 56
+
 /*
  * rv64 PTE format:
  * | 63 | 62 61 | 60 54 | 53 10 | 9 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0

@@ -654,6 +654,8 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
 	return __pgprot(prot);
 }
 
+#define pgprot_dmacoherent pgprot_writecombine
+
 /*
  * Both Svade and Svadu control the hardware behavior when the PTE A/D bits need to be set. By
  * default the M-mode firmware enables the hardware updating scheme when only Svadu is present in

@@ -12,6 +12,12 @@ struct vdso_arch_data {
 
 	/* Boolean indicating all CPUs have the same static hwprobe values. */
 	__u8 homogeneous_cpus;
+
+	/*
+	 * A gate to check and see if the hwprobe data is actually ready, as
+	 * probing is deferred to avoid boot slowdowns.
+	 */
+	__u8 ready;
 };
 
 #endif /* __RISCV_ASM_VDSO_ARCH_DATA_H */

@@ -62,10 +62,8 @@ int __init riscv_early_of_processor_hartid(struct device_node *node, unsigned lo
 		return -ENODEV;
 	}
 
-	if (!of_device_is_available(node)) {
-		pr_info("CPU with hartid=%lu is not available\n", *hart);
+	if (!of_device_is_available(node))
 		return -ENODEV;
-	}
 
 	if (of_property_read_string(node, "riscv,isa-base", &isa))
 		goto old_interface;

@@ -932,9 +932,9 @@ static int has_thead_homogeneous_vlenb(void)
 {
 	int cpu;
 	u32 prev_vlenb = 0;
-	u32 vlenb;
+	u32 vlenb = 0;
 
-	/* Ignore thead,vlenb property if xtheavector is not enabled in the kernel */
+	/* Ignore thead,vlenb property if xtheadvector is not enabled in the kernel */
 	if (!IS_ENABLED(CONFIG_RISCV_ISA_XTHEADVECTOR))
 		return 0;
 

@@ -40,6 +40,17 @@ enum ipi_message_type {
 	IPI_MAX
 };
 
+static const char * const ipi_names[] = {
+	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
+	[IPI_CALL_FUNC]		= "Function call interrupts",
+	[IPI_CPU_STOP]		= "CPU stop interrupts",
+	[IPI_CPU_CRASH_STOP]	= "CPU stop (for crash dump) interrupts",
+	[IPI_IRQ_WORK]		= "IRQ work interrupts",
+	[IPI_TIMER]		= "Timer broadcast interrupts",
+	[IPI_CPU_BACKTRACE]	= "CPU backtrace interrupts",
+	[IPI_KGDB_ROUNDUP]	= "KGDB roundup interrupts",
+};
+
 unsigned long __cpuid_to_hartid_map[NR_CPUS] __ro_after_init = {
 	[0 ... NR_CPUS-1] = INVALID_HARTID
 };

@@ -199,7 +210,7 @@ void riscv_ipi_set_virq_range(int virq, int nr)
 	/* Request IPIs */
 	for (i = 0; i < nr_ipi; i++) {
 		err = request_percpu_irq(ipi_virq_base + i, handle_IPI,
-					 "IPI", &ipi_dummy_dev);
+					 ipi_names[i], &ipi_dummy_dev);
 		WARN_ON(err);
 
 		ipi_desc[i] = irq_to_desc(ipi_virq_base + i);

@@ -210,17 +221,6 @@ void riscv_ipi_set_virq_range(int virq, int nr)
 	riscv_ipi_enable();
 }
 
-static const char * const ipi_names[] = {
-	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
-	[IPI_CALL_FUNC]		= "Function call interrupts",
-	[IPI_CPU_STOP]		= "CPU stop interrupts",
-	[IPI_CPU_CRASH_STOP]	= "CPU stop (for crash dump) interrupts",
-	[IPI_IRQ_WORK]		= "IRQ work interrupts",
-	[IPI_TIMER]		= "Timer broadcast interrupts",
-	[IPI_CPU_BACKTRACE]	= "CPU backtrace interrupts",
-	[IPI_KGDB_ROUNDUP]	= "KGDB roundup interrupts",
-};
-
 void show_ipi_stats(struct seq_file *p, int prec)
 {
 	unsigned int cpu, i;

@@ -5,6 +5,9 @@
  * more details.
  */
 #include <linux/syscalls.h>
+#include <linux/completion.h>
+#include <linux/atomic.h>
+#include <linux/once.h>
 #include <asm/cacheflush.h>
 #include <asm/cpufeature.h>
 #include <asm/hwprobe.h>

@@ -28,6 +31,11 @@ static void hwprobe_arch_id(struct riscv_hwprobe *pair,
 	bool first = true;
 	int cpu;
 
+	if (pair->key != RISCV_HWPROBE_KEY_MVENDORID &&
+	    pair->key != RISCV_HWPROBE_KEY_MIMPID &&
+	    pair->key != RISCV_HWPROBE_KEY_MARCHID)
+		goto out;
+
 	for_each_cpu(cpu, cpus) {
 		u64 cpu_id;
 

@@ -58,6 +66,7 @@ static void hwprobe_arch_id(struct riscv_hwprobe *pair,
 		}
 	}
 
+out:
 	pair->value = id;
 }
 

@@ -454,28 +463,32 @@ static int hwprobe_get_cpus(struct riscv_hwprobe __user *pairs,
 	return 0;
 }
 
-static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
-			    size_t pair_count, size_t cpusetsize,
-			    unsigned long __user *cpus_user,
-			    unsigned int flags)
-{
-	if (flags & RISCV_HWPROBE_WHICH_CPUS)
-		return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
-					cpus_user, flags);
-
-	return hwprobe_get_values(pairs, pair_count, cpusetsize,
-				  cpus_user, flags);
-}
-
 #ifdef CONFIG_MMU
 
-static int __init init_hwprobe_vdso_data(void)
+static DECLARE_COMPLETION(boot_probes_done);
+static atomic_t pending_boot_probes = ATOMIC_INIT(1);
+
+void riscv_hwprobe_register_async_probe(void)
+{
+	atomic_inc(&pending_boot_probes);
+}
+
+void riscv_hwprobe_complete_async_probe(void)
+{
+	if (atomic_dec_and_test(&pending_boot_probes))
+		complete(&boot_probes_done);
+}
+
+static int complete_hwprobe_vdso_data(void)
 {
 	struct vdso_arch_data *avd = vdso_k_arch_data;
 	u64 id_bitsmash = 0;
 	struct riscv_hwprobe pair;
 	int key;
 
+	if (unlikely(!atomic_dec_and_test(&pending_boot_probes)))
+		wait_for_completion(&boot_probes_done);
+
 	/*
 	 * Initialize vDSO data with the answers for the "all CPUs" case, to
 	 * save a syscall in the common case.

@@ -503,13 +516,52 @@ static int __init init_hwprobe_vdso_data(void)
 	 * vDSO should defer to the kernel for exotic cpu masks.
 	 */
 	avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
+
+	/*
+	 * Make sure all the VDSO values are visible before we look at them.
+	 * This pairs with the implicit "no speculativly visible accesses"
+	 * barrier in the VDSO hwprobe code.
+	 */
+	smp_wmb();
+	avd->ready = true;
+	return 0;
+}
+
+static int __init init_hwprobe_vdso_data(void)
+{
+	struct vdso_arch_data *avd = vdso_k_arch_data;
+
+	/*
+	 * Prevent the vDSO cached values from being used, as they're not ready
+	 * yet.
+	 */
+	avd->ready = false;
 	return 0;
 }
 
 arch_initcall_sync(init_hwprobe_vdso_data);
 
+#else
+
+static int complete_hwprobe_vdso_data(void) { return 0; }
+
 #endif /* CONFIG_MMU */
 
+static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
+			    size_t pair_count, size_t cpusetsize,
+			    unsigned long __user *cpus_user,
+			    unsigned int flags)
+{
+	DO_ONCE_SLEEPABLE(complete_hwprobe_vdso_data);
+
+	if (flags & RISCV_HWPROBE_WHICH_CPUS)
+		return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
+					cpus_user, flags);
+
+	return hwprobe_get_values(pairs, pair_count, cpusetsize,
+				  cpus_user, flags);
+}
+
 SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
 		size_t, pair_count, size_t, cpusetsize, unsigned long __user *,
 		cpus, unsigned int, flags)

@@ -379,6 +379,7 @@ free:
 static int __init vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused)
 {
 	schedule_on_each_cpu(check_vector_unaligned_access);
+	riscv_hwprobe_complete_async_probe();
 
 	return 0;
 }

@@ -473,8 +474,12 @@ static int __init check_unaligned_access_all_cpus(void)
 			per_cpu(vector_misaligned_access, cpu) = unaligned_vector_speed_param;
 	} else if (!check_vector_unaligned_access_emulated_all_cpus() &&
 		   IS_ENABLED(CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS)) {
-		kthread_run(vec_check_unaligned_access_speed_all_cpus,
-			    NULL, "vec_check_unaligned_access_speed_all_cpus");
+		riscv_hwprobe_register_async_probe();
+		if (IS_ERR(kthread_run(vec_check_unaligned_access_speed_all_cpus,
+				       NULL, "vec_check_unaligned_access_speed_all_cpus"))) {
+			pr_warn("Failed to create vec_unalign_check kthread\n");
+			riscv_hwprobe_complete_async_probe();
+		}
 	}
 
 	/*

@@ -27,7 +27,7 @@ static int riscv_vdso_get_values(struct riscv_hwprobe *pairs, size_t pair_count,
 	 * homogeneous, then this function can handle requests for arbitrary
 	 * masks.
 	 */
-	if ((flags != 0) || (!all_cpus && !avd->homogeneous_cpus))
+	if (flags != 0 || (!all_cpus && !avd->homogeneous_cpus) || unlikely(!avd->ready))
 		return riscv_hwprobe(pairs, pair_count, cpusetsize, cpus, flags);
 
 	/* This is something we can handle, fill out the pairs. */

@@ -1463,7 +1463,9 @@ static void __init retbleed_update_mitigation(void)
 		break;
 	default:
 		if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF) {
-			pr_err(RETBLEED_INTEL_MSG);
+			if (retbleed_mitigation != RETBLEED_MITIGATION_NONE)
+				pr_err(RETBLEED_INTEL_MSG);
+
 			retbleed_mitigation = RETBLEED_MITIGATION_NONE;
 		}
 	}

@@ -1825,13 +1827,6 @@ void unpriv_ebpf_notify(int new_state)
 }
 #endif
 
-static inline bool match_option(const char *arg, int arglen, const char *opt)
-{
-	int len = strlen(opt);
-
-	return len == arglen && !strncmp(arg, opt, len);
-}
-
 /* The kernel command line selection for spectre v2 */
 enum spectre_v2_mitigation_cmd {
 	SPECTRE_V2_CMD_NONE,

@@ -194,7 +194,7 @@ static bool need_sha_check(u32 cur_rev)
 	}
 
 	switch (cur_rev >> 8) {
-	case 0x80012: return cur_rev <= 0x800126f; break;
+	case 0x80012: return cur_rev <= 0x8001277; break;
 	case 0x80082: return cur_rev <= 0x800820f; break;
 	case 0x83010: return cur_rev <= 0x830107c; break;
 	case 0x86001: return cur_rev <= 0x860010e; break;

@@ -458,7 +458,16 @@ int __init rdt_get_mon_l3_config(struct rdt_resource *r)
 			r->mon.mbm_cfg_mask = ecx & MAX_EVT_CONFIG_BITS;
 	}
 
-	if (rdt_cpu_has(X86_FEATURE_ABMC)) {
+	/*
+	 * resctrl assumes a system that supports assignable counters can
+	 * switch to "default" mode. Ensure that there is a "default" mode
+	 * to switch to. This enforces a dependency between the independent
+	 * X86_FEATURE_ABMC and X86_FEATURE_CQM_MBM_TOTAL/X86_FEATURE_CQM_MBM_LOCAL
+	 * hardware features.
+	 */
+	if (rdt_cpu_has(X86_FEATURE_ABMC) &&
+	    (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL) ||
+	     rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL))) {
 		r->mon.mbm_cntr_assignable = true;
 		cpuid_count(0x80000020, 5, &eax, &ebx, &ecx, &edx);
 		r->mon.num_mbm_cntrs = (ebx & GENMASK(15, 0)) + 1;

@@ -184,6 +184,16 @@ static int blk_validate_integrity_limits(struct queue_limits *lim)
 	if (!bi->interval_exp)
 		bi->interval_exp = ilog2(lim->logical_block_size);
 
+	/*
+	 * The PI generation / validation helpers do not expect intervals to
+	 * straddle multiple bio_vecs. Enforce alignment so that those are
+	 * never generated, and that each buffer is aligned as expected.
+	 */
+	if (bi->csum_type) {
+		lim->dma_alignment = max(lim->dma_alignment,
+					 (1U << bi->interval_exp) - 1);
+	}
+
 	return 0;
 }
 

@@ -61,30 +61,6 @@ static int rimt_set_fwnode(struct acpi_rimt_node *rimt_node,
 	return 0;
 }
 
-/**
- * rimt_get_fwnode() - Retrieve fwnode associated with an RIMT node
- *
- * @node: RIMT table node to be looked-up
- *
- * Returns: fwnode_handle pointer on success, NULL on failure
- */
-static struct fwnode_handle *rimt_get_fwnode(struct acpi_rimt_node *node)
-{
-	struct fwnode_handle *fwnode = NULL;
-	struct rimt_fwnode *curr;
-
-	spin_lock(&rimt_fwnode_lock);
-	list_for_each_entry(curr, &rimt_fwnode_list, list) {
-		if (curr->rimt_node == node) {
-			fwnode = curr->fwnode;
-			break;
-		}
-	}
-	spin_unlock(&rimt_fwnode_lock);
-
-	return fwnode;
-}
-
 static acpi_status rimt_match_node_callback(struct acpi_rimt_node *node,
 					    void *context)
 {

@@ -202,6 +178,67 @@ static struct acpi_rimt_node *rimt_scan_node(enum acpi_rimt_node_type type,
 	return NULL;
 }
 
+/*
+ * RISC-V supports IOMMU as a PCI device or a platform device.
+ * When it is a platform device, there should be a namespace device as
+ * well along with RIMT. To create the link between RIMT information and
+ * the platform device, the IOMMU driver should register itself with the
+ * RIMT module. This is true for PCI based IOMMU as well.
+ */
+int rimt_iommu_register(struct device *dev)
+{
+	struct fwnode_handle *rimt_fwnode;
+	struct acpi_rimt_node *node;
+
+	node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_IOMMU, dev);
+	if (!node) {
+		pr_err("Could not find IOMMU node in RIMT\n");
+		return -ENODEV;
+	}
+
+	if (dev_is_pci(dev)) {
+		rimt_fwnode = acpi_alloc_fwnode_static();
+		if (!rimt_fwnode)
+			return -ENOMEM;
+
+		rimt_fwnode->dev = dev;
+		if (!dev->fwnode)
+			dev->fwnode = rimt_fwnode;
+
+		rimt_set_fwnode(node, rimt_fwnode);
+	} else {
+		rimt_set_fwnode(node, dev->fwnode);
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_IOMMU_API
+
+/**
+ * rimt_get_fwnode() - Retrieve fwnode associated with an RIMT node
+ *
+ * @node: RIMT table node to be looked-up
+ *
+ * Returns: fwnode_handle pointer on success, NULL on failure
+ */
+static struct fwnode_handle *rimt_get_fwnode(struct acpi_rimt_node *node)
+{
+	struct fwnode_handle *fwnode = NULL;
+	struct rimt_fwnode *curr;
+
+	spin_lock(&rimt_fwnode_lock);
+	list_for_each_entry(curr, &rimt_fwnode_list, list) {
+		if (curr->rimt_node == node) {
+			fwnode = curr->fwnode;
+			break;
+		}
+	}
+	spin_unlock(&rimt_fwnode_lock);
+
+	return fwnode;
+}
+
 static bool rimt_pcie_rc_supports_ats(struct acpi_rimt_node *node)
 {
 	struct acpi_rimt_pcie_rc *pci_rc;

@@ -290,43 +327,6 @@ static struct acpi_rimt_node *rimt_node_get_id(struct acpi_rimt_node *node,
 	return NULL;
 }
 
-/*
- * RISC-V supports IOMMU as a PCI device or a platform device.
- * When it is a platform device, there should be a namespace device as
- * well along with RIMT. To create the link between RIMT information and
- * the platform device, the IOMMU driver should register itself with the
- * RIMT module. This is true for PCI based IOMMU as well.
- */
-int rimt_iommu_register(struct device *dev)
-{
-	struct fwnode_handle *rimt_fwnode;
-	struct acpi_rimt_node *node;
-
-	node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_IOMMU, dev);
-	if (!node) {
-		pr_err("Could not find IOMMU node in RIMT\n");
-		return -ENODEV;
-	}
-
-	if (dev_is_pci(dev)) {
-		rimt_fwnode = acpi_alloc_fwnode_static();
-		if (!rimt_fwnode)
-			return -ENOMEM;
-
-		rimt_fwnode->dev = dev;
-		if (!dev->fwnode)
-			dev->fwnode = rimt_fwnode;
-
-		rimt_set_fwnode(node, rimt_fwnode);
-	} else {
-		rimt_set_fwnode(node, dev->fwnode);
-	}
-
-	return 0;
-}
-
-#ifdef CONFIG_IOMMU_API
-
 static struct acpi_rimt_node *rimt_node_map_id(struct acpi_rimt_node *node,
 					       u32 id_in, u32 *id_out,
 					       u8 type_mask)

@@ -851,18 +851,9 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
 	} else {
 		if (!internal)
 			node->local_weak_refs++;
-		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
-			if (target_list == NULL) {
-				pr_err("invalid inc weak node for %d\n",
-				       node->debug_id);
-				return -EINVAL;
-			}
-			/*
-			 * See comment above
-			 */
+		if (!node->has_weak_ref && target_list && list_empty(&node->work.entry))
 			binder_enqueue_work_ilocked(&node->work, target_list);
-		}
 	}
 	return 0;
 }

@@ -2418,10 +2409,10 @@ err_fd_not_accepted:
 
 /**
  * struct binder_ptr_fixup - data to be fixed-up in target buffer
- * @offset offset in target buffer to fixup
- * @skip_size bytes to skip in copy (fixup will be written later)
- * @fixup_data data to write at fixup offset
- * @node list node
+ * @offset: offset in target buffer to fixup
+ * @skip_size: bytes to skip in copy (fixup will be written later)
+ * @fixup_data: data to write at fixup offset
+ * @node: list node
  *
  * This is used for the pointer fixup list (pf) which is created and consumed
  * during binder_transaction() and is only accessed locally. No

@@ -2438,10 +2429,10 @@ struct binder_ptr_fixup {
 
 /**
  * struct binder_sg_copy - scatter-gather data to be copied
- * @offset offset in target buffer
- * @sender_uaddr user address in source buffer
- * @length bytes to copy
- * @node list node
+ * @offset: offset in target buffer
+ * @sender_uaddr: user address in source buffer
+ * @length: bytes to copy
+ * @node: list node
  *
  * This is used for the sg copy list (sgc) which is created and consumed
  * during binder_transaction() and is only accessed locally. No

@@ -4064,13 +4055,14 @@ binder_freeze_notification_done(struct binder_proc *proc,
 /**
  * binder_free_buf() - free the specified buffer
  * @proc:	binder proc that owns buffer
+ * @thread:	binder thread performing the buffer release
  * @buffer:	buffer to be freed
  * @is_failure:	failed to send transaction
  *
- * If buffer for an async transaction, enqueue the next async
+ * If the buffer is for an async transaction, enqueue the next async
  * transaction from the node.
  *
- * Cleanup buffer and free it.
+ * Cleanup the buffer and free it.
  */
 static void
 binder_free_buf(struct binder_proc *proc,

@@ -106,13 +106,22 @@ impl DeliverToRead for FreezeMessage {
             return Ok(true);
         }
         if freeze.is_clearing {
+            kernel::warn_on!(freeze.num_cleared_duplicates != 0);
+            if freeze.num_pending_duplicates > 0 {
+                // The primary freeze listener was deleted, so convert a pending duplicate back
+                // into the primary one.
+                freeze.num_pending_duplicates -= 1;
+                freeze.is_pending = true;
+                freeze.is_clearing = true;
+            } else {
                 _removed_listener = freeze_entry.remove_node();
+            }
             drop(node_refs);
             writer.write_code(BR_CLEAR_FREEZE_NOTIFICATION_DONE)?;
             writer.write_payload(&self.cookie.0)?;
             Ok(true)
         } else {
-            let is_frozen = freeze.node.owner.inner.lock().is_frozen;
+            let is_frozen = freeze.node.owner.inner.lock().is_frozen.is_fully_frozen();
             if freeze.last_is_frozen == Some(is_frozen) {
                 return Ok(true);
             }

@@ -245,8 +254,9 @@ impl Process {
             );
             return Err(EINVAL);
         }
-        if freeze.is_clearing {
-            // Immediately send another FreezeMessage for BR_CLEAR_FREEZE_NOTIFICATION_DONE.
+        let is_frozen = freeze.node.owner.inner.lock().is_frozen.is_fully_frozen();
+        if freeze.is_clearing || freeze.last_is_frozen != Some(is_frozen) {
+            // Immediately send another FreezeMessage.
             clear_msg = Some(FreezeMessage::init(alloc, cookie));
         }
         freeze.is_pending = false;

@@ -687,7 +687,7 @@ impl Node {
             );
         }
         if inner.freeze_list.is_empty() {
-            _unused_capacity = mem::replace(&mut inner.freeze_list, KVVec::new());
+            _unused_capacity = mem::take(&mut inner.freeze_list);
         }
     }
 

@@ -72,6 +72,33 @@ impl Mapping {
 const PROC_DEFER_FLUSH: u8 = 1;
 const PROC_DEFER_RELEASE: u8 = 2;
 
+#[derive(Copy, Clone)]
+pub(crate) enum IsFrozen {
+    Yes,
+    No,
+    InProgress,
+}
+
+impl IsFrozen {
+    /// Whether incoming transactions should be rejected due to freeze.
+    pub(crate) fn is_frozen(self) -> bool {
+        match self {
+            IsFrozen::Yes => true,
+            IsFrozen::No => false,
+            IsFrozen::InProgress => true,
+        }
+    }
+
+    /// Whether freeze notifications consider this process frozen.
+    pub(crate) fn is_fully_frozen(self) -> bool {
+        match self {
+            IsFrozen::Yes => true,
+            IsFrozen::No => false,
+            IsFrozen::InProgress => false,
+        }
+    }
+}
+
 /// The fields of `Process` protected by the spinlock.
 pub(crate) struct ProcessInner {
     is_manager: bool,

@@ -98,7 +125,7 @@ pub(crate) struct ProcessInner {
     /// are woken up.
     outstanding_txns: u32,
     /// Process is frozen and unable to service binder transactions.
-    pub(crate) is_frozen: bool,
+    pub(crate) is_frozen: IsFrozen,
     /// Process received sync transactions since last frozen.
     pub(crate) sync_recv: bool,
     /// Process received async transactions since last frozen.

@@ -124,7 +151,7 @@ impl ProcessInner {
             started_thread_count: 0,
             defer_work: 0,
             outstanding_txns: 0,
-            is_frozen: false,
+            is_frozen: IsFrozen::No,
             sync_recv: false,
             async_recv: false,
             binderfs_file: None,

@@ -1260,7 +1287,7 @@ impl Process {
         let is_manager = {
             let mut inner = self.inner.lock();
             inner.is_dead = true;
-            inner.is_frozen = false;
+            inner.is_frozen = IsFrozen::No;
             inner.sync_recv = false;
             inner.async_recv = false;
             inner.is_manager

@@ -1346,10 +1373,6 @@ impl Process {
             .alloc
             .take_for_each(|offset, size, debug_id, odata| {
                 let ptr = offset + address;
-                pr_warn!(
-                    "{}: removing orphan mapping {offset}:{size}\n",
-                    self.pid_in_current_ns()
-                );
                 let mut alloc =
                     Allocation::new(self.clone(), debug_id, offset, size, ptr, false);
                 if let Some(data) = odata {

@@ -1371,7 +1394,7 @@ impl Process {
                 return;
             }
             inner.outstanding_txns -= 1;
-            inner.is_frozen && inner.outstanding_txns == 0
+            inner.is_frozen.is_frozen() && inner.outstanding_txns == 0
         };
 
         if wake {

@@ -1385,7 +1408,7 @@ impl Process {
             let mut inner = self.inner.lock();
             inner.sync_recv = false;
             inner.async_recv = false;
-            inner.is_frozen = false;
+            inner.is_frozen = IsFrozen::No;
             drop(inner);
             msgs.send_messages();
             return Ok(());

@@ -1394,7 +1417,7 @@ impl Process {
         let mut inner = self.inner.lock();
         inner.sync_recv = false;
         inner.async_recv = false;
-        inner.is_frozen = true;
+        inner.is_frozen = IsFrozen::InProgress;
 
         if info.timeout_ms > 0 {
             let mut jiffies = kernel::time::msecs_to_jiffies(info.timeout_ms);

@@ -1408,7 +1431,7 @@ impl Process {
                     .wait_interruptible_timeout(&mut inner, jiffies)
                 {
                     CondVarTimeoutResult::Signal { .. } => {
-                        inner.is_frozen = false;
+                        inner.is_frozen = IsFrozen::No;
                         return Err(ERESTARTSYS);
                     }
                     CondVarTimeoutResult::Woken { jiffies: remaining } => {

@@ -1422,17 +1445,18 @@ impl Process {
         }
 
         if inner.txns_pending_locked() {
-            inner.is_frozen = false;
+            inner.is_frozen = IsFrozen::No;
             Err(EAGAIN)
         } else {
             drop(inner);
             match self.prepare_freeze_messages() {
                 Ok(batch) => {
+                    self.inner.lock().is_frozen = IsFrozen::Yes;
                     batch.send_messages();
                     Ok(())
                 }
                 Err(kernel::alloc::AllocError) => {
-                    self.inner.lock().is_frozen = false;
+                    self.inner.lock().is_frozen = IsFrozen::No;
                     Err(ENOMEM)
                 }
             }

@@ -249,7 +249,7 @@ impl Transaction {
 
         if oneway {
             if let Some(target_node) = self.target_node.clone() {
-                if process_inner.is_frozen {
+                if process_inner.is_frozen.is_frozen() {
                     process_inner.async_recv = true;
                     if self.flags & TF_UPDATE_TXN != 0 {
                         if let Some(t_outdated) =

@ -270,7 +270,7 @@ impl Transaction {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if process_inner.is_frozen {
|
if process_inner.is_frozen.is_frozen() {
|
||||||
return Err(BinderError::new_frozen_oneway());
|
return Err(BinderError::new_frozen_oneway());
|
||||||
} else {
|
} else {
|
||||||
return Ok(());
|
return Ok(());
|
||||||
|
|
@ -280,7 +280,7 @@ impl Transaction {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if process_inner.is_frozen {
|
if process_inner.is_frozen.is_frozen() {
|
||||||
process_inner.sync_recv = true;
|
process_inner.sync_recv = true;
|
||||||
return Err(BinderError::new_frozen());
|
return Err(BinderError::new_frozen());
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@@ -292,7 +292,7 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
 		 * frequency (by keeping the initial capacity_freq_ref value).
 		 */
 		cpu_clk = of_clk_get(cpu_node, 0);
-		if (!PTR_ERR_OR_ZERO(cpu_clk)) {
+		if (!IS_ERR_OR_NULL(cpu_clk)) {
 			per_cpu(capacity_freq_ref, cpu) =
 				clk_get_rate(cpu_clk) / HZ_PER_KHZ;
 			clk_put(cpu_clk);
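The one-line arch_topology fix matters because the clock lookup can yield either an ERR_PTR or NULL: `!PTR_ERR_OR_ZERO(clk)` treats NULL as success and would pass it on to `clk_get_rate()`, while `!IS_ERR_OR_NULL(clk)` accepts only a valid pointer. A userspace re-implementation of the two macros demonstrates the difference; the macro bodies below mirror include/linux/err.h but are redefined here so the test is standalone:

	#include <stdio.h>

	#define MAX_ERRNO		4095
	#define IS_ERR_VALUE(x)		((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)
	#define IS_ERR(p)		IS_ERR_VALUE((unsigned long)(p))
	#define PTR_ERR(p)		((long)(p))
	#define PTR_ERR_OR_ZERO(p)	(IS_ERR(p) ? PTR_ERR(p) : 0)
	#define IS_ERR_OR_NULL(p)	(!(p) || IS_ERR(p))

	int main(void)
	{
		long dummy;
		void *ptrs[] = { &dummy, NULL, (void *)-2L }; /* valid, NULL, ERR_PTR(-ENOENT) */
		const char *names[] = { "valid", "NULL", "ERR_PTR(-2)" };

		for (int i = 0; i < 3; i++)
			printf("%-12s old check passes: %d   new check passes: %d\n",
			       names[i],
			       !PTR_ERR_OR_ZERO(ptrs[i]),	/* wrongly passes NULL */
			       !IS_ERR_OR_NULL(ptrs[i]));	/* rejects NULL too */
		return 0;
	}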
@@ -1784,7 +1784,7 @@ static int fw_devlink_dev_sync_state(struct device *dev, void *data)
 		return 0;
 
 	if (fw_devlink_sync_state == FW_DEVLINK_SYNC_STATE_STRICT) {
-		dev_warn(sup, "sync_state() pending due to %s\n",
+		dev_info(sup, "sync_state() pending due to %s\n",
 			 dev_name(link->consumer));
 		return 0;
 	}
@@ -23,50 +23,46 @@ struct devcd_entry {
 	void *data;
 	size_t datalen;
 	/*
-	 * Here, mutex is required to serialize the calls to del_wk work between
-	 * user/kernel space which happens when devcd is added with device_add()
-	 * and that sends uevent to user space. User space reads the uevents,
-	 * and calls to devcd_data_write() which try to modify the work which is
-	 * not even initialized/queued from devcoredump.
+	 * There are 2 races for which mutex is required.
 	 *
+	 * The first race is between device creation and userspace writing to
+	 * schedule immediately destruction.
 	 *
+	 * This race is handled by arming the timer before device creation, but
+	 * when device creation fails the timer still exists.
 	 *
-	 *        cpu0(X)                                  cpu1(Y)
+	 * To solve this, hold the mutex during device_add(), and set
+	 * init_completed on success before releasing the mutex.
 	 *
-	 *        dev_coredump() uevent sent to user space
-	 *        device_add()  ======================> user space process Y reads the
-	 *                                              uevents writes to devcd fd
-	 *                                              which results into writes to
-	 *
-	 *                                             devcd_data_write()
-	 *                                               mod_delayed_work()
-	 *                                                 try_to_grab_pending()
-	 *                                                   timer_delete()
-	 *                                                     debug_assert_init()
-	 *       INIT_DELAYED_WORK()
-	 *       schedule_delayed_work()
-	 *
-	 *
-	 * Also, mutex alone would not be enough to avoid scheduling of
-	 * del_wk work after it get flush from a call to devcd_free()
-	 * mentioned as below.
-	 *
-	 *	disabled_store()
-	 *        devcd_free()
-	 *          mutex_lock()             devcd_data_write()
-	 *          flush_delayed_work()
-	 *          mutex_unlock()
-	 *                                   mutex_lock()
-	 *                                   mod_delayed_work()
-	 *                                   mutex_unlock()
-	 * So, delete_work flag is required.
+	 * That way the timer will never fire until device_add() is called,
+	 * it will do nothing if init_completed is not set. The timer is also
+	 * cancelled in that case.
+	 *
+	 * The second race involves multiple parallel invocations of devcd_free(),
+	 * add a deleted flag so only 1 can call the destructor.
 	 */
 	struct mutex mutex;
-	bool delete_work;
+	bool init_completed, deleted;
 	struct module *owner;
 	ssize_t (*read)(char *buffer, loff_t offset, size_t count,
 			void *data, size_t datalen);
 	void (*free)(void *data);
+	/*
+	 * If nothing interferes and device_add() was returns success,
+	 * del_wk will destroy the device after the timer fires.
+	 *
+	 * Multiple userspace processes can interfere in the working of the timer:
+	 * - Writing to the coredump will reschedule the timer to run immediately,
+	 *   if still armed.
+	 *
+	 *   This is handled by using "if (cancel_delayed_work()) {
+	 *   schedule_delayed_work() }", to prevent re-arming after having
+	 *   been previously fired.
+	 * - Writing to /sys/class/devcoredump/disabled will destroy the
+	 *   coredump synchronously.
+	 *   This is handled by using disable_delayed_work_sync(), and then
+	 *   checking if deleted flag is set with &devcd->mutex held.
+	 */
 	struct delayed_work del_wk;
 	struct device *failing_dev;
 };
@@ -95,14 +91,27 @@ static void devcd_dev_release(struct device *dev)
 	kfree(devcd);
 }
 
+static void __devcd_del(struct devcd_entry *devcd)
+{
+	devcd->deleted = true;
+	device_del(&devcd->devcd_dev);
+	put_device(&devcd->devcd_dev);
+}
+
 static void devcd_del(struct work_struct *wk)
 {
 	struct devcd_entry *devcd;
+	bool init_completed;
 
 	devcd = container_of(wk, struct devcd_entry, del_wk.work);
 
-	device_del(&devcd->devcd_dev);
-	put_device(&devcd->devcd_dev);
+	/* devcd->mutex serializes against dev_coredumpm_timeout */
+	mutex_lock(&devcd->mutex);
+	init_completed = devcd->init_completed;
+	mutex_unlock(&devcd->mutex);
+
+	if (init_completed)
+		__devcd_del(devcd);
 }
 
 static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
@@ -122,12 +131,12 @@ static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
 	struct device *dev = kobj_to_dev(kobj);
 	struct devcd_entry *devcd = dev_to_devcd(dev);
 
-	mutex_lock(&devcd->mutex);
-	if (!devcd->delete_work) {
-		devcd->delete_work = true;
-		mod_delayed_work(system_wq, &devcd->del_wk, 0);
-	}
-	mutex_unlock(&devcd->mutex);
+	/*
+	 * Although it's tempting to use mod_delayed work here,
+	 * that will cause a reschedule if the timer already fired.
+	 */
+	if (cancel_delayed_work(&devcd->del_wk))
+		schedule_delayed_work(&devcd->del_wk, 0);
 
 	return count;
 }
@@ -151,11 +160,21 @@ static int devcd_free(struct device *dev, void *data)
 {
 	struct devcd_entry *devcd = dev_to_devcd(dev);
 
+	/*
+	 * To prevent a race with devcd_data_write(), disable work and
+	 * complete manually instead.
+	 *
+	 * We cannot rely on the return value of
+	 * disable_delayed_work_sync() here, because it might be in the
+	 * middle of a cancel_delayed_work + schedule_delayed_work pair.
+	 *
+	 * devcd->mutex here guards against multiple parallel invocations
+	 * of devcd_free().
+	 */
+	disable_delayed_work_sync(&devcd->del_wk);
 	mutex_lock(&devcd->mutex);
-	if (!devcd->delete_work)
-		devcd->delete_work = true;
+	if (!devcd->deleted)
+		__devcd_del(devcd);
 
-	flush_delayed_work(&devcd->del_wk);
 	mutex_unlock(&devcd->mutex);
 	return 0;
 }
@@ -179,12 +198,10 @@ static ssize_t disabled_show(const struct class *class, const struct class_attri
 *                                                   put_device() <- last reference
 *             error = fn(dev, data)                 devcd_dev_release()
 *             devcd_free(dev, data)                 kfree(devcd)
-*             mutex_lock(&devcd->mutex);
 *
 *
 * In the above diagram, it looks like disabled_store() would be racing with parallelly
-* running devcd_del() and result in memory abort while acquiring devcd->mutex which
-* is called after kfree of devcd memory after dropping its last reference with
+* running devcd_del() and result in memory abort after dropping its last reference with
 * put_device(). However, this will not happens as fn(dev, data) runs
 * with its own reference to device via klist_node so it is not its last reference.
 * so, above situation would not occur.
@@ -374,7 +391,7 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
 	devcd->read = read;
 	devcd->free = free;
 	devcd->failing_dev = get_device(dev);
-	devcd->delete_work = false;
+	devcd->deleted = false;
 
 	mutex_init(&devcd->mutex);
 	device_initialize(&devcd->devcd_dev);
@@ -383,8 +400,14 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
 		     atomic_inc_return(&devcd_count));
 	devcd->devcd_dev.class = &devcd_class;
 
-	mutex_lock(&devcd->mutex);
 	dev_set_uevent_suppress(&devcd->devcd_dev, true);
+
+	/* devcd->mutex prevents devcd_del() completing until init finishes */
+	mutex_lock(&devcd->mutex);
+	devcd->init_completed = false;
+	INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
+	schedule_delayed_work(&devcd->del_wk, timeout);
+
 	if (device_add(&devcd->devcd_dev))
 		goto put_device;
@@ -401,13 +424,20 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
 
 	dev_set_uevent_suppress(&devcd->devcd_dev, false);
 	kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD);
-	INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
-	schedule_delayed_work(&devcd->del_wk, timeout);
+
+	/*
+	 * Safe to run devcd_del() now that we are done with devcd_dev.
+	 * Alternatively we could have taken a ref on devcd_dev before
+	 * dropping the lock.
+	 */
+	devcd->init_completed = true;
 	mutex_unlock(&devcd->mutex);
 	return;
 put_device:
-	put_device(&devcd->devcd_dev);
 	mutex_unlock(&devcd->mutex);
+	cancel_delayed_work_sync(&devcd->del_wk);
+	put_device(&devcd->devcd_dev);
+
 put_module:
 	module_put(owner);
 free:
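The devcoredump rework arms del_wk before device_add() and lets the timer callback bail out unless init_completed was set under the mutex, while devcd_free() disables the work and checks the deleted flag so only one path runs the destructor. A compressed pthread model of the init_completed handshake follows; the locking and names are simplified, so treat this as a sketch of the idea, not the kernel code:

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static bool init_completed;	/* set only after device_add() succeeds */
	static bool deleted;		/* only one caller may run the destructor */

	static void destroy(const char *who)
	{
		/* modelled on __devcd_del(): mark deleted, then tear down */
		if (!deleted) {
			deleted = true;
			printf("%s destroys the entry\n", who);
		}
	}

	static void *timer_fn(void *arg)
	{
		bool ok;

		pthread_mutex_lock(&lock);
		ok = init_completed;	/* devcd_del() re-checks under the mutex */
		if (ok)
			destroy("timer");
		pthread_mutex_unlock(&lock);
		if (!ok)
			puts("timer fired before init completed: do nothing");
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		/* the timer is armed before "device_add()", as in the patch */
		pthread_create(&t, NULL, timer_fn, NULL);
		pthread_join(t, NULL);	/* harmless: init not yet complete */

		pthread_mutex_lock(&lock);
		init_completed = true;	/* device_add() succeeded */
		pthread_mutex_unlock(&lock);

		pthread_create(&t, NULL, timer_fn, NULL);
		pthread_join(t, NULL);	/* now the timer may destroy */
		return 0;
	}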
@@ -294,6 +294,8 @@ static int bcma_register_devices(struct bcma_bus *bus)
 	int err;
 
 	list_for_each_entry(core, &bus->cores, list) {
+		struct device_node *np;
+
 		/* We support that core ourselves */
 		switch (core->id.id) {
 		case BCMA_CORE_4706_CHIPCOMMON:
@@ -311,6 +313,10 @@ static int bcma_register_devices(struct bcma_bus *bus)
 		if (bcma_is_core_needed_early(core->id.id))
 			continue;
 
+		np = core->dev.of_node;
+		if (np && !of_device_is_available(np))
+			continue;
+
 		/* Only first GMAC core on BCM4706 is connected and working */
 		if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
 		    core->core_unit > 0)
@@ -52,6 +52,7 @@
 static DEFINE_IDR(nbd_index_idr);
 static DEFINE_MUTEX(nbd_index_mutex);
 static struct workqueue_struct *nbd_del_wq;
+static struct cred *nbd_cred;
 static int nbd_total_devices = 0;
 
 struct nbd_sock {
@@ -554,6 +555,7 @@ static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
 	int result;
 	struct msghdr msg = {};
 	unsigned int noreclaim_flag;
+	const struct cred *old_cred;
 
 	if (unlikely(!sock)) {
 		dev_err_ratelimited(disk_to_dev(nbd->disk),
@@ -562,6 +564,8 @@ static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
 		return -EINVAL;
 	}
 
+	old_cred = override_creds(nbd_cred);
+
 	msg.msg_iter = *iter;
 
 	noreclaim_flag = memalloc_noreclaim_save();
@@ -586,6 +590,8 @@ static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
 
 	memalloc_noreclaim_restore(noreclaim_flag);
 
+	revert_creds(old_cred);
+
 	return result;
 }
 
@@ -2677,7 +2683,15 @@ static int __init nbd_init(void)
 		return -ENOMEM;
 	}
 
+	nbd_cred = prepare_kernel_cred(&init_task);
+	if (!nbd_cred) {
+		destroy_workqueue(nbd_del_wq);
+		unregister_blkdev(NBD_MAJOR, "nbd");
+		return -ENOMEM;
+	}
+
 	if (genl_register_family(&nbd_genl_family)) {
+		put_cred(nbd_cred);
 		destroy_workqueue(nbd_del_wq);
 		unregister_blkdev(NBD_MAJOR, "nbd");
 		return -EINVAL;
@@ -2732,6 +2746,7 @@ static void __exit nbd_cleanup(void)
 	/* Also wait for nbd_dev_remove_work() completes */
 	destroy_workqueue(nbd_del_wq);
 
+	put_cred(nbd_cred);
 	idr_destroy(&nbd_index_idr);
 	unregister_blkdev(NBD_MAJOR, "nbd");
 }
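nbd now performs its socket I/O under a kernel credential prepared once at module init, bracketing each transmission with override_creds()/revert_creds() so I/O issued from arbitrary user contexts behaves consistently. A standalone sketch of the save-override-restore pattern, with a plain global standing in for the task credential and all names illustrative:

	#include <stdio.h>

	struct cred { const char *label; };

	static const struct cred init_cred = { "kernel-init" };
	static const struct cred user_cred = { "random-user" };
	static const struct cred *current_cred = &user_cred;

	/* swap in new credentials, returning the old ones for later restore */
	static const struct cred *override_creds(const struct cred *new)
	{
		const struct cred *old = current_cred;

		current_cred = new;
		return old;
	}

	static void revert_creds(const struct cred *old)
	{
		current_cred = old;
	}

	static void sock_xmit(void)
	{
		printf("transmitting as \"%s\"\n", current_cred->label);
	}

	int main(void)
	{
		const struct cred *old = override_creds(&init_cred);

		sock_xmit();		/* always runs with the module's cred */
		revert_creds(old);
		printf("back to \"%s\"\n", current_cred->label);
		return 0;
	}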
@@ -41,6 +41,7 @@ struct bpa10x_data {
 	struct usb_anchor rx_anchor;
 
 	struct sk_buff *rx_skb[2];
+	struct hci_uart hu;
 };
 
 static void bpa10x_tx_complete(struct urb *urb)
@@ -96,7 +97,7 @@ static void bpa10x_rx_complete(struct urb *urb)
 	if (urb->status == 0) {
 		bool idx = usb_pipebulk(urb->pipe);
 
-		data->rx_skb[idx] = h4_recv_buf(hdev, data->rx_skb[idx],
+		data->rx_skb[idx] = h4_recv_buf(&data->hu, data->rx_skb[idx],
 						urb->transfer_buffer,
 						urb->actual_length,
 						bpa10x_recv_pkts,
@@ -388,6 +389,7 @@ static int bpa10x_probe(struct usb_interface *intf,
 	hci_set_drvdata(hdev, data);
 
 	data->hdev = hdev;
+	data->hu.hdev = hdev;
 
 	SET_HCIDEV_DEV(hdev, &intf->dev);
 
@@ -1467,11 +1467,6 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
 	if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP1)
 		btintel_pcie_msix_gp1_handler(data);
 
-	/* This interrupt is triggered by the firmware after updating
-	 * boot_stage register and image_response register
-	 */
-	if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0)
-		btintel_pcie_msix_gp0_handler(data);
-
 	/* For TX */
 	if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0) {
@@ -1487,6 +1482,12 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
 		btintel_pcie_msix_tx_handle(data);
 	}
 
+	/* This interrupt is triggered by the firmware after updating
+	 * boot_stage register and image_response register
+	 */
+	if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0)
+		btintel_pcie_msix_gp0_handler(data);
+
 	/*
 	 * Before sending the interrupt the HW disables it to prevent a nested
 	 * interrupt. This is done by writing 1 to the corresponding bit in
@@ -1270,6 +1270,12 @@ static void btmtksdio_reset(struct hci_dev *hdev)
 
 	sdio_claim_host(bdev->func);
 
+	/* set drv_pmctrl if BT is closed before doing reset */
+	if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) {
+		sdio_enable_func(bdev->func);
+		btmtksdio_drv_pmctrl(bdev);
+	}
+
 	sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL);
 	skb_queue_purge(&bdev->txq);
 	cancel_work_sync(&bdev->txrx_work);
@@ -1285,6 +1291,12 @@ static void btmtksdio_reset(struct hci_dev *hdev)
 		goto err;
 	}
 
+	/* set fw_pmctrl back if BT is closed after doing reset */
+	if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) {
+		btmtksdio_fw_pmctrl(bdev);
+		sdio_disable_func(bdev->func);
+	}
+
 	clear_bit(BTMTKSDIO_PATCH_ENABLED, &bdev->tx_state);
 err:
 	sdio_release_host(bdev->func);
@@ -79,6 +79,7 @@ struct btmtkuart_dev {
 	u16 stp_dlen;
 
 	const struct btmtkuart_data *data;
+	struct hci_uart hu;
 };
 
 #define btmtkuart_is_standalone(bdev)	\
@@ -368,7 +369,7 @@ static void btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
 		sz_left -= adv;
 		p_left += adv;
 
-		bdev->rx_skb = h4_recv_buf(bdev->hdev, bdev->rx_skb, p_h4,
+		bdev->rx_skb = h4_recv_buf(&bdev->hu, bdev->rx_skb, p_h4,
 					   sz_h4, mtk_recv_pkts,
 					   ARRAY_SIZE(mtk_recv_pkts));
 		if (IS_ERR(bdev->rx_skb)) {
@@ -858,6 +859,7 @@ static int btmtkuart_probe(struct serdev_device *serdev)
 	}
 
 	bdev->hdev = hdev;
+	bdev->hu.hdev = hdev;
 
 	hdev->bus = HCI_UART;
 	hci_set_drvdata(hdev, bdev);
@@ -212,6 +212,7 @@ struct btnxpuart_dev {
 	struct ps_data psdata;
 	struct btnxpuart_data *nxp_data;
 	struct reset_control *pdn;
+	struct hci_uart hu;
 };
 
 #define NXP_V1_FW_REQ_PKT	0xa5
@@ -1756,7 +1757,7 @@ static size_t btnxpuart_receive_buf(struct serdev_device *serdev,
 
 	ps_start_timer(nxpdev);
 
-	nxpdev->rx_skb = h4_recv_buf(nxpdev->hdev, nxpdev->rx_skb, data, count,
+	nxpdev->rx_skb = h4_recv_buf(&nxpdev->hu, nxpdev->rx_skb, data, count,
 				     nxp_recv_pkts, ARRAY_SIZE(nxp_recv_pkts));
 	if (IS_ERR(nxpdev->rx_skb)) {
 		int err = PTR_ERR(nxpdev->rx_skb);
@@ -1875,6 +1876,7 @@ static int nxp_serdev_probe(struct serdev_device *serdev)
 	reset_control_deassert(nxpdev->pdn);
 
 	nxpdev->hdev = hdev;
+	nxpdev->hu.hdev = hdev;
 
 	hdev->bus = HCI_UART;
 	hci_set_drvdata(hdev, nxpdev);
@@ -105,7 +105,7 @@ static int ag6xx_recv(struct hci_uart *hu, const void *data, int count)
 	if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
 		return -EUNATCH;
 
-	ag6xx->rx_skb = h4_recv_buf(hu->hdev, ag6xx->rx_skb, data, count,
+	ag6xx->rx_skb = h4_recv_buf(hu, ag6xx->rx_skb, data, count,
 				    ag6xx_recv_pkts,
 				    ARRAY_SIZE(ag6xx_recv_pkts));
 	if (IS_ERR(ag6xx->rx_skb)) {
@@ -650,7 +650,7 @@ static int aml_recv(struct hci_uart *hu, const void *data, int count)
 	struct aml_data *aml_data = hu->priv;
 	int err;
 
-	aml_data->rx_skb = h4_recv_buf(hu->hdev, aml_data->rx_skb, data, count,
+	aml_data->rx_skb = h4_recv_buf(hu, aml_data->rx_skb, data, count,
 				       aml_recv_pkts,
 				       ARRAY_SIZE(aml_recv_pkts));
 	if (IS_ERR(aml_data->rx_skb)) {
@@ -191,7 +191,7 @@ static int ath_recv(struct hci_uart *hu, const void *data, int count)
 {
 	struct ath_struct *ath = hu->priv;
 
-	ath->rx_skb = h4_recv_buf(hu->hdev, ath->rx_skb, data, count,
+	ath->rx_skb = h4_recv_buf(hu, ath->rx_skb, data, count,
 				  ath_recv_pkts, ARRAY_SIZE(ath_recv_pkts));
 	if (IS_ERR(ath->rx_skb)) {
 		int err = PTR_ERR(ath->rx_skb);
@@ -698,7 +698,7 @@ static int bcm_recv(struct hci_uart *hu, const void *data, int count)
 	if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
 		return -EUNATCH;
 
-	bcm->rx_skb = h4_recv_buf(hu->hdev, bcm->rx_skb, data, count,
+	bcm->rx_skb = h4_recv_buf(hu, bcm->rx_skb, data, count,
 				  bcm_recv_pkts, ARRAY_SIZE(bcm_recv_pkts));
 	if (IS_ERR(bcm->rx_skb)) {
 		int err = PTR_ERR(bcm->rx_skb);
@@ -112,7 +112,7 @@ static int h4_recv(struct hci_uart *hu, const void *data, int count)
 	if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
 		return -EUNATCH;
 
-	h4->rx_skb = h4_recv_buf(hu->hdev, h4->rx_skb, data, count,
+	h4->rx_skb = h4_recv_buf(hu, h4->rx_skb, data, count,
 				 h4_recv_pkts, ARRAY_SIZE(h4_recv_pkts));
 	if (IS_ERR(h4->rx_skb)) {
 		int err = PTR_ERR(h4->rx_skb);
@@ -151,12 +151,12 @@ int __exit h4_deinit(void)
 	return hci_uart_unregister_proto(&h4p);
 }
 
-struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
+struct sk_buff *h4_recv_buf(struct hci_uart *hu, struct sk_buff *skb,
 			    const unsigned char *buffer, int count,
 			    const struct h4_recv_pkt *pkts, int pkts_count)
 {
-	struct hci_uart *hu = hci_get_drvdata(hdev);
 	u8 alignment = hu->alignment ? hu->alignment : 1;
+	struct hci_dev *hdev = hu->hdev;
 
 	/* Check for error from previous call */
 	if (IS_ERR(skb))
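All the h4_recv_buf() callers in this series change the same way: the helper now takes the struct hci_uart directly instead of deriving it from drvdata, and the non-UART users (bpa10x, btmtkuart, btnxpuart) embed a struct hci_uart and point its hdev member at their hci_dev. A standalone C sketch of the refactor pattern, with stand-in types rather than the real Bluetooth ones:

	#include <stdio.h>

	struct hci_dev { const char *name; };

	/* the wrapper the callee really wants: carries hdev plus framing state */
	struct hci_uart {
		struct hci_dev *hdev;
		unsigned int alignment;
	};

	/* after: the callee gets the container and pulls hdev out of it */
	static void recv_buf(struct hci_uart *hu, const char *buf)
	{
		struct hci_dev *hdev = hu->hdev;
		unsigned int align = hu->alignment ? hu->alignment : 1;

		printf("%s: %s (alignment %u)\n", hdev->name, buf, align);
	}

	/* a driver with no real UART embeds the wrapper and links hdev once */
	struct fake_usb_driver {
		struct hci_dev *hdev;
		struct hci_uart hu;
	};

	int main(void)
	{
		struct hci_dev dev = { "hci0" };
		struct fake_usb_driver drv = { .hdev = &dev };

		drv.hu.hdev = drv.hdev;		/* mirrors data->hu.hdev = hdev */
		recv_buf(&drv.hu, "packet");
		return 0;
	}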
@@ -972,7 +972,7 @@ static int intel_recv(struct hci_uart *hu, const void *data, int count)
 	if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
 		return -EUNATCH;
 
-	intel->rx_skb = h4_recv_buf(hu->hdev, intel->rx_skb, data, count,
+	intel->rx_skb = h4_recv_buf(hu, intel->rx_skb, data, count,
 				    intel_recv_pkts,
 				    ARRAY_SIZE(intel_recv_pkts));
 	if (IS_ERR(intel->rx_skb)) {
@@ -429,7 +429,7 @@ static int ll_recv(struct hci_uart *hu, const void *data, int count)
 	if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
 		return -EUNATCH;
 
-	ll->rx_skb = h4_recv_buf(hu->hdev, ll->rx_skb, data, count,
+	ll->rx_skb = h4_recv_buf(hu, ll->rx_skb, data, count,
 				 ll_recv_pkts, ARRAY_SIZE(ll_recv_pkts));
 	if (IS_ERR(ll->rx_skb)) {
 		int err = PTR_ERR(ll->rx_skb);
@@ -264,7 +264,7 @@ static int mrvl_recv(struct hci_uart *hu, const void *data, int count)
 	    !test_bit(STATE_FW_LOADED, &mrvl->flags))
 		return count;
 
-	mrvl->rx_skb = h4_recv_buf(hu->hdev, mrvl->rx_skb, data, count,
+	mrvl->rx_skb = h4_recv_buf(hu, mrvl->rx_skb, data, count,
 				   mrvl_recv_pkts,
 				   ARRAY_SIZE(mrvl_recv_pkts));
 	if (IS_ERR(mrvl->rx_skb)) {
@@ -624,7 +624,7 @@ static int nokia_recv(struct hci_uart *hu, const void *data, int count)
 	if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
 		return -EUNATCH;
 
-	btdev->rx_skb = h4_recv_buf(hu->hdev, btdev->rx_skb, data, count,
+	btdev->rx_skb = h4_recv_buf(hu, btdev->rx_skb, data, count,
 				    nokia_recv_pkts, ARRAY_SIZE(nokia_recv_pkts));
 	if (IS_ERR(btdev->rx_skb)) {
 		err = PTR_ERR(btdev->rx_skb);
@@ -1277,7 +1277,7 @@ static int qca_recv(struct hci_uart *hu, const void *data, int count)
 	if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
 		return -EUNATCH;
 
-	qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count,
+	qca->rx_skb = h4_recv_buf(hu, qca->rx_skb, data, count,
 				  qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts));
 	if (IS_ERR(qca->rx_skb)) {
 		int err = PTR_ERR(qca->rx_skb);
@@ -162,7 +162,7 @@ struct h4_recv_pkt {
 int h4_init(void);
 int h4_deinit(void);
 
-struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
+struct sk_buff *h4_recv_buf(struct hci_uart *hu, struct sk_buff *skb,
 			    const unsigned char *buffer, int count,
 			    const struct h4_recv_pkt *pkts, int pkts_count);
 #endif
@@ -317,7 +317,7 @@ static unsigned int comedi_buf_munge(struct comedi_subdevice *s,
 	unsigned int count = 0;
 	const unsigned int num_sample_bytes = comedi_bytes_per_sample(s);
 
-	if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) {
+	if (!s->munge || (async->cmd.flags & CMDF_RAWDATA) || async->cmd.chanlist_len == 0) {
 		async->munge_count += num_bytes;
 		return num_bytes;
 	}
@@ -1559,7 +1559,10 @@ int dpll_nl_pin_id_get_doit(struct sk_buff *skb, struct genl_info *info)
 		return -EMSGSIZE;
 	}
 	pin = dpll_pin_find_from_nlattr(info);
-	if (!IS_ERR(pin)) {
+	if (IS_ERR(pin)) {
+		nlmsg_free(msg);
+		return PTR_ERR(pin);
+	}
 	if (!dpll_pin_available(pin)) {
 		nlmsg_free(msg);
 		return -ENODEV;
@@ -1569,7 +1572,6 @@ int dpll_nl_pin_id_get_doit(struct sk_buff *skb, struct genl_info *info)
 		nlmsg_free(msg);
 		return ret;
 	}
-	}
 	genlmsg_end(msg, hdr);
 
 	return genlmsg_reply(msg, info);
@@ -1735,13 +1737,15 @@ int dpll_nl_device_id_get_doit(struct sk_buff *skb, struct genl_info *info)
 	}
 
 	dpll = dpll_device_find_from_nlattr(info);
-	if (!IS_ERR(dpll)) {
+	if (IS_ERR(dpll)) {
+		nlmsg_free(msg);
+		return PTR_ERR(dpll);
+	}
 	ret = dpll_msg_add_dev_handle(msg, dpll);
 	if (ret) {
 		nlmsg_free(msg);
 		return ret;
 	}
-	}
 	genlmsg_end(msg, hdr);
 
 	return genlmsg_reply(msg, info);
@@ -1904,7 +1904,7 @@ zl3073x_dpll_pin_is_registrable(struct zl3073x_dpll *zldpll,
 		}
 
 		is_diff = zl3073x_out_is_diff(zldev, out);
-		is_enabled = zl3073x_out_is_enabled(zldev, out);
+		is_enabled = zl3073x_output_pin_is_enabled(zldev, index);
 	}
 
 	/* Skip N-pin if the corresponding input/output is differential */
@@ -269,7 +269,7 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
 }
 
 static int allocate_tlabel(struct fw_card *card)
-	__must_hold(&card->transactions_lock)
+	__must_hold(&card->transactions.lock)
 {
 	int tlabel;
 
@@ -167,6 +167,7 @@ static inline void __init init_ohci1394_initialize(struct ohci *ohci)
 
 /**
  * init_ohci1394_wait_for_busresets - wait until bus resets are completed
+ * @ohci: Pointer to the OHCI-1394 controller structure
  *
  * OHCI1394 initialization itself and any device going on- or offline
  * and any cable issue cause a IEEE1394 bus reset. The OHCI1394 spec
@@ -189,6 +190,8 @@ static inline void __init init_ohci1394_wait_for_busresets(struct ohci *ohci)
 
 /**
  * init_ohci1394_enable_physical_dma - Enable physical DMA for remote debugging
+ * @ohci: Pointer to the OHCI-1394 controller structure
+ *
  * This enables remote DMA access over IEEE1394 from every host for the low
  * 4GB of address space. DMA accesses above 4GB are not available currently.
  */
@@ -201,6 +204,8 @@ static inline void __init init_ohci1394_enable_physical_dma(struct ohci *ohci)
 
 /**
  * init_ohci1394_reset_and_init_dma - init controller and enable DMA
+ * @ohci: Pointer to the OHCI-1394 controller structure
+ *
  * This initializes the given controller and enables physical DMA engine in it.
 */
 static inline void __init init_ohci1394_reset_and_init_dma(struct ohci *ohci)
@@ -230,6 +235,10 @@ static inline void __init init_ohci1394_reset_and_init_dma(struct ohci *ohci)
 
 /**
  * init_ohci1394_controller - Map the registers of the controller and init DMA
+ * @num: PCI bus number
+ * @slot: PCI device number
+ * @func: PCI function number
+ *
  * This maps the registers of the specified controller and initializes it
 */
 static inline void __init init_ohci1394_controller(int num, int slot, int func)
@@ -284,6 +293,7 @@ void __init init_ohci1394_dma_on_all_controllers(void)
 
 /**
  * setup_ohci1394_dma - enables early OHCI1394 DMA initialization
+ * @opt: Kernel command line parameter string
 */
 static int __init setup_ohci1394_dma(char *opt)
 {
@@ -649,6 +649,26 @@ static u16 ffa_memory_attributes_get(u32 func_id)
 	return FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK | FFA_MEM_INNER_SHAREABLE;
 }
 
+static void ffa_emad_impdef_value_init(u32 version, void *dst, void *src)
+{
+	struct ffa_mem_region_attributes *ep_mem_access;
+
+	if (FFA_EMAD_HAS_IMPDEF_FIELD(version))
+		memcpy(dst, src, sizeof(ep_mem_access->impdef_val));
+}
+
+static void
+ffa_mem_region_additional_setup(u32 version, struct ffa_mem_region *mem_region)
+{
+	if (!FFA_MEM_REGION_HAS_EP_MEM_OFFSET(version)) {
+		mem_region->ep_mem_size = 0;
+	} else {
+		mem_region->ep_mem_size = ffa_emad_size_get(version);
+		mem_region->ep_mem_offset = sizeof(*mem_region);
+		memset(mem_region->reserved, 0, 12);
+	}
+}
+
 static int
 ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
 		       struct ffa_mem_ops_args *args)
@@ -667,27 +687,24 @@ ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
 	mem_region->flags = args->flags;
 	mem_region->sender_id = drv_info->vm_id;
 	mem_region->attributes = ffa_memory_attributes_get(func_id);
-	ep_mem_access = buffer +
-			ffa_mem_desc_offset(buffer, 0, drv_info->version);
 	composite_offset = ffa_mem_desc_offset(buffer, args->nattrs,
 					       drv_info->version);
 
-	for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) {
+	for (idx = 0; idx < args->nattrs; idx++) {
+		ep_mem_access = buffer +
+				ffa_mem_desc_offset(buffer, idx, drv_info->version);
 		ep_mem_access->receiver = args->attrs[idx].receiver;
 		ep_mem_access->attrs = args->attrs[idx].attrs;
 		ep_mem_access->composite_off = composite_offset;
 		ep_mem_access->flag = 0;
 		ep_mem_access->reserved = 0;
+		ffa_emad_impdef_value_init(drv_info->version,
+					   ep_mem_access->impdef_val,
+					   args->attrs[idx].impdef_val);
 	}
 	mem_region->handle = 0;
 	mem_region->ep_count = args->nattrs;
-	if (drv_info->version <= FFA_VERSION_1_0) {
-		mem_region->ep_mem_size = 0;
-	} else {
-		mem_region->ep_mem_size = sizeof(*ep_mem_access);
-		mem_region->ep_mem_offset = sizeof(*mem_region);
-		memset(mem_region->reserved, 0, 12);
-	}
+	ffa_mem_region_additional_setup(drv_info->version, mem_region);
 
 	composite = buffer + composite_offset;
 	composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg);
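The FF-A change stops assuming the endpoint memory-access descriptors form a contiguous array of the structure the driver was compiled against; each one is re-located via ffa_mem_desc_offset() using an ABI-version-dependent stride, and the impdef field is copied only when the negotiated version carries it. A small sketch of version-dependent stride walking; the sizes and version split below are made up purely for illustration:

	#include <stdio.h>
	#include <string.h>

	/* pretend v1.0 descriptors are 16 bytes and v1.1+ are 32 bytes */
	static size_t emad_size(unsigned int version)
	{
		return version >= 0x10001 ? 32 : 16;
	}

	/* index into a packed buffer using the stride of the peer's version */
	static void *emad_at(void *buf, unsigned int version, int idx)
	{
		return (char *)buf + emad_size(version) * idx;
	}

	int main(void)
	{
		unsigned char buf[128];
		unsigned int version = 0x10000;	/* negotiated at runtime */

		memset(buf, 0, sizeof(buf));
		for (int idx = 0; idx < 3; idx++)
			printf("emad %d at offset %td\n", idx,
			       (char *)emad_at(buf, version, idx) - (char *)buf);
		return 0;
	}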
@@ -309,16 +309,36 @@ enum debug_counters {
 	SCMI_DEBUG_COUNTERS_LAST
 };
 
-static inline void scmi_inc_count(atomic_t *arr, int stat)
+/**
+ * struct scmi_debug_info - Debug common info
+ * @top_dentry: A reference to the top debugfs dentry
+ * @name: Name of this SCMI instance
+ * @type: Type of this SCMI instance
+ * @is_atomic: Flag to state if the transport of this instance is atomic
+ * @counters: An array of atomic_c's used for tracking statistics (if enabled)
+ */
+struct scmi_debug_info {
+	struct dentry *top_dentry;
+	const char *name;
+	const char *type;
+	bool is_atomic;
+	atomic_t counters[SCMI_DEBUG_COUNTERS_LAST];
+};
+
+static inline void scmi_inc_count(struct scmi_debug_info *dbg, int stat)
 {
-	if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS))
-		atomic_inc(&arr[stat]);
+	if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) {
+		if (dbg)
+			atomic_inc(&dbg->counters[stat]);
+	}
 }
 
-static inline void scmi_dec_count(atomic_t *arr, int stat)
+static inline void scmi_dec_count(struct scmi_debug_info *dbg, int stat)
 {
-	if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS))
-		atomic_dec(&arr[stat]);
+	if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) {
+		if (dbg)
+			atomic_dec(&dbg->counters[stat]);
+	}
 }
 
 enum scmi_bad_msg {
@@ -115,22 +115,6 @@ struct scmi_protocol_instance {
 
 #define ph_to_pi(h)	container_of(h, struct scmi_protocol_instance, ph)
 
-/**
- * struct scmi_debug_info - Debug common info
- * @top_dentry: A reference to the top debugfs dentry
- * @name: Name of this SCMI instance
- * @type: Type of this SCMI instance
- * @is_atomic: Flag to state if the transport of this instance is atomic
- * @counters: An array of atomic_c's used for tracking statistics (if enabled)
- */
-struct scmi_debug_info {
-	struct dentry *top_dentry;
-	const char *name;
-	const char *type;
-	bool is_atomic;
-	atomic_t counters[SCMI_DEBUG_COUNTERS_LAST];
-};
-
 /**
  * struct scmi_info - Structure representing a SCMI instance
  *
@@ -610,7 +594,7 @@ scmi_xfer_inflight_register_unlocked(struct scmi_xfer *xfer,
 	/* Set in-flight */
 	set_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
 	hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq);
-	scmi_inc_count(info->dbg->counters, XFERS_INFLIGHT);
+	scmi_inc_count(info->dbg, XFERS_INFLIGHT);
 
 	xfer->pending = true;
 }
@@ -819,8 +803,9 @@ __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
 			hash_del(&xfer->node);
 			xfer->pending = false;
 
-			scmi_dec_count(info->dbg->counters, XFERS_INFLIGHT);
+			scmi_dec_count(info->dbg, XFERS_INFLIGHT);
 		}
+		xfer->flags = 0;
 		hlist_add_head(&xfer->node, &minfo->free_xfers);
 	}
 	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
@@ -839,8 +824,6 @@ void scmi_xfer_raw_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
 {
 	struct scmi_info *info = handle_to_scmi_info(handle);
 
-	xfer->flags &= ~SCMI_XFER_FLAG_IS_RAW;
-	xfer->flags &= ~SCMI_XFER_FLAG_CHAN_SET;
 	return __scmi_xfer_put(&info->tx_minfo, xfer);
 }
 
@@ -1034,7 +1017,7 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
 		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
 
 		scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNEXPECTED);
-		scmi_inc_count(info->dbg->counters, ERR_MSG_UNEXPECTED);
+		scmi_inc_count(info->dbg, ERR_MSG_UNEXPECTED);
 
 		return xfer;
 	}
@@ -1062,7 +1045,7 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
 			msg_type, xfer_id, msg_hdr, xfer->state);
 
 		scmi_bad_message_trace(cinfo, msg_hdr, MSG_INVALID);
-		scmi_inc_count(info->dbg->counters, ERR_MSG_INVALID);
+		scmi_inc_count(info->dbg, ERR_MSG_INVALID);
 
 		/* On error the refcount incremented above has to be dropped */
 		__scmi_xfer_put(minfo, xfer);
@@ -1107,7 +1090,7 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
 			    PTR_ERR(xfer));
 
 		scmi_bad_message_trace(cinfo, msg_hdr, MSG_NOMEM);
-		scmi_inc_count(info->dbg->counters, ERR_MSG_NOMEM);
+		scmi_inc_count(info->dbg, ERR_MSG_NOMEM);
 
 		scmi_clear_channel(info, cinfo);
 		return;
@@ -1123,7 +1106,7 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
 	trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
 			    xfer->hdr.id, "NOTI", xfer->hdr.seq,
 			    xfer->hdr.status, xfer->rx.buf, xfer->rx.len);
-	scmi_inc_count(info->dbg->counters, NOTIFICATION_OK);
+	scmi_inc_count(info->dbg, NOTIFICATION_OK);
 
 	scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
 		    xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
@@ -1183,10 +1166,10 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
 	if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) {
 		scmi_clear_channel(info, cinfo);
 		complete(xfer->async_done);
-		scmi_inc_count(info->dbg->counters, DELAYED_RESPONSE_OK);
+		scmi_inc_count(info->dbg, DELAYED_RESPONSE_OK);
 	} else {
 		complete(&xfer->done);
-		scmi_inc_count(info->dbg->counters, RESPONSE_OK);
+		scmi_inc_count(info->dbg, RESPONSE_OK);
 	}
 
 	if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
@@ -1296,7 +1279,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
 				"timed out in resp(caller: %pS) - polling\n",
 				(void *)_RET_IP_);
 			ret = -ETIMEDOUT;
-			scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_POLLED_TIMEOUT);
+			scmi_inc_count(info->dbg, XFERS_RESPONSE_POLLED_TIMEOUT);
 		}
 	}
 
@@ -1321,7 +1304,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
 					   "RESP" : "resp",
 					   xfer->hdr.seq, xfer->hdr.status,
 					   xfer->rx.buf, xfer->rx.len);
-			scmi_inc_count(info->dbg->counters, RESPONSE_POLLED_OK);
+			scmi_inc_count(info->dbg, RESPONSE_POLLED_OK);
 
 			if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
 				scmi_raw_message_report(info->raw, xfer,
@@ -1336,7 +1319,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
 			dev_err(dev, "timed out in resp(caller: %pS)\n",
 				(void *)_RET_IP_);
 			ret = -ETIMEDOUT;
-			scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_TIMEOUT);
+			scmi_inc_count(info->dbg, XFERS_RESPONSE_TIMEOUT);
 		}
 	}
 
@@ -1420,13 +1403,13 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
 	    !is_transport_polling_capable(info->desc)) {
 		dev_warn_once(dev,
 			      "Polling mode is not supported by transport.\n");
-		scmi_inc_count(info->dbg->counters, SENT_FAIL_POLLING_UNSUPPORTED);
+		scmi_inc_count(info->dbg, SENT_FAIL_POLLING_UNSUPPORTED);
 		return -EINVAL;
 	}
 
 	cinfo = idr_find(&info->tx_idr, pi->proto->id);
 	if (unlikely(!cinfo)) {
-		scmi_inc_count(info->dbg->counters, SENT_FAIL_CHANNEL_NOT_FOUND);
+		scmi_inc_count(info->dbg, SENT_FAIL_CHANNEL_NOT_FOUND);
 		return -EINVAL;
 	}
 	/* True ONLY if also supported by transport. */
@@ -1461,19 +1444,19 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
 	ret = info->desc->ops->send_message(cinfo, xfer);
 	if (ret < 0) {
 		dev_dbg(dev, "Failed to send message %d\n", ret);
-		scmi_inc_count(info->dbg->counters, SENT_FAIL);
+		scmi_inc_count(info->dbg, SENT_FAIL);
 		return ret;
 	}
 
 	trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
 			    xfer->hdr.id, "CMND", xfer->hdr.seq,
 			    xfer->hdr.status, xfer->tx.buf, xfer->tx.len);
-	scmi_inc_count(info->dbg->counters, SENT_OK);
+	scmi_inc_count(info->dbg, SENT_OK);
 
 	ret = scmi_wait_for_message_response(cinfo, xfer);
 	if (!ret && xfer->hdr.status) {
 		ret = scmi_to_linux_errno(xfer->hdr.status);
-		scmi_inc_count(info->dbg->counters, ERR_PROTOCOL);
+		scmi_inc_count(info->dbg, ERR_PROTOCOL);
 	}
 
 	if (info->desc->ops->mark_txdone)
@@ -3044,9 +3027,6 @@ static int scmi_debugfs_raw_mode_setup(struct scmi_info *info)
 	u8 channels[SCMI_MAX_CHANNELS] = {};
 	DECLARE_BITMAP(protos, SCMI_MAX_CHANNELS) = {};
 
-	if (!info->dbg)
-		return -EINVAL;
-
 	/* Enumerate all channels to collect their ids */
 	idr_for_each_entry(&info->tx_idr, cinfo, id) {
 		/*
@@ -3218,7 +3198,7 @@ static int scmi_probe(struct platform_device *pdev)
 	if (!info->dbg)
 		dev_warn(dev, "Failed to setup SCMI debugfs.\n");
 
-	if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
+	if (info->dbg && IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
 		ret = scmi_debugfs_raw_mode_setup(info);
 		if (!coex) {
 			if (ret)
@@ -3423,6 +3403,9 @@ int scmi_inflight_count(const struct scmi_handle *handle)
 	if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) {
 		struct scmi_info *info = handle_to_scmi_info(handle);
 
+		if (!info->dbg)
+			return 0;
+
 		return atomic_read(&info->dbg->counters[XFERS_INFLIGHT]);
 	} else {
 		return 0;
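The SCMI series makes the counter helpers take the scmi_debug_info pointer itself so they tolerate a failed debugfs setup (info->dbg == NULL) instead of every call site dereferencing info->dbg->counters. The same NULL-object guard in standalone C, with atomic_t replaced by C11 atomics and all names illustrative:

	#include <stdatomic.h>
	#include <stdio.h>

	enum { SENT_OK, SENT_FAIL, COUNTERS_LAST };

	struct debug_info {
		atomic_int counters[COUNTERS_LAST];
	};

	/* the helper owns the NULL check, so callers never dereference dbg */
	static void inc_count(struct debug_info *dbg, int stat)
	{
		if (dbg)
			atomic_fetch_add(&dbg->counters[stat], 1);
	}

	int main(void)
	{
		struct debug_info dbg = { 0 };

		inc_count(NULL, SENT_OK);	/* debugfs setup failed: no crash */
		inc_count(&dbg, SENT_OK);
		printf("SENT_OK = %d\n", atomic_load(&dbg.counters[SENT_OK]));
		return 0;
	}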
@@ -59,6 +59,7 @@ static const struct regmap_config idio_16_regmap_config = {
 	.reg_stride = 1,
 	.val_bits = 8,
 	.io_port = true,
+	.max_register = 0x5,
 	.wr_table = &idio_16_wr_table,
 	.rd_table = &idio_16_rd_table,
 	.volatile_table = &idio_16_rd_table,
@ -6,6 +6,7 @@
|
||||||
|
|
||||||
#define DEFAULT_SYMBOL_NAMESPACE "GPIO_IDIO_16"
|
#define DEFAULT_SYMBOL_NAMESPACE "GPIO_IDIO_16"
|
||||||
|
|
||||||
|
#include <linux/bitmap.h>
|
||||||
#include <linux/bits.h>
|
#include <linux/bits.h>
|
||||||
#include <linux/device.h>
|
#include <linux/device.h>
|
||||||
#include <linux/err.h>
|
#include <linux/err.h>
|
||||||
|
|
@ -107,6 +108,7 @@ int devm_idio_16_regmap_register(struct device *const dev,
|
||||||
struct idio_16_data *data;
|
struct idio_16_data *data;
|
||||||
struct regmap_irq_chip *chip;
|
struct regmap_irq_chip *chip;
|
||||||
struct regmap_irq_chip_data *chip_data;
|
struct regmap_irq_chip_data *chip_data;
|
||||||
|
DECLARE_BITMAP(fixed_direction_output, IDIO_16_NGPIO);
|
||||||
|
|
||||||
if (!config->parent)
|
if (!config->parent)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
@ -164,6 +166,9 @@ int devm_idio_16_regmap_register(struct device *const dev,
|
||||||
gpio_config.irq_domain = regmap_irq_get_domain(chip_data);
|
gpio_config.irq_domain = regmap_irq_get_domain(chip_data);
|
||||||
gpio_config.reg_mask_xlate = idio_16_reg_mask_xlate;
|
gpio_config.reg_mask_xlate = idio_16_reg_mask_xlate;
|
||||||
|
|
||||||
|
bitmap_from_u64(fixed_direction_output, GENMASK_U64(15, 0));
|
||||||
|
gpio_config.fixed_direction_output = fixed_direction_output;
|
||||||
|
|
||||||
return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &gpio_config));
|
return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &gpio_config));
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(devm_idio_16_regmap_register);
|
EXPORT_SYMBOL_GPL(devm_idio_16_regmap_register);
|
||||||

@@ -286,22 +286,14 @@ static void ljca_gpio_event_cb(void *context, u8 cmd, const void *evt_data,
 {
 	const struct ljca_gpio_packet *packet = evt_data;
 	struct ljca_gpio_dev *ljca_gpio = context;
-	int i, irq;
+	int i;
 
 	if (cmd != LJCA_GPIO_INT_EVENT)
 		return;
 
 	for (i = 0; i < packet->num; i++) {
-		irq = irq_find_mapping(ljca_gpio->gc.irq.domain,
-				       packet->item[i].index);
-		if (!irq) {
-			dev_err(ljca_gpio->gc.parent,
-				"gpio_id %u does not mapped to IRQ yet\n",
-				packet->item[i].index);
-			return;
-		}
-
-		generic_handle_domain_irq(ljca_gpio->gc.irq.domain, irq);
+		generic_handle_domain_irq(ljca_gpio->gc.irq.domain,
+					  packet->item[i].index);
 		set_bit(packet->item[i].index, ljca_gpio->reenable_irqs);
 	}
 
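
generic_handle_domain_irq() takes the hardware IRQ number and does the domain lookup itself, so the driver can drop the explicit irq_find_mapping() plus error branch — which, notably, returned early and silently dropped every later item in the packet when one line had no mapping yet. The shape of the change, reduced to a userspace sketch with an invented mapping table:

    #include <stdio.h>

    #define NGPIO 4
    static const int virq_map[NGPIO] = { 0, 11, 0, 13 };   /* 0 = not mapped */

    /* Lookup and dispatch in one helper: unmapped lines are skipped
     * instead of aborting the whole event loop.
     */
    static void handle_domain_irq(unsigned int hwirq)
    {
            if (hwirq >= NGPIO || !virq_map[hwirq])
                    return;
            printf("dispatch virq %d\n", virq_map[hwirq]);
    }

    int main(void)
    {
            handle_domain_irq(1);   /* dispatch virq 11 */
            handle_domain_irq(2);   /* quietly ignored   */
            handle_domain_irq(3);   /* dispatch virq 13 */
            return 0;
    }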

@@ -41,6 +41,7 @@ static const struct regmap_config idio_16_regmap_config = {
 	.reg_stride = 1,
 	.val_bits = 8,
 	.io_port = true,
+	.max_register = 0x7,
 	.wr_table = &idio_16_wr_table,
 	.rd_table = &idio_16_rd_table,
 	.volatile_table = &idio_16_rd_table,

@@ -31,6 +31,7 @@ struct gpio_regmap {
 	unsigned int reg_clr_base;
 	unsigned int reg_dir_in_base;
 	unsigned int reg_dir_out_base;
+	unsigned long *fixed_direction_output;
 
 #ifdef CONFIG_REGMAP_IRQ
 	int regmap_irq_line;

@@ -134,6 +135,13 @@ static int gpio_regmap_get_direction(struct gpio_chip *chip,
 	unsigned int base, val, reg, mask;
 	int invert, ret;
 
+	if (gpio->fixed_direction_output) {
+		if (test_bit(offset, gpio->fixed_direction_output))
+			return GPIO_LINE_DIRECTION_OUT;
+		else
+			return GPIO_LINE_DIRECTION_IN;
+	}
+
 	if (gpio->reg_dat_base && !gpio->reg_set_base)
 		return GPIO_LINE_DIRECTION_IN;
 	if (gpio->reg_set_base && !gpio->reg_dat_base)

@@ -284,6 +292,17 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
 		goto err_free_gpio;
 	}
 
+	if (config->fixed_direction_output) {
+		gpio->fixed_direction_output = bitmap_alloc(chip->ngpio,
+							    GFP_KERNEL);
+		if (!gpio->fixed_direction_output) {
+			ret = -ENOMEM;
+			goto err_free_gpio;
+		}
+		bitmap_copy(gpio->fixed_direction_output,
+			    config->fixed_direction_output, chip->ngpio);
+	}
+
 	/* if not set, assume there is only one register */
 	gpio->ngpio_per_reg = config->ngpio_per_reg;
 	if (!gpio->ngpio_per_reg)

@@ -300,7 +319,7 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
 
 	ret = gpiochip_add_data(chip, gpio);
 	if (ret < 0)
-		goto err_free_gpio;
+		goto err_free_bitmap;
 
 #ifdef CONFIG_REGMAP_IRQ
 	if (config->regmap_irq_chip) {

@@ -309,7 +328,7 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
 					config->regmap_irq_line, config->regmap_irq_flags,
 					0, config->regmap_irq_chip, &gpio->irq_chip_data);
 		if (ret)
-			goto err_free_gpio;
+			goto err_free_bitmap;
 
 		irq_domain = regmap_irq_get_domain(gpio->irq_chip_data);
 	} else

@@ -326,6 +345,8 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
 
 err_remove_gpiochip:
 	gpiochip_remove(chip);
+err_free_bitmap:
+	bitmap_free(gpio->fixed_direction_output);
 err_free_gpio:
 	kfree(gpio);
 	return ERR_PTR(ret);

@@ -344,6 +365,7 @@ void gpio_regmap_unregister(struct gpio_regmap *gpio)
 #endif
 
 	gpiochip_remove(&gpio->gpio_chip);
+	bitmap_free(gpio->fixed_direction_output);
 	kfree(gpio);
 }
 EXPORT_SYMBOL_GPL(gpio_regmap_unregister);
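
With the new bitmap allocation sitting in the middle of gpio_regmap_register(), later failures funnel through err_free_bitmap, which falls through into err_free_gpio; bitmap_free() tolerates a NULL pointer, so the label is safe even when no fixed-direction bitmap was requested. The unwind shape in a compilable userspace sketch (free() shares that NULL tolerance):

    #include <stdlib.h>

    struct chip { unsigned long *bitmap; };

    static struct chip *chip_register(int fail_later)
    {
            struct chip *c = calloc(1, sizeof(*c));
            if (!c)
                    return NULL;

            c->bitmap = calloc(1, sizeof(unsigned long));
            if (!c->bitmap)
                    goto err_free_chip;

            if (fail_later)           /* stands in for gpiochip_add_data() failing */
                    goto err_free_bitmap;

            return c;

    err_free_bitmap:
            free(c->bitmap);          /* free(NULL) would be a no-op too */
    err_free_chip:
            free(c);
            return NULL;
    }

    int main(void)
    {
            struct chip *c = chip_register(0);
            if (c) { free(c->bitmap); free(c); }
            chip_register(1);         /* exercises the unwind order */
            return 0;
    }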

@@ -291,6 +291,19 @@ acpi_gpio_to_gpiod_flags(const struct acpi_resource_gpio *agpio, int polarity)
 	return GPIOD_ASIS;
 }
 
+static void acpi_gpio_set_debounce_timeout(struct gpio_desc *desc,
+					   unsigned int acpi_debounce)
+{
+	int ret;
+
+	/* ACPI uses hundredths of milliseconds units */
+	acpi_debounce *= 10;
+	ret = gpio_set_debounce_timeout(desc, acpi_debounce);
+	if (ret)
+		gpiod_warn(desc, "Failed to set debounce-timeout %u: %d\n",
+			   acpi_debounce, ret);
+}
+
 static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
 						struct acpi_resource_gpio *agpio,
 						unsigned int index,
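
The new helper centralizes the unit conversion that was previously duplicated at both call sites: ACPI expresses debounce in hundredths of a millisecond, i.e. 10 µs units, while the GPIO core wants plain microseconds. The arithmetic on its own:

    #include <stdio.h>

    /* ACPI debounce is in hundredths of a millisecond (10 us units);
     * the GPIO core expects microseconds, hence the single *10.
     */
    static unsigned int acpi_debounce_to_us(unsigned int hundredths_of_ms)
    {
            return hundredths_of_ms * 10;
    }

    int main(void)
    {
            printf("%u us\n", acpi_debounce_to_us(500));   /* 5 ms -> 5000 us */
            return 0;
    }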

@@ -300,18 +313,12 @@ static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
 	enum gpiod_flags flags = acpi_gpio_to_gpiod_flags(agpio, polarity);
 	unsigned int pin = agpio->pin_table[index];
 	struct gpio_desc *desc;
-	int ret;
 
 	desc = gpiochip_request_own_desc(chip, pin, label, polarity, flags);
 	if (IS_ERR(desc))
 		return desc;
 
-	/* ACPI uses hundredths of milliseconds units */
-	ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout * 10);
-	if (ret)
-		dev_warn(chip->parent,
-			 "Failed to set debounce-timeout for pin 0x%04X, err %d\n",
-			 pin, ret);
+	acpi_gpio_set_debounce_timeout(desc, agpio->debounce_timeout);
 
 	return desc;
 }

@@ -375,8 +382,8 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
 	desc = acpi_request_own_gpiod(chip, agpio, 0, "ACPI:Event");
 	if (IS_ERR(desc)) {
 		dev_err(chip->parent,
-			"Failed to request GPIO for pin 0x%04X, err %ld\n",
-			pin, PTR_ERR(desc));
+			"Failed to request GPIO for pin 0x%04X, err %pe\n",
+			pin, desc);
 		return AE_OK;
 	}

@@ -944,7 +951,6 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode,
 	bool can_fallback = acpi_can_fallback_to_crs(adev, con_id);
 	struct acpi_gpio_info info = {};
 	struct gpio_desc *desc;
-	int ret;
 
 	desc = __acpi_find_gpio(fwnode, con_id, idx, can_fallback, &info);
 	if (IS_ERR(desc))

@@ -959,10 +965,7 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode,
 	acpi_gpio_update_gpiod_flags(dflags, &info);
 	acpi_gpio_update_gpiod_lookup_flags(lookupflags, &info);
 
-	/* ACPI uses hundredths of milliseconds units */
-	ret = gpio_set_debounce_timeout(desc, info.debounce * 10);
-	if (ret)
-		return ERR_PTR(ret);
+	acpi_gpio_set_debounce_timeout(desc, info.debounce);
 
 	return desc;
 }

@@ -551,13 +551,13 @@ static void schedule_dc_vmin_vmax(struct amdgpu_device *adev,
 			struct dc_stream_state *stream,
 			struct dc_crtc_timing_adjust *adjust)
 {
-	struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_KERNEL);
+	struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_NOWAIT);
 	if (!offload_work) {
 		drm_dbg_driver(adev_to_drm(adev), "Failed to allocate vupdate_offload_work\n");
 		return;
 	}
 
-	struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_KERNEL);
+	struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_NOWAIT);
 	if (!adjust_copy) {
 		drm_dbg_driver(adev_to_drm(adev), "Failed to allocate adjust_copy\n");
 		kfree(offload_work);

@@ -200,6 +200,9 @@ void dcn401_init_hw(struct dc *dc)
 		 */
 		struct dc_link *link = dc->links[i];
 
+		if (link->ep_type != DISPLAY_ENDPOINT_PHY)
+			continue;
+
 		link->link_enc->funcs->hw_init(link->link_enc);
 
 		/* Check for enabled DIG to identify enabled display */
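
The added check skips links whose endpoint is not a physical PHY before touching link_enc->funcs->hw_init, which presumably avoids dereferencing a link encoder that non-PHY endpoints (e.g. DPIA) do not carry. The filter shape as a tiny runnable sketch (types invented):

    #include <stdio.h>

    enum ep_type { ENDPOINT_PHY, ENDPOINT_DPIA };

    struct dc_link_stub { enum ep_type ep; const char *name; };

    int main(void)
    {
            struct dc_link_stub links[] = {
                    { ENDPOINT_PHY,  "link0" },
                    { ENDPOINT_DPIA, "link1" },   /* no encoder to init */
                    { ENDPOINT_PHY,  "link2" },
            };

            for (unsigned int i = 0; i < 3; i++) {
                    if (links[i].ep != ENDPOINT_PHY)
                            continue;             /* the guard from the hunk */
                    printf("hw_init %s\n", links[i].name);
            }
            return 0;
    }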

@@ -44,7 +44,13 @@
  */
 #define MAX_PIPES 6
 #define MAX_PHANTOM_PIPES (MAX_PIPES / 2)
-#define MAX_LINKS (MAX_PIPES * 2 +2)
+#define MAX_DPIA 6
+#define MAX_CONNECTOR 6
+#define MAX_VIRTUAL_LINKS 4
+
+#define MAX_LINKS (MAX_DPIA + MAX_CONNECTOR + MAX_VIRTUAL_LINKS)
+
 #define MAX_DIG_LINK_ENCODERS 7
 #define MAX_DWB_PIPES 1
 #define MAX_HPO_DP2_ENCODERS 4
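
Spelled out, the two budgets differ by two slots: MAX_PIPES * 2 + 2 = 14, while MAX_DPIA + MAX_CONNECTOR + MAX_VIRTUAL_LINKS = 6 + 6 + 4 = 16 — and the new form documents where each slot comes from instead of deriving link count from pipe count. Verified mechanically:

    #include <assert.h>

    #define MAX_PIPES 6
    #define MAX_DPIA 6
    #define MAX_CONNECTOR 6
    #define MAX_VIRTUAL_LINKS 4

    int main(void)
    {
            assert(MAX_PIPES * 2 + 2 == 14);                            /* old */
            assert(MAX_DPIA + MAX_CONNECTOR + MAX_VIRTUAL_LINKS == 16); /* new */
            return 0;
    }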

@@ -78,6 +78,7 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
 	struct audio_output audio_output[MAX_PIPES];
 	struct dc_stream_state *streams_on_link[MAX_PIPES];
 	int num_streams_on_link = 0;
+	struct dc *dc = (struct dc *)link->dc;
 
 	needs_divider_update = (link->dc->link_srv->dp_get_encoding_format(link_setting) !=
 			link->dc->link_srv->dp_get_encoding_format((const struct dc_link_settings *) &link->cur_link_settings));

@@ -150,7 +151,7 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
 			if (streams_on_link[i] && streams_on_link[i]->link && streams_on_link[i]->link == link) {
 				stream_update.stream = streams_on_link[i];
 				stream_update.dpms_off = &dpms_off;
-				dc_update_planes_and_stream(state->clk_mgr->ctx->dc, NULL, 0, streams_on_link[i], &stream_update);
+				dc_update_planes_and_stream(dc, NULL, 0, streams_on_link[i], &stream_update);
 			}
 		}
 	}

@@ -174,6 +174,33 @@ static void drm_panic_write_pixel24(void *vaddr, unsigned int offset, u32 color)
 	*p = color & 0xff;
 }
 
+/*
+ * Special case if the pixel crosses page boundaries
+ */
+static void drm_panic_write_pixel24_xpage(void *vaddr, struct page *next_page,
+					  unsigned int offset, u32 color)
+{
+	u8 *vaddr2;
+	u8 *p = vaddr + offset;
+
+	vaddr2 = kmap_local_page_try_from_panic(next_page);
+
+	*p++ = color & 0xff;
+	color >>= 8;
+
+	if (offset == PAGE_SIZE - 1)
+		p = vaddr2;
+
+	*p++ = color & 0xff;
+	color >>= 8;
+
+	if (offset == PAGE_SIZE - 2)
+		p = vaddr2;
+
+	*p = color & 0xff;
+	kunmap_local(vaddr2);
+}
+
 static void drm_panic_write_pixel32(void *vaddr, unsigned int offset, u32 color)
 {
 	u32 *p = vaddr + offset;
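
A 3-byte RGB888 pixel is the only case whose bytes can straddle a page boundary, which is why only cpp == 3 gets the _xpage variant below. The byte walk, shrunk to a runnable model with a toy page size (the kernel version additionally has to tolerate the panic-context kmap failing):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE 16   /* tiny "page" so the boundary is easy to see */

    /* Write a 3-byte pixel that may straddle two pages, switching to the
     * second buffer exactly when the cursor runs off the first one --
     * the same walk drm_panic_write_pixel24_xpage() performs.
     */
    static void write_pixel24_xpage(uint8_t *page0, uint8_t *page1,
                                    unsigned int offset, uint32_t color)
    {
            uint8_t *p = page0 + offset;

            for (int byte = 0; byte < 3; byte++) {
                    if (offset + byte == PAGE_SIZE)
                            p = page1;          /* crossed into the next page */
                    *p++ = color & 0xff;        /* low byte first, as above */
                    color >>= 8;
            }
    }

    int main(void)
    {
            uint8_t a[PAGE_SIZE] = {0}, b[PAGE_SIZE] = {0};

            write_pixel24_xpage(a, b, PAGE_SIZE - 2, 0x00336699);
            printf("a: %02x %02x  b: %02x\n",
                   a[PAGE_SIZE - 2], a[PAGE_SIZE - 1], b[0]);  /* 99 66 | 33 */
            return 0;
    }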

@@ -231,7 +258,14 @@ static void drm_panic_blit_page(struct page **pages, unsigned int dpitch,
 				page = new_page;
 				vaddr = kmap_local_page_try_from_panic(pages[page]);
 			}
-			if (vaddr)
+			if (!vaddr)
+				continue;
+
+			// Special case for 24bit, as a pixel might cross page boundaries
+			if (cpp == 3 && offset + 3 > PAGE_SIZE)
+				drm_panic_write_pixel24_xpage(vaddr, pages[page + 1],
+							      offset, fg32);
+			else
 				drm_panic_write_pixel(vaddr, offset, fg32, cpp);
 		}
 	}

@@ -321,6 +355,14 @@ static void drm_panic_fill_page(struct page **pages, unsigned int dpitch,
 				page = new_page;
 				vaddr = kmap_local_page_try_from_panic(pages[page]);
 			}
+			if (!vaddr)
+				continue;
+
+			// Special case for 24bit, as a pixel might cross page boundaries
+			if (cpp == 3 && offset + 3 > PAGE_SIZE)
+				drm_panic_write_pixel24_xpage(vaddr, pages[page + 1],
+							      offset, color);
+			else
 				drm_panic_write_pixel(vaddr, offset, color, cpp);
 		}
 	}

@@ -429,6 +471,9 @@ static void drm_panic_logo_rect(struct drm_rect *rect, const struct font_desc *f
 static void drm_panic_logo_draw(struct drm_scanout_buffer *sb, struct drm_rect *rect,
 				const struct font_desc *font, u32 fg_color)
 {
+	if (rect->x2 > sb->width || rect->y2 > sb->height)
+		return;
+
 	if (logo_mono)
 		drm_panic_blit(sb, rect, logo_mono->data,
 			       DIV_ROUND_UP(drm_rect_width(rect), 8), 1, fg_color);

@@ -477,7 +522,7 @@ static int draw_line_with_wrap(struct drm_scanout_buffer *sb, const struct font_
 			       struct drm_panic_line *line, int yoffset, u32 fg_color)
 {
 	int chars_per_row = sb->width / font->width;
-	struct drm_rect r_txt = DRM_RECT_INIT(0, yoffset, sb->width, sb->height);
+	struct drm_rect r_txt = DRM_RECT_INIT(0, yoffset, sb->width, font->height);
 	struct drm_panic_line line_wrap;
 
 	if (line->len > chars_per_row) {

@@ -520,7 +565,7 @@ static void draw_panic_static_kmsg(struct drm_scanout_buffer *sb)
 	struct drm_panic_line line;
 	int yoffset;
 
-	if (!font)
+	if (!font || font->width > sb->width)
 		return;
 
 	yoffset = sb->height - font->height - (sb->height % font->height) / 2;

@@ -733,7 +778,10 @@ static int _draw_panic_static_qr_code(struct drm_scanout_buffer *sb)
 	pr_debug("QR width %d and scale %d\n", qr_width, scale);
 	r_qr_canvas = DRM_RECT_INIT(0, 0, qr_canvas_width * scale, qr_canvas_width * scale);
 
-	v_margin = (sb->height - drm_rect_height(&r_qr_canvas) - drm_rect_height(&r_msg)) / 5;
+	v_margin = sb->height - drm_rect_height(&r_qr_canvas) - drm_rect_height(&r_msg);
+	if (v_margin < 0)
+		return -ENOSPC;
+	v_margin /= 5;
 
 	drm_rect_translate(&r_qr_canvas, (sb->width - r_qr_canvas.x2) / 2, 2 * v_margin);
 	r_qr = DRM_RECT_INIT(r_qr_canvas.x1 + QR_MARGIN * scale, r_qr_canvas.y1 + QR_MARGIN * scale,

@@ -746,7 +794,7 @@ static int _draw_panic_static_qr_code(struct drm_scanout_buffer *sb)
 	/* Fill with the background color, and draw text on top */
 	drm_panic_fill(sb, &r_screen, bg_color);
 
-	if (!drm_rect_overlap(&r_logo, &r_msg) && !drm_rect_overlap(&r_logo, &r_qr))
+	if (!drm_rect_overlap(&r_logo, &r_msg) && !drm_rect_overlap(&r_logo, &r_qr_canvas))
 		drm_panic_logo_draw(sb, &r_logo, font, fg_color);
 
 	draw_txt_rectangle(sb, font, panic_msg, panic_msg_lines, true, &r_msg, fg_color);
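
The margin fix computes the leftover vertical space first and bails out with -ENOSPC when the QR canvas plus message simply don't fit, instead of carrying a negative margin into the layout; only then is the remainder split into fifths as before. Reduced to the arithmetic (the choice of five slots follows the hunk above, not anything verified independently):

    #include <stdio.h>

    static int vertical_margin(int screen_h, int qr_h, int msg_h, int *margin)
    {
            int space = screen_h - qr_h - msg_h;

            if (space < 0)
                    return -28;        /* -ENOSPC: content doesn't fit */
            *margin = space / 5;       /* split leftover space into fifths */
            return 0;
    }

    int main(void)
    {
            int m;

            if (!vertical_margin(1080, 600, 200, &m))
                    printf("margin %d\n", m);          /* 56 */
            if (vertical_margin(480, 600, 200, &m))
                    printf("does not fit\n");
            return 0;
    }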

@@ -2117,6 +2117,7 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
 
 	intel_frontbuffer_put(intel_fb->frontbuffer);
 
+	kfree(intel_fb->panic);
 	kfree(intel_fb);
 }

@@ -2215,16 +2216,22 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 	struct intel_display *display = to_intel_display(obj->dev);
 	struct drm_framebuffer *fb = &intel_fb->base;
 	u32 max_stride;
-	int ret = -EINVAL;
+	int ret;
 	int i;
 
+	intel_fb->panic = intel_panic_alloc();
+	if (!intel_fb->panic)
+		return -ENOMEM;
+
 	/*
 	 * intel_frontbuffer_get() must be done before
 	 * intel_fb_bo_framebuffer_init() to avoid set_tiling vs. addfb race.
 	 */
 	intel_fb->frontbuffer = intel_frontbuffer_get(obj);
-	if (!intel_fb->frontbuffer)
-		return -ENOMEM;
+	if (!intel_fb->frontbuffer) {
+		ret = -ENOMEM;
+		goto err_free_panic;
+	}
 
 	ret = intel_fb_bo_framebuffer_init(fb, obj, mode_cmd);
 	if (ret)

@@ -2323,6 +2330,9 @@ err_bo_framebuffer_fini:
 	intel_fb_bo_framebuffer_fini(obj);
 err_frontbuffer_put:
 	intel_frontbuffer_put(intel_fb->frontbuffer);
+err_free_panic:
+	kfree(intel_fb->panic);
+
 	return ret;
 }

@@ -2349,20 +2359,11 @@ intel_user_framebuffer_create(struct drm_device *dev,
 struct intel_framebuffer *intel_framebuffer_alloc(void)
 {
 	struct intel_framebuffer *intel_fb;
-	struct intel_panic *panic;
 
 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
 	if (!intel_fb)
 		return NULL;
 
-	panic = intel_panic_alloc();
-	if (!panic) {
-		kfree(intel_fb);
-		return NULL;
-	}
-
-	intel_fb->panic = panic;
-
 	return intel_fb;
 }

@@ -1175,10 +1175,14 @@ panthor_vm_op_ctx_prealloc_vmas(struct panthor_vm_op_ctx *op_ctx)
 		break;
 
 	case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
-		/* Partial unmaps might trigger a remap with either a prev or a next VA,
-		 * but not both.
+		/* Two VMAs can be needed for an unmap, as an unmap can happen
+		 * in the middle of a drm_gpuva, requiring a remap with both
+		 * prev & next VA. Or an unmap can span more than one drm_gpuva
+		 * where the first and last ones are covered partially, requiring
+		 * a remap for the first with a prev VA and remap for the last
+		 * with a next VA.
 		 */
-		vma_count = 1;
+		vma_count = 2;
 		break;
 
 	default:
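
The corrected comment is worth a worked example: with mappings [0,50) and [50,100), unmapping [30,70) partially covers both, so the first needs a remap keeping its prev remainder and the second a remap keeping its next remainder — two preallocated VMAs, not one. In runnable form:

    #include <stdio.h>

    int main(void)
    {
            int unmap_start = 30, unmap_end = 70;
            int va[2][2] = { {0, 50}, {50, 100} };   /* two adjacent mappings */

            for (int i = 0; i < 2; i++) {
                    if (va[i][0] < unmap_start && va[i][1] > unmap_start)
                            printf("va[%d]: keep prev [%d,%d)\n",
                                   i, va[i][0], unmap_start);
                    if (va[i][0] < unmap_end && va[i][1] > unmap_end)
                            printf("va[%d]: keep next [%d,%d)\n",
                                   i, unmap_end, va[i][1]);
            }
            return 0;   /* prints one prev and one next remainder */
    }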

@@ -361,7 +361,7 @@ static void dw_hdmi_rk3228_setup_hpd(struct dw_hdmi *dw_hdmi, void *data)
 
 	regmap_write(hdmi->regmap, RK3228_GRF_SOC_CON2,
 		     FIELD_PREP_WM16(RK3228_HDMI_SDAIN_MSK, 1) |
-		     FIELD_PREP_WM16(RK3328_HDMI_SCLIN_MSK, 1));
+		     FIELD_PREP_WM16(RK3228_HDMI_SCLIN_MSK, 1));
 }
 
 static enum drm_connector_status

@@ -292,6 +292,9 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
 	ggtt->pt_ops = &xelp_pt_ops;
 
 	ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, WQ_MEM_RECLAIM);
+	if (!ggtt->wq)
+		return -ENOMEM;
+
 	__xe_ggtt_init_early(ggtt, xe_wopcm_size(xe));
 
 	err = drmm_add_action_or_reset(&xe->drm, ggtt_fini_early, ggtt);

@@ -2022,7 +2022,7 @@ static int op_prepare(struct xe_vm *vm,
 	case DRM_GPUVA_OP_MAP:
 		if ((!op->map.immediate && xe_vm_in_fault_mode(vm) &&
 		     !op->map.invalidate_on_bind) ||
-		    op->map.is_cpu_addr_mirror)
+		    (op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR))
 			break;
 
 		err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma,

@@ -2252,7 +2252,7 @@ static void op_commit(struct xe_vm *vm,
 	switch (op->base.op) {
 	case DRM_GPUVA_OP_MAP:
 		if ((!op->map.immediate && xe_vm_in_fault_mode(vm)) ||
-		    op->map.is_cpu_addr_mirror)
+		    (op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR))
 			break;
 
 		bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence,

@@ -302,6 +302,11 @@ static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 range_start, u64
 	if (!vma)
 		return -EINVAL;
 
+	if (!(vma->gpuva.flags & XE_VMA_MADV_AUTORESET)) {
+		drm_dbg(&vm->xe->drm, "Skipping madvise reset for vma.\n");
+		return 0;
+	}
+
 	if (xe_vma_has_default_mem_attrs(vma))
 		return 0;

@@ -616,6 +616,13 @@ static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask,
 		vops->pt_update_ops[i].num_ops += inc_val;
 }
 
+#define XE_VMA_CREATE_MASK ( \
+	XE_VMA_READ_ONLY | \
+	XE_VMA_DUMPABLE | \
+	XE_VMA_SYSTEM_ALLOCATOR | \
+	DRM_GPUVA_SPARSE | \
+	XE_VMA_MADV_AUTORESET)
+
 static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
 				  u8 tile_mask)
 {
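
XE_VMA_CREATE_MASK is what lets the rest of this series replace four independent bools with a single flags word that can be copied wholesale from an existing VMA and filtered down to just the creation-relevant bits. The idiom, with illustrative bit positions rather than the driver's real ones:

    #include <stdio.h>

    #define F_READ_ONLY     (1u << 0)
    #define F_DUMPABLE      (1u << 1)
    #define F_SPARSE        (1u << 2)
    #define F_SYSTEM_ALLOC  (1u << 3)
    #define F_INTERNAL      (1u << 8)   /* deliberately not in the mask */

    #define CREATE_MASK (F_READ_ONLY | F_DUMPABLE | F_SPARSE | F_SYSTEM_ALLOC)

    int main(void)
    {
            unsigned int vma_flags = F_READ_ONLY | F_SYSTEM_ALLOC | F_INTERNAL;
            unsigned int create = vma_flags & CREATE_MASK;   /* drops F_INTERNAL */

            printf("create flags: %#x\n", create);           /* 0x9 */
            return 0;
    }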

@@ -628,8 +635,7 @@ static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
 	op->base.map.gem.offset = vma->gpuva.gem.offset;
 	op->map.vma = vma;
 	op->map.immediate = true;
-	op->map.dumpable = vma->gpuva.flags & XE_VMA_DUMPABLE;
-	op->map.is_null = xe_vma_is_null(vma);
+	op->map.vma_flags = vma->gpuva.flags & XE_VMA_CREATE_MASK;
 }
 
 static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma,

@@ -932,11 +938,6 @@ static void xe_vma_free(struct xe_vma *vma)
 	kfree(vma);
 }
 
-#define VMA_CREATE_FLAG_READ_ONLY		BIT(0)
-#define VMA_CREATE_FLAG_IS_NULL			BIT(1)
-#define VMA_CREATE_FLAG_DUMPABLE		BIT(2)
-#define VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR	BIT(3)
-
 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
 				    struct xe_bo *bo,
 				    u64 bo_offset_or_userptr,

@@ -947,11 +948,8 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
 	struct xe_vma *vma;
 	struct xe_tile *tile;
 	u8 id;
-	bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
-	bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
-	bool dumpable = (flags & VMA_CREATE_FLAG_DUMPABLE);
-	bool is_cpu_addr_mirror =
-		(flags & VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR);
+	bool is_null = (flags & DRM_GPUVA_SPARSE);
+	bool is_cpu_addr_mirror = (flags & XE_VMA_SYSTEM_ALLOCATOR);
 
 	xe_assert(vm->xe, start < end);
 	xe_assert(vm->xe, end < vm->size);

@@ -972,10 +970,6 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
 		if (!vma)
 			return ERR_PTR(-ENOMEM);
 
-		if (is_cpu_addr_mirror)
-			vma->gpuva.flags |= XE_VMA_SYSTEM_ALLOCATOR;
-		if (is_null)
-			vma->gpuva.flags |= DRM_GPUVA_SPARSE;
 		if (bo)
 			vma->gpuva.gem.obj = &bo->ttm.base;
 	}

@@ -986,10 +980,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
 	vma->gpuva.vm = &vm->gpuvm;
 	vma->gpuva.va.addr = start;
 	vma->gpuva.va.range = end - start + 1;
-	if (read_only)
-		vma->gpuva.flags |= XE_VMA_READ_ONLY;
-	if (dumpable)
-		vma->gpuva.flags |= XE_VMA_DUMPABLE;
+	vma->gpuva.flags = flags;
 
 	for_each_tile(tile, vm->xe, id)
 		vma->tile_mask |= 0x1 << id;

@@ -2272,12 +2263,16 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
 		if (__op->op == DRM_GPUVA_OP_MAP) {
 			op->map.immediate =
 				flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
-			op->map.read_only =
-				flags & DRM_XE_VM_BIND_FLAG_READONLY;
-			op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
-			op->map.is_cpu_addr_mirror = flags &
-				DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR;
-			op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
+			if (flags & DRM_XE_VM_BIND_FLAG_READONLY)
+				op->map.vma_flags |= XE_VMA_READ_ONLY;
+			if (flags & DRM_XE_VM_BIND_FLAG_NULL)
+				op->map.vma_flags |= DRM_GPUVA_SPARSE;
+			if (flags & DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR)
+				op->map.vma_flags |= XE_VMA_SYSTEM_ALLOCATOR;
+			if (flags & DRM_XE_VM_BIND_FLAG_DUMPABLE)
+				op->map.vma_flags |= XE_VMA_DUMPABLE;
+			if (flags & DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET)
+				op->map.vma_flags |= XE_VMA_MADV_AUTORESET;
 			op->map.pat_index = pat_index;
 			op->map.invalidate_on_bind =
 				__xe_vm_needs_clear_scratch_pages(vm, flags);

@@ -2590,14 +2585,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
 				.pat_index = op->map.pat_index,
 			};
 
-			flags |= op->map.read_only ?
-				VMA_CREATE_FLAG_READ_ONLY : 0;
-			flags |= op->map.is_null ?
-				VMA_CREATE_FLAG_IS_NULL : 0;
-			flags |= op->map.dumpable ?
-				VMA_CREATE_FLAG_DUMPABLE : 0;
-			flags |= op->map.is_cpu_addr_mirror ?
-				VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
+			flags |= op->map.vma_flags & XE_VMA_CREATE_MASK;
 
 			vma = new_vma(vm, &op->base.map, &default_attr,
 				      flags);

@@ -2606,7 +2594,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
 
 			op->map.vma = vma;
 			if (((op->map.immediate || !xe_vm_in_fault_mode(vm)) &&
-			     !op->map.is_cpu_addr_mirror) ||
+			     !(op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR)) ||
 			    op->map.invalidate_on_bind)
 				xe_vma_ops_incr_pt_update_ops(vops,
 							      op->tile_mask, 1);

@@ -2637,18 +2625,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
 			op->remap.start = xe_vma_start(old);
 			op->remap.range = xe_vma_size(old);
 
-			flags |= op->base.remap.unmap->va->flags &
-				XE_VMA_READ_ONLY ?
-				VMA_CREATE_FLAG_READ_ONLY : 0;
-			flags |= op->base.remap.unmap->va->flags &
-				DRM_GPUVA_SPARSE ?
-				VMA_CREATE_FLAG_IS_NULL : 0;
-			flags |= op->base.remap.unmap->va->flags &
-				XE_VMA_DUMPABLE ?
-				VMA_CREATE_FLAG_DUMPABLE : 0;
-			flags |= xe_vma_is_cpu_addr_mirror(old) ?
-				VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
+			flags |= op->base.remap.unmap->va->flags & XE_VMA_CREATE_MASK;
 
 			if (op->base.remap.prev) {
 				vma = new_vma(vm, op->base.remap.prev,
 					      &old->attr, flags);

@@ -3279,7 +3256,8 @@ ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_execute, ERRNO);
 	 DRM_XE_VM_BIND_FLAG_NULL | \
 	 DRM_XE_VM_BIND_FLAG_DUMPABLE | \
 	 DRM_XE_VM_BIND_FLAG_CHECK_PXP | \
-	 DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR)
+	 DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR | \
+	 DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET)
 
 #ifdef TEST_VM_OPS_ERROR
 #define SUPPORTED_FLAGS	(SUPPORTED_FLAGS_STUB | FORCE_OP_ERROR)

@@ -3394,7 +3372,9 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
 		    XE_IOCTL_DBG(xe, (prefetch_region != DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC &&
 				      !(BIT(prefetch_region) & xe->info.mem_region_mask))) ||
 		    XE_IOCTL_DBG(xe, obj &&
-				 op == DRM_XE_VM_BIND_OP_UNMAP)) {
+				 op == DRM_XE_VM_BIND_OP_UNMAP) ||
+		    XE_IOCTL_DBG(xe, (flags & DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET) &&
+				 (!is_cpu_addr_mirror || op != DRM_XE_VM_BIND_OP_MAP))) {
 			err = -EINVAL;
 			goto free_bind_ops;
 		}

@@ -4212,7 +4192,7 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
 	struct xe_vma_ops vops;
 	struct drm_gpuva_ops *ops = NULL;
 	struct drm_gpuva_op *__op;
-	bool is_cpu_addr_mirror = false;
+	unsigned int vma_flags = 0;
 	bool remap_op = false;
 	struct xe_vma_mem_attr tmp_attr;
 	u16 default_pat;

@@ -4242,15 +4222,17 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
 				vma = gpuva_to_vma(op->base.unmap.va);
 				XE_WARN_ON(!xe_vma_has_default_mem_attrs(vma));
 				default_pat = vma->attr.default_pat_index;
+				vma_flags = vma->gpuva.flags;
 			}
 
 			if (__op->op == DRM_GPUVA_OP_REMAP) {
 				vma = gpuva_to_vma(op->base.remap.unmap->va);
 				default_pat = vma->attr.default_pat_index;
+				vma_flags = vma->gpuva.flags;
 			}
 
 			if (__op->op == DRM_GPUVA_OP_MAP) {
-				op->map.is_cpu_addr_mirror = true;
+				op->map.vma_flags |= vma_flags & XE_VMA_CREATE_MASK;
 				op->map.pat_index = default_pat;
 			}
 		} else {

@@ -4259,11 +4241,7 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
 				xe_assert(vm->xe, !remap_op);
 				xe_assert(vm->xe, xe_vma_has_no_bo(vma));
 				remap_op = true;
-				if (xe_vma_is_cpu_addr_mirror(vma))
-					is_cpu_addr_mirror = true;
-				else
-					is_cpu_addr_mirror = false;
+				vma_flags = vma->gpuva.flags;
 			}

@@ -4272,10 +4250,10 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
 			if (__op->op == DRM_GPUVA_OP_MAP) {
 				/*
 				 * In case of madvise ops DRM_GPUVA_OP_MAP is
 				 * always after DRM_GPUVA_OP_REMAP, so ensure
-				 * we assign op->map.is_cpu_addr_mirror true
-				 * if REMAP is for xe_vma_is_cpu_addr_mirror vma
+				 * to propagate the flags from the vma we're
+				 * unmapping.
 				 */
-				op->map.is_cpu_addr_mirror = is_cpu_addr_mirror;
+				op->map.vma_flags |= vma_flags & XE_VMA_CREATE_MASK;
 			}
 		}
 		print_op(vm->xe, __op);

@@ -46,6 +46,7 @@ struct xe_vm_pgtable_update_op;
 #define XE_VMA_PTE_COMPACT	(DRM_GPUVA_USERBITS << 7)
 #define XE_VMA_DUMPABLE		(DRM_GPUVA_USERBITS << 8)
 #define XE_VMA_SYSTEM_ALLOCATOR	(DRM_GPUVA_USERBITS << 9)
+#define XE_VMA_MADV_AUTORESET	(DRM_GPUVA_USERBITS << 10)
 
 /**
  * struct xe_vma_mem_attr - memory attributes associated with vma

@@ -345,17 +346,10 @@ struct xe_vm {
 struct xe_vma_op_map {
 	/** @vma: VMA to map */
 	struct xe_vma *vma;
+	unsigned int vma_flags;
 	/** @immediate: Immediate bind */
 	bool immediate;
-	/** @read_only: Read only */
-	bool read_only;
-	/** @is_null: is NULL binding */
-	bool is_null;
-	/** @is_cpu_addr_mirror: is CPU address mirror binding */
-	bool is_cpu_addr_mirror;
-	/** @dumpable: whether BO is dumped on GPU hang */
-	bool dumpable;
 	/** @invalidate: invalidate the VMA before bind */
 	bool invalidate_on_bind;
 	/** @pat_index: The pat index to use for this operation. */
 	u16 pat_index;

@@ -107,6 +107,9 @@ static int cgbc_hwmon_probe_sensors(struct device *dev, struct cgbc_hwmon_data *
 	nb_sensors = data[0];
 
 	hwmon->sensors = devm_kzalloc(dev, sizeof(*hwmon->sensors) * nb_sensors, GFP_KERNEL);
+	if (!hwmon->sensors)
+		return -ENOMEM;
+
 	sensor = hwmon->sensors;
 
 	for (i = 0; i < nb_sensors; i++) {
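
Like the xe-ggtt workqueue hunk earlier, this one closes a classic gap: an allocation's result was used without a NULL check. The rule of thumb — every allocator that can fail must be checked before the first dereference — in minimal form:

    #include <stdio.h>
    #include <stdlib.h>

    struct sensor { int id; };

    static struct sensor *probe_sensors(size_t n)
    {
            struct sensor *s = calloc(n, sizeof(*s));

            if (!s)
                    return NULL;   /* caller turns this into -ENOMEM */
            s[0].id = 1;           /* only safe after the check */
            return s;
    }

    int main(void)
    {
            struct sensor *s = probe_sensors(4);

            if (!s)
                    return 1;
            printf("sensor %d\n", s[0].id);
            free(s);
            return 0;
    }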