Merge tag 'v6.17-rc3' into togreg

Linux 6.17-rc3

-----BEGIN PGP SIGNATURE-----

iQFSBAABCgA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAmirN/weHHRvcnZhbGRz
QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiGqaYH/2+gqJMccbmCUiHq
glXmEkk89bXvbFHvlYM9kARPTBBsLGYTYXEpsgswTeugSPum9ZgSW/7/sU+joxBn
LCdo+VSzDXh3oL3+/z+iUh5pmyN6yFe+j5cXa4t6vS9OYQfNuck2hxapkLb9uJ3H
7W6CT7XzAT3FO9oLHQCCIUe6HdR6CXw7UK0nEzChCntL5tfaV/+rY06xRC6ZOsAK
IFc8AmRI5nH4eDWcCwrslcbVBeYlCtFHfdC++xNpPNs3AwSvTkZIkM/2lluY0xoW
AKpejS9tOTh9dWxxfuZDHAvbnn2ddIUsCBO1CPBMnc6L3Ca6IizIZXWwe6/IoQ1k
OWQzVgM=
=IqRh
-----END PGP SIGNATURE-----

commit 421d4487ef

.mailmap (2 changed lines)
@@ -226,6 +226,8 @@ Domen Puncer <domen@coderock.org>
 Douglas Gilbert <dougg@torque.net>
 Drew Fustini <fustini@kernel.org> <drew@pdp7.com>
 <duje@dujemihanovic.xyz> <duje.mihanovic@skole.hr>
+Easwar Hariharan <easwar.hariharan@linux.microsoft.com> <easwar.hariharan@intel.com>
+Easwar Hariharan <easwar.hariharan@linux.microsoft.com> <eahariha@linux.microsoft.com>
 Ed L. Cashin <ecashin@coraid.com>
 Elliot Berman <quic_eberman@quicinc.com> <eberman@codeaurora.org>
 Enric Balletbo i Serra <eballetbo@kernel.org> <enric.balletbo@collabora.com>
@@ -731,7 +731,7 @@ Contact: linux-block@vger.kernel.org
 Description:
 [RW] If the device is registered for writeback throttling, then
 this file shows the target minimum read latency. If this latency
-is exceeded in a given window of time (see wb_window_usec), then
+is exceeded in a given window of time (see curr_win_nsec), then
 the writeback throttling will start scaling back writes. Writing
 a value of '0' to this file disables the feature. Writing a
 value of '-1' to this file resets the value to the default
@@ -79,7 +79,7 @@ zone_capacity_mb Device zone capacity (must always be equal to or lower than
 the zone size. Default: zone size.
 conv_zones Total number of conventioanl zones starting from sector 0.
 Default: 8.
-base_dir Path to the base directoy where to create the directory
+base_dir Path to the base directory where to create the directory
 containing the zone files of the device.
 Default=/var/local/zloop.
 The device directory containing the zone files is always
@@ -435,8 +435,8 @@ both cgroups.
 Controlling Controllers
 -----------------------
 
-Availablity
-~~~~~~~~~~~
+Availability
+~~~~~~~~~~~~
 
 A controller is available in a cgroup when it is supported by the kernel (i.e.,
 compiled in, not disabled and not attached to a v1 hierarchy) and listed in the
@@ -214,7 +214,7 @@ Spectre_v1 X
 Spectre_v2 X X
 Spectre_v2_user X X * (Note 1)
 SRBDS X X X X
-SRSO X X
+SRSO X X X X
 SSB (Note 4)
 TAA X X X X * (Note 2)
 TSA X X X X
@@ -76,20 +76,21 @@ unit as preprocessor statement. The above example would then read::
 within the corresponding compilation unit before the #include for
 <linux/export.h>. Typically it's placed before the first #include statement.
 
-Using the EXPORT_SYMBOL_GPL_FOR_MODULES() macro
------------------------------------------------
+Using the EXPORT_SYMBOL_FOR_MODULES() macro
+-------------------------------------------
 
 Symbols exported using this macro are put into a module namespace. This
-namespace cannot be imported.
+namespace cannot be imported. These exports are GPL-only as they are only
+intended for in-tree modules.
 
 The macro takes a comma separated list of module names, allowing only those
 modules to access this symbol. Simple tail-globs are supported.
 
 For example::
 
-  EXPORT_SYMBOL_GPL_FOR_MODULES(preempt_notifier_inc, "kvm,kvm-*")
+  EXPORT_SYMBOL_FOR_MODULES(preempt_notifier_inc, "kvm,kvm-*")
 
-will limit usage of this symbol to modules whoes name matches the given
+will limit usage of this symbol to modules whose name matches the given
 patterns.
 
 How to use Symbols exported in Namespaces
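For context, a minimal sketch of how such a restricted export might look at the definition site. The helper name below is hypothetical; only the EXPORT_SYMBOL_FOR_MODULES() macro and its argument form come from the documentation above.

#include <linux/export.h>
#include <linux/module.h>

int my_core_helper(void)
{
	return 42;
}
/* Usable only by the "kvm" module and modules whose names match "kvm-*". */
EXPORT_SYMBOL_FOR_MODULES(my_core_helper, "kvm,kvm-*");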
@@ -62,11 +62,13 @@ properties:
 items:
 - description: GMAC main clock
 - description: Peripheral registers interface clock
+- description: APB glue registers interface clock
 
 clock-names:
 items:
 - const: stmmaceth
 - const: pclk
+- const: apb
 
 interrupts:
 items:

@@ -88,8 +90,8 @@ examples:
 compatible = "thead,th1520-gmac", "snps,dwmac-3.70a";
 reg = <0xe7070000 0x2000>, <0xec003000 0x1000>;
 reg-names = "dwmac", "apb";
-clocks = <&clk 1>, <&clk 2>;
-clock-names = "stmmaceth", "pclk";
+clocks = <&clk 1>, <&clk 2>, <&clk 3>;
+clock-names = "stmmaceth", "pclk", "apb";
 interrupts = <66>;
 interrupt-names = "macirq";
 phy-mode = "rgmii-id";
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Infineon Buck Regulators with PMBUS interfaces
 
 maintainers:
-  - Not Me.
+  - Guenter Roeck <linux@roeck-us.net>
 
 allOf:
   - $ref: regulator.yaml#
@@ -1420,7 +1420,7 @@ udp_hash_entries - INTEGER
 A negative value means the networking namespace does not own its
 hash buckets and shares the initial networking namespace's one.
 
-udp_child_ehash_entries - INTEGER
+udp_child_hash_entries - INTEGER
 Control the number of hash buckets for UDP sockets in the child
 networking namespace, which must be set before clone() or unshare().
@@ -12,6 +12,8 @@ add_addr_timeout - INTEGER (seconds)
 resent to an MPTCP peer that has not acknowledged a previous
 ADD_ADDR message.
 
+Do not retransmit if set to 0.
+
 The default value matches TCP_RTO_MAX. This is a per-namespace
 sysctl.
@@ -8,8 +8,22 @@ like to know when a security bug is found so that it can be fixed and
 disclosed as quickly as possible. Please report security bugs to the
 Linux kernel security team.
 
-Contact
--------
+The security team and maintainers almost always require additional
+information beyond what was initially provided in a report and rely on
+active and efficient collaboration with the reporter to perform further
+testing (e.g., verifying versions, configuration options, mitigations, or
+patches). Before contacting the security team, the reporter must ensure
+they are available to explain their findings, engage in discussions, and
+run additional tests. Reports where the reporter does not respond promptly
+or cannot effectively discuss their findings may be abandoned if the
+communication does not quickly improve.
+
+As it is with any bug, the more information provided the easier it
+will be to diagnose and fix. Please review the procedure outlined in
+'Documentation/admin-guide/reporting-issues.rst' if you are unclear about what
+information is helpful. Any exploit code is very helpful and will not
+be released without consent from the reporter unless it has already been
+made public.
 
 The Linux kernel security team can be contacted by email at
 <security@kernel.org>. This is a private list of security officers

@@ -19,13 +33,6 @@ that can speed up the process considerably. It is possible that the
 security team will bring in extra help from area maintainers to
 understand and fix the security vulnerability.
 
-As it is with any bug, the more information provided the easier it
-will be to diagnose and fix. Please review the procedure outlined in
-'Documentation/admin-guide/reporting-issues.rst' if you are unclear about what
-information is helpful. Any exploit code is very helpful and will not
-be released without consent from the reporter unless it has already been
-made public.
-
 Please send plain text emails without attachments where possible.
 It is much harder to have a context-quoted discussion about a complex
 issue if all the details are hidden away in attachments. Think of it like a
@@ -43,7 +43,7 @@ Following IOMMUFD objects are exposed to userspace:
 
 - IOMMUFD_OBJ_HWPT_PAGING, representing an actual hardware I/O page table
 (i.e. a single struct iommu_domain) managed by the iommu driver. "PAGING"
-primarly indicates this type of HWPT should be linked to an IOAS. It also
+primarily indicates this type of HWPT should be linked to an IOAS. It also
 indicates that it is backed by an iommu_domain with __IOMMU_DOMAIN_PAGING
 feature flag. This can be either an UNMANAGED stage-1 domain for a device
 running in the user space, or a nesting parent stage-2 domain for mappings

@@ -76,7 +76,7 @@ Following IOMMUFD objects are exposed to userspace:
 
 * Security namespace for guest owned ID, e.g. guest-controlled cache tags
 * Non-device-affiliated event reporting, e.g. invalidation queue errors
-* Access to a sharable nesting parent pagetable across physical IOMMUs
+* Access to a shareable nesting parent pagetable across physical IOMMUs
 * Virtualization of various platforms IDs, e.g. RIDs and others
 * Delivery of paravirtualized invalidation
 * Direct assigned invalidation queues
MAINTAINERS (48 changed lines)

@@ -8431,6 +8431,17 @@ T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
 F: drivers/gpu/drm/scheduler/
 F: include/drm/gpu_scheduler.h
 
+DRM GPUVM
+M: Danilo Krummrich <dakr@kernel.org>
+R: Matthew Brost <matthew.brost@intel.com>
+R: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+R: Alice Ryhl <aliceryhl@google.com>
+L: dri-devel@lists.freedesktop.org
+S: Supported
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
+F: drivers/gpu/drm/drm_gpuvm.c
+F: include/drm/drm_gpuvm.h
+
 DRM LOG
 M: Jocelyn Falempe <jfalempe@redhat.com>
 M: Javier Martinez Canillas <javierm@redhat.com>

@@ -10660,7 +10671,8 @@ S: Maintained
 F: block/partitions/efi.*
 
 HABANALABS PCI DRIVER
-M: Yaron Avizrat <yaron.avizrat@intel.com>
+M: Koby Elbaz <koby.elbaz@intel.com>
+M: Konstantin Sinyuk <konstantin.sinyuk@intel.com>
 L: dri-devel@lists.freedesktop.org
 S: Supported
 C: irc://irc.oftc.net/dri-devel

@@ -11018,7 +11030,7 @@ F: Documentation/admin-guide/perf/hns3-pmu.rst
 F: drivers/perf/hisilicon/hns3_pmu.c
 
 HISILICON I2C CONTROLLER DRIVER
-M: Yicong Yang <yangyicong@hisilicon.com>
+M: Devyn Liu <liudingyuan@h-partners.com>
 L: linux-i2c@vger.kernel.org
 S: Maintained
 W: https://www.hisilicon.com

@@ -11443,6 +11455,7 @@ F: drivers/tty/hvc/
 HUNG TASK DETECTOR
 M: Andrew Morton <akpm@linux-foundation.org>
 R: Lance Yang <lance.yang@linux.dev>
+R: Masami Hiramatsu <mhiramat@kernel.org>
 L: linux-kernel@vger.kernel.org
 S: Maintained
 F: include/linux/hung_task.h

@@ -12293,7 +12306,6 @@ F: include/linux/avf/virtchnl.h
 F: include/linux/net/intel/*/
 
 INTEL ETHERNET PROTOCOL DRIVER FOR RDMA
-M: Mustafa Ismail <mustafa.ismail@intel.com>
 M: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
 L: linux-rdma@vger.kernel.org
 S: Supported

@@ -12596,10 +12608,9 @@ S: Supported
 F: drivers/cpufreq/intel_pstate.c
 
 INTEL PTP DFL ToD DRIVER
-M: Tianfei Zhang <tianfei.zhang@intel.com>
 L: linux-fpga@vger.kernel.org
 L: netdev@vger.kernel.org
-S: Maintained
+S: Orphan
 F: drivers/ptp/ptp_dfl_tod.c
 
 INTEL QUADRATURE ENCODER PERIPHERAL DRIVER

@@ -12737,9 +12748,8 @@ S: Maintained
 F: drivers/platform/x86/intel/wmi/thunderbolt.c
 
 INTEL WWAN IOSM DRIVER
-M: M Chetan Kumar <m.chetan.kumar@intel.com>
 L: netdev@vger.kernel.org
-S: Maintained
+S: Orphan
 F: drivers/net/wwan/iosm/
 
 INTEL(R) FLEXIBLE RETURN AND EVENT DELIVERY

@@ -13699,7 +13709,6 @@ F: scripts/Makefile.kmsan
 
 KPROBES
 M: Naveen N Rao <naveen@kernel.org>
-M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 M: "David S. Miller" <davem@davemloft.net>
 M: Masami Hiramatsu <mhiramat@kernel.org>
 L: linux-kernel@vger.kernel.org

@@ -15692,7 +15701,6 @@ MEDIATEK T7XX 5G WWAN MODEM DRIVER
 M: Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
 R: Chiranjeevi Rapolu <chiranjeevi.rapolu@linux.intel.com>
 R: Liu Haijun <haijun.liu@mediatek.com>
-R: M Chetan Kumar <m.chetan.kumar@linux.intel.com>
 R: Ricardo Martinez <ricardo.martinez@linux.intel.com>
 L: netdev@vger.kernel.org
 S: Supported

@@ -16079,6 +16087,23 @@ F: mm/mempolicy.c
 F: mm/migrate.c
 F: mm/migrate_device.c
 
+MEMORY MANAGEMENT - MGLRU (MULTI-GEN LRU)
+M: Andrew Morton <akpm@linux-foundation.org>
+M: Axel Rasmussen <axelrasmussen@google.com>
+M: Yuanchu Xie <yuanchu@google.com>
+R: Wei Xu <weixugc@google.com>
+L: linux-mm@kvack.org
+S: Maintained
+W: http://www.linux-mm.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
+F: Documentation/admin-guide/mm/multigen_lru.rst
+F: Documentation/mm/multigen_lru.rst
+F: include/linux/mm_inline.h
+F: include/linux/mmzone.h
+F: mm/swap.c
+F: mm/vmscan.c
+F: mm/workingset.c
+
 MEMORY MANAGEMENT - MISC
 M: Andrew Morton <akpm@linux-foundation.org>
 M: David Hildenbrand <david@redhat.com>

@@ -16269,8 +16294,10 @@ S: Maintained
 W: http://www.linux-mm.org
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
 F: rust/helpers/mm.c
+F: rust/helpers/page.c
 F: rust/kernel/mm.rs
 F: rust/kernel/mm/
+F: rust/kernel/page.rs
 
 MEMORY MAPPING
 M: Andrew Morton <akpm@linux-foundation.org>

@@ -17469,6 +17496,7 @@ F: drivers/net/ethernet/neterion/
 NETFILTER
 M: Pablo Neira Ayuso <pablo@netfilter.org>
 M: Jozsef Kadlecsik <kadlec@netfilter.org>
+M: Florian Westphal <fw@strlen.de>
 L: netfilter-devel@vger.kernel.org
 L: coreteam@netfilter.org
 S: Maintained

@@ -22194,7 +22222,7 @@ F: arch/s390/mm
 
 S390 NETWORK DRIVERS
 M: Alexandra Winter <wintera@linux.ibm.com>
-M: Thorsten Winkler <twinkler@linux.ibm.com>
+R: Aswin Karuvally <aswin@linux.ibm.com>
 L: linux-s390@vger.kernel.org
 L: netdev@vger.kernel.org
 S: Supported
Makefile (2 changed lines)

@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 17
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc3
 NAME = Baby Opossum Posse
 
 # *DOCUMENTATION*
@@ -102,7 +102,13 @@ KBUILD_CFLAGS += $(call cc-option,-mthin-add-sub) $(call cc-option,-Wa$(comma)
 
 ifdef CONFIG_OBJTOOL
 ifdef CONFIG_CC_HAS_ANNOTATE_TABLEJUMP
+# The annotate-tablejump option can not be passed to LLVM backend when LTO is enabled.
+# Ensure it is aware of linker with LTO, '--loongarch-annotate-tablejump' also needs to
+# be passed via '-mllvm' to ld.lld.
 KBUILD_CFLAGS += -mannotate-tablejump
+ifdef CONFIG_LTO_CLANG
+KBUILD_LDFLAGS += -mllvm --loongarch-annotate-tablejump
+endif
 else
 KBUILD_CFLAGS += -fno-jump-tables # keep compatibility with older compilers
 endif
@@ -58,7 +58,7 @@
 .endm
 
 .macro STACKLEAK_ERASE
-#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+#ifdef CONFIG_KSTACK_ERASE
 bl stackleak_erase_on_task_stack
 #endif
 .endm
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#ifndef _UAPI_ASM_LOONGARCH_SETUP_H
+#define _UAPI_ASM_LOONGARCH_SETUP_H
+
+#define COMMAND_LINE_SIZE 4096
+
+#endif /* _UAPI_ASM_LOONGARCH_SETUP_H */
@@ -8,6 +8,7 @@
 #include <linux/module.h>
 #include <linux/moduleloader.h>
 #include <linux/ftrace.h>
+#include <linux/sort.h>
 
 Elf_Addr module_emit_got_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr val)
 {

@@ -61,38 +62,37 @@ Elf_Addr module_emit_plt_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr v
 return (Elf_Addr)&plt[nr];
 }
 
-static int is_rela_equal(const Elf_Rela *x, const Elf_Rela *y)
-{
-return x->r_info == y->r_info && x->r_addend == y->r_addend;
-}
+#define cmp_3way(a, b) ((a) < (b) ? -1 : (a) > (b))
 
-static bool duplicate_rela(const Elf_Rela *rela, int idx)
+static int compare_rela(const void *x, const void *y)
 {
-int i;
+int ret;
+const Elf_Rela *rela_x = x, *rela_y = y;
 
-for (i = 0; i < idx; i++) {
-if (is_rela_equal(&rela[i], &rela[idx]))
-return true;
-}
+ret = cmp_3way(rela_x->r_info, rela_y->r_info);
+if (ret == 0)
+ret = cmp_3way(rela_x->r_addend, rela_y->r_addend);
 
-return false;
+return ret;
 }
 
 static void count_max_entries(Elf_Rela *relas, int num,
 unsigned int *plts, unsigned int *gots)
 {
-unsigned int i, type;
+unsigned int i;
+
+sort(relas, num, sizeof(Elf_Rela), compare_rela, NULL);
 
 for (i = 0; i < num; i++) {
-type = ELF_R_TYPE(relas[i].r_info);
-switch (type) {
+if (i && !compare_rela(&relas[i-1], &relas[i]))
+continue;
+
+switch (ELF_R_TYPE(relas[i].r_info)) {
 case R_LARCH_SOP_PUSH_PLT_PCREL:
 case R_LARCH_B26:
-if (!duplicate_rela(relas, i))
 (*plts)++;
 break;
 case R_LARCH_GOT_PC_HI20:
-if (!duplicate_rela(relas, i))
 (*gots)++;
 break;
 default:
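The count_max_entries() change above replaces a quadratic duplicate scan with sort-then-skip-adjacent-duplicates. A minimal user-space sketch of the same pattern, assuming a hypothetical stand-in struct and qsort() instead of the kernel's sort():

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for Elf_Rela: only the two keys that matter. */
struct rela {
	unsigned long r_info;
	long r_addend;
};

#define cmp_3way(a, b) ((a) < (b) ? -1 : (a) > (b))

static int compare_rela(const void *x, const void *y)
{
	const struct rela *a = x, *b = y;
	int ret = cmp_3way(a->r_info, b->r_info);

	return ret ? ret : cmp_3way(a->r_addend, b->r_addend);
}

/* Count unique entries in O(n log n): sort, then skip adjacent duplicates. */
static unsigned int count_unique(struct rela *relas, size_t num)
{
	unsigned int uniq = 0;
	size_t i;

	qsort(relas, num, sizeof(*relas), compare_rela);
	for (i = 0; i < num; i++)
		if (i == 0 || compare_rela(&relas[i - 1], &relas[i]))
			uniq++;
	return uniq;
}

int main(void)
{
	struct rela r[] = { {1, 0}, {1, 0}, {2, 4}, {1, 8} };

	printf("%u unique relocations\n", count_unique(r, 4)); /* prints 3 */
	return 0;
}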
@@ -677,6 +677,11 @@ static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
 for (i = 1; i < 32; i++)
 err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
 
+#ifdef CONFIG_CPU_HAS_LBT
+if (extctx->lbt.addr)
+err |= protected_save_lbt_context(extctx);
+#endif
+
 if (extctx->lasx.addr)
 err |= protected_save_lasx_context(extctx);
 else if (extctx->lsx.addr)

@@ -684,11 +689,6 @@ static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
 else if (extctx->fpu.addr)
 err |= protected_save_fpu_context(extctx);
 
-#ifdef CONFIG_CPU_HAS_LBT
-if (extctx->lbt.addr)
-err |= protected_save_lbt_context(extctx);
-#endif
-
 /* Set the "end" magic */
 info = (struct sctx_info *)extctx->end.addr;
 err |= __put_user(0, &info->magic);
@@ -5,6 +5,7 @@
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
 #include <linux/clockchips.h>
+#include <linux/cpuhotplug.h>
 #include <linux/delay.h>
 #include <linux/export.h>
 #include <linux/init.h>

@@ -102,6 +103,23 @@ static int constant_timer_next_event(unsigned long delta, struct clock_event_dev
 return 0;
 }
 
+static int arch_timer_starting(unsigned int cpu)
+{
+set_csr_ecfg(ECFGF_TIMER);
+
+return 0;
+}
+
+static int arch_timer_dying(unsigned int cpu)
+{
+constant_set_state_shutdown(this_cpu_ptr(&constant_clockevent_device));
+
+/* Clear Timer Interrupt */
+write_csr_tintclear(CSR_TINTCLR_TI);
+
+return 0;
+}
+
 static unsigned long get_loops_per_jiffy(void)
 {
 unsigned long lpj = (unsigned long)const_clock_freq;

@@ -172,6 +190,10 @@ int constant_clockevent_init(void)
 lpj_fine = get_loops_per_jiffy();
 pr_info("Constant clock event device register\n");
 
+cpuhp_setup_state(CPUHP_AP_LOONGARCH_ARCH_TIMER_STARTING,
+"clockevents/loongarch/timer:starting",
+arch_timer_starting, arch_timer_dying);
+
 return 0;
 }
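The hunk above hooks the LoongArch constant timer into the CPU hotplug state machine so each CPU re-arms its timer interrupt when it comes online and shuts it down when it goes offline. A minimal sketch of the same registration pattern for a hypothetical driver, using a dynamic hotplug state rather than a dedicated enum entry; driver names and messages are placeholders:

#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>

/* Hypothetical per-CPU enable/disable callbacks. */
static int mydrv_cpu_online(unsigned int cpu)
{
	pr_info("mydrv: setting up per-CPU state on CPU %u\n", cpu);
	return 0;	/* non-zero would abort onlining this CPU */
}

static int mydrv_cpu_offline(unsigned int cpu)
{
	pr_info("mydrv: tearing down per-CPU state on CPU %u\n", cpu);
	return 0;
}

static int __init mydrv_init(void)
{
	int ret;

	/*
	 * CPUHP_AP_ONLINE_DYN picks a dynamic hotplug state; the startup
	 * callback runs on every CPU that is already online and on each
	 * CPU that comes online later, the teardown callback on offline.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
				mydrv_cpu_online, mydrv_cpu_offline);
	return ret < 0 ? ret : 0;
}
module_init(mydrv_init);
MODULE_LICENSE("GPL");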
@@ -45,7 +45,12 @@ static void eiointc_update_irq(struct loongarch_eiointc *s, int irq, int level)
 }
 
 cpu = s->sw_coremap[irq];
-vcpu = kvm_get_vcpu(s->kvm, cpu);
+vcpu = kvm_get_vcpu_by_id(s->kvm, cpu);
+if (unlikely(vcpu == NULL)) {
+kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
+return;
+}
 
 if (level) {
 /* if not enable return false */
 if (!test_bit(irq, (unsigned long *)s->enable.reg_u32))
@@ -99,7 +99,7 @@ static void write_mailbox(struct kvm_vcpu *vcpu, int offset, uint64_t data, int
 static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data)
 {
 int i, idx, ret;
-uint32_t val = 0, mask = 0;
+uint64_t val = 0, mask = 0;
 
 /*
 * Bit 27-30 is mask for byte writing.

@@ -108,7 +108,7 @@ static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data)
 if ((data >> 27) & 0xf) {
 /* Read the old val */
 idx = srcu_read_lock(&vcpu->kvm->srcu);
-ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val);
+ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, 4, &val);
 srcu_read_unlock(&vcpu->kvm->srcu, idx);
 if (unlikely(ret)) {
 kvm_err("%s: : read data from addr %llx failed\n", __func__, addr);

@@ -124,7 +124,7 @@ static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data)
 }
 val |= ((uint32_t)(data >> 32) & ~mask);
 idx = srcu_read_lock(&vcpu->kvm->srcu);
-ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val);
+ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, 4, &val);
 srcu_read_unlock(&vcpu->kvm->srcu, idx);
 if (unlikely(ret))
 kvm_err("%s: : write data to addr %llx failed\n", __func__, addr);

@@ -298,7 +298,7 @@ static int kvm_ipi_regs_access(struct kvm_device *dev,
 cpu = (attr->attr >> 16) & 0x3ff;
 addr = attr->attr & 0xff;
 
-vcpu = kvm_get_vcpu(dev->kvm, cpu);
+vcpu = kvm_get_vcpu_by_id(dev->kvm, cpu);
 if (unlikely(vcpu == NULL)) {
 kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
 return -EINVAL;
@@ -195,6 +195,11 @@ static int kvm_pch_pic_read(struct kvm_vcpu *vcpu,
 return -EINVAL;
 }
 
+if (addr & (len - 1)) {
+kvm_err("%s: pch pic not aligned addr %llx len %d\n", __func__, addr, len);
+return -EINVAL;
+}
+
 /* statistics of pch pic reading */
 vcpu->stat.pch_pic_read_exits++;
 ret = loongarch_pch_pic_read(s, addr, len, val);

@@ -302,6 +307,11 @@ static int kvm_pch_pic_write(struct kvm_vcpu *vcpu,
 return -EINVAL;
 }
 
+if (addr & (len - 1)) {
+kvm_err("%s: pch pic not aligned addr %llx len %d\n", __func__, addr, len);
+return -EINVAL;
+}
+
 /* statistics of pch pic writing */
 vcpu->stat.pch_pic_write_exits++;
 ret = loongarch_pch_pic_write(s, addr, len, val);
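The `addr & (len - 1)` test added above rejects accesses that are not naturally aligned to their own size; it assumes len is a power of two, which holds for the byte/word MMIO access sizes handled here. A small stand-alone illustration of that check:

#include <stdbool.h>
#include <stdio.h>

/* True when addr is a multiple of len, assuming len is a power of two. */
static bool is_naturally_aligned(unsigned long long addr, unsigned int len)
{
	return (addr & (len - 1)) == 0;
}

int main(void)
{
	printf("%d\n", is_naturally_aligned(0x1000, 4));	/* 1: aligned  */
	printf("%d\n", is_naturally_aligned(0x1002, 4));	/* 0: rejected */
	printf("%d\n", is_naturally_aligned(0x1004, 8));	/* 0: rejected */
	return 0;
}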
@@ -1283,9 +1283,11 @@ int kvm_own_lbt(struct kvm_vcpu *vcpu)
 return -EINVAL;
 
 preempt_disable();
+if (!(vcpu->arch.aux_inuse & KVM_LARCH_LBT)) {
 set_csr_euen(CSR_EUEN_LBTEN);
 _restore_lbt(&vcpu->arch.lbt);
 vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
+}
 preempt_enable();
 
 return 0;

@@ -82,13 +82,16 @@
 };
 };
 
-etop@e180000 {
+ethernet@e180000 {
 compatible = "lantiq,etop-xway";
 reg = <0xe180000 0x40000>;
 interrupt-parent = <&icu0>;
 interrupts = <73 78>;
+interrupt-names = "tx", "rx";
 phy-mode = "rmii";
 mac-address = [ 00 11 22 33 44 55 ];
+lantiq,rx-burst-length = <4>;
+lantiq,tx-burst-length = <4>;
 };
 
 stp0: stp@e100bb0 {
@@ -497,7 +497,7 @@ void __init ltq_soc_init(void)
 ifccr = CGU_IFCCR_VR9;
 pcicr = CGU_PCICR_VR9;
 } else {
-clkdev_add_pmu("1e180000.etop", NULL, 1, 0, PMU_PPE);
+clkdev_add_pmu("1e180000.ethernet", NULL, 1, 0, PMU_PPE);
 }
 
 if (!of_machine_is_compatible("lantiq,ase"))

@@ -531,9 +531,9 @@ void __init ltq_soc_init(void)
 CLOCK_133M, CLOCK_133M);
 clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0);
 clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P);
-clkdev_add_pmu("1e180000.etop", "ppe", 1, 0, PMU_PPE);
-clkdev_add_cgu("1e180000.etop", "ephycgu", CGU_EPHY);
-clkdev_add_pmu("1e180000.etop", "ephy", 1, 0, PMU_EPHY);
+clkdev_add_pmu("1e180000.ethernet", "ppe", 1, 0, PMU_PPE);
+clkdev_add_cgu("1e180000.ethernet", "ephycgu", CGU_EPHY);
+clkdev_add_pmu("1e180000.ethernet", "ephy", 1, 0, PMU_EPHY);
 clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_ASE_SDIO);
 clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
 } else if (of_machine_is_compatible("lantiq,grx390")) {

@@ -592,7 +592,7 @@ void __init ltq_soc_init(void)
 clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0 | PMU_AHBM);
 clkdev_add_pmu("1f203034.usb2-phy", "phy", 1, 0, PMU_USB1_P);
 clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1 | PMU_AHBM);
-clkdev_add_pmu("1e180000.etop", "switch", 1, 0, PMU_SWITCH);
+clkdev_add_pmu("1e180000.ethernet", "switch", 1, 0, PMU_SWITCH);
 clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO);
 clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
 clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
@@ -297,8 +297,9 @@
 reg-names = "dwmac", "apb";
 interrupts = <67 IRQ_TYPE_LEVEL_HIGH>;
 interrupt-names = "macirq";
-clocks = <&clk CLK_GMAC_AXI>, <&clk CLK_GMAC1>;
-clock-names = "stmmaceth", "pclk";
+clocks = <&clk CLK_GMAC_AXI>, <&clk CLK_GMAC1>,
+         <&clk CLK_PERISYS_APB4_HCLK>;
+clock-names = "stmmaceth", "pclk", "apb";
 snps,pbl = <32>;
 snps,fixed-burst;
 snps,multicast-filter-bins = <64>;

@@ -319,8 +320,9 @@
 reg-names = "dwmac", "apb";
 interrupts = <66 IRQ_TYPE_LEVEL_HIGH>;
 interrupt-names = "macirq";
-clocks = <&clk CLK_GMAC_AXI>, <&clk CLK_GMAC0>;
-clock-names = "stmmaceth", "pclk";
+clocks = <&clk CLK_GMAC_AXI>, <&clk CLK_GMAC0>,
+         <&clk CLK_PERISYS_APB4_HCLK>;
+clock-names = "stmmaceth", "pclk", "apb";
 snps,pbl = <32>;
 snps,fixed-burst;
 snps,multicast-filter-bins = <64>;
@@ -530,6 +530,9 @@ void setup_vmem(unsigned long kernel_start, unsigned long kernel_end, unsigned l
 lowcore_address + sizeof(struct lowcore),
 POPULATE_LOWCORE);
 for_each_physmem_usable_range(i, &start, &end) {
+/* Do not map lowcore with identity mapping */
+if (!start)
+start = sizeof(struct lowcore);
 pgtable_populate((unsigned long)__identity_va(start),
 (unsigned long)__identity_va(end),
 POPULATE_IDENTITY);
@@ -5,6 +5,7 @@ CONFIG_WATCH_QUEUE=y
 CONFIG_AUDIT=y
 CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
+CONFIG_POSIX_AUX_CLOCKS=y
 CONFIG_BPF_SYSCALL=y
 CONFIG_BPF_JIT=y
 CONFIG_BPF_JIT_ALWAYS_ON=y

@@ -19,6 +20,7 @@ CONFIG_TASK_XACCT=y
 CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
+CONFIG_SCHED_PROXY_EXEC=y
 CONFIG_NUMA_BALANCING=y
 CONFIG_MEMCG=y
 CONFIG_BLK_CGROUP=y

@@ -42,6 +44,7 @@ CONFIG_PROFILING=y
 CONFIG_KEXEC=y
 CONFIG_KEXEC_FILE=y
 CONFIG_KEXEC_SIG=y
+CONFIG_CRASH_DM_CRYPT=y
 CONFIG_LIVEPATCH=y
 CONFIG_MARCH_Z13=y
 CONFIG_NR_CPUS=512

@@ -105,6 +108,7 @@ CONFIG_CMA_AREAS=7
 CONFIG_MEM_SOFT_DIRTY=y
 CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
 CONFIG_IDLE_PAGE_TRACKING=y
+CONFIG_ZONE_DEVICE=y
 CONFIG_PERCPU_STATS=y
 CONFIG_GUP_TEST=y
 CONFIG_ANON_VMA_NAME=y

@@ -223,17 +227,19 @@ CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
 CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
 CONFIG_NETFILTER_XT_TARGET_CT=m
 CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HL=m
 CONFIG_NETFILTER_XT_TARGET_HMARK=m
 CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
 CONFIG_NETFILTER_XT_TARGET_LOG=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_NAT=m
 CONFIG_NETFILTER_XT_TARGET_NETMAP=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
 CONFIG_NETFILTER_XT_TARGET_REDIRECT=m
+CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m
 CONFIG_NETFILTER_XT_TARGET_TEE=m
 CONFIG_NETFILTER_XT_TARGET_TPROXY=m
-CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_SECMARK=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m

@@ -248,6 +254,7 @@ CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
 CONFIG_NETFILTER_XT_MATCH_CPU=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
 CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
 CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m

@@ -318,16 +325,8 @@ CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
 CONFIG_IP_NF_MATCH_RPFILTER=m
 CONFIG_IP_NF_MATCH_TTL=m
-CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_NAT=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_ECN=m
-CONFIG_IP_NF_TARGET_TTL=m
-CONFIG_IP_NF_RAW=m
-CONFIG_IP_NF_SECURITY=m
-CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NFT_FIB_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m

@@ -340,15 +339,9 @@ CONFIG_IP6_NF_MATCH_IPV6HEADER=m
 CONFIG_IP6_NF_MATCH_MH=m
 CONFIG_IP6_NF_MATCH_RPFILTER=m
 CONFIG_IP6_NF_MATCH_RT=m
-CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
-CONFIG_IP6_NF_MANGLE=m
-CONFIG_IP6_NF_RAW=m
-CONFIG_IP6_NF_SECURITY=m
-CONFIG_IP6_NF_NAT=m
-CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_IP_SCTP=m
 CONFIG_RDS=m
 CONFIG_RDS_RDMA=m
 CONFIG_RDS_TCP=m

@@ -383,6 +376,7 @@ CONFIG_NET_SCH_FQ_CODEL=m
 CONFIG_NET_SCH_INGRESS=m
 CONFIG_NET_SCH_PLUG=m
 CONFIG_NET_SCH_ETS=m
+CONFIG_NET_SCH_DUALPI2=m
 CONFIG_NET_CLS_BASIC=m
 CONFIG_NET_CLS_ROUTE4=m
 CONFIG_NET_CLS_FW=m

@@ -504,6 +498,7 @@ CONFIG_DM_VDO=m
 CONFIG_NETDEVICES=y
 CONFIG_BONDING=m
 CONFIG_DUMMY=m
+CONFIG_OVPN=m
 CONFIG_EQUALIZER=m
 CONFIG_IFB=m
 CONFIG_MACVLAN=m

@@ -641,6 +636,7 @@ CONFIG_VP_VDPA=m
 CONFIG_VHOST_NET=m
 CONFIG_VHOST_VSOCK=m
 CONFIG_VHOST_VDPA=m
+CONFIG_DEV_DAX=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y

@@ -665,6 +661,7 @@ CONFIG_NILFS2_FS=m
 CONFIG_BCACHEFS_FS=y
 CONFIG_BCACHEFS_QUOTA=y
 CONFIG_BCACHEFS_POSIX_ACL=y
+CONFIG_FS_DAX=y
 CONFIG_EXPORTFS_BLOCK_OPS=y
 CONFIG_FS_ENCRYPTION=y
 CONFIG_FS_VERITY=y

@@ -755,6 +752,8 @@ CONFIG_HARDENED_USERCOPY=y
 CONFIG_BUG_ON_DATA_CORRUPTION=y
 CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_SELFTESTS=y
+CONFIG_CRYPTO_SELFTESTS_FULL=y
+CONFIG_CRYPTO_NULL=y
 CONFIG_CRYPTO_PCRYPT=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_BENCHMARK=m

@@ -783,7 +782,6 @@ CONFIG_CRYPTO_HCTR2=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_AEGIS128=m
-CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_GCM=y
 CONFIG_CRYPTO_SEQIV=y
 CONFIG_CRYPTO_MD4=m

@@ -822,6 +820,7 @@ CONFIG_SYSTEM_BLACKLIST_KEYRING=y
 CONFIG_CRYPTO_KRB5=m
 CONFIG_CRYPTO_KRB5_SELFTESTS=y
 CONFIG_CORDIC=m
+CONFIG_TRACE_MMIO_ACCESS=y
 CONFIG_RANDOM32_SELFTEST=y
 CONFIG_XZ_DEC_MICROLZMA=y
 CONFIG_DMA_CMA=y
@@ -4,6 +4,7 @@ CONFIG_WATCH_QUEUE=y
 CONFIG_AUDIT=y
 CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
+CONFIG_POSIX_AUX_CLOCKS=y
 CONFIG_BPF_SYSCALL=y
 CONFIG_BPF_JIT=y
 CONFIG_BPF_JIT_ALWAYS_ON=y

@@ -17,6 +18,7 @@ CONFIG_TASK_XACCT=y
 CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
+CONFIG_SCHED_PROXY_EXEC=y
 CONFIG_NUMA_BALANCING=y
 CONFIG_MEMCG=y
 CONFIG_BLK_CGROUP=y

@@ -40,11 +42,12 @@ CONFIG_PROFILING=y
 CONFIG_KEXEC=y
 CONFIG_KEXEC_FILE=y
 CONFIG_KEXEC_SIG=y
+CONFIG_CRASH_DM_CRYPT=y
 CONFIG_LIVEPATCH=y
 CONFIG_MARCH_Z13=y
 CONFIG_NR_CPUS=512
 CONFIG_NUMA=y
-CONFIG_HZ_100=y
+CONFIG_HZ_1000=y
 CONFIG_CERT_STORE=y
 CONFIG_EXPOLINE=y
 CONFIG_EXPOLINE_AUTO=y

@@ -97,6 +100,7 @@ CONFIG_CMA_AREAS=7
 CONFIG_MEM_SOFT_DIRTY=y
 CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
 CONFIG_IDLE_PAGE_TRACKING=y
+CONFIG_ZONE_DEVICE=y
 CONFIG_PERCPU_STATS=y
 CONFIG_ANON_VMA_NAME=y
 CONFIG_USERFAULTFD=y

@@ -214,17 +218,19 @@ CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
 CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
 CONFIG_NETFILTER_XT_TARGET_CT=m
 CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HL=m
 CONFIG_NETFILTER_XT_TARGET_HMARK=m
 CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
 CONFIG_NETFILTER_XT_TARGET_LOG=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_NAT=m
 CONFIG_NETFILTER_XT_TARGET_NETMAP=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
 CONFIG_NETFILTER_XT_TARGET_REDIRECT=m
+CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m
 CONFIG_NETFILTER_XT_TARGET_TEE=m
 CONFIG_NETFILTER_XT_TARGET_TPROXY=m
-CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_SECMARK=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m

@@ -239,6 +245,7 @@ CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
 CONFIG_NETFILTER_XT_MATCH_CPU=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
 CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
 CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m

@@ -309,16 +316,8 @@ CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
 CONFIG_IP_NF_MATCH_RPFILTER=m
 CONFIG_IP_NF_MATCH_TTL=m
-CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_NAT=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_ECN=m
-CONFIG_IP_NF_TARGET_TTL=m
-CONFIG_IP_NF_RAW=m
-CONFIG_IP_NF_SECURITY=m
-CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NFT_FIB_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m

@@ -331,15 +330,9 @@ CONFIG_IP6_NF_MATCH_IPV6HEADER=m
 CONFIG_IP6_NF_MATCH_MH=m
 CONFIG_IP6_NF_MATCH_RPFILTER=m
 CONFIG_IP6_NF_MATCH_RT=m
-CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
-CONFIG_IP6_NF_MANGLE=m
-CONFIG_IP6_NF_RAW=m
-CONFIG_IP6_NF_SECURITY=m
-CONFIG_IP6_NF_NAT=m
-CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_IP_SCTP=m
 CONFIG_RDS=m
 CONFIG_RDS_RDMA=m
 CONFIG_RDS_TCP=m

@@ -373,6 +366,7 @@ CONFIG_NET_SCH_FQ_CODEL=m
 CONFIG_NET_SCH_INGRESS=m
 CONFIG_NET_SCH_PLUG=m
 CONFIG_NET_SCH_ETS=m
+CONFIG_NET_SCH_DUALPI2=m
 CONFIG_NET_CLS_BASIC=m
 CONFIG_NET_CLS_ROUTE4=m
 CONFIG_NET_CLS_FW=m

@@ -494,6 +488,7 @@ CONFIG_DM_VDO=m
 CONFIG_NETDEVICES=y
 CONFIG_BONDING=m
 CONFIG_DUMMY=m
+CONFIG_OVPN=m
 CONFIG_EQUALIZER=m
 CONFIG_IFB=m
 CONFIG_MACVLAN=m

@@ -631,6 +626,7 @@ CONFIG_VP_VDPA=m
 CONFIG_VHOST_NET=m
 CONFIG_VHOST_VSOCK=m
 CONFIG_VHOST_VDPA=m
+CONFIG_DEV_DAX=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y

@@ -652,6 +648,7 @@ CONFIG_NILFS2_FS=m
 CONFIG_BCACHEFS_FS=m
 CONFIG_BCACHEFS_QUOTA=y
 CONFIG_BCACHEFS_POSIX_ACL=y
+CONFIG_FS_DAX=y
 CONFIG_EXPORTFS_BLOCK_OPS=y
 CONFIG_FS_ENCRYPTION=y
 CONFIG_FS_VERITY=y

@@ -683,7 +680,6 @@ CONFIG_TMPFS_POSIX_ACL=y
 CONFIG_TMPFS_INODE64=y
 CONFIG_TMPFS_QUOTA=y
 CONFIG_HUGETLBFS=y
-CONFIG_CONFIGFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m

@@ -741,6 +737,7 @@ CONFIG_BUG_ON_DATA_CORRUPTION=y
 CONFIG_CRYPTO_FIPS=y
 CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_SELFTESTS=y
+CONFIG_CRYPTO_NULL=y
 CONFIG_CRYPTO_PCRYPT=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_BENCHMARK=m

@@ -769,7 +766,6 @@ CONFIG_CRYPTO_HCTR2=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_AEGIS128=m
-CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_GCM=y
 CONFIG_CRYPTO_SEQIV=y
 CONFIG_CRYPTO_MD4=m
@@ -1,5 +1,6 @@
 CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
+CONFIG_POSIX_AUX_CLOCKS=y
 CONFIG_BPF_SYSCALL=y
 # CONFIG_CPU_ISOLATION is not set
 # CONFIG_UTS_NS is not set
@@ -11,7 +12,7 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_KEXEC=y
 CONFIG_MARCH_Z13=y
 CONFIG_NR_CPUS=2
-CONFIG_HZ_100=y
+CONFIG_HZ_1000=y
 # CONFIG_CHSC_SCH is not set
 # CONFIG_SCM_BUS is not set
 # CONFIG_AP is not set
@@ -6,6 +6,7 @@
  *    Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
  */
 
+#include <linux/security.h>
 #include <linux/slab.h>
 #include "hypfs.h"
 
@@ -66,23 +67,27 @@ static long dbfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	long rc;
 
 	mutex_lock(&df->lock);
-	if (df->unlocked_ioctl)
-		rc = df->unlocked_ioctl(file, cmd, arg);
-	else
-		rc = -ENOTTY;
+	rc = df->unlocked_ioctl(file, cmd, arg);
 	mutex_unlock(&df->lock);
 	return rc;
 }
 
-static const struct file_operations dbfs_ops = {
+static const struct file_operations dbfs_ops_ioctl = {
 	.read		= dbfs_read,
 	.unlocked_ioctl	= dbfs_ioctl,
 };
 
+static const struct file_operations dbfs_ops = {
+	.read		= dbfs_read,
+};
+
 void hypfs_dbfs_create_file(struct hypfs_dbfs_file *df)
 {
-	df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df,
-					 &dbfs_ops);
+	const struct file_operations *fops = &dbfs_ops;
+
+	if (df->unlocked_ioctl && !security_locked_down(LOCKDOWN_DEBUGFS))
+		fops = &dbfs_ops_ioctl;
+	df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df, fops);
 	mutex_init(&df->lock);
 }
@@ -106,5 +106,18 @@ void get_cpuflags(void)
 		cpuid(0x80000001, &ignored, &ignored, &cpu.flags[6],
 		      &cpu.flags[1]);
 	}
+
+	if (max_amd_level >= 0x8000001f) {
+		u32 ebx;
+
+		/*
+		 * The X86_FEATURE_COHERENCY_SFW_NO feature bit is in
+		 * the virtualization flags entry (word 8) and set by
+		 * scattered.c, so the bit needs to be explicitly set.
+		 */
+		cpuid(0x8000001f, &ignored, &ebx, &ignored, &ignored);
+		if (ebx & BIT(31))
+			set_bit(X86_FEATURE_COHERENCY_SFW_NO, cpu.flags);
+	}
 }
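For reference, leaf 0x8000001F EBX bit 31 is the same bit the hunk above tests. A minimal user-space sketch (an illustration only, not part of the patch) can query it with the compiler's cpuid.h helper:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 0x8000001F: AMD SEV features; EBX bit 31 maps to COHERENCY_SFW_NO. */
	if (!__get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0x8000001f not available");
		return 1;
	}
	printf("COHERENCY_SFW_NO: %s\n", (ebx & (1u << 31)) ? "yes" : "no");
	return 0;
}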
@@ -785,6 +785,7 @@ static void __head svsm_pval_4k_page(unsigned long paddr, bool validate)
 	pc->entry[0].page_size = RMP_PG_SIZE_4K;
 	pc->entry[0].action = validate;
 	pc->entry[0].ignore_cf = 0;
+	pc->entry[0].rsvd = 0;
 	pc->entry[0].pfn = paddr >> PAGE_SHIFT;
 
 	/* Protocol 0, Call ID 1 */
@@ -810,6 +811,13 @@ static void __head pvalidate_4k_page(unsigned long vaddr, unsigned long paddr,
 		if (ret)
 			sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
 	}
+
+	/*
+	 * If validating memory (making it private) and affected by the
+	 * cache-coherency vulnerability, perform the cache eviction mitigation.
+	 */
+	if (validate && !has_cpuflag(X86_FEATURE_COHERENCY_SFW_NO))
+		sev_evict_cache((void *)vaddr, 1);
 }
 
 /*
@@ -227,6 +227,7 @@ static u64 svsm_build_ca_from_pfn_range(u64 pfn, u64 pfn_end, bool action,
 		pe->page_size = RMP_PG_SIZE_4K;
 		pe->action = action;
 		pe->ignore_cf = 0;
+		pe->rsvd = 0;
 		pe->pfn = pfn;
 
 		pe++;
@@ -257,6 +258,7 @@ static int svsm_build_ca_from_psc_desc(struct snp_psc_desc *desc, unsigned int d
 		pe->page_size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
 		pe->action = e->operation == SNP_PAGE_STATE_PRIVATE;
 		pe->ignore_cf = 0;
+		pe->rsvd = 0;
 		pe->pfn = e->gfn;
 
 		pe++;
@@ -358,10 +360,31 @@ static void svsm_pval_pages(struct snp_psc_desc *desc)
 
 static void pvalidate_pages(struct snp_psc_desc *desc)
 {
+	struct psc_entry *e;
+	unsigned int i;
+
 	if (snp_vmpl)
 		svsm_pval_pages(desc);
 	else
 		pval_pages(desc);
+
+	/*
+	 * If not affected by the cache-coherency vulnerability there is no need
+	 * to perform the cache eviction mitigation.
+	 */
+	if (cpu_feature_enabled(X86_FEATURE_COHERENCY_SFW_NO))
+		return;
+
+	for (i = 0; i <= desc->hdr.end_entry; i++) {
+		e = &desc->entries[i];
+
+		/*
+		 * If validating memory (making it private) perform the cache
+		 * eviction mitigation.
+		 */
+		if (e->operation == SNP_PAGE_STATE_PRIVATE)
+			sev_evict_cache(pfn_to_kaddr(e->gfn), e->pagesize ? 512 : 1);
+	}
 }
 
 static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc)
@@ -371,29 +371,30 @@ static enum es_result __vc_handle_msr_caa(struct pt_regs *regs, bool write)
  * executing with Secure TSC enabled, so special handling is required for
  * accesses of MSR_IA32_TSC and MSR_AMD64_GUEST_TSC_FREQ.
  */
-static enum es_result __vc_handle_secure_tsc_msrs(struct pt_regs *regs, bool write)
+static enum es_result __vc_handle_secure_tsc_msrs(struct es_em_ctxt *ctxt, bool write)
 {
+	struct pt_regs *regs = ctxt->regs;
 	u64 tsc;
 
 	/*
-	 * GUEST_TSC_FREQ should not be intercepted when Secure TSC is enabled.
-	 * Terminate the SNP guest when the interception is enabled.
+	 * Writing to MSR_IA32_TSC can cause subsequent reads of the TSC to
+	 * return undefined values, and GUEST_TSC_FREQ is read-only. Generate
+	 * a #GP on all writes.
+	 */
+	if (write) {
+		ctxt->fi.vector = X86_TRAP_GP;
+		ctxt->fi.error_code = 0;
+		return ES_EXCEPTION;
+	}
+
+	/*
+	 * GUEST_TSC_FREQ read should not be intercepted when Secure TSC is
+	 * enabled. Terminate the guest if a read is attempted.
 	 */
 	if (regs->cx == MSR_AMD64_GUEST_TSC_FREQ)
 		return ES_VMM_ERROR;
 
-	/*
-	 * Writes: Writing to MSR_IA32_TSC can cause subsequent reads of the TSC
-	 * to return undefined values, so ignore all writes.
-	 *
-	 * Reads: Reads of MSR_IA32_TSC should return the current TSC value, use
-	 * the value returned by rdtsc_ordered().
-	 */
-	if (write) {
-		WARN_ONCE(1, "TSC MSR writes are verboten!\n");
-		return ES_OK;
-	}
+	/* Reads of MSR_IA32_TSC should return the current TSC value. */
 
 	tsc = rdtsc_ordered();
 	regs->ax = lower_32_bits(tsc);
 	regs->dx = upper_32_bits(tsc);
@@ -416,7 +417,7 @@ static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
 	case MSR_IA32_TSC:
 	case MSR_AMD64_GUEST_TSC_FREQ:
 		if (sev_status & MSR_AMD64_SNP_SECURE_TSC)
-			return __vc_handle_secure_tsc_msrs(regs, write);
+			return __vc_handle_secure_tsc_msrs(ctxt, write);
 		break;
 	default:
 		break;
@@ -218,6 +218,7 @@
 #define X86_FEATURE_FLEXPRIORITY	( 8*32+ 1) /* "flexpriority" Intel FlexPriority */
 #define X86_FEATURE_EPT			( 8*32+ 2) /* "ept" Intel Extended Page Table */
 #define X86_FEATURE_VPID		( 8*32+ 3) /* "vpid" Intel Virtual Processor ID */
+#define X86_FEATURE_COHERENCY_SFW_NO	( 8*32+ 4) /* SNP cache coherency software work around not needed */
 
 #define X86_FEATURE_VMMCALL		( 8*32+15) /* "vmmcall" Prefer VMMCALL to VMCALL */
 #define X86_FEATURE_XENPV		( 8*32+16) /* Xen paravirtual guest */
@@ -1,8 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef _ASM_X86_CPUID_H
-#define _ASM_X86_CPUID_H
-
-#include <asm/cpuid/api.h>
-
-#endif /* _ASM_X86_CPUID_H */
@@ -619,6 +619,24 @@ int rmp_make_shared(u64 pfn, enum pg_level level);
 void snp_leak_pages(u64 pfn, unsigned int npages);
 void kdump_sev_callback(void);
 void snp_fixup_e820_tables(void);
+
+static inline void sev_evict_cache(void *va, int npages)
+{
+	volatile u8 val __always_unused;
+	u8 *bytes = va;
+	int page_idx;
+
+	/*
+	 * For SEV guests, a read from the first/last cache-lines of a 4K page
+	 * using the guest key is sufficient to cause a flush of all cache-lines
+	 * associated with that 4K page without incurring all the overhead of a
+	 * full CLFLUSH sequence.
+	 */
+	for (page_idx = 0; page_idx < npages; page_idx++) {
+		val = bytes[page_idx * PAGE_SIZE];
+		val = bytes[page_idx * PAGE_SIZE + PAGE_SIZE - 1];
+	}
+}
 #else
 static inline bool snp_probe_rmptable_info(void) { return false; }
 static inline int snp_rmptable_init(void) { return -ENOSYS; }
@@ -634,6 +652,7 @@ static inline int rmp_make_shared(u64 pfn, enum pg_level level) { return -ENODEV
 static inline void snp_leak_pages(u64 pfn, unsigned int npages) {}
 static inline void kdump_sev_callback(void) { }
 static inline void snp_fixup_e820_tables(void) {}
+static inline void sev_evict_cache(void *va, int npages) {}
 #endif
 
 #endif
@@ -94,12 +94,13 @@ DECLARE_STATIC_CALL(xen_hypercall, xen_hypercall_func);
 #ifdef MODULE
 #define __ADDRESSABLE_xen_hypercall
 #else
-#define __ADDRESSABLE_xen_hypercall	__ADDRESSABLE_ASM_STR(__SCK__xen_hypercall)
+#define __ADDRESSABLE_xen_hypercall \
+	__stringify(.global STATIC_CALL_KEY(xen_hypercall);)
 #endif
 
 #define __HYPERCALL		\
 	__ADDRESSABLE_xen_hypercall \
-	"call __SCT__xen_hypercall"
+	__stringify(call STATIC_CALL_TRAMP(xen_hypercall))
 
 #define __HYPERCALL_ENTRY(x)	"a" (x)
@@ -1326,8 +1326,8 @@ static const char * const s5_reset_reason_txt[] = {
 
 static __init int print_s5_reset_status_mmio(void)
 {
-	unsigned long value;
 	void __iomem *addr;
+	u32 value;
 	int i;
 
 	if (!cpu_feature_enabled(X86_FEATURE_ZEN))
@@ -1340,12 +1340,16 @@ static __init int print_s5_reset_status_mmio(void)
 	value = ioread32(addr);
 	iounmap(addr);
 
+	/* Value with "all bits set" is an error response and should be ignored. */
+	if (value == U32_MAX)
+		return 0;
+
 	for (i = 0; i < ARRAY_SIZE(s5_reset_reason_txt); i++) {
 		if (!(value & BIT(i)))
 			continue;
 
 		if (s5_reset_reason_txt[i]) {
-			pr_info("x86/amd: Previous system reset reason [0x%08lx]: %s\n",
+			pr_info("x86/amd: Previous system reset reason [0x%08x]: %s\n",
 				value, s5_reset_reason_txt[i]);
 		}
 	}
@@ -386,7 +386,6 @@ static bool __init should_mitigate_vuln(unsigned int bug)
 
 	case X86_BUG_SPECTRE_V2:
 	case X86_BUG_RETBLEED:
-	case X86_BUG_SRSO:
 	case X86_BUG_L1TF:
 	case X86_BUG_ITS:
 		return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
@@ -1069,10 +1068,8 @@ static void __init gds_select_mitigation(void)
 	if (gds_mitigation == GDS_MITIGATION_AUTO) {
 		if (should_mitigate_vuln(X86_BUG_GDS))
 			gds_mitigation = GDS_MITIGATION_FULL;
-		else {
+		else
 			gds_mitigation = GDS_MITIGATION_OFF;
-			return;
-		}
 	}
 
 	/* No microcode */
@@ -3184,8 +3181,18 @@ static void __init srso_select_mitigation(void)
 	}
 
 	if (srso_mitigation == SRSO_MITIGATION_AUTO) {
-		if (should_mitigate_vuln(X86_BUG_SRSO)) {
+		/*
+		 * Use safe-RET if user->kernel or guest->host protection is
+		 * required. Otherwise the 'microcode' mitigation is sufficient
+		 * to protect the user->user and guest->guest vectors.
+		 */
+		if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
+		    (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) &&
+		     !boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO))) {
 			srso_mitigation = SRSO_MITIGATION_SAFE_RET;
+		} else if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
+			   cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST)) {
+			srso_mitigation = SRSO_MITIGATION_MICROCODE;
 		} else {
 			srso_mitigation = SRSO_MITIGATION_NONE;
 			return;
@@ -16,6 +16,7 @@
 #include <asm/spec-ctrl.h>
 #include <asm/delay.h>
 #include <asm/msr.h>
+#include <asm/resctrl.h>
 
 #include "cpu.h"
 
@@ -117,6 +118,8 @@ static void bsp_init_hygon(struct cpuinfo_x86 *c)
 			x86_amd_ls_cfg_ssbd_mask = 1ULL << 10;
 		}
 	}
+
+	resctrl_cpu_detect(c);
 }
 
 static void early_init_hygon(struct cpuinfo_x86 *c)
@@ -48,6 +48,7 @@ static const struct cpuid_bit cpuid_bits[] = {
 	{ X86_FEATURE_PROC_FEEDBACK,		CPUID_EDX, 11, 0x80000007, 0 },
 	{ X86_FEATURE_AMD_FAST_CPPC,		CPUID_EDX, 15, 0x80000007, 0 },
 	{ X86_FEATURE_MBA,			CPUID_EBX,  6, 0x80000008, 0 },
+	{ X86_FEATURE_COHERENCY_SFW_NO,		CPUID_EBX, 31, 0x8000001f, 0 },
 	{ X86_FEATURE_SMBA,			CPUID_EBX,  2, 0x80000020, 0 },
 	{ X86_FEATURE_BMEC,			CPUID_EBX,  3, 0x80000020, 0 },
 	{ X86_FEATURE_TSA_SQ_NO,		CPUID_ECX,  1, 0x80000021, 0 },
@@ -1881,19 +1881,20 @@ long fpu_xstate_prctl(int option, unsigned long arg2)
 #ifdef CONFIG_PROC_PID_ARCH_STATUS
 /*
  * Report the amount of time elapsed in millisecond since last AVX512
- * use in the task.
+ * use in the task. Report -1 if no AVX-512 usage.
  */
 static void avx512_status(struct seq_file *m, struct task_struct *task)
 {
-	unsigned long timestamp = READ_ONCE(x86_task_fpu(task)->avx512_timestamp);
-	long delta;
+	unsigned long timestamp;
+	long delta = -1;
 
-	if (!timestamp) {
-		/*
-		 * Report -1 if no AVX512 usage
-		 */
-		delta = -1;
-	} else {
+	/* AVX-512 usage is not tracked for kernel threads. Don't report anything. */
+	if (task->flags & (PF_KTHREAD | PF_USER_WORKER))
+		return;
+
+	timestamp = READ_ONCE(x86_task_fpu(task)->avx512_timestamp);
+
+	if (timestamp) {
 		delta = (long)(jiffies - timestamp);
 		/*
 		 * Cap to LONG_MAX if time difference > LONG_MAX
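The value computed above is exposed to user space through the per-task arch_status file. A minimal user-space sketch (an assumption about the reader's environment, not part of the patch) that prints the AVX512_elapsed_ms field for the current process:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/arch_status", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* A reported value of -1 means no AVX-512 usage was recorded. */
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, "AVX512_elapsed_ms:", 18))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}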
@@ -5847,8 +5847,7 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
 		goto out;
 	}
 
-	bfqq = kmem_cache_alloc_node(bfq_pool,
-				     GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
+	bfqq = kmem_cache_alloc_node(bfq_pool, GFP_NOWAIT | __GFP_ZERO,
 				     bfqd->queue->node);
 
 	if (bfqq) {
@@ -394,7 +394,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg, struct gendisk *disk,
 
 	/* allocate */
 	if (!new_blkg) {
-		new_blkg = blkg_alloc(blkcg, disk, GFP_NOWAIT | __GFP_NOWARN);
+		new_blkg = blkg_alloc(blkcg, disk, GFP_NOWAIT);
 		if (unlikely(!new_blkg)) {
 			ret = -ENOMEM;
 			goto err_put_css;
@@ -1467,7 +1467,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
 
 	spin_lock_init(&blkcg->lock);
 	refcount_set(&blkcg->online_pin, 1);
-	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
+	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT);
 	INIT_HLIST_HEAD(&blkcg->blkg_list);
 #ifdef CONFIG_CGROUP_WRITEBACK
 	INIT_LIST_HEAD(&blkcg->cgwb_list);
@@ -1630,7 +1630,7 @@ retry:
 			pd_prealloc = NULL;
 		} else {
 			pd = pol->pd_alloc_fn(disk, blkg->blkcg,
-					      GFP_NOWAIT | __GFP_NOWARN);
+					      GFP_NOWAIT);
 		}
 
 		if (!pd) {
@@ -557,7 +557,7 @@ static inline int bio_check_eod(struct bio *bio)
 	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
 	unsigned int nr_sectors = bio_sectors(bio);
 
-	if (nr_sectors &&
+	if (nr_sectors && maxsector &&
 	    (nr_sectors > maxsector ||
 	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
 		pr_info_ratelimited("%s: attempt to access beyond end of device\n"
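The condition above also shows the overflow-safe bounds check used here: rather than computing sector + nr_sectors, which can wrap, it compares against maxsector - nr_sectors, and a maxsector of 0 now skips the check entirely. A small stand-alone sketch of the same idea, with made-up values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Overflow-safe "does [sector, sector + nr_sectors) run past maxsector?"
 * check, mirroring the pattern in the hunk above. maxsector == 0 means the
 * device size is unknown and is not treated as an error. */
static bool beyond_eod(uint64_t sector, uint32_t nr_sectors, uint64_t maxsector)
{
	if (!nr_sectors || !maxsector)
		return false;
	return nr_sectors > maxsector || sector > maxsector - nr_sectors;
}

int main(void)
{
	printf("%d\n", beyond_eod(100, 8, 104));	/* 1: runs past the end */
	printf("%d\n", beyond_eod(96, 8, 104));		/* 0: fits exactly */
	printf("%d\n", beyond_eod(96, 8, 0));		/* 0: size unknown, skipped */
	return 0;
}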
@@ -95,6 +95,7 @@ static const char *const blk_queue_flag_name[] = {
 	QUEUE_FLAG_NAME(SQ_SCHED),
 	QUEUE_FLAG_NAME(DISABLE_WBT_DEF),
 	QUEUE_FLAG_NAME(NO_ELV_SWITCH),
+	QUEUE_FLAG_NAME(QOS_ENABLED),
 };
 #undef QUEUE_FLAG_NAME
@@ -5033,6 +5033,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 	unsigned int memflags;
 	int i;
 	struct xarray elv_tbl, et_tbl;
+	bool queues_frozen = false;
 
 	lockdep_assert_held(&set->tag_list_lock);
 
@@ -5056,9 +5057,6 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 		blk_mq_sysfs_unregister_hctxs(q);
 	}
 
-	list_for_each_entry(q, &set->tag_list, tag_set_list)
-		blk_mq_freeze_queue_nomemsave(q);
-
 	/*
 	 * Switch IO scheduler to 'none', cleaning up the data associated
 	 * with the previous scheduler. We will switch back once we are done
@@ -5068,6 +5066,9 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 		if (blk_mq_elv_switch_none(q, &elv_tbl))
 			goto switch_back;
 
+	list_for_each_entry(q, &set->tag_list, tag_set_list)
+		blk_mq_freeze_queue_nomemsave(q);
+	queues_frozen = true;
 	if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0)
 		goto switch_back;
 
@@ -5091,8 +5092,12 @@ fallback:
 	}
 switch_back:
 	/* The blk_mq_elv_switch_back unfreezes queue for us. */
-	list_for_each_entry(q, &set->tag_list, tag_set_list)
+	list_for_each_entry(q, &set->tag_list, tag_set_list) {
+		/* switch_back expects queue to be frozen */
+		if (!queues_frozen)
+			blk_mq_freeze_queue_nomemsave(q);
 		blk_mq_elv_switch_back(q, &elv_tbl, &et_tbl);
+	}
 
 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
 		blk_mq_sysfs_register_hctxs(q);
@@ -2,8 +2,6 @@
 
 #include "blk-rq-qos.h"
 
-__read_mostly DEFINE_STATIC_KEY_FALSE(block_rq_qos);
-
 /*
  * Increment 'v', if 'v' is below 'below'. Returns true if we succeeded,
  * false if 'v' + 1 would be bigger than 'below'.
@@ -319,8 +317,8 @@ void rq_qos_exit(struct request_queue *q)
 		struct rq_qos *rqos = q->rq_qos;
 		q->rq_qos = rqos->next;
 		rqos->ops->exit(rqos);
-		static_branch_dec(&block_rq_qos);
 	}
+	blk_queue_flag_clear(QUEUE_FLAG_QOS_ENABLED, q);
 	mutex_unlock(&q->rq_qos_mutex);
 }
 
@@ -346,7 +344,7 @@ int rq_qos_add(struct rq_qos *rqos, struct gendisk *disk, enum rq_qos_id id,
 		goto ebusy;
 	rqos->next = q->rq_qos;
 	q->rq_qos = rqos;
-	static_branch_inc(&block_rq_qos);
+	blk_queue_flag_set(QUEUE_FLAG_QOS_ENABLED, q);
 
 	blk_mq_unfreeze_queue(q, memflags);
 
@@ -377,6 +375,8 @@ void rq_qos_del(struct rq_qos *rqos)
 			break;
 		}
 	}
+	if (!q->rq_qos)
+		blk_queue_flag_clear(QUEUE_FLAG_QOS_ENABLED, q);
 	blk_mq_unfreeze_queue(q, memflags);
 
 	mutex_lock(&q->debugfs_mutex);
@@ -12,7 +12,6 @@
 #include "blk-mq-debugfs.h"
 
 struct blk_mq_debugfs_attr;
-extern struct static_key_false block_rq_qos;
 
 enum rq_qos_id {
 	RQ_QOS_WBT,
@@ -113,43 +112,55 @@ void __rq_qos_queue_depth_changed(struct rq_qos *rqos);
 
 static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
 {
-	if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
+	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
+	    q->rq_qos)
 		__rq_qos_cleanup(q->rq_qos, bio);
 }
 
 static inline void rq_qos_done(struct request_queue *q, struct request *rq)
 {
-	if (static_branch_unlikely(&block_rq_qos) && q->rq_qos &&
-	    !blk_rq_is_passthrough(rq))
+	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
+	    q->rq_qos && !blk_rq_is_passthrough(rq))
 		__rq_qos_done(q->rq_qos, rq);
 }
 
 static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
 {
-	if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
+	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
+	    q->rq_qos)
 		__rq_qos_issue(q->rq_qos, rq);
 }
 
 static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
 {
-	if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
+	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
+	    q->rq_qos)
 		__rq_qos_requeue(q->rq_qos, rq);
 }
 
 static inline void rq_qos_done_bio(struct bio *bio)
 {
-	if (static_branch_unlikely(&block_rq_qos) &&
-	    bio->bi_bdev && (bio_flagged(bio, BIO_QOS_THROTTLED) ||
-			     bio_flagged(bio, BIO_QOS_MERGED))) {
-		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-		if (q->rq_qos)
-			__rq_qos_done_bio(q->rq_qos, bio);
-	}
+	struct request_queue *q;
+
+	if (!bio->bi_bdev || (!bio_flagged(bio, BIO_QOS_THROTTLED) &&
+			      !bio_flagged(bio, BIO_QOS_MERGED)))
+		return;
+
+	q = bdev_get_queue(bio->bi_bdev);
+
+	/*
+	 * If a bio has BIO_QOS_xxx set, it implicitly implies that
+	 * q->rq_qos is present. So, we skip re-checking q->rq_qos
+	 * here as an extra optimization and directly call
+	 * __rq_qos_done_bio().
	 */
+	__rq_qos_done_bio(q->rq_qos, bio);
 }
 
 static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
 {
-	if (static_branch_unlikely(&block_rq_qos) && q->rq_qos) {
+	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
+	    q->rq_qos) {
 		bio_set_flag(bio, BIO_QOS_THROTTLED);
 		__rq_qos_throttle(q->rq_qos, bio);
 	}
@@ -158,14 +169,16 @@ static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
 static inline void rq_qos_track(struct request_queue *q, struct request *rq,
 				struct bio *bio)
 {
-	if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
+	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
+	    q->rq_qos)
 		__rq_qos_track(q->rq_qos, rq, bio);
 }
 
 static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
 				struct bio *bio)
 {
-	if (static_branch_unlikely(&block_rq_qos) && q->rq_qos) {
+	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
+	    q->rq_qos) {
 		bio_set_flag(bio, BIO_QOS_MERGED);
 		__rq_qos_merge(q->rq_qos, rq, bio);
 	}
@@ -173,7 +186,8 @@ static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
 
 static inline void rq_qos_queue_depth_changed(struct request_queue *q)
 {
-	if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
+	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
+	    q->rq_qos)
 		__rq_qos_queue_depth_changed(q->rq_qos);
 }
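These helpers replace a global static key with a per-queue flag: whether QoS hooks are taken is now a property of the individual queue rather than a system-wide branch, so enabling QoS on one device no longer adds overhead to every other device. A minimal stand-alone sketch of that gating pattern (plain C with hypothetical types, not the kernel API):

#include <stdbool.h>
#include <stdio.h>

struct queue {
	unsigned long flags;
	void *qos;		/* non-NULL when a QoS policy is attached */
};

#define QUEUE_FLAG_QOS_ENABLED 0

/* Fast-path check: a per-object enable bit plus the pointer it guards. */
static inline bool qos_enabled(const struct queue *q)
{
	return (q->flags & (1UL << QUEUE_FLAG_QOS_ENABLED)) && q->qos;
}

int main(void)
{
	struct queue a = { .flags = 1UL << QUEUE_FLAG_QOS_ENABLED, .qos = &a };
	struct queue b = { 0 };

	printf("a: %d, b: %d\n", qos_enabled(&a), qos_enabled(&b));
	return 0;
}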
@@ -157,16 +157,14 @@ static int blk_validate_integrity_limits(struct queue_limits *lim)
 	switch (bi->csum_type) {
 	case BLK_INTEGRITY_CSUM_NONE:
 		if (bi->pi_tuple_size) {
-			pr_warn("pi_tuple_size must be 0 when checksum type \
-				is none\n");
+			pr_warn("pi_tuple_size must be 0 when checksum type is none\n");
 			return -EINVAL;
 		}
 		break;
 	case BLK_INTEGRITY_CSUM_CRC:
 	case BLK_INTEGRITY_CSUM_IP:
 		if (bi->pi_tuple_size != sizeof(struct t10_pi_tuple)) {
-			pr_warn("pi_tuple_size mismatch for T10 PI: expected \
-				%zu, got %u\n",
+			pr_warn("pi_tuple_size mismatch for T10 PI: expected %zu, got %u\n",
 				sizeof(struct t10_pi_tuple),
 				bi->pi_tuple_size);
 			return -EINVAL;
@@ -174,8 +172,7 @@ static int blk_validate_integrity_limits(struct queue_limits *lim)
 		break;
 	case BLK_INTEGRITY_CSUM_CRC64:
 		if (bi->pi_tuple_size != sizeof(struct crc64_pi_tuple)) {
-			pr_warn("pi_tuple_size mismatch for CRC64 PI: \
-				expected %zu, got %u\n",
+			pr_warn("pi_tuple_size mismatch for CRC64 PI: expected %zu, got %u\n",
 				sizeof(struct crc64_pi_tuple),
 				bi->pi_tuple_size);
 			return -EINVAL;
@@ -972,6 +969,8 @@ bool queue_limits_stack_integrity(struct queue_limits *t,
 			goto incompatible;
 		if (ti->csum_type != bi->csum_type)
 			goto incompatible;
+		if (ti->pi_tuple_size != bi->pi_tuple_size)
+			goto incompatible;
 		if ((ti->flags & BLK_INTEGRITY_REF_TAG) !=
 		    (bi->flags & BLK_INTEGRITY_REF_TAG))
 			goto incompatible;
@@ -980,6 +979,7 @@ bool queue_limits_stack_integrity(struct queue_limits *t,
 		ti->flags |= (bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE) |
 			     (bi->flags & BLK_INTEGRITY_REF_TAG);
 		ti->csum_type = bi->csum_type;
+		ti->pi_tuple_size = bi->pi_tuple_size;
 		ti->metadata_size = bi->metadata_size;
 		ti->pi_offset = bi->pi_offset;
 		ti->interval_exp = bi->interval_exp;
@@ -847,7 +847,7 @@ static void blk_queue_release(struct kobject *kobj)
 	/* nothing to do here, all data is associated with the parent gendisk */
 }
 
-static const struct kobj_type blk_queue_ktype = {
+const struct kobj_type blk_queue_ktype = {
 	.default_groups = blk_queue_attr_groups,
 	.sysfs_ops = &queue_sysfs_ops,
 	.release = blk_queue_release,
@@ -875,15 +875,14 @@ int blk_register_queue(struct gendisk *disk)
 	struct request_queue *q = disk->queue;
 	int ret;
 
-	kobject_init(&disk->queue_kobj, &blk_queue_ktype);
 	ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue");
 	if (ret < 0)
-		goto out_put_queue_kobj;
+		return ret;
 
 	if (queue_is_mq(q)) {
 		ret = blk_mq_sysfs_register(disk);
 		if (ret)
-			goto out_put_queue_kobj;
+			goto out_del_queue_kobj;
 	}
 	mutex_lock(&q->sysfs_lock);
 
@@ -903,9 +902,9 @@ int blk_register_queue(struct gendisk *disk)
 
 	if (queue_is_mq(q))
 		elevator_set_default(q);
-	wbt_enable_default(disk);
 
 	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
+	wbt_enable_default(disk);
 
 	/* Now everything is ready and send out KOBJ_ADD uevent */
 	kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
@@ -934,8 +933,8 @@ out_debugfs_remove:
 	mutex_unlock(&q->sysfs_lock);
 	if (queue_is_mq(q))
 		blk_mq_sysfs_unregister(disk);
-out_put_queue_kobj:
-	kobject_put(&disk->queue_kobj);
+out_del_queue_kobj:
+	kobject_del(&disk->queue_kobj);
 	return ret;
 }
 
@@ -986,5 +985,4 @@ void blk_unregister_queue(struct gendisk *disk)
 	elevator_set_none(q);
 
 	blk_debugfs_remove(disk);
-	kobject_put(&disk->queue_kobj);
 }
@@ -85,8 +85,8 @@ struct rq_wb {
 	u64 sync_issue;
 	void *sync_cookie;
 
-	unsigned long last_issue;	/* last non-throttled issue */
-	unsigned long last_comp;	/* last non-throttled comp */
+	unsigned long last_issue;	/* issue time of last read rq */
+	unsigned long last_comp;	/* completion time of last read rq */
 	unsigned long min_lat_nsec;
 	struct rq_qos rqos;
 	struct rq_wait rq_wait[WBT_NUM_RWQ];
@@ -248,13 +248,14 @@ static void wbt_done(struct rq_qos *rqos, struct request *rq)
 	struct rq_wb *rwb = RQWB(rqos);
 
 	if (!wbt_is_tracked(rq)) {
+		if (wbt_is_read(rq)) {
 			if (rwb->sync_cookie == rq) {
 				rwb->sync_issue = 0;
 				rwb->sync_cookie = NULL;
 			}
 
-		if (wbt_is_read(rq))
 			wb_timestamp(rwb, &rwb->last_comp);
+		}
 	} else {
 		WARN_ON_ONCE(rq == rwb->sync_cookie);
 		__wbt_done(rqos, wbt_flags(rq));
@@ -29,6 +29,7 @@ struct elevator_tags;
 /* Max future timer expiry for timeouts */
 #define BLK_MAX_TIMEOUT		(5 * HZ)
 
+extern const struct kobj_type blk_queue_ktype;
 extern struct dentry *blk_debugfs_root;
 
 struct blk_flush_queue {
@@ -1303,6 +1303,7 @@ static void disk_release(struct device *dev)
 	disk_free_zone_resources(disk);
 	xa_destroy(&disk->part_tbl);
 
+	kobject_put(&disk->queue_kobj);
 	disk->queue->disk = NULL;
 	blk_put_queue(disk->queue);
 
@@ -1486,6 +1487,7 @@ struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
 	INIT_LIST_HEAD(&disk->slave_bdevs);
 #endif
 	mutex_init(&disk->rqos_state_mutex);
+	kobject_init(&disk->queue_kobj, &blk_queue_ktype);
 	return disk;
 
 out_erase_part0:
@@ -1829,9 +1829,6 @@ static void hl_release_dmabuf(struct dma_buf *dmabuf)
 	struct hl_dmabuf_priv *hl_dmabuf = dmabuf->priv;
 	struct hl_ctx *ctx;
 
-	if (!hl_dmabuf)
-		return;
-
 	ctx = hl_dmabuf->ctx;
 
 	if (hl_dmabuf->memhash_hnode)
@@ -1859,7 +1856,12 @@ static int export_dmabuf(struct hl_ctx *ctx,
 {
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 	struct hl_device *hdev = ctx->hdev;
-	int rc, fd;
+	CLASS(get_unused_fd, fd)(flags);
+
+	if (fd < 0) {
+		dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf, %d\n", fd);
+		return fd;
+	}
 
 	exp_info.ops = &habanalabs_dmabuf_ops;
 	exp_info.size = total_size;
@@ -1872,13 +1874,6 @@ static int export_dmabuf(struct hl_ctx *ctx,
 		return PTR_ERR(hl_dmabuf->dmabuf);
 	}
 
-	fd = dma_buf_fd(hl_dmabuf->dmabuf, flags);
-	if (fd < 0) {
-		dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf, %d\n", fd);
-		rc = fd;
-		goto err_dma_buf_put;
-	}
-
 	hl_dmabuf->ctx = ctx;
 	hl_ctx_get(hl_dmabuf->ctx);
 	atomic_inc(&ctx->hdev->dmabuf_export_cnt);
@@ -1890,13 +1885,9 @@ static int export_dmabuf(struct hl_ctx *ctx,
 	get_file(ctx->hpriv->file_priv->filp);
 
 	*dmabuf_fd = fd;
+	fd_install(take_fd(fd), hl_dmabuf->dmabuf->file);
 
 	return 0;
 
-err_dma_buf_put:
-	hl_dmabuf->dmabuf->priv = NULL;
-	dma_buf_put(hl_dmabuf->dmabuf);
-	return rc;
 }
 
 static int validate_export_params_common(struct hl_device *hdev, u64 addr, u64 size, u64 offset)
@@ -10437,7 +10437,7 @@ end:
 			(u64 *)(lin_dma_pkts_arr), DEBUGFS_WRITE64);
 	WREG32(sob_addr, 0);
 
-	kfree(lin_dma_pkts_arr);
+	kvfree(lin_dma_pkts_arr);
 
 	return rc;
 }
@@ -315,7 +315,7 @@ static void __iomem *einj_get_parameter_address(void)
 		memcpy_fromio(&v5param, p, v5param_size);
 		acpi5 = 1;
 		check_vendor_extension(pa_v5, &v5param);
-		if (available_error_type & ACPI65_EINJV2_SUPP) {
+		if (is_v2 && available_error_type & ACPI65_EINJV2_SUPP) {
 			len = v5param.einjv2_struct.length;
 			offset = offsetof(struct einjv2_extension_struct, component_arr);
 			max_nr_components = (len - offset) /
@@ -540,6 +540,9 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
 		struct set_error_type_with_address *v5param;
 
 		v5param = kmalloc(v5param_size, GFP_KERNEL);
+		if (!v5param)
+			return -ENOMEM;
+
 		memcpy_fromio(v5param, einj_param, v5param_size);
 		v5param->type = type;
 		if (type & ACPI5_VENDOR_BIT) {
@@ -1091,7 +1094,7 @@ err_put_table:
 	return rc;
 }
 
-static void __exit einj_remove(struct faux_device *fdev)
+static void einj_remove(struct faux_device *fdev)
 {
 	struct apei_exec_context ctx;
 
@@ -1114,15 +1117,9 @@ static void __exit einj_remove(struct faux_device *fdev)
 }
 
 static struct faux_device *einj_dev;
-/*
- * einj_remove() lives in .exit.text. For drivers registered via
- * platform_driver_probe() this is ok because they cannot get unbound at
- * runtime. So mark the driver struct with __refdata to prevent modpost
- * triggering a section mismatch warning.
- */
-static struct faux_device_ops einj_device_ops __refdata = {
+static struct faux_device_ops einj_device_ops = {
 	.probe = einj_probe,
-	.remove = __exit_p(einj_remove),
+	.remove = einj_remove,
 };
 
 static int __init einj_init(void)
@@ -2033,7 +2033,7 @@ void __init acpi_ec_ecdt_probe(void)
 		goto out;
 	}
 
-	if (!strstarts(ecdt_ptr->id, "\\")) {
+	if (!strlen(ecdt_ptr->id)) {
 		/*
 		 * The ECDT table on some MSI notebooks contains invalid data, together
 		 * with an empty ID string ("").
@@ -2042,9 +2042,13 @@ void __init acpi_ec_ecdt_probe(void)
 		 * a "fully qualified reference to the (...) embedded controller device",
 		 * so this string always has to start with a backslash.
 		 *
-		 * By verifying this we can avoid such faulty ECDT tables in a safe way.
+		 * However some ThinkBook machines have a ECDT table with a valid EC
+		 * description but an invalid ID string ("_SB.PC00.LPCB.EC0").
+		 *
+		 * Because of this we only check if the ID string is empty in order to
+		 * avoid the obvious cases.
 		 */
-		pr_err(FW_BUG "Ignoring ECDT due to invalid ID string \"%s\"\n", ecdt_ptr->id);
+		pr_err(FW_BUG "Ignoring ECDT due to empty ID string\n");
 		goto out;
 	}
@@ -329,7 +329,7 @@ static bool applicable_image(const void *data, struct pfru_update_cap_info *cap,
 	if (type == PFRU_CODE_INJECT_TYPE)
 		return payload_hdr->rt_ver >= cap->code_rt_version;
 
-	return payload_hdr->rt_ver >= cap->drv_rt_version;
+	return payload_hdr->svn_ver >= cap->drv_svn;
 }
 
 static void print_update_debug_info(struct pfru_updated_result *result,
@@ -180,7 +180,7 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy)
 		struct acpi_processor *pr = per_cpu(processors, cpu);
 		int ret;
 
-		if (!pr || !pr->performance)
+		if (!pr)
 			continue;
 
 		/*
@@ -197,6 +197,9 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy)
 			pr_err("Failed to add freq constraint for CPU%d (%d)\n",
 			       cpu, ret);
 
+		if (!pr->performance)
+			continue;
+
 		ret = acpi_processor_get_platform_limit(pr);
 		if (ret)
 			pr_err("Failed to update freq constraint for CPU%d (%d)\n",
@@ -2075,7 +2075,7 @@ out:
  * Check if a link is established. This is a relaxed version of
  * ata_phys_link_online() which accounts for the fact that this is potentially
  * called after changing the link power management policy, which may not be
- * reflected immediately in the SSTAUS register (e.g., we may still be seeing
+ * reflected immediately in the SStatus register (e.g., we may still be seeing
  * the PHY in partial, slumber or devsleep Partial power management state.
  * So check that:
  * - A device is still present, that is, DET is 1h (Device presence detected
@@ -2089,8 +2089,13 @@ static bool ata_eh_link_established(struct ata_link *link)
 	u32 sstatus;
 	u8 det, ipm;
 
+	/*
+	 * For old IDE/PATA adapters that do not have a valid scr_read method,
+	 * or if reading the SStatus register fails, assume that the device is
+	 * present. Device probe will determine if that is really the case.
+	 */
 	if (sata_scr_read(link, SCR_STATUS, &sstatus))
-		return false;
+		return true;
 
 	det = sstatus & 0x0f;
 	ipm = (sstatus >> 8) & 0x0f;
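For context, the DET and IPM fields checked above are the low nibble and bits 11:8 of the SATA SStatus register. A tiny stand-alone sketch of that decoding (the register value below is a made-up example, not taken from real hardware):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t sstatus = 0x123;		/* hypothetical SStatus value */
	uint8_t det = sstatus & 0x0f;		/* device detection state */
	uint8_t ipm = (sstatus >> 8) & 0x0f;	/* interface power management state */

	printf("DET=%#x IPM=%#x\n", det, ipm);
	return 0;
}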
@@ -3904,21 +3904,16 @@ static int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
 	/* Check cdl_ctrl */
 	switch (buf[0] & 0x03) {
 	case 0:
-		/* Disable CDL if it is enabled */
-		if (!(dev->flags & ATA_DFLAG_CDL_ENABLED))
-			return 0;
+		/* Disable CDL */
 		ata_dev_dbg(dev, "Disabling CDL\n");
 		cdl_action = 0;
 		dev->flags &= ~ATA_DFLAG_CDL_ENABLED;
 		break;
 	case 0x02:
 		/*
-		 * Enable CDL if not already enabled. Since this is mutually
-		 * exclusive with NCQ priority, allow this only if NCQ priority
-		 * is disabled.
+		 * Enable CDL. Since CDL is mutually exclusive with NCQ
+		 * priority, allow this only if NCQ priority is disabled.
 		 */
-		if (dev->flags & ATA_DFLAG_CDL_ENABLED)
-			return 0;
 		if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED) {
 			ata_dev_err(dev,
 				"NCQ priority must be disabled to enable CDL\n");
@@ -380,6 +380,9 @@ enum {
 	/* this is/was a write request */
 	__EE_WRITE,
 
+	/* hand back using mempool_free(e, drbd_buffer_page_pool) */
+	__EE_RELEASE_TO_MEMPOOL,
+
 	/* this is/was a write same request */
 	__EE_WRITE_SAME,
 
@@ -402,6 +405,7 @@ enum {
 #define EE_IN_INTERVAL_TREE	(1<<__EE_IN_INTERVAL_TREE)
 #define EE_SUBMITTED		(1<<__EE_SUBMITTED)
 #define EE_WRITE		(1<<__EE_WRITE)
+#define EE_RELEASE_TO_MEMPOOL	(1<<__EE_RELEASE_TO_MEMPOOL)
 #define EE_WRITE_SAME		(1<<__EE_WRITE_SAME)
 #define EE_APPLICATION		(1<<__EE_APPLICATION)
 #define EE_RS_THIN_REQ		(1<<__EE_RS_THIN_REQ)
@@ -858,7 +862,6 @@ struct drbd_device {
 	struct list_head sync_ee;   /* IO in progress (P_RS_DATA_REPLY gets written to disk) */
 	struct list_head done_ee;   /* need to send P_WRITE_ACK */
 	struct list_head read_ee;   /* [RS]P_DATA_REQUEST being read */
-	struct list_head net_ee;    /* zero-copy network send in progress */
 
 	struct list_head resync_reads;
 	atomic_t pp_in_use;		/* allocated from page pool */
@@ -1329,24 +1332,6 @@ extern struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
 extern mempool_t drbd_request_mempool;
 extern mempool_t drbd_ee_mempool;
 
-/* drbd's page pool, used to buffer data received from the peer,
- * or data requested by the peer.
- *
- * This does not have an emergency reserve.
- *
- * When allocating from this pool, it first takes pages from the pool.
- * Only if the pool is depleted will try to allocate from the system.
- *
- * The assumption is that pages taken from this pool will be processed,
- * and given back, "quickly", and then can be recycled, so we can avoid
- * frequent calls to alloc_page(), and still will be able to make progress even
- * under memory pressure.
- */
-extern struct page *drbd_pp_pool;
-extern spinlock_t   drbd_pp_lock;
-extern int	    drbd_pp_vacant;
-extern wait_queue_head_t drbd_pp_wait;
-
 /* We also need a standard (emergency-reserve backed) page pool
  * for meta data IO (activity log, bitmap).
  * We can keep it global, as long as it is used as "N pages at a time".
@@ -1354,6 +1339,7 @@ extern wait_queue_head_t drbd_pp_wait;
  */
 #define DRBD_MIN_POOL_PAGES	128
 extern mempool_t drbd_md_io_page_pool;
+extern mempool_t drbd_buffer_page_pool;
 
 /* We also need to make sure we get a bio
  * when we need it for housekeeping purposes */
@@ -1488,10 +1474,7 @@ extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *,
 						     sector_t, unsigned int,
 						     unsigned int,
 						     gfp_t) __must_hold(local);
-extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *,
-				 int);
-#define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
-#define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
+extern void drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *req);
 extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool);
 extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
 extern int drbd_connected(struct drbd_peer_device *);
@@ -1610,16 +1593,6 @@ static inline struct page *page_chain_next(struct page *page)
 	for (; page && ({ n = page_chain_next(page); 1; }); page = n)
 
 
-static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req)
-{
-	struct page *page = peer_req->pages;
-	page_chain_for_each(page) {
-		if (page_count(page) > 1)
-			return 1;
-	}
-	return 0;
-}
-
 static inline union drbd_state drbd_read_state(struct drbd_device *device)
 {
 	struct drbd_resource *resource = device->resource;
@@ -114,20 +114,10 @@ struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
 mempool_t drbd_request_mempool;
 mempool_t drbd_ee_mempool;
 mempool_t drbd_md_io_page_pool;
+mempool_t drbd_buffer_page_pool;
 struct bio_set drbd_md_io_bio_set;
 struct bio_set drbd_io_bio_set;
 
-/* I do not use a standard mempool, because:
-   1) I want to hand out the pre-allocated objects first.
-   2) I want to be able to interrupt sleeping allocation with a signal.
-   Note: This is a single linked list, the next pointer is the private
-	 member of struct page.
- */
-struct page *drbd_pp_pool;
-DEFINE_SPINLOCK(drbd_pp_lock);
-int drbd_pp_vacant;
-wait_queue_head_t drbd_pp_wait;
-
 DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
 
 static const struct block_device_operations drbd_ops = {
@@ -1611,6 +1601,7 @@ static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *b
 static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
 			    struct drbd_peer_request *peer_req)
 {
+	bool use_sendpage = !(peer_req->flags & EE_RELEASE_TO_MEMPOOL);
 	struct page *page = peer_req->pages;
 	unsigned len = peer_req->i.size;
 	int err;
@@ -1619,8 +1610,13 @@ static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
 	page_chain_for_each(page) {
 		unsigned l = min_t(unsigned, len, PAGE_SIZE);
 
-		err = _drbd_send_page(peer_device, page, 0, l,
-				      page_chain_next(page) ? MSG_MORE : 0);
+		if (likely(use_sendpage))
+			err = _drbd_send_page(peer_device, page, 0, l,
+					      page_chain_next(page) ? MSG_MORE : 0);
+		else
+			err = _drbd_no_send_page(peer_device, page, 0, l,
+						 page_chain_next(page) ? MSG_MORE : 0);
 		if (err)
 			return err;
 		len -= l;
@@ -1962,7 +1958,6 @@ void drbd_init_set_defaults(struct drbd_device *device)
 	INIT_LIST_HEAD(&device->sync_ee);
 	INIT_LIST_HEAD(&device->done_ee);
 	INIT_LIST_HEAD(&device->read_ee);
-	INIT_LIST_HEAD(&device->net_ee);
 	INIT_LIST_HEAD(&device->resync_reads);
 	INIT_LIST_HEAD(&device->resync_work.list);
 	INIT_LIST_HEAD(&device->unplug_work.list);
@@ -2043,7 +2038,6 @@ void drbd_device_cleanup(struct drbd_device *device)
 	D_ASSERT(device, list_empty(&device->sync_ee));
 	D_ASSERT(device, list_empty(&device->done_ee));
 	D_ASSERT(device, list_empty(&device->read_ee));
-	D_ASSERT(device, list_empty(&device->net_ee));
 	D_ASSERT(device, list_empty(&device->resync_reads));
 	D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q));
 	D_ASSERT(device, list_empty(&device->resync_work.list));
@@ -2055,19 +2049,11 @@ void drbd_device_cleanup(struct drbd_device *device)
 
 static void drbd_destroy_mempools(void)
 {
-	struct page *page;
-
-	while (drbd_pp_pool) {
-		page = drbd_pp_pool;
-		drbd_pp_pool = (struct page *)page_private(page);
-		__free_page(page);
-		drbd_pp_vacant--;
-	}
-
 	/* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */
 
 	bioset_exit(&drbd_io_bio_set);
 	bioset_exit(&drbd_md_io_bio_set);
+	mempool_exit(&drbd_buffer_page_pool);
 	mempool_exit(&drbd_md_io_page_pool);
 	mempool_exit(&drbd_ee_mempool);
 	mempool_exit(&drbd_request_mempool);
@@ -2086,9 +2072,8 @@ static void drbd_destroy_mempools(void)
 
 static int drbd_create_mempools(void)
 {
-	struct page *page;
 	const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count;
-	int i, ret;
+	int ret;
 
 	/* caches */
 	drbd_request_cache = kmem_cache_create(
@@ -2125,6 +2110,10 @@ static int drbd_create_mempools(void)
 	if (ret)
 		goto Enomem;
 
+	ret = mempool_init_page_pool(&drbd_buffer_page_pool, number, 0);
+	if (ret)
+		goto Enomem;
+
 	ret = mempool_init_slab_pool(&drbd_request_mempool, number,
 				     drbd_request_cache);
 	if (ret)
@@ -2134,15 +2123,6 @@ static int drbd_create_mempools(void)
 	if (ret)
 		goto Enomem;
 
-	for (i = 0; i < number; i++) {
-		page = alloc_page(GFP_HIGHUSER);
-		if (!page)
-			goto Enomem;
-		set_page_private(page, (unsigned long)drbd_pp_pool);
-		drbd_pp_pool = page;
-	}
-	drbd_pp_vacant = number;
-
 	return 0;
 
 Enomem:
@@ -2169,10 +2149,6 @@ static void drbd_release_all_peer_reqs(struct drbd_device *device)
 	rr = drbd_free_peer_reqs(device, &device->done_ee);
 	if (rr)
 		drbd_err(device, "%d EEs in done list found!\n", rr);
-
-	rr = drbd_free_peer_reqs(device, &device->net_ee);
-	if (rr)
-		drbd_err(device, "%d EEs in net list found!\n", rr);
 }
 
 /* caution. no locking. */
@@ -2863,11 +2839,6 @@ static int __init drbd_init(void)
 		return err;
 	}
 
-	/*
-	 * allocate all necessary structs
-	 */
-	init_waitqueue_head(&drbd_pp_wait);
-
 	drbd_proc = NULL; /* play safe for drbd_cleanup */
 	idr_init(&drbd_devices);
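
Note: the hunks above replace DRBD's hand-rolled page list with a page-backed mempool. As a minimal illustrative sketch only (not DRBD code; all names here are invented for the example), a kernel page mempool is typically set up and used along these lines:

	/* Illustrative sketch of the mempool pattern adopted above. */
	#include <linux/mempool.h>
	#include <linux/gfp.h>
	#include <linux/mm.h>

	static mempool_t example_page_pool;

	static int example_pool_init(unsigned int min_pages)
	{
		/* order-0 pages; at least min_pages kept as a guaranteed reserve */
		return mempool_init_page_pool(&example_page_pool, min_pages, 0);
	}

	static struct page *example_get_page(void)
	{
		/* tries the page allocator first, falls back to the reserve;
		 * without __GFP_DIRECT_RECLAIM this never sleeps */
		return mempool_alloc(&example_page_pool, __GFP_HIGHMEM | __GFP_NOWARN);
	}

	static void example_put_page(struct page *page)
	{
		/* refills the reserve first, otherwise frees to the system */
		mempool_free(page, &example_page_pool);
	}

	static void example_pool_exit(void)
	{
		mempool_exit(&example_page_pool);
	}
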
@@ -33,6 +33,7 @@
 #include <linux/string.h>
 #include <linux/scatterlist.h>
 #include <linux/part_stat.h>
+#include <linux/mempool.h>
 #include "drbd_int.h"
 #include "drbd_protocol.h"
 #include "drbd_req.h"
@@ -63,182 +64,31 @@ static int e_end_block(struct drbd_work *, int);
 
 #define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)
 
-/*
- * some helper functions to deal with single linked page lists,
- * page->private being our "next" pointer.
- */
-
-/* If at least n pages are linked at head, get n pages off.
- * Otherwise, don't modify head, and return NULL.
- * Locking is the responsibility of the caller.
- */
-static struct page *page_chain_del(struct page **head, int n)
-{
-	struct page *page;
-	struct page *tmp;
-
-	BUG_ON(!n);
-	BUG_ON(!head);
-
-	page = *head;
-
-	if (!page)
-		return NULL;
-
-	while (page) {
-		tmp = page_chain_next(page);
-		if (--n == 0)
-			break; /* found sufficient pages */
-		if (tmp == NULL)
-			/* insufficient pages, don't use any of them. */
-			return NULL;
-		page = tmp;
-	}
-
-	/* add end of list marker for the returned list */
-	set_page_private(page, 0);
-	/* actual return value, and adjustment of head */
-	page = *head;
-	*head = tmp;
-	return page;
-}
-
-/* may be used outside of locks to find the tail of a (usually short)
- * "private" page chain, before adding it back to a global chain head
- * with page_chain_add() under a spinlock. */
-static struct page *page_chain_tail(struct page *page, int *len)
-{
-	struct page *tmp;
-	int i = 1;
-	while ((tmp = page_chain_next(page))) {
-		++i;
-		page = tmp;
-	}
-	if (len)
-		*len = i;
-	return page;
-}
-
-static int page_chain_free(struct page *page)
-{
-	struct page *tmp;
-	int i = 0;
-	page_chain_for_each_safe(page, tmp) {
-		put_page(page);
-		++i;
-	}
-	return i;
-}
-
-static void page_chain_add(struct page **head,
-		struct page *chain_first, struct page *chain_last)
-{
-#if 1
-	struct page *tmp;
-	tmp = page_chain_tail(chain_first, NULL);
-	BUG_ON(tmp != chain_last);
-#endif
-
-	/* add chain to head */
-	set_page_private(chain_last, (unsigned long)*head);
-	*head = chain_first;
-}
-
-static struct page *__drbd_alloc_pages(struct drbd_device *device,
-				       unsigned int number)
+static struct page *__drbd_alloc_pages(unsigned int number)
 {
 	struct page *page = NULL;
 	struct page *tmp = NULL;
 	unsigned int i = 0;
 
-	/* Yes, testing drbd_pp_vacant outside the lock is racy.
-	 * So what. It saves a spin_lock. */
-	if (drbd_pp_vacant >= number) {
-		spin_lock(&drbd_pp_lock);
-		page = page_chain_del(&drbd_pp_pool, number);
-		if (page)
-			drbd_pp_vacant -= number;
-		spin_unlock(&drbd_pp_lock);
-		if (page)
-			return page;
-	}
-
 	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
 	 * "criss-cross" setup, that might cause write-out on some other DRBD,
 	 * which in turn might block on the other node at this very place. */
 	for (i = 0; i < number; i++) {
-		tmp = alloc_page(GFP_TRY);
+		tmp = mempool_alloc(&drbd_buffer_page_pool, GFP_TRY);
 		if (!tmp)
-			break;
+			goto fail;
 		set_page_private(tmp, (unsigned long)page);
 		page = tmp;
 	}
 
-	if (i == number)
-		return page;
-
-	/* Not enough pages immediately available this time.
-	 * No need to jump around here, drbd_alloc_pages will retry this
-	 * function "soon". */
-	if (page) {
-		tmp = page_chain_tail(page, NULL);
-		spin_lock(&drbd_pp_lock);
-		page_chain_add(&drbd_pp_pool, page, tmp);
-		drbd_pp_vacant += i;
-		spin_unlock(&drbd_pp_lock);
-	}
+	return page;
+fail:
+	page_chain_for_each_safe(page, tmp) {
+		set_page_private(page, 0);
+		mempool_free(page, &drbd_buffer_page_pool);
+	}
 	return NULL;
 }
-
-static void reclaim_finished_net_peer_reqs(struct drbd_device *device,
-		struct list_head *to_be_freed)
-{
-	struct drbd_peer_request *peer_req, *tmp;
-
-	/* The EEs are always appended to the end of the list. Since
-	   they are sent in order over the wire, they have to finish
-	   in order. As soon as we see the first not finished we can
-	   stop to examine the list... */
-
-	list_for_each_entry_safe(peer_req, tmp, &device->net_ee, w.list) {
-		if (drbd_peer_req_has_active_page(peer_req))
-			break;
-		list_move(&peer_req->w.list, to_be_freed);
-	}
-}
-
-static void drbd_reclaim_net_peer_reqs(struct drbd_device *device)
-{
-	LIST_HEAD(reclaimed);
-	struct drbd_peer_request *peer_req, *t;
-
-	spin_lock_irq(&device->resource->req_lock);
-	reclaim_finished_net_peer_reqs(device, &reclaimed);
-	spin_unlock_irq(&device->resource->req_lock);
-	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
-		drbd_free_net_peer_req(device, peer_req);
-}
-
-static void conn_reclaim_net_peer_reqs(struct drbd_connection *connection)
-{
-	struct drbd_peer_device *peer_device;
-	int vnr;
-
-	rcu_read_lock();
-	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
-		struct drbd_device *device = peer_device->device;
-		if (!atomic_read(&device->pp_in_use_by_net))
-			continue;
-
-		kref_get(&device->kref);
-		rcu_read_unlock();
-		drbd_reclaim_net_peer_reqs(device);
-		kref_put(&device->kref, drbd_destroy_device);
-		rcu_read_lock();
-	}
-	rcu_read_unlock();
-}
-
 /**
  * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
  * @peer_device:	DRBD device.
@@ -263,9 +113,8 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int
 			      bool retry)
 {
 	struct drbd_device *device = peer_device->device;
-	struct page *page = NULL;
+	struct page *page;
 	struct net_conf *nc;
-	DEFINE_WAIT(wait);
 	unsigned int mxb;
 
 	rcu_read_lock();
@@ -273,37 +122,9 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int
 	mxb = nc ? nc->max_buffers : 1000000;
 	rcu_read_unlock();
 
-	if (atomic_read(&device->pp_in_use) < mxb)
-		page = __drbd_alloc_pages(device, number);
-
-	/* Try to keep the fast path fast, but occasionally we need
-	 * to reclaim the pages we lended to the network stack. */
-	if (page && atomic_read(&device->pp_in_use_by_net) > 512)
-		drbd_reclaim_net_peer_reqs(device);
-
-	while (page == NULL) {
-		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
-
-		drbd_reclaim_net_peer_reqs(device);
-
-		if (atomic_read(&device->pp_in_use) < mxb) {
-			page = __drbd_alloc_pages(device, number);
-			if (page)
-				break;
-		}
-
-		if (!retry)
-			break;
-
-		if (signal_pending(current)) {
-			drbd_warn(device, "drbd_alloc_pages interrupted!\n");
-			break;
-		}
-
-		if (schedule_timeout(HZ/10) == 0)
-			mxb = UINT_MAX;
-	}
-	finish_wait(&drbd_pp_wait, &wait);
+	if (atomic_read(&device->pp_in_use) >= mxb)
+		schedule_timeout_interruptible(HZ / 10);
+	page = __drbd_alloc_pages(number);
 
 	if (page)
 		atomic_add(number, &device->pp_in_use);
@@ -314,29 +135,25 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int
  * Is also used from inside an other spin_lock_irq(&resource->req_lock);
  * Either links the page chain back to the global pool,
  * or returns all pages to the system. */
-static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net)
+static void drbd_free_pages(struct drbd_device *device, struct page *page)
 {
-	atomic_t *a = is_net ? &device->pp_in_use_by_net : &device->pp_in_use;
-	int i;
+	struct page *tmp;
+	int i = 0;
 
 	if (page == NULL)
 		return;
 
-	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count)
-		i = page_chain_free(page);
-	else {
-		struct page *tmp;
-		tmp = page_chain_tail(page, &i);
-		spin_lock(&drbd_pp_lock);
-		page_chain_add(&drbd_pp_pool, page, tmp);
-		drbd_pp_vacant += i;
-		spin_unlock(&drbd_pp_lock);
-	}
-	i = atomic_sub_return(i, a);
+	page_chain_for_each_safe(page, tmp) {
+		set_page_private(page, 0);
+		if (page_count(page) == 1)
+			mempool_free(page, &drbd_buffer_page_pool);
+		else
+			put_page(page);
+		i++;
+	}
+	i = atomic_sub_return(i, &device->pp_in_use);
 	if (i < 0)
-		drbd_warn(device, "ASSERTION FAILED: %s: %d < 0\n",
-			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
-	wake_up(&drbd_pp_wait);
+		drbd_warn(device, "ASSERTION FAILED: pp_in_use: %d < 0\n", i);
 }
 
 /*
@@ -380,6 +197,8 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto
 					       gfpflags_allow_blocking(gfp_mask));
 		if (!page)
 			goto fail;
+		if (!mempool_is_saturated(&drbd_buffer_page_pool))
+			peer_req->flags |= EE_RELEASE_TO_MEMPOOL;
 	}
 
 	memset(peer_req, 0, sizeof(*peer_req));
@@ -403,13 +222,12 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto
 	return NULL;
 }
 
-void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req,
-		       int is_net)
+void drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req)
 {
 	might_sleep();
 	if (peer_req->flags & EE_HAS_DIGEST)
 		kfree(peer_req->digest);
-	drbd_free_pages(device, peer_req->pages, is_net);
+	drbd_free_pages(device, peer_req->pages);
 	D_ASSERT(device, atomic_read(&peer_req->pending_bios) == 0);
 	D_ASSERT(device, drbd_interval_empty(&peer_req->i));
 	if (!expect(device, !(peer_req->flags & EE_CALL_AL_COMPLETE_IO))) {
@@ -424,14 +242,13 @@ int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
 	LIST_HEAD(work_list);
 	struct drbd_peer_request *peer_req, *t;
 	int count = 0;
-	int is_net = list == &device->net_ee;
 
 	spin_lock_irq(&device->resource->req_lock);
 	list_splice_init(list, &work_list);
 	spin_unlock_irq(&device->resource->req_lock);
 
 	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
-		__drbd_free_peer_req(device, peer_req, is_net);
+		drbd_free_peer_req(device, peer_req);
 		count++;
 	}
 	return count;
@@ -443,18 +260,13 @@ int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
 static int drbd_finish_peer_reqs(struct drbd_device *device)
 {
 	LIST_HEAD(work_list);
-	LIST_HEAD(reclaimed);
 	struct drbd_peer_request *peer_req, *t;
 	int err = 0;
 
 	spin_lock_irq(&device->resource->req_lock);
-	reclaim_finished_net_peer_reqs(device, &reclaimed);
 	list_splice_init(&device->done_ee, &work_list);
 	spin_unlock_irq(&device->resource->req_lock);
 
-	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
-		drbd_free_net_peer_req(device, peer_req);
-
 	/* possible callbacks here:
 	 * e_end_block, and e_end_resync_block, e_send_superseded.
 	 * all ignore the last argument.
@@ -1975,7 +1787,7 @@ static int drbd_drain_block(struct drbd_peer_device *peer_device, int data_size)
 		data_size -= len;
 	}
 	kunmap(page);
-	drbd_free_pages(peer_device->device, page, 0);
+	drbd_free_pages(peer_device->device, page);
 	return err;
 }
@@ -5224,16 +5036,6 @@ static int drbd_disconnected(struct drbd_peer_device *peer_device)
 		put_ldev(device);
 	}
 
-	/* tcp_close and release of sendpage pages can be deferred.  I don't
-	 * want to use SO_LINGER, because apparently it can be deferred for
-	 * more than 20 seconds (longest time I checked).
-	 *
-	 * Actually we don't care for exactly when the network stack does its
-	 * put_page(), but release our reference on these pages right here.
-	 */
-	i = drbd_free_peer_reqs(device, &device->net_ee);
-	if (i)
-		drbd_info(device, "net_ee not empty, killed %u entries\n", i);
 	i = atomic_read(&device->pp_in_use_by_net);
 	if (i)
 		drbd_info(device, "pp_in_use_by_net = %d, expected 0\n", i);
@@ -5980,8 +5782,6 @@ int drbd_ack_receiver(struct drbd_thread *thi)
 	while (get_t_state(thi) == RUNNING) {
 		drbd_thread_current_set_cpu(thi);
 
-		conn_reclaim_net_peer_reqs(connection);
-
 		if (test_and_clear_bit(SEND_PING, &connection->flags)) {
 			if (drbd_send_ping(connection)) {
 				drbd_err(connection, "drbd_send_ping has failed\n");
@@ -1030,22 +1030,6 @@ out:
 	return 1;
 }
 
-/* helper */
-static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_request *peer_req)
-{
-	if (drbd_peer_req_has_active_page(peer_req)) {
-		/* This might happen if sendpage() has not finished */
-		int i = PFN_UP(peer_req->i.size);
-		atomic_add(i, &device->pp_in_use_by_net);
-		atomic_sub(i, &device->pp_in_use);
-		spin_lock_irq(&device->resource->req_lock);
-		list_add_tail(&peer_req->w.list, &device->net_ee);
-		spin_unlock_irq(&device->resource->req_lock);
-		wake_up(&drbd_pp_wait);
-	} else
-		drbd_free_peer_req(device, peer_req);
-}
-
 /**
  * w_e_end_data_req() - Worker callback, to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST
  * @w:		work object.
@@ -1059,9 +1043,8 @@ int w_e_end_data_req(struct drbd_work *w, int cancel)
 	int err;
 
 	if (unlikely(cancel)) {
-		drbd_free_peer_req(device, peer_req);
-		dec_unacked(device);
-		return 0;
+		err = 0;
+		goto out;
 	}
 
 	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
@@ -1074,12 +1057,12 @@ int w_e_end_data_req(struct drbd_work *w, int cancel)
 		err = drbd_send_ack(peer_device, P_NEG_DREPLY, peer_req);
 	}
 
-	dec_unacked(device);
-
-	move_to_net_ee_or_free(device, peer_req);
-
 	if (unlikely(err))
 		drbd_err(device, "drbd_send_block() failed\n");
+out:
+	dec_unacked(device);
+	drbd_free_peer_req(device, peer_req);
+
 	return err;
 }
@@ -1120,9 +1103,8 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
 	int err;
 
 	if (unlikely(cancel)) {
-		drbd_free_peer_req(device, peer_req);
-		dec_unacked(device);
-		return 0;
+		err = 0;
+		goto out;
 	}
 
 	if (get_ldev_if_state(device, D_FAILED)) {
@@ -1155,13 +1137,12 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
 		/* update resync data with failure */
 		drbd_rs_failed_io(peer_device, peer_req->i.sector, peer_req->i.size);
 	}
 
-	dec_unacked(device);
-
-	move_to_net_ee_or_free(device, peer_req);
-
 	if (unlikely(err))
 		drbd_err(device, "drbd_send_block() failed\n");
+out:
+	dec_unacked(device);
+	drbd_free_peer_req(device, peer_req);
+
 	return err;
 }
@@ -1176,9 +1157,8 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
 	int err, eq = 0;
 
 	if (unlikely(cancel)) {
-		drbd_free_peer_req(device, peer_req);
-		dec_unacked(device);
-		return 0;
+		err = 0;
+		goto out;
 	}
 
 	if (get_ldev(device)) {
@@ -1220,12 +1200,12 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
 		if (drbd_ratelimit())
 			drbd_err(device, "Sending NegDReply. I guess it gets messy.\n");
 	}
 
-	dec_unacked(device);
-	move_to_net_ee_or_free(device, peer_req);
-
 	if (unlikely(err))
 		drbd_err(device, "drbd_send_block/ack() failed\n");
+out:
+	dec_unacked(device);
+	drbd_free_peer_req(device, peer_req);
+
 	return err;
 }
@@ -137,20 +137,29 @@ static void loop_global_unlock(struct loop_device *lo, bool global)
 static int max_part;
 static int part_shift;
 
-static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
+static loff_t lo_calculate_size(struct loop_device *lo, struct file *file)
 {
+	struct kstat stat;
 	loff_t loopsize;
+	int ret;
 
-	/* Compute loopsize in bytes */
-	loopsize = i_size_read(file->f_mapping->host);
-	if (offset > 0)
-		loopsize -= offset;
+	/*
+	 * Get the accurate file size. This provides better results than
+	 * cached inode data, particularly for network filesystems where
+	 * metadata may be stale.
+	 */
+	ret = vfs_getattr_nosec(&file->f_path, &stat, STATX_SIZE, 0);
+	if (ret)
+		return 0;
+
+	loopsize = stat.size;
+	if (lo->lo_offset > 0)
+		loopsize -= lo->lo_offset;
 	/* offset is beyond i_size, weird but possible */
 	if (loopsize < 0)
 		return 0;
 
-	if (sizelimit > 0 && sizelimit < loopsize)
-		loopsize = sizelimit;
+	if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize)
+		loopsize = lo->lo_sizelimit;
+
 	/*
 	 * Unfortunately, if we want to do I/O on the device,
 	 * the number of 512-byte sectors has to fit into a sector_t.
@@ -158,11 +167,6 @@ static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
 	return loopsize >> 9;
 }
 
-static loff_t get_loop_size(struct loop_device *lo, struct file *file)
-{
-	return get_size(lo->lo_offset, lo->lo_sizelimit, file);
-}
-
 /*
  * We support direct I/O only if lo_offset is aligned with the logical I/O size
  * of backing device, and the logical block size of loop is bigger than that of
@@ -569,7 +573,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
 	error = -EINVAL;
 
 	/* size of the new backing store needs to be the same */
-	if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
+	if (lo_calculate_size(lo, file) != lo_calculate_size(lo, old_file))
 		goto out_err;
 
 	/*
@@ -1063,7 +1067,7 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
 	loop_update_dio(lo);
 	loop_sysfs_init(lo);
 
-	size = get_loop_size(lo, file);
+	size = lo_calculate_size(lo, file);
 	loop_set_size(lo, size);
 
 	/* Order wrt reading lo_state in loop_validate_file(). */
@@ -1255,8 +1259,7 @@ out_unfreeze:
 	if (partscan)
 		clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
 	if (!err && size_changed) {
-		loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit,
-					   lo->lo_backing_file);
+		loff_t new_size = lo_calculate_size(lo, lo->lo_backing_file);
 		loop_set_size(lo, new_size);
 	}
 out_unlock:
@@ -1399,7 +1402,7 @@ static int loop_set_capacity(struct loop_device *lo)
 	if (unlikely(lo->lo_state != Lo_bound))
 		return -ENXIO;
 
-	size = get_loop_size(lo, lo->lo_backing_file);
+	size = lo_calculate_size(lo, lo->lo_backing_file);
 	loop_set_size(lo, size);
 
 	return 0;
@@ -235,7 +235,7 @@ struct ublk_device {
 
 	struct completion	completion;
 	unsigned int		nr_queues_ready;
-	unsigned int		nr_privileged_daemon;
+	bool			unprivileged_daemons;
 	struct mutex		cancel_mutex;
 	bool			canceling;
 	pid_t			ublksrv_tgid;
@@ -1389,7 +1389,7 @@ static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq,
 {
 	blk_status_t res;
 
-	if (unlikely(ubq->fail_io))
+	if (unlikely(READ_ONCE(ubq->fail_io)))
 		return BLK_STS_TARGET;
 
 	/* With recovery feature enabled, force_abort is set in
@@ -1401,7 +1401,8 @@ static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq,
 	 * Note: force_abort is guaranteed to be seen because it is set
 	 * before request queue is unqiuesced.
 	 */
-	if (ublk_nosrv_should_queue_io(ubq) && unlikely(ubq->force_abort))
+	if (ublk_nosrv_should_queue_io(ubq) &&
+	    unlikely(READ_ONCE(ubq->force_abort)))
 		return BLK_STS_IOERR;
 
 	if (check_cancel && unlikely(ubq->canceling))
@@ -1550,7 +1551,7 @@ static void ublk_reset_ch_dev(struct ublk_device *ub)
 	/* set to NULL, otherwise new tasks cannot mmap io_cmd_buf */
 	ub->mm = NULL;
 	ub->nr_queues_ready = 0;
-	ub->nr_privileged_daemon = 0;
+	ub->unprivileged_daemons = false;
 	ub->ublksrv_tgid = -1;
 }
@@ -1644,7 +1645,6 @@ static int ublk_ch_release(struct inode *inode, struct file *filp)
 	 * Transition the device to the nosrv state. What exactly this
 	 * means depends on the recovery flags
 	 */
-	blk_mq_quiesce_queue(disk->queue);
 	if (ublk_nosrv_should_stop_dev(ub)) {
 		/*
 		 * Allow any pending/future I/O to pass through quickly
@@ -1652,8 +1652,7 @@ static int ublk_ch_release(struct inode *inode, struct file *filp)
 		 * waits for all pending I/O to complete
 		 */
 		for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
-			ublk_get_queue(ub, i)->force_abort = true;
-		blk_mq_unquiesce_queue(disk->queue);
+			WRITE_ONCE(ublk_get_queue(ub, i)->force_abort, true);
 
 		ublk_stop_dev_unlocked(ub);
 	} else {
@@ -1663,9 +1662,8 @@ static int ublk_ch_release(struct inode *inode, struct file *filp)
 		} else {
 			ub->dev_info.state = UBLK_S_DEV_FAIL_IO;
 			for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
-				ublk_get_queue(ub, i)->fail_io = true;
+				WRITE_ONCE(ublk_get_queue(ub, i)->fail_io, true);
 		}
-		blk_mq_unquiesce_queue(disk->queue);
 	}
 unlock:
 	mutex_unlock(&ub->mutex);
@@ -1980,12 +1978,10 @@ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
 	__must_hold(&ub->mutex)
 {
 	ubq->nr_io_ready++;
-	if (ublk_queue_ready(ubq)) {
+	if (ublk_queue_ready(ubq))
 		ub->nr_queues_ready++;
-
-		if (capable(CAP_SYS_ADMIN))
-			ub->nr_privileged_daemon++;
-	}
+	if (!ub->unprivileged_daemons && !capable(CAP_SYS_ADMIN))
+		ub->unprivileged_daemons = true;
 
 	if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues) {
 		/* now we are ready for handling ublk io request */
@@ -2880,8 +2876,8 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub,
 
 	ublk_apply_params(ub);
 
-	/* don't probe partitions if any one ubq daemon is un-trusted */
-	if (ub->nr_privileged_daemon != ub->nr_queues_ready)
+	/* don't probe partitions if any daemon task is un-trusted */
+	if (ub->unprivileged_daemons)
 		set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
 
 	ublk_get_device(ub);
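
Note: the ublk hunks above annotate the lockless fail_io/force_abort flag accesses. As an illustrative-only sketch (invented names, not ublk code), the WRITE_ONCE()/READ_ONCE() pairing looks like this: the slow path flips a flag once, and the hot I/O path may read it concurrently without a lock, so both sides need the annotated accessors to rule out torn or compiler-cached accesses.

	#include <linux/compiler.h>
	#include <linux/types.h>

	struct example_queue {
		bool force_abort;
	};

	static void example_set_abort(struct example_queue *q)
	{
		/* store that concurrent lockless readers must observe intact */
		WRITE_ONCE(q->force_abort, true);
	}

	static bool example_should_abort(struct example_queue *q)
	{
		/* lockless read in the fast path, paired with the WRITE_ONCE() above */
		return READ_ONCE(q->force_abort);
	}
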
@@ -642,12 +642,7 @@ static int btmtk_usb_hci_wmt_sync(struct hci_dev *hdev,
 	 * WMT command.
 	 */
 	err = wait_on_bit_timeout(&data->flags, BTMTK_TX_WAIT_VND_EVT,
-				  TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);
-	if (err == -EINTR) {
-		bt_dev_err(hdev, "Execution of wmt command interrupted");
-		clear_bit(BTMTK_TX_WAIT_VND_EVT, &data->flags);
-		goto err_free_wc;
-	}
-
+				  TASK_UNINTERRUPTIBLE, HCI_INIT_TIMEOUT);
 	if (err) {
 		bt_dev_err(hdev, "Execution of wmt command timed out");
@@ -543,9 +543,9 @@ static int ps_setup(struct hci_dev *hdev)
 	}
 
 	if (psdata->wakeup_source) {
-		ret = devm_request_irq(&serdev->dev, psdata->irq_handler,
-				       ps_host_wakeup_irq_handler,
-				       IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
+		ret = devm_request_threaded_irq(&serdev->dev, psdata->irq_handler,
+						NULL, ps_host_wakeup_irq_handler,
+						IRQF_ONESHOT,
 				       dev_name(&serdev->dev), nxpdev);
 		if (ret)
 			bt_dev_info(hdev, "error setting wakeup IRQ handler, ignoring\n");
@@ -129,8 +129,7 @@ static int cdx_rpmsg_probe(struct rpmsg_device *rpdev)
 
 	chinfo.src = RPMSG_ADDR_ANY;
 	chinfo.dst = rpdev->dst;
-	strscpy(chinfo.name, cdx_rpmsg_id_table[0].name,
-		strlen(cdx_rpmsg_id_table[0].name));
+	strscpy(chinfo.name, cdx_rpmsg_id_table[0].name, sizeof(chinfo.name));
 
 	cdx_mcdi->ept = rpmsg_create_ept(rpdev, cdx_rpmsg_cb, NULL, chinfo);
 	if (!cdx_mcdi->ept) {
@@ -1587,6 +1587,9 @@ static int do_insnlist_ioctl(struct comedi_device *dev,
 				memset(&data[n], 0, (MIN_SAMPLES - n) *
 						    sizeof(unsigned int));
 			}
+		} else {
+			memset(data, 0, max_t(unsigned int, n, MIN_SAMPLES) *
+					sizeof(unsigned int));
 		}
 		ret = parse_insn(dev, insns + i, data, file);
 		if (ret < 0)
@@ -1670,6 +1673,8 @@ static int do_insn_ioctl(struct comedi_device *dev,
 			memset(&data[insn->n], 0,
 			       (MIN_SAMPLES - insn->n) * sizeof(unsigned int));
 		}
+	} else {
+		memset(data, 0, n_data * sizeof(unsigned int));
 	}
 	ret = parse_insn(dev, insn, data, file);
 	if (ret < 0)
@@ -620,11 +620,9 @@ static int insn_rw_emulate_bits(struct comedi_device *dev,
 	unsigned int chan = CR_CHAN(insn->chanspec);
 	unsigned int base_chan = (chan < 32) ? 0 : chan;
 	unsigned int _data[2];
+	unsigned int i;
 	int ret;
 
-	if (insn->n == 0)
-		return 0;
-
 	memset(_data, 0, sizeof(_data));
 	memset(&_insn, 0, sizeof(_insn));
 	_insn.insn = INSN_BITS;
@@ -636,17 +634,20 @@ static int insn_rw_emulate_bits(struct comedi_device *dev,
 		if (!(s->subdev_flags & SDF_WRITABLE))
 			return -EINVAL;
 		_data[0] = 1U << (chan - base_chan);		/* mask */
-		_data[1] = data[0] ? (1U << (chan - base_chan)) : 0; /* bits */
 	}
+	for (i = 0; i < insn->n; i++) {
+		if (insn->insn == INSN_WRITE)
+			_data[1] = data[i] ? _data[0] : 0;	/* bits */
 
-	ret = s->insn_bits(dev, s, &_insn, _data);
-	if (ret < 0)
-		return ret;
+		ret = s->insn_bits(dev, s, &_insn, _data);
+		if (ret < 0)
+			return ret;
 
-	if (insn->insn == INSN_READ)
-		data[0] = (_data[1] >> (chan - base_chan)) & 1;
+		if (insn->insn == INSN_READ)
+			data[i] = (_data[1] >> (chan - base_chan)) & 1;
+	}
 
-	return 1;
+	return insn->n;
 }
 
 static int __comedi_device_postconfig_async(struct comedi_device *dev,
@@ -328,7 +328,8 @@ static int pcl726_attach(struct comedi_device *dev,
 	 * Hook up the external trigger source interrupt only if the
 	 * user config option is valid and the board supports interrupts.
 	 */
-	if (it->options[1] && (board->irq_mask & (1 << it->options[1]))) {
+	if (it->options[1] > 0 && it->options[1] < 16 &&
+	    (board->irq_mask & (1U << it->options[1]))) {
 		ret = request_irq(it->options[1], pcl726_interrupt, 0,
 				  dev->board_name, dev);
 		if (ret == 0) {
@@ -2793,6 +2793,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
 	X86_MATCH(INTEL_GRANITERAPIDS_X, core_funcs),
 	X86_MATCH(INTEL_ATOM_CRESTMONT, core_funcs),
 	X86_MATCH(INTEL_ATOM_CRESTMONT_X, core_funcs),
+	X86_MATCH(INTEL_ATOM_DARKMONT_X, core_funcs),
 	{}
 };
 #endif
@@ -97,6 +97,14 @@ static inline int which_bucket(u64 duration_ns)
 
 static DEFINE_PER_CPU(struct menu_device, menu_devices);
 
+static void menu_update_intervals(struct menu_device *data, unsigned int interval_us)
+{
+	/* Update the repeating-pattern data. */
+	data->intervals[data->interval_ptr++] = interval_us;
+	if (data->interval_ptr >= INTERVALS)
+		data->interval_ptr = 0;
+}
+
 static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
 
 /*
@@ -222,6 +230,14 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 	if (data->needs_update) {
 		menu_update(drv, dev);
 		data->needs_update = 0;
+	} else if (!dev->last_residency_ns) {
+		/*
+		 * This happens when the driver rejects the previously selected
+		 * idle state and returns an error, so update the recent
+		 * intervals table to prevent invalid information from being
+		 * used going forward.
+		 */
+		menu_update_intervals(data, UINT_MAX);
 	}
 
 	/* Find the shortest expected idle interval. */
@@ -271,20 +287,15 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 		return 0;
 	}
 
-	if (tick_nohz_tick_stopped()) {
-		/*
-		 * If the tick is already stopped, the cost of possible short
-		 * idle duration misprediction is much higher, because the CPU
-		 * may be stuck in a shallow idle state for a long time as a
-		 * result of it.  In that case say we might mispredict and use
-		 * the known time till the closest timer event for the idle
-		 * state selection.
-		 */
-		if (predicted_ns < TICK_NSEC)
-			predicted_ns = data->next_timer_ns;
-	} else if (latency_req > predicted_ns) {
-		latency_req = predicted_ns;
-	}
+	/*
+	 * If the tick is already stopped, the cost of possible short idle
+	 * duration misprediction is much higher, because the CPU may be stuck
+	 * in a shallow idle state for a long time as a result of it. In that
+	 * case, say we might mispredict and use the known time till the closest
+	 * timer event for the idle state selection.
+	 */
+	if (tick_nohz_tick_stopped() && predicted_ns < TICK_NSEC)
+		predicted_ns = data->next_timer_ns;
 
 	/*
 	 * Find the idle state with the lowest power while satisfying
@@ -300,13 +311,15 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 		if (idx == -1)
 			idx = i; /* first enabled state */
 
+		if (s->exit_latency_ns > latency_req)
+			break;
+
 		if (s->target_residency_ns > predicted_ns) {
 			/*
 			 * Use a physical idle state, not busy polling, unless
 			 * a timer is going to trigger soon enough.
 			 */
 			if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
-			    s->exit_latency_ns <= latency_req &&
 			    s->target_residency_ns <= data->next_timer_ns) {
 				predicted_ns = s->target_residency_ns;
 				idx = i;
@@ -338,8 +351,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 
 			return idx;
 		}
-		if (s->exit_latency_ns > latency_req)
-			break;
 
 		idx = i;
 	}
@@ -482,10 +493,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 
 	data->correction_factor[data->bucket] = new_factor;
 
-	/* update the repeating-pattern data */
-	data->intervals[data->interval_ptr++] = ktime_to_us(measured_ns);
-	if (data->interval_ptr >= INTERVALS)
-		data->interval_ptr = 0;
+	menu_update_intervals(data, ktime_to_us(measured_ns));
 }
 
 /**
@ -550,6 +550,23 @@ const struct fw_address_region fw_unit_space_region =
|
||||||
{ .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
|
{ .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
|
||||||
#endif /* 0 */
|
#endif /* 0 */
|
||||||
|
|
||||||
|
static void complete_address_handler(struct kref *kref)
|
||||||
|
{
|
||||||
|
struct fw_address_handler *handler = container_of(kref, struct fw_address_handler, kref);
|
||||||
|
|
||||||
|
complete(&handler->done);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void get_address_handler(struct fw_address_handler *handler)
|
||||||
|
{
|
||||||
|
kref_get(&handler->kref);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int put_address_handler(struct fw_address_handler *handler)
|
||||||
|
{
|
||||||
|
return kref_put(&handler->kref, complete_address_handler);
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* fw_core_add_address_handler() - register for incoming requests
|
* fw_core_add_address_handler() - register for incoming requests
|
||||||
* @handler: callback
|
* @handler: callback
|
||||||
|
|
@ -596,6 +613,8 @@ int fw_core_add_address_handler(struct fw_address_handler *handler,
|
||||||
if (other != NULL) {
|
if (other != NULL) {
|
||||||
handler->offset += other->length;
|
handler->offset += other->length;
|
||||||
} else {
|
} else {
|
||||||
|
init_completion(&handler->done);
|
||||||
|
kref_init(&handler->kref);
|
||||||
list_add_tail_rcu(&handler->link, &address_handler_list);
|
list_add_tail_rcu(&handler->link, &address_handler_list);
|
||||||
ret = 0;
|
ret = 0;
|
||||||
break;
|
break;
|
||||||
|
|
@ -621,6 +640,9 @@ void fw_core_remove_address_handler(struct fw_address_handler *handler)
|
||||||
list_del_rcu(&handler->link);
|
list_del_rcu(&handler->link);
|
||||||
|
|
||||||
synchronize_rcu();
|
synchronize_rcu();
|
||||||
|
|
||||||
|
if (!put_address_handler(handler))
|
||||||
|
wait_for_completion(&handler->done);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(fw_core_remove_address_handler);
|
EXPORT_SYMBOL(fw_core_remove_address_handler);
|
||||||
|
|
||||||
|
|
@@ -914,22 +936,31 @@ static void handle_exclusive_region_request(struct fw_card *card,
 		handler = lookup_enclosing_address_handler(&address_handler_list, offset,
 							   request->length);
 		if (handler)
-			handler->address_callback(card, request, tcode, destination, source,
-						  p->generation, offset, request->data,
-						  request->length, handler->callback_data);
+			get_address_handler(handler);
 	}
 
-	if (!handler)
+	if (!handler) {
 		fw_send_response(card, request, RCODE_ADDRESS_ERROR);
+		return;
+	}
+
+	// Outside the RCU read-side critical section. Without spinlock. With reference count.
+	handler->address_callback(card, request, tcode, destination, source, p->generation, offset,
+				  request->data, request->length, handler->callback_data);
+	put_address_handler(handler);
 }
 
+// To use kmalloc allocator efficiently, this should be power of two.
+#define BUFFER_ON_KERNEL_STACK_SIZE 4
+
 static void handle_fcp_region_request(struct fw_card *card,
 				      struct fw_packet *p,
 				      struct fw_request *request,
 				      unsigned long long offset)
 {
-	struct fw_address_handler *handler;
-	int tcode, destination, source;
+	struct fw_address_handler *buffer_on_kernel_stack[BUFFER_ON_KERNEL_STACK_SIZE];
+	struct fw_address_handler *handler, **handlers;
+	int tcode, destination, source, i, count, buffer_size;
 
 	if ((offset != (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
 	     offset != (CSR_REGISTER_BASE | CSR_FCP_RESPONSE)) ||

@@ -950,14 +981,54 @@ static void handle_fcp_region_request(struct fw_card *card,
 		return;
 	}
 
+	count = 0;
+	handlers = buffer_on_kernel_stack;
+	buffer_size = ARRAY_SIZE(buffer_on_kernel_stack);
 	scoped_guard(rcu) {
 		list_for_each_entry_rcu(handler, &address_handler_list, link) {
-			if (is_enclosing_handler(handler, offset, request->length))
-				handler->address_callback(card, request, tcode, destination, source,
-							  p->generation, offset, request->data,
-							  request->length, handler->callback_data);
+			if (is_enclosing_handler(handler, offset, request->length)) {
+				if (count >= buffer_size) {
+					int next_size = buffer_size * 2;
+					struct fw_address_handler **buffer_on_kernel_heap;
+
+					if (handlers == buffer_on_kernel_stack)
+						buffer_on_kernel_heap = NULL;
+					else
+						buffer_on_kernel_heap = handlers;
+
+					buffer_on_kernel_heap =
+						krealloc_array(buffer_on_kernel_heap, next_size,
+							       sizeof(*buffer_on_kernel_heap), GFP_ATOMIC);
+					// FCP is used for purposes unrelated to significant system
+					// resources (e.g. storage or networking), so allocation
+					// failures are not considered so critical.
+					if (!buffer_on_kernel_heap)
+						break;
+
+					if (handlers == buffer_on_kernel_stack) {
+						memcpy(buffer_on_kernel_heap, buffer_on_kernel_stack,
+						       sizeof(buffer_on_kernel_stack));
+					}
+
+					handlers = buffer_on_kernel_heap;
+					buffer_size = next_size;
+				}
+				get_address_handler(handler);
+				handlers[count++] = handler;
+			}
 		}
 	}
 
+	for (i = 0; i < count; ++i) {
+		handler = handlers[i];
+		handler->address_callback(card, request, tcode, destination, source,
+					  p->generation, offset, request->data,
+					  request->length, handler->callback_data);
+		put_address_handler(handler);
+	}
+
+	if (handlers != buffer_on_kernel_stack)
+		kfree(handlers);
+
 	fw_send_response(card, request, RCODE_COMPLETE);
 }

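handle_fcp_region_request() now gathers all matching handlers first and only afterwards, outside the RCU section, invokes their callbacks; the scratch array starts on the stack and switches to a doubling heap buffer only when more than BUFFER_ON_KERNEL_STACK_SIZE handlers match. A standalone userspace sketch of that growth strategy, with realloc() standing in for krealloc_array() and made-up sample data (nothing here is taken from the patch itself):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define STACK_BUFFER_SIZE 4	/* small power of two, as in the patch */

    int main(void)
    {
    	int src[] = { 1, 2, 4, 6, 7, 8, 10, 12 };
    	size_t n = sizeof(src) / sizeof(src[0]);

    	int stack_buf[STACK_BUFFER_SIZE];
    	int *buf = stack_buf;
    	size_t cap = STACK_BUFFER_SIZE, count = 0;

    	for (size_t i = 0; i < n; i++) {
    		if (src[i] % 2)		/* collect "matching" items only: evens */
    			continue;
    		if (count >= cap) {
    			size_t next_cap = cap * 2;
    			/* realloc(NULL, ...) behaves like malloc(); this mirrors the
    			 * first krealloc_array() call when still on the stack buffer. */
    			int *heap_buf = realloc(buf == stack_buf ? NULL : buf,
    						next_cap * sizeof(*heap_buf));
    			if (!heap_buf)
    				break;	/* like the patch: give up on allocation failure */
    			if (buf == stack_buf)
    				memcpy(heap_buf, stack_buf, sizeof(stack_buf));
    			buf = heap_buf;
    			cap = next_cap;
    		}
    		buf[count++] = src[i];
    	}

    	for (size_t i = 0; i < count; i++)
    		printf("%d ", buf[i]);
    	printf("\n");			/* prints: 2 4 6 8 10 12 */

    	if (buf != stack_buf)
    		free(buf);
    	return 0;
    }
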
@@ -405,12 +405,12 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt)
 		}
 	}
 
-	priv->dma_nelms =
-	    dma_map_sgtable(mgr->dev.parent, sgt, DMA_TO_DEVICE, 0);
-	if (priv->dma_nelms == 0) {
+	err = dma_map_sgtable(mgr->dev.parent, sgt, DMA_TO_DEVICE, 0);
+	if (err) {
 		dev_err(&mgr->dev, "Unable to DMA map (TO_DEVICE)\n");
-		return -ENOMEM;
+		return err;
 	}
+	priv->dma_nelms = sgt->nents;
 
 	/* enable clock */
 	err = clk_enable(priv->clk);

@@ -190,9 +190,7 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev)
 	struct mlxbf3_gpio_context *gs;
 	struct gpio_irq_chip *girq;
 	struct gpio_chip *gc;
-	char *colon_ptr;
 	int ret, irq;
-	long num;
 
 	gs = devm_kzalloc(dev, sizeof(*gs), GFP_KERNEL);
 	if (!gs)

@@ -229,20 +227,7 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev)
 	gc->owner = THIS_MODULE;
 	gc->add_pin_ranges = mlxbf3_gpio_add_pin_ranges;
 
-	colon_ptr = strchr(dev_name(dev), ':');
-	if (!colon_ptr) {
-		dev_err(dev, "invalid device name format\n");
-		return -EINVAL;
-	}
-
-	ret = kstrtol(++colon_ptr, 16, &num);
-	if (ret) {
-		dev_err(dev, "invalid device instance\n");
-		return ret;
-	}
-
-	if (!num) {
-		irq = platform_get_irq(pdev, 0);
+	irq = platform_get_irq_optional(pdev, 0);
 	if (irq >= 0) {
 		girq = &gs->gc.irq;
 		gpio_irq_chip_set_chip(girq, &gpio_mlxbf3_irqchip);

@@ -262,7 +247,6 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev)
 		if (ret)
 			return dev_err_probe(dev, ret, "failed to request IRQ");
 	}
-	}
 
 	platform_set_drvdata(pdev, gs);

@@ -1139,6 +1139,9 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 		}
 	}
 
+	if (!amdgpu_vm_ready(vm))
+		return -EINVAL;
+
 	r = amdgpu_vm_clear_freed(adev, vm, NULL);
 	if (r)
 		return r;

@@ -88,8 +88,8 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	}
 
 	r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
-			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
-			     AMDGPU_PTE_EXECUTABLE);
+			     AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
+			     AMDGPU_VM_PAGE_EXECUTABLE);
 
 	if (r) {
 		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);

@@ -514,7 +514,7 @@ bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
 		return false;
 
 	if (drm_gem_is_imported(obj)) {
-		struct dma_buf *dma_buf = obj->dma_buf;
+		struct dma_buf *dma_buf = obj->import_attach->dmabuf;
 
 		if (dma_buf->ops != &amdgpu_dmabuf_ops)
 			/* No XGMI with non AMD GPUs */

@@ -317,7 +317,8 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
 	 */
 	if (!vm->is_compute_context || !vm->process_info)
 		return 0;
-	if (!drm_gem_is_imported(obj) || !dma_buf_is_dynamic(obj->dma_buf))
+	if (!drm_gem_is_imported(obj) ||
+	    !dma_buf_is_dynamic(obj->import_attach->dmabuf))
 		return 0;
 	mutex_lock_nested(&vm->process_info->lock, 1);
 	if (!WARN_ON(!vm->process_info->eviction_fence)) {

@@ -1039,15 +1039,28 @@ int psp_update_fw_reservation(struct psp_context *psp)
 {
 	int ret;
 	uint64_t reserv_addr, reserv_addr_ext;
-	uint32_t reserv_size, reserv_size_ext;
+	uint32_t reserv_size, reserv_size_ext, mp0_ip_ver;
 	struct amdgpu_device *adev = psp->adev;
 
+	mp0_ip_ver = amdgpu_ip_version(adev, MP0_HWIP, 0);
+
 	if (amdgpu_sriov_vf(psp->adev))
 		return 0;
 
-	if ((amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(14, 0, 2)) &&
-	    (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(14, 0, 3)))
+	switch (mp0_ip_ver) {
+	case IP_VERSION(14, 0, 2):
+		if (adev->psp.sos.fw_version < 0x3b0e0d)
+			return 0;
+		break;
+
+	case IP_VERSION(14, 0, 3):
+		if (adev->psp.sos.fw_version < 0x3a0e14)
+			return 0;
+		break;
+
+	default:
 		return 0;
+	}
 
 	ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_ADDR, &reserv_addr, &reserv_size);
 	if (ret)

@@ -654,11 +654,10 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
  * Check if all VM PDs/PTs are ready for updates
  *
  * Returns:
- * True if VM is not evicting.
+ * True if VM is not evicting and all VM entities are not stopped
  */
 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
 {
-	bool empty;
 	bool ret;
 
 	amdgpu_vm_eviction_lock(vm);

@@ -666,10 +665,18 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
 	amdgpu_vm_eviction_unlock(vm);
 
 	spin_lock(&vm->status_lock);
-	empty = list_empty(&vm->evicted);
+	ret &= list_empty(&vm->evicted);
 	spin_unlock(&vm->status_lock);
 
-	return ret && empty;
+	spin_lock(&vm->immediate.lock);
+	ret &= !vm->immediate.stopped;
+	spin_unlock(&vm->immediate.lock);
+
+	spin_lock(&vm->delayed.lock);
+	ret &= !vm->delayed.stopped;
+	spin_unlock(&vm->delayed.lock);
+
+	return ret;
 }
 
 /**

@@ -1276,7 +1283,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 		struct drm_gem_object *obj = &bo->tbo.base;
 
 		if (drm_gem_is_imported(obj) && bo_va->is_xgmi) {
-			struct dma_buf *dma_buf = obj->dma_buf;
+			struct dma_buf *dma_buf = obj->import_attach->dmabuf;
 			struct drm_gem_object *gobj = dma_buf->priv;
 			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

@@ -648,9 +648,8 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
 	list_for_each_entry(block, &vres->blocks, link)
 		vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
 
-	amdgpu_vram_mgr_do_reserve(man);
-
 	drm_buddy_free_list(mm, &vres->blocks, vres->flags);
+	amdgpu_vram_mgr_do_reserve(man);
 	mutex_unlock(&mgr->lock);
 
 	atomic64_sub(vis_usage, &mgr->vis_usage);

@@ -7792,6 +7792,9 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn);
 	int ret;
 
+	if (WARN_ON(unlikely(!old_con_state || !new_con_state)))
+		return -EINVAL;
+
 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
 
 	if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {

@@ -299,6 +299,25 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
 	irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
 
 	if (enable) {
+		struct dc *dc = adev->dm.dc;
+		struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
+		struct psr_settings *psr = &acrtc_state->stream->link->psr_settings;
+		struct replay_settings *pr = &acrtc_state->stream->link->replay_settings;
+		bool sr_supported = (psr->psr_version != DC_PSR_VERSION_UNSUPPORTED) ||
+				    pr->config.replay_supported;
+
+		/*
+		 * IPS & self-refresh feature can cause vblank counter resets between
+		 * vblank disable and enable.
+		 * It may cause system stuck due to waiting for the vblank counter.
+		 * Call this function to estimate missed vblanks by using timestamps and
+		 * update the vblank counter in DRM.
+		 */
+		if (dc->caps.ips_support &&
+		    dc->config.disable_ips != DMUB_IPS_DISABLE_ALL &&
+		    sr_supported && vblank->config.disable_immediate)
+			drm_crtc_vblank_restore(crtc);
+
 		/* vblank irq on -> Only need vupdate irq in vrr mode */
 		if (amdgpu_dm_crtc_vrr_active(acrtc_state))
 			rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, true);

@@ -174,11 +174,8 @@ static struct graphics_object_id bios_parser_get_connector_id(
 		return object_id;
 	}
 
-	if (tbl->ucNumberOfObjects <= i) {
-		dm_error("Can't find connector id %d in connector table of size %d.\n",
-			 i, tbl->ucNumberOfObjects);
+	if (tbl->ucNumberOfObjects <= i)
 		return object_id;
-	}
 
 	id = le16_to_cpu(tbl->asObjects[i].usObjectID);
 	object_id = object_id_from_bios_object_id(id);

@@ -993,7 +993,7 @@ static enum bp_result set_pixel_clock_v3(
 	allocation.sPCLKInput.usFbDiv =
 			cpu_to_le16((uint16_t)bp_params->feedback_divider);
 	allocation.sPCLKInput.ucFracFbDiv =
-			(uint8_t)bp_params->fractional_feedback_divider;
+			(uint8_t)(bp_params->fractional_feedback_divider / 100000);
 	allocation.sPCLKInput.ucPostDiv =
 			(uint8_t)bp_params->pixel_clock_post_divider;

@@ -72,9 +72,9 @@ static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
 /* ClocksStateLow */
 { .display_clk_khz = 352000, .pixel_clk_khz = 330000},
 /* ClocksStateNominal */
-{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
+{ .display_clk_khz = 625000, .pixel_clk_khz = 400000 },
 /* ClocksStatePerformance */
-{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
+{ .display_clk_khz = 625000, .pixel_clk_khz = 400000 } };
 
 int dentist_get_divider_from_did(int did)
 {

@@ -391,8 +391,6 @@ static void dce_pplib_apply_display_requirements(
 {
 	struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
 
-	pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
-
 	dce110_fill_display_configs(context, pp_display_cfg);
 
 	if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)

@@ -405,11 +403,9 @@ static void dce_update_clocks(struct clk_mgr *clk_mgr_base,
 {
 	struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
 	struct dm_pp_power_level_change_request level_change_req;
-	int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
-	/*TODO: W/A for dal3 linux, investigate why this works */
-	if (!clk_mgr_dce->dfs_bypass_active)
-		patched_disp_clk = patched_disp_clk * 115 / 100;
+	const int max_disp_clk =
+		clk_mgr_dce->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz;
+	int patched_disp_clk = MIN(max_disp_clk, context->bw_ctx.bw.dce.dispclk_khz);
 
 	level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context);
 	/* get max clock state from PPLIB */

@@ -120,9 +120,15 @@ void dce110_fill_display_configs(
 	const struct dc_state *context,
 	struct dm_pp_display_configuration *pp_display_cfg)
 {
+	struct dc *dc = context->clk_mgr->ctx->dc;
 	int j;
 	int num_cfgs = 0;
 
+	pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
+	pp_display_cfg->disp_clk_khz = dc->clk_mgr->clks.dispclk_khz;
+	pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
+	pp_display_cfg->crtc_index = dc->res_pool->res_cap->num_timing_generator;
+
 	for (j = 0; j < context->stream_count; j++) {
 		int k;

@@ -164,6 +170,23 @@ void dce110_fill_display_configs(
 		cfg->v_refresh /= stream->timing.h_total;
 		cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
 						/ stream->timing.v_total;
+
+		/* Find first CRTC index and calculate its line time.
+		 * This is necessary for DPM on SI GPUs.
+		 */
+		if (cfg->pipe_idx < pp_display_cfg->crtc_index) {
+			const struct dc_crtc_timing *timing =
+				&context->streams[0]->timing;
+
+			pp_display_cfg->crtc_index = cfg->pipe_idx;
+			pp_display_cfg->line_time_in_us =
+				timing->h_total * 10000 / timing->pix_clk_100hz;
+		}
+	}
+
+	if (!num_cfgs) {
+		pp_display_cfg->crtc_index = 0;
+		pp_display_cfg->line_time_in_us = 0;
 	}
 
 	pp_display_cfg->display_count = num_cfgs;

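The line-time computation added above is plain integer arithmetic: h_total is in pixels and pix_clk_100hz is the pixel clock in units of 100 Hz, so h_total * 10000 / pix_clk_100hz yields microseconds. A quick standalone check with assumed example numbers (a common 1080p CEA timing; the figures are illustrative, not taken from the patch):

    #include <stdio.h>

    int main(void)
    {
    	unsigned int h_total = 2200;           /* pixels per scanline (assumed example) */
    	unsigned int pix_clk_100hz = 1485000;  /* 148.5 MHz expressed in 100 Hz units   */

    	/* Same integer formula as in dce110_fill_display_configs(). */
    	unsigned int line_time_in_us = h_total * 10000 / pix_clk_100hz;

    	printf("line_time_in_us = %u\n", line_time_in_us);  /* prints 14 (~14.8 us, truncated) */
    	return 0;
    }
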
@@ -223,25 +246,8 @@ void dce11_pplib_apply_display_requirements(
 	pp_display_cfg->min_engine_clock_deep_sleep_khz
 			= context->bw_ctx.bw.dce.sclk_deep_sleep_khz;
 
-	pp_display_cfg->avail_mclk_switch_time_us =
-			dce110_get_min_vblank_time_us(context);
-	/* TODO: dce11.2*/
-	pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
-
-	pp_display_cfg->disp_clk_khz = dc->clk_mgr->clks.dispclk_khz;
-
 	dce110_fill_display_configs(context, pp_display_cfg);
 
-	/* TODO: is this still applicable?*/
-	if (pp_display_cfg->display_count == 1) {
-		const struct dc_crtc_timing *timing =
-			&context->streams[0]->timing;
-
-		pp_display_cfg->crtc_index =
-			pp_display_cfg->disp_configs[0].pipe_idx;
-		pp_display_cfg->line_time_in_us = timing->h_total * 10000 / timing->pix_clk_100hz;
-	}
-
 	if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
 		dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
 }

@@ -83,22 +83,13 @@ static const struct state_dependent_clocks dce60_max_clks_by_state[] = {
 static int dce60_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
 {
 	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
-	int dprefclk_wdivider;
-	int dp_ref_clk_khz;
-	int target_div;
+	struct dc_context *ctx = clk_mgr_base->ctx;
+	int dp_ref_clk_khz = 0;
 
-	/* DCE6 has no DPREFCLK_CNTL to read DP Reference Clock source */
-
-	/* Read the mmDENTIST_DISPCLK_CNTL to get the currently
-	 * programmed DID DENTIST_DPREFCLK_WDIVIDER*/
-	REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
-
-	/* Convert DENTIST_DPREFCLK_WDIVIDERto actual divider*/
-	target_div = dentist_get_divider_from_did(dprefclk_wdivider);
-
-	/* Calculate the current DFS clock, in kHz.*/
-	dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
-		* clk_mgr->base.dentist_vco_freq_khz) / target_div;
+	if (ASIC_REV_IS_TAHITI_P(ctx->asic_id.hw_internal_rev))
+		dp_ref_clk_khz = ctx->dc_bios->fw_info.default_display_engine_pll_frequency;
+	else
+		dp_ref_clk_khz = clk_mgr_base->clks.dispclk_khz;
 
 	return dce_adjust_dp_ref_freq_for_ss(clk_mgr, dp_ref_clk_khz);
 }

@@ -109,8 +100,6 @@ static void dce60_pplib_apply_display_requirements(
 {
 	struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
 
-	pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
-
 	dce110_fill_display_configs(context, pp_display_cfg);
 
 	if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)

@@ -123,11 +112,9 @@ static void dce60_update_clocks(struct clk_mgr *clk_mgr_base,
 {
 	struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
 	struct dm_pp_power_level_change_request level_change_req;
-	int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
-	/*TODO: W/A for dal3 linux, investigate why this works */
-	if (!clk_mgr_dce->dfs_bypass_active)
-		patched_disp_clk = patched_disp_clk * 115 / 100;
+	const int max_disp_clk =
+		clk_mgr_dce->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz;
+	int patched_disp_clk = MIN(max_disp_clk, context->bw_ctx.bw.dce.dispclk_khz);
 
 	level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context);
 	/* get max clock state from PPLIB */

@@ -217,11 +217,24 @@ static bool create_links(
 		connectors_num,
 		num_virtual_links);
 
-	// condition loop on link_count to allow skipping invalid indices
+	/* When getting the number of connectors, the VBIOS reports the number of valid indices,
+	 * but it doesn't say which indices are valid, and not every index has an actual connector.
+	 * So, if we don't find a connector on an index, that is not an error.
+	 *
+	 * - There is no guarantee that the first N indices will be valid
+	 * - VBIOS may report a higher amount of valid indices than there are actual connectors
+	 * - Some VBIOS have valid configurations for more connectors than there actually are
+	 *   on the card. This may be because the manufacturer used the same VBIOS for different
+	 *   variants of the same card.
+	 */
 	for (i = 0; dc->link_count < connectors_num && i < MAX_LINKS; i++) {
+		struct graphics_object_id connector_id = bios->funcs->get_connector_id(bios, i);
 		struct link_init_data link_init_params = {0};
 		struct dc_link *link;
 
+		if (connector_id.id == CONNECTOR_ID_UNKNOWN)
+			continue;
+
 		DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);
 
 		link_init_params.ctx = dc->ctx;

@@ -896,13 +896,13 @@ void dce110_link_encoder_construct(
 			enc110->base.id, &bp_cap_info);
 
 	/* Override features with DCE-specific values */
-	if (BP_RESULT_OK == result) {
+	if (result == BP_RESULT_OK) {
 		enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
 				bp_cap_info.DP_HBR2_EN;
 		enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
 				bp_cap_info.DP_HBR3_EN;
 		enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
-	} else {
+	} else if (result != BP_RESULT_NORECORD) {
 		DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
 				__func__,
 				result);

@@ -1798,13 +1798,13 @@ void dce60_link_encoder_construct(
 			enc110->base.id, &bp_cap_info);
 
 	/* Override features with DCE-specific values */
-	if (BP_RESULT_OK == result) {
+	if (result == BP_RESULT_OK) {
 		enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
 				bp_cap_info.DP_HBR2_EN;
 		enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
 				bp_cap_info.DP_HBR3_EN;
 		enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
-	} else {
+	} else if (result != BP_RESULT_NORECORD) {
 		DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
 				__func__,
 				result);

@@ -4,7 +4,6 @@
 
 #include "dc.h"
 #include "dc_dmub_srv.h"
-#include "dc_dp_types.h"
 #include "dmub/dmub_srv.h"
 #include "core_types.h"
 #include "dmub_replay.h"

@@ -44,45 +43,21 @@ static void dmub_replay_get_state(struct dmub_replay *dmub, enum replay_state *s
 /*
  * Enable/Disable Replay.
  */
-static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait, uint8_t panel_inst,
-	struct dc_link *link)
+static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait, uint8_t panel_inst)
 {
 	union dmub_rb_cmd cmd;
 	struct dc_context *dc = dmub->ctx;
 	uint32_t retry_count;
 	enum replay_state state = REPLAY_STATE_0;
-	struct pipe_ctx *pipe_ctx = NULL;
-	struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx;
-	uint8_t i;
 
 	memset(&cmd, 0, sizeof(cmd));
 	cmd.replay_enable.header.type = DMUB_CMD__REPLAY;
 	cmd.replay_enable.data.panel_inst = panel_inst;
 
 	cmd.replay_enable.header.sub_type = DMUB_CMD__REPLAY_ENABLE;
-	if (enable) {
+	if (enable)
 		cmd.replay_enable.data.enable = REPLAY_ENABLE;
-		// hpo stream/link encoder assignments are not static, need to update everytime we try to enable replay
-		if (link->cur_link_settings.link_rate >= LINK_RATE_UHBR10) {
-			for (i = 0; i < MAX_PIPES; i++) {
-				if (res_ctx &&
-				    res_ctx->pipe_ctx[i].stream &&
-				    res_ctx->pipe_ctx[i].stream->link &&
-				    res_ctx->pipe_ctx[i].stream->link == link &&
-				    res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) {
-					pipe_ctx = &res_ctx->pipe_ctx[i];
-					//TODO: refactor for multi edp support
-					break;
-				}
-			}
-
-			if (!pipe_ctx)
-				return;
-
-			cmd.replay_enable.data.hpo_stream_enc_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
-			cmd.replay_enable.data.hpo_link_enc_inst = pipe_ctx->link_res.hpo_dp_link_enc->inst;
-		}
-	} else
+	else
 		cmd.replay_enable.data.enable = REPLAY_DISABLE;
 
 	cmd.replay_enable.header.payload_bytes = sizeof(struct dmub_rb_cmd_replay_enable_data);

@@ -174,17 +149,6 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub,
 	copy_settings_data->digbe_inst = replay_context->digbe_inst;
 	copy_settings_data->digfe_inst = replay_context->digfe_inst;
 
-	if (link->cur_link_settings.link_rate >= LINK_RATE_UHBR10) {
-		if (pipe_ctx->stream_res.hpo_dp_stream_enc)
-			copy_settings_data->hpo_stream_enc_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
-		else
-			copy_settings_data->hpo_stream_enc_inst = 0;
-		if (pipe_ctx->link_res.hpo_dp_link_enc)
-			copy_settings_data->hpo_link_enc_inst = pipe_ctx->link_res.hpo_dp_link_enc->inst;
-		else
-			copy_settings_data->hpo_link_enc_inst = 0;
-	}
-
 	if (pipe_ctx->plane_res.dpp)
 		copy_settings_data->dpp_inst = pipe_ctx->plane_res.dpp->inst;
 	else

@@ -247,7 +211,6 @@ static void dmub_replay_set_coasting_vtotal(struct dmub_replay *dmub,
 	pCmd->header.type = DMUB_CMD__REPLAY;
 	pCmd->header.sub_type = DMUB_CMD__REPLAY_SET_COASTING_VTOTAL;
 	pCmd->header.payload_bytes = sizeof(struct dmub_cmd_replay_set_coasting_vtotal_data);
-	pCmd->replay_set_coasting_vtotal_data.panel_inst = panel_inst;
 	pCmd->replay_set_coasting_vtotal_data.coasting_vtotal = (coasting_vtotal & 0xFFFF);
 	pCmd->replay_set_coasting_vtotal_data.coasting_vtotal_high = (coasting_vtotal & 0xFFFF0000) >> 16;

@@ -19,7 +19,7 @@ struct dmub_replay_funcs {
 	void (*replay_get_state)(struct dmub_replay *dmub, enum replay_state *state,
 		uint8_t panel_inst);
 	void (*replay_enable)(struct dmub_replay *dmub, bool enable, bool wait,
-		uint8_t panel_inst, struct dc_link *link);
+		uint8_t panel_inst);
 	bool (*replay_copy_settings)(struct dmub_replay *dmub, struct dc_link *link,
 		struct replay_context *replay_context, uint8_t panel_inst);
 	void (*replay_set_power_opt)(struct dmub_replay *dmub, unsigned int power_opt,
