/allrandom.config
/allyes.config
+# Kconfig savedefconfig output
+/defconfig
+
# Kdevelop4
*.kdev4
Frank Zago <fzago@systemfabricworks.com>
Gao Xiang <xiang@kernel.org> <gaoxiang25@huawei.com>
Gao Xiang <xiang@kernel.org> <hsiangkao@aol.com>
+Gerald Schaefer <gerald.schaefer@linux.ibm.com> <gerald.schaefer@de.ibm.com>
+Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@de.ibm.com>
+Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@linux.vnet.ibm.com>
Greg Kroah-Hartman <greg@echidna.(none)>
Greg Kroah-Hartman <gregkh@suse.de>
Greg Kroah-Hartman <greg@kroah.com>
Gregory CLEMENT <gregory.clement@bootlin.com> <gregory.clement@free-electrons.com>
Hanjun Guo <guohanjun@huawei.com> <hanjun.guo@linaro.org>
+Heiko Carstens <hca@linux.ibm.com> <h.carstens@de.ibm.com>
+Heiko Carstens <hca@linux.ibm.com> <heiko.carstens@de.ibm.com>
Henk Vergonet <Henk.Vergonet@gmail.com>
Henrik Kretzschmar <henne@nachtwindheim.de>
Henrik Rydberg <rydberg@bitmath.org>
seek after the last record available at the time
the last SYSLOG_ACTION_CLEAR was issued.
- Due to the record nature of this interface with a "read all"
- behavior and the specific positions each seek operation sets,
- SEEK_CUR is not supported, returning -ESPIPE (invalid seek) to
- errno whenever requested.
-
The output format consists of a prefix carrying the syslog
prefix including priority and facility, the 64 bit message
sequence number and the monotonic timestamp in microseconds,
What: /sys/bus/platform/devices/ci_hdrc.0/inputs/a_bus_req
Date: Feb 2014
-Contact: Li Jun <b47624@freescale.com>
+Contact: Li Jun <jun.li@nxp.com>
Description:
Can be set and read.
Set a_bus_req(A-device bus request) input to be 1 if
What: /sys/bus/platform/devices/ci_hdrc.0/inputs/a_bus_drop
Date: Feb 2014
-Contact: Li Jun <b47624@freescale.com>
+Contact: Li Jun <jun.li@nxp.com>
Description:
Can be set and read.
The a_bus_drop(A-device bus drop) input is 1 when the
What: /sys/bus/platform/devices/ci_hdrc.0/inputs/b_bus_req
Date: Feb 2014
-Contact: Li Jun <b47624@freescale.com>
+Contact: Li Jun <jun.li@nxp.com>
Description:
Can be set and read.
The b_bus_req(B-device bus request) input is 1 during the time
What: /sys/bus/platform/devices/ci_hdrc.0/inputs/a_clr_err
Date: Feb 2014
-Contact: Li Jun <b47624@freescale.com>
+Contact: Li Jun <jun.li@nxp.com>
Description:
Can only be set.
The a_clr_err(A-device Vbus error clear) input is used to clear
Compiling the kernel
--------------------
- - Make sure you have at least gcc 4.6 available.
+ - Make sure you have at least gcc 4.9 available.
For more information, refer to :ref:`Documentation/process/changes.rst <changes>`.
Please note that you can still run a.out user programs with this kernel.
thp_fault_alloc
Number of transparent hugepages which were allocated to satisfy
- a page fault, including COW faults. This counter is not present
- when CONFIG_TRANSPARENT_HUGEPAGE is not set.
+ a page fault. This counter is not present when CONFIG_TRANSPARENT_HUGEPAGE
+ is not set.
thp_collapse_alloc
Number of transparent hugepages which were allocated to allow
dm-clone
dm-crypt
dm-dust
+ dm-ebs
dm-flakey
dm-init
dm-integrity
thp_fault_alloc
is incremented every time a huge page is successfully
- allocated to handle a page fault. This applies to both the
- first time a page is faulted and for COW faults.
+ allocated to handle a page fault.
thp_collapse_alloc
is incremented by khugepaged when it has found
3) ID_AA64PFR1_EL1 - Processor Feature Register 1
+
+------------------------------+---------+---------+
| Name | bits | visible |
+------------------------------+---------+---------+
4) MIDR_EL1 - Main ID Register
+
+------------------------------+---------+---------+
| Name | bits | visible |
+------------------------------+---------+---------+
+----------------+-----------------+-----------------+-----------------------------+
| Qualcomm Tech. | Falkor v{1,2} | E1041 | QCOM_FALKOR_ERRATUM_1041 |
+----------------+-----------------+-----------------+-----------------------------+
+| Qualcomm Tech. | Kryo4xx Gold | N/A | ARM64_ERRATUM_1463225 |
++----------------+-----------------+-----------------+-----------------------------+
+| Qualcomm Tech. | Kryo4xx Gold | N/A | ARM64_ERRATUM_1418040 |
++----------------+-----------------+-----------------+-----------------------------+
+| Qualcomm Tech. | Kryo4xx Silver | N/A | ARM64_ERRATUM_1530923 |
++----------------+-----------------+-----------------+-----------------------------+
+| Qualcomm Tech. | Kryo4xx Silver | N/A | ARM64_ERRATUM_1024718 |
++----------------+-----------------+-----------------+-----------------------------+
+----------------+-----------------+-----------------+-----------------------------+
| Fujitsu | A64FX | E#010001 | FUJITSU_ERRATUM_010001 |
+----------------+-----------------+-----------------+-----------------------------+
it with auto-tuning. An alternative way to achieve this goal is to
just increase the value of timeout_sync, leaving max_budget equal to 0.
-weights
--------
-
-Read-only parameter, used to show the weights of the currently active
-BFQ queues.
-
-
4. Group scheduling with BFQ
============================
For each group, there is only the following parameter to set.
weight (namely blkio.bfq.weight or io.bfq-weight): the weight of the
-group inside its parent. Available values: 1..10000 (default 100). The
+group inside its parent. Available values: 1..1000 (default 100). The
linear mapping between ioprio and weights, described at the beginning
of the tunable section, is still valid, but all weights higher than
IOPRIO_BE_NR*10 are mapped to ioprio 0.
*not* the original input ``setsockopt`` arguments. The potentially
modified values will be then passed down to the kernel.
+Large optval
+============
+When the ``optval`` is greater than ``PAGE_SIZE``, the BPF program
+can access only the first ``PAGE_SIZE`` of that data. So it has two options:
+
+* Set ``optlen`` to zero, which indicates that the kernel should
+ use the original buffer from the userspace. Any modifications
+ done by the BPF program to the ``optval`` are ignored.
+* Set ``optlen`` to a value less than ``PAGE_SIZE``, which
+  indicates that the kernel should use BPF's trimmed ``optval``.
+
+When the BPF program returns an ``optlen`` greater than
+``PAGE_SIZE``, userspace will receive an ``EFAULT`` errno.
+
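+For example, a ``cgroup/setsockopt`` program that has no interest in
+large values could simply bypass them (a minimal sketch, not part of the
+kernel sources; the program name and the availability of a ``PAGE_SIZE``
+definition are assumptions)::
+
+    SEC("cgroup/setsockopt")
+    int bypass_large_optval(struct bpf_sockopt *ctx)
+    {
+        /* The data was truncated to PAGE_SIZE: ask the kernel to
+         * use the original userspace buffer, unmodified.
+         */
+        if (ctx->optlen > PAGE_SIZE)
+            ctx->optlen = 0;
+        return 1;    /* allow the setsockopt() call */
+    }
+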
Example
=======
of the mapping functions like dma_map_single(), dma_map_page() and
others should not be larger than the returned value.
+::
+
+ bool
+ dma_need_sync(struct device *dev, dma_addr_t dma_addr);
+
+Returns %true if dma_sync_single_for_{device,cpu} calls are required to
+transfer memory ownership. Returns %false if those calls can be skipped.
+
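+A driver can, for instance, look this up once when the buffer is mapped
+and skip the sync calls on its hot path (a minimal sketch; the variable
+names are illustrative and error handling is omitted)::
+
+	dma_addr_t dma = dma_map_single(dev, buf, size, DMA_FROM_DEVICE);
+	bool needs_sync = dma_need_sync(dev, dma);
+
+	/* ... later, before the CPU reads the buffer ... */
+	if (needs_sync)
+		dma_sync_single_for_cpu(dev, dma, size, DMA_FROM_DEVICE);
+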
::
unsigned long
pin_user_pages*() APIs are clearly distinct from the get_user_pages*() APIs, so
that's a natural dividing line, and a good point to make separate wrapper calls.
In other words, use pin_user_pages*() for DMA-pinned pages, and
-get_user_pages*() for other cases. There are four cases described later on in
+get_user_pages*() for other cases. There are five cases described later on in
this document, to further clarify that concept.
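
As an illustration only (a minimal sketch, not lifted from kernel code;
error handling and the surrounding context are omitted), a DMA user of
the pin APIs typically does::

    struct page *pages[16];
    int n;

    n = pin_user_pages_fast(addr, 16, FOLL_WRITE | FOLL_LONGTERM, pages);
    if (n > 0) {
        /* ... set up and perform DMA on the pinned pages ... */
        unpin_user_pages(pages, n);
    }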
FOLL_PIN and FOLL_GET are mutually exclusive for a given gup call. However,
To dynamically limit for which functions to generate reports, see the
`DebugFS interface`_ blacklist/whitelist feature.
- For ``__always_inline`` functions, replace ``__always_inline`` with
- ``__no_kcsan_or_inline`` (which implies ``__always_inline``)::
-
- static __no_kcsan_or_inline void foo(void) {
- ...
-
* To disable data race detection for a particular compilation unit, add to the
``Makefile``::
kernel by installing a production configuration of the kernel on production
hardware with a production userspace and then trying to exercise some behavior
that depends on interactions between the hardware, the kernel, and userspace.
+
+KUnit isn't working, what should I do?
+======================================
+
+Unfortunately, there are a number of things which can break, but here are some
+things to try.
+
+1. Try running ``./tools/testing/kunit/kunit.py run`` with the ``--raw_output``
+ parameter. This might show details or error messages hidden by the kunit_tool
+ parser.
+2. Instead of running ``kunit.py run``, try running ``kunit.py config``,
+ ``kunit.py build``, and ``kunit.py exec`` independently. This can help track
+ down where an issue is occurring. (If you think the parser is at fault, you
+ can run it manually against stdin or a file with ``kunit.py parse``.)
+3. Running the UML kernel directly can often reveal issues or error messages
+ kunit_tool ignores. This should be as simple as running ``./vmlinux`` after
+ building the UML kernel (e.g., by using ``kunit.py build``). Note that UML
+ has some unusual requirements (such as the host having a tmpfs filesystem
+ mounted), and has had issues in the past when built statically and the host
+ has KASLR enabled. (On older host kernels, you may need to run ``setarch
+ `uname -m` -R ./vmlinux`` to disable KASLR.)
+4. Make sure the kernel .config has ``CONFIG_KUNIT=y`` and at least one test
+ (e.g. ``CONFIG_KUNIT_EXAMPLE_TEST=y``). kunit_tool will keep its .config
+ around, so you can see what config was used after running ``kunit.py run``.
+ It also preserves any config changes you might make, so you can
+ enable/disable things with ``make ARCH=um menuconfig`` or similar, and then
+ re-run kunit_tool.
+5. Try to run ``make ARCH=um defconfig`` before running ``kunit.py run``. This
+ may help clean up any residual config items which could be causing problems.
+6. Finally, try running KUnit outside UML. KUnit and KUnit tests can be
+   built into any kernel, or can be built as a module and loaded at runtime.
+ Doing so should allow you to determine if UML is causing the issue you're
+ seeing. When tests are built-in, they will execute when the kernel boots, and
+ modules will automatically execute associated tests when loaded. Test results
+ can be collected from ``/sys/kernel/debug/kunit/<test suite>/results``, and
+ can be parsed with ``kunit.py parse``. For more details, see "KUnit on
+ non-UML architectures" in :doc:`usage`.
+
+If none of the above tricks help, you are always welcome to email any issues to
+kunit-dev@googlegroups.com.
DT_DOC_CHECKER ?= dt-doc-validate
DT_EXTRACT_EX ?= dt-extract-example
DT_MK_SCHEMA ?= dt-mk-schema
-DT_MK_SCHEMA_USERONLY_FLAG := $(if $(DT_SCHEMA_FILES), -u)
DT_SCHEMA_MIN_VERSION = 2020.5
DT_DOCS = $(shell $(find_cmd) | sed -e 's|^$(srctree)/||')
-DT_SCHEMA_FILES ?= $(DT_DOCS)
-
-extra-$(CHECK_DT_BINDING) += $(patsubst $(src)/%.yaml,%.example.dts, $(DT_SCHEMA_FILES))
-extra-$(CHECK_DT_BINDING) += $(patsubst $(src)/%.yaml,%.example.dt.yaml, $(DT_SCHEMA_FILES))
-extra-$(CHECK_DT_BINDING) += processed-schema-examples.yaml
-
override DTC_FLAGS := \
-Wno-avoid_unnecessary_addr_size \
- -Wno-graph_child_address
+ -Wno-graph_child_address \
+ -Wno-interrupt_provider
$(obj)/processed-schema-examples.yaml: $(DT_DOCS) check_dtschema_version FORCE
$(call if_changed,mk_schema)
-$(obj)/processed-schema.yaml: DT_MK_SCHEMA_FLAGS := $(DT_MK_SCHEMA_USERONLY_FLAG)
+ifeq ($(DT_SCHEMA_FILES),)
+
+# Unless DT_SCHEMA_FILES is specified, use the full schema for dtbs_check too.
+# Just copy processed-schema-examples.yaml
+
+$(obj)/processed-schema.yaml: $(obj)/processed-schema-examples.yaml FORCE
+ $(call if_changed,copy)
+
+DT_SCHEMA_FILES = $(DT_DOCS)
+
+else
+
+# If DT_SCHEMA_FILES is specified, use it for processed-schema.yaml
+
+$(obj)/processed-schema.yaml: DT_MK_SCHEMA_FLAGS := -u
$(obj)/processed-schema.yaml: $(DT_SCHEMA_FILES) check_dtschema_version FORCE
$(call if_changed,mk_schema)
-extra-y += processed-schema.yaml
+endif
+
+extra-$(CHECK_DT_BINDING) += $(patsubst $(src)/%.yaml,%.example.dts, $(DT_SCHEMA_FILES))
+extra-$(CHECK_DT_BINDING) += $(patsubst $(src)/%.yaml,%.example.dt.yaml, $(DT_SCHEMA_FILES))
+extra-$(CHECK_DT_BINDING) += processed-schema-examples.yaml
+extra-$(CHECK_DTBS) += processed-schema.yaml
+
+# Hack: avoid 'Argument list too long' error for 'make clean'. Remove most of
+# build artifacts here before they are processed by scripts/Makefile.clean
+clean-files = $(shell find $(obj) \( -name '*.example.dts' -o \
+ -name '*.example.dt.yaml' \) -delete 2>/dev/null)
&lsio_mu1 1 2
&lsio_mu1 1 3
&lsio_mu1 3 3>;
- See Documentation/devicetree/bindings/mailbox/fsl,mu.txt
+ See Documentation/devicetree/bindings/mailbox/fsl,mu.yaml
for detailed mailbox binding.
Note: Each mu which supports general interrupt should have an alias correctly
ranges = <1 0x00000000 0x42000000 0x02000000>,
<5 0x00000000 0x46000000 0x01000000>;
- ethernet@1,01f00000 {
+ ethernet@1,1f00000 {
compatible = "smsc,lan9115";
reg = <1 0x01f00000 0x1000>;
interrupts = <0 48 4>;
phy-mode = "mii";
};
- uart@5,00200000 {
+ serial@5,200000 {
compatible = "ns16550a";
reg = <5 0x00200000 0x20>;
interrupts = <0 49 4>;
title: Clock bindings for Freescale i.MX27
maintainers:
- - Fabio Estevam <fabio.estevam@freescale.com>
+ - Fabio Estevam <fabio.estevam@nxp.com>
description: |
The clock consumer should specify the desired clock by having the clock
title: Clock bindings for Freescale i.MX31
maintainers:
- - Fabio Estevam <fabio.estevam@freescale.com>
+ - Fabio Estevam <fabio.estevam@nxp.com>
description: |
The clock consumer should specify the desired clock by having the clock
title: Clock bindings for Freescale i.MX5
maintainers:
- - Fabio Estevam <fabio.estevam@freescale.com>
+ - Fabio Estevam <fabio.estevam@nxp.com>
description: |
The clock consumer should specify the desired clock by having the clock
simple-card or audio-graph-card binding. See their binding
documents on how to describe the way the sii902x device is
connected to the rest of the audio system:
- Documentation/devicetree/bindings/sound/simple-card.txt
+ Documentation/devicetree/bindings/sound/simple-card.yaml
Documentation/devicetree/bindings/sound/audio-graph-card.txt
Note: In case of the audio-graph-card binding the used port
index should be 3.
datasheet
- clocks : phandle to the PRE axi clock input, as described
in Documentation/devicetree/bindings/clock/clock-bindings.txt and
- Documentation/devicetree/bindings/clock/imx6q-clock.txt.
+ Documentation/devicetree/bindings/clock/imx6q-clock.yaml.
- clock-names: should be "axi"
- interrupts: should contain the PRE interrupt
- fsl,iram: phandle pointing to the mmio-sram device node, that should be
datasheet
- clocks : phandles to the PRG ipg and axi clock inputs, as described
in Documentation/devicetree/bindings/clock/clock-bindings.txt and
- Documentation/devicetree/bindings/clock/imx6q-clock.txt.
+ Documentation/devicetree/bindings/clock/imx6q-clock.yaml.
- clock-names: should be "ipg" and "axi"
- fsl,pres: phandles to the PRE units attached to this PRG, with the fixed
PRE as the first entry and the muxable PREs following.
"di2_sel" - IPU2 DI0 mux
"di3_sel" - IPU2 DI1 mux
The needed clock numbers for each are documented in
- Documentation/devicetree/bindings/clock/imx5-clock.txt, and in
- Documentation/devicetree/bindings/clock/imx6q-clock.txt.
+ Documentation/devicetree/bindings/clock/imx5-clock.yaml, and in
+ Documentation/devicetree/bindings/clock/imx6q-clock.yaml.
Optional properties:
- pinctrl-names : should be "default" on i.MX53, not used on i.MX6q
examples:
- |
- sysreg {
+ sysreg@0 {
compatible = "arm,versatile-sysreg", "syscon", "simple-mfd";
reg = <0x00000 0x1000>;
description: |
Should contain a list of phandles pointing to display interface port
of vop devices. vop definitions as defined in
- Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
+ Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml
required:
- compatible
Only the GPIO_ACTIVE_HIGH and GPIO_ACTIVE_LOW flags are supported.
- #interrupt-cells : Specifies the number of cells needed to encode an
interrupt. Should be 2. The first cell defines the interrupt number,
- the second encodes the triger flags encoded as described in
+ the second encodes the trigger flags encoded as described in
Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
- compatible:
- "mediatek,mt7621-gpio" for Mediatek controllers
16-31 : private irq, and we use 16 as the co-processor timer.
31-1024: common irq for soc ip.
-Interrupt triger mode: (Defined in dt-bindings/interrupt-controller/irq.h)
+Interrupt trigger mode: (Defined in dt-bindings/interrupt-controller/irq.h)
IRQ_TYPE_LEVEL_HIGH (default)
IRQ_TYPE_LEVEL_LOW
IRQ_TYPE_EDGE_RISING
ranges;
/* APU<->RPU0 IPI mailbox controller */
- ipi_mailbox_rpu0: mailbox@ff90400 {
+ ipi_mailbox_rpu0: mailbox@ff990400 {
reg = <0xff990400 0x20>,
<0xff990420 0x20>,
<0xff990080 0x20>,
to receive a transfer (that is, when TX FIFO contains the response data) by
strobing the ACK pin with the ready signal. See the "ready-gpios" property of the
SSP binding as documented in:
-<Documentation/devicetree/bindings/spi/spi-pxa2xx.txt>.
+<Documentation/devicetree/bindings/spi/marvell,mmp2-ssp.yaml>.
Example:
&ssp3 {
This device is a serial attached device to BTIF device and thus it must be a
child node of the serial node with BTIF. The dt-bindings details for BTIF
-device can be known via Documentation/devicetree/bindings/serial/8250.txt.
+device can be found in Documentation/devicetree/bindings/serial/8250.yaml.
Required properties:
[flags]>
On other mach-shmobile platforms GPIO is handled by the gpio-rcar driver.
-Please refer to Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
+Please refer to Documentation/devicetree/bindings/gpio/renesas,rcar-gpio.yaml
for documentation of the GPIO device tree bindings on those platforms.
see ${LINUX}/Documentation/devicetree/bindings/graph.txt
Basically, Audio Graph Card property is same as Simple Card.
-see ${LINUX}/Documentation/devicetree/bindings/sound/simple-card.txt
+see ${LINUX}/Documentation/devicetree/bindings/sound/simple-card.yaml
Below are same as Simple-Card.
sti sound drivers allow exposing the sti SoC audio interface through the
generic ASoC simple card. For details about sound card declaration please refer to
-Documentation/devicetree/bindings/sound/simple-card.txt.
+Documentation/devicetree/bindings/sound/simple-card.yaml.
1) sti-uniperiph-dai: audio dai device.
---------------------------------------
maxItems: 1
clocks:
- maxItems: 1
+ minItems: 1
+ maxItems: 2
+ items:
+ - description: controller register bus clock
+ - description: baud rate generator and delay control clock
clock-names:
- description: input clock for the baud rate generator
- items:
- - const: core
+ minItems: 1
+ maxItems: 2
if:
properties:
then:
properties:
clocks:
- contains:
- items:
- - description: controller register bus clock
- - description: baud rate generator and delay control clock
+ minItems: 2
clock-names:
- minItems: 2
items:
- const: core
- const: pclk
+else:
+ properties:
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: core
+
required:
- compatible
- reg
SPI Controller nodes must be children of the GENI based Qualcomm Universal
Peripheral. Please refer to the GENI based QUP wrapper controller node bindings
-described in Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt.
+described in Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.yaml.
SPI slave nodes must be children of the SPI master node and conform to SPI bus
binding as described in Documentation/devicetree/bindings/spi/spi-bus.txt.
#include <dt-bindings/interrupt-controller/arm-gic.h>
// Example 1: SDM845 TSENS
- soc: soc@0 {
+ soc: soc {
#address-cells = <2>;
#size-cells = <2>;
#include <dt-bindings/thermal/thermal.h>
// Example 1: SDM845 TSENS
- soc: soc@0 {
+ soc {
#address-cells = <2>;
#size-cells = <2>;
#include <dt-bindings/soc/ti,sci_pm_domain.h>
vtm: thermal@42050000 {
compatible = "ti,am654-vtm";
- reg = <0x0 0x42050000 0x0 0x25c>;
+ reg = <0x42050000 0x25c>;
power-domains = <&k3_pds 80 TI_SCI_PD_EXCLUSIVE>;
#thermal-sensor-cells = <1>;
};
- PTIM_CTLR "cr<0, 14>" Control reg to start reset timer.
- PTIM_TSR "cr<1, 14>" Interrupt cleanup status reg.
- PTIM_CCVR "cr<3, 14>" Current counter value reg.
- - PTIM_LVR "cr<6, 14>" Window value reg to triger next event.
+ - PTIM_LVR "cr<6, 14>" Window value reg to trigger next event.
==============================
timer node bindings definition
#address-cells = <1>;
#size-cells = <0>;
- string@0409 {
- reg = <0x0409>;
+ string@409 {
+ reg = <0x409>;
manufacturer = "ASPEED";
product = "USB Virtual Hub";
serial-number = "0000";
-:orphan:
+.. SPDX-License-Identifier: GPL-2.0
Writing DeviceTree Bindings in json-schema
==========================================
libyaml and its headers be installed on the host system. For some distributions
that involves installing the development package, such as:
-Debian:
+Debian::
+
apt-get install libyaml-dev
-Fedora:
+
+Fedora::
+
dnf -y install libyaml-devel
Running checks
==============================
-Linux I2C slave eeprom backend
+Linux I2C slave EEPROM backend
==============================
-by Wolfram Sang <wsa@sang-engineering.com> in 2014-15
+by Wolfram Sang <wsa@sang-engineering.com> in 2014-20
-This is a proof-of-concept backend which acts like an EEPROM on the connected
-I2C bus. The memory contents can be modified from userspace via this file
-located in sysfs::
+This backend simulates an EEPROM on the connected I2C bus. Its memory contents
+can be accessed from userspace via this file located in sysfs::
/sys/bus/i2c/devices/<device-directory>/slave-eeprom
+The following types are available: 24c02, 24c32, 24c64, and 24c512. Read-only
+variants are also supported. The name needed for instantiating has the form
+'slave-<type>[ro]'. Examples follow:
+
+24c02, read/write, address 0x64:
+ # echo slave-24c02 0x1064 > /sys/bus/i2c/devices/i2c-1/new_device
+
+24c512, read-only, address 0x42:
+ # echo slave-24c512ro 0x1042 > /sys/bus/i2c/devices/i2c-1/new_device
+
+You can also preload data during boot if a device-property named
+'firmware-name' contains a valid filename (DT or ACPI only).
+
As of 2015, Linux doesn't support poll on binary sysfs files, so there is no
notification when another master changes the content.
8123_pci.c
8123_bin.o_shipped <= Binary blob
---- 3.1 Shared Makefile
+3.1 Shared Makefile
+-------------------
An external module always includes a wrapper makefile that
supports building the module using "make" with no arguments.
The syntax of the Module.symvers file is::
- <CRC> <Symbol> <Module> <Export Type> <Namespace>
+ <CRC> <Symbol> <Module> <Export Type> <Namespace>
- 0xe1cc2a05 usb_stor_suspend drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL USB_STORAGE
+ 0xe1cc2a05 usb_stor_suspend drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL USB_STORAGE
The fields are separated by tabs and values may be empty (e.g.
if no namespace is defined for an exported symbol).
If you enable ``CONFIG_GCC_PLUGIN_RANDSTRUCT``, you will need to
pre-generate the random seed in
-``scripts/gcc-plgins/randomize_layout_seed.h`` so the same value
+``scripts/gcc-plugins/randomize_layout_seed.h`` so the same value
is used in rebuilds.
Debug info conflicts
drivers access their registers through the same regmap.
For more information regarding the devicetree bindings of the TCU drivers,
-have a look at Documentation/devicetree/bindings/timer/ingenic,tcu.txt.
+have a look at Documentation/devicetree/bindings/timer/ingenic,tcu.yaml.
ifconfig arc0 insight
route add insight arc0
route add freedom arc0 /* I would use the subnet here (like I said
- to to in "single protocol" above),
+ to in "single protocol" above),
but the rest of the subnet
unfortunately lies across the PPP
link on freedom, which confuses
To use the amateur radio protocols within Linux you will need to get a
suitable copy of the AX.25 Utilities. More detailed information about
-AX.25, NET/ROM and ROSE, associated programs and and utilities can be
+AX.25, NET/ROM and ROSE, associated programs and utilities can be
found on http://www.linux-ax25.org.
There is an active mailing list for discussing Linux amateur radio matters
*Host2Dev; mandatory*
-Setup bittiming by sending the the structure
+Setup bittiming by sending the structure
``ucan_ctl_payload_t.cmd_set_bittiming`` (see ``struct bittiming`` for
details)
zero
The CAN device has sent a message to the CAN bus. It answers with a
-list of of tuples <echo-ids, flags>.
+list of tuples <echo-ids, flags>.
The echo-id identifies the frame from (echos the id from a previous
UCAN_OUT_TX message). The flag indicates the result of the
Networking stack hooks
----------------------
-When a master netdev is used with DSA, a small hook is placed in in the
+When a master netdev is used with DSA, a small hook is placed in the
networking stack in order to have the DSA subsystem process the Ethernet
switch specific tagging protocol. DSA accomplishes this by registering a
specific (and fake) Ethernet type (later becoming ``skb->protocol``) with the
The address family, socket addresses etc. are defined in the
include/net/af_ieee802154.h header or in the special header
-in the userspace package (see either http://wpan.cakelab.org/ or the
-git tree at https://github.com/linux-wpan/wpan-tools).
+in the userspace package (see either https://linux-wpan.org/wpan-tools.html
+or the git tree at https://github.com/linux-wpan/wpan-tools).
6LoWPAN Linux implementation
============================
Default: 0x1
- Note that that additional client or server features are only
+ Note that additional client or server features are only
	effective if the basic support (0x1 and 0x2) is enabled respectively.
tcp_fastopen_blackhole_timeout_sec - INTEGER
	modes (when there is not enough available memory, the strategy
is enabled and the variable is automatically set to 2,
otherwise the strategy is disabled and the variable is set to
- 1), and 3 means that that the strategy is always enabled.
+ 1), and 3 means that the strategy is always enabled.
drop_packet - INTEGER
- 0 - disabled (default)
     time [tunable] after the last connection using it is discarded, in case a new
connection is made that could use it.
- (#) A client-side connection is only shared between calls if they have have
+ (#) A client-side connection is only shared between calls if they have
the same key struct describing their security (and assuming the calls
would otherwise share the connection). Non-secured calls would also be
able to share connections with each other.
====================== =============== ========================================
Program Minimal version Command to check the version
====================== =============== ========================================
-GNU C 4.8 gcc --version
+GNU C 4.9 gcc --version
GNU make 3.81 make --version
binutils 2.23 ld -v
flex 2.5.35 flex --version
problem, which is called the function-growth-hormone-imbalance syndrome.
See chapter 6 (Functions).
+For symbol names and documentation, avoid introducing new usage of
+'master / slave' (or 'slave' independent of 'master') and 'blacklist /
+whitelist'.
+
+Recommended replacements for 'master / slave' are:
+ '{primary,main} / {secondary,replica,subordinate}'
+ '{initiator,requester} / {target,responder}'
+ '{controller,host} / {device,worker,proxy}'
+ 'leader / follower'
+ 'director / performer'
+
+Recommended replacements for 'blacklist/whitelist' are:
+ 'denylist / allowlist'
+ 'blocklist / passlist'
+
+Exceptions for introducing new usage are to maintain a userspace ABI/API,
+or to update code for an existing (as of 2020) hardware or protocol
+specification that mandates those terms. For new specifications,
+translate specification usage of the terminology to the kernel coding
+standard where possible.
5) Typedefs
-----------
#define KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE 0x00000001
struct kvm_vmx_nested_state_hdr {
- __u32 flags;
__u64 vmxon_pa;
__u64 vmcs12_pa;
- __u64 preemption_timer_deadline;
struct {
__u16 flags;
} smm;
+
+ __u32 flags;
+ __u64 preemption_timer_deadline;
};
struct kvm_vmx_nested_state_data {
ATMEL MACB ETHERNET DRIVER
M: Nicolas Ferre <nicolas.ferre@microchip.com>
+M: Claudiu Beznea <claudiu.beznea@microchip.com>
S: Supported
F: drivers/net/ethernet/cadence/
BPF JIT for S390
M: Ilya Leoshkevich <iii@linux.ibm.com>
-M: Heiko Carstens <heiko.carstens@de.ibm.com>
+M: Heiko Carstens <hca@linux.ibm.com>
M: Vasily Gorbik <gor@linux.ibm.com>
L: netdev@vger.kernel.org
L: bpf@vger.kernel.org
S: Supported
F: drivers/char/hw_random/cctrng.c
F: drivers/char/hw_random/cctrng.h
-F: Documentation/devicetree/bindings/rng/arm-cctrng.txt
+F: Documentation/devicetree/bindings/rng/arm-cctrng.yaml
W: https://developer.arm.com/products/system-ip/trustzone-cryptocell/cryptocell-700-family
CEC FRAMEWORK
DRM DRIVER FOR RAYDIUM RM67191 PANELS
M: Robert Chiras <robert.chiras@nxp.com>
S: Maintained
-F: Documentation/devicetree/bindings/display/panel/raydium,rm67191.txt
+F: Documentation/devicetree/bindings/display/panel/raydium,rm67191.yaml
F: drivers/gpu/drm/panel/panel-raydium-rm67191.c
DRM DRIVER FOR ROCKTECH JH057N00900 PANELS
M: Stefan Schmidt <stefan@datenfreihafen.org>
L: linux-wpan@vger.kernel.org
S: Maintained
-W: http://wpan.cakelab.org/
+W: https://linux-wpan.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan-next.git
F: Documentation/networking/ieee802154.rst
F: drivers/dma/mediatek/
MEDIATEK ETHERNET DRIVER
-M: Felix Fietkau <nbd@openwrt.org>
+M: Felix Fietkau <nbd@nbd.name>
M: John Crispin <john@phrozen.org>
M: Sean Wang <sean.wang@mediatek.com>
M: Mark Lee <Mark-MC.Lee@mediatek.com>
OP-TEE DRIVER
M: Jens Wiklander <jens.wiklander@linaro.org>
-L: tee-dev@lists.linaro.org
+L: op-tee@lists.trustedfirmware.org
S: Maintained
F: drivers/tee/optee/
OP-TEE RANDOM NUMBER GENERATOR (RNG) DRIVER
M: Sumit Garg <sumit.garg@linaro.org>
-L: tee-dev@lists.linaro.org
+L: op-tee@lists.trustedfirmware.org
S: Maintained
F: drivers/char/hw_random/optee-rng.c
M: Niklas Söderlund <niklas.soderlund@ragnatech.se>
L: linux-renesas-soc@vger.kernel.org
S: Supported
-F: Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt
-F: Documentation/devicetree/bindings/thermal/rcar-thermal.txt
+F: Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml
+F: Documentation/devicetree/bindings/thermal/rcar-thermal.yaml
F: drivers/thermal/rcar_gen3_thermal.c
F: drivers/thermal/rcar_thermal.c
F: drivers/video/fbdev/savage/
S390
-M: Heiko Carstens <heiko.carstens@de.ibm.com>
+M: Heiko Carstens <hca@linux.ibm.com>
M: Vasily Gorbik <gor@linux.ibm.com>
M: Christian Borntraeger <borntraeger@de.ibm.com>
L: linux-s390@vger.kernel.org
F: include/linux/dasd_mod.h
S390 IOMMU (PCI)
-M: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+M: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
L: linux-s390@vger.kernel.org
S: Supported
W: http://www.ibm.com/developerworks/linux/linux390/
S390 PCI SUBSYSTEM
M: Niklas Schnelle <schnelle@linux.ibm.com>
-M: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+M: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
L: linux-s390@vger.kernel.org
S: Supported
W: http://www.ibm.com/developerworks/linux/linux390/
M: "Luc Van Oostenryck" <luc.vanoostenryck@gmail.com>
L: linux-sparse@vger.kernel.org
S: Maintained
-W: https://sparse.wiki.kernel.org/
+W: https://sparse.docs.kernel.org/
T: git git://git.kernel.org/pub/scm/devel/sparse/sparse.git
+Q: https://patchwork.kernel.org/project/linux-sparse/list/
+B: https://bugzilla.kernel.org/enter_bug.cgi?component=Sparse&product=Tools
F: include/linux/compiler.h
SPEAR CLOCK FRAMEWORK SUPPORT
TEE SUBSYSTEM
M: Jens Wiklander <jens.wiklander@linaro.org>
-L: tee-dev@lists.linaro.org
+L: op-tee@lists.trustedfirmware.org
S: Maintained
F: Documentation/tee.txt
F: drivers/tee/
VERSION = 5
PATCHLEVEL = 8
SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc4
NAME = Kleptomaniac Octopus
# *DOCUMENTATION*
endif
# Align the bit size of userspace programs with the kernel
-KBUILD_USERCFLAGS += $(filter -m32 -m64, $(KBUILD_CFLAGS))
-KBUILD_USERLDFLAGS += $(filter -m32 -m64, $(KBUILD_CFLAGS))
+KBUILD_USERCFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS))
+KBUILD_USERLDFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS))
# make the checker run with the right architecture
CHECKFLAGS += --arch=$(ARCH)
endchoice
+config ARC_TUNE_MCPU
+ string "Override default -mcpu compiler flag"
+ default ""
+ help
+ Override default -mcpu=xxx compiler flag (which is set depending on
+ the ISA version) with the specified value.
+	  NOTE: If the specified flag isn't supported by the current compiler,
+	  the ISA default value will be used as a fallback.
+
config CPU_BIG_ENDIAN
bool "Enable Big Endian Mode"
help
This is programmable and can be optionally disabled in which case
software INTERRUPT_PROLOGUE/EPILOGUE do the needed work
+config ARC_LPB_DISABLE
+ bool "Disable loop buffer (LPB)"
+ help
+	  On HS cores, the loop buffer (LPB) is programmable at runtime and can
+ be optionally disabled.
+
endif # ISA_ARCV2
endmenu # "ARC CPU Configuration"
endif
cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
-cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
-cflags-$(CONFIG_ISA_ARCV2) += -mcpu=hs38
+
+tune-mcpu-def-$(CONFIG_ISA_ARCOMPACT) := -mcpu=arc700
+tune-mcpu-def-$(CONFIG_ISA_ARCV2) := -mcpu=hs38
+
+ifeq ($(CONFIG_ARC_TUNE_MCPU),"")
+cflags-y += $(tune-mcpu-def-y)
+else
+tune-mcpu := $(shell echo $(CONFIG_ARC_TUNE_MCPU))
+tune-mcpu-ok := $(call cc-option-yn, $(tune-mcpu))
+ifeq ($(tune-mcpu-ok),y)
+cflags-y += $(tune-mcpu)
+else
+# The flag provided by the 'CONFIG_ARC_TUNE_MCPU' option isn't known by this compiler
+# (probably the compiler is too old). Use the ISA default mcpu flag instead as a safe option.
+$(warning ** WARNING ** CONFIG_ARC_TUNE_MCPU flag '$(tune-mcpu)' is unknown, fallback to '$(tune-mcpu-def-y)')
+cflags-y += $(tune-mcpu-def-y)
+endif
+endif
+
ifdef CONFIG_ARC_CURR_IN_REG
# For a global register definition, make sure it gets passed to every file
#define R_ARC_32_PCREL 0x31
/*to set parameters in the core dumps */
-#define ELF_ARCH EM_ARCOMPACT
+#define ELF_ARCH EM_ARC_INUSE
#define ELF_CLASS ELFCLASS32
#ifdef CONFIG_CPU_BIG_ENDIAN
/*
* Unconditionally Enable IRQs
*/
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+extern void arch_local_irq_enable(void);
+#else
static inline void arch_local_irq_enable(void)
{
unsigned long temp;
: "n"((STATUS_E1_MASK | STATUS_E2_MASK))
: "cc", "memory");
}
-
+#endif
/*
* Unconditionally Disable IRQs
tracesys:
; save EFA in case tracer wants the PC of traced task
; using ERET won't work since next-PC has already committed
- lr r12, [efa]
GET_CURR_TASK_FIELD_PTR TASK_THREAD, r11
st r12, [r11, THREAD_FAULT_ADDR] ; thread.fault_address
; Breakpoint TRAP
; ---------------------------------------------
trap_with_param:
-
- ; stop_pc info by gdb needs this info
- lr r0, [efa]
+ mov r0, r12 ; EFA in case ptracer/gdb wants stop_pc
mov r1, sp
- ; Now that we have read EFA, it is safe to do "fake" rtie
- ; and get out of CPU exception mode
- FAKE_RET_FROM_EXCPN
-
; Save callee regs in case gdb wants to have a look
; SP will grow up by size of CALLEE Reg-File
; NOTE: clobbers r12
EXCEPTION_PROLOGUE
+ lr r12, [efa]
+
+ FAKE_RET_FROM_EXCPN
+
;============ TRAP 1 :breakpoints
; Check ECR for trap with arg (PROLOGUE ensures r10 has ECR)
bmsk.f 0, r10, 7
;============ TRAP (no param): syscall top level
- ; First return from Exception to pure K mode (Exception/IRQs renabled)
- FAKE_RET_FROM_EXCPN
-
; If syscall tracing ongoing, invoke pre-post-hooks
GET_CURR_THR_INFO_FLAGS r10
btst r10, TIF_SYSCALL_TRACE
bclr r5, r5, STATUS_AD_BIT
#endif
kflag r5
+
+#ifdef CONFIG_ARC_LPB_DISABLE
+ lr r5, [ARC_REG_LPB_BUILD]
+ breq r5, 0, 1f ; LPB doesn't exist
+ mov r5, 1
+ sr r5, [ARC_REG_LPB_CTRL]
+1:
+#endif /* CONFIG_ARC_LPB_DISABLE */
#endif
; Config DSP_CTRL properly, so kernel may use integer multiply,
; multiply-accumulate, and divide operations
{ 0x00, NULL }
};
-static const struct id_to_str arc_cpu_rel[] = {
+static const struct id_to_str arc_hs_ver54_rel[] = {
/* UARCH.MAJOR, Release */
{ 0, "R3.10a"},
{ 1, "R3.50a"},
+ { 2, "R3.60a"},
+ { 3, "R4.00a"},
{ 0xFF, NULL }
};
struct bcr_uarch_build_arcv2 uarch;
const struct id_to_str *tbl;
- /*
- * Up until (including) the first core4 release (0x54) things were
- * simple: AUX IDENTITY.ARCVER was sufficient to identify arc family
- * and release: 0x50 to 0x53 was HS38, 0x54 was HS48 (dual issue)
- */
-
if (cpu->core.family < 0x54) { /* includes arc700 */
for (tbl = &arc_legacy_rel[0]; tbl->id != 0; tbl++) {
}
/*
- * However the subsequent HS release (same 0x54) allow HS38 or HS48
- * configurations and encode this info in a different BCR.
- * The BCR was introduced in 0x54 so can't be read unconditionally.
+ * Initial HS cores bumped AUX IDENTITY.ARCVER for each release until
+ * ARCVER 0x54 which introduced AUX MICRO_ARCH_BUILD and subsequent
+ * releases only update it.
*/
-
READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch);
if (uarch.prod == 4) {
cpu->name = "HS38";
}
- for (tbl = &arc_cpu_rel[0]; tbl->id != 0xFF; tbl++) {
+ for (tbl = &arc_hs_ver54_rel[0]; tbl->id != 0xFF; tbl++) {
if (uarch.maj == tbl->id) {
cpu->release = tbl->str;
break;
&mmc2 {
status = "okay";
vmmc-supply = <&wl12xx_vmmc>;
- ti,non-removable;
+ non-removable;
bus-width = <4>;
cap-power-off-card;
pinctrl-names = "default";
pinctrl-0 = <&emmc_pins>;
bus-width = <8>;
status = "okay";
+ non-removable;
};
&am33xx_pinmux {
bus-width = <4>;
non-removable;
cap-power-off-card;
- ti,needs-special-hs-handling;
keep-power-in-suspend;
pinctrl-names = "default";
pinctrl-0 = <&mmc3_pins &wl18xx_pins>;
bus-width = <4>;
non-removable;
cap-power-off-card;
- ti,needs-special-hs-handling;
keep-power-in-suspend;
pinctrl-names = "default";
pinctrl-0 = <&mmc3_pins &wl18xx_pins>;
bus-width = <4>;
non-removable;
cap-power-off-card;
- ti,needs-special-hs-handling;
keep-power-in-suspend;
pinctrl-names = "default";
pinctrl-0 = <&mmc3_pins &wl18xx_pins>;
bus-width = <4>;
pinctrl-names = "default";
pinctrl-0 = <&mmc3_pins &wlan_pins>;
- ti,non-removable;
- ti,needs-special-hs-handling;
+ non-removable;
cap-power-off-card;
keep-power-in-suspend;
&mmc2 {
status = "okay";
vmmc-supply = <&wl12xx_vmmc>;
- ti,non-removable;
+ non-removable;
bus-width = <4>;
cap-power-off-card;
keep-power-in-suspend;
pinctrl-0 = <&emmc_pins>;
vmmc-supply = <&vmmcsd_fixed>;
bus-width = <8>;
- ti,non-removable;
+ non-removable;
status = "okay";
};
vmmc-supply = <&vmmcsd_fixed>;
bus-width = <8>;
pinctrl-0 = <&mmc1_pins_default>;
- ti,non-removable;
+ non-removable;
status = "okay";
};
vmmc-supply = <&vmmcsd_fixed>;
bus-width = <8>;
pinctrl-0 = <&mmc2_pins_default>;
- ti,non-removable;
+ non-removable;
status = "okay";
};
pinctrl-0 = <&emmc_pins>;
vmmc-supply = <&ldo3_reg>;
bus-width = <8>;
- ti,non-removable;
+ non-removable;
};
&mmc3 {
pinctrl-0 = <&wireless_pins>;
vmmmc-supply = <&v3v3c_reg>;
bus-width = <4>;
- ti,non-removable;
+ non-removable;
dmas = <&edma_xbar 12 0 1
&edma_xbar 13 0 2>;
dma-names = "tx", "rx";
pinctrl-0 = <&emmc_pins>;
vmmc-supply = <&vmmc_reg>;
bus-width = <8>;
- ti,non-removable;
+ non-removable;
status = "disabled";
};
AM33XX_PADCONF(AM335X_PIN_MMC0_DAT3, PIN_INPUT_PULLUP, MUX_MODE0)
AM33XX_PADCONF(AM335X_PIN_MMC0_CMD, PIN_INPUT_PULLUP, MUX_MODE0)
AM33XX_PADCONF(AM335X_PIN_MMC0_CLK, PIN_INPUT_PULLUP, MUX_MODE0)
- AM33XX_PADCONF(AM335X_PIN_MCASP0_ACLKR, PIN_INPUT, MUX_MODE4) /* (B12) mcasp0_aclkr.mmc0_sdwp */
>;
};
ranges = <0x0 0x60000 0x1000>;
mmc1: mmc@0 {
- compatible = "ti,omap4-hsmmc";
- ti,dual-volt;
+ compatible = "ti,am335-sdhci";
ti,needs-special-reset;
- ti,needs-special-hs-handling;
dmas = <&edma_xbar 24 0 0
&edma_xbar 25 0 0>;
dma-names = "tx", "rx";
ranges = <0x0 0xd8000 0x1000>;
mmc2: mmc@0 {
- compatible = "ti,omap4-hsmmc";
+ compatible = "ti,am335-sdhci";
ti,needs-special-reset;
dmas = <&edma 2 0
&edma 3 0>;
ranges = <0x0 0x47810000 0x1000>;
mmc3: mmc@0 {
- compatible = "ti,omap4-hsmmc";
+ compatible = "ti,am335-sdhci";
ti,needs-special-reset;
interrupts = <29>;
reg = <0x0 0x1000>;
+ status = "disabled";
};
};
<0x47400010 0x4>;
reg-names = "rev", "sysc";
ti,sysc-mask = <(SYSC_OMAP4_FREEEMU |
- SYSC_OMAP2_SOFTRESET)>;
+ SYSC_OMAP4_SOFTRESET)>;
ti,sysc-midle = <SYSC_IDLE_FORCE>,
<SYSC_IDLE_NO>,
<SYSC_IDLE_SMART>;
clock-names = "fck";
#address-cells = <1>;
#size-cells = <1>;
- ranges = <0x0 0x47400000 0x5000>;
+ ranges = <0x0 0x47400000 0x8000>;
usb0_phy: usb-phy@1300 {
compatible = "ti,am335x-usb-phy";
ranges = <0x0 0x47810000 0x1000>;
mmc3: mmc@0 {
- compatible = "ti,omap4-hsmmc";
+ compatible = "ti,am437-sdhci";
ti,needs-special-reset;
interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
reg = <0x0 0x1000>;
+ status = "disabled";
};
};
pinctrl-0 = <&emmc_pins>;
vmmc-supply = <&vmmc_3v3>;
bus-width = <8>;
- ti,non-removable;
+ non-removable;
};
&spi0 {
backlight = <&lcd_bl>;
- panel-timing {
- clock-frequency = <33000000>;
- hactive = <800>;
- vactive = <480>;
- hfront-porch = <210>;
- hback-porch = <16>;
- hsync-len = <30>;
- vback-porch = <10>;
- vfront-porch = <22>;
- vsync-len = <13>;
- hsync-active = <0>;
- vsync-active = <0>;
- de-active = <1>;
- pixelclk-active = <1>;
- };
-
port {
lcd_in: endpoint {
remote-endpoint = <&dpi_out>;
pinctrl-names = "default", "sleep";
pinctrl-0 = <&emmc_pins_default>;
pinctrl-1 = <&emmc_pins_sleep>;
- ti,non-removable;
+ non-removable;
};
&mmc3 {
pinctrl-1 = <&mmc3_pins_sleep>;
cap-power-off-card;
keep-power-in-suspend;
- ti,non-removable;
+ non-removable;
#address-cells = <1>;
#size-cells = <0>;
ranges = <0x0 0x60000 0x1000>;
mmc1: mmc@0 {
- compatible = "ti,omap4-hsmmc";
+ compatible = "ti,am437-sdhci";
reg = <0x0 0x1000>;
- ti,dual-volt;
ti,needs-special-reset;
dmas = <&edma 24 0>,
<&edma 25 0>;
ranges = <0x0 0xd8000 0x1000>;
mmc2: mmc@0 {
- compatible = "ti,omap4-hsmmc";
+ compatible = "ti,am437-sdhci";
reg = <0x0 0x1000>;
ti,needs-special-reset;
dmas = <&edma 2 0>,
enable-gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
- panel-timing {
- clock-frequency = <9000000>;
- hactive = <480>;
- vactive = <272>;
- hfront-porch = <2>;
- hback-porch = <2>;
- hsync-len = <41>;
- vfront-porch = <2>;
- vback-porch = <2>;
- vsync-len = <10>;
- hsync-active = <0>;
- vsync-active = <0>;
- de-active = <1>;
- pixelclk-active = <1>;
- };
-
port {
lcd_in: endpoint {
remote-endpoint = <&dpi_out>;
pinctrl-1 = <&mmc3_pins_sleep>;
cap-power-off-card;
keep-power-in-suspend;
- ti,non-removable;
+ non-removable;
#address-cells = <1>;
#size-cells = <0>;
backlight = <&lcd_bl>;
- panel-timing {
- clock-frequency = <33000000>;
- hactive = <800>;
- vactive = <480>;
- hfront-porch = <210>;
- hback-porch = <16>;
- hsync-len = <30>;
- vback-porch = <10>;
- vfront-porch = <22>;
- vsync-len = <13>;
- hsync-active = <0>;
- vsync-active = <0>;
- de-active = <1>;
- pixelclk-active = <1>;
- };
-
port {
lcd_in: endpoint {
remote-endpoint = <&dpi_out>;
&cpsw_emac0 {
phy-handle = <&phy0>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-rxid";
};
&ocp {
status = "disabled";
};
- dma@20000 {
+ dma: dma@20000 {
compatible = "arm,pl330", "arm,primecell";
reg = <0x20000 0x1000>;
interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>,
clocks = <&iprocslow>;
clock-names = "apb_pclk";
#dma-cells = <1>;
+ dma-coherent;
+ status = "disabled";
};
sdio: sdhci@21000 {
status = "disabled";
};
- mailbox: mailbox@25000 {
+ mailbox: mailbox@25c00 {
compatible = "brcm,iproc-fa2-mbox";
- reg = <0x25000 0x445>;
- interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x25c00 0x400>;
+ interrupts = <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
#mbox-cells = <1>;
brcm,rx-status-len = <32>;
brcm,use-bcm-hdr;
};
memory {
+ device_type = "memory";
reg = <0x00000000 0x08000000
0x88000000 0x18000000>;
};
/* USB 3 support needed to be complete */
+&dma {
+ status = "okay";
+};
+
&amac0 {
status = "okay";
};
/* USB 3 support needed to be complete */
+&dma {
+ status = "okay";
+};
+
&amac0 {
status = "okay";
};
/* XHCI support needed to be complete */
+&dma {
+ status = "okay";
+};
+
&amac0 {
status = "okay";
};
/* USB 3 and SLIC support needed to be complete */
+&dma {
+ status = "okay";
+};
+
&amac0 {
status = "okay";
};
/* USB 3 and SLIC support needed to be complete */
+&dma {
+ status = "okay";
+};
+
&amac0 {
status = "okay";
};
status = "okay";
};
+&dma {
+ status = "okay";
+};
+
&amac0 {
status = "okay";
};
};
};
+&dma {
+ status = "okay";
+};
+
&amac0 {
status = "okay";
};
rx-num-evt = <32>;
};
-&mailbox5 {
- status = "okay";
- mbox_ipu1_ipc3x: mbox_ipu1_ipc3x {
- status = "okay";
- };
- mbox_dsp1_ipc3x: mbox_dsp1_ipc3x {
- status = "okay";
- };
-};
-
-&mailbox6 {
- status = "okay";
- mbox_ipu2_ipc3x: mbox_ipu2_ipc3x {
- status = "okay";
- };
- mbox_dsp2_ipc3x: mbox_dsp2_ipc3x {
- status = "okay";
- };
-};
-
&pcie1_rc {
status = "okay";
};
<SYSC_IDLE_SMART>,
<SYSC_IDLE_SMART_WKUP>;
/* Domains (P, C): l4per_pwrdm, l4per_clkdm */
- clocks = <&l4per_clkctrl DRA7_L4PER_TIMER4_CLKCTRL 0>,
- <&timer_sys_clk_div>;
- clock-names = "fck", "timer_sys_ck";
+ clocks = <&l4per_clkctrl DRA7_L4PER_TIMER4_CLKCTRL 0>;
+ clock-names = "fck";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x36000 0x1000>;
<SYSC_IDLE_SMART>,
<SYSC_IDLE_SMART_WKUP>;
/* Domains (P, C): ipu_pwrdm, ipu_clkdm */
- clocks = <&ipu_clkctrl DRA7_IPU_TIMER5_CLKCTRL 0>, <&timer_sys_clk_div>;
- clock-names = "fck", "timer_sys_ck";
+ clocks = <&ipu_clkctrl DRA7_IPU_TIMER5_CLKCTRL 0>;
+ clock-names = "fck";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x20000 0x1000>;
timer5: timer@0 {
compatible = "ti,omap5430-timer";
reg = <0x0 0x80>;
- clocks = <&ipu_clkctrl DRA7_IPU_TIMER5_CLKCTRL 24>;
- clock-names = "fck";
+ clocks = <&ipu_clkctrl DRA7_IPU_TIMER5_CLKCTRL 24>, <&timer_sys_clk_div>;
+ clock-names = "fck", "timer_sys_ck";
interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
};
};
<SYSC_IDLE_SMART>,
<SYSC_IDLE_SMART_WKUP>;
/* Domains (P, C): ipu_pwrdm, ipu_clkdm */
- clocks = <&ipu_clkctrl DRA7_IPU_TIMER6_CLKCTRL 0>,
- <&timer_sys_clk_div>;
- clock-names = "fck", "timer_sys_ck";
+ clocks = <&ipu_clkctrl DRA7_IPU_TIMER6_CLKCTRL 0>;
+ clock-names = "fck";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x22000 0x1000>;
timer6: timer@0 {
compatible = "ti,omap5430-timer";
reg = <0x0 0x80>;
- clocks = <&ipu_clkctrl DRA7_IPU_TIMER6_CLKCTRL 24>;
- clock-names = "fck";
+ clocks = <&ipu_clkctrl DRA7_IPU_TIMER6_CLKCTRL 24>, <&timer_sys_clk_div>;
+ clock-names = "fck", "timer_sys_ck";
interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
};
};
timer14: timer@0 {
compatible = "ti,omap5430-timer";
reg = <0x0 0x80>;
- clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER14_CLKCTRL 24>;
- clock-names = "fck";
+ clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER14_CLKCTRL 24>, <&timer_sys_clk_div>;
+ clock-names = "fck", "timer_sys_ck";
interrupts = <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>;
ti,timer-pwm;
};
timer15: timer@0 {
compatible = "ti,omap5430-timer";
reg = <0x0 0x80>;
- clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER15_CLKCTRL 24>;
- clock-names = "fck";
+ clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER15_CLKCTRL 24>, <&timer_sys_clk_div>;
+ clock-names = "fck", "timer_sys_ck";
interrupts = <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>;
ti,timer-pwm;
};
timer16: timer@0 {
compatible = "ti,omap5430-timer";
reg = <0x0 0x80>;
- clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER16_CLKCTRL 24>;
- clock-names = "fck";
+ clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER16_CLKCTRL 24>, <&timer_sys_clk_div>;
+ clock-names = "fck", "timer_sys_ck";
interrupts = <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>;
ti,timer-pwm;
};
status = "okay";
};
-&wdog1 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_wdog>;
- fsl,ext-reset-output;
- status = "okay";
-};
-
&iomuxc {
pinctrl-0 = <&pinctrl_reset_out &pinctrl_gpio>;
MX6UL_PAD_NAND_DATA03__USDHC2_DATA3 0x170f9
>;
};
-
- pinctrl_wdog: wdoggrp {
- fsl,pins = <
- MX6UL_PAD_GPIO1_IO09__WDOG1_WDOG_ANY 0x30b0
- >;
- };
};
status = "okay";
};
+&wdog1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+ status = "okay";
+};
+
&iomuxc {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_reset_out>;
MX6UL_PAD_SNVS_TAMPER9__GPIO5_IO09 0x1b0b0
>;
};
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX6UL_PAD_GPIO1_IO09__WDOG1_WDOG_ANY 0x18b0
+ >;
+ };
};
#interrupt-cells = <2>;
#address-cells = <1>;
#size-cells = <0>;
- spi-max-frequency = <3000000>;
+ spi-max-frequency = <9600000>;
spi-cs-high;
+ spi-cpol;
+ spi-cpha;
cpcap_adc: adc {
compatible = "motorola,mapphone-cpcap-adc";
ethernet@gpmc {
reg = <5 0 0xff>;
interrupt-parent = <&gpio2>;
- interrupts = <12 IRQ_TYPE_EDGE_FALLING>; /* gpio_44 */
+ interrupts = <12 IRQ_TYPE_LEVEL_LOW>; /* gpio_44 */
phy-mode = "mii";
ti,no-idle;
timer@0 {
assigned-clocks = <&l4_wkup_clkctrl OMAP4_TIMER1_CLKCTRL 24>;
- assigned-clock-parents = <&sys_clkin_ck>;
+ assigned-clock-parents = <&sys_32k_ck>;
};
};
};
};
- mcc {
- compatible = "arm,vexpress,config-bus";
- arm,vexpress,config-bridge = <&v2m_sysreg>;
-
- oscclk0 {
- /* MCC static memory clock */
- compatible = "arm,vexpress-osc";
- arm,vexpress-sysreg,func = <1 0>;
- freq-range = <25000000 60000000>;
- #clock-cells = <0>;
- clock-output-names = "v2m:oscclk0";
- };
-
- v2m_oscclk1: oscclk1 {
- /* CLCD clock */
- compatible = "arm,vexpress-osc";
- arm,vexpress-sysreg,func = <1 1>;
- freq-range = <23750000 65000000>;
- #clock-cells = <0>;
- clock-output-names = "v2m:oscclk1";
- };
-
- v2m_oscclk2: oscclk2 {
- /* IO FPGA peripheral clock */
- compatible = "arm,vexpress-osc";
- arm,vexpress-sysreg,func = <1 2>;
- freq-range = <24000000 24000000>;
- #clock-cells = <0>;
- clock-output-names = "v2m:oscclk2";
- };
-
- volt-vio {
- /* Logic level voltage */
- compatible = "arm,vexpress-volt";
- arm,vexpress-sysreg,func = <2 0>;
- regulator-name = "VIO";
- regulator-always-on;
- label = "VIO";
- };
-
- temp-mcc {
- /* MCC internal operating temperature */
- compatible = "arm,vexpress-temp";
- arm,vexpress-sysreg,func = <4 0>;
- label = "MCC";
- };
-
- reset {
- compatible = "arm,vexpress-reset";
- arm,vexpress-sysreg,func = <5 0>;
- };
-
- muxfpga {
- compatible = "arm,vexpress-muxfpga";
- arm,vexpress-sysreg,func = <7 0>;
- };
-
- shutdown {
- compatible = "arm,vexpress-shutdown";
- arm,vexpress-sysreg,func = <8 0>;
- };
-
- reboot {
- compatible = "arm,vexpress-reboot";
- arm,vexpress-sysreg,func = <9 0>;
- };
-
- dvimode {
- compatible = "arm,vexpress-dvimode";
- arm,vexpress-sysreg,func = <11 0>;
- };
- };
-
bus@8000000 {
motherboard-bus {
model = "V2M-P1";
};
};
};
+
+ mcc {
+ compatible = "arm,vexpress,config-bus";
+ arm,vexpress,config-bridge = <&v2m_sysreg>;
+
+ oscclk0 {
+ /* MCC static memory clock */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 0>;
+ freq-range = <25000000 60000000>;
+ #clock-cells = <0>;
+ clock-output-names = "v2m:oscclk0";
+ };
+
+ v2m_oscclk1: oscclk1 {
+ /* CLCD clock */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 1>;
+ freq-range = <23750000 65000000>;
+ #clock-cells = <0>;
+ clock-output-names = "v2m:oscclk1";
+ };
+
+ v2m_oscclk2: oscclk2 {
+ /* IO FPGA peripheral clock */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 2>;
+ freq-range = <24000000 24000000>;
+ #clock-cells = <0>;
+ clock-output-names = "v2m:oscclk2";
+ };
+
+ volt-vio {
+ /* Logic level voltage */
+ compatible = "arm,vexpress-volt";
+ arm,vexpress-sysreg,func = <2 0>;
+ regulator-name = "VIO";
+ regulator-always-on;
+ label = "VIO";
+ };
+
+ temp-mcc {
+ /* MCC internal operating temperature */
+ compatible = "arm,vexpress-temp";
+ arm,vexpress-sysreg,func = <4 0>;
+ label = "MCC";
+ };
+
+ reset {
+ compatible = "arm,vexpress-reset";
+ arm,vexpress-sysreg,func = <5 0>;
+ };
+
+ muxfpga {
+ compatible = "arm,vexpress-muxfpga";
+ arm,vexpress-sysreg,func = <7 0>;
+ };
+
+ shutdown {
+ compatible = "arm,vexpress-shutdown";
+ arm,vexpress-sysreg,func = <8 0>;
+ };
+
+ reboot {
+ compatible = "arm,vexpress-reboot";
+ arm,vexpress-sysreg,func = <9 0>;
+ };
+
+ dvimode {
+ compatible = "arm,vexpress-dvimode";
+ arm,vexpress-sysreg,func = <11 0>;
+ };
+ };
};
};
};
return dram_base + SZ_512M;
}
+struct efi_arm_entry_state {
+ u32 cpsr_before_ebs;
+ u32 sctlr_before_ebs;
+ u32 cpsr_after_ebs;
+ u32 sctlr_after_ebs;
+};
+
#endif /* _ASM_ARM_EFI_H */
#if defined(__APCS_26__)
#error Sorry, your compiler targets APCS-26 but this kernel requires APCS-32
#endif
-/*
- * GCC 4.8.0-4.8.2: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58854
- * miscompiles find_get_entry(), and can result in EXT3 and EXT4
- * filesystem corruption (possibly other FS too).
- */
-#if defined(GCC_VERSION) && GCC_VERSION >= 40800 && GCC_VERSION < 40803
-#error Your compiler is too buggy; it is known to miscompile kernels
-#error and result in filesystem corruption and oopses.
-#endif
int main(void)
{
select ARM_ERRATA_754322
select ARM_ERRATA_775420
select ARM_ERRATA_764369 if SMP
+ select ARM_TIMER_SP804
select THERMAL
select THERMAL_OF
help
if (!ocram_pool) {
pr_warn("%s: ocram pool unavailable!\n", __func__);
ret = -ENODEV;
- goto put_node;
+ goto put_device;
}
ocram_base = gen_pool_alloc(ocram_pool, size);
if (!ocram_base) {
pr_warn("%s: unable to alloc ocram!\n", __func__);
ret = -ENOMEM;
- goto put_node;
+ goto put_device;
}
phys = gen_pool_virt_to_phys(ocram_pool, ocram_base);
if (virt_out)
*virt_out = virt;
+put_device:
+ put_device(&pdev->dev);
put_node:
of_node_put(node);
if (!ocram_pool) {
pr_warn("%s: ocram pool unavailable!\n", __func__);
ret = -ENODEV;
- goto put_node;
+ goto put_device;
}
ocram_base = gen_pool_alloc(ocram_pool, MX6Q_SUSPEND_OCRAM_SIZE);
if (!ocram_base) {
pr_warn("%s: unable to alloc ocram!\n", __func__);
ret = -ENOMEM;
- goto put_node;
+ goto put_device;
}
ocram_pbase = gen_pool_virt_to_phys(ocram_pool, ocram_base);
ret = imx6_pm_get_base(&pm_info->mmdc_base, socdata->mmdc_compat);
if (ret) {
pr_warn("%s: failed to get mmdc base %d!\n", __func__, ret);
- goto put_node;
+ goto put_device;
}
ret = imx6_pm_get_base(&pm_info->src_base, socdata->src_compat);
&imx6_suspend,
MX6Q_SUSPEND_OCRAM_SIZE - sizeof(*pm_info));
- goto put_node;
+ goto put_device;
pl310_cache_map_failed:
iounmap(pm_info->gpc_base.vbase);
iounmap(pm_info->src_base.vbase);
src_map_failed:
iounmap(pm_info->mmdc_base.vbase);
+put_device:
+ put_device(&pdev->dev);
put_node:
of_node_put(node);
};
static const struct omap_hwmod_reset omap_reset_quirks[] = {
- { .match = "dss", .len = 3, .reset = omap_dss_reset, },
+ { .match = "dss_core", .len = 8, .reset = omap_dss_reset, },
{ .match = "hdq1w", .len = 5, .reset = omap_hdq1w_reset, },
{ .match = "i2c", .len = 3, .reset = omap_i2c_reset, },
{ .match = "wd_timer", .len = 8, .reset = omap2_wd_timer_reset, },
NULL
};
-static void sti_l2_write_sec(unsigned long val, unsigned reg)
-{
- /*
- * We can't write to secure registers as we are in non-secure
- * mode, until we have some SMI service available.
- */
-}
-
DT_MACHINE_START(STM, "STi SoC with Flattened Device Tree")
.dt_compat = stih41x_dt_match,
.l2c_aux_val = L2C_AUX_CTRL_SHARED_OVERRIDE |
L2C_AUX_CTRL_WAY_SIZE(4),
.l2c_aux_mask = 0xc0000fff,
.smp = smp_ops(sti_smp_ops),
- .l2c_write_sec = sti_l2_write_sec,
MACHINE_END
* see Documentation/devicetree/bindings/arm/xen.txt for the
* documentation of the Xen Device Tree format.
*/
-#define GRANT_TABLE_PHYSADDR 0
void __init xen_early_init(void)
{
of_scan_flat_dt(fdt_find_hyper_node, NULL);
default y
depends on !KVM || ARM64_VHE
depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC
- # GCC 9.1 and later inserts a .note.gnu.property section note for PAC
+ # Modern compilers insert a .note.gnu.property section note for PAC
# which is only understood by binutils starting with version 2.33.1.
- depends on !CC_IS_GCC || GCC_VERSION < 90100 || LD_VERSION >= 233010000
+ depends on LD_IS_LLD || LD_VERSION >= 233010000 || (CC_IS_GCC && GCC_VERSION < 90100)
depends on !CC_IS_CLANG || AS_HAS_CFI_NEGATE_RA_STATE
depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
help
ldo1_reg: LDO1 {
regulator-name = "LDO1";
- regulator-min-microvolt = <3000000>;
+ regulator-min-microvolt = <1600000>;
regulator-max-microvolt = <3300000>;
regulator-boot-on;
regulator-always-on;
ldo2_reg: LDO2 {
regulator-name = "LDO2";
- regulator-min-microvolt = <900000>;
+ regulator-min-microvolt = <800000>;
regulator-max-microvolt = <900000>;
regulator-boot-on;
regulator-always-on;
ldo1_reg: LDO1 {
regulator-name = "LDO1";
- regulator-min-microvolt = <3000000>;
+ regulator-min-microvolt = <1600000>;
regulator-max-microvolt = <3300000>;
regulator-boot-on;
regulator-always-on;
ldo2_reg: LDO2 {
regulator-name = "LDO2";
- regulator-min-microvolt = <900000>;
+ regulator-min-microvolt = <800000>;
regulator-max-microvolt = <900000>;
regulator-boot-on;
regulator-always-on;
ldo1_reg: LDO1 {
regulator-name = "LDO1";
- regulator-min-microvolt = <3000000>;
+ regulator-min-microvolt = <1600000>;
regulator-max-microvolt = <3300000>;
regulator-boot-on;
regulator-always-on;
ldo2_reg: LDO2 {
regulator-name = "LDO2";
- regulator-min-microvolt = <900000>;
+ regulator-min-microvolt = <800000>;
regulator-max-microvolt = <900000>;
regulator-boot-on;
regulator-always-on;
".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(feature) \
".popsection\n" \
- ".pushsection .altinstr_replacement, \"a\"\n" \
+ ".subsection 1\n" \
"663:\n\t" \
newinstr "\n" \
"664:\n\t" \
- ".popsection\n\t" \
+ ".previous\n\t" \
".org . - (664b-663b) + (662b-661b)\n\t" \
".org . - (662b-661b) + (664b-663b)\n" \
".endif\n"
662: .pushsection .altinstructions, "a"
altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
.popsection
- .pushsection .altinstr_replacement, "ax"
+ .subsection 1
663: \insn2
-664: .popsection
+664: .previous
.org . - (664b-663b) + (662b-661b)
.org . - (662b-661b) + (664b-663b)
.endif
.pushsection .altinstructions, "a"
altinstruction_entry 663f, 661f, \cap, 664f-663f, 662f-661f
.popsection
- .pushsection .altinstr_replacement, "ax"
+ .subsection 1
.align 2 /* So GAS knows label 661 is suitably aligned */
661:
.endm
.macro alternative_else
662:
.if .Lasm_alt_mode==0
- .pushsection .altinstr_replacement, "ax"
+ .subsection 1
.else
- .popsection
+ .previous
.endif
663:
.endm
.macro alternative_endif
664:
.if .Lasm_alt_mode==0
- .popsection
+ .previous
.endif
.org . - (664b-663b) + (662b-661b)
.org . - (662b-661b) + (664b-663b)
return read_sysreg_s(SYS_ICC_PMR_EL1);
}
-static inline void gic_write_pmr(u32 val)
+static __always_inline void gic_write_pmr(u32 val)
{
write_sysreg_s(val, SYS_ICC_PMR_EL1);
}
u64 (*read_cntvct_el0)(void);
int (*set_next_event_phys)(unsigned long, struct clock_event_device *);
int (*set_next_event_virt)(unsigned long, struct clock_event_device *);
+ bool disable_compat_vdso;
};
DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *,
cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH);
}
-static inline bool system_uses_irq_prio_masking(void)
+static __always_inline bool system_uses_irq_prio_masking(void)
{
return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING);
#define QCOM_CPU_PART_FALKOR 0xC00
#define QCOM_CPU_PART_KRYO 0x200
#define QCOM_CPU_PART_KRYO_3XX_SILVER 0x803
+#define QCOM_CPU_PART_KRYO_4XX_GOLD 0x804
#define QCOM_CPU_PART_KRYO_4XX_SILVER 0x805
#define NVIDIA_CPU_PART_DENVER 0x003
#define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
#define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
#define MIDR_QCOM_KRYO_3XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_SILVER)
+#define MIDR_QCOM_KRYO_4XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_GOLD)
#define MIDR_QCOM_KRYO_4XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_SILVER)
#define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER)
#define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
* instead.
*/
#define BTI_C hint 34 ;
-#define BTI_J hint 36 ;
/*
* When using in-kernel BTI we need to ensure that PCS-conformant assembly
SYM_START(name, SYM_L_WEAK, SYM_A_NONE) \
BTI_C
-#define SYM_INNER_LABEL(name, linkage) \
- .type name SYM_T_NONE ASM_NL \
- SYM_ENTRY(name, linkage, SYM_A_NONE) \
- BTI_J
-
#endif
/*
typedef struct {
atomic64_t id;
+#ifdef CONFIG_COMPAT
+ void *sigpage;
+#endif
void *vdso;
unsigned long flags;
} mm_context_t;
#define PAGE_HYP __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
#define PAGE_HYP_EXEC __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
#define PAGE_HYP_RO __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
-#define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
+#define PAGE_HYP_DEVICE __pgprot(_PROT_DEFAULT | PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_HYP | PTE_HYP_XN)
#define PAGE_S2_MEMATTR(attr) \
({ \
#ifndef __ASM_VDSOCLOCKSOURCE_H
#define __ASM_VDSOCLOCKSOURCE_H
-#define VDSO_ARCH_CLOCKMODES \
- VDSO_CLOCKMODE_ARCHTIMER
+#define VDSO_ARCH_CLOCKMODES \
+ /* vdso clocksource for both 32 and 64bit tasks */ \
+ VDSO_CLOCKMODE_ARCHTIMER, \
+ /* vdso clocksource for 64bit tasks only */ \
+ VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT
#endif
* update. Return something. Core will do another round and then
* see the mode change and fallback to the syscall.
*/
- if (clock_mode == VDSO_CLOCKMODE_NONE)
+ if (clock_mode != VDSO_CLOCKMODE_ARCHTIMER)
return 0;
/*
return ret;
}
+static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
+{
+ return vd->clock_mode == VDSO_CLOCKMODE_ARCHTIMER;
+}
+#define vdso_clocksource_ok vdso_clocksource_ok
+
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
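For context: defining vdso_clocksource_ok here overrides the generic fallback in lib/vdso, which otherwise accepts any mode except VDSO_CLOCKMODE_NONE (and so would let a compat task use the NOCOMPAT mode). A hedged sketch of that assumed generic default, shown only for contrast and not part of this patch:
/* Assumed lib/vdso fallback: accept any clock mode except NONE. */
#ifndef vdso_clocksource_ok
static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
{
	return vd->clock_mode != VDSO_CLOCKMODE_NONE;
}
#endif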
obj-$(CONFIG_COMPAT) += sys32.o signal32.o \
sys_compat.o
-ifneq ($(CONFIG_COMPAT_VDSO), y)
obj-$(CONFIG_COMPAT) += sigreturn32.o
-endif
obj-$(CONFIG_KUSER_HELPERS) += kuser32.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
obj-$(CONFIG_MODULES) += module.o
*/
static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
{
- unsigned long replptr;
-
- if (kernel_text_address(pc))
- return true;
-
- replptr = (unsigned long)ALT_REPL_PTR(alt);
- if (pc >= replptr && pc <= (replptr + alt->alt_len))
- return false;
-
- /*
- * Branching into *another* alternate sequence is doomed, and
- * we're not even trying to fix it up.
- */
- BUG();
+ unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt);
+ return !(pc >= replptr && pc <= (replptr + alt->alt_len));
}
#define align_down(x, a) ((unsigned long)(x) & ~(((unsigned long)(a)) - 1))
MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
{},
};
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
int scope)
{
- u32 midr = read_cpuid_id();
- /* Cortex-A76 r0p0 - r3p1 */
- struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);
-
- WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
- return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
+ return is_affected_midr_range_list(entry, scope) && is_kernel_in_hyp_mode();
}
#endif
MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
/* Neoverse-N1 r0p0 to r3p1 */
MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
+ /* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
+ MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
{},
};
#endif
#ifdef CONFIG_ARM64_ERRATUM_1530923
/* Cortex A55 r0p0 to r2p0 */
MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
+ /* Kryo4xx Silver (rdpe => r1p0) */
+ MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
#endif
{},
};
#endif
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+static const struct midr_range erratum_1463225[] = {
+ /* Cortex-A76 r0p0 - r3p1 */
+ MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
+ /* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
+ MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
+ {},
+};
+#endif
+
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
{
.capability = ARM64_WORKAROUND_1463225,
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
.matches = has_cortex_a76_erratum_1463225,
+ .midr_range_list = erratum_1463225,
},
#endif
#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
{ /* sentinel */ }
};
char const *str = "kpti command line option";
static const struct midr_range cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_1024718
MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 1, 0), // A55 r0p0 -r1p0
+ /* Kryo4xx Silver (rdpe => r1p0) */
+ MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
#endif
{},
};
/*
* The CPU masked interrupts, and we are leaving them masked during
* do_debug_exception(). Update PMR as if we had called
- * local_mask_daif().
+ * local_daif_mask().
*/
if (system_uses_irq_prio_masking())
gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
add \dst, \dst, #(\sym - .entry.tramp.text)
.endm
- // This macro corrupts x0-x3. It is the caller's duty
- // to save/restore them if required.
+ /*
+ * This macro corrupts x0-x3. It is the caller's duty to save/restore
+ * them if required.
+ */
.macro apply_ssbd, state, tmp1, tmp2
#ifdef CONFIG_ARM64_SSBD
alternative_cb arm64_enable_wa2_handling
stp x28, x29, [sp, #16 * 14]
.if \el == 0
+ .if \regsize == 32
+ /*
+ * If we're returning from a 32-bit task on a system affected by
+ * erratum 1418040, then re-enable userspace access to the virtual counter.
+ */
+#ifdef CONFIG_ARM64_ERRATUM_1418040
+alternative_if ARM64_WORKAROUND_1418040
+ mrs x0, cntkctl_el1
+ orr x0, x0, #2 // ARCH_TIMER_USR_VCT_ACCESS_EN
+ msr cntkctl_el1, x0
+alternative_else_nop_endif
+#endif
+ .endif
clear_gp_regs
mrs x21, sp_el0
ldr_this_cpu tsk, __entry_task, x20
msr sp_el0, tsk
- // Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
- // when scheduling.
+ /*
+ * Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
+ * when scheduling.
+ */
ldr x19, [tsk, #TSK_TI_FLAGS]
disable_step_tsk x19, x20
tst x22, #PSR_MODE32_BIT // native task?
b.eq 3f
+#ifdef CONFIG_ARM64_ERRATUM_1418040
+alternative_if ARM64_WORKAROUND_1418040
+ mrs x0, cntkctl_el1
+ bic x0, x0, #2 // ARCH_TIMER_USR_VCT_ACCESS_EN
+ msr cntkctl_el1, x0
+alternative_else_nop_endif
+#endif
+
#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
alternative_else_nop_endif
#endif
3:
-#ifdef CONFIG_ARM64_ERRATUM_1418040
-alternative_if_not ARM64_WORKAROUND_1418040
- b 4f
-alternative_else_nop_endif
- /*
- * if (x22.mode32 == cntkctl_el1.el0vcten)
- * cntkctl_el1.el0vcten = ~cntkctl_el1.el0vcten
- */
- mrs x1, cntkctl_el1
- eon x0, x1, x22, lsr #3
- tbz x0, #1, 4f
- eor x1, x1, #2 // ARCH_TIMER_USR_VCT_ACCESS_EN
- msr cntkctl_el1, x1
-4:
-#endif
scs_save tsk, x0
/* No kernel C function calls after this as user keys are set. */
.if \el == 0
alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
- bne 5f
+ bne 4f
msr far_el1, x30
tramp_alias x30, tramp_exit_native
br x30
-5:
+4:
tramp_alias x30, tramp_exit_compat
br x30
#endif
if (!kgdb_single_step)
return DBG_HOOK_ERROR;
- kgdb_handle_exception(1, SIGTRAP, 0, regs);
+ kgdb_handle_exception(0, SIGTRAP, 0, regs);
return DBG_HOOK_HANDLED;
}
NOKPROBE_SYMBOL(kgdb_step_brk_fn);
return 0;
/*
- * Compat (i.e. 32 bit) mode:
- * - PC has been set in the pt_regs struct in kernel_entry,
- * - Handle SP and LR here.
+ * Our handling of compat tasks (PERF_SAMPLE_REGS_ABI_32) is weird, but
+ * we're stuck with it for ABI compatibility reasons.
+ *
+ * For a 32-bit consumer inspecting a 32-bit task, then it will look at
+ * the first 16 registers (see arch/arm/include/uapi/asm/perf_regs.h).
+ * These correspond directly to a prefix of the registers saved in our
+ * 'struct pt_regs', with the exception of the PC, so we copy that down
+ * (x15 corresponds to SP_hyp in the architecture).
+ *
+ * So far, so good.
+ *
+ * The oddity arises when a 64-bit consumer looks at a 32-bit task and
+ * asks for registers beyond PERF_REG_ARM_MAX. In this case, we return
+ * SP_usr, LR_usr and PC in the positions where the AArch64 SP, LR and
+ * PC registers would normally live. The initial idea was to allow a
+ * 64-bit unwinder to unwind a 32-bit task and, although it's not clear
+ * how well that works in practice, somebody might be relying on it.
+ *
+ * At the time we make a sample, we don't know whether the consumer is
+ * 32-bit or 64-bit, so we have to cater for both possibilities.
*/
if (compat_user_mode(regs)) {
if ((u32)idx == PERF_REG_ARM64_SP)
return regs->compat_sp;
if ((u32)idx == PERF_REG_ARM64_LR)
return regs->compat_lr;
+ if (idx == 15)
+ return regs->pc;
}
if ((u32)idx == PERF_REG_ARM64_SP)
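To make the mapping above concrete, here is a hedged sketch (plain C, not from the patch; the helper name is made up, and the index constants follow arch/arm64/include/uapi/asm/perf_regs.h) of which saved field backs each index for a compat task:
/* Illustrative only: the compat-task register view described above. */
enum { SKETCH_PERF_REG_LR = 30, SKETCH_PERF_REG_SP = 31 };

static const char *compat_sample_source(unsigned int idx)
{
	if (idx == SKETCH_PERF_REG_SP)
		return "regs->compat_sp";	/* SP_usr */
	if (idx == SKETCH_PERF_REG_LR)
		return "regs->compat_lr";	/* LR_usr */
	if (idx == 15)				/* 32-bit consumers expect PC here */
		return "regs->pc";
	return "regs->regs[idx]";		/* direct prefix of struct pt_regs */
}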
void *alloc_insn_page(void)
{
- void *page;
-
- page = vmalloc_exec(PAGE_SIZE);
- if (page) {
- set_memory_ro((unsigned long)page, 1);
- set_vm_flush_reset_perms(page);
- }
-
- return page;
+ return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END,
+ GFP_KERNEL, PAGE_KERNEL_ROX, VM_FLUSH_RESET_PERMS,
+ NUMA_NO_NODE, __builtin_return_address(0));
}
/* arm kprobe: install breakpoint in text */
retcode = ptr_to_compat(ka->sa.sa_restorer);
} else {
/* Set up sigreturn pointer */
-#ifdef CONFIG_COMPAT_VDSO
- void *vdso_base = current->mm->context.vdso;
- void *vdso_trampoline;
-
- if (ka->sa.sa_flags & SA_SIGINFO) {
- if (thumb) {
- vdso_trampoline = VDSO_SYMBOL(vdso_base,
- compat_rt_sigreturn_thumb);
- } else {
- vdso_trampoline = VDSO_SYMBOL(vdso_base,
- compat_rt_sigreturn_arm);
- }
- } else {
- if (thumb) {
- vdso_trampoline = VDSO_SYMBOL(vdso_base,
- compat_sigreturn_thumb);
- } else {
- vdso_trampoline = VDSO_SYMBOL(vdso_base,
- compat_sigreturn_arm);
- }
- }
-
- retcode = ptr_to_compat(vdso_trampoline) + thumb;
-#else
unsigned int idx = thumb << 1;
if (ka->sa.sa_flags & SA_SIGINFO)
idx += 3;
- retcode = (unsigned long)current->mm->context.vdso +
+ retcode = (unsigned long)current->mm->context.sigpage +
(idx << 2) + thumb;
-#endif
}
regs->regs[0] = usig;
#ifdef CONFIG_COMPAT_VDSO
AA32_MAP_VVAR,
AA32_MAP_VDSO,
-#else
- AA32_MAP_SIGPAGE
#endif
+ AA32_MAP_SIGPAGE
};
static struct page *aarch32_vectors_page __ro_after_init;
-#ifndef CONFIG_COMPAT_VDSO
static struct page *aarch32_sig_page __ro_after_init;
-#endif
static struct vm_special_mapping aarch32_vdso_maps[] = {
[AA32_MAP_VECTORS] = {
.name = "[vdso]",
.mremap = aarch32_vdso_mremap,
},
-#else
+#endif /* CONFIG_COMPAT_VDSO */
[AA32_MAP_SIGPAGE] = {
.name = "[sigpage]", /* ABI */
.pages = &aarch32_sig_page,
},
-#endif /* CONFIG_COMPAT_VDSO */
};
static int aarch32_alloc_kuser_vdso_page(void)
return 0;
}
-#ifdef CONFIG_COMPAT_VDSO
-static int __aarch32_alloc_vdso_pages(void)
-{
- int ret;
-
- vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
- vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];
-
- ret = __vdso_init(VDSO_ABI_AA32);
- if (ret)
- return ret;
-
- return aarch32_alloc_kuser_vdso_page();
-}
-#else
-static int __aarch32_alloc_vdso_pages(void)
+static int aarch32_alloc_sigpage(void)
{
extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
unsigned long sigpage;
- int ret;
sigpage = get_zeroed_page(GFP_ATOMIC);
if (!sigpage)
memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
aarch32_sig_page = virt_to_page(sigpage);
flush_dcache_page(aarch32_sig_page);
+ return 0;
+}
- ret = aarch32_alloc_kuser_vdso_page();
- if (ret)
- free_page(sigpage);
+#ifdef CONFIG_COMPAT_VDSO
+static int __aarch32_alloc_vdso_pages(void)
+{
+ vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
+ vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];
- return ret;
+ return __vdso_init(VDSO_ABI_AA32);
}
#endif /* CONFIG_COMPAT_VDSO */
static int __init aarch32_alloc_vdso_pages(void)
{
- return __aarch32_alloc_vdso_pages();
+ int ret;
+
+#ifdef CONFIG_COMPAT_VDSO
+ ret = __aarch32_alloc_vdso_pages();
+ if (ret)
+ return ret;
+#endif
+
+ ret = aarch32_alloc_sigpage();
+ if (ret)
+ return ret;
+
+ return aarch32_alloc_kuser_vdso_page();
}
arch_initcall(aarch32_alloc_vdso_pages);
return PTR_ERR_OR_ZERO(ret);
}
-#ifndef CONFIG_COMPAT_VDSO
static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
unsigned long addr;
if (IS_ERR(ret))
goto out;
- mm->context.vdso = (void *)addr;
+ mm->context.sigpage = (void *)addr;
out:
return PTR_ERR_OR_ZERO(ret);
}
-#endif /* !CONFIG_COMPAT_VDSO */
int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
mm,
bprm,
uses_interp);
-#else
- ret = aarch32_sigreturn_setup(mm);
+ if (ret)
+ goto out;
#endif /* CONFIG_COMPAT_VDSO */
+ ret = aarch32_sigreturn_setup(mm);
out:
mmap_write_unlock(mm);
return ret;
# potential future proofing if we end up with internal calls to the exported
# routines, as x86 does (see 6f121e548f83 ("x86, vdso: Reimplement vdso.so
# preparation in build-time C")).
-ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \
- -Bsymbolic --eh-frame-hdr --build-id -n $(btildflags-y) -T
+ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \
+ -Bsymbolic $(call ld-option, --no-eh-frame-hdr) --build-id -n \
+ $(btildflags-y) -T
ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18
ccflags-y += -DDISABLE_BRANCH_PROFILING
-CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS)
+CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS) $(GCC_PLUGINS_CFLAGS)
KBUILD_CFLAGS += $(DISABLE_LTO)
KASAN_SANITIZE := n
UBSAN_SANITIZE := n
.text
+/*
+ * NOTE!!! You may notice that all of the .cfi directives in this file have
+ * been commented out. This is because they have been shown to trigger segfaults
+ * in libgcc when unwinding out of a SIGCANCEL handler to invoke pthread
+ * cleanup handlers during the thread cancellation dance. By omitting the
+ * directives, we trigger an arm64-specific fallback path in the unwinder which
+ * recognises the signal frame and restores many of the registers directly from
+ * the sigcontext. Re-enabling the cfi directives here therefore needs to be
+ * much more comprehensive to reduce the risk of further regressions.
+ */
+
/* Ensure that the mysterious NOP can be associated with a function. */
- .cfi_startproc
+// .cfi_startproc
/*
- * .cfi_signal_frame causes the corresponding Frame Description Entry in the
- * .eh_frame section to be annotated as a signal frame. This allows DWARF
- * unwinders (e.g. libstdc++) to implement _Unwind_GetIPInfo(), which permits
- * unwinding out of the signal trampoline without the need for the mysterious
- * NOP.
+ * .cfi_signal_frame causes the corresponding Frame Description Entry (FDE) in
+ * the .eh_frame section to be annotated as a signal frame. This allows DWARF
+ * unwinders (e.g. libstdc++) to implement _Unwind_GetIPInfo() and identify
+ * the next frame using the unmodified return address instead of subtracting 1,
+ * which may yield the wrong FDE.
*/
- .cfi_signal_frame
+// .cfi_signal_frame
/*
* Tell the unwinder where to locate the frame record linking back to the
- * interrupted context. We don't provide unwind info for registers other
- * than the frame pointer and the link register here; in practice, this
- * is sufficient for unwinding in C/C++ based runtimes and the values in
- * the sigcontext may have been modified by this point anyway. Debuggers
+ * interrupted context. We don't provide unwind info for registers other than
+ * the frame pointer and the link register here; in practice, this is likely to
+ * be insufficient for unwinding in C/C++ based runtimes, especially without a
+ * means to restore the stack pointer. Thankfully, unwinders and debuggers
* already have baked-in strategies for attempting to unwind out of signals.
*/
- .cfi_def_cfa x29, 0
- .cfi_offset x29, 0 * 8
- .cfi_offset x30, 1 * 8
+// .cfi_def_cfa x29, 0
+// .cfi_offset x29, 0 * 8
+// .cfi_offset x30, 1 * 8
/*
* This mysterious NOP is required for some unwinders (e.g. libc++) that
nop // Mysterious NOP
/*
- * GDB relies on being able to identify the sigreturn instruction sequence to
- * unwind from signal handlers. We cannot, therefore, use SYM_FUNC_START()
- * here, as it will emit a BTI C instruction and break the unwinder. Thankfully,
- * this function is only ever called from a RET and so omitting the landing pad
- * is perfectly fine.
+ * GDB, libgcc and libunwind rely on being able to identify the sigreturn
+ * instruction sequence to unwind from signal handlers. We cannot, therefore,
+ * use SYM_FUNC_START() here, as it will emit a BTI C instruction and break the
+ * unwinder. Thankfully, this function is only ever called from a RET and so
+ * omitting the landing pad is perfectly fine.
*/
SYM_CODE_START(__kernel_rt_sigreturn)
+// PLEASE DO NOT MODIFY
mov x8, #__NR_rt_sigreturn
+// PLEASE DO NOT MODIFY
svc #0
- .cfi_endproc
+// PLEASE DO NOT MODIFY
+// .cfi_endproc
SYM_CODE_END(__kernel_rt_sigreturn)
emit_aarch64_feature_1_and
c-obj-vdso := note.o
c-obj-vdso-gettimeofday := vgettimeofday.o
-asm-obj-vdso := sigreturn.o
ifneq ($(c-gettimeofday-y),)
VDSO_CFLAGS_gettimeofday_o += -include $(c-gettimeofday-y)
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file provides both A32 and T32 versions, in accordance with the
- * arm sigreturn code.
- *
- * Please read the comments in arch/arm64/kernel/vdso/sigreturn.S to
- * understand some of the craziness in here.
- *
- * Copyright (C) 2018 ARM Limited
- */
-
-#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
-#include <asm/unistd.h>
-
- .text
-
- .arm
- .fnstart
- .save {r0-r15}
- .pad #COMPAT_SIGFRAME_REGS_OFFSET
- nop
-SYM_CODE_START(__kernel_sigreturn_arm)
- mov r7, #__NR_compat_sigreturn
- svc #0
- .fnend
-SYM_CODE_END(__kernel_sigreturn_arm)
-
- .fnstart
- .save {r0-r15}
- .pad #COMPAT_RT_SIGFRAME_REGS_OFFSET
- nop
-SYM_CODE_START(__kernel_rt_sigreturn_arm)
- mov r7, #__NR_compat_rt_sigreturn
- svc #0
- .fnend
-SYM_CODE_END(__kernel_rt_sigreturn_arm)
-
- .thumb
- .fnstart
- .save {r0-r15}
- .pad #COMPAT_SIGFRAME_REGS_OFFSET
- nop
-SYM_CODE_START(__kernel_sigreturn_thumb)
- mov r7, #__NR_compat_sigreturn
- svc #0
- .fnend
-SYM_CODE_END(__kernel_sigreturn_thumb)
-
- .fnstart
- .save {r0-r15}
- .pad #COMPAT_RT_SIGFRAME_REGS_OFFSET
- nop
-SYM_CODE_START(__kernel_rt_sigreturn_thumb)
- mov r7, #__NR_compat_rt_sigreturn
- svc #0
- .fnend
-SYM_CODE_END(__kernel_rt_sigreturn_thumb)
__vdso_clock_gettime;
__vdso_gettimeofday;
__vdso_clock_getres;
- __kernel_sigreturn_arm;
- __kernel_sigreturn_thumb;
- __kernel_rt_sigreturn_arm;
- __kernel_rt_sigreturn_thumb;
__vdso_clock_gettime64;
local: *;
};
}
-
-/*
- * Make the sigreturn code visible to the kernel.
- */
-VDSO_compat_sigreturn_arm = __kernel_sigreturn_arm;
-VDSO_compat_sigreturn_thumb = __kernel_sigreturn_thumb;
-VDSO_compat_rt_sigreturn_arm = __kernel_rt_sigreturn_arm;
-VDSO_compat_rt_sigreturn_thumb = __kernel_rt_sigreturn_thumb;
*(.altinstructions)
__alt_instructions_end = .;
}
- .altinstr_replacement : {
- *(.altinstr_replacement)
- }
. = ALIGN(SEGMENT_ALIGN);
__inittext_end = .;
1: cmp x0, #HVC_RESET_VECTORS
b.ne 1f
-reset:
+
/*
- * Reset kvm back to the hyp stub. Do not clobber x0-x4 in
- * case we coming via HVC_SOFT_RESTART.
+ * Set the HVC_RESET_VECTORS return code before entering the common
+ * path so that we do not clobber x0-x2 in case we are coming via
+ * HVC_SOFT_RESTART.
*/
+ mov x0, xzr
+reset:
+ /* Reset kvm back to the hyp stub. */
mrs x5, sctlr_el2
mov_q x6, SCTLR_ELx_FLAGS
bic x5, x5, x6 // Clear SCTL_M and etc
/* Install stub vectors */
adr_l x5, __hyp_stub_vectors
msr vbar_el2, x5
- mov x0, xzr
eret
1: /* Bad stub call */
}
/*
- * On VHE ensure that only guest events have EL0 counting enabled
+ * On VHE ensure that only guest events have EL0 counting enabled.
+ * This is called from both vcpu_{load,put} and the sysreg handling.
+ * Since the latter is preemptible, special care must be taken to
+ * disable preemption.
*/
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
{
if (!has_vhe())
return;
+ preempt_disable();
host = this_cpu_ptr(&kvm_host_data);
events_guest = host->pmu_events.events_guest;
events_host = host->pmu_events.events_host;
kvm_vcpu_pmu_enable_el0(events_guest);
kvm_vcpu_pmu_disable_el0(events_host);
+ preempt_enable();
}
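The same pinning requirement can be met with get_cpu()/put_cpu(); a hedged equivalent sketch (the function name is illustrative, not from the patch):
/* Equivalent sketch: get_cpu() disables preemption, put_cpu() re-enables. */
static void pmu_events_update_sketch(void)
{
	struct kvm_host_data *host;

	get_cpu();			/* pin: this_cpu_ptr() is now stable */
	host = this_cpu_ptr(&kvm_host_data);
	/* ... update host->pmu_events here ... */
	put_cpu();
}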
/*
#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
+#include <linux/sched/stat.h>
#include <asm/kvm_mmu.h>
#include <asm/pvclock-abi.h>
return base;
}
+static bool kvm_arm_pvtime_supported(void)
+{
+ return !!sched_info_on();
+}
+
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
int ret = 0;
int idx;
- if (attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
+ if (!kvm_arm_pvtime_supported() ||
+ attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
return -ENXIO;
if (get_user(ipa, user))
u64 __user *user = (u64 __user *)attr->addr;
u64 ipa;
- if (attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
+ if (!kvm_arm_pvtime_supported() ||
+ attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
return -ENXIO;
ipa = vcpu->arch.steal.base;
{
switch (attr->attr) {
case KVM_ARM_VCPU_PVTIME_IPA:
- return 0;
+ if (kvm_arm_pvtime_supported())
+ return 0;
}
return -ENXIO;
}
*/
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
- int ret = -EINVAL;
+ int ret;
bool loaded;
u32 pstate;
if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) {
- if (kvm_vcpu_enable_ptrauth(vcpu))
+ if (kvm_vcpu_enable_ptrauth(vcpu)) {
+ ret = -EINVAL;
goto out;
+ }
}
switch (vcpu->arch.target) {
default:
if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
- if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1))
+ if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1)) {
+ ret = -EINVAL;
goto out;
+ }
pstate = VCPU_RESET_PSTATE_SVC;
} else {
pstate = VCPU_RESET_PSTATE_EL1;
!irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
disable_irq_nosync(irq);
+ /*
+ * The v4.1 doorbell can fire concurrently with the vPE being
+ * made non-resident. Ensure we only update pending_last
+ * *after* the non-residency sequence has completed.
+ */
+ raw_spin_lock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);
vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
+ raw_spin_unlock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);
+
kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
kvm_vcpu_kick(vcpu);
pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ",
__bss_stop, memory_start, memory_start, memory_end);
- memblock_add(memory_start, memory_end - memory_start);
+ memblock_add(_rambase, memory_end - _rambase);
+ memblock_reserve(_rambase, memory_start - _rambase);
/* Keep a copy of command line */
*cmdline_p = &command_line[0];
m68k_memory[0].addr = _rambase;
m68k_memory[0].size = _ramend - _rambase;
- memblock_add(m68k_memory[0].addr, m68k_memory[0].size);
+ memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0);
/* compute total pages in system */
num_pages = PFN_DOWN(_ramend - _rambase);
"MIC1N", "Built-in Mic";
simple-audio-card,pin-switches = "Speaker", "Headphones";
- simple-audio-card,hp-det-gpio = <&gpf 21 GPIO_ACTIVE_HIGH>;
+ simple-audio-card,hp-det-gpio = <&gpf 21 GPIO_ACTIVE_LOW>;
simple-audio-card,aux-devs = <&speaker_amp>, <&headphones_amp>;
simple-audio-card,bitclock-master = <&dai_codec>;
\
/* \
* We can't unroll if the number of iterations isn't \
- * compile-time constant. Unfortunately GCC versions \
- * up until 4.6 tend to miss obvious constants & cause \
+ * compile-time constant. Unfortunately clang versions \
+ * up until 8.0 tend to miss obvious constants & cause \
* this check to fail, even though they go on to \
* generate reasonable code for the switch statement, \
* so we skip the sanity check for those compilers. \
*/ \
- BUILD_BUG_ON((CONFIG_GCC_VERSION >= 40700 || \
- CONFIG_CLANG_VERSION >= 80000) && \
- !__builtin_constant_p(times)); \
+ BUILD_BUG_ON(!__builtin_constant_p(times)); \
\
switch (times) { \
case 32: fn(__VA_ARGS__); /* fall through */ \
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
/* Do not emulate on unsupported core models. */
- if (!loongson3_cpucfg_emulation_enabled(&current_cpu_data))
+ preempt_disable();
+ if (!loongson3_cpucfg_emulation_enabled(&current_cpu_data)) {
+ preempt_enable();
return -1;
-
+ }
regs->regs[rd] = loongson3_cpucfg_read_synthesized(
&current_cpu_data, sel);
-
+ preempt_enable();
return 0;
}
change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
status_set);
+ back_to_back_c0_hazard();
}
unsigned int hwrena;
vcpu->arch.gprs[rt], *(u32 *)data);
break;
+#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
case sdl_op:
run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
vcpu->arch.host_cp0_badvaddr) & (~0x7);
vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
vcpu->arch.gprs[rt], *(u64 *)data);
break;
+#endif
#ifdef CONFIG_CPU_LOONGSON64
case sdc2_op:
}
break;
+#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
case ldl_op:
run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
vcpu->arch.host_cp0_badvaddr) & (~0x7);
break;
}
break;
+#endif
#ifdef CONFIG_CPU_LOONGSON64
case ldc2_op:
VCPU_STAT("vz_ghfc", vz_ghfc_exits),
VCPU_STAT("vz_gpa", vz_gpa_exits),
VCPU_STAT("vz_resvd", vz_resvd_exits),
+#ifdef CONFIG_CPU_LOONGSON64
VCPU_STAT("vz_cpucfg", vz_cpucfg_exits),
+#endif
#endif
VCPU_STAT("halt_successful_poll", halt_successful_poll),
VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
clkdev_add_pmu("1e10b308.eth", NULL, 0, 0, PMU_SWITCH |
PMU_PPE_DP | PMU_PPE_TC);
clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF);
- clkdev_add_pmu("1e108000.gswip", "gphy0", 0, 0, PMU_GPHY);
- clkdev_add_pmu("1e108000.gswip", "gphy1", 0, 0, PMU_GPHY);
+ clkdev_add_pmu("1e108000.switch", "gphy0", 0, 0, PMU_GPHY);
+ clkdev_add_pmu("1e108000.switch", "gphy1", 0, 0, PMU_GPHY);
clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
clkdev_add_pmu("1e116000.mei", "afe", 1, 2, PMU_ANALOG_DSL_AFE);
clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
PMU_SWITCH | PMU_PPE_DPLUS | PMU_PPE_DPLUM |
PMU_PPE_EMA | PMU_PPE_TC | PMU_PPE_SLL01 |
PMU_PPE_QSB | PMU_PPE_TOP);
- clkdev_add_pmu("1e108000.gswip", "gphy0", 0, 0, PMU_GPHY);
- clkdev_add_pmu("1e108000.gswip", "gphy1", 0, 0, PMU_GPHY);
+ clkdev_add_pmu("1e108000.switch", "gphy0", 0, 0, PMU_GPHY);
+ clkdev_add_pmu("1e108000.switch", "gphy1", 0, 0, PMU_GPHY);
clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO);
clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
* We need to iterate through the pages, clearing the dcache for
* them and setting the cache-inhibit bit.
*/
+ mmap_read_lock(&init_mm);
error = walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops,
NULL);
+ mmap_read_unlock(&init_mm);
+
if (error)
return ERR_PTR(error);
return cpu_addr;
{
unsigned long va = (unsigned long)cpu_addr;
+ mmap_read_lock(&init_mm);
/* walk_page_range shouldn't be able to fail here */
WARN_ON(walk_page_range(&init_mm, va, va + size,
&clear_nocache_walk_ops, NULL));
+ mmap_read_unlock(&init_mm);
}
void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
INT_DEFINE_BEGIN(denorm_exception)
IVEC=0x1500
IHSRR=1
- IBRANCH_COMMON=0
+ IBRANCH_TO_COMMON=0
IKVM_REAL=1
INT_DEFINE_END(denorm_exception)
/* Can't access quadrants 1 or 2 in non-HV mode, call the HV to do it */
if (kvmhv_on_pseries())
return plpar_hcall_norets(H_COPY_TOFROM_GUEST, lpid, pid, eaddr,
- __pa(to), __pa(from), n);
+ (to != NULL) ? __pa(to): 0,
+ (from != NULL) ? __pa(from): 0, n);
quadrant = 1;
if (!pid)
int pkey_shift;
u64 amr;
- if (!is_pkey_enabled(pkey))
- return true;
-
pkey_shift = pkeyshift(pkey);
if (execute && !(read_iamr() & (IAMR_EX_BIT << pkey_shift)))
return true;
#include <linux/memblock.h>
#include <linux/libfdt.h>
#include <linux/crash_core.h>
+#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/kdump.h>
select ARCH_HAS_SET_DIRECT_MAP
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_STRICT_KERNEL_RWX if MMU
+ select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
+ select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
#ifndef __ASM_GDB_XML_H_
#define __ASM_GDB_XML_H_
-#define kgdb_arch_gdb_stub_feature riscv_gdb_stub_feature
-static const char riscv_gdb_stub_feature[64] =
+const char riscv_gdb_stub_feature[64] =
"PacketSize=800;qXfer:features:read+;";
static const char gdb_xfer_read_target[31] = "qXfer:features:read:target.xml:";
#ifndef __ASSEMBLY__
-extern int kgdb_has_hit_break(unsigned long addr);
extern unsigned long kgdb_compiled_break;
static inline void arch_kgdb_breakpoint(void)
#define DBG_REG_BADADDR_OFF 34
#define DBG_REG_CAUSE_OFF 35
-#include <asm/gdb_xml.h>
+extern const char riscv_gdb_stub_feature[64];
+
+#define kgdb_arch_gdb_stub_feature riscv_gdb_stub_feature
#endif
#endif
DECLARE_INSN(c_bnez, MATCH_C_BNEZ, MASK_C_BNEZ)
DECLARE_INSN(sret, MATCH_SRET, MASK_SRET)
-int decode_register_index(unsigned long opcode, int offset)
+static int decode_register_index(unsigned long opcode, int offset)
{
return (opcode >> offset) & 0x1F;
}
-int decode_register_index_short(unsigned long opcode, int offset)
+static int decode_register_index_short(unsigned long opcode, int offset)
{
return ((opcode >> offset) & 0x7) + 8;
}
/* Calculate the new address for after a step */
-int get_step_address(struct pt_regs *regs, unsigned long *next_addr)
+static int get_step_address(struct pt_regs *regs, unsigned long *next_addr)
{
unsigned long pc = regs->epc;
unsigned long *regs_ptr = (unsigned long *)regs;
return 0;
}
-int do_single_step(struct pt_regs *regs)
+static int do_single_step(struct pt_regs *regs)
{
/* Determine where the target instruction will send us to */
unsigned long addr = 0;
return err;
}
-int kgdb_riscv_kgdbbreak(unsigned long addr)
+static int kgdb_riscv_kgdbbreak(unsigned long addr)
{
if (stepped_address == addr)
return KGDB_SW_SINGLE_STEP;
obj-vdso = $(patsubst %, %.o, $(vdso-syms)) note.o
ifneq ($(c-gettimeofday-y),)
- CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y)
+ CFLAGS_vgettimeofday.o += -fPIC -include $(c-gettimeofday-y)
endif
# Build rules
obj-y += vdso.o vdso-syms.o
CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
+# Disable -pg to prevent ftrace call sites from being inserted
+CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os
+
# Disable gcov profiling for VDSO code
GCOV_PROFILE := n
#include <linux/time.h>
#include <linux/types.h>
+extern
+int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts);
int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
{
return __cvdso_clock_gettime(clock, ts);
}
+extern
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz);
int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
{
return __cvdso_gettimeofday(tv, tz);
}
+extern
+int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res);
int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res)
{
return __cvdso_clock_getres(clock_id, res);
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_WATCH_QUEUE=y
CONFIG_AUDIT=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG_PROC=y
CONFIG_NUMA_BALANCING=y
CONFIG_MEMCG=y
-CONFIG_MEMCG_SWAP=y
CONFIG_BLK_CGROUP=y
CONFIG_CFS_BANDWIDTH=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_USER_NS=y
CONFIG_CHECKPOINT_RESTORE=y
CONFIG_SCHED_AUTOGROUP=y
-CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
+CONFIG_BPF_LSM=y
CONFIG_BPF_SYSCALL=y
CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_VFIO_CCW=m
CONFIG_VFIO_AP=m
CONFIG_CRASH_DUMP=y
-CONFIG_HIBERNATION=y
-CONFIG_PM_DEBUG=y
CONFIG_PROTECTED_VIRTUALIZATION_GUEST=y
CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
CONFIG_KVM=m
-CONFIG_VHOST_NET=m
-CONFIG_VHOST_VSOCK=m
+CONFIG_S390_UNWIND_SELFTEST=y
CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
CONFIG_BLK_WBT=y
CONFIG_BLK_CGROUP_IOLATENCY=y
CONFIG_BLK_CGROUP_IOCOST=y
+CONFIG_BLK_INLINE_ENCRYPTION=y
+CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
CONFIG_BSD_DISKLABEL=y
CONFIG_CMA_DEBUGFS=y
CONFIG_MEM_SOFT_DIRTY=y
CONFIG_ZSWAP=y
-CONFIG_ZBUD=m
CONFIG_ZSMALLOC=m
CONFIG_ZSMALLOC_STAT=y
CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESPINTCP=y
CONFIG_INET_IPCOMP=m
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESPINTCP=y
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_MIP6=m
CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_RPL_LWTUNNEL=y
+CONFIG_MPTCP=y
CONFIG_NETFILTER=y
+CONFIG_BRIDGE_NETFILTER=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_L2TP_IP=m
CONFIG_L2TP_ETH=m
CONFIG_BRIDGE=m
+CONFIG_BRIDGE_MRP=y
CONFIG_VLAN_8021Q=m
CONFIG_VLAN_8021Q_GVRP=y
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_FQ_CODEL=m
CONFIG_NET_SCH_INGRESS=m
CONFIG_NET_SCH_PLUG=m
+CONFIG_NET_SCH_ETS=m
CONFIG_NET_CLS_BASIC=m
CONFIG_NET_CLS_TCINDEX=m
CONFIG_NET_CLS_ROUTE4=m
CONFIG_NET_ACT_SIMP=m
CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_ACT_CSUM=m
+CONFIG_NET_ACT_GATE=m
CONFIG_DNS_RESOLVER=y
CONFIG_OPENVSWITCH=m
CONFIG_VSOCKETS=m
CONFIG_NET_PKTGEN=m
# CONFIG_NET_DROP_MONITOR is not set
CONFIG_PCI=y
+# CONFIG_PCIEASPM is not set
CONFIG_PCI_DEBUG=y
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_S390=y
CONFIG_DM_MULTIPATH=m
CONFIG_DM_MULTIPATH_QL=m
CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_MULTIPATH_HST=m
CONFIG_DM_DELAY=m
CONFIG_DM_UEVENT=y
CONFIG_DM_FLAKEY=m
CONFIG_IFB=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
+CONFIG_VXLAN=m
+CONFIG_BAREUDP=m
CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m
CONFIG_MLX5_CORE_EN=y
-# CONFIG_MLXFW is not set
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_MICROCHIP is not set
# CONFIG_NET_VENDOR_MICROSEMI is not set
# CONFIG_NET_VENDOR_TI is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_XILINX is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=y
+CONFIG_VHOST_NET=m
+CONFIG_VHOST_VSOCK=m
CONFIG_S390_CCW_IOMMU=y
CONFIG_S390_AP_IOMMU=y
CONFIG_EXT4_FS=y
CONFIG_UDF_FS=m
CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
+CONFIG_EXFAT_FS=m
CONFIG_NTFS_FS=m
CONFIG_NTFS_RW=y
CONFIG_PROC_KCORE=y
CONFIG_DLM=m
CONFIG_UNICODE=y
CONFIG_PERSISTENT_KEYRINGS=y
-CONFIG_BIG_KEYS=y
CONFIG_ENCRYPTED_KEYS=m
+CONFIG_KEY_NOTIFICATIONS=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_FORTIFY_SOURCE=y
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
+CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_SEQIV=y
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_CRYPTO_STATS=y
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
CONFIG_CRYPTO_PAES_S390=m
CONFIG_PANIC_ON_OOPS=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_WQ_WATCHDOG=y
+CONFIG_TEST_LOCKUP=m
CONFIG_DEBUG_TIMEKEEPING=y
CONFIG_PROVE_LOCKING=y
CONFIG_LOCK_STAT=y
CONFIG_DEBUG_CREDENTIALS=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=300
+# CONFIG_RCU_TRACE is not set
CONFIG_LATENCYTOP=y
+CONFIG_BOOTTIME_TRACING=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_STACK_TRACER=y
CONFIG_IRQSOFF_TRACER=y
CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
CONFIG_LKDTM=m
CONFIG_TEST_LIST_SORT=y
+CONFIG_TEST_MIN_HEAP=y
CONFIG_TEST_SORT=y
CONFIG_KPROBES_SANITY_TEST=y
CONFIG_RBTREE_TEST=y
CONFIG_INTERVAL_TREE_TEST=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
+CONFIG_TEST_BITOPS=m
CONFIG_TEST_BPF=m
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_WATCH_QUEUE=y
CONFIG_AUDIT=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG_PROC=y
CONFIG_NUMA_BALANCING=y
CONFIG_MEMCG=y
-CONFIG_MEMCG_SWAP=y
CONFIG_BLK_CGROUP=y
CONFIG_CFS_BANDWIDTH=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_USER_NS=y
CONFIG_CHECKPOINT_RESTORE=y
CONFIG_SCHED_AUTOGROUP=y
-CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
+CONFIG_BPF_LSM=y
CONFIG_BPF_SYSCALL=y
CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_TUNE_ZEC12=y
CONFIG_NR_CPUS=512
CONFIG_NUMA=y
-# CONFIG_NUMA_EMU is not set
CONFIG_HZ_100=y
CONFIG_KEXEC_FILE=y
CONFIG_KEXEC_SIG=y
CONFIG_VFIO_CCW=m
CONFIG_VFIO_AP=m
CONFIG_CRASH_DUMP=y
-CONFIG_HIBERNATION=y
-CONFIG_PM_DEBUG=y
CONFIG_PROTECTED_VIRTUALIZATION_GUEST=y
CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
CONFIG_KVM=m
-CONFIG_VHOST_NET=m
-CONFIG_VHOST_VSOCK=m
+CONFIG_S390_UNWIND_SELFTEST=m
CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
CONFIG_BLK_WBT=y
CONFIG_BLK_CGROUP_IOLATENCY=y
CONFIG_BLK_CGROUP_IOCOST=y
+CONFIG_BLK_INLINE_ENCRYPTION=y
+CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
CONFIG_BSD_DISKLABEL=y
CONFIG_FRONTSWAP=y
CONFIG_MEM_SOFT_DIRTY=y
CONFIG_ZSWAP=y
-CONFIG_ZBUD=m
CONFIG_ZSMALLOC=m
CONFIG_ZSMALLOC_STAT=y
CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESPINTCP=y
CONFIG_INET_IPCOMP=m
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESPINTCP=y
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_MIP6=m
CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_RPL_LWTUNNEL=y
+CONFIG_MPTCP=y
CONFIG_NETFILTER=y
+CONFIG_BRIDGE_NETFILTER=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_L2TP_IP=m
CONFIG_L2TP_ETH=m
CONFIG_BRIDGE=m
+CONFIG_BRIDGE_MRP=y
CONFIG_VLAN_8021Q=m
CONFIG_VLAN_8021Q_GVRP=y
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_FQ_CODEL=m
CONFIG_NET_SCH_INGRESS=m
CONFIG_NET_SCH_PLUG=m
+CONFIG_NET_SCH_ETS=m
CONFIG_NET_CLS_BASIC=m
CONFIG_NET_CLS_TCINDEX=m
CONFIG_NET_CLS_ROUTE4=m
CONFIG_NET_ACT_SIMP=m
CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_ACT_CSUM=m
+CONFIG_NET_ACT_GATE=m
CONFIG_DNS_RESOLVER=y
CONFIG_OPENVSWITCH=m
CONFIG_VSOCKETS=m
CONFIG_NET_PKTGEN=m
# CONFIG_NET_DROP_MONITOR is not set
CONFIG_PCI=y
+# CONFIG_PCIEASPM is not set
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_S390=y
CONFIG_UEVENT_HELPER=y
CONFIG_DM_MULTIPATH=m
CONFIG_DM_MULTIPATH_QL=m
CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_MULTIPATH_HST=m
CONFIG_DM_DELAY=m
CONFIG_DM_UEVENT=y
CONFIG_DM_FLAKEY=m
CONFIG_IFB=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
+CONFIG_VXLAN=m
+CONFIG_BAREUDP=m
CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m
CONFIG_MLX5_CORE_EN=y
-# CONFIG_MLXFW is not set
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_MICROCHIP is not set
# CONFIG_NET_VENDOR_MICROSEMI is not set
# CONFIG_NET_VENDOR_TI is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_XILINX is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=y
+CONFIG_VHOST_NET=m
+CONFIG_VHOST_VSOCK=m
CONFIG_S390_CCW_IOMMU=y
CONFIG_S390_AP_IOMMU=y
CONFIG_EXT4_FS=y
CONFIG_UDF_FS=m
CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
+CONFIG_EXFAT_FS=m
CONFIG_NTFS_FS=m
CONFIG_NTFS_RW=y
CONFIG_PROC_KCORE=y
CONFIG_DLM=m
CONFIG_UNICODE=y
CONFIG_PERSISTENT_KEYRINGS=y
-CONFIG_BIG_KEYS=y
CONFIG_ENCRYPTED_KEYS=m
+CONFIG_KEY_NOTIFICATIONS=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_SECURITY_SELINUX=y
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
+CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_SEQIV=y
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_CRYPTO_STATS=y
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y
CONFIG_CORDIC=m
+CONFIG_PRIME_NUMBERS=m
CONFIG_CRC4=m
CONFIG_CRC7=m
CONFIG_CRC8=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_PANIC_ON_OOPS=y
+CONFIG_TEST_LOCKUP=m
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_LATENCYTOP=y
+CONFIG_BOOTTIME_TRACING=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_STACK_TRACER=y
CONFIG_SCHED_TRACER=y
# CONFIG_BOUNCE is not set
CONFIG_NET=y
# CONFIG_IUCV is not set
+# CONFIG_ETHTOOL_NETLINK is not set
CONFIG_DEVTMPFS=y
CONFIG_BLK_DEV_RAM=y
# CONFIG_BLK_DEV_XPRAM is not set
# CONFIG_MONWRITER is not set
# CONFIG_S390_VMUR is not set
# CONFIG_HID is not set
+# CONFIG_VIRTIO_MENU is not set
+# CONFIG_VHOST_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
# CONFIG_DNOTIFY is not set
# CONFIG_INOTIFY_USER is not set
# CONFIG_MISC_FILESYSTEMS is not set
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_LSM="yama,loadpin,safesetid,integrity"
+# CONFIG_ZLIB_DFLTCC is not set
CONFIG_PRINTK_TIME=y
+# CONFIG_SYMBOLIC_ERRNAME is not set
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
#define KVM_USER_MEM_SLOTS 32
/*
- * These seem to be used for allocating ->chip in the routing table,
- * which we don't use. 4096 is an out-of-thin-air value. If we need
- * to look at ->chip later on, we'll need to revisit this.
+ * These seem to be used for allocating ->chip in the routing table, which we
+ * don't use. 1 is as small as we can get to reduce the needed memory. If we
+ * need to look at ->chip later on, we'll need to revisit this.
*/
#define KVM_NR_IRQCHIPS 1
-#define KVM_IRQCHIP_NUM_PINS 4096
+#define KVM_IRQCHIP_NUM_PINS 1
#define KVM_HALT_POLL_NS_DEFAULT 50000
/* s390-specific vcpu->requests bit members */
if (!areas)
goto fail_malloc_areas;
for (i = 0; i < nr_areas; i++) {
+ /* __GFP_NOWARN to avoid a user-triggerable WARN; we handle allocation failures */
areas[i] = kmalloc_array(pages_per_area,
sizeof(debug_entry_t *),
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_NOWARN);
if (!areas[i])
goto fail_malloc_areas2;
for (j = 0; j < pages_per_area; j++) {
psw_t psw;
psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA;
+ if (IS_ENABLED(CONFIG_KASAN))
+ psw.mask |= PSW_MASK_DAT;
psw.addr = (unsigned long) s390_base_ext_handler;
S390_lowcore.external_new_psw = psw;
psw.addr = (unsigned long) s390_base_pgm_handler;
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
BPOFF
lg %r12,__LC_CURRENT
- lghi %r13,__TASK_thread
lghi %r14,_PIF_SYSCALL
.Lsysc_per:
+ lghi %r13,__TASK_thread
lg %r15,__LC_KERNEL_STACK
la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
return err;
}
+static bool is_callchain_event(struct perf_event *event)
+{
+ u64 sample_type = event->attr.sample_type;
+
+ return sample_type & (PERF_SAMPLE_CALLCHAIN | PERF_SAMPLE_REGS_USER |
+ PERF_SAMPLE_STACK_USER);
+}
+
static int cpumsf_pmu_event_init(struct perf_event *event)
{
int err;
/* No support for taken branch sampling */
- if (has_branch_stack(event))
+ /* No support for callchain, stacks and registers */
+ if (has_branch_stack(event) || is_callchain_event(event))
return -EOPNOTSUPP;
switch (event->attr.type) {
if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
nospec_auto_detect();
+ jump_label_init();
parse_early_param();
#ifdef CONFIG_CRASH_DUMP
/* Deactivate elfcorehdr= kernel parameter */
_PAGE_YOUNG);
#ifdef CONFIG_MEM_SOFT_DIRTY
pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY,
- _PAGE_DIRTY);
+ _PAGE_SOFT_DIRTY);
#endif
pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_NOEXEC,
_PAGE_NOEXEC);
long copied;
spin_lock_irqsave(&s390_kernel_write_lock, flags);
- while (size) {
- copied = s390_kernel_write_odd(tmp, src, size);
- tmp += copied;
- src += copied;
- size -= copied;
+ if (!(flags & PSW_MASK_DAT)) {
+ memcpy(dst, src, size);
+ } else {
+ while (size) {
+ copied = s390_kernel_write_odd(tmp, src, size);
+ tmp += copied;
+ src += copied;
+ size -= copied;
+ }
}
spin_unlock_irqrestore(&s390_kernel_write_lock, flags);
}
zdev->fh = ccdf->fh;
zdev->state = ZPCI_FN_STATE_CONFIGURED;
- zpci_create_device(zdev);
+ ret = zpci_enable_device(zdev);
+ if (ret)
+ break;
+
+ pdev = pci_scan_single_device(zdev->zbus->bus, zdev->devfn);
+ if (!pdev)
+ break;
+
+ pci_bus_add_device(pdev);
+ pci_lock_rescan_remove();
+ pci_bus_add_devices(zdev->zbus->bus);
+ pci_unlock_rescan_remove();
break;
case 0x0302: /* Reserved -> Standby */
if (!zdev) {
select ARCH_HAS_FILTER_PGPROT
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
- select ARCH_HAS_KCOV if X86_64
+ select ARCH_HAS_KCOV if X86_64 && STACK_VALIDATION
select ARCH_HAS_MEM_ENCRYPT
select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
* We place all of the values on our mini stack so lret can
* used to perform that far jump.
*/
- pushl $__KERNEL_CS
leal startup_64(%ebp), %eax
#ifdef CONFIG_EFI_MIXED
movl efi32_boot_args(%ebp), %edi
movl efi32_boot_args+8(%ebp), %edx // saved bootparams pointer
cmpl $0, %edx
jnz 1f
+ /*
+ * efi_pe_entry uses MS calling convention, which requires 32 bytes of
+ * shadow space on the stack even if all arguments are passed in
+ * registers. We also need an additional 8 bytes for the space that
+ * would be occupied by the return address, and this also results in
+ * the correct stack alignment for entry.
+ */
+ subl $40, %esp
leal efi_pe_entry(%ebp), %eax
movl %edi, %ecx // MS calling convention
movl %esi, %edx
1:
#endif
+ pushl $__KERNEL_CS
pushl %eax
/* Enter paged protected Mode, activating Long Mode */
SYM_DATA_START_LOCAL(boot_stack)
.fill BOOT_STACK_SIZE, 1, 0
+ .balign 16
SYM_DATA_END_LABEL(boot_stack, SYM_L_LOCAL, boot_stack_end)
/*
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
+/* Check that the stack and regs on entry from user mode are sane. */
+static void check_user_regs(struct pt_regs *regs)
+{
+ if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) {
+ /*
+ * Make sure that the entry code gave us a sensible EFLAGS
+ * register. Native because we want to check the actual CPU
+ * state, not the interrupt state as imagined by Xen.
+ */
+ unsigned long flags = native_save_fl();
+ WARN_ON_ONCE(flags & (X86_EFLAGS_AC | X86_EFLAGS_DF |
+ X86_EFLAGS_NT));
+
+ /* We think we came from user mode. Make sure pt_regs agrees. */
+ WARN_ON_ONCE(!user_mode(regs));
+
+ /*
+ * All entries from user mode (except #DF) should be on the
+ * normal thread stack and should have user pt_regs in the
+ * correct location.
+ */
+ WARN_ON_ONCE(!on_thread_stack());
+ WARN_ON_ONCE(regs != task_pt_regs(current));
+ }
+}
+
#ifdef CONFIG_CONTEXT_TRACKING
/**
* enter_from_user_mode - Establish state when coming from user mode
unsigned long ret = 0;
u32 work;
- if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
- BUG_ON(regs != task_pt_regs(current));
-
work = READ_ONCE(ti->flags);
if (work & (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU)) {
{
struct thread_info *ti;
+ check_user_regs(regs);
+
enter_from_user_mode();
instrumentation_begin();
/* Handles int $0x80 */
__visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
{
+ check_user_regs(regs);
+
enter_from_user_mode();
instrumentation_begin();
vdso_image_32.sym_int80_landing_pad;
bool success;
+ check_user_regs(regs);
+
/*
* SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
* so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0;
#endif
}
+
+/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
+__visible noinstr long do_SYSENTER_32(struct pt_regs *regs)
+{
+ /* SYSENTER loses RSP, but the vDSO saved it in RBP. */
+ regs->sp = regs->bp;
+
+ /* SYSENTER clobbers EFLAGS.IF. Assume it was set in usermode. */
+ regs->flags |= X86_EFLAGS_IF;
+
+ return do_fast_syscall_32(regs);
+}
#endif
SYSCALL_DEFINE0(ni_syscall)
bool noinstr idtentry_enter_cond_rcu(struct pt_regs *regs)
{
if (user_mode(regs)) {
+ check_user_regs(regs);
enter_from_user_mode();
return false;
}
*/
void noinstr idtentry_enter_user(struct pt_regs *regs)
{
+ check_user_regs(regs);
enter_from_user_mode();
}
.Lsysenter_past_esp:
pushl $__USER_DS /* pt_regs->ss */
- pushl %ebp /* pt_regs->sp (stashed in bp) */
+ pushl $0 /* pt_regs->sp (placeholder) */
pushfl /* pt_regs->flags (except IF = 0) */
- orl $X86_EFLAGS_IF, (%esp) /* Fix IF */
pushl $__USER_CS /* pt_regs->cs */
pushl $0 /* pt_regs->ip = 0 (placeholder) */
pushl %eax /* pt_regs->orig_ax */
.Lsysenter_flags_fixed:
movl %esp, %eax
- call do_fast_syscall_32
+ call do_SYSENTER_32
/* XEN PV guests always use IRET path */
ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
"jmp .Lsyscall_32_done", X86_FEATURE_XENPV
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
- /*
- * User tracing code (ptrace or signal handlers) might assume that
- * the saved RAX contains a 32-bit number when we're invoking a 32-bit
- * syscall. Just in case the high bits are nonzero, zero-extend
- * the syscall number. (This could almost certainly be deleted
- * with no ill effects.)
- */
- movl %eax, %eax
-
/* Construct struct pt_regs on stack */
pushq $__USER32_DS /* pt_regs->ss */
- pushq %rbp /* pt_regs->sp (stashed in bp) */
+ pushq $0 /* pt_regs->sp = 0 (placeholder) */
/*
* Push flags. This is nasty. First, interrupts are currently
- * off, but we need pt_regs->flags to have IF set. Second, even
- * if TF was set when SYSENTER started, it's clear by now. We fix
- * that later using TIF_SINGLESTEP.
+ * off, but we need pt_regs->flags to have IF set. Second, if TF
+ * was set in usermode, it's still set, and we're singlestepping
+ * through this code. do_SYSENTER_32() will fix up IF.
*/
pushfq /* pt_regs->flags (except IF = 0) */
- orl $X86_EFLAGS_IF, (%rsp) /* Fix saved flags */
pushq $__USER32_CS /* pt_regs->cs */
pushq $0 /* pt_regs->ip = 0 (placeholder) */
+SYM_INNER_LABEL(entry_SYSENTER_compat_after_hwframe, SYM_L_GLOBAL)
+
+ /*
+ * User tracing code (ptrace or signal handlers) might assume that
+ * the saved RAX contains a 32-bit number when we're invoking a 32-bit
+ * syscall. Just in case the high bits are nonzero, zero-extend
+ * the syscall number. (This could almost certainly be deleted
+ * with no ill effects.)
+ */
+ movl %eax, %eax
+
pushq %rax /* pt_regs->orig_ax */
pushq %rdi /* pt_regs->di */
pushq %rsi /* pt_regs->si */
.Lsysenter_flags_fixed:
movq %rsp, %rdi
- call do_fast_syscall_32
+ call do_SYSENTER_32
/* XEN PV guests always use IRET path */
ALTERNATIVE "testl %eax, %eax; jz swapgs_restore_regs_and_return_to_usermode", \
"jmp swapgs_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV
# SPDX-License-Identifier: GPL-2.0-only
obj-y += core.o probe.o
-obj-$(PERF_EVENTS_INTEL_RAPL) += rapl.o
+obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL) += rapl.o
obj-y += amd/
obj-$(CONFIG_X86_LOCAL_APIC) += msr.o
obj-$(CONFIG_CPU_SUP_INTEL) += intel/
guest_id = generate_guest_id(0, LINUX_VERSION_CODE, 0);
wrmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);
- hv_hypercall_pg = vmalloc_exec(PAGE_SIZE);
+ hv_hypercall_pg = __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START,
+ VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_ROX,
+ VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
+ __builtin_return_address(0));
if (hv_hypercall_pg == NULL) {
wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
goto remove_cpuhp_state;
return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr);
}
-static __no_kcsan_or_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
+static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
{
- /*
- * Because this is a plain access, we need to disable KCSAN here to
- * avoid double instrumentation via instrumented bitops.
- */
return ((1UL << (nr & (BITS_PER_LONG-1))) &
(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}
unreachable(); \
} while (0)
+/*
+ * This instrumentation_begin() is strictly speaking incorrect, but it
+ * suppresses the complaints from WARN()s in noinstr code. If such a WARN()
+ * were to trigger, we'd rather wreck the machine in an attempt to get the
+ * message out than not know about it.
+ */
#define __WARN_FLAGS(flags) \
do { \
instrumentation_begin(); \
return false;
}
#endif
+#ifdef CONFIG_IA32_FEAT_CTL
+void init_ia32_feat_ctl(struct cpuinfo_x86 *c);
+#else
+static inline void init_ia32_feat_ctl(struct cpuinfo_x86 *c) {}
+#endif
#endif /* _ASM_X86_CPU_H */
extern void setup_cpu_local_masks(void);
+/*
+ * NMI and MCE exceptions need cpu_is_offline() _really_ early,
+ * so provide an arch_ special for them to avoid instrumentation.
+ */
+#if NR_CPUS > 1
+static __always_inline bool arch_cpu_online(int cpu)
+{
+ return arch_test_bit(cpu, cpumask_bits(cpu_online_mask));
+}
+#else
+static __always_inline bool arch_cpu_online(int cpu)
+{
+ return cpu == 0;
+}
+#endif
+
+#define arch_cpu_is_offline(cpu) unlikely(!arch_cpu_online(cpu))
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_CPUMASK_H */
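A hedged usage sketch (simplified call site, not part of this hunk) of how a noinstr entry path can use the new helper without touching the instrumented cpumask code:
/* Illustrative only: early bail-out on an offline CPU from noinstr code. */
DEFINE_IDTENTRY_RAW(exc_nmi)
{
	if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id()))
		return;
	/* ... normal NMI handling continues ... */
}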
* MXCSR and XCR definitions:
*/
+static inline void ldmxcsr(u32 mxcsr)
+{
+ asm volatile("ldmxcsr %0" :: "m" (mxcsr));
+}
+
extern unsigned int mxcsr_feature_mask;
#define XCR_XFEATURE_ENABLED_MASK 0x00000000
#else /* CONFIG_X86_64 */
-/* Maps to a regular IDTENTRY on 32bit for now */
-# define DECLARE_IDTENTRY_IST DECLARE_IDTENTRY
-# define DEFINE_IDTENTRY_IST DEFINE_IDTENTRY
-
/**
* DECLARE_IDTENTRY_DF - Declare functions for double fault 32bit variant
* @vector: Vector number (ignored for C)
#endif /* !CONFIG_X86_64 */
/* C-Code mapping */
+#define DECLARE_IDTENTRY_NMI DECLARE_IDTENTRY_RAW
+#define DEFINE_IDTENTRY_NMI DEFINE_IDTENTRY_RAW
+
+#ifdef CONFIG_X86_64
#define DECLARE_IDTENTRY_MCE DECLARE_IDTENTRY_IST
#define DEFINE_IDTENTRY_MCE DEFINE_IDTENTRY_IST
#define DEFINE_IDTENTRY_MCE_USER DEFINE_IDTENTRY_NOIST
-#define DECLARE_IDTENTRY_NMI DECLARE_IDTENTRY_RAW
-#define DEFINE_IDTENTRY_NMI DEFINE_IDTENTRY_RAW
-
#define DECLARE_IDTENTRY_DEBUG DECLARE_IDTENTRY_IST
#define DEFINE_IDTENTRY_DEBUG DEFINE_IDTENTRY_IST
#define DEFINE_IDTENTRY_DEBUG_USER DEFINE_IDTENTRY_NOIST
-
-/**
- * DECLARE_IDTENTRY_XEN - Declare functions for XEN redirect IDT entry points
- * @vector: Vector number (ignored for C)
- * @func: Function name of the entry point
- *
- * Used for xennmi and xendebug redirections. No DEFINE as this is all ASM
- * indirection magic.
- */
-#define DECLARE_IDTENTRY_XEN(vector, func) \
- asmlinkage void xen_asm_exc_xen##func(void); \
- asmlinkage void asm_exc_xen##func(void)
+#endif
#else /* !__ASSEMBLY__ */
# define DECLARE_IDTENTRY_MCE(vector, func) \
DECLARE_IDTENTRY(vector, func)
-# define DECLARE_IDTENTRY_DEBUG(vector, func) \
- DECLARE_IDTENTRY(vector, func)
-
/* No ASM emitted for DF as this goes through a C shim */
# define DECLARE_IDTENTRY_DF(vector, func)
/* No ASM code emitted for NMI */
#define DECLARE_IDTENTRY_NMI(vector, func)
-/* XEN NMI and DB wrapper */
-#define DECLARE_IDTENTRY_XEN(vector, func) \
- idtentry vector asm_exc_xen##func exc_##func has_error_code=0
-
/*
* ASM code to emit the common vector entry stubs where each stub is
* packed into 8 bytes.
DECLARE_IDTENTRY_RAW_ERRORCODE(X86_TRAP_PF, exc_page_fault);
#ifdef CONFIG_X86_MCE
+#ifdef CONFIG_X86_64
DECLARE_IDTENTRY_MCE(X86_TRAP_MC, exc_machine_check);
+#else
+DECLARE_IDTENTRY_RAW(X86_TRAP_MC, exc_machine_check);
+#endif
#endif
/* NMI */
DECLARE_IDTENTRY_NMI(X86_TRAP_NMI, exc_nmi);
-DECLARE_IDTENTRY_XEN(X86_TRAP_NMI, nmi);
+#ifdef CONFIG_XEN_PV
+DECLARE_IDTENTRY_RAW(X86_TRAP_NMI, xenpv_exc_nmi);
+#endif
/* #DB */
+#ifdef CONFIG_X86_64
DECLARE_IDTENTRY_DEBUG(X86_TRAP_DB, exc_debug);
-DECLARE_IDTENTRY_XEN(X86_TRAP_DB, debug);
+#else
+DECLARE_IDTENTRY_RAW(X86_TRAP_DB, exc_debug);
+#endif
+#ifdef CONFIG_XEN_PV
+DECLARE_IDTENTRY_RAW(X86_TRAP_DB, xenpv_exc_debug);
+#endif
/* #DF */
DECLARE_IDTENTRY_DF(X86_TRAP_DF, exc_double_fault);
atomic_t vapics_in_nmi_mode;
struct mutex apic_map_lock;
struct kvm_apic_map *apic_map;
- bool apic_map_dirty;
+ atomic_t apic_map_dirty;
bool apic_access_page_done;
unsigned long apicv_inhibit_reasons;
void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t offset, unsigned long mask);
- int (*write_log_dirty)(struct kvm_vcpu *vcpu);
+ int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa);
/* pmu operations of sub-arch */
const struct kvm_pmu_ops *pmu_ops;
#define TPAUSE_C01_STATE 1
#define TPAUSE_C02_STATE 0
-u32 get_umwait_control_msr(void);
-
static inline void __monitor(const void *eax, unsigned long ecx,
unsigned long edx)
{
#define _PAGE_TABLE_NOENC (__PP|__RW|_USR|___A| 0|___D| 0| 0)
#define _PAGE_TABLE (__PP|__RW|_USR|___A| 0|___D| 0| 0| _ENC)
#define __PAGE_KERNEL_RO (__PP| 0| 0|___A|__NX|___D| 0|___G)
+#define __PAGE_KERNEL_ROX (__PP| 0| 0|___A| 0|___D| 0|___G)
#define __PAGE_KERNEL_NOCACHE (__PP|__RW| 0|___A|__NX|___D| 0|___G| __NC)
#define __PAGE_KERNEL_VVAR (__PP| 0|_USR|___A|__NX|___D| 0|___G)
#define __PAGE_KERNEL_LARGE (__PP|__RW| 0|___A|__NX|___D|_PSE|___G)
#define PAGE_KERNEL_RO __pgprot_mask(__PAGE_KERNEL_RO | _ENC)
#define PAGE_KERNEL_EXEC __pgprot_mask(__PAGE_KERNEL_EXEC | _ENC)
#define PAGE_KERNEL_EXEC_NOENC __pgprot_mask(__PAGE_KERNEL_EXEC | 0)
+#define PAGE_KERNEL_ROX __pgprot_mask(__PAGE_KERNEL_ROX | _ENC)
#define PAGE_KERNEL_NOCACHE __pgprot_mask(__PAGE_KERNEL_NOCACHE | _ENC)
#define PAGE_KERNEL_LARGE __pgprot_mask(__PAGE_KERNEL_LARGE | _ENC)
#define PAGE_KERNEL_LARGE_EXEC __pgprot_mask(__PAGE_KERNEL_LARGE_EXEC | _ENC)
#define IO_BITMAP_OFFSET_INVALID (__KERNEL_TSS_LIMIT + 1)
struct entry_stack {
- unsigned long words[64];
+ char stack[PAGE_SIZE];
};
struct entry_stack_page {
};
struct kvm_vmx_nested_state_hdr {
- __u32 flags;
__u64 vmxon_pa;
__u64 vmcs12_pa;
- __u64 preemption_timer_deadline;
struct {
__u16 flags;
} smm;
+
+ __u32 flags;
+ __u64 preemption_timer_deadline;
};
struct kvm_svm_nested_state_data {
#include <linux/sched.h>
#include <linux/sched/clock.h>
+#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/e820/api.h>
#include <asm/mtrr.h>
cr4_clear_bits(X86_CR4_UMIP);
}
+/* These bits should not change their value after CPU init is finished. */
+static const unsigned long cr4_pinned_mask =
+ X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP | X86_CR4_FSGSBASE;
static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning);
static unsigned long cr4_pinned_bits __ro_after_init;
void native_write_cr4(unsigned long val)
{
- unsigned long bits_missing = 0;
+ unsigned long bits_changed = 0;
set_register:
asm volatile("mov %0,%%cr4": "+r" (val), "+m" (cr4_pinned_bits));
if (static_branch_likely(&cr_pinning)) {
- if (unlikely((val & cr4_pinned_bits) != cr4_pinned_bits)) {
- bits_missing = ~val & cr4_pinned_bits;
- val |= bits_missing;
+ if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) {
+ bits_changed = (val & cr4_pinned_mask) ^ cr4_pinned_bits;
+ val = (val & ~cr4_pinned_mask) | cr4_pinned_bits;
goto set_register;
}
- /* Warn after we've set the missing bits. */
- WARN_ONCE(bits_missing, "CR4 bits went missing: %lx!?\n",
- bits_missing);
+ /* Warn after we've corrected the changed bits. */
+ WARN_ONCE(bits_changed, "pinned CR4 bits changed: 0x%lx!?\n",
+ bits_changed);
}
}
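/*
 * A worked example of the pinning math above, with hypothetical values;
 * sketch_cr4_fixup() is illustrative only. Suppose cr4_pinned_mask covers
 * bits 20 and 21, both were set at boot, and bit 20 is then tampered off:
 */
static void sketch_cr4_fixup(void)
{
	unsigned long mask    = 0x3UL << 20;		/* pretend pinned mask */
	unsigned long pinned  = 0x3UL << 20;		/* boot-time snapshot */
	unsigned long val     = 0x1UL << 21;		/* bit 20 cleared */
	unsigned long changed = (val & mask) ^ pinned;	/* == BIT(20) */

	val = (val & ~mask) | pinned;	/* both pinned bits restored */
	/*
	 * Unlike the old OR-in-the-missing-bits fixup, the XOR form also
	 * catches pinned bits flipped *on* relative to the boot-time value.
	 */
	(void)changed;
}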
#if IS_MODULE(CONFIG_LKDTM)
if (boot_cpu_has(X86_FEATURE_PCID))
cr4 |= X86_CR4_PCIDE;
if (static_branch_likely(&cr_pinning))
- cr4 |= cr4_pinned_bits;
+ cr4 = (cr4 & ~cr4_pinned_mask) | cr4_pinned_bits;
__write_cr4(cr4);
*/
static void __init setup_cr_pinning(void)
{
- unsigned long mask;
-
- mask = (X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP);
- cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & mask;
+ cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & cr4_pinned_mask;
static_key_enable(&cr_pinning.key);
}
extern u64 x86_read_arch_cap_msr(void);
-#ifdef CONFIG_IA32_FEAT_CTL
-void init_ia32_feat_ctl(struct cpuinfo_x86 *c);
-#endif
-
#endif /* ARCH_X86_CPU_H */
static enum split_lock_detect_state sld_state __ro_after_init = sld_off;
static u64 msr_test_ctrl_cache __ro_after_init;
+/*
+ * With a name like MSR_TEST_CTL it should go without saying, but don't touch
+ * MSR_TEST_CTL unless the CPU is one of the whitelisted models. Writing it
+ * on CPUs that do not support SLD can cause fireworks, even when writing '0'.
+ */
+static bool cpu_model_supports_sld __ro_after_init;
+
/*
* Processors which have self-snooping capability can handle conflicting
* memory type across CPUs by snooping its own cache. However, there exists
static void split_lock_init(void)
{
- split_lock_verify_msr(sld_state != sld_off);
+ if (cpu_model_supports_sld)
+ split_lock_verify_msr(sld_state != sld_off);
}
static void split_lock_warn(unsigned long ip)
return;
}
+ cpu_model_supports_sld = true;
split_lock_setup();
}
{
unsigned int cpu = smp_processor_id();
- if (cpu_is_offline(cpu) ||
+ if (arch_cpu_is_offline(cpu) ||
(crashing_cpu != -1 && crashing_cpu != cpu)) {
u64 mcgstatus;
static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
{
+ WARN_ON_ONCE(user_mode(regs));
+
/*
* Only required when from kernel mode. See
* mce_check_crashing_cpu() for details.
}
#else
/* 32bit unified entry point */
-DEFINE_IDTENTRY_MCE(exc_machine_check)
+DEFINE_IDTENTRY_RAW(exc_machine_check)
{
unsigned long dr7;
c->x86_cache_max_rmid = ecx;
c->x86_cache_occ_scale = ebx;
- if (c->x86_vendor == X86_VENDOR_INTEL)
- c->x86_cache_mbm_width_offset = eax & 0xff;
- else
- c->x86_cache_mbm_width_offset = -1;
+ c->x86_cache_mbm_width_offset = eax & 0xff;
+
+ if (c->x86_vendor == X86_VENDOR_AMD && !c->x86_cache_mbm_width_offset)
+ c->x86_cache_mbm_width_offset = MBM_CNTR_WIDTH_OFFSET_AMD;
}
}
#define MBA_IS_LINEAR 0x4
#define MBA_MAX_MBPS U32_MAX
#define MAX_MBA_BW_AMD 0x800
+#define MBM_CNTR_WIDTH_OFFSET_AMD 20
#define RMID_VAL_ERROR BIT_ULL(63)
#define RMID_VAL_UNAVAIL BIT_ULL(62)
_d_cdp = rdt_find_domain(_r_cdp, d->id, NULL);
if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) {
_r_cdp = NULL;
+ _d_cdp = NULL;
ret = -EINVAL;
}
*/
static u32 umwait_control_cached = UMWAIT_CTRL_VAL(100000, UMWAIT_C02_ENABLE);
-u32 get_umwait_control_msr(void)
-{
- return umwait_control_cached;
-}
-EXPORT_SYMBOL_GPL(get_umwait_control_msr);
-
/*
* Cache the original IA32_UMWAIT_CONTROL MSR value which is configured by
* hardware or BIOS before kernel boot.
#include <linux/sched.h>
#include <linux/sched/clock.h>
+#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include "cpu.h"
copy_fpregs_to_fpstate(¤t->thread.fpu);
}
__cpu_invalidate_fpregs_state();
+
+ if (boot_cpu_has(X86_FEATURE_XMM))
+ ldmxcsr(MXCSR_DEFAULT);
+
+ if (boot_cpu_has(X86_FEATURE_FPU))
+ asm volatile ("fninit");
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin);
#include <asm/mmu_context.h>
#include <asm/pgtable_areas.h>
+#include <xen/xen.h>
+
/* This is a multiple of PAGE_SIZE. */
#define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE)
return bytecount;
}
+static bool allow_16bit_segments(void)
+{
+ if (!IS_ENABLED(CONFIG_X86_16BIT))
+ return false;
+
+#ifdef CONFIG_XEN_PV
+ /*
+ * Xen PV does not implement ESPFIX64, which means that 16-bit
+ * segments will not work correctly. Until either Xen PV implements
+	 * ESPFIX64 and can signal this fact to the guest, or someone
+ * provides compelling evidence that allowing broken 16-bit segments
+ * is worthwhile, disallow 16-bit segments under Xen PV.
+ */
+ if (xen_pv_domain()) {
+ pr_info_once("Warning: 16-bit segments do not work correctly in a Xen PV guest\n");
+ return false;
+ }
+#endif
+
+ return true;
+}
+
static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
{
struct mm_struct *mm = current->mm;
/* The user wants to clear the entry. */
memset(&ldt, 0, sizeof(ldt));
} else {
- if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
+ if (!ldt_info.seg_32bit && !allow_16bit_segments()) {
error = -EINVAL;
goto out;
}
DEFINE_IDTENTRY_RAW(exc_nmi)
{
- if (IS_ENABLED(CONFIG_SMP) && cpu_is_offline(smp_processor_id()))
+ if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id()))
return;
if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
local_irq_disable();
}
-int is_valid_bugaddr(unsigned long addr)
+__always_inline int is_valid_bugaddr(unsigned long addr)
{
- unsigned short ud;
-
if (addr < TASK_SIZE_MAX)
return 0;
- if (get_kernel_nofault(ud, (unsigned short *)addr))
- return 0;
-
- return ud == INSN_UD0 || ud == INSN_UD2;
+ /*
+	 * We got #UD; if the text isn't readable we'd have gotten
+ * a different exception.
+ */
+ return *(unsigned short *)addr == INSN_UD2;
}
static nokprobe_inline int
ILL_ILLOPN, error_get_trap_addr(regs));
}
-DEFINE_IDTENTRY_RAW(exc_invalid_op)
+static noinstr bool handle_bug(struct pt_regs *regs)
{
- bool rcu_exit;
+ bool handled = false;
+
+ if (!is_valid_bugaddr(regs->ip))
+ return handled;
/*
- * Handle BUG/WARN like NMIs instead of like normal idtentries:
- * if we bugged/warned in a bad RCU context, for example, the last
- * thing we want is to BUG/WARN again in the idtentry code, ad
- * infinitum.
+ * All lies, just get the WARN/BUG out.
+ */
+ instrumentation_begin();
+ /*
+ * Since we're emulating a CALL with exceptions, restore the interrupt
+ * state to what it was at the exception site.
*/
- if (!user_mode(regs) && is_valid_bugaddr(regs->ip)) {
- enum bug_trap_type type;
+ if (regs->flags & X86_EFLAGS_IF)
+ raw_local_irq_enable();
+ if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN) {
+ regs->ip += LEN_UD2;
+ handled = true;
+ }
+ if (regs->flags & X86_EFLAGS_IF)
+ raw_local_irq_disable();
+ instrumentation_end();
- nmi_enter();
- instrumentation_begin();
- trace_hardirqs_off_finish();
- type = report_bug(regs->ip, regs);
- if (regs->flags & X86_EFLAGS_IF)
- trace_hardirqs_on_prepare();
- instrumentation_end();
- nmi_exit();
+ return handled;
+}
- if (type == BUG_TRAP_TYPE_WARN) {
- /* Skip the ud2. */
- regs->ip += LEN_UD2;
- return;
- }
+DEFINE_IDTENTRY_RAW(exc_invalid_op)
+{
+ bool rcu_exit;
- /*
- * Else, if this was a BUG and report_bug returns or if this
- * was just a normal #UD, we want to continue onward and
- * crash.
- */
- }
+ /*
+ * We use UD2 as a short encoding for 'CALL __WARN', as such
+ * handle it before exception entry to avoid recursive WARN
+ * in case exception entry is the one triggering WARNs.
+ */
+ if (!user_mode(regs) && handle_bug(regs))
+ return;
rcu_exit = idtentry_enter_cond_rcu(regs);
instrumentation_begin();
(struct bad_iret_stack *)__this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
/* Copy the IRET target to the temporary storage. */
- memcpy(&tmp.regs.ip, (void *)s->regs.sp, 5*8);
+ __memcpy(&tmp.regs.ip, (void *)s->regs.sp, 5*8);
/* Copy the remainder of the stack from the current stack. */
- memcpy(&tmp, s, offsetof(struct bad_iret_stack, regs.ip));
+ __memcpy(&tmp, s, offsetof(struct bad_iret_stack, regs.ip));
/* Update the entry stack */
- memcpy(new_stack, &tmp, sizeof(tmp));
+ __memcpy(new_stack, &tmp, sizeof(tmp));
BUG_ON(!user_mode(&new_stack->regs));
return new_stack;
instrumentation_begin();
trace_hardirqs_off_finish();
+ /*
+ * If something gets miswired and we end up here for a user mode
+ * #DB, we will malfunction.
+ */
+ WARN_ON_ONCE(user_mode(regs));
+
/*
* Catch SYSENTER with TF set and clear DR_STEP. If this hit a
* watchpoint at the same time then that will still be handled.
static __always_inline void exc_debug_user(struct pt_regs *regs,
unsigned long dr6)
{
+ /*
+ * If something gets miswired and we end up here for a kernel mode
+ * #DB, we will malfunction.
+ */
+ WARN_ON_ONCE(!user_mode(regs));
+
idtentry_enter_user(regs);
instrumentation_begin();
}
#else
/* 32 bit does not have separate entry points. */
-DEFINE_IDTENTRY_DEBUG(exc_debug)
+DEFINE_IDTENTRY_RAW(exc_debug)
{
unsigned long dr6, dr7;
#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS \
(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
- | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE)
+ | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE | X86_CR4_TSD)
#define BUILD_KVM_GPR_ACCESSORS(lname, uname) \
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
kvfree(map);
}
+/*
+ * CLEAN -> DIRTY and UPDATE_IN_PROGRESS -> DIRTY changes happen without a lock.
+ *
+ * DIRTY -> UPDATE_IN_PROGRESS and UPDATE_IN_PROGRESS -> CLEAN happen with
+ * apic_map_lock_held.
+ */
+enum {
+ CLEAN,
+ UPDATE_IN_PROGRESS,
+ DIRTY
+};
+
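/*
 * A minimal lock-elided sketch of the state machine above (the real code
 * additionally holds apic_map_lock across the update; the sketch_* names
 * are hypothetical):
 */
static atomic_t sketch_state = ATOMIC_INIT(CLEAN);

static void sketch_mark_dirty(void)	/* lockless writer */
{
	atomic_set_release(&sketch_state, DIRTY);
}

static void sketch_recalculate(void)
{
	/* Only proceed if someone actually dirtied the map. */
	if (atomic_cmpxchg_acquire(&sketch_state, DIRTY,
				   UPDATE_IN_PROGRESS) == CLEAN)
		return;
	/* ... rebuild and publish the map ... */
	/*
	 * If a writer raced in and set DIRTY again, this cmpxchg fails and
	 * the map stays marked for another pass.
	 */
	atomic_cmpxchg_release(&sketch_state, UPDATE_IN_PROGRESS, CLEAN);
}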
void kvm_recalculate_apic_map(struct kvm *kvm)
{
struct kvm_apic_map *new, *old = NULL;
int i;
u32 max_id = 255; /* enough space for any xAPIC ID */
- if (!kvm->arch.apic_map_dirty) {
- /*
- * Read kvm->arch.apic_map_dirty before
- * kvm->arch.apic_map
- */
- smp_rmb();
+ /* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map. */
+ if (atomic_read_acquire(&kvm->arch.apic_map_dirty) == CLEAN)
return;
- }
mutex_lock(&kvm->arch.apic_map_lock);
- if (!kvm->arch.apic_map_dirty) {
+ /*
+ * Read kvm->arch.apic_map_dirty before kvm->arch.apic_map
+ * (if clean) or the APIC registers (if dirty).
+ */
+ if (atomic_cmpxchg_acquire(&kvm->arch.apic_map_dirty,
+ DIRTY, UPDATE_IN_PROGRESS) == CLEAN) {
/* Someone else has updated the map. */
mutex_unlock(&kvm->arch.apic_map_lock);
return;
lockdep_is_held(&kvm->arch.apic_map_lock));
rcu_assign_pointer(kvm->arch.apic_map, new);
/*
- * Write kvm->arch.apic_map before
- * clearing apic->apic_map_dirty
+ * Write kvm->arch.apic_map before clearing apic->apic_map_dirty.
+ * If another update has come in, leave it DIRTY.
*/
- smp_wmb();
- kvm->arch.apic_map_dirty = false;
+ atomic_cmpxchg_release(&kvm->arch.apic_map_dirty,
+ UPDATE_IN_PROGRESS, CLEAN);
mutex_unlock(&kvm->arch.apic_map_lock);
if (old)
else
static_key_slow_inc(&apic_sw_disabled.key);
- apic->vcpu->kvm->arch.apic_map_dirty = true;
+ atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}
}
static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
{
kvm_lapic_set_reg(apic, APIC_ID, id << 24);
- apic->vcpu->kvm->arch.apic_map_dirty = true;
+ atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}
static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
{
kvm_lapic_set_reg(apic, APIC_LDR, id);
- apic->vcpu->kvm->arch.apic_map_dirty = true;
+ atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}
static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
kvm_lapic_set_reg(apic, APIC_ID, id);
kvm_lapic_set_reg(apic, APIC_LDR, ldr);
- apic->vcpu->kvm->arch.apic_map_dirty = true;
+ atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}
static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
case APIC_DFR:
if (!apic_x2apic_mode(apic)) {
kvm_lapic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
- apic->vcpu->kvm->arch.apic_map_dirty = true;
+ atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
} else
ret = 1;
break;
static_key_slow_dec_deferred(&apic_hw_disabled);
} else {
static_key_slow_inc(&apic_hw_disabled.key);
- vcpu->kvm->arch.apic_map_dirty = true;
+ atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}
}
if (!apic)
return;
- vcpu->kvm->arch.apic_map_dirty = false;
/* Stop the timer in case it's a reset to an active apic */
hrtimer_cancel(&apic->lapic_timer.timer);
}
memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));
+ atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
kvm_recalculate_apic_map(vcpu->kvm);
kvm_apic_set_version(vcpu);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
struct kvm_memory_slot *slot, u64 gfn);
-int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
+int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa);
int kvm_mmu_post_init_vm(struct kvm *kvm);
void kvm_mmu_pre_destroy_vm(struct kvm *kvm);
* Emulate arch specific page modification logging for the
* nested hypervisor
*/
-int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu)
+int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa)
{
if (kvm_x86_ops.write_log_dirty)
- return kvm_x86_ops.write_log_dirty(vcpu);
+ return kvm_x86_ops.write_log_dirty(vcpu, l2_gpa);
return 0;
}
nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
rsvd_bits(maxphyaddr, 51);
rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd |
- nonleaf_bit8_rsvd | gbpages_bit_rsvd |
+ gbpages_bit_rsvd |
rsvd_bits(maxphyaddr, 51);
rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd |
rsvd_bits(maxphyaddr, 51);
static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
struct kvm_mmu *mmu,
struct guest_walker *walker,
- int write_fault)
+ gpa_t addr, int write_fault)
{
unsigned level, index;
pt_element_t pte, orig_pte;
!(pte & PT_GUEST_DIRTY_MASK)) {
trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
#if PTTYPE == PTTYPE_EPT
- if (kvm_arch_write_log_dirty(vcpu))
+ if (kvm_arch_write_log_dirty(vcpu, addr))
return -EINVAL;
#endif
pte |= PT_GUEST_DIRTY_MASK;
++walker->level;
do {
- gfn_t real_gfn;
unsigned long host_addr;
pt_access = pte_access;
walker->table_gfn[walker->level - 1] = table_gfn;
walker->pte_gpa[walker->level - 1] = pte_gpa;
- real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
+ real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
nested_access,
&walker->fault);
* information to fix the exit_qualification or exit_info_1
* fields.
*/
- if (unlikely(real_gfn == UNMAPPED_GVA))
+ if (unlikely(real_gpa == UNMAPPED_GVA))
return 0;
- real_gfn = gpa_to_gfn(real_gfn);
-
- host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, real_gfn,
+ host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gpa_to_gfn(real_gpa),
&walker->pte_writable[walker->level - 1]);
if (unlikely(kvm_is_error_hva(host_addr)))
goto error;
(PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT);
if (unlikely(!accessed_dirty)) {
- ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault);
+ ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker,
+ addr, write_fault);
if (unlikely(ret < 0))
goto error;
else if (ret)
void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
-static fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
+static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
{
fastpath_t exit_fastpath;
struct vcpu_svm *svm = to_svm(vcpu);
* CR0_GUEST_HOST_MASK is already set in the original vmcs01
* (KVM doesn't change it);
*/
- vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
+ vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
vmx_set_cr0(vcpu, vmcs12->host_cr0);
/* Same as above - no reason to call set_cr4_guest_host_mask(). */
*/
vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
- vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
+ vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
goto error_guest_mode;
}
+ vmx->nested.has_preemption_timer_deadline = false;
if (kvm_state->hdr.vmx.flags & KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) {
vmx->nested.has_preemption_timer_deadline = true;
vmx->nested.preemption_timer_deadline =
struct vmcs_controls_shadow controls_shadow;
};
+static inline bool is_intr_type(u32 intr_info, u32 type)
+{
+ const u32 mask = INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK;
+
+ return (intr_info & mask) == (INTR_INFO_VALID_MASK | type);
+}
+
+static inline bool is_intr_type_n(u32 intr_info, u32 type, u8 vector)
+{
+ const u32 mask = INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK |
+ INTR_INFO_VECTOR_MASK;
+
+ return (intr_info & mask) == (INTR_INFO_VALID_MASK | type | vector);
+}
+
static inline bool is_exception_n(u32 intr_info, u8 vector)
{
- return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
- INTR_INFO_VALID_MASK)) ==
- (INTR_TYPE_HARD_EXCEPTION | vector | INTR_INFO_VALID_MASK);
+ return is_intr_type_n(intr_info, INTR_TYPE_HARD_EXCEPTION, vector);
}
static inline bool is_debug(u32 intr_info)
static inline bool is_machine_check(u32 intr_info)
{
- return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
- INTR_INFO_VALID_MASK)) ==
- (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
+ return is_exception_n(intr_info, MC_VECTOR);
}
/* Undocumented: icebp/int1 */
static inline bool is_icebp(u32 intr_info)
{
- return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
- == (INTR_TYPE_PRIV_SW_EXCEPTION | INTR_INFO_VALID_MASK);
+ return is_intr_type(intr_info, INTR_TYPE_PRIV_SW_EXCEPTION);
}
static inline bool is_nmi(u32 intr_info)
{
- return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
- == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
+ return is_intr_type(intr_info, INTR_TYPE_NMI_INTR);
}
static inline bool is_external_intr(u32 intr_info)
{
- return (intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK))
- == (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR);
+ return is_intr_type(intr_info, INTR_TYPE_EXT_INTR);
}
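/*
 * Worked example for the helpers above: an NMI arrives with intr_info ==
 * INTR_INFO_VALID_MASK | INTR_TYPE_NMI_INTR | 2 (vector NMI_VECTOR).
 * is_intr_type() masks out the vector field, so is_nmi() matches for any
 * vector value, while is_exception_n() additionally pins the vector.
 */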
enum vmcs_field_width {
#define KVM_VM_CR0_ALWAYS_ON \
(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | \
X86_CR0_WP | X86_CR0_PG | X86_CR0_PE)
-#define KVM_CR4_GUEST_OWNED_BITS \
- (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
- | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_TSD)
#define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
{
- vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
- if (enable_ept)
- vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
+ vmx->vcpu.arch.cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS;
+ if (!enable_ept)
+ vmx->vcpu.arch.cr4_guest_owned_bits &= ~X86_CR4_PGE;
if (is_guest_mode(&vmx->vcpu))
vmx->vcpu.arch.cr4_guest_owned_bits &=
~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask;
/* 22.2.1, 20.8.1 */
vm_entry_controls_set(vmx, vmx_vmentry_ctrl());
- vmx->vcpu.arch.cr0_guest_owned_bits = X86_CR0_TS;
- vmcs_writel(CR0_GUEST_HOST_MASK, ~X86_CR0_TS);
+ vmx->vcpu.arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
+ vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits);
set_cr4_guest_host_mask(vmx);
msrs[i].host, false);
}
-static void atomic_switch_umwait_control_msr(struct vcpu_vmx *vmx)
-{
- u32 host_umwait_control;
-
- if (!vmx_has_waitpkg(vmx))
- return;
-
- host_umwait_control = get_umwait_control_msr();
-
- if (vmx->msr_ia32_umwait_control != host_umwait_control)
- add_atomic_switch_msr(vmx, MSR_IA32_UMWAIT_CONTROL,
- vmx->msr_ia32_umwait_control,
- host_umwait_control, false);
- else
- clear_atomic_switch_msr(vmx, MSR_IA32_UMWAIT_CONTROL);
-}
-
static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
pt_guest_enter(vmx);
- if (vcpu_to_pmu(vcpu)->version)
- atomic_switch_perf_msrs(vmx);
- atomic_switch_umwait_control_msr(vmx);
+ atomic_switch_perf_msrs(vmx);
if (enable_preemption_timer)
vmx_update_hv_timer(vcpu);
kvm_flush_pml_buffers(kvm);
}
-static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
+static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa)
{
struct vmcs12 *vmcs12;
struct vcpu_vmx *vmx = to_vmx(vcpu);
- gpa_t gpa, dst;
+ gpa_t dst;
if (is_guest_mode(vcpu)) {
WARN_ON_ONCE(vmx->nested.pml_full);
return 1;
}
- gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull;
+ gpa &= ~0xFFFull;
dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;
if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
u64 current_tsc_ratio;
- u32 host_pkru;
-
unsigned long host_debugctlmsr;
/*
if (is_long_mode(vcpu)) {
if (!(cr4 & X86_CR4_PAE))
return 1;
+ if ((cr4 ^ old_cr4) & X86_CR4_LA57)
+ return 1;
} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
&& ((cr4 ^ old_cr4) & pdptr_bits)
&& !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
if (data & 0x30)
return 1;
+ if (!lapic_in_kernel(vcpu))
+ return 1;
+
vcpu->arch.apf.msr_en_val = data;
if (!kvm_pv_async_pf_enabled(vcpu)) {
return kvm_mtrr_set_msr(vcpu, msr, data);
case MSR_IA32_APICBASE:
return kvm_set_apic_base(vcpu, msr_info);
- case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
+ case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
return kvm_x2apic_msr_write(vcpu, msr, data);
case MSR_IA32_TSCDEADLINE:
kvm_set_lapic_tscdeadline_msr(vcpu, data);
case MSR_IA32_APICBASE:
msr_info->data = kvm_get_apic_base(vcpu);
break;
- case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
+ case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
case MSR_IA32_TSCDEADLINE:
msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu);
r = -EINVAL;
user_tsc_khz = (u32)arg;
- if (user_tsc_khz >= kvm_max_guest_tsc_khz)
+ if (kvm_has_tsc_control &&
+ user_tsc_khz >= kvm_max_guest_tsc_khz)
goto out;
if (user_tsc_khz == 0)
#include <asm/alternative-asm.h>
#include <asm/export.h>
+.pushsection .noinstr.text, "ax"
+
/*
* We build a jump to memcpy_orig by default which gets NOPped out on
* the majority of x86 CPUs which set REP_GOOD. In addition, CPUs which
retq
SYM_FUNC_END(memcpy_orig)
+.popsection
+
#ifndef CONFIG_UML
MCSAFE_TEST_CTL
asm volatile(
" testq %[size8],%[size8]\n"
" jz 4f\n"
+ " .align 16\n"
"0: movq $0,(%[dst])\n"
" addq $8,%[dst]\n"
" decl %%ecx ; jnz 0b\n"
*/
static void notrace __restore_processor_state(struct saved_context *ctxt)
{
+ struct cpuinfo_x86 *c;
+
if (ctxt->misc_enable_saved)
wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
/*
mtrr_bp_restore();
perf_restore_debug_store();
msr_restore_context(ctxt);
+
+ c = &cpu_data(smp_processor_id());
+ if (cpu_has(c, X86_FEATURE_MSR_IA32_FEAT_CTL))
+ init_ia32_feat_ctl(c);
}
/* Needed by apm.c */
}
#ifdef CONFIG_X86_64
+void noist_exc_debug(struct pt_regs *regs);
+
+DEFINE_IDTENTRY_RAW(xenpv_exc_nmi)
+{
+	/* On Xen PV, NMI doesn't use IST. The C part is the same as native. */
+ exc_nmi(regs);
+}
+
+DEFINE_IDTENTRY_RAW(xenpv_exc_debug)
+{
+ /*
+ * There's no IST on Xen PV, but we still need to dispatch
+ * to the correct handler.
+ */
+ if (user_mode(regs))
+ noist_exc_debug(regs);
+ else
+ exc_debug(regs);
+}
+
struct trap_array_entry {
void (*orig)(void);
void (*xen)(void);
.xen = xen_asm_##func, \
.ist_okay = ist_ok }
-#define TRAP_ENTRY_REDIR(func, xenfunc, ist_ok) { \
+#define TRAP_ENTRY_REDIR(func, ist_ok) { \
.orig = asm_##func, \
- .xen = xen_asm_##xenfunc, \
+ .xen = xen_asm_xenpv_##func, \
.ist_okay = ist_ok }
static struct trap_array_entry trap_array[] = {
- TRAP_ENTRY_REDIR(exc_debug, exc_xendebug, true ),
+ TRAP_ENTRY_REDIR(exc_debug, true ),
TRAP_ENTRY(exc_double_fault, true ),
#ifdef CONFIG_X86_MCE
TRAP_ENTRY(exc_machine_check, true ),
#endif
- TRAP_ENTRY_REDIR(exc_nmi, exc_xennmi, true ),
+ TRAP_ENTRY_REDIR(exc_nmi, true ),
TRAP_ENTRY(exc_int3, false ),
TRAP_ENTRY(exc_overflow, false ),
#ifdef CONFIG_IA32_EMULATION
.endm
xen_pv_trap asm_exc_divide_error
-xen_pv_trap asm_exc_debug
-xen_pv_trap asm_exc_xendebug
+xen_pv_trap asm_xenpv_exc_debug
xen_pv_trap asm_exc_int3
-xen_pv_trap asm_exc_xennmi
+xen_pv_trap asm_xenpv_exc_nmi
xen_pv_trap asm_exc_overflow
xen_pv_trap asm_exc_bounds
xen_pv_trap asm_exc_invalid_op
/* 32-bit compat sysenter target */
SYM_FUNC_START(xen_sysenter_target)
- mov 0*8(%rsp), %rcx
- mov 1*8(%rsp), %r11
- mov 5*8(%rsp), %rsp
- jmp entry_SYSENTER_compat
+ /*
+ * NB: Xen is polite and clears TF from EFLAGS for us. This means
+ * that we don't need to guard against single step exceptions here.
+ */
+ popq %rcx
+ popq %r11
+
+ /*
+ * Neither Xen nor the kernel really knows what the old SS and
+ * CS were. The kernel expects __USER32_DS and __USER32_CS, so
+ * report those values even though Xen will guess its own values.
+ */
+ movq $__USER32_DS, 4*8(%rsp)
+ movq $__USER32_CS, 1*8(%rsp)
+
+ jmp entry_SYSENTER_compat_after_hwframe
SYM_FUNC_END(xen_sysenter_target)
#else /* !CONFIG_IA32_EMULATION */
flush_workqueue(kintegrityd_wq);
}
+static void __bio_integrity_free(struct bio_set *bs,
+ struct bio_integrity_payload *bip)
+{
+ if (bs && mempool_initialized(&bs->bio_integrity_pool)) {
+ if (bip->bip_vec)
+ bvec_free(&bs->bvec_integrity_pool, bip->bip_vec,
+ bip->bip_slab);
+ mempool_free(bip, &bs->bio_integrity_pool);
+ } else {
+ kfree(bip);
+ }
+}
+
/**
* bio_integrity_alloc - Allocate integrity payload and attach it to bio
* @bio: bio to attach integrity metadata to
return bip;
err:
- mempool_free(bip, &bs->bio_integrity_pool);
+ __bio_integrity_free(bs, bip);
return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(bio_integrity_alloc);
kfree(page_address(bip->bip_vec->bv_page) +
bip->bip_vec->bv_offset);
- if (bs && mempool_initialized(&bs->bio_integrity_pool)) {
- bvec_free(&bs->bvec_integrity_pool, bip->bip_vec, bip->bip_slab);
-
- mempool_free(bip, &bs->bio_integrity_pool);
- } else {
- kfree(bip);
- }
-
+ __bio_integrity_free(bs, bip);
bio->bi_integrity = NULL;
bio->bi_opf &= ~REQ_INTEGRITY;
}
QUEUE_FLAG_NAME(REGISTERED),
QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
QUEUE_FLAG_NAME(QUIESCED),
+ QUEUE_FLAG_NAME(PCI_P2PDMA),
+ QUEUE_FLAG_NAME(ZONE_RESETALL),
+ QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
};
#undef QUEUE_FLAG_NAME
void *priv, bool reserved)
{
/*
- * If we find a request that is inflight and the queue matches,
+ * If we find a request that isn't idle and the queue matches,
* we know the queue is busy. Return false to stop the iteration.
*/
- if (rq->state == MQ_RQ_IN_FLIGHT && rq->q == hctx->queue) {
+ if (blk_mq_request_started(rq) && rq->q == hctx->queue) {
bool *busy = priv;
*busy = true;
if (!ksm)
return;
kvfree(ksm->slot_hashtable);
- memzero_explicit(ksm->slots, sizeof(ksm->slots[0]) * ksm->num_slots);
- kvfree(ksm->slots);
+ kvfree_sensitive(ksm->slots, sizeof(ksm->slots[0]) * ksm->num_slots);
memzero_explicit(ksm, sizeof(*ksm));
}
EXPORT_SYMBOL_GPL(blk_ksm_destroy);
void af_alg_release_parent(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
- unsigned int nokey = ask->nokey_refcnt;
- bool last = nokey && !ask->refcnt;
+ unsigned int nokey = atomic_read(&ask->nokey_refcnt);
sk = ask->parent;
ask = alg_sk(sk);
- local_bh_disable();
- bh_lock_sock(sk);
- ask->nokey_refcnt -= nokey;
- if (!last)
- last = !--ask->refcnt;
- bh_unlock_sock(sk);
- local_bh_enable();
+ if (nokey)
+ atomic_dec(&ask->nokey_refcnt);
- if (last)
+ if (atomic_dec_and_test(&ask->refcnt))
sock_put(sk);
}
EXPORT_SYMBOL_GPL(af_alg_release_parent);
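/*
 * A reduced sketch of the refcounting pattern above (sketch_put() is a
 * hypothetical helper): atomic_dec_and_test() returns true only for the
 * caller that drops the final reference, so exactly one path calls
 * sock_put(), with no sock lock or local_bh_disable() needed.
 */
static void sketch_put(atomic_t *refcnt, struct sock *sk)
{
	if (atomic_dec_and_test(refcnt))	/* true only on the last drop */
		sock_put(sk);
}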
err = -EBUSY;
lock_sock(sk);
- if (ask->refcnt | ask->nokey_refcnt)
+ if (atomic_read(&ask->refcnt))
goto unlock;
swap(ask->type, type);
int err = -EBUSY;
lock_sock(sk);
- if (ask->refcnt)
+ if (atomic_read(&ask->refcnt) != atomic_read(&ask->nokey_refcnt))
goto unlock;
type = ask->type;
if (err)
goto unlock;
- if (nokey || !ask->refcnt++)
+ if (atomic_inc_return_relaxed(&ask->refcnt) == 1)
sock_hold(sk);
- ask->nokey_refcnt += nokey;
+ if (nokey) {
+ atomic_inc(&ask->nokey_refcnt);
+ atomic_set(&alg_sk(sk2)->nokey_refcnt, 1);
+ }
alg_sk(sk2)->parent = sk;
alg_sk(sk2)->type = type;
- alg_sk(sk2)->nokey_refcnt = nokey;
newsock->ops = type->ops;
newsock->state = SS_CONNECTED;
struct alg_sock *ask = alg_sk(sk);
lock_sock(sk);
- if (ask->refcnt)
+ if (!atomic_read(&ask->nokey_refcnt))
goto unlock_child;
psk = ask->parent;
if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
goto unlock;
- if (!pask->refcnt++)
- sock_hold(psk);
-
- ask->refcnt = 1;
- sock_put(psk);
+ atomic_dec(&pask->nokey_refcnt);
+ atomic_set(&ask->nokey_refcnt, 0);
err = 0;
struct alg_sock *ask = alg_sk(sk);
lock_sock(sk);
- if (ask->refcnt)
+ if (!atomic_read(&ask->nokey_refcnt))
goto unlock_child;
psk = ask->parent;
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
goto unlock;
- if (!pask->refcnt++)
- sock_hold(psk);
-
- ask->refcnt = 1;
- sock_put(psk);
+ atomic_dec(&pask->nokey_refcnt);
+ atomic_set(&ask->nokey_refcnt, 0);
err = 0;
struct alg_sock *ask = alg_sk(sk);
lock_sock(sk);
- if (ask->refcnt)
+ if (!atomic_read(&ask->nokey_refcnt))
goto unlock_child;
psk = ask->parent;
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
goto unlock;
- if (!pask->refcnt++)
- sock_hold(psk);
-
- ask->refcnt = 1;
- sock_put(psk);
+ atomic_dec(&pask->nokey_refcnt);
+ atomic_set(&ask->nokey_refcnt, 0);
err = 0;
#include <linux/module.h>
#include <linux/configfs.h>
#include <linux/acpi.h>
+#include <linux/security.h>
#include "acpica/accommon.h"
#include "acpica/actables.h"
{
const struct acpi_table_header *header = data;
struct acpi_table *table;
- int ret;
+ int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);
+
+ if (ret)
+ return ret;
table = container_of(cfg, struct acpi_table, cfg);
{"INT3407", 0},
{"INT3532", 0},
{"INTC1047", 0},
+ {"INTC1050", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, int3407_device_ids);
static const struct acpi_device_id fan_device_ids[] = {
{"PNP0C0B", 0},
- {"INT1044", 0},
{"INT3404", 0},
+ {"INTC1044", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, fan_device_ids);
}
static ssize_t
-acpi_show_profile(struct device *dev, struct device_attribute *attr,
+acpi_show_profile(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
}
-static const struct device_attribute pm_profile_attr =
+static const struct kobj_attribute pm_profile_attr =
__ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL);
static ssize_t hotplug_enabled_show(struct kobject *kobj,
static void binder_free_proc(struct binder_proc *proc)
{
+ struct binder_device *device;
+
BUG_ON(!list_empty(&proc->todo));
BUG_ON(!list_empty(&proc->delivered_death));
+ device = container_of(proc->context, struct binder_device, context);
+ if (refcount_dec_and_test(&device->ref)) {
+ kfree(proc->context->name);
+ kfree(device);
+ }
binder_alloc_deferred_release(&proc->alloc);
put_task_struct(proc->tsk);
binder_stats_deleted(BINDER_STAT_PROC);
static void binder_deferred_release(struct binder_proc *proc)
{
struct binder_context *context = proc->context;
- struct binder_device *device;
struct rb_node *n;
int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
context->binder_context_mgr_node = NULL;
}
mutex_unlock(&context->context_mgr_node_lock);
- device = container_of(proc->context, struct binder_device, context);
- if (refcount_dec_and_test(&device->ref)) {
- kfree(context->name);
- kfree(device);
- }
- proc->context = NULL;
binder_inner_proc_lock(proc);
/*
* Make sure proc stays alive after we
.notifier_call = pm_trace_notify,
};
-static int early_resume_init(void)
+static int __init early_resume_init(void)
{
hash_value_early_read = read_magic_time();
register_pm_notifier(&pm_trace_nb);
return 0;
}
-static int late_resume_init(void)
+static int __init late_resume_init(void)
{
unsigned int val = hash_value_early_read;
unsigned int user, file, dev;
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/hwspinlock.h>
+#include <asm/unaligned.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
- __be16 *b = buf;
-
- b[0] = cpu_to_be16(val << shift);
+ put_unaligned_be16(val << shift, buf);
}
static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
- __le16 *b = buf;
-
- b[0] = cpu_to_le16(val << shift);
+ put_unaligned_le16(val << shift, buf);
}
static void regmap_format_16_native(void *buf, unsigned int val,
unsigned int shift)
{
- *(u16 *)buf = val << shift;
+ u16 v = val << shift;
+
+ memcpy(buf, &v, sizeof(v));
}
static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
- __be32 *b = buf;
-
- b[0] = cpu_to_be32(val << shift);
+ put_unaligned_be32(val << shift, buf);
}
static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
- __le32 *b = buf;
-
- b[0] = cpu_to_le32(val << shift);
+ put_unaligned_le32(val << shift, buf);
}
static void regmap_format_32_native(void *buf, unsigned int val,
unsigned int shift)
{
- *(u32 *)buf = val << shift;
+ u32 v = val << shift;
+
+ memcpy(buf, &v, sizeof(v));
}
#ifdef CONFIG_64BIT
static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
{
- __be64 *b = buf;
-
- b[0] = cpu_to_be64((u64)val << shift);
+ put_unaligned_be64((u64) val << shift, buf);
}
static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
{
- __le64 *b = buf;
-
- b[0] = cpu_to_le64((u64)val << shift);
+ put_unaligned_le64((u64) val << shift, buf);
}
static void regmap_format_64_native(void *buf, unsigned int val,
unsigned int shift)
{
- *(u64 *)buf = (u64)val << shift;
+ u64 v = (u64) val << shift;
+
+ memcpy(buf, &v, sizeof(v));
}
#endif
static unsigned int regmap_parse_16_be(const void *buf)
{
- const __be16 *b = buf;
-
- return be16_to_cpu(b[0]);
+ return get_unaligned_be16(buf);
}
static unsigned int regmap_parse_16_le(const void *buf)
{
- const __le16 *b = buf;
-
- return le16_to_cpu(b[0]);
+ return get_unaligned_le16(buf);
}
static void regmap_parse_16_be_inplace(void *buf)
{
- __be16 *b = buf;
+ u16 v = get_unaligned_be16(buf);
- b[0] = be16_to_cpu(b[0]);
+ memcpy(buf, &v, sizeof(v));
}
static void regmap_parse_16_le_inplace(void *buf)
{
- __le16 *b = buf;
+ u16 v = get_unaligned_le16(buf);
- b[0] = le16_to_cpu(b[0]);
+ memcpy(buf, &v, sizeof(v));
}
static unsigned int regmap_parse_16_native(const void *buf)
{
- return *(u16 *)buf;
+ u16 v;
+
+ memcpy(&v, buf, sizeof(v));
+ return v;
}
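/*
 * Note on the conversions above: buf may not be naturally aligned, so a
 * plain "*(u16 *)buf" dereference is undefined behaviour on strict-alignment
 * architectures. The put_unaligned_*()/get_unaligned_*() helpers and the
 * small fixed-size memcpy() compile down to a single load/store where
 * unaligned access is legal, and to safe byte accesses where it is not.
 */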
static unsigned int regmap_parse_24(const void *buf)
static unsigned int regmap_parse_32_be(const void *buf)
{
- const __be32 *b = buf;
-
- return be32_to_cpu(b[0]);
+ return get_unaligned_be32(buf);
}
static unsigned int regmap_parse_32_le(const void *buf)
{
- const __le32 *b = buf;
-
- return le32_to_cpu(b[0]);
+ return get_unaligned_le32(buf);
}
static void regmap_parse_32_be_inplace(void *buf)
{
- __be32 *b = buf;
+ u32 v = get_unaligned_be32(buf);
- b[0] = be32_to_cpu(b[0]);
+ memcpy(buf, &v, sizeof(v));
}
static void regmap_parse_32_le_inplace(void *buf)
{
- __le32 *b = buf;
+ u32 v = get_unaligned_le32(buf);
- b[0] = le32_to_cpu(b[0]);
+ memcpy(buf, &v, sizeof(v));
}
static unsigned int regmap_parse_32_native(const void *buf)
{
- return *(u32 *)buf;
+ u32 v;
+
+ memcpy(&v, buf, sizeof(v));
+ return v;
}
#ifdef CONFIG_64BIT
static unsigned int regmap_parse_64_be(const void *buf)
{
- const __be64 *b = buf;
-
- return be64_to_cpu(b[0]);
+ return get_unaligned_be64(buf);
}
static unsigned int regmap_parse_64_le(const void *buf)
{
- const __le64 *b = buf;
-
- return le64_to_cpu(b[0]);
+ return get_unaligned_le64(buf);
}
static void regmap_parse_64_be_inplace(void *buf)
{
- __be64 *b = buf;
+ u64 v = get_unaligned_be64(buf);
- b[0] = be64_to_cpu(b[0]);
+ memcpy(buf, &v, sizeof(v));
}
static void regmap_parse_64_le_inplace(void *buf)
{
- __le64 *b = buf;
+ u64 v = get_unaligned_le64(buf);
- b[0] = le64_to_cpu(b[0]);
+ memcpy(buf, &v, sizeof(v));
}
static unsigned int regmap_parse_64_native(const void *buf)
{
- return *(u64 *)buf;
+ u64 v;
+
+ memcpy(&v, buf, sizeof(v));
+ return v;
}
#endif
if (map->hwlock)
hwspin_lock_free(map->hwlock);
kfree_const(map->name);
+ kfree(map->patch);
kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);
* @reg: Register to read from
* @bits: Bits to test
*
- * Returns -1 if the underlying regmap_read() fails, 0 if at least one of the
- * tested bits is not set and 1 if all tested bits are set.
+ * Returns 0 if at least one of the tested bits is not set, 1 if all tested
+ * bits are set and a negative error number if the underlying regmap_read()
+ * fails.
*/
int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
{
test_bit(NBD_RT_BOUND, &config->runtime_flags))) {
dev_err(disk_to_dev(nbd->disk),
"Device being setup by another task");
- sockfd_put(sock);
- return -EBUSY;
+ err = -EBUSY;
+ goto put_socket;
+ }
+
+ nsock = kzalloc(sizeof(*nsock), GFP_KERNEL);
+ if (!nsock) {
+ err = -ENOMEM;
+ goto put_socket;
}
socks = krealloc(config->socks, (config->num_connections + 1) *
sizeof(struct nbd_sock *), GFP_KERNEL);
if (!socks) {
- sockfd_put(sock);
- return -ENOMEM;
+ kfree(nsock);
+ err = -ENOMEM;
+ goto put_socket;
}
config->socks = socks;
- nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
- if (!nsock) {
- sockfd_put(sock);
- return -ENOMEM;
- }
-
nsock->fallback_index = -1;
nsock->dead = false;
mutex_init(&nsock->tx_lock);
atomic_inc(&config->live_connections);
return 0;
+
+put_socket:
+ sockfd_put(sock);
+ return err;
}
static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
put_disk(vblk->disk);
out_free_vq:
vdev->config->del_vqs(vdev);
+ kfree(vblk->vqs);
out_free_vblk:
kfree(vblk);
out_free_index:
return sysc_read(ddata, offset);
}
+/* Poll on reset status */
+static int sysc_wait_softreset(struct sysc *ddata)
+{
+ u32 sysc_mask, syss_done, rstval;
+ int syss_offset, error = 0;
+
+ syss_offset = ddata->offsets[SYSC_SYSSTATUS];
+ sysc_mask = BIT(ddata->cap->regbits->srst_shift);
+
+ if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED)
+ syss_done = 0;
+ else
+ syss_done = ddata->cfg.syss_mask;
+
+ if (syss_offset >= 0) {
+ error = readx_poll_timeout(sysc_read_sysstatus, ddata, rstval,
+ (rstval & ddata->cfg.syss_mask) ==
+ syss_done,
+ 100, MAX_MODULE_SOFTRESET_WAIT);
+
+ } else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS) {
+ error = readx_poll_timeout(sysc_read_sysconfig, ddata, rstval,
+ !(rstval & sysc_mask),
+ 100, MAX_MODULE_SOFTRESET_WAIT);
+ }
+
+ return error;
+}
+
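/*
 * Shape of the readx_poll_timeout() calls above: the macro re-reads the
 * status register into rstval, sleeping roughly 100us between reads, until
 * the reset-done condition holds or MAX_MODULE_SOFTRESET_WAIT elapses, in
 * which case it returns -ETIMEDOUT.
 */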
static int sysc_add_named_clock_from_child(struct sysc *ddata,
const char *name,
const char *optfck_name)
struct sysc *ddata;
const struct sysc_regbits *regbits;
u32 reg, idlemodes, best_mode;
+ int error;
ddata = dev_get_drvdata(dev);
+
+ /*
+ * Some modules like DSS reset automatically on idle. Enable optional
+ * reset clocks and wait for OCP softreset to complete.
+ */
+ if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET) {
+ error = sysc_enable_opt_clocks(ddata);
+ if (error) {
+ dev_err(ddata->dev,
+ "Optional clocks failed for enable: %i\n",
+ error);
+ return error;
+ }
+ }
+ error = sysc_wait_softreset(ddata);
+ if (error)
+ dev_warn(ddata->dev, "OCP softreset timed out\n");
+ if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET)
+ sysc_disable_opt_clocks(ddata);
+
+ /*
+ * Some subsystem private interconnects, like DSS top level module,
+ * need only the automatic OCP softreset handling with no sysconfig
+ * register bits to configure.
+ */
if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
return 0;
regbits = ddata->cap->regbits;
reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
- /* Set CLOCKACTIVITY, we only use it for ick */
+ /*
+ * Set CLOCKACTIVITY, we only use it for ick. And we only configure it
+ * based on the SYSC_QUIRK_USE_CLOCKACT flag, not based on the hardware
+ * capabilities. See the old HWMOD_SET_DEFAULT_CLOCKACT flag.
+ */
if (regbits->clkact_shift >= 0 &&
- (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT ||
- ddata->cfg.sysc_val & BIT(regbits->clkact_shift)))
+ (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT))
reg |= SYSC_CLOCACT_ICK << regbits->clkact_shift;
/* Set SIDLE mode */
sysc_write_sysconfig(ddata, reg);
}
+ /* Flush posted write */
+ sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
if (ddata->module_enable_quirk)
ddata->module_enable_quirk(ddata);
reg |= 1 << regbits->autoidle_shift;
sysc_write_sysconfig(ddata, reg);
+ /* Flush posted write */
+ sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
return 0;
}
bool lcd_en, digit_en, lcd2_en = false, lcd3_en = false;
const int lcd_en_mask = BIT(0), digit_en_mask = BIT(1);
int manager_count;
- bool framedonetv_irq;
+ bool framedonetv_irq = true;
u32 val, irq_mask = 0;
switch (sysc_soc->soc) {
break;
case SOC_AM4:
manager_count = 1;
+ framedonetv_irq = false;
break;
case SOC_UNKNOWN:
default:
*/
static int sysc_reset(struct sysc *ddata)
{
- int sysc_offset, syss_offset, sysc_val, rstval, error = 0;
- u32 sysc_mask, syss_done;
+ int sysc_offset, sysc_val, error;
+ u32 sysc_mask;
sysc_offset = ddata->offsets[SYSC_SYSCONFIG];
- syss_offset = ddata->offsets[SYSC_SYSSTATUS];
if (ddata->legacy_mode ||
ddata->cap->regbits->srst_shift < 0 ||
sysc_mask = BIT(ddata->cap->regbits->srst_shift);
- if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED)
- syss_done = 0;
- else
- syss_done = ddata->cfg.syss_mask;
-
if (ddata->pre_reset_quirk)
ddata->pre_reset_quirk(ddata);
if (ddata->post_reset_quirk)
ddata->post_reset_quirk(ddata);
- /* Poll on reset status */
- if (syss_offset >= 0) {
- error = readx_poll_timeout(sysc_read_sysstatus, ddata, rstval,
- (rstval & ddata->cfg.syss_mask) ==
- syss_done,
- 100, MAX_MODULE_SOFTRESET_WAIT);
-
- } else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS) {
- error = readx_poll_timeout(sysc_read_sysconfig, ddata, rstval,
- !(rstval & sysc_mask),
- 100, MAX_MODULE_SOFTRESET_WAIT);
- }
+ error = sysc_wait_softreset(ddata);
+ if (error)
+ dev_warn(ddata->dev, "OCP softreset timed out\n");
if (ddata->reset_done_quirk)
ddata->reset_done_quirk(ddata);
/*
* st33zp24_i2c_probe initialize the TPM device
- * @param: client, the i2c_client drescription (TPM I2C description).
+ * @param: client, the i2c_client description (TPM I2C description).
* @param: id, the i2c_device_id struct.
* @return: 0 in case of success.
* -1 in other case.
/*
* st33zp24_spi_probe initialize the TPM device
- * @param: dev, the spi_device drescription (TPM SPI description).
+ * @param: dev, the spi_device description (TPM SPI description).
* @return: 0 in case of success.
* or a negative value describing the error.
*/
/*
* st33zp24_spi_remove remove the TPM device
- * @param: client, the spi_device drescription (TPM SPI description).
+ * @param: client, the spi_device description (TPM SPI description).
* @return: 0 in case of success.
*/
static int st33zp24_spi_remove(struct spi_device *dev)
/*
* st33zp24_probe initialize the TPM device
- * @param: client, the i2c_client drescription (TPM I2C description).
+ * @param: client, the i2c_client description (TPM I2C description).
* @param: id, the i2c_device_id struct.
* @return: 0 in case of success.
* -1 in other case.
goto out;
}
- /* atomic tpm command send and result receive. We only hold the ops
- * lock during this period so that the tpm can be unregistered even if
- * the char dev is held open.
- */
- if (tpm_try_get_ops(priv->chip)) {
- ret = -EPIPE;
- goto out;
- }
-
priv->response_length = 0;
priv->response_read = false;
*off = 0;
if (file->f_flags & O_NONBLOCK) {
priv->command_enqueued = true;
queue_work(tpm_dev_wq, &priv->async_work);
- tpm_put_ops(priv->chip);
mutex_unlock(&priv->buffer_mutex);
return size;
}
+ /* atomic tpm command send and result receive. We only hold the ops
+ * lock during this period so that the tpm can be unregistered even if
+ * the char dev is held open.
+ */
+ if (tpm_try_get_ops(priv->chip)) {
+ ret = -EPIPE;
+ goto out;
+ }
+
ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
sizeof(priv->data_buffer));
tpm_put_ops(priv->chip);
if (rc)
goto init_irq_cleanup;
- if (!strcmp(id->compat, "IBM,vtpm20")) {
- chip->flags |= TPM_CHIP_FLAG_TPM2;
- rc = tpm2_get_cc_attrs_tbl(chip);
- if (rc)
- goto init_irq_cleanup;
- }
-
if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
ibmvtpm->rtce_buf != NULL,
HZ)) {
goto init_irq_cleanup;
}
+ if (!strcmp(id->compat, "IBM,vtpm20")) {
+ chip->flags |= TPM_CHIP_FLAG_TPM2;
+ rc = tpm2_get_cc_attrs_tbl(chip);
+ if (rc)
+ goto init_irq_cleanup;
+ }
+
return tpm_chip_register(chip);
init_irq_cleanup:
do {
return tpm_tis_init(&pnp_dev->dev, &tpm_info);
}
+/*
+ * There is a known bug caused by 93e1b7d42e1e ("[PATCH] tpm: add HID module
+ * parameter"). This commit added IFX0102 device ID, which is also used by
+ * tpm_infineon but ignored to add quirks to probe which driver ought to be
+ * used.
+ */
+
static struct pnp_device_id tpm_pnp_tbl[] = {
{"PNP0C31", 0}, /* TPM */
{"ATM1200", 0}, /* Atmel */
return 0;
out_err:
- if ((chip->ops != NULL) && (chip->ops->clk_enable != NULL))
+ if (chip->ops->clk_enable != NULL)
chip->ops->clk_enable(chip, false);
tpm_tis_remove(chip);
if ((phy->iobuf[3] & 0x01) == 0) {
// handle SPI wait states
- phy->iobuf[0] = 0;
-
for (i = 0; i < TPM_RETRY; i++) {
spi_xfer->len = 1;
spi_message_init(&m);
if (ret < 0)
goto exit;
+ /* Flow control transfers are receive only */
+ spi_xfer.tx_buf = NULL;
ret = phy->flow_control(phy, &spi_xfer);
if (ret < 0)
goto exit;
spi_xfer.delay.value = 5;
spi_xfer.delay.unit = SPI_DELAY_UNIT_USECS;
- if (in) {
- spi_xfer.tx_buf = NULL;
- } else if (out) {
+ if (out) {
+ spi_xfer.tx_buf = phy->iobuf;
spi_xfer.rx_buf = NULL;
memcpy(phy->iobuf, out, transfer_len);
out += transfer_len;
.pm = &tpm_tis_pm,
.of_match_table = of_match_ptr(of_tis_spi_match),
.acpi_match_table = ACPI_PTR(acpi_tis_spi_match),
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = tpm_tis_spi_driver_probe,
.remove = tpm_tis_spi_remove,
struct __prci_data *pd;
int r;
- pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+ pd = devm_kzalloc(dev,
+ struct_size(pd, hw_clks.hws,
+ ARRAY_SIZE(__prci_init_clocks)),
+ GFP_KERNEL);
if (!pd)
return -ENOMEM;
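/*
 * Sizing note for the allocation above: struct_size(pd, hw_clks.hws, n)
 * evaluates to sizeof(*pd) + n * sizeof(pd->hw_clks.hws[0]) with overflow
 * checking, covering the trailing flexible array that a plain sizeof(*pd)
 * would leave unallocated.
 */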
.set_next_event_virt = erratum_set_next_event_tval_virt,
},
#endif
+#ifdef CONFIG_ARM64_ERRATUM_1418040
+ {
+ .match_type = ate_match_local_cap_id,
+ .id = (void *)ARM64_WORKAROUND_1418040,
+ .desc = "ARM erratum 1418040",
+ .disable_compat_vdso = true,
+ },
+#endif
};
typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
if (wa->read_cntvct_el0) {
clocksource_counter.vdso_clock_mode = VDSO_CLOCKMODE_NONE;
vdso_default = VDSO_CLOCKMODE_NONE;
+ } else if (wa->disable_compat_vdso && vdso_default != VDSO_CLOCKMODE_NONE) {
+ vdso_default = VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT;
+ clocksource_counter.vdso_clock_mode = vdso_default;
}
}
{ } /* End */
};
+#define BITMASK_OOB (BIT(8) | BIT(18))
+
static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
{
const struct x86_cpu_id *id;
id = x86_match_cpu(intel_pstate_cpu_oob_ids);
if (id) {
rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
- if (misc_pwr & (1 << 8)) {
- pr_debug("Bit 8 in the MISC_PWR_MGMT MSR set\n");
+ if (misc_pwr & BITMASK_OOB) {
+ pr_debug("Bit 8 or 18 in the MISC_PWR_MGMT MSR set\n");
+ pr_debug("P states are controlled in Out of Band mode by the firmware/hardware\n");
return true;
}
}
* be frozen safely.
*/
index = find_deepest_state(drv, dev, U64_MAX, 0, true);
- if (index > 0)
+ if (index > 0) {
enter_s2idle_proper(drv, dev, index);
-
+ local_irq_enable();
+ }
return index;
}
#endif /* CONFIG_SUSPEND */
dentry->d_name.name, ret > 0 ? name : "");
}
-static const struct dentry_operations dma_buf_dentry_ops = {
- .d_dname = dmabuffs_dname,
-};
-
-static struct vfsmount *dma_buf_mnt;
-
-static int dma_buf_fs_init_context(struct fs_context *fc)
-{
- struct pseudo_fs_context *ctx;
-
- ctx = init_pseudo(fc, DMA_BUF_MAGIC);
- if (!ctx)
- return -ENOMEM;
- ctx->dops = &dma_buf_dentry_ops;
- return 0;
-}
-
-static struct file_system_type dma_buf_fs_type = {
- .name = "dmabuf",
- .init_fs_context = dma_buf_fs_init_context,
- .kill_sb = kill_anon_super,
-};
-
-static int dma_buf_release(struct inode *inode, struct file *file)
+static void dma_buf_release(struct dentry *dentry)
{
struct dma_buf *dmabuf;
- if (!is_dma_buf_file(file))
- return -EINVAL;
-
- dmabuf = file->private_data;
+ dmabuf = dentry->d_fsdata;
BUG_ON(dmabuf->vmapping_counter);
module_put(dmabuf->owner);
kfree(dmabuf->name);
kfree(dmabuf);
+}
+
+static const struct dentry_operations dma_buf_dentry_ops = {
+ .d_dname = dmabuffs_dname,
+ .d_release = dma_buf_release,
+};
+
+static struct vfsmount *dma_buf_mnt;
+
+static int dma_buf_fs_init_context(struct fs_context *fc)
+{
+ struct pseudo_fs_context *ctx;
+
+ ctx = init_pseudo(fc, DMA_BUF_MAGIC);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->dops = &dma_buf_dentry_ops;
return 0;
}
+static struct file_system_type dma_buf_fs_type = {
+ .name = "dmabuf",
+ .init_fs_context = dma_buf_fs_init_context,
+ .kill_sb = kill_anon_super,
+};
+
static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
struct dma_buf *dmabuf;
}
static const struct file_operations dma_buf_fops = {
- .release = dma_buf_release,
.mmap = dma_buf_mmap_internal,
.llseek = dma_buf_llseek,
.poll = dma_buf_poll,
if (pvt->model == 0x60)
amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
+ else
+ amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
} else {
amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
}
depends on SERIAL_EARLYCON && !ARM && !IA64
select FONT_SUPPORT
select ARCH_USE_MEMREMAP_PROT
+
+config EFI_CUSTOM_SSDT_OVERLAYS
+ bool "Load custom ACPI SSDT overlay from an EFI variable"
+ depends on EFI_VARS && ACPI
+ default ACPI_TABLE_UPGRADE
+ help
+ Allow loading of an ACPI SSDT overlay from an EFI variable specified
+ by a kernel command line option.
+
+ See Documentation/admin-guide/acpi/ssdt-overlays.rst for more
+ information.
}
static __initdata unsigned long screen_info_table = EFI_INVALID_TABLE_ADDR;
+static __initdata unsigned long cpu_state_table = EFI_INVALID_TABLE_ADDR;
static const efi_config_table_type_t arch_tables[] __initconst = {
{LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID, &screen_info_table},
+ {LINUX_EFI_ARM_CPU_STATE_TABLE_GUID, &cpu_state_table},
{}
};
{
struct screen_info *si;
- if (screen_info_table != EFI_INVALID_TABLE_ADDR) {
+ if (IS_ENABLED(CONFIG_ARM) &&
+ screen_info_table != EFI_INVALID_TABLE_ADDR) {
si = early_memremap_ro(screen_info_table, sizeof(*si));
if (!si) {
pr_err("Could not map screen_info config table\n");
goto out;
}
retval = efi_config_parse_tables(config_tables, systab->nr_tables,
- arch_tables);
+ IS_ENABLED(CONFIG_ARM) ? arch_tables
+ : NULL);
early_memunmap(config_tables, table_size);
out:
init_screen_info();
+#ifdef CONFIG_ARM
/* ARM does not permit early mappings to persist across paging_init() */
- if (IS_ENABLED(CONFIG_ARM))
- efi_memmap_unmap();
+ efi_memmap_unmap();
+
+ if (cpu_state_table != EFI_INVALID_TABLE_ADDR) {
+ struct efi_arm_entry_state *state;
+ bool dump_state = true;
+
+ state = early_memremap_ro(cpu_state_table,
+ sizeof(struct efi_arm_entry_state));
+ if (state == NULL) {
+ pr_warn("Unable to map CPU entry state table.\n");
+ return;
+ }
+
+ if ((state->sctlr_before_ebs & 1) == 0)
+ pr_warn(FW_BUG "EFI stub was entered with MMU and Dcache disabled, please fix your firmware!\n");
+ else if ((state->sctlr_after_ebs & 1) == 0)
+ pr_warn(FW_BUG "ExitBootServices() returned with MMU and Dcache disabled, please fix your firmware!\n");
+ else
+ dump_state = false;
+
+ if (dump_state || efi_enabled(EFI_DBG)) {
+ pr_info("CPSR at EFI stub entry : 0x%08x\n", state->cpsr_before_ebs);
+ pr_info("SCTLR at EFI stub entry : 0x%08x\n", state->sctlr_before_ebs);
+ pr_info("CPSR after ExitBootServices() : 0x%08x\n", state->cpsr_after_ebs);
+ pr_info("SCTLR after ExitBootServices(): 0x%08x\n", state->sctlr_after_ebs);
+ }
+ early_memunmap(state, sizeof(struct efi_arm_entry_state));
+ }
+#endif
}
static bool efifb_overlaps_pci_range(const struct of_pci_range *range)
efivars_unregister(&generic_efivars);
}
-#if IS_ENABLED(CONFIG_ACPI)
+#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
#define EFIVAR_SSDT_NAME_MAX 16
static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
static int __init efivar_ssdt_setup(char *str)
rsv = (void *)(p + prsv % PAGE_SIZE);
/* reserve the entry itself */
- memblock_reserve(prsv, EFI_MEMRESERVE_SIZE(rsv->size));
+ memblock_reserve(prsv,
+ struct_size(rsv, entry, rsv->size));
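+ /* struct_size() = sizeof(*rsv) + rsv->size trailing entries, overflow-checked */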
for (i = 0; i < atomic_read(&rsv->count); i++) {
memblock_reserve(rsv->entry[i].base,
rc = kobject_init_and_add(&entry->kobj, &esre1_ktype, NULL,
"entry%d", entry_num);
if (rc) {
- kfree(entry);
+ kobject_put(&entry->kobj);
return rc;
}
}
# enabled, even if doing so doesn't break the build.
#
cflags-$(CONFIG_X86_32) := -march=i386
-cflags-$(CONFIG_X86_64) := -mcmodel=small
+cflags-$(CONFIG_X86_64) := -mcmodel=small \
+ $(call cc-option,-maccumulate-outgoing-args)
cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ \
-fPIC -fno-strict-aliasing -mno-red-zone \
-mno-mmx -mno-sse -fshort-wchar \
#include "efistub.h"
+static efi_guid_t cpu_state_guid = LINUX_EFI_ARM_CPU_STATE_TABLE_GUID;
+
+struct efi_arm_entry_state *efi_entry_state;
+
+static void get_cpu_state(u32 *cpsr, u32 *sctlr)
+{
+ asm("mrs %0, cpsr" : "=r"(*cpsr));
+ if ((*cpsr & MODE_MASK) == HYP_MODE)
+ asm("mrc p15, 4, %0, c1, c0, 0" : "=r"(*sctlr));
+ else
+ asm("mrc p15, 0, %0, c1, c0, 0" : "=r"(*sctlr));
+}
+
efi_status_t check_platform_features(void)
{
+ efi_status_t status;
+ u32 cpsr, sctlr;
int block;
+ get_cpu_state(&cpsr, &sctlr);
+
+ efi_info("Entering in %s mode with MMU %sabled\n",
+ ((cpsr & MODE_MASK) == HYP_MODE) ? "HYP" : "SVC",
+ (sctlr & 1) ? "en" : "dis");
+
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
+ sizeof(*efi_entry_state),
+ (void **)&efi_entry_state);
+ if (status != EFI_SUCCESS) {
+ efi_err("allocate_pool() failed\n");
+ return status;
+ }
+
+ efi_entry_state->cpsr_before_ebs = cpsr;
+ efi_entry_state->sctlr_before_ebs = sctlr;
+
+ status = efi_bs_call(install_configuration_table, &cpu_state_guid,
+ efi_entry_state);
+ if (status != EFI_SUCCESS) {
+ efi_err("install_configuration_table() failed\n");
+ goto free_state;
+ }
+
/* non-LPAE kernels can run anywhere */
if (!IS_ENABLED(CONFIG_ARM_LPAE))
return EFI_SUCCESS;
block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
if (block < 5) {
efi_err("This LPAE kernel is not supported by your CPU\n");
- return EFI_UNSUPPORTED;
+ status = EFI_UNSUPPORTED;
+ goto drop_table;
}
return EFI_SUCCESS;
+
+drop_table:
+ efi_bs_call(install_configuration_table, &cpu_state_guid, NULL);
+free_state:
+ efi_bs_call(free_pool, efi_entry_state);
+ return status;
+}
+
+void efi_handle_post_ebs_state(void)
+{
+ get_cpu_state(&efi_entry_state->cpsr_after_ebs,
+ &efi_entry_state->sctlr_after_ebs);
}
static efi_guid_t screen_info_guid = LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID;
return !efi_nosoftreserve;
}
+/**
+ * efi_char16_puts() - Write a UCS-2 encoded string to the console
+ * @str: UCS-2 encoded string
+ */
void efi_char16_puts(efi_char16_t *str)
{
efi_call_proto(efi_table_attr(efi_system_table, con_out),
return c32;
}
+/**
+ * efi_puts() - Write a UTF-8 encoded string to the console
+ * @str: UTF-8 encoded string
+ */
void efi_puts(const char *str)
{
efi_char16_t buf[128];
}
}
+/**
+ * efi_printk() - Print a kernel message
+ * @fmt: format string
+ *
+ * The first letter of the format string is used to determine the logging level
+ * of the message. If the level is less then the current EFI logging level, the
+ * message is suppressed. The message will be truncated to 255 bytes.
+ *
+ * Return: number of printed characters
+ */
int efi_printk(const char *fmt, ...)
{
char printf_buf[256];
return printed;
}
-/*
- * Parse the ASCII string 'cmdline' for EFI options, denoted by the efi=
+/**
+ * efi_parse_options() - Parse EFI command line options
+ * @cmdline: kernel command line
+ *
+ * Parse the ASCII string @cmdline for EFI options, denoted by the efi=
* option, e.g. efi=nochunk.
*
* It should be noted that efi= is parsed in two very different
* environments, first in the early boot environment of the EFI boot
* stub, and subsequently during the kernel boot.
+ *
+ * Return: status code
*/
efi_status_t efi_parse_options(char const *cmdline)
{
return (char *)cmdline_addr;
}
-/*
+/**
+ * efi_exit_boot_services() - Exit boot services
+ * @handle: handle of the exiting image
+ * @map: pointer to receive the memory map
+ * @priv: argument to be passed to @priv_func
+ * @priv_func: function to process the memory map before exiting boot services
+ *
* Handle calling ExitBootServices according to the requirements set out by the
* spec. Obtains the current memory map, and returns that info after calling
* ExitBootServices. The client must specify a function to perform any
* processing of the memory map data prior to ExitBootServices. A client
* specific structure may be passed to the function via priv. The client
* function may be called multiple times.
+ *
+ * Return: status code
*/
efi_status_t efi_exit_boot_services(void *handle,
struct efi_boot_memmap *map,
return status;
}
+/**
+ * get_efi_config_table() - retrieve UEFI configuration table
+ * @guid: GUID of the configuration table to be retrieved
+ *
+ * Return: pointer to the configuration table or NULL
+ */
void *get_efi_config_table(efi_guid_t guid)
{
unsigned long tables = efi_table_attr(efi_system_table, tables);
};
/**
- * efi_load_initrd_dev_path - load the initrd from the Linux initrd device path
+ * efi_load_initrd_dev_path() - load the initrd from the Linux initrd device path
* @load_addr: pointer to store the address where the initrd was loaded
* @load_size: pointer to store the size of the loaded initrd
* @max: upper limit for the initrd memory allocation
- * @return: %EFI_SUCCESS if the initrd was loaded successfully, in which
- * case @load_addr and @load_size are assigned accordingly
- * %EFI_NOT_FOUND if no LoadFile2 protocol exists on the initrd
- * device path
- * %EFI_INVALID_PARAMETER if load_addr == NULL or load_size == NULL
- * %EFI_OUT_OF_RESOURCES if memory allocation failed
- * %EFI_LOAD_ERROR in all other cases
+ *
+ * Return:
+ * * %EFI_SUCCESS if the initrd was loaded successfully, in which
+ * case @load_addr and @load_size are assigned accordingly
+ * * %EFI_NOT_FOUND if no LoadFile2 protocol exists on the initrd device path
+ * * %EFI_INVALID_PARAMETER if load_addr == NULL or load_size == NULL
+ * * %EFI_OUT_OF_RESOURCES if memory allocation failed
+ * * %EFI_LOAD_ERROR in all other cases
*/
static
efi_status_t efi_load_initrd_dev_path(unsigned long *load_addr,
load_addr, load_size);
}
+/**
+ * efi_load_initrd() - Load initial RAM disk
+ * @image: EFI loaded image protocol
+ * @load_addr: pointer to loaded initrd
+ * @load_size: size of loaded initrd
+ * @soft_limit: preferred size of allocated memory for loading the initrd
+ * @hard_limit: minimum size of allocated memory
+ *
+ * Return: status code
+ */
efi_status_t efi_load_initrd(efi_loaded_image_t *image,
unsigned long *load_addr,
unsigned long *load_size,
return status;
}
+/**
+ * efi_wait_for_key() - Wait for key stroke
+ * @usec: number of microseconds to wait for key stroke
+ * @key: key entered
+ *
+ * Wait for up to @usec microseconds for a key stroke.
+ *
+ * Return: status code, EFI_SUCCESS if key received
+ */
efi_status_t efi_wait_for_key(unsigned long usec, efi_input_key_t *key)
{
efi_event_t events[2], timer;
if (status != EFI_SUCCESS)
goto fail_free_initrd;
+ if (IS_ENABLED(CONFIG_ARM))
+ efi_handle_post_ebs_state();
+
efi_enter_kernel(image_addr, fdt_addr, fdt_totalsize((void *)fdt_addr));
/* not reached */
#define EFI_EVT_NOTIFY_WAIT 0x00000100U
#define EFI_EVT_NOTIFY_SIGNAL 0x00000200U
-/*
- * boottime->wait_for_event takes an array of events as input.
+/**
+ * efi_set_event_at() - add event to events array
+ *
+ * @events: array of UEFI events
+ * @ids: index at which to place the event in the array
+ * @event: event to add to the array
+ *
+ * boottime->wait_for_event() takes an array of events as input.
* Provide a helper to set it up correctly for mixed mode.
*/
static inline
unsigned long soft_limit,
unsigned long hard_limit);
+void efi_handle_post_ebs_state(void);
+
#endif
if (!found)
return 0;
+ /* Skip any leading slashes */
+ while (cmdline[i] == L'/' || cmdline[i] == L'\\')
+ i++;
+
while (--result_len > 0 && i < cmdline_len) {
- if (cmdline[i] == L'\0' ||
- cmdline[i] == L'\n' ||
- cmdline[i] == L' ')
+ efi_char16_t c = cmdline[i++];
+
+ if (c == L'\0' || c == L'\n' || c == L' ')
break;
- *result++ = cmdline[i++];
+ else if (c == L'/')
+ /* Replace UNIX dir separators with EFI standard ones */
+ *result++ = L'\\';
+ else
+ *result++ = c;
}
*result = L'\0';
return i;
// SPDX-License-Identifier: GPL-2.0
#include <linux/ctype.h>
+#include <linux/string.h>
#include <linux/types.h>
char *skip_spaces(const char *str)
cpu_groups = kcalloc(nb_available_cpus, sizeof(cpu_groups),
GFP_KERNEL);
- if (!cpu_groups)
+ if (!cpu_groups) {
+ free_cpumask_var(tmp);
return -ENOMEM;
+ }
cpumask_copy(tmp, cpu_online_mask);
topology_core_cpumask(cpumask_any(tmp));
if (!alloc_cpumask_var(&cpu_groups[num_groups], GFP_KERNEL)) {
+ free_cpumask_var(tmp);
free_cpu_groups(num_groups, &cpu_groups);
return -ENOMEM;
}
if (!page_buf)
goto out_free_cpu_groups;
- err = 0;
/*
* Of course the last CPU cannot be powered down and cpu_down() should
* refuse doing that.
*/
pr_info("Trying to turn off and on again all CPUs\n");
- err += down_and_up_cpus(cpu_online_mask, offlined_cpus);
+ err = down_and_up_cpus(cpu_online_mask, offlined_cpus);
/*
* Take down CPUs by cpu group this time. When the last CPU is turned
static void
rpi_firmware_print_firmware_revision(struct rpi_firmware *fw)
{
+ time64_t date_and_time;
u32 packet;
int ret = rpi_firmware_property(fw,
RPI_FIRMWARE_GET_FIRMWARE_REVISION,
if (ret)
return;
- dev_info(fw->cl.dev, "Attached to firmware from %ptT\n", &packet);
+ /* This is not compatible with y2038 */
+ date_and_time = packet;
+ dev_info(fw->cl.dev, "Attached to firmware from %ptT\n", &date_and_time);
}
static void
config FPGA_MGR_ZYNQMP_FPGA
tristate "Xilinx ZynqMP FPGA"
- depends on ARCH_ZYNQMP || COMPILE_TEST
+ depends on ZYNQMP_FIRMWARE || (!ZYNQMP_FIRMWARE && COMPILE_TEST)
help
FPGA manager driver support for Xilinx ZynqMP FPGAs.
 This driver uses the processor configuration port (PCAP)
ret = pm_runtime_get_sync(chip->parent);
if (ret < 0) {
dev_err(chip->parent, "Failed to resume: %d\n", ret);
+ pm_runtime_put_autosuspend(chip->parent);
return ret;
}
if (ret < 0) {
dev_err(chip->parent, "Failed to drop cache: %d\n",
ret);
+ pm_runtime_put_autosuspend(chip->parent);
return ret;
}
ret = regmap_read(arizona->regmap, reg, &val);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(chip->parent);
return ret;
+ }
pm_runtime_mark_last_busy(chip->parent);
pm_runtime_put_autosuspend(chip->parent);
ret = pm_runtime_get_sync(chip->parent);
if (ret < 0) {
dev_err(chip->parent, "Failed to resume: %d\n", ret);
+ pm_runtime_put(chip->parent);
return ret;
}
}
};
MODULE_DEVICE_TABLE(i2c, pca953x_id);
+#ifdef CONFIG_GPIO_PCA953X_IRQ
+
+#include <linux/dmi.h>
+#include <linux/gpio.h>
+#include <linux/list.h>
+
+static const struct dmi_system_id pca953x_dmi_acpi_irq_info[] = {
+ {
+ /*
+ * On Intel Galileo Gen 2 board the IRQ pin of one of
+ * the I²C GPIO expanders, which has GpioInt() resource,
+ * is provided as an absolute number instead of being
+ * relative. Since the first controller (gpio-sch.c) and
+ * the second (gpio-dwapb.c) are at fixed bases, we may
+ * safely refer to the number in the global space to get
+ * an IRQ out of it.
+ */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"),
+ },
+ },
+ {}
+};
+
+#ifdef CONFIG_ACPI
+static int pca953x_acpi_get_pin(struct acpi_resource *ares, void *data)
+{
+ struct acpi_resource_gpio *agpio;
+ int *pin = data;
+
+ if (acpi_gpio_get_irq_resource(ares, &agpio))
+ *pin = agpio->pin_table[0];
+ return 1;
+}
+
+static int pca953x_acpi_find_pin(struct device *dev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ int pin = -ENOENT, ret;
+ LIST_HEAD(r);
+
+ ret = acpi_dev_get_resources(adev, &r, pca953x_acpi_get_pin, &pin);
+ acpi_dev_free_resource_list(&r);
+ if (ret < 0)
+ return ret;
+
+ return pin;
+}
+#else
+static inline int pca953x_acpi_find_pin(struct device *dev) { return -ENXIO; }
+#endif
+
+static int pca953x_acpi_get_irq(struct device *dev)
+{
+ int pin, ret;
+
+ pin = pca953x_acpi_find_pin(dev);
+ if (pin < 0)
+ return pin;
+
+ dev_info(dev, "Applying ACPI interrupt quirk (GPIO %d)\n", pin);
+
+ if (!gpio_is_valid(pin))
+ return -EINVAL;
+
+ ret = gpio_request(pin, "pca953x interrupt");
+ if (ret)
+ return ret;
+
+ ret = gpio_to_irq(pin);
+
+ /* When pin is used as an IRQ, no need to keep it requested */
+ gpio_free(pin);
+
+ return ret;
+}
+#endif
+
static const struct acpi_device_id pca953x_acpi_ids[] = {
{ "INT3491", 16 | PCA953X_TYPE | PCA_LATCH_INT, },
{ }
.writeable_reg = pca953x_writeable_register,
.volatile_reg = pca953x_volatile_register,
+ .disable_locking = true,
.cache_type = REGCACHE_RBTREE,
.max_register = 0x7f,
};
DECLARE_BITMAP(reg_direction, MAX_LINE);
int level;
- pca953x_read_regs(chip, chip->regs->direction, reg_direction);
-
if (chip->driver_data & PCA_PCAL) {
/* Enable latch on interrupt-enabled inputs */
pca953x_write_regs(chip, PCAL953X_IN_LATCH, chip->irq_mask);
pca953x_write_regs(chip, PCAL953X_INT_MASK, irq_mask);
}
+ /* Switch direction to input if needed */
+ pca953x_read_regs(chip, chip->regs->direction, reg_direction);
+
bitmap_or(irq_mask, chip->irq_trig_fall, chip->irq_trig_raise, gc->ngpio);
+ bitmap_complement(reg_direction, reg_direction, gc->ngpio);
bitmap_and(irq_mask, irq_mask, reg_direction, gc->ngpio);
/* Look for any newly setup interrupt */
struct gpio_chip *gc = &chip->gpio_chip;
DECLARE_BITMAP(pending, MAX_LINE);
int level;
+ bool ret;
- if (!pca953x_irq_pending(chip, pending))
- return IRQ_NONE;
+ mutex_lock(&chip->i2c_lock);
+ ret = pca953x_irq_pending(chip, pending);
+ mutex_unlock(&chip->i2c_lock);
for_each_set_bit(level, pending, gc->ngpio)
handle_nested_irq(irq_find_mapping(gc->irq.domain, level));
- return IRQ_HANDLED;
+ return IRQ_RETVAL(ret);
}
static int pca953x_irq_setup(struct pca953x_chip *chip, int irq_base)
DECLARE_BITMAP(irq_stat, MAX_LINE);
int ret;
+ if (dmi_first_match(pca953x_dmi_acpi_irq_info)) {
+ ret = pca953x_acpi_get_irq(&client->dev);
+ if (ret > 0)
+ client->irq = ret;
+ }
+
if (!client->irq)
return 0;
(mode_info->atom_context->bios + data_offset);
switch (crev) {
case 11:
+ case 12:
mem_channel_number = igp_info->v11.umachannelnumber;
/* channel width is 64 */
if (vram_width)
memset(&ti, 0, sizeof(struct amdgpu_task_info));
- if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
+ if (amdgpu_gpu_recovery &&
+ amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
DRM_ERROR("ring %s timeout, but soft recovered\n",
s_job->sched->name);
return;
if (r)
return r;
- return snprintf(buf, PAGE_SIZE, "%d\n", sclk * 10 * 1000);
+ return snprintf(buf, PAGE_SIZE, "%u\n", sclk * 10 * 1000);
}
static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
if (r)
return r;
- return snprintf(buf, PAGE_SIZE, "%d\n", mclk * 10 * 1000);
+ return snprintf(buf, PAGE_SIZE, "%u\n", mclk * 10 * 1000);
}
static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
return ret;
}
+static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
+ struct psp_gfx_cmd_resp *cmd)
+{
+ if (amdgpu_sriov_vf(psp->adev))
+ cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
+ else
+ cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
+}
+
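+/* Ask the PSP firmware to release the TMR (trusted memory region) */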
+static int psp_tmr_unload(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_tmr_unload_cmd_buf(psp, cmd);
+ DRM_INFO("free PSP TMR buffer\n");
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static int psp_tmr_terminate(struct psp_context *psp)
+{
+ int ret;
+ void *tmr_buf;
+ void **pptr;
+
+ ret = psp_tmr_unload(psp);
+ if (ret)
+ return ret;
+
+ /* free TMR memory buffer */
+ pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
+ amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
+
+ return 0;
+}
+
static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
uint64_t asd_mc, uint32_t size)
{
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
- void *tmr_buf;
- void **pptr;
if (psp->adev->psp.ta_fw) {
psp_ras_terminate(psp);
psp_asd_unload(psp);
+ psp_tmr_terminate(psp);
psp_ring_destroy(psp, PSP_RING_TYPE__KM);
- pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
- amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
amdgpu_bo_free_kernel(&psp->fw_pri_bo,
&psp->fw_pri_mc_addr, &psp->fw_pri_buf);
amdgpu_bo_free_kernel(&psp->fence_buf_bo,
}
}
+ ret = psp_asd_unload(psp);
+ if (ret) {
+ DRM_ERROR("Failed to unload asd\n");
+ return ret;
+ }
+
+ ret = psp_tmr_terminate(psp);
+ if (ret) {
+ DRM_ERROR("Falied to terminate tmr\n");
+ return ret;
+ }
+
ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
if (ret) {
DRM_ERROR("PSP ring stop failed\n");
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
- for (i = 0; i < adev->sdma.num_instances; i++)
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (adev->sdma.instance[i].fw != NULL)
+ release_firmware(adev->sdma.instance[i].fw);
+
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+ }
return 0;
}
(int)process->lead_thread->pid);
if (ret) {
pr_warn("Creating procfs pid directory failed");
+ kobject_put(process->kobj);
goto out;
}
struct dmcu *dmcu = NULL;
bool ret;
- if (!adev->dm.fw_dmcu)
+ if (!adev->dm.fw_dmcu && !adev->dm.dmub_fw)
return detect_mst_link_for_all_connectors(adev->ddev);
dmcu = adev->dm.dc->res_pool->dmcu;
struct drm_connector *connector = &aconnector->base;
struct amdgpu_device *adev = connector->dev->dev_private;
struct dc_stream_state *stream;
- int requested_bpc = connector->state ? connector->state->max_requested_bpc : 8;
+ const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
+ int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
enum dc_status dc_result = DC_OK;
do {
{"link_settings", &dp_link_settings_debugfs_fops},
{"phy_settings", &dp_phy_settings_debugfs_fop},
{"test_pattern", &dp_phy_test_pattern_fops},
- {"output_bpc", &output_bpc_fops},
{"vrr_range", &vrr_range_fops},
#ifdef CONFIG_DRM_AMD_DC_HDCP
{"hdcp_sink_capability", &hdcp_sink_capability_fops},
debugfs_create_file_unsafe("force_yuv420_output", 0644, dir, connector,
&force_yuv420_output_fops);
+ debugfs_create_file("output_bpc", 0644, dir, connector,
+ &output_bpc_fops);
+
connector->debugfs_dpcd_address = 0;
connector->debugfs_dpcd_size = 0;
srm = psp_get_srm(work->hdcp.config.psp.handle, &srm_version, &srm_size);
- if (!srm)
- return -EINVAL;
+ if (!srm) {
+ ret = -EINVAL;
+ goto ret;
+ }
if (pos >= srm_size)
ret = 0;
copy_stream_update_to_stream(dc, context, stream, stream_update);
- if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
- DC_ERROR("Mode validation failed for stream update!\n");
- dc_release_state(context);
- return;
+ if (update_type > UPDATE_TYPE_FAST) {
+ if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
+ DC_ERROR("Mode validation failed for stream update!\n");
+ dc_release_state(context);
+ return;
+ }
}
commit_planes_for_stream(
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].version = 0x01;
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffInt_t);
- ret = smu_v11_0_i2c_eeprom_control_init(&adev->pm.smu_i2c);
- if (ret)
- goto err4;
+ if (adev->psp.ras.ras) {
+ ret = smu_v11_0_i2c_eeprom_control_init(&adev->pm.smu_i2c);
+ if (ret)
+ goto err4;
+ }
return 0;
(struct vega20_smumgr *)(hwmgr->smu_backend);
struct amdgpu_device *adev = hwmgr->adev;
- smu_v11_0_i2c_eeprom_control_fini(&adev->pm.smu_i2c);
+ if (adev->psp.ras.ras)
+ smu_v11_0_i2c_eeprom_control_fini(&adev->pm.smu_i2c);
if (priv) {
amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
}
EXPORT_SYMBOL(drm_fb_helper_debug_leave);
-/**
- * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
- * @fb_helper: driver-allocated fbdev helper, can be NULL
- *
- * This should be called from driver's drm &drm_driver.lastclose callback
- * when implementing an fbcon on top of kms using this helper. This ensures that
- * the user isn't greeted with a black screen when e.g. X dies.
- *
- * RETURNS:
- * Zero if everything went ok, negative error code otherwise.
- */
-int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
+static int
+__drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper,
+ bool force)
{
bool do_delayed;
int ret;
return 0;
mutex_lock(&fb_helper->lock);
- ret = drm_client_modeset_commit(&fb_helper->client);
+ if (force) {
+ /*
+ * Yes this is the _locked version which expects the master lock
+ * to be held. But for forced restores we're intentionally
+ * racing here, see drm_fb_helper_set_par().
+ */
+ ret = drm_client_modeset_commit_locked(&fb_helper->client);
+ } else {
+ ret = drm_client_modeset_commit(&fb_helper->client);
+ }
do_delayed = fb_helper->delayed_hotplug;
if (do_delayed)
return ret;
}
+
+/**
+ * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
+ * @fb_helper: driver-allocated fbdev helper, can be NULL
+ *
+ * This should be called from driver's drm &drm_driver.lastclose callback
+ * when implementing an fbcon on top of kms using this helper. This ensures that
+ * the user isn't greeted with a black screen when e.g. X dies.
+ *
+ * RETURNS:
+ * Zero if everything went ok, negative error code otherwise.
+ */
+int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
+{
+ return __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, false);
+}
EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
#ifdef CONFIG_MAGIC_SYSRQ
{
struct drm_fb_helper *fb_helper = info->par;
struct fb_var_screeninfo *var = &info->var;
+ bool force;
if (oops_in_progress)
return -EBUSY;
return -EINVAL;
}
- drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+ /*
+ * Normally we want to make sure that a kms master takes precedence over
+ * fbdev, to avoid fbdev flickering and occasionally stealing the
+ * display status. But Xorg first sets the vt back to text mode using
+ * the KDSET IOCTL with KD_TEXT, and only after that drops the master
+ * status when exiting.
+ *
+ * In the past this was caught by drm_fb_helper_lastclose(), but on
+ * modern systems where logind always keeps a drm fd open to orchestrate
+ * the vt switching, this doesn't work.
+ *
+ * To not break the userspace ABI we have this special case here, which
+ * is only used for the above case. Everything else uses the normal
+ * commit function, which ensures that we never steal the display from
+ * an active drm master.
+ */
+ force = var->activate & FB_ACTIVATE_KD_TEXT;
+
+ __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, force);
return 0;
}
int orientation;
};
-static const struct drm_dmi_panel_orientation_data acer_s1003 = {
- .width = 800,
- .height = 1280,
- .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
-};
-
static const struct drm_dmi_panel_orientation_data asus_t100ha = {
.width = 800,
.height = 1280,
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"),
},
- .driver_data = (void *)&acer_s1003,
+ .driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* Asus T100HA */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100HAN"),
},
.driver_data = (void *)&asus_t100ha,
+ }, { /* Asus T101HA */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T101HA"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* GPD MicroPC (generic strings, also match on bios date) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
struct device *subdrv_dev, void **dma_priv)
{
struct exynos_drm_private *priv = drm_dev->dev_private;
- int ret;
+ int ret = 0;
if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
DRM_DEV_ERROR(subdrv_dev, "Device %s lacks support for IOMMU\n",
if (ret)
clear_dma_max_seg_size(subdrv_dev);
- return 0;
+ return ret;
}
/*
g2d->irq = platform_get_irq(pdev, 0);
if (g2d->irq < 0) {
- dev_err(dev, "failed to get irq\n");
ret = g2d->irq;
goto err_put_clk;
}
goto unlock;
ret = pm_runtime_get_sync(mic->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(mic->dev);
goto unlock;
+ }
mic_set_path(mic, 1);
/* reset all the states of crtc/plane/encoder/connector */
drm_mode_config_reset(dev);
- drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth);
-
return 0;
err:
ret);
goto err_unload;
}
+
+ drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth);
+
return 0;
err_unload:
return true;
}
+unsigned int
+intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
+{
+ int x = 0, y = 0;
+
+ intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
+ plane_state->color_plane[0].offset, 0);
+
+ return y;
+}
+
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
u32 pixel_format, u64 modifier,
unsigned int rotation);
int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
+unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state);
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv);
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
-/*
- * In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the
- * frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's
- * origin so the x and y offsets can actually fit the registers. As a
- * consequence, the fence doesn't really start exactly at the display plane
- * address we program because it starts at the real start of the buffer, so we
- * have to take this into consideration here.
- */
-static unsigned int get_crtc_fence_y_offset(struct intel_fbc *fbc)
-{
- return fbc->state_cache.plane.y - fbc->state_cache.plane.adjusted_y;
-}
-
/*
* For SKL+, the plane source size used by the hardware is based on the value we
* write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
fbc_ctl2 |= FBC_CTL_CPU_FENCE;
intel_de_write(dev_priv, FBC_CONTROL2, fbc_ctl2);
intel_de_write(dev_priv, FBC_FENCE_OFF,
- params->crtc.fence_y_offset);
+ params->fence_y_offset);
}
/* enable it... */
if (params->fence_id >= 0) {
dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fence_id;
intel_de_write(dev_priv, DPFC_FENCE_YOFF,
- params->crtc.fence_y_offset);
+ params->fence_y_offset);
} else {
intel_de_write(dev_priv, DPFC_FENCE_YOFF, 0);
}
intel_de_write(dev_priv, SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | params->fence_id);
intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET,
- params->crtc.fence_y_offset);
+ params->fence_y_offset);
}
} else {
if (IS_GEN(dev_priv, 6)) {
}
intel_de_write(dev_priv, ILK_DPFC_FENCE_YOFF,
- params->crtc.fence_y_offset);
+ params->fence_y_offset);
/* enable it... */
intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
intel_de_write(dev_priv, SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | params->fence_id);
intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET,
- params->crtc.fence_y_offset);
+ params->fence_y_offset);
} else if (dev_priv->ggtt.num_fences) {
intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0);
intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0);
/*
* For some reason, the hardware tracking starts looking at whatever we
* programmed as the display plane base address register. It does not look at
- * the X and Y offset registers. That's why we look at the crtc->adjusted{x,y}
- * variables instead of just looking at the pipe/plane size.
+ * the X and Y offset registers. That's why we include the src x/y offsets
+ * instead of just looking at the plane size.
*/
static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
{
cache->plane.src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
cache->plane.adjusted_x = plane_state->color_plane[0].x;
cache->plane.adjusted_y = plane_state->color_plane[0].y;
- cache->plane.y = plane_state->uapi.src.y1 >> 16;
cache->plane.pixel_blend_mode = plane_state->hw.pixel_blend_mode;
cache->fb.stride = fb->pitches[0];
cache->fb.modifier = fb->modifier;
+ cache->fence_y_offset = intel_plane_fence_y_offset(plane_state);
+
drm_WARN_ON(&dev_priv->drm, plane_state->flags & PLANE_HAS_FENCE &&
!plane_state->vma->fence);
memset(params, 0, sizeof(*params));
params->fence_id = cache->fence_id;
+ params->fence_y_offset = cache->fence_y_offset;
params->crtc.pipe = crtc->pipe;
params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane;
- params->crtc.fence_y_offset = get_crtc_fence_y_offset(fbc);
params->fb.format = cache->fb.format;
params->fb.stride = cache->fb.stride;
{
int err;
- err = i915_active_acquire(&ring->vma->active);
+ err = intel_ring_pin(ring);
if (err)
return err;
- err = intel_ring_pin(ring);
+ err = i915_active_acquire(&ring->vma->active);
if (err)
- goto err_active;
+ goto err_pin;
return 0;
-err_active:
- i915_active_release(&ring->vma->active);
+err_pin:
+ intel_ring_unpin(ring);
return err;
}
static void __ring_retire(struct intel_ring *ring)
{
- intel_ring_unpin(ring);
i915_active_release(&ring->vma->active);
+ intel_ring_unpin(ring);
}
__i915_active_call
--- /dev/null
+ASM sources for auto generated shaders
+======================================
+
+The i915/gt/hsw_clear_kernel.c and i915/gt/ivb_clear_kernel.c files contain
+pre-compiled batch chunks that will clear any residual render cache during
+context switch.
+
+They are generated from their respective platform ASM files, present in the
+i915/gt/shaders/clear_kernel directory.
+
+The generated .c files should never be modified directly. Instead, any
+modification needs to be made to the respective ASM files, and the build
+instructions below need to be followed.
+
+Building
+========
+
+Environment
+-----------
+
+IGT GPU tools scripts and Mesa's i965 instruction assembler tool are used
+for building.
+
+Please make sure your Mesa tool is compiled with "-Dtools=intel" and
+"-Ddri-drivers=i965", and run this script from the IGT source root directory.
+
+The instructions below assume:
+ * IGT GPU tools source code is located in your home directory (~) as ~/igt
+ * Mesa source code is located in your home directory (~) as ~/mesa
+ and built under the ~/mesa/build directory
+ * Linux kernel source code is under your home directory (~) as ~/linux
+
+Instructions
+------------
+
+~ $ cp ~/linux/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm \
+ ~/igt/lib/i915/shaders/clear_kernel/ivb.asm
+~ $ cd ~/igt
+igt $ ./scripts/generate_clear_kernel.sh -g ivb \
+ -m ~/mesa/build/src/intel/tools/i965_asm
+
+~ $ cp ~/linux/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm \
+ ~/igt/lib/i915/shaders/clear_kernel/hsw.asm
+~ $ cd ~/igt
+igt $ ./scripts/generate_clear_kernel.sh -g hsw \
+ -m ~/mesa/build/src/intel/tools/i965_asm
\ No newline at end of file
--- /dev/null
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+/*
+ * Kernel for PAVP buffer clear.
+ *
+ * 1. Clear all 64 GRF registers assigned to the kernel with designated value;
+ * 2. Write a 32x16 block of all "0" to the render target buffer, which
+ *    indirectly clears 512 bytes of Render Cache.
+ */
+
+/* Store designated "clear GRF" value */
+mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N };
+
+/**
+ * Curbe Format
+ *
+ * DW 1.0 - Block Offset to write Render Cache
+ * DW 1.1 [15:0] - Clear Word
+ * DW 1.2 - Delay iterations
+ * DW 1.3 - Enable Instrumentation (only for debug)
+ * DW 1.4 - Rsvd (intended for context ID)
+ * DW 1.5 - [31:16]:SliceCount, [15:0]:SubSlicePerSliceCount
+ * DW 1.6 - Rsvd MBZ (intended for Enable Wait on Total Thread Count)
+ * DW 1.7 - Rsvd MBZ (intended for Total Thread Count)
+ *
+ * Binding Table
+ *
+ * BTI 0: 2D Surface to help clear L3 (Render/Data Cache)
+ * BTI 1: Wait/Instrumentation Buffer
+ * Size : (SliceCount * SubSliceCount * 16 EUs/SubSlice) rows * (16 threads/EU) cols (Format R32_UINT)
+ * Expected to be initialized to 0 by driver/another kernel
+ * Layout:
+ * RowN: Histogram for EU-N: (SliceID*SubSlicePerSliceCount + SSID)*16 + EUID [assume max 16 EUs / SS]
+ * Col-k[DW-k]: Threads Executed on ThreadID-k for EU-N
+ */
+add(1) g1.2<1>UD g1.2<0,1,0>UD 0x00000001UD { align1 1N }; /* Loop count to delay kernel: Init to (g1.2 + 1) */
+cmp.z.f0.0(1) null<1>UD g1.3<0,1,0>UD 0x00000000UD { align1 1N };
+(+f0.0) jmpi(1) 352D { align1 WE_all 1N };
+
+/**
+ * State Register has info on where this thread is running
+ * IVB: sr0.0 :: [15:13]: MBZ, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
+ * HSW: sr0.0 :: 15: MBZ, [14:13]: SliceID, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
+ */
+mov(8) g3<1>UD 0x00000000UD { align1 1Q };
+shr(1) g3<1>D sr0<0,1,0>D 12D { align1 1N };
+and(1) g3<1>D g3<0,1,0>D 1D { align1 1N }; /* g3 has HSID */
+shr(1) g3.1<1>D sr0<0,1,0>D 13D { align1 1N };
+and(1) g3.1<1>D g3.1<0,1,0>D 3D { align1 1N }; /* g3.1 has sliceID */
+mul(1) g3.5<1>D g3.1<0,1,0>D g1.10<0,1,0>UW { align1 1N };
+add(1) g3<1>D g3<0,1,0>D g3.5<0,1,0>D { align1 1N }; /* g3 = sliceID * SubSlicePerSliceCount + HSID */
+shr(1) g3.2<1>D sr0<0,1,0>D 8D { align1 1N };
+and(1) g3.2<1>D g3.2<0,1,0>D 15D { align1 1N }; /* g3.2 = EUID */
+mul(1) g3.4<1>D g3<0,1,0>D 16D { align1 1N };
+add(1) g3.2<1>D g3.2<0,1,0>D g3.4<0,1,0>D { align1 1N }; /* g3.2 now points to EU row number (Y-pixel = V address ) in instrumentation surf */
+
+mov(8) g5<1>UD 0x00000000UD { align1 1Q };
+and(1) g3.3<1>D sr0<0,1,0>D 7D { align1 1N };
+mul(1) g3.3<1>D g3.3<0,1,0>D 4D { align1 1N };
+
+mov(8) g4<1>UD g0<8,8,1>UD { align1 1Q }; /* Initialize message header with g0 */
+mov(1) g4<1>UD g3.3<0,1,0>UD { align1 1N }; /* Block offset */
+mov(1) g4.1<1>UD g3.2<0,1,0>UD { align1 1N }; /* Block offset */
+mov(1) g4.2<1>UD 0x00000003UD { align1 1N }; /* Block size (1 row x 4 bytes) */
+and(1) g4.3<1>UD g4.3<0,1,0>UW 0xffffffffUD { align1 1N };
+
+/* Media block read to fetch current value at specified location in instrumentation buffer */
+sendc(8) g5<1>UD g4<8,8,1>F 0x02190001
+ render MsgDesc: media block read MsgCtrl = 0x0 Surface = 1 mlen 1 rlen 1 { align1 1Q };
+add(1) g5<1>D g5<0,1,0>D 1D { align1 1N };
+
+/* Media block write for updated value at specified location in instrumentation buffer */
+sendc(8) g5<1>UD g4<8,8,1>F 0x040a8001
+ render MsgDesc: media block write MsgCtrl = 0x0 Surface = 1 mlen 2 rlen 0 { align1 1Q };
+
+/* Delay thread for specified parameter */
+add.nz.f0.0(1) g1.2<1>UD g1.2<0,1,0>UD -1D { align1 1N };
+(+f0.0) jmpi(1) -32D { align1 WE_all 1N };
+
+/* Store designated "clear GRF" value */
+mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N };
+
+/* Initialize looping parameters */
+mov(1) a0<1>D 0D { align1 1N }; /* Initialize a0.0:w=0 */
+mov(1) a0.4<1>W 127W { align1 1N }; /* Loop count. Each loop contains 16 GRF's */
+
+/* Write 32x16 all "0" block */
+mov(8) g2<1>UD g0<8,8,1>UD { align1 1Q };
+mov(8) g127<1>UD g0<8,8,1>UD { align1 1Q };
+mov(2) g2<1>UD g1<2,2,1>UW { align1 1N };
+mov(1) g2.2<1>UD 0x000f000fUD { align1 1N }; /* Block size (16x16) */
+and(1) g2.3<1>UD g2.3<0,1,0>UW 0xffffffefUD { align1 1N };
+mov(16) g3<1>UD 0x00000000UD { align1 1H };
+mov(16) g4<1>UD 0x00000000UD { align1 1H };
+mov(16) g5<1>UD 0x00000000UD { align1 1H };
+mov(16) g6<1>UD 0x00000000UD { align1 1H };
+mov(16) g7<1>UD 0x00000000UD { align1 1H };
+mov(16) g8<1>UD 0x00000000UD { align1 1H };
+mov(16) g9<1>UD 0x00000000UD { align1 1H };
+mov(16) g10<1>UD 0x00000000UD { align1 1H };
+sendc(8) null<1>UD g2<8,8,1>F 0x120a8000
+ render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
+add(1) g2<1>UD g1<0,1,0>UW 0x0010UW { align1 1N };
+sendc(8) null<1>UD g2<8,8,1>F 0x120a8000
+ render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
+
+/* Now, clear all GRF registers */
+add.nz.f0.0(1) a0.4<1>W a0.4<0,1,0>W -1W { align1 1N };
+mov(16) g[a0]<1>UW f0.1<0,1,0>UW { align1 1H };
+add(1) a0<1>D a0<0,1,0>D 32D { align1 1N };
+(+f0.0) jmpi(1) -64D { align1 WE_all 1N };
+
+/* Terminate the thread */
+sendc(8) null<1>UD g127<8,8,1>F 0x82000010
+ thread_spawner MsgDesc: mlen 1 rlen 0 { align1 1Q EOT };
--- /dev/null
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+/*
+ * Kernel for PAVP buffer clear.
+ *
+ * 1. Clear all 64 GRF registers assigned to the kernel with designated value;
+ * 2. Write a 32x16 block of all "0" to the render target buffer, which
+ *    indirectly clears 512 bytes of Render Cache.
+ */
+
+/* Store designated "clear GRF" value */
+mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N };
+
+/**
+ * Curbe Format
+ *
+ * DW 1.0 - Block Offset to write Render Cache
+ * DW 1.1 [15:0] - Clear Word
+ * DW 1.2 - Delay iterations
+ * DW 1.3 - Enable Instrumentation (only for debug)
+ * DW 1.4 - Rsvd (intended for context ID)
+ * DW 1.5 - [31:16]:SliceCount, [15:0]:SubSlicePerSliceCount
+ * DW 1.6 - Rsvd MBZ (intended for Enable Wait on Total Thread Count)
+ * DW 1.7 - Rsvd MBZ (intended for Total Thread Count)
+ *
+ * Binding Table
+ *
+ * BTI 0: 2D Surface to help clear L3 (Render/Data Cache)
+ * BTI 1: Wait/Instrumentation Buffer
+ * Size : (SliceCount * SubSliceCount * 16 EUs/SubSlice) rows * (16 threads/EU) cols (Format R32_UINT)
+ * Expected to be initialized to 0 by driver/another kernel
+ * Layout:
+ * RowN: Histogram for EU-N: (SliceID*SubSlicePerSliceCount + SSID)*16 + EUID [assume max 16 EUs / SS]
+ * Col-k[DW-k]: Threads Executed on ThreadID-k for EU-N
+ */
+add(1) g1.2<1>UD g1.2<0,1,0>UD 0x00000001UD { align1 1N }; /* Loop count to delay kernel: Init to (g1.2 + 1) */
+cmp.z.f0.0(1) null<1>UD g1.3<0,1,0>UD 0x00000000UD { align1 1N };
+(+f0.0) jmpi(1) 44D { align1 WE_all 1N };
+
+/**
+ * State Register has info on where this thread is running
+ * IVB: sr0.0 :: [15:13]: MBZ, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
+ * HSW: sr0.0 :: 15: MBZ, [14:13]: SliceID, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
+ */
+mov(8) g3<1>UD 0x00000000UD { align1 1Q };
+shr(1) g3<1>D sr0<0,1,0>D 12D { align1 1N };
+and(1) g3<1>D g3<0,1,0>D 1D { align1 1N }; /* g3 has HSID */
+shr(1) g3.1<1>D sr0<0,1,0>D 13D { align1 1N };
+and(1) g3.1<1>D g3.1<0,1,0>D 3D { align1 1N }; /* g3.1 has sliceID */
+mul(1) g3.5<1>D g3.1<0,1,0>D g1.10<0,1,0>UW { align1 1N };
+add(1) g3<1>D g3<0,1,0>D g3.5<0,1,0>D { align1 1N }; /* g3 = sliceID * SubSlicePerSliceCount + HSID */
+shr(1) g3.2<1>D sr0<0,1,0>D 8D { align1 1N };
+and(1) g3.2<1>D g3.2<0,1,0>D 15D { align1 1N }; /* g3.2 = EUID */
+mul(1) g3.4<1>D g3<0,1,0>D 16D { align1 1N };
+add(1) g3.2<1>D g3.2<0,1,0>D g3.4<0,1,0>D { align1 1N }; /* g3.2 now points to EU row number (Y-pixel = V address ) in instrumentation surf */
+
+mov(8) g5<1>UD 0x00000000UD { align1 1Q };
+and(1) g3.3<1>D sr0<0,1,0>D 7D { align1 1N };
+mul(1) g3.3<1>D g3.3<0,1,0>D 4D { align1 1N };
+
+mov(8) g4<1>UD g0<8,8,1>UD { align1 1Q }; /* Initialize message header with g0 */
+mov(1) g4<1>UD g3.3<0,1,0>UD { align1 1N }; /* Block offset */
+mov(1) g4.1<1>UD g3.2<0,1,0>UD { align1 1N }; /* Block offset */
+mov(1) g4.2<1>UD 0x00000003UD { align1 1N }; /* Block size (1 row x 4 bytes) */
+and(1) g4.3<1>UD g4.3<0,1,0>UW 0xffffffffUD { align1 1N };
+
+/* Media block read to fetch current value at specified location in instrumentation buffer */
+sendc(8) g5<1>UD g4<8,8,1>F 0x02190001
+ render MsgDesc: media block read MsgCtrl = 0x0 Surface = 1 mlen 1 rlen 1 { align1 1Q };
+add(1) g5<1>D g5<0,1,0>D 1D { align1 1N };
+
+/* Media block write for updated value at specified location in instrumentation buffer */
+sendc(8) g5<1>UD g4<8,8,1>F 0x040a8001
+ render MsgDesc: media block write MsgCtrl = 0x0 Surface = 1 mlen 2 rlen 0 { align1 1Q };
+/* Delay thread for specified parameter */
+add.nz.f0.0(1) g1.2<1>UD g1.2<0,1,0>UD -1D { align1 1N };
+(+f0.0) jmpi(1) -4D { align1 WE_all 1N };
+
+/* Store designated "clear GRF" value */
+mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N };
+
+/* Initialize looping parameters */
+mov(1) a0<1>D 0D { align1 1N }; /* Initialize a0.0:w=0 */
+mov(1) a0.4<1>W 127W { align1 1N }; /* Loop count. Each loop contains 16 GRF's */
+
+/* Write 32x16 all "0" block */
+mov(8) g2<1>UD g0<8,8,1>UD { align1 1Q };
+mov(8) g127<1>UD g0<8,8,1>UD { align1 1Q };
+mov(2) g2<1>UD g1<2,2,1>UW { align1 1N };
+mov(1) g2.2<1>UD 0x000f000fUD { align1 1N }; /* Block size (16x16) */
+and(1) g2.3<1>UD g2.3<0,1,0>UW 0xffffffefUD { align1 1N };
+mov(16) g3<1>UD 0x00000000UD { align1 1H };
+mov(16) g4<1>UD 0x00000000UD { align1 1H };
+mov(16) g5<1>UD 0x00000000UD { align1 1H };
+mov(16) g6<1>UD 0x00000000UD { align1 1H };
+mov(16) g7<1>UD 0x00000000UD { align1 1H };
+mov(16) g8<1>UD 0x00000000UD { align1 1H };
+mov(16) g9<1>UD 0x00000000UD { align1 1H };
+mov(16) g10<1>UD 0x00000000UD { align1 1H };
+sendc(8) null<1>UD g2<8,8,1>F 0x120a8000
+ render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
+add(1) g2<1>UD g1<0,1,0>UW 0x0010UW { align1 1N };
+sendc(8) null<1>UD g2<8,8,1>F 0x120a8000
+ render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
+
+/* Now, clear all GRF registers */
+add.nz.f0.0(1) a0.4<1>W a0.4<0,1,0>W -1W { align1 1N };
+mov(16) g[a0]<1>UW f0.1<0,1,0>UW { align1 1H };
+add(1) a0<1>D a0<0,1,0>D 32D { align1 1N };
+(+f0.0) jmpi(1) -8D { align1 WE_all 1N };
+
+/* Terminate the thread */
+sendc(8) null<1>UD g127<8,8,1>F 0x82000010
+ thread_spawner MsgDesc: mlen 1 rlen 0 { align1 1Q EOT };
vreg = vgpu_vreg(param->vgpu, offset);
if (preg != vreg) {
- node = kmalloc(sizeof(*node), GFP_KERNEL);
+ node = kmalloc(sizeof(*node), GFP_ATOMIC);
if (!node)
return -ENOMEM;
(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
write_vreg(vgpu, offset, p_data, bytes);
- if (data & _MASKED_BIT_ENABLE(1)) {
+ if (IS_MASKED_BITS_ENABLED(data, 1)) {
enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
return 0;
}
if (IS_COFFEELAKE(vgpu->gvt->gt->i915) &&
- data & _MASKED_BIT_ENABLE(2)) {
+ IS_MASKED_BITS_ENABLED(data, 2)) {
enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
return 0;
}
* pvinfo, if not, we will treat this guest as non-gvtg-aware
* guest, and stop emulating its cfg space, mmio, gtt, etc.
*/
- if (((data & _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)) ||
- (data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)))
- && !vgpu->pv_notified) {
+ if ((IS_MASKED_BITS_ENABLED(data, GFX_PPGTT_ENABLE) ||
+ IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE)) &&
+ !vgpu->pv_notified) {
enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
return 0;
}
- if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))
- || (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) {
+ if (IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE) ||
+ IS_MASKED_BITS_DISABLED(data, GFX_RUN_LIST_ENABLE)) {
enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
gvt_dbg_core("EXECLIST %s on ring %s\n",
write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
- if (data & _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET))
+ if (IS_MASKED_BITS_ENABLED(data, RESET_CTL_REQUEST_RESET))
data |= RESET_CTL_READY_TO_RESET;
else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET))
data &= ~RESET_CTL_READY_TO_RESET;
(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18);
write_vreg(vgpu, offset, p_data, bytes);
- if (data & _MASKED_BIT_ENABLE(0x10) || data & _MASKED_BIT_ENABLE(0x8))
+ if (IS_MASKED_BITS_ENABLED(data, 0x10) ||
+ IS_MASKED_BITS_ENABLED(data, 0x8))
enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
return 0;
MMIO_D(_MMIO(0x72380), D_SKL_PLUS);
MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
MMIO_D(_MMIO(_PLANE_SURF_3_A), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_SURF_3_B), D_SKL_PLUS);
MMIO_D(CSR_SSP_BASE, D_SKL_PLUS);
MMIO_D(CSR_HTP_SKL, D_SKL_PLUS);
MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
- MMIO_D(GAMT_CHKN_BIT_REG, D_KBL);
- MMIO_D(GEN9_CTX_PREEMPT_REG, D_KBL | D_SKL);
+ MMIO_D(GAMT_CHKN_BIT_REG, D_KBL | D_CFL);
+ MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS);
return 0;
}
int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
struct i915_request *req);
-#define IS_RESTORE_INHIBIT(a) \
- (_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) == \
- ((a) & _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT)))
+
+#define IS_RESTORE_INHIBIT(a) \
+ IS_MASKED_BITS_ENABLED(a, CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT)
#endif
#define GFX_MODE_BIT_SET_IN_MASK(val, bit) \
((((bit) & 0xffff0000) == 0) && !!((val) & (((bit) << 16))))
+#define IS_MASKED_BITS_ENABLED(_val, _b) \
+ (((_val) & _MASKED_BIT_ENABLE(_b)) == _MASKED_BIT_ENABLE(_b))
+#define IS_MASKED_BITS_DISABLED(_val, _b) \
+ ((_val) & _MASKED_BIT_DISABLE(_b))
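+/*
+ * Masked registers carry a per-bit write enable in their upper 16 bits:
+ * _MASKED_BIT_ENABLE(b) expands to ((b) << 16 | (b)), so the helpers above
+ * test both the mask half and the value half of the written word.
+ */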
+
#define FORCEWAKE_RENDER_GEN9_REG 0xa278
#define FORCEWAKE_ACK_RENDER_GEN9_REG 0x0D84
#define FORCEWAKE_BLITTER_GEN9_REG 0xa188
struct file_stats *stats = data;
struct i915_vma *vma;
- if (!kref_get_unless_zero(&obj->base.refcount))
+ if (IS_ERR_OR_NULL(obj) || !kref_get_unless_zero(&obj->base.refcount))
return 0;
stats->count++;
int adjusted_x;
int adjusted_y;
- int y;
-
u16 pixel_blend_mode;
} plane;
unsigned int stride;
u64 modifier;
} fb;
+
+ unsigned int fence_y_offset;
u16 gen9_wa_cfb_stride;
s8 fence_id;
} state_cache;
struct {
enum pipe pipe;
enum i9xx_plane_id i9xx_plane;
- unsigned int fence_y_offset;
} crtc;
struct {
} fb;
int cfb_size;
+ unsigned int fence_y_offset;
u16 gen9_wa_cfb_stride;
s8 fence_id;
bool plane_visible;
struct i915_address_space *vm,
const struct i915_ggtt_view *view)
{
+ struct i915_vma *pos = ERR_PTR(-E2BIG);
struct i915_vma *vma;
struct rb_node *rb, **p;
rb = NULL;
p = &obj->vma.tree.rb_node;
while (*p) {
- struct i915_vma *pos;
long cmp;
rb = *p;
* and dispose of ours.
*/
cmp = i915_vma_compare(pos, vm, view);
- if (cmp == 0) {
- spin_unlock(&obj->vma.lock);
- i915_vma_free(vma);
- return pos;
- }
-
if (cmp < 0)
p = &rb->rb_right;
- else
+ else if (cmp > 0)
p = &rb->rb_left;
+ else
+ goto err_unlock;
}
rb_link_node(&vma->obj_node, rb, p);
rb_insert_color(&vma->obj_node, &obj->vma.tree);
err_unlock:
spin_unlock(&obj->vma.lock);
err_vma:
+ i915_vm_put(vm);
i915_vma_free(vma);
- return ERR_PTR(-E2BIG);
+ return pos;
}
static struct i915_vma *
struct drm_crtc *crtc = &pipe->crtc;
struct drm_plane *plane = &pipe->plane;
struct drm_device *drm = crtc->dev;
- struct mcde *mcde = drm->dev_private;
+ struct mcde *mcde = to_mcde(drm);
const struct drm_display_mode *mode = &cstate->mode;
struct drm_framebuffer *fb = plane->state->fb;
u32 format = fb->format->format;
drm_mode_config_reset(drm);
drm_kms_helper_poll_init(drm);
- drm_fbdev_generic_setup(drm, 32);
return 0;
}
if (ret < 0)
goto unbind;
+ drm_fbdev_generic_setup(drm, 32);
+
return 0;
unbind:
depends on COMMON_CLK
depends on HAVE_ARM_SMCCC
depends on OF
+ depends on MTK_MMSYS
select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL
select MEMORY
- select MTK_MMSYS
select MTK_SMI
select VIDEOMODE_HELPERS
help
int ret;
int i;
- DRM_DEBUG_DRIVER("%s\n", __func__);
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
ret = clk_prepare_enable(mtk_crtc->ddp_comp[i]->clk);
if (ret) {
{
int i;
- DRM_DEBUG_DRIVER("%s\n", __func__);
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
clk_disable_unprepare(mtk_crtc->ddp_comp[i]->clk);
}
int ret;
int i;
- DRM_DEBUG_DRIVER("%s\n", __func__);
if (WARN_ON(!crtc->state))
return -EINVAL;
goto err_mutex_unprepare;
}
- DRM_DEBUG_DRIVER("mediatek_ddp_ddp_path_setup\n");
for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
mtk_mmsys_ddp_connect(mtk_crtc->mmsys_dev,
mtk_crtc->ddp_comp[i]->id,
struct drm_crtc *crtc = &mtk_crtc->base;
int i;
- DRM_DEBUG_DRIVER("%s\n", __func__);
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
mtk_ddp_comp_stop(mtk_crtc->ddp_comp[i]);
if (i == 1)
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
mtk_crtc->cmdq_client =
- cmdq_mbox_create(dev, drm_crtc_index(&mtk_crtc->base),
+ cmdq_mbox_create(mtk_crtc->mmsys_dev,
+ drm_crtc_index(&mtk_crtc->base),
2000);
if (IS_ERR(mtk_crtc->cmdq_client)) {
dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
if (!private)
return -ENOMEM;
- private->data = of_device_get_match_data(dev);
private->mmsys_dev = dev->parent;
if (!private->mmsys_dev) {
dev_err(dev, "Failed to get MMSYS device\n");
goto err_node;
}
- ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL);
+ ret = mtk_ddp_comp_init(dev->parent, node, comp,
+ comp_id, NULL);
if (ret) {
of_node_put(node);
goto err_node;
int ret;
ret = drm_mode_config_helper_suspend(drm);
- DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n");
return ret;
}
int ret;
ret = drm_mode_config_helper_resume(drm);
- DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n");
return ret;
}
true, true);
}
+static void mtk_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
+
+ state->pending.enable = false;
+ wmb(); /* Make sure the above parameter is set before update */
+ state->pending.dirty = true;
+}
+
static void mtk_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
if (!crtc || WARN_ON(!fb))
return;
+ if (!plane->state->visible) {
+ mtk_plane_atomic_disable(plane, old_state);
+ return;
+ }
+
gem = fb->obj[0];
mtk_gem = to_mtk_gem_obj(gem);
addr = mtk_gem->dma_addr;
state->pending.dirty = true;
}
-static void mtk_plane_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
-{
- struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
-
- state->pending.enable = false;
- wmb(); /* Make sure the above parameter is set before update */
- state->pending.dirty = true;
-}
-
static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
.prepare_fb = drm_gem_fb_prepare_fb,
.atomic_check = mtk_plane_atomic_check,
static bool mtk_dsi_clk_hs_state(struct mtk_dsi *dsi)
{
- u32 tmp_reg1;
-
- tmp_reg1 = readl(dsi->regs + DSI_PHY_LCCON);
- return ((tmp_reg1 & LC_HS_TX_EN) == 1) ? true : false;
+ return readl(dsi->regs + DSI_PHY_LCCON) & LC_HS_TX_EN;
}
static void mtk_dsi_clk_hs_mode(struct mtk_dsi *dsi, bool enter)
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
- dev_dbg(dev, "%s\n", __func__);
-
mtk_hdmi_audio_enable(hdmi);
return 0;
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
- dev_dbg(dev, "%s\n", __func__);
-
mtk_hdmi_audio_disable(hdmi);
}
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
- dev_dbg(dev, "%s(%d)\n", __func__, enable);
-
if (enable)
mtk_hdmi_hw_aud_mute(hdmi);
else
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
- dev_dbg(dev, "%s\n", __func__);
-
memcpy(buf, hdmi->conn.eld, min(sizeof(hdmi->conn.eld), len));
return 0;
goto err_bridge_remove;
}
- dev_dbg(dev, "mediatek hdmi probe success\n");
return 0;
err_bridge_remove:
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
mtk_hdmi_clk_disable_audio(hdmi);
- dev_dbg(dev, "hdmi suspend success!\n");
+
return 0;
}
return ret;
}
- dev_dbg(dev, "hdmi resume success!\n");
return 0;
}
#endif
#define RGS_HDMITX_5T1_EDG (0xf << 4)
#define RGS_HDMITX_PLUG_TST BIT(0)
-static const u8 PREDIV[3][4] = {
- {0x0, 0x0, 0x0, 0x0}, /* 27Mhz */
- {0x1, 0x1, 0x1, 0x1}, /* 74Mhz */
- {0x1, 0x1, 0x1, 0x1} /* 148Mhz */
-};
-
-static const u8 TXDIV[3][4] = {
- {0x3, 0x3, 0x3, 0x2}, /* 27Mhz */
- {0x2, 0x1, 0x1, 0x1}, /* 74Mhz */
- {0x1, 0x0, 0x0, 0x0} /* 148Mhz */
-};
-
-static const u8 FBKSEL[3][4] = {
- {0x1, 0x1, 0x1, 0x1}, /* 27Mhz */
- {0x1, 0x0, 0x1, 0x1}, /* 74Mhz */
- {0x1, 0x0, 0x1, 0x1} /* 148Mhz */
-};
-
-static const u8 FBKDIV[3][4] = {
- {19, 24, 29, 19}, /* 27Mhz */
- {19, 24, 14, 19}, /* 74Mhz */
- {19, 24, 14, 19} /* 148Mhz */
-};
-
-static const u8 DIVEN[3][4] = {
- {0x2, 0x1, 0x1, 0x2}, /* 27Mhz */
- {0x2, 0x2, 0x2, 0x2}, /* 74Mhz */
- {0x2, 0x2, 0x2, 0x2} /* 148Mhz */
-};
-
-static const u8 HTPLLBP[3][4] = {
- {0xc, 0xc, 0x8, 0xc}, /* 27Mhz */
- {0xc, 0xf, 0xf, 0xc}, /* 74Mhz */
- {0xc, 0xf, 0xf, 0xc} /* 148Mhz */
-};
-
-static const u8 HTPLLBC[3][4] = {
- {0x2, 0x3, 0x3, 0x2}, /* 27Mhz */
- {0x2, 0x3, 0x3, 0x2}, /* 74Mhz */
- {0x2, 0x3, 0x3, 0x2} /* 148Mhz */
-};
-
-static const u8 HTPLLBR[3][4] = {
- {0x1, 0x1, 0x0, 0x1}, /* 27Mhz */
- {0x1, 0x2, 0x2, 0x1}, /* 74Mhz */
- {0x1, 0x2, 0x2, 0x1} /* 148Mhz */
-};
-
static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
- dev_dbg(hdmi_phy->dev, "%s\n", __func__);
-
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN);
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV);
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, RG_HDMITX_MHLCK_EN);
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
- dev_dbg(hdmi_phy->dev, "%s\n", __func__);
-
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN);
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_LPF_EN);
usleep_range(100, 150);
#define VIU_OSD_FIFO_DEPTH_VAL(val) ((val & 0x7f) << 12)
#define VIU_OSD_WORDS_PER_BURST(words) (((words & 0x4) >> 1) << 22)
#define VIU_OSD_FIFO_LIMITS(size) ((size & 0xf) << 24)
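+/* burst length is encoded in bit 31 together with bits [11:10] */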
+#define VIU_OSD_BURST_LENGTH_24 (0x0 << 31 | 0x0 << 10)
+#define VIU_OSD_BURST_LENGTH_32 (0x0 << 31 | 0x1 << 10)
+#define VIU_OSD_BURST_LENGTH_48 (0x0 << 31 | 0x2 << 10)
+#define VIU_OSD_BURST_LENGTH_64 (0x0 << 31 | 0x3 << 10)
+#define VIU_OSD_BURST_LENGTH_96 (0x1 << 31 | 0x0 << 10)
+#define VIU_OSD_BURST_LENGTH_128 (0x1 << 31 | 0x1 << 10)
#define VD1_IF0_GEN_REG 0x1a50
#define VD1_IF0_CANVAS0 0x1a51
priv->io_base + _REG(VIU_MISC_CTRL1));
}
-static inline uint32_t meson_viu_osd_burst_length_reg(uint32_t length)
-{
- uint32_t val = (((length & 0x80) % 24) / 12);
-
- return (((val & 0x3) << 10) | (((val & 0x4) >> 2) << 31));
-}
-
void meson_viu_init(struct meson_drm *priv)
{
uint32_t reg;
VIU_OSD_FIFO_LIMITS(2); /* fifo_lim: 2*16=32 */
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
- reg |= meson_viu_osd_burst_length_reg(32);
+ reg |= VIU_OSD_BURST_LENGTH_32;
else
- reg |= meson_viu_osd_burst_length_reg(64);
+ reg |= VIU_OSD_BURST_LENGTH_64;
writel_relaxed(reg, priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT));
writel_relaxed(reg, priv->io_base + _REG(VIU_OSD2_FIFO_CTRL_STAT));
struct msm_gem_address_space *aspace;
aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
- SZ_16M + 0xfff * SZ_64K);
+ 0xfff * SZ_64K);
if (IS_ERR(aspace) && !IS_ERR(mmu))
mmu->funcs->destroy(mmu);
return -ENODEV;
mmu = msm_iommu_new(gmu->dev, domain);
- gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x7fffffff);
+ gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000);
if (IS_ERR(gmu->aspace)) {
iommu_domain_free(domain);
return PTR_ERR(gmu->aspace);
#if defined(CONFIG_DRM_MSM_GPU_STATE)
.gpu_state_get = a6xx_gpu_state_get,
.gpu_state_put = a6xx_gpu_state_put,
- .create_address_space = adreno_iommu_create_address_space,
#endif
+ .create_address_space = adreno_iommu_create_address_space,
},
.get_timestamp = a6xx_get_timestamp,
};
struct msm_gem_address_space *aspace;
aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
- 0xfffffff);
+ 0xffffffff - SZ_16M);
if (IS_ERR(aspace) && !IS_ERR(mmu))
mmu->funcs->destroy(mmu);
struct dpu_kms *dpu_kms,
struct drm_display_mode *mode)
{
- struct msm_display_topology topology;
+ struct msm_display_topology topology = {0};
int i, intf_count = 0;
for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
* 1 LM, 1 INTF
* 2 LM, 1 INTF (stream merge to support high resolution interfaces)
*
- * Adding color blocks only to primary interface
+ * Add color blocks only to the primary interface, and only if
+ * they are available in sufficient number
*/
if (intf_count == 2)
topology.num_lm = 2;
else
topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
- if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI)
- topology.num_dspp = topology.num_lm;
+ if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI) {
+ if (dpu_kms->catalog->dspp &&
+ (dpu_kms->catalog->dspp_count >= topology.num_lm))
+ topology.num_dspp = topology.num_lm;
+ }
topology.num_enc = 0;
topology.num_intf = intf_count;
dpu_enc = to_dpu_encoder_virt(enc);
- mutex_init(&dpu_enc->enc_lock);
ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
if (ret)
goto fail;
0);
- mutex_init(&dpu_enc->rc_lock);
INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
dpu_encoder_off_work);
dpu_enc->idle_timeout = IDLE_TIMEOUT;
dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
if (!dpu_enc)
- return ERR_PTR(ENOMEM);
+ return ERR_PTR(-ENOMEM);
rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
drm_enc_mode, NULL);
spin_lock_init(&dpu_enc->enc_spinlock);
dpu_enc->enabled = false;
+ mutex_init(&dpu_enc->enc_lock);
+ mutex_init(&dpu_enc->rc_lock);
return &dpu_enc->base;
}
mmu = msm_iommu_new(dpu_kms->dev->dev, domain);
aspace = msm_gem_address_space_create(mmu, "dpu1",
- 0x1000, 0xfffffff);
+ 0x1000, 0x100000000 - 0x1000);
if (IS_ERR(aspace)) {
mmu->funcs->destroy(mmu);
config->iommu);
aspace = msm_gem_address_space_create(mmu,
- "mdp4", 0x1000, 0xffffffff);
+ "mdp4", 0x1000, 0x100000000 - 0x1000);
if (IS_ERR(aspace)) {
if (!IS_ERR(mmu))
mmu = msm_iommu_new(iommu_dev, config->platform.iommu);
aspace = msm_gem_address_space_create(mmu, "mdp5",
- 0x1000, 0xffffffff);
+ 0x1000, 0x100000000 - 0x1000);
if (IS_ERR(aspace)) {
if (!IS_ERR(mmu))
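These hunks (together with the gpu/gmu ones above) change the last argument of msm_gem_address_space_create() from an inclusive end address to a size, so each caller now passes the span length. A quick arithmetic check with the dpu1/mdp4/mdp5 numbers, assuming end = start + size - 1:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t start = 0x1000;
		uint64_t size = 0x100000000ULL - 0x1000;   /* new argument */

		assert(start + size - 1 == 0xffffffffULL); /* old inclusive end */
		return 0;
	}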
queue->flags = flags;
if (priv->gpu) {
- if (prio >= priv->gpu->nr_rings)
+ if (prio >= priv->gpu->nr_rings) {
+ kfree(queue);
return -EINVAL;
+ }
queue->prio = prio;
}
(0x0100 << nv_crtc->index),
};
+ if (!nv_encoder->audio)
+ return;
+
nv_encoder->audio = false;
nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, *dma_addr))
goto out_free_page;
- if (drm->dmem->migrate.copy_func(drm, page_size(spage),
+ if (drm->dmem->migrate.copy_func(drm, 1,
NOUVEAU_APER_VRAM, paddr, NOUVEAU_APER_HOST, *dma_addr))
goto out_dma_unmap;
} else {
.end = notifier->notifier.interval_tree.last + 1,
.pfn_flags_mask = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE,
.hmm_pfns = hmm_pfns,
+ .dev_private_owner = drm->dev,
};
struct mm_struct *mm = notifier->notifier.mm;
int ret;
if (retries)
udelay(400);
- /* transaction request, wait up to 1ms for it to complete */
+ /* transaction request, wait up to 2ms for it to complete */
nvkm_wr32(device, 0x00e4e4 + base, 0x00010000 | ctrl);
- timeout = 1000;
+ timeout = 2000;
do {
ctrl = nvkm_rd32(device, 0x00e4e4 + base);
udelay(1);
if (retries)
udelay(400);
- /* transaction request, wait up to 1ms for it to complete */
+ /* transaction request, wait up to 2ms for it to complete */
nvkm_wr32(device, 0x00d954 + base, 0x00010000 | ctrl);
- timeout = 1000;
+ timeout = 2000;
do {
ctrl = nvkm_rd32(device, 0x00d954 + base);
udelay(1);
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
};
static const struct panel_desc mitsubishi_aa070mc01 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
};
static const struct display_timing nlt_nl192108ac18_02d_timing = {
if (!rdev->pm.dpm.ps)
return -ENOMEM;
power_state_offset = (u8 *)state_array->states;
+ rdev->pm.dpm.num_ps = 0;
for (i = 0; i < state_array->ucNumEntries; i++) {
u8 *idx;
power_state = (union pplib_power_state *)power_state_offset;
if (!rdev->pm.power_state[i].clock_info)
return -EINVAL;
ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
- if (ps == NULL) {
- kfree(rdev->pm.dpm.ps);
+ if (ps == NULL)
return -ENOMEM;
- }
rdev->pm.dpm.ps[i].ps_priv = ps;
ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
non_clock_info,
k++;
}
power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+ rdev->pm.dpm.num_ps = i + 1;
}
- rdev->pm.dpm.num_ps = state_array->ucNumEntries;
/* fill in the vce power states */
for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
ret = -EINVAL;
- if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
+ if (fb_div & ~(SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT))
ret = -EINVAL;
if (clk_v & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT))
config DRM_RCAR_LVDS
tristate "R-Car DU LVDS Encoder Support"
depends on DRM && DRM_BRIDGE && OF
+ select DRM_KMS_HELPER
select DRM_PANEL
select OF_FLATTREE
select OF_OVERLAY
struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector);
unsigned long reg;
- if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_HPD_REG, reg,
- reg & SUN4I_HDMI_HPD_HIGH,
- 0, 500000)) {
+ reg = readl(hdmi->base + SUN4I_HDMI_HPD_REG);
+ if (reg & SUN4I_HDMI_HPD_HIGH) {
cec_phys_addr_invalidate(hdmi->cec_adap);
return connector_status_disconnected;
}
mixer->engine.ops = &sun8i_engine_ops;
mixer->engine.node = dev->of_node;
+ if (of_find_property(dev->of_node, "iommus", NULL)) {
+ /*
+ * This assumes we have the same DMA constraints for
+ * all the mixers in our pipeline. This sounds
+ * bad, but it has always been the case for us, and
+ * DRM doesn't do per-device allocation either, so we
+ * would need to fix DRM first...
+ */
+ ret = of_dma_configure(drm->dev, dev->of_node, true);
+ if (ret)
+ return ret;
+ }
+
/*
* While this function can fail, we shouldn't do anything
* if this happens. Some early DE2 DT entries don't provide
}
drm_plane_helper_add(&plane->base, &tegra_cursor_plane_helper_funcs);
+ drm_plane_create_zpos_immutable_property(&plane->base, 255);
return &plane->base;
}
for (i = 0; i < hub->soc->num_wgrps; i++) {
struct tegra_windowgroup *wgrp = &hub->wgrps[i];
- tegra_windowgroup_enable(wgrp);
+ /* Skip orphaned window group whose parent DC is disabled */
+ if (wgrp->parent)
+ tegra_windowgroup_enable(wgrp);
}
return 0;
for (i = 0; i < hub->soc->num_wgrps; i++) {
struct tegra_windowgroup *wgrp = &hub->wgrps[i];
- tegra_windowgroup_disable(wgrp);
+ /* Skip orphaned window group whose parent DC is disabled */
+ if (wgrp->parent)
+ tegra_windowgroup_disable(wgrp);
}
}
dev_err(&pdev->dev, "failed to register host1x client: %d\n",
err);
+ err = devm_of_platform_populate(&pdev->dev);
+ if (err < 0)
+ goto unregister;
+
+ return err;
+
+unregister:
+ host1x_client_unregister(&hub->client);
+ pm_runtime_disable(&pdev->dev);
return err;
}
if (!fence)
return 0;
- if (no_wait_gpu)
+ if (no_wait_gpu) {
+ dma_fence_put(fence);
return -EBUSY;
+ }
dma_resv_add_shared_fence(bo->base.resv, fence);
break;
case -EBUSY:
case -ERESTARTSYS:
+ dma_fence_put(moving);
return VM_FAULT_NOPAGE;
default:
+ dma_fence_put(moving);
return VM_FAULT_SIGBUS;
}
*/
void host1x_driver_unregister(struct host1x_driver *driver)
{
+ struct host1x *host1x;
+
driver_unregister(&driver->driver);
+ mutex_lock(&devices_lock);
+
+ list_for_each_entry(host1x, &devices, list)
+ host1x_detach_driver(host1x, driver);
+
+ mutex_unlock(&devices_lock);
+
mutex_lock(&drivers_lock);
list_del_init(&driver->list);
mutex_unlock(&drivers_lock);
err = host1x_register(host);
if (err < 0)
- goto deinit_intr;
+ goto deinit_debugfs;
+
+ err = devm_of_platform_populate(&pdev->dev);
+ if (err < 0)
+ goto unregister;
return 0;
-deinit_intr:
+unregister:
+ host1x_unregister(host);
+deinit_debugfs:
+ host1x_debug_deinit(host);
host1x_intr_deinit(host);
deinit_syncpt:
host1x_syncpt_deinit(host);
* Write dump contents to the page. No need to synchronize; panic should
* be single-threaded.
*/
- kmsg_dump_get_buffer(dumper, true, hv_panic_page, HV_HYP_PAGE_SIZE,
+ kmsg_dump_get_buffer(dumper, false, hv_panic_page, HV_HYP_PAGE_SIZE,
&bytes_written);
if (bytes_written)
hyperv_report_panic_msg(panic_pa, bytes_written);
res = setup_attrs(resource);
if (res)
- goto exit_free;
+ goto exit_free_capability;
resource->hwmon_dev = hwmon_device_register(&device->dev);
if (IS_ERR(resource->hwmon_dev)) {
exit_remove:
remove_attrs(resource);
+exit_free_capability:
+ free_capabilities(resource);
exit_free:
kfree(resource);
exit:
* 48380,
* where T = [-48380, 147438] mC and N = [0, 1023].
*/
-static const struct pvt_poly poly_temp_to_N = {
+static const struct pvt_poly __maybe_unused poly_temp_to_N = {
.total_divider = 10000,
.terms = {
{4, 18322, 10000, 10000},
* N = (18658e-3*V - 11572) / 10,
* V = N * 10^5 / 18658 + 11572 * 10^4 / 18658.
*/
-static const struct pvt_poly poly_volt_to_N = {
+static const struct pvt_poly __maybe_unused poly_volt_to_N = {
.total_divider = 10,
.terms = {
{1, 18658, 1000, 1},
return IRQ_HANDLED;
}
-inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type)
+static inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type)
{
return 0644;
}
-inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type)
+static inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type)
{
return 0444;
}
#define pvt_soft_isr NULL
-inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type)
+static inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type)
{
return 0;
}
-inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type)
+static inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type)
{
return 0;
}
* Map device tree / platform data register bit map to chip bit map.
* Applies to alert register and over-temperature register.
*/
-#define MAX6697_MAP_BITS(reg) ((((reg) & 0x7e) >> 1) | \
+#define MAX6697_ALERT_MAP_BITS(reg) ((((reg) & 0x7e) >> 1) | \
(((reg) & 0x01) << 6) | ((reg) & 0x80))
+#define MAX6697_OVERT_MAP_BITS(reg) (((reg) >> 1) | (((reg) & 0x01) << 7))
#define MAX6697_REG_STAT(n) (0x44 + (n))
return ret;
ret = i2c_smbus_write_byte_data(client, MAX6697_REG_ALERT_MASK,
- MAX6697_MAP_BITS(pdata->alert_mask));
+ MAX6697_ALERT_MAP_BITS(pdata->alert_mask));
if (ret < 0)
return ret;
ret = i2c_smbus_write_byte_data(client, MAX6697_REG_OVERT_MASK,
- MAX6697_MAP_BITS(pdata->over_temperature_mask));
+ MAX6697_OVERT_MAP_BITS(pdata->over_temperature_mask));
if (ret < 0)
return ret;
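The fix splits one remap macro in two because the alert and over-temperature mask registers place the local-channel bit differently. Assuming bit 0 of the platform-data bit map is the local sensor, a user-space check of both mappings:

	#include <assert.h>

	#define ALERT_MAP(r) ((((r) & 0x7e) >> 1) | (((r) & 0x01) << 6) | ((r) & 0x80))
	#define OVERT_MAP(r) (((r) >> 1) | (((r) & 0x01) << 7))

	int main(void)
	{
		assert(ALERT_MAP(0x01) == 0x40); /* local -> chip bit 6 */
		assert(OVERT_MAP(0x01) == 0x80); /* local -> chip bit 7 */
		assert(ALERT_MAP(0x80) == 0x80); /* top remote stays put */
		assert(OVERT_MAP(0x80) == 0x40); /* top remote -> chip bit 6 */
		return 0;
	}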
Infineon IR35221 controller.
This driver can also be built as a module. If so, the module will
- be called ir35521.
+ be called ir35221.
config SENSORS_IR38064
tristate "Infineon IR38064"
struct pmbus_sensor *sensor;
sensor = pmbus_add_sensor(data, "fan", "target", index, page,
- PMBUS_VIRT_FAN_TARGET_1 + id, 0xff, PSC_FAN,
+ 0xff, PMBUS_VIRT_FAN_TARGET_1 + id, PSC_FAN,
false, false, true);
if (!sensor)
return 0;
sensor = pmbus_add_sensor(data, "pwm", NULL, index, page,
- PMBUS_VIRT_PWM_1 + id, 0xff, PSC_PWM,
+ 0xff, PMBUS_VIRT_PWM_1 + id, PSC_PWM,
false, false, true);
if (!sensor)
return -ENOMEM;
sensor = pmbus_add_sensor(data, "pwm", "enable", index, page,
- PMBUS_VIRT_PWM_ENABLE_1 + id, 0xff, PSC_PWM,
+ 0xff, PMBUS_VIRT_PWM_ENABLE_1 + id, PSC_PWM,
true, false, false);
if (!sensor)
continue;
if (pmbus_add_sensor(data, "fan", "input", index,
- page, pmbus_fan_registers[f], 0xff,
+ page, 0xff, pmbus_fan_registers[f],
PSC_FAN, true, true, true) == NULL)
return -ENOMEM;
config I2C_SLAVE
bool "I2C slave support"
+ help
+ This enables Linux to act as an I2C slave device. Note that your I2C
+ bus master driver also needs to support this functionality. Please
+ read Documentation/i2c/slave-interface.rst for further details.
if I2C_SLAVE
config I2C_SLAVE_EEPROM
tristate "I2C eeprom slave driver"
+ help
+ This backend makes Linux behave like an I2C EEPROM. Please read
+ Documentation/i2c/slave-eeprom-backend.rst for further details.
endif
DEB2("BUS ERROR - SDA Stuck low\n");
pca_reset(adap);
goto out;
- case 0x90: /* Bus error - SCL stuck low */
+ case 0x78: /* Bus error - SCL stuck low (PCA9665) */
+ case 0x90: /* Bus error - SCL stuck low (PCA9564) */
DEB2("BUS ERROR - SCL Stuck low\n");
pca_reset(adap);
goto out;
}
EXPORT_SYMBOL_GPL(i2c_dw_acpi_configure);
-void i2c_dw_acpi_adjust_bus_speed(struct device *device)
+static u32 i2c_dw_acpi_round_bus_speed(struct device *device)
{
- struct dw_i2c_dev *dev = dev_get_drvdata(device);
- struct i2c_timings *t = &dev->timings;
u32 acpi_speed;
int i;
*/
for (i = 0; i < ARRAY_SIZE(supported_speeds); i++) {
if (acpi_speed >= supported_speeds[i])
- break;
+ return supported_speeds[i];
}
- acpi_speed = i < ARRAY_SIZE(supported_speeds) ? supported_speeds[i] : 0;
+
+ return 0;
+}
+
+#else /* CONFIG_ACPI */
+
+static inline u32 i2c_dw_acpi_round_bus_speed(struct device *device) { return 0; }
+
+#endif /* CONFIG_ACPI */
+
+void i2c_dw_adjust_bus_speed(struct dw_i2c_dev *dev)
+{
+ u32 acpi_speed = i2c_dw_acpi_round_bus_speed(dev->dev);
+ struct i2c_timings *t = &dev->timings;
/*
* Find bus speed from the "clock-frequency" device property, ACPI
else
t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ;
}
-EXPORT_SYMBOL_GPL(i2c_dw_acpi_adjust_bus_speed);
-
-#endif /* CONFIG_ACPI */
+EXPORT_SYMBOL_GPL(i2c_dw_adjust_bus_speed);
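The refactor hides the ACPI-specific rounding behind a static inline stub so that i2c_dw_adjust_bus_speed() itself needs no #ifdef. The idiom, reduced to a hypothetical CONFIG_FOO feature:

	#ifdef CONFIG_FOO
	u32 foo_round_speed(struct device *dev);   /* real implementation */
	#else
	static inline u32 foo_round_speed(struct device *dev) { return 0; }
	#endif

	/* callers invoke foo_round_speed() unconditionally; a disabled
	 * build simply sees "no rounded speed available" */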
u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset)
{
#endif
int i2c_dw_validate_speed(struct dw_i2c_dev *dev);
+void i2c_dw_adjust_bus_speed(struct dw_i2c_dev *dev);
#if IS_ENABLED(CONFIG_ACPI)
int i2c_dw_acpi_configure(struct device *device);
-void i2c_dw_acpi_adjust_bus_speed(struct device *device);
#else
static inline int i2c_dw_acpi_configure(struct device *device) { return -ENODEV; }
-static inline void i2c_dw_acpi_adjust_bus_speed(struct device *device) {}
#endif
}
}
- i2c_dw_acpi_adjust_bus_speed(&pdev->dev);
+ i2c_dw_adjust_bus_speed(dev);
if (has_acpi_companion(&pdev->dev))
i2c_dw_acpi_configure(&pdev->dev);
#include <linux/clk-provider.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dmi.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/i2c.h>
return ret;
}
+static const struct dmi_system_id dw_i2c_hwmon_class_dmi[] = {
+ {
+ .ident = "Qtechnology QT5222",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Qtechnology"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "QT5222"),
+ },
+ },
+ { } /* terminate list */
+};
+
static int dw_i2c_plat_probe(struct platform_device *pdev)
{
struct dw_i2c_platform_data *pdata = dev_get_platdata(&pdev->dev);
else
i2c_parse_fw_timings(&pdev->dev, t, false);
- i2c_dw_acpi_adjust_bus_speed(&pdev->dev);
+ i2c_dw_adjust_bus_speed(dev);
if (pdev->dev.of_node)
dw_i2c_of_configure(pdev);
adap = &dev->adapter;
adap->owner = THIS_MODULE;
- adap->class = I2C_CLASS_DEPRECATED;
+ adap->class = dmi_check_system(dw_i2c_hwmon_class_dmi) ?
+ I2C_CLASS_HWMON : I2C_CLASS_DEPRECATED;
ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev));
adap->dev.of_node = pdev->dev.of_node;
adap->nr = -1;
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_I2C), 1, },
{0,}
};
+MODULE_DEVICE_TABLE(pci, pch_pcidev_id);
static irqreturn_t pch_i2c_handler(int irq, void *pData);
#define I2C_STAT_DAT_REQ BIT(25)
#define I2C_STAT_CMD_COMP BIT(24)
#define I2C_STAT_STOP_ERR BIT(23)
-#define I2C_STAT_MAX_PORT GENMASK(19, 16)
+#define I2C_STAT_MAX_PORT GENMASK(22, 16)
#define I2C_STAT_ANY_INT BIT(15)
#define I2C_STAT_SCL_IN BIT(11)
#define I2C_STAT_SDA_IN BIT(10)
if (priv->smbus_block && (val & MLXCPLD_I2C_SMBUS_BLK_BIT)) {
mlxcpld_i2c_read_comm(priv, MLXCPLD_LPCI2C_NUM_DAT_REG,
&datalen, 1);
- if (unlikely(datalen > (I2C_SMBUS_BLOCK_MAX + 1))) {
+ if (unlikely(datalen > I2C_SMBUS_BLOCK_MAX)) {
dev_err(priv->dev, "Incorrect smbus block read message len\n");
- return -E2BIG;
+ return -EPROTO;
}
} else {
datalen = priv->xfer.data_len;
break;
case I2C_SMBUS_BLOCK_DATA:
case I2C_SMBUS_BLOCK_PROC_CALL:
+ if (msg[1].buf[0] > I2C_SMBUS_BLOCK_MAX) {
+ dev_err(&adapter->dev,
+ "Invalid block size returned: %d\n",
+ msg[1].buf[0]);
+ status = -EPROTO;
+ goto cleanup;
+ }
for (i = 0; i < msg[1].buf[0] + 1; i++)
data->block[i] = msg[1].buf[i];
break;
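This hunk, like the mlxcpld one above, validates a device-supplied block length before using it as a copy bound; without the check a misbehaving slave could overrun the block buffer, which holds at most I2C_SMBUS_BLOCK_MAX payload bytes plus the count byte. The shape of the check as a self-contained sketch:

	#include <string.h>

	#define I2C_SMBUS_BLOCK_MAX 32 /* as in the i2c uapi header */

	/* src[0] is the count byte reported by the device */
	static int copy_smbus_block(unsigned char *dst, const unsigned char *src)
	{
		if (src[0] > I2C_SMBUS_BLOCK_MAX)
			return -1; /* the drivers above return -EPROTO */
		memcpy(dst, src, src[0] + 1); /* count byte plus payload */
		return 0;
	}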
static void cm_queue_work_unlock(struct cm_id_private *cm_id_priv,
struct cm_work *work)
+ __releases(&cm_id_priv->lock)
{
bool immediate;
{
struct rdma_id_private *id_priv, *id_priv_dev;
+ lockdep_assert_held(&lock);
+
if (!bind_list)
return ERR_PTR(-EINVAL);
}
}
+ mutex_lock(&lock);
/*
* Net namespace might be getting deleted while route lookup,
* cm_id lookup is in progress. Therefore, perform netdevice
id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev);
err:
rcu_read_unlock();
+ mutex_unlock(&lock);
if (IS_ERR(id_priv) && *net_dev) {
dev_put(*net_dev);
*net_dev = NULL;
struct net *net = id_priv->id.route.addr.dev_addr.net;
int ret;
+ lockdep_assert_held(&lock);
+
if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
return;
u64 sid, mask;
__be16 port;
+ lockdep_assert_held(&lock);
+
addr = cma_src_addr(id_priv);
port = htons(bind_list->port);
struct rdma_bind_list *bind_list;
int ret;
+ lockdep_assert_held(&lock);
+
bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
if (!bind_list)
return -ENOMEM;
struct sockaddr *saddr = cma_src_addr(id_priv);
__be16 dport = cma_port(daddr);
+ lockdep_assert_held(&lock);
+
hlist_for_each_entry(cur_id, &bind_list->owners, node) {
struct sockaddr *cur_daddr = cma_dst_addr(cur_id);
struct sockaddr *cur_saddr = cma_src_addr(cur_id);
unsigned int rover;
struct net *net = id_priv->id.route.addr.dev_addr.net;
+ lockdep_assert_held(&lock);
+
inet_get_local_port_range(net, &low, &high);
remaining = (high - low) + 1;
rover = prandom_u32() % remaining + low;
struct rdma_id_private *cur_id;
struct sockaddr *addr, *cur_addr;
+ lockdep_assert_held(&lock);
+
addr = cma_src_addr(id_priv);
hlist_for_each_entry(cur_id, &bind_list->owners, node) {
if (id_priv == cur_id)
unsigned short snum;
int ret;
+ lockdep_assert_held(&lock);
+
snum = ntohs(cma_port(cma_src_addr(id_priv)));
if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
return -EACCES;
return ret;
}
-static void counter_history_stat_update(const struct rdma_counter *counter)
+static void counter_history_stat_update(struct rdma_counter *counter)
{
struct ib_device *dev = counter->device;
struct rdma_port_counter *port_counter;
if (!port_counter->hstats)
return;
+ rdma_counter_query_stats(counter);
+
for (i = 0; i < counter->stats->num_counters; i++)
port_counter->hstats->value[i] += counter->stats->value[i];
}
xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
flush_workqueue(port_priv->wq);
- ib_cancel_rmpp_recvs(mad_agent_priv);
deref_mad_agent(mad_agent_priv);
wait_for_completion(&mad_agent_priv->comp);
+ ib_cancel_rmpp_recvs(mad_agent_priv);
ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
DMA_FROM_DEVICE);
if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
sg_list.addr))) {
+ kfree(mad_priv);
ret = -ENOMEM;
break;
}
alloc_begin_fd_uobject(const struct uverbs_api_object *obj,
struct uverbs_attr_bundle *attrs)
{
- const struct uverbs_obj_fd_type *fd_type =
- container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);
+ const struct uverbs_obj_fd_type *fd_type;
int new_fd;
- struct ib_uobject *uobj;
+ struct ib_uobject *uobj, *ret;
struct file *filp;
+ uobj = alloc_uobj(attrs, obj);
+ if (IS_ERR(uobj))
+ return uobj;
+
+ fd_type =
+ container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);
if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release &&
- fd_type->fops->release != &uverbs_async_event_release))
- return ERR_PTR(-EINVAL);
+ fd_type->fops->release != &uverbs_async_event_release)) {
+ ret = ERR_PTR(-EINVAL);
+ goto err_fd;
+ }
new_fd = get_unused_fd_flags(O_CLOEXEC);
- if (new_fd < 0)
- return ERR_PTR(new_fd);
-
- uobj = alloc_uobj(attrs, obj);
- if (IS_ERR(uobj))
+ if (new_fd < 0) {
+ ret = ERR_PTR(new_fd);
goto err_fd;
+ }
/* Note that uverbs_uobject_fd_release() is called during abort */
filp = anon_inode_getfile(fd_type->name, fd_type->fops, NULL,
fd_type->flags);
if (IS_ERR(filp)) {
- uverbs_uobject_put(uobj);
- uobj = ERR_CAST(filp);
- goto err_fd;
+ ret = ERR_CAST(filp);
+ goto err_getfile;
}
uobj->object = filp;
uobj->id = new_fd;
return uobj;
-err_fd:
+err_getfile:
put_unused_fd(new_fd);
- return uobj;
+err_fd:
+ uverbs_uobject_put(uobj);
+ return ret;
}
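The reordering makes alloc_uobj() the first acquisition so every later failure can unwind it, with cascading labels releasing resources in the reverse order they were taken. The idiom in miniature, with hypothetical resources a and b:

	static int setup(void)
	{
		struct thing *a, *b;
		int ret;

		a = get_a();            /* first acquisition */
		if (IS_ERR(a))
			return PTR_ERR(a);

		b = get_b();            /* second acquisition */
		if (IS_ERR(b)) {
			ret = PTR_ERR(b);
			goto err_a;     /* unwind only what is held */
		}
		return 0;

	err_a:
		put_a(a);
		return ret;
	}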
struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
return len;
}
-static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
+static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
{
struct sk_buff *skb = NULL;
struct nlmsghdr *nlh;
void *data;
struct ib_sa_mad *mad;
int len;
+ unsigned long flags;
+ unsigned long delay;
+ gfp_t gfp_flag;
+ int ret;
+
+ INIT_LIST_HEAD(&query->list);
+ query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
mad = query->mad_buf->mad;
len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
/* Repair the nlmsg header length */
nlmsg_end(skb, nlh);
- return rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_mask);
-}
+ gfp_flag = ((gfp_mask & GFP_ATOMIC) == GFP_ATOMIC) ? GFP_ATOMIC :
+ GFP_NOWAIT;
-static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
-{
- unsigned long flags;
- unsigned long delay;
- int ret;
+ spin_lock_irqsave(&ib_nl_request_lock, flags);
+ ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_flag);
- INIT_LIST_HEAD(&query->list);
- query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
+ if (ret)
+ goto out;
- /* Put the request on the list first.*/
- spin_lock_irqsave(&ib_nl_request_lock, flags);
+ /* Put the request on the list. */
delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
query->timeout = delay + jiffies;
list_add_tail(&query->list, &ib_nl_request_list);
/* Start the timeout if this is the only request */
if (ib_nl_request_list.next == &query->list)
queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
- spin_unlock_irqrestore(&ib_nl_request_lock, flags);
- ret = ib_nl_send_msg(query, gfp_mask);
- if (ret) {
- ret = -EIO;
- /* Remove the request */
- spin_lock_irqsave(&ib_nl_request_lock, flags);
- list_del(&query->list);
- spin_unlock_irqrestore(&ib_nl_request_lock, flags);
- }
+out:
+ spin_unlock_irqrestore(&ib_nl_request_lock, flags);
return ret;
}
props->max_send_sge = dev_attr->max_sq_sge;
props->max_recv_sge = dev_attr->max_rq_sge;
props->max_sge_rd = dev_attr->max_wr_rdma_sge;
+ props->max_pkeys = 1;
if (udata && udata->outlen) {
resp.max_sq_sge = dev_attr->max_sq_sge;
static int __i2c_debugfs_open(struct inode *in, struct file *fp, u32 target)
{
struct hfi1_pportdata *ppd;
- int ret;
ppd = private2ppd(fp);
- ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0);
- if (ret) /* failed - release the module */
- module_put(THIS_MODULE);
-
- return ret;
+ return acquire_chip_resource(ppd->dd, i2c_target(target), 0);
}
static int i2c1_debugfs_open(struct inode *in, struct file *fp)
ppd = private2ppd(fp);
release_chip_resource(ppd->dd, i2c_target(target));
- module_put(THIS_MODULE);
return 0;
}
static int __qsfp_debugfs_open(struct inode *in, struct file *fp, u32 target)
{
struct hfi1_pportdata *ppd;
- int ret;
-
- if (!try_module_get(THIS_MODULE))
- return -ENODEV;
ppd = private2ppd(fp);
- ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0);
- if (ret) /* failed - release the module */
- module_put(THIS_MODULE);
-
- return ret;
+ return acquire_chip_resource(ppd->dd, i2c_target(target), 0);
}
static int qsfp1_debugfs_open(struct inode *in, struct file *fp)
ppd = private2ppd(fp);
release_chip_resource(ppd->dd, i2c_target(target));
- module_put(THIS_MODULE);
return 0;
}
return -ENOMEM;
}
+/**
+ * destroy_workqueues - destroy per port workqueues
+ * @dd: the hfi1_ib device
+ */
+static void destroy_workqueues(struct hfi1_devdata *dd)
+{
+ int pidx;
+ struct hfi1_pportdata *ppd;
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+
+ if (ppd->hfi1_wq) {
+ destroy_workqueue(ppd->hfi1_wq);
+ ppd->hfi1_wq = NULL;
+ }
+ if (ppd->link_wq) {
+ destroy_workqueue(ppd->link_wq);
+ ppd->link_wq = NULL;
+ }
+ }
+}
+
/**
* enable_general_intr() - Enable the IRQs that will be handled by the
* general interrupt handler.
* We can't count on interrupts since we are stopping.
*/
hfi1_quiet_serdes(ppd);
-
- if (ppd->hfi1_wq) {
- destroy_workqueue(ppd->hfi1_wq);
- ppd->hfi1_wq = NULL;
- }
- if (ppd->link_wq) {
- destroy_workqueue(ppd->link_wq);
- ppd->link_wq = NULL;
- }
+ if (ppd->hfi1_wq)
+ flush_workqueue(ppd->hfi1_wq);
+ if (ppd->link_wq)
+ flush_workqueue(ppd->link_wq);
}
sdma_exit(dd);
}
* clear dma engines, etc.
*/
shutdown_device(dd);
+ destroy_workqueues(dd);
stop_timers(dd);
* @wait_head: the wait queue
*
* This function is called to insert an iowait struct into a
- * wait queue after a resource (eg, sdma decriptor or pio
+ * wait queue after a resource (eg, sdma descriptor or pio
* buffer) runs out.
*/
static inline void iowait_queue(bool pkts_sent, struct iowait *w,
* @sde: sdma engine
* @tx_list: tx request list
* @sent_txreqs: count of txreqs posted to sdma
+ * @stops: count of queue stops
+ * @ring_full: ring has been filled
+ * @no_desc: descriptor shortage seen
* @flow: tracks when list needs to be flushed for a flow change
* @q_idx: ipoib Tx queue index
* @pkts_sent: indicator packets have been sent from this queue
struct sdma_engine *sde;
struct list_head tx_list;
u64 sent_txreqs;
+ atomic_t stops;
+ atomic_t ring_full;
+ atomic_t no_desc;
union hfi1_ipoib_flow flow;
u8 q_idx;
bool pkts_sent;
return sent - completed;
}
-static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq)
+static u64 hfi1_ipoib_used(struct hfi1_ipoib_txq *txq)
{
- if (unlikely(hfi1_ipoib_txreqs(++txq->sent_txreqs,
- atomic64_read(&txq->complete_txreqs)) >=
- min_t(unsigned int, txq->priv->netdev->tx_queue_len,
- txq->tx_ring.max_items - 1)))
+ return hfi1_ipoib_txreqs(txq->sent_txreqs,
+ atomic64_read(&txq->complete_txreqs));
+}
+
+static void hfi1_ipoib_stop_txq(struct hfi1_ipoib_txq *txq)
+{
+ if (atomic_inc_return(&txq->stops) == 1)
netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
}
+static void hfi1_ipoib_wake_txq(struct hfi1_ipoib_txq *txq)
+{
+ if (atomic_dec_and_test(&txq->stops))
+ netif_wake_subqueue(txq->priv->netdev, txq->q_idx);
+}
+
+static uint hfi1_ipoib_ring_hwat(struct hfi1_ipoib_txq *txq)
+{
+ return min_t(uint, txq->priv->netdev->tx_queue_len,
+ txq->tx_ring.max_items - 1);
+}
+
+static uint hfi1_ipoib_ring_lwat(struct hfi1_ipoib_txq *txq)
+{
+ return min_t(uint, txq->priv->netdev->tx_queue_len,
+ txq->tx_ring.max_items) >> 1;
+}
+
+static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq)
+{
+ ++txq->sent_txreqs;
+ if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq) &&
+ !atomic_xchg(&txq->ring_full, 1))
+ hfi1_ipoib_stop_txq(txq);
+}
+
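Together these helpers implement watermark hysteresis with a refcounted stop: atomic_xchg() guarantees each cause (ring full, descriptor shortage) stops the queue at most once, and the stops counter wakes it only after every cause has cleared. The watermark arithmetic, with assumed example values:

	#include <stdio.h>

	static unsigned int min_u(unsigned int a, unsigned int b)
	{
		return a < b ? a : b;
	}

	int main(void)
	{
		unsigned int tx_queue_len = 1000, max_items = 2048; /* assumed */
		unsigned int hwat = min_u(tx_queue_len, max_items - 1);
		unsigned int lwat = min_u(tx_queue_len, max_items) >> 1;

		printf("stop when used >= %u, wake when used < %u\n", hwat, lwat);
		return 0;
	}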
static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
{
struct net_device *dev = txq->priv->netdev;
- /* If the queue is already running just return */
- if (likely(!__netif_subqueue_stopped(dev, txq->q_idx)))
- return;
-
/* If shutting down just return as queue state is irrelevant */
if (unlikely(dev->reg_state != NETREG_REGISTERED))
return;
* Use the minimum of the current tx_queue_len or the rings max txreqs
* to protect against ring overflow.
*/
- if (hfi1_ipoib_txreqs(txq->sent_txreqs,
- atomic64_read(&txq->complete_txreqs))
- < min_t(unsigned int, dev->tx_queue_len,
- txq->tx_ring.max_items) >> 1)
- netif_wake_subqueue(dev, txq->q_idx);
+ if (hfi1_ipoib_used(txq) < hfi1_ipoib_ring_lwat(txq) &&
+ atomic_xchg(&txq->ring_full, 0))
+ hfi1_ipoib_wake_txq(txq);
}
static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget)
if (unlikely(!tx))
return ERR_PTR(-ENOMEM);
- /* so that we can test if the sdma decriptors are there */
+ /* so that we can test if the sdma descriptors are there */
tx->txreq.num_desc = 0;
tx->priv = priv;
tx->txq = txp->txq;
tx->skb = skb;
+ INIT_LIST_HEAD(&tx->txreq.list);
hfi1_ipoib_build_ib_tx_headers(tx, txp);
ret = hfi1_ipoib_submit_tx(txq, tx);
if (likely(!ret)) {
+tx_ok:
trace_sdma_output_ibhdr(tx->priv->dd,
&tx->sdma_hdr.hdr,
ib_is_sc5(txp->flow.sc5));
txq->pkts_sent = false;
- if (ret == -EBUSY) {
- list_add_tail(&tx->txreq.list, &txq->tx_list);
-
- trace_sdma_output_ibhdr(tx->priv->dd,
- &tx->sdma_hdr.hdr,
- ib_is_sc5(txp->flow.sc5));
- hfi1_ipoib_check_queue_depth(txq);
- return NETDEV_TX_OK;
- }
-
- if (ret == -ECOMM) {
- hfi1_ipoib_check_queue_depth(txq);
- return NETDEV_TX_OK;
- }
+ if (ret == -EBUSY || ret == -ECOMM)
+ goto tx_ok;
sdma_txclean(priv->dd, &tx->txreq);
dev_kfree_skb_any(skb);
struct ipoib_txreq *tx;
/* Has the flow change ? */
- if (txq->flow.as_int != txp->flow.as_int)
- (void)hfi1_ipoib_flush_tx_list(dev, txq);
-
+ if (txq->flow.as_int != txp->flow.as_int) {
+ int ret;
+
+ ret = hfi1_ipoib_flush_tx_list(dev, txq);
+ if (unlikely(ret)) {
+ if (ret == -EBUSY)
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ }
tx = hfi1_ipoib_send_dma_common(dev, skb, txp);
if (IS_ERR(tx)) {
int ret = PTR_ERR(tx);
return -EAGAIN;
}
- netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
-
- if (list_empty(&txq->wait.list))
+ if (list_empty(&txreq->list))
+ /* came from non-list submit */
+ list_add_tail(&txreq->list, &txq->tx_list);
+ if (list_empty(&txq->wait.list)) {
+ if (!atomic_xchg(&txq->no_desc, 1))
+ hfi1_ipoib_stop_txq(txq);
iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
+ }
write_sequnlock(&sde->waitlock);
return -EBUSY;
struct net_device *dev = txq->priv->netdev;
if (likely(dev->reg_state == NETREG_REGISTERED) &&
- likely(__netif_subqueue_stopped(dev, txq->q_idx)) &&
likely(!hfi1_ipoib_flush_tx_list(dev, txq)))
- netif_wake_subqueue(dev, txq->q_idx);
+ if (atomic_xchg(&txq->no_desc, 0))
+ hfi1_ipoib_wake_txq(txq);
}
int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
txq->sde = NULL;
INIT_LIST_HEAD(&txq->tx_list);
atomic64_set(&txq->complete_txreqs, 0);
+ atomic_set(&txq->stops, 0);
+ atomic_set(&txq->ring_full, 0);
+ atomic_set(&txq->no_desc, 0);
txq->q_idx = i;
txq->flow.tx_queue = 0xff;
txq->flow.sc5 = 0xff;
atomic64_inc(complete_txreqs);
}
- if (hfi1_ipoib_txreqs(txq->sent_txreqs, atomic64_read(complete_txreqs)))
+ if (hfi1_ipoib_used(txq))
dd_dev_warn(txq->priv->dd,
"txq %d not empty found %llu requests\n",
txq->q_idx,
{
if (dd->dummy_netdev) {
dd_dev_info(dd, "hfi1 netdev freed\n");
- free_netdev(dd->dummy_netdev);
+ kfree(dd->dummy_netdev);
dd->dummy_netdev = NULL;
}
}
{
/* Constraining 10KB packets to 8KB packets */
if (mtu == (enum ib_mtu)OPA_MTU_10240)
- mtu = OPA_MTU_8192;
+ mtu = (enum ib_mtu)OPA_MTU_8192;
return opa_mtu_enum_to_int((enum opa_mtu)mtu);
}
struct hfi1_ibport *ibp =
to_iport(qp->ibqp.device, qp->port_num);
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+ struct hfi1_devdata *dd = ppd->dd;
+
+ if (dd->flags & HFI1_SHUTDOWN)
+ return true;
return iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
priv->s_sde ?
struct hfi1_ibport *ibp =
to_iport(qp->ibqp.device, qp->port_num);
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+ struct hfi1_devdata *dd = ppd->dd;
+
+ if ((dd->flags & HFI1_SHUTDOWN))
+ return true;
return iowait_tid_schedule(&priv->s_iowait, ppd->hfi1_wq,
priv->s_sde ?
tx->mr = NULL;
tx->sde = priv->s_sde;
tx->psc = priv->s_sendcontext;
- /* so that we can test if the sdma decriptors are there */
+ /* so that we can test if the sdma descriptors are there */
tx->txreq.num_desc = 0;
/* Set the header type */
tx->phdr.hdr.hdr_type = priv->hdr_type;
int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
enum ib_mtu mtu);
- int (*write_mtpt)(void *mb_buf, struct hns_roce_mr *mr,
- unsigned long mtpt_idx);
+ int (*write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
+ struct hns_roce_mr *mr, unsigned long mtpt_idx);
int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
struct hns_roce_mr *mr, int flags, u32 pdn,
int mr_access_flags, u64 iova, u64 size,
void *mb_buf);
- int (*frmr_write_mtpt)(void *mb_buf, struct hns_roce_mr *mr);
+ int (*frmr_write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
+ struct hns_roce_mr *mr);
int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
void (*write_cqc)(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
val);
}
-static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
+static int hns_roce_v1_write_mtpt(struct hns_roce_dev *hr_dev, void *mb_buf,
+ struct hns_roce_mr *mr,
unsigned long mtpt_idx)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
u64 pages[HNS_ROCE_MAX_INNER_MTPT_NUM] = { 0 };
struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_v1_mpt_entry *mpt_entry;
instance_stage = handle->rinfo.instance_state;
reset_stage = handle->rinfo.reset_state;
reset_cnt = ops->ae_dev_reset_cnt(handle);
- hw_resetting = ops->get_hw_reset_stat(handle);
+ hw_resetting = ops->get_cmdq_stat(handle);
sw_resetting = ops->ae_dev_resetting(handle);
if (reset_cnt != hr_dev->reset_cnt)
return hns_roce_cmq_send(hr_dev, &desc, 1);
}
-static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
+static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
+ struct hns_roce_v2_mpt_entry *mpt_entry,
struct hns_roce_mr *mr)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
u64 pages[HNS_ROCE_V2_MAX_INNER_MTPT_NUM] = { 0 };
struct ib_device *ibdev = &hr_dev->ib_dev;
dma_addr_t pbl_ba;
return 0;
}
-static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
+static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
+ void *mb_buf, struct hns_roce_mr *mr,
unsigned long mtpt_idx)
{
struct hns_roce_v2_mpt_entry *mpt_entry;
if (mr->type == MR_TYPE_DMA)
return 0;
- ret = set_mtpt_pbl(mpt_entry, mr);
+ ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
return ret;
}
mr->iova = iova;
mr->size = size;
- ret = set_mtpt_pbl(mpt_entry, mr);
+ ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
}
return ret;
}
-static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
+static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev,
+ void *mb_buf, struct hns_roce_mr *mr)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_v2_mpt_entry *mpt_entry;
dma_addr_t pbl_ba = 0;
}
if (mr->type != MR_TYPE_FRMR)
- ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx);
+ ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr,
+ mtpt_idx);
else
- ret = hr_dev->hw->frmr_write_mtpt(mailbox->buf, mr);
+ ret = hr_dev->hw->frmr_write_mtpt(hr_dev, mailbox->buf, mr);
if (ret) {
dev_err(dev, "Write mtpt fail!\n");
goto err_page;
mdev_port_num);
if (err)
goto out;
- ext = MLX5_CAP_PCAM_FEATURE(dev->mdev, ptys_extended_ethernet);
+ ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability);
eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);
props->active_width = IB_WIDTH_4X;
if (!in)
return -ENOMEM;
- if (MLX5_CAP_GEN(mdev, ece_support))
+ if (MLX5_CAP_GEN(mdev, ece_support) && ucmd)
MLX5_SET(create_qp_in, in, ece, ucmd->ece_options);
qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
unsigned long flags;
int err;
- if (qp->ibqp.rwq_ind_tbl) {
+ if (qp->is_rss) {
destroy_rss_raw_qp_tir(dev, qp);
return;
}
- base = (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
+ base = (qp->type == IB_QPT_RAW_PACKET ||
qp->flags & IB_QP_CREATE_SOURCE_QPN) ?
- &qp->raw_packet_qp.rq.base :
- &qp->trans_qp.base;
+ &qp->raw_packet_qp.rq.base :
+ &qp->trans_qp.base;
if (qp->state != IB_QPS_RESET) {
- if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET &&
+ if (qp->type != IB_QPT_RAW_PACKET &&
!(qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_2RST_QP, 0,
NULL, &base->mqp, NULL);
base->mqp.qpn);
}
- get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
- &send_cq, &recv_cq);
+ get_cqs(qp->type, qp->ibqp.send_cq, qp->ibqp.recv_cq, &send_cq,
+ &recv_cq);
spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
mlx5_ib_lock_cqs(send_cq, recv_cq);
mlx5_ib_unlock_cqs(send_cq, recv_cq);
spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
- if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
+ if (qp->type == IB_QPT_RAW_PACKET ||
qp->flags & IB_QP_CREATE_SOURCE_QPN) {
destroy_raw_packet_qp(dev, qp);
} else {
if (qp_type == IB_QPT_RAW_PACKET && attr->rwq_ind_tbl)
return (create_flags) ? -EINVAL : 0;
+ process_create_flag(dev, &create_flags, IB_QP_CREATE_NETIF_QP,
+ mlx5_get_flow_namespace(dev->mdev,
+ MLX5_FLOW_NAMESPACE_BYPASS),
+ qp);
+ process_create_flag(dev, &create_flags,
+ IB_QP_CREATE_INTEGRITY_EN,
+ MLX5_CAP_GEN(mdev, sho), qp);
process_create_flag(dev, &create_flags,
IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
MLX5_CAP_GEN(mdev, block_lb_mc), qp);
static int check_ucmd_data(struct mlx5_ib_dev *dev,
struct mlx5_create_qp_params *params)
{
- struct ib_qp_init_attr *attr = params->attr;
struct ib_udata *udata = params->udata;
size_t size, last;
int ret;
*/
last = sizeof(struct mlx5_ib_create_qp_rss);
else
- /* IB_QPT_RAW_PACKET doesn't have ECE data */
- switch (attr->qp_type) {
- case IB_QPT_RAW_PACKET:
- last = offsetof(struct mlx5_ib_create_qp, ece_options);
- break;
- default:
- last = offsetof(struct mlx5_ib_create_qp, reserved);
- }
+ last = offsetof(struct mlx5_ib_create_qp, reserved);
if (udata->inlen <= last)
return 0;
if (!ret)
mlx5_ib_dbg(
dev,
- "udata is not cleared, inlen = %lu, ucmd = %lu, last = %lu, size = %lu\n",
+ "udata is not cleared, inlen = %zu, ucmd = %zu, last = %zu, size = %zu\n",
udata->inlen, params->ucmd_size, last, size);
return ret ? 0 : -EINVAL;
}
return &qp->ibqp;
destroy_qp:
- if (qp->type == MLX5_IB_QPT_DCT)
+ if (qp->type == MLX5_IB_QPT_DCT) {
mlx5_ib_destroy_dct(qp);
- else
+ } else {
+ /*
+ * The lines below are a temporary solution until QP allocation
+ * is moved under IB/core responsibility.
+ */
+ qp->ibqp.send_cq = attr->send_cq;
+ qp->ibqp.recv_cq = attr->recv_cq;
+ qp->ibqp.pd = pd;
destroy_qp_common(dev, qp, udata);
+ }
+
qp = NULL;
free_qp:
kfree(qp);
if (udata->outlen < min_resp_len)
return -EINVAL;
- resp.response_length = min_resp_len;
-
/*
* If we don't have enough space for the ECE options,
* simply indicate it with resp.response_length.
MLX5_GET(ads, path, src_addr_index),
MLX5_GET(ads, path, hop_limit),
MLX5_GET(ads, path, tclass));
- memcpy(ah_attr, MLX5_ADDR_OF(ads, path, rgid_rip),
- MLX5_FLD_SZ_BYTES(ads, rgid_rip));
+ rdma_ah_set_dgid_raw(ah_attr, MLX5_ADDR_OF(ads, path, rgid_rip));
}
}
int ece = 0;
switch (opcode) {
+ case MLX5_CMD_OP_INIT2INIT_QP:
+ ece = MLX5_GET(init2init_qp_out, out, ece);
+ break;
case MLX5_CMD_OP_INIT2RTR_QP:
ece = MLX5_GET(init2rtr_qp_out, out, ece);
break;
case MLX5_CMD_OP_RTS2RTS_QP:
ece = MLX5_GET(rts2rts_qp_out, out, ece);
break;
+ case MLX5_CMD_OP_RST2INIT_QP:
+ ece = MLX5_GET(rst2init_qp_out, out, ece);
+ break;
default:
break;
}
return -ENOMEM;
MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
opt_param_mask, qpc, uid);
+ MLX5_SET(rst2init_qp_in, mbox->in, ece, ece);
break;
case MLX5_CMD_OP_INIT2RTR_QP:
if (MBOX_ALLOC(mbox, init2rtr_qp))
return -ENOMEM;
MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
opt_param_mask, qpc, uid);
+ MLX5_SET(init2init_qp_in, mbox->in, ece, ece);
break;
default:
return -EINVAL;
if (params->cm_info) {
event.ird = params->cm_info->ird;
event.ord = params->cm_info->ord;
- event.private_data_len = params->cm_info->private_data_len;
- event.private_data = (void *)params->cm_info->private_data;
+ /* Only connect_request and reply have valid private data;
+ * for the rest of the events it may be left over from
+ * connection establishment. CONNECT_REQUEST is issued via
+ * qedr_iw_mpa_request
+ */
+ if (event_type == IW_CM_EVENT_CONNECT_REPLY) {
+ event.private_data_len =
+ params->cm_info->private_data_len;
+ event.private_data =
+ (void *)params->cm_info->private_data;
+ }
}
if (ep->cm_id)
err = alloc_ud_wq_attr(qp, rdi->dparms.node);
if (err) {
ret = (ERR_PTR(err));
- goto bail_driver_priv;
+ goto bail_rq_rvt;
}
if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
bail_rq_wq:
- rvt_free_rq(&qp->r_rq);
free_ud_wq_attr(qp);
+bail_rq_rvt:
+ rvt_free_rq(&qp->r_rq);
+
bail_driver_priv:
rdi->driver_f.qp_priv_free(rdi, qp);
static int dev_id = 1;
int rv;
+ sdev->vendor_part_id = dev_id++;
+
rv = ib_register_device(base_dev, name);
if (rv) {
pr_warn("siw: device registration error %d\n", rv);
return rv;
}
- sdev->vendor_part_id = dev_id++;
siw_dbg(base_dev, "HWaddr=%pM\n", sdev->netdev->dev_addr);
break;
bytes = min(bytes, len);
- if (siw_rx_kva(srx, (void *)buf_addr, bytes) == bytes) {
+ if (siw_rx_kva(srx, (void *)(uintptr_t)buf_addr, bytes) ==
+ bytes) {
copied += bytes;
offset += bytes;
len -= bytes;
config INTEL_IOMMU_SVM
bool "Support for Shared Virtual Memory with Intel IOMMU"
- depends on INTEL_IOMMU && X86
+ depends on INTEL_IOMMU && X86_64
select PCI_PASID
select PCI_PRI
select MMU_NOTIFIER
if (!ret)
ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
&validate_drhd_cb);
- if (!ret && !no_iommu && !iommu_detected && !dmar_disabled) {
+ if (!ret && !no_iommu && !iommu_detected &&
+ (!dmar_disabled || dmar_platform_optin())) {
iommu_detected = 1;
/* Make sure ACS will be enabled */
pci_request_acs();
return g_iommus[iommu_id];
}
+static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
+{
+ return sm_supported(iommu) ?
+ ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap);
+}
+
static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
struct dmar_drhd_unit *drhd;
for_each_domain_iommu(i, domain) {
found = true;
- if (!ecap_coherent(g_iommus[i]->ecap)) {
+ if (!iommu_paging_structure_coherency(g_iommus[i])) {
domain->iommu_coherency = 0;
break;
}
/* No hardware attached; use lowest common denominator */
rcu_read_lock();
for_each_active_iommu(iommu, drhd) {
- if (!ecap_coherent(iommu->ecap)) {
+ if (!iommu_paging_structure_coherency(iommu)) {
domain->iommu_coherency = 0;
break;
}
domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
if (domain_use_first_level(domain))
- pteval |= DMA_FL_PTE_XD;
+ pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
if (cmpxchg64(&pte->val, 0ULL, pteval))
/* Someone else set it while we were thinking; use theirs. */
free_pgtable_page(tmp_page);
context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
{
context->hi |= pasid & ((1 << 20) - 1);
- context->hi |= (1 << 20);
}
/*
context_set_fault_enable(context);
context_set_present(context);
- domain_flush_cache(domain, context, sizeof(*context));
+ if (!ecap_coherent(iommu->ecap))
+ clflush_cache_range(context, sizeof(*context));
/*
* It's a non-present to present mapping. If hardware doesn't cache
attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
if (domain_use_first_level(domain))
- attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD;
+ attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD | DMA_FL_PTE_US;
if (!sg) {
sg_res = nr_pages;
end >> agaw_to_width(si_domain->agaw)))
continue;
- ret = iommu_domain_identity_map(si_domain, start, end);
+ ret = iommu_domain_identity_map(si_domain,
+ mm_to_dma_pfn(start >> PAGE_SHIFT),
+ mm_to_dma_pfn(end >> PAGE_SHIFT));
if (ret)
return ret;
}
return ret;
}
+/*
+ * Check that the device does not live on an external-facing PCI port that is
+ * marked as untrusted. Such devices should not be able to apply quirks and
+ * thus not be able to bypass the IOMMU restrictions.
+ */
+static bool risky_device(struct pci_dev *pdev)
+{
+ if (pdev->untrusted) {
+ pci_info(pdev,
+ "Skipping IOMMU quirk for dev [%04X:%04X] on untrusted PCI link\n",
+ pdev->vendor, pdev->device);
+ pci_info(pdev, "Please check with your BIOS/Platform vendor about this\n");
+ return true;
+ }
+ return false;
+}
+
const struct iommu_ops intel_iommu_ops = {
.capable = intel_iommu_capable,
.domain_alloc = intel_iommu_domain_alloc,
static void quirk_iommu_igfx(struct pci_dev *dev)
{
+ if (risky_device(dev))
+ return;
+
pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
dmar_map_gfx = 0;
}
static void quirk_iommu_rwbf(struct pci_dev *dev)
{
+ if (risky_device(dev))
+ return;
+
/*
* Mobile 4 Series Chipset neglects to set RWBF capability,
* but needs it. Same seems to hold for the desktop versions.
{
unsigned short ggc;
+ if (risky_device(dev))
+ return;
+
if (pci_read_config_word(dev, GGC, &ggc))
return;
pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
if (!pdev)
return;
+
+ if (risky_device(pdev)) {
+ pci_dev_put(pdev);
+ return;
+ }
+
pci_dev_put(pdev);
/* System Management Registers. Might be hidden, in which case
if (!pdev)
return;
+ if (risky_device(pdev)) {
+ pci_dev_put(pdev);
+ return;
+ }
+
if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
pci_dev_put(pdev);
return;
Support for the Loongson PCH PIC Controller.
config LOONGSON_PCH_MSI
- bool "Loongson PCH PIC Controller"
+ bool "Loongson PCH MSI Controller"
depends on MACH_LOONGSON64 || COMPILE_TEST
depends on PCI
default MACH_LOONGSON64
if (!gic_rdists->has_vpend_valid_dirty)
return;
- WARN_ON_ONCE(readq_relaxed_poll_timeout(vlpi_base + GICR_VPENDBASER,
- val,
- !(val & GICR_VPENDBASER_Dirty),
- 10, 500));
+ WARN_ON_ONCE(readq_relaxed_poll_timeout_atomic(vlpi_base + GICR_VPENDBASER,
+ val,
+ !(val & GICR_VPENDBASER_Dirty),
+ 10, 500));
}
static void its_vpe_schedule(struct its_vpe *vpe)
u64 val;
if (info->req_db) {
+ unsigned long flags;
+
/*
* vPE is going to block: make the vPE non-resident with
* PendingLast clear and DB set. The GIC guarantees that if
* we read-back PendingLast clear, then a doorbell will be
* delivered when an interrupt comes.
+ *
+ * Note the locking to deal with the update of pending_last
+ * from the doorbell interrupt handler, which can run
+ * concurrently.
*/
+ raw_spin_lock_irqsave(&vpe->vpe_lock, flags);
val = its_clear_vpend_valid(vlpi_base,
GICR_VPENDBASER_PendingLast,
GICR_VPENDBASER_4_1_DB);
vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
+ raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
} else {
/*
* We're not blocking, so just make the vPE non-resident
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
bool force)
{
- void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
- unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
- u32 val, mask, bit;
- unsigned long flags;
+ void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + gic_irq(d);
+ unsigned int cpu;
if (!force)
cpu = cpumask_any_and(mask_val, cpu_online_mask);
if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
return -EINVAL;
- gic_lock_irqsave(flags);
- mask = 0xff << shift;
- bit = gic_cpu_map[cpu] << shift;
- val = readl_relaxed(reg) & ~mask;
- writel_relaxed(val | bit, reg);
- gic_unlock_irqrestore(flags);
-
+ writeb_relaxed(gic_cpu_map[cpu], reg);
irq_data_update_effective_affinity(d, cpumask_of(cpu));
return IRQ_SET_MASK_OK_DONE;
hartid = riscv_of_parent_hartid(node);
if (hartid < 0) {
- pr_warn("unable to fine hart id for %pOF\n", node);
+ pr_warn("unable to find hart id for %pOF\n", node);
return 0;
}
/*
* Check we have enough space.
*/
- needed = sizeof(*deps) + (sizeof(*deps->dev) * count);
+ needed = struct_size(deps, dev, count);
if (len < needed) {
param->flags |= DM_BUFFER_FULL_FLAG;
return;
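struct_size(deps, dev, count) is the overflow-safe spelling of the open-coded sum it replaces: for a struct ending in a flexible array member it yields the header size plus count elements, and the kernel helper saturates rather than wrapping on overflow. The plain arithmetic it stands for:

	#include <stdio.h>
	#include <stddef.h>

	struct deps {
		unsigned int count;
		unsigned int dev[]; /* flexible array member */
	};

	int main(void)
	{
		size_t count = 4;
		size_t needed = sizeof(struct deps) + count * sizeof(unsigned int);

		printf("%zu bytes for %zu entries\n", needed, count);
		return 0;
	}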
*/
static void rq_completed(struct mapped_device *md)
{
- /* nudge anyone waiting on suspend queue */
- if (unlikely(wq_has_sleeper(&md->wait)))
- wake_up(&md->wait);
-
/*
* dm_put() must be at the end of this function. See the comment above
*/
while (daa-- && i < p) {
pages[i++] = pfn_t_to_page(pfn);
pfn.val++;
+ if (!(i & 15))
+ cond_resched();
}
} while (i < p);
wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL);
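The test !(i & 15) is true exactly when i is a multiple of 16, so the loop yields the CPU once every 16 mapped pages rather than on every iteration. In isolation:

	#include <stdio.h>

	int main(void)
	{
		int i;

		for (i = 1; i <= 64; i++)
			if (!(i & 15)) /* every 16th iteration */
				printf("would cond_resched() at i=%d\n", i);
		return 0; /* prints 16, 32, 48, 64 */
	}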
if (likely(!e->write_in_progress)) {
if (!discarded_something) {
- writecache_wait_for_ios(wc, READ);
- writecache_wait_for_ios(wc, WRITE);
+ if (!WC_MODE_PMEM(wc)) {
+ writecache_wait_for_ios(wc, READ);
+ writecache_wait_for_ios(wc, WRITE);
+ }
discarded_something = true;
}
+ if (!writecache_entry_is_committed(wc, e))
+ wc->uncommitted_blocks--;
writecache_free_entry(wc, e);
}
}
if (WC_MODE_PMEM(wc)) {
+ if (!dax_synchronous(wc->ssd_dev->dax_dev)) {
+ r = -EOPNOTSUPP;
+ ti->error = "Asynchronous persistent memory not supported as pmem cache";
+ goto bad;
+ }
+
r = persistent_memory_claim(wc);
if (r) {
ti->error = "Unable to map persistent memory for cache";
nr_meta_zones = (le32_to_cpu(sb->nr_meta_blocks) + zmd->zone_nr_blocks - 1)
>> zmd->zone_nr_blocks_shift;
if (!nr_meta_zones ||
- nr_meta_zones >= zmd->nr_rnd_zones) {
+ (zmd->nr_devs <= 1 && nr_meta_zones >= zmd->nr_rnd_zones) ||
+ (zmd->nr_devs > 1 && nr_meta_zones >= zmd->nr_cache_zones)) {
dmz_dev_err(dev, "Invalid number of metadata blocks");
return -ENXIO;
}
unsigned int idx, bool idle)
{
struct dm_zone *dzone = NULL;
- struct dm_zone *zone, *last = NULL;
+ struct dm_zone *zone, *maxw_z = NULL;
struct list_head *zone_list;
/* If we have cache zones select from the cache zone list */
} else
zone_list = &zmd->dev[idx].map_rnd_list;
+ /*
+ * Find the buffer zone with the heaviest weight or the first (oldest)
+ * data zone that can be reclaimed.
+ */
list_for_each_entry(zone, zone_list, link) {
if (dmz_is_buf(zone)) {
dzone = zone->bzone;
- if (dzone->dev->dev_idx != idx)
- continue;
- if (!last) {
- last = dzone;
+ if (dmz_is_rnd(dzone) && dzone->dev->dev_idx != idx)
continue;
- }
- if (last->weight < dzone->weight)
+ if (!maxw_z || maxw_z->weight < dzone->weight)
+ maxw_z = dzone;
+ } else {
+ dzone = zone;
+ if (dmz_lock_zone_reclaim(dzone))
+ return dzone;
+ }
+ }
+
+ if (maxw_z && dmz_lock_zone_reclaim(maxw_z))
+ return maxw_z;
+
+ /*
+ * If we come here, none of the zones inspected could be locked for
+ * reclaim. Try again, being more aggressive, that is, find the
+ * first zone that can be reclaimed regardless of its weight.
+ */
+ list_for_each_entry(zone, zone_list, link) {
+ if (dmz_is_buf(zone)) {
+ dzone = zone->bzone;
+ if (dmz_is_rnd(dzone) && dzone->dev->dev_idx != idx)
continue;
- dzone = last;
} else
dzone = zone;
if (dmz_lock_zone_reclaim(dzone))
struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd,
unsigned int dev_idx, bool idle)
{
- struct dm_zone *zone;
+ struct dm_zone *zone = NULL;
/*
* Search for a zone candidate to reclaim: 2 cases are possible.
dmz_lock_map(zmd);
if (list_empty(&zmd->reserved_seq_zones_list))
zone = dmz_get_seq_zone_for_reclaim(zmd, dev_idx);
- else
+ if (!zone)
zone = dmz_get_rnd_zone_for_reclaim(zmd, dev_idx, idle);
dmz_unlock_map(zmd);
{
struct list_head *list;
struct dm_zone *zone;
- int i = 0;
+ int i;
+
+ /* Schedule reclaim to ensure free zones are available */
+ if (!(flags & DMZ_ALLOC_RECLAIM)) {
+ for (i = 0; i < zmd->nr_devs; i++)
+ dmz_schedule_reclaim(zmd->dev[i].reclaim);
+ }
+ i = 0;
again:
if (flags & DMZ_ALLOC_CACHE)
list = &zmd->unmap_cache_list;
dmz_metadata_label(zmd), zrc->dev_idx);
return -EBUSY;
}
+ rzone = dzone;
start = jiffies;
if (dmz_is_cache(dzone) || dmz_is_rnd(dzone)) {
*/
ret = dmz_reclaim_rnd_data(zrc, dzone);
}
- rzone = dzone;
-
} else {
struct dm_zone *bzone = dzone->bzone;
sector_t chunk_block = 0;
* be later reclaimed.
*/
ret = dmz_reclaim_seq_data(zrc, dzone);
- rzone = dzone;
}
}
out:
nr_zones = dmz_nr_rnd_zones(zmd, zrc->dev_idx);
nr_unmap = dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx);
}
+ if (nr_unmap <= 1)
+ return 0;
return nr_unmap * 100 / nr_zones;
}
{
struct dmz_reclaim *zrc = container_of(work, struct dmz_reclaim, work.work);
struct dmz_metadata *zmd = zrc->metadata;
- unsigned int p_unmap, nr_unmap_rnd = 0, nr_rnd = 0;
+ unsigned int p_unmap;
int ret;
if (dmz_dev_is_dying(zmd))
zrc->kc_throttle.throttle = min(75U, 100U - p_unmap / 2);
}
- nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx);
- nr_rnd = dmz_nr_rnd_zones(zmd, zrc->dev_idx);
-
DMDEBUG("(%s/%u): Reclaim (%u): %s, %u%% free zones (%u/%u cache %u/%u random)",
dmz_metadata_label(zmd), zrc->dev_idx,
zrc->kc_throttle.throttle,
dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
struct dmz_metadata *zmd = dmz->metadata;
struct dm_zone *zone;
- int i, ret;
-
- /*
- * Write may trigger a zone allocation. So make sure the
- * allocation can succeed.
- */
- if (bio_op(bio) == REQ_OP_WRITE)
- for (i = 0; i < dmz->nr_ddevs; i++)
- dmz_schedule_reclaim(dmz->dev[i].reclaim);
+ int ret;
dmz_lock_metadata(zmd);
}
/* Set target (no write same support) */
- ti->max_io_len = dmz_zone_nr_sectors(dmz->metadata) << 9;
+ ti->max_io_len = dmz_zone_nr_sectors(dmz->metadata);
ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
ti->num_write_zeroes_bios = 1;
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
bio_put(&tio->clone);
}
-static bool md_in_flight_bios(struct mapped_device *md)
-{
- int cpu;
- struct hd_struct *part = &dm_disk(md)->part0;
- long sum = 0;
-
- for_each_possible_cpu(cpu) {
- sum += part_stat_local_read_cpu(part, in_flight[0], cpu);
- sum += part_stat_local_read_cpu(part, in_flight[1], cpu);
- }
-
- return sum != 0;
-}
-
-static bool md_in_flight(struct mapped_device *md)
-{
- if (queue_is_mq(md->queue))
- return blk_mq_queue_inflight(md->queue);
- else
- return md_in_flight_bios(md);
-}
-
u64 dm_start_time_ns_from_clone(struct bio *bio)
{
struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
struct dm_io *io = tio->io;
struct mapped_device *md = tio->io->md;
dm_endio_fn endio = tio->ti->type->end_io;
+ struct bio *orig_bio = io->orig_bio;
if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
if (bio_op(bio) == REQ_OP_DISCARD &&
disable_write_zeroes(md);
}
+ /*
+ * For zone-append bios get offset in zone of the written
+ * sector and add that to the original bio sector pos.
+ */
+ if (bio_op(orig_bio) == REQ_OP_ZONE_APPEND) {
+ sector_t written_sector = bio->bi_iter.bi_sector;
+ struct request_queue *q = orig_bio->bi_disk->queue;
+ u64 mask = (u64)blk_queue_zone_sectors(q) - 1;
+
+ orig_bio->bi_iter.bi_sector += written_sector & mask;
+ }
+
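Zone sizes are a power of two, so written_sector & (zone_sectors - 1) is the offset of the completed write within its zone; adding it to the zone-start sector carried by the original zone-append bio gives the final position. With assumed numbers:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t zone_sectors = 1ULL << 19;         /* power-of-two size */
		uint64_t mask = zone_sectors - 1;
		uint64_t written = 3 * zone_sectors + 0x40; /* device's position */

		/* the original bio's sector is the zone start (3 * zone_sectors) */
		printf("offset in zone = %llu sectors\n",
		       (unsigned long long)(written & mask)); /* prints 64 */
		return 0;
	}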
if (endio) {
int r = endio(tio->ti, bio, &error);
switch (r) {
BUG_ON(bio_has_data(ci->bio));
while ((ti = dm_table_get_target(ci->map, target_nr++)))
__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
-
- bio_disassociate_blkg(ci->bio);
-
return 0;
}
ci.bio = &flush_bio;
ci.sector_count = 0;
error = __send_empty_flush(&ci);
+ bio_uninit(ci.bio);
/* dec_pending submits any data associated with flush */
} else if (op_is_zone_mgmt(bio_op(bio))) {
ci.bio = bio;
ci.bio = &flush_bio;
ci.sector_count = 0;
error = __send_empty_flush(&ci);
+ bio_uninit(ci.bio);
/* dec_pending submits any data associated with flush */
} else {
struct dm_target_io *tio;
}
EXPORT_SYMBOL_GPL(dm_put);
-static int dm_wait_for_completion(struct mapped_device *md, long task_state)
+static bool md_in_flight_bios(struct mapped_device *md)
+{
+ int cpu;
+ struct hd_struct *part = &dm_disk(md)->part0;
+ long sum = 0;
+
+ for_each_possible_cpu(cpu) {
+ sum += part_stat_local_read_cpu(part, in_flight[0], cpu);
+ sum += part_stat_local_read_cpu(part, in_flight[1], cpu);
+ }
+
+ return sum != 0;
+}
+
+static int dm_wait_for_bios_completion(struct mapped_device *md, long task_state)
{
int r = 0;
DEFINE_WAIT(wait);
- while (1) {
+ while (true) {
prepare_to_wait(&md->wait, &wait, task_state);
- if (!md_in_flight(md))
+ if (!md_in_flight_bios(md))
break;
if (signal_pending_state(task_state, current)) {
return r;
}
+static int dm_wait_for_completion(struct mapped_device *md, long task_state)
+{
+ int r = 0;
+
+ if (!queue_is_mq(md->queue))
+ return dm_wait_for_bios_completion(md, task_state);
+
+ while (true) {
+ if (!blk_mq_queue_inflight(md->queue))
+ break;
+
+ if (signal_pending_state(task_state, current)) {
+ r = -EINTR;
+ break;
+ }
+
+ msleep(5);
+ }
+
+ return r;
+}
+
/*
* Process the deferred bios
*/
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
unsigned cookie)
{
+ int r;
+ unsigned noio_flag;
char udev_cookie[DM_COOKIE_LENGTH];
char *envp[] = { udev_cookie, NULL };
+ noio_flag = memalloc_noio_save();
+
if (!cookie)
- return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
+ r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
else {
snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
DM_COOKIE_ENV_VAR_NAME, cookie);
- return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
- action, envp);
+ r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
+ action, envp);
}
+
+ memalloc_noio_restore(noio_flag);
+
+ return r;
}
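memalloc_noio_save()/memalloc_noio_restore() scope an implicit GFP_NOIO over the whole call chain, so any allocation made inside kobject_uevent_env() cannot recurse into block I/O against the device being changed. The general shape of the pattern, as a sketch:

	unsigned int noio_flag;

	noio_flag = memalloc_noio_save();
	/* everything here allocates as if GFP_NOIO had been passed */
	memalloc_noio_restore(noio_flag);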
uint32_t dm_next_uevent_seq(struct mapped_device *md)
* Troy Laramy <t-laramy@ti.com>
*/
-#include <asm/cacheflush.h>
-
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <asm/cacheflush.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
return 0; /* fw doesn't need any host buffers */
/* spin till we get enough memory */
- while(host_page_buffer_sz > 0) {
-
- if((ioc->HostPageBuffer = pci_alloc_consistent(
- ioc->pcidev,
- host_page_buffer_sz,
- &ioc->HostPageBuffer_dma)) != NULL) {
-
+ while (host_page_buffer_sz > 0) {
+ ioc->HostPageBuffer =
+ dma_alloc_coherent(&ioc->pcidev->dev,
+ host_page_buffer_sz,
+ &ioc->HostPageBuffer_dma,
+ GFP_KERNEL);
+ if (ioc->HostPageBuffer) {
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"host_page_buffer @ %p, dma @ %x, sz=%d bytes\n",
ioc->name, ioc->HostPageBuffer,
sz = ioc->alloc_sz;
dexitprintk(ioc, printk(MYIOC_s_INFO_FMT "free @ %p, sz=%d bytes\n",
ioc->name, ioc->alloc, ioc->alloc_sz));
- pci_free_consistent(ioc->pcidev, sz,
- ioc->alloc, ioc->alloc_dma);
+ dma_free_coherent(&ioc->pcidev->dev, sz, ioc->alloc,
+ ioc->alloc_dma);
ioc->reply_frames = NULL;
ioc->req_frames = NULL;
ioc->alloc = NULL;
if (ioc->sense_buf_pool != NULL) {
sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
- pci_free_consistent(ioc->pcidev, sz,
- ioc->sense_buf_pool, ioc->sense_buf_pool_dma);
+ dma_free_coherent(&ioc->pcidev->dev, sz, ioc->sense_buf_pool,
+ ioc->sense_buf_pool_dma);
ioc->sense_buf_pool = NULL;
ioc->alloc_total -= sz;
}
"HostPageBuffer free @ %p, sz=%d bytes\n",
ioc->name, ioc->HostPageBuffer,
ioc->HostPageBuffer_sz));
- pci_free_consistent(ioc->pcidev, ioc->HostPageBuffer_sz,
+ dma_free_coherent(&ioc->pcidev->dev, ioc->HostPageBuffer_sz,
ioc->HostPageBuffer, ioc->HostPageBuffer_dma);
ioc->HostPageBuffer = NULL;
ioc->HostPageBuffer_sz = 0;
ioc->name, sz, sz, num_chain));
total_size += sz;
- mem = pci_alloc_consistent(ioc->pcidev, total_size, &alloc_dma);
+ mem = dma_alloc_coherent(&ioc->pcidev->dev, total_size,
+ &alloc_dma, GFP_KERNEL);
if (mem == NULL) {
printk(MYIOC_s_ERR_FMT "Unable to allocate Reply, Request, Chain Buffers!\n",
ioc->name);
spin_unlock_irqrestore(&ioc->FreeQlock, flags);
sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
- ioc->sense_buf_pool =
- pci_alloc_consistent(ioc->pcidev, sz, &ioc->sense_buf_pool_dma);
+ ioc->sense_buf_pool = dma_alloc_coherent(&ioc->pcidev->dev, sz,
+ &ioc->sense_buf_pool_dma, GFP_KERNEL);
if (ioc->sense_buf_pool == NULL) {
printk(MYIOC_s_ERR_FMT "Unable to allocate Sense Buffers!\n",
ioc->name);
if (ioc->alloc != NULL) {
sz = ioc->alloc_sz;
- pci_free_consistent(ioc->pcidev,
- sz,
- ioc->alloc, ioc->alloc_dma);
+ dma_free_coherent(&ioc->pcidev->dev, sz, ioc->alloc,
+ ioc->alloc_dma);
ioc->reply_frames = NULL;
ioc->req_frames = NULL;
ioc->alloc_total -= sz;
}
if (ioc->sense_buf_pool != NULL) {
sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
- pci_free_consistent(ioc->pcidev,
- sz,
- ioc->sense_buf_pool, ioc->sense_buf_pool_dma);
+ dma_free_coherent(&ioc->pcidev->dev, sz, ioc->sense_buf_pool,
+ ioc->sense_buf_pool_dma);
ioc->sense_buf_pool = NULL;
}
int mptscsih_resume(struct pci_dev *pdev);
#endif
-#define SNS_LEN(scp) SCSI_SENSE_BUFFERSIZE
-
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
/* Copy the sense received into the scsi command block. */
req_index = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
sense_data = ((u8 *)ioc->sense_buf_pool + (req_index * MPT_SENSE_BUFFER_ALLOC));
- memcpy(sc->sense_buffer, sense_data, SNS_LEN(sc));
+ memcpy(sc->sense_buffer, sense_data, MPT_SENSE_BUFFER_ALLOC);
/* Log SMART data (asc = 0x5D, non-IM case only) if required.
*/
container_of(fence, struct hl_cs_compl, base_fence);
struct hl_device *hdev = hl_cs_cmpl->hdev;
+ /* EBUSY means the CS was never submitted and hence we don't have
+ * an attached hw_sob object that we should handle here
+ */
+ if (fence->error == -EBUSY)
+ goto free;
+
if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
(hl_cs_cmpl->type == CS_TYPE_WAIT)) {
kref_put(&hl_cs_cmpl->hw_sob->kref, hl_sob_reset);
}
+free:
kfree_rcu(hl_cs_cmpl, base_fence.rcu);
}
hl_ctx_put(cs->ctx);
+	/* We need to mark an error for a CS that was not submitted, because
+	 * in that case the dma fence release flow is different. Mainly, we
+	 * don't need to handle the hw_sob for signal/wait.
+	 */
if (cs->timedout)
dma_fence_set_error(cs->fence, -ETIMEDOUT);
else if (cs->aborted)
dma_fence_set_error(cs->fence, -EIO);
+ else if (!cs->submitted)
+ dma_fence_set_error(cs->fence, -EBUSY);
dma_fence_signal(cs->fence);
dma_fence_put(cs->fence);
return 0;
}
-static ssize_t mmu_write(struct file *file, const char __user *buf,
+static ssize_t mmu_asid_va_write(struct file *file, const char __user *buf,
size_t count, loff_t *f_pos)
{
struct seq_file *s = file->private_data;
{"command_submission_jobs", command_submission_jobs_show, NULL},
{"userptr", userptr_show, NULL},
{"vm", vm_show, NULL},
- {"mmu", mmu_show, mmu_write},
+ {"mmu", mmu_show, mmu_asid_va_write},
{"engines", engines_show, NULL}
};
#define GAUDI_NUM_OF_QM_ARB_ERR_CAUSE 3
-#define GAUDI_ARB_WDT_TIMEOUT 0x400000
+#define GAUDI_ARB_WDT_TIMEOUT 0x1000000
static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = {
"gaudi cq 0_0", "gaudi cq 0_1", "gaudi cq 0_2", "gaudi cq 0_3",
WREG32(mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_0 + q_off, so_base_ws_lo);
WREG32(mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_0 + q_off, so_base_ws_hi);
+ WREG32(mmDMA0_QM_CP_BARRIER_CFG_0 + q_off, 0x100);
+
/* The following configuration is needed only once per QMAN */
if (qman_id == 0) {
/* Configure RAZWI IRQ */
WREG32(mmSTLB_HOP_CONFIGURATION,
hdev->mmu_huge_page_opt ? 0x30440 : 0x40440);
+ /*
+ * The H/W expects the first PI after init to be 1. After wraparound
+ * we'll write 0.
+ */
+ gaudi->mmu_cache_inv_pi = 1;
+
gaudi->hw_cap_initialized |= HW_CAP_MMU;
return 0;
src_in_host);
}
+static int gaudi_validate_load_and_exe_pkt(struct hl_device *hdev,
+ struct hl_cs_parser *parser,
+ struct packet_load_and_exe *user_pkt)
+{
+ u32 cfg;
+
+ cfg = le32_to_cpu(user_pkt->cfg);
+
+ if (cfg & GAUDI_PKT_LOAD_AND_EXE_CFG_DST_MASK) {
+ dev_err(hdev->dev,
+ "User not allowed to use Load and Execute\n");
+ return -EPERM;
+ }
+
+ parser->patched_cb_size += sizeof(struct packet_load_and_exe);
+
+ return 0;
+}
+
static int gaudi_validate_cb(struct hl_device *hdev,
struct hl_cs_parser *parser, bool is_mmu)
{
rc = -EPERM;
break;
+ case PACKET_LOAD_AND_EXE:
+ rc = gaudi_validate_load_and_exe_pkt(hdev, parser,
+ (struct packet_load_and_exe *) user_pkt);
+ break;
+
case PACKET_LIN_DMA:
parser->contains_dma_pkt = true;
if (is_mmu)
case PACKET_FENCE:
case PACKET_NOP:
case PACKET_ARB_POINT:
- case PACKET_LOAD_AND_EXE:
parser->patched_cb_size += pkt_size;
break;
mutex_lock(&hdev->mmu_cache_lock);
/* L0 & L1 invalidation */
+ WREG32(mmSTLB_INV_PS, 3);
+ WREG32(mmSTLB_CACHE_INV, gaudi->mmu_cache_inv_pi++);
WREG32(mmSTLB_INV_PS, 2);
rc = hl_poll_timeout(
 * @multi_msi_mode: whether we are working in multi MSI or single MSI mode.
* Multi MSI is possible only with IOMMU enabled.
* @ext_queue_idx: helper index for external queues initialization.
+ * @mmu_cache_inv_pi: PI for MMU cache invalidation flow. The H/W expects an
+ * 8-bit value so use u8.
*/
struct gaudi_device {
int (*armcp_info_get)(struct hl_device *hdev);
u32 hw_cap_initialized;
u8 multi_msi_mode;
u8 ext_queue_idx;
+ u8 mmu_cache_inv_pi;
};
void gaudi_init_security(struct hl_device *hdev);
__le32 ctl;
};
+#define GAUDI_PKT_LOAD_AND_EXE_CFG_DST_SHIFT 0
+#define GAUDI_PKT_LOAD_AND_EXE_CFG_DST_MASK 0x00000001
+
struct packet_load_and_exe {
__le32 cfg;
__le32 ctl;
#define MEI_DEV_ID_JSP_N 0x4DE0 /* Jasper Lake Point N */
#define MEI_DEV_ID_TGP_LP 0xA0E0 /* Tiger Lake Point LP */
+#define MEI_DEV_ID_TGP_H 0x43E0 /* Tiger Lake Point H */
#define MEI_DEV_ID_MCC 0x4B70 /* Mule Creek Canyon (EHL) */
#define MEI_DEV_ID_MCC_4 0x4B75 /* Mule Creek Canyon 4 (EHL) */
# define PCI_CFG_HFS_1_D0I3_MSK 0x80000000
#define PCI_CFG_HFS_2 0x48
#define PCI_CFG_HFS_3 0x60
+# define PCI_CFG_HFS_3_FW_SKU_MSK 0x00000070
+# define PCI_CFG_HFS_3_FW_SKU_SPS 0x00000060
#define PCI_CFG_HFS_4 0x64
#define PCI_CFG_HFS_5 0x68
#define PCI_CFG_HFS_6 0x6C
#define MEI_CFG_FW_NM \
.quirk_probe = mei_me_fw_type_nm
-static bool mei_me_fw_type_sps(struct pci_dev *pdev)
+static bool mei_me_fw_type_sps_4(struct pci_dev *pdev)
{
u32 reg;
unsigned int devfn;
return (reg & 0xf0000) == 0xf0000;
}
-#define MEI_CFG_FW_SPS \
+#define MEI_CFG_FW_SPS_4 \
+ .quirk_probe = mei_me_fw_type_sps_4
+
+/**
+ * mei_me_fw_type_sps() - check for SPS SKU
+ *
+ * Read ME FW Status register to check for SPS Firmware.
+ * The SPS FW is only signaled in PCI function 0.
+ *
+ * @pdev: pci device
+ *
+ * Return: true in case of SPS firmware
+ */
+static bool mei_me_fw_type_sps(struct pci_dev *pdev)
+{
+ u32 reg;
+ u32 fw_type;
+ unsigned int devfn;
+
+ devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
+	pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_3, &reg);
+ trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_3", PCI_CFG_HFS_3, reg);
+ fw_type = (reg & PCI_CFG_HFS_3_FW_SKU_MSK);
+
+ dev_dbg(&pdev->dev, "fw type is %d\n", fw_type);
+
+ return fw_type == PCI_CFG_HFS_3_FW_SKU_SPS;
+}
+
+#define MEI_CFG_FW_SPS \
.quirk_probe = mei_me_fw_type_sps
#define MEI_CFG_FW_VER_SUPP \
};
/* PCH8 Lynx Point with quirk for SPS Firmware exclusion */
-static const struct mei_cfg mei_me_pch8_sps_cfg = {
+static const struct mei_cfg mei_me_pch8_sps_4_cfg = {
MEI_CFG_PCH8_HFS,
MEI_CFG_FW_VER_SUPP,
- MEI_CFG_FW_SPS,
+ MEI_CFG_FW_SPS_4,
+};
+
+/* LBG with quirk for SPS (4.0) Firmware exclusion */
+static const struct mei_cfg mei_me_pch12_sps_4_cfg = {
+ MEI_CFG_PCH8_HFS,
+ MEI_CFG_FW_VER_SUPP,
+ MEI_CFG_FW_SPS_4,
};
/* Cannon Lake and newer devices */
MEI_CFG_DMA_128,
};
-/* LBG with quirk for SPS Firmware exclusion */
+/* Cannon Lake with quirk for SPS 5.0 and newer Firmware exclusion */
static const struct mei_cfg mei_me_pch12_sps_cfg = {
+ MEI_CFG_PCH8_HFS,
+ MEI_CFG_FW_VER_SUPP,
+ MEI_CFG_DMA_128,
+ MEI_CFG_FW_SPS,
+};
+
+/* Cannon Lake with quirk for SPS 5.0 and newer Firmware exclusion
+ * w/o DMA support
+ */
+static const struct mei_cfg mei_me_pch12_nodma_sps_cfg = {
MEI_CFG_PCH8_HFS,
MEI_CFG_FW_VER_SUPP,
MEI_CFG_FW_SPS,
MEI_CFG_TRC,
};
+/* Tiger Lake with quirk for SPS 5.0 and newer Firmware exclusion */
+static const struct mei_cfg mei_me_pch15_sps_cfg = {
+ MEI_CFG_PCH8_HFS,
+ MEI_CFG_FW_VER_SUPP,
+ MEI_CFG_DMA_128,
+ MEI_CFG_TRC,
+ MEI_CFG_FW_SPS,
+};
+
/*
 * mei_cfg_list - A list of platform specific configurations.
* Note: has to be synchronized with enum mei_cfg_idx.
[MEI_ME_PCH7_CFG] = &mei_me_pch7_cfg,
[MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg,
[MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
- [MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg,
+ [MEI_ME_PCH8_SPS_4_CFG] = &mei_me_pch8_sps_4_cfg,
[MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg,
+ [MEI_ME_PCH12_SPS_4_CFG] = &mei_me_pch12_sps_4_cfg,
[MEI_ME_PCH12_SPS_CFG] = &mei_me_pch12_sps_cfg,
+ [MEI_ME_PCH12_SPS_NODMA_CFG] = &mei_me_pch12_nodma_sps_cfg,
[MEI_ME_PCH15_CFG] = &mei_me_pch15_cfg,
+ [MEI_ME_PCH15_SPS_CFG] = &mei_me_pch15_sps_cfg,
};
const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx)
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (c) 2012-2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2012-2020, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
* with quirk for Node Manager exclusion.
* @MEI_ME_PCH8_CFG: Platform Controller Hub Gen8 and newer
* client platforms.
- * @MEI_ME_PCH8_SPS_CFG: Platform Controller Hub Gen8 and newer
+ * @MEI_ME_PCH8_SPS_4_CFG: Platform Controller Hub Gen8 and newer
 * server platforms with quirk for
* SPS firmware exclusion.
* @MEI_ME_PCH12_CFG: Platform Controller Hub Gen12 and newer
- * @MEI_ME_PCH12_SPS_CFG: Platform Controller Hub Gen12 and newer
+ * @MEI_ME_PCH12_SPS_4_CFG: Platform Controller Hub Gen12 up to SPS 4.0
+ * server platforms with quirk for
+ * SPS firmware exclusion.
+ * @MEI_ME_PCH12_SPS_CFG: Platform Controller Hub Gen12 5.0 and newer
 * server platforms with quirk for
* SPS firmware exclusion.
* @MEI_ME_PCH15_CFG: Platform Controller Hub Gen15 and newer
+ * @MEI_ME_PCH15_SPS_CFG: Platform Controller Hub Gen15 and newer
+ * server platforms with quirk for
+ * SPS firmware exclusion.
* @MEI_ME_NUM_CFG: Upper Sentinel.
*/
enum mei_cfg_idx {
MEI_ME_PCH7_CFG,
MEI_ME_PCH_CPT_PBG_CFG,
MEI_ME_PCH8_CFG,
- MEI_ME_PCH8_SPS_CFG,
+ MEI_ME_PCH8_SPS_4_CFG,
MEI_ME_PCH12_CFG,
+ MEI_ME_PCH12_SPS_4_CFG,
MEI_ME_PCH12_SPS_CFG,
+ MEI_ME_PCH12_SPS_NODMA_CFG,
MEI_ME_PCH15_CFG,
+ MEI_ME_PCH15_SPS_CFG,
MEI_ME_NUM_CFG,
};
{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH7_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH7_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH7_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_4_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_4_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, MEI_ME_PCH8_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, MEI_ME_PCH8_SPS_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, MEI_ME_PCH8_SPS_4_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_SPS_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_4_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_4_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_SPS_4_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_3, MEI_ME_PCH8_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_3, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_SPS_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_3, MEI_ME_PCH12_SPS_NODMA_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_H, MEI_ME_PCH15_SPS_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_JSP_N, MEI_ME_PCH15_CFG)},
mmc->caps |= MMC_CAP_CMD23;
if (host->dram_access_quirk) {
+ /* Limit segments to 1 due to low available sram memory */
+ mmc->max_segs = 1;
/* Limit to the available sram memory */
- mmc->max_segs = SD_EMMC_SRAM_DATA_BUF_LEN / mmc->max_blk_size;
- mmc->max_blk_count = mmc->max_segs;
+ mmc->max_blk_count = SD_EMMC_SRAM_DATA_BUF_LEN /
+ mmc->max_blk_size;
} else {
mmc->max_blk_count = CMD_CFG_LENGTH_MASK;
mmc->max_segs = SD_EMMC_DESC_BUF_LEN /
static struct platform_driver owl_mmc_driver = {
.driver = {
.name = "owl_mmc",
- .of_match_table = of_match_ptr(owl_mmc_of_match),
+ .of_match_table = owl_mmc_of_match,
},
.probe = owl_mmc_probe,
.remove = owl_mmc_remove,
config &= ~CORE_CLK_PWRSAVE;
writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
- config = msm_host->dll_config;
- writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
+ if (msm_host->dll_config)
+ writel_relaxed(msm_host->dll_config,
+ host->ioaddr + msm_offset->core_dll_config);
if (msm_host->use_14lpp_dll_reset) {
config = readl_relaxed(host->ioaddr +
return -EROFS;
if (!len)
return 0;
- if (!mtd->oops_panic_write)
- mtd->oops_panic_write = true;
+ if (!master->oops_panic_write)
+ master->oops_panic_write = true;
return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
retlen, buf);
NS_DBG("switch_state: operation is unknown, try to find it\n");
- if (!ns_find_operation(ns, 0))
+ if (ns_find_operation(ns, 0))
return;
if ((ns->state & ACTION_MASK) &&
struct nand_chip *chip = &data->chip;
int ret;
- ret = mtd_device_unregister(mtd);
+ ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
if (data[IFLA_BAREUDP_SRCPORT_MIN])
conf->sport_min = nla_get_u16(data[IFLA_BAREUDP_SRCPORT_MIN]);
+ if (data[IFLA_BAREUDP_MULTIPROTO_MODE])
+ conf->multi_proto_mode = true;
+
return 0;
}
set_bit(0, priv->cfp.used);
set_bit(0, priv->cfp.unique);
+ /* Balance of_node_put() done by of_find_node_by_name() */
+ of_node_get(dn);
ports = of_find_node_by_name(dn, "ports");
if (ports) {
bcm_sf2_identify_ports(priv, ports);
return -ENOMEM;
}
+ /* set the real number of ports */
+ dev->ds->num_ports = dev->port_cnt;
+
return 0;
}
return -ENOMEM;
}
+ /* set the real number of ports */
+ dev->ds->num_ports = dev->port_cnt;
+
return 0;
}
static const struct of_device_id ksz9477_dt_ids[] = {
{ .compatible = "microchip,ksz9477" },
{ .compatible = "microchip,ksz9897" },
+ { .compatible = "microchip,ksz9893" },
{ .compatible = "microchip,ksz9567" },
{},
};
#define SJA1105_SIZE_VL_STATUS 8
+/* Insert into the global gate list, sorted by gate action time. */
+static int sja1105_insert_gate_entry(struct sja1105_gating_config *gating_cfg,
+ struct sja1105_rule *rule,
+ u8 gate_state, s64 entry_time,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_gate_entry *e;
+ int rc;
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ e->rule = rule;
+ e->gate_state = gate_state;
+ e->interval = entry_time;
+
+ if (list_empty(&gating_cfg->entries)) {
+ list_add(&e->list, &gating_cfg->entries);
+ } else {
+ struct sja1105_gate_entry *p;
+
+ list_for_each_entry(p, &gating_cfg->entries, list) {
+ if (p->interval == e->interval) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Gate conflict");
+ rc = -EBUSY;
+ goto err;
+ }
+
+ if (e->interval < p->interval)
+ break;
+ }
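+		/* If the loop fell through without breaking, p now aliases
+		 * the list head, so p->list.prev is the tail and the entry
+		 * is appended; otherwise it is inserted just before the
+		 * first entry with a larger interval.
+		 */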
+ list_add(&e->list, p->list.prev);
+ }
+
+ gating_cfg->num_entries++;
+
+ return 0;
+err:
+ kfree(e);
+ return rc;
+}
+
+/* The gate entries contain absolute times in their e->interval field. Convert
+ * that to proper intervals (i.e. "0, 5, 10, 15" to "5, 5, 5, 5").
+ */
+static void
+sja1105_gating_cfg_time_to_interval(struct sja1105_gating_config *gating_cfg,
+ u64 cycle_time)
+{
+ struct sja1105_gate_entry *last_e;
+ struct sja1105_gate_entry *e;
+ struct list_head *prev;
+
+ list_for_each_entry(e, &gating_cfg->entries, list) {
+ struct sja1105_gate_entry *p;
+
+ prev = e->list.prev;
+
+ if (prev == &gating_cfg->entries)
+ continue;
+
+ p = list_entry(prev, struct sja1105_gate_entry, list);
+ p->interval = e->interval - p->interval;
+ }
+ last_e = list_last_entry(&gating_cfg->entries,
+ struct sja1105_gate_entry, list);
+ last_e->interval = cycle_time - last_e->interval;
+}
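
A worked instance of the conversion the comment above describes, using plain arrays instead of the kernel list (all values hypothetical): entry times 0, 5, 10, 15 with a cycle time of 20 become four intervals of 5.

#include <stdio.h>

int main(void)
{
	/* Absolute gate action times within one cycle. */
	long long times[] = { 0, 5, 10, 15 };
	long long cycle_time = 20;
	int n = sizeof(times) / sizeof(times[0]), i;

	/* Each entry's interval is the gap to its successor... */
	for (i = 0; i < n - 1; i++)
		printf("interval %d = %lld\n", i, times[i + 1] - times[i]);
	/* ...and the last one runs until the cycle wraps. */
	printf("interval %d = %lld\n", n - 1, cycle_time - times[n - 1]);
	return 0;	/* prints 5, 5, 5, 5 */
}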
+
+static void sja1105_free_gating_config(struct sja1105_gating_config *gating_cfg)
+{
+ struct sja1105_gate_entry *e, *n;
+
+ list_for_each_entry_safe(e, n, &gating_cfg->entries, list) {
+ list_del(&e->list);
+ kfree(e);
+ }
+}
+
+static int sja1105_compose_gating_subschedule(struct sja1105_private *priv,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg;
+ struct sja1105_rule *rule;
+ s64 max_cycle_time = 0;
+ s64 its_base_time = 0;
+ int i, rc = 0;
+
+ sja1105_free_gating_config(gating_cfg);
+
+ list_for_each_entry(rule, &priv->flow_block.rules, list) {
+ if (rule->type != SJA1105_RULE_VL)
+ continue;
+ if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
+ continue;
+
+ if (max_cycle_time < rule->vl.cycle_time) {
+ max_cycle_time = rule->vl.cycle_time;
+ its_base_time = rule->vl.base_time;
+ }
+ }
+
+ if (!max_cycle_time)
+ return 0;
+
+ dev_dbg(priv->ds->dev, "max_cycle_time %lld its_base_time %lld\n",
+ max_cycle_time, its_base_time);
+
+ gating_cfg->base_time = its_base_time;
+ gating_cfg->cycle_time = max_cycle_time;
+ gating_cfg->num_entries = 0;
+
+ list_for_each_entry(rule, &priv->flow_block.rules, list) {
+ s64 time;
+ s64 rbt;
+
+ if (rule->type != SJA1105_RULE_VL)
+ continue;
+ if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
+ continue;
+
+ /* Calculate the difference between this gating schedule's
+ * base time, and the base time of the gating schedule with the
+ * longest cycle time. We call it the relative base time (rbt).
+ */
+ rbt = future_base_time(rule->vl.base_time, rule->vl.cycle_time,
+ its_base_time);
+ rbt -= its_base_time;
+
+ time = rbt;
+
+ for (i = 0; i < rule->vl.num_entries; i++) {
+ u8 gate_state = rule->vl.entries[i].gate_state;
+ s64 entry_time = time;
+
+ while (entry_time < max_cycle_time) {
+ rc = sja1105_insert_gate_entry(gating_cfg, rule,
+ gate_state,
+ entry_time,
+ extack);
+ if (rc)
+ goto err;
+
+ entry_time += rule->vl.cycle_time;
+ }
+ time += rule->vl.entries[i].interval;
+ }
+ }
+
+ sja1105_gating_cfg_time_to_interval(gating_cfg, max_cycle_time);
+
+ return 0;
+err:
+ sja1105_free_gating_config(gating_cfg);
+ return rc;
+}
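
The relative base time (rbt) above can be checked with small numbers. Assuming future_base_time() advances a base time in whole cycles until it is no earlier than the reference time (a sketch under that assumption, not the driver's implementation), a schedule with base time 2 and cycle time 5, measured against a reference base of 10, first fires at 12, so rbt = 2:

#include <stdio.h>

/* Sketch of future_base_time() under the stated assumption: advance
 * base by whole cycles until it reaches or passes t.
 */
static long long future_base_time(long long base, long long cycle,
				  long long t)
{
	long long n;

	if (base >= t)
		return base;
	n = (t - base + cycle - 1) / cycle;	/* cycles still to skip */
	return base + n * cycle;
}

int main(void)
{
	long long its_base_time = 10;
	long long rbt = future_base_time(2, 5, its_base_time) - its_base_time;

	printf("rbt = %lld\n", rbt);	/* prints 2 */
	return 0;
}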
+
/* The switch flow classification core implements TTEthernet, which 'thinks' in
* terms of Virtual Links (VL), a concept borrowed from ARINC 664 part 7.
* However it also has one other operating mode (VLLUPFORMAT=0) where it acts
NL_SET_ERR_MSG_MOD(extack,
"Can only redirect based on DMAC");
return -EOPNOTSUPP;
- } else if (key->type != SJA1105_KEY_VLAN_AWARE_VL) {
+ } else if ((priv->vlan_state == SJA1105_VLAN_BEST_EFFORT ||
+ priv->vlan_state == SJA1105_VLAN_FILTERING_FULL) &&
+ key->type != SJA1105_KEY_VLAN_AWARE_VL) {
NL_SET_ERR_MSG_MOD(extack,
"Can only redirect based on {DMAC, VID, PCP}");
return -EOPNOTSUPP;
kfree(rule);
}
- rc = sja1105_init_virtual_links(priv, extack);
+ rc = sja1105_compose_gating_subschedule(priv, extack);
if (rc)
return rc;
- return sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS);
-}
-
-/* Insert into the global gate list, sorted by gate action time. */
-static int sja1105_insert_gate_entry(struct sja1105_gating_config *gating_cfg,
- struct sja1105_rule *rule,
- u8 gate_state, s64 entry_time,
- struct netlink_ext_ack *extack)
-{
- struct sja1105_gate_entry *e;
- int rc;
-
- e = kzalloc(sizeof(*e), GFP_KERNEL);
- if (!e)
- return -ENOMEM;
-
- e->rule = rule;
- e->gate_state = gate_state;
- e->interval = entry_time;
-
- if (list_empty(&gating_cfg->entries)) {
- list_add(&e->list, &gating_cfg->entries);
- } else {
- struct sja1105_gate_entry *p;
-
- list_for_each_entry(p, &gating_cfg->entries, list) {
- if (p->interval == e->interval) {
- NL_SET_ERR_MSG_MOD(extack,
- "Gate conflict");
- rc = -EBUSY;
- goto err;
- }
-
- if (e->interval < p->interval)
- break;
- }
- list_add(&e->list, p->list.prev);
- }
-
- gating_cfg->num_entries++;
-
- return 0;
-err:
- kfree(e);
- return rc;
-}
-
-/* The gate entries contain absolute times in their e->interval field. Convert
- * that to proper intervals (i.e. "0, 5, 10, 15" to "5, 5, 5, 5").
- */
-static void
-sja1105_gating_cfg_time_to_interval(struct sja1105_gating_config *gating_cfg,
- u64 cycle_time)
-{
- struct sja1105_gate_entry *last_e;
- struct sja1105_gate_entry *e;
- struct list_head *prev;
-
- list_for_each_entry(e, &gating_cfg->entries, list) {
- struct sja1105_gate_entry *p;
-
- prev = e->list.prev;
-
- if (prev == &gating_cfg->entries)
- continue;
-
- p = list_entry(prev, struct sja1105_gate_entry, list);
- p->interval = e->interval - p->interval;
- }
- last_e = list_last_entry(&gating_cfg->entries,
- struct sja1105_gate_entry, list);
- if (last_e->list.prev != &gating_cfg->entries)
- last_e->interval = cycle_time - last_e->interval;
-}
-
-static void sja1105_free_gating_config(struct sja1105_gating_config *gating_cfg)
-{
- struct sja1105_gate_entry *e, *n;
-
- list_for_each_entry_safe(e, n, &gating_cfg->entries, list) {
- list_del(&e->list);
- kfree(e);
- }
-}
-
-static int sja1105_compose_gating_subschedule(struct sja1105_private *priv,
- struct netlink_ext_ack *extack)
-{
- struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg;
- struct sja1105_rule *rule;
- s64 max_cycle_time = 0;
- s64 its_base_time = 0;
- int i, rc = 0;
-
- list_for_each_entry(rule, &priv->flow_block.rules, list) {
- if (rule->type != SJA1105_RULE_VL)
- continue;
- if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
- continue;
-
- if (max_cycle_time < rule->vl.cycle_time) {
- max_cycle_time = rule->vl.cycle_time;
- its_base_time = rule->vl.base_time;
- }
- }
-
- if (!max_cycle_time)
- return 0;
-
- dev_dbg(priv->ds->dev, "max_cycle_time %lld its_base_time %lld\n",
- max_cycle_time, its_base_time);
-
- sja1105_free_gating_config(gating_cfg);
-
- gating_cfg->base_time = its_base_time;
- gating_cfg->cycle_time = max_cycle_time;
- gating_cfg->num_entries = 0;
-
- list_for_each_entry(rule, &priv->flow_block.rules, list) {
- s64 time;
- s64 rbt;
-
- if (rule->type != SJA1105_RULE_VL)
- continue;
- if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
- continue;
-
- /* Calculate the difference between this gating schedule's
- * base time, and the base time of the gating schedule with the
- * longest cycle time. We call it the relative base time (rbt).
- */
- rbt = future_base_time(rule->vl.base_time, rule->vl.cycle_time,
- its_base_time);
- rbt -= its_base_time;
-
- time = rbt;
-
- for (i = 0; i < rule->vl.num_entries; i++) {
- u8 gate_state = rule->vl.entries[i].gate_state;
- s64 entry_time = time;
-
- while (entry_time < max_cycle_time) {
- rc = sja1105_insert_gate_entry(gating_cfg, rule,
- gate_state,
- entry_time,
- extack);
- if (rc)
- goto err;
-
- entry_time += rule->vl.cycle_time;
- }
- time += rule->vl.entries[i].interval;
- }
- }
+ rc = sja1105_init_virtual_links(priv, extack);
+ if (rc)
+ return rc;
- sja1105_gating_cfg_time_to_interval(gating_cfg, max_cycle_time);
+ rc = sja1105_init_scheduling(priv);
+ if (rc < 0)
+ return rc;
- return 0;
-err:
- sja1105_free_gating_config(gating_cfg);
- return rc;
+ return sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS);
}
int sja1105_vl_gate(struct sja1105_private *priv, int port,
if (priv->vlan_state == SJA1105_VLAN_UNAWARE &&
key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
- dev_err(priv->ds->dev, "1: vlan state %d key type %d\n",
- priv->vlan_state, key->type);
NL_SET_ERR_MSG_MOD(extack,
"Can only gate based on DMAC");
return -EOPNOTSUPP;
- } else if (key->type != SJA1105_KEY_VLAN_AWARE_VL) {
- dev_err(priv->ds->dev, "2: vlan state %d key type %d\n",
- priv->vlan_state, key->type);
+ } else if ((priv->vlan_state == SJA1105_VLAN_BEST_EFFORT ||
+ priv->vlan_state == SJA1105_VLAN_FILTERING_FULL) &&
+ key->type != SJA1105_KEY_VLAN_AWARE_VL) {
NL_SET_ERR_MSG_MOD(extack,
"Can only gate based on {DMAC, VID, PCP}");
return -EOPNOTSUPP;
for (i = 0; i < 4; ++i)
aq_hw_write_reg(aq_hw,
HW_ATL_RPF_L3_SRCA_ADR(location + i),
- ipv6_src[i]);
+ ipv6_src[3 - i]);
}
void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
for (i = 0; i < 4; ++i)
aq_hw_write_reg(aq_hw,
HW_ATL_RPF_L3_DSTA_ADR(location + i),
- ipv6_dest[i]);
+ ipv6_dest[3 - i]);
}
u32 hw_atl_sem_ram_get(struct aq_hw_s *self)
*/
/* Register address for bitfield pif_rpf_l3_da0_i[31:0] */
-#define HW_ATL_RPF_L3_DSTA_ADR(filter) (0x000053B0 + (filter) * 0x4)
+#define HW_ATL_RPF_L3_DSTA_ADR(filter) (0x000053D0 + (filter) * 0x4)
/* Bitmask for bitfield l3_da0[1F:0] */
#define HW_ATL_RPF_L3_DSTA_MSK 0xFFFFFFFFu
/* Inverted bitmask for bitfield l3_da0[1F:0] */
static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
{
+ struct hwrm_stat_ctx_clr_stats_input req0 = {0};
struct hwrm_stat_ctx_free_input req = {0};
int i;
if (BNXT_CHIP_TYPE_NITRO_A0(bp))
return;
+ bnxt_hwrm_cmd_hdr_init(bp, &req0, HWRM_STAT_CTX_CLR_STATS, -1, -1);
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
mutex_lock(&bp->hwrm_cmd_lock);
if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
-
+ if (BNXT_FW_MAJ(bp) <= 20) {
+ req0.stat_ctx_id = req.stat_ctx_id;
+ _hwrm_send_message(bp, &req0, sizeof(req0),
+ HWRM_CMD_TIMEOUT);
+ }
_hwrm_send_message(bp, &req, sizeof(req),
HWRM_CMD_TIMEOUT);
bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
bp->tx_push_thresh = 0;
- if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
+ if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
+ BNXT_FW_MAJ(bp) > 217)
bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
static int bnxt_hwrm_ver_get(struct bnxt *bp)
{
struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
+ u16 fw_maj, fw_min, fw_bld, fw_rsv;
u32 dev_caps_cfg, hwrm_ver;
- int rc;
+ int rc, len;
bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
mutex_lock(&bp->hwrm_cmd_lock);
resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
resp->hwrm_intf_upd_8b);
- snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
- resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
- resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
+ fw_maj = le16_to_cpu(resp->hwrm_fw_major);
+ if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
+ fw_min = le16_to_cpu(resp->hwrm_fw_minor);
+ fw_bld = le16_to_cpu(resp->hwrm_fw_build);
+ fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
+ len = FW_VER_STR_LEN;
+ } else {
+ fw_maj = resp->hwrm_fw_maj_8b;
+ fw_min = resp->hwrm_fw_min_8b;
+ fw_bld = resp->hwrm_fw_bld_8b;
+ fw_rsv = resp->hwrm_fw_rsvd_8b;
+ len = BC_HWRM_STR_LEN;
+ }
+ bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
+ snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
+ fw_rsv);
if (strlen(resp->active_pkg_name)) {
int fw_ver_len = strlen(bp->fw_ver_str);
dev->ethtool_ops = &bnxt_ethtool_ops;
pci_set_drvdata(pdev, dev);
- bnxt_vpd_read_info(bp);
+ if (BNXT_PF(bp))
+ bnxt_vpd_read_info(bp);
rc = bnxt_alloc_hwrm_resources(bp);
if (rc)
#define PHY_VER_STR_LEN (FW_VER_STR_LEN - BC_HWRM_STR_LEN)
char fw_ver_str[FW_VER_STR_LEN];
char hwrm_ver_supp[FW_VER_STR_LEN];
+ u64 fw_ver_code;
+#define BNXT_FW_VER_CODE(maj, min, bld, rsv) \
+ ((u64)(maj) << 48 | (u64)(min) << 32 | (u64)(bld) << 16 | (rsv))
+#define BNXT_FW_MAJ(bp) ((bp)->fw_ver_code >> 48)
+
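
The version code packs 16 bits per component, which is why BNXT_FW_MAJ() recovers the major number with a single shift. A standalone sketch of the same encoding (version numbers hypothetical):

#include <stdio.h>
#include <stdint.h>

#define FW_VER_CODE(maj, min, bld, rsv) \
	((uint64_t)(maj) << 48 | (uint64_t)(min) << 32 | \
	 (uint64_t)(bld) << 16 | (uint64_t)(rsv))
#define FW_MAJ(code)	((code) >> 48)

int main(void)
{
	uint64_t code = FW_VER_CODE(218, 1, 69, 0);

	printf("code = %#llx, major = %llu\n",
	       (unsigned long long)code,
	       (unsigned long long)FW_MAJ(code));	/* major = 218 */
	return 0;
}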
__be16 vxlan_port;
u8 vxlan_port_cnt;
__le16 vxlan_fw_dst_port_id;
}
}
+ bp->pf.active_vfs = 0;
kfree(bp->pf.vf);
bp->pf.vf = NULL;
}
bnxt_free_vf_resources(bp);
- bp->pf.active_vfs = 0;
/* Reclaim all resources for the PF. */
rtnl_lock();
bnxt_restore_pf_fw_resources(bp);
}
static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp,
- struct flow_block_offload *f)
+ struct flow_block_offload *f, void *data,
+ void (*cleanup)(struct flow_block_cb *block_cb))
{
struct bnxt_flower_indr_block_cb_priv *cb_priv;
struct flow_block_cb *block_cb;
cb_priv->bp = bp;
list_add(&cb_priv->list, &bp->tc_indr_block_list);
- block_cb = flow_block_cb_alloc(bnxt_tc_setup_indr_block_cb,
- cb_priv, cb_priv,
- bnxt_tc_setup_indr_rel);
+ block_cb = flow_indr_block_cb_alloc(bnxt_tc_setup_indr_block_cb,
+ cb_priv, cb_priv,
+ bnxt_tc_setup_indr_rel, f,
+ netdev, data, bp, cleanup);
if (IS_ERR(block_cb)) {
list_del(&cb_priv->list);
kfree(cb_priv);
if (!block_cb)
return -ENOENT;
- flow_block_cb_remove(block_cb, f);
+ flow_indr_block_cb_remove(block_cb, f);
list_del(&block_cb->driver_list);
break;
default:
}
static int bnxt_tc_setup_indr_cb(struct net_device *netdev, void *cb_priv,
- enum tc_setup_type type, void *type_data)
+ enum tc_setup_type type, void *type_data,
+ void *data,
+ void (*cleanup)(struct flow_block_cb *block_cb))
{
if (!bnxt_is_netdev_indr_offload(netdev))
return -EOPNOTSUPP;
switch (type) {
case TC_SETUP_BLOCK:
- return bnxt_tc_setup_indr_block(netdev, cb_priv, type_data);
+ return bnxt_tc_setup_indr_block(netdev, cb_priv, type_data, data,
+ cleanup);
default:
break;
}
return;
flow_indr_dev_unregister(bnxt_tc_setup_indr_cb, bp,
- bnxt_tc_setup_indr_block_cb);
+ bnxt_tc_setup_indr_rel);
rhashtable_destroy(&tc_info->flow_table);
rhashtable_destroy(&tc_info->l2_table);
rhashtable_destroy(&tc_info->decap_l2_table);
genet_dma_ring_regs[r]);
}
-static bool bcmgenet_hfb_is_filter_enabled(struct bcmgenet_priv *priv,
- u32 f_index)
-{
- u32 offset;
- u32 reg;
-
- offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
- reg = bcmgenet_hfb_reg_readl(priv, offset);
- return !!(reg & (1 << (f_index % 32)));
-}
-
static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
{
u32 offset;
bcmgenet_hfb_reg_writel(priv, reg, offset);
}
-static int bcmgenet_hfb_find_unused_filter(struct bcmgenet_priv *priv)
-{
- u32 f_index;
-
- /* First MAX_NUM_OF_FS_RULES are reserved for Rx NFC filters */
- for (f_index = MAX_NUM_OF_FS_RULES;
- f_index < priv->hw_params->hfb_filter_cnt; f_index++)
- if (!bcmgenet_hfb_is_filter_enabled(priv, f_index))
- return f_index;
-
- return -ENOMEM;
-}
-
static int bcmgenet_hfb_validate_mask(void *mask, size_t size)
{
while (size) {
{
struct ethtool_rx_flow_spec *fs = &rule->fs;
int err = 0, offset = 0, f_length = 0;
- u16 val_16, mask_16;
u8 val_8, mask_8;
+ __be16 val_16;
+ u16 mask_16;
size_t size;
u32 *f_data;
return err;
}
-/* bcmgenet_hfb_add_filter
- *
- * Add new filter to Hardware Filter Block to match and direct Rx traffic to
- * desired Rx queue.
- *
- * f_data is an array of unsigned 32-bit integers where each 32-bit integer
- * provides filter data for 2 bytes (4 nibbles) of Rx frame:
- *
- * bits 31:20 - unused
- * bit 19 - nibble 0 match enable
- * bit 18 - nibble 1 match enable
- * bit 17 - nibble 2 match enable
- * bit 16 - nibble 3 match enable
- * bits 15:12 - nibble 0 data
- * bits 11:8 - nibble 1 data
- * bits 7:4 - nibble 2 data
- * bits 3:0 - nibble 3 data
- *
- * Example:
- * In order to match:
- * - Ethernet frame type = 0x0800 (IP)
- * - IP version field = 4
- * - IP protocol field = 0x11 (UDP)
- *
- * The following filter is needed:
- * u32 hfb_filter_ipv4_udp[] = {
- * Rx frame offset 0x00: 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- * Rx frame offset 0x08: 0x00000000, 0x00000000, 0x000F0800, 0x00084000,
- * Rx frame offset 0x10: 0x00000000, 0x00000000, 0x00000000, 0x00030011,
- * };
- *
- * To add the filter to HFB and direct the traffic to Rx queue 0, call:
- * bcmgenet_hfb_add_filter(priv, hfb_filter_ipv4_udp,
- * ARRAY_SIZE(hfb_filter_ipv4_udp), 0);
- */
-int bcmgenet_hfb_add_filter(struct bcmgenet_priv *priv, u32 *f_data,
- u32 f_length, u32 rx_queue)
-{
- int f_index;
-
- f_index = bcmgenet_hfb_find_unused_filter(priv);
- if (f_index < 0)
- return -ENOMEM;
-
- if (f_length > priv->hw_params->hfb_filter_size)
- return -EINVAL;
-
- bcmgenet_hfb_set_filter(priv, f_data, f_length, rx_queue, f_index);
- bcmgenet_hfb_enable_filter(priv, f_index);
-
- return 0;
-}
-
/* bcmgenet_hfb_clear
*
* Clear Hardware Filter Block and disable all filtering.
goto out;
}
- if (skb_padto(skb, ETH_ZLEN)) {
- ret = NETDEV_TX_OK;
- goto out;
- }
-
/* Retain how many bytes will be sent on the wire, without TSB inserted
* by transmit checksum offload
*/
len_stat = (size << DMA_BUFLENGTH_SHIFT) |
(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT);
+ /* Note: if we ever change from DMA_TX_APPEND_CRC below we
+ * will need to restore software padding of "runt" packets
+ */
if (!i) {
len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
if (skb->ip_summed == CHECKSUM_PARTIAL)
rtnl_lock();
- /* We probably don't have netdev yet */
- if (!netdev || !netif_running(netdev))
+	/* Could be a second call, or maybe we don't have netdev yet */
+ if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
goto done;
/* We needn't recover from permanent error */
err = macb_phylink_connect(bp);
if (err)
- goto napi_exit;
+ goto reset_hw;
netif_tx_start_all_queues(dev);
return 0;
-napi_exit:
+reset_hw:
+ macb_reset_hw(bp);
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
napi_disable(&queue->napi);
+ macb_free_consistent(bp);
pm_exit:
pm_runtime_put_sync(&bp->pdev->dev);
return err;
{
struct macb *bp = netdev_priv(netdev);
- wol->supported = 0;
- wol->wolopts = 0;
-
- if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET)
+ if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
phylink_ethtool_get_wol(bp->phylink, wol);
+ wol->supported |= WAKE_MAGIC;
+
+ if (bp->wol & MACB_WOL_ENABLED)
+ wol->wolopts |= WAKE_MAGIC;
+ }
}
static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
struct macb *bp = netdev_priv(netdev);
int ret;
+ /* Pass the order to phylink layer */
ret = phylink_ethtool_set_wol(bp->phylink, wol);
- if (!ret)
- return 0;
+ /* Don't manage WoL on MAC if handled by the PHY
+ * or if there's a failure in talking to the PHY
+ */
+ if (!ret || ret != -EOPNOTSUPP)
+ return ret;
if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
(wol->wolopts & ~WAKE_MAGIC))
static struct sifive_fu540_macb_mgmt *mgmt;
-/* Initialize and start the Receiver and Transmit subsystems */
-static int at91ether_start(struct net_device *dev)
+static int at91ether_alloc_coherent(struct macb *lp)
{
- struct macb *lp = netdev_priv(dev);
struct macb_queue *q = &lp->queues[0];
- struct macb_dma_desc *desc;
- dma_addr_t addr;
- u32 ctl;
- int i;
q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
(AT91ETHER_MAX_RX_DESCR *
return -ENOMEM;
}
+ return 0;
+}
+
+static void at91ether_free_coherent(struct macb *lp)
+{
+ struct macb_queue *q = &lp->queues[0];
+
+ if (q->rx_ring) {
+ dma_free_coherent(&lp->pdev->dev,
+ AT91ETHER_MAX_RX_DESCR *
+ macb_dma_desc_get_size(lp),
+ q->rx_ring, q->rx_ring_dma);
+ q->rx_ring = NULL;
+ }
+
+ if (q->rx_buffers) {
+ dma_free_coherent(&lp->pdev->dev,
+ AT91ETHER_MAX_RX_DESCR *
+ AT91ETHER_MAX_RBUFF_SZ,
+ q->rx_buffers, q->rx_buffers_dma);
+ q->rx_buffers = NULL;
+ }
+}
+
+/* Initialize and start the Receiver and Transmit subsystems */
+static int at91ether_start(struct macb *lp)
+{
+ struct macb_queue *q = &lp->queues[0];
+ struct macb_dma_desc *desc;
+ dma_addr_t addr;
+ u32 ctl;
+ int i, ret;
+
+ ret = at91ether_alloc_coherent(lp);
+ if (ret)
+ return ret;
+
addr = q->rx_buffers_dma;
for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
desc = macb_rx_desc(q, i);
ctl = macb_readl(lp, NCR);
macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
+ /* Enable MAC interrupts */
+ macb_writel(lp, IER, MACB_BIT(RCOMP) |
+ MACB_BIT(RXUBR) |
+ MACB_BIT(ISR_TUND) |
+ MACB_BIT(ISR_RLE) |
+ MACB_BIT(TCOMP) |
+ MACB_BIT(ISR_ROVR) |
+ MACB_BIT(HRESP));
+
return 0;
}
+static void at91ether_stop(struct macb *lp)
+{
+ u32 ctl;
+
+ /* Disable MAC interrupts */
+ macb_writel(lp, IDR, MACB_BIT(RCOMP) |
+ MACB_BIT(RXUBR) |
+ MACB_BIT(ISR_TUND) |
+ MACB_BIT(ISR_RLE) |
+ MACB_BIT(TCOMP) |
+ MACB_BIT(ISR_ROVR) |
+ MACB_BIT(HRESP));
+
+ /* Disable Receiver and Transmitter */
+ ctl = macb_readl(lp, NCR);
+ macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
+
+ /* Free resources. */
+ at91ether_free_coherent(lp);
+}
+
/* Open the ethernet interface */
static int at91ether_open(struct net_device *dev)
{
macb_set_hwaddr(lp);
- ret = at91ether_start(dev);
+ ret = at91ether_start(lp);
if (ret)
- return ret;
-
- /* Enable MAC interrupts */
- macb_writel(lp, IER, MACB_BIT(RCOMP) |
- MACB_BIT(RXUBR) |
- MACB_BIT(ISR_TUND) |
- MACB_BIT(ISR_RLE) |
- MACB_BIT(TCOMP) |
- MACB_BIT(ISR_ROVR) |
- MACB_BIT(HRESP));
+ goto pm_exit;
ret = macb_phylink_connect(lp);
if (ret)
- return ret;
+ goto stop;
netif_start_queue(dev);
return 0;
+
+stop:
+ at91ether_stop(lp);
+pm_exit:
+ pm_runtime_put_sync(&lp->pdev->dev);
+ return ret;
}
/* Close the interface */
static int at91ether_close(struct net_device *dev)
{
struct macb *lp = netdev_priv(dev);
- struct macb_queue *q = &lp->queues[0];
- u32 ctl;
-
- /* Disable Receiver and Transmitter */
- ctl = macb_readl(lp, NCR);
- macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
-
- /* Disable MAC interrupts */
- macb_writel(lp, IDR, MACB_BIT(RCOMP) |
- MACB_BIT(RXUBR) |
- MACB_BIT(ISR_TUND) |
- MACB_BIT(ISR_RLE) |
- MACB_BIT(TCOMP) |
- MACB_BIT(ISR_ROVR) |
- MACB_BIT(HRESP));
netif_stop_queue(dev);
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
- dma_free_coherent(&lp->pdev->dev,
- AT91ETHER_MAX_RX_DESCR *
- macb_dma_desc_get_size(lp),
- q->rx_ring, q->rx_ring_dma);
- q->rx_ring = NULL;
-
- dma_free_coherent(&lp->pdev->dev,
- AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
- q->rx_buffers, q->rx_buffers_dma);
- q->rx_buffers = NULL;
+ at91ether_stop(lp);
return pm_runtime_put(&lp->pdev->dev);
}
bp->wol = 0;
if (of_get_property(np, "magic-packet", NULL))
bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
- device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
+ device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
spin_lock_init(&bp->lock);
bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT);
}
- netif_carrier_off(netdev);
if (bp->ptp_info)
bp->ptp_info->ptp_remove(netdev);
- pm_runtime_force_suspend(dev);
+ if (!device_may_wakeup(dev))
+ pm_runtime_force_suspend(dev);
return 0;
}
if (!netif_running(netdev))
return 0;
- pm_runtime_force_resume(dev);
+ if (!device_may_wakeup(dev))
+ pm_runtime_force_resume(dev);
if (bp->wol & MACB_WOL_ENABLED) {
macb_writel(bp, IDR, MACB_BIT(WOL));
struct net_device *netdev = dev_get_drvdata(dev);
struct macb *bp = netdev_priv(netdev);
- if (!(device_may_wakeup(&bp->dev->dev))) {
+ if (!(device_may_wakeup(dev))) {
clk_disable_unprepare(bp->tx_clk);
clk_disable_unprepare(bp->hclk);
clk_disable_unprepare(bp->pclk);
struct net_device *netdev = dev_get_drvdata(dev);
struct macb *bp = netdev_priv(netdev);
- if (!(device_may_wakeup(&bp->dev->dev))) {
+ if (!(device_may_wakeup(dev))) {
clk_prepare_enable(bp->pclk);
clk_prepare_enable(bp->hclk);
clk_prepare_enable(bp->tx_clk);
u8 mem_type[CTXT_INGRESS + 1] = { 0 };
struct cudbg_buffer temp_buff = { 0 };
struct cudbg_ch_cntxt *buff;
- u64 *dst_off, *src_off;
u8 *ctx_buf;
u8 i, k;
int rc;
}
for (j = 0; j < max_ctx_qid; j++) {
+ __be64 *dst_off;
+ u64 *src_off;
+
src_off = (u64 *)(ctx_buf + j * SGE_CTXT_SIZE);
- dst_off = (u64 *)buff->data;
+ dst_off = (__be64 *)buff->data;
/* The data is stored in 64-bit cpu order. Convert it
* to big endian before parsing.
((val & 0x02) << 5) |
((val & 0x01) << 7);
}
+
+extern const char * const dcb_ver_array[];
+
#define CXGB4_DCB_ENABLED true
#else /* !CONFIG_CHELSIO_T4_DCB */
};
#ifdef CONFIG_CHELSIO_T4_DCB
-extern char *dcb_ver_array[];
/* Data Center Briging information for each port.
*/
/**
* lmm_to_fw_caps - translate ethtool Link Mode Mask to Firmware
* capabilities
- * @et_lmm: ethtool Link Mode Mask
+ * @link_mode_mask: ethtool Link Mode Mask
*
* Translate ethtool Link Mode Mask into a Firmware Port capabilities
* value.
unsigned int tid, bool dip, bool sip, bool dp,
bool sp)
{
+ u8 *nat_lp = (u8 *)&f->fs.nat_lport;
+ u8 *nat_fp = (u8 *)&f->fs.nat_fport;
+
if (dip) {
if (f->fs.type) {
set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W,
}
set_tcb_field(adap, f, tid, TCB_PDU_HDR_LEN_W, WORD_MASK,
- (dp ? f->fs.nat_lport : 0) |
- (sp ? f->fs.nat_fport << 16 : 0), 1);
+ (dp ? (nat_lp[1] | nat_lp[0] << 8) : 0) |
+ (sp ? (nat_fp[1] << 16 | nat_fp[0] << 24) : 0),
+ 1);
}
/* Validate filter spec against configuration done on the card. */
fwr->fpm = htons(f->fs.mask.fport);
if (adapter->params.filter2_wr_support) {
+ u8 *nat_lp = (u8 *)&f->fs.nat_lport;
+ u8 *nat_fp = (u8 *)&f->fs.nat_fport;
+
fwr->natmode_to_ulp_type =
FW_FILTER2_WR_ULP_TYPE_V(f->fs.nat_mode ?
ULP_MODE_TCPDDP :
FW_FILTER2_WR_NATMODE_V(f->fs.nat_mode);
memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip));
memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip));
- fwr->newlport = htons(f->fs.nat_lport);
- fwr->newfport = htons(f->fs.nat_fport);
+ fwr->newlport = htons(nat_lp[1] | nat_lp[0] << 8);
+ fwr->newfport = htons(nat_fp[1] | nat_fp[0] << 8);
}
/* Mark the filter as "pending" and ship off the Filter Work Request.
struct in_addr *addr;
addr = (struct in_addr *)ipmask;
- if (addr->s_addr == 0xffffffff)
+ if (addr->s_addr == htonl(0xffffffff))
return true;
} else if (family == AF_INET6) {
struct in6_addr *addr6;
addr6 = (struct in6_addr *)ipmask;
- if (addr6->s6_addr32[0] == 0xffffffff &&
- addr6->s6_addr32[1] == 0xffffffff &&
- addr6->s6_addr32[2] == 0xffffffff &&
- addr6->s6_addr32[3] == 0xffffffff)
+ if (addr6->s6_addr32[0] == htonl(0xffffffff) &&
+ addr6->s6_addr32[1] == htonl(0xffffffff) &&
+ addr6->s6_addr32[2] == htonl(0xffffffff) &&
+ addr6->s6_addr32[3] == htonl(0xffffffff))
return true;
}
return false;
* or -1
* @addr: the new MAC address value
* @persist: whether a new MAC allocation should be persistent
- * @add_smt: if true also add the address to the HW SMT
+ * @smt_idx: the destination to store the new SMT index.
*
* Modifies an MPS filter and sets it to the new MAC address if
* @tcam_idx >= 0, or adds the MAC address to a new filter if
* @stid: the server TID
* @sip: local IP address to bind server to
* @sport: the server's TCP port
+ * @vlan: the VLAN header information
* @queue: queue to direct messages from this server to
*
* Create an IP server for the given port and address.
/* Clear out filter specifications */
memset(&f->fs, 0, sizeof(struct ch_filter_specification));
- f->fs.val.lport = cpu_to_be16(sport);
+ f->fs.val.lport = be16_to_cpu(sport);
f->fs.mask.lport = ~0;
val = (u8 *)&sip;
if ((val[0] | val[1] | val[2] | val[3]) != 0) {
static int cfg_queues(struct adapter *adap)
{
u32 avail_qsets, avail_eth_qsets, avail_uld_qsets;
- u32 i, n10g = 0, qidx = 0, n1g = 0;
u32 ncpus = num_online_cpus();
u32 niqflint, neq, num_ulds;
struct sge *s = &adap->sge;
+ u32 i, n10g = 0, qidx = 0;
u32 q10g = 0, q1g;
/* Reduce memory usage in kdump environment, disable all offload. */
if (n10g)
q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
- n1g = adap->params.nports - n10g;
#ifdef CONFIG_CHELSIO_T4_DCB
/* For Data Center Bridging support we need to be able to support up
* to 8 Traffic Priorities; each of which will be assigned to its
else
q10g = max(8U, q10g);
- while ((q10g * n10g) > (avail_eth_qsets - n1g * q1g))
+ while ((q10g * n10g) >
+ (avail_eth_qsets - (adap->params.nports - n10g) * q1g))
q10g--;
#else /* !CONFIG_CHELSIO_T4_DCB */
}
/**
+ * cxgb4_ptp_adjfreq - Adjust frequency of PHC cycle counter
* @ptp: ptp clock structure
* @ppb: Desired frequency change in parts per billion
*
/**
* cxgb4_ptp_fineadjtime - Shift the time of the hardware clock
- * @ptp: ptp clock structure
+ * @adapter: board private structure
* @delta: Desired change in nanoseconds
*
* Adjust the timer by resetting the timecounter structure.
PEDIT_FIELDS(IP6_, DST_63_32, 4, nat_lip, 4),
PEDIT_FIELDS(IP6_, DST_95_64, 4, nat_lip, 8),
PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
- PEDIT_FIELDS(TCP_, SPORT, 2, nat_fport, 0),
- PEDIT_FIELDS(TCP_, DPORT, 2, nat_lport, 0),
- PEDIT_FIELDS(UDP_, SPORT, 2, nat_fport, 0),
- PEDIT_FIELDS(UDP_, DPORT, 2, nat_lport, 0),
};
static struct ch_tc_flower_entry *allocate_flower_entry(void)
struct flow_match_ports match;
flow_rule_match_ports(rule, &match);
- fs->val.lport = cpu_to_be16(match.key->dst);
- fs->mask.lport = cpu_to_be16(match.mask->dst);
- fs->val.fport = cpu_to_be16(match.key->src);
- fs->mask.fport = cpu_to_be16(match.mask->src);
+ fs->val.lport = be16_to_cpu(match.key->dst);
+ fs->mask.lport = be16_to_cpu(match.mask->dst);
+ fs->val.fport = be16_to_cpu(match.key->src);
+ fs->mask.fport = be16_to_cpu(match.mask->src);
/* also initialize nat_lport/fport to same values */
- fs->nat_lport = cpu_to_be16(match.key->dst);
- fs->nat_fport = cpu_to_be16(match.key->src);
+ fs->nat_lport = fs->val.lport;
+ fs->nat_fport = fs->val.fport;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
switch (offset) {
case PEDIT_TCP_SPORT_DPORT:
if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
- offload_pedit(fs, cpu_to_be32(val) >> 16,
- cpu_to_be32(mask) >> 16,
- TCP_SPORT);
+ fs->nat_fport = val;
else
- offload_pedit(fs, cpu_to_be32(val),
- cpu_to_be32(mask), TCP_DPORT);
+ fs->nat_lport = val >> 16;
}
fs->nat_mode = NAT_MODE_ALL;
break;
switch (offset) {
case PEDIT_UDP_SPORT_DPORT:
if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
- offload_pedit(fs, cpu_to_be32(val) >> 16,
- cpu_to_be32(mask) >> 16,
- UDP_SPORT);
+ fs->nat_fport = val;
else
- offload_pedit(fs, cpu_to_be32(val),
- cpu_to_be32(mask), UDP_DPORT);
+ fs->nat_lport = val >> 16;
}
fs->nat_mode = NAT_MODE_ALL;
}
bool next_header)
{
unsigned int i, j;
- u32 val, mask;
+ __be32 val, mask;
int off, err;
bool found;
const struct cxgb4_next_header *next;
bool found = false;
unsigned int i, j;
- u32 val, mask;
+ __be32 val, mask;
int off;
if (t->table[link_uhtid - 1].link_handle) {
/* Try to find matches that allow jumps to next header. */
for (i = 0; next[i].jump; i++) {
- if (next[i].offoff != cls->knode.sel->offoff ||
- next[i].shift != cls->knode.sel->offshift ||
- next[i].mask != cls->knode.sel->offmask ||
- next[i].offset != cls->knode.sel->off)
+ if (next[i].sel.offoff != cls->knode.sel->offoff ||
+ next[i].sel.offshift != cls->knode.sel->offshift ||
+ next[i].sel.offmask != cls->knode.sel->offmask ||
+ next[i].sel.off != cls->knode.sel->off)
continue;
/* Found a possible candidate. Find a key that
val = cls->knode.sel->keys[j].val;
mask = cls->knode.sel->keys[j].mask;
- if (next[i].match_off == off &&
- next[i].match_val == val &&
- next[i].match_mask == mask) {
+ if (next[i].key.off == off &&
+ next[i].key.val == val &&
+ next[i].key.mask == mask) {
found = true;
break;
}
struct cxgb4_match_field {
int off; /* Offset from the beginning of the header to match */
/* Fill the value/mask pair in the spec if matched */
- int (*val)(struct ch_filter_specification *f, u32 val, u32 mask);
+ int (*val)(struct ch_filter_specification *f, __be32 val, __be32 mask);
};
/* IPv4 match fields */
static inline int cxgb4_fill_ipv4_tos(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
f->val.tos = (ntohl(val) >> 16) & 0x000000FF;
f->mask.tos = (ntohl(mask) >> 16) & 0x000000FF;
}
static inline int cxgb4_fill_ipv4_frag(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
u32 mask_val;
u8 frag_val;
}
static inline int cxgb4_fill_ipv4_proto(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
f->val.proto = (ntohl(val) >> 16) & 0x000000FF;
f->mask.proto = (ntohl(mask) >> 16) & 0x000000FF;
}
static inline int cxgb4_fill_ipv4_src_ip(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
memcpy(&f->val.fip[0], &val, sizeof(u32));
memcpy(&f->mask.fip[0], &mask, sizeof(u32));
}
static inline int cxgb4_fill_ipv4_dst_ip(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
memcpy(&f->val.lip[0], &val, sizeof(u32));
memcpy(&f->mask.lip[0], &mask, sizeof(u32));
/* IPv6 match fields */
static inline int cxgb4_fill_ipv6_tos(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
f->val.tos = (ntohl(val) >> 20) & 0x000000FF;
f->mask.tos = (ntohl(mask) >> 20) & 0x000000FF;
}
static inline int cxgb4_fill_ipv6_proto(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
f->val.proto = (ntohl(val) >> 8) & 0x000000FF;
f->mask.proto = (ntohl(mask) >> 8) & 0x000000FF;
}
static inline int cxgb4_fill_ipv6_src_ip0(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
memcpy(&f->val.fip[0], &val, sizeof(u32));
memcpy(&f->mask.fip[0], &mask, sizeof(u32));
}
static inline int cxgb4_fill_ipv6_src_ip1(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
memcpy(&f->val.fip[4], &val, sizeof(u32));
memcpy(&f->mask.fip[4], &mask, sizeof(u32));
}
static inline int cxgb4_fill_ipv6_src_ip2(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
memcpy(&f->val.fip[8], &val, sizeof(u32));
memcpy(&f->mask.fip[8], &mask, sizeof(u32));
}
static inline int cxgb4_fill_ipv6_src_ip3(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
memcpy(&f->val.fip[12], &val, sizeof(u32));
memcpy(&f->mask.fip[12], &mask, sizeof(u32));
}
static inline int cxgb4_fill_ipv6_dst_ip0(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
memcpy(&f->val.lip[0], &val, sizeof(u32));
memcpy(&f->mask.lip[0], &mask, sizeof(u32));
}
static inline int cxgb4_fill_ipv6_dst_ip1(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
memcpy(&f->val.lip[4], &val, sizeof(u32));
memcpy(&f->mask.lip[4], &mask, sizeof(u32));
}
static inline int cxgb4_fill_ipv6_dst_ip2(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
memcpy(&f->val.lip[8], &val, sizeof(u32));
memcpy(&f->mask.lip[8], &mask, sizeof(u32));
}
static inline int cxgb4_fill_ipv6_dst_ip3(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
memcpy(&f->val.lip[12], &val, sizeof(u32));
memcpy(&f->mask.lip[12], &mask, sizeof(u32));
/* TCP/UDP match */
static inline int cxgb4_fill_l4_ports(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
f->val.fport = ntohl(val) >> 16;
f->mask.fport = ntohl(mask) >> 16;
};
struct cxgb4_next_header {
- unsigned int offset; /* Offset to next header */
- /* offset, shift, and mask added to offset above
+	/* Offset, shift, and mask added to the beginning of the header
	 * to get to the next header. Useful when using a header
	 * field's value to jump to the next header, such as the IHL field
	 * in the IPv4 header.
*/
- unsigned int offoff;
- u32 shift;
- u32 mask;
- /* match criteria to make this jump */
- unsigned int match_off;
- u32 match_val;
- u32 match_mask;
+ struct tc_u32_sel sel;
+ struct tc_u32_key key;
/* location of jump to make */
const struct cxgb4_match_field *jump;
};
* IPv4 header.
*/
static const struct cxgb4_next_header cxgb4_ipv4_jumps[] = {
- { .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF,
- .match_off = 8, .match_val = 0x600, .match_mask = 0xFF00,
- .jump = cxgb4_tcp_fields },
- { .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF,
- .match_off = 8, .match_val = 0x1100, .match_mask = 0xFF00,
- .jump = cxgb4_udp_fields },
- { .jump = NULL }
+ {
+ /* TCP Jump */
+ .sel = {
+ .off = 0,
+ .offoff = 0,
+ .offshift = 6,
+ .offmask = cpu_to_be16(0x0f00),
+ },
+ .key = {
+ .off = 8,
+ .val = cpu_to_be32(0x00060000),
+ .mask = cpu_to_be32(0x00ff0000),
+ },
+ .jump = cxgb4_tcp_fields,
+ },
+ {
+ /* UDP Jump */
+ .sel = {
+ .off = 0,
+ .offoff = 0,
+ .offshift = 6,
+ .offmask = cpu_to_be16(0x0f00),
+ },
+ .key = {
+ .off = 8,
+ .val = cpu_to_be32(0x00110000),
+ .mask = cpu_to_be32(0x00ff0000),
+ },
+ .jump = cxgb4_udp_fields,
+ },
+ { .jump = NULL },
};
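
The IPv4 jump above leans on tc u32's variable-offset rule: the 16-bit word at offoff is ANDed with offmask (kept in big endian) and shifted right by offshift, turning the IHL field into a byte offset of IHL * 4. A minimal sketch of that arithmetic (userspace, values illustrative):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	/* First 16 bits of an IPv4 header: version 4, IHL 5, TOS 0. */
	uint16_t first_word = htons(0x4500);
	uint16_t offmask = htons(0x0f00);	/* matches .offmask above */
	unsigned int off = ntohs(first_word & offmask) >> 6;

	printf("transport header offset = %u bytes\n", off);	/* 20 */
	return 0;
}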
/* Accept a rule with a jump directly past the 40 Bytes of IPv6 fixed header
* to get to transport layer header.
*/
static const struct cxgb4_next_header cxgb4_ipv6_jumps[] = {
- { .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0,
- .match_off = 4, .match_val = 0x60000, .match_mask = 0xFF0000,
- .jump = cxgb4_tcp_fields },
- { .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0,
- .match_off = 4, .match_val = 0x110000, .match_mask = 0xFF0000,
- .jump = cxgb4_udp_fields },
- { .jump = NULL }
+ {
+ /* TCP Jump */
+ .sel = {
+ .off = 40,
+ .offoff = 0,
+ .offshift = 0,
+ .offmask = 0,
+ },
+ .key = {
+ .off = 4,
+ .val = cpu_to_be32(0x00000600),
+ .mask = cpu_to_be32(0x0000ff00),
+ },
+ .jump = cxgb4_tcp_fields,
+ },
+ {
+ /* UDP Jump */
+ .sel = {
+ .off = 40,
+ .offoff = 0,
+ .offshift = 0,
+ .offmask = 0,
+ },
+ .key = {
+ .off = 4,
+ .val = cpu_to_be32(0x00001100),
+ .mask = cpu_to_be32(0x0000ff00),
+ },
+ .jump = cxgb4_udp_fields,
+ },
+ { .jump = NULL },
};
struct cxgb4_link {
}
EXPORT_SYMBOL(cxgb4_select_ntuple);
-/*
- * Called when address resolution fails for an L2T entry to handle packets
- * on the arpq head. If a packet specifies a failure handler it is invoked,
- * otherwise the packet is sent to the device.
- */
-static void handle_failed_resolution(struct adapter *adap, struct l2t_entry *e)
-{
- struct sk_buff *skb;
-
- while ((skb = __skb_dequeue(&e->arpq)) != NULL) {
- const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
-
- spin_unlock(&e->lock);
- if (cb->arp_err_handler)
- cb->arp_err_handler(cb->handle, skb);
- else
- t4_ofld_send(adap, skb);
- spin_lock(&e->lock);
- }
-}
-
/*
* Called when the host's neighbor layer makes a change to some entry that is
* loaded into the HW L2 table.
*/
void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
{
- struct l2t_entry *e;
- struct sk_buff_head *arpq = NULL;
- struct l2t_data *d = adap->l2t;
unsigned int addr_len = neigh->tbl->key_len;
u32 *addr = (u32 *) neigh->primary_key;
- int ifidx = neigh->dev->ifindex;
- int hash = addr_hash(d, addr, addr_len, ifidx);
+ int hash, ifidx = neigh->dev->ifindex;
+ struct sk_buff_head *arpq = NULL;
+ struct l2t_data *d = adap->l2t;
+ struct l2t_entry *e;
+ hash = addr_hash(d, addr, addr_len, ifidx);
read_lock_bh(&d->lock);
for (e = d->l2tab[hash].first; e; e = e->next)
if (!addreq(e, addr) && e->ifindex == ifidx) {
write_l2e(adap, e, 0);
}
- if (arpq)
- handle_failed_resolution(adap, e);
+ if (arpq) {
+ struct sk_buff *skb;
+
+ /* Called when address resolution fails for an L2T
+ * entry to handle packets on the arpq head. If a
+ * packet specifies a failure handler it is invoked,
+ * otherwise the packet is sent to the device.
+ */
+ while ((skb = __skb_dequeue(&e->arpq)) != NULL) {
+ const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
+
+ spin_unlock(&e->lock);
+ if (cb->arp_err_handler)
+ cb->arp_err_handler(cb->handle, skb);
+ else
+ t4_ofld_send(adap, skb);
+ spin_lock(&e->lock);
+ }
+ }
spin_unlock_bh(&e->lock);
}
}
/**
+ * cxgb4_l2t_alloc_switching - Allocates an L2T entry for switch filters
* @dev: net_device pointer
* @vlan: VLAN Id
* @port: Associated port
/**
* cxgb4_sched_class_free - free a scheduling class
* @dev: net_device pointer
- * @e: scheduling class
+ * @classid: scheduling class id to free
*
* Frees a scheduling class if there are no users.
*/
/**
* free_tx_desc - reclaims Tx descriptors and their buffers
- * @adapter: the adapter
+ * @adap: the adapter
* @q: the Tx queue to reclaim descriptors from
* @n: the number of descriptors to reclaim
* @unmap: whether the buffers should be unmapped for DMA
/**
* is_eth_imm - can an Ethernet packet be sent as immediate data?
* @skb: the packet
+ * @chip_ver: chip version
*
* Returns whether an Ethernet packet is small enough to fit as
* immediate data. Return value corresponds to headroom required.
/**
* calc_tx_flits - calculate the number of flits for a packet Tx WR
* @skb: the packet
+ * @chip_ver: chip version
*
* Returns the number of flits needed for a Tx WR for the given Ethernet
* packet, including the needed WR and CPL headers.
/**
* calc_tx_descs - calculate the number of Tx descriptors for a packet
* @skb: the packet
+ * @chip_ver: chip version
*
* Returns the number of Tx descriptors needed for the given Ethernet
* packet, including the needed WR and CPL headers.
qidx = skb_get_queue_mapping(skb);
if (ptp_enabled) {
- spin_lock(&adap->ptp_lock);
if (!(adap->ptp_tx_skb)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
adap->ptp_tx_skb = skb_get(skb);
} else {
- spin_unlock(&adap->ptp_lock);
goto out_free;
}
q = &adap->sge.ptptxq;
#ifdef CONFIG_CHELSIO_T4_FCOE
ret = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
- if (unlikely(ret == -ENOTSUPP)) {
- if (ptp_enabled)
- spin_unlock(&adap->ptp_lock);
+ if (unlikely(ret == -EOPNOTSUPP))
goto out_free;
- }
#endif /* CONFIG_CHELSIO_T4_FCOE */
chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
dev_err(adap->pdev_dev,
"%s: Tx ring %u full while queue awake!\n",
dev->name, qidx);
- if (ptp_enabled)
- spin_unlock(&adap->ptp_lock);
return NETDEV_TX_BUSY;
}
unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
q->mapping_err++;
- if (ptp_enabled)
- spin_unlock(&adap->ptp_lock);
goto out_free;
}
if (iph->version == 4) {
iph->check = 0;
iph->tot_len = 0;
- iph->check = (u16)(~ip_fast_csum((u8 *)iph,
- iph->ihl));
+ iph->check = ~ip_fast_csum((u8 *)iph, iph->ihl);
}
if (skb->ip_summed == CHECKSUM_PARTIAL)
cntrl = hwcsum(adap->params.chip, skb);
txq_advance(&q->q, ndesc);
cxgb4_ring_tx_db(adap, &q->q, ndesc);
- if (ptp_enabled)
- spin_unlock(&adap->ptp_lock);
return NETDEV_TX_OK;
out_free:
if (unlikely(qid >= pi->nqsets))
return cxgb4_ethofld_xmit(skb, dev);
+ if (is_ptp_enabled(skb, dev)) {
+ struct adapter *adap = netdev2adap(dev);
+ netdev_tx_t ret;
+
+ spin_lock(&adap->ptp_lock);
+ ret = cxgb4_eth_xmit(skb, dev);
+ spin_unlock(&adap->ptp_lock);
+ return ret;
+ }
+
return cxgb4_eth_xmit(skb, dev);
}
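
The hunk above moves ptp_lock out of cxgb4_eth_xmit() into a thin wrapper, so the several error returns inside the transmit path no longer each need a matching unlock. A sketch of the shape, under the assumption that only the special-cased path needs serializing (stub names are hypothetical):

    #include <pthread.h>

    static pthread_mutex_t ptp_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Stand-in for the real transmit routine with its many early returns. */
    static int do_xmit(const void *pkt) { (void)pkt; return 0; }

    /* Take the lock only around the special-cased path; do_xmit() itself
     * no longer needs a matching unlock on every error return. */
    static int xmit_entry(const void *pkt, int is_ptp)
    {
        int ret;

        if (is_ptp) {
            pthread_mutex_lock(&ptp_lock);
            ret = do_xmit(pkt);
            pthread_mutex_unlock(&ptp_lock);
            return ret;
        }
        return do_xmit(pkt);
    }

    int main(void) { return xmit_entry("pkt", 1); }
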
/**
* cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc.
- * @dev - netdevice
- * @eotid - ETHOFLD tid to bind/unbind
- * @tc - traffic class. If set to FW_SCHED_CLS_NONE, then unbinds the @eotid
+ * @dev: netdevice
+ * @eotid: ETHOFLD tid to bind/unbind
+ * @tc: traffic class. If set to FW_SCHED_CLS_NONE, then unbinds the @eotid
*
* Send a FLOWC work request to bind an ETHOFLD TID to a traffic class.
* If @tc is set to FW_SCHED_CLS_NONE, then the @eotid is unbound from
/**
* txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
- * @adap: the adapter
* @q: the queue to stop
*
* Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
/**
* t4_systim_to_hwstamp - read hardware time stamp
- * @adap: the adapter
+ * @adapter: the adapter
* @skb: the packet
*
* Read Time Stamp from MPS packet and insert in skb which
hwtstamps = skb_hwtstamps(skb);
memset(hwtstamps, 0, sizeof(*hwtstamps));
- hwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*((u64 *)data)));
+ hwtstamps->hwtstamp = ns_to_ktime(get_unaligned_be64(data));
return RX_PTP_PKT_SUC;
}
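
get_unaligned_be64() replaces a cast that both assumed 8-byte alignment of the MPS timestamp data and needed a separate be64_to_cpu() step. A portable userspace equivalent that assumes nothing about the buffer's alignment:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Read a big-endian 64-bit value from a possibly unaligned buffer.
     * memcpy avoids the undefined behaviour of dereferencing a misaligned
     * uint64_t pointer; the explicit byte fold handles endianness. */
    static uint64_t get_be64(const void *p)
    {
        unsigned char b[8];
        uint64_t v = 0;
        int i;

        memcpy(b, p, 8);
        for (i = 0; i < 8; i++)
            v = (v << 8) | b[i];
        return v;
    }

    int main(void)
    {
        unsigned char buf[9] = { 0xff, 0, 0, 0, 0, 0, 0, 0x12, 0x34 };

        /* buf + 1 is deliberately misaligned for a 64-bit access */
        printf("%llx\n", (unsigned long long)get_be64(buf + 1));
        return 0;
    }
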
/**
* t4_rx_hststamp - Recv PTP Event Message
- * @adap: the adapter
+ * @adapter: the adapter
* @rsp: the response queue descriptor holding the RX_PKT message
+ * @rxq: the response queue holding the RX_PKT message
* @skb: the packet
*
* PTP enabled and MPS packet, read HW timestamp
/**
* t4_tx_hststamp - Loopback PTP Transmit Event Message
- * @adap: the adapter
+ * @adapter: the adapter
* @skb: the packet
* @dev: the ingress net device
*
}
/**
+ * cxgb4_smt_release - Release SMT entry
* @e: smt entry to release
*
* Releases ref count and frees up an smt entry from SMT table
}
/**
+ * cxgb4_smt_alloc_switching - Allocates an SMT entry for switch filters.
* @dev: net_device pointer
* @smac: MAC address to add to SMT
* Returns pointer to the SMT entry created
/**
* t4_get_exprom_version - return the Expansion ROM version (if any)
- * @adapter: the adapter
+ * @adap: the adapter
* @vers: where to place the version
*
* Reads the Expansion ROM header from FLASH and returns the version
drv_fw = &fw_info->fw_hdr;
/* Read the header of the firmware on the card */
- ret = -t4_read_flash(adap, FLASH_FW_START,
+ ret = t4_read_flash(adap, FLASH_FW_START,
sizeof(*card_fw) / sizeof(uint32_t),
(uint32_t *)card_fw, 1);
if (ret == 0) {
should_install_fs_fw(adap, card_fw_usable,
be32_to_cpu(fs_fw->fw_ver),
be32_to_cpu(card_fw->fw_ver))) {
- ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
- fw_size, 0);
+ ret = t4_fw_upgrade(adap, adap->mbox, fw_data,
+ fw_size, 0);
if (ret != 0) {
dev_err(adap->pdev_dev,
"failed to install firmware: %d\n", ret);
FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
- ret = EINVAL;
+ ret = -EINVAL;
goto bye;
}
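
These hunks fix two sign slips around the kernel's negative-errno convention: t4_read_flash() and t4_fw_upgrade() already return 0 or a negative errno, so negating the result turned real failures into positive values, and a bare EINVAL is likewise positive. A tiny illustration of why the extra minus is harmful:

    #include <errno.h>
    #include <stdio.h>

    /* Kernel style: return 0 on success, -errno on failure. */
    static int read_flash(void) { return -EIO; }

    int main(void)
    {
        int ret;

        ret = -read_flash();   /* bug: -(-EIO) == +EIO, no longer negative */
        if (ret < 0)
            printf("error path taken\n");
        else
            printf("failure missed, ret=%d looks non-error\n", ret);

        ret = read_flash();    /* fixed: propagate the negative errno as-is */
        if (ret < 0)
            printf("failure detected, ret=%d\n", ret);
        return 0;
    }
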
* @cmd: TP fw ldst address space type
* @vals: where the indirect register values are stored/written
* @nregs: how many indirect registers to read/write
- * @start_idx: index of first indirect register to read/write
+ * @start_index: index of first indirect register to read/write
* @rw: Read (1) or Write (0)
* @sleep_ok: if true we may sleep while awaiting command completion
*
/**
* compute_mps_bg_map - compute the MPS Buffer Group Map for a Port
- * @adap: the adapter
+ * @adapter: the adapter
* @pidx: the port index
*
* Computes and returns a bitmap indicating which MPS buffer groups are
/**
* t4_get_tp_ch_map - return TP ingress channels associated with a port
- * @adapter: the adapter
+ * @adap: the adapter
* @pidx: the port index
*
* Returns a bitmap indicating which TP Ingress Channels are associated
* @phy_addr: the PHY address
* @mmd: the PHY MMD to access (0 for clause 22 PHYs)
* @reg: the register to write
- * @valp: value to write
+ * @val: value to write
*
* Issues a FW command through the given mailbox to write a PHY register.
*/
/**
* t4_sge_decode_idma_state - decode the idma state
- * @adap: the adapter
+ * @adapter: the adapter
* @state: the state idma is stuck in
*/
void t4_sge_decode_idma_state(struct adapter *adapter, int state)
* t4_sge_ctxt_flush - flush the SGE context cache
* @adap: the adapter
* @mbox: mailbox to use for the FW command
- * @ctx_type: Egress or Ingress
+ * @ctxt_type: Egress or Ingress
*
* Issues a FW command through the given mailbox to flush the
* SGE context cache.
/**
* t4_read_sge_dbqtimers - read SGE Doorbell Queue Timer values
- * @adap - the adapter
+ * @adap: the adapter
* @ndbqtimers: size of the provided SGE Doorbell Queue Timer table
* @dbqtimers: SGE Doorbell Queue Timer table
*
/**
* t4_fw_restart - restart the firmware by taking the uP out of RESET
* @adap: the adapter
+ * @mbox: mailbox to use for the FW command
* @reset: if we want to do a RESET to restart things
*
* Restart firmware previously halted by t4_fw_halt(). On successful
* @nmac: number of MAC addresses needed (1 to 5)
* @mac: the MAC addresses of the VI
* @rss_size: size of RSS table slice associated with this VI
+ * @vivld: the destination to store the VI Valid value.
+ * @vin: the destination to store the VIN value.
*
* Allocates a virtual interface for the given physical port. If @mac is
* not %NULL it contains the MAC addresses of the VI as assigned by FW.
* t4_alloc_encap_mac_filt - Adds a mac entry in mps tcam with VNI support
* @adap: the adapter
* @viid: the VI id
- * @mac: the MAC address
+ * @addr: the MAC address
* @mask: the mask
* @vni: the VNI id for the tunnel protocol
* @vni_mask: mask for the VNI id
* t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
* @adap: the adapter
* @viid: the VI id
- * @mac: the MAC address
+ * @addr: the MAC address
* @mask: the mask
* @idx: index at which to add this entry
- * @port_id: the port index
* @lookup_type: MAC address for inner (1) or outer (0) header
+ * @port_id: the port index
* @sleep_ok: call is allowed to sleep
*
* Adds the mac entry at the specified index using raw mac interface.
* @idx: index of existing filter for old value of MAC address, or -1
* @addr: the new MAC address value
* @persist: whether a new MAC allocation should be persistent
- * @add_smt: if true also add the address to the HW SMT
+ * @smt_idx: the destination to store the new SMT index.
*
* Modifies an exact-match filter and sets it to the new MAC address.
* Note that in general it is not possible to modify the value of a given
/**
* t4_link_down_rc_str - return a string for a Link Down Reason Code
- * @adap: the adapter
* @link_down_rc: Link Down Reason Code
*
* Returns a string representation of the Link Down Reason Code.
return reason[link_down_rc];
}
-/**
- * Return the highest speed set in the port capabilities, in Mb/s.
- */
+/* Return the highest speed set in the port capabilities, in Mb/s. */
static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
{
#define TEST_SPEED_RETURN(__caps_speed, __speed) \
/**
* t4_prep_adapter - prepare SW and HW for operation
* @adapter: the adapter
- * @reset: if true perform a HW reset
*
* Initialize adapter SW state for the various HW modules, set initial
* values for some adapter tunables, take PHYs out of reset, and
/**
* t4_i2c_rd - read I2C data from adapter
* @adap: the adapter
+ * @mbox: mailbox to use for the FW command
* @port: Port number if per-port device; <0 if not
* @devid: per-port device ID or absolute device ID
* @offset: byte offset into device I2C space
/**
* t4_set_vlan_acl - Set a VLAN id for the specified VF
- * @adapter: the adapter
+ * @adap: the adapter
* @mbox: mailbox to use for the FW command
* @vf: one of the VFs instantiated by the specified PF
* @vlan: The vlanid to be set
* @tcam_idx: TCAM index of existing filter for old value of MAC address,
* or -1
* @addr: the new MAC address value
- * @persist: whether a new MAC allocation should be persistent
- * @add_smt: if true also add the address to the HW SMT
+ * @persistent: whether a new MAC allocation should be persistent
*
* Modifies an MPS filter and sets it to the new MAC address if
* @tcam_idx >= 0, or adds the MAC address to a new filter if
* restore_rx_bufs - put back a packet's RX buffers
* @gl: the packet gather list
* @fl: the SGE Free List
- * @nfrags: how many fragments in @si
+ * @frags: how many fragments in @si
*
* Called when we find out that the current packet, @si, can't be
* processed right away for some reason. This is a very rare event and
/**
* sge_rx_timer_cb - perform periodic maintenance of SGE RX queues
- * @data: the adapter
+ * @t: Rx timer
*
* Runs periodically from a timer to perform maintenance of SGE RX queues.
*
/**
* sge_tx_timer_cb - perform periodic maintenance of SGE Tx queues
- * @data: the adapter
+ * @t: Tx timer
*
* Runs periodically from a timer to perform maintenance of SGE TX queues.
*
* t4vf_sge_alloc_eth_txq - allocate an SGE Ethernet TX Queue
* @adapter: the adapter
* @txq: pointer to the new txq to be filled in
+ * @dev: the network device
* @devq: the network TX queue associated with the new txq
* @iqid: the relative ingress queue ID to which events relating to
* the new txq should be directed
return cc_fec;
}
-/**
- * Return the highest speed set in the port capabilities, in Mb/s.
- */
+/* Return the highest speed set in the port capabilities, in Mb/s. */
static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
{
#define TEST_SPEED_RETURN(__caps_speed, __speed) \
* @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
* @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
* -1 no change
+ * @sleep_ok: call is allowed to sleep
*
* Sets Rx properties of a virtual interface.
*/
/**
* t4vf_handle_get_port_info - process a FW reply message
* @pi: the port info
- * @rpl: start of the FW message
+ * @cmd: start of the FW message
*
* Processes a GET_PORT_INFO FW reply message.
*/
return 0;
}
-/**
- */
int t4vf_prep_adapter(struct adapter *adapter)
{
int err;
/* disable interrupts */
enetc_wr_reg(v->rbier, 0);
- for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings)
+ for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0);
napi_schedule_irqoff(&v->napi);
/* enable interrupts */
enetc_wr_reg(v->rbier, ENETC_RBIER_RXTIE);
- for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings)
+ for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i),
ENETC_TBIER_TXTIE);
return 0;
}
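
The bound passed to for_each_set_bit() must be the width of the bitmap, not the number of set bits: with v->count_tx_rings as the limit, ring indices at or above the count were silently skipped, so those rings' interrupt-enable registers were never written. A plain-C stand-in showing the difference:

    #include <stdio.h>

    #define MAX_TXQS 8  /* stand-in for ENETC_MAX_NUM_TXQS */

    int main(void)
    {
        unsigned long map = (1UL << 1) | (1UL << 6); /* rings 1 and 6 in use */
        int count = 2;                               /* ring count, NOT a bound */
        int i;

        /* Wrong: bounding by the count visits bits 0..1 and misses bit 6 */
        for (i = 0; i < count; i++)
            if (map & (1UL << i))
                printf("count-bounded loop sees ring %d\n", i);

        /* Right: bound by the bitmap width so every set bit is visited */
        for (i = 0; i < MAX_TXQS; i++)
            if (map & (1UL << i))
                printf("full-width loop sees ring %d\n", i);
        return 0;
    }
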
+static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int i;
+
+ for (i = 0; i < priv->num_rx_rings; i++)
+ enetc_bdr_enable_rxvlan(&priv->si->hw, i, en);
+}
+
+static void enetc_enable_txvlan(struct net_device *ndev, bool en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_bdr_enable_txvlan(&priv->si->hw, i, en);
+}
+
int enetc_set_features(struct net_device *ndev,
netdev_features_t features)
{
if (changed & NETIF_F_RXHASH)
enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX)
+ enetc_enable_rxvlan(ndev,
+ !!(features & NETIF_F_HW_VLAN_CTAG_RX));
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_TX)
+ enetc_enable_txvlan(ndev,
+ !!(features & NETIF_F_HW_VLAN_CTAG_TX));
+
if (changed & NETIF_F_HW_TC)
err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
/* Common H/W utility functions */
-static inline void enetc_enable_rxvlan(struct enetc_hw *hw, int si_idx,
- bool en)
+static inline void enetc_bdr_enable_rxvlan(struct enetc_hw *hw, int idx,
+ bool en)
{
- u32 val = enetc_rxbdr_rd(hw, si_idx, ENETC_RBMR);
+ u32 val = enetc_rxbdr_rd(hw, idx, ENETC_RBMR);
val = (val & ~ENETC_RBMR_VTE) | (en ? ENETC_RBMR_VTE : 0);
- enetc_rxbdr_wr(hw, si_idx, ENETC_RBMR, val);
+ enetc_rxbdr_wr(hw, idx, ENETC_RBMR, val);
}
-static inline void enetc_enable_txvlan(struct enetc_hw *hw, int si_idx,
- bool en)
+static inline void enetc_bdr_enable_txvlan(struct enetc_hw *hw, int idx,
+ bool en)
{
- u32 val = enetc_txbdr_rd(hw, si_idx, ENETC_TBMR);
+ u32 val = enetc_txbdr_rd(hw, idx, ENETC_TBMR);
val = (val & ~ENETC_TBMR_VIH) | (en ? ENETC_TBMR_VIH : 0);
- enetc_txbdr_wr(hw, si_idx, ENETC_TBMR, val);
+ enetc_txbdr_wr(hw, idx, ENETC_TBMR, val);
}
static inline void enetc_set_bdr_prio(struct enetc_hw *hw, int bdr_idx,
netdev_features_t changed = ndev->features ^ features;
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- if (changed & NETIF_F_HW_VLAN_CTAG_RX)
- enetc_enable_rxvlan(&priv->si->hw, 0,
- !!(features & NETIF_F_HW_VLAN_CTAG_RX));
-
- if (changed & NETIF_F_HW_VLAN_CTAG_TX)
- enetc_enable_txvlan(&priv->si->hw, 0,
- !!(features & NETIF_F_HW_VLAN_CTAG_TX));
-
if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
struct enetc_pf *pf = enetc_si_priv(priv->si);
unsigned int total_tx_ring_size;
unsigned int total_rx_ring_size;
- unsigned long work_tx;
- unsigned long work_rx;
- unsigned long work_ts;
- unsigned long work_mdio;
-
struct platform_device *pdev;
int dev_id;
#define DRIVER_NAME "fec"
-#define FEC_ENET_GET_QUQUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 2 : 0))
-
/* Pause frame field and FIFO threshold */
#define FEC_ENET_FCE (1 << 5)
#define FEC_ENET_RSEM_V 0x84
fep = netdev_priv(ndev);
- queue_id = FEC_ENET_GET_QUQUE(queue_id);
-
txq = fep->tx_queue[queue_id];
/* get next bdp of dirty_tx */
nq = netdev_get_tx_queue(ndev, queue_id);
writel(0, txq->bd.reg_desc_active);
}
-static void
-fec_enet_tx(struct net_device *ndev)
+static void fec_enet_tx(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- u16 queue_id;
- /* First process class A queue, then Class B and Best Effort queue */
- for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) {
- clear_bit(queue_id, &fep->work_tx);
- fec_enet_tx_queue(ndev, queue_id);
- }
- return;
+ int i;
+
+ /* Make sure that AVB queues are processed first. */
+ for (i = fep->num_tx_queues - 1; i >= 0; i--)
+ fec_enet_tx_queue(ndev, i);
}
static int
#ifdef CONFIG_M532x
flush_cache_all();
#endif
- queue_id = FEC_ENET_GET_QUQUE(queue_id);
rxq = fep->rx_queue[queue_id];
/* First, grab all of the stats for the incoming packet.
htons(ETH_P_8021Q),
vlan_tag);
+ skb_record_rx_queue(skb, queue_id);
napi_gro_receive(&fep->napi, skb);
if (is_copybreak) {
return pkt_received;
}
-static int
-fec_enet_rx(struct net_device *ndev, int budget)
+static int fec_enet_rx(struct net_device *ndev, int budget)
{
- int pkt_received = 0;
- u16 queue_id;
struct fec_enet_private *fep = netdev_priv(ndev);
+ int i, done = 0;
- for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
- int ret;
-
- ret = fec_enet_rx_queue(ndev,
- budget - pkt_received, queue_id);
+ /* Make sure that AVB queues are processed first. */
+ for (i = fep->num_rx_queues - 1; i >= 0; i--)
+ done += fec_enet_rx_queue(ndev, budget - done, i);
- if (ret < budget - pkt_received)
- clear_bit(queue_id, &fep->work_rx);
-
- pkt_received += ret;
- }
- return pkt_received;
+ return done;
}
-static bool
-fec_enet_collect_events(struct fec_enet_private *fep, uint int_events)
+static bool fec_enet_collect_events(struct fec_enet_private *fep)
{
- if (int_events == 0)
- return false;
+ uint int_events;
+
+ int_events = readl(fep->hwp + FEC_IEVENT);
- if (int_events & FEC_ENET_RXF_0)
- fep->work_rx |= (1 << 2);
- if (int_events & FEC_ENET_RXF_1)
- fep->work_rx |= (1 << 0);
- if (int_events & FEC_ENET_RXF_2)
- fep->work_rx |= (1 << 1);
+ /* Don't clear MDIO events, we poll for those */
+ int_events &= ~FEC_ENET_MII;
- if (int_events & FEC_ENET_TXF_0)
- fep->work_tx |= (1 << 2);
- if (int_events & FEC_ENET_TXF_1)
- fep->work_tx |= (1 << 0);
- if (int_events & FEC_ENET_TXF_2)
- fep->work_tx |= (1 << 1);
+ writel(int_events, fep->hwp + FEC_IEVENT);
- return true;
+ return int_events != 0;
}
static irqreturn_t
{
struct net_device *ndev = dev_id;
struct fec_enet_private *fep = netdev_priv(ndev);
- uint int_events;
irqreturn_t ret = IRQ_NONE;
- int_events = readl(fep->hwp + FEC_IEVENT);
-
- /* Don't clear MDIO events, we poll for those */
- int_events &= ~FEC_ENET_MII;
-
- writel(int_events, fep->hwp + FEC_IEVENT);
- fec_enet_collect_events(fep, int_events);
-
- if ((fep->work_tx || fep->work_rx) && fep->link) {
+ if (fec_enet_collect_events(fep) && fep->link) {
ret = IRQ_HANDLED;
if (napi_schedule_prep(&fep->napi)) {
{
struct net_device *ndev = napi->dev;
struct fec_enet_private *fep = netdev_priv(ndev);
- int pkts;
+ int done = 0;
- pkts = fec_enet_rx(ndev, budget);
-
- fec_enet_tx(ndev);
+ do {
+ done += fec_enet_rx(ndev, budget - done);
+ fec_enet_tx(ndev);
+ } while ((done < budget) && fec_enet_collect_events(fep));
- if (pkts < budget) {
- napi_complete_done(napi, pkts);
+ if (done < budget) {
+ napi_complete_done(napi, done);
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
}
- return pkts;
+
+ return done;
}
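
The reworked NAPI poll above keeps servicing RX and TX until the budget is spent or fec_enet_collect_events() reports no newly latched events, and only then completes polling and unmasks interrupts, closing the race where an event lands between the last pass and re-enable. A schematic of the loop shape with stub helpers (hypothetical names, not the driver's API):

    #include <stdio.h>

    static int pending = 3;

    /* Stubs standing in for hardware access; illustrative only. */
    static int events_pending(void) { return pending > 0; }
    static int process(int budget)
    {
        int n = pending < budget ? pending : budget;

        pending -= n;
        return n;
    }
    static void complete_poll(int done) { printf("complete after %d\n", done); }
    static void unmask_interrupts(void) { printf("irq unmasked\n"); }

    static int poll(int budget)
    {
        int done = 0;

        /* Re-check for freshly latched events before leaving polling mode,
         * so nothing slips in between the last pass and irq re-enable. */
        do {
            done += process(budget - done);
        } while (done < budget && events_pending());

        if (done < budget) {
            complete_poll(done);
            unmask_interrupts();
        }
        return done;
    }

    int main(void) { return poll(64) >= 0 ? 0 : 1; }
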
/* ------------------------------------------------------------------------- */
struct net_device *ndev = ring_data->napi.dev;
skb->protocol = eth_type_trans(skb, ndev);
- (void)napi_gro_receive(&ring_data->napi, skb);
+ napi_gro_receive(&ring_data->napi, skb);
}
static int hns_desc_unused(struct hnae_ring *ring)
hns3_put_ring_config(priv);
- hns3_dbg_uninit(handle);
-
out_netdev_free:
+ hns3_dbg_uninit(handle);
free_netdev(netdev);
}
{
struct hns3_enet_tqp_vector *tqp_vector = ring->tqp_vector;
unsigned char *packet = skb->data;
+ u32 len = skb_headlen(skb);
u32 i;
- for (i = 0; i < skb->len; i++)
+ len = min_t(u32, len, HNS3_NIC_LB_TEST_PACKET_SIZE);
+
+ for (i = 0; i < len; i++)
if (packet[i] != (unsigned char)(i & 0xff))
break;
/* The packet is correctly received */
- if (i == skb->len)
+ if (i == HNS3_NIC_LB_TEST_PACKET_SIZE)
tqp_vector->rx_group.total_packets++;
else
print_hex_dump(KERN_ERR, "selftest:", DUMP_PREFIX_OFFSET, 16, 1,
- skb->data, skb->len, true);
+ skb->data, len, true);
dev_kfree_skb_any(skb);
}
set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
hdev->reset_type = HNAE3_FLR_RESET;
ret = hclge_reset_prepare(hdev);
- if (ret) {
+ if (ret || hdev->reset_pending) {
dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
ret);
if (hdev->reset_pending ||
if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0);
ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to assert VF reset, ret = %d\n", ret);
+ return ret;
+ }
hdev->rst_stats.vf_func_rst_cnt++;
}
err_init_msix:
err_pfhwdev_alloc:
hinic_free_hwif(hwif);
+ if (err > 0)
+ err = -EIO;
return ERR_PTR(err);
}
MSG_NOT_RESP, timeout);
}
-/**
- * mgmt_recv_msg_handler - handler for message from mgmt cpu
- * @pf_to_mgmt: PF to MGMT channel
- * @recv_msg: received message details
- **/
-static void mgmt_recv_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt,
- struct hinic_recv_msg *recv_msg)
+static void recv_mgmt_msg_work_handler(struct work_struct *work)
{
- struct hinic_hwif *hwif = pf_to_mgmt->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u8 *buf_out = recv_msg->buf_out;
+ struct hinic_mgmt_msg_handle_work *mgmt_work =
+ container_of(work, struct hinic_mgmt_msg_handle_work, work);
+ struct hinic_pf_to_mgmt *pf_to_mgmt = mgmt_work->pf_to_mgmt;
+ struct pci_dev *pdev = pf_to_mgmt->hwif->pdev;
+ u8 *buf_out = pf_to_mgmt->mgmt_ack_buf;
struct hinic_mgmt_cb *mgmt_cb;
unsigned long cb_state;
u16 out_size = 0;
- if (recv_msg->mod >= HINIC_MOD_MAX) {
+ memset(buf_out, 0, MAX_PF_MGMT_BUF_SIZE);
+
+ if (mgmt_work->mod >= HINIC_MOD_MAX) {
dev_err(&pdev->dev, "Unknown MGMT MSG module = %d\n",
- recv_msg->mod);
+ mgmt_work->mod);
+ kfree(mgmt_work->msg);
+ kfree(mgmt_work);
return;
}
- mgmt_cb = &pf_to_mgmt->mgmt_cb[recv_msg->mod];
+ mgmt_cb = &pf_to_mgmt->mgmt_cb[mgmt_work->mod];
cb_state = cmpxchg(&mgmt_cb->state,
HINIC_MGMT_CB_ENABLED,
HINIC_MGMT_CB_ENABLED | HINIC_MGMT_CB_RUNNING);
if ((cb_state == HINIC_MGMT_CB_ENABLED) && (mgmt_cb->cb))
- mgmt_cb->cb(mgmt_cb->handle, recv_msg->cmd,
- recv_msg->msg, recv_msg->msg_len,
+ mgmt_cb->cb(mgmt_cb->handle, mgmt_work->cmd,
+ mgmt_work->msg, mgmt_work->msg_len,
buf_out, &out_size);
else
dev_err(&pdev->dev, "No MGMT msg handler, mod: %d, cmd: %d\n",
- recv_msg->mod, recv_msg->cmd);
+ mgmt_work->mod, mgmt_work->cmd);
mgmt_cb->state &= ~HINIC_MGMT_CB_RUNNING;
- if (!recv_msg->async_mgmt_to_pf)
+ if (!mgmt_work->async_mgmt_to_pf)
/* MGMT sent sync msg, send the response */
- msg_to_mgmt_async(pf_to_mgmt, recv_msg->mod, recv_msg->cmd,
+ msg_to_mgmt_async(pf_to_mgmt, mgmt_work->mod, mgmt_work->cmd,
buf_out, out_size, MGMT_RESP,
- recv_msg->msg_id);
+ mgmt_work->msg_id);
+
+ kfree(mgmt_work->msg);
+ kfree(mgmt_work);
+}
+
+/**
+ * mgmt_recv_msg_handler - handler for message from mgmt cpu
+ * @pf_to_mgmt: PF to MGMT channel
+ * @recv_msg: received message details
+ **/
+static void mgmt_recv_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt,
+ struct hinic_recv_msg *recv_msg)
+{
+ struct hinic_mgmt_msg_handle_work *mgmt_work = NULL;
+ struct pci_dev *pdev = pf_to_mgmt->hwif->pdev;
+
+ mgmt_work = kzalloc(sizeof(*mgmt_work), GFP_KERNEL);
+ if (!mgmt_work) {
+ dev_err(&pdev->dev, "Allocate mgmt work memory failed\n");
+ return;
+ }
+
+ if (recv_msg->msg_len) {
+ mgmt_work->msg = kzalloc(recv_msg->msg_len, GFP_KERNEL);
+ if (!mgmt_work->msg) {
+ dev_err(&pdev->dev, "Allocate mgmt msg memory failed\n");
+ kfree(mgmt_work);
+ return;
+ }
+ }
+
+ mgmt_work->pf_to_mgmt = pf_to_mgmt;
+ mgmt_work->msg_len = recv_msg->msg_len;
+ memcpy(mgmt_work->msg, recv_msg->msg, recv_msg->msg_len);
+ mgmt_work->msg_id = recv_msg->msg_id;
+ mgmt_work->mod = recv_msg->mod;
+ mgmt_work->cmd = recv_msg->cmd;
+ mgmt_work->async_mgmt_to_pf = recv_msg->async_mgmt_to_pf;
+
+ INIT_WORK(&mgmt_work->work, recv_mgmt_msg_work_handler);
+ queue_work(pf_to_mgmt->workq, &mgmt_work->work);
}
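
The hinic change stops running management callbacks in the receive context: each message is copied into a heap-allocated work item and queued to a dedicated single-threaded workqueue, so a slow callback can no longer stall the event path, at the cost of one allocation and copy per message. A userspace analogue using a detached thread in place of the workqueue (all names illustrative):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct work {
        size_t len;
        unsigned char msg[];   /* private copy; receive buffer may be reused */
    };

    static void *worker(void *arg)
    {
        struct work *w = arg;

        printf("handling %zu byte message\n", w->len);  /* slow handler here */
        free(w);
        return NULL;
    }

    /* Called from the hot receive path: copy and defer, never block. */
    static int defer_msg(const void *msg, size_t len)
    {
        struct work *w = malloc(sizeof(*w) + len);
        pthread_t t;

        if (!w)
            return -1;
        w->len = len;
        memcpy(w->msg, msg, len);
        if (pthread_create(&t, NULL, worker, w)) {
            free(w);
            return -1;
        }
        return pthread_detach(t);
    }

    int main(void)
    {
        defer_msg("ping", 4);
        pthread_exit(NULL);    /* let the worker finish */
    }
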
/**
if (!pf_to_mgmt->sync_msg_buf)
return -ENOMEM;
+ pf_to_mgmt->mgmt_ack_buf = devm_kzalloc(&pdev->dev,
+ MAX_PF_MGMT_BUF_SIZE,
+ GFP_KERNEL);
+ if (!pf_to_mgmt->mgmt_ack_buf)
+ return -ENOMEM;
+
return 0;
}
return 0;
sema_init(&pf_to_mgmt->sync_msg_lock, 1);
+ pf_to_mgmt->workq = create_singlethread_workqueue("hinic_mgmt");
+ if (!pf_to_mgmt->workq) {
+ dev_err(&pdev->dev, "Failed to initialize MGMT workqueue\n");
+ return -ENOMEM;
+ }
pf_to_mgmt->sync_msg_id = 0;
err = alloc_msg_buf(pf_to_mgmt);
hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU);
hinic_api_cmd_free(pf_to_mgmt->cmd_chain);
+ destroy_workqueue(pf_to_mgmt->workq);
}
struct semaphore sync_msg_lock;
u16 sync_msg_id;
u8 *sync_msg_buf;
+ void *mgmt_ack_buf;
struct hinic_recv_msg recv_resp_msg_from_mgmt;
struct hinic_recv_msg recv_msg_from_mgmt;
struct hinic_api_cmd_chain *cmd_chain[HINIC_API_CMD_MAX];
struct hinic_mgmt_cb mgmt_cb[HINIC_MOD_MAX];
+
+ struct workqueue_struct *workq;
+};
+
+struct hinic_mgmt_msg_handle_work {
+ struct work_struct work;
+ struct hinic_pf_to_mgmt *pf_to_mgmt;
+
+ void *msg;
+ u16 msg_len;
+
+ enum hinic_mod_type mod;
+ u8 cmd;
+ u16 msg_id;
+ int async_mgmt_to_pf;
};
void hinic_register_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt,
}
netdev->min_mtu = IBMVETH_MIN_MTU;
- netdev->max_mtu = ETH_MAX_MTU;
+ netdev->max_mtu = ETH_MAX_MTU - IBMVETH_BUFF_OH;
memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
release_sub_crqs(adapter, 1);
} else {
rc = ibmvnic_reset_crq(adapter);
- if (!rc)
+ if (rc == H_CLOSED || rc == H_SUCCESS) {
rc = vio_enable_interrupts(adapter->vdev);
+ if (rc)
+ netdev_err(adapter->netdev,
+ "Reset failed to enable interrupts. rc=%d\n",
+ rc);
+ }
}
if (rc) {
netdev_err(adapter->netdev,
- "Couldn't initialize crq. rc=%d\n", rc);
+ "Reset couldn't initialize crq. rc=%d\n", rc);
goto out;
}
*/
rx_rings[i].tail = hw->hw_addr + I40E_PRTGEN_STATUS;
err = i40e_setup_rx_descriptors(&rx_rings[i]);
+ if (err)
+ goto rx_unwind;
+ err = i40e_alloc_rx_bi(&rx_rings[i]);
if (err)
goto rx_unwind;
i40e_get_netdev_stats_struct_tx(ring, stats);
if (i40e_enabled_xdp_vsi(vsi)) {
- ring++;
+ ring = READ_ONCE(vsi->xdp_rings[i]);
+ if (!ring)
+ continue;
i40e_get_netdev_stats_struct_tx(ring, stats);
}
- ring++;
+ ring = READ_ONCE(vsi->rx_rings[i]);
+ if (!ring)
+ continue;
do {
start = u64_stats_fetch_begin_irq(&ring->syncp);
packets = ring->stats.packets;
for (q = 0; q < vsi->num_queue_pairs; q++) {
/* locate Tx ring */
p = READ_ONCE(vsi->tx_rings[q]);
+ if (!p)
+ continue;
do {
start = u64_stats_fetch_begin_irq(&p->syncp);
tx_linearize += p->tx_stats.tx_linearize;
tx_force_wb += p->tx_stats.tx_force_wb;
- /* Rx queue is part of the same block as Tx queue */
- p = &p[1];
+ /* locate Rx ring */
+ p = READ_ONCE(vsi->rx_rings[q]);
+ if (!p)
+ continue;
+
do {
start = u64_stats_fetch_begin_irq(&p->syncp);
packets = p->stats.packets;
if (vsi->tx_rings && vsi->tx_rings[0]) {
for (i = 0; i < vsi->alloc_queue_pairs; i++) {
kfree_rcu(vsi->tx_rings[i], rcu);
- vsi->tx_rings[i] = NULL;
- vsi->rx_rings[i] = NULL;
+ WRITE_ONCE(vsi->tx_rings[i], NULL);
+ WRITE_ONCE(vsi->rx_rings[i], NULL);
if (vsi->xdp_rings)
- vsi->xdp_rings[i] = NULL;
+ WRITE_ONCE(vsi->xdp_rings[i], NULL);
}
}
}
if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
ring->itr_setting = pf->tx_itr_default;
- vsi->tx_rings[i] = ring++;
+ WRITE_ONCE(vsi->tx_rings[i], ring++);
if (!i40e_enabled_xdp_vsi(vsi))
goto setup_rx;
ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
set_ring_xdp(ring);
ring->itr_setting = pf->tx_itr_default;
- vsi->xdp_rings[i] = ring++;
+ WRITE_ONCE(vsi->xdp_rings[i], ring++);
setup_rx:
ring->queue_index = i;
ring->size = 0;
ring->dcb_tc = 0;
ring->itr_setting = pf->rx_itr_default;
- vsi->rx_rings[i] = ring;
+ WRITE_ONCE(vsi->rx_rings[i], ring);
}
return 0;
for (i = 0; i < vsi->alloc_txq; i++) {
if (vsi->tx_rings[i]) {
kfree_rcu(vsi->tx_rings[i], rcu);
- vsi->tx_rings[i] = NULL;
+ WRITE_ONCE(vsi->tx_rings[i], NULL);
}
}
}
for (i = 0; i < vsi->alloc_rxq; i++) {
if (vsi->rx_rings[i]) {
kfree_rcu(vsi->rx_rings[i], rcu);
- vsi->rx_rings[i] = NULL;
+ WRITE_ONCE(vsi->rx_rings[i], NULL);
}
}
}
ring->vsi = vsi;
ring->dev = dev;
ring->count = vsi->num_tx_desc;
- vsi->tx_rings[i] = ring;
+ WRITE_ONCE(vsi->tx_rings[i], ring);
}
/* Allocate Rx rings */
ring->netdev = vsi->netdev;
ring->dev = dev;
ring->count = vsi->num_rx_desc;
- vsi->rx_rings[i] = ring;
+ WRITE_ONCE(vsi->rx_rings[i], ring);
}
return 0;
xdp_ring->netdev = NULL;
xdp_ring->dev = dev;
xdp_ring->count = vsi->num_tx_desc;
- vsi->xdp_rings[i] = xdp_ring;
+ WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
if (ice_setup_tx_ring(xdp_ring))
goto free_xdp_rings;
ice_set_ring_xdp(xdp_ring);
ring->queue_index = txr_idx;
/* assign ring to adapter */
- adapter->tx_ring[txr_idx] = ring;
+ WRITE_ONCE(adapter->tx_ring[txr_idx], ring);
/* update count and index */
txr_count--;
set_ring_xdp(ring);
/* assign ring to adapter */
- adapter->xdp_ring[xdp_idx] = ring;
+ WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring);
/* update count and index */
xdp_count--;
ring->queue_index = rxr_idx;
/* assign ring to adapter */
- adapter->rx_ring[rxr_idx] = ring;
+ WRITE_ONCE(adapter->rx_ring[rxr_idx], ring);
/* update count and index */
rxr_count--;
ixgbe_for_each_ring(ring, q_vector->tx) {
if (ring_is_xdp(ring))
- adapter->xdp_ring[ring->queue_index] = NULL;
+ WRITE_ONCE(adapter->xdp_ring[ring->queue_index], NULL);
else
- adapter->tx_ring[ring->queue_index] = NULL;
+ WRITE_ONCE(adapter->tx_ring[ring->queue_index], NULL);
}
ixgbe_for_each_ring(ring, q_vector->rx)
- adapter->rx_ring[ring->queue_index] = NULL;
+ WRITE_ONCE(adapter->rx_ring[ring->queue_index], NULL);
adapter->q_vector[v_idx] = NULL;
napi_hash_del(&q_vector->napi);
}
for (i = 0; i < adapter->num_rx_queues; i++) {
- struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
+ struct ixgbe_ring *rx_ring = READ_ONCE(adapter->rx_ring[i]);
+
+ if (!rx_ring)
+ continue;
non_eop_descs += rx_ring->rx_stats.non_eop_descs;
alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
packets = 0;
/* gather some stats to the adapter struct that are per queue */
for (i = 0; i < adapter->num_tx_queues; i++) {
- struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+ struct ixgbe_ring *tx_ring = READ_ONCE(adapter->tx_ring[i]);
+
+ if (!tx_ring)
+ continue;
restart_queue += tx_ring->tx_stats.restart_queue;
tx_busy += tx_ring->tx_stats.tx_busy;
bytes += tx_ring->stats.bytes;
packets += tx_ring->stats.packets;
}
for (i = 0; i < adapter->num_xdp_queues; i++) {
- struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];
+ struct ixgbe_ring *xdp_ring = READ_ONCE(adapter->xdp_ring[i]);
+
+ if (!xdp_ring)
+ continue;
restart_queue += xdp_ring->tx_stats.restart_queue;
tx_busy += xdp_ring->tx_stats.tx_busy;
bytes += xdp_ring->stats.bytes;
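
The i40e/ice/ixgbe hunks pair every writer of the ring-pointer arrays with WRITE_ONCE() and every lockless reader with READ_ONCE() plus a NULL check, since the stats paths can race with ring teardown and the compiler is otherwise free to tear, cache, or re-read the pointer. A rough C11 analogue with relaxed atomics (the kernel macros are volatile accesses, not C11 atomics, so this is an approximation):

    #include <stdatomic.h>
    #include <stddef.h>

    struct ring { long packets; };

    #define NQUEUES 4
    static _Atomic(struct ring *) rings[NQUEUES];

    /* Teardown path: publish NULL exactly once, with no store tearing. */
    static void clear_ring(int i)
    {
        atomic_store_explicit(&rings[i], NULL, memory_order_relaxed);
    }

    /* Lockless stats path: load the pointer once, then test it; a plain
     * read could be re-fetched by the compiler after the NULL check. */
    static long count_packets(void)
    {
        long total = 0;

        for (int i = 0; i < NQUEUES; i++) {
            struct ring *r = atomic_load_explicit(&rings[i],
                                                  memory_order_relaxed);

            if (!r)
                continue;
            total += r->packets;
        }
        return total;
    }

    int main(void)
    {
        static struct ring r0 = { .packets = 42 };

        atomic_store_explicit(&rings[0], &r0, memory_order_relaxed);
        clear_ring(0);                       /* teardown publishes NULL */
        return count_packets() == 0 ? 0 : 1;
    }
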
#define MVNETA_TX_IN_PRGRS BIT(1)
#define MVNETA_TX_FIFO_EMPTY BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
+/* Only exists on Armada XP and Armada 370 */
#define MVNETA_SERDES_CFG 0x24A0
#define MVNETA_SGMII_SERDES_PROTO 0x0cc7
#define MVNETA_QSGMII_SERDES_PROTO 0x0667
+#define MVNETA_HSGMII_SERDES_PROTO 0x1107
#define MVNETA_TYPE_PRIO 0x24bc
#define MVNETA_FORCE_UNI BIT(21)
#define MVNETA_TXQ_CMD_1 0x24e4
return 0;
}
-static int mvneta_comphy_init(struct mvneta_port *pp)
+static int mvneta_comphy_init(struct mvneta_port *pp, phy_interface_t interface)
{
int ret;
- if (!pp->comphy)
- return 0;
-
- ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET,
- pp->phy_interface);
+ ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, interface);
if (ret)
return ret;
return phy_power_on(pp->comphy);
}
+static int mvneta_config_interface(struct mvneta_port *pp,
+ phy_interface_t interface)
+{
+ int ret = 0;
+
+ if (pp->comphy) {
+ if (interface == PHY_INTERFACE_MODE_SGMII ||
+ interface == PHY_INTERFACE_MODE_1000BASEX ||
+ interface == PHY_INTERFACE_MODE_2500BASEX) {
+ ret = mvneta_comphy_init(pp, interface);
+ }
+ } else {
+ switch (interface) {
+ case PHY_INTERFACE_MODE_QSGMII:
+ mvreg_write(pp, MVNETA_SERDES_CFG,
+ MVNETA_QSGMII_SERDES_PROTO);
+ break;
+
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ mvreg_write(pp, MVNETA_SERDES_CFG,
+ MVNETA_SGMII_SERDES_PROTO);
+ break;
+
+ case PHY_INTERFACE_MODE_2500BASEX:
+ mvreg_write(pp, MVNETA_SERDES_CFG,
+ MVNETA_HSGMII_SERDES_PROTO);
+ break;
+ default:
+ break;
+ }
+ }
+
+ pp->phy_interface = interface;
+
+ return ret;
+}
+
static void mvneta_start_dev(struct mvneta_port *pp)
{
int cpu;
- WARN_ON(mvneta_comphy_init(pp));
+ WARN_ON(mvneta_config_interface(pp, pp->phy_interface));
mvneta_max_rx_size_set(pp, pp->pkt_size);
mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
/* When at 2.5G, the link partner can send frames with shortened
* preambles.
*/
- if (state->speed == SPEED_2500)
+ if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE;
- if (pp->comphy && pp->phy_interface != state->interface &&
- (state->interface == PHY_INTERFACE_MODE_SGMII ||
- state->interface == PHY_INTERFACE_MODE_1000BASEX ||
- state->interface == PHY_INTERFACE_MODE_2500BASEX)) {
- pp->phy_interface = state->interface;
-
- WARN_ON(phy_power_off(pp->comphy));
- WARN_ON(mvneta_comphy_init(pp));
+ if (pp->phy_interface != state->interface) {
+ if (pp->comphy)
+ WARN_ON(phy_power_off(pp->comphy));
+ WARN_ON(mvneta_config_interface(pp, state->interface));
}
if (new_ctrl0 != gmac_ctrl0)
/* MAC Cause register should be cleared */
mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
- if (phy_mode == PHY_INTERFACE_MODE_QSGMII)
- mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
- else if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
- phy_interface_mode_is_8023z(phy_mode))
- mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
- else if (!phy_interface_mode_is_rgmii(phy_mode))
+ if (phy_mode != PHY_INTERFACE_MODE_QSGMII &&
+ phy_mode != PHY_INTERFACE_MODE_SGMII &&
+ !phy_interface_mode_is_8023z(phy_mode) &&
+ !phy_interface_mode_is_rgmii(phy_mode))
return -EINVAL;
return 0;
if (err < 0)
goto err_netdev;
- err = mvneta_port_power_up(pp, phy_mode);
+ err = mvneta_port_power_up(pp, pp->phy_interface);
if (err < 0) {
dev_err(&pdev->dev, "can't power up port\n");
- goto err_netdev;
+ return err;
}
/* Armada3700 network controller does not support per-cpu
static inline u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
{
- u16 v;
+ u16 v = 0;
__gm_phy_read(hw, port, reg, &v);
return v;
}
bool manual_buffer;
u32 cable_len;
u32 xoff;
+ u16 port_buff_cell_sz;
};
#define MLX5E_MAX_DSCP (64)
[MLX5E_400GAUI_8] = 400000,
};
+bool mlx5e_ptys_ext_supported(struct mlx5_core_dev *mdev)
+{
+ struct mlx5e_port_eth_proto eproto;
+ int err;
+
+ if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet))
+ return true;
+
+ err = mlx5_port_query_eth_proto(mdev, 1, true, &eproto);
+ if (err)
+ return false;
+
+ return !!eproto.cap;
+}
+
static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev,
const u32 **arr, u32 *size,
bool force_legacy)
{
- bool ext = force_legacy ? false : MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+ bool ext = force_legacy ? false : mlx5e_ptys_ext_supported(mdev);
*size = ext ? ARRAY_SIZE(mlx5e_ext_link_speed) :
ARRAY_SIZE(mlx5e_link_speed);
bool ext;
int err;
- ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+ ext = mlx5e_ptys_ext_supported(mdev);
err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
if (err)
goto out;
int err;
int i;
- ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+ ext = mlx5e_ptys_ext_supported(mdev);
err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
if (err)
return err;
int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
bool force_legacy);
-
+bool mlx5e_ptys_ext_supported(struct mlx5_core_dev *mdev);
int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out);
int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in);
int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer);
int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
struct mlx5e_port_buffer *port_buffer)
{
+ u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
struct mlx5_core_dev *mdev = priv->mdev;
int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
u32 total_used = 0;
port_buffer->buffer[i].epsb =
MLX5_GET(bufferx_reg, buffer, epsb);
port_buffer->buffer[i].size =
- MLX5_GET(bufferx_reg, buffer, size) << MLX5E_BUFFER_CELL_SHIFT;
+ MLX5_GET(bufferx_reg, buffer, size) * port_buff_cell_sz;
port_buffer->buffer[i].xon =
- MLX5_GET(bufferx_reg, buffer, xon_threshold) << MLX5E_BUFFER_CELL_SHIFT;
+ MLX5_GET(bufferx_reg, buffer, xon_threshold) * port_buff_cell_sz;
port_buffer->buffer[i].xoff =
- MLX5_GET(bufferx_reg, buffer, xoff_threshold) << MLX5E_BUFFER_CELL_SHIFT;
+ MLX5_GET(bufferx_reg, buffer, xoff_threshold) * port_buff_cell_sz;
total_used += port_buffer->buffer[i].size;
mlx5e_dbg(HW, priv, "buffer %d: size=%d, xon=%d, xoff=%d, epsb=%d, lossy=%d\n", i,
}
port_buffer->port_buffer_size =
- MLX5_GET(pbmc_reg, out, port_buffer_size) << MLX5E_BUFFER_CELL_SHIFT;
+ MLX5_GET(pbmc_reg, out, port_buffer_size) * port_buff_cell_sz;
port_buffer->spare_buffer_size =
port_buffer->port_buffer_size - total_used;
static int port_set_buffer(struct mlx5e_priv *priv,
struct mlx5e_port_buffer *port_buffer)
{
+ u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
struct mlx5_core_dev *mdev = priv->mdev;
int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
- void *buffer;
void *in;
int err;
int i;
goto out;
for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
- buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]);
-
- MLX5_SET(bufferx_reg, buffer, size,
- port_buffer->buffer[i].size >> MLX5E_BUFFER_CELL_SHIFT);
- MLX5_SET(bufferx_reg, buffer, lossy,
- port_buffer->buffer[i].lossy);
- MLX5_SET(bufferx_reg, buffer, xoff_threshold,
- port_buffer->buffer[i].xoff >> MLX5E_BUFFER_CELL_SHIFT);
- MLX5_SET(bufferx_reg, buffer, xon_threshold,
- port_buffer->buffer[i].xon >> MLX5E_BUFFER_CELL_SHIFT);
+ void *buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]);
+ u64 size = port_buffer->buffer[i].size;
+ u64 xoff = port_buffer->buffer[i].xoff;
+ u64 xon = port_buffer->buffer[i].xon;
+
+ do_div(size, port_buff_cell_sz);
+ do_div(xoff, port_buff_cell_sz);
+ do_div(xon, port_buff_cell_sz);
+ MLX5_SET(bufferx_reg, buffer, size, size);
+ MLX5_SET(bufferx_reg, buffer, lossy, port_buffer->buffer[i].lossy);
+ MLX5_SET(bufferx_reg, buffer, xoff_threshold, xoff);
+ MLX5_SET(bufferx_reg, buffer, xon_threshold, xon);
}
err = mlx5e_port_set_pbmc(mdev, in);
}
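
Buffer sizes are now converted between bytes and device cells with the queried port_buff_cell_sz instead of a hard-coded 128-byte shift; since the cell size need not be a power of two, the shift becomes a division, and do_div() is used because the byte counts are 64-bit and the driver must also build on 32-bit hosts (do_div() divides its first argument in place and returns the remainder). A plain-C illustration of the conversion:

    #include <stdint.h>
    #include <stdio.h>

    /* Convert a byte count to device cells, rounding down, for an arbitrary
     * (not necessarily power-of-two) cell size queried from the device. */
    static uint64_t bytes_to_cells(uint64_t bytes, uint16_t cell_sz)
    {
        return bytes / cell_sz;   /* do_div(bytes, cell_sz) in kernel code */
    }

    int main(void)
    {
        /* 128-byte cells match the old "<< 7 / >> 7" behaviour ... */
        printf("%llu\n", (unsigned long long)bytes_to_cells(4096, 128));
        /* ... but e.g. 144-byte cells would be mis-sized by a shift */
        printf("%llu\n", (unsigned long long)bytes_to_cells(4096, 144));
        return 0;
    }
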
static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
- u32 xoff, unsigned int max_mtu)
+ u32 xoff, unsigned int max_mtu, u16 port_buff_cell_sz)
{
int i;
}
if (port_buffer->buffer[i].size <
- (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) {
+ (xoff + max_mtu + port_buff_cell_sz)) {
pr_err("buffer_size[%d]=%d is not enough for lossless buffer\n",
i, port_buffer->buffer[i].size);
return -ENOMEM;
* @pfc_en: <input> current pfc configuration
* @buffer: <input> current prio to buffer mapping
* @xoff: <input> xoff value
+ * @port_buff_cell_sz: <input> port buffer cell_size
* @port_buffer: <output> port receive buffer configuration
* @change: <output>
*
* sets change to true if buffer configuration was modified.
*/
static int update_buffer_lossy(unsigned int max_mtu,
- u8 pfc_en, u8 *buffer, u32 xoff,
+ u8 pfc_en, u8 *buffer, u32 xoff, u16 port_buff_cell_sz,
struct mlx5e_port_buffer *port_buffer,
bool *change)
{
}
if (changed) {
- err = update_xoff_threshold(port_buffer, xoff, max_mtu);
+ err = update_xoff_threshold(port_buffer, xoff, max_mtu, port_buff_cell_sz);
if (err)
return err;
u32 *buffer_size,
u8 *prio2buffer)
{
+ u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
struct mlx5e_port_buffer port_buffer;
u32 xoff = calculate_xoff(priv, mtu);
bool update_prio2buffer = false;
if (change & MLX5E_PORT_BUFFER_CABLE_LEN) {
update_buffer = true;
- err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
+ err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz);
if (err)
return err;
}
if (err)
return err;
- err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff,
+ err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff, port_buff_cell_sz,
&port_buffer, &update_buffer);
if (err)
return err;
if (err)
return err;
- err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer,
- xoff, &port_buffer, &update_buffer);
+ err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, xoff,
+ port_buff_cell_sz, &port_buffer, &update_buffer);
if (err)
return err;
return -EINVAL;
update_buffer = true;
- err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
+ err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz);
if (err)
return err;
}
/* Need to update buffer configuration if xoff value is changed */
if (!update_buffer && xoff != priv->dcbx.xoff) {
update_buffer = true;
- err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
+ err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz);
if (err)
return err;
}
#include "port.h"
#define MLX5E_MAX_BUFFER 8
-#define MLX5E_BUFFER_CELL_SHIFT 7
#define MLX5E_DEFAULT_CABLE_LEN 7 /* 7 meters */
#define MLX5_BUFFER_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, pcam_reg) && \
#include <linux/rculist.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>
-#include <linux/rwlock.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <net/netevent.h>
mlx5e_rep_indr_setup_block(struct net_device *netdev,
struct mlx5e_rep_priv *rpriv,
struct flow_block_offload *f,
- flow_setup_cb_t *setup_cb)
+ flow_setup_cb_t *setup_cb,
+ void *data,
+ void (*cleanup)(struct flow_block_cb *block_cb))
{
struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
struct mlx5e_rep_indr_block_priv *indr_priv;
list_add(&indr_priv->list,
&rpriv->uplink_priv.tc_indr_block_priv_list);
- block_cb = flow_block_cb_alloc(setup_cb, indr_priv, indr_priv,
- mlx5e_rep_indr_block_unbind);
+ block_cb = flow_indr_block_cb_alloc(setup_cb, indr_priv, indr_priv,
+ mlx5e_rep_indr_block_unbind,
+ f, netdev, data, rpriv,
+ cleanup);
if (IS_ERR(block_cb)) {
list_del(&indr_priv->list);
kfree(indr_priv);
if (!block_cb)
return -ENOENT;
- flow_block_cb_remove(block_cb, f);
+ flow_indr_block_cb_remove(block_cb, f);
list_del(&block_cb->driver_list);
return 0;
default:
static
int mlx5e_rep_indr_setup_cb(struct net_device *netdev, void *cb_priv,
- enum tc_setup_type type, void *type_data)
+ enum tc_setup_type type, void *type_data,
+ void *data,
+ void (*cleanup)(struct flow_block_cb *block_cb))
{
switch (type) {
case TC_SETUP_BLOCK:
return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data,
- mlx5e_rep_indr_setup_tc_cb);
+ mlx5e_rep_indr_setup_tc_cb,
+ data, cleanup);
case TC_SETUP_FT:
return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data,
- mlx5e_rep_indr_setup_ft_cb);
+ mlx5e_rep_indr_setup_ft_cb,
+ data, cleanup);
default:
return -EOPNOTSUPP;
}
void mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv)
{
flow_indr_dev_unregister(mlx5e_rep_indr_setup_cb, rpriv,
- mlx5e_rep_indr_setup_tc_cb);
+ mlx5e_rep_indr_block_unbind);
}
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
struct mlx5_ct_entry *entry = ptr;
mlx5_tc_ct_entry_del_rules(ct_priv, entry);
+ kfree(entry);
}
static void
return 0;
}
+#define MLX5E_BUFFER_CELL_SHIFT 7
+
+static u16 mlx5e_query_port_buffers_cell_size(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 out[MLX5_ST_SZ_DW(sbcam_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(sbcam_reg)] = {};
+
+ if (!MLX5_CAP_GEN(mdev, sbcam_reg))
+ return (1 << MLX5E_BUFFER_CELL_SHIFT);
+
+ if (mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_SBCAM, 0, 0))
+ return (1 << MLX5E_BUFFER_CELL_SHIFT);
+
+ return MLX5_GET(sbcam_reg, out, cap_cell_size);
+}
+
void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
{
struct mlx5e_dcbx *dcbx = &priv->dcbx;
if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
priv->dcbx.cap |= DCB_CAP_DCBX_HOST;
+ priv->dcbx.port_buff_cell_sz = mlx5e_query_port_buffers_cell_size(priv);
priv->dcbx.manual_buffer = false;
priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN;
struct ptys2ethtool_config **arr,
u32 *size)
{
- bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+ bool ext = mlx5e_ptys_ext_supported(mdev);
*arr = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
*size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
struct ethtool_link_ksettings *link_ksettings)
{
unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
- bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+ bool ext = mlx5e_ptys_ext_supported(mdev);
ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext);
}
__func__, err);
goto err_query_regs;
}
- ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+ ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability);
eth_proto_cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
eth_proto_capability);
eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
autoneg = link_ksettings->base.autoneg;
speed = link_ksettings->base.speed;
- ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+ ext_supported = mlx5e_ptys_ext_supported(mdev);
ext = ext_requested(autoneg, adver, ext_supported);
if (!ext_supported && ext)
return -EOPNOTSUPP;
mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
mutex_unlock(&priv->state_lock);
- if (mlx5_vxlan_allowed(priv->mdev->vxlan))
- udp_tunnel_get_rx_info(netdev);
-
return err;
}
if (err)
goto err_destroy_flow_steering;
+#ifdef CONFIG_MLX5_EN_ARFS
+ priv->netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(priv->mdev);
+#endif
+
return 0;
err_destroy_flow_steering:
rtnl_lock();
if (netif_running(netdev))
mlx5e_open(netdev);
+ if (mlx5_vxlan_allowed(priv->mdev->vxlan))
+ udp_tunnel_get_rx_info(netdev);
netif_device_attach(netdev);
rtnl_unlock();
}
rtnl_lock();
if (netif_running(priv->netdev))
mlx5e_close(priv->netdev);
+ if (mlx5_vxlan_allowed(priv->mdev->vxlan))
+ udp_tunnel_drop_rx_info(priv->netdev);
netif_device_detach(priv->netdev);
rtnl_unlock();
/* netdev init */
netif_carrier_off(netdev);
-#ifdef CONFIG_MLX5_EN_ARFS
- netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(mdev);
-#endif
-
return 0;
err_free_cpumask:
struct mlx5e_rep_priv *rpriv)
{
/* Offloaded flow rule is allowed to duplicate on non-uplink representor
- * sharing tc block with other slaves of a lag device.
+ * sharing tc block with other slaves of a lag device. Rpriv can be NULL if this
+ * function is called from NIC mode.
*/
- return netif_is_lag_port(dev) && rpriv->rep->vport != MLX5_VPORT_UPLINK;
+ return netif_is_lag_port(dev) && rpriv && rpriv->rep->vport != MLX5_VPORT_UPLINK;
}
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
rcu_read_lock();
flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
- rcu_read_unlock();
if (flow) {
/* Same flow rule offloaded to non-uplink representor sharing tc block,
* just return 0.
*/
if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev)
- goto out;
+ goto rcu_unlock;
NL_SET_ERR_MSG_MOD(extack,
"flow cookie already exists, ignoring");
"flow cookie %lx already exists, ignoring\n",
f->cookie);
err = -EEXIST;
- goto out;
+ goto rcu_unlock;
}
+rcu_unlock:
+ rcu_read_unlock();
+ if (flow)
+ goto out;
trace_mlx5e_configure_flower(f);
err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
}
/* Create ingress allow rule */
- memset(spec, 0, sizeof(*spec));
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec,
return 0;
}
-static int mlx5_eeprom_page(int offset)
+static int mlx5_query_module_id(struct mlx5_core_dev *dev, int module_num,
+ u8 *module_id)
+{
+ u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {};
+ u32 out[MLX5_ST_SZ_DW(mcia_reg)];
+ int err, status;
+ u8 *ptr;
+
+ MLX5_SET(mcia_reg, in, i2c_device_address, MLX5_I2C_ADDR_LOW);
+ MLX5_SET(mcia_reg, in, module, module_num);
+ MLX5_SET(mcia_reg, in, device_address, 0);
+ MLX5_SET(mcia_reg, in, page_number, 0);
+ MLX5_SET(mcia_reg, in, size, 1);
+ MLX5_SET(mcia_reg, in, l, 0);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_MCIA, 0, 0);
+ if (err)
+ return err;
+
+ status = MLX5_GET(mcia_reg, out, status);
+ if (status) {
+ mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n",
+ status);
+ return -EIO;
+ }
+ ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
+
+ *module_id = ptr[0];
+
+ return 0;
+}
+
+static int mlx5_qsfp_eeprom_page(u16 offset)
{
if (offset < MLX5_EEPROM_PAGE_LENGTH)
/* Addresses between 0-255 - page 00 */
MLX5_EEPROM_HIGH_PAGE_LENGTH);
}
-static int mlx5_eeprom_high_page_offset(int page_num)
+static int mlx5_qsfp_eeprom_high_page_offset(int page_num)
{
if (!page_num) /* Page 0 always start from low page */
return 0;
return page_num * MLX5_EEPROM_HIGH_PAGE_LENGTH;
}
+static void mlx5_qsfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset)
+{
+ *i2c_addr = MLX5_I2C_ADDR_LOW;
+ *page_num = mlx5_qsfp_eeprom_page(*offset);
+ *offset -= mlx5_qsfp_eeprom_high_page_offset(*page_num);
+}
+
+static void mlx5_sfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset)
+{
+ *i2c_addr = MLX5_I2C_ADDR_LOW;
+ *page_num = 0;
+
+ if (*offset < MLX5_EEPROM_PAGE_LENGTH)
+ return;
+
+ *i2c_addr = MLX5_I2C_ADDR_HIGH;
+ *offset -= MLX5_EEPROM_PAGE_LENGTH;
+}
+
int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
u16 offset, u16 size, u8 *data)
{
- int module_num, page_num, status, err;
+ int module_num, status, err, page_num = 0;
+ u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {};
u32 out[MLX5_ST_SZ_DW(mcia_reg)];
- u32 in[MLX5_ST_SZ_DW(mcia_reg)];
- u16 i2c_addr;
- void *ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
+ u16 i2c_addr = 0;
+ u8 module_id;
+ void *ptr;
err = mlx5_query_module_num(dev, &module_num);
if (err)
return err;
- memset(in, 0, sizeof(in));
- size = min_t(int, size, MLX5_EEPROM_MAX_BYTES);
-
- /* Get the page number related to the given offset */
- page_num = mlx5_eeprom_page(offset);
+ err = mlx5_query_module_id(dev, module_num, &module_id);
+ if (err)
+ return err;
- /* Set the right offset according to the page number,
- * For page_num > 0, relative offset is always >= 128 (high page).
- */
- offset -= mlx5_eeprom_high_page_offset(page_num);
+ switch (module_id) {
+ case MLX5_MODULE_ID_SFP:
+ mlx5_sfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
+ break;
+ case MLX5_MODULE_ID_QSFP:
+ case MLX5_MODULE_ID_QSFP_PLUS:
+ case MLX5_MODULE_ID_QSFP28:
+ mlx5_qsfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
+ break;
+ default:
+ mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id);
+ return -EINVAL;
+ }
if (offset + size > MLX5_EEPROM_PAGE_LENGTH)
/* Cross pages read, read until offset 256 in low page */
size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
- i2c_addr = MLX5_I2C_ADDR_LOW;
+ size = min_t(int, size, MLX5_EEPROM_MAX_BYTES);
MLX5_SET(mcia_reg, in, l, 0);
MLX5_SET(mcia_reg, in, module, module_num);
return -EIO;
}
+ ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
memcpy(data, ptr, size);
return size;
u16 num_pages;
int err;
- mutex_init(&mlxsw_pci->cmd.lock);
- init_waitqueue_head(&mlxsw_pci->cmd.wait);
-
mlxsw_pci->core = mlxsw_core;
mbox = mlxsw_cmd_mbox_alloc();
if (!mbox)
return -ENOMEM;
- err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
- if (err)
- goto mbox_put;
-
- err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
- if (err)
- goto err_out_mbox_alloc;
-
err = mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id);
if (err)
goto err_sw_reset;
mlxsw_pci_free_irq_vectors(mlxsw_pci);
err_alloc_irq:
err_sw_reset:
- mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
-err_out_mbox_alloc:
- mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
mbox_put:
mlxsw_cmd_mbox_free(mbox);
return err;
mlxsw_pci_aqs_fini(mlxsw_pci);
mlxsw_pci_fw_area_fini(mlxsw_pci);
mlxsw_pci_free_irq_vectors(mlxsw_pci);
- mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
- mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
}
static struct mlxsw_pci_queue *
.features = MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
};
+static int mlxsw_pci_cmd_init(struct mlxsw_pci *mlxsw_pci)
+{
+ int err;
+
+ mutex_init(&mlxsw_pci->cmd.lock);
+ init_waitqueue_head(&mlxsw_pci->cmd.wait);
+
+ err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
+ if (err)
+ goto err_in_mbox_alloc;
+
+ err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
+ if (err)
+ goto err_out_mbox_alloc;
+
+ return 0;
+
+err_out_mbox_alloc:
+ mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
+err_in_mbox_alloc:
+ mutex_destroy(&mlxsw_pci->cmd.lock);
+ return err;
+}
+
+static void mlxsw_pci_cmd_fini(struct mlxsw_pci *mlxsw_pci)
+{
+ mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
+ mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
+ mutex_destroy(&mlxsw_pci->cmd.lock);
+}
+
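
mlxsw_pci_cmd_init()/mlxsw_pci_cmd_fini() above gather the command-interface setup into a matched pair using the usual kernel goto-unwind idiom: each failure label releases everything acquired before it, in reverse order, and fini mirrors init. A generic sketch of the idiom:

    #include <stdlib.h>

    struct ctx { void *a, *b; };

    static int ctx_init(struct ctx *c)
    {
        c->a = malloc(64);
        if (!c->a)
            goto err_a;
        c->b = malloc(64);
        if (!c->b)
            goto err_b;
        return 0;

    err_b:
        free(c->a);    /* unwind in reverse order of acquisition */
    err_a:
        return -1;
    }

    static void ctx_fini(struct ctx *c)
    {
        free(c->b);    /* mirror of init, also in reverse order */
        free(c->a);
    }

    int main(void)
    {
        struct ctx c;

        if (ctx_init(&c))
            return 1;
        ctx_fini(&c);
        return 0;
    }
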
static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
const char *driver_name = pdev->driver->name;
mlxsw_pci->pdev = pdev;
pci_set_drvdata(pdev, mlxsw_pci);
+ err = mlxsw_pci_cmd_init(mlxsw_pci);
+ if (err)
+ goto err_pci_cmd_init;
+
mlxsw_pci->bus_info.device_kind = driver_name;
mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
mlxsw_pci->bus_info.dev = &pdev->dev;
return 0;
err_bus_device_register:
+ mlxsw_pci_cmd_fini(mlxsw_pci);
+err_pci_cmd_init:
iounmap(mlxsw_pci->hw_addr);
err_ioremap:
err_pci_resource_len_check:
struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
+ mlxsw_pci_cmd_fini(mlxsw_pci);
iounmap(mlxsw_pci->hw_addr);
pci_release_regions(mlxsw_pci->pdev);
pci_disable_device(mlxsw_pci->pdev);
lossy = !(pfc || pause_en);
thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
- mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, &thres_cells);
+ thres_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, thres_cells);
delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
pfc, pause_en);
- mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, &delay_cells);
+ delay_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, delay_cells);
total_cells = thres_cells + delay_cells;
taken_headroom_cells += total_cells;
return NULL;
}
-static inline void
+static inline u32
mlxsw_sp_port_headroom_8x_adjust(const struct mlxsw_sp_port *mlxsw_sp_port,
- u16 *p_size)
+ u32 size_cells)
{
/* Ports with eight lanes use two headroom buffers between which the
* configured headroom size is split. Therefore, multiply the calculated
* headroom size by two.
*/
- if (mlxsw_sp_port->mapping.width != 8)
- return;
- *p_size *= 2;
+ return mlxsw_sp_port->mapping.width == 8 ? 2 * size_cells : size_cells;
}
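
Returning the adjusted value instead of mutating through a u16 pointer matters because one caller (the SPAN buffer hunk further down) previously had to cast a u32's address to u16 *, which updates only half of the variable and, on big-endian systems, the wrong half. A small demonstration of why that cast is unsafe (illustrative, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    /* Old shape: mutate through a u16 pointer. */
    static void double_in_place(uint16_t *p) { *p *= 2; }

    /* New shape: take and return the full-width value. */
    static uint32_t double_val(uint32_t v) { return 2 * v; }

    int main(void)
    {
        uint32_t size = 70000;              /* does not fit in 16 bits */

        double_in_place((uint16_t *)&size); /* aliasing cast: touches only
                                               half of 'size'; which half
                                               depends on endianness */
        printf("via cast:  %u\n", (unsigned)size);  /* wrong everywhere */

        printf("via value: %u\n", (unsigned)double_val(70000)); /* 140000 */
        return 0;
    }
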
enum mlxsw_sp_flood_type {
if (i == MLXSW_SP_PB_UNUSED)
continue;
- mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, &size);
+ size = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, size);
mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size);
}
mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
}
fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
- if (WARN_ON(!fib_work))
+ if (!fib_work)
return NOTIFY_BAD;
fib_work->mlxsw_sp = router->mlxsw_sp;
speed = 0;
buffsize = mlxsw_sp_span_buffsize_get(mlxsw_sp, speed, mtu);
- mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, (u16 *) &buffsize);
+ buffsize = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, buffsize);
mlxsw_reg_sbib_pack(sbib_pl, mlxsw_sp_port->local_port, buffsize);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
}
* @greedy_return: If Set it forces the device to return absolutely all RxD
* that are consumed and still on board when a timer interrupt
* triggers. If Clear, then if the device has already returned
- * RxD before current timer interrupt trigerred and after the
+ * RxD before current timer interrupt triggered and after the
* previous timer interrupt triggered, then the device is not
* forced to return the rest of the consumed RxD that it has
* on board which account for a byte count less than the one
flush_work(&app_priv->cmsg_work);
flow_indr_dev_unregister(nfp_flower_indr_setup_tc_cb, app,
- nfp_flower_setup_indr_block_cb);
+ nfp_flower_setup_indr_tc_release);
if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
nfp_flower_qos_cleanup(app);
struct tc_cls_matchall_offload *flow);
void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb);
int nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
- enum tc_setup_type type, void *type_data);
-int nfp_flower_setup_indr_block_cb(enum tc_setup_type type, void *type_data,
- void *cb_priv);
+ enum tc_setup_type type, void *type_data,
+ void *data,
+ void (*cleanup)(struct flow_block_cb *block_cb));
+void nfp_flower_setup_indr_tc_release(void *cb_priv);
void
__nfp_flower_non_repr_priv_get(struct nfp_flower_non_repr_priv *non_repr_priv);
return NULL;
}
-int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
- void *type_data, void *cb_priv)
+static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
{
struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
struct flow_cls_offload *flower = type_data;
}
}
-static void nfp_flower_setup_indr_tc_release(void *cb_priv)
+void nfp_flower_setup_indr_tc_release(void *cb_priv)
{
struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
static int
nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
- struct flow_block_offload *f)
+ struct flow_block_offload *f, void *data,
+ void (*cleanup)(struct flow_block_cb *block_cb))
{
struct nfp_flower_indr_block_cb_priv *cb_priv;
struct nfp_flower_priv *priv = app->priv;
cb_priv->app = app;
list_add(&cb_priv->list, &priv->indr_block_cb_priv);
- block_cb = flow_block_cb_alloc(nfp_flower_setup_indr_block_cb,
- cb_priv, cb_priv,
- nfp_flower_setup_indr_tc_release);
+ block_cb = flow_indr_block_cb_alloc(nfp_flower_setup_indr_block_cb,
+ cb_priv, cb_priv,
+ nfp_flower_setup_indr_tc_release,
+ f, netdev, data, app, cleanup);
if (IS_ERR(block_cb)) {
list_del(&cb_priv->list);
kfree(cb_priv);
if (!block_cb)
return -ENOENT;
- flow_block_cb_remove(block_cb, f);
+ flow_indr_block_cb_remove(block_cb, f);
list_del(&block_cb->driver_list);
return 0;
default:
int
nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
- enum tc_setup_type type, void *type_data)
+ enum tc_setup_type type, void *type_data,
+ void *data,
+ void (*cleanup)(struct flow_block_cb *block_cb))
{
if (!nfp_fl_is_netdev_to_offload(netdev))
return -EOPNOTSUPP;
switch (type) {
case TC_SETUP_BLOCK:
return nfp_flower_setup_indr_tc_block(netdev, cb_priv,
- type_data);
+ type_data, data, cleanup);
default:
return -EOPNOTSUPP;
}
#define PCH_GBE_RH_ALM_FULL_8 0x00001000 /* 8 words */
#define PCH_GBE_RH_ALM_FULL_16 0x00002000 /* 16 words */
#define PCH_GBE_RH_ALM_FULL_32 0x00003000 /* 32 words */
-/* RX FIFO Read Triger Threshold */
+/* RX FIFO Read Trigger Threshold */
#define PCH_GBE_RH_RD_TRG_4 0x00000000 /* 4 words */
#define PCH_GBE_RH_RD_TRG_8 0x00000200 /* 8 words */
#define PCH_GBE_RH_RD_TRG_16 0x00000400 /* 16 words */
ring->rx_pending = lif->nrxq_descs;
}
+static void ionic_set_ringsize(struct ionic_lif *lif, void *arg)
+{
+ struct ethtool_ringparam *ring = arg;
+
+ lif->ntxq_descs = ring->tx_pending;
+ lif->nrxq_descs = ring->rx_pending;
+}
+
static int ionic_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct ionic_lif *lif = netdev_priv(netdev);
- bool running;
- int err;
if (ring->rx_mini_pending || ring->rx_jumbo_pending) {
netdev_info(netdev, "Changing jumbo or mini descriptors not supported\n");
ring->rx_pending == lif->nrxq_descs)
return 0;
- err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET);
- if (err)
- return err;
-
- running = test_bit(IONIC_LIF_F_UP, lif->state);
- if (running)
- ionic_stop(netdev);
-
- lif->ntxq_descs = ring->tx_pending;
- lif->nrxq_descs = ring->rx_pending;
-
- if (running)
- ionic_open(netdev);
- clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
-
- return 0;
+ return ionic_reset_queues(lif, ionic_set_ringsize, ring);
}
static void ionic_get_channels(struct net_device *netdev,
ch->combined_count = lif->nxqs;
}
+static void ionic_set_queuecount(struct ionic_lif *lif, void *arg)
+{
+ struct ethtool_channels *ch = arg;
+
+ lif->nxqs = ch->combined_count;
+}
+
static int ionic_set_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
struct ionic_lif *lif = netdev_priv(netdev);
- bool running;
- int err;
if (!ch->combined_count || ch->other_count ||
ch->rx_count || ch->tx_count)
if (ch->combined_count == lif->nxqs)
return 0;
- err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET);
- if (err)
- return err;
-
- running = test_bit(IONIC_LIF_F_UP, lif->state);
- if (running)
- ionic_stop(netdev);
-
- lif->nxqs = ch->combined_count;
-
- if (running)
- ionic_open(netdev);
- clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
-
- return 0;
+ return ionic_reset_queues(lif, ionic_set_queuecount, ch);
}
static u32 ionic_get_priv_flags(struct net_device *netdev)
u16 link_status;
bool link_up;
- if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
+ if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state) ||
+ test_bit(IONIC_LIF_F_QUEUE_RESET, lif->state))
return;
link_status = le16_to_cpu(lif->info->status.link_status);
netdev->hw_features |= netdev->hw_enc_features;
netdev->features |= netdev->hw_features;
+ netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES;
netdev->priv_flags |= IFF_UNICAST_FLT |
IFF_LIVE_ADDR_CHANGE;
return err;
netdev->mtu = new_mtu;
- err = ionic_reset_queues(lif);
+ err = ionic_reset_queues(lif, NULL, NULL);
return err;
}
netdev_info(lif->netdev, "Tx Timeout recovery\n");
rtnl_lock();
- ionic_reset_queues(lif);
+ ionic_reset_queues(lif, NULL, NULL);
rtnl_unlock();
}
if (err)
goto err_out;
+ err = netif_set_real_num_tx_queues(netdev, lif->nxqs);
+ if (err)
+ goto err_txrx_deinit;
+
+ err = netif_set_real_num_rx_queues(netdev, lif->nxqs);
+ if (err)
+ goto err_txrx_deinit;
+
/* don't start the queues until we have link */
if (netif_carrier_ok(netdev)) {
err = ionic_start_queues(lif);
if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state))
return;
- ionic_txrx_disable(lif);
netif_tx_disable(lif->netdev);
+ ionic_txrx_disable(lif);
}
int ionic_stop(struct net_device *netdev)
{
struct ionic_lif *lif = netdev_priv(netdev);
- if (!netif_device_present(netdev))
+ if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
return 0;
ionic_stop_queues(lif);
.ndo_get_vf_stats = ionic_get_vf_stats,
};
-int ionic_reset_queues(struct ionic_lif *lif)
+int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg)
{
bool running;
int err = 0;
- /* Put off the next watchdog timeout */
- netif_trans_update(lif->netdev);
-
err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET);
if (err)
return err;
running = netif_running(lif->netdev);
- if (running)
+ if (running) {
+ netif_device_detach(lif->netdev);
err = ionic_stop(lif->netdev);
- if (!err && running)
- ionic_open(lif->netdev);
+ if (err)
+ goto reset_out;
+ }
+
+ if (cb)
+ cb(lif, arg);
+
+ if (running) {
+ err = ionic_open(lif->netdev);
+ netif_device_attach(lif->netdev);
+ }
+reset_out:
clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
return err;
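The callback is invoked at the only point where queue geometry may safely change: after the LIF has been detached and stopped, but before it is re-opened. This is what lets the ethtool paths above collapse into the trivial ionic_set_ringsize()/ionic_set_queuecount() setters passed in together with their ethtool argument.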
return (units * div) / mult;
}
+typedef void (*ionic_reset_cb)(struct ionic_lif *lif, void *arg);
+
void ionic_link_status_check_request(struct ionic_lif *lif);
void ionic_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *ns);
int ionic_open(struct net_device *netdev);
int ionic_stop(struct net_device *netdev);
-int ionic_reset_queues(struct ionic_lif *lif);
+int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg);
static inline void debug_stats_txq_post(struct ionic_qcq *qcq,
struct ionic_txq_desc *desc, bool dbell)
struct qed_dbg_feature dbg_features[DBG_FEATURE_NUM];
u8 engine_for_debug;
bool disable_ilt_dump;
+ bool dbg_bin_dump;
+
DECLARE_HASHTABLE(connections, 10);
const struct firmware *firmware;
vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
}
- iids->vf_cids += vf_cids * p_mngr->vf_count;
+ iids->vf_cids = vf_cids;
iids->tids += vf_tids * p_mngr->vf_count;
DP_VERBOSE(p_hwfn, QED_MSG_ILT,
return p_blk;
}
+static void qed_cxt_ilt_blk_reset(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
+ u32 cli_idx, blk_idx;
+
+ for (cli_idx = 0; cli_idx < MAX_ILT_CLIENTS; cli_idx++) {
+ for (blk_idx = 0; blk_idx < ILT_CLI_PF_BLOCKS; blk_idx++)
+ clients[cli_idx].pf_blks[blk_idx].total_size = 0;
+
+ for (blk_idx = 0; blk_idx < ILT_CLI_VF_BLOCKS; blk_idx++)
+ clients[cli_idx].vf_blks[blk_idx].total_size = 0;
+ }
+}
+
int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);
+ /* Reset all ILT blocks at the beginning of ILT computing in order
+ * to prevent memory allocation for irrelevant blocks afterwards.
+ */
+ qed_cxt_ilt_blk_reset(p_hwfn);
+
DP_VERBOSE(p_hwfn, QED_MSG_ILT,
"hwfn [%d] - Set context manager starting line to be 0x%08x\n",
p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
/* DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS */
"The filter/trigger constraint dword offsets are not enabled for recording",
-
+ /* DBG_STATUS_NO_MATCHING_FRAMING_MODE */
+ "No matching framing mode",
/* DBG_STATUS_VFC_READ_ERROR */
"Error reading from VFC",
if (p_hwfn->cdev->print_dbg_data)
qed_dbg_print_feature(text_buf, text_size_bytes);
+ /* Just return the original binary buffer if requested */
+ if (p_hwfn->cdev->dbg_bin_dump) {
+ vfree(text_buf);
+ return DBG_STATUS_OK;
+ }
+
	/* Free the old dump_buf and point the dump_buf to the newly allocated
* and formatted text buffer.
*/
#define REGDUMP_HEADER_SIZE_SHIFT 0
#define REGDUMP_HEADER_SIZE_MASK 0xffffff
#define REGDUMP_HEADER_FEATURE_SHIFT 24
-#define REGDUMP_HEADER_FEATURE_MASK 0x3f
+#define REGDUMP_HEADER_FEATURE_MASK 0x1f
+#define REGDUMP_HEADER_BIN_DUMP_SHIFT 29
+#define REGDUMP_HEADER_BIN_DUMP_MASK 0x1
#define REGDUMP_HEADER_OMIT_ENGINE_SHIFT 30
#define REGDUMP_HEADER_OMIT_ENGINE_MASK 0x1
#define REGDUMP_HEADER_ENGINE_SHIFT 31
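The regdump header is a single u32, so narrowing the feature field from six bits (0x3f) to five (0x1f) is what frees bit 29 for the new BIN_DUMP flag. A minimal userspace sketch of the resulting layout, using PUT()/GET() as local stand-ins for the kernel's SET_FIELD()/GET_FIELD() helpers (the stand-ins are illustrative only, not the driver's API):

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the kernel's SET_FIELD()/GET_FIELD() helpers. */
#define PUT(val, shift, mask)	((((uint32_t)(val)) & (mask)) << (shift))
#define GET(hdr, shift, mask)	(((hdr) >> (shift)) & (mask))

int main(void)
{
	/* size: bits 0-23, feature: bits 24-28, bin_dump: bit 29,
	 * omit_engine: bit 30, engine: bit 31
	 */
	uint32_t hdr = PUT(0x1234, 0, 0xffffff) |	/* dump size */
		       PUT(7, 24, 0x1f) |		/* feature index */
		       PUT(1, 29, 0x1) |		/* raw binary dump */
		       PUT(1, 30, 0x1) |		/* omit engine */
		       PUT(0, 31, 0x1);			/* engine id */

	printf("size=%u feature=%u bin=%u\n",
	       (unsigned)GET(hdr, 0, 0xffffff),
	       (unsigned)GET(hdr, 24, 0x1f),
	       (unsigned)GET(hdr, 29, 0x1));
	return 0;
}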
feature, feature_size);
SET_FIELD(res, REGDUMP_HEADER_FEATURE, feature);
+ SET_FIELD(res, REGDUMP_HEADER_BIN_DUMP, 1);
SET_FIELD(res, REGDUMP_HEADER_OMIT_ENGINE, omit_engine);
SET_FIELD(res, REGDUMP_HEADER_ENGINE, engine);
omit_engine = 1;
mutex_lock(&qed_dbg_lock);
+ cdev->dbg_bin_dump = true;
org_engine = qed_get_debug_engine(cdev);
for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
}
+ /* Re-populate nvm attribute info */
+ qed_mcp_nvm_info_free(p_hwfn);
+ qed_mcp_nvm_info_populate(p_hwfn);
+
/* nvm cfg1 */
rc = qed_dbg_nvm_image(cdev,
(u8 *)buffer + offset +
QED_NVM_IMAGE_MDUMP, "QED_NVM_IMAGE_MDUMP", rc);
}
+ cdev->dbg_bin_dump = false;
mutex_unlock(&qed_dbg_lock);
return 0;
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
union qed_llh_filter filter = {};
- u8 filter_idx, abs_ppfid;
+ u8 filter_idx, abs_ppfid = 0;
u32 high, low, ref_cnt;
int rc = 0;
void qed_resc_free(struct qed_dev *cdev)
{
+ struct qed_rdma_info *rdma_info;
+ struct qed_hwfn *p_hwfn;
int i;
if (IS_VF(cdev)) {
qed_llh_free(cdev);
for_each_hwfn(cdev, i) {
- struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ p_hwfn = cdev->hwfns + i;
+ rdma_info = p_hwfn->p_rdma_info;
qed_cxt_mngr_free(p_hwfn);
qed_qm_info_free(p_hwfn);
qed_ooo_free(p_hwfn);
}
- if (QED_IS_RDMA_PERSONALITY(p_hwfn))
+ if (QED_IS_RDMA_PERSONALITY(p_hwfn) && rdma_info) {
+ qed_spq_unregister_async_cb(p_hwfn, rdma_info->proto);
qed_rdma_info_free(p_hwfn);
+ }
qed_iov_free(p_hwfn);
qed_l2_free(p_hwfn);
return 0;
}
-static void qed_nvm_info_free(struct qed_hwfn *p_hwfn)
-{
- kfree(p_hwfn->nvm_info.image_att);
- p_hwfn->nvm_info.image_att = NULL;
-}
-
static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
void __iomem *p_regview,
void __iomem *p_doorbells,
return rc;
err3:
if (IS_LEAD_HWFN(p_hwfn))
- qed_nvm_info_free(p_hwfn);
+ qed_mcp_nvm_info_free(p_hwfn);
err2:
if (IS_LEAD_HWFN(p_hwfn))
qed_iov_free_hw_info(p_hwfn->cdev);
if (rc) {
if (IS_PF(cdev)) {
qed_init_free(p_hwfn);
- qed_nvm_info_free(p_hwfn);
+ qed_mcp_nvm_info_free(p_hwfn);
qed_mcp_free(p_hwfn);
qed_hw_hwfn_free(p_hwfn);
}
qed_iov_free_hw_info(cdev);
- qed_nvm_info_free(p_hwfn);
+ qed_mcp_nvm_info_free(p_hwfn);
}
static void qed_chain_free_next_ptr(struct qed_dev *cdev,
if (rc)
return rc;
- qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP);
-
return qed_iwarp_ll2_stop(p_hwfn);
}
return rc;
}
+void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn)
+{
+ kfree(p_hwfn->nvm_info.image_att);
+ p_hwfn->nvm_info.image_att = NULL;
+ p_hwfn->nvm_info.valid = false;
+}
+
int
qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
enum qed_nvm_images image_id,
*/
int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn);
+/**
+ * @brief Delete nvm info shadow in the given hardware function
+ *
+ * @param p_hwfn
+ */
+void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn);
+
/**
* @brief Get the engine affinity configuration.
*
break;
}
}
- qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ROCE);
}
static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
}
+#define QED_VF_CHANNEL_USLEEP_ITERATIONS 90
+#define QED_VF_CHANNEL_USLEEP_DELAY 100
+#define QED_VF_CHANNEL_MSLEEP_ITERATIONS 10
+#define QED_VF_CHANNEL_MSLEEP_DELAY 25
+
static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
{
union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
struct ustorm_trigger_vf_zone trigger;
struct ustorm_vf_zone *zone_data;
- int rc = 0, time = 100;
+ int iter, rc = 0;
zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;
REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));
/* When PF would be done with the response, it would write back to the
- * `done' address. Poll until then.
+ * `done' address from a coherent DMA zone. Poll until then.
*/
- while ((!*done) && time) {
- msleep(25);
- time--;
+
+ iter = QED_VF_CHANNEL_USLEEP_ITERATIONS;
+ while (!*done && iter--) {
+ udelay(QED_VF_CHANNEL_USLEEP_DELAY);
+ dma_rmb();
+ }
+
+ iter = QED_VF_CHANNEL_MSLEEP_ITERATIONS;
+ while (!*done && iter--) {
+ msleep(QED_VF_CHANNEL_MSLEEP_DELAY);
+ dma_rmb();
}
if (!*done) {
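The new wait budget works out to 90 x 100 us (~9 ms) of busy-waiting to catch a fast PF response, followed by 10 x 25 ms (~250 ms) of sleeping for the slow case, versus the old flat 100 x 25 ms = 2.5 s of msleep(); the dma_rmb() after each check orders the re-read of the DMA-written `done' flag.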
/* PTP not supported on VFs */
if (!is_vf)
- qede_ptp_enable(edev, (mode == QEDE_PROBE_NORMAL));
+ qede_ptp_enable(edev);
edev->ops->register_ops(cdev, &qede_ll_ops, edev);
if (system_state == SYSTEM_POWER_OFF)
return;
qed_ops->common->remove(cdev);
+ edev->cdev = NULL;
/* Since this can happen out-of-sync with other flows,
* don't release the netdevice until after slowpath stop
if (ptp->tx_skb) {
dev_kfree_skb_any(ptp->tx_skb);
ptp->tx_skb = NULL;
+ clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
}
/* Disable PTP in HW */
edev->ptp = NULL;
}
-static int qede_ptp_init(struct qede_dev *edev, bool init_tc)
+static int qede_ptp_init(struct qede_dev *edev)
{
struct qede_ptp *ptp;
int rc;
/* Init work queue for Tx timestamping */
INIT_WORK(&ptp->work, qede_ptp_task);
- /* Init cyclecounter and timecounter. This is done only in the first
- * load. If done in every load, PTP application will fail when doing
- * unload / load (e.g. MTU change) while it is running.
- */
- if (init_tc) {
- memset(&ptp->cc, 0, sizeof(ptp->cc));
- ptp->cc.read = qede_ptp_read_cc;
- ptp->cc.mask = CYCLECOUNTER_MASK(64);
- ptp->cc.shift = 0;
- ptp->cc.mult = 1;
-
- timecounter_init(&ptp->tc, &ptp->cc,
- ktime_to_ns(ktime_get_real()));
- }
+ /* Init cyclecounter and timecounter */
+ memset(&ptp->cc, 0, sizeof(ptp->cc));
+ ptp->cc.read = qede_ptp_read_cc;
+ ptp->cc.mask = CYCLECOUNTER_MASK(64);
+ ptp->cc.shift = 0;
+ ptp->cc.mult = 1;
- return rc;
+ timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
+
+ return 0;
}
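With mult = 1 and shift = 0 the timecounter conversion ns = (cycles * mult) >> shift is the identity, so the 64-bit hardware counter is consumed directly as nanoseconds, seeded from ktime_get_real() at init time.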
-int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
+int qede_ptp_enable(struct qede_dev *edev)
{
struct qede_ptp *ptp;
int rc;
edev->ptp = ptp;
- rc = qede_ptp_init(edev, init_tc);
+ rc = qede_ptp_init(edev);
if (rc)
goto err1;
void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb);
int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req);
void qede_ptp_disable(struct qede_dev *edev);
-int qede_ptp_enable(struct qede_dev *edev, bool init_tc);
+int qede_ptp_enable(struct qede_dev *edev);
int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *ts);
static inline void qede_ptp_record_rx_ts(struct qede_dev *edev,
qede_rdma_cleanup_event(edev);
destroy_workqueue(edev->rdma_info.rdma_wq);
+ edev->rdma_info.rdma_wq = NULL;
}
int qede_rdma_dev_add(struct qede_dev *edev, bool recovery)
if (edev->rdma_info.exp_recovery)
return;
- if (!edev->rdma_info.qedr_dev)
+ if (!edev->rdma_info.qedr_dev || !edev->rdma_info.rdma_wq)
return;
/* We don't want the cleanup flow to start while we're allocating and
return 0;
}
-static int rmnet_register_real_device(struct net_device *real_dev)
+static int rmnet_register_real_device(struct net_device *real_dev,
+ struct netlink_ext_ack *extack)
{
struct rmnet_port *port;
int rc, entry;
ASSERT_RTNL();
- if (rmnet_is_real_dev_registered(real_dev))
+ if (rmnet_is_real_dev_registered(real_dev)) {
+ port = rmnet_get_port_rtnl(real_dev);
+ if (port->rmnet_mode != RMNET_EPMODE_VND) {
+ NL_SET_ERR_MSG_MOD(extack, "bridge device already exists");
+ return -EINVAL;
+ }
+
return 0;
+ }
port = kzalloc(sizeof(*port), GFP_KERNEL);
if (!port)
mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
- err = rmnet_register_real_device(real_dev);
+ err = rmnet_register_real_device(real_dev, extack);
if (err)
goto err0;
}
if (port->rmnet_mode != RMNET_EPMODE_VND) {
- NL_SET_ERR_MSG_MOD(extack, "bridge device already exists");
+ NL_SET_ERR_MSG_MOD(extack, "more than one bridge dev attached");
return -EINVAL;
}
return -EBUSY;
}
- err = rmnet_register_real_device(slave_dev);
+ err = rmnet_register_real_device(slave_dev, extack);
if (err)
return -EBUSY;
void r8169_apply_firmware(struct rtl8169_private *tp)
{
/* TODO: release firmware if rtl_fw_write_firmware signals failure. */
- if (tp->rtl_fw)
+ if (tp->rtl_fw) {
rtl_fw_write_firmware(tp, tp->rtl_fw);
+ /* At least one firmware doesn't reset tp->ocp_base. */
+ tp->ocp_base = OCP_STD_PHY_BASE;
+ }
}
static void rtl8168_config_eee_mac(struct rtl8169_private *tp)
skb->ip_summed = CHECKSUM_UNNECESSARY;
next:
- if ((skb && napi_gro_receive(&priv->napi, skb) != GRO_DROP) ||
- xdp_result) {
+ if (skb)
+ napi_gro_receive(&priv->napi, skb);
+ if (skb || xdp_result) {
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += xdp.data_end - xdp.data;
}
geneve->collect_md = metadata;
geneve->use_udp6_rx_checksums = use_udp6_rx_checksums;
geneve->ttl_inherit = ttl_inherit;
+ geneve->df = df;
geneve_unquiesce(geneve, gs4, gs6);
return 0;
int ret;
state = gsi_channel_state(channel);
+
+ /* Channel could have entered STOPPED state since last call
+ * if it timed out. If so, we're done.
+ */
+ if (state == GSI_CHANNEL_STATE_STOPPED)
+ return 0;
+
if (state != GSI_CHANNEL_STATE_STARTED &&
state != GSI_CHANNEL_STATE_STOP_IN_PROC)
return -EINVAL;
int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
- enum gsi_channel_state state;
u32 retries;
int ret;
gsi_channel_freeze(channel);
- /* Channel could have entered STOPPED state since last call if the
- * STOP command timed out. We won't stop a channel if stopping it
- * was successful previously (so we still want the freeze above).
- */
- state = gsi_channel_state(channel);
- if (state == GSI_CHANNEL_STATE_STOPPED)
- return 0;
-
/* RX channels might require a little time to enter STOPPED state */
retries = channel->toward_ipa ? 0 : GSI_CHANNEL_STOP_RX_RETRIES;
return 4;
}
+void ipa_cmd_tag_process(struct ipa *ipa)
+{
+ u32 count = ipa_cmd_tag_process_count();
+ struct gsi_trans *trans;
+
+ trans = ipa_cmd_trans_alloc(ipa, count);
+ if (trans) {
+ ipa_cmd_tag_process_add(trans);
+ gsi_trans_commit_wait(trans);
+ } else {
+ dev_err(&ipa->pdev->dev,
+ "error allocating %u entry tag transaction\n", count);
+ }
+}
+
static struct ipa_cmd_info *
ipa_cmd_info_alloc(struct ipa_endpoint *endpoint, u32 tre_count)
{
*/
u32 ipa_cmd_tag_process_count(void);
+/**
+ * ipa_cmd_tag_process() - Perform a tag process
+ *
+ * @ipa:	IPA pointer
+ */
+void ipa_cmd_tag_process(struct ipa *ipa);
+
/**
* ipa_cmd_trans_alloc() - Allocate a transaction for the command TX endpoint
* @ipa: IPA pointer
.endpoint = {
.seq_type = IPA_SEQ_INVALID,
.config = {
- .checksum = true,
.aggregation = true,
.status_enable = true,
.rx = {
if (ipa->modem_netdev)
ipa_modem_suspend(ipa->modem_netdev);
+ ipa_cmd_tag_process(ipa);
+
ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
}
#include <linux/types.h>
+#include "ipa_gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_endpoint.h"
#include <linux/types.h>
+struct gsi;
struct gsi_trans;
+struct ipa_gsi_endpoint_data;
/**
* ipa_gsi_trans_complete() - GSI transaction completion callback
sizeof_field(struct ipa_driver_init_complete_rsp,
rsp),
.tlv_type = 0x02,
- .elem_size = offsetof(struct ipa_driver_init_complete_rsp,
+ .offset = offsetof(struct ipa_driver_init_complete_rsp,
rsp),
.ei_array = qmi_response_type_v01_ei,
},
sizeof_field(struct ipa_init_complete_ind,
status),
.tlv_type = 0x02,
- .elem_size = offsetof(struct ipa_init_complete_ind,
+ .offset = offsetof(struct ipa_init_complete_ind,
status),
.ei_array = qmi_response_type_v01_ei,
},
sizeof_field(struct ipa_init_modem_driver_req,
platform_type_valid),
.tlv_type = 0x10,
- .elem_size = offsetof(struct ipa_init_modem_driver_req,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
platform_type_valid),
},
{
return err;
netdev_lockdep_set_classes(dev);
- lockdep_set_class_and_subclass(&dev->addr_list_lock,
- &macsec_netdev_addr_lock_key,
- dev->lower_level);
+ lockdep_set_class(&dev->addr_list_lock,
+ &macsec_netdev_addr_lock_key);
err = netdev_upper_dev_link(real_dev, dev, extack);
if (err < 0)
static void macvlan_set_lockdep_class(struct net_device *dev)
{
netdev_lockdep_set_classes(dev);
- lockdep_set_class_and_subclass(&dev->addr_list_lock,
- &macvlan_netdev_addr_lock_key,
- dev->lower_level);
+ lockdep_set_class(&dev->addr_list_lock,
+ &macvlan_netdev_addr_lock_key);
}
static int macvlan_init(struct net_device *dev)
config MICROSEMI_PHY
tristate "Microsemi PHYs"
depends on MACSEC || MACSEC=n
- select CRYPTO_AES
- select CRYPTO_ECB
+ select CRYPTO_LIB_AES if MACSEC
help
Currently supports VSC8514, VSC8530, VSC8531, VSC8540 and VSC8541 PHYs
#include <linux/phy.h>
#include <dt-bindings/net/mscc-phy-vsc8531.h>
-#include <crypto/skcipher.h>
+#include <crypto/aes.h>
#include <net/macsec.h>
static int vsc8584_macsec_derive_key(const u8 key[MACSEC_KEYID_LEN],
u16 key_len, u8 hkey[16])
{
- struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
- struct skcipher_request *req = NULL;
- struct scatterlist src, dst;
- DECLARE_CRYPTO_WAIT(wait);
- u32 input[4] = {0};
+ const u8 input[AES_BLOCK_SIZE] = {0};
+ struct crypto_aes_ctx ctx;
int ret;
- if (IS_ERR(tfm))
- return PTR_ERR(tfm);
-
- req = skcipher_request_alloc(tfm, GFP_KERNEL);
- if (!req) {
- ret = -ENOMEM;
- goto out;
- }
-
- skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done,
- &wait);
- ret = crypto_skcipher_setkey(tfm, key, key_len);
- if (ret < 0)
- goto out;
-
- sg_init_one(&src, input, 16);
- sg_init_one(&dst, hkey, 16);
- skcipher_request_set_crypt(req, &src, &dst, 16, NULL);
-
- ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
+ ret = aes_expandkey(&ctx, key, key_len);
+ if (ret)
+ return ret;
-out:
- skcipher_request_free(req);
- crypto_free_skcipher(tfm);
- return ret;
+ aes_encrypt(&ctx, hkey, input);
+ memzero_explicit(&ctx, sizeof(ctx));
+ return 0;
}
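For reference, this computes the same value as the removed skcipher round trip: hkey becomes the AES encryption of an all-zero 16-byte block under the MACsec key, i.e. the standard GCM/GHASH subkey H = E_K(0^128), and memzero_explicit() scrubs the expanded key schedule from the stack afterwards.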
static int vsc8584_macsec_transformation(struct phy_device *phydev,
* phy_disable_interrupts - Disable the PHY interrupts from the PHY side
* @phydev: target phy_device struct
*/
-static int phy_disable_interrupts(struct phy_device *phydev)
+int phy_disable_interrupts(struct phy_device *phydev)
{
int err;
/* Grab the bits from PHYIR2, and put them in the lower half */
phy_reg = mdiobus_read(bus, addr, MII_PHYSID2);
- if (phy_reg < 0)
- return -EIO;
+ if (phy_reg < 0) {
+ /* returning -ENODEV doesn't stop bus scanning */
+ return (phy_reg == -EIO || phy_reg == -ENODEV) ? -ENODEV : -EIO;
+ }
*phy_id |= phy_reg;
if (ret < 0)
return ret;
+ ret = phy_disable_interrupts(phydev);
+ if (ret)
+ return ret;
+
if (phydev->drv->config_init)
ret = phydev->drv->config_init(phydev);
struct ethtool_pauseparam *pause)
{
struct phylink_link_state *config = &pl->link_config;
+ bool manual_changed;
+ int pause_state;
ASSERT_RTNL();
!pause->autoneg && pause->rx_pause != pause->tx_pause)
return -EINVAL;
- mutex_lock(&pl->state_mutex);
- config->pause = 0;
+ pause_state = 0;
if (pause->autoneg)
- config->pause |= MLO_PAUSE_AN;
+ pause_state |= MLO_PAUSE_AN;
if (pause->rx_pause)
- config->pause |= MLO_PAUSE_RX;
+ pause_state |= MLO_PAUSE_RX;
if (pause->tx_pause)
- config->pause |= MLO_PAUSE_TX;
+ pause_state |= MLO_PAUSE_TX;
+ mutex_lock(&pl->state_mutex);
/*
* See the comments for linkmode_set_pause(), wrt the deficiencies
* with the current implementation. A solution to this issue would
linkmode_set_pause(config->advertising, pause->tx_pause,
pause->rx_pause);
- /* If we have a PHY, phylib will call our link state function if the
- * mode has changed, which will trigger a resolve and update the MAC
- * configuration.
+ manual_changed = (config->pause ^ pause_state) & MLO_PAUSE_AN ||
+ (!(pause_state & MLO_PAUSE_AN) &&
+ (config->pause ^ pause_state) & MLO_PAUSE_TXRX_MASK);
+
+ config->pause = pause_state;
+
+ if (!pl->phydev && !test_bit(PHYLINK_DISABLE_STOPPED,
+ &pl->phylink_disable_state))
+ phylink_pcs_config(pl, true, &pl->link_config);
+
+ mutex_unlock(&pl->state_mutex);
+
+ /* If we have a PHY, a change of the pause frame advertisement will
+ * cause phylib to renegotiate (if AN is enabled) which will in turn
+ * call our phylink_phy_change() and trigger a resolve. Note that
+ * we can't hold our state mutex while calling phy_set_asym_pause().
*/
- if (pl->phydev) {
+ if (pl->phydev)
phy_set_asym_pause(pl->phydev, pause->rx_pause,
pause->tx_pause);
- } else if (!test_bit(PHYLINK_DISABLE_STOPPED,
- &pl->phylink_disable_state)) {
- phylink_pcs_config(pl, true, &pl->link_config);
+
+ /* If the manual pause settings changed, make sure we trigger a
+ * resolve to update their state; we can not guarantee that the
+ * link will cycle.
+ */
+ if (manual_changed) {
+ pl->mac_link_dropped = true;
+ phylink_run_resolve(pl);
}
- mutex_unlock(&pl->state_mutex);
return 0;
}
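Spelled out, manual_changed is true when the autoneg request itself toggled, or when autoneg is now off and the requested tx/rx pause bits differ from the current configuration; only then is a resolve forced, because a renegotiation (and hence a link cycle) cannot be relied upon to propagate a purely manual change.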
if (rc < 0)
return rc;
- /* Wait max 640 ms to detect energy */
- phy_read_poll_timeout(phydev, MII_LAN83C185_CTRL_STATUS, rc,
- rc & MII_LAN83C185_ENERGYON, 10000,
- 640000, true);
+	/* Wait max 640 ms to detect energy; a timeout here is not
+	 * an actual error.
+ */
+ read_poll_timeout(phy_read, rc,
+ rc & MII_LAN83C185_ENERGYON || rc < 0,
+ 10000, 640000, true, phydev,
+ MII_LAN83C185_CTRL_STATUS);
if (rc < 0)
return rc;
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <net/xdp.h>
+#include <net/ip_tunnels.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/skb_array.h>
switch (tun->flags & TUN_TYPE_MASK) {
case IFF_TUN:
dev->netdev_ops = &tun_netdev_ops;
+ dev->header_ops = &ip_tunnel_header_ops;
/* Point-to-Point TUN Device */
dev->hard_header_len = 0;
}
if (pkt_cnt == 0) {
- /* Skip IP alignment psudo header */
- skb_pull(skb, 2);
skb->len = pkt_len;
- skb_set_tail_pointer(skb, pkt_len);
+ /* Skip IP alignment pseudo header */
+ skb_pull(skb, 2);
+ skb_set_tail_pointer(skb, skb->len);
skb->truesize = pkt_len + sizeof(struct sk_buff);
ax88179_rx_checksum(skb, pkt_hdr);
return 1;
ax_skb = skb_clone(skb, GFP_ATOMIC);
if (ax_skb) {
ax_skb->len = pkt_len;
- ax_skb->data = skb->data + 2;
- skb_set_tail_pointer(ax_skb, pkt_len);
+ /* Skip IP alignment pseudo header */
+ skb_pull(ax_skb, 2);
+ skb_set_tail_pointer(ax_skb, ax_skb->len);
ax_skb->truesize = pkt_len + sizeof(struct sk_buff);
ax88179_rx_checksum(ax_skb, pkt_hdr);
usbnet_skb_return(dev, ax_skb);
{QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
+ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0195, 4)}, /* Quectel EG95 */
{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
{QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */
{QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */
/* Init all registers */
ret = smsc95xx_reset(dev);
+ if (ret)
+ goto free_pdata;
/* detect device revision as different features may be available */
ret = smsc95xx_read_reg(dev, ID_REV, &val);
if (ret < 0)
- return ret;
+ goto free_pdata;
+
val >>= 16;
pdata->chip_id = val;
pdata->mdix_ctrl = get_mdix_status(dev->net);
schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
return 0;
+
+free_pdata:
+ kfree(pdata);
+ return ret;
}
static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
if (pdata) {
- cancel_delayed_work(&pdata->carrier_check);
+ cancel_delayed_work_sync(&pdata->carrier_check);
netif_dbg(dev, ifdown, dev->net, "free pdata\n");
kfree(pdata);
pdata = NULL;
struct vxlan_rdst *rd;
if (rcu_access_pointer(f->nh)) {
+ if (*idx < cb->args[2])
+ goto skip_nh;
err = vxlan_fdb_info(skb, vxlan, f,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NLM_F_MULTI, NULL);
if (err < 0)
goto out;
+skip_nh:
+ *idx += 1;
continue;
}
dev->netdev_ops = &lapbeth_netdev_ops;
dev->needs_free_netdev = true;
dev->type = ARPHRD_X25;
- dev->hard_header_len = 3;
dev->mtu = 1000;
dev->addr_len = 0;
}
if (!ndev)
goto out;
+ /* When transmitting data:
+ * first this driver removes a pseudo header of 1 byte,
+ * then the lapb module prepends an LAPB header of at most 3 bytes,
+ * then this driver prepends a length field of 2 bytes,
+ * then the underlying Ethernet device prepends its own header.
+ */
+ ndev->hard_header_len = -1 + 3 + 2 + dev->hard_header_len;
+
lapbeth = netdev_priv(ndev);
lapbeth->axdev = ndev;
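As a worked example with a plain Ethernet lower device (dev->hard_header_len == 14), the expression reserves -1 + 3 + 2 + 14 = 18 bytes of headroom: one byte of pseudo header is stripped, then up to 3 bytes of LAPB header, the 2-byte length field and the 14-byte Ethernet header are pushed.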
if (dev_v6)
dev_v6->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_NONE;
+ mutex_lock(&wg->device_update_lock);
ret = wg_socket_init(wg, wg->incoming_port);
if (ret < 0)
- return ret;
- mutex_lock(&wg->device_update_lock);
+ goto out;
list_for_each_entry(peer, &wg->peer_list, peer_list) {
wg_packet_send_staged_packets(peer);
if (peer->persistent_keepalive_interval)
wg_packet_send_keepalive(peer);
}
+out:
mutex_unlock(&wg->device_update_lock);
- return 0;
+ return ret;
}
#ifdef CONFIG_PM_SLEEP
list_del(&wg->device_list);
rtnl_unlock();
mutex_lock(&wg->device_update_lock);
+ rcu_assign_pointer(wg->creating_net, NULL);
wg->incoming_port = 0;
wg_socket_reinit(wg, NULL, NULL);
/* The final references are cleared in the below calls to destroy_workqueue. */
skb_queue_purge(&wg->incoming_handshakes);
free_percpu(dev->tstats);
free_percpu(wg->incoming_handshakes_worker);
- if (wg->have_creating_net_ref)
- put_net(wg->creating_net);
kvfree(wg->index_hashtable);
kvfree(wg->peer_hashtable);
mutex_unlock(&wg->device_update_lock);
- pr_debug("%s: Interface deleted\n", dev->name);
+ pr_debug("%s: Interface destroyed\n", dev->name);
free_netdev(dev);
}
max(sizeof(struct ipv6hdr), sizeof(struct iphdr));
dev->netdev_ops = &netdev_ops;
+ dev->header_ops = &ip_tunnel_header_ops;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->needed_headroom = DATA_PACKET_HEAD_ROOM;
struct wg_device *wg = netdev_priv(dev);
int ret = -ENOMEM;
- wg->creating_net = src_net;
+ rcu_assign_pointer(wg->creating_net, src_net);
init_rwsem(&wg->static_identity.lock);
mutex_init(&wg->socket_update_lock);
mutex_init(&wg->device_update_lock);
.newlink = wg_newlink,
};
-static int wg_netdevice_notification(struct notifier_block *nb,
- unsigned long action, void *data)
+static void wg_netns_pre_exit(struct net *net)
{
- struct net_device *dev = ((struct netdev_notifier_info *)data)->dev;
- struct wg_device *wg = netdev_priv(dev);
-
- ASSERT_RTNL();
-
- if (action != NETDEV_REGISTER || dev->netdev_ops != &netdev_ops)
- return 0;
+ struct wg_device *wg;
- if (dev_net(dev) == wg->creating_net && wg->have_creating_net_ref) {
- put_net(wg->creating_net);
- wg->have_creating_net_ref = false;
- } else if (dev_net(dev) != wg->creating_net &&
- !wg->have_creating_net_ref) {
- wg->have_creating_net_ref = true;
- get_net(wg->creating_net);
+ rtnl_lock();
+ list_for_each_entry(wg, &device_list, device_list) {
+ if (rcu_access_pointer(wg->creating_net) == net) {
+ pr_debug("%s: Creating namespace exiting\n", wg->dev->name);
+ netif_carrier_off(wg->dev);
+ mutex_lock(&wg->device_update_lock);
+ rcu_assign_pointer(wg->creating_net, NULL);
+ wg_socket_reinit(wg, NULL, NULL);
+ mutex_unlock(&wg->device_update_lock);
+ }
}
- return 0;
+ rtnl_unlock();
}
-static struct notifier_block netdevice_notifier = {
- .notifier_call = wg_netdevice_notification
+static struct pernet_operations pernet_ops = {
+ .pre_exit = wg_netns_pre_exit
};
int __init wg_device_init(void)
return ret;
#endif
- ret = register_netdevice_notifier(&netdevice_notifier);
+ ret = register_pernet_device(&pernet_ops);
if (ret)
goto error_pm;
ret = rtnl_link_register(&link_ops);
if (ret)
- goto error_netdevice;
+ goto error_pernet;
return 0;
-error_netdevice:
- unregister_netdevice_notifier(&netdevice_notifier);
+error_pernet:
+ unregister_pernet_device(&pernet_ops);
error_pm:
#ifdef CONFIG_PM_SLEEP
unregister_pm_notifier(&pm_notifier);
void wg_device_uninit(void)
{
rtnl_link_unregister(&link_ops);
- unregister_netdevice_notifier(&netdevice_notifier);
+ unregister_pernet_device(&pernet_ops);
#ifdef CONFIG_PM_SLEEP
unregister_pm_notifier(&pm_notifier);
#endif
struct net_device *dev;
struct crypt_queue encrypt_queue, decrypt_queue;
struct sock __rcu *sock4, *sock6;
- struct net *creating_net;
+ struct net __rcu *creating_net;
struct noise_static_identity static_identity;
struct workqueue_struct *handshake_receive_wq, *handshake_send_wq;
struct workqueue_struct *packet_crypt_wq;
unsigned int num_peers, device_update_gen;
u32 fwmark;
u16 incoming_port;
- bool have_creating_net_ref;
};
int wg_device_init(void);
if (flags & ~__WGDEVICE_F_ALL)
goto out;
- ret = -EPERM;
- if ((info->attrs[WGDEVICE_A_LISTEN_PORT] ||
- info->attrs[WGDEVICE_A_FWMARK]) &&
- !ns_capable(wg->creating_net->user_ns, CAP_NET_ADMIN))
- goto out;
+ if (info->attrs[WGDEVICE_A_LISTEN_PORT] || info->attrs[WGDEVICE_A_FWMARK]) {
+ struct net *net;
+ rcu_read_lock();
+ net = rcu_dereference(wg->creating_net);
+ ret = !net || !ns_capable(net->user_ns, CAP_NET_ADMIN) ? -EPERM : 0;
+ rcu_read_unlock();
+ if (ret)
+ goto out;
+ }
++wg->device_update_gen;
memcpy(handshake->hash, hash, NOISE_HASH_LEN);
memcpy(handshake->chaining_key, chaining_key, NOISE_HASH_LEN);
handshake->remote_index = src->sender_index;
- if ((s64)(handshake->last_initiation_consumption -
- (initiation_consumption = ktime_get_coarse_boottime_ns())) < 0)
+ initiation_consumption = ktime_get_coarse_boottime_ns();
+ if ((s64)(handshake->last_initiation_consumption - initiation_consumption) < 0)
handshake->last_initiation_consumption = initiation_consumption;
handshake->state = HANDSHAKE_CONSUMED_INITIATION;
up_write(&handshake->lock);
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <net/ip_tunnels.h>
struct wg_device;
struct wg_peer;
#define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb))
#define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer)
-/* Returns either the correct skb->protocol value, or 0 if invalid. */
-static inline __be16 wg_examine_packet_protocol(struct sk_buff *skb)
-{
- if (skb_network_header(skb) >= skb->head &&
- (skb_network_header(skb) + sizeof(struct iphdr)) <=
- skb_tail_pointer(skb) &&
- ip_hdr(skb)->version == 4)
- return htons(ETH_P_IP);
- if (skb_network_header(skb) >= skb->head &&
- (skb_network_header(skb) + sizeof(struct ipv6hdr)) <=
- skb_tail_pointer(skb) &&
- ipv6_hdr(skb)->version == 6)
- return htons(ETH_P_IPV6);
- return 0;
-}
-
static inline bool wg_check_packet_protocol(struct sk_buff *skb)
{
- __be16 real_protocol = wg_examine_packet_protocol(skb);
+ __be16 real_protocol = ip_tunnel_parse_protocol(skb);
return real_protocol && skb->protocol == real_protocol;
}
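ip_tunnel_parse_protocol() performs the same validation that the removed wg_examine_packet_protocol() open-coded: it checks that enough of the network header is present and inspects the IP version nibble, returning htons(ETH_P_IP), htons(ETH_P_IPV6) or 0. Hoisting it into <net/ip_tunnels.h> is what lets tun (which gains dev->header_ops earlier in this series) share it with wireguard.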
*/
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->csum_level = ~0; /* All levels */
- skb->protocol = wg_examine_packet_protocol(skb);
+ skb->protocol = ip_tunnel_parse_protocol(skb);
if (skb->protocol == htons(ETH_P_IP)) {
len = ntohs(ip_hdr(skb)->tot_len);
if (unlikely(len < sizeof(struct iphdr)))
if (unlikely(routed_peer != peer))
goto dishonest_packet_peer;
- if (unlikely(napi_gro_receive(&peer->napi, skb) == GRO_DROP)) {
- ++dev->stats.rx_dropped;
- net_dbg_ratelimited("%s: Failed to give packet to userspace from peer %llu (%pISpfsc)\n",
- dev->name, peer->internal_id,
- &peer->endpoint.addr);
- } else {
- update_rx_stats(peer, message_data_len(len_before_trim));
- }
+ napi_gro_receive(&peer->napi, skb);
+ update_rx_stats(peer, message_data_len(len_before_trim));
return;
dishonest_packet_peer:
int wg_socket_init(struct wg_device *wg, u16 port)
{
+ struct net *net;
int ret;
struct udp_tunnel_sock_cfg cfg = {
.sk_user_data = wg,
};
#endif
+ rcu_read_lock();
+ net = rcu_dereference(wg->creating_net);
+ net = net ? maybe_get_net(net) : NULL;
+ rcu_read_unlock();
+ if (unlikely(!net))
+ return -ENONET;
+
#if IS_ENABLED(CONFIG_IPV6)
retry:
#endif
- ret = udp_sock_create(wg->creating_net, &port4, &new4);
+ ret = udp_sock_create(net, &port4, &new4);
if (ret < 0) {
pr_err("%s: Could not create IPv4 socket\n", wg->dev->name);
- return ret;
+ goto out;
}
set_sock_opts(new4);
- setup_udp_tunnel_sock(wg->creating_net, new4, &cfg);
+ setup_udp_tunnel_sock(net, new4, &cfg);
#if IS_ENABLED(CONFIG_IPV6)
if (ipv6_mod_enabled()) {
port6.local_udp_port = inet_sk(new4->sk)->inet_sport;
- ret = udp_sock_create(wg->creating_net, &port6, &new6);
+ ret = udp_sock_create(net, &port6, &new6);
if (ret < 0) {
udp_tunnel_sock_release(new4);
if (ret == -EADDRINUSE && !port && retries++ < 100)
goto retry;
pr_err("%s: Could not create IPv6 socket\n",
wg->dev->name);
- return ret;
+ goto out;
}
set_sock_opts(new6);
- setup_udp_tunnel_sock(wg->creating_net, new6, &cfg);
+ setup_udp_tunnel_sock(net, new6, &cfg);
}
#endif
wg_socket_reinit(wg, new4->sk, new6 ? new6->sk : NULL);
- return 0;
+ ret = 0;
+out:
+ put_net(net);
+ return ret;
}
void wg_socket_reinit(struct wg_device *wg, struct sock *new4,
void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid,
struct wil_net_stats *stats, bool gro)
{
- gro_result_t rc = GRO_NORMAL;
struct wil6210_vif *vif = ndev_to_vif(ndev);
struct wil6210_priv *wil = ndev_to_wil(ndev);
struct wireless_dev *wdev = vif_to_wdev(vif);
*/
int mcast = is_multicast_ether_addr(da);
struct sk_buff *xmit_skb = NULL;
- static const char * const gro_res_str[] = {
- [GRO_MERGED] = "GRO_MERGED",
- [GRO_MERGED_FREE] = "GRO_MERGED_FREE",
- [GRO_HELD] = "GRO_HELD",
- [GRO_NORMAL] = "GRO_NORMAL",
- [GRO_DROP] = "GRO_DROP",
- [GRO_CONSUMED] = "GRO_CONSUMED",
- };
if (wdev->iftype == NL80211_IFTYPE_STATION) {
sa = wil_skb_get_sa(skb);
if (mcast && ether_addr_equal(sa, ndev->dev_addr)) {
/* mcast packet looped back to us */
- rc = GRO_DROP;
dev_kfree_skb(skb);
- goto stats;
+ ndev->stats.rx_dropped++;
+ stats->rx_dropped++;
+ wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
+ return;
}
} else if (wdev->iftype == NL80211_IFTYPE_AP && !vif->ap_isolate) {
if (mcast) {
wil_rx_handle_eapol(vif, skb);
if (gro)
- rc = napi_gro_receive(&wil->napi_rx, skb);
+ napi_gro_receive(&wil->napi_rx, skb);
else
netif_rx_ni(skb);
- wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
- len, gro_res_str[rc]);
- }
-stats:
- /* statistics. rc set to GRO_NORMAL for AP bridging */
- if (unlikely(rc == GRO_DROP)) {
- ndev->stats.rx_dropped++;
- stats->rx_dropped++;
- wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
- } else {
- ndev->stats.rx_packets++;
- stats->rx_packets++;
- ndev->stats.rx_bytes += len;
- stats->rx_bytes += len;
- if (mcast)
- ndev->stats.multicast++;
}
+ ndev->stats.rx_packets++;
+ stats->rx_packets++;
+ ndev->stats.rx_bytes += len;
+ stats->rx_bytes += len;
+ if (mcast)
+ ndev->stats.multicast++;
}
void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
struct encrypted_key_payload *epayload;
struct device *dev = &nvdimm->dev;
- keyref = lookup_user_key(id, 0, 0);
+ keyref = lookup_user_key(id, 0, KEY_NEED_SEARCH);
if (IS_ERR(keyref))
return NULL;
dev_warn(ctrl->device,
"Identify Descriptors failed (%d)\n", status);
/*
- * Don't treat an error as fatal, as we potentially already
- * have a NGUID or EUI-64.
+ * Don't treat non-retryable errors as fatal, as we potentially
+ * already have a NGUID or EUI-64. If we failed with DNR set,
+ * we want to silently ignore the error as we can still
+	 * identify the device, but if the status does not have DNR
+	 * set, we want to propagate the error back specifically for
+	 * the disk revalidation flow to make sure we don't abandon the
+	 * device just because of a temporary retryable error (such
+	 * as path or transport errors).
*/
- if (status > 0 && !(status & NVME_SC_DNR))
+ if (status > 0 && (status & NVME_SC_DNR))
status = 0;
goto free_data;
}
if (ns->head->disk) {
nvme_update_disk_info(ns->head->disk, ns, id);
blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
- revalidate_disk(ns->head->disk);
}
#endif
return 0;
ctrl->dev = dev;
ctrl->ops = ops;
ctrl->quirks = quirks;
+ ctrl->numa_node = NUMA_NO_NODE;
INIT_WORK(&ctrl->scan_work, nvme_scan_work);
INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
{
struct nvme_ns_head *head = ns->head;
- lockdep_assert_held(&ns->head->lock);
-
if (!head->disk)
return;
- if (!(head->disk->flags & GENHD_FL_UP))
+ if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags))
device_add_disk(&head->subsys->dev, head->disk,
nvme_ns_id_attr_groups);
+ mutex_lock(&head->lock);
if (nvme_path_is_optimized(ns)) {
int node, srcu_idx;
__nvme_find_path(head, node);
srcu_read_unlock(&head->srcu, srcu_idx);
}
+ mutex_unlock(&head->lock);
- synchronize_srcu(&ns->head->srcu);
- kblockd_schedule_work(&ns->head->requeue_work);
+ synchronize_srcu(&head->srcu);
+ kblockd_schedule_work(&head->requeue_work);
}
static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
struct nvme_ns *ns)
{
- mutex_lock(&ns->head->lock);
ns->ana_grpid = le32_to_cpu(desc->grpid);
ns->ana_state = desc->state;
clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
if (nvme_state_is_live(ns->ana_state))
nvme_mpath_set_live(ns);
- mutex_unlock(&ns->head->lock);
}
static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
}
DEVICE_ATTR_RO(ana_state);
-static int nvme_set_ns_ana_state(struct nvme_ctrl *ctrl,
+static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
struct nvme_ana_group_desc *desc, void *data)
{
- struct nvme_ns *ns = data;
+ struct nvme_ana_group_desc *dst = data;
- if (ns->ana_grpid == le32_to_cpu(desc->grpid)) {
- nvme_update_ns_ana_state(desc, ns);
- return -ENXIO; /* just break out of the loop */
- }
+ if (desc->grpid != dst->grpid)
+ return 0;
- return 0;
+ *dst = *desc;
+ return -ENXIO; /* just break out of the loop */
}
void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
{
if (nvme_ctrl_use_ana(ns->ctrl)) {
+ struct nvme_ana_group_desc desc = {
+ .grpid = id->anagrpid,
+ .state = 0,
+ };
+
mutex_lock(&ns->ctrl->ana_lock);
ns->ana_grpid = le32_to_cpu(id->anagrpid);
- nvme_parse_ana_log(ns->ctrl, ns, nvme_set_ns_ana_state);
+ nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc);
mutex_unlock(&ns->ctrl->ana_lock);
+ if (desc.state) {
+ /* found the group desc: update */
+ nvme_update_ns_ana_state(&desc, ns);
+ }
} else {
- mutex_lock(&ns->head->lock);
ns->ana_state = NVME_ANA_OPTIMIZED;
nvme_mpath_set_live(ns);
- mutex_unlock(&ns->head->lock);
}
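nvme_lookup_ana_group_desc() copies the matching descriptor out and returns -ENXIO purely to break out of nvme_parse_ana_log()'s loop; desc.state left at 0 serves as the "not found" sentinel, since 0 is not a valid ANA state. Copying the descriptor is also what allows nvme_update_ns_ana_state() to run after ana_lock has been dropped.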
if (bdi_cap_stable_pages_required(ns->queue->backing_dev_info)) {
- struct backing_dev_info *info =
- ns->head->disk->queue->backing_dev_info;
+ struct gendisk *disk = ns->head->disk;
- info->capabilities |= BDI_CAP_STABLE_WRITES;
+ if (disk)
+ disk->queue->backing_dev_info->capabilities |=
+ BDI_CAP_STABLE_WRITES;
}
}
kblockd_schedule_work(&head->requeue_work);
flush_work(&head->requeue_work);
blk_cleanup_queue(head->disk->queue);
+ if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
+ /*
+		 * if device_add_disk() wasn't called, prevent
+		 * disk release from putting a bogus reference on
+		 * the request queue
+ */
+ head->disk->queue = NULL;
+ }
put_disk(head->disk);
}
spinlock_t requeue_lock;
struct work_struct requeue_work;
struct mutex lock;
+ unsigned long flags;
+#define NVME_NSHEAD_DISK_LIVE 0
struct nvme_ns __rcu *current_path[];
#endif
};
dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
dev->admin_tagset.timeout = ADMIN_TIMEOUT;
- dev->admin_tagset.numa_node = dev_to_node(dev->dev);
+ dev->admin_tagset.numa_node = dev->ctrl.numa_node;
dev->admin_tagset.cmd_size = sizeof(struct nvme_iod);
dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
dev->admin_tagset.driver_data = dev;
if (result)
return result;
+ dev->ctrl.numa_node = dev_to_node(dev->dev);
+
nvmeq = &dev->queues[0];
aqa = nvmeq->q_depth - 1;
aqa |= aqa << 16;
if (dev->io_queues[HCTX_TYPE_POLL])
dev->tagset.nr_maps++;
dev->tagset.timeout = NVME_IO_TIMEOUT;
- dev->tagset.numa_node = dev_to_node(dev->dev);
+ dev->tagset.numa_node = dev->ctrl.numa_node;
dev->tagset.queue_depth =
min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
dev->tagset.cmd_size = sizeof(struct nvme_iod);
 * Spread I/O queues completion vectors according to their queue index.
* Admin queues can always go on completion vector 0.
*/
- comp_vector = idx == 0 ? idx : idx - 1;
+ comp_vector = (idx == 0 ? idx : idx - 1) % ibdev->num_comp_vectors;
/* Polling queues need direct cq polling context */
if (nvme_rdma_poll_queue(queue))
set->ops = &nvme_tcp_admin_mq_ops;
set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
set->reserved_tags = 2; /* connect + keep-alive */
- set->numa_node = NUMA_NO_NODE;
+ set->numa_node = nctrl->numa_node;
set->flags = BLK_MQ_F_BLOCKING;
set->cmd_size = sizeof(struct nvme_tcp_request);
set->driver_data = ctrl;
set->ops = &nvme_tcp_mq_ops;
set->queue_depth = nctrl->sqsize + 1;
set->reserved_tags = 1; /* fabric connect */
- set->numa_node = NUMA_NO_NODE;
+ set->numa_node = nctrl->numa_node;
set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
set->cmd_size = sizeof(struct nvme_tcp_request);
set->driver_data = ctrl;
ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
- ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
+ ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
ctrl->admin_tag_set.driver_data = ctrl;
ctrl->tag_set.ops = &nvme_loop_mq_ops;
ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
ctrl->tag_set.reserved_tags = 1; /* fabric connect */
- ctrl->tag_set.numa_node = NUMA_NO_NODE;
+ ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
child, addr);
if (of_mdiobus_child_is_phy(child)) {
+ /* -ENODEV is the return code that PHYLIB has
+ * standardized on to indicate that bus
+ * scanning should continue.
+ */
rc = of_mdiobus_register_phy(mdio, child, addr);
- if (rc && rc != -ENODEV)
+ if (!rc)
+ break;
+ if (rc != -ENODEV)
goto unregister;
- break;
}
}
}
pm_runtime_put(vg->dev);
}
+static void byt_gpio_direct_irq_check(struct intel_pinctrl *vg,
+ unsigned int offset)
+{
+ void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
+
+ /*
+ * Before making any direction modifications, do a check if gpio is set
+ * for direct IRQ. On Bay Trail, setting GPIO to output does not make
+ * sense, so let's at least inform the caller before they shoot
+ * themselves in the foot.
+ */
+ if (readl(conf_reg) & BYT_DIRECT_IRQ_EN)
+ dev_info_once(vg->dev, "Potential Error: Setting GPIO with direct_irq_en to output");
+}
+
static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
struct pinctrl_gpio_range *range,
unsigned int offset,
{
struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctl_dev);
void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
- void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
unsigned long flags;
u32 value;
value &= ~BYT_DIR_MASK;
if (input)
value |= BYT_OUTPUT_EN;
- else if (readl(conf_reg) & BYT_DIRECT_IRQ_EN)
- /*
- * Before making any direction modifications, do a check if gpio
- * is set for direct IRQ. On baytrail, setting GPIO to output
- * does not make sense, so let's at least inform the caller before
- * they shoot themselves in the foot.
- */
- dev_info_once(vg->dev, "Potential Error: Setting GPIO with direct_irq_en to output");
+ else
+ byt_gpio_direct_irq_check(vg, offset);
writel(value, val_reg);
static int byt_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
{
- return pinctrl_gpio_direction_input(chip->base + offset);
+ struct intel_pinctrl *vg = gpiochip_get_data(chip);
+ void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+ unsigned long flags;
+ u32 reg;
+
+ raw_spin_lock_irqsave(&byt_lock, flags);
+
+ reg = readl(val_reg);
+ reg &= ~BYT_DIR_MASK;
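+	/* Counter-intuitively, setting BYT_OUTPUT_EN disables the output
+	 * driver and thus makes the pin an input; compare
+	 * byt_gpio_set_direction() above.
+	 */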
+ reg |= BYT_OUTPUT_EN;
+ writel(reg, val_reg);
+
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
+ return 0;
}
+/*
+ * Note: despite the temptation, this MUST NOT be converted into a call to
+ * pinctrl_gpio_direction_output() + byt_gpio_set(); that does not work. This
+ * MUST be done as a single BYT_VAL_REG register write.
+ * See the commit message of the commit adding this comment for details.
+ */
static int byt_gpio_direction_output(struct gpio_chip *chip,
unsigned int offset, int value)
{
- int ret = pinctrl_gpio_direction_output(chip->base + offset);
+ struct intel_pinctrl *vg = gpiochip_get_data(chip);
+ void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+ unsigned long flags;
+ u32 reg;
- if (ret)
- return ret;
+ raw_spin_lock_irqsave(&byt_lock, flags);
+
+ byt_gpio_direct_irq_check(vg, offset);
- byt_gpio_set(chip, offset, value);
+ reg = readl(val_reg);
+ reg &= ~BYT_DIR_MASK;
+ if (value)
+ reg |= BYT_LEVEL;
+ else
+ reg &= ~BYT_LEVEL;
+ writel(reg, val_reg);
+
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
return 0;
}
{
.name = "uart0",
.pins = uart0_pins,
- .npins = 9,
+ .npins = 5,
},
{
.name = "uart1",
config REGULATOR_MT6358
tristate "MediaTek MT6358 PMIC"
- depends on MFD_MT6397 && BROKEN
+ depends on MFD_MT6397
help
Say y here to select this option to enable the power regulator of
MediaTek MT6358 PMIC.
},
{
DA9063_LDO(DA9063, LDO9, 950, 50, 3600),
- .suspend = BFIELD(DA9063_REG_LDO9_CONT, DA9063_VLDO9_SEL),
},
{
DA9063_LDO(DA9063, LDO11, 900, 50, 3600),
continue;
}
- ret = selector + sel;
+ ret = selector + sel - range->min_sel;
voltage = rdev->desc->ops->list_voltage(rdev, ret);
};
+static const struct regulator_ops pfuze3000_sw_regulator_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_ascend,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_ramp_delay = pfuze100_set_ramp_delay,
+};
+
#define PFUZE100_FIXED_REG(_chip, _name, base, voltage) \
[_chip ## _ ## _name] = { \
.desc = { \
.stby_mask = 0x20, \
}
-
-#define PFUZE3000_SW2_REG(_chip, _name, base, min, max, step) { \
- .desc = { \
- .name = #_name,\
- .n_voltages = ((max) - (min)) / (step) + 1, \
- .ops = &pfuze100_sw_regulator_ops, \
- .type = REGULATOR_VOLTAGE, \
- .id = _chip ## _ ## _name, \
- .owner = THIS_MODULE, \
- .min_uV = (min), \
- .uV_step = (step), \
- .vsel_reg = (base) + PFUZE100_VOL_OFFSET, \
- .vsel_mask = 0x7, \
- }, \
- .stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \
- .stby_mask = 0x7, \
-}
+/* No linear case for some of the PFUZE3000 switches */
+#define PFUZE3000_SW_REG(_chip, _name, base, mask, voltages) \
+ [_chip ## _ ## _name] = { \
+ .desc = { \
+ .name = #_name, \
+ .n_voltages = ARRAY_SIZE(voltages), \
+ .ops = &pfuze3000_sw_regulator_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = _chip ## _ ## _name, \
+ .owner = THIS_MODULE, \
+ .volt_table = voltages, \
+ .vsel_reg = (base) + PFUZE100_VOL_OFFSET, \
+ .vsel_mask = (mask), \
+ .enable_reg = (base) + PFUZE100_MODE_OFFSET, \
+ .enable_mask = 0xf, \
+ .enable_val = 0x8, \
+ .enable_time = 500, \
+ }, \
+ .stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \
+ .stby_mask = (mask), \
+ .sw_reg = true, \
+ }
#define PFUZE3000_SW3_REG(_chip, _name, base, min, max, step) { \
.desc = { \
};
static struct pfuze_regulator pfuze3000_regulators[] = {
- PFUZE100_SWB_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
+ PFUZE3000_SW_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
PFUZE100_SW_REG(PFUZE3000, SW1B, PFUZE100_SW1CVOL, 700000, 1475000, 25000),
- PFUZE100_SWB_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
+ PFUZE3000_SW_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
PFUZE3000_SW3_REG(PFUZE3000, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000),
PFUZE100_SWB_REG(PFUZE3000, SWBST, PFUZE100_SWBSTCON1, 0x3, pfuze100_swbst),
PFUZE100_SWB_REG(PFUZE3000, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
};
static struct pfuze_regulator pfuze3001_regulators[] = {
- PFUZE100_SWB_REG(PFUZE3001, SW1, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
- PFUZE100_SWB_REG(PFUZE3001, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
+ PFUZE3000_SW_REG(PFUZE3001, SW1, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
+ PFUZE3000_SW_REG(PFUZE3001, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
PFUZE3000_SW3_REG(PFUZE3001, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000),
PFUZE100_SWB_REG(PFUZE3001, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
PFUZE100_VGEN_REG(PFUZE3001, VLDO1, PFUZE100_VGEN1VOL, 1800000, 3300000, 100000),
* Eric Farman <farman@linux.ibm.com>
*/
+#include <linux/slab.h>
#include <linux/vfio.h>
#include "vfio_ccw_private.h"
int fallback = *(int *)reply->param;
QETH_CARD_TEXT(card, 4, "setaccb");
- if (cmd->hdr.return_code)
- return -EIO;
- qeth_setadpparms_inspect_rc(cmd);
access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
QETH_CARD_TEXT_(card, 2, "rc=%d",
QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
access_ctrl_req->subcmd_code, CARD_DEVID(card),
cmd->data.setadapterparms.hdr.return_code);
- switch (cmd->data.setadapterparms.hdr.return_code) {
+ switch (qeth_setadpparms_inspect_rc(cmd)) {
case SET_ACCESS_CTRL_RC_SUCCESS:
if (card->options.isolation == ISOLATION_MODE_NONE) {
dev_info(&card->gdev->dev,
struct net_device *dev,
netdev_features_t features)
{
+ struct qeth_card *card = dev->ml_priv;
+
/* Traffic with local next-hop is not eligible for some offloads: */
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- struct qeth_card *card = dev->ml_priv;
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ card->options.isolation != ISOLATION_MODE_FWD) {
netdev_features_t restricted = 0;
if (skb_is_gso(skb) && !netif_needs_gso(skb, features))
ZFCP_STATUS_ERP_TIMEDOUT)) {
req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
zfcp_dbf_rec_run("erscf_1", act);
- req->erp_action = NULL;
+ /* lock-free concurrent access with
+ * zfcp_erp_timeout_handler()
+ */
+ WRITE_ONCE(req->erp_action, NULL);
}
if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
zfcp_dbf_rec_run("erscf_2", act);
void zfcp_erp_timeout_handler(struct timer_list *t)
{
struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
- struct zfcp_erp_action *act = fsf_req->erp_action;
+ struct zfcp_erp_action *act;
+ if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED)
+ return;
+ /* lock-free concurrent access with zfcp_erp_strategy_check_fsfreq() */
+ act = READ_ONCE(fsf_req->erp_action);
+ if (!act)
+ return;
zfcp_erp_notify(act, ZFCP_STATUS_ERP_TIMEDOUT);
}
lockdep_assert_held(&lport->disc.disc_mutex);
rdata = fc_rport_lookup(lport, port_id);
- if (rdata)
+ if (rdata) {
+ kref_put(&rdata->kref, fc_rport_destroy);
return rdata;
+ }
if (lport->rport_priv_size > 0)
rport_priv_size = lport->rport_priv_size;
fc_rport_state_enter(rdata, RPORT_ST_DELETE);
- kref_get(&rdata->kref);
- if (rdata->event == RPORT_EV_NONE &&
- !queue_work(rport_event_queue, &rdata->event_work))
- kref_put(&rdata->kref, fc_rport_destroy);
+ if (rdata->event == RPORT_EV_NONE) {
+ kref_get(&rdata->kref);
+ if (!queue_work(rport_event_queue, &rdata->event_work))
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
rdata->event = event;
}
"iscsi_q_%d", shost->host_no);
ihost->workq = alloc_workqueue("%s",
WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
- 2, ihost->workq_name);
+ 1, ihost->workq_name);
if (!ihost->workq)
goto free_host;
}
lpfc_sli4_xri_exchange_busy_wait(phba);
/* per-phba callback de-registration for hotplug event */
- lpfc_cpuhp_remove(phba);
+ if (phba->pport)
+ lpfc_cpuhp_remove(phba);
/* Disable PCI subsystem interrupt */
lpfc_sli4_disable_intr(phba);
if (!ioc->is_warpdrive) {
ioc_err(ioc, "%s: BRM attribute is only for warpdrive\n",
__func__);
- goto out;
+ return 0;
}
/* pci_access_mutex lock acquired by sysfs show path */
mutex_lock(&ioc->pci_access_mutex);
- if (ioc->pci_error_recovery || ioc->remove_host) {
- mutex_unlock(&ioc->pci_access_mutex);
- return 0;
- }
+ if (ioc->pci_error_recovery || ioc->remove_host)
+ goto out;
/* allocate up to GPIOVal 36 entries */
sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36);
io_unit_pg3 = kzalloc(sz, GFP_KERNEL);
if (!io_unit_pg3) {
+ rc = -ENOMEM;
ioc_err(ioc, "%s: failed allocating memory for iounit_pg3: (%d) bytes\n",
__func__, sz);
goto out;
0) {
ioc_err(ioc, "%s: failed reading iounit_pg3\n",
__func__);
+ rc = -EINVAL;
goto out;
}
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
ioc_err(ioc, "%s: iounit_pg3 failed with ioc_status(0x%04x)\n",
__func__, ioc_status);
+ rc = -EINVAL;
goto out;
}
if (io_unit_pg3->GPIOCount < 25) {
ioc_err(ioc, "%s: iounit_pg3->GPIOCount less than 25 entries, detected (%d) entries\n",
__func__, io_unit_pg3->GPIOCount);
+ rc = -EINVAL;
goto out;
}
qla2x00_clear_loop_id(fcport);
fcport->flags |= FCF_FABRIC_DEVICE;
} else if (fcport->d_id.b24 != rp->id.b24 ||
- fcport->scan_needed) {
+ (fcport->scan_needed &&
+ fcport->port_type != FCT_INITIATOR &&
+ fcport->port_type != FCT_NVME_INITIATOR)) {
qlt_schedule_sess_for_deletion(fcport);
}
fcport->d_id.b24 = rp->id.b24;
break;
}
- if (NVME_TARGET(vha->hw, fcport)) {
+ if (found && NVME_TARGET(vha->hw, fcport)) {
if (fcport->disc_state == DSC_DELETE_PEND) {
qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
vha->fcport_count--;
sp->priv = NULL;
if (priv->comp_status == QLA_SUCCESS) {
fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len);
+ fd->status = NVME_SC_SUCCESS;
} else {
fd->rcv_rsplen = 0;
fd->transferred_length = 0;
+ fd->status = NVME_SC_INTERNAL;
}
- fd->status = 0;
spin_unlock_irqrestore(&priv->cmd_lock, flags);
fd->done(fd);
{"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+ {"FUJITSU", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"SanDisk", "Cruzer Blade", NULL, BLIST_TRY_VPD_PAGES |
BLIST_INQUIRY_36},
{"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
{"LSI", "INF-01-00", "rdac", },
{"ENGENIO", "INF-01-00", "rdac", },
{"LENOVO", "DE_Series", "rdac", },
+ {"FUJITSU", "ETERNUS_AHB", "rdac", },
{NULL, NULL, NULL },
};
iscsi_eh_timer_workq = alloc_workqueue("%s",
WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
- 2, "iscsi_eh");
+ 1, "iscsi_eh");
if (!iscsi_eh_timer_workq) {
err = -ENOMEM;
goto release_nls;
struct spi_transport_attrs *tp \
= (struct spi_transport_attrs *)&starget->starget_data; \
\
- if (i->f->set_##field) \
+ if (!i->f->set_##field) \
return -EINVAL; \
val = simple_strtoul(buf, NULL, 0); \
if (val > tp->max_##field) \
#define OCOTP_UID_LOW 0x410
#define OCOTP_UID_HIGH 0x420
+#define IMX8MP_OCOTP_UID_OFFSET 0x10
+
/* Same as ANADIG_DIGPROG_IMX7D */
#define ANADIG_DIGPROG_IMX8MM 0x800
{
void __iomem *ocotp_base;
struct device_node *np;
+ u32 offset = of_machine_is_compatible("fsl,imx8mp") ?
+ IMX8MP_OCOTP_UID_OFFSET : 0;
np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-ocotp");
if (!np)
ocotp_base = of_iomap(np, 0);
WARN_ON(!ocotp_base);
- soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH);
+ soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH + offset);
soc_uid <<= 32;
- soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW);
+ soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW + offset);
iounmap(ocotp_base);
of_node_put(np);
.soc_revision = imx8mm_soc_revision,
};
-static const struct of_device_id imx8_soc_match[] = {
+static __maybe_unused const struct of_device_id imx8_soc_match[] = {
{ .compatible = "fsl,imx8mq", .data = &imx8mq_soc_data, },
{ .compatible = "fsl,imx8mm", .data = &imx8mm_soc_data, },
{ .compatible = "fsl,imx8mn", .data = &imx8mn_soc_data, },
goto exit;
/* wait for the status to be set */
- ret = readl_relaxed_poll_timeout(reset->prm->base +
- reset->prm->data->rstst,
- v, v & BIT(st_bit), 1,
- OMAP_RESET_MAX_WAIT);
+ ret = readl_relaxed_poll_timeout_atomic(reset->prm->base +
+ reset->prm->data->rstst,
+ v, v & BIT(st_bit), 1,
+ OMAP_RESET_MAX_WAIT);
if (ret)
pr_err("%s: timedout waiting for %s:%lu\n", __func__,
reset->prm->data->name, id);
return;
if (dma->chan_tx) {
- dma_unmap_single(dma->chan_tx->device->dev, dma->tx_dma_phys,
- dma_bufsize, DMA_TO_DEVICE);
+ dma_free_coherent(dma->chan_tx->device->dev, dma_bufsize,
+ dma->tx_dma_buf, dma->tx_dma_phys);
dma_release_channel(dma->chan_tx);
}
if (dma->chan_rx) {
- dma_unmap_single(dma->chan_rx->device->dev, dma->rx_dma_phys,
- dma_bufsize, DMA_FROM_DEVICE);
+ dma_free_coherent(dma->chan_rx->device->dev, dma_bufsize,
+ dma->rx_dma_buf, dma->rx_dma_phys);
dma_release_channel(dma->chan_rx);
}
}
struct spi_controller *ctlr = dev_get_drvdata(dev);
struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
+ if (dspi->irq)
+ disable_irq(dspi->irq);
spi_controller_suspend(ctlr);
clk_disable_unprepare(dspi->clk);
if (ret)
return ret;
spi_controller_resume(ctlr);
+ if (dspi->irq)
+ enable_irq(dspi->irq);
return 0;
}
goto poll_mode;
}
- ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt,
- IRQF_SHARED, pdev->name, dspi);
+ init_completion(&dspi->xfer_done);
+
+ ret = request_threaded_irq(dspi->irq, dspi_interrupt, NULL,
+ IRQF_SHARED, pdev->name, dspi);
if (ret < 0) {
dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
goto out_clk_put;
}
- init_completion(&dspi->xfer_done);
-
poll_mode:
if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
ret = dspi_request_dma(dspi, res->start);
if (ret < 0) {
dev_err(&pdev->dev, "can't get dma channels\n");
- goto out_clk_put;
+ goto out_free_irq;
}
}
ret = spi_register_controller(ctlr);
if (ret != 0) {
dev_err(&pdev->dev, "Problem registering DSPI ctlr\n");
- goto out_clk_put;
+ goto out_free_irq;
}
return ret;
+out_free_irq:
+ if (dspi->irq)
+ free_irq(dspi->irq, dspi);
out_clk_put:
clk_disable_unprepare(dspi->clk);
out_ctlr_put:
struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
/* Disconnect from the SPI framework */
- dspi_release_dma(dspi);
- clk_disable_unprepare(dspi->clk);
spi_unregister_controller(dspi->ctlr);
- return 0;
-}
-
-static void dspi_shutdown(struct platform_device *pdev)
-{
- struct spi_controller *ctlr = platform_get_drvdata(pdev);
- struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
-
/* Disable RX and TX */
regmap_update_bits(dspi->regmap, SPI_MCR,
SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF,
regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, SPI_MCR_HALT);
dspi_release_dma(dspi);
+ if (dspi->irq)
+ free_irq(dspi->irq, dspi);
clk_disable_unprepare(dspi->clk);
- spi_unregister_controller(dspi->ctlr);
+
+ return 0;
+}
+
+static void dspi_shutdown(struct platform_device *pdev)
+{
+ dspi_remove(pdev);
}
static struct platform_driver fsl_dspi_driver = {
{ PCI_VDEVICE(INTEL, 0x4daa), LPSS_CNL_SSP },
{ PCI_VDEVICE(INTEL, 0x4dab), LPSS_CNL_SSP },
{ PCI_VDEVICE(INTEL, 0x4dfb), LPSS_CNL_SSP },
+ /* TGL-H */
+ { PCI_VDEVICE(INTEL, 0x43aa), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x43ab), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x43fb), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x43fd), LPSS_CNL_SSP },
/* APL */
{ PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP },
{ PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP },
struct rspi_data {
void __iomem *addr;
- u32 max_speed_hz;
+ u32 speed_hz;
struct spi_controller *ctlr;
struct platform_device *pdev;
wait_queue_head_t wait;
rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
/* Sets transfer bit rate */
- spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk),
- 2 * rspi->max_speed_hz) - 1;
+ spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), 2 * rspi->speed_hz) - 1;
rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
/* Disable dummy transmission, set 16-bit word access, 1 frame */
clksrc = clk_get_rate(rspi->clk);
while (div < 3) {
- if (rspi->max_speed_hz >= clksrc/4) /* 4=(CLK/2)/2 */
+ if (rspi->speed_hz >= clksrc/4) /* 4=(CLK/2)/2 */
break;
div++;
clksrc /= 2;
}
/* Sets transfer bit rate */
- spbr = DIV_ROUND_UP(clksrc, 2 * rspi->max_speed_hz) - 1;
+ spbr = DIV_ROUND_UP(clksrc, 2 * rspi->speed_hz) - 1;
rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
rspi->spcmd |= div << 2;
rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
/* Sets transfer bit rate */
- spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), 2 * rspi->max_speed_hz);
+ spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), 2 * rspi->speed_hz);
rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
/* Disable dummy transmission, set byte access */
{
struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
struct spi_device *spi = msg->spi;
+ const struct spi_transfer *xfer;
int ret;
- rspi->max_speed_hz = spi->max_speed_hz;
+ /*
+ * As the Bit Rate Register must not be changed while the device is
+ * active, all transfers in a message must use the same bit rate.
+ * In theory, the sequencer could be enabled, and each Command Register
+ * could divide the base bit rate by a different value.
+ * However, most RSPI variants do not have Transfer Data Length
+ * Multiplier Setting Registers, so each sequence step would be limited
+ * to a single word, making this feature unsuitable for large
+ * transfers, which would gain most from it.
+ */
+ rspi->speed_hz = spi->max_speed_hz;
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if (xfer->speed_hz < rspi->speed_hz)
+ rspi->speed_hz = xfer->speed_hz;
+ }
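
For a worked example of the SPBR formula the setup paths above share (spbr = DIV_ROUND_UP(clk, 2 * speed) - 1), assume a hypothetical 48 MHz input clock and a 1 MHz slowest transfer in the message:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long clk = 48000000;   /* assumed input clock */
        unsigned long speed = 1000000;  /* slowest transfer in the message */
        int spbr = DIV_ROUND_UP(clk, 2 * speed) - 1;    /* = 23 */

        printf("SPBR=%d, rate=%lu Hz\n", spbr, clk / (2 * (spbr + 1)));
        return 0;
}
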
rspi->spcmd = SPCMD_SSLKP;
if (spi->mode & SPI_CPOL)
sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_CTRL, val);
/* Load the watchdog timeout value, 50ms is always enough. */
+ sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_HIGH, 0);
sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_LOW,
WDG_LOAD_VAL & WDG_LOAD_MASK);
- sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_HIGH, 0);
/* Start the watchdog to reset system */
sprd_adi_read(sadi, sadi->slave_pbase + REG_WDG_CTRL, &val);
.exec_op = stm32_qspi_exec_op,
};
-static void stm32_qspi_release(struct stm32_qspi *qspi)
-{
- pm_runtime_get_sync(qspi->dev);
- /* disable qspi */
- writel_relaxed(0, qspi->io_base + QSPI_CR);
- stm32_qspi_dma_free(qspi);
- mutex_destroy(&qspi->lock);
- pm_runtime_put_noidle(qspi->dev);
- pm_runtime_disable(qspi->dev);
- pm_runtime_set_suspended(qspi->dev);
- pm_runtime_dont_use_autosuspend(qspi->dev);
- clk_disable_unprepare(qspi->clk);
-}
-
static int stm32_qspi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
if (IS_ERR(rstc)) {
ret = PTR_ERR(rstc);
if (ret == -EPROBE_DEFER)
- goto err_qspi_release;
+ goto err_clk_disable;
} else {
reset_control_assert(rstc);
udelay(2);
platform_set_drvdata(pdev, qspi);
ret = stm32_qspi_dma_setup(qspi);
if (ret)
- goto err_qspi_release;
+ goto err_dma_free;
mutex_init(&qspi->lock);
ret = devm_spi_register_master(dev, ctrl);
if (ret)
- goto err_qspi_release;
+ goto err_pm_runtime_free;
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
-err_qspi_release:
- stm32_qspi_release(qspi);
+err_pm_runtime_free:
+ pm_runtime_get_sync(qspi->dev);
+ /* disable qspi */
+ writel_relaxed(0, qspi->io_base + QSPI_CR);
+ mutex_destroy(&qspi->lock);
+ pm_runtime_put_noidle(qspi->dev);
+ pm_runtime_disable(qspi->dev);
+ pm_runtime_set_suspended(qspi->dev);
+ pm_runtime_dont_use_autosuspend(qspi->dev);
+err_dma_free:
+ stm32_qspi_dma_free(qspi);
+err_clk_disable:
+ clk_disable_unprepare(qspi->clk);
err_master_put:
spi_master_put(qspi->ctrl);
{
struct stm32_qspi *qspi = platform_get_drvdata(pdev);
- stm32_qspi_release(qspi);
+ pm_runtime_get_sync(qspi->dev);
+ /* disable qspi */
+ writel_relaxed(0, qspi->io_base + QSPI_CR);
+ stm32_qspi_dma_free(qspi);
+ mutex_destroy(&qspi->lock);
+ pm_runtime_put_noidle(qspi->dev);
+ pm_runtime_disable(qspi->dev);
+ pm_runtime_set_suspended(qspi->dev);
+ pm_runtime_dont_use_autosuspend(qspi->dev);
+ clk_disable_unprepare(qspi->clk);
return 0;
}
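
The stm32-qspi probe now unwinds through dedicated labels in reverse order of acquisition rather than one catch-all release helper. The general goto-ladder shape, reduced to three placeholder resources:

#include <stdio.h>

static int acquire(const char *what, int fail)
{
        if (fail) {
                printf("failed to acquire %s\n", what);
                return -1;
        }
        printf("acquired %s\n", what);
        return 0;
}

static void release(const char *what) { printf("released %s\n", what); }

static int probe(int fail_at)
{
        int err;

        err = acquire("clk", fail_at == 1);
        if (err)
                return err;
        err = acquire("dma", fail_at == 2);
        if (err)
                goto err_clk;
        err = acquire("pm", fail_at == 3);
        if (err)
                goto err_dma;
        return 0;

err_dma:
        release("dma");
err_clk:
        release("clk");
        return err;
}

int main(void)
{
        return probe(3) ? 1 : 0;        /* releases dma, then clk */
}
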
static int spidev_release(struct inode *inode, struct file *filp)
{
struct spidev_data *spidev;
+ int dofree;
mutex_lock(&device_list_lock);
spidev = filp->private_data;
filp->private_data = NULL;
+ spin_lock_irq(&spidev->spi_lock);
+ /* ... after we unbound from the underlying device? */
+ dofree = (spidev->spi == NULL);
+ spin_unlock_irq(&spidev->spi_lock);
+
/* last close? */
spidev->users--;
if (!spidev->users) {
- int dofree;
kfree(spidev->tx_buffer);
spidev->tx_buffer = NULL;
kfree(spidev->rx_buffer);
spidev->rx_buffer = NULL;
- spin_lock_irq(&spidev->spi_lock);
- if (spidev->spi)
- spidev->speed_hz = spidev->spi->max_speed_hz;
-
- /* ... after we unbound from the underlying device? */
- dofree = (spidev->spi == NULL);
- spin_unlock_irq(&spidev->spi_lock);
-
if (dofree)
kfree(spidev);
+ else
+ spidev->speed_hz = spidev->spi->max_speed_hz;
}
#ifdef CONFIG_SPI_SLAVE
- spi_slave_abort(spidev->spi);
+ if (!dofree)
+ spi_slave_abort(spidev->spi);
#endif
mutex_unlock(&device_list_lock);
{
struct spidev_data *spidev = spi_get_drvdata(spi);
+ /* prevent new opens */
+ mutex_lock(&device_list_lock);
/* make sure ops on existing fds can abort cleanly */
spin_lock_irq(&spidev->spi_lock);
spidev->spi = NULL;
spin_unlock_irq(&spidev->spi_lock);
- /* prevent new opens */
- mutex_lock(&device_list_lock);
list_del(&spidev->device_entry);
device_destroy(spidev_class, spidev->devt);
clear_bit(MINOR(spidev->devt), minors);
pIE = (struct ndis_80211_var_ie *)rtw_get_ie(pvar_ie, _SUPPORTEDRATES_IE_, &ie_len, var_ie_len);
if (!pIE)
return _FAIL;
+ if (ie_len > sizeof(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates))
+ return _FAIL;
memcpy(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates, pIE->data, ie_len);
supportRateNum = ie_len;
pIE = (struct ndis_80211_var_ie *)rtw_get_ie(pvar_ie, _EXT_SUPPORTEDRATES_IE_, &ie_len, var_ie_len);
- if (pIE)
+ if (pIE && (ie_len <= sizeof(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates) - supportRateNum))
memcpy((pmlmeinfo->FW_sta_info[cam_idx].SupportedRates + supportRateNum), pIE->data, ie_len);
return _SUCCESS;
}
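
Both memcpy() calls into FW_sta_info[].SupportedRates are now bounded by the destination's remaining capacity. The same append-with-capacity-check pattern in isolation (buffer size and IE contents are made up):

#include <stdio.h>
#include <string.h>

#define RATES_MAX 16

static int append_ie(unsigned char *dst, size_t *used,
                     const unsigned char *src, size_t len)
{
        if (len > RATES_MAX - *used)    /* would overflow the fixed array */
                return -1;
        memcpy(dst + *used, src, len);
        *used += len;
        return 0;
}

int main(void)
{
        unsigned char rates[RATES_MAX];
        size_t used = 0;
        const unsigned char basic[8] = { 0x82, 0x84, 0x8b, 0x96 };
        const unsigned char ext[12]  = { 0x0c, 0x12, 0x18, 0x24 };

        if (append_ie(rates, &used, basic, sizeof(basic)))
                return 1;
        if (append_ie(rates, &used, ext, sizeof(ext)))
                printf("extended rates dropped (would overflow)\n");
        printf("used=%zu\n", used);
        return 0;
}
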
int hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req,
- int chan_start_idx, int chan_num)
+ int chan_start_idx, int chan_num, int *timeout)
{
int ret, i;
struct hif_msg *hif;
tmo_chan_fg = 512 * USEC_PER_TU + body->probe_delay;
tmo_chan_fg *= body->num_of_probe_requests;
tmo = chan_num * max(tmo_chan_bg, tmo_chan_fg) + 512 * USEC_PER_TU;
+ if (timeout)
+ *timeout = usecs_to_jiffies(tmo);
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_START_SCAN, buf_len);
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
kfree(hif);
- return ret ? ret : usecs_to_jiffies(tmo);
+ return ret;
}
int hif_stop_scan(struct wfx_vif *wvif)
int hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id,
void *buf, size_t buf_size);
int hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req80211,
- int chan_start, int chan_num);
+ int chan_start, int chan_num, int *timeout);
int hif_stop_scan(struct wfx_vif *wvif);
int hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
struct ieee80211_channel *channel, const u8 *ssid, int ssidlen);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
sorted_queues[i] = &wdev->tx_queue[i];
for (j = i; j > 0; j--)
- if (atomic_read(&sorted_queues[j]->pending_frames) >
+ if (atomic_read(&sorted_queues[j]->pending_frames) <
atomic_read(&sorted_queues[j - 1]->pending_frames))
swap(sorted_queues[j - 1], sorted_queues[j]);
}
if (atomic_read(&wdev->tx_lock))
return NULL;
-
- for (;;) {
- skb = wfx_tx_queues_get_skb(wdev);
- if (!skb)
- return NULL;
- skb_queue_tail(&wdev->tx_pending, skb);
- wake_up(&wdev->tx_dequeue);
- tx_priv = wfx_skb_tx_priv(skb);
- tx_priv->xmit_timestamp = ktime_get();
- return (struct hif_msg *)skb->data;
- }
+ skb = wfx_tx_queues_get_skb(wdev);
+ if (!skb)
+ return NULL;
+ skb_queue_tail(&wdev->tx_pending, skb);
+ wake_up(&wdev->tx_dequeue);
+ tx_priv = wfx_skb_tx_priv(skb);
+ tx_priv->xmit_timestamp = ktime_get();
+ return (struct hif_msg *)skb->data;
}
wfx_tx_lock_flush(wvif->wdev);
wvif->scan_abort = false;
reinit_completion(&wvif->scan_complete);
- timeout = hif_scan(wvif, req, start_idx, i - start_idx);
- if (timeout < 0) {
+ ret = hif_scan(wvif, req, start_idx, i - start_idx, &timeout);
+ if (ret) {
wfx_tx_unlock(wvif->wdev);
- return timeout;
+ return -EIO;
}
ret = wait_for_completion_timeout(&wvif->scan_complete, timeout);
if (req->channels[start_idx]->max_power != wvif->vif->bss_conf.txpower)
{
int i;
- for (i = cpufreq_cdev->max_level - 1; i >= 0; i--) {
- if (power > cpufreq_cdev->em->table[i].power)
+ for (i = cpufreq_cdev->max_level; i >= 0; i--) {
+ if (power >= cpufreq_cdev->em->table[i].power)
break;
}
- return cpufreq_cdev->em->table[i + 1].frequency;
+ return cpufreq_cdev->em->table[i].frequency;
}
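
The fixed loop scans the energy-model table from the top level down and stops at the first entry whose power fits the request, then returns that entry's frequency directly instead of table[i + 1]. A sketch with a hypothetical four-entry table, assuming (as the driver does) that the requested power is never below the lowest entry:

#include <stdio.h>

struct em_entry { unsigned long power, frequency; };

/* hypothetical table, ascending like the kernel's energy model */
static const struct em_entry table[] = {
        {  100,  500000 },
        {  250, 1000000 },
        {  600, 1500000 },
        { 1200, 2000000 },
};
static const int max_level = 3;

static unsigned long power_to_freq(unsigned long power)
{
        int i;

        /* scan from the top; stop at the first level we can afford */
        for (i = max_level; i >= 0; i--) {
                if (power >= table[i].power)
                        break;
        }
        return table[i].frequency;
}

int main(void)
{
        printf("%lu\n", power_to_freq(700));    /* 1500000 */
        printf("%lu\n", power_to_freq(100));    /* 500000 */
        return 0;
}
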
/**
static int imx_thermal_register_legacy_cooling(struct imx_thermal_data *data)
{
struct device_node *np;
- int ret;
+ int ret = 0;
data->policy = cpufreq_cpu_get(0);
if (!data->policy) {
if (IS_ERR(data->cdev)) {
ret = PTR_ERR(data->cdev);
cpufreq_cpu_put(data->policy);
- return ret;
}
}
- return 0;
+ of_node_put(np);
+
+ return ret;
}
static void imx_thermal_unregister_legacy_cooling(struct imx_thermal_data *data)
/* The total number of temperature sensors in the MT8183 */
#define MT8183_NUM_SENSORS 6
+/* The number of banks in the MT8183 */
+#define MT8183_NUM_ZONES 1
+
/* The number of sensing points per bank */
#define MT8183_NUM_SENSORS_PER_ZONE 6
*/
static const struct mtk_thermal_data mt8183_thermal_data = {
.auxadc_channel = MT8183_TEMP_AUXADC_CHANNEL,
- .num_banks = MT8183_NUM_SENSORS_PER_ZONE,
+ .num_banks = MT8183_NUM_ZONES,
.num_sensors = MT8183_NUM_SENSORS,
.vts_index = mt8183_vts_index,
.cali_val = MT8183_CALIBRATION,
*
* Return: IRQ_HANDLED
*/
-irqreturn_t tsens_critical_irq_thread(int irq, void *data)
+static irqreturn_t tsens_critical_irq_thread(int irq, void *data)
{
struct tsens_priv *priv = data;
struct tsens_irq_data d;
*
* Return: IRQ_HANDLED
*/
-irqreturn_t tsens_irq_thread(int irq, void *data)
+static irqreturn_t tsens_irq_thread(int irq, void *data)
{
struct tsens_priv *priv = data;
struct tsens_irq_data d;
return IRQ_HANDLED;
}
-int tsens_set_trips(void *_sensor, int low, int high)
+static int tsens_set_trips(void *_sensor, int low, int high)
{
struct tsens_sensor *s = _sensor;
struct tsens_priv *priv = s->priv;
return 0;
}
-int tsens_enable_irq(struct tsens_priv *priv)
+static int tsens_enable_irq(struct tsens_priv *priv)
{
int ret;
int val = tsens_version(priv) > VER_1_X ? 7 : 1;
return ret;
}
-void tsens_disable_irq(struct tsens_priv *priv)
+static void tsens_disable_irq(struct tsens_priv *priv)
{
regmap_field_write(priv->rf[INT_EN], 0);
}
{
struct rcar_gen3_thermal_tsc *tsc = devdata;
int mcelsius, val;
- u32 reg;
+ int reg;
/* Read register and convert to millidegrees Celsius */
reg = rcar_gen3_thermal_read(tsc, REG_GEN3_TEMP) & CTEMP_MASK;
thm->var_data = pdata;
thm->base = devm_platform_ioremap_resource(pdev, 0);
- if (!thm->base)
- return -ENOMEM;
+ if (IS_ERR(thm->base))
+ return PTR_ERR(thm->base);
thm->nr_sensors = of_get_child_count(np);
if (thm->nr_sensors == 0 || thm->nr_sensors > SPRD_THM_MAX_SENSOR) {
* I/O utilities that messages sent to the console will automatically
* be displayed on the dbg_io.
*/
- dbg_io_ops->is_console = true;
+ dbg_io_ops->cons = co;
return 0;
}
#if IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE)
static struct kgdb_io kgdboc_earlycon_io_ops;
-static struct console *earlycon;
static int (*earlycon_orig_exit)(struct console *con);
#endif /* IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) */
#if IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE)
static void cleanup_earlycon(void)
{
- if (earlycon)
+ if (kgdboc_earlycon_io_ops.cons)
kgdb_unregister_io_module(&kgdboc_earlycon_io_ops);
}
#else /* !IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) */
goto noconfig;
}
- kgdboc_io_ops.is_console = 0;
+ kgdboc_io_ops.cons = NULL;
kgdb_tty_driver = NULL;
kgdboc_use_kms = 0;
int idx;
if (cons->device && cons->device(cons, &idx) == p &&
idx == tty_line) {
- kgdboc_io_ops.is_console = 1;
+ kgdboc_io_ops.cons = cons;
break;
}
}
{
char c;
- if (!earlycon->read(earlycon, &c, 1))
+ if (!kgdboc_earlycon_io_ops.cons->read(kgdboc_earlycon_io_ops.cons,
+ &c, 1))
return NO_POLL_CHAR;
return c;
static void kgdboc_earlycon_put_char(u8 chr)
{
- earlycon->write(earlycon, &chr, 1);
+ kgdboc_earlycon_io_ops.cons->write(kgdboc_earlycon_io_ops.cons, &chr,
+ 1);
}
static void kgdboc_earlycon_pre_exp_handler(void)
* boot if we detect this case.
*/
for_each_console(con)
- if (con == earlycon)
+ if (con == kgdboc_earlycon_io_ops.cons)
return;
already_warned = true;
static void kgdboc_earlycon_deinit(void)
{
- if (!earlycon)
+ if (!kgdboc_earlycon_io_ops.cons)
return;
- if (earlycon->exit == kgdboc_earlycon_deferred_exit)
+ if (kgdboc_earlycon_io_ops.cons->exit == kgdboc_earlycon_deferred_exit)
/*
* kgdboc_earlycon is exiting but original boot console exit
* was never called (AKA kgdboc_earlycon_deferred_exit()
* didn't ever run). Undo our trap.
*/
- earlycon->exit = earlycon_orig_exit;
- else if (earlycon->exit)
+ kgdboc_earlycon_io_ops.cons->exit = earlycon_orig_exit;
+ else if (kgdboc_earlycon_io_ops.cons->exit)
/*
* We skipped calling the exit() routine so we could try to
* keep using the boot console even after it went away. We're
* finally done so call the function now.
*/
- earlycon->exit(earlycon);
+ kgdboc_earlycon_io_ops.cons->exit(kgdboc_earlycon_io_ops.cons);
- earlycon = NULL;
+ kgdboc_earlycon_io_ops.cons = NULL;
}
static struct kgdb_io kgdboc_earlycon_io_ops = {
.write_char = kgdboc_earlycon_put_char,
.pre_exception = kgdboc_earlycon_pre_exp_handler,
.deinit = kgdboc_earlycon_deinit,
- .is_console = true,
};
#define MAX_CONSOLE_NAME_LEN (sizeof((struct console *) 0)->name)
goto unlock;
}
- earlycon = con;
+ kgdboc_earlycon_io_ops.cons = con;
pr_info("Going to register kgdb with earlycon '%s'\n", con->name);
if (kgdb_register_io_module(&kgdboc_earlycon_io_ops) != 0) {
- earlycon = NULL;
+ kgdboc_earlycon_io_ops.cons = NULL;
pr_info("Failed to register kgdb with earlycon\n");
} else {
/* Trap exit so we can keep earlycon longer if needed. */
if (!set || (tmode & 0xff) != 0)
return -EINVAL;
- switch (tmode >> 8) {
+ tmode >>= 8;
+ switch (tmode) {
case TEST_J:
case TEST_K:
case TEST_SE0_NAK:
int ret = 0;
u8 zlp = 0;
+ spin_lock_irqsave(&priv_dev->lock, flags);
trace_cdns3_ep0_queue(priv_dev, request);
/* cancel the request if controller receive new SETUP packet. */
- if (cdns3_check_new_setup(priv_dev))
+ if (cdns3_check_new_setup(priv_dev)) {
+ spin_unlock_irqrestore(&priv_dev->lock, flags);
return -ECONNRESET;
+ }
/* send STATUS stage. Should be called only for SET_CONFIGURATION */
if (priv_dev->ep0_stage == CDNS3_STATUS_STAGE) {
- spin_lock_irqsave(&priv_dev->lock, flags);
cdns3_select_ep(priv_dev, 0x00);
erdy_sent = !priv_dev->hw_configured_flag;
return 0;
}
- spin_lock_irqsave(&priv_dev->lock, flags);
if (!list_empty(&priv_ep->pending_req_list)) {
dev_err(priv_dev->dev,
"can't handle multiple requests for ep0\n");
__dynamic_array(char, str, CDNS3_MSG_MAX)
),
TP_fast_assign(
- __entry->ep_dir = priv_dev->ep0_data_dir;
+ __entry->ep_dir = priv_dev->selected_ep;
__entry->ep_sts = ep_sts;
),
TP_printk("%s", cdns3_decode_ep0_irq(__get_str(str),
static const struct usb_device_id acm_ids[] = {
/* quirky and broken devices */
+ { USB_DEVICE(0x0424, 0x274e), /* Microchip Technology, Inc. (formerly SMSC) */
+ .driver_info = DISABLE_ECHO, }, /* DISABLE ECHO in termios flag */
{ USB_DEVICE(0x076d, 0x0006), /* Denso Cradle CU-321 */
.driver_info = NO_UNION_NORMAL, },/* has no union descriptor */
{ USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */
/* Logitech HD Webcam C270 */
{ USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME },
- /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
+ /* Logitech HD Pro Webcams C920, C920-C, C922, C925e and C930e */
{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x085b), .driver_info = USB_QUIRK_DELAY_INIT },
+ { USB_DEVICE(0x046d, 0x085c), .driver_info = USB_QUIRK_DELAY_INIT },
/* Logitech ConferenceCam CC3000e */
{ USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
epnum, 0);
}
- ret = usb_add_gadget_udc(dev, &hsotg->gadget);
- if (ret) {
- dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep,
- hsotg->ctrl_req);
- return ret;
- }
dwc2_hsotg_dump(hsotg);
return 0;
if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
dwc2_lowlevel_hw_disable(hsotg);
+#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
+ IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
+ /* Postponed adding a new gadget to the udc class driver list */
+ if (hsotg->gadget_enabled) {
+ retval = usb_add_gadget_udc(hsotg->dev, &hsotg->gadget);
+ if (retval) {
+ dwc2_hsotg_remove(hsotg);
+ goto error_init;
+ }
+ }
+#endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */
return 0;
error_init:
.suspend_clk_idx = -1,
};
-static const struct dwc3_exynos_driverdata exynos5420_drvdata = {
- .clk_names = { "usbdrd30", "usbdrd30_susp_clk"},
- .num_clks = 2,
- .suspend_clk_idx = 1,
-};
-
static const struct dwc3_exynos_driverdata exynos5433_drvdata = {
.clk_names = { "aclk", "susp_clk", "pipe_pclk", "phyclk" },
.num_clks = 4,
{
.compatible = "samsung,exynos5250-dwusb3",
.data = &exynos5250_drvdata,
- }, {
- .compatible = "samsung,exynos5420-dwusb3",
- .data = &exynos5420_drvdata,
}, {
.compatible = "samsung,exynos5433-dwusb3",
.data = &exynos5433_drvdata,
int ret;
ret = pm_runtime_get_sync(&dwc3->dev);
- if (ret)
+ if (ret) {
+ pm_runtime_put_sync_autosuspend(&dwc3->dev);
return;
+ }
pm_runtime_mark_last_busy(&dwc3->dev);
pm_runtime_put_sync_autosuspend(&dwc3->dev);
kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
}
kgdb_register_io_module(&kgdbdbgp_io_ops);
- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
+ if (early_dbgp_console.index != -1)
+ kgdbdbgp_io_ops.cons = &early_dbgp_console;
return 0;
}
return 0;
err_create_workqueue:
- destroy_workqueue(udc->qwork);
+ if (udc->qwork)
+ destroy_workqueue(udc->qwork);
err_destroy_dma:
dma_pool_destroy(udc->dtd_pool);
err_free_dma:
hcd->rsrc_len = resource_size(res);
irq = platform_get_irq(pdev, 0);
- if (!irq) {
- dev_err(&pdev->dev, "Failed to get IRQ\n");
- err = -ENODEV;
+ if (irq < 0) {
+ err = irq;
goto fail_io;
}
ehci_info(ehci, "applying MosChip frame-index workaround\n");
ehci->frame_index_bug = 1;
break;
+ case PCI_VENDOR_ID_HUAWEI:
+ /* Synopsys HC bug */
+ if (pdev->device == 0xa239) {
+ ehci_info(ehci, "applying Synopsys HC workaround\n");
+ ehci->has_synopsys_hc_bug = 1;
+ }
+ break;
}
/* optional debug port, normally in the first BAR */
struct resource *mem;
usb_remove_hcd(hcd);
+ iounmap(hcd->regs);
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct usb_hcd *shared_hcd = xhci->shared_hcd;
+ pm_runtime_put_noidle(&dev->dev);
+ pm_runtime_disable(&dev->dev);
+
usb_remove_hcd(shared_hcd);
xhci->shared_hcd = NULL;
device_init_wakeup(&dev->dev, false);
xhci_mtk_sch_exit(mtk);
xhci_mtk_clks_disable(mtk);
xhci_mtk_ldos_disable(mtk);
- pm_runtime_put_sync(&dev->dev);
- pm_runtime_disable(&dev->dev);
return 0;
}
xhci->devs[slot_id]->out_ctx, ep_index);
ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
+ ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK);/* must clear */
ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));
int hird, exit_latency;
int ret;
+ if (xhci->quirks & XHCI_HW_LPM_DISABLE)
+ return -EPERM;
+
if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
!udev->lpm_capable)
return -EPERM;
xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
enable ? "enable" : "disable", port_num + 1);
- if (enable && !(xhci->quirks & XHCI_HW_LPM_DISABLE)) {
+ if (enable) {
/* Host supports BESL timeout instead of HIRD */
if (udev->usb2_hw_lpm_besl_capable) {
/* if device doesn't have a preferred BESL value use a
mutex_lock(hcd->bandwidth_mutex);
xhci_change_max_exit_latency(xhci, udev, 0);
mutex_unlock(hcd->bandwidth_mutex);
+ readl_poll_timeout(ports[port_num]->addr, pm_val,
+ (pm_val & PORT_PLS_MASK) == XDEV_U0,
+ 100, 10000);
return 0;
}
}
* 4 - TRB error
* 5-7 - reserved
*/
-#define EP_STATE_MASK (0xf)
+#define EP_STATE_MASK (0x7)
#define EP_STATE_DISABLED 0
#define EP_STATE_RUNNING 1
#define EP_STATE_HALTED 2
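
The mask shrinks from 0xf to 0x7 because, per the comment above, the endpoint state occupies only bits 0-2 (values 0-7); clearing with ~0xf would also wipe the neighbouring reserved bit 3. A quick illustration:

#include <stdint.h>
#include <stdio.h>

#define EP_STATE_MASK 0x7

int main(void)
{
        uint32_t ep_info = 0x9; /* state = 1 (running), reserved bit 3 set */
        uint32_t ok  = ep_info & ~(uint32_t)EP_STATE_MASK;      /* keeps bit 3 */
        uint32_t bad = ep_info & ~(uint32_t)0xf;                /* wipes it too */

        printf("ok=%#x bad=%#x\n", ok, bad);    /* ok=0x8 bad=0 */
        return 0;
}
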
usb_set_intfdata(intf, NULL);
dev_dbg(&intf->dev, "disconnect\n");
+ kfree(dev->buf);
kfree(dev);
}
platform_set_drvdata(pdev, tegra_phy);
- err = usb_add_phy_dev(&tegra_phy->u_phy);
- if (err)
- return err;
-
- return 0;
+ return usb_add_phy_dev(&tegra_phy->u_phy);
}
static int tegra_usb_phy_remove(struct platform_device *pdev)
return info->dma_map_ctrl(chan->device->dev, pkt, map);
}
-static void usbhsf_dma_complete(void *arg);
+static void usbhsf_dma_complete(void *arg,
+ const struct dmaengine_result *result);
static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt)
{
struct usbhs_pipe *pipe = pkt->pipe;
struct dma_chan *chan;
struct device *dev = usbhs_priv_to_dev(priv);
enum dma_transfer_direction dir;
+ dma_cookie_t cookie;
fifo = usbhs_pipe_to_fifo(pipe);
if (!fifo)
if (!desc)
return;
- desc->callback = usbhsf_dma_complete;
- desc->callback_param = pipe;
+ desc->callback_result = usbhsf_dma_complete;
+ desc->callback_param = pkt;
- pkt->cookie = dmaengine_submit(desc);
- if (pkt->cookie < 0) {
+ cookie = dmaengine_submit(desc);
+ if (cookie < 0) {
dev_err(dev, "Failed to submit dma descriptor\n");
return;
}
struct dma_chan *chan, int dtln)
{
struct usbhs_pipe *pipe = pkt->pipe;
- struct dma_tx_state state;
size_t received_size;
int maxp = usbhs_pipe_get_maxpacket(pipe);
- dmaengine_tx_status(chan, pkt->cookie, &state);
- received_size = pkt->length - state.residue;
+ received_size = pkt->length - pkt->dma_result->residue;
if (dtln) {
received_size -= USBHS_USB_DMAC_XFER_SIZE;
return 0;
}
-static void usbhsf_dma_complete(void *arg)
+static void usbhsf_dma_complete(void *arg,
+ const struct dmaengine_result *result)
{
- struct usbhs_pipe *pipe = arg;
+ struct usbhs_pkt *pkt = arg;
+ struct usbhs_pipe *pipe = pkt->pipe;
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
struct device *dev = usbhs_priv_to_dev(priv);
int ret;
+ pkt->dma_result = result;
ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_DMA_DONE);
if (ret < 0)
dev_err(dev, "dma_complete run_error %d : %d\n",
struct usbhs_pkt *pkt);
struct work_struct work;
dma_addr_t dma;
- dma_cookie_t cookie;
+ const struct dmaengine_result *dma_result;
void *buf;
int length;
int trans;
msg[0] = PMC_USB_DP_HPD;
msg[0] |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
- msg[1] = PMC_USB_DP_HPD_IRQ;
+ if (data->status & DP_STATUS_IRQ_HPD)
+ msg[1] = PMC_USB_DP_HPD_IRQ;
if (data->status & DP_STATUS_HPD_STATE)
msg[1] |= PMC_USB_DP_HPD_LVL;
{
struct typec_displayport_data *data = state->data;
struct altmode_req req = { };
+ int ret;
if (data->status & DP_STATUS_IRQ_HPD)
return pmc_usb_mux_dp_hpd(port, state);
if (data->status & DP_STATUS_HPD_STATE)
req.mode_data |= PMC_USB_ALTMODE_HPD_HIGH;
- return pmc_usb_command(port, (void *)&req, sizeof(req));
+ ret = pmc_usb_command(port, (void *)&req, sizeof(req));
+ if (ret)
+ return ret;
+
+ if (data->status & DP_STATUS_HPD_STATE)
+ return pmc_usb_mux_dp_hpd(port, state);
+
+ return 0;
}
static int
return tcpci_irq(chip->tcpci);
}
-static int rt1711h_init_alert(struct rt1711h_chip *chip,
- struct i2c_client *client)
-{
- int ret;
-
- /* Disable chip interrupts before requesting irq */
- ret = rt1711h_write16(chip, TCPC_ALERT_MASK, 0);
- if (ret < 0)
- return ret;
-
- ret = devm_request_threaded_irq(chip->dev, client->irq, NULL,
- rt1711h_irq,
- IRQF_ONESHOT | IRQF_TRIGGER_LOW,
- dev_name(chip->dev), chip);
- if (ret < 0)
- return ret;
- enable_irq_wake(client->irq);
- return 0;
-}
-
static int rt1711h_sw_reset(struct rt1711h_chip *chip)
{
int ret;
if (ret < 0)
return ret;
- ret = rt1711h_init_alert(chip, client);
+ /* Disable chip interrupts before requesting irq */
+ ret = rt1711h_write16(chip, TCPC_ALERT_MASK, 0);
if (ret < 0)
return ret;
if (IS_ERR_OR_NULL(chip->tcpci))
return PTR_ERR(chip->tcpci);
+ ret = devm_request_threaded_irq(chip->dev, client->irq, NULL,
+ rt1711h_irq,
+ IRQF_ONESHOT | IRQF_TRIGGER_LOW,
+ dev_name(chip->dev), chip);
+ if (ret < 0)
+ return ret;
+ enable_irq_wake(client->irq);
+
return 0;
}
* @config: the bus operations that is supported by this device
* @size: size of the parent structure that contains private data
*
- * Drvier should use vdap_alloc_device() wrapper macro instead of
+ * Driver should use vdpa_alloc_device() wrapper macro instead of
* using this directly.
*
* Returns an error when parent/config/dma_dev is not set or fail to get
vfio_pci_vf_token_user_add(vdev, -1);
vfio_spapr_pci_eeh_release(vdev->pdev);
vfio_pci_disable(vdev);
- if (vdev->err_trigger)
+ if (vdev->err_trigger) {
eventfd_ctx_put(vdev->err_trigger);
- if (vdev->req_trigger)
+ vdev->err_trigger = NULL;
+ }
+ if (vdev->req_trigger) {
eventfd_ctx_put(vdev->req_trigger);
+ vdev->req_trigger = NULL;
+ }
}
mutex_unlock(&vdev->reflck->lock);
/* Caller should hold memory_lock semaphore */
bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev)
{
+ struct pci_dev *pdev = vdev->pdev;
u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);
- return cmd & PCI_COMMAND_MEMORY;
+ /*
+ * SR-IOV VF memory enable is handled by the MSE bit in the
+ * PF SR-IOV capability, there's therefore no need to trigger
+ * faults based on the virtual value.
+ */
+ return pdev->is_virtfn || (cmd & PCI_COMMAND_MEMORY);
}
/*
vconfig[PCI_INTERRUPT_PIN]);
vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */
+
+ /*
+	 * VFs do not implement the memory enable bit of the COMMAND
+	 * register, therefore we'll not have it set in our initial
+ * copy of config space after pci_enable_device(). For
+ * consistency with PFs, set the virtual enable bit here.
+ */
+ *(__le16 *)&vconfig[PCI_COMMAND] |=
+ cpu_to_le16(PCI_COMMAND_MEMORY);
}
if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx)
return 0;
}
+static long vhost_test_set_backend(struct vhost_test *n, unsigned index, int fd)
+{
+ static void *backend;
+
+ const bool enable = fd != -1;
+ struct vhost_virtqueue *vq;
+ int r;
+
+ mutex_lock(&n->dev.mutex);
+ r = vhost_dev_check_owner(&n->dev);
+ if (r)
+ goto err;
+
+ if (index >= VHOST_TEST_VQ_MAX) {
+ r = -ENOBUFS;
+ goto err;
+ }
+ vq = &n->vqs[index];
+ mutex_lock(&vq->mutex);
+
+ /* Verify that ring has been setup correctly. */
+ if (!vhost_vq_access_ok(vq)) {
+ r = -EFAULT;
+ goto err_vq;
+ }
+ if (!enable) {
+ vhost_poll_stop(&vq->poll);
+ backend = vhost_vq_get_backend(vq);
+ vhost_vq_set_backend(vq, NULL);
+ } else {
+ vhost_vq_set_backend(vq, backend);
+ r = vhost_vq_init_access(vq);
+ if (r == 0)
+ r = vhost_poll_start(&vq->poll, vq->kick);
+ }
+
+ mutex_unlock(&vq->mutex);
+
+ if (enable) {
+ vhost_test_flush_vq(n, index);
+ }
+
+ mutex_unlock(&n->dev.mutex);
+ return 0;
+
+err_vq:
+ mutex_unlock(&vq->mutex);
+err:
+ mutex_unlock(&n->dev.mutex);
+ return r;
+}
+
static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
unsigned long arg)
{
+ struct vhost_vring_file backend;
struct vhost_test *n = f->private_data;
void __user *argp = (void __user *)arg;
u64 __user *featurep = argp;
if (copy_from_user(&test, argp, sizeof test))
return -EFAULT;
return vhost_test_run(n, test);
+ case VHOST_TEST_SET_BACKEND:
+ if (copy_from_user(&backend, argp, sizeof backend))
+ return -EFAULT;
+ return vhost_test_set_backend(n, backend.index, backend.fd);
case VHOST_GET_FEATURES:
features = VHOST_FEATURES;
if (copy_to_user(featurep, &features, sizeof features))
/* Start a given test on the virtio null device. 0 stops all tests. */
#define VHOST_TEST_RUN _IOW(VHOST_VIRTIO, 0x31, int)
+#define VHOST_TEST_SET_BACKEND _IOW(VHOST_VIRTIO, 0x32, int)
#endif
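
A hypothetical userspace caller of the new ioctl might look like the following; the device node name and the elided ring setup are assumptions, while struct vhost_vring_file and VHOST_SET_OWNER come from <linux/vhost.h>:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vhost.h>

/* mirrors the ioctl added above; normally taken from the updated header */
#ifndef VHOST_TEST_SET_BACKEND
#define VHOST_TEST_SET_BACKEND _IOW(VHOST_VIRTIO, 0x32, int)
#endif

int main(void)
{
        struct vhost_vring_file backend = { .index = 0, .fd = 1 };
        int fd = open("/dev/vhost-test", O_RDWR);       /* assumed node name */

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (ioctl(fd, VHOST_SET_OWNER, NULL))
                perror("VHOST_SET_OWNER");

        /* ring setup (VHOST_SET_VRING_*) elided for brevity */

        if (ioctl(fd, VHOST_TEST_SET_BACKEND, &backend))        /* fd != -1: enable */
                perror("enable");
        backend.fd = -1;                                        /* -1: detach again */
        if (ioctl(fd, VHOST_TEST_SET_BACKEND, &backend))
                perror("disable");

        close(fd);
        return 0;
}
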
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
struct vdpa_notification_area notify;
- int index = vma->vm_pgoff;
+ unsigned long index = vma->vm_pgoff;
if (vma->vm_end - vma->vm_start != PAGE_SIZE)
return -EINVAL;
ops->graphics = 1;
if (!blank) {
- var.activate = FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE;
+ var.activate = FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE |
+ FB_ACTIVATE_KD_TEXT;
fb_set_var(info, &var);
ops->graphics = 0;
ops->var = info->var;
else if (!strcmp(this_opt, "noedid"))
noedid = true;
else if (!strcmp(this_opt, "noblank"))
- blank = true;
+ blank = false;
else if (!strncmp(this_opt, "vtotal:", 7))
vram_total = simple_strtoul(this_opt + 7, NULL, 0);
else if (!strncmp(this_opt, "vremap:", 7))
/* The parent resource for all memory added via this device. */
struct resource *parent_resource;
+ /*
+ * Copy of "System RAM (virtio_mem)" to be used for
+ * add_memory_driver_managed().
+ */
+ const char *resource_name;
/* Summary of all memory block states. */
unsigned long nb_mb_state[VIRTIO_MEM_MB_STATE_COUNT];
if (nid == NUMA_NO_NODE)
nid = memory_add_physaddr_to_nid(addr);
+ /*
+ * When force-unloading the driver and we still have memory added to
+ * Linux, the resource name has to stay.
+ */
+ if (!vm->resource_name) {
+ vm->resource_name = kstrdup_const("System RAM (virtio_mem)",
+ GFP_KERNEL);
+ if (!vm->resource_name)
+ return -ENOMEM;
+ }
+
dev_dbg(&vm->vdev->dev, "adding memory block: %lu\n", mb_id);
- return add_memory(nid, addr, memory_block_size_bytes());
+ return add_memory_driver_managed(nid, addr, memory_block_size_bytes(),
+ vm->resource_name);
}
/*
VIRTIO_MEM_MB_STATE_OFFLINE);
}
- return rc;
+ return 0;
}
/*
vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL] ||
vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE] ||
vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL] ||
- vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_MOVABLE])
+ vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_MOVABLE]) {
dev_warn(&vdev->dev, "device still has system memory added\n");
- else
+ } else {
virtio_mem_delete_resource(vm);
+ kfree_const(vm->resource_name);
+ }
/* remove all tracking data - no locking needed */
vfree(vm->mb_state);
unsigned int nr_handles;
};
+struct map_ring_valloc {
+ struct xenbus_map_node *node;
+
+ /* Why do we need two arrays? See comment of __xenbus_map_ring */
+ union {
+ unsigned long addrs[XENBUS_MAX_RING_GRANTS];
+ pte_t *ptes[XENBUS_MAX_RING_GRANTS];
+ };
+ phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
+
+ struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
+ struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
+
+ unsigned int idx; /* HVM only. */
+};
+
static DEFINE_SPINLOCK(xenbus_valloc_lock);
static LIST_HEAD(xenbus_valloc_pages);
struct xenbus_ring_ops {
- int (*map)(struct xenbus_device *dev,
+ int (*map)(struct xenbus_device *dev, struct map_ring_valloc *info,
grant_ref_t *gnt_refs, unsigned int nr_grefs,
void **vaddr);
int (*unmap)(struct xenbus_device *dev, void *vaddr);
* Map @nr_grefs pages of memory into this domain from another
* domain's grant table. xenbus_map_ring_valloc allocates @nr_grefs
* pages of virtual address space, maps the pages to that address, and
- * sets *vaddr to that address. Returns 0 on success, and GNTST_*
- * (see xen/include/interface/grant_table.h) or -ENOMEM / -EINVAL on
+ * sets *vaddr to that address. Returns 0 on success, and -errno on
* error. If an error is returned, device will switch to
* XenbusStateClosing and the error message will be saved in XenStore.
*/
unsigned int nr_grefs, void **vaddr)
{
int err;
+ struct map_ring_valloc *info;
+
+ *vaddr = NULL;
+
+ if (nr_grefs > XENBUS_MAX_RING_GRANTS)
+ return -EINVAL;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
- err = ring_ops->map(dev, gnt_refs, nr_grefs, vaddr);
- /* Some hypervisors are buggy and can return 1. */
- if (err > 0)
- err = GNTST_general_error;
+ info->node = kzalloc(sizeof(*info->node), GFP_KERNEL);
+ if (!info->node)
+ err = -ENOMEM;
+ else
+ err = ring_ops->map(dev, info, gnt_refs, nr_grefs, vaddr);
+ kfree(info->node);
+ kfree(info);
return err;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
grant_ref_t *gnt_refs,
unsigned int nr_grefs,
grant_handle_t *handles,
- phys_addr_t *addrs,
+ struct map_ring_valloc *info,
unsigned int flags,
bool *leaked)
{
- struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
- struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
int i, j;
- int err = GNTST_okay;
if (nr_grefs > XENBUS_MAX_RING_GRANTS)
return -EINVAL;
for (i = 0; i < nr_grefs; i++) {
- memset(&map[i], 0, sizeof(map[i]));
- gnttab_set_map_op(&map[i], addrs[i], flags, gnt_refs[i],
- dev->otherend_id);
+ gnttab_set_map_op(&info->map[i], info->phys_addrs[i], flags,
+ gnt_refs[i], dev->otherend_id);
handles[i] = INVALID_GRANT_HANDLE;
}
- gnttab_batch_map(map, i);
+ gnttab_batch_map(info->map, i);
for (i = 0; i < nr_grefs; i++) {
- if (map[i].status != GNTST_okay) {
- err = map[i].status;
- xenbus_dev_fatal(dev, map[i].status,
+ if (info->map[i].status != GNTST_okay) {
+ xenbus_dev_fatal(dev, info->map[i].status,
"mapping in shared page %d from domain %d",
gnt_refs[i], dev->otherend_id);
goto fail;
} else
- handles[i] = map[i].handle;
+ handles[i] = info->map[i].handle;
}
- return GNTST_okay;
+ return 0;
fail:
for (i = j = 0; i < nr_grefs; i++) {
if (handles[i] != INVALID_GRANT_HANDLE) {
- memset(&unmap[j], 0, sizeof(unmap[j]));
- gnttab_set_unmap_op(&unmap[j], (phys_addr_t)addrs[i],
+ gnttab_set_unmap_op(&info->unmap[j],
+ info->phys_addrs[i],
GNTMAP_host_map, handles[i]);
j++;
}
}
- if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, j))
+ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, info->unmap, j))
BUG();
*leaked = false;
for (i = 0; i < j; i++) {
- if (unmap[i].status != GNTST_okay) {
+ if (info->unmap[i].status != GNTST_okay) {
*leaked = true;
break;
}
}
- return err;
+ return -ENOENT;
}
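
On any mapping failure, __xenbus_map_ring() now rolls back every grant that did succeed and returns a plain -ENOENT instead of a raw GNTST_* value. The partial-failure rollback shape, abstracted into a sketch (the stubs stand in for the grant-table hypercalls):

#include <stdio.h>

#define NR_GRANTS 4
#define INVALID_HANDLE (-1)

/* stand-ins for the batched map/unmap hypercalls; slot 2 fails */
static int map_one(int i) { return i == 2 ? -1 : 100 + i; }
static void unmap_one(int handle) { printf("rolled back %d\n", handle); }

static int map_all(int handles[NR_GRANTS])
{
        int i, failed = 0;

        for (i = 0; i < NR_GRANTS; i++)
                handles[i] = INVALID_HANDLE;
        for (i = 0; i < NR_GRANTS; i++) {
                int h = map_one(i);

                if (h < 0) {
                        failed = 1;
                        break;
                }
                handles[i] = h;
        }
        if (!failed)
                return 0;

        /* undo only the grants that were actually mapped */
        for (i = 0; i < NR_GRANTS; i++)
                if (handles[i] != INVALID_HANDLE)
                        unmap_one(handles[i]);
        return -2;      /* one driver-level errno, like the -ENOENT above */
}

int main(void)
{
        int handles[NR_GRANTS];

        printf("map_all() = %d\n", map_all(handles));
        return 0;
}
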
/**
return err;
}
-struct map_ring_valloc_hvm
-{
- unsigned int idx;
-
- /* Why do we need two arrays? See comment of __xenbus_map_ring */
- phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
- unsigned long addrs[XENBUS_MAX_RING_GRANTS];
-};
-
static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
unsigned int goffset,
unsigned int len,
void *data)
{
- struct map_ring_valloc_hvm *info = data;
+ struct map_ring_valloc *info = data;
unsigned long vaddr = (unsigned long)gfn_to_virt(gfn);
info->phys_addrs[info->idx] = vaddr;
info->idx++;
}
-static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
- grant_ref_t *gnt_ref,
- unsigned int nr_grefs,
- void **vaddr)
+static int xenbus_map_ring_hvm(struct xenbus_device *dev,
+ struct map_ring_valloc *info,
+ grant_ref_t *gnt_ref,
+ unsigned int nr_grefs,
+ void **vaddr)
{
- struct xenbus_map_node *node;
+ struct xenbus_map_node *node = info->node;
int err;
void *addr;
bool leaked = false;
- struct map_ring_valloc_hvm info = {
- .idx = 0,
- };
unsigned int nr_pages = XENBUS_PAGES(nr_grefs);
- if (nr_grefs > XENBUS_MAX_RING_GRANTS)
- return -EINVAL;
-
- *vaddr = NULL;
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- return -ENOMEM;
-
err = alloc_xenballooned_pages(nr_pages, node->hvm.pages);
if (err)
goto out_err;
gnttab_foreach_grant(node->hvm.pages, nr_grefs,
xenbus_map_ring_setup_grant_hvm,
- &info);
+ info);
err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,
- info.phys_addrs, GNTMAP_host_map, &leaked);
+ info, GNTMAP_host_map, &leaked);
node->nr_handles = nr_grefs;
if (err)
spin_unlock(&xenbus_valloc_lock);
*vaddr = addr;
+ info->node = NULL;
+
return 0;
out_xenbus_unmap_ring:
if (!leaked)
- xenbus_unmap_ring(dev, node->handles, nr_grefs, info.addrs);
+ xenbus_unmap_ring(dev, node->handles, nr_grefs, info->addrs);
else
pr_alert("leaking %p size %u page(s)",
addr, nr_pages);
if (!leaked)
free_xenballooned_pages(nr_pages, node->hvm.pages);
out_err:
- kfree(node);
return err;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
#ifdef CONFIG_XEN_PV
-static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
- grant_ref_t *gnt_refs,
- unsigned int nr_grefs,
- void **vaddr)
+static int xenbus_map_ring_pv(struct xenbus_device *dev,
+ struct map_ring_valloc *info,
+ grant_ref_t *gnt_refs,
+ unsigned int nr_grefs,
+ void **vaddr)
{
- struct xenbus_map_node *node;
+ struct xenbus_map_node *node = info->node;
struct vm_struct *area;
- pte_t *ptes[XENBUS_MAX_RING_GRANTS];
- phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
int err = GNTST_okay;
int i;
bool leaked;
- *vaddr = NULL;
-
- if (nr_grefs > XENBUS_MAX_RING_GRANTS)
- return -EINVAL;
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
+ area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, info->ptes);
+ if (!area)
return -ENOMEM;
- area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes);
- if (!area) {
- kfree(node);
- return -ENOMEM;
- }
-
for (i = 0; i < nr_grefs; i++)
- phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr;
+ info->phys_addrs[i] =
+ arbitrary_virt_to_machine(info->ptes[i]).maddr;
err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
- phys_addrs,
- GNTMAP_host_map | GNTMAP_contains_pte,
+ info, GNTMAP_host_map | GNTMAP_contains_pte,
&leaked);
if (err)
goto failed;
spin_unlock(&xenbus_valloc_lock);
*vaddr = area->addr;
+ info->node = NULL;
+
return 0;
failed:
else
pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);
- kfree(node);
return err;
}
-static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
+static int xenbus_unmap_ring_pv(struct xenbus_device *dev, void *vaddr)
{
struct xenbus_map_node *node;
struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
}
static const struct xenbus_ring_ops ring_ops_pv = {
- .map = xenbus_map_ring_valloc_pv,
- .unmap = xenbus_unmap_ring_vfree_pv,
+ .map = xenbus_map_ring_pv,
+ .unmap = xenbus_unmap_ring_pv,
};
#endif
-struct unmap_ring_vfree_hvm
+struct unmap_ring_hvm
{
unsigned int idx;
unsigned long addrs[XENBUS_MAX_RING_GRANTS];
unsigned int len,
void *data)
{
- struct unmap_ring_vfree_hvm *info = data;
+ struct unmap_ring_hvm *info = data;
info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn);
info->idx++;
}
-static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
+static int xenbus_unmap_ring_hvm(struct xenbus_device *dev, void *vaddr)
{
int rv;
struct xenbus_map_node *node;
void *addr;
- struct unmap_ring_vfree_hvm info = {
+ struct unmap_ring_hvm info = {
.idx = 0,
};
unsigned int nr_pages;
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
static const struct xenbus_ring_ops ring_ops_hvm = {
- .map = xenbus_map_ring_valloc_hvm,
- .unmap = xenbus_unmap_ring_vfree_hvm,
+ .map = xenbus_map_ring_hvm,
+ .unmap = xenbus_unmap_ring_hvm,
};
void __init xenbus_ring_ops_init(void)
return ERR_PTR(-ENOMEM);
}
+ cell->name = kmalloc(namelen + 1, GFP_KERNEL);
+ if (!cell->name) {
+ kfree(cell);
+ return ERR_PTR(-ENOMEM);
+ }
+
cell->net = net;
cell->name_len = namelen;
for (i = 0; i < namelen; i++)
cell->name[i] = tolower(name[i]);
+ cell->name[i] = 0;
atomic_set(&cell->usage, 2);
INIT_WORK(&cell->manager, afs_manage_cell);
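
With the cell name now allocated separately (namelen + 1 bytes), the copy loop must NUL-terminate explicitly, since the buffer is no longer a zero-filled fixed array. The loop in miniature:

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *dup_lower(const char *name, size_t namelen)
{
        char *out = malloc(namelen + 1);
        size_t i;

        if (!out)
                return NULL;
        for (i = 0; i < namelen; i++)
                out[i] = tolower((unsigned char)name[i]);
        out[i] = 0;     /* the explicit terminator the patch adds */
        return out;
}

int main(void)
{
        char *s = dup_lower("Example.ORG", strlen("Example.ORG"));

        if (s) {
                puts(s);        /* example.org */
                free(s);
        }
        return 0;
}
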
if (ret == -EINVAL)
printk(KERN_ERR "kAFS: bad VL server IP address\n");
error:
+ kfree(cell->name);
kfree(cell);
_leave(" = %d", ret);
return ERR_PTR(ret);
afs_put_vlserverlist(cell->net, rcu_access_pointer(cell->vl_servers));
afs_put_cell(cell->net, cell->alias_of);
key_put(cell->anonymous_key);
+ kfree(cell->name);
kfree(cell);
_leave(" [destroyed]");
struct afs_vlserver_list __rcu *vl_servers;
u8 name_len; /* Length of name */
- char name[64 + 1]; /* Cell name, case-flattened and NUL-padded */
+ char *name; /* Cell name, case-flattened and NUL-padded */
};
/*
mutex_lock(&sbi->pipe_mutex);
while (bytes) {
- wr = __kernel_write(file, data, bytes, &file->f_pos);
+ wr = kernel_write(file, data, bytes, &file->f_pos);
if (wr <= 0)
break;
data += wr;
path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;
- goto out_put_group;
+ goto out;
}
/*
ret = btrfs_orphan_add(trans, BTRFS_I(inode));
if (ret) {
btrfs_add_delayed_iput(inode);
- goto out_put_group;
+ goto out;
}
clear_nlink(inode);
/* One for the block groups ref */
ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
if (ret < 0)
- goto out_put_group;
+ goto out;
if (ret > 0)
btrfs_release_path(path);
if (ret == 0) {
ret = btrfs_del_item(trans, tree_root, path);
if (ret)
- goto out_put_group;
+ goto out;
btrfs_release_path(path);
}
&fs_info->block_group_cache_tree);
RB_CLEAR_NODE(&block_group->cache_node);
+ /* Once for the block groups rbtree */
+ btrfs_put_block_group(block_group);
+
if (fs_info->first_logical_byte == block_group->start)
fs_info->first_logical_byte = (u64)-1;
spin_unlock(&fs_info->block_group_cache_lock);
spin_unlock(&block_group->space_info->lock);
+ /*
+ * Remove the free space for the block group from the free space tree
+ * and the block group's item from the extent tree before marking the
+ * block group as removed. This is to prevent races with tasks that
+ * freeze and unfreeze a block group, this task and another task
+ * allocating a new block group - the unfreeze task ends up removing
+ * the block group's extent map before the task calling this function
+ * deletes the block group item from the extent tree, allowing for
+ * another task to attempt to create another block group with the same
+ * item key (and failing with -EEXIST and a transaction abort).
+ */
+ ret = remove_block_group_free_space(trans, block_group);
+ if (ret)
+ goto out;
+
+ ret = remove_block_group_item(trans, path, block_group);
+ if (ret < 0)
+ goto out;
+
mutex_lock(&fs_info->chunk_mutex);
spin_lock(&block_group->lock);
block_group->removed = 1;
mutex_unlock(&fs_info->chunk_mutex);
- ret = remove_block_group_free_space(trans, block_group);
- if (ret)
- goto out_put_group;
-
- /* Once for the block groups rbtree */
- btrfs_put_block_group(block_group);
-
- ret = remove_block_group_item(trans, path, block_group);
- if (ret < 0)
- goto out;
-
if (remove_em) {
struct extent_map_tree *em_tree;
free_extent_map(em);
}
-out_put_group:
+out:
/* Once for the lookup reference */
btrfs_put_block_group(block_group);
-out:
if (remove_rsv)
btrfs_delayed_refs_rsv_release(fs_info, 1);
btrfs_free_path(path);
switch (tm->op) {
case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
BUG_ON(tm->slot < n);
- /* Fallthrough */
+ fallthrough;
case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
case MOD_LOG_KEY_REMOVE:
btrfs_set_node_key(eb, &tm->key, tm->slot);
BTRFS_ROOT_DEAD_RELOC_TREE,
/* Mark dead root stored on device whose cleanup needs to be resumed */
BTRFS_ROOT_DEAD_TREE,
+ /* The root has a log tree. Used only for subvolume roots. */
+ BTRFS_ROOT_HAS_LOG_TREE,
};
/*
list_for_each_entry_safe(block_group, next, &fs_info->unused_bgs,
bg_list) {
list_del_init(&block_group->bg_list);
+ btrfs_put_block_group(block_group);
btrfs_discard_queue_work(&fs_info->discard_ctl, block_group);
}
spin_unlock(&fs_info->unused_bgs_lock);
!extent_buffer_uptodate(tree_root->node)) {
handle_error = true;
- if (IS_ERR(tree_root->node))
+ if (IS_ERR(tree_root->node)) {
ret = PTR_ERR(tree_root->node);
- else if (!extent_buffer_uptodate(tree_root->node))
+ tree_root->node = NULL;
+ } else if (!extent_buffer_uptodate(tree_root->node)) {
ret = -EUCLEAN;
+ }
btrfs_warn(fs_info, "failed to read tree root");
continue;
static void check_buffer_tree_ref(struct extent_buffer *eb)
{
int refs;
- /* the ref bit is tricky. We have to make sure it is set
- * if we have the buffer dirty. Otherwise the
- * code to free a buffer can end up dropping a dirty
- * page
+ /*
+ * The TREE_REF bit is first set when the extent_buffer is added
+ * to the radix tree. It is also reset, if unset, when a new reference
+ * is created by find_extent_buffer.
*
- * Once the ref bit is set, it won't go away while the
- * buffer is dirty or in writeback, and it also won't
- * go away while we have the reference count on the
- * eb bumped.
+ * It is only cleared in two cases: freeing the last non-tree
+ * reference to the extent_buffer when its STALE bit is set or
+ * calling releasepage when the tree reference is the only reference.
*
- * We can't just set the ref bit without bumping the
- * ref on the eb because free_extent_buffer might
- * see the ref bit and try to clear it. If this happens
- * free_extent_buffer might end up dropping our original
- * ref by mistake and freeing the page before we are able
- * to add one more ref.
+ * In both cases, care is taken to ensure that the extent_buffer's
+ * pages are not under io. However, releasepage can be concurrently
+ * called with creating new references, which is prone to race
+ * conditions between the calls to check_buffer_tree_ref in those
+ * codepaths and clearing TREE_REF in try_release_extent_buffer.
*
- * So bump the ref count first, then set the bit. If someone
- * beat us to it, drop the ref we added.
+ * The actual lifetime of the extent_buffer in the radix tree is
+ * adequately protected by the refcount, but the TREE_REF bit and
+ * its corresponding reference are not. To protect against this
+ * class of races, we call check_buffer_tree_ref from the codepaths
+ * which trigger io after they set eb->io_pages. Note that once io is
+ * initiated, TREE_REF can no longer be cleared, so that is the
+ * moment at which any such race is best fixed.
*/
refs = atomic_read(&eb->refs);
if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
eb->read_mirror = 0;
atomic_set(&eb->io_pages, num_reads);
+ /*
+ * It is possible for releasepage to clear the TREE_REF bit before we
+ * set io_pages. See check_buffer_tree_ref for a more detailed comment.
+ */
+ check_buffer_tree_ref(eb);
for (i = 0; i < num_pages; i++) {
page = eb->pages[i];
}
static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
- size_t *write_bytes)
+ size_t *write_bytes, bool nowait)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_root *root = inode->root;
u64 num_bytes;
int ret;
- if (!btrfs_drew_try_write_lock(&root->snapshot_lock))
+ if (!nowait && !btrfs_drew_try_write_lock(&root->snapshot_lock))
return -EAGAIN;
lockstart = round_down(pos, fs_info->sectorsize);
lockend = round_up(pos + *write_bytes,
fs_info->sectorsize) - 1;
+ num_bytes = lockend - lockstart + 1;
- btrfs_lock_and_flush_ordered_range(inode, lockstart,
- lockend, NULL);
+ if (nowait) {
+ struct btrfs_ordered_extent *ordered;
+
+ if (!try_lock_extent(&inode->io_tree, lockstart, lockend))
+ return -EAGAIN;
+
+ ordered = btrfs_lookup_ordered_range(inode, lockstart,
+ num_bytes);
+ if (ordered) {
+ btrfs_put_ordered_extent(ordered);
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+ } else {
+ btrfs_lock_and_flush_ordered_range(inode, lockstart,
+ lockend, NULL);
+ }
- num_bytes = lockend - lockstart + 1;
ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
NULL, NULL, NULL);
if (ret <= 0) {
ret = 0;
- btrfs_drew_write_unlock(&root->snapshot_lock);
+ if (!nowait)
+ btrfs_drew_write_unlock(&root->snapshot_lock);
} else {
*write_bytes = min_t(size_t, *write_bytes ,
num_bytes - pos + lockstart);
}
-
+out_unlock:
unlock_extent(&inode->io_tree, lockstart, lockend);
return ret;
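
check_can_nocow() now branches on a nowait flag: the nowait path only try-locks and reports -EAGAIN wherever the blocking path would sleep. The try-lock/fall-back shape in a self-contained sketch (a pthread mutex stands in for the extent and snapshot locks):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t range_lock = PTHREAD_MUTEX_INITIALIZER;

static int check_range(bool nowait)
{
        if (nowait) {
                if (pthread_mutex_trylock(&range_lock))
                        return -EAGAIN; /* IOCB_NOWAIT: never sleep */
        } else {
                pthread_mutex_lock(&range_lock);        /* may block */
        }
        /* ... inspect the range under the lock ... */
        pthread_mutex_unlock(&range_lock);
        return 0;
}

int main(void)
{
        printf("%d\n", check_range(true));      /* 0: lock was free */
        pthread_mutex_lock(&range_lock);
        printf("%d\n", check_range(true));      /* -EAGAIN: would block */
        pthread_mutex_unlock(&range_lock);
        return 0;
}
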
if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
BTRFS_INODE_PREALLOC)) &&
check_can_nocow(BTRFS_I(inode), pos,
- &write_bytes) > 0) {
+ &write_bytes, false) > 0) {
/*
* For nodata cow case, no need to reserve
* data space.
pos = iocb->ki_pos;
count = iov_iter_count(from);
if (iocb->ki_flags & IOCB_NOWAIT) {
+ size_t nocow_bytes = count;
+
/*
* We will allocate space in case nodatacow is not set,
* so bail
*/
if (!(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
BTRFS_INODE_PREALLOC)) ||
- check_can_nocow(BTRFS_I(inode), pos, &count) <= 0) {
+ check_can_nocow(BTRFS_I(inode), pos, &nocow_bytes,
+ true) <= 0) {
+ inode_unlock(inode);
+ return -EAGAIN;
+ }
+ /*
+ * There are holes in the range or parts of the range that must
+ * be COWed (shared extents, RO block groups, etc), so just bail
+ * out.
+ */
+ if (nocow_bytes < count) {
inode_unlock(inode);
return -EAGAIN;
}
.read_iter = generic_file_read_iter,
.splice_read = generic_file_splice_read,
.write_iter = btrfs_file_write_iter,
+ .splice_write = iter_file_splice_write,
.mmap = btrfs_file_mmap,
.open = btrfs_file_open,
.release = btrfs_release_file,
u64 num_bytes;
unsigned long ram_size;
u64 cur_alloc_size = 0;
+ u64 min_alloc_size;
u64 blocksize = fs_info->sectorsize;
struct btrfs_key ins;
struct extent_map *em;
btrfs_drop_extent_cache(BTRFS_I(inode), start,
start + num_bytes - 1, 0);
+ /*
+ * Relocation relies on the relocated extents to have exactly the same
+ * size as the original extents. Normally writeback for relocation data
+ * extents follows a NOCOW path because relocation preallocates the
+ * extents. However, due to an operation such as scrub turning a block
+ * group to RO mode, it may fallback to COW mode, so we must make sure
+ * an extent allocated during COW has exactly the requested size and can
+ * not be split into smaller extents, otherwise relocation breaks and
+ * fails during the stage where it updates the bytenr of file extent
+ * items.
+ */
+ if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
+ min_alloc_size = num_bytes;
+ else
+ min_alloc_size = fs_info->sectorsize;
+
while (num_bytes > 0) {
cur_alloc_size = num_bytes;
ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
- fs_info->sectorsize, 0, alloc_hint,
+ min_alloc_size, 0, alloc_hint,
&ins, 1, 1);
if (ret < 0)
goto out_unlock;
int *page_started, unsigned long *nr_written)
{
const bool is_space_ino = btrfs_is_free_space_inode(BTRFS_I(inode));
+ const bool is_reloc_ino = (BTRFS_I(inode)->root->root_key.objectid ==
+ BTRFS_DATA_RELOC_TREE_OBJECTID);
const u64 range_bytes = end + 1 - start;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
u64 range_start = start;
* data space info, which we incremented in the step above.
*
* If we need to fall back to COW and the inode corresponds to a free
- * space cache inode, we must also increment bytes_may_use of the data
- * space_info for the same reason. Space caches always get a prealloc
+ * space cache inode or an inode of the data relocation tree, we must
+ * also increment bytes_may_use of the data space_info for the same
+ * reason. Space caches and relocated data extents always get a prealloc
* extent for them, however scrub or balance may have set the block
- * group that contains that extent to RO mode.
+ * group that contains that extent to RO mode and therefore force COW
+ * when starting writeback.
*/
count = count_range_bits(io_tree, &range_start, end, range_bytes,
EXTENT_NORESERVE, 0);
- if (count > 0 || is_space_ino) {
- const u64 bytes = is_space_ino ? range_bytes : count;
+ if (count > 0 || is_space_ino || is_reloc_ino) {
+ u64 bytes = count;
struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
struct btrfs_space_info *sinfo = fs_info->data_sinfo;
+ if (is_space_ino || is_reloc_ino)
+ bytes = range_bytes;
+
spin_lock(&sinfo->lock);
btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes);
spin_unlock(&sinfo->lock);
ret = fallback_to_cow(inode, locked_page, cow_start,
found_key.offset - 1,
page_started, nr_written);
- if (ret) {
- if (nocow)
- btrfs_dec_nocow_writers(fs_info,
- disk_bytenr);
+ if (ret)
goto error;
- }
cow_start = (u64)-1;
}
ram_bytes, BTRFS_COMPRESS_NONE,
BTRFS_ORDERED_PREALLOC);
if (IS_ERR(em)) {
- if (nocow)
- btrfs_dec_nocow_writers(fs_info,
- disk_bytenr);
ret = PTR_ERR(em);
goto error;
}
dio_data.overwrite = 1;
inode_unlock(inode);
relock = true;
- } else if (iocb->ki_flags & IOCB_NOWAIT) {
- ret = -EAGAIN;
- goto out;
}
ret = btrfs_delalloc_reserve_space(inode, &data_reserved,
offset, count);
btrfs_put_root(root);
out_free:
btrfs_free_path(path);
- kzfree(subvol_info);
+ kfree(subvol_info);
return ret;
}
switch (key.type) {
case BTRFS_EXTENT_ITEM_KEY:
*num_bytes = key.offset;
- /* fall through */
+ fallthrough;
case BTRFS_METADATA_ITEM_KEY:
*bytenr = key.objectid;
ret = process_extent_item(fs_info, path, &key, i,
return false;
}
global_rsv->reserved -= ticket->bytes;
+ remove_ticket(space_info, ticket);
ticket->bytes = 0;
- list_del_init(&ticket->list);
wake_up(&ticket->wait);
space_info->tickets_id++;
if (global_rsv->reserved < global_rsv->size)
case Opt_compress_force:
case Opt_compress_force_type:
compress_force = true;
- /* Fallthrough */
+ fallthrough;
case Opt_compress:
case Opt_compress_type:
saved_compress_type = btrfs_test_opt(info,
btrfs_set_opt(info->mount_opt, NOSSD);
btrfs_clear_and_info(info, SSD,
"not using ssd optimizations");
- /* Fallthrough */
+ fallthrough;
case Opt_nossd_spread:
btrfs_clear_and_info(info, SSD_SPREAD,
"not using spread ssd allocation scheme");
case Opt_recovery:
btrfs_warn(info,
"'recovery' is deprecated, use 'usebackuproot' instead");
- /* fall through */
+ fallthrough;
case Opt_usebackuproot:
btrfs_info(info,
"trying to use backup root at mount time");
if (ret)
goto out;
+ set_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state);
clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
root->log_start_pid = current->pid;
}
{
int ret = -ENOENT;
+ if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state))
+ return ret;
+
mutex_lock(&root->log_mutex);
if (root->log_root) {
ret = 0;
if (root->log_root) {
free_log_tree(trans, root->log_root);
root->log_root = NULL;
+ clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state);
}
return 0;
}
return BTRFS_MAP_WRITE;
default:
WARN_ON_ONCE(1);
- /* fall through */
+ fallthrough;
case REQ_OP_READ:
return BTRFS_MAP_READ;
}
}
data = kmap(page);
- ret = __kernel_write(file, data, len, &pos);
+ ret = kernel_write(file, data, len, &pos);
kunmap(page);
fput(file);
if (ret != len)
if (ses->sign)
seq_puts(m, " signed");
+ seq_printf(m, "\n\tUser: %d Cred User: %d",
+ from_kuid(&init_user_ns, ses->linux_uid),
+ from_kuid(&init_user_ns, ses->cred_uid));
+
if (ses->chan_count > 1) {
seq_printf(m, "\n\n\tExtra Channels: %zu\n",
ses->chan_count-1);
cifs_dump_channel(m, j, &ses->chans[j]);
}
- seq_puts(m, "\n\tShares:");
+ seq_puts(m, "\n\n\tShares:");
j = 0;
seq_printf(m, "\n\t%d) IPC: ", j);
extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
-#define CIFS_VERSION "2.27"
+#define CIFS_VERSION "2.28"
#endif /* _CIFSFS_H */
vol_info->nocase = master_tcon->nocase;
vol_info->nohandlecache = master_tcon->nohandlecache;
vol_info->local_lease = master_tcon->local_lease;
+ vol_info->no_lease = master_tcon->no_lease;
+ vol_info->resilient = master_tcon->use_resilient;
+ vol_info->persistent = master_tcon->use_persistent;
+ vol_info->handle_timeout = master_tcon->handle_timeout;
vol_info->no_linux_ext = !master_tcon->unix_ext;
+ vol_info->linux_ext = master_tcon->posix_extensions;
vol_info->sectype = master_tcon->ses->sectype;
vol_info->sign = master_tcon->ses->sign;
+ vol_info->seal = master_tcon->seal;
rc = cifs_set_vol_auth(vol_info, master_tcon->ses);
if (rc) {
goto out;
}
- /* if new SMB3.11 POSIX extensions are supported do not remap / and \ */
- if (tcon->posix_extensions)
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS;
-
if (cap_unix(ses))
reset_cifs_unix_caps(0, tcon, NULL, vol_info);
/*
* Set the byte-range lock (posix style). Returns:
- * 1) 0, if we set the lock and don't need to request to the server;
- * 2) 1, if we need to request to the server;
- * 3) <0, if the error occurs while setting the lock.
+ * 1) <0, if an error occurs while setting the lock;
+ * 2) 0, if we set the lock and don't need to send a request to the server;
+ * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
+ * 4) FILE_LOCK_DEFERRED + 1, if we need to send a request to the server.
*/
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
- int rc = 1;
+ int rc = FILE_LOCK_DEFERRED + 1;
if ((flock->fl_flags & FL_POSIX) == 0)
return rc;
-try_again:
cifs_down_write(&cinode->lock_sem);
if (!cinode->can_cache_brlcks) {
up_write(&cinode->lock_sem);
rc = posix_lock_file(file, flock, NULL);
up_write(&cinode->lock_sem);
- if (rc == FILE_LOCK_DEFERRED) {
- rc = wait_event_interruptible(flock->fl_wait,
- list_empty(&flock->fl_blocked_member));
- if (!rc)
- goto try_again;
- locks_delete_block(flock);
- }
return rc;
}
int posix_lock_type;
rc = cifs_posix_lock_set(file, flock);
- if (!rc || rc < 0)
+ if (rc <= FILE_LOCK_DEFERRED)
return rc;
if (type & server->vals->shared_lock_type)
break;
__SetPageLocked(page);
- if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
+ rc = add_to_page_cache_locked(page, mapping, page->index, gfp);
+ if (rc) {
__ClearPageLocked(page);
break;
}
struct list_head *page_list, unsigned num_pages)
{
int rc;
+ int err = 0;
struct list_head tmplist;
struct cifsFileInfo *open_file = file->private_data;
struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
* the order of declining indexes. When we put the pages in
* the rdata->pages, then we want them in increasing order.
*/
- while (!list_empty(page_list)) {
+ while (!list_empty(page_list) && !err) {
unsigned int i, nr_pages, bytes, rsize;
loff_t offset;
struct page *page, *tpage;
return 0;
}
- rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
+ nr_pages = 0;
+ err = readpages_get_pages(mapping, page_list, rsize, &tmplist,
&nr_pages, &offset, &bytes);
- if (rc) {
+ if (!nr_pages) {
add_credits_and_wake_if(server, credits, 0);
break;
}
FILE_UNIX_BASIC_INFO *info_buf_target;
unsigned int xid;
int rc, tmprc;
+ bool new_target = d_really_is_negative(target_dentry);
if (flags & ~RENAME_NOREPLACE)
return -EINVAL;
*/
unlink_target:
- /* Try unlinking the target dentry if it's not negative */
- if (d_really_is_positive(target_dentry) && (rc == -EACCES || rc == -EEXIST)) {
+ /*
+ * If the target dentry was created during the rename, try
+ * unlinking it if it's not negative
+ */
+ if (new_target &&
+ d_really_is_positive(target_dentry) &&
+ (rc == -EACCES || rc == -EEXIST)) {
if (d_is_dir(target_dentry))
tmprc = cifs_rmdir(target_dir, target_dentry);
else
if (rc == 0) {
cifsInode->server_eof = attrs->ia_size;
cifs_setsize(inode, attrs->ia_size);
+
+ /*
+ * The man page of truncate says if the size changed,
+ * then the st_ctime and st_mtime fields for the file
+ * are updated.
+ */
+ attrs->ia_ctime = attrs->ia_mtime = current_time(inode);
+ attrs->ia_valid |= ATTR_CTIME | ATTR_MTIME;
+
cifs_truncate_page(inode->i_mapping, inode->i_size);
}
unsigned int xid;
struct cifsFileInfo *pSMBFile = filep->private_data;
struct cifs_tcon *tcon;
+ struct tcon_link *tlink;
struct cifs_sb_info *cifs_sb;
__u64 ExtAttrBits = 0;
__u64 caps;
break;
}
cifs_sb = CIFS_SB(inode->i_sb);
- tcon = tlink_tcon(cifs_sb_tlink(cifs_sb));
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink)) {
+ rc = PTR_ERR(tlink);
+ break;
+ }
+ tcon = tlink_tcon(tlink);
if (tcon && tcon->ses->server->ops->notify) {
rc = tcon->ses->server->ops->notify(xid,
filep, (void __user *)arg);
cifs_dbg(FYI, "ioctl notify rc %d\n", rc);
} else
rc = -EOPNOTSUPP;
+ cifs_put_tlink(tlink);
break;
default:
cifs_dbg(FYI, "unsupported ioctl\n");
struct bio_vec *bv = NULL;
if (iov_iter_is_kvec(iter)) {
- memcpy(&ctx->iter, iter, sizeof(struct iov_iter));
+ memcpy(&ctx->iter, iter, sizeof(*iter));
ctx->len = count;
iov_iter_advance(iter, count);
return 0;
}
- if (max_pages * sizeof(struct bio_vec) <= CIFS_AIO_KMALLOC_LIMIT)
- bv = kmalloc_array(max_pages, sizeof(struct bio_vec),
- GFP_KERNEL);
+ if (array_size(max_pages, sizeof(*bv)) <= CIFS_AIO_KMALLOC_LIMIT)
+ bv = kmalloc_array(max_pages, sizeof(*bv), GFP_KERNEL);
if (!bv) {
- bv = vmalloc(array_size(max_pages, sizeof(struct bio_vec)));
+ bv = vmalloc(array_size(max_pages, sizeof(*bv)));
if (!bv)
return -ENOMEM;
}
- if (max_pages * sizeof(struct page *) <= CIFS_AIO_KMALLOC_LIMIT)
- pages = kmalloc_array(max_pages, sizeof(struct page *),
- GFP_KERNEL);
+ if (array_size(max_pages, sizeof(*pages)) <= CIFS_AIO_KMALLOC_LIMIT)
+ pages = kmalloc_array(max_pages, sizeof(*pages), GFP_KERNEL);
if (!pages) {
- pages = vmalloc(array_size(max_pages, sizeof(struct page *)));
+ pages = vmalloc(array_size(max_pages, sizeof(*pages)));
if (!pages) {
kvfree(bv);
return -ENOMEM;
((struct smb2_ioctl_rsp *)shdr)->OutputCount);
break;
case SMB2_CHANGE_NOTIFY:
+ *off = le16_to_cpu(
+ ((struct smb2_change_notify_rsp *)shdr)->OutputBufferOffset);
+ *len = le32_to_cpu(
+ ((struct smb2_change_notify_rsp *)shdr)->OutputBufferLength);
+ break;
default:
- /* BB FIXME for unimplemented cases above */
- cifs_dbg(VFS, "no length check for command\n");
+ cifs_dbg(VFS, "no length check for command %d\n", le16_to_cpu(shdr->Command));
break;
}
/* close extra handle outside of crit sec */
SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
}
+ rc = 0;
goto oshr_free;
}
tcon = cifs_sb_master_tcon(cifs_sb);
oparms.tcon = tcon;
- oparms.desired_access = FILE_READ_ATTRIBUTES;
+ oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
oparms.disposition = FILE_OPEN;
oparms.create_options = cifs_create_options(cifs_sb, 0);
oparms.fid = &fid;
trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid,
ses->Suid, offset, len);
+ /*
+ * We zero the range through an ioctl, so we need to remove the page
+ * cache first, otherwise the data may be inconsistent with the server.
+ */
+ truncate_pagecache_range(inode, offset, offset + len - 1);
/* if file not oplocked can't be sure whether asking to extend size */
if (!CIFS_CACHE_READ(cifsi))
return rc;
}
+ /*
+ * We implement the punch hole through an ioctl, so we need to remove the
+ * page cache first, otherwise the data may be inconsistent with the server.
+ */
+ truncate_pagecache_range(inode, offset, offset + len - 1);
+
cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
fsctl_buf.FileOffset = cpu_to_le64(offset);
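Both truncate_pagecache_range() calls above exist because zeroing and hole punching are performed server-side via an FSCTL, bypassing the client's page cache; without invalidation, a later read could be served stale data from cache. A hedged local illustration of the effect being guarded against, using a hypothetical file on a filesystem that supports hole punching:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* "testfile" is hypothetical and assumed to hold nonzero data */
	int fd = open("testfile", O_RDWR);
	char buf[16] = { 0 };

	if (fd < 0)
		return 1;
	pread(fd, buf, sizeof(buf), 0);	/* populates the page cache */
	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, 4096);
	pread(fd, buf, sizeof(buf), 0);	/* must read back zeroes now; a
					 * network fs that skipped cache
					 * invalidation could still return
					 * the stale cached bytes */
	printf("first byte after punch: %d\n", buf[0]);
	close(fd);
	return 0;
}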
const int timeout, const int flags,
unsigned int *instance)
{
- int rc;
+ long rc;
int *credits;
int optype;
long int t;
} else {
inode_lock(inode);
i_size_write(inode, datasize + sizeof(attributes));
+ inode->i_mtime = current_time(inode);
inode_unlock(inode);
}
ssize_t size = 0;
int err;
- while (!__ratelimit(&file->f_cred->user->ratelimit)) {
- if (!msleep_interruptible(50))
- return -EINTR;
- }
+ while (!__ratelimit(&file->f_cred->user->ratelimit))
+ msleep(50);
err = efivar_entry_size(var, &datasize);
static inline void z_erofs_onlinepage_fixup(struct page *page,
uintptr_t index, bool down)
{
- unsigned long *p, o, v, id;
-repeat:
- p = &page_private(page);
- o = READ_ONCE(*p);
+ union z_erofs_onlinepage_converter u = { .v = &page_private(page) };
+ int orig, orig_index, val;
- id = o >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
- if (id) {
+repeat:
+ orig = atomic_read(u.o);
+ orig_index = orig >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
+ if (orig_index) {
if (!index)
return;
- DBG_BUGON(id != index);
+ DBG_BUGON(orig_index != index);
}
- v = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
- ((o & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
- if (cmpxchg(p, o, v) != o)
+ val = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
+ ((orig & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
+ if (atomic_cmpxchg(u.o, orig, val) != orig)
goto repeat;
}
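The repeat/atomic_cmpxchg() loop above is the classic lock-free read-modify-write idiom: recompute the new value from a snapshot and retry until the word is swapped without interference. A self-contained sketch of the same idiom in C11 atomics; the field layout mirrors, but is not, the erofs one:

#include <stdatomic.h>

/* pack an index into the high bits and a refcount into the low bits and
 * update both in a single atomic word, like z_erofs_onlinepage_fixup() */
#define INDEX_SHIFT	16
#define COUNT_MASK	((1u << INDEX_SHIFT) - 1)

static void onlinepage_fixup(atomic_uint *word, unsigned int index,
			     unsigned int down)
{
	unsigned int orig = atomic_load(word);
	unsigned int val;

	do {
		val = (index << INDEX_SHIFT) |
		      ((orig & COUNT_MASK) + down);
		/* on failure, orig is refreshed with the current value
		 * and we recompute val from the new snapshot */
	} while (!atomic_compare_exchange_weak(word, &orig, val));
}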
.llseek = generic_file_llseek,
.read = generic_read_dir,
.iterate = exfat_iterate,
- .fsync = generic_file_fsync,
+ .fsync = exfat_file_fsync,
};
int exfat_alloc_new_dir(struct inode *inode, struct exfat_chain *clu)
ep->dentry.name.flags = 0x0;
for (i = 0; i < EXFAT_FILE_NAME_LEN; i++) {
- ep->dentry.name.unicode_0_14[i] = cpu_to_le16(*uniname);
- if (*uniname == 0x0)
- break;
- uniname++;
+ if (*uniname != 0x0) {
+ ep->dentry.name.unicode_0_14[i] = cpu_to_le16(*uniname);
+ uniname++;
+ } else {
+ ep->dentry.name.unicode_0_14[i] = 0x0;
+ }
}
}
int exfat_setattr(struct dentry *dentry, struct iattr *attr);
int exfat_getattr(const struct path *path, struct kstat *stat,
unsigned int request_mask, unsigned int query_flags);
+int exfat_file_fsync(struct file *file, loff_t start, loff_t end, int datasync);
/* namei.c */
extern const struct dentry_operations exfat_dentry_ops;
#include <linux/slab.h>
#include <linux/cred.h>
#include <linux/buffer_head.h>
+#include <linux/blkdev.h>
#include "exfat_raw.h"
#include "exfat_fs.h"
return error;
}
+int exfat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
+{
+ struct inode *inode = filp->f_mapping->host;
+ int err;
+
+ err = __generic_file_fsync(filp, start, end, datasync);
+ if (err)
+ return err;
+
+ err = sync_blockdev(inode->i_sb->s_bdev);
+ if (err)
+ return err;
+
+ return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
+}
+
const struct file_operations exfat_file_operations = {
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
- .fsync = generic_file_fsync,
+ .fsync = exfat_file_fsync,
.splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
};
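With exfat_file_fsync() wired into both the directory and file operations, fsync(2) on exFAT now flushes the underlying block device instead of only writing buffers, which is what durability-sensitive callers assume. A minimal usage sketch (the file name is hypothetical):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("journal.dat", O_WRONLY | O_CREAT, 0644); /* hypothetical */

	if (fd < 0 || write(fd, "record\n", 7) != 7)
		return 1;
	/* with the change above, fsync() on exFAT now also issues a device
	 * cache flush, so the record is expected to survive power loss
	 * once this call returns successfully */
	if (fsync(fd) != 0) {
		perror("fsync");
		return 1;
	}
	close(fd);
	return 0;
}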
goto unlock;
}
- exfat_set_vol_flags(sb, VOL_DIRTY);
exfat_chain_set(&clu_to_free, ei->start_clu,
EXFAT_B_TO_CLU_ROUND_UP(i_size_read(inode), sbi), ei->flags);
num_entries++;
brelse(bh);
+ exfat_set_vol_flags(sb, VOL_DIRTY);
err = exfat_remove_entries(dir, &cdir, entry, 0, num_entries);
if (err) {
exfat_err(sb, "failed to exfat_remove_entries : err(%d)", err);
epold = exfat_get_dentry(sb, p_dir, oldentry + 1, &old_bh,
&sector_old);
+ if (!epold)
+ return -EIO;
epnew = exfat_get_dentry(sb, p_dir, newentry + 1, &new_bh,
&sector_new);
- if (!epold || !epnew)
+ if (!epnew) {
+ brelse(old_bh);
return -EIO;
+ }
memcpy(epnew, epold, DENTRY_SIZE);
exfat_update_bh(sb, new_bh, sync);
epmov = exfat_get_dentry(sb, p_olddir, oldentry + 1, &mov_bh,
&sector_mov);
+ if (!epmov)
+ return -EIO;
epnew = exfat_get_dentry(sb, p_newdir, newentry + 1, &new_bh,
&sector_new);
- if (!epmov || !epnew)
+ if (!epnew) {
+ brelse(mov_bh);
return -EIO;
+ }
memcpy(epnew, epmov, DENTRY_SIZE);
exfat_update_bh(sb, new_bh, IS_DIRSYNC(inode));
}
}
+static int exfat_reconfigure(struct fs_context *fc)
+{
+ fc->sb_flags |= SB_NODIRATIME;
+
+ /* volume flag will be updated in exfat_sync_fs */
+ sync_filesystem(fc->root->d_sb);
+ return 0;
+}
+
static const struct fs_context_operations exfat_context_ops = {
.parse_param = exfat_parse_param,
.get_tree = exfat_get_tree,
.free = exfat_free,
+ .reconfigure = exfat_reconfigure,
};
static int exfat_init_fs_context(struct fs_context *fc)
}
-/**
- * __gfs2_readpage - readpage
- * @file: The file to read a page for
- * @page: The page to read
- *
- * This is the core of gfs2's readpage. It's used by the internal file
- * reading code as in that case we already hold the glock. Also it's
- * called by gfs2_readpage() once the required lock has been granted.
- */
-
static int __gfs2_readpage(void *file, struct page *page)
{
struct gfs2_inode *ip = GFS2_I(page->mapping->host);
struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
-
int error;
if (i_blocksize(page->mapping->host) == PAGE_SIZE &&
* gfs2_readpage - read a page of a file
* @file: The file to read
* @page: The page of the file
- *
- * This deals with the locking required. We have to unlock and
- * relock the page in order to get the locking in the right
- * order.
*/
static int gfs2_readpage(struct file *file, struct page *page)
{
- struct address_space *mapping = page->mapping;
- struct gfs2_inode *ip = GFS2_I(mapping->host);
- struct gfs2_holder gh;
- int error;
-
- unlock_page(page);
- gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
- error = gfs2_glock_nq(&gh);
- if (unlikely(error))
- goto out;
- error = AOP_TRUNCATED_PAGE;
- lock_page(page);
- if (page->mapping == mapping && !PageUptodate(page))
- error = __gfs2_readpage(file, page);
- else
- unlock_page(page);
- gfs2_glock_dq(&gh);
-out:
- gfs2_holder_uninit(&gh);
- if (error && error != AOP_TRUNCATED_PAGE)
- lock_page(page);
- return error;
+ return __gfs2_readpage(file, page);
}
/**
{
struct inode *inode = rac->mapping->host;
struct gfs2_inode *ip = GFS2_I(inode);
- struct gfs2_holder gh;
- gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
- if (gfs2_glock_nq(&gh))
- goto out_uninit;
if (!gfs2_is_stuffed(ip))
mpage_readahead(rac, gfs2_block_map);
- gfs2_glock_dq(&gh);
-out_uninit:
- gfs2_holder_uninit(&gh);
}
/**
return block_page_mkwrite_return(ret);
}
+static vm_fault_t gfs2_fault(struct vm_fault *vmf)
+{
+ struct inode *inode = file_inode(vmf->vma->vm_file);
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_holder gh;
+ vm_fault_t ret;
+ int err;
+
+ gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
+ err = gfs2_glock_nq(&gh);
+ if (err) {
+ ret = block_page_mkwrite_return(err);
+ goto out_uninit;
+ }
+ ret = filemap_fault(vmf);
+ gfs2_glock_dq(&gh);
+out_uninit:
+ gfs2_holder_uninit(&gh);
+ return ret;
+}
+
static const struct vm_operations_struct gfs2_vm_ops = {
- .fault = filemap_fault,
+ .fault = gfs2_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = gfs2_page_mkwrite,
};
static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
+ struct gfs2_inode *ip;
+ struct gfs2_holder gh;
+ size_t written = 0;
ssize_t ret;
if (iocb->ki_flags & IOCB_DIRECT) {
return ret;
iocb->ki_flags &= ~IOCB_DIRECT;
}
- return generic_file_read_iter(iocb, to);
+ iocb->ki_flags |= IOCB_NOIO;
+ ret = generic_file_read_iter(iocb, to);
+ iocb->ki_flags &= ~IOCB_NOIO;
+ if (ret >= 0) {
+ if (!iov_iter_count(to))
+ return ret;
+ written = ret;
+ } else {
+ if (ret != -EAGAIN)
+ return ret;
+ if (iocb->ki_flags & IOCB_NOWAIT)
+ return ret;
+ }
+ ip = GFS2_I(iocb->ki_filp->f_mapping->host);
+ gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
+ ret = gfs2_glock_nq(&gh);
+ if (ret)
+ goto out_uninit;
+ ret = generic_file_read_iter(iocb, to);
+ if (ret > 0)
+ written += ret;
+ gfs2_glock_dq(&gh);
+out_uninit:
+ gfs2_holder_uninit(&gh);
+ return written ? written : ret;
}
/**
static void flush_delete_work(struct gfs2_glock *gl)
{
- flush_delayed_work(&gl->gl_delete);
+ if (cancel_delayed_work(&gl->gl_delete)) {
+ queue_delayed_work(gfs2_delete_workqueue,
+ &gl->gl_delete, 0);
+ }
gfs2_glock_queue_work(gl, 0);
}
int error = 0;
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
- if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
- test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
+ if (gl->gl_req == LM_ST_EXCLUSIVE && !gfs2_withdrawn(sdp)) {
atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
error = freeze_super(sdp->sd_vfs);
if (error) {
gfs2_assert_withdraw(sdp, 0);
}
queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
- gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
- GFS2_LFC_FREEZE_GO_SYNC);
+ if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
+ gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
+ GFS2_LFC_FREEZE_GO_SYNC);
+ else /* read-only mounts */
+ atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
}
return 0;
}
GIF_QD_LOCKED = 1,
GIF_ALLOC_FAILED = 2,
GIF_SW_PAGED = 3,
- GIF_ORDERED = 4,
GIF_FREE_VFS_INODE = 5,
GIF_GLOP_PENDING = 6,
GIF_DEFERRED_DELETE = 7,
if (no_formal_ino && ip->i_no_formal_ino &&
no_formal_ino != ip->i_no_formal_ino) {
+ error = -ESTALE;
if (inode->i_state & I_NEW)
goto fail;
iput(inode);
- return ERR_PTR(-ESTALE);
+ return ERR_PTR(error);
}
if (inode->i_state & I_NEW)
return 0;
}
+static void __ordered_del_inode(struct gfs2_inode *ip)
+{
+ if (!list_empty(&ip->i_ordered))
+ list_del_init(&ip->i_ordered);
+}
+
static void gfs2_ordered_write(struct gfs2_sbd *sdp)
{
struct gfs2_inode *ip;
while (!list_empty(&sdp->sd_log_ordered)) {
ip = list_first_entry(&sdp->sd_log_ordered, struct gfs2_inode, i_ordered);
if (ip->i_inode.i_mapping->nrpages == 0) {
- test_and_clear_bit(GIF_ORDERED, &ip->i_flags);
- list_del(&ip->i_ordered);
+ __ordered_del_inode(ip);
continue;
}
list_move(&ip->i_ordered, &written);
spin_lock(&sdp->sd_ordered_lock);
while (!list_empty(&sdp->sd_log_ordered)) {
ip = list_first_entry(&sdp->sd_log_ordered, struct gfs2_inode, i_ordered);
- list_del(&ip->i_ordered);
- WARN_ON(!test_and_clear_bit(GIF_ORDERED, &ip->i_flags));
+ __ordered_del_inode(ip);
if (ip->i_inode.i_mapping->nrpages == 0)
continue;
spin_unlock(&sdp->sd_ordered_lock);
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
spin_lock(&sdp->sd_ordered_lock);
- if (test_and_clear_bit(GIF_ORDERED, &ip->i_flags))
- list_del(&ip->i_ordered);
+ __ordered_del_inode(ip);
spin_unlock(&sdp->sd_ordered_lock);
}
out:
if (gfs2_withdrawn(sdp)) {
+ /*
+ * If the tr_list is empty, we're withdrawing during a log
+ * flush that targets a transaction, but the transaction was
+ * never queued onto any of the ail lists. Here we add it to
+ * ail1 just so that ail_drain() will find and free it.
+ */
+ spin_lock(&sdp->sd_ail_lock);
+ if (tr && list_empty(&tr->tr_list))
+ list_add(&tr->tr_list, &sdp->sd_ail1_list);
+ spin_unlock(&sdp->sd_ail_lock);
ail_drain(sdp); /* frees all transactions */
tr = NULL;
}
if (gfs2_is_jdata(ip) || !gfs2_is_ordered(sdp))
return;
- if (!test_bit(GIF_ORDERED, &ip->i_flags)) {
+ if (list_empty(&ip->i_ordered)) {
spin_lock(&sdp->sd_ordered_lock);
- if (!test_and_set_bit(GIF_ORDERED, &ip->i_flags))
+ if (list_empty(&ip->i_ordered))
list_add(&ip->i_ordered, &sdp->sd_log_ordered);
spin_unlock(&sdp->sd_ordered_lock);
}
atomic_set(&ip->i_sizehint, 0);
init_rwsem(&ip->i_rw_mutex);
INIT_LIST_HEAD(&ip->i_trunc_list);
+ INIT_LIST_HEAD(&ip->i_ordered);
ip->i_qadata = NULL;
gfs2_holder_mark_uninitialized(&ip->i_rgd_gh);
memset(&ip->i_res, 0, sizeof(ip->i_res));
goto fail_per_node;
}
- if (!sb_rdonly(sb)) {
+ if (sb_rdonly(sb)) {
+ struct gfs2_holder freeze_gh;
+
+ error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
+ LM_FLAG_NOEXP | GL_EXACT,
+ &freeze_gh);
+ if (error) {
+ fs_err(sdp, "can't make FS RO: %d\n", error);
+ goto fail_per_node;
+ }
+ gfs2_glock_dq_uninit(&freeze_gh);
+ } else {
error = gfs2_make_fs_rw(sdp);
if (error) {
fs_err(sdp, "can't make FS RW: %d\n", error);
/* Acquire a shared hold on the freeze lock */
error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
- LM_FLAG_NOEXP | LM_FLAG_PRIORITY,
- &thaw_gh);
+ LM_FLAG_NOEXP | LM_FLAG_PRIORITY |
+ GL_EXACT, &thaw_gh);
if (error)
goto fail_gunlock_ji;
if (error)
return error;
- error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 0,
+ error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
+ LM_FLAG_NOEXP | GL_EXACT,
&freeze_gh);
if (error)
goto fail_threads;
return 0;
fail:
- freeze_gh.gh_flags |= GL_NOCACHE;
gfs2_glock_dq_uninit(&freeze_gh);
fail_threads:
if (sdp->sd_quotad_process)
}
error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
- GL_NOCACHE, &sdp->sd_freeze_gh);
+ LM_FLAG_NOEXP, &sdp->sd_freeze_gh);
if (error)
goto out;
!gfs2_glock_is_locked_by_me(sdp->sd_freeze_gl)) {
if (!log_write_allowed) {
error = gfs2_glock_nq_init(sdp->sd_freeze_gl,
- LM_ST_SHARED, GL_NOCACHE |
- LM_FLAG_TRY, &freeze_gh);
+ LM_ST_SHARED, LM_FLAG_TRY |
+ LM_FLAG_NOEXP | GL_EXACT,
+ &freeze_gh);
if (error == GLR_TRYFAILED)
error = 0;
} else {
error = gfs2_glock_nq_init(sdp->sd_freeze_gl,
- LM_ST_SHARED, GL_NOCACHE,
+ LM_ST_SHARED,
+ LM_FLAG_NOEXP | GL_EXACT,
&freeze_gh);
if (error && !gfs2_withdrawn(sdp))
return error;
struct super_block *sb = sdp->sd_vfs;
atomic_inc(&sb->s_active);
- error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 0,
- &freeze_gh);
+ error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
+ LM_FLAG_NOEXP | GL_EXACT, &freeze_gh);
if (error) {
fs_info(sdp, "GFS2: couldn't get freeze lock : %d\n", error);
gfs2_assert_withdraw(sdp, 0);
error);
gfs2_assert_withdraw(sdp, 0);
}
- if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
- freeze_gh.gh_flags |= GL_NOCACHE;
gfs2_glock_dq_uninit(&freeze_gh);
}
deactivate_super(sb);
{
struct inode *inode = dreq->inode;
- inode_dio_end(inode);
-
if (dreq->iocb) {
long res = (long) dreq->error;
if (dreq->count != 0) {
complete(&dreq->completion);
+ igrab(inode);
nfs_direct_req_release(dreq);
+ inode_dio_end(inode);
+ iput(inode);
}
static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
* generic layer handle the completion.
*/
if (requested_bytes == 0) {
- inode_dio_end(inode);
+ igrab(inode);
nfs_direct_req_release(dreq);
+ inode_dio_end(inode);
+ iput(inode);
return result < 0 ? result : -EIO;
}
* generic layer handle the completion.
*/
if (requested_bytes == 0) {
- inode_dio_end(inode);
+ igrab(inode);
nfs_direct_req_release(dreq);
+ inode_dio_end(inode);
+ iput(inode);
return result < 0 ? result : -EIO;
}
dprintk("NFS: release(%pD2)\n", filp);
nfs_inc_stats(inode, NFSIOS_VFSRELEASE);
+ inode_dio_wait(inode);
nfs_file_clear_open_context(filp);
return 0;
}
goto out_mds;
/* Use a direct mapping of ds_idx to pgio mirror_idx */
- if (WARN_ON_ONCE(pgio->pg_mirror_count !=
- FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)))
- goto out_mds;
+ if (pgio->pg_mirror_count != FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))
+ goto out_eagain;
for (i = 0; i < pgio->pg_mirror_count; i++) {
mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
pgio->pg_maxretrans = io_maxretrans;
return;
-
+out_eagain:
+ pnfs_generic_pg_cleanup(pgio);
+ pgio->pg_error = -EAGAIN;
+ return;
out_mds:
trace_pnfs_mds_fallback_pg_init_write(pgio->pg_inode,
0, NFS4_MAX_UINT64, IOMODE_RW,
pgio->pg_lseg = NULL;
pgio->pg_maxretrans = 0;
nfs_pageio_reset_write_mds(pgio);
+ pgio->pg_error = -EAGAIN;
}
static unsigned int
if (IS_ERR(export_path))
return PTR_ERR(export_path);
+ kfree(ctx->nfs_server.export_path);
ctx->nfs_server.export_path = export_path;
source = kmalloc(len + 1 + ctx->nfs_server.export_path_len + 1,
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
int ret;
- ret = nfs4_state_create_net(net);
+ ret = get_nfsdfs(net);
if (ret)
return ret;
+ ret = nfs4_state_create_net(net);
+ if (ret) {
+ mntput(nn->nfsd_mnt);
+ return ret;
+ }
locks_start_grace(net, &nn->nfsd4_manager);
nfsd4_client_tracking_init(net);
if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
nfsd4_client_tracking_exit(net);
nfs4_state_destroy_net(net);
+ mntput(nn->nfsd_mnt);
}
void
WARN_ON_ONCE(ret);
fsnotify_rmdir(dir, dentry);
d_delete(dentry);
+ dput(dentry);
inode_unlock(dir);
}
};
MODULE_ALIAS_FS("nfsd");
+int get_nfsdfs(struct net *net)
+{
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ struct vfsmount *mnt;
+
+ mnt = vfs_kern_mount(&nfsd_fs_type, SB_KERNMOUNT, "nfsd", NULL);
+ if (IS_ERR(mnt))
+ return PTR_ERR(mnt);
+ nn->nfsd_mnt = mnt;
+ return 0;
+}
+
#ifdef CONFIG_PROC_FS
static int create_proc_exports_entry(void)
{
static __net_init int nfsd_init_net(struct net *net)
{
int retval;
- struct vfsmount *mnt;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
retval = nfsd_export_init(net);
init_waitqueue_head(&nn->ntf_wq);
seqlock_init(&nn->boot_lock);
- mnt = vfs_kern_mount(&nfsd_fs_type, SB_KERNMOUNT, "nfsd", NULL);
- if (IS_ERR(mnt)) {
- retval = PTR_ERR(mnt);
- goto out_mount_err;
- }
- nn->nfsd_mnt = mnt;
return 0;
-out_mount_err:
- nfsd_reply_cache_shutdown(nn);
out_drc_error:
nfsd_idmap_shutdown(net);
out_idmap_error:
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- mntput(nn->nfsd_mnt);
nfsd_reply_cache_shutdown(nn);
nfsd_idmap_shutdown(net);
nfsd_export_shutdown(net);
bool i_am_nfsd(void);
+int get_nfsdfs(struct net *);
+
struct nfsdfs_client {
struct kref cl_ref;
void (*cl_release)(struct kref *kref);
struct nfsdfs_client *ncl, u32 id, const struct tree_descr *);
void nfsd_client_rmdir(struct dentry *dentry);
+
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
#ifdef CONFIG_NFSD_V2_ACL
extern const struct svc_version nfsd_acl_version2;
iap->ia_mode = 0;
iap->ia_mode = (iap->ia_mode & S_IALLUGO) | type;
+ if (!IS_POSIXACL(dirp))
+ iap->ia_mode &= ~current_umask();
+
err = 0;
host_err = 0;
switch (type) {
goto out;
}
+ if (!IS_POSIXACL(dirp))
+ iap->ia_mode &= ~current_umask();
+
host_err = vfs_create(dirp, dchild, iap->ia_mode, true);
if (host_err < 0) {
fh_drop_write(fhp);
&ocfs2_nfs_sync_lops, osb);
}
+static void ocfs2_nfs_sync_lock_init(struct ocfs2_super *osb)
+{
+ ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb);
+ init_rwsem(&osb->nfs_sync_rwlock);
+}
+
void ocfs2_trim_fs_lock_res_init(struct ocfs2_super *osb)
{
struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;
if (ocfs2_is_hard_readonly(osb))
return -EROFS;
+ if (ex)
+ down_write(&osb->nfs_sync_rwlock);
+ else
+ down_read(&osb->nfs_sync_rwlock);
+
if (ocfs2_mount_local(osb))
return 0;
if (!ocfs2_mount_local(osb))
ocfs2_cluster_unlock(osb, lockres,
ex ? LKM_EXMODE : LKM_PRMODE);
+ if (ex)
+ up_write(&osb->nfs_sync_rwlock);
+ else
+ up_read(&osb->nfs_sync_rwlock);
}
int ocfs2_trim_fs_lock(struct ocfs2_super *osb,
local:
ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);
- ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb);
+ ocfs2_nfs_sync_lock_init(osb);
ocfs2_orphan_scan_lock_res_init(&osb->osb_orphan_scan.os_lockres, osb);
osb->cconn = conn;
struct ocfs2_lock_res osb_super_lockres;
struct ocfs2_lock_res osb_rename_lockres;
struct ocfs2_lock_res osb_nfs_sync_lockres;
+ struct rw_semaphore nfs_sync_rwlock;
struct ocfs2_lock_res osb_trim_fs_lockres;
struct mutex obs_trim_fs_mutex;
struct ocfs2_dlm_debug *osb_dlm_debug;
#define OCFS2_MAX_SLOTS 255
/* Slot map indicator for an empty slot */
-#define OCFS2_INVALID_SLOT -1
+#define OCFS2_INVALID_SLOT ((u16)-1)
#define OCFS2_VOL_UUID_LEN 16
#define OCFS2_MAX_VOL_LABEL_LEN 64
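The ((u16)-1) cast in the OCFS2_INVALID_SLOT change above fixes a signedness trap: a u16 slot value promotes to a non-negative int, so comparing it against a plain -1 can never succeed. A standalone demonstration, using uint16_t in place of the kernel's u16:

#include <stdint.h>
#include <stdio.h>

#define INVALID_SLOT_OLD	-1		/* stays int -1 after promotion */
#define INVALID_SLOT_NEW	((uint16_t)-1)	/* 65535, the on-disk value */

int main(void)
{
	uint16_t slot = 0xFFFF;	/* "invalid" slot as read from disk */

	/* slot promotes to int 65535, -1 stays -1: never equal */
	printf("old compare: %d\n", slot == INVALID_SLOT_OLD);	/* 0 */
	/* (uint16_t)-1 is 65535 and promotes to int 65535: matches */
	printf("new compare: %d\n", slot == INVALID_SLOT_NEW);	/* 1 */
	return 0;
}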
enum {
BAD_BLOCK_SYSTEM_INODE = 0,
GLOBAL_INODE_ALLOC_SYSTEM_INODE,
+#define OCFS2_FIRST_ONLINE_SYSTEM_INODE GLOBAL_INODE_ALLOC_SYSTEM_INODE
SLOT_MAP_SYSTEM_INODE,
-#define OCFS2_FIRST_ONLINE_SYSTEM_INODE SLOT_MAP_SYSTEM_INODE
HEARTBEAT_SYSTEM_INODE,
GLOBAL_BITMAP_SYSTEM_INODE,
USER_QUOTA_SYSTEM_INODE,
goto bail;
}
- inode_alloc_inode =
- ocfs2_get_system_file_inode(osb, INODE_ALLOC_SYSTEM_INODE,
- suballoc_slot);
+ if (suballoc_slot == (u16)OCFS2_INVALID_SLOT)
+ inode_alloc_inode = ocfs2_get_system_file_inode(osb,
+ GLOBAL_INODE_ALLOC_SYSTEM_INODE, suballoc_slot);
+ else
+ inode_alloc_inode = ocfs2_get_system_file_inode(osb,
+ INODE_ALLOC_SYSTEM_INODE, suballoc_slot);
if (!inode_alloc_inode) {
/* the error code could be inaccurate, but we are not able to
* get the correct one. */
goto out;
/* don't even try if the size is too large */
- if (count > KMALLOC_MAX_SIZE)
- return -ENOMEM;
+ error = -ENOMEM;
+ if (count >= KMALLOC_MAX_SIZE)
+ goto out;
if (write) {
kbuf = memdup_user_nul(ubuf, count);
goto out;
}
} else {
- error = -ENOMEM;
kbuf = kzalloc(count, GFP_KERNEL);
if (!kbuf)
goto out;
return ret;
}
-ssize_t __vfs_read(struct file *file, char __user *buf, size_t count,
- loff_t *pos)
+ssize_t __kernel_read(struct file *file, void *buf, size_t count, loff_t *pos)
{
+ mm_segment_t old_fs = get_fs();
+ ssize_t ret;
+
+ if (WARN_ON_ONCE(!(file->f_mode & FMODE_READ)))
+ return -EINVAL;
+ if (!(file->f_mode & FMODE_CAN_READ))
+ return -EINVAL;
+
+ if (count > MAX_RW_COUNT)
+ count = MAX_RW_COUNT;
+ set_fs(KERNEL_DS);
if (file->f_op->read)
- return file->f_op->read(file, buf, count, pos);
+ ret = file->f_op->read(file, (void __user *)buf, count, pos);
else if (file->f_op->read_iter)
- return new_sync_read(file, buf, count, pos);
+ ret = new_sync_read(file, (void __user *)buf, count, pos);
else
- return -EINVAL;
+ ret = -EINVAL;
+ set_fs(old_fs);
+ if (ret > 0) {
+ fsnotify_access(file);
+ add_rchar(current, ret);
+ }
+ inc_syscr(current);
+ return ret;
}
ssize_t kernel_read(struct file *file, void *buf, size_t count, loff_t *pos)
{
- mm_segment_t old_fs;
- ssize_t result;
+ ssize_t ret;
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- /* The cast to a user pointer is valid due to the set_fs() */
- result = vfs_read(file, (void __user *)buf, count, pos);
- set_fs(old_fs);
- return result;
+ ret = rw_verify_area(READ, file, pos, count);
+ if (ret)
+ return ret;
+ return __kernel_read(file, buf, count, pos);
}
EXPORT_SYMBOL(kernel_read);
return -EFAULT;
ret = rw_verify_area(READ, file, pos, count);
- if (!ret) {
- if (count > MAX_RW_COUNT)
- count = MAX_RW_COUNT;
- ret = __vfs_read(file, buf, count, pos);
- if (ret > 0) {
- fsnotify_access(file);
- add_rchar(current, ret);
- }
- inc_syscr(current);
- }
+ if (ret)
+ return ret;
+ if (count > MAX_RW_COUNT)
+ count = MAX_RW_COUNT;
+ if (file->f_op->read)
+ ret = file->f_op->read(file, buf, count, pos);
+ else if (file->f_op->read_iter)
+ ret = new_sync_read(file, buf, count, pos);
+ else
+ ret = -EINVAL;
+ if (ret > 0) {
+ fsnotify_access(file);
+ add_rchar(current, ret);
+ }
+ inc_syscr(current);
return ret;
}
return ret;
}
-static ssize_t __vfs_write(struct file *file, const char __user *p,
- size_t count, loff_t *pos)
-{
- if (file->f_op->write)
- return file->f_op->write(file, p, count, pos);
- else if (file->f_op->write_iter)
- return new_sync_write(file, p, count, pos);
- else
- return -EINVAL;
-}
-
+/* caller is responsible for file_start_write/file_end_write */
ssize_t __kernel_write(struct file *file, const void *buf, size_t count, loff_t *pos)
{
mm_segment_t old_fs;
const char __user *p;
ssize_t ret;
+ if (WARN_ON_ONCE(!(file->f_mode & FMODE_WRITE)))
+ return -EBADF;
if (!(file->f_mode & FMODE_CAN_WRITE))
return -EINVAL;
p = (__force const char __user *)buf;
if (count > MAX_RW_COUNT)
count = MAX_RW_COUNT;
- ret = __vfs_write(file, p, count, pos);
+ if (file->f_op->write)
+ ret = file->f_op->write(file, p, count, pos);
+ else if (file->f_op->write_iter)
+ ret = new_sync_write(file, p, count, pos);
+ else
+ ret = -EINVAL;
set_fs(old_fs);
if (ret > 0) {
fsnotify_modify(file);
inc_syscw(current);
return ret;
}
-EXPORT_SYMBOL(__kernel_write);
ssize_t kernel_write(struct file *file, const void *buf, size_t count,
loff_t *pos)
{
- mm_segment_t old_fs;
- ssize_t res;
+ ssize_t ret;
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- /* The cast to a user pointer is valid due to the set_fs() */
- res = vfs_write(file, (__force const char __user *)buf, count, pos);
- set_fs(old_fs);
+ ret = rw_verify_area(WRITE, file, pos, count);
+ if (ret)
+ return ret;
- return res;
+ file_start_write(file);
+ ret = __kernel_write(file, buf, count, pos);
+ file_end_write(file);
+ return ret;
}
EXPORT_SYMBOL(kernel_write);
return -EFAULT;
ret = rw_verify_area(WRITE, file, pos, count);
- if (!ret) {
- if (count > MAX_RW_COUNT)
- count = MAX_RW_COUNT;
- file_start_write(file);
- ret = __vfs_write(file, buf, count, pos);
- if (ret > 0) {
- fsnotify_modify(file);
- add_wchar(current, ret);
- }
- inc_syscw(current);
- file_end_write(file);
+ if (ret)
+ return ret;
+ if (count > MAX_RW_COUNT)
+ count = MAX_RW_COUNT;
+ file_start_write(file);
+ if (file->f_op->write)
+ ret = file->f_op->write(file, buf, count, pos);
+ else if (file->f_op->write_iter)
+ ret = new_sync_write(file, buf, count, pos);
+ else
+ ret = -EINVAL;
+ if (ret > 0) {
+ fsnotify_modify(file);
+ add_wchar(current, ret);
}
-
+ inc_syscw(current);
+ file_end_write(file);
return ret;
}
/*
* Wake up any background push waiters now that this context is being pushed.
*/
- wake_up_all(&ctx->push_wait);
+ if (ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log))
+ wake_up_all(&cil->xc_push_wait);
/*
* Check if we've anything to push. If there is nothing, then we don't
/*
* initialise the new context and attach it to the CIL. Then attach
- * the current context to the CIL committing lsit so it can be found
+ * the current context to the CIL committing list so it can be found
* during log forces to extract the commit lsn of the sequence that
* needs to be forced.
*/
INIT_LIST_HEAD(&new_ctx->committing);
INIT_LIST_HEAD(&new_ctx->busy_extents);
- init_waitqueue_head(&new_ctx->push_wait);
new_ctx->sequence = ctx->sequence + 1;
new_ctx->cil = cil;
cil->xc_ctx = new_ctx;
if (cil->xc_ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log)) {
trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket);
ASSERT(cil->xc_ctx->space_used < log->l_logsize);
- xlog_wait(&cil->xc_ctx->push_wait, &cil->xc_push_lock);
+ xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock);
return;
}
INIT_LIST_HEAD(&cil->xc_committing);
spin_lock_init(&cil->xc_cil_lock);
spin_lock_init(&cil->xc_push_lock);
+ init_waitqueue_head(&cil->xc_push_wait);
init_rwsem(&cil->xc_ctx_lock);
init_waitqueue_head(&cil->xc_commit_wait);
INIT_LIST_HEAD(&ctx->committing);
INIT_LIST_HEAD(&ctx->busy_extents);
- init_waitqueue_head(&ctx->push_wait);
ctx->sequence = 1;
ctx->cil = cil;
cil->xc_ctx = ctx;
struct xfs_log_vec *lv_chain; /* logvecs being pushed */
struct list_head iclog_entry;
struct list_head committing; /* ctx committing list */
- wait_queue_head_t push_wait; /* background push throttle */
struct work_struct discard_endio_work;
};
wait_queue_head_t xc_commit_wait;
xfs_lsn_t xc_current_sequence;
struct work_struct xc_push_work;
+ wait_queue_head_t xc_push_wait; /* background push throttle */
} ____cacheline_aligned_in_smp;
/*
#ifndef _ASM_GENERIC_CACHEFLUSH_H
#define _ASM_GENERIC_CACHEFLUSH_H
+struct mm_struct;
+struct vm_area_struct;
+struct page;
+struct address_space;
+
/*
* The cache doesn't need to be flushed when TLB entries change when
* the cache is mapped to physical memory, not virtual memory
struct sock *parent;
- unsigned int refcnt;
- unsigned int nokey_refcnt;
+ atomic_t refcnt;
+ atomic_t nokey_refcnt;
const struct af_alg_type *type;
void *private;
#endif /* cmpxchg64_relaxed */
+#define arch_atomic_read atomic_read
+#define arch_atomic_read_acquire atomic_read_acquire
+
#ifndef atomic_read_acquire
static __always_inline int
atomic_read_acquire(const atomic_t *v)
#define atomic_read_acquire atomic_read_acquire
#endif
+#define arch_atomic_set atomic_set
+#define arch_atomic_set_release atomic_set_release
+
#ifndef atomic_set_release
static __always_inline void
atomic_set_release(atomic_t *v, int i)
#define atomic_set_release atomic_set_release
#endif
+#define arch_atomic_add atomic_add
+
+#define arch_atomic_add_return atomic_add_return
+#define arch_atomic_add_return_acquire atomic_add_return_acquire
+#define arch_atomic_add_return_release atomic_add_return_release
+#define arch_atomic_add_return_relaxed atomic_add_return_relaxed
+
#ifndef atomic_add_return_relaxed
#define atomic_add_return_acquire atomic_add_return
#define atomic_add_return_release atomic_add_return
#endif /* atomic_add_return_relaxed */
+#define arch_atomic_fetch_add atomic_fetch_add
+#define arch_atomic_fetch_add_acquire atomic_fetch_add_acquire
+#define arch_atomic_fetch_add_release atomic_fetch_add_release
+#define arch_atomic_fetch_add_relaxed atomic_fetch_add_relaxed
+
#ifndef atomic_fetch_add_relaxed
#define atomic_fetch_add_acquire atomic_fetch_add
#define atomic_fetch_add_release atomic_fetch_add
#endif /* atomic_fetch_add_relaxed */
+#define arch_atomic_sub atomic_sub
+
+#define arch_atomic_sub_return atomic_sub_return
+#define arch_atomic_sub_return_acquire atomic_sub_return_acquire
+#define arch_atomic_sub_return_release atomic_sub_return_release
+#define arch_atomic_sub_return_relaxed atomic_sub_return_relaxed
+
#ifndef atomic_sub_return_relaxed
#define atomic_sub_return_acquire atomic_sub_return
#define atomic_sub_return_release atomic_sub_return
#endif /* atomic_sub_return_relaxed */
+#define arch_atomic_fetch_sub atomic_fetch_sub
+#define arch_atomic_fetch_sub_acquire atomic_fetch_sub_acquire
+#define arch_atomic_fetch_sub_release atomic_fetch_sub_release
+#define arch_atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+
#ifndef atomic_fetch_sub_relaxed
#define atomic_fetch_sub_acquire atomic_fetch_sub
#define atomic_fetch_sub_release atomic_fetch_sub
#endif /* atomic_fetch_sub_relaxed */
+#define arch_atomic_inc atomic_inc
+
#ifndef atomic_inc
static __always_inline void
atomic_inc(atomic_t *v)
#define atomic_inc atomic_inc
#endif
+#define arch_atomic_inc_return atomic_inc_return
+#define arch_atomic_inc_return_acquire atomic_inc_return_acquire
+#define arch_atomic_inc_return_release atomic_inc_return_release
+#define arch_atomic_inc_return_relaxed atomic_inc_return_relaxed
+
#ifndef atomic_inc_return_relaxed
#ifdef atomic_inc_return
#define atomic_inc_return_acquire atomic_inc_return
#endif /* atomic_inc_return_relaxed */
+#define arch_atomic_fetch_inc atomic_fetch_inc
+#define arch_atomic_fetch_inc_acquire atomic_fetch_inc_acquire
+#define arch_atomic_fetch_inc_release atomic_fetch_inc_release
+#define arch_atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed
+
#ifndef atomic_fetch_inc_relaxed
#ifdef atomic_fetch_inc
#define atomic_fetch_inc_acquire atomic_fetch_inc
#endif /* atomic_fetch_inc_relaxed */
+#define arch_atomic_dec atomic_dec
+
#ifndef atomic_dec
static __always_inline void
atomic_dec(atomic_t *v)
#define atomic_dec atomic_dec
#endif
+#define arch_atomic_dec_return atomic_dec_return
+#define arch_atomic_dec_return_acquire atomic_dec_return_acquire
+#define arch_atomic_dec_return_release atomic_dec_return_release
+#define arch_atomic_dec_return_relaxed atomic_dec_return_relaxed
+
#ifndef atomic_dec_return_relaxed
#ifdef atomic_dec_return
#define atomic_dec_return_acquire atomic_dec_return
#endif /* atomic_dec_return_relaxed */
+#define arch_atomic_fetch_dec atomic_fetch_dec
+#define arch_atomic_fetch_dec_acquire atomic_fetch_dec_acquire
+#define arch_atomic_fetch_dec_release atomic_fetch_dec_release
+#define arch_atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed
+
#ifndef atomic_fetch_dec_relaxed
#ifdef atomic_fetch_dec
#define atomic_fetch_dec_acquire atomic_fetch_dec
#endif /* atomic_fetch_dec_relaxed */
+#define arch_atomic_and atomic_and
+
+#define arch_atomic_fetch_and atomic_fetch_and
+#define arch_atomic_fetch_and_acquire atomic_fetch_and_acquire
+#define arch_atomic_fetch_and_release atomic_fetch_and_release
+#define arch_atomic_fetch_and_relaxed atomic_fetch_and_relaxed
+
#ifndef atomic_fetch_and_relaxed
#define atomic_fetch_and_acquire atomic_fetch_and
#define atomic_fetch_and_release atomic_fetch_and
#endif /* atomic_fetch_and_relaxed */
+#define arch_atomic_andnot atomic_andnot
+
#ifndef atomic_andnot
static __always_inline void
atomic_andnot(int i, atomic_t *v)
#define atomic_andnot atomic_andnot
#endif
+#define arch_atomic_fetch_andnot atomic_fetch_andnot
+#define arch_atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
+#define arch_atomic_fetch_andnot_release atomic_fetch_andnot_release
+#define arch_atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
+
#ifndef atomic_fetch_andnot_relaxed
#ifdef atomic_fetch_andnot
#define atomic_fetch_andnot_acquire atomic_fetch_andnot
#endif /* atomic_fetch_andnot_relaxed */
+#define arch_atomic_or atomic_or
+
+#define arch_atomic_fetch_or atomic_fetch_or
+#define arch_atomic_fetch_or_acquire atomic_fetch_or_acquire
+#define arch_atomic_fetch_or_release atomic_fetch_or_release
+#define arch_atomic_fetch_or_relaxed atomic_fetch_or_relaxed
+
#ifndef atomic_fetch_or_relaxed
#define atomic_fetch_or_acquire atomic_fetch_or
#define atomic_fetch_or_release atomic_fetch_or
#endif /* atomic_fetch_or_relaxed */
+#define arch_atomic_xor atomic_xor
+
+#define arch_atomic_fetch_xor atomic_fetch_xor
+#define arch_atomic_fetch_xor_acquire atomic_fetch_xor_acquire
+#define arch_atomic_fetch_xor_release atomic_fetch_xor_release
+#define arch_atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+
#ifndef atomic_fetch_xor_relaxed
#define atomic_fetch_xor_acquire atomic_fetch_xor
#define atomic_fetch_xor_release atomic_fetch_xor
#endif /* atomic_fetch_xor_relaxed */
+#define arch_atomic_xchg atomic_xchg
+#define arch_atomic_xchg_acquire atomic_xchg_acquire
+#define arch_atomic_xchg_release atomic_xchg_release
+#define arch_atomic_xchg_relaxed atomic_xchg_relaxed
+
#ifndef atomic_xchg_relaxed
#define atomic_xchg_acquire atomic_xchg
#define atomic_xchg_release atomic_xchg
#endif /* atomic_xchg_relaxed */
+#define arch_atomic_cmpxchg atomic_cmpxchg
+#define arch_atomic_cmpxchg_acquire atomic_cmpxchg_acquire
+#define arch_atomic_cmpxchg_release atomic_cmpxchg_release
+#define arch_atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
+
#ifndef atomic_cmpxchg_relaxed
#define atomic_cmpxchg_acquire atomic_cmpxchg
#define atomic_cmpxchg_release atomic_cmpxchg
#endif /* atomic_cmpxchg_relaxed */
+#define arch_atomic_try_cmpxchg atomic_try_cmpxchg
+#define arch_atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
+#define arch_atomic_try_cmpxchg_release atomic_try_cmpxchg_release
+#define arch_atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed
+
#ifndef atomic_try_cmpxchg_relaxed
#ifdef atomic_try_cmpxchg
#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg
#endif /* atomic_try_cmpxchg_relaxed */
+#define arch_atomic_sub_and_test atomic_sub_and_test
+
#ifndef atomic_sub_and_test
/**
* atomic_sub_and_test - subtract value from variable and test result
#define atomic_sub_and_test atomic_sub_and_test
#endif
+#define arch_atomic_dec_and_test atomic_dec_and_test
+
#ifndef atomic_dec_and_test
/**
* atomic_dec_and_test - decrement and test
#define atomic_dec_and_test atomic_dec_and_test
#endif
+#define arch_atomic_inc_and_test atomic_inc_and_test
+
#ifndef atomic_inc_and_test
/**
* atomic_inc_and_test - increment and test
#define atomic_inc_and_test atomic_inc_and_test
#endif
+#define arch_atomic_add_negative atomic_add_negative
+
#ifndef atomic_add_negative
/**
* atomic_add_negative - add and test if negative
#define atomic_add_negative atomic_add_negative
#endif
+#define arch_atomic_fetch_add_unless atomic_fetch_add_unless
+
#ifndef atomic_fetch_add_unless
/**
* atomic_fetch_add_unless - add unless the number is already a given value
#define atomic_fetch_add_unless atomic_fetch_add_unless
#endif
+#define arch_atomic_add_unless atomic_add_unless
+
#ifndef atomic_add_unless
/**
* atomic_add_unless - add unless the number is already a given value
#define atomic_add_unless atomic_add_unless
#endif
+#define arch_atomic_inc_not_zero atomic_inc_not_zero
+
#ifndef atomic_inc_not_zero
/**
* atomic_inc_not_zero - increment unless the number is zero
#define atomic_inc_not_zero atomic_inc_not_zero
#endif
+#define arch_atomic_inc_unless_negative atomic_inc_unless_negative
+
#ifndef atomic_inc_unless_negative
static __always_inline bool
atomic_inc_unless_negative(atomic_t *v)
#define atomic_inc_unless_negative atomic_inc_unless_negative
#endif
+#define arch_atomic_dec_unless_positive atomic_dec_unless_positive
+
#ifndef atomic_dec_unless_positive
static __always_inline bool
atomic_dec_unless_positive(atomic_t *v)
#define atomic_dec_unless_positive atomic_dec_unless_positive
#endif
+#define arch_atomic_dec_if_positive atomic_dec_if_positive
+
#ifndef atomic_dec_if_positive
static __always_inline int
atomic_dec_if_positive(atomic_t *v)
#include <asm-generic/atomic64.h>
#endif
+#define arch_atomic64_read atomic64_read
+#define arch_atomic64_read_acquire atomic64_read_acquire
+
#ifndef atomic64_read_acquire
static __always_inline s64
atomic64_read_acquire(const atomic64_t *v)
#define atomic64_read_acquire atomic64_read_acquire
#endif
+#define arch_atomic64_set atomic64_set
+#define arch_atomic64_set_release atomic64_set_release
+
#ifndef atomic64_set_release
static __always_inline void
atomic64_set_release(atomic64_t *v, s64 i)
#define atomic64_set_release atomic64_set_release
#endif
+#define arch_atomic64_add atomic64_add
+
+#define arch_atomic64_add_return atomic64_add_return
+#define arch_atomic64_add_return_acquire atomic64_add_return_acquire
+#define arch_atomic64_add_return_release atomic64_add_return_release
+#define arch_atomic64_add_return_relaxed atomic64_add_return_relaxed
+
#ifndef atomic64_add_return_relaxed
#define atomic64_add_return_acquire atomic64_add_return
#define atomic64_add_return_release atomic64_add_return
#endif /* atomic64_add_return_relaxed */
+#define arch_atomic64_fetch_add atomic64_fetch_add
+#define arch_atomic64_fetch_add_acquire atomic64_fetch_add_acquire
+#define arch_atomic64_fetch_add_release atomic64_fetch_add_release
+#define arch_atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
+
#ifndef atomic64_fetch_add_relaxed
#define atomic64_fetch_add_acquire atomic64_fetch_add
#define atomic64_fetch_add_release atomic64_fetch_add
#endif /* atomic64_fetch_add_relaxed */
+#define arch_atomic64_sub atomic64_sub
+
+#define arch_atomic64_sub_return atomic64_sub_return
+#define arch_atomic64_sub_return_acquire atomic64_sub_return_acquire
+#define arch_atomic64_sub_return_release atomic64_sub_return_release
+#define arch_atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+
#ifndef atomic64_sub_return_relaxed
#define atomic64_sub_return_acquire atomic64_sub_return
#define atomic64_sub_return_release atomic64_sub_return
#endif /* atomic64_sub_return_relaxed */
+#define arch_atomic64_fetch_sub atomic64_fetch_sub
+#define arch_atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire
+#define arch_atomic64_fetch_sub_release atomic64_fetch_sub_release
+#define arch_atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+
#ifndef atomic64_fetch_sub_relaxed
#define atomic64_fetch_sub_acquire atomic64_fetch_sub
#define atomic64_fetch_sub_release atomic64_fetch_sub
#endif /* atomic64_fetch_sub_relaxed */
+#define arch_atomic64_inc atomic64_inc
+
#ifndef atomic64_inc
static __always_inline void
atomic64_inc(atomic64_t *v)
#define atomic64_inc atomic64_inc
#endif
+#define arch_atomic64_inc_return atomic64_inc_return
+#define arch_atomic64_inc_return_acquire atomic64_inc_return_acquire
+#define arch_atomic64_inc_return_release atomic64_inc_return_release
+#define arch_atomic64_inc_return_relaxed atomic64_inc_return_relaxed
+
#ifndef atomic64_inc_return_relaxed
#ifdef atomic64_inc_return
#define atomic64_inc_return_acquire atomic64_inc_return
#endif /* atomic64_inc_return_relaxed */
+#define arch_atomic64_fetch_inc atomic64_fetch_inc
+#define arch_atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
+#define arch_atomic64_fetch_inc_release atomic64_fetch_inc_release
+#define arch_atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
+
#ifndef atomic64_fetch_inc_relaxed
#ifdef atomic64_fetch_inc
#define atomic64_fetch_inc_acquire atomic64_fetch_inc
#endif /* atomic64_fetch_inc_relaxed */
+#define arch_atomic64_dec atomic64_dec
+
#ifndef atomic64_dec
static __always_inline void
atomic64_dec(atomic64_t *v)
#define atomic64_dec atomic64_dec
#endif
+#define arch_atomic64_dec_return atomic64_dec_return
+#define arch_atomic64_dec_return_acquire atomic64_dec_return_acquire
+#define arch_atomic64_dec_return_release atomic64_dec_return_release
+#define arch_atomic64_dec_return_relaxed atomic64_dec_return_relaxed
+
#ifndef atomic64_dec_return_relaxed
#ifdef atomic64_dec_return
#define atomic64_dec_return_acquire atomic64_dec_return
#endif /* atomic64_dec_return_relaxed */
+#define arch_atomic64_fetch_dec atomic64_fetch_dec
+#define arch_atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
+#define arch_atomic64_fetch_dec_release atomic64_fetch_dec_release
+#define arch_atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
+
#ifndef atomic64_fetch_dec_relaxed
#ifdef atomic64_fetch_dec
#define atomic64_fetch_dec_acquire atomic64_fetch_dec
#endif /* atomic64_fetch_dec_relaxed */
+#define arch_atomic64_and atomic64_and
+
+#define arch_atomic64_fetch_and atomic64_fetch_and
+#define arch_atomic64_fetch_and_acquire atomic64_fetch_and_acquire
+#define arch_atomic64_fetch_and_release atomic64_fetch_and_release
+#define arch_atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
+
#ifndef atomic64_fetch_and_relaxed
#define atomic64_fetch_and_acquire atomic64_fetch_and
#define atomic64_fetch_and_release atomic64_fetch_and
#endif /* atomic64_fetch_and_relaxed */
+#define arch_atomic64_andnot atomic64_andnot
+
#ifndef atomic64_andnot
static __always_inline void
atomic64_andnot(s64 i, atomic64_t *v)
#define atomic64_andnot atomic64_andnot
#endif
+#define arch_atomic64_fetch_andnot atomic64_fetch_andnot
+#define arch_atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
+#define arch_atomic64_fetch_andnot_release atomic64_fetch_andnot_release
+#define arch_atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
+
#ifndef atomic64_fetch_andnot_relaxed
#ifdef atomic64_fetch_andnot
#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot
#endif /* atomic64_fetch_andnot_relaxed */
+#define arch_atomic64_or atomic64_or
+
+#define arch_atomic64_fetch_or atomic64_fetch_or
+#define arch_atomic64_fetch_or_acquire atomic64_fetch_or_acquire
+#define arch_atomic64_fetch_or_release atomic64_fetch_or_release
+#define arch_atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
+
#ifndef atomic64_fetch_or_relaxed
#define atomic64_fetch_or_acquire atomic64_fetch_or
#define atomic64_fetch_or_release atomic64_fetch_or
#endif /* atomic64_fetch_or_relaxed */
+#define arch_atomic64_xor atomic64_xor
+
+#define arch_atomic64_fetch_xor atomic64_fetch_xor
+#define arch_atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire
+#define arch_atomic64_fetch_xor_release atomic64_fetch_xor_release
+#define arch_atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+
#ifndef atomic64_fetch_xor_relaxed
#define atomic64_fetch_xor_acquire atomic64_fetch_xor
#define atomic64_fetch_xor_release atomic64_fetch_xor
#endif /* atomic64_fetch_xor_relaxed */
+#define arch_atomic64_xchg atomic64_xchg
+#define arch_atomic64_xchg_acquire atomic64_xchg_acquire
+#define arch_atomic64_xchg_release atomic64_xchg_release
+#define arch_atomic64_xchg_relaxed atomic64_xchg_relaxed
+
#ifndef atomic64_xchg_relaxed
#define atomic64_xchg_acquire atomic64_xchg
#define atomic64_xchg_release atomic64_xchg
#endif /* atomic64_xchg_relaxed */
+#define arch_atomic64_cmpxchg atomic64_cmpxchg
+#define arch_atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire
+#define arch_atomic64_cmpxchg_release atomic64_cmpxchg_release
+#define arch_atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
+
#ifndef atomic64_cmpxchg_relaxed
#define atomic64_cmpxchg_acquire atomic64_cmpxchg
#define atomic64_cmpxchg_release atomic64_cmpxchg
#endif /* atomic64_cmpxchg_relaxed */
+#define arch_atomic64_try_cmpxchg atomic64_try_cmpxchg
+#define arch_atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
+#define arch_atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
+#define arch_atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed
+
#ifndef atomic64_try_cmpxchg_relaxed
#ifdef atomic64_try_cmpxchg
#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg
#endif /* atomic64_try_cmpxchg_relaxed */
+#define arch_atomic64_sub_and_test atomic64_sub_and_test
+
#ifndef atomic64_sub_and_test
/**
* atomic64_sub_and_test - subtract value from variable and test result
#define atomic64_sub_and_test atomic64_sub_and_test
#endif
+#define arch_atomic64_dec_and_test atomic64_dec_and_test
+
#ifndef atomic64_dec_and_test
/**
* atomic64_dec_and_test - decrement and test
#define atomic64_dec_and_test atomic64_dec_and_test
#endif
+#define arch_atomic64_inc_and_test atomic64_inc_and_test
+
#ifndef atomic64_inc_and_test
/**
* atomic64_inc_and_test - increment and test
#define atomic64_inc_and_test atomic64_inc_and_test
#endif
+#define arch_atomic64_add_negative atomic64_add_negative
+
#ifndef atomic64_add_negative
/**
* atomic64_add_negative - add and test if negative
#define atomic64_add_negative atomic64_add_negative
#endif
+#define arch_atomic64_fetch_add_unless atomic64_fetch_add_unless
+
#ifndef atomic64_fetch_add_unless
/**
* atomic64_fetch_add_unless - add unless the number is already a given value
#define atomic64_fetch_add_unless atomic64_fetch_add_unless
#endif
+#define arch_atomic64_add_unless atomic64_add_unless
+
#ifndef atomic64_add_unless
/**
* atomic64_add_unless - add unless the number is already a given value
#define atomic64_add_unless atomic64_add_unless
#endif
+#define arch_atomic64_inc_not_zero atomic64_inc_not_zero
+
#ifndef atomic64_inc_not_zero
/**
* atomic64_inc_not_zero - increment unless the number is zero
#define atomic64_inc_not_zero atomic64_inc_not_zero
#endif
+#define arch_atomic64_inc_unless_negative atomic64_inc_unless_negative
+
#ifndef atomic64_inc_unless_negative
static __always_inline bool
atomic64_inc_unless_negative(atomic64_t *v)
#define atomic64_inc_unless_negative atomic64_inc_unless_negative
#endif
+#define arch_atomic64_dec_unless_positive atomic64_dec_unless_positive
+
#ifndef atomic64_dec_unless_positive
static __always_inline bool
atomic64_dec_unless_positive(atomic64_t *v)
#define atomic64_dec_unless_positive atomic64_dec_unless_positive
#endif
+#define arch_atomic64_dec_if_positive atomic64_dec_if_positive
+
#ifndef atomic64_dec_if_positive
static __always_inline s64
atomic64_dec_if_positive(atomic64_t *v)
#endif
#endif /* _LINUX_ATOMIC_FALLBACK_H */
-// 1fac0941c79bf0ae100723cc2ac9b94061f0b67a
+// 9d95b56f98d82a2a26c7b79ccdd0c47572d50a6f
* position @h. For example
 * GENMASK_ULL(39, 21) gives us the 64-bit vector 0x000000ffffe00000.
*/
-#if !defined(__ASSEMBLY__) && \
- (!defined(CONFIG_CC_IS_GCC) || CONFIG_GCC_VERSION >= 49000)
+#if !defined(__ASSEMBLY__)
#include <linux/build_bug.h>
#define GENMASK_INPUT_CHECK(h, l) \
(BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
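/*
 * Illustrative note (editorial): with the GCC < 4.9 escape hatch gone,
 * GENMASK_INPUT_CHECK() is active in every C translation unit, so
 * swapped arguments always fail the build, e.g.:
 *
 *	#define RING_MASK	GENMASK_ULL(39, 21)	// OK: h >= l
 *	#define BAD_MASK	GENMASK_ULL(21, 39)	// BUILD_BUG_ON_ZERO() fires
 */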
u64 write_hints[BLK_MAX_WRITE_HINTS];
};
+/* Keep blk_queue_flag_name[] in sync with the definitions below */
#define QUEUE_FLAG_STOPPED 0 /* queue is stopped */
#define QUEUE_FLAG_DYING 1 /* queue being torn down */
#define QUEUE_FLAG_NOMERGES 3 /* disable merge attempts */
union bpf_attr __user *uattr);
int netns_bpf_prog_attach(const union bpf_attr *attr,
struct bpf_prog *prog);
-int netns_bpf_prog_detach(const union bpf_attr *attr);
+int netns_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
int netns_bpf_link_create(const union bpf_attr *attr,
struct bpf_prog *prog);
#else
return -EOPNOTSUPP;
}
-static inline int netns_bpf_prog_detach(const union bpf_attr *attr)
+static inline int netns_bpf_prog_detach(const union bpf_attr *attr,
+ enum bpf_prog_type ptype)
{
return -EOPNOTSUPP;
}
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
#if defined(CONFIG_BPF_STREAM_PARSER)
-int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which);
+int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
+ struct bpf_prog *old, u32 which);
int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
+int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
void sock_map_unhash(struct sock *sk);
void sock_map_close(struct sock *sk, long timeout);
#else
static inline int sock_map_prog_update(struct bpf_map *map,
- struct bpf_prog *prog, u32 which)
+ struct bpf_prog *prog,
+ struct bpf_prog *old, u32 which)
{
return -EOPNOTSUPP;
}
{
return -EINVAL;
}
+
+static inline int sock_map_prog_detach(const union bpf_attr *attr,
+ enum bpf_prog_type ptype)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_BPF_STREAM_PARSER */
#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
return BTF_INFO_KIND(t->info) == BTF_KIND_INT;
}
+static inline bool btf_type_is_small_int(const struct btf_type *t)
+{
+ return btf_type_is_int(t) && t->size <= sizeof(u64);
+}
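/*
 * Editorial note: "small" here means the integer fits in a single
 * 8-byte BPF register; BTF can also describe wider integers (e.g.
 * __int128), which the btf_type_is_small_int() callers later in this
 * patch must reject for direct scalar access.
 */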
+
static inline bool btf_type_is_enum(const struct btf_type *t)
{
return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM;
union {
#ifdef __LITTLE_ENDIAN
struct {
- u8 is_data;
+ u8 is_data : 1;
+ u8 no_refcnt : 1;
+ u8 unused : 6;
u8 padding;
u16 prioidx;
u32 classid;
u32 classid;
u16 prioidx;
u8 padding;
- u8 is_data;
+ u8 unused : 6;
+ u8 no_refcnt : 1;
+ u8 is_data : 1;
} __packed;
#endif
u64 val;
void cgroup_sk_alloc_disable(void);
void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
+void cgroup_sk_clone(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);
static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
*/
v = READ_ONCE(skcd->val);
- if (v & 1)
+ if (v & 3)
return &cgrp_dfl_root.cgrp;
return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
#else /* CONFIG_CGROUP_DATA */
static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
+static inline void cgroup_sk_clone(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}
#endif /* CONFIG_CGROUP_DATA */
#define __no_sanitize_thread
#endif
+#if __has_feature(undefined_behavior_sanitizer)
+/* GCC does not have __SANITIZE_UNDEFINED__ */
+#define __no_sanitize_undefined \
+ __attribute__((no_sanitize("undefined")))
+#else
+#define __no_sanitize_undefined
+#endif
+
/*
 * Not all versions of clang implement the type-generic versions
* of the builtin overflow checkers. Fortunately, clang implements
+ __GNUC_PATCHLEVEL__)
/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 */
-#if GCC_VERSION < 40800
+#if GCC_VERSION < 40900
# error Sorry, your compiler is too old - please upgrade it.
#endif
#define __no_sanitize_thread
#endif
+#if __has_attribute(__no_sanitize_undefined__)
+#define __no_sanitize_undefined __attribute__((no_sanitize_undefined))
+#else
+#define __no_sanitize_undefined
+#endif
+
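/*
 * Usage sketch (hypothetical function, editorial): the attribute lets
 * a function opt out of UBSan instrumentation where, for instance,
 * signed wrap-around is relied on deliberately:
 *
 *	static __no_sanitize_undefined int wrapping_add(int a, int b)
 *	{
 *		return a + b;	// signed overflow intentionally unchecked
 *	}
 */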
#if GCC_VERSION >= 50100
#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
#endif
#ifdef CONFIG_DEBUG_ENTRY
/* Begin/end of an instrumentation safe region */
#define instrumentation_begin() ({ \
- asm volatile("%c0:\n\t" \
+ asm volatile("%c0: nop\n\t" \
".pushsection .discard.instr_begin\n\t" \
".long %c0b - .\n\t" \
".popsection\n\t" : : "i" (__COUNTER__)); \
# define __GCC4_has_attribute___noclone__ 1
# define __GCC4_has_attribute___nonstring__ 0
# define __GCC4_has_attribute___no_sanitize_address__ (__GNUC_MINOR__ >= 8)
+# define __GCC4_has_attribute___no_sanitize_undefined__ (__GNUC_MINOR__ >= 9)
# define __GCC4_has_attribute___fallthrough__ 0
#endif
#define notrace __attribute__((__no_instrument_function__))
#endif
-/* Section for code which can't be instrumented at all */
-#define noinstr \
- noinline notrace __attribute((__section__(".noinstr.text")))
-
/*
* it doesn't make sense on ARM (currently the only user of __naked)
* to trace naked functions because then mcount is called without
#define __no_kcsan __no_sanitize_thread
#ifdef __SANITIZE_THREAD__
-# define __no_kcsan_or_inline __no_kcsan notrace __maybe_unused
-# define __no_sanitize_or_inline __no_kcsan_or_inline
-#else
-# define __no_kcsan_or_inline __always_inline
+# define __no_sanitize_or_inline __no_kcsan notrace __maybe_unused
#endif
#ifndef __no_sanitize_or_inline
#define __no_sanitize_or_inline __always_inline
#endif
+/* Section for code which can't be instrumented at all */
+#define noinstr \
+ noinline notrace __attribute((__section__(".noinstr.text"))) \
+ __no_kcsan __no_sanitize_address
+
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
* __unqual_scalar_typeof(x) - Declare an unqualified scalar type, leaving
* non-scalar types unchanged.
*/
-#if (defined(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 40900) || defined(__CHECKER__)
/*
- * We build this out of a couple of helper macros in a vain attempt to
- * help you keep your lunch down while reading it.
- */
-#define __pick_scalar_type(x, type, otherwise) \
- __builtin_choose_expr(__same_type(x, type), (type)0, otherwise)
-
-/*
- * 'char' is not type-compatible with either 'signed char' or 'unsigned char',
- * so we include the naked type here as well as the signed/unsigned variants.
- */
-#define __pick_integer_type(x, type, otherwise) \
- __pick_scalar_type(x, type, \
- __pick_scalar_type(x, unsigned type, \
- __pick_scalar_type(x, signed type, otherwise)))
-
-#define __unqual_scalar_typeof(x) typeof( \
- __pick_integer_type(x, char, \
- __pick_integer_type(x, short, \
- __pick_integer_type(x, int, \
- __pick_integer_type(x, long, \
- __pick_integer_type(x, long long, x))))))
-#else
-/*
- * If supported, prefer C11 _Generic for better compile-times. As above, 'char'
+ * Prefer C11 _Generic for better compile-times and simpler code. Note: 'char'
* is not type-compatible with 'signed char', and we define a separate case.
*/
#define __scalar_type_to_expr_cases(type) \
__scalar_type_to_expr_cases(long), \
__scalar_type_to_expr_cases(long long), \
default: (x)))
-#endif
/* Is this type a native word size -- useful for atomic operations */
#define __native_word(t) \
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs);
-struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
- gfp_t gfp, unsigned long attrs);
int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs);
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs);
int dma_direct_supported(struct device *dev, u64 mask);
+bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
#endif /* _LINUX_DMA_DIRECT_H */
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
+bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
unsigned long dma_get_merge_boundary(struct device *dev);
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
{
return 0;
}
+static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
+{
+ return false;
+}
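/*
 * Usage sketch (editorial, mirroring the xsk_buff_pool conversion
 * later in this patch): callers can cache the answer once at setup
 * time and skip per-buffer syncs on dma-coherent hardware:
 *
 *	pool->dma_need_sync = dma_need_sync(dev, dma_addr);
 *	...
 *	if (pool->dma_need_sync)
 *		dma_sync_single_for_cpu(dev, dma_addr, len, DMA_FROM_DEVICE);
 */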
static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
return 0;
* associated with ConOut
*/
#define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
+#define LINUX_EFI_ARM_CPU_STATE_TABLE_GUID EFI_GUID(0xef79e4aa, 0x3c3d, 0x4989, 0xb9, 0x02, 0x07, 0xa9, 0x43, 0xe5, 0x50, 0xd2)
#define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f)
#define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b)
#define LINUX_EFI_TPM_EVENT_LOG_GUID EFI_GUID(0xb7799cb0, 0xeca2, 0x4943, 0x96, 0x67, 0x1f, 0xae, 0x07, 0xb7, 0x47, 0xfa)
struct {
phys_addr_t base;
phys_addr_t size;
- } entry[0];
+ } entry[];
};
-#define EFI_MEMRESERVE_SIZE(count) (sizeof(struct linux_efi_memreserve) + \
- (count) * sizeof(((struct linux_efi_memreserve *)0)->entry[0]))
-
#define EFI_MEMRESERVE_COUNT(size) (((size) - sizeof(struct linux_efi_memreserve)) \
- / sizeof(((struct linux_efi_memreserve *)0)->entry[0]))
+ / sizeof_field(struct linux_efi_memreserve, entry[0]))
void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size);
bool bpf_jit_needs_zext(void);
bool bpf_helper_changes_pkt_data(void *func);
-static inline bool bpf_dump_raw_ok(void)
+static inline bool bpf_dump_raw_ok(const struct cred *cred)
{
/* Reconstruction of call-sites is dependent on kallsyms,
	 * thus the dump is subject to the same restriction.
*/
- return kallsyms_show_value() == 1;
+ return kallsyms_show_value(cred);
}
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
#define IOCB_SYNC (1 << 5)
#define IOCB_WRITE (1 << 6)
#define IOCB_NOWAIT (1 << 7)
+#define IOCB_NOIO (1 << 9)
struct kiocb {
struct file *ki_filp;
struct iovec *fast_pointer,
struct iovec **ret_pointer);
-extern ssize_t __vfs_read(struct file *, char __user *, size_t, loff_t *);
extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
extern int kernel_read_file_from_fd(int, void **, loff_t *, loff_t,
enum kernel_read_file_id);
extern ssize_t kernel_read(struct file *, void *, size_t, loff_t *);
+ssize_t __kernel_read(struct file *file, void *buf, size_t count, loff_t *pos);
extern ssize_t kernel_write(struct file *, const void *, size_t, loff_t *);
extern ssize_t __kernel_write(struct file *, const void *, size_t, loff_t *);
extern struct file * open_exec(const char *);
* @channel: host1x channel associated with this client
* @syncpts: array of syncpoints requested for this client
* @num_syncpts: number of syncpoints requested for this client
+ * @parent: pointer to parent structure
+ * @usecount: reference count for this structure
+ * @lock: mutex for mutually exclusive concurrency
*/
struct host1x_client {
struct list_head list;
#define WLAN_AKM_SUITE_TDLS SUITE(0x000FAC, 7)
#define WLAN_AKM_SUITE_SAE SUITE(0x000FAC, 8)
#define WLAN_AKM_SUITE_FT_OVER_SAE SUITE(0x000FAC, 9)
+#define WLAN_AKM_SUITE_AP_PEER_KEY SUITE(0x000FAC, 10)
#define WLAN_AKM_SUITE_8021X_SUITE_B SUITE(0x000FAC, 11)
#define WLAN_AKM_SUITE_8021X_SUITE_B_192 SUITE(0x000FAC, 12)
+#define WLAN_AKM_SUITE_FT_8021X_SHA384 SUITE(0x000FAC, 13)
#define WLAN_AKM_SUITE_FILS_SHA256 SUITE(0x000FAC, 14)
#define WLAN_AKM_SUITE_FILS_SHA384 SUITE(0x000FAC, 15)
#define WLAN_AKM_SUITE_FT_FILS_SHA256 SUITE(0x000FAC, 16)
#define WLAN_AKM_SUITE_FT_FILS_SHA384 SUITE(0x000FAC, 17)
#define WLAN_AKM_SUITE_OWE SUITE(0x000FAC, 18)
+#define WLAN_AKM_SUITE_FT_PSK_SHA384 SUITE(0x000FAC, 19)
+#define WLAN_AKM_SUITE_PSK_SHA384 SUITE(0x000FAC, 20)
#define WLAN_MAX_KEY_LEN 32
#define VLAN_ETH_DATA_LEN 1500 /* Max. octets in payload */
#define VLAN_ETH_FRAME_LEN 1518 /* Max. octets in frame sans FCS */
+#define VLAN_MAX_DEPTH 8 /* Max. number of nested VLAN tags parsed */
+
/*
* struct vlan_hdr - vlan header
* @h_vlan_TCI: priority and VLAN ID
* Returns the EtherType of the packet, regardless of whether it is
* vlan encapsulated (normal or hardware accelerated) or not.
*/
-static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
+static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
int *depth)
{
- unsigned int vlan_depth = skb->mac_len;
+ unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH;
/* if type is 802.1Q/AD then the header should already be
* present at mac_len - VLAN_HLEN (if mac_len > 0), or at
vlan_depth = ETH_HLEN;
}
do {
- struct vlan_hdr *vh;
+ struct vlan_hdr vhdr, *vh;
- if (unlikely(!pskb_may_pull(skb,
- vlan_depth + VLAN_HLEN)))
+ vh = skb_header_pointer(skb, vlan_depth, sizeof(vhdr), &vhdr);
+ if (unlikely(!vh || !--parse_depth))
return 0;
- vh = (struct vlan_hdr *)(skb->data + vlan_depth);
type = vh->h_vlan_encapsulated_proto;
vlan_depth += VLAN_HLEN;
} while (eth_type_vlan(type));
* Returns the EtherType of the packet, regardless of whether it is
* vlan encapsulated (normal or hardware accelerated) or not.
*/
-static inline __be16 vlan_get_protocol(struct sk_buff *skb)
+static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
{
return __vlan_get_protocol(skb, skb->protocol, NULL);
}
+/* A getter for the SKB protocol field which will handle VLAN tags consistently
+ * whether VLAN acceleration is enabled or not.
+ */
+static inline __be16 skb_protocol(const struct sk_buff *skb, bool skip_vlan)
+{
+ if (!skip_vlan)
+ /* VLAN acceleration strips the VLAN header from the skb and
+ * moves it to skb->vlan_proto
+ */
+ return skb_vlan_tag_present(skb) ? skb->vlan_proto : skb->protocol;
+
+ return vlan_get_protocol(skb);
+}
+
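/*
 * Usage sketch (hypothetical helper, editorial): callers that must see
 * the L3 EtherType regardless of VLAN acceleration can build directly
 * on skb_protocol(), e.g.:
 */
static inline bool skb_is_ip_any(const struct sk_buff *skb)
{
	__be16 proto = skb_protocol(skb, true);

	return proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6);
}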
static inline void vlan_set_encap_proto(struct sk_buff *skb,
struct vlan_hdr *vhdr)
{
#define DMA_PTE_SNP BIT_ULL(11)
#define DMA_FL_PTE_PRESENT BIT_ULL(0)
+#define DMA_FL_PTE_US BIT_ULL(2)
#define DMA_FL_PTE_XD BIT_ULL(63)
#define ADDR_WIDTH_5LEVEL (57)
#ifndef _LINUX_IRQ_WORK_H
#define _LINUX_IRQ_WORK_H
-#include <linux/llist.h>
+#include <linux/smp_types.h>
/*
* An entry can be in one of four states:
* busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed
*/
-/* flags share CSD_FLAG_ space */
-
-#define IRQ_WORK_PENDING BIT(0)
-#define IRQ_WORK_BUSY BIT(1)
-
-/* Doesn't want IPI, wait for tick: */
-#define IRQ_WORK_LAZY BIT(2)
-/* Run hard IRQ context, even on RT */
-#define IRQ_WORK_HARD_IRQ BIT(3)
-
-#define IRQ_WORK_CLAIMED (IRQ_WORK_PENDING | IRQ_WORK_BUSY)
-
-/*
- * structure shares layout with single_call_data_t.
- */
struct irq_work {
- struct llist_node llnode;
- atomic_t flags;
+ union {
+ struct __call_single_node node;
+ struct {
+ struct llist_node llnode;
+ atomic_t flags;
+ };
+ };
void (*func)(struct irq_work *);
};
#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \
2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1)
+struct cred;
struct module;
static inline int is_kernel_inittext(unsigned long addr)
int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
/* How and when do we show kallsyms values? */
-extern int kallsyms_show_value(void);
+extern bool kallsyms_show_value(const struct cred *cred);
#else /* !CONFIG_KALLSYMS */
return -ERANGE;
}
-static inline int kallsyms_show_value(void)
+static inline bool kallsyms_show_value(const struct cred *cred)
{
return false;
}
char *remcom_out_buffer,
struct pt_regs *regs);
+/**
+ * kgdb_arch_handle_qxfer_pkt - Handle architecture-specific GDB XML
+ * packets.
+ * @remcom_in_buffer: The buffer of the packet we have read.
+ * @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into.
+ */
+
+extern void
+kgdb_arch_handle_qxfer_pkt(char *remcom_in_buffer,
+ char *remcom_out_buffer);
+
/**
* kgdb_call_nmi_hook - Call kgdb_nmicallback() on the current CPU
* @ignored: This parameter is only here to match the prototype.
* the I/O driver.
* @post_exception: Pointer to a function that will do any cleanup work
* for the I/O driver.
- * @is_console: 1 if the end device is a console 0 if the I/O device is
- * not a console
+ * @cons: valid if the I/O device is a console; else NULL.
*/
struct kgdb_io {
const char *name;
void (*deinit) (void);
void (*pre_exception) (void);
void (*post_exception) (void);
- int is_console;
+ struct console *cons;
};
extern const struct kgdb_arch arch_kgdb_ops;
extern int kgdb_isremovedbreak(unsigned long addr);
extern void kgdb_schedule_breakpoint(void);
+extern int kgdb_has_hit_break(unsigned long addr);
extern int
kgdb_handle_exception(int ex_vector, int signo, int err_code,
#define ATA_SCSI_COMPAT_IOCTL /* empty */
#endif
extern int ata_scsi_queuecmd(struct Scsi_Host *h, struct scsi_cmnd *cmd);
-#if IS_ENABLED(CONFIG_ATA)
+#if IS_REACHABLE(CONFIG_ATA)
bool ata_scsi_dma_need_drain(struct request *rq);
#else
#define ata_scsi_dma_need_drain NULL
size_t buffer_size)
LSM_HOOK(void, LSM_RET_VOID, inode_getsecid, struct inode *inode, u32 *secid)
LSM_HOOK(int, 0, inode_copy_up, struct dentry *src, struct cred **new)
-LSM_HOOK(int, 0, inode_copy_up_xattr, const char *name)
+LSM_HOOK(int, -EOPNOTSUPP, inode_copy_up_xattr, const char *name)
LSM_HOOK(int, 0, kernfs_init_security, struct kernfs_node *kn_dir,
struct kernfs_node *kn)
LSM_HOOK(int, 0, file_permission, struct file *file, int mask)
unsigned long flags)
LSM_HOOK(void, LSM_RET_VOID, key_free, struct key *key)
LSM_HOOK(int, 0, key_permission, key_ref_t key_ref, const struct cred *cred,
- unsigned perm)
+ enum key_need_perm need_perm)
LSM_HOOK(int, 0, key_getsecurity, struct key *key, char **_buffer)
#endif /* CONFIG_KEYS */
MLX5_REG_MCDA = 0x9063,
MLX5_REG_MCAM = 0x907f,
MLX5_REG_MIRC = 0x9162,
+ MLX5_REG_SBCAM = 0xB01F,
MLX5_REG_RESOURCE_DUMP = 0xC000,
};
u8 syndrome[0x20];
- u8 reserved_at_40[0x40];
+ u8 reserved_at_40[0x20];
+ u8 ece[0x20];
};
struct mlx5_ifc_rst2init_qp_in_bits {
u8 opt_param_mask[0x20];
- u8 reserved_at_a0[0x20];
+ u8 ece[0x20];
struct mlx5_ifc_qpc_bits qpc;
u8 syndrome[0x20];
- u8 reserved_at_40[0x40];
+ u8 reserved_at_40[0x20];
+ u8 ece[0x20];
};
struct mlx5_ifc_init2init_qp_in_bits {
u8 opt_param_mask[0x20];
- u8 reserved_at_a0[0x20];
+ u8 ece[0x20];
struct mlx5_ifc_qpc_bits qpc;
u8 untagged_buff[0x4];
};
+struct mlx5_ifc_sbcam_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 feature_group[0x8];
+ u8 reserved_at_10[0x8];
+ u8 access_reg_group[0x8];
+
+ u8 reserved_at_20[0x20];
+
+ u8 sb_access_reg_cap_mask[4][0x20];
+
+ u8 reserved_at_c0[0x80];
+
+ u8 sb_feature_cap_mask[4][0x20];
+
+ u8 reserved_at_1c0[0x40];
+
+ u8 cap_total_buffer_size[0x20];
+
+ u8 cap_cell_size[0x10];
+ u8 cap_max_pg_buffers[0x8];
+ u8 cap_num_pool_supported[0x8];
+
+ u8 reserved_at_240[0x8];
+ u8 cap_sbsr_stat_size[0x8];
+ u8 cap_max_tclass_data[0x8];
+ u8 cap_max_cpu_ingress_tclass_sb[0x8];
+};
+
struct mlx5_ifc_pbmc_reg_bits {
u8 reserved_at_0[0x8];
u8 local_port[0x8];
*/
unsigned long anon_cost;
unsigned long file_cost;
- /* Evictions & activations on the inactive file list */
- atomic_long_t inactive_age;
+ /* Non-resident age, driven by LRU movement */
+ atomic_long_t nonresident_age;
/* Refaults at the time of last reclaim cycle */
unsigned long refaults;
/* Various lruvec state flags (enum lruvec_flags) */
return this_cpu_read(softnet_data.xmit.recursion);
}
-#define XMIT_RECURSION_LIMIT 10
+#define XMIT_RECURSION_LIMIT 8
static inline bool dev_xmit_recursion(void)
{
return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
int ipt_register_table(struct net *net, const struct xt_table *table,
const struct ipt_replace *repl,
const struct nf_hook_ops *ops, struct xt_table **res);
+
+void ipt_unregister_table_pre_exit(struct net *net, struct xt_table *table,
+ const struct nf_hook_ops *ops);
+
+void ipt_unregister_table_exit(struct net *net, struct xt_table *table);
+
void ipt_unregister_table(struct net *net, struct xt_table *table,
const struct nf_hook_ops *ops);
const struct nf_hook_ops *ops, struct xt_table **res);
void ip6t_unregister_table(struct net *net, struct xt_table *table,
const struct nf_hook_ops *ops);
+void ip6t_unregister_table_pre_exit(struct net *net, struct xt_table *table,
+ const struct nf_hook_ops *ops);
+void ip6t_unregister_table_exit(struct net *net, struct xt_table *table);
extern unsigned int ip6t_do_table(struct sk_buff *skb,
const struct nf_hook_state *state,
struct xt_table *table);
*/
static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
{
- struct pci_dev *bridge = pci_upstream_bridge(dev);
-
- while (bridge) {
- if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
- return bridge;
- bridge = pci_upstream_bridge(bridge);
+ while (dev) {
+ if (pci_is_pcie(dev) &&
+ pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
+ return dev;
+ dev = pci_upstream_bridge(dev);
}
return NULL;
int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd);
int phy_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
int phy_do_ioctl_running(struct net_device *dev, struct ifreq *ifr, int cmd);
+int phy_disable_interrupts(struct phy_device *phydev);
void phy_request_interrupt(struct phy_device *phydev);
void phy_free_interrupt(struct phy_device *phydev);
void phy_print_status(struct phy_device *phydev);
static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
{
+ u16 elem_per_page = p_chain->elem_per_page;
+ u32 prod = p_chain->u.chain16.prod_idx;
+ u32 cons = p_chain->u.chain16.cons_idx;
u16 used;
- used = (u16) (((u32)0x10000 +
- (u32)p_chain->u.chain16.prod_idx) -
- (u32)p_chain->u.chain16.cons_idx);
+ if (prod < cons)
+ prod += (u32)U16_MAX + 1;
+
+ used = (u16)(prod - cons);
if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
- used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
- p_chain->u.chain16.cons_idx / p_chain->elem_per_page;
+ used -= prod / elem_per_page - cons / elem_per_page;
return (u16)(p_chain->capacity - used);
}
static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain)
{
+ u16 elem_per_page = p_chain->elem_per_page;
+ u64 prod = p_chain->u.chain32.prod_idx;
+ u64 cons = p_chain->u.chain32.cons_idx;
u32 used;
- used = (u32) (((u64)0x100000000ULL +
- (u64)p_chain->u.chain32.prod_idx) -
- (u64)p_chain->u.chain32.cons_idx);
+ if (prod < cons)
+ prod += (u64)U32_MAX + 1;
+
+ used = (u32)(prod - cons);
if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
- used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
- p_chain->u.chain32.cons_idx / p_chain->elem_per_page;
+ used -= (u32)(prod / elem_per_page - cons / elem_per_page);
return p_chain->capacity - used;
}
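/*
 * Worked example (editorial) for the 16-bit variant above: with
 * prod_idx == 2 and cons_idx == 65534 the producer has wrapped, so
 * prod is widened to 65538 before subtracting and used == 4; the
 * function then reports capacity - 4 elements left, as expected.
 */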
* Loop over each sg element in the given sg_table object.
*/
#define for_each_sgtable_sg(sgt, sg, i) \
- for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
+ for_each_sg((sgt)->sgl, sg, (sgt)->orig_nents, i)
/*
* Loop over each sg element in the given *DMA mapped* sg_table object.
* of the each element.
*/
#define for_each_sgtable_dma_sg(sgt, sg, i) \
- for_each_sg(sgt->sgl, sg, sgt->nents, i)
+ for_each_sg((sgt)->sgl, sg, (sgt)->nents, i)
/**
* sg_chain - Chain two sglists together
* See also for_each_sg_page(). In each loop it operates on PAGE_SIZE unit.
*/
#define for_each_sgtable_page(sgt, piter, pgoffset) \
- for_each_sg_page(sgt->sgl, piter, sgt->orig_nents, pgoffset)
+ for_each_sg_page((sgt)->sgl, piter, (sgt)->orig_nents, pgoffset)
/**
* for_each_sgtable_dma_page - iterate over the DMA mapped sg_table object
* unit.
*/
#define for_each_sgtable_dma_page(sgt, dma_iter, pgoffset) \
- for_each_sg_dma_page(sgt->sgl, dma_iter, sgt->nents, pgoffset)
+ for_each_sg_dma_page((sgt)->sgl, dma_iter, (sgt)->nents, pgoffset)
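/*
 * Why the added parentheses matter (editorial): "->" binds tighter
 * than "?:", so passing an expression such as "use_a ? sgt_a : sgt_b"
 * would previously expand to "use_a ? sgt_a : sgt_b->sgl" and break;
 * "(sgt)->sgl" evaluates the whole argument first.
 */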
/*
unsigned int ptrace;
#ifdef CONFIG_SMP
- struct llist_node wake_entry;
- unsigned int wake_entry_type;
int on_cpu;
+ struct __call_single_node wake_entry;
#ifdef CONFIG_THREAD_INFO_IN_TASK
/* Current CPU: */
unsigned int cpu;
bpf_prog_put(prog);
}
+static inline int psock_replace_prog(struct bpf_prog **pprog,
+ struct bpf_prog *prog,
+ struct bpf_prog *old)
+{
+ if (cmpxchg(pprog, old, prog) != old)
+ return -ENOENT;
+
+ if (old)
+ bpf_prog_put(old);
+
+ return 0;
+}
+
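/*
 * Usage sketch (hypothetical caller, editorial): a detach path can now
 * fail cleanly when userspace passes a program that is no longer the
 * attached one, e.g.:
 *
 *	err = psock_replace_prog(&progs->msg_parser, NULL, old);
 *	if (err == -ENOENT)
 *		return err;	// "old" was already swapped out
 */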
static inline void psock_progs_drop(struct sk_psock_progs *progs)
{
psock_set_prog(&progs->msg_parser, NULL);
#include <linux/list.h>
#include <linux/cpumask.h>
#include <linux/init.h>
-#include <linux/llist.h>
+#include <linux/smp_types.h>
typedef void (*smp_call_func_t)(void *info);
typedef bool (*smp_cond_func_t)(int cpu, void *info);
-enum {
- CSD_FLAG_LOCK = 0x01,
-
- /* IRQ_WORK_flags */
-
- CSD_TYPE_ASYNC = 0x00,
- CSD_TYPE_SYNC = 0x10,
- CSD_TYPE_IRQ_WORK = 0x20,
- CSD_TYPE_TTWU = 0x30,
- CSD_FLAG_TYPE_MASK = 0xF0,
-};
-
/*
* structure shares (partial) layout with struct irq_work
*/
struct __call_single_data {
- struct llist_node llist;
- unsigned int flags;
+ union {
+ struct __call_single_node node;
+ struct {
+ struct llist_node llist;
+ unsigned int flags;
+ };
+ };
smp_call_func_t func;
void *info;
};
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_SMP_TYPES_H
+#define __LINUX_SMP_TYPES_H
+
+#include <linux/llist.h>
+
+enum {
+ CSD_FLAG_LOCK = 0x01,
+
+ IRQ_WORK_PENDING = 0x01,
+ IRQ_WORK_BUSY = 0x02,
+ IRQ_WORK_LAZY = 0x04, /* No IPI, wait for tick */
+ IRQ_WORK_HARD_IRQ = 0x08, /* IRQ context on PREEMPT_RT */
+
+ IRQ_WORK_CLAIMED = (IRQ_WORK_PENDING | IRQ_WORK_BUSY),
+
+ CSD_TYPE_ASYNC = 0x00,
+ CSD_TYPE_SYNC = 0x10,
+ CSD_TYPE_IRQ_WORK = 0x20,
+ CSD_TYPE_TTWU = 0x30,
+
+ CSD_FLAG_TYPE_MASK = 0xF0,
+};
+
+/*
+ * struct __call_single_node is the primary type on
+ * smp.c:call_single_queue.
+ *
+ * flush_smp_call_function_queue() only reads the type from
+ * __call_single_node::u_flags as a regular load; the above
+ * (anonymous) enum defines all the bits of this word.
+ *
+ * Other bits are not modified until the type is known.
+ *
+ * CSD_TYPE_SYNC/ASYNC:
+ * struct {
+ * struct llist_node node;
+ * unsigned int flags;
+ * smp_call_func_t func;
+ * void *info;
+ * };
+ *
+ * CSD_TYPE_IRQ_WORK:
+ * struct {
+ * struct llist_node node;
+ * atomic_t flags;
+ * void (*func)(struct irq_work *);
+ * };
+ *
+ * CSD_TYPE_TTWU:
+ * struct {
+ * struct llist_node node;
+ * unsigned int flags;
+ * };
+ *
+ */
+
+struct __call_single_node {
+ struct llist_node llist;
+ union {
+ unsigned int u_flags;
+ atomic_t a_flags;
+ };
+};
+
+#endif /* __LINUX_SMP_TYPES_H */
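/*
 * Layout-check sketch (editorial; assumption: such asserts belong next
 * to the users, e.g. kernel/smp.c, where the multiplexed types are
 * visible): every type queued on call_single_queue must overlay
 * struct __call_single_node at offset 0, e.g.:
 *
 *	BUILD_BUG_ON(offsetof(struct irq_work, llnode) !=
 *		     offsetof(struct __call_single_data, llist));
 *	BUILD_BUG_ON(offsetof(struct irq_work, flags) !=
 *		     offsetof(struct __call_single_data, flags));
 */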
};
/* linux/mm/workingset.c */
+void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg);
void workingset_refault(struct page *page, void *shadow);
void workingset_activation(struct page *page);
extern long do_sys_ftruncate(unsigned int fd, loff_t length, int small);
-static inline long ksys_ftruncate(unsigned int fd, unsigned long length)
+static inline long ksys_ftruncate(unsigned int fd, loff_t length)
{
return do_sys_ftruncate(fd, length, 1);
}
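/*
 * Why loff_t matters here (editorial): on 32-bit kernels
 * "unsigned long" is only 32 bits wide, so an in-kernel caller passing
 * a length such as 1ULL << 32 would have had it silently truncated to
 * 0 before do_sys_ftruncate() ran; loff_t preserves all 64 bits.
 */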
extern void timekeeping_inject_sleeptime64(const struct timespec64 *delta);
-/*
+/**
* struct system_time_snapshot - simultaneous raw/real time capture with
- * counter value
+ * counter value
* @cycles: Clocksource counter value to produce the system times
* @real: Realtime system time
* @raw: Monotonic raw system time
u8 cs_was_changed_seq;
};
-/*
+/**
* struct system_device_crosststamp - system/device cross-timestamp
- * (syncronized capture)
+ * (synchronized capture)
* @device: Device time
* @sys_realtime: Realtime simultaneous with device time
* @sys_monoraw: Monotonic raw simultaneous with device time
ktime_t sys_monoraw;
};
-/*
+/**
* struct system_counterval_t - system counter value with the pointer to the
- * corresponding clocksource
+ * corresponding clocksource
* @cycles: System counter value
* @cs: Clocksource corresponding to system counter value. Used by
- * timekeeping code to verify comparibility of two cycle values
+ *	timekeeping code to verify comparability of two cycle values
*/
struct system_counterval_t {
u64 cycles;
u16 digest_size;
} __packed;
+#define TCG_SPECID_SIG "Spec ID Event03"
+
struct tcg_efi_specid_event_head {
u8 signature[16];
u32 platform_class;
int i;
int j;
u32 count, event_type;
+ const u8 zero_digest[sizeof(event_header->digest)] = {0};
marker = event;
marker_start = marker;
count = READ_ONCE(event->count);
event_type = READ_ONCE(event->event_type);
+ /* Verify that it's the log header */
+ if (event_header->pcr_idx != 0 ||
+ event_header->event_type != NO_ACTION ||
+ memcmp(event_header->digest, zero_digest, sizeof(zero_digest))) {
+ size = 0;
+ goto out;
+ }
+
efispecid = (struct tcg_efi_specid_event_head *)event_header->event;
/* Check if event is malformed. */
- if (count > efispecid->num_algs) {
+ if (memcmp(efispecid->signature, TCG_SPECID_SIG,
+ sizeof(TCG_SPECID_SIG)) || count > efispecid->num_algs) {
size = 0;
goto out;
}
extern void *vmalloc_user(unsigned long size);
extern void *vmalloc_node(unsigned long size, int node);
extern void *vzalloc_node(unsigned long size, int node);
-extern void *vmalloc_exec(unsigned long size);
extern void *vmalloc_32(unsigned long size);
extern void *vmalloc_32_user(unsigned long size);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask);
static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst,
struct sk_buff *skb)
{
- struct neighbour *n = dst->ops->neigh_lookup(dst, skb, NULL);
+ struct neighbour *n = NULL;
+
+	/* The packets from tunnel devices (e.g. bareudp) may have only
+	 * metadata in the dst pointer of skb. Hence a NULL check of
+	 * the neigh_lookup op is needed.
+ */
+ if (dst->ops->neigh_lookup)
+ n = dst->ops->neigh_lookup(dst, skb, NULL);
+
return IS_ERR(n) ? NULL : n;
}
}
#ifdef CONFIG_BPF_SYSCALL
-int flow_dissector_bpf_prog_attach(struct net *net, struct bpf_prog *prog);
+int flow_dissector_bpf_prog_attach_check(struct net *net,
+ struct bpf_prog *prog);
#endif /* CONFIG_BPF_SYSCALL */
#endif
struct net_device *dev;
enum flow_block_binder_type binder_type;
void *data;
+ void *cb_priv;
void (*cleanup)(struct flow_block_cb *block_cb);
};
struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
void *cb_ident, void *cb_priv,
void (*release)(void *cb_priv));
+struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
+ void *cb_ident, void *cb_priv,
+ void (*release)(void *cb_priv),
+ struct flow_block_offload *bo,
+ struct net_device *dev, void *data,
+ void *indr_cb_priv,
+ void (*cleanup)(struct flow_block_cb *block_cb));
void flow_block_cb_free(struct flow_block_cb *block_cb);
struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
list_move(&block_cb->list, &offload->cb_list);
}
+static inline void flow_indr_block_cb_remove(struct flow_block_cb *block_cb,
+ struct flow_block_offload *offload)
+{
+ list_del(&block_cb->indr.list);
+ list_move(&block_cb->list, &offload->cb_list);
+}
+
bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
struct list_head *driver_block_list);
}
typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
- enum tc_setup_type type, void *type_data);
+ enum tc_setup_type type, void *type_data,
+ void *data,
+ void (*cleanup)(struct flow_block_cb *block_cb));
int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv);
void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
- flow_setup_cb_t *setup_cb);
+ void (*release)(void *cb_priv));
int flow_indr_dev_setup_offload(struct net_device *dev,
enum tc_setup_type type, void *data,
struct flow_block_offload *bo,
* do additional, common, filtering and return an error
* @post_doit: called after an operation's doit callback, it may
* undo operations done by pre_doit, for example release locks
- * @mcast_bind: a socket bound to the given multicast group (which
- * is given as the offset into the groups array)
- * @mcast_unbind: a socket was unbound from the given multicast group.
- * Note that unbind() will not be called symmetrically if the
- * generic netlink family is removed while there are still open
- * sockets.
- * @attrbuf: buffer to store parsed attributes (private)
* @mcgrps: multicast groups used by this family
* @n_mcgrps: number of multicast groups
* @mcgrp_offset: starting number of multicast group IDs in this family
void (*post_doit)(const struct genl_ops *ops,
struct sk_buff *skb,
struct genl_info *info);
- int (*mcast_bind)(struct net *net, int group);
- void (*mcast_unbind)(struct net *net, int group);
- struct nlattr ** attrbuf; /* private */
const struct genl_ops * ops;
const struct genl_multicast_group *mcgrps;
unsigned int n_ops;
* | |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
- * C bit indicates contol message when set, data message when unset.
+ * C bit indicates control message when set, data message when unset.
* For a control message, proto/ctype is interpreted as a type of
* control message. For data messages, proto/ctype is the IP protocol
* of the next header.
#include <linux/ip.h>
#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
#include <net/inet_sock.h>
#include <net/dsfield.h>
static inline int INET_ECN_set_ce(struct sk_buff *skb)
{
- switch (skb->protocol) {
+ switch (skb_protocol(skb, true)) {
case cpu_to_be16(ETH_P_IP):
if (skb_network_header(skb) + sizeof(struct iphdr) <=
skb_tail_pointer(skb))
static inline int INET_ECN_set_ect1(struct sk_buff *skb)
{
- switch (skb->protocol) {
+ switch (skb_protocol(skb, true)) {
case cpu_to_be16(ETH_P_IP):
if (skb_network_header(skb) + sizeof(struct iphdr) <=
skb_tail_pointer(skb))
{
__u8 inner;
- if (skb->protocol == htons(ETH_P_IP))
+ switch (skb_protocol(skb, true)) {
+ case htons(ETH_P_IP):
inner = ip_hdr(skb)->tos;
- else if (skb->protocol == htons(ETH_P_IPV6))
+ break;
+ case htons(ETH_P_IPV6):
inner = ipv6_get_dsfield(ipv6_hdr(skb));
- else
+ break;
+ default:
return 0;
+ }
return INET_ECN_decapsulate(skb, oiph->tos, inner);
}
{
__u8 inner;
- if (skb->protocol == htons(ETH_P_IP))
+ switch (skb_protocol(skb, true)) {
+ case htons(ETH_P_IP):
inner = ip_hdr(skb)->tos;
- else if (skb->protocol == htons(ETH_P_IPV6))
+ break;
+ case htons(ETH_P_IPV6):
inner = ipv6_get_dsfield(ipv6_hdr(skb));
- else
+ break;
+ default:
return 0;
+ }
return INET_ECN_decapsulate(skb, ipv6_get_dsfield(oipv6h), inner);
}
struct ip_tunnel_parm *p, __u32 fwmark);
void ip_tunnel_setup(struct net_device *dev, unsigned int net_id);
+extern const struct header_ops ip_tunnel_header_ops;
+__be16 ip_tunnel_parse_protocol(const struct sk_buff *skb);
+
struct ip_tunnel_encap_ops {
size_t (*encap_hlen)(struct ip_tunnel_encap *e);
int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
#include <linux/bpf-netns.h>
struct bpf_prog;
+struct bpf_prog_array;
struct netns_bpf {
- struct bpf_prog __rcu *progs[MAX_NETNS_BPF_ATTACH_TYPE];
- struct bpf_link *links[MAX_NETNS_BPF_ATTACH_TYPE];
+ /* Array of programs to run compiled from progs or links */
+ struct bpf_prog_array __rcu *run_array[MAX_NETNS_BPF_ATTACH_TYPE];
+ struct bpf_prog *progs[MAX_NETNS_BPF_ATTACH_TYPE];
+ struct list_head links[MAX_NETNS_BPF_ATTACH_TYPE];
};
#endif /* __NETNS_BPF_H__ */
}
}
-static inline __be16 tc_skb_protocol(const struct sk_buff *skb)
-{
- /* We need to take extra care in case the skb came via
- * vlan accelerated path. In that case, use skb->vlan_proto
- * as the original vlan header was already stripped.
- */
- if (skb_vlan_tag_present(skb))
- return skb->vlan_proto;
- return skb->protocol;
-}
-
/* Calculate maximal size of packet seen by hard_start_xmit
routine of this device.
*/
ipv4_is_anycast_6to4(a))
/* Flags used for the bind address copy functions. */
-#define SCTP_ADDR6_ALLOWED 0x00000001 /* IPv6 address is allowed by
+#define SCTP_ADDR4_ALLOWED 0x00000001 /* IPv4 address is allowed by
local sock family */
-#define SCTP_ADDR4_PEERSUPP 0x00000002 /* IPv4 address is supported by
+#define SCTP_ADDR6_ALLOWED 0x00000002 /* IPv6 address is allowed by
+ local sock family */
+#define SCTP_ADDR4_PEERSUPP 0x00000004 /* IPv4 address is supported by
peer */
-#define SCTP_ADDR6_PEERSUPP 0x00000004 /* IPv6 address is supported by
+#define SCTP_ADDR6_PEERSUPP 0x00000008 /* IPv6 address is supported by
peer */
/* Reasons to retransmit. */
* be copied.
*/
#define SK_USER_DATA_NOCOPY 1UL
-#define SK_USER_DATA_PTRMASK ~(SK_USER_DATA_NOCOPY)
+#define SK_USER_DATA_BPF 2UL /* Managed by BPF */
+#define SK_USER_DATA_PTRMASK ~(SK_USER_DATA_NOCOPY | SK_USER_DATA_BPF)
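/* Masking contract (editorial note): the flag bits above occupy the
 * otherwise-unused low bits of the sk_user_data pointer, so readers
 * must strip them before dereferencing, e.g.:
 *
 *	void *p = (void *)((uintptr_t)raw & SK_USER_DATA_PTRMASK);
 */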
/**
* sk_user_data_is_nocopy - Test if sk_user_data pointer must not be copied
static inline void sk_set_socket(struct sock *sk, struct socket *sock)
{
- sk_tx_queue_clear(sk);
sk->sk_socket = sock;
}
#define XFRM_GRO 32
#define XFRM_ESP_NO_TRAILER 64
#define XFRM_DEV_RESUME 128
+#define XFRM_XMIT 256
__u32 status;
#define CRYPTO_SUCCESS 1
u32 headroom;
u32 chunk_size;
u32 frame_len;
- bool cheap_dma;
+ bool dma_need_sync;
bool unaligned;
void *addrs;
struct device *dev;
void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb);
static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
{
- if (xskb->pool->cheap_dma)
+ if (!xskb->pool->dma_need_sync)
return;
xp_dma_sync_for_cpu_slow(xskb);
static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool,
dma_addr_t dma, size_t size)
{
- if (pool->cheap_dma)
+ if (!pool->dma_need_sync)
return;
xp_dma_sync_for_device_slow(pool, dma, size);
* @direction: stream direction, playback/recording
* @metadata_set: metadata set flag, true when set
* @next_track: has userspace signal next track transition, true when set
+ * @partial_drain: undergoing partial_drain for stream, true when set
* @private_data: pointer to DSP private data
* @dma_buffer: allocated buffer if any
*/
enum snd_compr_direction direction;
bool metadata_set;
bool next_track;
+ bool partial_drain;
void *private_data;
struct snd_dma_buffer dma_buffer;
};
if (snd_BUG_ON(!stream))
return;
- stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+	/* in the partial_drain case we go back to the running state on success */
+ if (stream->partial_drain) {
+ stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
+ stream->partial_drain = false; /* clear this flag as well */
+ } else {
+ stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+ }
wake_up(&stream->runtime->sleep);
}
#define SND_DMAENGINE_PCM_DRV_NAME "snd_dmaengine_pcm"
+struct dmaengine_pcm {
+ struct dma_chan *chan[SNDRV_PCM_STREAM_LAST + 1];
+ const struct snd_dmaengine_pcm_config *config;
+ struct snd_soc_component component;
+ unsigned int flags;
+};
+
+static inline struct dmaengine_pcm *soc_component_to_pcm(struct snd_soc_component *p)
+{
+ return container_of(p, struct dmaengine_pcm, component);
+}
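/*
 * Usage sketch (hypothetical accessor, editorial): component callbacks
 * receive only the embedded struct snd_soc_component, so they recover
 * the surrounding PCM and its DMA channel via the helper above:
 */
static inline struct dma_chan *
soc_component_to_chan(struct snd_soc_component *component, int stream)
{
	return soc_component_to_pcm(component)->chan[stream];
}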
#endif
const struct snd_soc_component_driver *component_driver,
struct snd_soc_dai_driver *dai_drv, int num_dai);
void snd_soc_unregister_component(struct device *dev);
+struct snd_soc_component *snd_soc_lookup_component_nolocked(struct device *dev,
+ const char *driver_name);
struct snd_soc_component *snd_soc_lookup_component(struct device *dev,
const char *driver_name);
struct snd_soc_dai *snd_soc_register_dai(struct snd_soc_component *component,
struct snd_soc_dai_driver *dai_drv,
bool legacy_dai_naming);
+struct snd_soc_dai *devm_snd_soc_register_dai(struct device *dev,
+ struct snd_soc_component *component,
+ struct snd_soc_dai_driver *dai_drv,
+ bool legacy_dai_naming);
void snd_soc_unregister_dai(struct snd_soc_dai *dai);
struct snd_soc_dai *snd_soc_find_dai(
EM(rxrpc_cong_begin_retransmission, " Retrans") \
EM(rxrpc_cong_cleared_nacks, " Cleared") \
EM(rxrpc_cong_new_low_nack, " NewLowN") \
- EM(rxrpc_cong_no_change, "") \
+ EM(rxrpc_cong_no_change, " -") \
EM(rxrpc_cong_progress, " Progres") \
EM(rxrpc_cong_retransmit_again, " ReTxAgn") \
EM(rxrpc_cong_rtt_window_end, " RttWinE") \
* Return
* The id is returned or 0 in case the id could not be retrieved.
*
- * void *bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags)
+ * int bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags)
* Description
* Copy *size* bytes from *data* into a ring buffer *ringbuf*.
- * If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of
- * new data availability is sent.
- * IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of
- * new data availability is sent unconditionally.
+ * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ * of new data availability is sent.
+ * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ * of new data availability is sent unconditionally.
* Return
- * 0, on success;
- * < 0, on error.
+ * 0 on success, or a negative error in case of failure.
*
* void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags)
* Description
* void bpf_ringbuf_submit(void *data, u64 flags)
* Description
* Submit reserved ring buffer sample, pointed to by *data*.
- * If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of
- * new data availability is sent.
- * IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of
- * new data availability is sent unconditionally.
+ * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ * of new data availability is sent.
+ * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ * of new data availability is sent unconditionally.
* Return
* Nothing. Always succeeds.
*
* void bpf_ringbuf_discard(void *data, u64 flags)
* Description
* Discard reserved ring buffer sample, pointed to by *data*.
- * If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of
- * new data availability is sent.
- * IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of
- * new data availability is sent unconditionally.
+ * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ * of new data availability is sent.
+ * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ * of new data availability is sent unconditionally.
* Return
* Nothing. Always succeeds.
*
* Description
* Query various characteristics of provided ring buffer. What
 * exactly is queried is determined by *flags*:
- * - BPF_RB_AVAIL_DATA - amount of data not yet consumed;
- * - BPF_RB_RING_SIZE - the size of ring buffer;
- * - BPF_RB_CONS_POS - consumer position (can wrap around);
- * - BPF_RB_PROD_POS - producer(s) position (can wrap around);
- * Data returned is just a momentary snapshots of actual values
+ *
+ * * **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed.
+ * * **BPF_RB_RING_SIZE**: The size of ring buffer.
+ * * **BPF_RB_CONS_POS**: Consumer position (can wrap around).
+ * * **BPF_RB_PROD_POS**: Producer(s) position (can wrap around).
+ *
+ * Data returned is just a momentary snapshot of actual values
* and could be inaccurate, so this facility should be used to
* power heuristics and for reporting, not to make 100% correct
 * calculations.
* Return
- * Requested value, or 0, if flags are not recognized.
+ * Requested value, or 0, if *flags* are not recognized.
*
* int bpf_csum_level(struct sk_buff *skb, u64 level)
* Description
#define FB_ACTIVATE_ALL 64 /* change all VCs on this fb */
#define FB_ACTIVATE_FORCE 128 /* force apply even when no change*/
#define FB_ACTIVATE_INV_MODE 256 /* invalidate videomode */
+#define FB_ACTIVATE_KD_TEXT 512 /* for KDSET vt ioctl */
#define FB_ACCELF_TEXT 1 /* (OBSOLETE) see fb_info.flags and vc_mode */
enum br_mrp_port_role_type {
BR_MRP_PORT_ROLE_PRIMARY,
BR_MRP_PORT_ROLE_SECONDARY,
- BR_MRP_PORT_ROLE_NONE,
};
enum br_mrp_tlv_header_type {
/* supported values for SO_RDS_TRANSPORT */
#define RDS_TRANS_IB 0
-#define RDS_TRANS_IWARP 1
+#define RDS_TRANS_GAP 1
#define RDS_TRANS_TCP 2
#define RDS_TRANS_COUNT 3
#define RDS_TRANS_NONE (~0)
+/* don't use RDS_TRANS_IWARP - it is deprecated */
+#define RDS_TRANS_IWARP RDS_TRANS_GAP
/* IOCTLS commands for SOL_RDS */
#define SIOCRDSSETTOS (SIOCPROTOPRIVATE)
#define SPI_TX_QUAD 0x200
#define SPI_RX_DUAL 0x400
#define SPI_RX_QUAD 0x800
+#define SPI_CS_WORD 0x1000
+#define SPI_TX_OCTAL 0x2000
+#define SPI_RX_OCTAL 0x4000
+#define SPI_3WIRE_HIZ 0x8000
/*---------------------------------------------------------------------------*/
* size in bytes that can be used by user applications when getting the dirty
* bitmap.
*/
-#define VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION 1
+#define VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION 2
struct vfio_iommu_type1_info_cap_migration {
struct vfio_info_cap_header header;
config CC_CAN_LINK
bool
- default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(m64-flag)) if 64BIT
- default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(m32-flag))
+ default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m64-flag)) if 64BIT
+ default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m32-flag))
config CC_CAN_LINK_STATIC
bool
- default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) -static $(m64-flag)) if 64BIT
- default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) -static $(m32-flag))
+ default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m64-flag) -static) if 64BIT
+ default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m32-flag) -static)
config CC_HAS_ASM_GOTO
def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC))
return false;
t = btf_type_skip_modifiers(btf, t->type, NULL);
- if (!btf_type_is_int(t)) {
+ if (!btf_type_is_small_int(t)) {
bpf_log(log,
"ret type %s not allowed for fmod_ret\n",
btf_kind_str[BTF_INFO_KIND(t->info)]);
/* skip modifiers */
while (btf_type_is_modifier(t))
t = btf_type_by_id(btf, t->type);
- if (btf_type_is_int(t) || btf_type_is_enum(t))
+ if (btf_type_is_small_int(t) || btf_type_is_enum(t))
/* accessing a scalar */
return true;
if (!btf_type_is_ptr(t)) {
static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen)
{
- if (unlikely(max_optlen > PAGE_SIZE) || max_optlen < 0)
+ if (unlikely(max_optlen < 0))
return -EINVAL;
+ if (unlikely(max_optlen > PAGE_SIZE)) {
+ /* We don't expose optvals that are greater than PAGE_SIZE
+ * to the BPF program.
+ */
+ max_optlen = PAGE_SIZE;
+ }
+
ctx->optval = kzalloc(max_optlen, GFP_USER);
if (!ctx->optval)
return -ENOMEM;
ctx->optval_end = ctx->optval + max_optlen;
- return 0;
+ return max_optlen;
}
static void sockopt_free_buf(struct bpf_sockopt_kern *ctx)
*/
max_optlen = max_t(int, 16, *optlen);
- ret = sockopt_alloc_buf(&ctx, max_optlen);
- if (ret)
- return ret;
+ max_optlen = sockopt_alloc_buf(&ctx, max_optlen);
+ if (max_optlen < 0)
+ return max_optlen;
ctx.optlen = *optlen;
- if (copy_from_user(ctx.optval, optval, *optlen) != 0) {
+ if (copy_from_user(ctx.optval, optval, min(*optlen, max_optlen)) != 0) {
ret = -EFAULT;
goto out;
}
/* export any potential modifications */
*level = ctx.level;
*optname = ctx.optname;
- *optlen = ctx.optlen;
- *kernel_optval = ctx.optval;
+
+ /* optlen == 0 from BPF indicates that we should
+ * use original userspace data.
+ */
+ if (ctx.optlen != 0) {
+ *optlen = ctx.optlen;
+ *kernel_optval = ctx.optval;
+ }
}
out:
__cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT))
return retval;
- ret = sockopt_alloc_buf(&ctx, max_optlen);
- if (ret)
- return ret;
-
ctx.optlen = max_optlen;
+ max_optlen = sockopt_alloc_buf(&ctx, max_optlen);
+ if (max_optlen < 0)
+ return max_optlen;
+
if (!retval) {
/* If kernel getsockopt finished successfully,
* copy whatever was returned to the user back
goto out;
}
- if (ctx.optlen > max_optlen)
- ctx.optlen = max_optlen;
-
- if (copy_from_user(ctx.optval, optval, ctx.optlen) != 0) {
+ if (copy_from_user(ctx.optval, optval,
+ min(ctx.optlen, max_optlen)) != 0) {
ret = -EFAULT;
goto out;
}
goto out;
}
- if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
- put_user(ctx.optlen, optlen)) {
- ret = -EFAULT;
- goto out;
+ if (ctx.optlen != 0) {
+ if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
+ put_user(ctx.optlen, optlen)) {
+ ret = -EFAULT;
+ goto out;
+ }
}
ret = ctx.retval;
static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);
-static struct hlist_head *dev_map_create_hash(unsigned int entries)
+static struct hlist_head *dev_map_create_hash(unsigned int entries,
+ int numa_node)
{
int i;
struct hlist_head *hash;
- hash = kmalloc_array(entries, sizeof(*hash), GFP_KERNEL);
+ hash = bpf_map_area_alloc(entries * sizeof(*hash), numa_node);
if (hash != NULL)
for (i = 0; i < entries; i++)
INIT_HLIST_HEAD(&hash[i]);
return -EINVAL;
if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
- dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets);
+ dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
+ dtab->map.numa_node);
if (!dtab->dev_index_head)
goto free_charge;
}
}
- kfree(dtab->dev_index_head);
+ bpf_map_area_free(dtab->dev_index_head);
} else {
for (i = 0; i < dtab->map.max_entries; i++) {
struct bpf_dtab_netdev *dev;
* with netns_bpf_mutex held.
*/
struct net *net;
+ struct list_head node; /* node in list of links attached to net */
};
/* Protects updates to netns_bpf */
DEFINE_MUTEX(netns_bpf_mutex);
/* Must be called with netns_bpf_mutex held. */
-static void __net_exit bpf_netns_link_auto_detach(struct bpf_link *link)
+static void netns_bpf_run_array_detach(struct net *net,
+ enum netns_bpf_attach_type type)
{
- struct bpf_netns_link *net_link =
- container_of(link, struct bpf_netns_link, link);
+ struct bpf_prog_array *run_array;
- net_link->net = NULL;
+ run_array = rcu_replace_pointer(net->bpf.run_array[type], NULL,
+ lockdep_is_held(&netns_bpf_mutex));
+ bpf_prog_array_free(run_array);
}
static void bpf_netns_link_release(struct bpf_link *link)
enum netns_bpf_attach_type type = net_link->netns_type;
struct net *net;
- /* Link auto-detached by dying netns. */
- if (!net_link->net)
- return;
-
mutex_lock(&netns_bpf_mutex);
- /* Recheck after potential sleep. We can race with cleanup_net
- * here, but if we see a non-NULL struct net pointer pre_exit
- * has not happened yet and will block on netns_bpf_mutex.
+ /* We can race with cleanup_net, but if we see a non-NULL
+	 * struct net pointer, pre_exit has not run yet and will
+	 * block on netns_bpf_mutex.
*/
net = net_link->net;
if (!net)
goto out_unlock;
- net->bpf.links[type] = NULL;
- RCU_INIT_POINTER(net->bpf.progs[type], NULL);
+ netns_bpf_run_array_detach(net, type);
+ list_del(&net_link->node);
out_unlock:
mutex_unlock(&netns_bpf_mutex);
struct bpf_netns_link *net_link =
container_of(link, struct bpf_netns_link, link);
enum netns_bpf_attach_type type = net_link->netns_type;
+ struct bpf_prog_array *run_array;
struct net *net;
int ret = 0;
goto out_unlock;
}
+ run_array = rcu_dereference_protected(net->bpf.run_array[type],
+ lockdep_is_held(&netns_bpf_mutex));
+ WRITE_ONCE(run_array->items[0].prog, new_prog);
+
old_prog = xchg(&link->prog, new_prog);
- rcu_assign_pointer(net->bpf.progs[type], new_prog);
bpf_prog_put(old_prog);
out_unlock:
.show_fdinfo = bpf_netns_link_show_fdinfo,
};
+/* Must be called with netns_bpf_mutex held. */
+static int __netns_bpf_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr,
+ struct net *net,
+ enum netns_bpf_attach_type type)
+{
+ __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
+ struct bpf_prog_array *run_array;
+ u32 prog_cnt = 0, flags = 0;
+
+ run_array = rcu_dereference_protected(net->bpf.run_array[type],
+ lockdep_is_held(&netns_bpf_mutex));
+ if (run_array)
+ prog_cnt = bpf_prog_array_length(run_array);
+
+ if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
+ return -EFAULT;
+ if (copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
+ return -EFAULT;
+ if (!attr->query.prog_cnt || !prog_ids || !prog_cnt)
+ return 0;
+
+ return bpf_prog_array_copy_to_user(run_array, prog_ids,
+ attr->query.prog_cnt);
+}
+
int netns_bpf_prog_query(const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
- __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
- u32 prog_id, prog_cnt = 0, flags = 0;
enum netns_bpf_attach_type type;
- struct bpf_prog *attached;
struct net *net;
+ int ret;
if (attr->query.query_flags)
return -EINVAL;
if (IS_ERR(net))
return PTR_ERR(net);
- rcu_read_lock();
- attached = rcu_dereference(net->bpf.progs[type]);
- if (attached) {
- prog_cnt = 1;
- prog_id = attached->aux->id;
- }
- rcu_read_unlock();
+ mutex_lock(&netns_bpf_mutex);
+ ret = __netns_bpf_prog_query(attr, uattr, net, type);
+ mutex_unlock(&netns_bpf_mutex);
put_net(net);
-
- if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
- return -EFAULT;
- if (copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
- return -EFAULT;
-
- if (!attr->query.prog_cnt || !prog_ids || !prog_cnt)
- return 0;
-
- if (copy_to_user(prog_ids, &prog_id, sizeof(u32)))
- return -EFAULT;
-
- return 0;
+ return ret;
}
int netns_bpf_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
+ struct bpf_prog_array *run_array;
enum netns_bpf_attach_type type;
+ struct bpf_prog *attached;
struct net *net;
int ret;
+ if (attr->target_fd || attr->attach_flags || attr->replace_bpf_fd)
+ return -EINVAL;
+
type = to_netns_bpf_attach_type(attr->attach_type);
if (type < 0)
return -EINVAL;
mutex_lock(&netns_bpf_mutex);
/* Attaching prog directly is not compatible with links */
- if (net->bpf.links[type]) {
+ if (!list_empty(&net->bpf.links[type])) {
ret = -EEXIST;
goto out_unlock;
}
switch (type) {
case NETNS_BPF_FLOW_DISSECTOR:
- ret = flow_dissector_bpf_prog_attach(net, prog);
+ ret = flow_dissector_bpf_prog_attach_check(net, prog);
break;
default:
ret = -EINVAL;
break;
}
+ if (ret)
+ goto out_unlock;
+
+ attached = net->bpf.progs[type];
+ if (attached == prog) {
+ /* The same program cannot be attached twice */
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ run_array = rcu_dereference_protected(net->bpf.run_array[type],
+ lockdep_is_held(&netns_bpf_mutex));
+ if (run_array) {
+ WRITE_ONCE(run_array->items[0].prog, prog);
+ } else {
+ run_array = bpf_prog_array_alloc(1, GFP_KERNEL);
+ if (!run_array) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+ run_array->items[0].prog = prog;
+ rcu_assign_pointer(net->bpf.run_array[type], run_array);
+ }
+
+ net->bpf.progs[type] = prog;
+ if (attached)
+ bpf_prog_put(attached);
+
out_unlock:
mutex_unlock(&netns_bpf_mutex);
/* Must be called with netns_bpf_mutex held. */
static int __netns_bpf_prog_detach(struct net *net,
- enum netns_bpf_attach_type type)
+ enum netns_bpf_attach_type type,
+ struct bpf_prog *old)
{
struct bpf_prog *attached;
/* Progs attached via links cannot be detached */
- if (net->bpf.links[type])
+ if (!list_empty(&net->bpf.links[type]))
return -EINVAL;
- attached = rcu_dereference_protected(net->bpf.progs[type],
- lockdep_is_held(&netns_bpf_mutex));
- if (!attached)
+ attached = net->bpf.progs[type];
+ if (!attached || attached != old)
return -ENOENT;
- RCU_INIT_POINTER(net->bpf.progs[type], NULL);
+ netns_bpf_run_array_detach(net, type);
+ net->bpf.progs[type] = NULL;
bpf_prog_put(attached);
return 0;
}
-int netns_bpf_prog_detach(const union bpf_attr *attr)
+int netns_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
enum netns_bpf_attach_type type;
+ struct bpf_prog *prog;
int ret;
+ if (attr->target_fd)
+ return -EINVAL;
+
type = to_netns_bpf_attach_type(attr->attach_type);
if (type < 0)
return -EINVAL;
+ prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
+ if (IS_ERR(prog))
+ return PTR_ERR(prog);
+
mutex_lock(&netns_bpf_mutex);
- ret = __netns_bpf_prog_detach(current->nsproxy->net_ns, type);
+ ret = __netns_bpf_prog_detach(current->nsproxy->net_ns, type, prog);
mutex_unlock(&netns_bpf_mutex);
+ bpf_prog_put(prog);
+
return ret;
}
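
Detach now demands proof of ownership: the caller must pass an fd for the very program that is attached, and __netns_bpf_prog_detach() rejects a mismatch with -ENOENT. A hedged user-space sketch of the corresponding BPF_PROG_DETACH call:

    #include <linux/bpf.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* prog_fd must refer to the program that is currently attached */
    static int detach_flow_dissector(int prog_fd)
    {
            union bpf_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.attach_type = BPF_FLOW_DISSECTOR;
            attr.attach_bpf_fd = prog_fd;   /* the ownership proof added above */

            return syscall(__NR_bpf, BPF_PROG_DETACH, &attr, sizeof(attr));
    }
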
static int netns_bpf_link_attach(struct net *net, struct bpf_link *link,
enum netns_bpf_attach_type type)
{
- struct bpf_prog *prog;
+ struct bpf_netns_link *net_link =
+ container_of(link, struct bpf_netns_link, link);
+ struct bpf_prog_array *run_array;
int err;
mutex_lock(&netns_bpf_mutex);
/* Allow attaching only one prog or link for now */
- if (net->bpf.links[type]) {
+ if (!list_empty(&net->bpf.links[type])) {
err = -E2BIG;
goto out_unlock;
}
/* Links are not compatible with attaching prog directly */
- prog = rcu_dereference_protected(net->bpf.progs[type],
- lockdep_is_held(&netns_bpf_mutex));
- if (prog) {
+ if (net->bpf.progs[type]) {
err = -EEXIST;
goto out_unlock;
}
switch (type) {
case NETNS_BPF_FLOW_DISSECTOR:
- err = flow_dissector_bpf_prog_attach(net, link->prog);
+ err = flow_dissector_bpf_prog_attach_check(net, link->prog);
break;
default:
err = -EINVAL;
if (err)
goto out_unlock;
- net->bpf.links[type] = link;
+ run_array = bpf_prog_array_alloc(1, GFP_KERNEL);
+ if (!run_array) {
+ err = -ENOMEM;
+ goto out_unlock;
+ }
+ run_array->items[0].prog = link->prog;
+ rcu_assign_pointer(net->bpf.run_array[type], run_array);
+
+ list_add_tail(&net_link->node, &net->bpf.links[type]);
out_unlock:
mutex_unlock(&netns_bpf_mutex);
return err;
}
+static int __net_init netns_bpf_pernet_init(struct net *net)
+{
+ int type;
+
+ for (type = 0; type < MAX_NETNS_BPF_ATTACH_TYPE; type++)
+ INIT_LIST_HEAD(&net->bpf.links[type]);
+
+ return 0;
+}
+
static void __net_exit netns_bpf_pernet_pre_exit(struct net *net)
{
enum netns_bpf_attach_type type;
- struct bpf_link *link;
+ struct bpf_netns_link *net_link;
mutex_lock(&netns_bpf_mutex);
for (type = 0; type < MAX_NETNS_BPF_ATTACH_TYPE; type++) {
- link = net->bpf.links[type];
- if (link)
- bpf_netns_link_auto_detach(link);
- else
- __netns_bpf_prog_detach(net, type);
+ netns_bpf_run_array_detach(net, type);
+ list_for_each_entry(net_link, &net->bpf.links[type], node)
+ net_link->net = NULL; /* auto-detach link */
+ if (net->bpf.progs[type])
+ bpf_prog_put(net->bpf.progs[type]);
}
mutex_unlock(&netns_bpf_mutex);
}
static struct pernet_operations netns_bpf_pernet_ops __net_initdata = {
+ .init = netns_bpf_pernet_init,
.pre_exit = netns_bpf_pernet_pre_exit,
};
/* The caller must hold the reuseport_lock */
void bpf_sk_reuseport_detach(struct sock *sk)
{
- struct sock __rcu **socks;
+ uintptr_t sk_user_data;
write_lock_bh(&sk->sk_callback_lock);
- socks = sk->sk_user_data;
- if (socks) {
+ sk_user_data = (uintptr_t)sk->sk_user_data;
+ if (sk_user_data & SK_USER_DATA_BPF) {
+ struct sock __rcu **socks;
+
+ socks = (void *)(sk_user_data & SK_USER_DATA_PTRMASK);
WRITE_ONCE(sk->sk_user_data, NULL);
/*
* Do not move this NULL assignment outside of
struct sock *free_osk = NULL, *osk, *nsk;
struct sock_reuseport *reuse;
u32 index = *(u32 *)key;
+ uintptr_t sk_user_data;
struct socket *socket;
int err, fd;
if (err)
goto put_file_unlock;
- WRITE_ONCE(nsk->sk_user_data, &array->ptrs[index]);
+ sk_user_data = (uintptr_t)&array->ptrs[index] | SK_USER_DATA_NOCOPY |
+ SK_USER_DATA_BPF;
+ WRITE_ONCE(nsk->sk_user_data, (void *)sk_user_data);
rcu_assign_pointer(array->ptrs[index], nsk);
free_osk = osk;
err = 0;
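
Both hunks rely on the same trick: because sk_user_data holds a word-aligned pointer, its low bits are free to carry SK_USER_DATA_NOCOPY and SK_USER_DATA_BPF, and SK_USER_DATA_PTRMASK recovers the clean pointer. A self-contained sketch of the technique, with hypothetical names standing in for the kernel macros:

    #include <assert.h>
    #include <stdint.h>

    #define TAG_NOCOPY   1UL                 /* stand-in for SK_USER_DATA_NOCOPY */
    #define TAG_BPF      2UL                 /* stand-in for SK_USER_DATA_BPF */
    #define TAG_MASK     (TAG_NOCOPY | TAG_BPF)
    #define PTR_MASK     (~TAG_MASK)         /* stand-in for SK_USER_DATA_PTRMASK */

    static void *tag_ptr(void *ptr, uintptr_t tags)
    {
            /* alignment guarantees the low bits are zero and thus free */
            assert(((uintptr_t)ptr & TAG_MASK) == 0);
            return (void *)((uintptr_t)ptr | tags);
    }

    static void *untag_ptr(void *tagged, uintptr_t *tags)
    {
            *tags = (uintptr_t)tagged & TAG_MASK;
            return (void *)((uintptr_t)tagged & PTR_MASK);
    }
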
{
struct bpf_ringbuf *rb;
- if (!data_sz || !PAGE_ALIGNED(data_sz))
- return ERR_PTR(-EINVAL);
-
-#ifdef CONFIG_64BIT
- /* on 32-bit arch, it's impossible to overflow record's hdr->pgoff */
- if (data_sz > RINGBUF_MAX_DATA_SZ)
- return ERR_PTR(-E2BIG);
-#endif
-
rb = bpf_ringbuf_area_alloc(data_sz, numa_node);
if (!rb)
return ERR_PTR(-ENOMEM);
return ERR_PTR(-EINVAL);
if (attr->key_size || attr->value_size ||
- attr->max_entries == 0 || !PAGE_ALIGNED(attr->max_entries))
+ !is_power_of_2(attr->max_entries) ||
+ !PAGE_ALIGNED(attr->max_entries))
return ERR_PTR(-EINVAL);
+#ifdef CONFIG_64BIT
+ /* on 32-bit arch, it's impossible to overflow record's hdr->pgoff */
+ if (attr->max_entries > RINGBUF_MAX_DATA_SZ)
+ return ERR_PTR(-E2BIG);
+#endif
+
rb_map = kzalloc(sizeof(*rb_map), GFP_USER);
if (!rb_map)
return ERR_PTR(-ENOMEM);
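
The net effect is that a ring buffer's max_entries (its size in bytes) must be a non-zero, page-aligned power of two, and on 64-bit it must also stay below RINGBUF_MAX_DATA_SZ so a record's hdr->pgoff cannot overflow. A small sketch of the size rule, assuming a 4 KiB page size:

    #include <stdbool.h>
    #include <stddef.h>

    static bool ringbuf_size_ok(size_t sz)
    {
            return sz != 0 &&
                   (sz & (sz - 1)) == 0 &&  /* power of two */
                   (sz % 4096) == 0;        /* page aligned */
    }

    /* ringbuf_size_ok(256 * 4096) -> true; ringbuf_size_ok(3 * 4096) -> false */
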
!bpf_capable())
return -EPERM;
- if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN))
+ if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN) && !capable(CAP_SYS_ADMIN))
return -EPERM;
if (is_perfmon_prog_type(type) && !perfmon_capable())
return -EPERM;
switch (ptype) {
case BPF_PROG_TYPE_SK_MSG:
case BPF_PROG_TYPE_SK_SKB:
- return sock_map_get_from_fd(attr, NULL);
+ return sock_map_prog_detach(attr, ptype);
case BPF_PROG_TYPE_LIRC_MODE2:
return lirc_prog_detach(attr);
case BPF_PROG_TYPE_FLOW_DISSECTOR:
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- return netns_bpf_prog_detach(attr);
+ return netns_bpf_prog_detach(attr, ptype);
case BPF_PROG_TYPE_CGROUP_DEVICE:
case BPF_PROG_TYPE_CGROUP_SKB:
case BPF_PROG_TYPE_CGROUP_SOCK:
return NULL;
}
-static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
+static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
+ const struct cred *f_cred)
{
const struct bpf_map *map;
struct bpf_insn *insns;
code == (BPF_JMP | BPF_CALL_ARGS)) {
if (code == (BPF_JMP | BPF_CALL_ARGS))
insns[i].code = BPF_JMP | BPF_CALL;
- if (!bpf_dump_raw_ok())
+ if (!bpf_dump_raw_ok(f_cred))
insns[i].imm = 0;
continue;
}
return 0;
}
-static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
+static int bpf_prog_get_info_by_fd(struct file *file,
+ struct bpf_prog *prog,
const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
struct bpf_insn *insns_sanitized;
bool fault;
- if (prog->blinded && !bpf_dump_raw_ok()) {
+ if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
info.xlated_prog_insns = 0;
goto done;
}
- insns_sanitized = bpf_insn_prepare_dump(prog);
+ insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
if (!insns_sanitized)
return -ENOMEM;
uinsns = u64_to_user_ptr(info.xlated_prog_insns);
}
if (info.jited_prog_len && ulen) {
- if (bpf_dump_raw_ok()) {
+ if (bpf_dump_raw_ok(file->f_cred)) {
uinsns = u64_to_user_ptr(info.jited_prog_insns);
ulen = min_t(u32, info.jited_prog_len, ulen);
ulen = info.nr_jited_ksyms;
info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
if (ulen) {
- if (bpf_dump_raw_ok()) {
+ if (bpf_dump_raw_ok(file->f_cred)) {
unsigned long ksym_addr;
u64 __user *user_ksyms;
u32 i;
ulen = info.nr_jited_func_lens;
info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
if (ulen) {
- if (bpf_dump_raw_ok()) {
+ if (bpf_dump_raw_ok(file->f_cred)) {
u32 __user *user_lens;
u32 func_len, i;
else
info.nr_jited_line_info = 0;
if (info.nr_jited_line_info && ulen) {
- if (bpf_dump_raw_ok()) {
+ if (bpf_dump_raw_ok(file->f_cred)) {
__u64 __user *user_linfo;
u32 i;
return 0;
}
-static int bpf_map_get_info_by_fd(struct bpf_map *map,
+static int bpf_map_get_info_by_fd(struct file *file,
+ struct bpf_map *map,
const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
return 0;
}
-static int bpf_btf_get_info_by_fd(struct btf *btf,
+static int bpf_btf_get_info_by_fd(struct file *file,
+ struct btf *btf,
const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
return btf_get_info_by_fd(btf, attr, uattr);
}
-static int bpf_link_get_info_by_fd(struct bpf_link *link,
+static int bpf_link_get_info_by_fd(struct file *file,
+ struct bpf_link *link,
const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
return -EBADFD;
if (f.file->f_op == &bpf_prog_fops)
- err = bpf_prog_get_info_by_fd(f.file->private_data, attr,
+ err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr,
uattr);
else if (f.file->f_op == &bpf_map_fops)
- err = bpf_map_get_info_by_fd(f.file->private_data, attr,
+ err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr,
uattr);
else if (f.file->f_op == &btf_fops)
- err = bpf_btf_get_info_by_fd(f.file->private_data, attr, uattr);
+ err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr);
else if (f.file->f_op == &bpf_link_fops)
- err = bpf_link_get_info_by_fd(f.file->private_data,
+ err = bpf_link_get_info_by_fd(f.file, f.file->private_data,
attr, uattr);
else
err = -EINVAL;
return type == PTR_TO_SOCKET ||
type == PTR_TO_TCP_SOCK ||
type == PTR_TO_MAP_VALUE ||
- type == PTR_TO_SOCK_COMMON ||
- type == PTR_TO_BTF_ID;
+ type == PTR_TO_SOCK_COMMON;
}
static bool reg_type_may_be_null(enum bpf_reg_type type)
int i, j, subprog_start, subprog_end = 0, len, subprog;
struct bpf_insn *insn;
void *old_bpf_func;
- int err;
+ int err, num_exentries;
if (env->subprog_cnt <= 1)
return 0;
func[i]->aux->nr_linfo = prog->aux->nr_linfo;
func[i]->aux->jited_linfo = prog->aux->jited_linfo;
func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
+ num_exentries = 0;
+ insn = func[i]->insnsi;
+ for (j = 0; j < func[i]->len; j++, insn++) {
+ if (BPF_CLASS(insn->code) == BPF_LDX &&
+ BPF_MODE(insn->code) == BPF_PROBE_MEM)
+ num_exentries++;
+ }
+ func[i]->aux->num_exentries = num_exentries;
func[i] = bpf_int_jit_compile(func[i]);
if (!func[i]->jited) {
err = -ENOTSUPP;
void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
{
- if (cgroup_sk_alloc_disabled)
- return;
-
- /* Socket clone path */
- if (skcd->val) {
- /*
- * We might be cloning a socket which is left in an empty
- * cgroup and the cgroup might have already been rmdir'd.
- * Don't use cgroup_get_live().
- */
- cgroup_get(sock_cgroup_ptr(skcd));
- cgroup_bpf_get(sock_cgroup_ptr(skcd));
+ if (cgroup_sk_alloc_disabled) {
+ skcd->no_refcnt = 1;
return;
}
rcu_read_unlock();
}
+void cgroup_sk_clone(struct sock_cgroup_data *skcd)
+{
+ if (skcd->val) {
+ if (skcd->no_refcnt)
+ return;
+ /*
+ * We might be cloning a socket which is left in an empty
+ * cgroup and the cgroup might have already been rmdir'd.
+ * Don't use cgroup_get_live().
+ */
+ cgroup_get(sock_cgroup_ptr(skcd));
+ cgroup_bpf_get(sock_cgroup_ptr(skcd));
+ }
+}
+
void cgroup_sk_free(struct sock_cgroup_data *skcd)
{
struct cgroup *cgrp = sock_cgroup_ptr(skcd);
+ if (skcd->no_refcnt)
+ return;
cgroup_bpf_put(cgrp);
cgroup_put(cgrp);
}
arch_kgdb_ops.disable_hw_break(regs);
acquirelock:
+ rcu_read_lock();
/*
* Interrupts will be restored by the 'trap return' code, except when
* single stepping.
atomic_dec(&slaves_in_kgdb);
dbg_touch_watchdogs();
local_irq_restore(flags);
+ rcu_read_unlock();
return 0;
}
cpu_relax();
raw_spin_unlock(&dbg_master_lock);
dbg_touch_watchdogs();
local_irq_restore(flags);
+ rcu_read_unlock();
goto acquirelock;
}
raw_spin_unlock(&dbg_master_lock);
dbg_touch_watchdogs();
local_irq_restore(flags);
+ rcu_read_unlock();
return kgdb_info[cpu].ret_state;
}
}
break;
#endif
+#ifdef CONFIG_HAVE_ARCH_KGDB_QXFER_PKT
+ case 'S':
+ if (!strncmp(remcom_in_buffer, "qSupported:", 11))
+ strcpy(remcom_out_buffer, kgdb_arch_gdb_stub_feature);
+ break;
+ case 'X':
+ if (!strncmp(remcom_in_buffer, "qXfer:", 6))
+ kgdb_arch_handle_qxfer_pkt(remcom_in_buffer,
+ remcom_out_buffer);
+ break;
+#endif
+ default:
+ break;
}
}
return 0;
}
+static void kdb_msg_write(const char *msg, int msg_len)
+{
+ struct console *c;
+
+ if (msg_len == 0)
+ return;
+
+ if (dbg_io_ops) {
+ const char *cp = msg;
+ int len = msg_len;
+
+ while (len--) {
+ dbg_io_ops->write_char(*cp);
+ cp++;
+ }
+ }
+
+ for_each_console(c) {
+ if (!(c->flags & CON_ENABLED))
+ continue;
+ if (c == dbg_io_ops->cons)
+ continue;
+ /*
+ * Set oops_in_progress to encourage the console drivers to
+ * disregard their internal spin locks: in the current calling
+ * context the risk of deadlock is a bigger problem than risks
+ * due to re-entering the console driver. We operate directly on
+ * oops_in_progress rather than using bust_spinlocks() because
+ * the calls bust_spinlocks() makes on exit are not appropriate
+ * for this calling context.
+ */
+ ++oops_in_progress;
+ c->write(c, msg, msg_len);
+ --oops_in_progress;
+ touch_nmi_watchdog();
+ }
+}
+
int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
{
int diag;
int this_cpu, old_cpu;
char *cp, *cp2, *cphold = NULL, replaced_byte = ' ';
char *moreprompt = "more> ";
- struct console *c;
unsigned long uninitialized_var(flags);
/* Serialize kdb_printf if multiple cpus try to write at once.
*/
retlen = strlen(kdb_buffer);
cp = (char *) printk_skip_headers(kdb_buffer);
- if (!dbg_kdb_mode && kgdb_connected) {
+ if (!dbg_kdb_mode && kgdb_connected)
gdbstub_msg_write(cp, retlen - (cp - kdb_buffer));
- } else {
- if (dbg_io_ops && !dbg_io_ops->is_console) {
- len = retlen - (cp - kdb_buffer);
- cp2 = cp;
- while (len--) {
- dbg_io_ops->write_char(*cp2);
- cp2++;
- }
- }
- for_each_console(c) {
- c->write(c, cp, retlen - (cp - kdb_buffer));
- touch_nmi_watchdog();
- }
- }
+ else
+ kdb_msg_write(cp, retlen - (cp - kdb_buffer));
+
if (logging) {
saved_loglevel = console_loglevel;
console_loglevel = CONSOLE_LOGLEVEL_SILENT;
moreprompt = "more> ";
kdb_input_flush();
-
- if (dbg_io_ops && !dbg_io_ops->is_console) {
- len = strlen(moreprompt);
- cp = moreprompt;
- while (len--) {
- dbg_io_ops->write_char(*cp);
- cp++;
- }
- }
- for_each_console(c) {
- c->write(c, moreprompt, strlen(moreprompt));
- touch_nmi_watchdog();
- }
+ kdb_msg_write(moreprompt, strlen(moreprompt));
if (logging)
printk("%s", moreprompt);
# in the pagetables
#
config DMA_NONCOHERENT_MMAP
+ default y if !MMU
bool
config DMA_COHERENT_POOL
+ select GENERIC_ALLOCATOR
bool
config DMA_REMAP
bool
depends on MMU
- select GENERIC_ALLOCATOR
select DMA_NONCOHERENT_MMAP
config DMA_DIRECT_REMAP
return false;
}
-struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
+static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
gfp_t gfp, unsigned long attrs)
{
- size_t alloc_size = PAGE_ALIGN(size);
int node = dev_to_node(dev);
struct page *page = NULL;
u64 phys_limit;
+ WARN_ON_ONCE(!PAGE_ALIGNED(size));
+
if (attrs & DMA_ATTR_NO_WARN)
gfp |= __GFP_NOWARN;
gfp &= ~__GFP_ZERO;
gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
&phys_limit);
- page = dma_alloc_contiguous(dev, alloc_size, gfp);
+ page = dma_alloc_contiguous(dev, size, gfp);
if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
- dma_free_contiguous(dev, page, alloc_size);
+ dma_free_contiguous(dev, page, size);
page = NULL;
}
again:
if (!page)
- page = alloc_pages_node(node, gfp, get_order(alloc_size));
+ page = alloc_pages_node(node, gfp, get_order(size));
if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
dma_free_contiguous(dev, page, size);
page = NULL;
{
struct page *page;
void *ret;
+ int err;
+
+ size = PAGE_ALIGN(size);
if (dma_should_alloc_from_pool(dev, gfp, attrs)) {
- ret = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page, gfp);
+ ret = dma_alloc_from_pool(dev, size, &page, gfp);
if (!ret)
return NULL;
goto done;
dma_alloc_need_uncached(dev, attrs)) ||
(IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
/* remove any dirty cache lines on the kernel alias */
- arch_dma_prep_coherent(page, PAGE_ALIGN(size));
+ arch_dma_prep_coherent(page, size);
/* create a coherent mapping */
- ret = dma_common_contiguous_remap(page, PAGE_ALIGN(size),
+ ret = dma_common_contiguous_remap(page, size,
dma_pgprot(dev, PAGE_KERNEL, attrs),
__builtin_return_address(0));
if (!ret)
goto out_free_pages;
+ if (force_dma_unencrypted(dev)) {
+ err = set_memory_decrypted((unsigned long)ret,
+ 1 << get_order(size));
+ if (err)
+ goto out_free_pages;
+ }
memset(ret, 0, size);
goto done;
}
}
ret = page_address(page);
- if (force_dma_unencrypted(dev))
- set_memory_decrypted((unsigned long)ret, 1 << get_order(size));
+ if (force_dma_unencrypted(dev)) {
+ err = set_memory_decrypted((unsigned long)ret,
+ 1 << get_order(size));
+ if (err)
+ goto out_free_pages;
+ }
memset(ret, 0, size);
arch_dma_prep_coherent(page, size);
ret = arch_dma_set_uncached(ret, size);
if (IS_ERR(ret))
- goto out_free_pages;
+ goto out_encrypt_pages;
}
done:
if (force_dma_unencrypted(dev))
else
*dma_handle = phys_to_dma(dev, page_to_phys(page));
return ret;
+
+out_encrypt_pages:
+ if (force_dma_unencrypted(dev)) {
+ err = set_memory_encrypted((unsigned long)page_address(page),
+ 1 << get_order(size));
+ /* If memory cannot be re-encrypted, it must be leaked */
+ if (err)
+ return NULL;
+ }
out_free_pages:
dma_free_contiguous(dev, page, size);
return NULL;
return ret;
}
-#ifdef CONFIG_MMU
bool dma_direct_can_mmap(struct device *dev)
{
return dev_is_dma_coherent(dev) ||
return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
user_count << PAGE_SHIFT, vma->vm_page_prot);
}
-#else /* CONFIG_MMU */
-bool dma_direct_can_mmap(struct device *dev)
-{
- return false;
-}
-
-int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- unsigned long attrs)
-{
- return -ENXIO;
-}
-#endif /* CONFIG_MMU */
int dma_direct_supported(struct device *dev, u64 mask)
{
return swiotlb_max_mapping_size(dev);
return SIZE_MAX;
}
+
+bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
+{
+ return !dev_is_dma_coherent(dev) ||
+ is_swiotlb_buffer(dma_to_phys(dev, dma_addr));
+}
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);
+bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (dma_is_direct(ops))
+ return dma_direct_need_sync(dev, dma_addr);
+ return ops->sync_single_for_cpu || ops->sync_single_for_device;
+}
+EXPORT_SYMBOL_GPL(dma_need_sync);
+
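
dma_need_sync() lets a driver ask once, per mapping, whether the dma_sync_single_for_cpu()/_for_device() calls can be skipped; for coherent direct mappings that do not go through swiotlb it returns false. A hedged driver-side sketch (the rx_ring structure is hypothetical):

    #include <linux/dma-mapping.h>

    struct rx_ring {                /* hypothetical driver state */
            dma_addr_t dma;
            bool needs_sync;
    };

    static int rx_buf_map(struct device *dev, struct rx_ring *ring,
                          void *buf, size_t len)
    {
            ring->dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, ring->dma))
                    return -ENOMEM;

            /* decide once whether per-packet syncs can be elided */
            ring->needs_sync = dma_need_sync(dev, ring->dma);
            return 0;
    }

    static void rx_buf_complete(struct device *dev, struct rx_ring *ring,
                                size_t len)
    {
            if (ring->needs_sync)
                    dma_sync_single_for_cpu(dev, ring->dma, len, DMA_FROM_DEVICE);
    }
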
unsigned long dma_get_merge_boundary(struct device *dev)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
}
val = gen_pool_alloc(pool, size);
- if (val) {
+ if (likely(val)) {
phys_addr_t phys = gen_pool_virt_to_phys(pool, val);
*ret_page = pfn_to_page(__phys_to_pfn(phys));
ptr = (void *)val;
memset(ptr, 0, size);
+ } else {
+ WARN_ONCE(1, "DMA coherent pool depleted, increase size "
+ "(recommended min coherent_pool=%zuK)\n",
+ gen_pool_size(pool) >> 9);
}
if (gen_pool_avail(pool) < atomic_pool_size)
schedule_work(&atomic_pool_work);
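
The recommended value in the new warning is gen_pool_size() shifted right by 9, i.e. the pool size in KiB times two, so the message effectively suggests doubling the depleted pool via the coherent_pool= boot parameter:

    size_t pool_bytes = 256 * 1024;          /* a depleted 256 KiB pool */
    size_t suggested_k = pool_bytes >> 9;    /* 512 -> "coherent_pool=512K" */
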
{
void *vaddr;
- vaddr = vmap(pages, size >> PAGE_SHIFT, VM_DMA_COHERENT, prot);
+ vaddr = vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT,
+ VM_DMA_COHERENT, prot);
if (vaddr)
find_vm_area(vaddr)->pages = pages;
return vaddr;
void *dma_common_contiguous_remap(struct page *page, size_t size,
pgprot_t prot, const void *caller)
{
- int count = size >> PAGE_SHIFT;
+ int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
struct page **pages;
void *vaddr;
int i;
* to stop root fork bombs.
*/
retval = -EAGAIN;
- if (nr_threads >= max_threads)
+ if (data_race(nr_threads >= max_threads))
goto bad_fork_cleanup_count;
delayacct_tsk_init(p); /* Must remain after dup_task_struct() */
* Otherwise, require CAP_SYSLOG (assuming kptr_restrict isn't set to
* block even that).
*/
-int kallsyms_show_value(void)
+bool kallsyms_show_value(const struct cred *cred)
{
switch (kptr_restrict) {
case 0:
if (kallsyms_for_perf())
- return 1;
+ return true;
/* fallthrough */
case 1:
- if (has_capability_noaudit(current, CAP_SYSLOG))
- return 1;
+ if (security_capable(cred, &init_user_ns, CAP_SYSLOG,
+ CAP_OPT_NOAUDIT) == 0)
+ return true;
/* fallthrough */
default:
- return 0;
+ return false;
}
}
return -ENOMEM;
reset_iter(iter, 0);
- iter->show_value = kallsyms_show_value();
+ /*
+ * Instead of checking this on every s_show() call, cache
+ * the result here at open time.
+ */
+ iter->show_value = kallsyms_show_value(file->f_cred);
return 0;
}
static int
kimage_validate_signature(struct kimage *image)
{
- const char *reason;
int ret;
ret = arch_kexec_kernel_verify_sig(image, image->kernel_buf,
image->kernel_buf_len);
- switch (ret) {
- case 0:
- break;
+ if (ret) {
- /* Certain verification errors are non-fatal if we're not
- * checking errors, provided we aren't mandating that there
- * must be a valid signature.
- */
- case -ENODATA:
- reason = "kexec of unsigned image";
- goto decide;
- case -ENOPKG:
- reason = "kexec of image with unsupported crypto";
- goto decide;
- case -ENOKEY:
- reason = "kexec of image with unavailable key";
- decide:
if (IS_ENABLED(CONFIG_KEXEC_SIG_FORCE)) {
- pr_notice("%s rejected\n", reason);
+ pr_notice("Enforced kernel signature verification failed (%d).\n", ret);
return ret;
}
- /* If IMA is guaranteed to appraise a signature on the kexec
+ /*
+ * If IMA is guaranteed to appraise a signature on the kexec
* image, permit it even if the kernel is otherwise locked
* down.
*/
security_locked_down(LOCKDOWN_KEXEC))
return -EPERM;
- return 0;
-
- /* All other errors are fatal, including nomem, unparseable
- * signatures and signature check failures - even if signatures
- * aren't required.
- */
- default:
- pr_notice("kernel signature verification failed (%d).\n", ret);
+ pr_debug("kernel signature verification failed (%d).\n", ret);
}
- return ret;
+ return 0;
}
#endif
else
kprobe_type = "k";
- if (!kallsyms_show_value())
+ if (!kallsyms_show_value(pi->file->f_cred))
addr = NULL;
if (sym)
* If /proc/kallsyms is not showing kernel address, we won't
* show them here either.
*/
- if (!kallsyms_show_value())
+ if (!kallsyms_show_value(m->file->f_cred))
seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL,
(void *)ent->start_addr);
else
}
struct module_sect_attr {
- struct module_attribute mattr;
- char *name;
+ struct bin_attribute battr;
unsigned long address;
};
struct module_sect_attr attrs[];
};
-static ssize_t module_sect_show(struct module_attribute *mattr,
- struct module_kobject *mk, char *buf)
+static ssize_t module_sect_read(struct file *file, struct kobject *kobj,
+ struct bin_attribute *battr,
+ char *buf, loff_t pos, size_t count)
{
struct module_sect_attr *sattr =
- container_of(mattr, struct module_sect_attr, mattr);
- return sprintf(buf, "0x%px\n", kptr_restrict < 2 ?
- (void *)sattr->address : NULL);
+ container_of(battr, struct module_sect_attr, battr);
+
+ if (pos != 0)
+ return -EINVAL;
+
+ return sprintf(buf, "0x%px\n",
+ kallsyms_show_value(file->f_cred) ? (void *)sattr->address : NULL);
}
static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
unsigned int section;
for (section = 0; section < sect_attrs->nsections; section++)
- kfree(sect_attrs->attrs[section].name);
+ kfree(sect_attrs->attrs[section].battr.attr.name);
kfree(sect_attrs);
}
unsigned int nloaded = 0, i, size[2];
struct module_sect_attrs *sect_attrs;
struct module_sect_attr *sattr;
- struct attribute **gattr;
+ struct bin_attribute **gattr;
/* Count loaded sections and allocate structures */
for (i = 0; i < info->hdr->e_shnum; i++)
if (!sect_empty(&info->sechdrs[i]))
nloaded++;
size[0] = ALIGN(struct_size(sect_attrs, attrs, nloaded),
- sizeof(sect_attrs->grp.attrs[0]));
- size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]);
+ sizeof(sect_attrs->grp.bin_attrs[0]));
+ size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.bin_attrs[0]);
sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
if (sect_attrs == NULL)
return;
/* Setup section attributes. */
sect_attrs->grp.name = "sections";
- sect_attrs->grp.attrs = (void *)sect_attrs + size[0];
+ sect_attrs->grp.bin_attrs = (void *)sect_attrs + size[0];
sect_attrs->nsections = 0;
sattr = &sect_attrs->attrs[0];
- gattr = &sect_attrs->grp.attrs[0];
+ gattr = &sect_attrs->grp.bin_attrs[0];
for (i = 0; i < info->hdr->e_shnum; i++) {
Elf_Shdr *sec = &info->sechdrs[i];
if (sect_empty(sec))
continue;
+ sysfs_bin_attr_init(&sattr->battr);
sattr->address = sec->sh_addr;
- sattr->name = kstrdup(info->secstrings + sec->sh_name,
- GFP_KERNEL);
- if (sattr->name == NULL)
+ sattr->battr.attr.name =
+ kstrdup(info->secstrings + sec->sh_name, GFP_KERNEL);
+ if (sattr->battr.attr.name == NULL)
goto out;
sect_attrs->nsections++;
- sysfs_attr_init(&sattr->mattr.attr);
- sattr->mattr.show = module_sect_show;
- sattr->mattr.store = NULL;
- sattr->mattr.attr.name = sattr->name;
- sattr->mattr.attr.mode = S_IRUSR;
- *(gattr++) = &(sattr++)->mattr.attr;
+ sattr->battr.read = module_sect_read;
+ sattr->battr.size = 3 /* "0x", "\n" */ + (BITS_PER_LONG / 4);
+ sattr->battr.attr.mode = 0400;
+ *(gattr++) = &(sattr++)->battr;
}
*gattr = NULL;
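
With sections exposed as 0400 bin attributes, the permission check moves from kptr_restrict at show time to the opener's credentials: only a suitably privileged opener sees a real address, while others get a NULL-formatted value. A hedged user-space sketch (the module name "loop" is only an example):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[32] = "";
            int fd = open("/sys/module/loop/sections/.text", O_RDONLY);

            if (fd < 0)
                    return 1;       /* mode 0400: unprivileged open fails */
            if (read(fd, buf, sizeof(buf) - 1) > 0)
                    printf(".text at %s", buf);  /* zeros if the address is hidden */
            close(fd);
            return 0;
    }
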
continue;
if (info->sechdrs[i].sh_type == SHT_NOTE) {
sysfs_bin_attr_init(nattr);
- nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
+ nattr->attr.name = mod->sect_attrs->attrs[loaded].battr.attr.name;
nattr->attr.mode = S_IRUGO;
nattr->size = info->sechdrs[i].sh_size;
nattr->private = (void *) info->sechdrs[i].sh_addr;
void * __weak module_alloc(unsigned long size)
{
- return vmalloc_exec(size);
+ return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+ GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
+ NUMA_NO_NODE, __builtin_return_address(0));
}
bool __weak module_init_section(const char *name)
if (!err) {
struct seq_file *m = file->private_data;
- m->private = kallsyms_show_value() ? NULL : (void *)8ul;
+ m->private = kallsyms_show_value(file->f_cred) ? NULL : (void *)8ul;
}
return err;
} else if (!IS_ERR(pidfd_pid(file))) {
err = check_setns_flags(flags);
} else {
- err = -EBADF;
+ err = -EINVAL;
}
if (err)
goto out;
*
* Ensure reorder queue is read after pd->lock is dropped so we see
* new objects from another task in padata_do_serial. Pairs with
- * smp_mb__after_atomic in padata_do_serial.
+ * smp_mb in padata_do_serial.
*/
smp_mb();
* with the trylock of pd->lock in padata_reorder. Pairs with smp_mb
* in padata_reorder.
*/
- smp_mb__after_atomic();
+ smp_mb();
padata_reorder(pd);
}
user->idx = log_next_idx;
user->seq = log_next_seq;
break;
- case SEEK_CUR:
- /*
- * It isn't supported due to the record nature of this
- * interface: _SET _DATA and _END point to very specific
- * record positions, while _CUR would be more useful in case
- * of a byte-based log. Because of that, return the default
- * errno value for invalid seek operation.
- */
- ret = -ESPIPE;
- break;
default:
ret = -EINVAL;
}
schedule_timeout_uninterruptible(1);
}
- pr_alert("kfree object size=%lu\n", kfree_mult * sizeof(struct kfree_obj));
+ pr_alert("kfree object size=%zu\n", kfree_mult * sizeof(struct kfree_obj));
kfree_reader_tasks = kcalloc(kfree_nrealthreads, sizeof(kfree_reader_tasks[0]),
GFP_KERNEL);
* next idle sojourn.
*/
rcu_dynticks_task_trace_enter(); // Before ->dynticks update!
- seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
+ seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
// RCU is no longer watching. Better be in extended quiescent state!
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
(seq & RCU_DYNTICK_CTRL_CTR));
* and we also must force ordering with the next RCU read-side
* critical section.
*/
- seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
+ seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
// RCU is now watching. Better not be in an extended quiescent state!
rcu_dynticks_task_trace_exit(); // After ->dynticks update!
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
!(seq & RCU_DYNTICK_CTRL_CTR));
if (seq & RCU_DYNTICK_CTRL_MASK) {
- atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks);
+ arch_atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks);
smp_mb__after_atomic(); /* _exit after clearing mask. */
}
}
{
struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
- return !(atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR);
+ return !(arch_atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR);
}
/*
do_nocb_deferred_wakeup(rdp);
rcu_prepare_for_idle();
rcu_preempt_deferred_qs(current);
+
+ // instrumentation for the noinstr rcu_dynticks_eqs_enter()
+ instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
+
instrumentation_end();
WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
// RCU is watching here ...
{
struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
+ instrumentation_begin();
/*
* Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
* (We are exiting an NMI handler, so RCU better be paying attention
* leave it in non-RCU-idle state.
*/
if (rdp->dynticks_nmi_nesting != 1) {
- instrumentation_begin();
trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2,
atomic_read(&rdp->dynticks));
WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
return;
}
- instrumentation_begin();
/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
if (!in_nmi())
rcu_prepare_for_idle();
+
+ // instrumentation for the noinstr rcu_dynticks_eqs_enter()
+ instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
instrumentation_end();
// RCU is watching here ...
rcu_dynticks_eqs_exit();
// ... but is watching here.
instrumentation_begin();
+
+ // instrumentation for the noinstr rcu_dynticks_eqs_exit()
+ instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
+
rcu_cleanup_after_idle();
trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks));
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
if (!in_nmi())
rcu_cleanup_after_idle();
+ instrumentation_begin();
+ // instrumentation for the noinstr rcu_dynticks_curr_cpu_in_eqs()
+ instrument_atomic_read(&rdp->dynticks, sizeof(rdp->dynticks));
+ // instrumentation for the noinstr rcu_dynticks_eqs_exit()
+ instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
+
incby = 1;
} else if (!in_nmi()) {
instrumentation_begin();
rcu_irq_enter_check_tick();
instrumentation_end();
+ } else {
+ instrumentation_begin();
}
- instrumentation_begin();
+
trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
rdp->dynticks_nmi_nesting,
rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks));
goto out;
}
- if (cpumask_equal(p->cpus_ptr, new_mask))
+ if (cpumask_equal(&p->cpus_mask, new_mask))
goto out;
/*
rq_lock_irqsave(rq, &rf);
update_rq_clock(rq);
- llist_for_each_entry_safe(p, t, llist, wake_entry)
+ llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
+ if (WARN_ON_ONCE(p->on_cpu))
+ smp_cond_load_acquire(&p->on_cpu, !VAL);
+
+ if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
+ set_task_cpu(p, cpu_of(rq));
+
ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf);
+ }
rq_unlock_irqrestore(rq, &rf);
}
p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
WRITE_ONCE(rq->ttwu_pending, 1);
- __smp_call_single_queue(cpu, &p->wake_entry);
+ __smp_call_single_queue(cpu, &p->wake_entry.llist);
}
void wake_up_if_idle(int cpu)
* the soon-to-be-idle CPU as the current CPU is likely busy.
* nr_running is checked to avoid unnecessary task stacking.
*/
- if ((wake_flags & WF_ON_RQ) && cpu_rq(cpu)->nr_running <= 1)
+ if ((wake_flags & WF_ON_CPU) && cpu_rq(cpu)->nr_running <= 1)
return true;
return false;
static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
{
if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(cpu, wake_flags)) {
+ if (WARN_ON_ONCE(cpu == smp_processor_id()))
+ return false;
+
sched_clock_cpu(cpu); /* Sync clocks across CPUs */
__ttwu_queue_wakelist(p, cpu, wake_flags);
return true;
goto out;
success = 1;
- cpu = task_cpu(p);
trace_sched_waking(p);
p->state = TASK_RUNNING;
trace_sched_wakeup(p);
/* We're going to change ->state: */
success = 1;
- cpu = task_cpu(p);
/*
* Ensure we load p->on_rq _after_ p->state, otherwise it would
* which potentially sends an IPI instead of spinning on p->on_cpu to
* let the waker make forward progress. This is safe because IRQs are
* disabled and the IPI will deliver after on_cpu is cleared.
+ *
+ * Ensure we load task_cpu(p) after p->on_cpu:
+ *
+ * set_task_cpu(p, cpu);
+ * STORE p->cpu = @cpu
+ * __schedule() (switch to task 'p')
+ * LOCK rq->lock
+ * smp_mb__after_spin_lock() smp_cond_load_acquire(&p->on_cpu)
+ * STORE p->on_cpu = 1 LOAD p->cpu
+ *
+ * to ensure we observe the correct CPU on which the task is currently
+ * scheduling.
*/
- if (READ_ONCE(p->on_cpu) && ttwu_queue_wakelist(p, cpu, wake_flags | WF_ON_RQ))
+ if (smp_load_acquire(&p->on_cpu) &&
+ ttwu_queue_wakelist(p, task_cpu(p), wake_flags | WF_ON_CPU))
goto unlock;
/*
psi_ttwu_dequeue(p);
set_task_cpu(p, cpu);
}
+#else
+ cpu = task_cpu(p);
#endif /* CONFIG_SMP */
ttwu_queue(p, cpu, wake_flags);
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
out:
if (success)
- ttwu_stat(p, cpu, wake_flags);
+ ttwu_stat(p, task_cpu(p), wake_flags);
preempt_enable();
return success;
#endif
init_numa_balancing(clone_flags, p);
#ifdef CONFIG_SMP
- p->wake_entry_type = CSD_TYPE_TTWU;
+ p->wake_entry.u_flags = CSD_TYPE_TTWU;
#endif
}
*/
if (dl_prio(prio)) {
if (!dl_prio(p->normal_prio) ||
- (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
+ (pi_task && dl_prio(pi_task->prio) &&
+ dl_entity_preempt(&pi_task->dl, &p->dl))) {
p->dl.dl_boosted = 1;
queue_flag |= ENQUEUE_REPLENISH;
} else
dl_se->dl_bw = 0;
dl_se->dl_density = 0;
+ dl_se->dl_boosted = 0;
dl_se->dl_throttled = 0;
dl_se->dl_yielded = 0;
dl_se->dl_non_contending = 0;
}
}
- sa->runnable_avg = cpu_scale;
+ sa->runnable_avg = sa->util_avg;
if (p->sched_class != &fair_sched_class) {
/*
}
}
+static int call_cpuidle_s2idle(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev)
+{
+ if (current_clr_polling_and_test())
+ return -EBUSY;
+
+ return cpuidle_enter_s2idle(drv, dev);
+}
+
static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
int next_state)
{
if (idle_should_enter_s2idle()) {
rcu_idle_enter();
- entered_state = cpuidle_enter_s2idle(drv, dev);
- if (entered_state > 0) {
- local_irq_enable();
+ entered_state = call_cpuidle_s2idle(drv, dev);
+ if (entered_state > 0)
goto exit_idle;
- }
rcu_idle_exit();
#define WF_SYNC 0x01 /* Waker goes to sleep after wakeup */
#define WF_FORK 0x02 /* Child wakeup after fork */
#define WF_MIGRATED 0x04 /* Internal use, task got migrated */
-#define WF_ON_RQ 0x08 /* Wakee is on_rq */
+#define WF_ON_CPU 0x08 /* Wakee is on_cpu */
/*
* To aid in avoiding the subversion of "niceness" due to uneven distribution
{
int num_nodes, num_cpus;
- /*
- * Ensure struct irq_work layout matches so that
- * flush_smp_call_function_queue() can do horrible things.
- */
- BUILD_BUG_ON(offsetof(struct irq_work, llnode) !=
- offsetof(struct __call_single_data, llist));
- BUILD_BUG_ON(offsetof(struct irq_work, func) !=
- offsetof(struct __call_single_data, func));
- BUILD_BUG_ON(offsetof(struct irq_work, flags) !=
- offsetof(struct __call_single_data, flags));
-
- /*
- * Assert the CSD_TYPE_TTWU layout is similar enough
- * for task_struct to be on the @call_single_queue.
- */
- BUILD_BUG_ON(offsetof(struct task_struct, wake_entry_type) - offsetof(struct task_struct, wake_entry) !=
- offsetof(struct __call_single_data, flags) - offsetof(struct __call_single_data, llist));
-
idle_threads_init();
cpuhp_threads_init();
if (unlikely(ret < 0))
goto fail;
- return 0;
+ return ret;
fail:
memset(dst, 0, size);
return ret;
if (unlikely(info->add_timestamp)) {
bool abs = ring_buffer_time_stamp_abs(cpu_buffer->buffer);
- event = rb_add_time_stamp(event, info->delta, abs);
+ event = rb_add_time_stamp(event, abs ? info->delta : delta, abs);
length -= RB_LEN_TIME_EXTEND;
delta = 0;
}
kprobe_event_cmd_init(&cmd, buf, MAX_BUF_LEN);
ret = kprobe_event_gen_cmd_start(&cmd, event, val);
- if (ret)
+ if (ret) {
+ pr_err("Failed to generate probe: %s\n", buf);
break;
+ }
ret = kprobe_event_gen_cmd_end(&cmd);
- if (ret)
+ if (ret) {
pr_err("Failed to add probe: %s\n", buf);
+ break;
+ }
}
return ret;
}
#endif
-#ifdef CONFIG_HIST_TRIGGERS
+#ifdef CONFIG_SYNTH_EVENTS
static int __init
trace_boot_add_synth_event(struct xbc_node *node, const char *event)
{
int trigger_process_regex(struct trace_event_file *file, char *buff)
{
- char *command, *next = buff;
+ char *command, *next;
struct event_command *p;
int ret = -EINVAL;
+ next = buff = skip_spaces(buff);
command = strsep(&next, ": \t");
+ if (next) {
+ next = skip_spaces(next);
+ if (!*next)
+ next = NULL;
+ }
command = (command[0] != '!') ? command : command + 1;
mutex_lock(&trigger_cmd_mutex);
int ret;
/* separate the trigger from the filter (t:n [if filter]) */
- if (param && isdigit(param[0]))
+ if (param && isdigit(param[0])) {
trigger = strsep(&param, " \t");
+ if (param) {
+ param = skip_spaces(param);
+ if (!*param)
+ param = NULL;
+ }
+ }
trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
trigger = strsep(&param, " \t");
if (!trigger)
return -EINVAL;
+ if (param) {
+ param = skip_spaces(param);
+ if (!*param)
+ param = NULL;
+ }
system = strsep(&trigger, ":");
if (!trigger)
config CC_HAS_KASAN_SW_TAGS
def_bool $(cc-option, -fsanitize=kernel-hwaddress)
+config CC_HAS_WORKING_NOSANITIZE_ADDRESS
+ def_bool !CC_IS_GCC || GCC_VERSION >= 80300
+
config KASAN
bool "KASAN: runtime memory debugger"
depends on (HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC) || \
(HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS)
depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
+ depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
help
Enables KASAN (KernelAddressSANitizer) - runtime memory debugger,
designed to find out-of-bounds accesses and use-after-free bugs.
config HAVE_ARCH_KGDB
bool
+# set if the architecture has its own kgdb_arch_handle_qxfer_pkt
+# function to enable the gdb stub to handle XML packets sent from GDB.
+config HAVE_ARCH_KGDB_QXFER_PKT
+ bool
+
menuconfig KGDB
bool "KGDB: kernel debugger"
depends on HAVE_ARCH_KGDB
* @endbit: The index (in logical notation, compensated for quirks) where
* the packed value ends within pbuf. Must be smaller than, or equal
* to, startbit.
+ * @pbuflen: The length in bytes of the packed buffer pointed to by @pbuf.
* @op: If PACK, then uval will be treated as const pointer and copied (packed)
* into pbuf, between startbit and endbit.
* If UNPACK, then pbuf will be treated as const pointer and the logical
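
The new @pbuflen argument lets packing() bounds-check the buffer instead of trusting the bit positions. A hedged usage sketch, assuming the signature from include/linux/packing.h; the field positions are made up for the example:

    u64 val = 0xabc;        /* 12-bit value */
    u8 buf[8] = {};
    int err;

    /* pack into bits 23..12 of buf, with the buffer length now checked */
    err = packing(buf, &val, 23, 12, sizeof(buf), PACK, 0);
    if (!err) {
            u64 readback = 0;

            err = packing(buf, &readback, 23, 12, sizeof(buf), UNPACK, 0);
            /* on success, readback == 0xabc */
    }
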
err_free:
kfree(devmem);
err_release:
- release_mem_region(devmem->pagemap.res.start,
- resource_size(&devmem->pagemap.res));
+ release_mem_region(res->start, resource_size(res));
err:
mutex_unlock(&mdevice->devmem_lock);
return false;
*/
if (base < highmem_start && limit > highmem_start) {
addr = memblock_alloc_range_nid(size, alignment,
- highmem_start, limit, nid, false);
+ highmem_start, limit, nid, true);
limit = highmem_start;
}
if (!addr) {
addr = memblock_alloc_range_nid(size, alignment, base,
- limit, nid, false);
+ limit, nid, true);
if (!addr) {
ret = -ENOMEM;
goto err;
.page = NULL,
};
- current->capture_control = &capc;
+ /*
+ * Make sure the structs are really initialized before we expose the
+ * capture control, in case we are interrupted and the interrupt handler
+ * frees a page.
+ */
+ barrier();
+ WRITE_ONCE(current->capture_control, &capc);
ret = compact_zone(&cc, &capc);
VM_BUG_ON(!list_empty(&cc.freepages));
VM_BUG_ON(!list_empty(&cc.migratepages));
- *capture = capc.page;
- current->capture_control = NULL;
+ /*
+ * Make sure we hide capture control first before we read the captured
+ * page pointer, otherwise an interrupt could free and capture a page
+ * and we would leak it.
+ */
+ WRITE_ONCE(current->capture_control, NULL);
+ *capture = READ_ONCE(capc.page);
return ret;
}
static void __init pte_clear_tests(struct mm_struct *mm, pte_t *ptep,
unsigned long vaddr)
{
- pte_t pte = READ_ONCE(*ptep);
+ pte_t pte = ptep_get(ptep);
pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
set_pte_at(mm, vaddr, ptep, pte);
barrier();
pte_clear(mm, vaddr, ptep);
- pte = READ_ONCE(*ptep);
+ pte = ptep_get(ptep);
WARN_ON(!pte_none(pte));
}
page = find_get_page(mapping, index);
if (!page) {
- if (iocb->ki_flags & IOCB_NOWAIT)
+ if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO))
goto would_block;
page_cache_sync_readahead(mapping,
ra, filp,
goto no_cached_page;
}
if (PageReadahead(page)) {
+ if (iocb->ki_flags & IOCB_NOIO) {
+ put_page(page);
+ goto out;
+ }
page_cache_async_readahead(mapping,
ra, filp, page,
index, last_index - index);
}
readpage:
+ if (iocb->ki_flags & IOCB_NOIO) {
+ unlock_page(page);
+ put_page(page);
+ goto would_block;
+ }
/*
* A previous I/O error may have been due to temporary
* failures, eg. multipath errors.
*
* This is the "read_iter()" routine for all filesystems
* that can use the page cache directly.
+ *
+ * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall
+ * be returned when no data can be read without waiting for I/O requests
+ * to complete; it doesn't prevent readahead.
+ *
+ * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O
+ * requests shall be made for the read or for readahead. When no data
+ * can be read, -EAGAIN shall be returned. When readahead would be
+ * triggered, a partial, possibly empty read shall be returned.
+ *
* Return:
* * number of bytes copied, even for partial reads
- * * negative error code if nothing was read
+ * * negative error code (or 0 if IOCB_NOIO) if nothing was read
*/
ssize_t
generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
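
From user space, IOCB_NOWAIT corresponds to preadv2() with RWF_NOWAIT; IOCB_NOIO is kernel-internal and has no direct syscall flag. A sketch of the NOWAIT behavior:

    #define _GNU_SOURCE
    #include <errno.h>
    #include <stdio.h>
    #include <sys/uio.h>

    static ssize_t read_cached_only(int fd, void *buf, size_t len, off_t off)
    {
            struct iovec iov = { .iov_base = buf, .iov_len = len };
            ssize_t n = preadv2(fd, &iov, 1, off, RWF_NOWAIT);

            if (n < 0 && errno == EAGAIN)
                    fprintf(stderr, "not in page cache; read would block\n");
            return n;
    }
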
/* Use first found vma */
pgoff_start = page_to_pgoff(hpage);
- pgoff_end = pgoff_start + hpage_nr_pages(hpage) - 1;
+ pgoff_end = pgoff_start + pages_per_huge_page(page_hstate(hpage)) - 1;
anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
pgoff_start, pgoff_end) {
struct vm_area_struct *vma = avc->vma;
return;
cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN);
- if (!cw)
+ if (!cw) {
+ css_put(&memcg->css);
return;
+ }
cw->memcg = memcg;
cw->cachep = cachep;
* We're using unprotected memory for the weight so that if
* some cgroups DO claim explicit protection, we don't protect
* the same bytes twice.
+ *
+ * Check both usage and parent_usage against the respective
+ * protected values. One should imply the other, but they
+ * aren't read atomically - make sure the division is sane.
*/
if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
return ep;
-
- if (parent_effective > siblings_protected && usage > protected) {
+ if (parent_effective > siblings_protected &&
+ parent_usage > siblings_protected &&
+ usage > protected) {
unsigned long unclaimed;
unclaimed = parent_effective - siblings_protected;
if (parent == root) {
memcg->memory.emin = READ_ONCE(memcg->memory.min);
- memcg->memory.elow = memcg->memory.low;
+ memcg->memory.elow = READ_ONCE(memcg->memory.low);
goto out;
}
atomic_long_read(&parent->memory.children_min_usage)));
WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
- memcg->memory.low, READ_ONCE(parent->memory.elow),
+ READ_ONCE(memcg->memory.low),
+ READ_ONCE(parent->memory.elow),
atomic_long_read(&parent->memory.children_low_usage)));
out:
}
#ifdef pte_index
-static int insert_page_in_batch_locked(struct mm_struct *mm, pmd_t *pmd,
+static int insert_page_in_batch_locked(struct mm_struct *mm, pte_t *pte,
unsigned long addr, struct page *page, pgprot_t prot)
{
int err;
if (!page_count(page))
return -EINVAL;
err = validate_page_before_insert(page);
- return err ? err : insert_page_into_pte_locked(
- mm, pte_offset_map(pmd, addr), addr, page, prot);
+ if (err)
+ return err;
+ return insert_page_into_pte_locked(mm, pte, addr, page, prot);
}
/* insert_pages() amortizes the cost of spinlock operations
struct page **pages, unsigned long *num, pgprot_t prot)
{
pmd_t *pmd = NULL;
- spinlock_t *pte_lock = NULL;
+ pte_t *start_pte, *pte;
+ spinlock_t *pte_lock;
struct mm_struct *const mm = vma->vm_mm;
unsigned long curr_page_idx = 0;
unsigned long remaining_pages_total = *num;
ret = -ENOMEM;
if (pte_alloc(mm, pmd))
goto out;
- pte_lock = pte_lockptr(mm, pmd);
while (pages_to_write_in_pmd) {
int pte_idx = 0;
const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
- spin_lock(pte_lock);
- for (; pte_idx < batch_size; ++pte_idx) {
- int err = insert_page_in_batch_locked(mm, pmd,
+ start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
+ for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
+ int err = insert_page_in_batch_locked(mm, pte,
addr, pages[curr_page_idx], prot);
if (unlikely(err)) {
- spin_unlock(pte_lock);
+ pte_unmap_unlock(start_pte, pte_lock);
ret = err;
remaining_pages_total -= pte_idx;
goto out;
addr += PAGE_SIZE;
++curr_page_idx;
}
- spin_unlock(pte_lock);
+ pte_unmap_unlock(start_pte, pte_lock);
pages_to_write_in_pmd -= batch_size;
remaining_pages_total -= batch_size;
}
err = mem_cgroup_charge(page, vma->vm_mm,
GFP_KERNEL);
ClearPageSwapCache(page);
- if (err)
+ if (err) {
+ ret = VM_FAULT_OOM;
goto out_page;
+ }
+
+ /*
+ * XXX: Move to lru_cache_add() when it
+ * supports new vs putback
+ */
+ spin_lock_irq(&page_pgdat(page)->lru_lock);
+ lru_note_cost_page(page);
+ spin_unlock_irq(&page_pgdat(page)->lru_lock);
lru_cache_add(page);
swap_readpage(page, true);
unsigned long start_pfn,
unsigned long nr_pages)
{
+ const unsigned long end_pfn = start_pfn + nr_pages;
struct pglist_data *pgdat = zone->zone_pgdat;
- unsigned long flags;
+ unsigned long pfn, cur_nr_pages, flags;
/* Poison struct pages because they are now uninitialized again. */
- page_init_poison(pfn_to_page(start_pfn), sizeof(struct page) * nr_pages);
+ for (pfn = start_pfn; pfn < end_pfn; pfn += cur_nr_pages) {
+ cond_resched();
+
+ /* Select all remaining pages up to the next section boundary */
+ cur_nr_pages =
+ min(end_pfn - pfn, SECTION_ALIGN_UP(pfn + 1) - pfn);
+ page_init_poison(pfn_to_page(pfn),
+ sizeof(struct page) * cur_nr_pages);
+ }
#ifdef CONFIG_ZONE_DEVICE
/*
return rc;
}
-/*
- * gcc 4.7 and 4.8 on arm get an ICEs when inlining unmap_and_move(). Work
- * around it.
- */
-#if defined(CONFIG_ARM) && \
- defined(GCC_VERSION) && GCC_VERSION < 40900 && GCC_VERSION >= 40700
-#define ICE_noinline noinline
-#else
-#define ICE_noinline
-#endif
-
/*
* Obtain the lock on page, remove all ptes and migrate the page
* to the newly allocated page in newpage.
*/
-static ICE_noinline int unmap_and_move(new_page_t get_new_page,
+static int unmap_and_move(new_page_t get_new_page,
free_page_t put_new_page,
unsigned long private, struct page *page,
int force, enum migrate_mode mode,
}
EXPORT_SYMBOL(vzalloc_node);
-/**
- * vmalloc_exec - allocate virtually contiguous, executable memory
- * @size: allocation size
- *
- * Kernel-internal function to allocate enough pages to cover @size
- * the page level allocator and map them into contiguous and
- * executable kernel virtual space.
- *
- * For tight control over page level allocator and protection flags
- * use __vmalloc() instead.
- */
-
-void *vmalloc_exec(unsigned long size)
-{
- return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM);
-}
-
/**
* vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
* @size: allocation size
* Initialise min_free_kbytes.
*
* For small machines we want it small (128k min). For large machines
- * we want it large (64MB max). But it is not linear, because network
+ * we want it large (256MB max). But it is not linear, because network
* bandwidth does not increase linearly with machine size. We use
*
* min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
gfp_t gfp, int order,
struct kmem_cache *s)
{
- unsigned int nr_pages = 1 << order;
+ int nr_pages = 1 << order;
struct mem_cgroup *memcg;
struct lruvec *lruvec;
int ret;
static __always_inline void memcg_uncharge_slab(struct page *page, int order,
struct kmem_cache *s)
{
- unsigned int nr_pages = 1 << order;
+ int nr_pages = 1 << order;
struct mem_cgroup *memcg;
struct lruvec *lruvec;
if (unlikely(ZERO_OR_NULL_PTR(mem)))
return;
ks = ksize(mem);
- memset(mem, 0, ks);
+ memzero_explicit(mem, ks);
kfree(mem);
}
EXPORT_SYMBOL(kzfree);
}
static void list_slab_objects(struct kmem_cache *s, struct page *page,
- const char *text, unsigned long *map)
+ const char *text)
{
#ifdef CONFIG_SLUB_DEBUG
void *addr = page_address(page);
+ unsigned long *map;
void *p;
- if (!map)
- return;
-
slab_err(s, page, text, s->name);
slab_lock(page);
print_tracking(s, p);
}
}
+ put_map(map);
slab_unlock(page);
#endif
}
{
LIST_HEAD(discard);
struct page *page, *h;
- unsigned long *map = NULL;
-
-#ifdef CONFIG_SLUB_DEBUG
- map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL);
-#endif
BUG_ON(irqs_disabled());
spin_lock_irq(&n->list_lock);
list_add(&page->slab_list, &discard);
} else {
list_slab_objects(s, page,
- "Objects remaining in %s on __kmem_cache_shutdown()",
- map);
+ "Objects remaining in %s on __kmem_cache_shutdown()");
}
}
spin_unlock_irq(&n->list_lock);
-#ifdef CONFIG_SLUB_DEBUG
- bitmap_free(map);
-#endif
-
list_for_each_entry_safe(page, h, &discard, slab_list)
discard_slab(s, page);
}
else
__lru_cache_activate_page(page);
ClearPageReferenced(page);
- if (page_is_file_lru(page))
- workingset_activation(page);
+ workingset_activation(page);
}
if (page_is_idle(page))
clear_page_idle(page);
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
-
+#include "internal.h"
/*
* swapper_space is a fiction, retained to simplify the path through
__SetPageSwapBacked(page);
/* May fail (-ENOMEM) if XArray node allocation failed. */
- if (add_to_swap_cache(page, entry, gfp_mask & GFP_KERNEL)) {
+ if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK)) {
put_swap_page(page, entry);
goto fail_unlock;
}
* @pages: an array of pointers to the pages to be mapped
* @count: number of pages
* @node: prefer to allocate data structures on this node
- * @prot: memory protection to use. PAGE_KERNEL for regular RAM
*
* If you use this function for less than VMAP_MAX_ALLOC pages, it could be
* faster than vmap so it's good. But if you mix long-life and short-life
}
EXPORT_SYMBOL(vzalloc_node);
-/**
- * vmalloc_exec - allocate virtually contiguous, executable memory
- * @size: allocation size
- *
- * Kernel-internal function to allocate enough pages to cover @size
- * the page level allocator and map them into contiguous and
- * executable kernel virtual space.
- *
- * For tight control over page level allocator and protection flags
- * use __vmalloc() instead.
- *
- * Return: pointer to the allocated memory or %NULL on error
- */
-void *vmalloc_exec(unsigned long size)
-{
- return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
- GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
- NUMA_NO_NODE, __builtin_return_address(0));
-}
-
#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
__delete_from_swap_cache(page, swap);
xa_unlock_irqrestore(&mapping->i_pages, flags);
put_swap_page(page, swap);
+ workingset_eviction(page, target_memcg);
} else {
void (*freepage)(struct page *);
void *shadow = NULL;
list_add(&page->lru, &pages_to_free);
} else {
nr_moved += nr_pages;
+ if (PageActive(page))
+ workingset_age_nonresident(lruvec, nr_pages);
}
}
*
* Implementation
*
- * For each node's file LRU lists, a counter for inactive evictions
- * and activations is maintained (node->inactive_age).
+ * For each node's LRU lists, a counter for inactive evictions and
+ * activations is maintained (node->nonresident_age).
*
* On eviction, a snapshot of this counter (along with some bits to
* identify the node) is stored in the now empty page cache
*workingsetp = workingset;
}
-static void advance_inactive_age(struct mem_cgroup *memcg, pg_data_t *pgdat)
+/**
+ * workingset_age_nonresident - age non-resident entries as LRU ages
+ * @lruvec: the lruvec that was aged
+ * @nr_pages: the number of pages to count
+ *
+ * As in-memory pages are aged, non-resident pages need to be aged as
+ * well, in order for the refault distances later on to be comparable
+ * to the in-memory dimensions. This function allows reclaim and LRU
+ * operations to drive the non-resident aging along in parallel.
+ */
+void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages)
{
/*
* Reclaiming a cgroup means reclaiming all its children in a
* the root cgroup's, age as well.
*/
do {
- struct lruvec *lruvec;
-
- lruvec = mem_cgroup_lruvec(memcg, pgdat);
- atomic_long_inc(&lruvec->inactive_age);
- } while (memcg && (memcg = parent_mem_cgroup(memcg)));
+ atomic_long_add(nr_pages, &lruvec->nonresident_age);
+ } while ((lruvec = parent_lruvec(lruvec)));
}
/**
VM_BUG_ON_PAGE(page_count(page), page);
VM_BUG_ON_PAGE(!PageLocked(page), page);
- advance_inactive_age(page_memcg(page), pgdat);
-
lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
+ workingset_age_nonresident(lruvec, hpage_nr_pages(page));
/* XXX: target_memcg can be NULL, go through lruvec */
memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
- eviction = atomic_long_read(&lruvec->inactive_age);
+ eviction = atomic_long_read(&lruvec->nonresident_age);
return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page));
}
if (!mem_cgroup_disabled() && !eviction_memcg)
goto out;
eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
- refault = atomic_long_read(&eviction_lruvec->inactive_age);
+ refault = atomic_long_read(&eviction_lruvec->nonresident_age);
/*
* Calculate the refault distance
*
* The unsigned subtraction here gives an accurate distance
- * across inactive_age overflows in most cases. There is a
+ * across nonresident_age overflows in most cases. There is a
* special case: usually, shadow entries have a short lifetime
* and are either refaulted or reclaimed along with the inode
* before they get too old. But it is not impossible for the
- * inactive_age to lap a shadow entry in the field, which can
- * then result in a false small refault distance, leading to a
- * false activation should this old entry actually refault
- * again. However, earlier kernels used to deactivate
+ * nonresident_age to lap a shadow entry in the field, which
+ * can then result in a false small refault distance, leading
+ * to a false activation should this old entry actually
+ * refault again. However, earlier kernels used to deactivate
* unconditionally with *every* reclaim invocation for the
* longest time, so the occasional inappropriate activation
* leading to pressure on the active list is not a problem.
goto out;
SetPageActive(page);
- advance_inactive_age(memcg, pgdat);
+ workingset_age_nonresident(lruvec, hpage_nr_pages(page));
inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE);
/* Page was active prior to eviction */
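
The wrap tolerance mentioned above comes from plain unsigned subtraction: as long as fewer than a full counter period of events elapse between the two snapshots, (refault - eviction) masked to the shadow-entry width is the true distance even across an overflow. A minimal sketch with an assumed 40-bit shadow width:

    #define EVICTION_MASK ((1UL << 40) - 1)  /* assumed shadow-entry width */

    static unsigned long refault_distance(unsigned long eviction,
                                          unsigned long refault)
    {
            return (refault - eviction) & EVICTION_MASK;
    }

    /* counter wrapped between snapshots: */
    /* refault_distance(EVICTION_MASK - 5, 10) == 16 */
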
void workingset_activation(struct page *page)
{
struct mem_cgroup *memcg;
+ struct lruvec *lruvec;
rcu_read_lock();
/*
memcg = page_memcg_rcu(page);
if (!mem_cgroup_disabled() && !memcg)
goto out;
- advance_inactive_age(memcg, page_pgdat(page));
+ lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
+ workingset_age_nonresident(lruvec, hpage_nr_pages(page));
out:
rcu_read_unlock();
}
lockdep_set_class(&txq->_xmit_lock, &vlan_netdev_xmit_lock_key);
}
-static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
+static void vlan_dev_set_lockdep_class(struct net_device *dev)
{
- lockdep_set_class_and_subclass(&dev->addr_list_lock,
- &vlan_netdev_addr_lock_key,
- subclass);
+ lockdep_set_class(&dev->addr_list_lock,
+ &vlan_netdev_addr_lock_key);
netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, NULL);
}
SET_NETDEV_DEVTYPE(dev, &vlan_type);
- vlan_dev_set_lockdep_class(dev, dev->lower_level);
+ vlan_dev_set_lockdep_class(dev);
vlan->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
if (!vlan->vlan_pcpu_stats)
MODULE_AUTHOR("Eric Van Hensbergen <ericvh@gmail.com>");
MODULE_AUTHOR("Ron Minnich <rminnich@lanl.gov>");
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Plan 9 Resource Sharing Support (9P2000)");
return a + (long)b + c + d + (long)e + f;
}
+struct bpf_fentry_test_t {
+ struct bpf_fentry_test_t *a;
+};
+
+int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
+{
+ return (long)arg;
+}
+
+int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
+{
+ return (long)arg->a;
+}
+
int noinline bpf_modify_return_test(int a, int *b)
{
*b += 1;
const union bpf_attr *kattr,
union bpf_attr __user *uattr)
{
+ struct bpf_fentry_test_t arg = {};
u16 side_effect = 0, ret = 0;
int b = 2, err = -EFAULT;
u32 retval = 0;
bpf_fentry_test3(4, 5, 6) != 15 ||
bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
- bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111)
+ bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
+ bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
+ bpf_fentry_test8(&arg) != 0)
goto out;
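An editorial note on the two new checks: bpf_fentry_test7() is invoked with a NULL pointer, so its (long)arg result is 0, and bpf_fentry_test8() receives &arg where arg = {} leaves arg->a NULL, so (long)arg->a is 0 as well; both calls are therefore expected to return 0, presumably so fentry/fexit tracing programs get struct-pointer arguments (including a NULL one) to exercise.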
break;
case BPF_MODIFY_RETURN:
req.len = optlen;
if (!bpfilter_ops.info.pid)
goto out;
- n = __kernel_write(bpfilter_ops.info.pipe_to_umh, &req, sizeof(req),
+ n = kernel_write(bpfilter_ops.info.pipe_to_umh, &req, sizeof(req),
&pos);
if (n != sizeof(req)) {
pr_err("write fail %zd\n", n);
{
struct ethhdr *eth_hdr;
struct sk_buff *skb;
- u16 *version;
+ __be16 *version;
skb = dev_alloc_skb(MRP_MAX_FRAME_LENGTH);
if (!skb)
if (!mrp)
return -EINVAL;
- if (role == BR_MRP_PORT_ROLE_PRIMARY)
+ switch (role) {
+ case BR_MRP_PORT_ROLE_PRIMARY:
rcu_assign_pointer(mrp->p_port, p);
- else
+ break;
+ case BR_MRP_PORT_ROLE_SECONDARY:
rcu_assign_pointer(mrp->s_port, p);
+ break;
+ default:
+ return -EINVAL;
+ }
br_mrp_port_switchdev_set_role(p, role);
nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);
if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
- nsrcs_offset + sizeof(_nsrcs))
+ nsrcs_offset + sizeof(__nsrcs))
return -EINVAL;
_nsrcs = skb_header_pointer(skb, nsrcs_offset,
struct rcu_head rcu;
struct timer_list timer;
struct br_ip addr;
+ unsigned char eth_addr[ETH_ALEN] __aligned(2);
unsigned char flags;
- unsigned char eth_addr[ETH_ALEN];
};
struct net_bridge_mdb_entry {
struct hlist_head fdb_list;
#if IS_ENABLED(CONFIG_BRIDGE_MRP)
- struct list_head __rcu mrp_list;
+ struct list_head mrp_list;
#endif
};
struct br_mrp {
/* list of mrp instances */
- struct list_head __rcu list;
+ struct list_head list;
struct net_bridge_port __rcu *p_port;
struct net_bridge_port __rcu *s_port;
MODULE_LICENSE("GPL");
MODULE_AUTHOR("wenxu <wenxu@ucloud.cn>");
MODULE_ALIAS_NFT_AF_EXPR(AF_BRIDGE, "meta");
+MODULE_DESCRIPTION("Support for bridge dedicated meta key");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_AF_EXPR(AF_BRIDGE, "reject");
+MODULE_DESCRIPTION("Reject packets from bridge via nftables");
local_bh_disable();
+ dev_xmit_recursion_inc();
HARD_TX_LOCK(dev, txq, smp_processor_id());
if (!netif_xmit_frozen_or_drv_stopped(txq))
ret = netdev_start_xmit(skb, dev, txq, false);
HARD_TX_UNLOCK(dev, txq);
+ dev_xmit_recursion_dec();
local_bh_enable();
rcu_barrier();
dev->reg_state = NETREG_UNREGISTERED;
+	/* We should put the kobject that is held in
+	 * netdev_unregister_kobject(); otherwise
+	 * the net device cannot be freed when the
+	 * driver calls free_netdev(), because the
+	 * kobject is still being held.
+	 */
+ kobject_put(&dev->dev.kobj);
}
/*
* Prevent userspace races by waiting until the network
if (to->addr_len != from->addr_len)
return;
+	/* netif_addr_lock_bh() uses lockdep subclass 0, which is okay for two
+	 * reasons:
+	 * 1) This is always called without any addr_list_lock held, so as
+	 *    the outermost lock here, it must be 0.
+	 * 2) Some callers call this after unlinking the upper device, so
+	 *    dev->lower_level is 1 again by then.
+	 * Therefore the subclass for 'from' is 0 and for 'to' is either 1
+	 * or larger.
+	 */
netif_addr_lock_bh(from);
netif_addr_lock_nested(to);
__hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
if (to->addr_len != from->addr_len)
return;
+ /* See the above comments inside dev_uc_unsync(). */
netif_addr_lock_bh(from);
netif_addr_lock_nested(to);
__hw_addr_unsync(&to->mc, &from->mc, to->addr_len);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
MODULE_ALIAS_GENL_FAMILY("NET_DM");
+MODULE_DESCRIPTION("Monitoring code for network dropped packet alerts");
{
unsigned int iphdr_len;
- if (skb->protocol == cpu_to_be16(ETH_P_IP))
+ switch (skb_protocol(skb, true)) {
+ case cpu_to_be16(ETH_P_IP):
iphdr_len = sizeof(struct iphdr);
- else if (skb->protocol == cpu_to_be16(ETH_P_IPV6))
+ break;
+ case cpu_to_be16(ETH_P_IPV6):
iphdr_len = sizeof(struct ipv6hdr);
- else
+ break;
+ default:
return 0;
+ }
if (skb_headlen(skb) < iphdr_len)
return 0;
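For context, hedged as a paraphrase rather than a quotation of the in-tree helper: skb_protocol(skb, true) resolves the network-layer protocol even for VLAN-tagged frames, where reading skb->protocol directly would still yield ETH_P_8021Q/ETH_P_8021AD. Roughly:

    /* sketch of the helper's behaviour, not its exact definition */
    static inline __be16 skb_protocol(const struct sk_buff *skb, bool skip_vlan)
    {
        return skip_vlan ? vlan_get_protocol(skb) : skb->protocol;
    }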
EXPORT_SYMBOL(skb_flow_dissector_init);
#ifdef CONFIG_BPF_SYSCALL
-int flow_dissector_bpf_prog_attach(struct net *net, struct bpf_prog *prog)
+int flow_dissector_bpf_prog_attach_check(struct net *net,
+ struct bpf_prog *prog)
{
enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR;
- struct bpf_prog *attached;
if (net == &init_net) {
/* BPF flow dissector in the root namespace overrides
for_each_net(ns) {
if (ns == &init_net)
continue;
- if (rcu_access_pointer(ns->bpf.progs[type]))
+ if (rcu_access_pointer(ns->bpf.run_array[type]))
return -EEXIST;
}
} else {
/* Make sure root flow dissector is not attached
* when attaching to the non-root namespace.
*/
- if (rcu_access_pointer(init_net.bpf.progs[type]))
+ if (rcu_access_pointer(init_net.bpf.run_array[type]))
return -EEXIST;
}
- attached = rcu_dereference_protected(net->bpf.progs[type],
- lockdep_is_held(&netns_bpf_mutex));
- if (attached == prog)
- /* The same program cannot be attached twice */
- return -EINVAL;
-
- rcu_assign_pointer(net->bpf.progs[type], prog);
- if (attached)
- bpf_prog_put(attached);
return 0;
}
#endif /* CONFIG_BPF_SYSCALL */
struct flow_dissector_key_addrs *key_addrs;
struct flow_dissector_key_tags *key_tags;
struct flow_dissector_key_vlan *key_vlan;
- struct bpf_prog *attached = NULL;
enum flow_dissect_ret fdret;
enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX;
bool mpls_el = false;
WARN_ON_ONCE(!net);
if (net) {
enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR;
+ struct bpf_prog_array *run_array;
rcu_read_lock();
- attached = rcu_dereference(init_net.bpf.progs[type]);
-
- if (!attached)
- attached = rcu_dereference(net->bpf.progs[type]);
+ run_array = rcu_dereference(init_net.bpf.run_array[type]);
+ if (!run_array)
+ run_array = rcu_dereference(net->bpf.run_array[type]);
- if (attached) {
+ if (run_array) {
struct bpf_flow_keys flow_keys;
struct bpf_flow_dissector ctx = {
.flow_keys = &flow_keys,
.data_end = data + hlen,
};
__be16 n_proto = proto;
+ struct bpf_prog *prog;
if (skb) {
ctx.skb = skb;
n_proto = skb->protocol;
}
- ret = bpf_flow_dissect(attached, &ctx, n_proto, nhoff,
+ prog = READ_ONCE(run_array->items[0].prog);
+ ret = bpf_flow_dissect(prog, &ctx, n_proto, nhoff,
hlen, flags);
__skb_flow_bpf_to_target(&flow_keys, flow_dissector,
target_container);
}
EXPORT_SYMBOL(flow_indr_dev_register);
-static void __flow_block_indr_cleanup(flow_setup_cb_t *setup_cb, void *cb_priv,
+static void __flow_block_indr_cleanup(void (*release)(void *cb_priv),
+ void *cb_priv,
struct list_head *cleanup_list)
{
struct flow_block_cb *this, *next;
list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) {
- if (this->cb == setup_cb &&
- this->cb_priv == cb_priv) {
+ if (this->release == release &&
+ this->indr.cb_priv == cb_priv) {
list_move(&this->indr.list, cleanup_list);
return;
}
}
void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
- flow_setup_cb_t *setup_cb)
+ void (*release)(void *cb_priv))
{
struct flow_indr_dev *this, *next, *indr_dev = NULL;
LIST_HEAD(cleanup_list);
return;
}
- __flow_block_indr_cleanup(setup_cb, cb_priv, &cleanup_list);
+ __flow_block_indr_cleanup(release, cb_priv, &cleanup_list);
mutex_unlock(&flow_indr_block_lock);
flow_block_indr_notify(&cleanup_list);
static void flow_block_indr_init(struct flow_block_cb *flow_block,
struct flow_block_offload *bo,
struct net_device *dev, void *data,
+ void *cb_priv,
void (*cleanup)(struct flow_block_cb *block_cb))
{
flow_block->indr.binder_type = bo->binder_type;
flow_block->indr.data = data;
+ flow_block->indr.cb_priv = cb_priv;
flow_block->indr.dev = dev;
flow_block->indr.cleanup = cleanup;
}
-static void __flow_block_indr_binding(struct flow_block_offload *bo,
- struct net_device *dev, void *data,
- void (*cleanup)(struct flow_block_cb *block_cb))
+struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
+ void *cb_ident, void *cb_priv,
+ void (*release)(void *cb_priv),
+ struct flow_block_offload *bo,
+ struct net_device *dev, void *data,
+ void *indr_cb_priv,
+ void (*cleanup)(struct flow_block_cb *block_cb))
{
struct flow_block_cb *block_cb;
- list_for_each_entry(block_cb, &bo->cb_list, list) {
- switch (bo->command) {
- case FLOW_BLOCK_BIND:
- flow_block_indr_init(block_cb, bo, dev, data, cleanup);
- list_add(&block_cb->indr.list, &flow_block_indr_list);
- break;
- case FLOW_BLOCK_UNBIND:
- list_del(&block_cb->indr.list);
- break;
- }
- }
+ block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, release);
+ if (IS_ERR(block_cb))
+ goto out;
+
+ flow_block_indr_init(block_cb, bo, dev, data, indr_cb_priv, cleanup);
+ list_add(&block_cb->indr.list, &flow_block_indr_list);
+
+out:
+ return block_cb;
}
+EXPORT_SYMBOL(flow_indr_block_cb_alloc);
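A hedged sketch of how a driver's indirect-block callback might use the new helper under the reworked API; example_indr_setup(), example_setup_cb(), and example_release() are illustrative names (assumed driver callbacks), not code from an in-tree driver:

    static int example_indr_setup(struct net_device *dev, void *cb_priv,
                                  enum tc_setup_type type,
                                  struct flow_block_offload *bo,
                                  void *data,
                                  void (*cleanup)(struct flow_block_cb *block_cb))
    {
        struct flow_block_cb *block_cb;

        if (bo->command != FLOW_BLOCK_BIND)
            return -EOPNOTSUPP;

        /* Allocates the block callback and links it on flow_block_indr_list,
         * so flow_indr_dev_unregister() can later find it via 'release' and
         * the indirect cb_priv.
         */
        block_cb = flow_indr_block_cb_alloc(example_setup_cb, dev, cb_priv,
                                            example_release, bo, dev, data,
                                            cb_priv, cleanup);
        if (IS_ERR(block_cb))
            return PTR_ERR(block_cb);

        flow_block_cb_add(block_cb, bo);
        return 0;
    }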
int flow_indr_dev_setup_offload(struct net_device *dev,
enum tc_setup_type type, void *data,
mutex_lock(&flow_indr_block_lock);
list_for_each_entry(this, &flow_block_indr_dev_list, list)
- this->cb(dev, this->cb_priv, type, bo);
+ this->cb(dev, this->cb_priv, type, bo, data, cleanup);
- __flow_block_indr_binding(bo, dev, data, cleanup);
mutex_unlock(&flow_indr_block_lock);
return list_empty(&bo->cb_list) ? -EOPNOTSUPP : 0;
return container_of(parser, struct sk_psock, parser);
}
-static void sk_psock_skb_redirect(struct sk_psock *psock, struct sk_buff *skb)
+static void sk_psock_skb_redirect(struct sk_buff *skb)
{
struct sk_psock *psock_other;
struct sock *sk_other;
}
}
-static void sk_psock_tls_verdict_apply(struct sk_psock *psock,
- struct sk_buff *skb, int verdict)
+static void sk_psock_tls_verdict_apply(struct sk_buff *skb, int verdict)
{
switch (verdict) {
case __SK_REDIRECT:
- sk_psock_skb_redirect(psock, skb);
+ sk_psock_skb_redirect(skb);
break;
case __SK_PASS:
case __SK_DROP:
ret = sk_psock_bpf_run(psock, prog, skb);
ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
}
+ sk_psock_tls_verdict_apply(skb, ret);
rcu_read_unlock();
- sk_psock_tls_verdict_apply(psock, skb, ret);
return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);
}
goto out_free;
case __SK_REDIRECT:
- sk_psock_skb_redirect(psock, skb);
+ sk_psock_skb_redirect(skb);
break;
case __SK_DROP:
/* fall-through */
static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
{
- struct sk_psock *psock = sk_psock_from_strp(strp);
+ struct sk_psock *psock;
struct bpf_prog *prog;
int ret = __SK_DROP;
+ struct sock *sk;
rcu_read_lock();
+ sk = strp->sk;
+ psock = sk_psock(sk);
+ if (unlikely(!psock)) {
+ kfree_skb(skb);
+ goto out;
+ }
prog = READ_ONCE(psock->progs.skb_verdict);
if (likely(prog)) {
skb_orphan(skb);
ret = sk_psock_bpf_run(psock, prog, skb);
ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
}
- rcu_read_unlock();
sk_psock_verdict_apply(psock, skb, ret);
+out:
+ rcu_read_unlock();
}
static int sk_psock_strp_read_done(struct strparser *strp, int err)
return inet6_sk(sk)->mc_loop;
#endif
}
- WARN_ON(1);
+ WARN_ON_ONCE(1);
return true;
}
EXPORT_SYMBOL(sk_mc_loop);
cgroup_sk_alloc(&sk->sk_cgrp_data);
sock_update_classid(&sk->sk_cgrp_data);
sock_update_netprioidx(&sk->sk_cgrp_data);
+ sk_tx_queue_clear(sk);
}
return sk;
/* sk->sk_memcg will be populated at accept() time */
newsk->sk_memcg = NULL;
- cgroup_sk_alloc(&newsk->sk_cgrp_data);
+ cgroup_sk_clone(&newsk->sk_cgrp_data);
rcu_read_lock();
filter = rcu_dereference(sk->sk_filter);
*/
sk_refcnt_debug_inc(newsk);
sk_set_socket(newsk, NULL);
+ sk_tx_queue_clear(newsk);
RCU_INIT_POINTER(newsk->sk_wq, NULL);
if (newsk->sk_prot->sockets_allocated)
struct fd f;
int ret;
+ if (attr->attach_flags || attr->replace_bpf_fd)
+ return -EINVAL;
+
f = fdget(ufd);
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
- ret = sock_map_prog_update(map, prog, attr->attach_type);
+ ret = sock_map_prog_update(map, prog, NULL, attr->attach_type);
+ fdput(f);
+ return ret;
+}
+
+int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
+{
+ u32 ufd = attr->target_fd;
+ struct bpf_prog *prog;
+ struct bpf_map *map;
+ struct fd f;
+ int ret;
+
+ if (attr->attach_flags || attr->replace_bpf_fd)
+ return -EINVAL;
+
+ f = fdget(ufd);
+ map = __bpf_map_get(f);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ prog = bpf_prog_get(attr->attach_bpf_fd);
+ if (IS_ERR(prog)) {
+ ret = PTR_ERR(prog);
+ goto put_map;
+ }
+
+ if (prog->type != ptype) {
+ ret = -EINVAL;
+ goto put_prog;
+ }
+
+ ret = sock_map_prog_update(map, NULL, prog, attr->attach_type);
+put_prog:
+ bpf_prog_put(prog);
+put_map:
fdput(f);
return ret;
}
}
int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
- u32 which)
+ struct bpf_prog *old, u32 which)
{
struct sk_psock_progs *progs = sock_map_progs(map);
+ struct bpf_prog **pprog;
if (!progs)
return -EOPNOTSUPP;
switch (which) {
case BPF_SK_MSG_VERDICT:
- psock_set_prog(&progs->msg_parser, prog);
+ pprog = &progs->msg_parser;
break;
case BPF_SK_SKB_STREAM_PARSER:
- psock_set_prog(&progs->skb_parser, prog);
+ pprog = &progs->skb_parser;
break;
case BPF_SK_SKB_STREAM_VERDICT:
- psock_set_prog(&progs->skb_verdict, prog);
+ pprog = &progs->skb_verdict;
break;
default:
return -EOPNOTSUPP;
}
+ if (old)
+ return psock_replace_prog(pprog, prog, old);
+
+ psock_set_prog(pprog, prog);
return 0;
}
ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
if (write && !ret) {
if (jit_enable < 2 ||
- (jit_enable == 2 && bpf_dump_raw_ok())) {
+ (jit_enable == 2 && bpf_dump_raw_ok(current_cred()))) {
*(int *)table->data = jit_enable;
if (jit_enable == 2)
pr_warn("bpf_jit_enable = 2 was set! NEVER use this in production, only for JIT debugging!\n");
xdpf->len = totsize - metasize;
xdpf->headroom = 0;
xdpf->metasize = metasize;
+ xdpf->frame_sz = PAGE_SIZE;
xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
xsk_buff_free(xdp);
#define DSA_HLEN 4
#define EDSA_HLEN 8
+#define FRAME_TYPE_TO_CPU 0x00
+#define FRAME_TYPE_FORWARD 0x03
+
+#define TO_CPU_CODE_MGMT_TRAP 0x00
+#define TO_CPU_CODE_FRAME2REG 0x01
+#define TO_CPU_CODE_IGMP_MLD_TRAP 0x02
+#define TO_CPU_CODE_POLICY_TRAP 0x03
+#define TO_CPU_CODE_ARP_MIRROR 0x04
+#define TO_CPU_CODE_POLICY_MIRROR 0x05
+
static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct packet_type *pt)
{
u8 *edsa_header;
+ int frame_type;
+ int code;
int source_device;
int source_port;
/*
* Check that frame type is either TO_CPU or FORWARD.
*/
- if ((edsa_header[0] & 0xc0) != 0x00 && (edsa_header[0] & 0xc0) != 0xc0)
+ frame_type = edsa_header[0] >> 6;
+
+ switch (frame_type) {
+ case FRAME_TYPE_TO_CPU:
+ code = (edsa_header[1] & 0x6) | ((edsa_header[2] >> 4) & 1);
+
+ /*
+ * Mark the frame to never egress on any port of the same switch
+ * unless it's a trapped IGMP/MLD packet, in which case the
+ * bridge might want to forward it.
+ */
+ if (code != TO_CPU_CODE_IGMP_MLD_TRAP)
+ skb->offload_fwd_mark = 1;
+
+ break;
+
+ case FRAME_TYPE_FORWARD:
+ skb->offload_fwd_mark = 1;
+ break;
+
+ default:
return NULL;
+ }
/*
* Determine source device and port.
2 * ETH_ALEN);
}
- skb->offload_fwd_mark = 1;
-
return skb;
}
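A quick equivalence check (editorial): the frame type lives in the two most significant bits of the first (E)DSA header byte, so the new switch accepts exactly what the old mask test did:

    unsigned char byte0 = edsa_header[0];
    int frame_type = byte0 >> 6;    /* 0 = TO_CPU, 3 = FORWARD */

    /* Old: (byte0 & 0xc0) == 0x00 || (byte0 & 0xc0) == 0xc0
     * New: frame_type == FRAME_TYPE_TO_CPU || frame_type == FRAME_TYPE_FORWARD
     */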
struct nlattr *tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_MAX + 1];
int ret;
+ cfg->first = 100;
+ cfg->step = 100;
+ cfg->last = MAX_CABLE_LENGTH_CM;
+ cfg->pair = PHY_PAIR_ALL;
+
+ if (!nest)
+ return 0;
+
ret = nla_parse_nested(tb, ETHTOOL_A_CABLE_TEST_TDR_CFG_MAX, nest,
cable_test_tdr_act_cfg_policy, info->extack);
if (ret < 0)
if (tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST])
cfg->first = nla_get_u32(
tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST]);
- else
- cfg->first = 100;
+
if (tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_LAST])
cfg->last = nla_get_u32(tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_LAST]);
- else
- cfg->last = MAX_CABLE_LENGTH_CM;
if (tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_STEP])
cfg->step = nla_get_u32(tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_STEP]);
- else
- cfg->step = 100;
if (tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_PAIR]) {
cfg->pair = nla_get_u8(tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_PAIR]);
"invalid pair parameter");
return -EINVAL;
}
- } else {
- cfg->pair = PHY_PAIR_ALL;
}
if (cfg->first > MAX_CABLE_LENGTH_CM) {
[NETIF_F_GSO_UDP_TUNNEL_BIT] = "tx-udp_tnl-segmentation",
[NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT] = "tx-udp_tnl-csum-segmentation",
[NETIF_F_GSO_PARTIAL_BIT] = "tx-gso-partial",
+ [NETIF_F_GSO_TUNNEL_REMCSUM_BIT] = "tx-tunnel-remcsum-segmentation",
[NETIF_F_GSO_SCTP_BIT] = "tx-sctp-segmentation",
[NETIF_F_GSO_ESP_BIT] = "tx-esp-segmentation",
[NETIF_F_GSO_UDP_L4_BIT] = "tx-udp-segmentation",
+ [NETIF_F_GSO_FRAGLIST_BIT] = "tx-gso-list",
[NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc",
[NETIF_F_SCTP_CRC_BIT] = "tx-checksum-sctp",
sizeof(match->mask.ipv6.dst));
}
if (memcmp(v6_m_spec->ip6src, &zero_addr, sizeof(zero_addr)) ||
- memcmp(v6_m_spec->ip6src, &zero_addr, sizeof(zero_addr))) {
+ memcmp(v6_m_spec->ip6dst, &zero_addr, sizeof(zero_addr))) {
match->dissector.used_keys |=
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS);
match->dissector.offset[FLOW_DISSECTOR_KEY_IPV6_ADDRS] =
ret = linkstate_get_sqi(dev);
if (ret < 0 && ret != -EOPNOTSUPP)
- return ret;
-
+ goto out;
data->sqi = ret;
ret = linkstate_get_sqi_max(dev);
if (ret < 0 && ret != -EOPNOTSUPP)
- return ret;
-
+ goto out;
data->sqi_max = ret;
+ ret = 0;
+out:
ethnl_ops_complete(dev);
-
- return 0;
+ return ret;
}
static int linkstate_reply_size(const struct ethnl_req_info *req_base,
}
static int ethnl_default_dump_one(struct sk_buff *skb, struct net_device *dev,
- const struct ethnl_dump_ctx *ctx)
+ const struct ethnl_dump_ctx *ctx,
+ struct netlink_callback *cb)
{
+ void *ehdr;
int ret;
+ ehdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+			   &ethtool_genl_family, 0, ctx->ops->reply_cmd);
+ if (!ehdr)
+ return -EMSGSIZE;
+
ethnl_init_reply_data(ctx->reply_data, ctx->ops, dev);
rtnl_lock();
ret = ctx->ops->prepare_data(ctx->req_info, ctx->reply_data, NULL);
if (ctx->ops->cleanup_data)
ctx->ops->cleanup_data(ctx->reply_data);
ctx->reply_data->dev = NULL;
+ if (ret < 0)
+ genlmsg_cancel(skb, ehdr);
+ else
+ genlmsg_end(skb, ehdr);
return ret;
}
int s_idx = ctx->pos_idx;
int h, idx = 0;
int ret = 0;
- void *ehdr;
rtnl_lock();
for (h = ctx->pos_hash; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
dev_hold(dev);
rtnl_unlock();
- ehdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
-				   &ethtool_genl_family, 0,
- ctx->ops->reply_cmd);
- if (!ehdr) {
- dev_put(dev);
- ret = -EMSGSIZE;
- goto out;
- }
- ret = ethnl_default_dump_one(skb, dev, ctx);
+ ret = ethnl_default_dump_one(skb, dev, ctx, cb);
dev_put(dev);
if (ret < 0) {
- genlmsg_cancel(skb, ehdr);
if (ret == -EOPNOTSUPP)
goto lock_and_cont;
if (likely(skb->len))
ret = skb->len;
goto out;
}
- genlmsg_end(skb, ehdr);
lock_and_cont:
rtnl_lock();
if (net->dev_base_seq != seq) {
rcu_read_unlock();
}
-static void hsr_del_ports(struct hsr_priv *hsr)
+void hsr_del_ports(struct hsr_priv *hsr)
{
struct hsr_port *port;
hsr_del_port(port);
}
-/* This has to be called after all the readers are gone.
- * Otherwise we would have to check the return value of
- * hsr_port_get_hsr().
- */
-static void hsr_dev_destroy(struct net_device *hsr_dev)
-{
- struct hsr_priv *hsr = netdev_priv(hsr_dev);
-
- hsr_debugfs_term(hsr);
- hsr_del_ports(hsr);
-
- del_timer_sync(&hsr->prune_timer);
- del_timer_sync(&hsr->announce_timer);
-
- hsr_del_self_node(hsr);
- hsr_del_nodes(&hsr->node_db);
-}
-
static const struct net_device_ops hsr_device_ops = {
.ndo_change_mtu = hsr_dev_change_mtu,
.ndo_open = hsr_dev_open,
.ndo_stop = hsr_dev_close,
.ndo_start_xmit = hsr_dev_xmit,
.ndo_fix_features = hsr_fix_features,
- .ndo_uninit = hsr_dev_destroy,
};
static struct device_type hsr_type = {
unsigned char multicast_spec, u8 protocol_version,
struct netlink_ext_ack *extack)
{
+ bool unregister = false;
struct hsr_priv *hsr;
int res;
if (res)
goto err_unregister;
+ unregister = true;
+
res = hsr_add_port(hsr, slave[0], HSR_PT_SLAVE_A, extack);
if (res)
- goto err_add_slaves;
+ goto err_unregister;
res = hsr_add_port(hsr, slave[1], HSR_PT_SLAVE_B, extack);
if (res)
- goto err_add_slaves;
+ goto err_unregister;
hsr_debugfs_init(hsr, hsr_dev);
mod_timer(&hsr->prune_timer, jiffies + msecs_to_jiffies(PRUNE_PERIOD));
return 0;
-err_add_slaves:
- unregister_netdevice(hsr_dev);
err_unregister:
hsr_del_ports(hsr);
err_add_master:
hsr_del_self_node(hsr);
+ if (unregister)
+ unregister_netdevice(hsr_dev);
return res;
}
#include <linux/netdevice.h>
#include "hsr_main.h"
+void hsr_del_ports(struct hsr_priv *hsr);
void hsr_dev_setup(struct net_device *dev);
int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
unsigned char multicast_spec, u8 protocol_version,
void hsr_check_carrier_and_operstate(struct hsr_priv *hsr);
bool is_hsr_master(struct net_device *dev);
int hsr_get_max_mtu(struct hsr_priv *hsr);
-
#endif /* __HSR_DEVICE_H */
*/
#include <linux/netdevice.h>
+#include <net/rtnetlink.h>
#include <linux/rculist.h>
#include <linux/timer.h>
#include <linux/etherdevice.h>
master = hsr_port_get_hsr(port->hsr, HSR_PT_MASTER);
hsr_del_port(port);
if (hsr_slave_empty(master->hsr)) {
- unregister_netdevice_queue(master->dev,
- &list_kill);
+ const struct rtnl_link_ops *ops;
+
+ ops = master->dev->rtnl_link_ops;
+ ops->dellink(master->dev, &list_kill);
unregister_netdevice_many(&list_kill);
}
}
static void __exit hsr_exit(void)
{
- unregister_netdevice_notifier(&hsr_nb);
hsr_netlink_exit();
hsr_debugfs_remove_root();
+ unregister_netdevice_notifier(&hsr_nb);
}
module_init(hsr_init);
return hsr_dev_finalize(dev, link, multicast_spec, hsr_version, extack);
}
+static void hsr_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct hsr_priv *hsr = netdev_priv(dev);
+
+ del_timer_sync(&hsr->prune_timer);
+ del_timer_sync(&hsr->announce_timer);
+
+ hsr_debugfs_term(hsr);
+ hsr_del_ports(hsr);
+
+ hsr_del_self_node(hsr);
+ hsr_del_nodes(&hsr->node_db);
+
+ unregister_netdevice_queue(dev, head);
+}
+
static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct hsr_priv *hsr = netdev_priv(dev);
.priv_size = sizeof(struct hsr_priv),
.setup = hsr_dev_setup,
.newlink = hsr_newlink,
+ .dellink = hsr_dellink,
.fill_info = hsr_fill_info,
};
config INET_AH
tristate "IP: AH transformation"
- select XFRM_ALGO
- select CRYPTO
- select CRYPTO_HMAC
- select CRYPTO_MD5
- select CRYPTO_SHA1
+ select XFRM_AH
help
- Support for IPsec AH.
+ Support for IPsec AH (Authentication Header).
+
+ AH can be used with various authentication algorithms. Besides
+ enabling AH support itself, this option enables the generic
+ implementations of the algorithms that RFC 8221 lists as MUST be
+ implemented. If you need any other algorithms, you'll need to enable
+ them in the crypto API. You should also enable accelerated
+ implementations of any needed algorithms when available.
If unsure, say Y.
config INET_ESP
tristate "IP: ESP transformation"
- select XFRM_ALGO
- select CRYPTO
- select CRYPTO_AUTHENC
- select CRYPTO_HMAC
- select CRYPTO_MD5
- select CRYPTO_CBC
- select CRYPTO_SHA1
- select CRYPTO_DES
- select CRYPTO_ECHAINIV
+ select XFRM_ESP
help
- Support for IPsec ESP.
+ Support for IPsec ESP (Encapsulating Security Payload).
+
+ ESP can be used with various encryption and authentication algorithms.
+ Besides enabling ESP support itself, this option enables the generic
+ implementations of the algorithms that RFC 8221 lists as MUST be
+ implemented. If you need any other algorithms, you'll need to enable
+ them in the crypto API. You should also enable accelerated
+ implementations of any needed algorithms when available.
If unsure, say Y.
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);
+MODULE_DESCRIPTION("IPV4 GSO/GRO offload support");
if (fl4.flowi4_scope < RT_SCOPE_LINK)
fl4.flowi4_scope = RT_SCOPE_LINK;
- if (table)
+ if (table && table != RT_TABLE_MAIN)
tbl = fib_get_table(net, table);
if (tbl)
module_exit(fou_fini);
MODULE_AUTHOR("Tom Herbert <therbert@google.com>");
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Foo over UDP");
ipcm_init(&ipc);
inet->tos = ip_hdr(skb)->tos;
- sk->sk_mark = mark;
+ ipc.sockc.mark = mark;
daddr = ipc.addr = ip_hdr(skb)->saddr;
saddr = fib_compute_spec_dst(skb);
icmp_param.skb = skb_in;
icmp_param.offset = skb_network_offset(skb_in);
inet_sk(sk)->tos = tos;
- sk->sk_mark = mark;
ipcm_init(&ipc);
ipc.addr = iph->saddr;
ipc.opt = &icmp_param.replyopts.opt;
+ ipc.sockc.mark = mark;
rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark,
type, code, &icmp_param);
sk->sk_protocol = ip_hdr(skb)->protocol;
sk->sk_bound_dev_if = arg->bound_dev_if;
sk->sk_sndbuf = sysctl_wmem_default;
- sk->sk_mark = fl4.flowi4_mark;
+ ipc.sockc.mark = fl4.flowi4_mark;
err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
len, 0, &ipc, &rt, MSG_DONTWAIT);
if (unlikely(err)) {
__be32 remote, __be32 local,
__be32 key)
{
- unsigned int hash;
struct ip_tunnel *t, *cand = NULL;
struct hlist_head *head;
+ struct net_device *ndev;
+ unsigned int hash;
hash = ip_tunnel_hash(key, remote);
head = &itn->tunnels[hash];
if (t && t->dev->flags & IFF_UP)
return t;
- if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP)
- return netdev_priv(itn->fb_tunnel_dev);
+ ndev = READ_ONCE(itn->fb_tunnel_dev);
+ if (ndev && ndev->flags & IFF_UP)
+ return netdev_priv(ndev);
return NULL;
}
struct ip_tunnel_net *itn;
itn = net_generic(net, tunnel->ip_tnl_net_id);
- /* fb_tunnel_dev will be unregisted in net-exit call. */
- if (itn->fb_tunnel_dev != dev)
- ip_tunnel_del(itn, netdev_priv(dev));
+ ip_tunnel_del(itn, netdev_priv(dev));
+ if (itn->fb_tunnel_dev == dev)
+ WRITE_ONCE(itn->fb_tunnel_dev, NULL);
dst_cache_reset(&tunnel->dst_cache);
}
static_branch_dec(&ip_tunnel_metadata_cnt);
}
EXPORT_SYMBOL_GPL(ip_tunnel_unneed_metadata);
+
+/* Returns either the correct skb->protocol value, or 0 if invalid. */
+__be16 ip_tunnel_parse_protocol(const struct sk_buff *skb)
+{
+ if (skb_network_header(skb) >= skb->head &&
+ (skb_network_header(skb) + sizeof(struct iphdr)) <= skb_tail_pointer(skb) &&
+ ip_hdr(skb)->version == 4)
+ return htons(ETH_P_IP);
+ if (skb_network_header(skb) >= skb->head &&
+ (skb_network_header(skb) + sizeof(struct ipv6hdr)) <= skb_tail_pointer(skb) &&
+ ipv6_hdr(skb)->version == 6)
+ return htons(ETH_P_IPV6);
+ return 0;
+}
+EXPORT_SYMBOL(ip_tunnel_parse_protocol);
+
+const struct header_ops ip_tunnel_header_ops = { .parse_protocol = ip_tunnel_parse_protocol };
+EXPORT_SYMBOL(ip_tunnel_header_ops);
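The new header_ops is opt-in: as the hunks below show, each tunnel driver that wants it (vti and ipip here, with the IPv6 tunnels further down) assigns dev->header_ops = &ip_tunnel_header_ops in its setup routine, letting callers that consult header_ops->parse_protocol (such as raw packet sockets) derive skb->protocol for frames sent over the tunnel.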
static void vti_tunnel_setup(struct net_device *dev)
{
dev->netdev_ops = &vti_netdev_ops;
+ dev->header_ops = &ip_tunnel_header_ops;
dev->type = ARPHRD_TUNNEL;
ip_tunnel_setup(dev, vti_net_id);
}
static void ipip_tunnel_setup(struct net_device *dev)
{
dev->netdev_ops = &ipip_netdev_ops;
+ dev->header_ops = &ip_tunnel_header_ops;
dev->type = ARPHRD_TUNNEL;
dev->flags = IFF_NOARP;
return ret;
}
+void ipt_unregister_table_pre_exit(struct net *net, struct xt_table *table,
+ const struct nf_hook_ops *ops)
+{
+ nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
+}
+
+void ipt_unregister_table_exit(struct net *net, struct xt_table *table)
+{
+ __ipt_unregister_table(net, table);
+}
+
void ipt_unregister_table(struct net *net, struct xt_table *table,
const struct nf_hook_ops *ops)
{
if (ops)
- nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
+ ipt_unregister_table_pre_exit(net, table, ops);
__ipt_unregister_table(net, table);
}
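The pre_exit/exit split matters because of pernet teardown ordering: all .pre_exit handlers run, an RCU grace period elapses, and only then do the .exit handlers run, so the hooks are unhooked and no packets are still traversing the table by the time it is freed. A minimal sketch of the resulting pattern (example_table and example_ops are hypothetical names; the real users follow below):

    static void __net_exit example_net_pre_exit(struct net *net)
    {
        /* step 1: detach the hooks so packets stop entering the table */
        ipt_unregister_table_pre_exit(net, net->ipv4.example_table,
                                      example_ops);
    }

    static void __net_exit example_net_exit(struct net *net)
    {
        /* step 2: after the implicit RCU sync, free the table itself */
        ipt_unregister_table_exit(net, net->ipv4.example_table);
    }

    static struct pernet_operations example_net_ops = {
        .pre_exit = example_net_pre_exit,
        .exit     = example_net_exit,
    };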
EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
+EXPORT_SYMBOL(ipt_unregister_table_pre_exit);
+EXPORT_SYMBOL(ipt_unregister_table_exit);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_DESCRIPTION("Intercept TCP connections and establish them using syncookies");
return 0;
}
+static void __net_exit iptable_filter_net_pre_exit(struct net *net)
+{
+ if (net->ipv4.iptable_filter)
+ ipt_unregister_table_pre_exit(net, net->ipv4.iptable_filter,
+ filter_ops);
+}
+
static void __net_exit iptable_filter_net_exit(struct net *net)
{
if (!net->ipv4.iptable_filter)
return;
- ipt_unregister_table(net, net->ipv4.iptable_filter, filter_ops);
+ ipt_unregister_table_exit(net, net->ipv4.iptable_filter);
net->ipv4.iptable_filter = NULL;
}
static struct pernet_operations iptable_filter_net_ops = {
.init = iptable_filter_net_init,
+ .pre_exit = iptable_filter_net_pre_exit,
.exit = iptable_filter_net_exit,
};
return ret;
}
+static void __net_exit iptable_mangle_net_pre_exit(struct net *net)
+{
+ if (net->ipv4.iptable_mangle)
+ ipt_unregister_table_pre_exit(net, net->ipv4.iptable_mangle,
+ mangle_ops);
+}
+
static void __net_exit iptable_mangle_net_exit(struct net *net)
{
if (!net->ipv4.iptable_mangle)
return;
- ipt_unregister_table(net, net->ipv4.iptable_mangle, mangle_ops);
+ ipt_unregister_table_exit(net, net->ipv4.iptable_mangle);
net->ipv4.iptable_mangle = NULL;
}
static struct pernet_operations iptable_mangle_net_ops = {
+ .pre_exit = iptable_mangle_net_pre_exit,
.exit = iptable_mangle_net_exit,
};
return ret;
}
+static void __net_exit iptable_nat_net_pre_exit(struct net *net)
+{
+ if (net->ipv4.nat_table)
+ ipt_nat_unregister_lookups(net);
+}
+
static void __net_exit iptable_nat_net_exit(struct net *net)
{
if (!net->ipv4.nat_table)
return;
- ipt_nat_unregister_lookups(net);
- ipt_unregister_table(net, net->ipv4.nat_table, NULL);
+ ipt_unregister_table_exit(net, net->ipv4.nat_table);
net->ipv4.nat_table = NULL;
}
static struct pernet_operations iptable_nat_net_ops = {
+ .pre_exit = iptable_nat_net_pre_exit,
.exit = iptable_nat_net_exit,
};
return ret;
}
+static void __net_exit iptable_raw_net_pre_exit(struct net *net)
+{
+ if (net->ipv4.iptable_raw)
+ ipt_unregister_table_pre_exit(net, net->ipv4.iptable_raw,
+ rawtable_ops);
+}
+
static void __net_exit iptable_raw_net_exit(struct net *net)
{
if (!net->ipv4.iptable_raw)
return;
- ipt_unregister_table(net, net->ipv4.iptable_raw, rawtable_ops);
+ ipt_unregister_table_exit(net, net->ipv4.iptable_raw);
net->ipv4.iptable_raw = NULL;
}
static struct pernet_operations iptable_raw_net_ops = {
+ .pre_exit = iptable_raw_net_pre_exit,
.exit = iptable_raw_net_exit,
};
return ret;
}
+static void __net_exit iptable_security_net_pre_exit(struct net *net)
+{
+ if (net->ipv4.iptable_security)
+ ipt_unregister_table_pre_exit(net, net->ipv4.iptable_security,
+ sectbl_ops);
+}
+
static void __net_exit iptable_security_net_exit(struct net *net)
{
if (!net->ipv4.iptable_security)
return;
-
- ipt_unregister_table(net, net->ipv4.iptable_security, sectbl_ops);
+ ipt_unregister_table_exit(net, net->ipv4.iptable_security);
net->ipv4.iptable_security = NULL;
}
static struct pernet_operations iptable_security_net_ops = {
+ .pre_exit = iptable_security_net_pre_exit,
.exit = iptable_security_net_exit,
};
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NF_FLOWTABLE(AF_INET);
+MODULE_DESCRIPTION("Netfilter flow table support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "dup");
+MODULE_DESCRIPTION("IPv4 nftables packet duplication support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
MODULE_ALIAS_NFT_AF_EXPR(2, "fib");
+MODULE_DESCRIPTION("nftables fib / ip route lookup support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "reject");
+MODULE_DESCRIPTION("IPv4 packet rejection for nftables");
inet_sk_flowi_flags(sk), faddr, saddr, 0, 0,
sk->sk_uid);
+ fl4.fl4_icmp_type = user_icmph.type;
+ fl4.fl4_icmp_code = user_icmph.code;
+
security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
rt = ip_route_output_flow(net, &fl4, sk);
if (IS_ERR(rt)) {
const struct sk_buff *hint)
{
struct in_device *in_dev = __in_dev_get_rcu(dev);
- struct rtable *rt = (struct rtable *)hint;
+ struct rtable *rt = skb_rtable(hint);
struct net *net = dev_net(dev);
int err = -EINVAL;
u32 tag = 0;
tp->window_clamp = 0;
tp->delivered = 0;
tp->delivered_ce = 0;
+ if (icsk->icsk_ca_ops->release)
+ icsk->icsk_ca_ops->release(sk);
+ memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
tcp_set_ca_state(sk, TCP_CA_Open);
tp->is_sack_reneg = 0;
tcp_clear_retrans(tp);
#ifdef CONFIG_TCP_MD5SIG
case TCP_MD5SIG:
case TCP_MD5SIG_EXT:
- if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
- err = tp->af_specific->md5_parse(sk, optname, optval, optlen);
- else
- err = -EINVAL;
+ err = tp->af_specific->md5_parse(sk, optname, optval, optlen);
break;
#endif
case TCP_USER_TIMEOUT:
int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key)
{
+ u8 keylen = READ_ONCE(key->keylen); /* paired with WRITE_ONCE() in tcp_md5_do_add */
struct scatterlist sg;
- sg_init_one(&sg, key->key, key->keylen);
- ahash_request_set_crypt(hp->md5_req, &sg, NULL, key->keylen);
- return crypto_ahash_update(hp->md5_req);
+ sg_init_one(&sg, key->key, keylen);
+ ahash_request_set_crypt(hp->md5_req, &sg, NULL, keylen);
+
+ /* We use data_race() because tcp_md5_do_add() might change key->key under us */
+ return data_race(crypto_ahash_update(hp->md5_req));
}
EXPORT_SYMBOL(tcp_md5_hash_key);
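An editorial sketch of the race being closed: without READ_ONCE(), the compiler may load key->keylen twice, so a concurrent tcp_md5_do_add() update could make the scatterlist length and the hash length disagree.

    /* pre-patch shape: two independent loads of key->keylen */
    sg_init_one(&sg, key->key, key->keylen);                        /* load #1 */
    ahash_request_set_crypt(hp->md5_req, &sg, NULL, key->keylen);   /* load #2 */

    /* post-patch shape: one snapshot feeds both users */
    u8 keylen = READ_ONCE(key->keylen);
    sg_init_one(&sg, key->key, keylen);
    ahash_request_set_crypt(hp->md5_req, &sg, NULL, keylen);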
icsk->icsk_ca_setsockopt = 1;
memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
- if (sk->sk_state != TCP_CLOSE)
+ if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
tcp_init_congestion_control(sk);
}
if (hystart_detect & HYSTART_DELAY) {
/* obtain the minimum delay of more than sampling packets */
+ if (ca->curr_rtt > delay)
+ ca->curr_rtt = delay;
if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
- if (ca->curr_rtt > delay)
- ca->curr_rtt = delay;
-
ca->sample_cnt++;
} else {
if (ca->curr_rtt > ca->delay_min +
* cwnd may be very low (even just 1 packet), so we should ACK
* immediately.
*/
- inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
+ if (TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq)
+ inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
}
}
tcp_in_ack_event(sk, ack_ev_flags);
}
+ /* This is a deviation from RFC3168 since it states that:
+ * "When the TCP data sender is ready to set the CWR bit after reducing
+ * the congestion window, it SHOULD set the CWR bit only on the first
+ * new data packet that it transmits."
+ * We accept CWR on pure ACKs to be more robust
+ * with widely-deployed TCP implementations that do this.
+ */
+ tcp_ecn_accept_cwr(sk, skb);
+
/* We passed data and got it acked, remove any soft error
* log. Something worked...
*/
if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP);
+ sk->sk_data_ready(sk);
tcp_drop(sk, skb);
return;
}
skb_dst_drop(skb);
__skb_pull(skb, tcp_hdr(skb)->doff * 4);
- tcp_ecn_accept_cwr(sk, skb);
-
tp->rx_opt.dsack = 0;
/* Queue data for delivery to the user.
sk_forced_mem_schedule(sk, skb->truesize);
else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP);
+ sk->sk_data_ready(sk);
goto drop;
}
key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index);
if (key) {
- /* Pre-existing entry - just update that one. */
- memcpy(key->key, newkey, newkeylen);
- key->keylen = newkeylen;
+		/* Pre-existing entry - just update that one.
+		 * Note that the key might be used concurrently.
+		 * data_race() tells KCSAN that we do not care about
+		 * key mismatches, since changing the MD5 key on live
+		 * flows can lead to packet drops.
+		 */
+ data_race(memcpy(key->key, newkey, newkeylen));
+
+		/* Pairs with READ_ONCE() in tcp_md5_hash_key().
+		 * Also note that a reader could catch the new key->keylen
+		 * value but the old key->key[]; this is why we use
+		 * __GFP_ZERO at sock_kmalloc() time below.
+		 */
+ WRITE_ONCE(key->keylen, newkeylen);
+
return 0;
}
rcu_assign_pointer(tp->md5sig_info, md5sig);
}
- key = sock_kmalloc(sk, sizeof(*key), gfp);
+ key = sock_kmalloc(sk, sizeof(*key), gfp | __GFP_ZERO);
if (!key)
return -ENOMEM;
if (!tcp_alloc_md5sig_pool()) {
unsigned int mss, struct sk_buff *skb,
struct tcp_out_options *opts,
const struct tcp_md5sig_key *md5,
- struct tcp_fastopen_cookie *foc)
+ struct tcp_fastopen_cookie *foc,
+ enum tcp_synack_type synack_type)
{
struct inet_request_sock *ireq = inet_rsk(req);
unsigned int remaining = MAX_TCP_OPTION_SPACE;
* rather than TS in order to fit in better with old,
* buggy kernels, but that was deemed to be unnecessary.
*/
- ireq->tstamp_ok &= !ireq->sack_ok;
+ if (synack_type != TCP_SYNACK_COOKIE)
+ ireq->tstamp_ok &= !ireq->sack_ok;
}
#endif
#endif
skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4);
tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5,
- foc) + sizeof(*th);
+ foc, synack_type) + sizeof(*th);
skb_push(skb, tcp_header_size);
skb_reset_transport_header(skb);
config INET6_AH
tristate "IPv6: AH transformation"
- select XFRM_ALGO
- select CRYPTO
- select CRYPTO_HMAC
- select CRYPTO_MD5
- select CRYPTO_SHA1
+ select XFRM_AH
help
- Support for IPsec AH.
+ Support for IPsec AH (Authentication Header).
+
+ AH can be used with various authentication algorithms. Besides
+ enabling AH support itself, this option enables the generic
+ implementations of the algorithms that RFC 8221 lists as MUST be
+ implemented. If you need any other algorithms, you'll need to enable
+ them in the crypto API. You should also enable accelerated
+ implementations of any needed algorithms when available.
If unsure, say Y.
config INET6_ESP
tristate "IPv6: ESP transformation"
- select XFRM_ALGO
- select CRYPTO
- select CRYPTO_AUTHENC
- select CRYPTO_HMAC
- select CRYPTO_MD5
- select CRYPTO_CBC
- select CRYPTO_SHA1
- select CRYPTO_DES
- select CRYPTO_ECHAINIV
+ select XFRM_ESP
help
- Support for IPsec ESP.
+ Support for IPsec ESP (Encapsulating Security Payload).
+
+ ESP can be used with various encryption and authentication algorithms.
+ Besides enabling ESP support itself, this option enables the generic
+ implementations of the algorithms that RFC 8221 lists as MUST be
+ implemented. If you need any other algorithms, you'll need to enable
+ them in the crypto API. You should also enable accelerated
+ implementations of any needed algorithms when available.
If unsure, say Y.
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET6, XFRM_PROTO_ESP);
+MODULE_DESCRIPTION("IPV6 GSO/GRO offload support");
module_exit(fou6_fini);
MODULE_AUTHOR("Tom Herbert <therbert@google.com>");
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Foo over UDP (IPv6)");
fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, NULL);
security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
- sk->sk_mark = mark;
np = inet6_sk(sk);
if (!icmpv6_xrlim_allow(sk, type, &fl6))
fl6.flowi6_oif = np->ucast_oif;
ipcm6_init_sk(&ipc6, np);
+ ipc6.sockc.mark = mark;
fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
dst = icmpv6_route_lookup(net, skb, sk, &fl6);
sk = icmpv6_xmit_lock(net);
if (!sk)
goto out_bh_enable;
- sk->sk_mark = mark;
np = inet6_sk(sk);
if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
ipcm6_init_sk(&ipc6, np);
ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
ipc6.tclass = ipv6_get_dsfield(ipv6_hdr(skb));
+ ipc6.sockc.mark = mark;
if (ip6_append_data(sk, icmpv6_getfrag, &msg,
skb->len + sizeof(struct icmp6hdr),
module_exit(ila_fini);
MODULE_AUTHOR("Tom Herbert <tom@herbertland.com>");
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IPv6: Identifier Locator Addressing (ILA)");
gre_proto == htons(ETH_P_ERSPAN2)) ?
ARPHRD_ETHER : ARPHRD_IP6GRE;
int score, cand_score = 4;
+ struct net_device *ndev;
for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
if (!ipv6_addr_equal(local, &t->parms.laddr) ||
if (t && t->dev->flags & IFF_UP)
return t;
- dev = ign->fb_tunnel_dev;
- if (dev && dev->flags & IFF_UP)
- return netdev_priv(dev);
+ ndev = READ_ONCE(ign->fb_tunnel_dev);
+ if (ndev && ndev->flags & IFF_UP)
+ return netdev_priv(ndev);
return NULL;
}
ip6gre_tunnel_unlink_md(ign, t);
ip6gre_tunnel_unlink(ign, t);
+ if (ign->fb_tunnel_dev == dev)
+ WRITE_ONCE(ign->fb_tunnel_dev, NULL);
dst_cache_reset(&t->dst_cache);
dev_put(dev);
}
static void ip6_tnl_dev_setup(struct net_device *dev)
{
dev->netdev_ops = &ip6_tnl_netdev_ops;
+ dev->header_ops = &ip_tunnel_header_ops;
dev->needs_free_netdev = true;
dev->priv_destructor = ip6_dev_free;
static void vti6_dev_setup(struct net_device *dev)
{
dev->netdev_ops = &vti6_netdev_ops;
+ dev->header_ops = &ip_tunnel_header_ops;
dev->needs_free_netdev = true;
dev->priv_destructor = vti6_dev_free;
return ret;
}
+void ip6t_unregister_table_pre_exit(struct net *net, struct xt_table *table,
+ const struct nf_hook_ops *ops)
+{
+ nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
+}
+
+void ip6t_unregister_table_exit(struct net *net, struct xt_table *table)
+{
+ __ip6t_unregister_table(net, table);
+}
+
void ip6t_unregister_table(struct net *net, struct xt_table *table,
const struct nf_hook_ops *ops)
{
if (ops)
- nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
+ ip6t_unregister_table_pre_exit(net, table, ops);
__ip6t_unregister_table(net, table);
}
EXPORT_SYMBOL(ip6t_register_table);
EXPORT_SYMBOL(ip6t_unregister_table);
+EXPORT_SYMBOL(ip6t_unregister_table_pre_exit);
+EXPORT_SYMBOL(ip6t_unregister_table_exit);
EXPORT_SYMBOL(ip6t_do_table);
module_init(ip6_tables_init);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_DESCRIPTION("Intercept IPv6 TCP connections and establish them using syncookies");
return 0;
}
+static void __net_exit ip6table_filter_net_pre_exit(struct net *net)
+{
+ if (net->ipv6.ip6table_filter)
+ ip6t_unregister_table_pre_exit(net, net->ipv6.ip6table_filter,
+ filter_ops);
+}
+
static void __net_exit ip6table_filter_net_exit(struct net *net)
{
if (!net->ipv6.ip6table_filter)
return;
- ip6t_unregister_table(net, net->ipv6.ip6table_filter, filter_ops);
+ ip6t_unregister_table_exit(net, net->ipv6.ip6table_filter);
net->ipv6.ip6table_filter = NULL;
}
static struct pernet_operations ip6table_filter_net_ops = {
.init = ip6table_filter_net_init,
+ .pre_exit = ip6table_filter_net_pre_exit,
.exit = ip6table_filter_net_exit,
};
return ret;
}
+static void __net_exit ip6table_mangle_net_pre_exit(struct net *net)
+{
+ if (net->ipv6.ip6table_mangle)
+ ip6t_unregister_table_pre_exit(net, net->ipv6.ip6table_mangle,
+ mangle_ops);
+}
+
static void __net_exit ip6table_mangle_net_exit(struct net *net)
{
if (!net->ipv6.ip6table_mangle)
return;
- ip6t_unregister_table(net, net->ipv6.ip6table_mangle, mangle_ops);
+ ip6t_unregister_table_exit(net, net->ipv6.ip6table_mangle);
net->ipv6.ip6table_mangle = NULL;
}
static struct pernet_operations ip6table_mangle_net_ops = {
+ .pre_exit = ip6table_mangle_net_pre_exit,
.exit = ip6table_mangle_net_exit,
};
return ret;
}
+static void __net_exit ip6table_nat_net_pre_exit(struct net *net)
+{
+ if (net->ipv6.ip6table_nat)
+ ip6t_nat_unregister_lookups(net);
+}
+
static void __net_exit ip6table_nat_net_exit(struct net *net)
{
if (!net->ipv6.ip6table_nat)
return;
- ip6t_nat_unregister_lookups(net);
- ip6t_unregister_table(net, net->ipv6.ip6table_nat, NULL);
+ ip6t_unregister_table_exit(net, net->ipv6.ip6table_nat);
net->ipv6.ip6table_nat = NULL;
}
static struct pernet_operations ip6table_nat_net_ops = {
+ .pre_exit = ip6table_nat_net_pre_exit,
.exit = ip6table_nat_net_exit,
};
return ret;
}
+static void __net_exit ip6table_raw_net_pre_exit(struct net *net)
+{
+ if (net->ipv6.ip6table_raw)
+ ip6t_unregister_table_pre_exit(net, net->ipv6.ip6table_raw,
+ rawtable_ops);
+}
+
static void __net_exit ip6table_raw_net_exit(struct net *net)
{
if (!net->ipv6.ip6table_raw)
return;
- ip6t_unregister_table(net, net->ipv6.ip6table_raw, rawtable_ops);
+ ip6t_unregister_table_exit(net, net->ipv6.ip6table_raw);
net->ipv6.ip6table_raw = NULL;
}
static struct pernet_operations ip6table_raw_net_ops = {
+ .pre_exit = ip6table_raw_net_pre_exit,
.exit = ip6table_raw_net_exit,
};
return ret;
}
+static void __net_exit ip6table_security_net_pre_exit(struct net *net)
+{
+ if (net->ipv6.ip6table_security)
+ ip6t_unregister_table_pre_exit(net, net->ipv6.ip6table_security,
+ sectbl_ops);
+}
+
static void __net_exit ip6table_security_net_exit(struct net *net)
{
if (!net->ipv6.ip6table_security)
return;
- ip6t_unregister_table(net, net->ipv6.ip6table_security, sectbl_ops);
+ ip6t_unregister_table_exit(net, net->ipv6.ip6table_security);
net->ipv6.ip6table_security = NULL;
}
static struct pernet_operations ip6table_security_net_ops = {
+ .pre_exit = ip6table_security_net_pre_exit,
.exit = ip6table_security_net_exit,
};
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NF_FLOWTABLE(AF_INET6);
+MODULE_DESCRIPTION("Netfilter flow table IPv6 module");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "dup");
+MODULE_DESCRIPTION("IPv6 nftables packet duplication support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
MODULE_ALIAS_NFT_AF_EXPR(10, "fib");
+MODULE_DESCRIPTION("nftables fib / ipv6 route lookup support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "reject");
+MODULE_DESCRIPTION("IPv6 packet rejection for nftables");
struct fib6_info *sibling, *next_sibling;
struct fib6_info *match = res->f6i;
- if ((!match->fib6_nsiblings && !match->nh) || have_oif_match)
+ if (!match->nh && (!match->fib6_nsiblings || have_oif_match))
goto out;
+ if (match->nh && have_oif_match && res->nh)
+ return;
+
/* We might have already computed the hash for ICMPv6 errors. In such
* case it will always be non-zero. Otherwise now is the time to do it.
*/
if ((flags & RTF_REJECT) ||
(dev && (dev->flags & IFF_LOOPBACK) &&
!(addr_type & IPV6_ADDR_LOOPBACK) &&
- !(flags & RTF_LOCAL)))
+ !(flags & (RTF_ANYCAST | RTF_LOCAL))))
return true;
return false;
int t_hlen = tunnel->hlen + sizeof(struct iphdr);
dev->netdev_ops = &ipip6_netdev_ops;
+ dev->header_ops = &ip_tunnel_header_ops;
dev->needs_free_netdev = true;
dev->priv_destructor = ipip6_dev_free;
/* Queue the packet to IP for output */
skb->ignore_df = 1;
+ skb_dst_drop(skb);
#if IS_ENABLED(CONFIG_IPV6)
if (l2tp_sk_is_v6(tunnel->sock))
error = inet6_csk_xmit(tunnel->sock, skb, NULL);
goto out_unlock;
}
- /* Get routing info from the tunnel socket */
- skb_dst_drop(skb);
- skb_dst_set(skb, sk_dst_check(sk, 0));
-
inet = inet_sk(sk);
fl = &inet->cork.fl;
switch (tunnel->encap) {
if (!sock_flag(sk, SOCK_ZAPPED))
goto out;
+ if (!addr->sllc_arphrd)
+ addr->sllc_arphrd = ARPHRD_ETHER;
+ if (addr->sllc_arphrd != ARPHRD_ETHER)
+ goto out;
rc = -ENODEV;
if (sk->sk_bound_dev_if) {
llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if);
if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr)))
goto out;
rc = -EAFNOSUPPORT;
- if (unlikely(addr->sllc_family != AF_LLC))
+ if (!addr->sllc_arphrd)
+ addr->sllc_arphrd = ARPHRD_ETHER;
+ if (unlikely(addr->sllc_family != AF_LLC || addr->sllc_arphrd != ARPHRD_ETHER))
goto out;
dprintk("%s: binding %02X\n", __func__, addr->sllc_sap);
rc = -ENODEV;
if (sk->sk_bound_dev_if) {
llc->dev = dev_get_by_index_rcu(&init_net, sk->sk_bound_dev_if);
if (llc->dev) {
- if (!addr->sllc_arphrd)
- addr->sllc_arphrd = llc->dev->type;
if (is_zero_ether_addr(addr->sllc_mac))
memcpy(addr->sllc_mac, llc->dev->dev_addr,
IFHWADDRLEN);
ttl, lifetime, 0, ifmsh->preq_id++, sdata);
spin_lock_bh(&mpath->state_lock);
- if (mpath->flags & MESH_PATH_DELETED) {
- spin_unlock_bh(&mpath->state_lock);
- goto enddiscovery;
- }
- mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);
+ if (!(mpath->flags & MESH_PATH_DELETED))
+ mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);
spin_unlock_bh(&mpath->state_lock);
enddiscovery:
static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
{
+ struct ieee80211_hdr *hdr = (void *)rx->skb->data;
struct sk_buff *skb = rx->skb;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
if (status->flag & RX_FLAG_DECRYPTED)
return 0;
+ /* check mesh EAPOL frames first */
+ if (unlikely(rx->sta && ieee80211_vif_is_mesh(&rx->sdata->vif) &&
+ ieee80211_is_data(fc))) {
+ struct ieee80211s_hdr *mesh_hdr;
+ u16 hdr_len = ieee80211_hdrlen(fc);
+ u16 ethertype_offset;
+ __be16 ethertype;
+
+ if (!ether_addr_equal(hdr->addr1, rx->sdata->vif.addr))
+ goto drop_check;
+
+ /* make sure fixed part of mesh header is there, also checks skb len */
+ if (!pskb_may_pull(rx->skb, hdr_len + 6))
+ goto drop_check;
+
+ mesh_hdr = (struct ieee80211s_hdr *)(skb->data + hdr_len);
+ ethertype_offset = hdr_len + ieee80211_get_mesh_hdrlen(mesh_hdr) +
+ sizeof(rfc1042_header);
+
+		if (skb_copy_bits(rx->skb, ethertype_offset, &ethertype, 2) == 0 &&
+ ethertype == rx->sdata->control_port_protocol)
+ return 0;
+ }
+
+drop_check:
/* Drop unencrypted frames if key is set. */
if (unlikely(!ieee80211_has_protected(fc) &&
!ieee80211_is_any_nullfunc(fc) &&
u64 cookie = IEEE80211_SKB_CB(skb)->ack.cookie;
struct ieee80211_sub_if_data *sdata;
struct ieee80211_hdr *hdr = (void *)skb->data;
+ __be16 ethertype = 0;
+
+ if (skb->len >= ETH_HLEN && skb->protocol == cpu_to_be16(ETH_P_802_3))
+		skb_copy_bits(skb, 2 * ETH_ALEN, &ethertype, ETH_TLEN);
rcu_read_lock();
sdata = ieee80211_sdata_from_skb(local, skb);
if (sdata) {
- if (ieee80211_is_any_nullfunc(hdr->frame_control))
+ if (ethertype == sdata->control_port_protocol ||
+ ethertype == cpu_to_be16(ETH_P_PREAUTH))
+ cfg80211_control_port_tx_status(&sdata->wdev,
+ cookie,
+ skb->data,
+ skb->len,
+ acked,
+ GFP_ATOMIC);
+ else if (ieee80211_is_any_nullfunc(hdr->frame_control))
cfg80211_probe_status(sdata->dev, hdr->addr1,
cookie, acked,
info->status.ack_signal,
skb->data, skb->len,
acked, GFP_ATOMIC);
else
- cfg80211_control_port_tx_status(&sdata->wdev,
- cookie,
- skb->data,
- skb->len,
- acked,
- GFP_ATOMIC);
+ pr_warn("Unknown status report in ack skb\n");
+
}
rcu_read_unlock();
skb_list_walk_safe(skb, skb, next) {
skb_mark_not_on_list(skb);
+ if (skb->protocol == sdata->control_port_protocol)
+ ctrl_flags |= IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP;
+
skb = ieee80211_build_hdr(sdata, skb, info_flags,
sta, ctrl_flags, cookie);
if (IS_ERR(skb)) {
(!sta || !test_sta_flag(sta, WLAN_STA_TDLS_PEER)))
ra = sdata->u.mgd.bssid;
- if (!is_valid_ether_addr(ra))
+ if (is_zero_ether_addr(ra))
goto out_free;
multicast = is_multicast_ether_addr(ra);
return -EINVAL;
if (proto == sdata->control_port_protocol)
- ctrl_flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
+ ctrl_flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO |
+ IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP;
if (unencrypted)
flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
*/
subflow->snd_isn = TCP_SKB_CB(skb)->end_seq;
if (subflow->request_mptcp) {
- pr_debug("local_key=%llu", subflow->local_key);
opts->suboptions = OPTION_MPTCP_MPC_SYN;
- opts->sndr_key = subflow->local_key;
*size = TCPOLEN_MPTCP_MPC_SYN;
return true;
} else if (subflow->request_join) {
}
static void mptcp_write_data_fin(struct mptcp_subflow_context *subflow,
- struct mptcp_ext *ext)
+ struct sk_buff *skb, struct mptcp_ext *ext)
{
- if (!ext->use_map) {
+ if (!ext->use_map || !skb->len) {
/* RFC6824 requires a DSS mapping with specific values
* if DATA_FIN is set but no data payload is mapped
*/
opts->ext_copy = *mpext;
if (skb && tcp_fin && subflow->data_fin_tx_enable)
- mptcp_write_data_fin(subflow, &opts->ext_copy);
+ mptcp_write_data_fin(subflow, skb, &opts->ext_copy);
ret = true;
}
u64 thmac;
u32 local_nonce;
u32 remote_nonce;
+ struct mptcp_sock *msk;
};
static inline struct mptcp_subflow_request_sock *
pr_debug("subflow_req=%p", subflow_req);
+ if (subflow_req->msk)
+ sock_put((struct sock *)subflow_req->msk);
+
if (subflow_req->mp_capable)
mptcp_token_destroy_request(subflow_req->token);
tcp_request_sock_ops.destructor(req);
}
/* validate received token and create truncated hmac and nonce for SYN-ACK */
-static bool subflow_token_join_request(struct request_sock *req,
- const struct sk_buff *skb)
+static struct mptcp_sock *subflow_token_join_request(struct request_sock *req,
+ const struct sk_buff *skb)
{
struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
u8 hmac[SHA256_DIGEST_SIZE];
msk = mptcp_token_get_sock(subflow_req->token);
if (!msk) {
SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
- return false;
+ return NULL;
}
local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
if (local_id < 0) {
sock_put((struct sock *)msk);
- return false;
+ return NULL;
}
subflow_req->local_id = local_id;
subflow_req->remote_nonce, hmac);
subflow_req->thmac = get_unaligned_be64(hmac);
-
- sock_put((struct sock *)msk);
- return true;
+ return msk;
}
static void subflow_init_req(struct request_sock *req,
subflow_req->mp_capable = 0;
subflow_req->mp_join = 0;
+ subflow_req->msk = NULL;
#ifdef CONFIG_TCP_MD5SIG
/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
subflow_req->remote_id = mp_opt.join_id;
subflow_req->token = mp_opt.token;
subflow_req->remote_nonce = mp_opt.nonce;
- pr_debug("token=%u, remote_nonce=%u", subflow_req->token,
- subflow_req->remote_nonce);
- if (!subflow_token_join_request(req, skb)) {
- subflow_req->mp_join = 0;
- // @@ need to trigger RST
- }
+ subflow_req->msk = subflow_token_join_request(req, skb);
+ pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
+ subflow_req->remote_nonce, subflow_req->msk);
}
}
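Net effect of this hunk: the msk reference obtained while validating the join token is now kept in subflow_req->msk instead of being dropped right away. It is released either by the request destructor above or, on a successful join, handed over to the subflow (note the subflow_req->msk = NULL transfer further below), which also lets subflow_hmac_valid() reuse it instead of doing a second token lookup.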
const struct mptcp_subflow_request_sock *subflow_req;
u8 hmac[SHA256_DIGEST_SIZE];
struct mptcp_sock *msk;
- bool ret;
subflow_req = mptcp_subflow_rsk(req);
- msk = mptcp_token_get_sock(subflow_req->token);
+ msk = subflow_req->msk;
if (!msk)
return false;
subflow_req->remote_nonce,
subflow_req->local_nonce, hmac);
- ret = true;
- if (crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN))
- ret = false;
-
- sock_put((struct sock *)msk);
- return ret;
+ return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
}
static void mptcp_sock_destruct(struct sock *sk)
struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
struct mptcp_subflow_request_sock *subflow_req;
struct mptcp_options_received mp_opt;
- bool fallback_is_fatal = false;
+ bool fallback, fallback_is_fatal;
struct sock *new_msk = NULL;
- bool fallback = false;
struct sock *child;
pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
- /* we need later a valid 'mp_capable' value even when options are not
- * parsed
+ /* After child creation we must look for 'mp_capable' even when options
+ * are not parsed
*/
mp_opt.mp_capable = 0;
- if (tcp_rsk(req)->is_mptcp == 0)
+
+ /* hopefully temporary handling for MP_JOIN+syncookie */
+ subflow_req = mptcp_subflow_rsk(req);
+ fallback_is_fatal = subflow_req->mp_join;
+ fallback = !tcp_rsk(req)->is_mptcp;
+ if (fallback)
goto create_child;
/* if the sk is MP_CAPABLE, we try to fetch the client key */
- subflow_req = mptcp_subflow_rsk(req);
if (subflow_req->mp_capable) {
if (TCP_SKB_CB(skb)->seq != subflow_req->ssn_offset + 1) {
/* here we can receive and accept an in-window,
if (!new_msk)
fallback = true;
} else if (subflow_req->mp_join) {
- fallback_is_fatal = true;
mptcp_get_options(skb, &mp_opt);
if (!mp_opt.mp_join ||
!subflow_hmac_valid(req, &mp_opt)) {
SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
- return NULL;
+ fallback = true;
}
}
} else if (ctx->mp_join) {
struct mptcp_sock *owner;
- owner = mptcp_token_get_sock(ctx->token);
+ owner = subflow_req->msk;
if (!owner)
goto dispose_child;
+ /* move the msk reference ownership to the subflow */
+ subflow_req->msk = NULL;
ctx->conn = (struct sock *)owner;
if (!mptcp_finish_join(child))
goto dispose_child;
set->variant = &bitmap_ip;
if (!init_map_ip(set, map, first_ip, last_ip,
elements, hosts, netmask)) {
- kfree(map);
+ ip_set_free(map);
return -ENOMEM;
}
if (tb[IPSET_ATTR_TIMEOUT]) {
map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
set->variant = &bitmap_ipmac;
if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
- kfree(map);
+ ip_set_free(map);
return -ENOMEM;
}
if (tb[IPSET_ATTR_TIMEOUT]) {
map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
set->variant = &bitmap_port;
if (!init_map_port(set, map, first_port, last_port)) {
- kfree(map);
+ ip_set_free(map);
return -ENOMEM;
}
if (tb[IPSET_ATTR_TIMEOUT]) {
for (id = 0; id < IPSET_EXT_ID_MAX; id++) {
if (!add_extension(id, cadt_flags, tb))
continue;
+ if (align < ip_set_extensions[id].align)
+ align = ip_set_extensions[id].align;
len = ALIGN(len, ip_set_extensions[id].align);
set->offset[id] = len;
set->extensions |= ip_set_extensions[id].type;
}
t->hregion = ip_set_alloc(ahash_sizeof_regions(htable_bits));
if (!t->hregion) {
- kfree(t);
+ ip_set_free(t);
ret = -ENOMEM;
goto out;
}
}
t->hregion = ip_set_alloc(ahash_sizeof_regions(hbits));
if (!t->hregion) {
- kfree(t);
+ ip_set_free(t);
kfree(h);
return -ENOMEM;
}
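[Editor's note: these kfree() -> ip_set_free() conversions all enforce one pairing rule: buffers from ip_set_alloc() may have come from vmalloc rather than the slab, so they must be released by a routine that checks the origin. A minimal sketch using the generic kvmalloc()/kvfree() kernel helpers, which behave this way:]

/* kvmalloc() tries kmalloc first and falls back to vmalloc for large
 * sizes; kvfree() releases either kind. Freeing vmalloc memory with a
 * plain kfree() -- the bug the hunks above fix -- corrupts the slab.
 */
static void *table_alloc(size_t len)
{
	return kvmalloc(len, GFP_KERNEL);
}

static void table_free(void *p)
{
	kvfree(p);
}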
err = __nf_conntrack_update(net, skb, ct, ctinfo);
if (err < 0)
return err;
+
+ ct = nf_ct_get(skb, &ctinfo);
}
return nf_confirm_cthelper(skb, ct, ctinfo);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_DESCRIPTION("Netfilter packet duplication support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_DESCRIPTION("Netfilter flow table module");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NF_FLOWTABLE(1); /* NFPROTO_INET */
+MODULE_DESCRIPTION("Netfilter flow table mixed IPv4/IPv6 module");
nf_flow_table_gc_cleanup(flowtable, dev);
down_write(&flowtable->flow_block_lock);
list_del(&block_cb->list);
+ list_del(&block_cb->driver_list);
flow_block_cb_free(block_cb);
up_write(&flowtable->flow_block_lock);
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_DESCRIPTION("nftables SYNPROXY expression support");
nft_flow_block_offload_init(&bo, dev_net(dev), FLOW_BLOCK_UNBIND,
basechain, &extack);
mutex_lock(&net->nft.commit_mutex);
+ list_del(&block_cb->driver_list);
list_move(&block_cb->list, &bo.cb_list);
nft_flow_offload_unbind(&bo, basechain);
mutex_unlock(&net->nft.commit_mutex);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NETFILTER);
+MODULE_DESCRIPTION("Netfilter messages via netlink socket");
#define nfnl_dereference_protected(id) \
rcu_dereference_protected(table[(id)].subsys, \
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_EXPR("match");
MODULE_ALIAS_NFT_EXPR("target");
+MODULE_DESCRIPTION("x_tables over nftables support");
MODULE_AUTHOR("Pablo Neira Ayuso");
MODULE_ALIAS_NFT_EXPR("connlimit");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CONNLIMIT);
+MODULE_DESCRIPTION("nftables connlimit rule support");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_ALIAS_NFT_EXPR("counter");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_COUNTER);
+MODULE_DESCRIPTION("nftables counter rule support");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CT_HELPER);
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CT_TIMEOUT);
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CT_EXPECT);
+MODULE_DESCRIPTION("Netfilter nf_tables conntrack module");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_AF_EXPR(5, "dup");
+MODULE_DESCRIPTION("nftables netdev packet duplication support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
MODULE_ALIAS_NFT_AF_EXPR(1, "fib");
+MODULE_DESCRIPTION("nftables fib inet support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo M. Bermudo Garay <pablombg@gmail.com>");
MODULE_ALIAS_NFT_AF_EXPR(5, "fib");
+MODULE_DESCRIPTION("nftables netdev fib lookups support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_EXPR("flow_offload");
+MODULE_DESCRIPTION("nftables hardware flow offload module");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Laura Garcia <nevola@gmail.com>");
MODULE_ALIAS_NFT_EXPR("hash");
+MODULE_DESCRIPTION("Netfilter nftables hash module");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_ALIAS_NFT_EXPR("limit");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_LIMIT);
+MODULE_DESCRIPTION("nftables limit expression support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_ALIAS_NFT_EXPR("log");
+MODULE_DESCRIPTION("Netfilter nf_tables log module");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo@debian.org>");
MODULE_ALIAS_NFT_EXPR("masq");
+MODULE_DESCRIPTION("Netfilter nftables masquerade expression support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tomasz Bursztyka <tomasz.bursztyka@linux.intel.com>");
MODULE_ALIAS_NFT_EXPR("nat");
+MODULE_DESCRIPTION("Network Address Translation support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Laura Garcia <nevola@gmail.com>");
MODULE_ALIAS_NFT_EXPR("numgen");
+MODULE_DESCRIPTION("nftables number generator module");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_EXPR("objref");
+MODULE_DESCRIPTION("nftables stateful object reference module");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Fernando Fernandez <ffmancera@riseup.net>");
MODULE_ALIAS_NFT_EXPR("osf");
+MODULE_DESCRIPTION("nftables passive OS fingerprint support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Eric Leblond <eric@regit.org>");
MODULE_ALIAS_NFT_EXPR("queue");
+MODULE_DESCRIPTION("Netfilter nftables queue module");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_EXPR("quota");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_QUOTA);
+MODULE_DESCRIPTION("Netfilter nftables quota module");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo@debian.org>");
MODULE_ALIAS_NFT_EXPR("redir");
+MODULE_DESCRIPTION("Netfilter nftables redirect support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_DESCRIPTION("Netfilter x_tables over nftables module");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_ALIAS_NFT_AF_EXPR(1, "reject");
+MODULE_DESCRIPTION("Netfilter nftables reject inet support");
MODULE_AUTHOR("Fernando Fernandez <ffmancera@riseup.net>");
MODULE_ALIAS_NFT_EXPR("synproxy");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_SYNPROXY);
+MODULE_DESCRIPTION("nftables SYNPROXY expression support");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_EXPR("tunnel");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_TUNNEL);
+MODULE_DESCRIPTION("nftables tunnel expression support");
MODULE_ALIAS("ipt_DNAT");
MODULE_ALIAS("ip6t_SNAT");
MODULE_ALIAS("ip6t_DNAT");
+MODULE_DESCRIPTION("SNAT and DNAT targets support");
start = end = GENL_ID_VFS_DQUOT;
}
- if (family->maxattr && !family->parallel_ops) {
- family->attrbuf = kmalloc_array(family->maxattr + 1,
- sizeof(struct nlattr *),
- GFP_KERNEL);
- if (family->attrbuf == NULL) {
- err = -ENOMEM;
- goto errout_locked;
- }
- } else
- family->attrbuf = NULL;
-
family->id = idr_alloc_cyclic(&genl_fam_idr, family,
start, end + 1, GFP_KERNEL);
if (family->id < 0) {
err = family->id;
- goto errout_free;
+ goto errout_locked;
}
err = genl_validate_assign_mc_groups(family);
errout_remove:
idr_remove(&genl_fam_idr, family->id);
-errout_free:
- kfree(family->attrbuf);
errout_locked:
genl_unlock_all();
return err;
atomic_read(&genl_sk_destructing_cnt) == 0);
genl_unlock();
- kfree(family->attrbuf);
-
genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0);
return 0;
if (!family->maxattr)
return NULL;
- if (family->parallel_ops) {
- attrbuf = kmalloc_array(family->maxattr + 1,
- sizeof(struct nlattr *), GFP_KERNEL);
- if (!attrbuf)
- return ERR_PTR(-ENOMEM);
- } else {
- attrbuf = family->attrbuf;
- }
+ attrbuf = kmalloc_array(family->maxattr + 1,
+ sizeof(struct nlattr *), GFP_KERNEL);
+ if (!attrbuf)
+ return ERR_PTR(-ENOMEM);
err = __nlmsg_parse(nlh, hdrlen, attrbuf, family->maxattr,
family->policy, validate, extack);
if (err) {
- if (family->parallel_ops)
- kfree(attrbuf);
+ kfree(attrbuf);
return ERR_PTR(err);
}
return attrbuf;
}
-static void genl_family_rcv_msg_attrs_free(const struct genl_family *family,
- struct nlattr **attrbuf)
+static void genl_family_rcv_msg_attrs_free(struct nlattr **attrbuf)
{
- if (family->parallel_ops)
- kfree(attrbuf);
+ kfree(attrbuf);
}
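[Editor's note: with family->attrbuf gone, every receive path owns a heap-allocated attribute table for exactly one message, so the parallel_ops distinction disappears. A hedged sketch of the resulting lifecycle; the caller name is illustrative:]

static int demo_rcv_one(const struct genl_family *family,
			struct nlmsghdr *nlh)
{
	struct nlattr **attrs;
	int err;

	attrs = kmalloc_array(family->maxattr + 1, sizeof(*attrs),
			      GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;
	err = nlmsg_parse(nlh, GENL_HDRLEN, attrs, family->maxattr,
			  family->policy, NULL);
	/* ... dispatch to the op using attrs ... */
	kfree(attrs);	/* always freed per message, never cached */
	return err;
}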
struct genl_start_context {
no_attrs:
info = genl_dumpit_info_alloc();
if (!info) {
- genl_family_rcv_msg_attrs_free(ctx->family, attrs);
+ genl_family_rcv_msg_attrs_free(attrs);
return -ENOMEM;
}
info->family = ctx->family;
}
if (rc) {
- genl_family_rcv_msg_attrs_free(info->family, info->attrs);
+ genl_family_rcv_msg_attrs_free(info->attrs);
genl_dumpit_info_free(info);
cb->data = NULL;
}
rc = ops->done(cb);
genl_unlock();
}
- genl_family_rcv_msg_attrs_free(info->family, info->attrs);
+ genl_family_rcv_msg_attrs_free(info->attrs);
genl_dumpit_info_free(info);
return rc;
}
if (ops->done)
rc = ops->done(cb);
- genl_family_rcv_msg_attrs_free(info->family, info->attrs);
+ genl_family_rcv_msg_attrs_free(info->attrs);
genl_dumpit_info_free(info);
return rc;
}
family->post_doit(ops, skb, &info);
out:
- genl_family_rcv_msg_attrs_free(family, attrbuf);
+ genl_family_rcv_msg_attrs_free(attrbuf);
return err;
}
.netnsok = true,
};
-static int genl_bind(struct net *net, int group)
-{
- struct genl_family *f;
- int err = -ENOENT;
- unsigned int id;
-
- down_read(&cb_lock);
-
- idr_for_each_entry(&genl_fam_idr, f, id) {
- if (group >= f->mcgrp_offset &&
- group < f->mcgrp_offset + f->n_mcgrps) {
- int fam_grp = group - f->mcgrp_offset;
-
- if (!f->netnsok && net != &init_net)
- err = -ENOENT;
- else if (f->mcast_bind)
- err = f->mcast_bind(net, fam_grp);
- else
- err = 0;
- break;
- }
- }
- up_read(&cb_lock);
-
- return err;
-}
-
-static void genl_unbind(struct net *net, int group)
-{
- struct genl_family *f;
- unsigned int id;
-
- down_read(&cb_lock);
-
- idr_for_each_entry(&genl_fam_idr, f, id) {
- if (group >= f->mcgrp_offset &&
- group < f->mcgrp_offset + f->n_mcgrps) {
- int fam_grp = group - f->mcgrp_offset;
-
- if (f->mcast_unbind)
- f->mcast_unbind(net, fam_grp);
- break;
- }
- }
- up_read(&cb_lock);
-}
-
static int __net_init genl_pernet_init(struct net *net)
{
struct netlink_kernel_cfg cfg = {
.input = genl_rcv,
.flags = NL_CFG_F_NONROOT_RECV,
- .bind = genl_bind,
- .unbind = genl_unbind,
};
/* we'll bump the group number right afterwards */
struct sw_flow_key *key,
const struct nlattr *attr, bool last)
{
+ struct ovs_skb_cb *ovs_cb = OVS_CB(skb);
const struct nlattr *actions, *cpl_arg;
+ int len, max_len, rem = nla_len(attr);
const struct check_pkt_len_arg *arg;
- int rem = nla_len(attr);
bool clone_flow_key;
/* The first netlink attribute in 'attr' is always
cpl_arg = nla_data(attr);
arg = nla_data(cpl_arg);
- if (skb->len <= arg->pkt_len) {
+ len = ovs_cb->mru ? ovs_cb->mru + skb->mac_len : skb->len;
+ max_len = arg->pkt_len;
+
+ if ((skb_is_gso(skb) && skb_gso_validate_mac_len(skb, max_len)) ||
+ len <= max_len) {
/* Second netlink attribute in 'attr' is always
* 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'.
*/
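[Editor's note: for GSO packets the aggregate skb->len can exceed the configured limit even though every segment that reaches the wire fits, which is why the hunk above consults skb_gso_validate_mac_len() first. A small userspace illustration of the arithmetic:]

#include <stdio.h>

int main(void)
{
	unsigned int skb_len = 4500, gso_size = 1500, max_len = 1600;
	unsigned int segs = (skb_len + gso_size - 1) / gso_size;

	/* the aggregate length fails a naive check ... */
	printf("skb->len %u > max_len %u\n", skb_len, max_len);
	/* ... yet each on-wire segment fits */
	printf("%u segments of <= %u bytes each\n", segs, gso_size);
	return 0;
}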
{
struct qrtr_node *node = container_of(kref, struct qrtr_node, ref);
struct radix_tree_iter iter;
+ struct qrtr_tx_flow *flow;
unsigned long flags;
void __rcu **slot;
/* Free tx flow counters */
radix_tree_for_each_slot(slot, &node->qrtr_tx_flow, &iter, 0) {
+ flow = *slot;
radix_tree_iter_delete(&node->qrtr_tx_flow, &iter, slot);
- kfree(*slot);
+ kfree(flow);
}
kfree(node);
}
unsigned int ver;
size_t hdrlen;
- if (len & 3)
+ if (len == 0 || len & 3)
return -EINVAL;
skb = netdev_alloc_skb(NULL, len);
switch (ver) {
case QRTR_PROTO_VER_1:
+ if (len < sizeof(*v1))
+ goto err;
v1 = data;
hdrlen = sizeof(*v1);
size = le32_to_cpu(v1->size);
break;
case QRTR_PROTO_VER_2:
+ if (len < sizeof(*v2))
+ goto err;
v2 = data;
hdrlen = sizeof(*v2) + v2->optlen;
}
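[Editor's note: both added checks follow the same rule: verify the buffer is large enough to hold the version-specific header before any field of that header is read. A userspace sketch with a hypothetical v1 header layout:]

#include <stddef.h>
#include <stdint.h>

struct hdr_v1 {	/* hypothetical layout, for illustration only */
	uint32_t version, type, src, dst, size;
};

static int parse_v1(const void *buf, size_t len, uint32_t *size)
{
	const struct hdr_v1 *h = buf;

	if (len < sizeof(*h))	/* bounds check before any dereference */
		return -1;
	*size = h->size;
	return 0;
}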
EXPORT_SYMBOL_GPL(rds_conn_path_connect_if_down);
+/* Check connectivity of all paths */
+void rds_check_all_paths(struct rds_connection *conn)
+{
+ int i = 0;
+
+ do {
+ rds_conn_path_connect_if_down(&conn->c_path[i]);
+ } while (++i < conn->c_npaths);
+}
+
void rds_conn_connect_if_down(struct rds_connection *conn)
{
WARN_ON(conn->c_trans->t_mp_capable);
void rds_conn_path_drop(struct rds_conn_path *cpath, bool destroy);
void rds_conn_connect_if_down(struct rds_connection *conn);
void rds_conn_path_connect_if_down(struct rds_conn_path *cp);
+void rds_check_all_paths(struct rds_connection *conn);
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens,
return atomic_read(&cp->cp_state) == RDS_CONN_UP;
}
+static inline int
+rds_conn_path_down(struct rds_conn_path *cp)
+{
+ return atomic_read(&cp->cp_state) == RDS_CONN_DOWN;
+}
+
static inline int
rds_conn_up(struct rds_connection *conn)
{
goto out;
}
- rds_conn_path_connect_if_down(cpath);
+ if (rds_conn_path_down(cpath))
+ rds_check_all_paths(conn);
ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
if (ret) {
#include "rds.h"
#include "loop.h"
+static char * const rds_trans_modules[] = {
+ [RDS_TRANS_IB] = "rds_rdma",
+ [RDS_TRANS_GAP] = NULL,
+ [RDS_TRANS_TCP] = "rds_tcp",
+};
+
static struct rds_transport *transports[RDS_TRANS_COUNT];
static DECLARE_RWSEM(rds_trans_sem);
{
struct rds_transport *ret = NULL;
struct rds_transport *trans;
- unsigned int i;
down_read(&rds_trans_sem);
- for (i = 0; i < RDS_TRANS_COUNT; i++) {
- trans = transports[i];
-
- if (trans && trans->t_type == t_type &&
- (!trans->t_owner || try_module_get(trans->t_owner))) {
- ret = trans;
- break;
- }
+ trans = transports[t_type];
+ if (!trans) {
+ up_read(&rds_trans_sem);
+ if (rds_trans_modules[t_type])
+ request_module(rds_trans_modules[t_type]);
+ down_read(&rds_trans_sem);
+ trans = transports[t_type];
}
+ if (trans && trans->t_type == t_type &&
+ (!trans->t_owner || try_module_get(trans->t_owner)))
+ ret = trans;
+
up_read(&rds_trans_sem);
return ret;
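[Editor's note: reassembled, the patched lookup is a drop-lock / load-module / re-check sequence: request_module() may sleep, so the read lock is released around it and the slot is read again afterwards in case another CPU registered the transport meanwhile. A sketch under those assumptions, with module refcounting elided:]

static struct rds_transport *demo_trans_get(unsigned int t_type)
{
	struct rds_transport *trans;

	down_read(&rds_trans_sem);
	trans = transports[t_type];
	if (!trans) {
		up_read(&rds_trans_sem);
		if (rds_trans_modules[t_type])
			request_module(rds_trans_modules[t_type]); /* sleeps */
		down_read(&rds_trans_sem);
		trans = transports[t_type];	/* re-read under the lock */
	}
	up_read(&rds_trans_sem);
	return trans;
}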
#include <net/ip.h>
#include "ar-internal.h"
+static void rxrpc_dummy_notify(struct sock *sk, struct rxrpc_call *call,
+ unsigned long user_call_ID)
+{
+}
+
/*
* Preallocate a single service call, connection and peer and, if possible,
* give them a user ID and attach the user's side of the ID to them.
if (rx->discard_new_call) {
_debug("discard %lx", call->user_call_ID);
rx->discard_new_call(call, call->user_call_ID);
+ if (call->notify_rx)
+ call->notify_rx = rxrpc_dummy_notify;
rxrpc_put_call(call, rxrpc_call_put_kernel);
}
rxrpc_call_completed(call);
* confuse things
*/
annotation &= ~RXRPC_TX_ANNO_MASK;
- annotation |= RXRPC_TX_ANNO_RESENT;
+ annotation |= RXRPC_TX_ANNO_UNACK | RXRPC_TX_ANNO_RESENT;
call->rxtx_annotations[ix] = annotation;
skb = call->rxtx_buffer[ix];
ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU),
rwind, ntohl(ackinfo->jumbo_max));
+ if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
+ rwind = RXRPC_RXTX_BUFF_SIZE - 1;
if (call->tx_winsize != rwind) {
- if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
- rwind = RXRPC_RXTX_BUFF_SIZE - 1;
if (rwind > call->tx_winsize)
wake = true;
- trace_rxrpc_rx_rwind_change(call, sp->hdr.serial,
- ntohl(ackinfo->rwind), wake);
+ trace_rxrpc_rx_rwind_change(call, sp->hdr.serial, rwind, wake);
call->tx_winsize = rwind;
}
tcf_lastuse_update(&ca->tcf_tm);
bstats_update(&ca->tcf_bstats, skb);
- if (skb->protocol == htons(ETH_P_IP)) {
+ switch (skb_protocol(skb, true)) {
+ case htons(ETH_P_IP):
if (skb->len < sizeof(struct iphdr))
goto out;
proto = NFPROTO_IPV4;
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ break;
+ case htons(ETH_P_IPV6):
if (skb->len < sizeof(struct ipv6hdr))
goto out;
proto = NFPROTO_IPV6;
- } else {
+ break;
+ default:
goto out;
}
goto drop;
update_flags = params->update_flags;
- protocol = tc_skb_protocol(skb);
+ protocol = skb_protocol(skb, false);
again:
switch (protocol) {
case cpu_to_be16(ETH_P_IP):
{
u8 family = NFPROTO_UNSPEC;
- switch (skb->protocol) {
+ switch (skb_protocol(skb, true)) {
case htons(ETH_P_IP):
family = NFPROTO_IPV4;
break;
const struct nf_nat_range2 *range,
enum nf_nat_manip_type maniptype)
{
+ __be16 proto = skb_protocol(skb, true);
int hooknum, err = NF_ACCEPT;
/* See HOOK2MANIP(). */
switch (ctinfo) {
case IP_CT_RELATED:
case IP_CT_RELATED_REPLY:
- if (skb->protocol == htons(ETH_P_IP) &&
+ if (proto == htons(ETH_P_IP) &&
ip_hdr(skb)->protocol == IPPROTO_ICMP) {
if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
hooknum))
err = NF_DROP;
goto out;
- } else if (IS_ENABLED(CONFIG_IPV6) &&
- skb->protocol == htons(ETH_P_IPV6)) {
+ } else if (IS_ENABLED(CONFIG_IPV6) && proto == htons(ETH_P_IPV6)) {
__be16 frag_off;
u8 nexthdr = ipv6_hdr(skb)->nexthdr;
int hdrlen = ipv6_skip_exthdr(skb,
force = p->ct_action & TCA_CT_ACT_FORCE;
tmpl = p->tmpl;
+ tcf_lastuse_update(&c->tcf_tm);
+
if (clear) {
ct = nf_ct_get(skb, &ctinfo);
if (ct) {
MODULE_AUTHOR("Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>");
MODULE_DESCRIPTION("Connection tracking action");
MODULE_LICENSE("GPL v2");
-
action = READ_ONCE(ca->tcf_action);
wlen = skb_network_offset(skb);
- if (tc_skb_protocol(skb) == htons(ETH_P_IP)) {
+ switch (skb_protocol(skb, true)) {
+ case htons(ETH_P_IP):
wlen += sizeof(struct iphdr);
if (!pskb_may_pull(skb, wlen))
goto out;
proto = NFPROTO_IPV4;
- } else if (tc_skb_protocol(skb) == htons(ETH_P_IPV6)) {
+ break;
+ case htons(ETH_P_IPV6):
wlen += sizeof(struct ipv6hdr);
if (!pskb_may_pull(skb, wlen))
goto out;
proto = NFPROTO_IPV6;
- } else {
+ break;
+ default:
goto out;
}
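[Editor's note: all of these tc conversions swap skb->protocol (or tc_skb_protocol()) for skb_protocol(skb, true), which peels VLAN ethertypes to expose the encapsulated network protocol. Conceptually, with one level of tagging shown, a userspace sketch:]

#include <stdint.h>
#include <stdio.h>

#define ETH_P_8021Q  0x8100
#define ETH_P_8021AD 0x88A8

/* If the outer ethertype is a VLAN tag, the real payload protocol is
 * the one carried inside the tag; otherwise the outer value already is
 * the payload protocol.
 */
static uint16_t payload_proto(uint16_t outer, uint16_t inner)
{
	if (outer == ETH_P_8021Q || outer == ETH_P_8021AD)
		return inner;
	return outer;
}

int main(void)
{
	printf("0x%04x\n", payload_proto(0x8100, 0x0800)); /* -> 0x0800, IPv4 */
	return 0;
}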
return KTIME_MAX;
}
-static int gate_get_start_time(struct tcf_gate *gact, ktime_t *start)
+static void gate_get_start_time(struct tcf_gate *gact, ktime_t *start)
{
struct tcf_gate_params *param = &gact->param;
ktime_t now, base, cycle;
if (ktime_after(base, now)) {
*start = base;
- return 0;
+ return;
}
cycle = param->tcfg_cycletime;
- /* cycle time should not be zero */
- if (!cycle)
- return -EFAULT;
-
n = div64_u64(ktime_sub_ns(now, base), cycle);
*start = ktime_add_ns(base, (n + 1) * cycle);
- return 0;
}
static void gate_start_timer(struct tcf_gate *gact, ktime_t start)
return err;
}
+static void gate_setup_timer(struct tcf_gate *gact, u64 basetime,
+ enum tk_offsets tko, s32 clockid,
+ bool do_init)
+{
+ if (!do_init) {
+ if (basetime == gact->param.tcfg_basetime &&
+ tko == gact->tk_offset &&
+ clockid == gact->param.tcfg_clockid)
+ return;
+
+ spin_unlock_bh(&gact->tcf_lock);
+ hrtimer_cancel(&gact->hitimer);
+ spin_lock_bh(&gact->tcf_lock);
+ }
+ gact->param.tcfg_basetime = basetime;
+ gact->param.tcfg_clockid = clockid;
+ gact->tk_offset = tko;
+ hrtimer_init(&gact->hitimer, clockid, HRTIMER_MODE_ABS_SOFT);
+ gact->hitimer.function = gate_timer_func;
+}
+
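[Editor's note: gate_setup_timer() drops tcf_lock around hrtimer_cancel() on the re-init path. The ordering matters: hrtimer_cancel() waits for a running handler, and the handler itself takes tcf_lock, so cancelling while holding the lock could deadlock. The pattern in isolation:]

/* Sketch only: cancel a timer whose handler takes the same lock. */
spin_unlock_bh(&gact->tcf_lock);	/* let a running handler finish */
hrtimer_cancel(&gact->hitimer);		/* may block on that handler */
spin_lock_bh(&gact->tcf_lock);		/* safe to retake now */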
static int tcf_gate_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
int ovr, int bind, bool rtnl_held,
enum tk_offsets tk_offset = TK_OFFS_TAI;
struct nlattr *tb[TCA_GATE_MAX + 1];
struct tcf_chain *goto_ch = NULL;
+ u64 cycletime = 0, basetime = 0;
struct tcf_gate_params *p;
s32 clockid = CLOCK_TAI;
struct tcf_gate *gact;
struct tc_gate *parm;
int ret = 0, err;
- u64 basetime = 0;
u32 gflags = 0;
s32 prio = -1;
ktime_t start;
if (!tb[TCA_GATE_PARMS])
return -EINVAL;
+ if (tb[TCA_GATE_CLOCKID]) {
+ clockid = nla_get_s32(tb[TCA_GATE_CLOCKID]);
+ switch (clockid) {
+ case CLOCK_REALTIME:
+ tk_offset = TK_OFFS_REAL;
+ break;
+ case CLOCK_MONOTONIC:
+ tk_offset = TK_OFFS_MAX;
+ break;
+ case CLOCK_BOOTTIME:
+ tk_offset = TK_OFFS_BOOT;
+ break;
+ case CLOCK_TAI:
+ tk_offset = TK_OFFS_TAI;
+ break;
+ default:
+ NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
+ return -EINVAL;
+ }
+ }
+
parm = nla_data(tb[TCA_GATE_PARMS]);
index = parm->index;
tcf_idr_release(*a, bind);
return -EEXIST;
}
- if (ret == ACT_P_CREATED) {
- to_gate(*a)->param.tcfg_clockid = -1;
- INIT_LIST_HEAD(&(to_gate(*a)->param.entries));
- }
if (tb[TCA_GATE_PRIORITY])
prio = nla_get_s32(tb[TCA_GATE_PRIORITY]);
if (tb[TCA_GATE_FLAGS])
gflags = nla_get_u32(tb[TCA_GATE_FLAGS]);
- if (tb[TCA_GATE_CLOCKID]) {
- clockid = nla_get_s32(tb[TCA_GATE_CLOCKID]);
- switch (clockid) {
- case CLOCK_REALTIME:
- tk_offset = TK_OFFS_REAL;
- break;
- case CLOCK_MONOTONIC:
- tk_offset = TK_OFFS_MAX;
- break;
- case CLOCK_BOOTTIME:
- tk_offset = TK_OFFS_BOOT;
- break;
- case CLOCK_TAI:
- tk_offset = TK_OFFS_TAI;
- break;
- default:
- NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
- goto release_idr;
- }
- }
+ gact = to_gate(*a);
+ if (ret == ACT_P_CREATED)
+ INIT_LIST_HEAD(&gact->param.entries);
err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
if (err < 0)
goto release_idr;
- gact = to_gate(*a);
-
spin_lock_bh(&gact->tcf_lock);
p = &gact->param;
- if (tb[TCA_GATE_CYCLE_TIME]) {
- p->tcfg_cycletime = nla_get_u64(tb[TCA_GATE_CYCLE_TIME]);
- if (!p->tcfg_cycletime_ext)
- goto chain_put;
- }
+ if (tb[TCA_GATE_CYCLE_TIME])
+ cycletime = nla_get_u64(tb[TCA_GATE_CYCLE_TIME]);
if (tb[TCA_GATE_ENTRY_LIST]) {
err = parse_gate_list(tb[TCA_GATE_ENTRY_LIST], p, extack);
goto chain_put;
}
- if (!p->tcfg_cycletime) {
+ if (!cycletime) {
struct tcfg_gate_entry *entry;
ktime_t cycle = 0;
list_for_each_entry(entry, &p->entries, list)
cycle = ktime_add_ns(cycle, entry->interval);
- p->tcfg_cycletime = cycle;
+ cycletime = cycle;
+ if (!cycletime) {
+ err = -EINVAL;
+ goto chain_put;
+ }
}
+ p->tcfg_cycletime = cycletime;
if (tb[TCA_GATE_CYCLE_TIME_EXT])
p->tcfg_cycletime_ext =
nla_get_u64(tb[TCA_GATE_CYCLE_TIME_EXT]);
+ gate_setup_timer(gact, basetime, tk_offset, clockid,
+ ret == ACT_P_CREATED);
p->tcfg_priority = prio;
- p->tcfg_basetime = basetime;
- p->tcfg_clockid = clockid;
p->tcfg_flags = gflags;
-
- gact->tk_offset = tk_offset;
- hrtimer_init(&gact->hitimer, clockid, HRTIMER_MODE_ABS_SOFT);
- gact->hitimer.function = gate_timer_func;
-
- err = gate_get_start_time(gact, &start);
- if (err < 0) {
- NL_SET_ERR_MSG(extack,
- "Internal error: failed get start time");
- release_entry_list(&p->entries);
- goto chain_put;
- }
+ gate_get_start_time(gact, &start);
gact->current_close_time = start;
gact->current_gate_status = GATE_ACT_GATE_OPEN | GATE_ACT_PENDING;
if (goto_ch)
tcf_chain_put_by_act(goto_ch);
release_idr:
+ /* action is not inserted in any list: it's safe to init hitimer
+ * without taking tcf_lock.
+ */
+ if (ret == ACT_P_CREATED)
+ gate_setup_timer(gact, gact->param.tcfg_basetime,
+ gact->tk_offset, gact->param.tcfg_clockid,
+ true);
tcf_idr_release(*a, bind);
return err;
}
struct tcf_gate_params *p;
p = &gact->param;
- if (p->tcfg_clockid != -1)
- hrtimer_cancel(&gact->hitimer);
-
+ hrtimer_cancel(&gact->hitimer);
release_entry_list(&p->entries);
}
goto drop;
break;
case TCA_MPLS_ACT_PUSH:
- new_lse = tcf_mpls_get_lse(NULL, p, !eth_p_mpls(skb->protocol));
+ new_lse = tcf_mpls_get_lse(NULL, p, !eth_p_mpls(skb_protocol(skb, true)));
if (skb_mpls_push(skb, new_lse, p->tcfm_proto, mac_len,
skb->dev && skb->dev->type == ARPHRD_ETHER))
goto drop;
if (params->flags & SKBEDIT_F_INHERITDSFIELD) {
int wlen = skb_network_offset(skb);
- switch (tc_skb_protocol(skb)) {
+ switch (skb_protocol(skb, true)) {
case htons(ETH_P_IP):
wlen += sizeof(struct iphdr);
if (!pskb_may_pull(skb, wlen))
&block->flow_block, tcf_block_shared(block),
&extack);
down_write(&block->cb_lock);
+ list_del(&block_cb->driver_list);
list_move(&block_cb->list, &bo.cb_list);
up_write(&block->cb_lock);
rtnl_lock();
struct netlink_ext_ack *extack)
{
struct flow_block_offload bo = {};
- int err;
tcf_block_offload_init(&bo, dev, command, ei->binder_type,
&block->flow_block, tcf_block_shared(block),
extack);
- if (dev->netdev_ops->ndo_setup_tc)
+ if (dev->netdev_ops->ndo_setup_tc) {
+ int err;
+
err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
- else
- err = flow_indr_dev_setup_offload(dev, TC_SETUP_BLOCK, block,
- &bo, tc_block_indr_cleanup);
+ if (err < 0) {
+ if (err != -EOPNOTSUPP)
+ NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
+ return err;
+ }
- if (err < 0) {
- if (err != -EOPNOTSUPP)
- NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
- return err;
+ return tcf_block_setup(block, &bo);
}
- return tcf_block_setup(block, &bo);
+ flow_indr_dev_setup_offload(dev, TC_SETUP_BLOCK, block, &bo,
+ tc_block_indr_cleanup);
+ tcf_block_setup(block, &bo);
+
+ return -EOPNOTSUPP;
}
static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
reclassify:
#endif
for (; tp; tp = rcu_dereference_bh(tp->next)) {
- __be16 protocol = tc_skb_protocol(skb);
+ __be16 protocol = skb_protocol(skb, false);
int err;
if (tp->protocol != protocol &&
if (dst)
return ntohl(dst);
- return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
+ return addr_fold(skb_dst(skb)) ^ (__force u16)skb_protocol(skb, true);
}
static u32 flow_get_proto(const struct sk_buff *skb,
if (flow->ports.ports)
return ntohs(flow->ports.dst);
- return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
+ return addr_fold(skb_dst(skb)) ^ (__force u16)skb_protocol(skb, true);
}
static u32 flow_get_iif(const struct sk_buff *skb)
static u32 flow_get_nfct_src(const struct sk_buff *skb,
const struct flow_keys *flow)
{
- switch (tc_skb_protocol(skb)) {
+ switch (skb_protocol(skb, true)) {
case htons(ETH_P_IP):
return ntohl(CTTUPLE(skb, src.u3.ip));
case htons(ETH_P_IPV6):
static u32 flow_get_nfct_dst(const struct sk_buff *skb,
const struct flow_keys *flow)
{
- switch (tc_skb_protocol(skb)) {
+ switch (skb_protocol(skb, true)) {
case htons(ETH_P_IP):
return ntohl(CTTUPLE(skb, dst.u3.ip));
case htons(ETH_P_IPV6):
/* skb_flow_dissect() does not set n_proto in case an unknown
* protocol, so do it rather here.
*/
- skb_key.basic.n_proto = skb->protocol;
+ skb_key.basic.n_proto = skb_protocol(skb, false);
skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
fl_ct_info_to_flower_map,
};
int ret, network_offset;
- switch (tc_skb_protocol(skb)) {
+ switch (skb_protocol(skb, true)) {
case htons(ETH_P_IP):
state.pf = NFPROTO_IPV4;
if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
struct nf_hook_state state;
int ret;
- switch (tc_skb_protocol(skb)) {
+ switch (skb_protocol(skb, true)) {
case htons(ETH_P_IP):
if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
return 0;
META_COLLECTOR(int_protocol)
{
/* Let userspace take care of the byte ordering */
- dst->value = tc_skb_protocol(skb);
+ dst->value = skb_protocol(skb, false);
}
META_COLLECTOR(int_pkttype)
if (!p->link.q)
p->link.q = &noop_qdisc;
pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q);
+ p->link.vcc = NULL;
+ p->link.sock = NULL;
+ p->link.common.classid = sch->handle;
+ p->link.ref = 1;
err = tcf_block_get(&p->link.block, &p->link.filter_list, sch,
extack);
if (err)
return err;
- p->link.vcc = NULL;
- p->link.sock = NULL;
- p->link.common.classid = sch->handle;
- p->link.ref = 1;
tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch);
return 0;
}
bool rev = !skb->_nfct, upd = false;
__be32 ip;
- if (tc_skb_protocol(skb) != htons(ETH_P_IP))
+ if (skb_protocol(skb, true) != htons(ETH_P_IP))
return false;
if (!nf_ct_get_tuple_skb(&tuple, skb))
return idx + (tin << 16);
}
-static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
+static u8 cake_handle_diffserv(struct sk_buff *skb, bool wash)
{
- int wlen = skb_network_offset(skb);
+ const int offset = skb_network_offset(skb);
+ u16 *buf, buf_;
u8 dscp;
- switch (tc_skb_protocol(skb)) {
+ switch (skb_protocol(skb, true)) {
case htons(ETH_P_IP):
- wlen += sizeof(struct iphdr);
- if (!pskb_may_pull(skb, wlen) ||
- skb_try_make_writable(skb, wlen))
+ buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_);
+ if (unlikely(!buf))
return 0;
- dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
- if (wash && dscp)
+ /* ToS is in the second byte of iphdr */
+ dscp = ipv4_get_dsfield((struct iphdr *)buf) >> 2;
+
+ if (wash && dscp) {
+ const int wlen = offset + sizeof(struct iphdr);
+
+ if (!pskb_may_pull(skb, wlen) ||
+ skb_try_make_writable(skb, wlen))
+ return 0;
+
ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
+ }
+
return dscp;
case htons(ETH_P_IPV6):
- wlen += sizeof(struct ipv6hdr);
- if (!pskb_may_pull(skb, wlen) ||
- skb_try_make_writable(skb, wlen))
+ buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_);
+ if (unlikely(!buf))
return 0;
- dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
- if (wash && dscp)
+ /* Traffic class is in the first and second bytes of ipv6hdr */
+ dscp = ipv6_get_dsfield((struct ipv6hdr *)buf) >> 2;
+
+ if (wash && dscp) {
+ const int wlen = offset + sizeof(struct ipv6hdr);
+
+ if (!pskb_may_pull(skb, wlen) ||
+ skb_try_make_writable(skb, wlen))
+ return 0;
+
ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
+ }
+
return dscp;
case htons(ETH_P_ARP):
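[Editor's note: the reworked cake_handle_diffserv() reads the DSCP byte through skb_header_pointer(), which copies from a non-linear skb into a stack buffer instead of forcing a pull and copy-on-write; the expensive pskb_may_pull()/skb_try_make_writable() path now runs only when washing actually rewrites the field. A read-only sketch:]

static int demo_read_tos(const struct sk_buff *skb, int offset, u8 *tos)
{
	u16 buf_;
	const u8 *buf;

	buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_);
	if (!buf)
		return -1;
	*tos = buf[1];	/* ToS/DSCP is the second byte of the IPv4 header */
	return 0;
}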
{
struct cake_sched_data *q = qdisc_priv(sch);
u32 tin, mark;
+ bool wash;
u8 dscp;
/* Tin selection: Default to diffserv-based selection, allow overriding
- * using firewall marks or skb->priority.
+ * using firewall marks or skb->priority. Call DSCP parsing early if
+ * wash is enabled, otherwise defer to below to skip unneeded parsing.
*/
- dscp = cake_handle_diffserv(skb,
- q->rate_flags & CAKE_FLAG_WASH);
mark = (skb->mark & q->fwmark_mask) >> q->fwmark_shft;
+ wash = !!(q->rate_flags & CAKE_FLAG_WASH);
+ if (wash)
+ dscp = cake_handle_diffserv(skb, wash);
if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT)
tin = 0;
tin = q->tin_order[TC_H_MIN(skb->priority) - 1];
else {
+ if (!wash)
+ dscp = cake_handle_diffserv(skb, wash);
tin = q->tin_index[dscp];
if (unlikely(tin >= q->tin_cnt))
qdisc_watchdog_init(&q->watchdog, sch);
if (opt) {
- int err = cake_change(sch, opt, extack);
+ err = cake_change(sch, opt, extack);
if (err)
return err;
PUT_STAT_S32(BLUE_TIMER_US,
ktime_to_us(
ktime_sub(now,
- flow->cvars.blue_timer)));
+ flow->cvars.blue_timer)));
}
if (flow->cvars.dropping) {
PUT_STAT_S32(DROP_NEXT_US,
if (p->set_tc_index) {
int wlen = skb_network_offset(skb);
- switch (tc_skb_protocol(skb)) {
+ switch (skb_protocol(skb, true)) {
case htons(ETH_P_IP):
wlen += sizeof(struct iphdr);
if (!pskb_may_pull(skb, wlen) ||
index = skb->tc_index & (p->indices - 1);
pr_debug("index %d->%d\n", skb->tc_index, index);
- switch (tc_skb_protocol(skb)) {
+ switch (skb_protocol(skb, true)) {
case htons(ETH_P_IP):
ipv4_change_dsfield(ip_hdr(skb), p->mv[index].mask,
p->mv[index].value);
*/
if (p->mv[index].mask != 0xff || p->mv[index].value)
pr_warn("%s: unsupported protocol %d\n",
- __func__, ntohs(tc_skb_protocol(skb)));
+ __func__, ntohs(skb_protocol(skb, true)));
break;
}
module_exit(fq_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Fair Queue Packet Scheduler");
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Fair Queue CoDel discipline");
MODULE_AUTHOR("Terry Lam");
MODULE_AUTHOR("Nandita Dukkipati");
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Heavy-Hitter Filter (HHF)");
char haddr[MAX_ADDR_LEN];
neigh_ha_snapshot(haddr, n, dev);
- err = dev_hard_header(skb, dev, ntohs(tc_skb_protocol(skb)),
+ err = dev_hard_header(skb, dev, ntohs(skb_protocol(skb, false)),
haddr, NULL, skb->len);
if (err < 0)
int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
enum sctp_scope scope, gfp_t gfp)
{
+ struct sock *sk = asoc->base.sk;
int flags;
/* Use scoping rules to determine the subset of addresses from
* the endpoint.
*/
- flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
+ flags = (PF_INET6 == sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
+ if (!inet_v6_ipv6only(sk))
+ flags |= SCTP_ADDR4_ALLOWED;
if (asoc->peer.ipv4_address)
flags |= SCTP_ADDR4_PEERSUPP;
if (asoc->peer.ipv6_address)
* well as the remote peer.
*/
if ((((AF_INET == addr->sa.sa_family) &&
+ (flags & SCTP_ADDR4_ALLOWED) &&
(flags & SCTP_ADDR4_PEERSUPP))) ||
(((AF_INET6 == addr->sa.sa_family) &&
(flags & SCTP_ADDR6_ALLOWED) &&
* sock as well as the remote peer.
*/
if (addr->a.sa.sa_family == AF_INET &&
- !(copy_flags & SCTP_ADDR4_PEERSUPP))
+ (!(copy_flags & SCTP_ADDR4_ALLOWED) ||
+ !(copy_flags & SCTP_ADDR4_PEERSUPP)))
continue;
if (addr->a.sa.sa_family == AF_INET6 &&
(!(copy_flags & SCTP_ADDR6_ALLOWED) ||
#define SMCR_CLC_ACCEPT_CONFIRM_LEN 68
#define SMCD_CLC_ACCEPT_CONFIRM_LEN 48
+#define SMC_CLC_RECV_BUF_LEN 100
/* eye catcher "SMCR" EBCDIC for CLC messages */
static const char SMC_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xd9'};
/* check if received message has a correct header length and contains valid
* heading and trailing eyecatchers
*/
-static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm)
+static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm, bool check_trl)
{
struct smc_clc_msg_proposal_prefix *pclc_prfx;
struct smc_clc_msg_accept_confirm *clc;
return false;
switch (clcm->type) {
case SMC_CLC_PROPOSAL:
- if (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D &&
- clcm->path != SMC_TYPE_B)
- return false;
pclc = (struct smc_clc_msg_proposal *)clcm;
pclc_prfx = smc_clc_proposal_get_prefix(pclc);
- if (ntohs(pclc->hdr.length) !=
+ if (ntohs(pclc->hdr.length) <
sizeof(*pclc) + ntohs(pclc->iparea_offset) +
sizeof(*pclc_prfx) +
pclc_prfx->ipv6_prefixes_cnt *
default:
return false;
}
- if (memcmp(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) &&
+ if (check_trl &&
+ memcmp(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) &&
memcmp(trl->eyecatcher, SMCD_EYECATCHER, sizeof(SMCD_EYECATCHER)))
return false;
return true;
struct msghdr msg = {NULL, 0};
int reason_code = 0;
struct kvec vec = {buf, buflen};
- int len, datlen;
+ int len, datlen, recvlen;
+ bool check_trl = true;
int krflags;
/* peek the first few bytes to determine length of data to receive
}
datlen = ntohs(clcm->length);
if ((len < sizeof(struct smc_clc_msg_hdr)) ||
- (datlen > buflen) ||
- (clcm->version != SMC_CLC_V1) ||
- (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D &&
- clcm->path != SMC_TYPE_B) ||
+ (clcm->version < SMC_CLC_V1) ||
((clcm->type != SMC_CLC_DECLINE) &&
(clcm->type != expected_type))) {
smc->sk.sk_err = EPROTO;
goto out;
}
+ if (clcm->type == SMC_CLC_PROPOSAL && clcm->path == SMC_TYPE_N)
+ reason_code = SMC_CLC_DECL_VERSMISMAT; /* just V2 offered */
+
/* receive the complete CLC message */
memset(&msg, 0, sizeof(struct msghdr));
- iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, datlen);
+ if (datlen > buflen) {
+ check_trl = false;
+ recvlen = buflen;
+ } else {
+ recvlen = datlen;
+ }
+ iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, recvlen);
krflags = MSG_WAITALL;
len = sock_recvmsg(smc->clcsock, &msg, krflags);
- if (len < datlen || !smc_clc_msg_hdr_valid(clcm)) {
+ if (len < recvlen || !smc_clc_msg_hdr_valid(clcm, check_trl)) {
smc->sk.sk_err = EPROTO;
reason_code = -EPROTO;
goto out;
}
+ datlen -= len;
+ while (datlen) {
+ u8 tmp[SMC_CLC_RECV_BUF_LEN];
+
+ vec.iov_base = &tmp;
+ vec.iov_len = SMC_CLC_RECV_BUF_LEN;
+ /* receive remaining proposal message */
+ recvlen = datlen > SMC_CLC_RECV_BUF_LEN ?
+ SMC_CLC_RECV_BUF_LEN : datlen;
+ iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, recvlen);
+ len = sock_recvmsg(smc->clcsock, &msg, krflags);
+ datlen -= len;
+ }
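[Editor's note: the drain loop above accepts CLC proposals longer than the 100-byte receive buffer by reading the excess in bounded chunks, keeping the TCP byte stream aligned with the next message (trailer validation is skipped in that case, hence check_trl). A userspace analogue:]

#include <unistd.h>

static int drain(int fd, size_t remaining)
{
	char tmp[100];

	while (remaining) {
		size_t want = remaining < sizeof(tmp) ? remaining : sizeof(tmp);
		ssize_t got = read(fd, tmp, want);

		if (got <= 0)
			return -1;	/* error or EOF mid-message */
		remaining -= (size_t)got;
	}
	return 0;
}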
if (clcm->type == SMC_CLC_DECLINE) {
struct smc_clc_msg_decline *dclc;
#define SMC_CLC_V1 0x1 /* SMC version */
#define SMC_TYPE_R 0 /* SMC-R only */
#define SMC_TYPE_D 1 /* SMC-D only */
+#define SMC_TYPE_N 2 /* neither SMC-R nor SMC-D */
#define SMC_TYPE_B 3 /* SMC-R and SMC-D */
#define CLC_WAIT_TIME (6 * HZ) /* max. wait time on clcsock */
#define CLC_WAIT_TIME_SHORT HZ /* short wait time on clcsock */
#define SMC_CLC_DECL_ISMVLANERR 0x03090000 /* err to reg vlan id on ism dev */
#define SMC_CLC_DECL_NOACTLINK 0x030a0000 /* no active smc-r link in lgr */
#define SMC_CLC_DECL_NOSRVLINK 0x030b0000 /* SMC-R link from srv not found */
+#define SMC_CLC_DECL_VERSMISMAT 0x030c0000 /* SMC version mismatch */
#define SMC_CLC_DECL_SYNCERR 0x04000000 /* synchronization error */
#define SMC_CLC_DECL_PEERDECL 0x05000000 /* peer declined during handshake */
#define SMC_CLC_DECL_INTERR 0x09990000 /* internal error */
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/reboot.h>
+#include <linux/mutex.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <rdma/ib_verbs.h>
if (smc_link_usable(lnk))
lnk->state = SMC_LNK_INACTIVE;
}
- wake_up_interruptible_all(&lgr->llc_waiter);
+ wake_up_all(&lgr->llc_msg_waiter);
+ wake_up_all(&lgr->llc_flow_waiter);
}
static void smc_lgr_free(struct smc_link_group *lgr);
return;
if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
/* some other llc task is ongoing */
- wait_event_interruptible_timeout(lgr->llc_waiter,
- (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
+ wait_event_timeout(lgr->llc_flow_waiter,
+ (list_empty(&lgr->list) ||
+ lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
SMC_LLC_WAIT_TIME);
}
- if (list_empty(&lgr->list) ||
- !smc_ib_port_active(smcibdev, ibport))
- return; /* lgr or device no longer active */
- link = smc_llc_usable_link(lgr);
- if (!link)
- return;
- smc_llc_send_add_link(link, smcibdev->mac[ibport - 1], gid,
- NULL, SMC_LLC_REQ);
+ /* lgr or device no longer active? */
+ if (!list_empty(&lgr->list) &&
+ smc_ib_port_active(smcibdev, ibport))
+ link = smc_llc_usable_link(lgr);
+ if (link)
+ smc_llc_send_add_link(link, smcibdev->mac[ibport - 1],
+ gid, NULL, SMC_LLC_REQ);
+ wake_up(&lgr->llc_flow_waiter); /* wake up next waiter */
}
}
if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
/* another llc task is ongoing */
mutex_unlock(&lgr->llc_conf_mutex);
- wait_event_interruptible_timeout(lgr->llc_waiter,
- (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
+ wait_event_timeout(lgr->llc_flow_waiter,
+ (list_empty(&lgr->list) ||
+ lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
SMC_LLC_WAIT_TIME);
mutex_lock(&lgr->llc_conf_mutex);
}
- smc_llc_send_delete_link(to_lnk, del_link_id, SMC_LLC_REQ, true,
- SMC_LLC_DEL_LOST_PATH);
+ if (!list_empty(&lgr->list))
+ smc_llc_send_delete_link(to_lnk, del_link_id,
+ SMC_LLC_REQ, true,
+ SMC_LLC_DEL_LOST_PATH);
+ wake_up(&lgr->llc_flow_waiter); /* wake up next waiter */
}
}
if (list_empty(&lgr->list))
return;
- wake_up_interruptible_all(&lgr->llc_waiter);
+ wake_up_all(&lgr->llc_msg_waiter);
mutex_lock(&lgr->llc_conf_mutex);
smcr_link_down(link);
mutex_unlock(&lgr->llc_conf_mutex);
struct smc_ib_device *smcibdev;
struct smcd_dev *smcd;
- spin_lock(&smc_ib_devices.lock);
+ mutex_lock(&smc_ib_devices.mutex);
list_for_each_entry(smcibdev, &smc_ib_devices.list, list) {
int i;
for (i = 0; i < SMC_MAX_PORTS; i++)
set_bit(i, smcibdev->ports_going_away);
}
- spin_unlock(&smc_ib_devices.lock);
+ mutex_unlock(&smc_ib_devices.mutex);
- spin_lock(&smcd_dev_list.lock);
+ mutex_lock(&smcd_dev_list.mutex);
list_for_each_entry(smcd, &smcd_dev_list.list, list) {
smcd->going_away = 1;
}
- spin_unlock(&smcd_dev_list.lock);
+ mutex_unlock(&smcd_dev_list.mutex);
}
/* Clean up all SMC link groups */
smc_smcr_terminate_all(NULL);
- spin_lock(&smcd_dev_list.lock);
+ mutex_lock(&smcd_dev_list.mutex);
list_for_each_entry(smcd, &smcd_dev_list.list, list)
smc_smcd_terminate_all(smcd);
- spin_unlock(&smcd_dev_list.lock);
+ mutex_unlock(&smcd_dev_list.mutex);
}
static int smc_core_reboot_event(struct notifier_block *this,
struct work_struct llc_del_link_work;
struct work_struct llc_event_work;
/* llc event worker */
- wait_queue_head_t llc_waiter;
+ wait_queue_head_t llc_flow_waiter;
/* w4 next llc event */
+ wait_queue_head_t llc_msg_waiter;
+ /* w4 next llc msg */
struct smc_llc_flow llc_flow_lcl;
/* llc local control field */
struct smc_llc_flow llc_flow_rmt;
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <linux/wait.h>
+#include <linux/mutex.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#define SMC_QP_RNR_RETRY 7 /* 7: infinite */
struct smc_ib_devices smc_ib_devices = { /* smc-registered ib devices */
- .lock = __SPIN_LOCK_UNLOCKED(smc_ib_devices.lock),
+ .mutex = __MUTEX_INITIALIZER(smc_ib_devices.mutex),
.list = LIST_HEAD_INIT(smc_ib_devices.list),
};
INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work);
atomic_set(&smcibdev->lnk_cnt, 0);
init_waitqueue_head(&smcibdev->lnks_deleted);
- spin_lock(&smc_ib_devices.lock);
+ mutex_lock(&smc_ib_devices.mutex);
list_add_tail(&smcibdev->list, &smc_ib_devices.list);
- spin_unlock(&smc_ib_devices.lock);
+ mutex_unlock(&smc_ib_devices.mutex);
ib_set_client_data(ibdev, &smc_ib_client, smcibdev);
INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev,
smc_ib_global_event_handler);
{
struct smc_ib_device *smcibdev = client_data;
- spin_lock(&smc_ib_devices.lock);
+ mutex_lock(&smc_ib_devices.mutex);
list_del_init(&smcibdev->list); /* remove from smc_ib_devices */
- spin_unlock(&smc_ib_devices.lock);
+ mutex_unlock(&smc_ib_devices.mutex);
pr_warn_ratelimited("smc: removing ib device %s\n",
smcibdev->ibdev->name);
smc_smcr_terminate_all(smcibdev);
#include <linux/interrupt.h>
#include <linux/if_ether.h>
+#include <linux/mutex.h>
#include <linux/wait.h>
#include <rdma/ib_verbs.h>
#include <net/smc.h>
struct smc_ib_devices { /* list of smc ib devices definition */
struct list_head list;
- spinlock_t lock; /* protects list of smc ib devices */
+ struct mutex mutex; /* protects list of smc ib devices */
};
extern struct smc_ib_devices smc_ib_devices; /* list of smc ib devices */
*/
#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include <linux/slab.h>
#include <asm/page.h>
struct smcd_dev_list smcd_dev_list = {
.list = LIST_HEAD_INIT(smcd_dev_list.list),
- .lock = __SPIN_LOCK_UNLOCKED(smcd_dev_list.lock)
+ .mutex = __MUTEX_INITIALIZER(smcd_dev_list.mutex)
};
/* Test if an ISM communication is possible. */
int smcd_register_dev(struct smcd_dev *smcd)
{
- spin_lock(&smcd_dev_list.lock);
+ mutex_lock(&smcd_dev_list.mutex);
list_add_tail(&smcd->list, &smcd_dev_list.list);
- spin_unlock(&smcd_dev_list.lock);
+ mutex_unlock(&smcd_dev_list.mutex);
pr_warn_ratelimited("smc: adding smcd device %s with pnetid %.16s%s\n",
dev_name(&smcd->dev), smcd->pnetid,
{
pr_warn_ratelimited("smc: removing smcd device %s\n",
dev_name(&smcd->dev));
- spin_lock(&smcd_dev_list.lock);
+ mutex_lock(&smcd_dev_list.mutex);
list_del_init(&smcd->list);
- spin_unlock(&smcd_dev_list.lock);
+ mutex_unlock(&smcd_dev_list.mutex);
smcd->going_away = 1;
smc_smcd_terminate_all(smcd);
flush_workqueue(smcd->event_wq);
#define SMCD_ISM_H
#include <linux/uio.h>
+#include <linux/mutex.h>
#include "smc.h"
struct smcd_dev_list { /* List of SMCD devices */
struct list_head list;
- spinlock_t lock; /* Protects list of devices */
+ struct mutex mutex; /* Protects list of devices */
};
extern struct smcd_dev_list smcd_dev_list; /* list of smcd devices */
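[Editor's note: the spinlock-to-mutex conversions in these SMC hunks share one justification: the sections that walk smc_ib_devices and smcd_dev_list can sleep (for example while terminating link groups), which is illegal under a spinlock. The shape of the guarded walk, as a sketch with illustrative names:]

static DEFINE_MUTEX(demo_mutex);
static LIST_HEAD(demo_list);

static void demo_walk(void)
{
	struct smcd_dev *smcd;

	mutex_lock(&demo_mutex);	/* sleeping inside is now allowed */
	list_for_each_entry(smcd, &demo_list, list)
		smc_smcd_terminate_all(smcd);	/* may sleep */
	mutex_unlock(&demo_mutex);
}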
flow->qentry = qentry;
}
+static void smc_llc_flow_parallel(struct smc_link_group *lgr, u8 flow_type,
+ struct smc_llc_qentry *qentry)
+{
+ u8 msg_type = qentry->msg.raw.hdr.common.type;
+
+ if ((msg_type == SMC_LLC_ADD_LINK || msg_type == SMC_LLC_DELETE_LINK) &&
+ flow_type != msg_type && !lgr->delayed_event) {
+ lgr->delayed_event = qentry;
+ return;
+ }
+ /* drop parallel or already-in-progress llc requests */
+ if (flow_type != msg_type)
+ pr_warn_once("smc: SMC-R lg %*phN dropped parallel "
+ "LLC msg: msg %d flow %d role %d\n",
+ SMC_LGR_ID_SIZE, &lgr->id,
+ qentry->msg.raw.hdr.common.type,
+ flow_type, lgr->role);
+ kfree(qentry);
+}
+
/* try to start a new llc flow, initiated by an incoming llc msg */
static bool smc_llc_flow_start(struct smc_llc_flow *flow,
struct smc_llc_qentry *qentry)
spin_lock_bh(&lgr->llc_flow_lock);
if (flow->type) {
/* a flow is already active */
- if ((qentry->msg.raw.hdr.common.type == SMC_LLC_ADD_LINK ||
- qentry->msg.raw.hdr.common.type == SMC_LLC_DELETE_LINK) &&
- !lgr->delayed_event) {
- lgr->delayed_event = qentry;
- } else {
- /* forget this llc request */
- kfree(qentry);
- }
+ smc_llc_flow_parallel(lgr, flow->type, qentry);
spin_unlock_bh(&lgr->llc_flow_lock);
return false;
}
}
if (qentry == lgr->delayed_event)
lgr->delayed_event = NULL;
- spin_unlock_bh(&lgr->llc_flow_lock);
smc_llc_flow_qentry_set(flow, qentry);
+ spin_unlock_bh(&lgr->llc_flow_lock);
return true;
}
return 0;
}
spin_unlock_bh(&lgr->llc_flow_lock);
- rc = wait_event_interruptible_timeout(lgr->llc_waiter,
- (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE &&
- (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE ||
- lgr->llc_flow_rmt.type == allowed_remote)),
- SMC_LLC_WAIT_TIME);
+ rc = wait_event_timeout(lgr->llc_flow_waiter, (list_empty(&lgr->list) ||
+ (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE &&
+ (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE ||
+ lgr->llc_flow_rmt.type == allowed_remote))),
+ SMC_LLC_WAIT_TIME * 10);
if (!rc)
return -ETIMEDOUT;
goto again;
flow == &lgr->llc_flow_lcl)
schedule_work(&lgr->llc_event_work);
else
- wake_up_interruptible(&lgr->llc_waiter);
+ wake_up(&lgr->llc_flow_waiter);
}
/* lnk is optional and used for early wakeup when link goes down, useful in
int time_out, u8 exp_msg)
{
struct smc_llc_flow *flow = &lgr->llc_flow_lcl;
+ u8 rcv_msg;
- wait_event_interruptible_timeout(lgr->llc_waiter,
- (flow->qentry ||
- (lnk && !smc_link_usable(lnk)) ||
- list_empty(&lgr->list)),
- time_out);
+ wait_event_timeout(lgr->llc_msg_waiter,
+ (flow->qentry ||
+ (lnk && !smc_link_usable(lnk)) ||
+ list_empty(&lgr->list)),
+ time_out);
if (!flow->qentry ||
(lnk && !smc_link_usable(lnk)) || list_empty(&lgr->list)) {
smc_llc_flow_qentry_del(flow);
goto out;
}
- if (exp_msg && flow->qentry->msg.raw.hdr.common.type != exp_msg) {
+ rcv_msg = flow->qentry->msg.raw.hdr.common.type;
+ if (exp_msg && rcv_msg != exp_msg) {
if (exp_msg == SMC_LLC_ADD_LINK &&
- flow->qentry->msg.raw.hdr.common.type ==
- SMC_LLC_DELETE_LINK) {
+ rcv_msg == SMC_LLC_DELETE_LINK) {
/* flow_start will delay the unexpected msg */
smc_llc_flow_start(&lgr->llc_flow_lcl,
smc_llc_flow_qentry_clr(flow));
return NULL;
}
+ pr_warn_once("smc: SMC-R lg %*phN dropped unexpected LLC msg: "
+ "msg %d exp %d flow %d role %d flags %x\n",
+ SMC_LGR_ID_SIZE, &lgr->id, rcv_msg, exp_msg,
+ flow->type, lgr->role,
+ flow->qentry->msg.raw.hdr.flags);
smc_llc_flow_qentry_del(flow);
}
out:
smc_llc_send_message(lnk, &qentry->msg); /* response */
if (smc_link_downing(&lnk_del->state)) {
- smc_switch_conns(lgr, lnk_del, false);
- smc_wr_tx_wait_no_pending_sends(lnk_del);
+ if (smc_switch_conns(lgr, lnk_del, false))
+ smc_wr_tx_wait_no_pending_sends(lnk_del);
}
smcr_link_clear(lnk_del, true);
goto out; /* asymmetric link already deleted */
if (smc_link_downing(&lnk_del->state)) {
- smc_switch_conns(lgr, lnk_del, false);
- smc_wr_tx_wait_no_pending_sends(lnk_del);
+ if (smc_switch_conns(lgr, lnk_del, false))
+ smc_wr_tx_wait_no_pending_sends(lnk_del);
}
if (!list_empty(&lgr->list)) {
/* qentry is either a request from peer (send it back to
/* a flow is waiting for this message */
smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
qentry);
- wake_up_interruptible(&lgr->llc_waiter);
+ wake_up(&lgr->llc_msg_waiter);
} else if (smc_llc_flow_start(&lgr->llc_flow_lcl,
qentry)) {
schedule_work(&lgr->llc_add_link_work);
if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
/* a flow is waiting for this message */
smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry);
- wake_up_interruptible(&lgr->llc_waiter);
+ wake_up(&lgr->llc_msg_waiter);
return;
}
break;
/* DEL LINK REQ during ADD LINK SEQ */
smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
qentry);
- wake_up_interruptible(&lgr->llc_waiter);
+ wake_up(&lgr->llc_msg_waiter);
} else if (smc_llc_flow_start(&lgr->llc_flow_lcl,
qentry)) {
schedule_work(&lgr->llc_del_link_work);
/* DEL LINK REQ during ADD LINK SEQ */
smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
qentry);
- wake_up_interruptible(&lgr->llc_waiter);
+ wake_up(&lgr->llc_msg_waiter);
} else if (smc_llc_flow_start(&lgr->llc_flow_lcl,
qentry)) {
schedule_work(&lgr->llc_del_link_work);
case SMC_LLC_DELETE_RKEY:
/* assign responses to the local flow, we requested them */
smc_llc_flow_qentry_set(&link->lgr->llc_flow_lcl, qentry);
- wake_up_interruptible(&link->lgr->llc_waiter);
+ wake_up(&link->lgr->llc_msg_waiter);
return;
case SMC_LLC_CONFIRM_RKEY_CONT:
/* not used because max links is 3 */
spin_lock_irqsave(&lgr->llc_event_q_lock, flags);
list_add_tail(&qentry->list, &lgr->llc_event_q);
spin_unlock_irqrestore(&lgr->llc_event_q_lock, flags);
- schedule_work(&link->lgr->llc_event_work);
+ schedule_work(&lgr->llc_event_work);
}
/* copy received msg and add it to the event queue */
INIT_LIST_HEAD(&lgr->llc_event_q);
spin_lock_init(&lgr->llc_event_q_lock);
spin_lock_init(&lgr->llc_flow_lock);
- init_waitqueue_head(&lgr->llc_waiter);
+ init_waitqueue_head(&lgr->llc_flow_waiter);
+ init_waitqueue_head(&lgr->llc_msg_waiter);
mutex_init(&lgr->llc_conf_mutex);
lgr->llc_testlink_time = net->ipv4.sysctl_tcp_keepalive_time;
}
void smc_llc_lgr_clear(struct smc_link_group *lgr)
{
smc_llc_event_flush(lgr);
- wake_up_interruptible_all(&lgr->llc_waiter);
+ wake_up_all(&lgr->llc_flow_waiter);
+ wake_up_all(&lgr->llc_msg_waiter);
cancel_work_sync(&lgr->llc_event_work);
cancel_work_sync(&lgr->llc_add_link_work);
cancel_work_sync(&lgr->llc_del_link_work);
#include <linux/module.h>
#include <linux/list.h>
#include <linux/ctype.h>
+#include <linux/mutex.h>
#include <net/netlink.h>
#include <net/genetlink.h>
return rc;
/* remove ib devices */
- spin_lock(&smc_ib_devices.lock);
+ mutex_lock(&smc_ib_devices.mutex);
list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
for (ibport = 0; ibport < SMC_MAX_PORTS; ibport++) {
if (ibdev->pnetid_by_user[ibport] &&
}
}
}
- spin_unlock(&smc_ib_devices.lock);
+ mutex_unlock(&smc_ib_devices.mutex);
/* remove smcd devices */
- spin_lock(&smcd_dev_list.lock);
+ mutex_lock(&smcd_dev_list.mutex);
list_for_each_entry(smcd_dev, &smcd_dev_list.list, list) {
if (smcd_dev->pnetid_by_user &&
(!pnet_name ||
rc = 0;
}
}
- spin_unlock(&smcd_dev_list.lock);
+ mutex_unlock(&smcd_dev_list.mutex);
return rc;
}
u8 pnet_null[SMC_MAX_PNETID_LEN] = {0};
bool applied = false;
- spin_lock(&smc_ib_devices.lock);
+ mutex_lock(&smc_ib_devices.mutex);
if (smc_pnet_match(ib_dev->pnetid[ib_port - 1], pnet_null)) {
memcpy(ib_dev->pnetid[ib_port - 1], pnet_name,
SMC_MAX_PNETID_LEN);
ib_dev->pnetid_by_user[ib_port - 1] = true;
applied = true;
}
- spin_unlock(&smc_ib_devices.lock);
+ mutex_unlock(&smc_ib_devices.mutex);
return applied;
}
u8 pnet_null[SMC_MAX_PNETID_LEN] = {0};
bool applied = false;
- spin_lock(&smcd_dev_list.lock);
+ mutex_lock(&smcd_dev_list.mutex);
if (smc_pnet_match(smcd_dev->pnetid, pnet_null)) {
memcpy(smcd_dev->pnetid, pnet_name, SMC_MAX_PNETID_LEN);
smcd_dev->pnetid_by_user = true;
applied = true;
}
- spin_unlock(&smcd_dev_list.lock);
+ mutex_unlock(&smcd_dev_list.mutex);
return applied;
}
{
struct smc_ib_device *ibdev;
- spin_lock(&smc_ib_devices.lock);
+ mutex_lock(&smc_ib_devices.mutex);
list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
if (!strncmp(ibdev->ibdev->name, ib_name,
sizeof(ibdev->ibdev->name)) ||
}
ibdev = NULL;
out:
- spin_unlock(&smc_ib_devices.lock);
+ mutex_unlock(&smc_ib_devices.mutex);
return ibdev;
}
{
struct smcd_dev *smcd_dev;
- spin_lock(&smcd_dev_list.lock);
+ mutex_lock(&smcd_dev_list.mutex);
list_for_each_entry(smcd_dev, &smcd_dev_list.list, list) {
if (!strncmp(dev_name(&smcd_dev->dev), smcd_name,
IB_DEVICE_NAME_MAX - 1))
}
smcd_dev = NULL;
out:
- spin_unlock(&smcd_dev_list.lock);
+ mutex_unlock(&smcd_dev_list.mutex);
return smcd_dev;
}
int i;
ini->ib_dev = NULL;
- spin_lock(&smc_ib_devices.lock);
+ mutex_lock(&smc_ib_devices.mutex);
list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
if (ibdev == known_dev)
continue;
}
}
out:
- spin_unlock(&smc_ib_devices.lock);
+ mutex_unlock(&smc_ib_devices.mutex);
}
/* find alternate roce device with same pnet_id and vlan_id */
{
struct smc_ib_device *ibdev;
- spin_lock(&smc_ib_devices.lock);
+ mutex_lock(&smc_ib_devices.mutex);
list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
struct net_device *ndev;
int i;
}
}
}
- spin_unlock(&smc_ib_devices.lock);
+ mutex_unlock(&smc_ib_devices.mutex);
}
/* Determine the corresponding IB device port based on the hardware PNETID.
smc_pnet_find_ndev_pnetid_by_table(ndev, ndev_pnetid))
return; /* pnetid could not be determined */
- spin_lock(&smcd_dev_list.lock);
+ mutex_lock(&smcd_dev_list.mutex);
list_for_each_entry(ismdev, &smcd_dev_list.list, list) {
if (smc_pnet_match(ismdev->pnetid, ndev_pnetid) &&
!ismdev->going_away) {
break;
}
}
- spin_unlock(&smcd_dev_list.lock);
+ mutex_unlock(&smcd_dev_list.mutex);
}
/* PNET table analysis for a given sock:
static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx)
{
*idx = link->wr_tx_cnt;
+ if (!smc_link_usable(link))
+ return -ENOLINK;
for_each_clear_bit(*idx, link->wr_tx_mask, link->wr_tx_cnt) {
if (!test_and_set_bit(*idx, link->wr_tx_mask))
return 0;
{
struct ib_device *ibdev;
+ if (!lnk->smcibdev)
+ return;
+ ibdev = lnk->smcibdev->ibdev;
+
if (smc_wr_tx_wait_no_pending_sends(lnk))
memset(lnk->wr_tx_mask, 0,
BITS_TO_LONGS(SMC_WR_BUF_CNT) *
sizeof(*lnk->wr_tx_mask));
- if (!lnk->smcibdev)
- return;
- ibdev = lnk->smcibdev->ibdev;
-
if (lnk->wr_rx_dma_addr) {
ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
q.len = strlen(gssd_dummy_clnt_dir[0].name);
clnt_dentry = d_hash_and_lookup(gssd_dentry, &q);
if (!clnt_dentry) {
+ __rpc_depopulate(gssd_dentry, gssd_dummy_clnt_dir, 0, 1);
pipe_dentry = ERR_PTR(-ENOENT);
goto out;
}
#include <net/tcp.h>
#include <net/tcp_states.h>
#include <linux/uaccess.h>
+#include <linux/highmem.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
base = 0;
} else {
base -= buf->head[0].iov_len;
+ subbuf->head[0].iov_base = buf->head[0].iov_base;
subbuf->head[0].iov_len = 0;
}
base = 0;
} else {
base -= buf->page_len;
+ subbuf->pages = buf->pages;
+ subbuf->page_base = 0;
subbuf->page_len = 0;
}
base = 0;
} else {
base -= buf->tail[0].iov_len;
+ subbuf->tail[0].iov_base = buf->tail[0].iov_base;
subbuf->tail[0].iov_len = 0;
}
trace_xprtrdma_wc_fastreg(wc, frwr);
/* The MR will get recycled when the associated req is retransmitted */
- rpcrdma_flush_disconnect(cq, wc);
+ rpcrdma_flush_disconnect(cq->cq_context, wc);
}
/**
trace_xprtrdma_wc_li(wc, frwr);
__frwr_release_mr(wc, mr);
- rpcrdma_flush_disconnect(cq, wc);
+ rpcrdma_flush_disconnect(cq->cq_context, wc);
}
/**
__frwr_release_mr(wc, mr);
complete(&frwr->fr_linv_done);
- rpcrdma_flush_disconnect(cq, wc);
+ rpcrdma_flush_disconnect(cq->cq_context, wc);
}
/**
smp_rmb();
rpcrdma_complete_rqst(rep);
- rpcrdma_flush_disconnect(cq, wc);
+ rpcrdma_flush_disconnect(cq->cq_context, wc);
}
/**
be32_to_cpup(p), be32_to_cpu(rep->rr_xid));
}
- r_xprt->rx_stats.bad_reply_count++;
- return -EREMOTEIO;
+ return -EIO;
}
/* Perform XID lookup, reconstruction of the RPC reply, and
spin_unlock(&xprt->queue_lock);
return;
-/* If the incoming reply terminated a pending RPC, the next
- * RPC call will post a replacement receive buffer as it is
- * being marshaled.
- */
out_badheader:
trace_xprtrdma_reply_hdr(rep);
r_xprt->rx_stats.bad_reply_count++;
+ rqst->rq_task->tk_status = status;
+ status = 0;
goto out;
}
rc = rpcrdma_xprt_connect(r_xprt);
xprt_clear_connecting(xprt);
- if (r_xprt->rx_ep && r_xprt->rx_ep->re_connect_status > 0) {
+ if (!rc) {
xprt->connect_cookie++;
xprt->stat.connect_count++;
xprt->stat.connect_time += (long)jiffies -
static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt);
-static int rpcrdma_ep_destroy(struct rpcrdma_ep *ep);
+static void rpcrdma_ep_get(struct rpcrdma_ep *ep);
+static int rpcrdma_ep_put(struct rpcrdma_ep *ep);
static struct rpcrdma_regbuf *
rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
gfp_t flags);
*/
static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
{
- struct rdma_cm_id *id = r_xprt->rx_ep->re_id;
+ struct rpcrdma_ep *ep = r_xprt->rx_ep;
+ struct rdma_cm_id *id = ep->re_id;
/* Flush Receives, then wait for deferred Reply work
* to complete.
* local invalidations.
*/
ib_drain_sq(id->qp);
+
+ rpcrdma_ep_put(ep);
}
/**
trace_xprtrdma_qp_event(ep, event);
}
+/* Ensure xprt_force_disconnect() is invoked exactly once when a
+ * connection is closed or lost. (The important thing is that it needs
+ * to be invoked "at least" once.)
+ */
+static void rpcrdma_force_disconnect(struct rpcrdma_ep *ep)
+{
+ if (atomic_add_unless(&ep->re_force_disconnect, 1, 1))
+ xprt_force_disconnect(ep->re_xprt);
+}
+
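[Editor's note: rpcrdma_force_disconnect() uses atomic_add_unless(&v, 1, 1) as a once-only latch: the first caller moves the counter 0 -> 1 and wins; every later caller sees 1 and does nothing. A userspace C11 equivalent:]

#include <stdatomic.h>
#include <stdbool.h>

static bool claim_once(atomic_int *latch)
{
	int expected = 0;

	/* only the thread that flips 0 -> 1 gets true */
	return atomic_compare_exchange_strong(latch, &expected, 1);
}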
/**
* rpcrdma_flush_disconnect - Disconnect on flushed completion
- * @cq: completion queue
+ * @r_xprt: transport to disconnect
* @wc: work completion entry
*
* Must be called in process context.
*/
-void rpcrdma_flush_disconnect(struct ib_cq *cq, struct ib_wc *wc)
+void rpcrdma_flush_disconnect(struct rpcrdma_xprt *r_xprt, struct ib_wc *wc)
{
- struct rpcrdma_xprt *r_xprt = cq->cq_context;
- struct rpc_xprt *xprt = &r_xprt->rx_xprt;
-
- if (wc->status != IB_WC_SUCCESS &&
- r_xprt->rx_ep->re_connect_status == 1) {
- r_xprt->rx_ep->re_connect_status = -ECONNABORTED;
- xprt_force_disconnect(xprt);
- }
+ if (wc->status != IB_WC_SUCCESS)
+ rpcrdma_force_disconnect(r_xprt->rx_ep);
}
/**
struct ib_cqe *cqe = wc->wr_cqe;
struct rpcrdma_sendctx *sc =
container_of(cqe, struct rpcrdma_sendctx, sc_cqe);
+ struct rpcrdma_xprt *r_xprt = cq->cq_context;
/* WARNING: Only wr_cqe and status are reliable at this point */
trace_xprtrdma_wc_send(sc, wc);
- rpcrdma_sendctx_put_locked((struct rpcrdma_xprt *)cq->cq_context, sc);
- rpcrdma_flush_disconnect(cq, wc);
+ rpcrdma_sendctx_put_locked(r_xprt, sc);
+ rpcrdma_flush_disconnect(r_xprt, wc);
}
/**
return;
out_flushed:
- rpcrdma_flush_disconnect(cq, wc);
+ rpcrdma_flush_disconnect(r_xprt, wc);
rpcrdma_rep_destroy(rep);
}
{
struct sockaddr *sap = (struct sockaddr *)&id->route.addr.dst_addr;
struct rpcrdma_ep *ep = id->context;
- struct rpc_xprt *xprt = ep->re_xprt;
might_sleep();
/* fall through */
case RDMA_CM_EVENT_ADDR_CHANGE:
ep->re_connect_status = -ENODEV;
- xprt_force_disconnect(xprt);
goto disconnected;
case RDMA_CM_EVENT_ESTABLISHED:
- kref_get(&ep->re_kref);
+ rpcrdma_ep_get(ep);
ep->re_connect_status = 1;
rpcrdma_update_cm_private(ep, &event->param.conn);
trace_xprtrdma_inline_thresh(ep);
case RDMA_CM_EVENT_DISCONNECTED:
ep->re_connect_status = -ECONNABORTED;
disconnected:
- xprt_force_disconnect(xprt);
- return rpcrdma_ep_destroy(ep);
+ rpcrdma_force_disconnect(ep);
+ return rpcrdma_ep_put(ep);
default:
break;
}
return ERR_PTR(rc);
}
-static void rpcrdma_ep_put(struct kref *kref)
+static void rpcrdma_ep_destroy(struct kref *kref)
{
struct rpcrdma_ep *ep = container_of(kref, struct rpcrdma_ep, re_kref);
module_put(THIS_MODULE);
}
+static noinline void rpcrdma_ep_get(struct rpcrdma_ep *ep)
+{
+ kref_get(&ep->re_kref);
+}
+
/* Returns:
* %0 if @ep still has a positive kref count, or
* %1 if @ep was destroyed successfully.
*/
-static int rpcrdma_ep_destroy(struct rpcrdma_ep *ep)
+static noinline int rpcrdma_ep_put(struct rpcrdma_ep *ep)
{
- return kref_put(&ep->re_kref, rpcrdma_ep_put);
+ return kref_put(&ep->re_kref, rpcrdma_ep_destroy);
}
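/* Hedged userspace analogue (a sketch, not kernel code) of the renamed
 * rpcrdma_ep_get()/rpcrdma_ep_put() pair: the final put runs the
 * destructor, mirroring kref_put(&ep->re_kref, rpcrdma_ep_destroy).
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ep {
	atomic_int refcount;
};

static void ep_get(struct ep *ep)
{
	atomic_fetch_add(&ep->refcount, 1);
}

/* Returns 1 if this put destroyed @ep, 0 otherwise. */
static int ep_put(struct ep *ep)
{
	if (atomic_fetch_sub(&ep->refcount, 1) == 1) {
		free(ep);
		return 1;
	}
	return 0;
}

int main(void)
{
	struct ep *ep = malloc(sizeof(*ep));

	if (!ep)
		return 1;
	atomic_init(&ep->refcount, 1);	/* creation reference */
	ep_get(ep);			/* e.g. outstanding Receives */
	printf("first put destroyed: %d\n", ep_put(ep));	/* 0 */
	printf("last put destroyed: %d\n", ep_put(ep));		/* 1 */
	return 0;
}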
static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
return 0;
out_destroy:
- rpcrdma_ep_destroy(ep);
+ rpcrdma_ep_put(ep);
rdma_destroy_id(id);
out_free:
kfree(ep);
return rc;
ep = r_xprt->rx_ep;
- ep->re_connect_status = 0;
xprt_clear_connected(xprt);
-
rpcrdma_reset_cwnd(r_xprt);
+
+ /* Bump the ep's reference count while there are
+ * outstanding Receives.
+ */
+ rpcrdma_ep_get(ep);
rpcrdma_post_recvs(r_xprt, true);
rc = rpcrdma_sendctxs_create(r_xprt);
rpcrdma_mrs_create(r_xprt);
out:
- if (rc)
- ep->re_connect_status = rc;
trace_xprtrdma_connect(r_xprt, rc);
return rc;
}
rpcrdma_mrs_destroy(r_xprt);
rpcrdma_sendctxs_destroy(r_xprt);
- if (rpcrdma_ep_destroy(ep))
+ if (rpcrdma_ep_put(ep))
rdma_destroy_id(id);
r_xprt->rx_ep = NULL;
unsigned int re_max_inline_recv;
int re_async_rc;
int re_connect_status;
+ atomic_t re_force_disconnect;
struct ib_qp_init_attr re_attr;
wait_queue_head_t re_connect_wait;
struct rpc_xprt *re_xprt;
/*
* Endpoint calls - xprtrdma/verbs.c
*/
-void rpcrdma_flush_disconnect(struct ib_cq *cq, struct ib_wc *wc);
+void rpcrdma_flush_disconnect(struct rpcrdma_xprt *r_xprt, struct ib_wc *wc);
int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt);
void rpcrdma_xprt_disconnect(struct rpcrdma_xprt *r_xprt);
}
+/**
+ * tipc_link_set_skb_retransmit_time - set the time at which retransmission of
+ * the given skb should next be attempted
+ * @skb: skb to set a future retransmission time for
+ * @l: link the skb will be transmitted on
+ */
+static void tipc_link_set_skb_retransmit_time(struct sk_buff *skb,
+ struct tipc_link *l)
+{
+ if (link_is_bc_sndlink(l))
+ TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
+ else
+ TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
+}
+
void tipc_link_reset(struct tipc_link *l)
{
struct sk_buff_head list;
return -ENOBUFS;
}
__skb_queue_tail(transmq, skb);
- /* next retransmit attempt */
- if (link_is_bc_sndlink(l))
- TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
+ tipc_link_set_skb_retransmit_time(skb, l);
__skb_queue_tail(xmitq, _skb);
TIPC_SKB_CB(skb)->ackers = l->ackers;
l->rcv_unacked = 0;
if (unlikely(skb == l->backlog[imp].target_bskb))
l->backlog[imp].target_bskb = NULL;
__skb_queue_tail(&l->transmq, skb);
- /* next retransmit attempt */
- if (link_is_bc_sndlink(l))
- TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
+ tipc_link_set_skb_retransmit_time(skb, l);
__skb_queue_tail(xmitq, _skb);
TIPC_SKB_CB(skb)->ackers = l->ackers;
/* retransmit skb if unrestricted*/
if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
continue;
- TIPC_SKB_CB(skb)->nxt_retr = (is_uc) ?
- TIPC_UC_RETR_TIME : TIPC_BC_RETR_LIM;
+ tipc_link_set_skb_retransmit_time(skb, l);
_skb = pskb_copy(skb, GFP_ATOMIC);
if (!_skb)
continue;
err = nl80211_parse_he_obss_pd(
info->attrs[NL80211_ATTR_HE_OBSS_PD],
				&params.he_obss_pd);
- goto out;
+ if (err)
+ goto out;
}
if (info->attrs[NL80211_ATTR_HE_BSS_COLOR]) {
info->attrs[NL80211_ATTR_HE_BSS_COLOR],
				&params.he_bss_color);
if (err)
- return err;
+ goto out;
}
	nl80211_calculate_ap_params(&params);
#include <net/xsk_buff_pool.h>
#include <net/xdp_sock.h>
-#include <linux/dma-direct.h>
-#include <linux/dma-noncoherent.h>
-#include <linux/swiotlb.h>
#include "xsk_queue.h"
pool->free_heads_cnt = chunks;
pool->headroom = headroom;
pool->chunk_size = chunk_size;
- pool->cheap_dma = true;
pool->unaligned = unaligned;
pool->frame_len = chunk_size - headroom - XDP_PACKET_HEADROOM;
INIT_LIST_HEAD(&pool->free_list);
}
}
-static bool __maybe_unused xp_check_swiotlb_dma(struct xsk_buff_pool *pool)
-{
-#if defined(CONFIG_SWIOTLB)
- phys_addr_t paddr;
- u32 i;
-
- for (i = 0; i < pool->dma_pages_cnt; i++) {
- paddr = dma_to_phys(pool->dev, pool->dma_pages[i]);
- if (is_swiotlb_buffer(paddr))
- return false;
- }
-#endif
- return true;
-}
-
-static bool xp_check_cheap_dma(struct xsk_buff_pool *pool)
-{
-#if defined(CONFIG_HAS_DMA)
- const struct dma_map_ops *ops = get_dma_ops(pool->dev);
-
- if (ops) {
- return !ops->sync_single_for_cpu &&
- !ops->sync_single_for_device;
- }
-
- if (!dma_is_direct(ops))
- return false;
-
- if (!xp_check_swiotlb_dma(pool))
- return false;
-
- if (!dev_is_dma_coherent(pool->dev)) {
-#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
- defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
- defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE)
- return false;
-#endif
- }
-#endif
- return true;
-}
-
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
unsigned long attrs, struct page **pages, u32 nr_pages)
{
pool->dev = dev;
pool->dma_pages_cnt = nr_pages;
+ pool->dma_need_sync = false;
for (i = 0; i < pool->dma_pages_cnt; i++) {
dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
xp_dma_unmap(pool, attrs);
return -ENOMEM;
}
+ if (dma_need_sync(dev, dma))
+ pool->dma_need_sync = true;
pool->dma_pages[i] = dma;
}
if (pool->unaligned)
xp_check_dma_contiguity(pool);
-
- pool->dev = dev;
- pool->cheap_dma = xp_check_cheap_dma(pool);
return 0;
}
EXPORT_SYMBOL(xp_dma_map);
xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
xskb->xdp.data_meta = xskb->xdp.data;
- if (!pool->cheap_dma) {
+ if (pool->dma_need_sync) {
dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
pool->frame_len,
DMA_BIDIRECTIONAL);
If unsure, say N.
+# This option selects XFRM_ALGO along with the AH authentication algorithms that
+# RFC 8221 lists as MUST be implemented.
+config XFRM_AH
+ tristate
+ select XFRM_ALGO
+ select CRYPTO
+ select CRYPTO_HMAC
+ select CRYPTO_SHA256
+
+# This option selects XFRM_ALGO along with the ESP encryption and authentication
+# algorithms that RFC 8221 lists as MUST be implemented.
+config XFRM_ESP
+ tristate
+ select XFRM_ALGO
+ select CRYPTO
+ select CRYPTO_AES
+ select CRYPTO_AUTHENC
+ select CRYPTO_CBC
+ select CRYPTO_ECHAINIV
+ select CRYPTO_GCM
+ select CRYPTO_HMAC
+ select CRYPTO_SEQIV
+ select CRYPTO_SHA256
+
config XFRM_IPCOMP
tristate
select XFRM_ALGO
struct xfrm_offload *xo = xfrm_offload(skb);
struct sec_path *sp;
- if (!xo)
+ if (!xo || (xo->flags & XFRM_XMIT))
return skb;
if (!(features & NETIF_F_HW_ESP))
return skb;
}
+ xo->flags |= XFRM_XMIT;
+
if (skb_is_gso(skb)) {
struct net_device *dev = skb->dev;
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
+#include <net/ip_tunnels.h>
#include <net/addrconf.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
static void xfrmi_dev_setup(struct net_device *dev)
{
dev->netdev_ops = &xfrmi_netdev_ops;
+ dev->header_ops = &ip_tunnel_header_ops;
dev->type = ARPHRD_NONE;
dev->mtu = ETH_DATA_LEN;
dev->min_mtu = ETH_MIN_MTU;
switch (x->outer_mode.family) {
case AF_INET:
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
-#ifdef CONFIG_NETFILTER
IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
-#endif
break;
case AF_INET6:
memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
-#ifdef CONFIG_NETFILTER
IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
-#endif
break;
}
{
unsigned int nr_cpus = bpf_num_possible_cpus();
void *array;
- size_t size;
- size = record_size * nr_cpus;
- array = malloc(size);
- memset(array, 0, size);
+ array = calloc(nr_cpus, record_size);
if (!array) {
fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
exit(EXIT_FAIL_MEM);
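/* Why calloc() here, beyond zeroing (a standalone sketch with hypothetical
 * sizes): unlike the open-coded malloc(nmemb * size) it replaces, calloc()
 * checks the element-count-times-size multiplication for overflow, which
 * all mainstream libcs reject with NULL.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t nmemb = SIZE_MAX / 2, size = 4;	/* nmemb * size wraps */
	void *p = calloc(nmemb, size);

	if (!p)
		printf("calloc rejected %zu * %zu (overflow)\n", nmemb, size);
	free(p);
	return 0;
}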
int i;
/* Alloc main stats_record structure */
- rec = malloc(sizeof(*rec));
- memset(rec, 0, sizeof(*rec));
+ rec = calloc(1, sizeof(*rec));
if (!rec) {
fprintf(stderr, "Mem alloc error\n");
exit(EXIT_FAIL_MEM);
{
unsigned int nr_cpus = bpf_num_possible_cpus();
struct datarec *array;
- size_t size;
- size = sizeof(struct datarec) * nr_cpus;
- array = malloc(size);
- memset(array, 0, size);
+ array = calloc(nr_cpus, sizeof(struct datarec));
if (!array) {
fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
exit(EXIT_FAIL_MEM);
size = sizeof(*rec) + n_cpus * sizeof(struct record);
rec = malloc(size);
- memset(rec, 0, size);
if (!rec) {
fprintf(stderr, "Mem alloc error\n");
exit(EXIT_FAIL_MEM);
}
+ memset(rec, 0, size);
rec->rx_cnt.cpu = alloc_record_per_cpu();
rec->redir_err.cpu = alloc_record_per_cpu();
rec->kthread.cpu = alloc_record_per_cpu();
{
unsigned int nr_cpus = bpf_num_possible_cpus();
struct datarec *array;
- size_t size;
- size = sizeof(struct datarec) * nr_cpus;
- array = malloc(size);
- memset(array, 0, size);
+ array = calloc(nr_cpus, sizeof(struct datarec));
if (!array) {
fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
exit(EXIT_FAIL_MEM);
{
unsigned int nr_rxqs = bpf_map__def(rx_queue_index_map)->max_entries;
struct record *array;
- size_t size;
- size = sizeof(struct record) * nr_rxqs;
- array = malloc(size);
- memset(array, 0, size);
+ array = calloc(nr_rxqs, sizeof(struct record));
if (!array) {
fprintf(stderr, "Mem alloc error (nr_rxqs:%u)\n", nr_rxqs);
exit(EXIT_FAIL_MEM);
struct stats_record *rec;
int i;
- rec = malloc(sizeof(*rec));
- memset(rec, 0, sizeof(*rec));
+ rec = calloc(1, sizeof(struct stats_record));
if (!rec) {
fprintf(stderr, "Mem alloc error\n");
exit(EXIT_FAIL_MEM);
#include <linux/fcntl.h>
#define statx foo
#define statx_timestamp foo_timestamp
+struct statx;
+struct statx_timestamp;
#include <sys/stat.h>
#undef statx
#undef statx_timestamp
# The following turn off the warnings enabled by -Wextra
KBUILD_CFLAGS += -Wno-missing-field-initializers
KBUILD_CFLAGS += -Wno-sign-compare
+KBUILD_CFLAGS += -Wno-type-limits
KBUILD_CPPFLAGS += -DKBUILD_EXTRA_WARN1
KBUILD_CFLAGS += $(call cc-option, -Wlogical-op)
KBUILD_CFLAGS += -Wmissing-field-initializers
KBUILD_CFLAGS += -Wsign-compare
+KBUILD_CFLAGS += -Wtype-limits
KBUILD_CFLAGS += $(call cc-option, -Wmaybe-uninitialized)
KBUILD_CFLAGS += $(call cc-option, -Wunused-macros)
$(addprefix $(obj)/, $(foreach s, $3, $($(m:%$(strip $2)=%$(s)))))))
endef
+quiet_cmd_copy = COPY $@
+ cmd_copy = cp $< $@
+
# Shipped files
# ===========================================================================
# DTC
# ---------------------------------------------------------------------------
DTC ?= $(objtree)/scripts/dtc/dtc
+DTC_FLAGS += -Wno-interrupt_provider
# Disable noisy checks by default
ifeq ($(findstring 1,$(KBUILD_EXTRA_WARN)),)
ifneq ($(findstring 2,$(KBUILD_EXTRA_WARN)),)
DTC_FLAGS += -Wnode_name_chars_strict \
- -Wproperty_name_chars_strict
+ -Wproperty_name_chars_strict \
+ -Winterrupt_provider
endif
DTC_FLAGS += $(DTC_FLAGS_$(basetarget))
EOF
}
+gen_proto_order_variant()
+{
+ local meta="$1"; shift
+ local pfx="$1"; shift
+ local name="$1"; shift
+ local sfx="$1"; shift
+ local order="$1"; shift
+ local arch="$1"
+ local atomic="$2"
+
+ local basename="${arch}${atomic}_${pfx}${name}${sfx}"
+
+ printf "#define arch_${basename}${order} ${basename}${order}\n"
+}
+
#gen_proto_order_variants(meta, pfx, name, sfx, arch, atomic, int, args...)
gen_proto_order_variants()
{
local template="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"
+ if [ -z "$arch" ]; then
+ gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
+
+ if meta_has_acquire "${meta}"; then
+ gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
+ fi
+ if meta_has_release "${meta}"; then
+ gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
+ fi
+ if meta_has_relaxed "${meta}"; then
+ gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_relaxed" "$@"
+ fi
+
+ echo ""
+ fi
+
# If we don't have relaxed atomics, then we don't bother with ordering fallbacks
# read_acquire and set_release need to be templated, though
if ! meta_has_relaxed "${meta}"; then
}
WARNING(i2c_bus_bridge, check_i2c_bus_bridge, NULL, &addr_size_cells);
+#define I2C_OWN_SLAVE_ADDRESS (1U << 30)
+#define I2C_TEN_BIT_ADDRESS (1U << 31)
+
static void check_i2c_bus_reg(struct check *c, struct dt_info *dti, struct node *node)
{
struct property *prop;
}
reg = fdt32_to_cpu(*cells);
+ /* Ignore I2C_OWN_SLAVE_ADDRESS */
+ reg &= ~I2C_OWN_SLAVE_ADDRESS;
snprintf(unit_addr, sizeof(unit_addr), "%x", reg);
if (!streq(unitname, unit_addr))
FAIL(c, dti, node, "I2C bus unit address format error, expected \"%s\"",
for (len = prop->val.len; len > 0; len -= 4) {
reg = fdt32_to_cpu(*(cells++));
- if (reg > 0x3ff)
+ /* Ignore I2C_OWN_SLAVE_ADDRESS */
+ reg &= ~I2C_OWN_SLAVE_ADDRESS;
+
+ if ((reg & I2C_TEN_BIT_ADDRESS) && ((reg & ~I2C_TEN_BIT_ADDRESS) > 0x3ff))
FAIL_PROP(c, dti, node, prop, "I2C address must be less than 10-bits, got \"0x%x\"",
reg);
-
+ else if (reg > 0x7f)
+ FAIL_PROP(c, dti, node, prop, "I2C address must be less than 7-bits, got \"0x%x\". Set I2C_TEN_BIT_ADDRESS for 10 bit addresses or fix the property",
+ reg);
}
}
WARNING(i2c_bus_reg, check_i2c_bus_reg, NULL, &reg_format, &i2c_bus_bridge);
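/* Standalone sketch of the address check above: the top two bits of an I2C
 * "reg" value are flags, not address bits, so they are masked or tested
 * before range-checking (the sample values below are illustrative only).
 */
#include <stdint.h>
#include <stdio.h>

#define I2C_OWN_SLAVE_ADDRESS	(1U << 30)
#define I2C_TEN_BIT_ADDRESS	(1U << 31)

static const char *classify(uint32_t reg)
{
	reg &= ~I2C_OWN_SLAVE_ADDRESS;	/* flag only, never an address bit */

	if (reg & I2C_TEN_BIT_ADDRESS)
		return (reg & ~I2C_TEN_BIT_ADDRESS) > 0x3ff ?
			"bad 10-bit" : "ok 10-bit";
	return reg > 0x7f ? "bad 7-bit" : "ok 7-bit";
}

int main(void)
{
	printf("%s\n", classify(0x50));				/* ok 7-bit */
	printf("%s\n", classify(I2C_TEN_BIT_ADDRESS | 0x2ff));	/* ok 10-bit */
	printf("%s\n", classify(0x1ff));			/* bad 7-bit */
	return 0;
}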
return false;
}
+
+static void check_interrupt_provider(struct check *c,
+ struct dt_info *dti,
+ struct node *node)
+{
+ struct property *prop;
+
+ if (!node_is_interrupt_provider(node))
+ return;
+
+ prop = get_property(node, "#interrupt-cells");
+ if (!prop)
+ FAIL(c, dti, node,
+ "Missing #interrupt-cells in interrupt provider");
+
+ prop = get_property(node, "#address-cells");
+ if (!prop)
+ FAIL(c, dti, node,
+ "Missing #address-cells in interrupt provider");
+}
+WARNING(interrupt_provider, check_interrupt_provider, NULL);
+
static void check_interrupts_property(struct check *c,
struct dt_info *dti,
struct node *node)
prop = get_property(irq_node, "#interrupt-cells");
if (!prop) {
- FAIL(c, dti, irq_node, "Missing #interrupt-cells in interrupt-parent");
+ /* We warn about that already in another test. */
return;
}
&deprecated_gpio_property,
&gpios_property,
&interrupts_property,
+ &interrupt_provider,
&alias_paths,
typedef uint32_t cell_t;
+static inline uint16_t dtb_ld16(const void *p)
+{
+ const uint8_t *bp = (const uint8_t *)p;
+
+ return ((uint16_t)bp[0] << 8)
+ | bp[1];
+}
+
+static inline uint32_t dtb_ld32(const void *p)
+{
+ const uint8_t *bp = (const uint8_t *)p;
+
+ return ((uint32_t)bp[0] << 24)
+ | ((uint32_t)bp[1] << 16)
+ | ((uint32_t)bp[2] << 8)
+ | bp[3];
+}
+
+static inline uint64_t dtb_ld64(const void *p)
+{
+ const uint8_t *bp = (const uint8_t *)p;
+
+ return ((uint64_t)bp[0] << 56)
+ | ((uint64_t)bp[1] << 48)
+ | ((uint64_t)bp[2] << 40)
+ | ((uint64_t)bp[3] << 32)
+ | ((uint64_t)bp[4] << 24)
+ | ((uint64_t)bp[5] << 16)
+ | ((uint64_t)bp[6] << 8)
+ | bp[7];
+}
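/* Quick standalone check of the byte-wise loaders above: reading one byte
 * at a time is endian- and alignment-safe, unlike casting the pointer to
 * fdt32_t and dereferencing it directly.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t dtb_ld32(const void *p)
{
	const uint8_t *bp = (const uint8_t *)p;

	return ((uint32_t)bp[0] << 24)
	     | ((uint32_t)bp[1] << 16)
	     | ((uint32_t)bp[2] << 8)
	     | bp[3];
}

int main(void)
{
	/* Big-endian bytes placed at an odd, unaligned offset. */
	const uint8_t buf[5] = { 0x00, 0x12, 0x34, 0x56, 0x78 };

	printf("0x%08x\n", dtb_ld32(buf + 1));	/* 0x12345678 on any host */
	return 0;
}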
#define streq(a, b) (strcmp((a), (b)) == 0)
#define strstarts(s, prefix) (strncmp((s), (prefix), strlen(prefix)) == 0)
emit_offset_label(f, m->ref, m->offset);
while ((d.len - off) >= sizeof(uint32_t)) {
- asm_emit_cell(e, fdt32_to_cpu(*((fdt32_t *)(d.val+off))));
+ asm_emit_cell(e, dtb_ld32(d.val + off));
off += sizeof(uint32_t);
}
return struct_size;
}
- if (can_assume(LIBFDT_ORDER) |
+ if (can_assume(LIBFDT_ORDER) ||
!fdt_blocks_misordered_(fdt, mem_rsv_size, struct_size)) {
/* no further work necessary */
err = fdt_move(fdt, buf, bufsize);
/* 'memrsv' state: Initial state after fdt_create()
*
* Allowed functions:
- * fdt_add_reservmap_entry()
+ * fdt_add_reservemap_entry()
* fdt_finish_reservemap() [moves to 'struct' state]
*/
static int fdt_sw_probe_memrsv_(void *fdt)
#include "libfdt_env.h"
#include "fdt.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#define FDT_FIRST_SUPPORTED_VERSION 0x02
#define FDT_LAST_SUPPORTED_VERSION 0x11
const char *fdt_strerror(int errval);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* LIBFDT_H */
fprintf(f, "%02"PRIx8, *(const uint8_t*)p);
break;
case 2:
- fprintf(f, "0x%02"PRIx16, fdt16_to_cpu(*(const fdt16_t*)p));
+ fprintf(f, "0x%02"PRIx16, dtb_ld16(p));
break;
case 4:
- fprintf(f, "0x%02"PRIx32, fdt32_to_cpu(*(const fdt32_t*)p));
+ fprintf(f, "0x%02"PRIx32, dtb_ld32(p));
break;
case 8:
- fprintf(f, "0x%02"PRIx64, fdt64_to_cpu(*(const fdt64_t*)p));
+ fprintf(f, "0x%02"PRIx64, dtb_ld64(p));
break;
}
if (p + width < end)
nnotcelllbl++;
}
- if ((p[len-1] == '\0') && (nnotstring == 0) && (nnul < (len-nnul))
+ if ((p[len-1] == '\0') && (nnotstring == 0) && (nnul <= (len-nnul))
&& (nnotstringlbl == 0)) {
return TYPE_STRING;
} else if (((len % sizeof(cell_t)) == 0) && (nnotcelllbl == 0)) {
-#define DTC_VERSION "DTC 1.6.0-g87a656ae"
+#define DTC_VERSION "DTC 1.6.0-g9d7888cb"
sprintf(buf, "0x%"PRIx8, *(uint8_t*)(data + off));
break;
case 2:
- sprintf(buf, "0x%"PRIx16, fdt16_to_cpu(*(fdt16_t*)(data + off)));
+ sprintf(buf, "0x%"PRIx16, dtb_ld16(data + off));
break;
case 4:
- sprintf(buf, "0x%"PRIx32, fdt32_to_cpu(*(fdt32_t*)(data + off)));
+ sprintf(buf, "0x%"PRIx32, dtb_ld32(data + off));
m = markers;
is_phandle = false;
for_each_marker_of_type(m, REF_PHANDLE) {
}
break;
case 8:
- sprintf(buf, "0x%"PRIx64, fdt64_to_cpu(*(fdt64_t*)(data + off)));
+ sprintf(buf, "0x%"PRIx64, dtb_ld64(data + off));
break;
}
source tree isn't cleaned after kernel installation).
The seed used for compilation is located at
- scripts/gcc-plgins/randomize_layout_seed.h. It remains after
+ scripts/gcc-plugins/randomize_layout_seed.h. It remains after
a make clean to allow for external modules to be compiled with
the existing seed and will be removed by a make mrproper or
make distclean.
* Copyright (C) 2015 Boris Barbulovski <bbarbulovski@gmail.com>
*/
-#include <qglobal.h>
-
-#include <QMainWindow>
-#include <QList>
-#include <qtextbrowser.h>
#include <QAction>
+#include <QApplication>
+#include <QCloseEvent>
+#include <QDebug>
+#include <QDesktopWidget>
#include <QFileDialog>
+#include <QLabel>
+#include <QLayout>
+#include <QList>
#include <QMenu>
-
-#include <qapplication.h>
-#include <qdesktopwidget.h>
-#include <qtoolbar.h>
-#include <qlayout.h>
-#include <qsplitter.h>
-#include <qlineedit.h>
-#include <qlabel.h>
-#include <qpushbutton.h>
-#include <qmenubar.h>
-#include <qmessagebox.h>
-#include <qregexp.h>
-#include <qevent.h>
+#include <QMenuBar>
+#include <QMessageBox>
+#include <QToolBar>
#include <stdlib.h>
if (rootEntry != &rootmenu && (mode == singleMode ||
(mode == symbolMode && rootEntry->parent != &rootmenu))) {
item = (ConfigItem *)topLevelItem(0);
- if (!item)
+ if (!item && mode != symbolMode) {
item = new ConfigItem(this, 0, true);
- last = item;
+ last = item;
+ }
}
if ((mode == singleMode || (mode == symbolMode && !(rootEntry->flags & MENU_ROOT))) &&
rootEntry->sym && rootEntry->prompt) {
rootEntry = menu;
updateListAll();
if (currentItem()) {
- currentItem()->setSelected(hasFocus());
+ setSelected(currentItem(), hasFocus());
scrollToItem(currentItem());
}
}
ConfigItem* item = (ConfigItem *)currentItem();
if (item) {
- item->setSelected(true);
+ setSelected(item, true);
menu = item->menu;
}
emit gotFocus(menu);
: Parent(parent), sym(0), _menu(0)
{
setObjectName(name);
-
+ setOpenLinks(false);
if (!objectName().isEmpty()) {
configSettings->beginGroup(objectName());
if (sym->name) {
head += " (";
if (showDebug())
- head += QString().sprintf("<a href=\"s%p\">", sym);
+ head += QString().sprintf("<a href=\"s%s\">", sym->name);
head += print_filter(sym->name);
if (showDebug())
head += "</a>";
} else if (sym->name) {
head += "<big><b>";
if (showDebug())
- head += QString().sprintf("<a href=\"s%p\">", sym);
+ head += QString().sprintf("<a href=\"s%s\">", sym->name);
head += print_filter(sym->name);
if (showDebug())
head += "</a>";
switch (prop->type) {
case P_PROMPT:
case P_MENU:
- debug += QString().sprintf("prompt: <a href=\"m%p\">", prop->menu);
+ debug += QString().sprintf("prompt: <a href=\"m%s\">", sym->name);
debug += print_filter(prop->text);
debug += "</a><br>";
break;
case P_DEFAULT:
case P_SELECT:
case P_RANGE:
+ case P_COMMENT:
+ case P_IMPLY:
+ case P_SYMBOL:
debug += prop_get_type_name(prop->type);
debug += ": ";
expr_print(prop->expr, expr_print_help, &debug, E_NONE);
QString str2 = print_filter(str);
if (sym && sym->name && !(sym->flags & SYMBOL_CONST)) {
- *text += QString().sprintf("<a href=\"s%p\">", sym);
+ *text += QString().sprintf("<a href=\"s%s\">", sym->name);
*text += str2;
*text += "</a>";
} else
*text += str2;
}
+void ConfigInfoView::clicked(const QUrl &url)
+{
+ QByteArray str = url.toEncoded();
+ const std::size_t count = str.size();
+	char *data = new char[count + 2];	/* count bytes + '$' + NUL */
+ struct symbol **result;
+ struct menu *m = NULL;
+
+ if (count < 1) {
+ qInfo() << "Clicked link is empty";
+		delete[] data;
+ return;
+ }
+
+ memcpy(data, str.constData(), count);
+ data[count] = '\0';
+
+	/* Search for an exact match */
+ data[0] = '^';
+ strcat(data, "$");
+ result = sym_re_search(data);
+ if (!result) {
+ qInfo() << "Clicked symbol is invalid:" << data;
+		delete[] data;
+ return;
+ }
+
+ sym = *result;
+
+	/* Search for the menu that holds the symbol */
+ for (struct property *prop = sym->prop; prop; prop = prop->next) {
+ if (prop->type != P_PROMPT && prop->type != P_MENU)
+ continue;
+ m = prop->menu;
+ break;
+ }
+
+ if (!m) {
+ /* Symbol is not visible as a menu */
+ symbolInfo();
+ emit showDebugChanged(true);
+ } else {
+ emit menuSelected(m);
+ }
+
+ free(result);
+	delete[] data;
+}
+
QMenu* ConfigInfoView::createStandardContextMenu(const QPoint & pos)
{
QMenu* popup = Parent::createStandardContextMenu(pos);
addToolBar(toolBar);
backAction = new QAction(QPixmap(xpm_back), "Back", this);
- connect(backAction, SIGNAL(triggered(bool)), SLOT(goBack()));
- backAction->setEnabled(false);
+ connect(backAction, SIGNAL(triggered(bool)), SLOT(goBack()));
+
QAction *quitAction = new QAction("&Quit", this);
quitAction->setShortcut(Qt::CTRL + Qt::Key_Q);
- connect(quitAction, SIGNAL(triggered(bool)), SLOT(close()));
+ connect(quitAction, SIGNAL(triggered(bool)), SLOT(close()));
+
QAction *loadAction = new QAction(QPixmap(xpm_load), "&Load", this);
loadAction->setShortcut(Qt::CTRL + Qt::Key_L);
- connect(loadAction, SIGNAL(triggered(bool)), SLOT(loadConfig()));
+ connect(loadAction, SIGNAL(triggered(bool)), SLOT(loadConfig()));
+
saveAction = new QAction(QPixmap(xpm_save), "&Save", this);
saveAction->setShortcut(Qt::CTRL + Qt::Key_S);
- connect(saveAction, SIGNAL(triggered(bool)), SLOT(saveConfig()));
+ connect(saveAction, SIGNAL(triggered(bool)), SLOT(saveConfig()));
+
conf_set_changed_callback(conf_changed);
+
// Set saveAction's initial state
conf_changed();
configname = xstrdup(conf_get_configname());
helpMenu->addAction(showIntroAction);
helpMenu->addAction(showAboutAction);
+ connect (helpText, SIGNAL (anchorClicked (const QUrl &)),
+ helpText, SLOT (clicked (const QUrl &)) );
+
connect(configList, SIGNAL(menuChanged(struct menu *)),
helpText, SLOT(setInfo(struct menu *)));
connect(configList, SIGNAL(menuSelected(struct menu *)),
void ConfigMainWindow::changeItens(struct menu *menu)
{
configList->setRootMenu(menu);
-
- if (configList->rootEntry->parent == &rootmenu)
- backAction->setEnabled(false);
- else
- backAction->setEnabled(true);
}
void ConfigMainWindow::changeMenu(struct menu *menu)
{
menuList->setRootMenu(menu);
-
- if (menuList->rootEntry->parent == &rootmenu)
- backAction->setEnabled(false);
- else
- backAction->setEnabled(true);
}
void ConfigMainWindow::setMenuLink(struct menu *menu)
return;
list->setRootMenu(parent);
break;
- case symbolMode:
+ case menuMode:
if (menu->flags & MENU_ROOT) {
- configList->setRootMenu(menu);
+ menuList->setRootMenu(menu);
configList->clearSelection();
- list = menuList;
- } else {
list = configList;
+ } else {
parent = menu_get_parent_menu(menu->parent);
if (!parent)
return;
- item = menuList->findConfigItem(parent);
+
+ /* Select the config view */
+ item = configList->findConfigItem(parent);
if (item) {
- item->setSelected(true);
- menuList->scrollToItem(item);
+ configList->setSelected(item, true);
+ configList->scrollToItem(item);
}
- list->setRootMenu(parent);
+
+ menuList->setRootMenu(parent);
+ menuList->clearSelection();
+ list = menuList;
}
break;
case fullMode:
if (list) {
item = list->findConfigItem(menu);
if (item) {
- item->setSelected(true);
+ list->setSelected(item, true);
list->scrollToItem(item);
list->setFocus();
+ helpText->setInfo(menu);
}
}
}
void ConfigMainWindow::goBack(void)
{
- ConfigItem* item, *oldSelection;
-
- configList->setParentMenu();
if (configList->rootEntry == &rootmenu)
- backAction->setEnabled(false);
-
- if (menuList->selectedItems().count() == 0)
return;
- item = (ConfigItem*)menuList->selectedItems().first();
- oldSelection = item;
- while (item) {
- if (item->menu == configList->rootEntry) {
- oldSelection->setSelected(false);
- item->setSelected(true);
- break;
- }
- item = (ConfigItem*)item->parent();
- }
+ configList->setParentMenu();
}
void ConfigMainWindow::showSingleView(void)
fullViewAction->setEnabled(true);
fullViewAction->setChecked(false);
+ backAction->setEnabled(true);
+
menuView->hide();
menuList->setRootMenu(0);
configList->mode = singleMode;
fullViewAction->setEnabled(true);
fullViewAction->setChecked(false);
+ backAction->setEnabled(false);
+
configList->mode = menuMode;
if (configList->rootEntry == &rootmenu)
configList->updateListAll();
fullViewAction->setEnabled(false);
fullViewAction->setChecked(true);
+ backAction->setEnabled(false);
+
menuView->hide();
menuList->setRootMenu(0);
configList->mode = fullMode;
* Copyright (C) 2002 Roman Zippel <zippel@linux-m68k.org>
*/
-#include <QTextBrowser>
-#include <QTreeWidget>
-#include <QMainWindow>
+#include <QCheckBox>
+#include <QDialog>
#include <QHeaderView>
-#include <qsettings.h>
+#include <QLineEdit>
+#include <QMainWindow>
#include <QPushButton>
#include <QSettings>
-#include <QLineEdit>
#include <QSplitter>
-#include <QCheckBox>
-#include <QDialog>
+#include <QTextBrowser>
+#include <QTreeWidget>
+
#include "expr.h"
class ConfigView;
public:
ConfigList(ConfigView* p, const char *name = 0);
void reinit(void);
+ ConfigItem* findConfigItem(struct menu *);
ConfigView* parent(void) const
{
return (ConfigView*)Parent::parent();
}
- ConfigItem* findConfigItem(struct menu *);
+ void setSelected(QTreeWidgetItem *item, bool enable) {
+ for (int i = 0; i < selectedItems().size(); i++)
+ selectedItems().at(i)->setSelected(false);
+
+ item->setSelected(enable);
+ }
protected:
void keyPressEvent(QKeyEvent *e);
void setInfo(struct menu *menu);
void saveSettings(void);
void setShowDebug(bool);
+ void clicked (const QUrl &url);
signals:
void showDebugChanged(bool);
int integrity_kernel_read(struct file *file, loff_t offset,
void *addr, unsigned long count)
{
- mm_segment_t old_fs;
- char __user *buf = (char __user *)addr;
- ssize_t ret;
-
- if (!(file->f_mode & FMODE_READ))
- return -EBADF;
-
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- ret = __vfs_read(file, buf, count, &offset);
- set_fs(old_fs);
-
- return ret;
+ return __kernel_read(file, addr, count, &offset);
}
/*
enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_BINARY_NO_FIELD_LEN,
IMA_SHOW_BINARY_OLD_STRING_FMT, IMA_SHOW_ASCII };
-enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
+enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8, TPM_PCR10 = 10 };
/* digest size for IMA, fits SHA1 or MD5 */
#define IMA_DIGEST_SIZE SHA1_DIGEST_SIZE
if (rc != 0)
return rc;
- /* cumulative sha1 over tpm registers 0-7 */
+ /* cumulative digest over TPM registers 0-7 */
for (i = TPM_PCR0; i < TPM_PCR8; i++) {
ima_pcrread(i, &d);
/* now accumulate with current aggregate */
rc = crypto_shash_update(shash, d.digest,
crypto_shash_digestsize(tfm));
}
+ /*
+ * Extend cumulative digest over TPM registers 8-9, which contain
+ * measurement for the kernel command line (reg. 8) and image (reg. 9)
+ * in a typical PCR allocation. Registers 8-9 are only included in
+ * non-SHA1 boot_aggregate digests to avoid ambiguity.
+ */
+ if (alg_id != TPM_ALG_SHA1) {
+ for (i = TPM_PCR8; i < TPM_PCR10; i++) {
+ ima_pcrread(i, &d);
+ rc = crypto_shash_update(shash, d.digest,
+ crypto_shash_digestsize(tfm));
+ }
+ }
if (!rc)
crypto_shash_final(shash, digest);
return rc;
int security_inode_copy_up_xattr(const char *name)
{
- return call_int_hook(inode_copy_up_xattr, -EOPNOTSUPP, name);
+ struct security_hook_list *hp;
+ int rc;
+
+ /*
+ * The implementation can return 0 (accept the xattr), 1 (discard the
+ * xattr), -EOPNOTSUPP if it does not know anything about the xattr or
+	 * any other error code in case of an error.
+ */
+ hlist_for_each_entry(hp,
+ &security_hook_heads.inode_copy_up_xattr, list) {
+ rc = hp->hook.inode_copy_up_xattr(name);
+ if (rc != LSM_RET_DEFAULT(inode_copy_up_xattr))
+ return rc;
+ }
+
+ return LSM_RET_DEFAULT(inode_copy_up_xattr);
}
EXPORT_SYMBOL(security_inode_copy_up_xattr);
retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
if (!retval) {
+ /* clear flags and stop any drain wait */
+ stream->partial_drain = false;
+ stream->metadata_set = false;
snd_compr_drain_notify(stream);
stream->runtime->total_bytes_available = 0;
stream->runtime->total_bytes_transferred = 0;
if (stream->next_track == false)
return -EPERM;
+ stream->partial_drain = true;
retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN);
if (retval) {
pr_debug("Partial drain returned failure\n");
{
struct snd_dm_fm_info info;
+ memset(&info, 0, sizeof(info));
+
info.fm_mode = opl3->fm_mode;
info.rhythm = opl3->rhythm;
if (copy_to_user(argp, &info, sizeof(struct snd_dm_fm_info)))
},
#endif
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE)
/* Cometlake-LP */
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_LP)
{
.flags = FLAG_SOF,
.device = 0x02c8,
.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
.device = 0x02c8,
},
-#endif
/* Cometlake-H */
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_H)
{
.flags = FLAG_SOF,
.device = 0x06c8,
if (a->type != b->type)
return (int)(a->type - b->type);
+	/* If both hs_mic and hp_mic are present, pick the hs_mic ahead of hp_mic. */
+ if (a->is_headset_mic && b->is_headphone_mic)
+ return -1; /* don't swap */
+ else if (a->is_headphone_mic && b->is_headset_mic)
+ return 1; /* swap */
+
/* In case one has boost and the other one has not,
pick the one with boost first. */
return (int)(b->has_boost_on_pin - a->has_boost_on_pin);
/* Icelake */
{ PCI_DEVICE(0x8086, 0x34c8),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ /* Icelake-H */
+ { PCI_DEVICE(0x8086, 0x3dc8),
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
/* Jasperlake */
{ PCI_DEVICE(0x8086, 0x38c8),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
/* Tigerlake */
{ PCI_DEVICE(0x8086, 0xa0c8),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ /* Tigerlake-H */
+ { PCI_DEVICE(0x8086, 0x43c8),
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
/* Elkhart Lake */
{ PCI_DEVICE(0x8086, 0x4b55),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ { PCI_DEVICE(0x8086, 0x4b58),
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
/* Broxton-P(Apollolake) */
{ PCI_DEVICE(0x8086, 0x5a98),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_BROXTON },
if (get_pcm_rec(spec, pcm_idx)->stream == hinfo)
return pcm_idx;
- codec_warn(codec, "HDMI: hinfo %p not registered\n", hinfo);
+ codec_warn(codec, "HDMI: hinfo %p not tied to a PCM\n", hinfo);
return -EINVAL;
}
return pin_idx;
}
- codec_dbg(codec, "HDMI: hinfo %p not registered\n", hinfo);
+ codec_dbg(codec, "HDMI: hinfo %p (pcm %d) not registered\n", hinfo,
+ hinfo_to_pcm_index(codec, hinfo));
return -EINVAL;
}
static int hdmi_parse_codec(struct hda_codec *codec)
{
- hda_nid_t nid;
+ hda_nid_t start_nid;
+ unsigned int caps;
int i, nodes;
- nodes = snd_hda_get_sub_nodes(codec, codec->core.afg, &nid);
- if (!nid || nodes < 0) {
+ nodes = snd_hda_get_sub_nodes(codec, codec->core.afg, &start_nid);
+ if (!start_nid || nodes < 0) {
codec_warn(codec, "HDMI: failed to get afg sub nodes\n");
return -EINVAL;
}
- for (i = 0; i < nodes; i++, nid++) {
- unsigned int caps;
- unsigned int type;
+ /*
+	 * hdmi_add_pin() assumes the total number of converters to
+	 * be known, so discover all converters first
+ */
+ for (i = 0; i < nodes; i++) {
+ hda_nid_t nid = start_nid + i;
caps = get_wcaps(codec, nid);
- type = get_wcaps_type(caps);
if (!(caps & AC_WCAP_DIGITAL))
continue;
- switch (type) {
- case AC_WID_AUD_OUT:
+ if (get_wcaps_type(caps) == AC_WID_AUD_OUT)
hdmi_add_cvt(codec, nid);
- break;
- case AC_WID_PIN:
+ }
+
+ /* discover audio pins */
+ for (i = 0; i < nodes; i++) {
+ hda_nid_t nid = start_nid + i;
+
+ caps = get_wcaps(codec, nid);
+
+ if (!(caps & AC_WCAP_DIGITAL))
+ continue;
+
+ if (get_wcaps_type(caps) == AC_WID_PIN)
hdmi_add_pin(codec, nid);
- break;
- }
}
return 0;
HDA_CODEC_ENTRY(0x10de0097, "GPU 97 HDMI/DP", patch_nvhdmi),
HDA_CODEC_ENTRY(0x10de0098, "GPU 98 HDMI/DP", patch_nvhdmi),
HDA_CODEC_ENTRY(0x10de0099, "GPU 99 HDMI/DP", patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de009a, "GPU 9a HDMI/DP", patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de009d, "GPU 9d HDMI/DP", patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de009e, "GPU 9e HDMI/DP", patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de009f, "GPU 9f HDMI/DP", patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de00a0, "GPU a0 HDMI/DP", patch_nvhdmi),
HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch),
HDA_CODEC_ENTRY(0x10de8067, "MCP67/68 HDMI", patch_nvhdmi_2ch),
HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP", patch_via_hdmi),
SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_CLEVO_P950),
+ SND_PCI_QUIRK(0x1462, 0x11f7, "MSI-GE63", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1462, 0x1275, "MSI-GL63", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
ALC236_FIXUP_HP_MUTE_LED,
ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET,
ALC295_FIXUP_ASUS_MIC_NO_PRESENCE,
+ ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS,
+ ALC269VC_FIXUP_ACER_HEADSET_MIC,
+ ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE,
};
static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC269_FIXUP_HEADSET_MODE
},
+ [ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x14, 0x90100120 }, /* use as internal speaker */
+ { 0x18, 0x02a111f0 }, /* use as headset mic, without its own jack detect */
+ { 0x1a, 0x01011020 }, /* use as line out */
+ { },
+ },
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MIC
+ },
+ [ALC269VC_FIXUP_ACER_HEADSET_MIC] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x18, 0x02a11030 }, /* use as headset mic */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MIC
+ },
+ [ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x18, 0x01a11130 }, /* use as headset mic, without its own jack detect */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MIC
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1025, 0x1065, "Acer Aspire C20-820", ALC269VC_FIXUP_ACER_HEADSET_MIC),
SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
SND_PCI_QUIRK(0x1025, 0x1099, "Acer Aspire E5-523G", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1025, 0x110e, "Acer Aspire ES1-432", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1025, 0x1246, "Acer Predator Helios 500", ALC299_FIXUP_PREDATOR_SPK),
+ SND_PCI_QUIRK(0x1025, 0x1247, "Acer vCopperbox", ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS),
+ SND_PCI_QUIRK(0x1025, 0x1248, "Acer Veriton N4660G", ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
+ SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
- SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Yoga 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
- SND_PCI_QUIRK(0x17aa, 0x2293, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
+ SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
SND_PCI_QUIRK(0x17aa, 0x22be, "Thinkpad X1 Carbon 8th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
{
struct snd_soc_pcm_runtime *prtd;
struct i2s_dev_data *adata;
+ struct i2s_stream_instance *ins;
prtd = substream->private_data;
component = snd_soc_rtdcom_lookup(prtd, DRV_NAME);
adata = dev_get_drvdata(component->dev);
+ ins = substream->runtime->private_data;
+ if (!ins)
+ return -EINVAL;
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ switch (ins->i2s_instance) {
+ case I2S_BT_INSTANCE:
+ adata->play_stream = NULL;
+ break;
+ case I2S_SP_INSTANCE:
+ default:
+ adata->i2ssp_play_stream = NULL;
+ }
+ } else {
+ switch (ins->i2s_instance) {
+ case I2S_BT_INSTANCE:
+ adata->capture_stream = NULL;
+ break;
+ case I2S_SP_INSTANCE:
+ default:
+ adata->i2ssp_capture_stream = NULL;
+ }
+ }
	/* Disable ACP irq when the current stream is being closed and
	 * no other stream is active.
if (!adata->play_stream && !adata->capture_stream &&
!adata->i2ssp_play_stream && !adata->i2ssp_capture_stream)
rv_writel(0, adata->acp3x_base + mmACP_EXTERNAL_INTR_ENB);
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- adata->play_stream = NULL;
- adata->i2ssp_play_stream = NULL;
- } else {
- adata->capture_stream = NULL;
- adata->i2ssp_capture_stream = NULL;
- }
return 0;
}
# Renoir platform Support
snd-rn-pci-acp3x-objs := rn-pci-acp3x.o
snd-acp3x-pdm-dma-objs := acp3x-pdm-dma.o
-obj-$(CONFIG_SND_SOC_AMD_RENOIR) += snd-rn-pci-acp3x.o
-obj-$(CONFIG_SND_SOC_AMD_RENOIR) += snd-acp3x-pdm-dma.o
-obj-$(CONFIG_SND_SOC_AMD_RENOIR_MACH) += acp3x-rn.o
+snd-acp3x-rn-objs := acp3x-rn.o
+obj-$(CONFIG_SND_SOC_AMD_RENOIR) += snd-rn-pci-acp3x.o
+obj-$(CONFIG_SND_SOC_AMD_RENOIR) += snd-acp3x-pdm-dma.o
+obj-$(CONFIG_SND_SOC_AMD_RENOIR_MACH) += snd-acp3x-rn.o
ret = snd_hda_codec_set_name(hcodec, hcodec->preset->name);
if (ret < 0) {
dev_err(&hdev->dev, "name failed %s\n", hcodec->preset->name);
- goto error;
+ goto error_pm;
}
ret = snd_hdac_regmap_init(&hcodec->core);
if (ret < 0) {
dev_err(&hdev->dev, "regmap init failed\n");
- goto error;
+ goto error_pm;
}
patch = (hda_codec_patch_t)hcodec->preset->driver_data;
ret = patch(hcodec);
if (ret < 0) {
dev_err(&hdev->dev, "patch failed %d\n", ret);
- goto error;
+ goto error_regmap;
}
} else {
dev_dbg(&hdev->dev, "no patch file found\n");
ret = snd_hda_codec_parse_pcms(hcodec);
if (ret < 0) {
dev_err(&hdev->dev, "unable to map pcms to dai %d\n", ret);
- goto error;
+ goto error_regmap;
}
/* HDMI controls need to be created in machine drivers */
if (ret < 0) {
dev_err(&hdev->dev, "unable to create controls %d\n",
ret);
- goto error;
+ goto error_regmap;
}
}
return 0;
-error:
+error_regmap:
+ snd_hdac_regmap_exit(hdev);
+error_pm:
pm_runtime_put(&hdev->dev);
error_no_pm:
snd_hdac_ext_bus_link_put(hdev->bus, hlink);
pm_runtime_disable(&hdev->dev);
snd_hdac_ext_bus_link_put(hdev->bus, hlink);
+
+ snd_hdac_regmap_exit(hdev);
}
static const struct snd_soc_dapm_route hdac_hda_dapm_routes[] = {
case MAX98390_IRQ_CTRL ... MAX98390_WDOG_CTRL:
case MAX98390_MEAS_ADC_THERM_WARN_THRESH
... MAX98390_BROWNOUT_INFINITE_HOLD:
- case MAX98390_BROWNOUT_LVL_HOLD ... THERMAL_COILTEMP_RD_BACK_BYTE0:
- case DSMIG_DEBUZZER_THRESHOLD ... MAX98390_R24FF_REV_ID:
+ case MAX98390_BROWNOUT_LVL_HOLD ... DSMIG_DEBUZZER_THRESHOLD:
+ case DSM_VOL_ENA ... MAX98390_R24FF_REV_ID:
return true;
default:
return false;
case MAX98390_BROWNOUT_LOWEST_STATUS:
case MAX98390_ENV_TRACK_BOOST_VOUT_READ:
case DSM_STBASS_HPF_B0_BYTE0 ... DSM_DEBUZZER_ATTACK_TIME_BYTE2:
- case THERMAL_RDC_RD_BACK_BYTE1 ... THERMAL_COILTEMP_RD_BACK_BYTE0:
+ case THERMAL_RDC_RD_BACK_BYTE1 ... DSMIG_DEBUZZER_THRESHOLD:
case DSM_THERMAL_GAIN ... DSM_WBDRC_GAIN:
return true;
default:
{ 0x0000, 0x0000 },
{ 0x0004, 0xa000 },
{ 0x0006, 0x0003 },
- { 0x000a, 0x0802 },
- { 0x000c, 0x0020 },
+ { 0x000a, 0x081e },
+ { 0x000c, 0x0006 },
{ 0x000e, 0x0000 },
{ 0x0010, 0x0000 },
{ 0x0012, 0x0000 },
+ { 0x0014, 0x0000 },
+ { 0x0016, 0x0000 },
+ { 0x0018, 0x0000 },
{ 0x0020, 0x8000 },
- { 0x0022, 0x471b },
- { 0x006a, 0x0000 },
- { 0x006c, 0x4020 },
+ { 0x0022, 0x8043 },
{ 0x0076, 0x0000 },
{ 0x0078, 0x0000 },
- { 0x007a, 0x0000 },
+ { 0x007a, 0x0002 },
{ 0x007c, 0x10ec },
{ 0x007d, 0x1015 },
{ 0x00f0, 0x5000 },
- { 0x00f2, 0x0774 },
- { 0x00f3, 0x8400 },
+ { 0x00f2, 0x004c },
+ { 0x00f3, 0xecfe },
{ 0x00f4, 0x0000 },
+ { 0x00f6, 0x0400 },
{ 0x0100, 0x0028 },
{ 0x0102, 0xff02 },
- { 0x0104, 0x8232 },
+ { 0x0104, 0xa213 },
{ 0x0106, 0x200c },
- { 0x010c, 0x002f },
- { 0x010e, 0xc000 },
+ { 0x010c, 0x0000 },
+ { 0x010e, 0x0058 },
{ 0x0111, 0x0200 },
{ 0x0112, 0x0400 },
{ 0x0114, 0x0022 },
{ 0x0118, 0x0000 },
{ 0x011a, 0x0123 },
{ 0x011c, 0x4567 },
- { 0x0300, 0xdddd },
- { 0x0302, 0x0000 },
- { 0x0311, 0x9330 },
- { 0x0313, 0x0000 },
- { 0x0314, 0x0000 },
+ { 0x0300, 0x203d },
+ { 0x0302, 0x001e },
+ { 0x0311, 0x0000 },
+ { 0x0313, 0x6014 },
+ { 0x0314, 0x00a2 },
{ 0x031a, 0x00a0 },
{ 0x031c, 0x001f },
{ 0x031d, 0xffff },
{ 0x031e, 0x0000 },
{ 0x031f, 0x0000 },
+ { 0x0320, 0x0000 },
{ 0x0321, 0x0000 },
- { 0x0322, 0x0000 },
- { 0x0328, 0x0000 },
- { 0x0329, 0x0000 },
- { 0x032a, 0x0000 },
- { 0x032b, 0x0000 },
- { 0x032c, 0x0000 },
- { 0x032d, 0x0000 },
- { 0x032e, 0x030e },
- { 0x0330, 0x0080 },
+ { 0x0322, 0xd7df },
+ { 0x0328, 0x10b2 },
+ { 0x0329, 0x0175 },
+ { 0x032a, 0x36ad },
+ { 0x032b, 0x7e55 },
+ { 0x032c, 0x0520 },
+ { 0x032d, 0xaa00 },
+ { 0x032e, 0x570e },
+ { 0x0330, 0xe180 },
{ 0x0332, 0x0034 },
- { 0x0334, 0x0000 },
- { 0x0336, 0x0000 },
+ { 0x0334, 0x0001 },
+ { 0x0336, 0x0010 },
+ { 0x0338, 0x0000 },
+ { 0x04fa, 0x0030 },
+ { 0x04fc, 0x35c8 },
+ { 0x04fe, 0x0800 },
+ { 0x0500, 0x0400 },
+ { 0x0502, 0x1000 },
+ { 0x0504, 0x0000 },
{ 0x0506, 0x04ff },
- { 0x0508, 0x0030 },
- { 0x050a, 0x0018 },
- { 0x0519, 0x307f },
- { 0x051a, 0xffff },
- { 0x051b, 0x4000 },
+ { 0x0508, 0x0010 },
+ { 0x050a, 0x001a },
+ { 0x0519, 0x1c68 },
+ { 0x051a, 0x0ccc },
+ { 0x051b, 0x0666 },
{ 0x051d, 0x0000 },
{ 0x051f, 0x0000 },
- { 0x0536, 0x1000 },
+ { 0x0536, 0x061c },
{ 0x0538, 0x0000 },
{ 0x053a, 0x0000 },
{ 0x053c, 0x0000 },
{ 0x0544, 0x0000 },
{ 0x0568, 0x0000 },
{ 0x056a, 0x0000 },
- { 0x1000, 0x0000 },
- { 0x1002, 0x6505 },
+ { 0x1000, 0x0040 },
+ { 0x1002, 0x5405 },
{ 0x1006, 0x5515 },
- { 0x1007, 0x003f },
- { 0x1009, 0x770f },
- { 0x100a, 0x01ff },
- { 0x100c, 0x0000 },
+ { 0x1007, 0x05f7 },
+ { 0x1009, 0x0b0a },
+ { 0x100a, 0x00ef },
{ 0x100d, 0x0003 },
{ 0x1010, 0xa433 },
{ 0x1020, 0x0000 },
- { 0x1200, 0x3d02 },
- { 0x1202, 0x0813 },
- { 0x1204, 0x0211 },
+ { 0x1200, 0x5a01 },
+ { 0x1202, 0x6524 },
+ { 0x1204, 0x1f00 },
{ 0x1206, 0x0000 },
{ 0x1208, 0x0000 },
{ 0x120a, 0x0000 },
{ 0x120e, 0x0000 },
{ 0x1210, 0x0000 },
{ 0x1212, 0x0000 },
- { 0x1300, 0x0701 },
- { 0x1302, 0x12f9 },
- { 0x1304, 0x3405 },
+ { 0x1300, 0x10a1 },
+ { 0x1302, 0x12ff },
+ { 0x1304, 0x0400 },
{ 0x1305, 0x0844 },
- { 0x1306, 0x1611 },
+ { 0x1306, 0x4611 },
{ 0x1308, 0x555e },
{ 0x130a, 0x0000 },
- { 0x130c, 0x2400},
- { 0x130e, 0x7700 },
- { 0x130f, 0x0000 },
+ { 0x130c, 0x2000 },
+ { 0x130e, 0x0100 },
+ { 0x130f, 0x0001 },
{ 0x1310, 0x0000 },
{ 0x1312, 0x0000 },
{ 0x1314, 0x0000 },
case RT1015_DC_CALIB_CLSD7:
case RT1015_DC_CALIB_CLSD8:
case RT1015_S_BST_TIMING_INTER1:
+ case RT1015_OSCK_STA:
+ case RT1015_MONO_DYNA_CTRL1:
+ case RT1015_MONO_DYNA_CTRL5:
return true;
default:
case RT1015_CLK3:
case RT1015_PLL1:
case RT1015_PLL2:
+ case RT1015_DUM_RW1:
+ case RT1015_DUM_RW2:
+ case RT1015_DUM_RW3:
+ case RT1015_DUM_RW4:
+ case RT1015_DUM_RW5:
+ case RT1015_DUM_RW6:
case RT1015_CLK_DET:
case RT1015_SIL_DET:
case RT1015_CUSTOMER_ID:
case RT1015_PAD_DRV2:
case RT1015_GAT_BOOST:
case RT1015_PRO_ALT:
+ case RT1015_OSCK_STA:
case RT1015_MAN_I2C:
case RT1015_DAC1:
case RT1015_DAC2:
case RT1015_SMART_BST_CTRL2:
case RT1015_ANA_CTRL1:
case RT1015_ANA_CTRL2:
+ case RT1015_PWR_STATE_CTRL:
+ case RT1015_MONO_DYNA_CTRL:
+ case RT1015_MONO_DYNA_CTRL1:
+ case RT1015_MONO_DYNA_CTRL2:
+ case RT1015_MONO_DYNA_CTRL3:
+ case RT1015_MONO_DYNA_CTRL4:
+ case RT1015_MONO_DYNA_CTRL5:
case RT1015_SPK_VOL:
case RT1015_SHORT_DETTOP1:
case RT1015_SHORT_DETTOP2:
#define RT1015_CLK3 0x0006
#define RT1015_PLL1 0x000a
#define RT1015_PLL2 0x000c
+#define RT1015_DUM_RW1 0x000e
+#define RT1015_DUM_RW2 0x0010
+#define RT1015_DUM_RW3 0x0012
+#define RT1015_DUM_RW4 0x0014
+#define RT1015_DUM_RW5 0x0016
+#define RT1015_DUM_RW6 0x0018
#define RT1015_CLK_DET 0x0020
#define RT1015_SIL_DET 0x0022
#define RT1015_CUSTOMER_ID 0x0076
#define RT1015_PAD_DRV2 0x00f2
#define RT1015_GAT_BOOST 0x00f3
#define RT1015_PRO_ALT 0x00f4
+#define RT1015_OSCK_STA 0x00f6
#define RT1015_MAN_I2C 0x0100
#define RT1015_DAC1 0x0102
#define RT1015_DAC2 0x0104
#define RT1015_ANA_CTRL1 0x0334
#define RT1015_ANA_CTRL2 0x0336
#define RT1015_PWR_STATE_CTRL 0x0338
-#define RT1015_SPK_VOL 0x0506
+#define RT1015_MONO_DYNA_CTRL 0x04fa
+#define RT1015_MONO_DYNA_CTRL1 0x04fc
+#define RT1015_MONO_DYNA_CTRL2 0x04fe
+#define RT1015_MONO_DYNA_CTRL3 0x0500
+#define RT1015_MONO_DYNA_CTRL4 0x0502
+#define RT1015_MONO_DYNA_CTRL5 0x0504
+#define RT1015_SPK_VOL 0x0506
#define RT1015_SHORT_DETTOP1 0x0508
#define RT1015_SHORT_DETTOP2 0x050a
#define RT1015_SPK_DC_DETECT1 0x0519
RT5682_PWR_ANLG_1, RT5682_PWR_FV2, RT5682_PWR_FV2);
snd_soc_component_update_bits(component, RT5682_PWR_ANLG_3,
RT5682_PWR_CBJ, RT5682_PWR_CBJ);
-
+ snd_soc_component_update_bits(component,
+ RT5682_HP_CHARGE_PUMP_1,
+ RT5682_OSW_L_MASK | RT5682_OSW_R_MASK, 0);
snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
RT5682_TRIG_JD_MASK, RT5682_TRIG_JD_HIGH);
rt5682->jack_type = SND_JACK_HEADPHONE;
break;
}
+
+ snd_soc_component_update_bits(component,
+ RT5682_HP_CHARGE_PUMP_1,
+ RT5682_OSW_L_MASK | RT5682_OSW_R_MASK,
+ RT5682_OSW_L_EN | RT5682_OSW_R_EN);
} else {
rt5682_enable_push_button_irq(component, false);
snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
return ret;
}
rt5682->mclk = NULL;
- } else {
- /* Register CCF DAI clock control */
- ret = rt5682_register_dai_clks(component);
- if (ret)
- return ret;
}
+
+ /* Register CCF DAI clock control */
+ ret = rt5682_register_dai_clks(component);
+ if (ret)
+ return ret;
+
/* Initial setup for CCF */
rt5682->lrck[RT5682_AIF1] = CLK_48;
#endif
 * @dma_chan: input and output DMA channels
* @dma_data: private dma data
* @pos: hardware pointer position
+ * @req_dma_chan: flag to release dev_to_dev chan
* @private: pair private area
*/
struct fsl_asrc_pair {
struct dma_chan *dma_chan[2];
struct imx_dma_data dma_data;
unsigned int pos;
+ bool req_dma_chan;
void *private;
};
struct snd_dmaengine_dai_dma_data *dma_params_be = NULL;
struct snd_pcm_runtime *runtime = substream->runtime;
struct fsl_asrc_pair *pair = runtime->private_data;
+ struct dma_chan *tmp_chan = NULL, *be_chan = NULL;
+ struct snd_soc_component *component_be = NULL;
struct fsl_asrc *asrc = pair->asrc;
struct dma_slave_config config_fe, config_be;
enum asrc_pair_index index = pair->index;
int stream = substream->stream;
struct imx_dma_data *tmp_data;
struct snd_soc_dpcm *dpcm;
- struct dma_chan *tmp_chan;
struct device *dev_be;
u8 dir = tx ? OUT : IN;
dma_cap_mask_t mask;
dma_cap_set(DMA_SLAVE, mask);
dma_cap_set(DMA_CYCLIC, mask);
+ /*
+ * The Back-End device might have already requested a DMA channel,
+	 * so try to reuse it first, and request a new one if that is NULL.
+ */
+ component_be = snd_soc_lookup_component_nolocked(dev_be, SND_DMAENGINE_PCM_DRV_NAME);
+ if (component_be) {
+ be_chan = soc_component_to_pcm(component_be)->chan[substream->stream];
+ tmp_chan = be_chan;
+ }
+ if (!tmp_chan)
+ tmp_chan = dma_request_slave_channel(dev_be, tx ? "tx" : "rx");
+
/*
* An EDMA DEV_TO_DEV channel is fixed and bound with DMA event of each
* peripheral, unlike SDMA channel that is allocated dynamically. So no
- * need to configure dma_request and dma_request2, but get dma_chan via
- * dma_request_slave_channel directly with dma name of Front-End device
+ * need to configure dma_request and dma_request2, but get dma_chan of
+ * Back-End device directly via dma_request_slave_channel.
*/
if (!asrc->use_edma) {
/* Get DMA request of Back-End */
- tmp_chan = dma_request_slave_channel(dev_be, tx ? "tx" : "rx");
tmp_data = tmp_chan->private;
pair->dma_data.dma_request = tmp_data->dma_request;
- dma_release_channel(tmp_chan);
+ if (!be_chan)
+ dma_release_channel(tmp_chan);
/* Get DMA request of Front-End */
tmp_chan = asrc->get_dma_channel(pair, dir);
pair->dma_chan[dir] =
dma_request_channel(mask, filter, &pair->dma_data);
+ pair->req_dma_chan = true;
} else {
- pair->dma_chan[dir] =
- asrc->get_dma_channel(pair, dir);
+ pair->dma_chan[dir] = tmp_chan;
+ /* Do not flag to release if we are reusing the Back-End one */
+ pair->req_dma_chan = !be_chan;
}
if (!pair->dma_chan[dir]) {
ret = dmaengine_slave_config(pair->dma_chan[dir], &config_be);
if (ret) {
dev_err(dev, "failed to config DMA channel for Back-End\n");
- dma_release_channel(pair->dma_chan[dir]);
+ if (pair->req_dma_chan)
+ dma_release_channel(pair->dma_chan[dir]);
return ret;
}
static int fsl_asrc_dma_hw_free(struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
+ bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
struct snd_pcm_runtime *runtime = substream->runtime;
struct fsl_asrc_pair *pair = runtime->private_data;
+ u8 dir = tx ? OUT : IN;
snd_pcm_set_runtime_buffer(substream, NULL);
- if (pair->dma_chan[IN])
- dma_release_channel(pair->dma_chan[IN]);
+ if (pair->dma_chan[!dir])
+ dma_release_channel(pair->dma_chan[!dir]);
- if (pair->dma_chan[OUT])
- dma_release_channel(pair->dma_chan[OUT]);
+ /* release dev_to_dev chan if we aren't reusing the Back-End one */
+ if (pair->dma_chan[dir] && pair->req_dma_chan)
+ dma_release_channel(pair->dma_chan[dir]);
- pair->dma_chan[IN] = NULL;
- pair->dma_chan[OUT] = NULL;
+ pair->dma_chan[!dir] = NULL;
+ pair->dma_chan[dir] = NULL;
return 0;
}
static int fsl_mqs_runtime_resume(struct device *dev)
{
struct fsl_mqs *mqs_priv = dev_get_drvdata(dev);
+ int ret;
- if (mqs_priv->ipg)
- clk_prepare_enable(mqs_priv->ipg);
+ ret = clk_prepare_enable(mqs_priv->ipg);
+ if (ret) {
+ dev_err(dev, "failed to enable ipg clock\n");
+ return ret;
+ }
- if (mqs_priv->mclk)
- clk_prepare_enable(mqs_priv->mclk);
+ ret = clk_prepare_enable(mqs_priv->mclk);
+ if (ret) {
+ dev_err(dev, "failed to enable mclk clock\n");
+ clk_disable_unprepare(mqs_priv->ipg);
+ return ret;
+ }
if (mqs_priv->use_gpr)
regmap_write(mqs_priv->regmap, IOMUXC_GPR2,
regmap_read(mqs_priv->regmap, REG_MQS_CTRL,
&mqs_priv->reg_mqs_ctrl);
- if (mqs_priv->mclk)
- clk_disable_unprepare(mqs_priv->mclk);
-
- if (mqs_priv->ipg)
- clk_disable_unprepare(mqs_priv->ipg);
+ clk_disable_unprepare(mqs_priv->mclk);
+ clk_disable_unprepare(mqs_priv->ipg);
return 0;
}
struct regmap *regs = ssi->regs;
u32 pm = 999, div2, psr, stccr, mask, afreq, factor, i;
unsigned long clkrate, baudrate, tmprate;
- unsigned int slots = params_channels(hw_params);
- unsigned int slot_width = 32;
+ unsigned int channels = params_channels(hw_params);
+ unsigned int slot_width = params_width(hw_params);
+ unsigned int slots = 2;
u64 sub, savesub = 100000;
unsigned int freq;
bool baudclk_is_used;
/* Override slots and slot_width if being specifically set... */
if (ssi->slots)
slots = ssi->slots;
- /* ...but keep 32 bits if slots is 2 -- I2S Master mode */
- if (ssi->slot_width && slots != 2)
+ if (ssi->slot_width)
slot_width = ssi->slot_width;
+ /* ...but force 32 bits for stereo audio using I2S Master Mode */
+ if (channels == 2 &&
+ (ssi->i2s_net & SSI_SCR_I2S_MODE_MASK) == SSI_SCR_I2S_MODE_MASTER)
+ slot_width = 32;
+
/* Generate bit clock based on the slot number and slot width */
freq = slots * slot_width * params_rate(hw_params);
endif ## SND_SOC_SOF_HDA_LINK || SND_SOC_SOF_BAYTRAIL
-if (SND_SOC_SOF_COMETLAKE_LP && SND_SOC_SOF_HDA_LINK)
+if (SND_SOC_SOF_COMETLAKE && SND_SOC_SOF_HDA_LINK)
config SND_SOC_INTEL_CML_LP_DA7219_MAX98357A_MACH
tristate "CML_LP with DA7219 and MAX98357A in I2S Mode"
Say Y if you have such a device.
If unsure select "N".
-endif ## SND_SOC_SOF_COMETLAKE_LP && SND_SOC_SOF_HDA_LINK
+endif ## SND_SOC_SOF_COMETLAKE && SND_SOC_SOF_HDA_LINK
if SND_SOC_SOF_JASPERLAKE
#include <linux/module.h>
#include "common.h"
+#include "qdsp6/q6afe.h"
int qcom_snd_parse_of(struct snd_soc_card *card)
{
}
link->no_pcm = 1;
link->ignore_pmdown_time = 1;
+
+ if (q6afe_is_rx_port(link->id)) {
+ link->dpcm_playback = 1;
+ link->dpcm_capture = 0;
+ } else {
+ link->dpcm_playback = 0;
+ link->dpcm_capture = 1;
+ }
+
} else {
dlc = devm_kzalloc(dev, sizeof(*dlc), GFP_KERNEL);
if (!dlc)
link->codecs->dai_name = "snd-soc-dummy-dai";
link->codecs->name = "snd-soc-dummy";
link->dynamic = 1;
+ link->dpcm_playback = 1;
+ link->dpcm_capture = 1;
}
link->ignore_suspend = 1;
link->nonatomic = 1;
- link->dpcm_playback = 1;
- link->dpcm_capture = 1;
link->stream_name = link->name;
link++;
}
EXPORT_SYMBOL_GPL(q6afe_get_port_id);
+int q6afe_is_rx_port(int index)
+{
+ if (index < 0 || index >= AFE_PORT_MAX)
+ return -EINVAL;
+
+ return port_maps[index].is_rx;
+}
+EXPORT_SYMBOL_GPL(q6afe_is_rx_port);
static int afe_apr_send_pkt(struct q6afe *afe, struct apr_pkt *pkt,
struct q6afe_port *port)
{
int q6afe_port_stop(struct q6afe_port *port);
void q6afe_port_put(struct q6afe_port *port);
int q6afe_get_port_id(int index);
+int q6afe_is_rx_port(int index);
void q6afe_hdmi_port_prepare(struct q6afe_port *port,
struct q6afe_hdmi_cfg *cfg);
void q6afe_slim_port_prepare(struct q6afe_port *port,
#define ASM_STREAM_CMD_FLUSH 0x00010BCE
#define ASM_SESSION_CMD_PAUSE 0x00010BD3
#define ASM_DATA_CMD_EOS 0x00010BDB
+#define ASM_DATA_EVENT_RENDERED_EOS 0x00010C1C
#define ASM_NULL_POPP_TOPOLOGY 0x00010C68
#define ASM_STREAM_CMD_FLUSH_READBUFS 0x00010C09
#define ASM_STREAM_CMD_SET_ENCDEC_PARAM 0x00010C10
case ASM_SESSION_CMD_SUSPEND:
client_event = ASM_CLIENT_EVENT_CMD_SUSPEND_DONE;
break;
- case ASM_DATA_CMD_EOS:
- client_event = ASM_CLIENT_EVENT_CMD_EOS_DONE;
- break;
case ASM_STREAM_CMD_FLUSH:
client_event = ASM_CLIENT_EVENT_CMD_FLUSH_DONE;
break;
spin_unlock_irqrestore(&ac->lock, flags);
}
+ break;
+ case ASM_DATA_EVENT_RENDERED_EOS:
+ client_event = ASM_CLIENT_EVENT_CMD_EOS_DONE;
break;
}
int ret;
ret = pm_runtime_get_sync(dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put(dev);
return ret;
+ }
ret = regcache_sync(pdm->regmap);
}
EXPORT_SYMBOL_GPL(snd_soc_rtdcom_lookup);
-static struct snd_soc_component
+struct snd_soc_component
*snd_soc_lookup_component_nolocked(struct device *dev, const char *driver_name)
{
struct snd_soc_component *component;
return found_component;
}
+EXPORT_SYMBOL_GPL(snd_soc_lookup_component_nolocked);
struct snd_soc_component *snd_soc_lookup_component(struct device *dev,
const char *driver_name)
#include <sound/soc.h>
#include <sound/dmaengine_pcm.h>
+static void devm_dai_release(struct device *dev, void *res)
+{
+ snd_soc_unregister_dai(*(struct snd_soc_dai **)res);
+}
+
+/**
+ * devm_snd_soc_register_dai - resource-managed dai registration
+ * @dev: Device used to manage component
+ * @component: The component the DAI is registered for
+ * @dai_drv: DAI driver to use for the DAI
+ * @legacy_dai_naming: if %true, use legacy single-name format;
+ *                     if %false, use multiple-name format.
+ */
+struct snd_soc_dai *devm_snd_soc_register_dai(struct device *dev,
+ struct snd_soc_component *component,
+ struct snd_soc_dai_driver *dai_drv,
+ bool legacy_dai_naming)
+{
+ struct snd_soc_dai **ptr;
+ struct snd_soc_dai *dai;
+
+ ptr = devres_alloc(devm_dai_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ dai = snd_soc_register_dai(component, dai_drv, legacy_dai_naming);
+ if (dai) {
+ *ptr = dai;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return dai;
+}
+EXPORT_SYMBOL_GPL(devm_snd_soc_register_dai);
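+
+/*
+ * Usage sketch (illustrative, not part of this change): a driver can
+ * register a DAI at probe time and rely on devres to unregister it when
+ * the device is unbound:
+ *
+ *	dai = devm_snd_soc_register_dai(dev, component, dai_drv, false);
+ *	if (!dai)
+ *		return -ENOMEM;
+ */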
+
static void devm_component_release(struct device *dev, void *res)
{
snd_soc_unregister_component(*(struct device **)res);
*/
#define SND_DMAENGINE_PCM_FLAG_NO_RESIDUE BIT(31)
-struct dmaengine_pcm {
- struct dma_chan *chan[SNDRV_PCM_STREAM_LAST + 1];
- const struct snd_dmaengine_pcm_config *config;
- struct snd_soc_component component;
- unsigned int flags;
-};
-
-static struct dmaengine_pcm *soc_component_to_pcm(struct snd_soc_component *p)
-{
- return container_of(p, struct dmaengine_pcm, component);
-}
-
static struct device *dmaengine_dma_dev(struct dmaengine_pcm *pcm,
struct snd_pcm_substream *substream)
{
int count, paths;
int ret;
+ if (!fe->dai_link->dynamic)
+ return 0;
+
if (fe->num_cpus > 1) {
dev_err(fe->dev,
"%s doesn't support Multi CPU yet\n", __func__);
return -EINVAL;
}
- if (!fe->dai_link->dynamic)
- return 0;
-
/* only check active links */
if (!snd_soc_dai_active(asoc_rtd_to_cpu(fe, 0)))
return 0;
list_add(&dai_drv->dobj.list, &tplg->comp->dobj_list);
/* register the DAI to the component */
- dai = snd_soc_register_dai(tplg->comp, dai_drv, false);
+ dai = devm_snd_soc_register_dai(tplg->comp->dev, tplg->comp, dai_drv, false);
if (!dai)
return -ENOMEM;
ret = snd_soc_dapm_new_dai_widgets(dapm, dai);
if (ret != 0) {
dev_err(dai->dev, "Failed to create DAI widgets %d\n", ret);
- snd_soc_unregister_dai(dai);
return ret;
}
select SND_SOC_SOF_CANNONLAKE if SND_SOC_SOF_CANNONLAKE_SUPPORT
select SND_SOC_SOF_COFFEELAKE if SND_SOC_SOF_COFFEELAKE_SUPPORT
select SND_SOC_SOF_ICELAKE if SND_SOC_SOF_ICELAKE_SUPPORT
- select SND_SOC_SOF_COMETLAKE_LP if SND_SOC_SOF_COMETLAKE_LP_SUPPORT
- select SND_SOC_SOF_COMETLAKE_H if SND_SOC_SOF_COMETLAKE_H_SUPPORT
+ select SND_SOC_SOF_COMETLAKE if SND_SOC_SOF_COMETLAKE_SUPPORT
select SND_SOC_SOF_TIGERLAKE if SND_SOC_SOF_TIGERLAKE_SUPPORT
select SND_SOC_SOF_ELKHARTLAKE if SND_SOC_SOF_ELKHARTLAKE_SUPPORT
select SND_SOC_SOF_JASPERLAKE if SND_SOC_SOF_JASPERLAKE_SUPPORT
This option is not user-selectable but automagically handled by
'select' statements at a higher level
-config SND_SOC_SOF_COMETLAKE_LP
+config SND_SOC_SOF_COMETLAKE
tristate
select SND_SOC_SOF_HDA_COMMON
help
This option is not user-selectable but automagically handled by
'select' statements at a higher level
-config SND_SOC_SOF_COMETLAKE_LP_SUPPORT
- bool "SOF support for CometLake-LP"
- help
- This adds support for Sound Open Firmware for Intel(R) platforms
- using the Cometlake-LP processors.
- Say Y if you have such a device.
- If unsure select "N".
+config SND_SOC_SOF_COMETLAKE_SUPPORT
+ bool
-config SND_SOC_SOF_COMETLAKE_H
- tristate
- select SND_SOC_SOF_HDA_COMMON
- help
- This option is not user-selectable but automagically handled by
- 'select' statements at a higher level
-
-config SND_SOC_SOF_COMETLAKE_H_SUPPORT
- bool "SOF support for CometLake-H"
+config SND_SOC_SOF_COMETLAKE_LP_SUPPORT
+ bool "SOF support for CometLake"
+ select SND_SOC_SOF_COMETLAKE_SUPPORT
help
This adds support for Sound Open Firmware for Intel(R) platforms
- using the Cometlake-H processors.
- Say Y if you have such a device.
+ using the Cometlake processors.
If unsure select "N".
config SND_SOC_SOF_TIGERLAKE_SUPPORT
if (status & AZX_INT_CTRL_EN) {
rirb_status = snd_hdac_chip_readb(bus, RIRBSTS);
if (rirb_status & RIRB_INT_MASK) {
+ /*
+ * Clearing the interrupt status here ensures
+ * that no interrupt gets masked after the RIRB
+ * wp is read in snd_hdac_bus_update_rirb.
+ */
+ snd_hdac_chip_writeb(bus, RIRBSTS,
+ RIRB_INT_MASK);
active = true;
if (rirb_status & RIRB_INT_RESPONSE)
snd_hdac_bus_update_rirb(bus);
- snd_hdac_chip_writeb(bus, RIRBSTS,
- RIRB_INT_MASK);
}
}
#endif
};
#endif
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_LP) || \
- IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_H)
-
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE)
static const struct sof_dev_desc cml_desc = {
.machines = snd_soc_acpi_intel_cml_machines,
.alt_machines = snd_soc_acpi_intel_cml_sdw_machines,
.driver_data = (unsigned long)&cfl_desc},
#endif
#if IS_ENABLED(CONFIG_SND_SOC_SOF_ICELAKE)
- { PCI_DEVICE(0x8086, 0x34C8),
+ { PCI_DEVICE(0x8086, 0x34C8), /* ICL-LP */
+ .driver_data = (unsigned long)&icl_desc},
+ { PCI_DEVICE(0x8086, 0x3dc8), /* ICL-H */
.driver_data = (unsigned long)&icl_desc},
#endif
#if IS_ENABLED(CONFIG_SND_SOC_SOF_JASPERLAKE)
{ PCI_DEVICE(0x8086, 0x38c8),
{ PCI_DEVICE(0x8086, 0x4dc8),
.driver_data = (unsigned long)&jsl_desc},
#endif
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_LP)
- { PCI_DEVICE(0x8086, 0x02c8),
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE)
+ { PCI_DEVICE(0x8086, 0x02c8), /* CML-LP */
.driver_data = (unsigned long)&cml_desc},
-#endif
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_H)
- { PCI_DEVICE(0x8086, 0x06c8),
+ { PCI_DEVICE(0x8086, 0x06c8), /* CML-H */
+ .driver_data = (unsigned long)&cml_desc},
+ { PCI_DEVICE(0x8086, 0xa3f0), /* CML-S */
.driver_data = (unsigned long)&cml_desc},
#endif
#if IS_ENABLED(CONFIG_SND_SOC_SOF_TIGERLAKE)
- { PCI_DEVICE(0x8086, 0xa0c8),
+ { PCI_DEVICE(0x8086, 0xa0c8), /* TGL-LP */
+ .driver_data = (unsigned long)&tgl_desc},
+ { PCI_DEVICE(0x8086, 0x43c8), /* TGL-H */
.driver_data = (unsigned long)&tgl_desc},
#endif
#if IS_ENABLED(CONFIG_SND_SOC_SOF_ELKHARTLAKE)
{ PCI_DEVICE(0x8086, 0x4b55),
dma_addr_t sync_dma; /* DMA address of syncbuf */
unsigned int pipe; /* the data i/o pipe */
- unsigned int framesize[2]; /* small/large frame sizes in samples */
- unsigned int sample_rem; /* remainder from division fs/fps */
+ unsigned int packsize[2]; /* small/large packet sizes in samples */
+ unsigned int sample_rem; /* remainder from division fs/pps */
unsigned int sample_accum; /* sample accumulator */
- unsigned int fps; /* frames per second */
+ unsigned int pps; /* packets per second */
unsigned int freqn; /* nominal sampling rate in fs/fps in Q16.16 format */
unsigned int freqm; /* momentary sampling rate in fs/fps in Q16.16 format */
int freqshift; /* how much to shift the feedback value to get Q16.16 */
return ep->maxframesize;
ep->sample_accum += ep->sample_rem;
- if (ep->sample_accum >= ep->fps) {
- ep->sample_accum -= ep->fps;
- ret = ep->framesize[1];
+ if (ep->sample_accum >= ep->pps) {
+ ep->sample_accum -= ep->pps;
+ ret = ep->packsize[1];
} else {
- ret = ep->framesize[0];
+ ret = ep->packsize[0];
}
return ret;
if (snd_usb_get_speed(ep->chip->dev) == USB_SPEED_FULL) {
ep->freqn = get_usb_full_speed_rate(rate);
- ep->fps = 1000;
+ ep->pps = 1000 >> ep->datainterval;
} else {
ep->freqn = get_usb_high_speed_rate(rate);
- ep->fps = 8000;
+ ep->pps = 8000 >> ep->datainterval;
}
- ep->sample_rem = rate % ep->fps;
- ep->framesize[0] = rate / ep->fps;
- ep->framesize[1] = (rate + (ep->fps - 1)) / ep->fps;
+ ep->sample_rem = rate % ep->pps;
+ ep->packsize[0] = rate / ep->pps;
+ ep->packsize[1] = (rate + (ep->pps - 1)) / ep->pps;
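+	/*
+	 * e.g. 44100 Hz at full speed with datainterval 0: pps = 1000,
+	 * sample_rem = 100, packsize[0] = 44, packsize[1] = 45
+	 */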
/* calculate the frequency in 16.16 format */
ep->freqm = ep->freqn;
return nr_rates;
}
-/* Line6 Helix series don't support the UAC2_CS_RANGE usb function
- * call. Return a static table of known clock rates.
+/* Line6 Helix series and the Rode Rodecaster Pro don't support the
+ * UAC2_CS_RANGE usb function call. Return a static table of known
+ * clock rates.
*/
static int line6_parse_audio_format_rates_quirk(struct snd_usb_audio *chip,
struct audioformat *fp)
case USB_ID(0x0e41, 0x4248): /* Line6 Helix >= fw 2.82 */
case USB_ID(0x0e41, 0x4249): /* Line6 Helix Rack >= fw 2.82 */
case USB_ID(0x0e41, 0x424a): /* Line6 Helix LT >= fw 2.82 */
+ case USB_ID(0x19f7, 0x0011): /* Rode Rodecaster Pro */
return set_fixed_rate(fp, 48000, SNDRV_PCM_RATE_48000);
}
* if failed, give up and free the control instance.
*/
-int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list,
- struct snd_kcontrol *kctl)
+int snd_usb_mixer_add_list(struct usb_mixer_elem_list *list,
+ struct snd_kcontrol *kctl,
+ bool is_std_info)
{
struct usb_mixer_interface *mixer = list->mixer;
int err;
return err;
}
list->kctl = kctl;
+ list->is_std_info = is_std_info;
list->next_id_elem = mixer->id_elems[list->id];
mixer->id_elems[list->id] = list;
return 0;
unitid = delegate_notify(mixer, unitid, NULL, NULL);
for_each_mixer_elem(list, mixer, unitid) {
- struct usb_mixer_elem_info *info =
- mixer_elem_list_to_info(list);
+ struct usb_mixer_elem_info *info;
+
+ if (!list->is_std_info)
+ continue;
+ info = mixer_elem_list_to_info(list);
/* invalidate cache, so the value is read from the device */
info->cached = 0;
snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
if (!list->kctl)
continue;
+ if (!list->is_std_info)
+ continue;
info = mixer_elem_list_to_info(list);
if (count > 1 && info->control != control)
struct usb_mixer_elem_list *next_id_elem; /* list of controls with same id */
struct snd_kcontrol *kctl;
unsigned int id;
+ bool is_std_info;
usb_mixer_elem_dump_func_t dump;
usb_mixer_elem_resume_func_t resume;
};
int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval,
int request, int validx, int value_set);
-int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list,
- struct snd_kcontrol *kctl);
+int snd_usb_mixer_add_list(struct usb_mixer_elem_list *list,
+ struct snd_kcontrol *kctl,
+ bool is_std_info);
+
+#define snd_usb_mixer_add_control(list, kctl) \
+ snd_usb_mixer_add_list(list, kctl, true)
void snd_usb_mixer_elem_init_std(struct usb_mixer_elem_list *list,
struct usb_mixer_interface *mixer,
return -ENOMEM;
}
kctl->private_free = snd_usb_mixer_elem_free;
- return snd_usb_mixer_add_control(list, kctl);
+	/* don't use snd_usb_mixer_add_control() here; this is a special list element */
+ return snd_usb_mixer_add_list(list, kctl, false);
}
/*
ifnum = 0;
goto add_sync_ep_from_ifnum;
case USB_ID(0x07fd, 0x0008): /* MOTU M Series */
+ case USB_ID(0x31e9, 0x0002): /* Solid State Logic SSL2+ */
+ case USB_ID(0x0d9a, 0x00df): /* RTX6001 */
ep = 0x81;
ifnum = 2;
goto add_sync_ep_from_ifnum;
return 0;
case SNDRV_PCM_TRIGGER_STOP:
stop_endpoints(subs);
+ subs->data_endpoint->retire_data_urb = NULL;
subs->running = 0;
return 0;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
}
},
+/*
+ * MacroSilicon MS2109 based HDMI capture cards
+ *
+ * These claim 96kHz 1ch in the descriptors, but are actually 48kHz 2ch.
+ * They also need QUIRK_AUDIO_ALIGN_TRANSFER, which makes one wonder if
+ * they pretend to be 96kHz mono as a workaround for stereo being broken
+ * by that...
+ *
+ * They also have swapped L-R channels, but that's for userspace to deal
+ * with.
+ */
+{
+ USB_DEVICE(0x534d, 0x2109),
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .vendor_name = "MacroSilicon",
+ .product_name = "MS2109",
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = &(const struct snd_usb_audio_quirk[]) {
+ {
+ .ifnum = 2,
+ .type = QUIRK_AUDIO_ALIGN_TRANSFER,
+ },
+ {
+ .ifnum = 2,
+ .type = QUIRK_AUDIO_STANDARD_MIXER,
+ },
+ {
+ .ifnum = 3,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = &(const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels = 2,
+ .iface = 3,
+ .altsetting = 1,
+ .altset_idx = 1,
+ .attributes = 0,
+ .endpoint = 0x82,
+ .ep_attr = USB_ENDPOINT_XFER_ISOC |
+ USB_ENDPOINT_SYNC_ASYNC,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ }
+ },
+ {
+ .ifnum = -1
+ }
+ }
+ }
+},
+
#undef USB_DEVICE_VENDOR_SPEC
static bool is_itf_usb_dsd_dac(unsigned int id)
{
switch (id) {
+ case USB_ID(0x154e, 0x1002): /* Denon DCD-1500RE */
case USB_ID(0x154e, 0x1003): /* Denon DA-300USB */
case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */
case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */
chip->usb_id == USB_ID(0x0951, 0x16ad)) &&
(requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
usleep_range(1000, 2000);
+
+ /*
+ * Samsung USBC Headset (AKG) need a tiny delay after each
+ * class compliant request. (Model number: AAM625R or AAM627R)
+ */
+ if (chip->usb_id == USB_ID(0x04e8, 0xa051) &&
+ (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+ usleep_range(5000, 6000);
}
/*
static const struct registration_quirk registration_quirks[] = {
REG_QUIRK_ENTRY(0x0951, 0x16d8, 2), /* Kingston HyperX AMP */
REG_QUIRK_ENTRY(0x0951, 0x16ed, 2), /* Kingston HyperX Cloud Alpha S */
+ REG_QUIRK_ENTRY(0x0951, 0x16ea, 2), /* Kingston HyperX Cloud Flight S */
{ 0 } /* terminator */
};
#include <asm/alternative-asm.h>
#include <asm/export.h>
+.pushsection .noinstr.text, "ax"
+
/*
* We build a jump to memcpy_orig by default which gets NOPped out on
* the majority of x86 CPUs which set REP_GOOD. In addition, CPUs which
retq
SYM_FUNC_END(memcpy_orig)
+.popsection
+
#ifndef CONFIG_UML
MCSAFE_TEST_CTL
| | **lru_percpu_hash** | **lpm_trie** | **array_of_maps** | **hash_of_maps**
| | **devmap** | **devmap_hash** | **sockmap** | **cpumap** | **xskmap** | **sockhash**
| | **cgroup_storage** | **reuseport_sockarray** | **percpu_cgroup_storage**
-| | **queue** | **stack** | **sk_storage** | **struct_ops** }
+| | **queue** | **stack** | **sk_storage** | **struct_ops** | **ringbuf** }
DESCRIPTION
===========
[BPF_MAP_TYPE_STACK] = "stack",
[BPF_MAP_TYPE_SK_STORAGE] = "sk_storage",
[BPF_MAP_TYPE_STRUCT_OPS] = "struct_ops",
+ [BPF_MAP_TYPE_RINGBUF] = "ringbuf",
};
const size_t map_type_name_size = ARRAY_SIZE(map_type_name);
" lru_percpu_hash | lpm_trie | array_of_maps | hash_of_maps |\n"
" devmap | devmap_hash | sockmap | cpumap | xskmap | sockhash |\n"
" cgroup_storage | reuseport_sockarray | percpu_cgroup_storage |\n"
- " queue | stack | sk_storage | struct_ops }\n"
+ " queue | stack | sk_storage | struct_ops | ringbuf }\n"
" " HELP_SPEC_OPTIONS "\n"
"",
bin_name, argv[-2]);
* position @h. For example
* GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
*/
-#if !defined(__ASSEMBLY__) && \
- (!defined(CONFIG_CC_IS_GCC) || CONFIG_GCC_VERSION >= 49000)
+#if !defined(__ASSEMBLY__)
#include <linux/build_bug.h>
#define GENMASK_INPUT_CHECK(h, l) \
(BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
* Return
* The id is returned or 0 in case the id could not be retrieved.
*
- * void *bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags)
+ * int bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags)
* Description
* Copy *size* bytes from *data* into a ring buffer *ringbuf*.
- * If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of
- * new data availability is sent.
- * IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of
- * new data availability is sent unconditionally.
+ * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ * of new data availability is sent.
+ * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ * of new data availability is sent unconditionally.
* Return
- * 0, on success;
- * < 0, on error.
+ * 0 on success, or a negative error in case of failure.
*
* void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags)
* Description
* void bpf_ringbuf_submit(void *data, u64 flags)
* Description
* Submit reserved ring buffer sample, pointed to by *data*.
- * If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of
- * new data availability is sent.
- * IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of
- * new data availability is sent unconditionally.
+ * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ * of new data availability is sent.
+ * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ * of new data availability is sent unconditionally.
* Return
* Nothing. Always succeeds.
*
* void bpf_ringbuf_discard(void *data, u64 flags)
* Description
* Discard reserved ring buffer sample, pointed to by *data*.
- * If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of
- * new data availability is sent.
- * IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of
- * new data availability is sent unconditionally.
+ * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ * of new data availability is sent.
+ * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ * of new data availability is sent unconditionally.
* Return
* Nothing. Always succeeds.
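*
*		A minimal producer sketch (illustrative only; *rb* is an assumed
*		**BPF_MAP_TYPE_RINGBUF** map and *event* an assumed sample type)::
*
*			struct event *e = bpf_ringbuf_reserve(&rb, sizeof(*e), 0);
*
*			if (!e)
*				return 0;
*			e->pid = bpf_get_current_pid_tgid() >> 32;
*			bpf_ringbuf_submit(e, 0);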
*
* Description
* Query various characteristics of the provided ring buffer. What
* exactly is queried is determined by *flags*:
- * - BPF_RB_AVAIL_DATA - amount of data not yet consumed;
- * - BPF_RB_RING_SIZE - the size of ring buffer;
- * - BPF_RB_CONS_POS - consumer position (can wrap around);
- * - BPF_RB_PROD_POS - producer(s) position (can wrap around);
- * Data returned is just a momentary snapshots of actual values
+ *
+ * * **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed.
+ * * **BPF_RB_RING_SIZE**: The size of ring buffer.
+ * * **BPF_RB_CONS_POS**: Consumer position (can wrap around).
+ * * **BPF_RB_PROD_POS**: Producer(s) position (can wrap around).
+ *
+ * Data returned is just a momentary snapshot of actual values
* and could be inaccurate, so this facility should be used to
* power heuristics and for reporting, not to make 100% correct
* calculations.
* Return
- * Requested value, or 0, if flags are not recognized.
+ * Requested value, or 0, if *flags* are not recognized.
*
* int bpf_csum_level(struct sk_buff *skb, u64 level)
* Description
LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf,
__u32 *buf_len, __u32 *prog_id, __u32 *fd_type,
__u64 *probe_offset, __u64 *probe_addr);
+
+enum bpf_stats_type; /* defined in up-to-date linux/bpf.h */
LIBBPF_API int bpf_enable_stats(enum bpf_stats_type type);
#ifdef __cplusplus
#include <stdbool.h>
#include <stddef.h>
#include <limits.h>
-#ifndef __WORDSIZE
-#define __WORDSIZE (__SIZEOF_LONG__ * 8)
-#endif
static inline size_t hash_bits(size_t h, int bits)
{
/* shuffle bits and return requested number of upper bits */
- return (h * 11400714819323198485llu) >> (__WORDSIZE - bits);
+#if (__SIZEOF_SIZE_T__ == __SIZEOF_LONG_LONG__)
+ /* LP64 case */
+ return (h * 11400714819323198485llu) >> (__SIZEOF_LONG_LONG__ * 8 - bits);
+#elif (__SIZEOF_SIZE_T__ <= __SIZEOF_LONG__)
+ return (h * 2654435769lu) >> (__SIZEOF_LONG__ * 8 - bits);
+#else
+# error "Unsupported size_t size"
+#endif
}
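
/*
 * Note: 11400714819323198485 is 2^64/phi and 2654435769 is 2^32/phi
 * (the Fibonacci hashing multipliers), so the high bits of the product
 * are well mixed before the shift selects them.
 */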
typedef size_t (*hashmap_hash_fn)(const void *key, void *ctx);
err = -EINVAL;
goto out;
}
- prog = bpf_object__find_program_by_title(obj, sec_name);
+ prog = NULL;
+ for (i = 0; i < obj->nr_programs; i++) {
+ if (!strcmp(obj->programs[i].section_name, sec_name)) {
+ prog = &obj->programs[i];
+ break;
+ }
+ }
if (!prog) {
pr_warn("failed to find program '%s' for CO-RE offset relocation\n",
sec_name);
.expected_attach_type = BPF_TRACE_ITER,
.is_attach_btf = true,
.attach_fn = attach_iter),
- BPF_EAPROG_SEC("xdp_devmap", BPF_PROG_TYPE_XDP,
+ BPF_EAPROG_SEC("xdp_devmap/", BPF_PROG_TYPE_XDP,
BPF_XDP_DEVMAP),
BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
break;
case KBUFFER_TYPE_TIME_EXTEND:
+ case KBUFFER_TYPE_TIME_STAMP:
extend = read_4(kbuf, data);
data += 4;
extend <<= TS_SHIFT;
*length = 0;
break;
- case KBUFFER_TYPE_TIME_STAMP:
- data += 12;
- *length = 0;
- break;
case 0:
*length = read_4(kbuf, data) - 4;
*length = (*length + 3) & ~3;
type_len = translate_data(kbuf, ptr, &ptr, &delta, &length);
- kbuf->timestamp += delta;
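+	/*
+	 * TIME_STAMP records carry an absolute timestamp; all other
+	 * types carry a delta from the previous event.
+	 */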
+ if (type_len == KBUFFER_TYPE_TIME_STAMP)
+ kbuf->timestamp = delta;
+ else
+ kbuf->timestamp += delta;
+
kbuf->index = calc_index(kbuf, ptr);
kbuf->next = kbuf->index + length;
if (kbuf->next >= kbuf->size)
return -1;
type = update_pointers(kbuf);
- } while (type == KBUFFER_TYPE_TIME_EXTEND || type == KBUFFER_TYPE_PADDING);
+ } while (type == KBUFFER_TYPE_TIME_EXTEND ||
+ type == KBUFFER_TYPE_TIME_STAMP ||
+ type == KBUFFER_TYPE_PADDING);
return 0;
}
return 0;
}
+/**
+ * kbuffer_subbuf_timestamp - read the timestamp from a sub buffer
+ * @kbuf: The kbuffer to load
+ * @subbuf: The subbuffer to read from.
+ *
+ * Return the timestamp from a subbuffer.
+ */
+unsigned long long kbuffer_subbuf_timestamp(struct kbuffer *kbuf, void *subbuf)
+{
+ return kbuf->read_8(subbuf);
+}
+
+/**
+ * kbuffer_ptr_delta - read the delta field from a record
+ * @kbuf: The kbuffer to load
+ * @ptr: The record in the buffer.
+ *
+ * Return the timestamp delta from a record.
+ */
+unsigned int kbuffer_ptr_delta(struct kbuffer *kbuf, void *ptr)
+{
+ unsigned int type_len_ts;
+
+ type_len_ts = read_4(kbuf, ptr);
+ return ts4host(kbuf, type_len_ts);
+}
+
/**
* kbuffer_read_event - read the next event in the kbuffer subbuffer
* @kbuf: The kbuffer to read from
void *kbuffer_read_event(struct kbuffer *kbuf, unsigned long long *ts);
void *kbuffer_next_event(struct kbuffer *kbuf, unsigned long long *ts);
unsigned long long kbuffer_timestamp(struct kbuffer *kbuf);
+unsigned long long kbuffer_subbuf_timestamp(struct kbuffer *kbuf, void *subbuf);
+unsigned int kbuffer_ptr_delta(struct kbuffer *kbuf, void *ptr);
void *kbuffer_translate_data(int swap, void *data, unsigned int *size);
unsigned long arch_dest_rela_offset(int addend);
+const char *arch_nop_insn(int len);
+
#endif /* _ARCH_H */
state->regs[16].base = CFI_CFA;
state->regs[16].offset = -8;
}
+
+const char *arch_nop_insn(int len)
+{
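+	/*
+	 * Canonical x86 NOPs; the 3-5 byte forms use the 0F 1F /0
+	 * multi-byte NOP encoding recommended by the Intel SDM.
+	 */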
+ static const char nops[5][5] = {
+ /* 1 */ { 0x90 },
+ /* 2 */ { 0x66, 0x90 },
+ /* 3 */ { 0x0f, 0x1f, 0x00 },
+ /* 4 */ { 0x0f, 0x1f, 0x40, 0x00 },
+ /* 5 */ { 0x0f, 0x1f, 0x44, 0x00, 0x00 },
+ };
+
+ if (len < 1 || len > 5) {
+ WARN("invalid NOP size: %d\n", len);
+ return NULL;
+ }
+
+ return nops[len-1];
+}
--- /dev/null
+#ifndef _OBJTOOL_ARCH_ELF
+#define _OBJTOOL_ARCH_ELF
+
+#define R_NONE R_X86_64_NONE
+
+#endif /* _OBJTOOL_ARCH_ELF */
#include "check.h"
#include "special.h"
#include "warn.h"
+#include "arch_elf.h"
#include <linux/hashtable.h>
#include <linux/kernel.h>
} else
insn->call_dest = rela->sym;
+ /*
+	 * Many compilers cannot disable KCOV with a function attribute,
+	 * so they need a little help: NOP out any KCOV calls from noinstr
+	 * text.
+ */
+ if (insn->sec->noinstr &&
+ !strncmp(insn->call_dest->name, "__sanitizer_cov_", 16)) {
+ if (rela) {
+ rela->type = R_NONE;
+ elf_write_rela(file->elf, rela);
+ }
+
+ elf_write_insn(file->elf, insn->sec,
+ insn->offset, insn->len,
+ arch_nop_insn(insn->len));
+ insn->type = INSN_NOP;
+ }
+
/*
* Whatever stack impact regular CALLs have, should be undone
* by the RETURN of the called function.
return "{dynamic}";
}
+static inline bool noinstr_call_dest(struct symbol *func)
+{
+ /*
+ * We can't deal with indirect function calls at present;
+ * assume they're instrumented.
+ */
+ if (!func)
+ return false;
+
+ /*
+	 * If the symbol is from a noinstr section, we're good.
+ */
+ if (func->sec->noinstr)
+ return true;
+
+ /*
+ * The __ubsan_handle_*() calls are like WARN(), they only happen when
+ * something 'BAD' happened. At the risk of taking the machine down,
+ * let them proceed to get the message out.
+ */
+ if (!strncmp(func->name, "__ubsan_handle_", 15))
+ return true;
+
+ return false;
+}
+
static int validate_call(struct instruction *insn, struct insn_state *state)
{
if (state->noinstr && state->instr <= 0 &&
- (!insn->call_dest || !insn->call_dest->sec->noinstr)) {
+ !noinstr_call_dest(insn->call_dest)) {
WARN_FUNC("call to %s() leaves .noinstr.text section",
insn->sec, insn->offset, call_dest_name(insn));
return 1;
objname = _objname;
- file.elf = elf_open_read(objname, orc ? O_RDWR : O_RDONLY);
+ file.elf = elf_open_read(objname, O_RDWR);
if (!file.elf)
return 1;
INIT_LIST_HEAD(&file.insn_list);
hash_init(file.insn_hash);
- file.c_file = find_section_by_name(file.elf, ".comment");
+ file.c_file = !vmlinux && find_section_by_name(file.elf, ".comment");
file.ignore_unreachables = no_unreachable;
file.hints = false;
ret = create_orc_sections(&file);
if (ret < 0)
goto out;
+ }
+ if (file.elf->changed) {
ret = elf_write(file.elf);
if (ret < 0)
goto out;
rela->addend = rela->rela.r_addend;
rela->offset = rela->rela.r_offset;
symndx = GELF_R_SYM(rela->rela.r_info);
- rela->sym = find_symbol_by_index(elf, symndx);
rela->sec = sec;
+ rela->idx = i;
+ rela->sym = find_symbol_by_index(elf, symndx);
if (!rela->sym) {
WARN("can't find rela entry symbol %d for %s",
symndx, sec->name);
elf_hash_add(elf->section_hash, &sec->hash, sec->idx);
elf_hash_add(elf->section_name_hash, &sec->name_hash, str_hash(sec->name));
+ elf->changed = true;
+
return sec;
}
return sec;
}
-int elf_rebuild_rela_section(struct section *sec)
+int elf_rebuild_rela_section(struct elf *elf, struct section *sec)
{
struct rela *rela;
int nr, idx = 0, size;
return -1;
}
+ sec->changed = true;
+ elf->changed = true;
+
sec->data->d_buf = relas;
sec->data->d_size = size;
return 0;
}
-int elf_write(const struct elf *elf)
+int elf_write_insn(struct elf *elf, struct section *sec,
+ unsigned long offset, unsigned int len,
+ const char *insn)
+{
+ Elf_Data *data = sec->data;
+
+ if (data->d_type != ELF_T_BYTE || data->d_off) {
+ WARN("write to unexpected data for section: %s", sec->name);
+ return -1;
+ }
+
+ memcpy(data->d_buf + offset, insn, len);
+ elf_flagdata(data, ELF_C_SET, ELF_F_DIRTY);
+
+ elf->changed = true;
+
+ return 0;
+}
+
+int elf_write_rela(struct elf *elf, struct rela *rela)
+{
+ struct section *sec = rela->sec;
+
+ rela->rela.r_info = GELF_R_INFO(rela->sym->idx, rela->type);
+ rela->rela.r_addend = rela->addend;
+ rela->rela.r_offset = rela->offset;
+
+ if (!gelf_update_rela(sec->data, rela->idx, &rela->rela)) {
+ WARN_ELF("gelf_update_rela");
+ return -1;
+ }
+
+ elf->changed = true;
+
+ return 0;
+}
+
+int elf_write(struct elf *elf)
{
struct section *sec;
Elf_Scn *s;
WARN_ELF("gelf_update_shdr");
return -1;
}
+
+ sec->changed = false;
}
}
return -1;
}
+ elf->changed = false;
+
return 0;
}
GElf_Rela rela;
struct section *sec;
struct symbol *sym;
- unsigned int type;
unsigned long offset;
+ unsigned int type;
int addend;
+ int idx;
bool jump_table_start;
};
Elf *elf;
GElf_Ehdr ehdr;
int fd;
+ bool changed;
char *name;
struct list_head sections;
DECLARE_HASHTABLE(symbol_hash, ELF_HASH_BITS);
struct section *elf_create_section(struct elf *elf, const char *name, size_t entsize, int nr);
struct section *elf_create_rela_section(struct elf *elf, struct section *base);
void elf_add_rela(struct elf *elf, struct rela *rela);
-int elf_write(const struct elf *elf);
+int elf_write_insn(struct elf *elf, struct section *sec,
+ unsigned long offset, unsigned int len,
+ const char *insn);
+int elf_write_rela(struct elf *elf, struct rela *rela);
+int elf_write(struct elf *elf);
void elf_close(struct elf *elf);
struct section *find_section_by_name(const struct elf *elf, const char *name);
struct rela *find_rela_by_dest_range(const struct elf *elf, struct section *sec,
unsigned long offset, unsigned int len);
struct symbol *find_func_containing(struct section *sec, unsigned long offset);
-int elf_rebuild_rela_section(struct section *sec);
+int elf_rebuild_rela_section(struct elf *elf, struct section *sec);
#define for_each_sec(file, sec) \
list_for_each_entry(sec, &file->elf->sections, list)
}
}
- if (elf_rebuild_rela_section(ip_relasec))
+ if (elf_rebuild_rela_section(file->elf, ip_relasec))
return -1;
return 0;
}
evsel->core.attr.freq = 0;
evsel->core.attr.sample_period = 1;
+ evsel->no_aux_samples = true;
intel_pt_evsel = evsel;
opts->full_auxtrace = true;
}
* event synthesis.
*/
if (opts->initial_delay || target__has_cpu(&opts->target)) {
- if (perf_evlist__add_dummy(evlist))
- return -ENOMEM;
+ pos = perf_evlist__get_tracking_event(evlist);
+ if (!evsel__is_dummy_event(pos)) {
+ /* Set up dummy event. */
+ if (perf_evlist__add_dummy(evlist))
+ return -ENOMEM;
+ pos = evlist__last(evlist);
+ perf_evlist__set_tracking_event(evlist, pos);
+ }
- /* Disable tracking of mmaps on lead event. */
- pos = evlist__first(evlist);
- pos->tracking = 0;
- /* Set up dummy event. */
- pos = evlist__last(evlist);
- pos->tracking = 1;
/*
* Enable the dummy event when the process is forked for
* initial_delay, immediately for system wide.
*/
- if (opts->initial_delay)
+ if (opts->initial_delay && !pos->immediate)
pos->core.attr.enable_on_exec = 1;
else
pos->immediate = 1;
return -EINVAL;
if (PRINT_FIELD(IREGS) &&
- evsel__check_stype(evsel, PERF_SAMPLE_REGS_INTR, "IREGS", PERF_OUTPUT_IREGS))
+ evsel__do_check_stype(evsel, PERF_SAMPLE_REGS_INTR, "IREGS", PERF_OUTPUT_IREGS, allow_user_set))
return -EINVAL;
if (PRINT_FIELD(UREGS) &&
cbr = data[0]
MHz = (data[4] + 500) / 1000
percent = ((cbr * 1000 / data[2]) + 5) / 10
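# Python 3 "/" is true division and yields float; struct.pack("i") needs int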
- value = struct.pack("!hiqiiiiii", 4, 8, id, 4, cbr, 4, MHz, 4, percent)
+ value = struct.pack("!hiqiiiiii", 4, 8, id, 4, cbr, 4, int(MHz), 4, int(percent))
cbr_file.write(value)
def mwait(id, raw_buf):
" FROM calls"
" INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
" INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
- " WHERE symbols.name" + match +
+ " WHERE calls.id <> 0"
+ " AND symbols.name" + match +
" GROUP BY comm_id, thread_id, call_path_id"
" ORDER BY comm_id, thread_id, call_path_id")
" FROM calls"
" INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
" INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
- " WHERE symbols.name" + match +
+ " WHERE calls.id <> 0"
+ " AND symbols.name" + match +
" ORDER BY comm_id, thread_id, call_time, calls.id")
def FindPath(self, query):
child = self.model.index(row, 0, parent)
if child.internalPointer().dbid == dbid:
found = True
+ self.view.setExpanded(parent, True)
self.view.setCurrentIndex(child)
parent = child
break
child = self.model.index(row, 0, parent)
if child.internalPointer().dbid == dbid:
found = True
+ self.view.setExpanded(parent, True)
self.view.setCurrentIndex(child)
parent = child
break
return
last_child = None
for row in xrange(n):
+ self.view.setExpanded(parent, True)
child = self.model.index(row, 0, parent)
child_call_time = child.internalPointer().call_time
if child_call_time < time:
if not last_child:
if not found:
child = self.model.index(0, 0, parent)
+ self.view.setExpanded(parent, True)
self.view.setCurrentIndex(child)
return
found = True
+ self.view.setExpanded(parent, True)
self.view.setCurrentIndex(last_child)
parent = last_child
from __future__ import print_function
import sys
import os
+import io
import argparse
import json
if self.args.format == "html":
try:
- with open(self.args.template) as f:
+ with io.open(self.args.template, encoding="utf-8") as f:
output_str = f.read().replace("/** @flamegraph_json **/",
json_str)
except IOError as e:
output_fn = self.args.output or "stacks.json"
if output_fn == "-":
- sys.stdout.write(output_str)
+ with io.open(sys.stdout.fileno(), "w", encoding="utf-8", closefd=False) as out:
+ out.write(output_str)
else:
print("dumping data to {}".format(output_fn))
try:
- with open(output_fn, "w") as out:
+ with io.open(output_fn, "w", encoding="utf-8") as out:
out.write(output_str)
except IOError as e:
print("Error writing output file: {}".format(e), file=sys.stderr)
return browser->he_selection->thread;
}
+static struct res_sample *hist_browser__selected_res_sample(struct hist_browser *browser)
+{
+ return browser->he_selection ? browser->he_selection->res_samples : NULL;
+}
+
/* Check whether the browser is for 'top' or 'report' */
static inline bool is_report_browser(void *timer)
{
&options[nr_options], NULL, NULL, evsel);
nr_options += add_res_sample_opt(browser, &actions[nr_options],
&options[nr_options],
- hist_browser__selected_entry(browser)->res_samples,
- evsel, A_NORMAL);
+ hist_browser__selected_res_sample(browser),
+ evsel, A_NORMAL);
nr_options += add_res_sample_opt(browser, &actions[nr_options],
&options[nr_options],
- hist_browser__selected_entry(browser)->res_samples,
- evsel, A_ASM);
+ hist_browser__selected_res_sample(browser),
+ evsel, A_ASM);
nr_options += add_res_sample_opt(browser, &actions[nr_options],
&options[nr_options],
- hist_browser__selected_entry(browser)->res_samples,
- evsel, A_SOURCE);
+ hist_browser__selected_res_sample(browser),
+ evsel, A_SOURCE);
nr_options += add_switch_opt(browser, &actions[nr_options],
&options[nr_options]);
skip_scripting:
hbt, warn_lost_event);
}
+static bool perf_evlist__single_entry(struct evlist *evlist)
+{
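+	/* an appended tracking dummy event does not count as a real entry */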
+ int nr_entries = evlist->core.nr_entries;
+
+ if (nr_entries == 1)
+ return true;
+
+ if (nr_entries == 2) {
+ struct evsel *last = evlist__last(evlist);
+
+ if (evsel__is_dummy_event(last))
+ return true;
+ }
+
+ return false;
+}
+
int perf_evlist__tui_browse_hists(struct evlist *evlist, const char *help,
struct hist_browser_timer *hbt,
float min_pcnt,
int nr_entries = evlist->core.nr_entries;
single_entry:
- if (nr_entries == 1) {
+ if (perf_evlist__single_entry(evlist)) {
struct evsel *first = evlist__first(evlist);
return perf_evsel__hists_browse(first, nr_entries, help,
list_splice(&move, &evlist->core.entries);
}
+struct evsel *perf_evlist__get_tracking_event(struct evlist *evlist)
+{
+ struct evsel *evsel;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel->tracking)
+ return evsel;
+ }
+
+ return evlist__first(evlist);
+}
+
void perf_evlist__set_tracking_event(struct evlist *evlist,
struct evsel *tracking_evsel)
{
evlist__cpu_iter_start(evlist); \
perf_cpu_map__for_each_cpu (cpu, index, (evlist)->core.all_cpus)
+struct evsel *perf_evlist__get_tracking_event(struct evlist *evlist);
void perf_evlist__set_tracking_event(struct evlist *evlist,
struct evsel *tracking_evsel);
}
}
-static bool is_dummy_event(struct evsel *evsel)
-{
- return (evsel->core.attr.type == PERF_TYPE_SOFTWARE) &&
- (evsel->core.attr.config == PERF_COUNT_SW_DUMMY);
-}
-
struct evsel_config_term *__evsel__get_config_term(struct evsel *evsel, enum evsel_term_type type)
{
struct evsel_config_term *term, *found_term = NULL;
if (callchain && callchain->enabled && !evsel->no_aux_samples)
evsel__config_callchain(evsel, opts, callchain);
- if (opts->sample_intr_regs) {
+ if (opts->sample_intr_regs && !evsel->no_aux_samples) {
attr->sample_regs_intr = opts->sample_intr_regs;
evsel__set_sample_bit(evsel, REGS_INTR);
}
- if (opts->sample_user_regs) {
+ if (opts->sample_user_regs && !evsel->no_aux_samples) {
attr->sample_regs_user |= opts->sample_user_regs;
evsel__set_sample_bit(evsel, REGS_USER);
}
* The software event will trigger -EOPNOTSUPP error out,
* if BRANCH_STACK bit is set.
*/
- if (is_dummy_event(evsel))
+ if (evsel__is_dummy_event(evsel))
evsel__reset_sample_bit(evsel, BRANCH_STACK);
}
evsel->synth_sample_type & PERF_SAMPLE_BRANCH_STACK;
}
+static inline bool evsel__is_dummy_event(struct evsel *evsel)
+{
+ return (evsel->core.attr.type == PERF_TYPE_SOFTWARE) &&
+ (evsel->core.attr.config == PERF_COUNT_SW_DUMMY);
+}
+
struct perf_env *evsel__env(struct evsel *evsel);
int evsel__store_ids(struct evsel *evsel, struct evlist *evlist);
u64 sample_type = evsel->core.attr.sample_type;
u64 id = evsel->core.id[0];
u8 cpumode;
+ u64 regs[8 * sizeof(sample.intr_regs.mask)];
if (intel_pt_skip_event(pt))
return 0;
}
if (sample_type & PERF_SAMPLE_REGS_INTR &&
- items->mask[INTEL_PT_GP_REGS_POS]) {
- u64 regs[sizeof(sample.intr_regs.mask)];
+ (items->mask[INTEL_PT_GP_REGS_POS] ||
+ items->mask[INTEL_PT_XMM_POS])) {
u64 regs_mask = evsel->core.attr.sample_regs_intr;
u64 *pos;
static int iterations;
static int interval = 5; /* interval in seconds for showing transfer rate */
-uint8_t default_tx[] = {
+static uint8_t default_tx[] = {
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0x40, 0x00, 0x00, 0x00, 0x00, 0x95,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xF0, 0x0D,
};
-uint8_t default_rx[ARRAY_SIZE(default_tx)] = {0, };
-char *input_tx;
+static uint8_t default_rx[ARRAY_SIZE(default_tx)] = {0, };
+static char *input_tx;
static void hex_dump(const void *src, size_t length, size_t line_size,
char *prefix)
pabort("can't get max speed hz");
printf("spi mode: 0x%x\n", mode);
- printf("bits per word: %d\n", bits);
- printf("max speed: %d Hz (%d KHz)\n", speed, speed/1000);
+ printf("bits per word: %u\n", bits);
+ printf("max speed: %u Hz (%u kHz)\n", speed, speed/1000);
if (input_tx)
transfer_escaped_string(fd, input_tx);
request.make_options)
build_end = time.time()
if not success:
- return KunitResult(KunitStatus.BUILD_FAILURE, 'could not build kernel')
+ return KunitResult(KunitStatus.BUILD_FAILURE,
+ 'could not build kernel',
+ build_end - build_start)
if not success:
return KunitResult(KunitStatus.BUILD_FAILURE,
'could not build kernel',
import re
CONFIG_IS_NOT_SET_PATTERN = r'^# CONFIG_(\w+) is not set$'
-CONFIG_PATTERN = r'^CONFIG_(\w+)=(\S+)$'
+CONFIG_PATTERN = r'^CONFIG_(\w+)=(\S+|".*")$'
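+# matches both bare values and quoted strings, e.g. CONFIG_CC_VERSION_TEXT="gcc (GCC) 10.1.0"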
KconfigEntryBase = collections.namedtuple('KconfigEntry', ['name', 'value'])
return bubble_up_errors(lambda x: x.status, test_suite_list)
def parse_test_result(lines: List[str]) -> TestResult:
- if not lines:
- return TestResult(TestStatus.NO_TESTS, [], lines)
consume_non_diagnositic(lines)
- if not parse_tap_header(lines):
- return None
+ if not lines or not parse_tap_header(lines):
+ return TestResult(TestStatus.NO_TESTS, [], lines)
test_suites = []
test_suite = parse_test_suite(lines)
while test_suite:
failed_tests = 0
crashed_tests = 0
test_result = parse_test_result(list(isolate_kunit_output(kernel_output)))
+ if test_result.status == TestStatus.NO_TESTS:
+ print_with_timestamp(red('[ERROR] ') + 'no kunit output detected')
for test_suite in test_result.suites:
if test_suite.status == TestStatus.SUCCESS:
print_suite_divider(green('[PASSED] ') + test_suite.name)
result.status)
file.close()
+ def test_no_kunit_output(self):
+ crash_log = get_absolute_path(
+ 'test_data/test_insufficient_memory.log')
+ file = open(crash_log)
+ print_mock = mock.patch('builtins.print').start()
+ result = kunit_parser.parse_run_tests(
+ kunit_parser.isolate_kunit_output(file.readlines()))
+ print_mock.assert_any_call(StrContains("no kunit output detected"))
+ print_mock.stop()
+ file.close()
+
def test_crashed_test(self):
crashed_log = get_absolute_path(
'test_data/test_is_test_passed-crash.log')
$(TEST_GEN_PROGS): $(PROGS)
cp $(PROGS) $(OUTPUT)/
-clean:
- $(CLEAN)
- rm -f $(PROGS)
-
# Common test-unit targets to build common-layout test-cases executables
# Needs secondary expansion to properly include the testcase c-file in pre-reqs
.SECONDEXPANSION:
fentry_res = (__u64 *)fentry_skel->bss;
fexit_res = (__u64 *)fexit_skel->bss;
printf("%lld\n", fentry_skel->bss->test1_result);
- for (i = 0; i < 6; i++) {
+ for (i = 0; i < 8; i++) {
CHECK(fentry_res[i] != 1, "result",
"fentry_test%d failed err %lld\n", i + 1, fentry_res[i]);
CHECK(fexit_res[i] != 1, "result",
run_tests_skb_less(tap_fd, skel->maps.last_dissection);
- err = bpf_prog_detach(prog_fd, BPF_FLOW_DISSECTOR);
- CHECK(err, "bpf_prog_detach", "err %d errno %d\n", err, errno);
+ err = bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
+ CHECK(err, "bpf_prog_detach2", "err %d errno %d\n", err, errno);
}
static void test_skb_less_link_create(struct bpf_flow *skel, int tap_fd)
// SPDX-License-Identifier: GPL-2.0
/*
- * Test that the flow_dissector program can be updated with a single
- * syscall by attaching a new program that replaces the existing one.
- *
- * Corner case - the same program cannot be attached twice.
+ * Tests for attaching, detaching, and replacing a flow_dissector BPF program.
*/
#define _GNU_SOURCE
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog2));
out_detach:
- err = bpf_prog_detach(0, BPF_FLOW_DISSECTOR);
+ err = bpf_prog_detach2(prog2, 0, BPF_FLOW_DISSECTOR);
if (CHECK_FAIL(err))
perror("bpf_prog_detach");
CHECK_FAIL(prog_is_attached(netns));
DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts);
int err, link;
- err = bpf_prog_attach(prog1, -1, BPF_FLOW_DISSECTOR, 0);
+ err = bpf_prog_attach(prog1, 0, BPF_FLOW_DISSECTOR, 0);
if (CHECK_FAIL(err)) {
perror("bpf_prog_attach(prog1)");
return;
close(link);
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
- err = bpf_prog_detach(-1, BPF_FLOW_DISSECTOR);
+ err = bpf_prog_detach2(prog1, 0, BPF_FLOW_DISSECTOR);
if (CHECK_FAIL(err))
perror("bpf_prog_detach");
CHECK_FAIL(prog_is_attached(netns));
/* Expect failure attaching prog when link exists */
errno = 0;
- err = bpf_prog_attach(prog2, -1, BPF_FLOW_DISSECTOR, 0);
+ err = bpf_prog_attach(prog2, 0, BPF_FLOW_DISSECTOR, 0);
if (CHECK_FAIL(!err || errno != EEXIST))
perror("bpf_prog_attach(prog2) expected EEXIST");
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
/* Expect failure detaching prog when link exists */
errno = 0;
- err = bpf_prog_detach(-1, BPF_FLOW_DISSECTOR);
+ err = bpf_prog_detach2(prog1, 0, BPF_FLOW_DISSECTOR);
if (CHECK_FAIL(!err || errno != EINVAL))
perror("bpf_prog_detach expected EINVAL");
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
}
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
- err = bpf_prog_detach(0, BPF_FLOW_DISSECTOR);
+ err = bpf_prog_detach2(prog1, 0, BPF_FLOW_DISSECTOR);
if (CHECK_FAIL(err)) {
perror("bpf_prog_detach");
return;
CHECK_FAIL(prog_is_attached(netns));
}
+static void test_link_update_same_prog(int netns, int prog1, int prog2)
+{
+ DECLARE_LIBBPF_OPTS(bpf_link_create_opts, create_opts);
+ DECLARE_LIBBPF_OPTS(bpf_link_update_opts, update_opts);
+ int err, link;
+
+ link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &create_opts);
+ if (CHECK_FAIL(link < 0)) {
+ perror("bpf_link_create(prog1)");
+ return;
+ }
+ CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
+
+ /* Expect success updating the prog with the same one */
+ update_opts.flags = 0;
+ update_opts.old_prog_fd = 0;
+ err = bpf_link_update(link, prog1, &update_opts);
+ if (CHECK_FAIL(err))
+ perror("bpf_link_update");
+ CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
+
+ close(link);
+ CHECK_FAIL(prog_is_attached(netns));
+}
+
static void test_link_update_invalid_opts(int netns, int prog1, int prog2)
{
DECLARE_LIBBPF_OPTS(bpf_link_create_opts, create_opts);
test_link_update_no_old_prog },
{ "link update with replace old prog",
test_link_update_replace_old_prog },
+ { "link update with same prog",
+ test_link_update_same_prog },
{ "link update invalid opts",
test_link_update_invalid_opts },
{ "link update invalid prog",
char cc[16]; /* TCP_CA_NAME_MAX */
} buf = {};
socklen_t optlen;
+ char *big_buf = NULL;
fd = socket(AF_INET, SOCK_STREAM, 0);
if (fd < 0) {
/* IP_TOS - BPF bypass */
- buf.u8[0] = 0x08;
- err = setsockopt(fd, SOL_IP, IP_TOS, &buf, 1);
+ optlen = getpagesize() * 2;
+ big_buf = calloc(1, optlen);
+ if (!big_buf) {
+ log_err("Couldn't allocate two pages");
+ goto err;
+ }
+
+ *(int *)big_buf = 0x08;
+ err = setsockopt(fd, SOL_IP, IP_TOS, big_buf, optlen);
if (err) {
log_err("Failed to call setsockopt(IP_TOS)");
goto err;
}
- buf.u8[0] = 0x00;
+ memset(big_buf, 0, optlen);
optlen = 1;
- err = getsockopt(fd, SOL_IP, IP_TOS, &buf, &optlen);
+ err = getsockopt(fd, SOL_IP, IP_TOS, big_buf, &optlen);
if (err) {
log_err("Failed to call getsockopt(IP_TOS)");
goto err;
}
- if (buf.u8[0] != 0x08) {
- log_err("Unexpected getsockopt(IP_TOS) buf[0] 0x%02x != 0x08",
- buf.u8[0]);
+ if (*(int *)big_buf != 0x08) {
+ log_err("Unexpected getsockopt(IP_TOS) optval 0x%x != 0x08",
+ *(int *)big_buf);
goto err;
}
goto err;
}
+ /* IP_FREEBIND - BPF can't access optval past PAGE_SIZE */
+
+ optlen = getpagesize() * 2;
+ memset(big_buf, 0, optlen);
+
+ err = setsockopt(fd, SOL_IP, IP_FREEBIND, big_buf, optlen);
+ if (err != 0) {
+ log_err("Failed to call setsockopt, ret=%d", err);
+ goto err;
+ }
+
+ err = getsockopt(fd, SOL_IP, IP_FREEBIND, big_buf, &optlen);
+ if (err != 0) {
+ log_err("Failed to call getsockopt, ret=%d", err);
+ goto err;
+ }
+
+ if (optlen != 1 || *(__u8 *)big_buf != 0x55) {
+ log_err("Unexpected IP_FREEBIND getsockopt, optlen=%d, optval=0x%x",
+ optlen, *(__u8 *)big_buf);
+ }
+
/* SO_SNDBUF is overwritten */
buf.u32 = 0x01010101;
goto err;
}
+ free(big_buf);
close(fd);
return 0;
err:
+ free(big_buf);
close(fd);
return -1;
}
if (hystart_detect & HYSTART_DELAY) {
/* obtain the minimum delay seen over the sampled packets */
+ if (ca->curr_rtt > delay)
+ ca->curr_rtt = delay;
if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
- if (ca->curr_rtt > delay)
- ca->curr_rtt = delay;
-
ca->sample_cnt++;
} else {
if (ca->curr_rtt > ca->delay_min +
struct netlink_sock *sk;
} __attribute__((preserve_access_index));
-static inline struct inode *SOCK_INODE(struct socket *socket)
+static __attribute__((noinline)) struct inode *SOCK_INODE(struct socket *socket)
{
return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
}
e == (void *)20 && f == 21;
return 0;
}
+
+struct bpf_fentry_test_t {
+ struct bpf_fentry_test_t *a;
+};
+
+__u64 test7_result = 0;
+SEC("fentry/bpf_fentry_test7")
+int BPF_PROG(test7, struct bpf_fentry_test_t *arg)
+{
+ if (arg == 0)
+ test7_result = 1;
+ return 0;
+}
+
+__u64 test8_result = 0;
+SEC("fentry/bpf_fentry_test8")
+int BPF_PROG(test8, struct bpf_fentry_test_t *arg)
+{
+ if (arg->a == 0)
+ test8_result = 1;
+ return 0;
+}
e == (void *)20 && f == 21 && ret == 111;
return 0;
}
+
+struct bpf_fentry_test_t {
+	struct bpf_fentry_test_t *a;
+};
+
+__u64 test7_result = 0;
+SEC("fexit/bpf_fentry_test7")
+int BPF_PROG(test7, struct bpf_fentry_test_t *arg)
+{
+ if (arg == 0)
+ test7_result = 1;
+ return 0;
+}
+
+__u64 test8_result = 0;
+SEC("fexit/bpf_fentry_test8")
+int BPF_PROG(test8, struct bpf_fentry_test_t *arg)
+{
+ if (arg->a == 0)
+ test8_result = 1;
+ return 0;
+}
char _license[] SEC("license") = "GPL";
__u32 _version SEC("version") = 1;
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
#define SOL_CUSTOM 0xdeadbeef
struct sockopt_sk {
__u8 *optval = ctx->optval;
struct sockopt_sk *storage;
- if (ctx->level == SOL_IP && ctx->optname == IP_TOS)
+ if (ctx->level == SOL_IP && ctx->optname == IP_TOS) {
/* Not interested in SOL_IP:IP_TOS;
* let next BPF program in the cgroup chain or kernel
* handle it.
*/
+ ctx->optlen = 0; /* bypass optval>PAGE_SIZE */
return 1;
+ }
if (ctx->level == SOL_SOCKET && ctx->optname == SO_SNDBUF) {
/* Not interested in SOL_SOCKET:SO_SNDBUF;
return 1;
}
+ if (ctx->level == SOL_IP && ctx->optname == IP_FREEBIND) {
+ if (optval + 1 > optval_end)
+ return 0; /* EPERM, bounds check */
+
+ ctx->retval = 0; /* Reset system call return value to zero */
+
+ /* Always export 0x55 */
+ optval[0] = 0x55;
+ ctx->optlen = 1;
+
+ /* Userspace buffer is PAGE_SIZE * 2, but BPF
+ * program can only see the first PAGE_SIZE
+ * bytes of data.
+ */
+ if (optval_end - optval != PAGE_SIZE)
+ return 0; /* EPERM, unexpected data size */
+
+ return 1;
+ }
+
if (ctx->level != SOL_CUSTOM)
return 0; /* EPERM, deny everything except custom level */
__u8 *optval = ctx->optval;
struct sockopt_sk *storage;
- if (ctx->level == SOL_IP && ctx->optname == IP_TOS)
+ if (ctx->level == SOL_IP && ctx->optname == IP_TOS) {
/* Not interested in SOL_IP:IP_TOS;
* let next BPF program in the cgroup chain or kernel
* handle it.
*/
+ ctx->optlen = 0; /* bypass optval>PAGE_SIZE */
return 1;
+ }
if (ctx->level == SOL_SOCKET && ctx->optname == SO_SNDBUF) {
/* Overwrite SO_SNDBUF value */
return 1;
}
+ if (ctx->level == SOL_IP && ctx->optname == IP_FREEBIND) {
+ /* Original optlen is larger than PAGE_SIZE. */
+ if (ctx->optlen != PAGE_SIZE * 2)
+ return 0; /* EPERM, unexpected data size */
+
+ if (optval + 1 > optval_end)
+ return 0; /* EPERM, bounds check */
+
+ /* Make sure we can trim the buffer. */
+ optval[0] = 0;
+ ctx->optlen = 1;
+
+	/* Userspace buffer is PAGE_SIZE * 2, but BPF
+ * program can only see the first PAGE_SIZE
+ * bytes of data.
+ */
+ if (optval_end - optval != PAGE_SIZE)
+ return 0; /* EPERM, unexpected data size */
+
+ return 1;
+ }
+
if (ctx->level != SOL_CUSTOM)
return 0; /* EPERM, deny everything except custom level */
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
- __uint(max_entries, 2);
+ __uint(max_entries, 3);
__type(key, int);
__type(value, int);
} sock_skb_opts SEC(".maps");
SEC("sk_skb1")
int bpf_prog1(struct __sk_buff *skb)
{
+ int *f, two = 2;
+
+ f = bpf_map_lookup_elem(&sock_skb_opts, &two);
+ if (f && *f) {
+ return *f;
+ }
return skb->len;
}
/* valid program on DEVMAP entry via SEC name;
* has access to egress and ingress ifindex
*/
-SEC("xdp_devmap")
+SEC("xdp_devmap/map_prog")
int xdp_dummy_dm(struct xdp_md *ctx)
{
char fmt[] = "devmap redirect: dev %u -> dev %u len %u\n";
}
err = bpf_prog_detach(fd, BPF_SK_SKB_STREAM_PARSER);
- if (err) {
+ if (!err) {
printf("Failed empty parser prog detach\n");
goto out_sockmap;
}
err = bpf_prog_detach(fd, BPF_SK_SKB_STREAM_VERDICT);
- if (err) {
+ if (!err) {
printf("Failed empty verdict prog detach\n");
goto out_sockmap;
}
err = bpf_prog_detach(fd, BPF_SK_MSG_VERDICT);
- if (err) {
+ if (!err) {
printf("Failed empty msg verdict prog detach\n");
goto out_sockmap;
}
assert(status == 0);
}
- err = bpf_prog_detach(map_fd_rx, __MAX_BPF_ATTACH_TYPE);
+ err = bpf_prog_detach2(parse_prog, map_fd_rx, __MAX_BPF_ATTACH_TYPE);
if (!err) {
printf("Detached an invalid prog type.\n");
goto out_sockmap;
}
- err = bpf_prog_detach(map_fd_rx, BPF_SK_SKB_STREAM_PARSER);
+ err = bpf_prog_detach2(parse_prog, map_fd_rx, BPF_SK_SKB_STREAM_PARSER);
if (err) {
printf("Failed parser prog detach\n");
goto out_sockmap;
}
- err = bpf_prog_detach(map_fd_rx, BPF_SK_SKB_STREAM_VERDICT);
+ err = bpf_prog_detach2(verdict_prog, map_fd_rx, BPF_SK_SKB_STREAM_VERDICT);
if (err) {
printf("Failed parser prog detach\n");
goto out_sockmap;
int txmsg_ktls_skb_redir;
int ktls;
int peek_flag;
+int skb_use_parser;
static const struct option long_options[] = {
{"help", no_argument, NULL, 'h' },
txmsg_apply = txmsg_cork = 0;
txmsg_ingress = txmsg_redir_skb = 0;
txmsg_ktls_skb = txmsg_ktls_skb_drop = txmsg_ktls_skb_redir = 0;
+ skb_use_parser = 0;
}
static int test_start_subtest(const struct _test *t, struct sockmap_options *o)
}
}
+ if (skb_use_parser) {
+ i = 2;
+ err = bpf_map_update_elem(map_fd[7], &i, &skb_use_parser, BPF_ANY);
+ }
+
if (txmsg_drop)
options->drop_expected = true;
test_send(opt, cgrp);
}
+static void test_txmsg_ingress_parser(int cgrp, struct sockmap_options *opt)
+{
+ txmsg_pass = 1;
+ skb_use_parser = 512;
+ opt->iov_length = 256;
+ opt->iov_count = 1;
+ opt->rate = 2;
+ test_exec(cgrp, opt);
+}
+
char *map_names[] = {
"sock_map",
"sock_map_txmsg",
{"txmsg test pull-data", test_txmsg_pull},
{"txmsg test pop-data", test_txmsg_pop},
{"txmsg test push/pop data", test_txmsg_push_pop},
+	{"txmsg test ingress parser", test_txmsg_ingress_parser},
};
static int check_whitelist(struct _test *t, struct sockmap_options *opt)
ALL_TESTS="$ALL_TESTS 0009:150:1"
ALL_TESTS="$ALL_TESTS 0010:1:1"
ALL_TESTS="$ALL_TESTS 0011:1:1"
+ALL_TESTS="$ALL_TESTS 0012:1:1"
+ALL_TESTS="$ALL_TESTS 0013:1:1"
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
echo "$MODPROBE" > /proc/sys/kernel/modprobe
}
+kmod_check_visibility()
+{
+ local name="$1"
+ local cmd="$2"
+
+ modprobe $DEFAULT_KMOD_DRIVER
+
+ local priv=$(eval $cmd)
+ local unpriv=$(capsh --drop=CAP_SYSLOG -- -c "$cmd")
+
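+	# a privileged read should see a real address; the CAP_SYSLOG-dropped
+	# read should see a zeroed one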
+ if [ "$priv" = "$unpriv" ] || \
+ [ "${priv:0:3}" = "0x0" ] || \
+ [ "${unpriv:0:3}" != "0x0" ] ; then
+ echo "${FUNCNAME[0]}: FAIL, $name visible to unpriv: '$priv' vs '$unpriv'" >&2
+ exit 1
+ else
+ echo "${FUNCNAME[0]}: OK!"
+ fi
+}
+
+kmod_test_0012()
+{
+ kmod_check_visibility /proc/modules \
+ "grep '^${DEFAULT_KMOD_DRIVER}\b' /proc/modules | awk '{print \$NF}'"
+}
+
+kmod_test_0013()
+{
+ kmod_check_visibility '/sys/module/*/sections/*' \
+ "cat /sys/module/${DEFAULT_KMOD_DRIVER}/sections/.*text | head -n1"
+}
+
list_tests()
{
echo "Test ID list:"
echo "0009 x $(get_test_count 0009) - multithreaded - push kmod_concurrent over max_modprobes for get_fs_type()"
echo "0010 x $(get_test_count 0010) - test nonexistent modprobe path"
echo "0011 x $(get_test_count 0011) - test completely disabling module autoloading"
+ echo "0012 x $(get_test_count 0012) - test /proc/modules address visibility under CAP_SYSLOG"
+ echo "0013 x $(get_test_count 0013) - test /sys/module/*/sections/* visibility under CAP_SYSLOG"
}
usage()
static struct ksft_count ksft_cnt;
static unsigned int ksft_plan;
-static inline int ksft_test_num(void)
+static inline unsigned int ksft_test_num(void)
{
return ksft_cnt.ksft_pass + ksft_cnt.ksft_fail +
ksft_cnt.ksft_xfail + ksft_cnt.ksft_xpass +
run_cmd "$IP nexthop add id 86 via 2001:db8:91::2 dev veth1"
run_cmd "$IP ro add 2001:db8:101::1/128 nhid 81"
+ # rpfilter and default route
+ $IP nexthop flush >/dev/null 2>&1
+ run_cmd "ip netns exec me ip6tables -t mangle -I PREROUTING 1 -m rpfilter --invert -j DROP"
+ run_cmd "$IP nexthop add id 91 via 2001:db8:91::2 dev veth1"
+ run_cmd "$IP nexthop add id 92 via 2001:db8:92::2 dev veth3"
+ run_cmd "$IP nexthop add id 93 group 91/92"
+ run_cmd "$IP -6 ro add default nhid 91"
+ run_cmd "ip netns exec me ping -c1 -w1 2001:db8:101::1"
+ log_test $? 0 "Nexthop with default route and rpfilter"
+ run_cmd "$IP -6 ro replace default nhid 93"
+ run_cmd "ip netns exec me ping -c1 -w1 2001:db8:101::1"
+ log_test $? 0 "Nexthop with multipath default route and rpfilter"
+
# TO-DO:
# existing route with old nexthop; append route with new nexthop
# existing route with old nexthop; replace route with new
#include <inttypes.h>
#include <linux/net_tstamp.h>
#include <linux/errqueue.h>
+#include <linux/if_ether.h>
#include <linux/ipv6.h>
-#include <linux/tcp.h>
+#include <linux/udp.h>
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
{
char control[CMSG_SPACE(sizeof(struct sock_extended_err)) +
CMSG_SPACE(sizeof(struct sockaddr_in6))] = {0};
- char data[sizeof(struct ipv6hdr) +
- sizeof(struct tcphdr) + 1];
+ char data[sizeof(struct ethhdr) + sizeof(struct ipv6hdr) +
+ sizeof(struct udphdr) + 1];
struct sock_extended_err *err;
struct msghdr msg = {0};
struct iovec iov = {0};
msg.msg_controllen = sizeof(control);
while (1) {
+ const char *reason;
+
ret = recvmsg(fdt, &msg, MSG_ERRQUEUE);
if (ret == -1 && errno == EAGAIN)
break;
err = (struct sock_extended_err *)CMSG_DATA(cm);
if (err->ee_origin != SO_EE_ORIGIN_TXTIME)
error(1, 0, "errqueue: origin 0x%x\n", err->ee_origin);
- if (err->ee_code != ECANCELED)
- error(1, 0, "errqueue: code 0x%x\n", err->ee_code);
+
+ switch (err->ee_errno) {
+ case ECANCELED:
+ if (err->ee_code != SO_EE_CODE_TXTIME_MISSED)
+ error(1, 0, "errqueue: unknown ECANCELED %u\n",
+ err->ee_code);
+ reason = "missed txtime";
+ break;
+ case EINVAL:
+ if (err->ee_code != SO_EE_CODE_TXTIME_INVALID_PARAM)
+ error(1, 0, "errqueue: unknown EINVAL %u\n",
+ err->ee_code);
+ reason = "invalid txtime";
+ break;
+ default:
+ error(1, 0, "errqueue: errno %u code %u\n",
+ err->ee_errno, err->ee_code);
+	}
tstamp = ((int64_t) err->ee_data) << 32 | err->ee_info;
tstamp -= (int64_t) glob_tstart;
tstamp /= 1000 * 1000;
- fprintf(stderr, "send: pkt %c at %" PRId64 "ms dropped\n",
- data[ret - 1], tstamp);
+ fprintf(stderr, "send: pkt %c at %" PRId64 "ms dropped: %s\n",
+ data[ret - 1], tstamp, reason);
msg.msg_flags = 0;
msg.msg_controllen = sizeof(control);
TEST_PROGS := nft_trans_stress.sh nft_nat.sh bridge_brouter.sh \
conntrack_icmp_related.sh nft_flowtable.sh ipvs.sh \
- nft_concat_range.sh \
+ nft_concat_range.sh nft_conntrack_helper.sh \
nft_queue.sh
LDLIBS = -lmnl
--- /dev/null
+#!/bin/bash
+#
+# This tests connection tracking helper assignment:
+# 1. can attach ftp helper to a connection from nft ruleset.
+# 2. auto-assign still works.
+#
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+sfx=$(mktemp -u "XXXXXXXX")
+ns1="ns1-$sfx"
+ns2="ns2-$sfx"
+testipv6=1
+
+cleanup()
+{
+ ip netns del ${ns1}
+ ip netns del ${ns2}
+}
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without nft tool"
+ exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+conntrack -V > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without conntrack tool"
+ exit $ksft_skip
+fi
+
+which nc >/dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without netcat tool"
+ exit $ksft_skip
+fi
+
+trap cleanup EXIT
+
+ip netns add ${ns1}
+ip netns add ${ns2}
+
+ip link add veth0 netns ${ns1} type veth peer name veth0 netns ${ns2} > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: No virtual ethernet pair device support in kernel"
+ exit $ksft_skip
+fi
+
+ip -net ${ns1} link set lo up
+ip -net ${ns1} link set veth0 up
+
+ip -net ${ns2} link set lo up
+ip -net ${ns2} link set veth0 up
+
+ip -net ${ns1} addr add 10.0.1.1/24 dev veth0
+ip -net ${ns1} addr add dead:1::1/64 dev veth0
+
+ip -net ${ns2} addr add 10.0.1.2/24 dev veth0
+ip -net ${ns2} addr add dead:1::2/64 dev veth0
+
+load_ruleset_family() {
+ local family=$1
+ local ns=$2
+
+ip netns exec ${ns} nft -f - <<EOF
+table $family raw {
+ ct helper ftp {
+ type "ftp" protocol tcp
+ }
+ chain pre {
+ type filter hook prerouting priority 0; policy accept;
+ tcp dport 2121 ct helper set "ftp"
+ }
+ chain output {
+ type filter hook output priority 0; policy accept;
+ tcp dport 2121 ct helper set "ftp"
+ }
+}
+EOF
+ return $?
+}
+
+check_for_helper()
+{
+ local netns=$1
+ local message=$2
+ local port=$3
+
+ ip netns exec ${netns} conntrack -L -p tcp --dport $port 2> /dev/null |grep -q 'helper=ftp'
+	if [ $? -ne 0 ] ; then
+		echo "FAIL: ${netns} did not show attached helper $message" 1>&2
+		ret=1
+		return 1
+	fi
+
+ echo "PASS: ${netns} connection on port $port has ftp helper attached" 1>&2
+ return 0
+}
+
+test_helper()
+{
+ local port=$1
+ local msg=$2
+
+ sleep 3 | ip netns exec ${ns2} nc -w 2 -l -p $port > /dev/null &
+
+ sleep 1
+ sleep 1 | ip netns exec ${ns1} nc -w 2 10.0.1.2 $port > /dev/null &
+
+ check_for_helper "$ns1" "ip $msg" $port
+ check_for_helper "$ns2" "ip $msg" $port
+
+ wait
+
+ if [ $testipv6 -eq 0 ] ;then
+ return 0
+ fi
+
+ ip netns exec ${ns1} conntrack -F 2> /dev/null
+ ip netns exec ${ns2} conntrack -F 2> /dev/null
+
+ sleep 3 | ip netns exec ${ns2} nc -w 2 -6 -l -p $port > /dev/null &
+
+ sleep 1
+ sleep 1 | ip netns exec ${ns1} nc -w 2 -6 dead:1::2 $port > /dev/null &
+
+ check_for_helper "$ns1" "ipv6 $msg" $port
+ check_for_helper "$ns2" "ipv6 $msg" $port
+
+ wait
+}
+
+load_ruleset_family ip ${ns1}
+if [ $? -ne 0 ];then
+ echo "FAIL: ${ns1} cannot load ip ruleset" 1>&2
+ exit 1
+fi
+
+load_ruleset_family ip6 ${ns1}
+if [ $? -ne 0 ];then
+ echo "SKIP: ${ns1} cannot load ip6 ruleset" 1>&2
+ testipv6=0
+fi
+
+load_ruleset_family inet ${ns2}
+if [ $? -ne 0 ];then
+	echo "SKIP: ${ns2} cannot load inet ruleset" 1>&2
+ load_ruleset_family ip ${ns2}
+ if [ $? -ne 0 ];then
+ echo "FAIL: ${ns2} cannot load ip ruleset" 1>&2
+ exit 1
+ fi
+
+ if [ $testipv6 -eq 1 ] ;then
+ load_ruleset_family ip6 ${ns2}
+ if [ $? -ne 0 ];then
+ echo "FAIL: ${ns2} cannot load ip6 ruleset" 1>&2
+ exit 1
+ fi
+ fi
+fi
+
+test_helper 2121 "set via ruleset"
+ip netns exec ${ns1} sysctl -q 'net.netfilter.nf_conntrack_helper=1'
+ip netns exec ${ns2} sysctl -q 'net.netfilter.nf_conntrack_helper=1'
+test_helper 21 "auto-assign"
+
+exit $ret
return syscall(__NR_pidfd_getfd, pidfd, fd, flags);
}
+static inline int sys_memfd_create(const char *name, unsigned int flags)
+{
+ return syscall(__NR_memfd_create, name, flags);
+}
+
#endif /* __PIDFD_H */
return syscall(__NR_kcmp, pid1, pid2, type, idx1, idx2);
}
-static int sys_memfd_create(const char *name, unsigned int flags)
-{
- return syscall(__NR_memfd_create, name, flags);
-}
-
static int __child(int sk, int memfd)
{
int ret;
}
}
+TEST(setns_einval)
+{
+ int fd;
+
+ fd = sys_memfd_create("rostock", 0);
+ EXPECT_GT(fd, 0);
+
+ ASSERT_NE(setns(fd, 0), 0);
+ EXPECT_EQ(errno, EINVAL);
+ close(fd);
+}
+
TEST_HARNESS_MAIN
# The EBB handler is 64-bit code and everything links against it
CFLAGS += -m64
-TMPOUT = $(OUTPUT)/
+TMPOUT = $(OUTPUT)/TMPDIR/
# Toolchains may build PIE by default which breaks the assembly
no-pie-option := $(call try-run, echo 'int main() { return 0; }' | \
$(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -no-pie -x c - -o "$$TMP", -no-pie)
255
]
],
- "cmdUnderTest": "$TC action add action bpf bytecode '4,40 0 0 12,21 0 1 2054,6 0 0 262144,6 0 0 0' index 4294967296 cookie 12345",
+ "cmdUnderTest": "$TC action add action bpf bytecode '4,40 0 0 12,21 0 1 2054,6 0 0 262144,6 0 0 0' index 4294967296 cookie 123456",
"expExitCode": "255",
"verifyCmd": "$TC action ls action bpf",
- "matchPattern": "action order [0-9]*: bpf bytecode '4,40 0 0 12,21 0 1 2048,6 0 0 262144,6 0 0 0' default-action pipe.*cookie 12345",
+ "matchPattern": "action order [0-9]*: bpf bytecode '4,40 0 0 12,21 0 1 2048,6 0 0 262144,6 0 0 0' default-action pipe.*cookie 123456",
"matchCount": "0",
"teardown": [
"$TC action flush action bpf"
255
]
],
- "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action csum tcp continue index \\$i cookie aaabbbcccdddeee \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\"",
+ "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action csum tcp continue index \\$i cookie 123456789abcde \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\"",
"expExitCode": "0",
"verifyCmd": "$TC actions ls action csum",
"matchPattern": "^[ \t]+index [0-9]* ref",
1,
255
],
- "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action csum tcp continue index \\$i cookie aaabbbcccdddeee \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\""
+ "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action csum tcp continue index \\$i cookie 123456789abcde \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\""
],
"cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action csum index \\$i \\\"; args=\"\\$args\\$cmd\"; done && $TC actions del \\$args\"",
"expExitCode": "0",
"cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:00880022 index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action tunnel_key index 1",
- "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:00880022.*index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt[s]? 0102:80:00880022.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action tunnel_key"
"cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:00880022,0408:42:0040007611223344,0111:02:1020304011223344 index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action tunnel_key index 1",
- "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:00880022,0408:42:0040007611223344,0111:02:1020304011223344.*index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt[s]? 0102:80:00880022,0408:42:0040007611223344,0111:02:1020304011223344.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action tunnel_key"
"cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 824212:80:00880022 index 1",
"expExitCode": "255",
"verifyCmd": "$TC actions get action tunnel_key index 1",
- "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 824212:80:00880022.*index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt[s]? 824212:80:00880022.*index 1",
"matchCount": "0",
"teardown": [
"$TC actions flush action tunnel_key"
"cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:4224:00880022 index 1",
"expExitCode": "255",
"verifyCmd": "$TC actions get action tunnel_key index 1",
- "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:4224:00880022.*index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt[s]? 0102:4224:00880022.*index 1",
"matchCount": "0",
"teardown": [
"$TC actions flush action tunnel_key"
"cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:4288 index 1",
"expExitCode": "255",
"verifyCmd": "$TC actions get action tunnel_key index 1",
- "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:4288.*index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt[s]? 0102:80:4288.*index 1",
"matchCount": "0",
"teardown": [
"$TC actions flush action tunnel_key"
"cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:4288428822 index 1",
"expExitCode": "255",
"verifyCmd": "$TC actions get action tunnel_key index 1",
- "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:4288428822.*index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt[s]? 0102:80:4288428822.*index 1",
"matchCount": "0",
"teardown": [
"$TC actions flush action tunnel_key"
"cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:00880022,0408:42: index 1",
"expExitCode": "255",
"verifyCmd": "$TC actions get action tunnel_key index 1",
- "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:00880022,0408:42:.*index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt[s]? 0102:80:00880022,0408:42:.*index 1",
"matchCount": "0",
"teardown": [
"$TC actions flush action tunnel_key"
1,
255
],
- "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 nocsum id 1 index 1 cookie aabbccddeeff112233445566778800a"
+ "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 nocsum id 1 index 1 cookie 123456"
],
- "cmdUnderTest": "$TC actions replace action tunnel_key set src_ip 11.11.11.1 dst_ip 21.21.21.2 dst_port 3129 id 11 csum reclassify index 1 cookie a1b1c1d1",
+ "cmdUnderTest": "$TC actions replace action tunnel_key set src_ip 11.11.11.1 dst_ip 21.21.21.2 dst_port 3129 id 11 csum reclassify index 1 cookie 123456",
"expExitCode": "0",
"verifyCmd": "$TC actions get action tunnel_key index 1",
- "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 11.11.11.1.*dst_ip 21.21.21.2.*key_id 11.*dst_port 3129.*csum reclassify.*index 1.*cookie a1b1c1d1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 11.11.11.1.*dst_ip 21.21.21.2.*key_id 11.*dst_port 3129.*csum reclassify.*index 1.*cookie 123456",
"matchCount": "1",
"teardown": [
"$TC actions flush action tunnel_key"
-#!/bin/bash
+#!/bin/sh
# SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
-[ -f /dev/tpm0 ] || exit $ksft_skip
+[ -e /dev/tpm0 ] || exit $ksft_skip
-python -m unittest -v tpm2_tests.SmokeTest
-python -m unittest -v tpm2_tests.AsyncTest
-
-CLEAR_CMD=$(which tpm2_clear)
-if [ -n $CLEAR_CMD ]; then
- tpm2_clear -T device
-fi
+python3 -m unittest -v tpm2_tests.SmokeTest
+python3 -m unittest -v tpm2_tests.AsyncTest
-#!/bin/bash
+#!/bin/sh
# SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
-[ -f /dev/tpmrm0 ] || exit $ksft_skip
+[ -e /dev/tpmrm0 ] || exit $ksft_skip
-python -m unittest -v tpm2_tests.SpaceTest
+python3 -m unittest -v tpm2_tests.SpaceTest
class AuthCommand(object):
"""TPMS_AUTH_COMMAND"""
- def __init__(self, session_handle=TPM2_RS_PW, nonce='', session_attributes=0,
- hmac=''):
+ def __init__(self, session_handle=TPM2_RS_PW, nonce=bytes(),
+ session_attributes=0, hmac=bytes()):
self.session_handle = session_handle
self.nonce = nonce
self.session_attributes = session_attributes
self.hmac = hmac
- def __str__(self):
+ def __bytes__(self):
fmt = '>I H%us B H%us' % (len(self.nonce), len(self.hmac))
return struct.pack(fmt, self.session_handle, len(self.nonce),
self.nonce, self.session_attributes, len(self.hmac),
class SensitiveCreate(object):
"""TPMS_SENSITIVE_CREATE"""
- def __init__(self, user_auth='', data=''):
+ def __init__(self, user_auth=bytes(), data=bytes()):
self.user_auth = user_auth
self.data = data
- def __str__(self):
+ def __bytes__(self):
fmt = '>H%us H%us' % (len(self.user_auth), len(self.data))
return struct.pack(fmt, len(self.user_auth), self.user_auth,
len(self.data), self.data)
return '>HHIH%us%usH%us' % \
(len(self.auth_policy), len(self.parameters), len(self.unique))
- def __init__(self, object_type, name_alg, object_attributes, auth_policy='',
- parameters='', unique=''):
+ def __init__(self, object_type, name_alg, object_attributes,
+ auth_policy=bytes(), parameters=bytes(),
+ unique=bytes()):
self.object_type = object_type
self.name_alg = name_alg
self.object_attributes = object_attributes
self.parameters = parameters
self.unique = unique
- def __str__(self):
+ def __bytes__(self):
return struct.pack(self.__fmt(),
self.object_type,
self.name_alg,
def hex_dump(d):
-    d = [format(ord(x), '02x') for x in d]
+    d = [format(x, '02x') for x in d]
- d = [d[i: i + 16] for i in xrange(0, len(d), 16)]
+ d = [d[i: i + 16] for i in range(0, len(d), 16)]
d = [' '.join(x) for x in d]
d = os.linesep.join(d)
pcrsel_len = max((i >> 3) + 1, 3)
pcrsel = [0] * pcrsel_len
pcrsel[i >> 3] = 1 << (i & 7)
- pcrsel = ''.join(map(chr, pcrsel))
+        pcrsel = bytes(pcrsel)
fmt = '>HII IHB%us' % (pcrsel_len)
cmd = struct.pack(fmt,
TPM2_CC_PCR_EXTEND,
i,
len(auth_cmd),
- str(auth_cmd),
+ bytes(auth_cmd),
1, bank_alg, dig)
self.send_cmd(cmd)
TPM2_RH_NULL,
TPM2_RH_NULL,
16,
- '\0' * 16,
+ ('\0' * 16).encode(),
0,
session_type,
TPM2_ALG_NULL,
for i in pcrs:
pcr = self.read_pcr(i, bank_alg)
- if pcr == None:
+ if pcr is None:
return None
x += pcr
pcrsel = [0] * pcrsel_len
for i in pcrs:
pcrsel[i >> 3] |= 1 << (i & 7)
- pcrsel = ''.join(map(chr, pcrsel))
+        pcrsel = bytes(pcrsel)
fmt = '>HII IH%usIHB3s' % ds
cmd = struct.pack(fmt,
struct.calcsize(fmt),
TPM2_CC_POLICY_PCR,
handle,
- len(dig), str(dig),
+ len(dig),
+ bytes(dig),
1,
bank_alg,
pcrsel_len, pcrsel)
self.send_cmd(cmd)
- def create_root_key(self, auth_value = ''):
+ def create_root_key(self, auth_value = bytes()):
attributes = \
Public.FIXED_TPM | \
Public.FIXED_PARENT | \
TPM2_CC_CREATE_PRIMARY,
TPM2_RH_OWNER,
len(auth_cmd),
- str(auth_cmd),
+ bytes(auth_cmd),
len(sensitive),
- str(sensitive),
+ bytes(sensitive),
len(public),
- str(public),
+ bytes(public),
0, 0)
return struct.unpack('>I', self.send_cmd(cmd)[10:14])[0]
attributes = 0
if not policy_dig:
attributes |= Public.USER_WITH_AUTH
- policy_dig = ''
+ policy_dig = bytes()
auth_cmd = AuthCommand()
sensitive = SensitiveCreate(user_auth=auth_value, data=data)
TPM2_CC_CREATE,
parent_key,
len(auth_cmd),
- str(auth_cmd),
+ bytes(auth_cmd),
len(sensitive),
- str(sensitive),
+ bytes(sensitive),
len(public),
- str(public),
+ bytes(public),
0, 0)
rsp = self.send_cmd(cmd)
TPM2_CC_LOAD,
parent_key,
len(auth_cmd),
- str(auth_cmd),
+ bytes(auth_cmd),
blob)
data_handle = struct.unpack('>I', self.send_cmd(cmd)[10:14])[0]
TPM2_CC_UNSEAL,
data_handle,
len(auth_cmd),
- str(auth_cmd))
+ bytes(auth_cmd))
try:
rsp = self.send_cmd(cmd)
TPM2_CC_DICTIONARY_ATTACK_LOCK_RESET,
TPM2_RH_LOCKOUT,
len(auth_cmd),
- str(auth_cmd))
+ bytes(auth_cmd))
self.send_cmd(cmd)
more_data, cap, cnt = struct.unpack('>BII', rsp[:9])
rsp = rsp[9:]
- for i in xrange(0, cnt):
+ for i in range(0, cnt):
handle = struct.unpack('>I', rsp[:4])[0]
handles.append(handle)
rsp = rsp[4:]
self.client.close()
def test_seal_with_auth(self):
- data = 'X' * 64
- auth = 'A' * 15
+ data = ('X' * 64).encode()
+ auth = ('A' * 15).encode()
blob = self.client.seal(self.root_key, data, auth, None)
result = self.client.unseal(self.root_key, blob, auth, None)
def test_seal_with_policy(self):
handle = self.client.start_auth_session(tpm2.TPM2_SE_TRIAL)
- data = 'X' * 64
- auth = 'A' * 15
+ data = ('X' * 64).encode()
+ auth = ('A' * 15).encode()
pcrs = [16]
try:
self.assertEqual(data, result)
def test_unseal_with_wrong_auth(self):
- data = 'X' * 64
- auth = 'A' * 20
+ data = ('X' * 64).encode()
+ auth = ('A' * 20).encode()
rc = 0
blob = self.client.seal(self.root_key, data, auth, None)
try:
- result = self.client.unseal(self.root_key, blob, auth[:-1] + 'B', None)
- except ProtocolError, e:
+ result = self.client.unseal(self.root_key, blob,
+ auth[:-1] + 'B'.encode(), None)
+ except ProtocolError as e:
rc = e.rc
self.assertEqual(rc, tpm2.TPM2_RC_AUTH_FAIL)
def test_unseal_with_wrong_policy(self):
handle = self.client.start_auth_session(tpm2.TPM2_SE_TRIAL)
- data = 'X' * 64
- auth = 'A' * 17
+ data = ('X' * 64).encode()
+ auth = ('A' * 17).encode()
pcrs = [16]
try:
# This should succeed.
ds = tpm2.get_digest_size(tpm2.TPM2_ALG_SHA1)
- self.client.extend_pcr(1, 'X' * ds)
+ self.client.extend_pcr(1, ('X' * ds).encode())
handle = self.client.start_auth_session(tpm2.TPM2_SE_POLICY)
# Then, extend a PCR that is part of the policy and try to unseal.
# This should fail.
- self.client.extend_pcr(16, 'X' * ds)
+ self.client.extend_pcr(16, ('X' * ds).encode())
handle = self.client.start_auth_session(tpm2.TPM2_SE_POLICY)
self.client.policy_password(handle)
result = self.client.unseal(self.root_key, blob, auth, handle)
- except ProtocolError, e:
+ except ProtocolError as e:
rc = e.rc
self.client.flush_context(handle)
except:
def test_seal_with_too_long_auth(self):
ds = tpm2.get_digest_size(tpm2.TPM2_ALG_SHA1)
- data = 'X' * 64
- auth = 'A' * (ds + 1)
+ data = ('X' * 64).encode()
+ auth = ('A' * (ds + 1)).encode()
rc = 0
try:
blob = self.client.seal(self.root_key, data, auth, None)
- except ProtocolError, e:
+ except ProtocolError as e:
rc = e.rc
self.assertEqual(rc, tpm2.TPM2_RC_SIZE)
0xDEADBEEF)
self.client.send_cmd(cmd)
- except IOError, e:
+ except IOError as e:
rejected = True
except:
pass
self.client.tpm.write(cmd)
rsp = self.client.tpm.read()
- except IOError, e:
+ except IOError as e:
# read the response
rsp = self.client.tpm.read()
rejected = True
rc = 0
try:
space1.send_cmd(cmd)
- except ProtocolError, e:
+ except ProtocolError as e:
rc = e.rc
self.assertEqual(rc, tpm2.TPM2_RC_COMMAND_CODE |
kill $ncat_pid
ip0 link del wg0
+# Ensure there aren't circular reference loops
+ip1 link add wg1 type wireguard
+ip2 link add wg2 type wireguard
+ip1 link set wg1 netns $netns2
+ip2 link set wg2 netns $netns1
+pp ip netns delete $netns1
+pp ip netns delete $netns2
+pp ip netns add $netns1
+pp ip netns add $netns2
+
+sleep 2 # Wait for cleanup and grace periods
declare -A objects
while read -t 0.1 -r line 2>/dev/null || [[ $? -ne 142 ]]; do
- [[ $line =~ .*(wg[0-9]+:\ [A-Z][a-z]+\ [0-9]+)\ .*(created|destroyed).* ]] || continue
+ [[ $line =~ .*(wg[0-9]+:\ [A-Z][a-z]+\ ?[0-9]*)\ .*(created|destroyed).* ]] || continue
objects["${BASH_REMATCH[1]}"]+="${BASH_REMATCH[2]}"
done < /dev/kmsg
alldeleted=1
EXTRA_CLEAN := $(BINARIES_32) $(BINARIES_64)
-$(BINARIES_32): $(OUTPUT)/%_32: %.c
+$(BINARIES_32): $(OUTPUT)/%_32: %.c helpers.h
$(CC) -m32 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl -lm
-$(BINARIES_64): $(OUTPUT)/%_64: %.c
+$(BINARIES_64): $(OUTPUT)/%_64: %.c helpers.h
$(CC) -m64 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl
# x86_64 users should be encouraged to install 32-bit libraries
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef __SELFTESTS_X86_HELPERS_H
+#define __SELFTESTS_X86_HELPERS_H
+
+#include <asm/processor-flags.h>
+
+static inline unsigned long get_eflags(void)
+{
+ unsigned long eflags;
+
+ asm volatile (
+#ifdef __x86_64__
+ "subq $128, %%rsp\n\t"
+ "pushfq\n\t"
+ "popq %0\n\t"
+ "addq $128, %%rsp"
+#else
+ "pushfl\n\t"
+ "popl %0"
+#endif
+ : "=r" (eflags) :: "memory");
+
+ return eflags;
+}
+
+static inline void set_eflags(unsigned long eflags)
+{
+ asm volatile (
+#ifdef __x86_64__
+ "subq $128, %%rsp\n\t"
+ "pushq %0\n\t"
+ "popfq\n\t"
+ "addq $128, %%rsp"
+#else
+ "pushl %0\n\t"
+ "popfl"
+#endif
+ :: "r" (eflags) : "flags", "memory");
+}
+
+#endif /* __SELFTESTS_X86_HELPERS_H */
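The subq/addq pair above exists so that pushfq/popq do not scribble on the 64-bit red zone below the stack pointer. As a usage sketch (not part of the patch), everything built on these helpers follows the same pattern: OR a user-controllable flag into EFLAGS, enter the kernel, and check that the flag survived:

#include <sys/syscall.h>
#include <unistd.h>

#include "helpers.h"

/* Nonzero if 'flag' survives a round trip through a syscall. */
static int flag_survives_syscall(unsigned long flag)
{
	set_eflags(get_eflags() | flag);
	syscall(SYS_getpid);
	return (get_eflags() & flag) == flag;
}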
#include <sys/ptrace.h>
#include <sys/user.h>
+#include "helpers.h"
+
static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
int flags)
{
# define INT80_CLOBBERS
#endif
-static unsigned long get_eflags(void)
-{
- unsigned long eflags;
- asm volatile ("pushf" WIDTH "\n\tpop" WIDTH " %0" : "=rm" (eflags));
- return eflags;
-}
-
-static void set_eflags(unsigned long eflags)
-{
- asm volatile ("push" WIDTH " %0\n\tpopf" WIDTH
- : : "rm" (eflags) : "flags");
-}
-
-#define X86_EFLAGS_TF (1UL << 8)
-
static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
{
ucontext_t *ctx = (ucontext_t*)ctx_void;
#include <setjmp.h>
#include <errno.h>
-#ifdef __x86_64__
-# define WIDTH "q"
-#else
-# define WIDTH "l"
-#endif
+#include "helpers.h"
/* Our sigaltstack scratch space. */
static unsigned char altstack_data[SIGSTKSZ];
-static unsigned long get_eflags(void)
-{
- unsigned long eflags;
- asm volatile ("pushf" WIDTH "\n\tpop" WIDTH " %0" : "=rm" (eflags));
- return eflags;
-}
-
-static void set_eflags(unsigned long eflags)
-{
- asm volatile ("push" WIDTH " %0\n\tpopf" WIDTH
- : : "rm" (eflags) : "flags");
-}
-
-#define X86_EFLAGS_TF (1UL << 8)
-
static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
int flags)
{
#include <signal.h>
#include <err.h>
#include <sys/syscall.h>
-#include <asm/processor-flags.h>
-#ifdef __x86_64__
-# define WIDTH "q"
-#else
-# define WIDTH "l"
-#endif
+#include "helpers.h"
static unsigned int nerrs;
-static unsigned long get_eflags(void)
-{
- unsigned long eflags;
- asm volatile ("pushf" WIDTH "\n\tpop" WIDTH " %0" : "=rm" (eflags));
- return eflags;
-}
-
-static void set_eflags(unsigned long eflags)
-{
- asm volatile ("push" WIDTH " %0\n\tpopf" WIDTH
- : : "rm" (eflags) : "flags");
-}
-
static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
int flags)
{
set_eflags(get_eflags() | extraflags);
syscall(SYS_getpid);
flags = get_eflags();
+	/* Restore sane flags so a stray DF/AC/TF cannot break what follows */
+	set_eflags(X86_EFLAGS_IF | X86_EFLAGS_FIXED);
if ((flags & extraflags) == extraflags) {
printf("[OK]\tThe syscall worked and flags are still set\n");
} else {
printf("[RUN]\tSet NT and issue a syscall\n");
do_it(X86_EFLAGS_NT);
+ printf("[RUN]\tSet AC and issue a syscall\n");
+ do_it(X86_EFLAGS_AC);
+
+ printf("[RUN]\tSet NT|AC and issue a syscall\n");
+ do_it(X86_EFLAGS_NT | X86_EFLAGS_AC);
+
/*
* Now try it again with TF set -- TF forces returns via IRET in all
* cases except non-ptregs-using 64-bit full fast path syscalls.
sethandler(SIGTRAP, sigtrap, 0);
+ printf("[RUN]\tSet TF and issue a syscall\n");
+ do_it(X86_EFLAGS_TF);
+
printf("[RUN]\tSet NT|TF and issue a syscall\n");
do_it(X86_EFLAGS_NT | X86_EFLAGS_TF);
+ printf("[RUN]\tSet AC|TF and issue a syscall\n");
+ do_it(X86_EFLAGS_AC | X86_EFLAGS_TF);
+
+ printf("[RUN]\tSet NT|AC|TF and issue a syscall\n");
+ do_it(X86_EFLAGS_NT | X86_EFLAGS_AC | X86_EFLAGS_TF);
+
+ /*
+ * Now try DF. This is evil and it's plausible that we will crash
+ * glibc, but glibc would have to do something rather surprising
+ * for this to happen.
+ */
+ printf("[RUN]\tSet DF and issue a syscall\n");
+ do_it(X86_EFLAGS_DF);
+
+ printf("[RUN]\tSet TF|DF and issue a syscall\n");
+ do_it(X86_EFLAGS_TF | X86_EFLAGS_DF);
+
return nerrs == 0 ? 0 : 1;
}
#include <setjmp.h>
#include <sys/uio.h>
+#include "helpers.h"
+
#ifdef __x86_64__
# define VSYS(x) (x)
#else
}
#ifdef __x86_64__
-#define X86_EFLAGS_TF (1UL << 8)
static volatile sig_atomic_t num_vsyscall_traps;
-static unsigned long get_eflags(void)
-{
- unsigned long eflags;
- asm volatile ("pushfq\n\tpopq %0" : "=rm" (eflags));
- return eflags;
-}
-
-static void set_eflags(unsigned long eflags)
-{
- asm volatile ("pushq %0\n\tpopfq" : : "rm" (eflags) : "flags");
-}
-
static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
{
ucontext_t *ctx = (ucontext_t *)ctx_void;
#include <features.h>
#include <stdio.h>
+#include "helpers.h"
+
#if defined(__GLIBC__) && __GLIBC__ == 2 && __GLIBC_MINOR__ < 16
int main()
err(1, "sigaction");
}
-#ifdef __x86_64__
-# define WIDTH "q"
-#else
-# define WIDTH "l"
-#endif
-
-static unsigned long get_eflags(void)
-{
- unsigned long eflags;
- asm volatile ("pushf" WIDTH "\n\tpop" WIDTH " %0" : "=rm" (eflags));
- return eflags;
-}
-
-static void set_eflags(unsigned long eflags)
-{
- asm volatile ("push" WIDTH " %0\n\tpopf" WIDTH
- : : "rm" (eflags) : "flags");
-}
-
-#define X86_EFLAGS_TF (1UL << 8)
-
static volatile sig_atomic_t nerrs;
static unsigned long sysinfo;
static bool got_sysinfo = false;
#include <linux/compiler.h>
#include <linux/types.h>
+#include <linux/list.h>
#include <linux/printk.h>
#include <linux/bug.h>
#include <errno.h>
(void) (&_min1 == &_min2); \
_min1 < _min2 ? _min1 : _min2; })
-/* TODO: empty stubs for now. Broken but enough for virtio_ring.c */
-#define list_add_tail(a, b) do {} while (0)
-#define list_del(a) do {} while (0)
-#define list_for_each_entry(a, b, c) while (0)
-/* end of stubs */
-
#endif /* KERNEL_H */
struct virtio_device {
struct device dev;
u64 features;
+ struct list_head vqs;
};
struct virtqueue {
- /* TODO: commented as list macros are empty stubs for now.
- * Broken but enough for virtio_ring.c
- * struct list_head list; */
+ struct list_head list;
void (*callback)(struct virtqueue *vq);
const char *name;
struct virtio_device *vdev;
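With the list stubs replaced by the real linux/list.h, tools/virtio now tracks virtqueues the same way the kernel does: each device carries a vqs list head and virtio_ring.c links every new queue into it. A sketch of walking that list, assuming the tools/virtio shim headers:

#include <stdio.h>
#include <linux/virtio.h>

/* Print the name of every virtqueue linked into the device. */
static void dump_vqs(struct virtio_device *vdev)
{
	struct virtqueue *vq;

	list_for_each_entry(vq, &vdev->vqs, list)
		printf("vq: %s\n", vq->name);
}

This is also why the harnesses below gain INIT_LIST_HEAD(&vdev.vqs) calls before creating queues; without it the first list_add_tail() would dereference garbage.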
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <getopt.h>
+#include <limits.h>
#include <string.h>
#include <poll.h>
#include <sys/eventfd.h>
#include <linux/virtio_ring.h>
#include "../../drivers/vhost/test.h"
+#define RANDOM_BATCH -1
+
/* Unused */
void *__kmalloc_fake, *__kfree_ignore_start, *__kfree_ignore_end;
struct vhost_memory *mem;
};
+static const struct vhost_vring_file no_backend = { .fd = -1 },
+ backend = { .fd = 1 };
+static const struct vhost_vring_state null_state = {};
+
bool vq_notify(struct virtqueue *vq)
{
struct vq_info *info = vq->priv;
assert(r >= 0);
}
+static void vq_reset(struct vq_info *info, int num, struct virtio_device *vdev)
+{
+ if (info->vq)
+ vring_del_virtqueue(info->vq);
+
+ memset(info->ring, 0, vring_size(num, 4096));
+ vring_init(&info->vring, num, info->ring, 4096);
+ info->vq = __vring_new_virtqueue(info->idx, info->vring, vdev, true,
+ false, vq_notify, vq_callback, "test");
+ assert(info->vq);
+ info->vq->priv = info;
+}
+
static void vq_info_add(struct vdev_info *dev, int num)
{
struct vq_info *info = &dev->vqs[dev->nvqs];
info->call = eventfd(0, EFD_NONBLOCK);
r = posix_memalign(&info->ring, 4096, vring_size(num, 4096));
assert(r >= 0);
- memset(info->ring, 0, vring_size(num, 4096));
- vring_init(&info->vring, num, info->ring, 4096);
- info->vq = vring_new_virtqueue(info->idx,
- info->vring.num, 4096, &dev->vdev,
- true, false, info->ring,
- vq_notify, vq_callback, "test");
- assert(info->vq);
- info->vq->priv = info;
+ vq_reset(info, num, &dev->vdev);
vhost_vq_setup(dev, info);
dev->fds[info->idx].fd = info->call;
dev->fds[info->idx].events = POLLIN;
int r;
memset(dev, 0, sizeof *dev);
dev->vdev.features = features;
+ INIT_LIST_HEAD(&dev->vdev.vqs);
dev->buf_size = 1024;
dev->buf = malloc(dev->buf_size);
assert(dev->buf);
}
static void run_test(struct vdev_info *dev, struct vq_info *vq,
- bool delayed, int bufs)
+ bool delayed, int batch, int reset_n, int bufs)
{
struct scatterlist sl;
- long started = 0, completed = 0;
- long completed_before;
+ long started = 0, completed = 0, next_reset = reset_n;
+ long completed_before, started_before;
int r, test = 1;
unsigned len;
long long spurious = 0;
+ const bool random_batch = batch == RANDOM_BATCH;
+
r = ioctl(dev->control, VHOST_TEST_RUN, &test);
assert(r >= 0);
+	if (!reset_n)
+		next_reset = INT_MAX;
+
for (;;) {
virtqueue_disable_cb(vq->vq);
completed_before = completed;
+ started_before = started;
do {
- if (started < bufs) {
+ const bool reset = completed > next_reset;
+ if (random_batch)
+ batch = (random() % vq->vring.num) + 1;
+
+ while (started < bufs &&
+ (started - completed) < batch) {
sg_init_one(&sl, dev->buf, dev->buf_size);
r = virtqueue_add_outbuf(vq->vq, &sl, 1,
dev->buf + started,
GFP_ATOMIC);
- if (likely(r == 0)) {
- ++started;
- if (unlikely(!virtqueue_kick(vq->vq)))
+ if (unlikely(r != 0)) {
+ if (r == -ENOSPC &&
+ started > started_before)
+ r = 0;
+ else
r = -1;
+ break;
}
- } else
+
+ ++started;
+
+ if (unlikely(!virtqueue_kick(vq->vq))) {
+ r = -1;
+ break;
+ }
+ }
+
+ if (started >= bufs)
r = -1;
+ if (reset) {
+ r = ioctl(dev->control, VHOST_TEST_SET_BACKEND,
+ &no_backend);
+ assert(!r);
+ }
+
/* Flush out completed bufs if any */
- if (virtqueue_get_buf(vq->vq, &len)) {
+ while (virtqueue_get_buf(vq->vq, &len)) {
++completed;
r = 0;
}
+ if (reset) {
+ struct vhost_vring_state s = { .index = 0 };
+
+ vq_reset(vq, vq->vring.num, &dev->vdev);
+
+ r = ioctl(dev->control, VHOST_GET_VRING_BASE,
+ &s);
+ assert(!r);
+
+			/* restart the ring from index 0 */
+			r = ioctl(dev->control, VHOST_SET_VRING_BASE,
+				  &null_state);
+ assert(!r);
+
+ r = ioctl(dev->control, VHOST_TEST_SET_BACKEND,
+ &backend);
+ assert(!r);
+
+ started = completed;
+ while (completed > next_reset)
+ next_reset += completed;
+ }
} while (r == 0);
- if (completed == completed_before)
+ if (completed == completed_before && started == started_before)
++spurious;
assert(completed <= bufs);
assert(started <= bufs);
test = 0;
r = ioctl(dev->control, VHOST_TEST_RUN, &test);
assert(r >= 0);
- fprintf(stderr, "spurious wakeups: 0x%llx\n", spurious);
+ fprintf(stderr,
+ "spurious wakeups: 0x%llx started=0x%lx completed=0x%lx\n",
+ spurious, started, completed);
}
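Easy to miss above: the completion path changed from if (virtqueue_get_buf(...)) to a while loop, i.e. reap everything that has completed rather than one buffer per pass. Factored out, the drain pattern is just this (a sketch that counts instead of consuming):

/* Reap every completed buffer; returns how many were drained. */
static long drain_completions(struct virtqueue *vq)
{
	unsigned int len;
	long n = 0;

	while (virtqueue_get_buf(vq, &len))
		n++;

	return n;
}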
const char optstring[] = "h";
.name = "no-delayed-interrupt",
.val = 'd',
},
+ {
+ .name = "batch",
+ .val = 'b',
+ .has_arg = required_argument,
+ },
+ {
+ .name = "reset",
+ .val = 'r',
+ .has_arg = optional_argument,
+ },
{
}
};
" [--no-event-idx]"
" [--no-virtio-1]"
" [--delayed-interrupt]"
+ " [--batch=random/N]"
+ " [--reset=N]"
"\n");
}
struct vdev_info dev;
unsigned long long features = (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
(1ULL << VIRTIO_RING_F_EVENT_IDX) | (1ULL << VIRTIO_F_VERSION_1);
+ long batch = 1, reset = 0;
int o;
bool delayed = false;
case 'D':
delayed = true;
break;
+ case 'b':
+			if (!strcmp(optarg, "random")) {
+ batch = RANDOM_BATCH;
+ } else {
+ batch = strtol(optarg, NULL, 10);
+ assert(batch > 0);
+				assert(batch <= INT_MAX);
+ }
+ break;
+ case 'r':
+ if (!optarg) {
+ reset = 1;
+ } else {
+ reset = strtol(optarg, NULL, 10);
+ assert(reset > 0);
+				assert(reset <= INT_MAX);
+ }
+ break;
default:
assert(0);
break;
done:
vdev_info_init(&dev, features);
vq_info_add(&dev, 256);
- run_test(&dev, &dev.vqs[0], delayed, 0x100000);
+ run_test(&dev, &dev.vqs[0], delayed, batch, reset, 0x100000);
return 0;
}
close(to_host[0]);
gvdev.vdev.features = features;
+ INIT_LIST_HEAD(&gvdev.vdev.vqs);
gvdev.to_host_fd = to_host[1];
gvdev.notifies = 0;
getrange = getrange_iov;
vdev.features = 0;
+ INIT_LIST_HEAD(&vdev.vqs);
while (argv[1]) {
if (strcmp(argv[1], "--indirect") == 0)
if (kvm_sigmask.len != sizeof(compat_sigset_t))
goto out;
r = -EFAULT;
- if (get_compat_sigset(&sigset, (void *)sigmask_arg->sigset))
+ if (get_compat_sigset(&sigset,
+ (compat_sigset_t __user *)sigmask_arg->sigset))
goto out;
r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
} else